        result = self._values.get("worker_system_disk_category")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def worker_system_disk_size(
        self,
    ) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
        '''
        :Property: workerSystemDiskSize: Worker disk system disk size, the unit is GiB. Default to 120.
        '''
        result = self._values.get("worker_system_disk_size")
        return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "RosKubernetesClusterProps(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )


class RosManagedEdgeKubernetesCluster(
    ros_cdk_core.RosResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-cs.RosManagedEdgeKubernetesCluster",
):
    '''A ROS template type: ``ALIYUN::CS::ManagedEdgeKubernetesCluster``.'''

    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "RosManagedEdgeKubernetesClusterProps",
        enable_resource_property_constraint: builtins.bool,
    ) -> None:
        '''Create a new ``ALIYUN::CS::ManagedEdgeKubernetesCluster``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param props: - resource properties.
        :param enable_resource_property_constraint: -
        '''
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(
        self,
        props: typing.Mapping[builtins.str, typing.Any],
    ) -> typing.Mapping[builtins.str, typing.Any]:
        '''
        :param props: -
        '''
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))

    @jsii.python.classproperty # type: ignore[misc]
    @jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
    def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
        '''The resource type name for this resource class.'''
        return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrClusterId")
    def attr_cluster_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: ClusterId: Cluster instance ID.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrClusterId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrDefaultUserKubeConfig")
    def attr_default_user_kube_config(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: DefaultUserKubeConfig: Default user kubernetes config which is used for configuring cluster credentials.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrDefaultUserKubeConfig"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrNodes")
    def attr_nodes(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: Nodes: The list of cluster nodes.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrNodes"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrPrivateUserKubConfig")
    def attr_private_user_kub_config(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: PrivateUserKubConfig: Private user kubernetes config which is used for configuring cluster credentials.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrPrivateUserKubConfig"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrScalingConfigurationId")
    def attr_scaling_configuration_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: ScalingConfigurationId: Scaling configuration id
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrScalingConfigurationId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrScalingGroupId")
    def attr_scaling_group_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: ScalingGroupId: Scaling group id
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrScalingGroupId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrScalingRuleId")
    def attr_scaling_rule_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: ScalingRuleId: Scaling rule id
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrScalingRuleId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrTaskId")
    def attr_task_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: TaskId: Task ID. Automatically assigned by the system, the user queries the task status.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrTaskId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrWorkerRamRoleName")
    def attr_worker_ram_role_name(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: WorkerRamRoleName: Worker ram role name.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrWorkerRamRoleName"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="rosProperties")
    def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="enableResourcePropertyConstraint")
    def enable_resource_property_constraint(self) -> builtins.bool:
        return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))

    @enable_resource_property_constraint.setter
    def enable_resource_property_constraint(self, value: builtins.bool) -> None:
        jsii.set(self, "enableResourcePropertyConstraint", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="name")
    def name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: name: The name of the cluster. The cluster name can use uppercase and lowercase letters, Chinese characters, numbers, and dashes.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "name"))

    @name.setter
    def name(self, value: typing.Union[builtins.str, ros_cdk_core.IResolvable]) -> None:
        jsii.set(self, "name", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="numOfNodes")
    def num_of_nodes(self) -> typing.Union[jsii.Number, ros_cdk_core.IResolvable]:
        '''
        :Property: numOfNodes: Number of worker nodes. The range is [0,300].
        '''
        return typing.cast(typing.Union[jsii.Number, ros_cdk_core.IResolvable], jsii.get(self, "numOfNodes"))

    @num_of_nodes.setter
    def num_of_nodes(
        self,
        value: typing.Union[jsii.Number, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "numOfNodes", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="addons")
    def addons(
        self,
    ) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosManagedEdgeKubernetesCluster.AddonsProperty"]]]]:
        '''
        :Property: addons: The add-ons to be installed for the cluster.
        '''
        return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosManagedEdgeKubernetesCluster.AddonsProperty"]]]], jsii.get(self, "addons"))

    @addons.setter
    def addons(
        self,
        value: typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosManagedEdgeKubernetesCluster.AddonsProperty"]]]],
    ) -> None:
        jsii.set(self, "addons", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="cloudMonitorFlags")
    def cloud_monitor_flags(
        self,
    ) -> typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]]:
        '''
        :Property:

        cloudMonitorFlags: Whether to install the cloud monitoring plugin:
        true: indicates installation
        false: Do not install
        Default to false
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]], jsii.get(self, "cloudMonitorFlags"))

    @cloud_monitor_flags.setter
    def cloud_monitor_flags(
        self,
        value: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "cloudMonitorFlags", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="clusterSpec")
    def cluster_spec(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property:

        clusterSpec: The edge managed cluster spec. Value:
        ack.pro.small: Professional hosting cluster, namely: "ACK Pro version cluster".
        ack.standard: Standard hosting cluster.
        Default value: ack.standard. The value can be empty. When it is empty, a standard managed cluster will be created.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "clusterSpec"))

    @cluster_spec.setter
    def cluster_spec(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "clusterSpec", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="containerCidr")
    def container_cidr(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: containerCidr: The container network segment cannot conflict with the VPC network segment. When the system is selected to automatically create a VPC, the network segment 172.16.0.0/16 is used by default.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "containerCidr"))

    @container_cidr.setter
    def container_cidr(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "containerCidr", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="disableRollback")
    def disable_rollback(
        self,
    ) -> typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]]:
        '''
        :Property:

        disableRollback: Whether the failure was rolled back:
        true: indicates that it fails to roll back
        false: rollback failed
        The default is true. If rollback fails, resources produced during the creation process will be released. False is not recommended.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]], jsii.get(self, "disableRollback"))

    @disable_rollback.setter
    def disable_rollback(
        self,
        value: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "disableRollback", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="endpointPublicAccess")
    def endpoint_public_access(
        self,
    ) -> typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]]:
        '''
        :Property:

        endpointPublicAccess: Whether to enable the public network API Server:
        true: which means that the public network API Server is open.
        false: If set to false, the API server on the public network will not be created, only the API server on the private network will be created. Default to true.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]], jsii.get(self, "endpointPublicAccess"))

    @endpoint_public_access.setter
    def endpoint_public_access(
        self,
        value: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "endpointPublicAccess", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="keyPair")
    def key_pair(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: keyPair: Key pair name. Specify one of KeyPair or LoginPassword.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "keyPair"))

    @key_pair.setter
    def key_pair(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "keyPair", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="loginPassword")
    def login_password(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: loginPassword: SSH login password. Password rules are 8-30 characters and contain three items (upper and lower case letters, numbers, and special symbols). Specify one of KeyPair or LoginPassword.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "loginPassword"))

    @login_password.setter
    def login_password(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "loginPassword", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="profile")
    def profile(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: profile: Edge cluster ID. The default value is Edge.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "profile"))

    @profile.setter
    def profile(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "profile", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="proxyMode")
    def proxy_mode(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: proxyMode: kube-proxy proxy mode, supports both iptables and ipvs modes. The default is iptables.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "proxyMode"))

    @proxy_mode.setter
    def proxy_mode(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "proxyMode", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="serviceCidr")
    def service_cidr(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: serviceCidr: The service network segment cannot conflict with the VPC network segment and the container network segment. When the system is selected to automatically create a VPC, the network segment 172.19.0.0/20 is used by default.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "serviceCidr"))

    @service_cidr.setter
    def service_cidr(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "serviceCidr", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="snatEntry")
    def snat_entry(
        self,
    ) -> typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]]:
        '''
        :Property:

        snatEntry: Whether to configure SNAT for the network.
        When a VPC can access the public network environment, set it to false.
        When an existing VPC cannot access the public network environment:
        When set to True, SNAT is configured and the public network environment can be accessed at this time.
        If set to false, it means that SNAT is not configured and the public network environment cannot be accessed at this time.
        Default to true.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]], jsii.get(self, "snatEntry"))

    @snat_entry.setter
    def snat_entry(
        self,
        value: typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "snatEntry", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="tags")
    def tags(
        self,
    )
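# ---------------------------------------------------------------------------
# A minimal usage sketch for the binding above (not part of the generated
# file): instantiating the construct inside a ROS CDK stack. The module
# aliases, the stack boilerplate, and the property values are illustrative
# assumptions; only the constructor signature
# (scope, id, props, enable_resource_property_constraint) comes from the
# generated class itself.
#
#     import ros_cdk_core as core
#     import ros_cdk_cs as cs
#
#     class EdgeClusterStack(core.Stack):
#         def __init__(self, scope, id, **kwargs):
#             super().__init__(scope, id, **kwargs)
#             cs.RosManagedEdgeKubernetesCluster(
#                 self, 'EdgeCluster',
#                 cs.RosManagedEdgeKubernetesClusterProps(
#                     name='my-edge-cluster',   # letters, numbers, dashes
#                     num_of_nodes=3,           # range is [0, 300]
#                 ),
#                 enable_resource_property_constraint=True,
#             )
# ---------------------------------------------------------------------------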
# Cabin's directory, the condensed ones are not
    # listed on METADATA.pb, so the check must FAIL, even if we do not
    # explicitly include them in the set of files to be checked:
    assert_results_contain(check(cabin_fonts),
                           FAIL, 'file-not-declared',
                           'with some font files not declared...')


def test_check_metadata_italic_style():
    """ METADATA.pb font.style "italic" matches font internals ? """
    from fontbakery.constants import MacStyle
    check = CheckTester(googlefonts_profile,
                        "com.google.fonts/check/metadata/italic_style")

    # Our reference Merriweather Italic is known to be good
    ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Italic.ttf"))
    assert_PASS(check(ttFont),
                'with a good font...')

    # now let's introduce issues on the FULL_FONT_NAME entries
    # to test the "bad-fullfont-name" codepath:
    for i, name in enumerate(ttFont['name'].names):
        if name.nameID == NameID.FULL_FONT_NAME:
            backup = name.string
            ttFont['name'].names[i].string = "BAD VALUE".encode(name.getEncoding())
            assert_results_contain(check(ttFont),
                                   FAIL, 'bad-fullfont-name',
                                   'with a bad NameID.FULL_FONT_NAME entry...')
            # and restore the good value:
            ttFont['name'].names[i].string = backup

    # And, finally, let's flip off that italic bit
    # and get a "bad-macstyle" FAIL (so much fun!):
    ttFont['head'].macStyle &= ~MacStyle.ITALIC
    assert_results_contain(check(ttFont),
                           FAIL, 'bad-macstyle',
                           'with bad macstyle bit value...')


def test_check_metadata_normal_style():
    """ METADATA.pb font.style "normal" matches font internals ? """
    check = CheckTester(googlefonts_profile,
                        "com.google.fonts/check/metadata/normal_style")
    from fontbakery.constants import MacStyle

    # This one is pretty similar to check/metadata/italic_style
    # You may want to take a quick look above...

    # Our reference Merriweather Regular is known to be good here.
    ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
    assert_PASS(check(ttFont),
                'with a good font...')

    # now we sadistically insert brokenness into
    # each occurrence of the FONT_FAMILY_NAME nameid:
    for i, name in enumerate(ttFont['name'].names):
        if name.nameID == NameID.FONT_FAMILY_NAME:
            backup = name.string
            ttFont['name'].names[i].string = "Merriweather-Italic".encode(name.getEncoding())
            assert_results_contain(check(ttFont),
                                   FAIL, 'familyname-italic',
                                   'with a non-italic font that has a "-Italic" in FONT_FAMILY_NAME...')
            # and restore the good value:
            ttFont['name'].names[i].string = backup

    # now let's do the same with
    # occurrences of the FULL_FONT_NAME nameid:
    for i, name in enumerate(ttFont['name'].names):
        if name.nameID == NameID.FULL_FONT_NAME:
            backup = name.string
            ttFont['name'].names[i].string = "Merriweather-Italic".encode(name.getEncoding())
            assert_results_contain(check(ttFont),
                                   FAIL, 'fullfont-italic',
                                   'with a non-italic font that has a "-Italic" in FULL_FONT_NAME...')
            # and restore the good value:
            ttFont['name'].names[i].string = backup

    # And, finally, again, we flip a bit and...
    #
    # Note: This time the boolean logic is quite the opposite in comparison
    # to the test for com.google.fonts/check/metadata/italic_style above.
    # Here we have to set the bit back to 1 to get a wrongful "this font is an italic" setting:
    ttFont['head'].macStyle |= MacStyle.ITALIC
    assert_results_contain(check(ttFont),
                           FAIL, 'bad-macstyle',
                           'with bad macstyle bit value...')


def test_check_metadata_nameid_family_and_full_names():
    """ METADATA.pb font.name and font.full_name fields match
        the values declared on the name table? """
    check = CheckTester(googlefonts_profile,
                        "com.google.fonts/check/metadata/nameid/family_and_full_names")

    # Our reference Merriweather Regular is known to be good here.
    ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
    assert_PASS(check(ttFont),
                'with a good font...')

    # There we go again!
    # Breaking FULL_FONT_NAME entries one by one:
    for i, name in enumerate(ttFont['name'].names):
        if name.nameID == NameID.FULL_FONT_NAME:
            backup = name.string
            ttFont['name'].names[i].string = "This is utterly wrong!".encode(name.getEncoding())
            assert_results_contain(check(ttFont),
                                   FAIL, 'fullname-mismatch',
                                   'with a METADATA.pb / FULL_FONT_NAME mismatch...')
            # and restore the good value:
            ttFont['name'].names[i].string = backup

    # And then we do the same with FONT_FAMILY_NAME entries:
    for i, name in enumerate(ttFont['name'].names):
        if name.nameID == NameID.FONT_FAMILY_NAME:
            backup = name.string
            ttFont['name'].names[i].string = ("I'm listening to"
                                              " The Players with Hiromasa Suzuki - Galaxy (1979)").encode(name.getEncoding())
            assert_results_contain(check(ttFont),
                                   FAIL, 'familyname-mismatch',
                                   'with a METADATA.pb / FONT_FAMILY_NAME mismatch...')
            # and restore the good value:
            ttFont['name'].names[i].string = backup


def test_check_metadata_fontname_not_camel_cased():
    """ METADATA.pb: Check if fontname is not camel cased. """
    check = CheckTester(googlefonts_profile,
                        "com.google.fonts/check/metadata/fontname_not_camel_cased")

    # Our reference Cabin Regular is known to be good
    font = TEST_FILE("cabin/Cabin-Regular.ttf")
    assert_PASS(check(font),
                'with a good font...')

    # Then we FAIL with a CamelCased name:
    md = check["font_metadata"]
    md.name = "GollyGhost"
    assert_results_contain(check(font, {"font_metadata": md}),
                           FAIL, 'camelcase',
                           'with a bad font name (CamelCased)...')

    # And we also make sure the check PASSes with a few known good names:
    for good_name in ["VT323", "PT Sans", "Amatic SC"]:
        md.name = good_name
        assert_PASS(check(font, {"font_metadata": md}),
                    f'with a good font name "{good_name}"...')


def test_check_metadata_match_name_familyname():
    """ METADATA.pb: Check font name is the same as family name. """
    check = CheckTester(googlefonts_profile,
                        "com.google.fonts/check/metadata/match_name_familyname")

    # Our reference Cabin Regular is known to be good
    font = TEST_FILE("cabin/Cabin-Regular.ttf")
    assert_PASS(check(font),
                'with a good font...')

    # Then we FAIL with mismatching names:
    family_md = check["family_metadata"]
    font_md = check["font_metadata"]
    family_md.name = "Some Fontname"
    font_md.name = "Something Else"
    assert_results_contain(check(font, {"family_metadata": family_md,
                                        "font_metadata": font_md}),
                           FAIL, 'mismatch',
                           'with bad font/family name metadata...')


def test_check_metadata_canonical_weight_value():
    """ METADATA.pb: Check that font weight has a canonical value. """
    check = CheckTester(googlefonts_profile,
                        "com.google.fonts/check/metadata/canonical_weight_value")

    font = TEST_FILE("cabin/Cabin-Regular.ttf")
    check(font)
    md = check["font_metadata"]

    for w in [100, 200, 300, 400, 500, 600, 700, 800, 900]:
        md.weight = w
        assert_PASS(check(font, {"font_metadata": md}),
                    f'with a good weight value ({w})...')

    for w in [150, 250, 350, 450, 550, 650, 750, 850]:
        md.weight = w
        assert_results_contain(check(font, {"font_metadata": md}),
                               FAIL, 'bad-weight',
                               f'with a bad weight value ({w})...')


def test_check_metadata_os2_weightclass():
    """ Checking OS/2 usWeightClass matches weight specified at METADATA.pb """
    check = CheckTester(googlefonts_profile,
                        "com.google.fonts/check/metadata/os2_weightclass")

    # === test cases for Variable Fonts ===
    # Our reference Jura is known to be good
    ttFont = TTFont(TEST_FILE("varfont/jura/Jura[wght].ttf"))
    assert_PASS(check(ttFont),
                'with a good metadata...')

    # Should report if a bad weight value is found though:
    md = check["font_metadata"]
    good_value = md.weight
    bad_value = good_value + 100
    md.weight = bad_value
    assert_results_contain(check(ttFont, {"font_metadata": md}),
                           FAIL, 'mismatch',
                           'with a bad metadata...')

    ttFont = TTFont(TEST_FILE("leaguegothic-vf/LeagueGothic[wdth].ttf"))
    assert_PASS(check(ttFont),
                'with a good VF that lacks a "wght" axis....')
    # See: https://github.com/googlefonts/fontbakery/issues/3529

    # === test cases for Static Fonts ===
    # Our reference Montserrat family is a good 18-styles family:
    for fontfile in MONTSERRAT_RIBBI + MONTSERRAT_NON_RIBBI:
        ttFont = TTFont(fontfile)
        assert_PASS(check(ttFont),
                    f'with a good font ({fontfile})...')

        # but should report bad weight values:
        md = check["font_metadata"]
        good_value = md.weight
        bad_value = good_value + 50
        md.weight = bad_value
        assert_results_contain(check(ttFont, {"font_metadata": md}),
                               FAIL, 'mismatch',
                               f'with bad metadata for {fontfile}...')

        # If font is Thin or ExtraLight, ensure that this check can
        # accept both 100, 250 for Thin and 200, 275 for ExtraLight
        if "Thin" in fontfile:
            ttFont["OS/2"].usWeightClass = 100
            assert_PASS(check(ttFont),
                        f'with weightclass 100 on ({fontfile})...')

            ttFont["OS/2"].usWeightClass = 250
            assert_PASS(check(ttFont),
                        f'with weightclass 250 on ({fontfile})...')

        if "ExtraLight" in fontfile:
            ttFont["OS/2"].usWeightClass = 200
            assert_PASS(check(ttFont),
                        f'with weightClass 200 on ({fontfile})...')

            ttFont["OS/2"].usWeightClass = 275
            assert_PASS(check(ttFont),
                        f'with weightClass 275 on ({fontfile})...')


def NOT_IMPLEMENTED_test_check_metadata_match_weight_postscript():
    """ METADATA.pb: Metadata weight matches postScriptName. """
    # check = CheckTester(googlefonts_profile,
    #                     "com.google.fonts/check/metadata/match_weight_postscript")
    # TODO: Implement-me!
    #
    # code-paths:
    # - FAIL, "METADATA.pb: Font weight value is invalid."
    # - FAIL, "METADATA.pb: Mismatch between postScriptName and weight value."
    # - PASS


def NOT_IMPLEMENTED_test_check_metadata_canonical_style_names():
    """ METADATA.pb: Font styles are named canonically? """
    # check = CheckTester(googlefonts_profile,
    #                     "com.google.fonts/check/metadata/canonical_style_names")
    # TODO: Implement-me!
    #
    # code-paths:
    # - SKIP "Applicable only to font styles declared as 'italic' or 'normal' on METADATA.pb."
    # - FAIL, "italic" "Font style should be italic."
    # - FAIL, "normal" "Font style should be normal."
    # - PASS "Font styles are named canonically."


def test_check_unitsperem_strict():
    """ Stricter unitsPerEm criteria for Google Fonts. """
    check = CheckTester(googlefonts_profile,
                        "com.google.fonts/check/unitsperem_strict")

    ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))

    PASS_VALUES = [16, 32, 64, 128, 256, 512, 1024]  # Good for better performance on legacy renderers
    PASS_VALUES.extend([500, 1000])  # or common typical values
    PASS_VALUES.extend([2000, 2048])  # not so common, but still ok

    WARN_LARGE_VALUES = [2500, 4000, 4096]  # uncommon and large,
                                            # but we've seen legitimate cases such as the
                                            # Big Shoulders Family which uses 4000 since
                                            # it needs more details.

    # and finally the bad ones, including:
    FAIL_VALUES = [0, 1, 2, 4, 8, 15, 16385]  # simply invalid
    FAIL_VALUES.extend([100, 1500, 5000])  # suboptimal (uncommon and not power of two)
    FAIL_VALUES.extend([8192, 16384])  # and valid ones suggested by the opentype spec,
                                       # but too large, causing undesirable filesize bloat.

    for pass_value in PASS_VALUES:
        ttFont["head"].unitsPerEm = pass_value
        assert_PASS(check(ttFont),
                    f'with unitsPerEm = {pass_value}...')

    for warn_value in WARN_LARGE_VALUES:
        ttFont["head"].unitsPerEm = warn_value
        assert_results_contain(check(ttFont),
                               WARN, 'large-value',
                               f'with unitsPerEm = {warn_value}...')

    for fail_value in FAIL_VALUES:
        ttFont["head"].unitsPerEm = fail_value
        assert_results_contain(check(ttFont),
                               FAIL, 'bad-value',
                               f'with unitsPerEm = {fail_value}...')


def NOT_IMPLEMENTED_test_check_version_bump():
    """ Version number has increased since previous release on Google Fonts? """
    # check = CheckTester(googlefonts_profile,
    #                     "com.google.fonts/check/version_bump")
    # TODO: Implement-me!
    #
    # code-paths:
    # - FAIL, "Version number is equal to version on Google Fonts."
    # - FAIL, "Version number is
# MMLL/models/POM6/MLC/MLC.py
# -*- coding: utf-8 -*-
'''
Multiclass Logistic Classifier model under POM6
'''

__author__ = "<NAME>"
__date__ = "Jan 2021"

import sys
import time
import pickle

import numpy as np
import dill
from transitions import State
from transitions.extensions import GraphMachine
from pympler import asizeof  # asizeof.asizeof(my_object)

from MMLL.models.Common_to_all_POMs import Common_to_all_POMs


class Model():
    """
    Multiclass Logistic Classifier model.
    """
    def __init__(self):
        self.w_dict = {}
        self.classes = None
        self.is_trained = False
        self.supported_formats = ['pkl', 'onnx', 'pmml']
        t = time.time()
        seed = int((t - int(t)) * 10000)
        np.random.seed(seed=seed)

    def sigm(self, x):
        """
        Computes the sigmoid function

        Parameters
        ----------
        x: float
            input value

        Returns
        -------
        sigm(x): float
        """
        return 1 / (1 + np.exp(-x))

    def predict(self, X):
        """
        Predicts outputs given the inputs

        Parameters
        ----------
        X: ndarray
            Matrix with the input values

        Returns
        -------
        prediction_values: ndarray
        """
        X_b = np.hstack((np.ones((X.shape[0], 1)), X.astype(float)))
        preds_dict = {}
        NCLA = len(self.classes)
        NP = X_b.shape[0]
        O = []
        for cla in self.classes:
            s = np.dot(X_b, self.w_dict[cla]).ravel()
            o = self.sigm(s)
            preds_dict.update({cla: o})
            O.append(o)

        O = np.array(O)
        winners = list(np.argmax(O, axis=0))
        o = [self.classes[pos] for pos in winners]
        return preds_dict, o

    def save(self, filename=None):
        """
        Saves the trained model to file. The valid file extensions are:
            - "pkl": saves the model as a Python3 pickle file
            - "onnx": saves the model using Open Neural Network Exchange format (ONNX)
            - "pmml": saves the model using Predictive Model Markup Language (PMML)

        Parameters
        ----------
        filename: string
            path+filename
        """
        if filename is None:
            print('=' * 80)
            print('Model Save Error: A valid filename must be provided, otherwise nothing is saved. The valid file extensions are:')
            print('\t - "pkl": saves the model as a Python3 pickle file')
            print('\t - "onnx": saves the model using Open Neural Network Exchange format (ONNX)')
            print('\t - "pmml": saves the model using Predictive Model Markup Language (PMML)')
            print('=' * 80)
        else:
            # Checking filename extension
            extension = filename.split('.')[-1]
            if extension not in self.supported_formats:
                print('=' * 80)
                print('Model Save Error: Unsupported format. The valid file extensions are:')
                print('\t - "pkl": saves the model as a Python3 pickle file')
                print('\t - "onnx": saves the model using Open Neural Network Exchange format (ONNX)')
                print('\t - "pmml": saves the model using Predictive Model Markup Language (PMML)')
                print('=' * 80)
            else:
                if not self.is_trained:
                    print('=' * 80)
                    print('Model Save Error: model not trained yet, nothing to save.')
                    print('=' * 80)
                else:
                    try:
                        if extension == 'pkl':
                            with open(filename, 'wb') as f:
                                pickle.dump(self, f)
                            print('=' * 80)
                            print('Model saved at %s in pickle format.' % filename)
                            print('=' * 80)
                        elif extension == 'onnx':
                            from sklearn import linear_model
                            from skl2onnx import convert_sklearn  # conda install -c conda-forge skl2onnx
                            from skl2onnx.common.data_types import FloatTensorType
                            export_model = linear_model.LogisticRegression(multi_class='ovr', solver='liblinear')
                            NI = self.w_dict[self.classes[0]][1:].ravel().shape[0]
                            X = np.random.normal(0, 1, (100, NI))
                            y = np.array((self.classes * 100)[0: 100]).ravel()
                            #y_ = np.copy(y)
                            #for i, cla in enumerate(self.classes):
                            #    y_[y_ == cla] = i
                            #y_ = y_.astype(int)
                            export_model.fit(X, y)
                            W = []
                            for cla in self.classes:
                                W.append(self.w_dict[cla].ravel())
                            W = np.array(W)
                            export_model.coef_ = W[:, 1:]
                            export_model.intercept_ = W[:, 0].ravel()
                            # Convert into ONNX format
                            input_type = [('float_input', FloatTensorType([None, NI]))]
                            onnx_model = convert_sklearn(export_model, initial_types=input_type)
                            with open(filename, "wb") as f:
                                f.write(onnx_model.SerializeToString())
                            print('=' * 80)
                            print('Model saved at %s in ONNX format.' % filename)
                            print('=' * 80)
                        elif extension == 'pmml':
                            from sklearn import linear_model
                            export_model = linear_model.LogisticRegression(multi_class='ovr', solver='liblinear')
                            NI = self.w_dict[self.classes[0]][1:].ravel().shape[0]
                            X = np.random.normal(0, 1, (100, NI))
                            y = np.array((self.classes * 100)[0: 100]).ravel()
                            #y_ = np.copy(y)
                            #for i, cla in enumerate(self.classes):
                            #    y_[y_ == cla] = i
                            #y_ = y_.astype(int)
                            export_model.fit(X, y)
                            W = []
                            for cla in self.classes:
                                W.append(self.w_dict[cla].ravel())
                            W = np.array(W)
                            export_model.coef_ = W[:, 1:]
                            export_model.intercept_ = W[:, 0].ravel()
                            from sklearn2pmml import sklearn2pmml  # pip install git+https://github.com/jpmml/sklearn2pmml.git
                            from sklearn2pmml.pipeline import PMMLPipeline
                            pipeline = PMMLPipeline([("classifier", export_model)])
                            sklearn2pmml(pipeline, filename, with_repr=True)
                            print('=' * 80)
                            print('Model saved at %s in PMML format.' % filename)
                            print('=' * 80)
                        else:
                            print('=' * 80)
                            print('Model Save Error: model cannot be saved at %s.' % filename)
                            print('=' * 80)
                    except:
                        print('=' * 80)
                        print('Model Save Error: model cannot be saved at %s, please check the provided path/filename.' % filename)
                        print('=' * 80)
                        raise


class MLC_Master(Common_to_all_POMs):
    """
    This class implements the Multiclass Logistic Classifier model, run at Master node.
    It inherits from Common_to_all_POMs.
    """

    def __init__(self, master_address, workers_addresses, model_type, comms, logger, verbose=False, **kwargs):
        """
        Create a :class:`MLC_Master` instance.

        Parameters
        ----------
        master_address: string
            address of the master node

        workers_addresses: list of strings
            list of the addresses of the workers

        comms: comms object instance
            object providing communications

        logger: class:`logging.Logger`
            logging object instance

        verbose: boolean
            indicates if messages are print or not on screen

        kwargs: Keyword arguments.
        """
        super().__init__()
        self.pom = 6
        self.model_type = model_type
        self.name = self.model_type + '_Master'         # Name
        self.master_address = master_address

        # Convert workers_addresses -> '0', '1', + send_to dict
        self.broadcast_addresses = workers_addresses
        self.Nworkers = len(workers_addresses)          # Nworkers
        self.workers_addresses = list(range(self.Nworkers))
        self.workers_addresses = [str(x) for x in self.workers_addresses]
        self.all_workers_addresses = [str(x) for x in self.workers_addresses]

        self.send_to = {}
        self.receive_from = {}
        for k in range(self.Nworkers):
            self.send_to.update({str(k): workers_addresses[k]})
            self.receive_from.update({workers_addresses[k]: str(k)})

        self.logger = logger                # logger
        self.comms = comms                  # comms lib
        self.state_dict = None              # State of the main script
        self.verbose = verbose              # print on screen when true
        self.NI = None
        self.model = Model()
        self.epsilon = 0.00000001           # to avoid log(0)
        self.momentum = 0

        self.state_dict = {}                # dictionary storing the execution state
        for k in range(0, self.Nworkers):
            self.state_dict.update({self.workers_addresses[k]: ''})
        # we extract the model_parameters as extra kwargs, to be all jointly processed
        try:
            kwargs.update(kwargs['model_parameters'])
            del kwargs['model_parameters']
        except Exception as err:
            pass
        self.process_kwargs(kwargs)
        self.create_FSM_master()
        self.FSMmaster.master_address = master_address
        self.message_counter = 100          # used to number the messages
        self.cryptonode_address = None
        self.newNI_dict = {}
        self.train_data_is_ready = False
        self.grady_dict = {}
        self.s_dict = {}
        self.grads_dict = {}
        self.Ztr_dict = {}
        self.NPtr_dict = {}
        t = time.time()
        seed = int((t - int(t)) * 10000)
        np.random.seed(seed=seed)

        try:
            if self.target_data_description['NT'] == 1:
                if self.target_data_description['output_type'][0]['type'] == 'cat':
                    self.classes = self.target_data_description['output_type'][0]['values']
                else:
                    self.display('Target values must be categorical (string)')
                    sys.exit()
            else:
                self.display('The case with more than one target is not covered yet.')
                sys.exit()
        except Exception as err:
            self.display('The target_data_description is not well defined, please check.', str(err))
            raise

    def create_FSM_master(self):
        """
        Creates a Finite State Machine to be run at the Master Node

        Parameters
        ----------
        None
        """
        self.display(self.name + ': creating FSM')

        states_master = [
            State(name='waiting_order', on_enter=['while_waiting_order']),
            State(name='update_tr_data', on_enter=['while_update_tr_data']),
            State(name='computing_XTw', on_enter=['while_computing_XTw']),
            State(name='computing_oi', on_enter=['while_computing_oi']),
            State(name='updating_w', on_enter=['while_updating_w']),
        ]

        transitions_master = [
            ['go_update_tr_data', 'waiting_order', 'update_tr_data'],
            ['go_waiting_order', 'update_tr_data', 'waiting_order'],
            ['go_computing_XTw', 'waiting_order', 'computing_XTw'],
            ['go_waiting_order', 'computing_XTw', 'waiting_order'],
            ['go_computing_oi', 'waiting_order', 'computing_oi'],
            ['go_waiting_order', 'computing_oi', 'waiting_order'],
            ['go_updating_w', 'waiting_order', 'updating_w'],
            ['go_waiting_order', 'updating_w', 'waiting_order'],
        ]

        class FSM_master(object):

            name = 'FSM_master'

            def while_waiting_order(self, MLmodel):
                MLmodel.display(MLmodel.name + ': WAITING for instructions...')
                return

            def while_update_tr_data(self, MLmodel):
                # Always all, only once
                try:
                    action = 'update_tr_data'
                    data = {}
                    packet = {'action': action, 'to': 'MLmodel', 'data': data, 'sender': MLmodel.master_address}

                    message_id = MLmodel.master_address + '_' + str(MLmodel.message_counter)
                    packet.update({'message_id': message_id})
                    MLmodel.message_counter += 1
                    size_bytes = asizeof.asizeof(dill.dumps(packet))
                    MLmodel.display('COMMS_MASTER_BROADCAST %s, id = %s, bytes=%s' % (action, message_id, str(size_bytes)), verbose=False)

                    MLmodel.comms.broadcast(packet)
                    MLmodel.display(MLmodel.name + ': broadcasted update_tr_data to all Workers')
                except Exception as err:
                    raise
                    '''
                    message = "ERROR: %s %s" % (str(err), str(type(err)))
                    MLmodel.display('\n ' + '='*50 + '\n' + message + '\n ' + '='*50 + '\n' )
                    MLmodel.display('ERROR AT while_update_tr_data')
                    import code
                    code.interact(local=locals())
                    '''
                return

            def while_computing_XTw(self, MLmodel):
                try:
                    MLmodel.display('PROC_MASTER_START', verbose=False)
                    action = 'computing_XTw'
                    MLmodel.ACxaxb_dict = {}
                    xaxbP_dict = {}

                    for cla in MLmodel.classes:
                        MLmodel.x = MLmodel.model.w_dict[cla].T
                        NItrain = MLmodel.x.shape[1]
                        K = int(NItrain / 2)
                        # Save
                        tmp_dict = {}
                        tmp_dict.update({'A': np.random.uniform(-10, 10, K).reshape((1, K))})
                        tmp_dict.update({'C': np.random.uniform(-10, 10, K).reshape((1, K))})
                        tmp_dict.update({'xa': MLmodel.x[:, 0:K]})
                        tmp_dict.update({'xb': MLmodel.x[:, K:]})
                        MLmodel.ACxaxb_dict.update({cla: tmp_dict})

                        # Send
                        #xa_ = MLmodel.xa + MLmodel.A
                        #xb_ = MLmodel.xb + MLmodel.C
                        #P = MLmodel.A + MLmodel.C
                        # warning, check the sum is nonzero (low prob...)
                        tmp_dict = {}
                        tmp_dict.update({'xa_': MLmodel.ACxaxb_dict[cla]['xa'] + MLmodel.ACxaxb_dict[cla]['A']})
                        tmp_dict.update({'xb_': MLmodel.ACxaxb_dict[cla]['xb'] + MLmodel.ACxaxb_dict[cla]['C']})
                        tmp_dict.update({'P': MLmodel.ACxaxb_dict[cla]['A'] + MLmodel.ACxaxb_dict[cla]['C']})
                        xaxbP_dict.update({cla: tmp_dict})

                    MLmodel.display('PROC_MASTER_END', verbose=False)

                    # broadcasts xaxbP_dict
                    action = 'sending_xaxbP'
                    data = {'xaxbP_dict': xaxbP_dict, 'classes': MLmodel.classes}
                    del xaxbP_dict
                    packet
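# ---------------------------------------------------------------------------
# A self-contained sketch of the one-vs-rest decision rule that Model.predict
# implements above: one sigmoid score per class against a bias-extended
# input, winner takes all. The weights and data below are random stand-ins,
# not a trained MLC model.

import numpy as np

def ovr_predict(X, w_dict, classes):
    Xb = np.hstack((np.ones((X.shape[0], 1)), X))  # prepend the bias column
    # one sigmoid score per class, shape (n_classes, n_samples)
    scores = np.array([1 / (1 + np.exp(-Xb @ w_dict[c].ravel())) for c in classes])
    return [classes[i] for i in np.argmax(scores, axis=0)]

rng = np.random.default_rng(0)
classes = ['a', 'b', 'c']
w_dict = {c: rng.normal(size=(4, 1)) for c in classes}  # 3 inputs + bias each
print(ovr_predict(rng.normal(size=(5, 3)), w_dict, classes))
# ---------------------------------------------------------------------------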
        # If unimodal, run emcee
        medperiod = np.median(samples['period'])
        delta = (4*medperiod**2)/(2*np.pi*(np.max(x)-np.min(x)))
        deltap = medperiod**2/(2*np.pi*(np.max(x)-np.min(x)))
        rmsperiod = np.sqrt(np.mean((samples['period']-medperiod)**2))
        unimodal = False
        if rmsperiod < delta:
            print('Unimodal PDF, running emcee')
            unimodal = True
            medoffset = np.median(samples['offset'])
            rmsoffset = np.sqrt(np.mean((samples['offset']-medoffset)**2))

            # Set up the MCMC sampler
            ndim, nwalkers = 2, 10
            delta = [rmsperiod*5,rmsoffset*5]
            initpar = [bestperiod, bestoffset]
            pos = [initpar + delta*np.random.randn(ndim) for i in range(nwalkers)]
            emsampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability,
                                              args=args, kwargs=kwargs)
            steps = 100
            out = emsampler.run_mcmc(pos, steps)
            emsamples = emsampler.chain[:, int(steps/2):, :].reshape((-1, ndim))

            # The maximum likelihood parameters
            bestind = np.unravel_index(np.argmax(emsampler.lnprobability), emsampler.lnprobability.shape)
            pars_ml = emsampler.chain[bestind[0], bestind[1], :]

            labels = ['Period', 'Offset']
            for i in range(ndim):
                mcmc = np.percentile(emsamples[:, i], [16, 50, 84])
                q = np.diff(mcmc)
                print(r'%s = %.3f -%.3f/+%.3f' % (labels[i], pars_ml[i], q[0], q[1]))

            bestperiod = pars_ml[0]
            bestoffset = pars_ml[1]
            bestlnprob = emsampler.lnprobability[bestind[0], bestind[1]]
            print('Best period = %.4f' % bestperiod)
            print('Best offset = %.4f' % bestoffset)
            print('Best lnprob = %.4f' % bestlnprob)

        self._bestperiod = bestperiod
        self._bestoffset = bestoffset
        self._bestlnprob = bestlnprob
        self._unimodal = unimodal

        return samples, trials

    def plots(self, plotbase='sampler'):
        """ Make the plots."""
        x = self._args[0]
        y = self._args[1]
        err = self._args[2]
        ndata = len(x)
        bestperiod = self._bestperiod
        bestoffset = self._bestoffset
        bestlnprob = self._bestlnprob
        samples = self._samples
        trials = self._trials

        # Make plots
        matplotlib.use('Agg')
        fig, ax = plt.subplots(2, 1)
        fig.set_figheight(10)
        fig.set_figwidth(10)
        # 2D density map
        im, b, c, d = stats.binned_statistic_2d(trials['offset'], np.log10(trials['period']), trials['lnprob'], statistic='mean', bins=(250, 250))
        z1 = ax[0].imshow(im, aspect='auto', origin='lower', extent=(c[0], c[-1], b[0], b[-1]))
        ax[0].set_xlabel('log(Period)')
        ax[0].set_ylabel('Offset')
        plt.colorbar(z1, ax=ax[0], label='Mean ln(Prob)')
        # Period histogram
        hist, a, b = stats.binned_statistic(np.log10(trials['period']), trials['lnprob'], statistic='mean', bins=1000)
        ax[1].plot(a[0:-1], hist)
        ax[1].set_xlabel('log(Period)')
        ax[1].set_ylabel('Mean ln(Prob)')
        fig.savefig(plotbase+'_trials.png', bbox_inches='tight')
        plt.close(fig)
        print('Saving to '+plotbase+'_trials.png')

        # Too few samples for corner plot
        #sampdata = np.zeros((len(samples),3),float)
        #sampdata[:,0] = samples['period']
        #sampdata[:,1] = samples['offset']
        #sampdata[:,2] = samples['amplitude']
        #samplabels = ['Period','Offset','Amplitude']
        #fig = corner.corner(sampdata, labels=samplabels)
        #plt.savefig(plotbase+'_corner.png',bbox_inches='tight')
        #plt.close(fig)
        #print('Corner plot saved to '+plotbase+'_corner.png')

        # Plot offset vs. period color-coded by lnprob
        fig = plt.figure(figsize=(10, 5))
        plt.scatter(np.log10(samples['period']), samples['offset'], c=samples['lnprob'])
        plt.xlabel('log(Period)')
        plt.ylabel('Offset')
        plt.colorbar(label='ln(Prob)')
        fig.savefig(plotbase+'_samples.png', bbox_inches='tight')
        plt.close(fig)
        print('Saving to '+plotbase+'_samples.png')

        ## Plot best-fit model
        ## one panel per band, mag vs. phase
        #fig,ax = plt.subplots(nband,1)
        #fig.set_figheight(10)
        #fig.set_figwidth(10)
        #phase = (data['jd']/bestperiod + bestoffset) % 1
        #tmpl = np.interp(phase,template['phase'],template['mag'])
        #for i,b in enumerate(uband):
        #    ind = bandindex[b]
        #    tphase = (np.linspace(0,1,100)+bestoffset) % 1
        #    si = np.argsort(tphase)
        #    tphase = tphase[si]
        #    tmag = np.interp(tphase,template['phase'],template['mag'])
        #    model = tmag*ampratios[b]*bestamplitude+bestmeanmag[b]
        #    dd = np.hstack((data['mag'][ind],model))
        #    yr = [np.max(dd)+0.05*dln.valrange(dd),np.min(dd)-0.05*dln.valrange(dd)]
        #    ax[i].plot(tphase,model,c='blue',zorder=1)
        #    ax[i].errorbar(phase[ind],data['mag'][ind],yerr=data['err'][ind],c='gray',fmt='none',zorder=2)
        #    ax[i].scatter(phase[ind],data['mag'][ind],c='black',zorder=3)
        #    txt = 'Band '+str(b)
        #    if ampratios is not None:
        #        txt += ' Amp Ratio=%.3f' % ampratios[b]
        #    ax[i].annotate(txt,xy=(0.02,yr[1]+0.10*dln.valrange(dd)),ha='left')
        #    ax[i].annotate('Mean Mag=%.3f' % bestmeanmag[b],xy=(0.02,yr[1]+0.20*dln.valrange(dd)),ha='left')
        #    ax[i].set_xlabel('Phase')
        #    ax[i].set_ylabel('Magnitude')
        #    ax[i].set_xlim(0,1)
        #    ax[i].set_ylim(yr)
        #    if i==0:
        #        ax[i].set_title('Period=%.3f Offset=%.3f Amplitude=%.3f ln(Prob)=%.3f' % (bestperiod,bestoffset,bestamplitude,bestlnprob))
        #fig.savefig(plotbase+'_best.png',bbox_inches='tight')
        #plt.close(fig)
        #print('Saving to '+plotbase+'_best.png')


def sampler(catalog, template, pmin=0.1, pmax=None, ampratios=None, minerror=0.02,
            minsample=128, npoints=200000, plotbase='sampler'):
    """
    catalog : table
       Catalog of data points, just have mag, err, jd, band
    template : table
       Template information.
    pmin : float, optional
       Minimum period to search in days. Default is 0.1 days.
    pmax : float, optional
       Maximum period to search in days. Default is 2 x time baseline.
    ampratios : dict, optional
       Amplitude ratios. Keys should be the unique band names
       and values should be the amplitude ratios.
       If this is not input, then a ratio of 1.0 is used.
    minerror : float, optional
       Minimum error to use. Default is 0.02.
    minsample : int, optional
       Minimum number of samples to return. Default is 128.
    npoints : int, optional
       Number of points to use per loop. Default is 200,000.
    plotbase : str, optional
       Base name for output plots. Default is "sampler".
    """

    # MAKE THIS MORE GENERIC, GIVE INPUT model(), lnlikelihood(), and prior()
    # functions.

    t0 = time.time()

    # Create the sampling for Period (pmin to pmax) and phase offset (0-1)

    # Internal catalog
    data = Table(catalog).copy()
    data['wt'] = 1/np.maximum(data['err'], minerror)**2

    # Only keep bands with 2+ observations
    uband = np.unique(data['band'])
    badind = np.array([], int)
    for i, b in enumerate(uband):
        ind, = np.where(data['band'] == b)
        if len(ind) < 2:
            print('band '+str(b)+' only has '+str(len(ind))+' observations. Not using')
            badind = np.hstack((badind, ind))
    if len(badind) > 0:
        data.remove_rows(badind)
    ndata = len(data)
    print(str(ndata)+' data points')
    print('time baselines = %.2f' % (np.max(data['jd'])-np.min(data['jd'])))

    # Get band index
    uband = np.unique(data['band'])
    nband = len(uband)
    bandindex = {}
    for i, b in enumerate(uband):
        ind, = np.where(data['band'] == b)
        bandindex[b] = ind
    print(str(len(uband))+' bands = ', ', '.join(np.char.array(uband).astype(str)))

    # No amplitude ratios input
    if ampratios is None:
        ampratios = {}
        for b in uband:
            ampratios[b] = 1.0

    # Period range
    if pmax is None:
        pmax = (np.max(data['jd'])-np.min(data['jd']))*2
    lgminp = np.log10(pmin)
    lgmaxp = np.log10(pmax)
    print('Pmin = %.3f' % pmin)
    print('Pmax = %.3f' % pmax)

    # Pre-calculate some terms that are constant
    totwtdict = {}
    totwtydict = {}
    for b in uband:
        ind = bandindex[b]
        totwtdict[b] = np.sum(data['wt'][ind])
        totwtydict[b] = np.sum(data['wt'][ind] * data['mag'][ind])

    # Loop until we have enough samples
    nsamples = 0
    samplelist = []
    count = 0
    dtt = [('period', float), ('offset', float), ('amplitude', float), ('lnlikelihood', float), ('lnprob', float)]
    for b in uband:
        dtt += [('mag'+str(b), float)]
    trials = None
    while (nsamples < minsample):
        # Uniformly sample from log(pmin) to log(pmax)
        period = np.random.rand(npoints)*(lgmaxp-lgminp)+lgminp
        period = 10**period
        # Uniformly sample from 0 to 1
        offset = np.random.rand(npoints)

        # Get phase and template points
        phase = (data['jd'].reshape(-1, 1)/period.reshape(1, -1) + offset.reshape(1, -1)) % 1
        tmpl = np.interp(phase.ravel(), template['phase'], template['mag'])
        tmpl = tmpl.reshape(ndata, npoints)

        # -- Find best fitting values for linear parameters ---

        # Calculate amplitude
        # term1 = Sum of XY
        # term2 = Sum of X * Y / W
        # term3 = Sum of X^2
        # term4 = Sum of X * X / W
        # amplitude = (term1 - term2)/(term3 - term4)
        term1, term2, term3, term4 = 0, 0, 0, 0
        totwtxdict = {}
        for b in uband:
            ind = bandindex[b]
            totwtx1 = np.sum(data['wt'][ind].reshape(-1, 1) * tmpl[ind, :]*ampratios[b], axis=0)
            totwtxdict[b] = totwtx1
            totwtx2 = np.sum(data['wt'][ind].reshape(-1, 1) * (tmpl[ind, :]*ampratios[b])**2, axis=0)
            totwtxy = np.sum(data['wt'][ind].reshape(-1, 1) * tmpl[ind, :]*ampratios[b] * data['mag'][ind].reshape(-1, 1), axis=0)
            term1 += totwtxy
            term2 += totwtx1 * totwtydict[b] / totwtdict[b]
            term3 += totwtx2
            term4 += totwtx1**2 / totwtdict[b]
        amplitude = (term1-term2)/(term3-term4)

        # Calculate best mean magnitudes
        # mean mag = (Y - amplitude * X)/W
        meanmag = {}
        for b in uband:
            meanmag1 = (totwtydict[b] - amplitude * totwtxdict[b])/totwtdict[b]
            meanmag[b] = meanmag1

        # Calculate likelihood/chisq
        model = np.zeros((ndata, npoints), float)
        resid = np.zeros((ndata, npoints), float)
        wtresid = np.zeros((ndata, npoints), float)
        for b in uband:
            ind = bandindex[b]
            model1 = tmpl[ind, :]*ampratios[b]*amplitude+meanmag[b]
            model[ind, :] = model1
            resid[ind, :] = data['mag'][ind].reshape(-1, 1)-model1
            wtresid[ind, :] = resid[ind, :]**2 * data['wt'][ind].reshape(-1, 1)
        lnlikelihood = -0.5*np.sum(wtresid, axis=0)
        lnlikelihood += -0.5*np.sum(np.log(2*np.pi*data['err']**2))

        # Calculate ln probability = ln prior + ln likelihood
        # use flat prior, divide by area
        lnprior = np.ones(npoints, float) + np.log(1/(1.0*(lgmaxp-lgminp)))
        lnprob = lnprior + lnlikelihood

        # Save the information
        trials1 = np.zeros(npoints, dtype=dtt)
        trials1['period'] = period
        trials1['offset'] = offset
        trials1['amplitude'] = amplitude
        for k in meanmag.keys():
            trials1['mag'+str(k)] = meanmag[k]
        trials1['lnlikelihood'] = lnlikelihood
        trials1['lnprob'] = lnprob
        if trials is None:
            trials = trials1
        else:
            trials = np.hstack((trials, trials1))

        # REJECT NEGATIVE AMPLITUDES??

        # Rejection sampling
        draw = np.random.rand(npoints)
        #maxprob = np.max(np.exp(lnprob))
        #ind, = np.where(draw < np.exp(lnprob)/maxprob)
        ind, = np.where(draw < np.exp(lnprob))
        if len(ind) > 0:
            for i in ind:
                samp = {'period': period[i], 'offset': offset[i], 'amplitude': amplitude[i]}
                for k in meanmag.keys():
                    samp[k] = meanmag[k][i]
                samp['lnlikelihood'] = lnlikelihood[i]
                samp['lnprob'] = lnprob[i]
                samplelist.append(samp)
            nsamples += len(ind)
        print(count+1, nsamples)
        count += 1

    # Convert sample list to table
    dt = [('period', float), ('offset', float), ('amplitude', float)]
    for k in meanmag.keys():
        dt += [('mag'+str(k), float)]
    dt += [('lnlikelihood', float), ('lnprob', float)]
    samples = np.zeros(len(samplelist), dtype=dt)
    for i, samp in enumerate(samplelist):
        samples['period'][i] = samp['period']
        samples['offset'][i] = samp['offset']
        samples['amplitude'][i] = samp['amplitude']
        samples['lnlikelihood'][i] = samp['lnlikelihood']
        samples['lnprob'][i] = samp['lnprob']
        for k in meanmag.keys():
            samples['mag'+str(k)][i] = samp[k]

    # Convert to astropy tables
    samples = Table(samples)
    trials = Table(trials)

    best = np.argmax(trials['lnprob'])
    bestperiod = trials['period'][best]
    bestoffset = trials['offset'][best]
    bestlnprob = trials['lnprob'][best]
    bestamplitude = trials['amplitude'][best]
    bestmeanmag = {}
    for b in uband:
        bestmeanmag[b] = trials['mag'+str(b)][best]
    print('Best period = %.4f' % bestperiod)
    print('Best offset = %.4f' % bestoffset)
    print('Best amplitude = %.4f' % bestamplitude)
    print('Best lnprob = %.4f' % bestlnprob)

    ntrials = npoints*count
    print('ntrials = ', ntrials)

    # If unimodal, run emcee
    medperiod = np.median(samples['period'])
    delta = (4*medperiod**2)/(2*np.pi*(np.max(data['jd'])-np.min(data['jd'])))
    deltap = medperiod**2/(2*np.pi*(np.max(data['jd'])-np.min(data['jd'])))
    rmsperiod = np.sqrt(np.mean((samples['period']-medperiod)**2))
    if rmsperiod < delta:
        print('Unimodal PDF, running emcee')

        # Set up the MCMC sampler
        ndim, nwalkers = 2,
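# ---------------------------------------------------------------------------
# The while-loop above is classic rejection sampling: draw (period, offset)
# uniformly, evaluate ln(prob), and keep a draw whenever a uniform random
# number falls below exp(lnprob). A minimal sketch of that acceptance step on
# a toy one-dimensional target (the Gaussian log-probability is a stand-in
# for the light-curve model, and assumes lnprob <= 0 so exp(lnprob) <= 1):

import numpy as np

rng = np.random.default_rng(1)
proposals = rng.uniform(-5, 5, 100_000)        # uniform proposal draws
lnprob = -0.5 * proposals**2                   # toy log-probability, always <= 0
keep = rng.uniform(size=proposals.size) < np.exp(lnprob)
accepted = proposals[keep]                     # approximately N(0, 1) samples
print(len(accepted), accepted.mean(), accepted.std())
# ---------------------------------------------------------------------------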
<reponame>jeffmacinnes/pyneal """ Create Mask GUI: GUI to set inputs and option for creating a mask to use during Pyneal real-time analysis. Pyneal requires that all masks be in subject functional space; this tool helps create those. All of the settings are stored in a createMaskConfig.yaml file. This GUI reads that file to obtain initial settings, and then once the user hits 'submit' the file is overwritten with new settings """ import os from os.path import join from pathlib import Path import wx import yaml import nibabel as nib pynealColor = '#B04555' class CreateMaskFrame(wx.Frame): def __init__(self, parent, title='Create mask', settingsFile=None): super(CreateMaskFrame, self).__init__(parent, title=title) self.createMaskGUI_dir = os.path.dirname(os.path.abspath(__file__)) self.pynealDir = str(Path(os.path.abspath(__file__)).resolve().parents[3]) self.MNI_standardsDir = join(self.pynealDir, 'utils/MNI_templates') self.masksDir = join(self.pynealDir, 'utils/masks') # initialize all gui panels and settings self.settingsFile = settingsFile self.InitSettings() self.InitUI() def InitSettings(self): """ Initialize values for all settings """ defaultSettings = { 'subjFunc': ['', str], 'createFuncBrainMask': [True, bool], 'transformMaskToFunc': [False, bool], 'subjAnat': ['', str], 'skullStrip': [True, bool], 'MNI_standard': [join(self.MNI_standardsDir, 'MNI152_T1_1mm_brain.nii.gz'), str], 'MNI_mask': ['', str], 'outputPrefix': ['test', str] } # initialize dictionary that will eventually hold the new settings newSettings = {} # load the settingsFile, if it exists and is not empty if os.path.isfile(self.settingsFile) and os.path.getsize(self.settingsFile) > 0: # open the file, load all settings from the file into a dict with open(self.settingsFile, 'r') as ymlFile: loadedSettings = yaml.safe_load(ymlFile) # Go through all default settings, and see if there is # a loaded setting that should overwrite the default for k in defaultSettings.keys(): # does this key exist in the loaded settings if k in loadedSettings.keys(): loadedValue = loadedSettings[k] # does the dtype of the value match what is # specifed by the default? 
if type(loadedValue) == defaultSettings[k][1]: newSettings[k] = loadedValue else: # throw error and quit print('Problem loading the settings file!') print('{} setting expecting dtype {}, but got {}'.format( k, defaultSettings[k][1], type(loadedValue) )) sys.exit() # if the loaded file doesn't have this setting, take the default else: newSettings[k] = defaultSettings[k][0] # if no settings file exists, use the defaults else: for k in defaultSettings.keys(): newSettings[k] = defaultSettings[k][0] # set the loaded settings dict self.GUI_settings = newSettings def InitUI(self): """ Initialize all GUI windows and widgets """ # set defaults fonts font = self.GetFont() font.SetFaceName('Helvetica') font.SetPointSize(15) self.SetFont(font) self.headerFont = wx.Font(wx.FontInfo(20).FaceName('Helvetica').Bold().AntiAliased(True)) # standard width of entry widgets self.pathEntryW = 200 # create master panel self.createMaskPanel = wx.Panel(self, -1) self.createMaskPanel.SetBackgroundColour("white") # create top level vert sizer that we'll add other sub sizers to vbox = wx.BoxSizer(wx.VERTICAL) # create sub boxes logoSizer = self.createLogoBox() funcSizer = self.createFuncBox() wholebrainMaskSizer = self.createBrainMaskBox() mniMaskSizer = self.createMniMaskBox() submitSizer = self.createSubmitBox() # add the sizers holding each box to the top level sizer vbox.Add(logoSizer, flag=wx.ALL | wx.ALIGN_CENTRE_HORIZONTAL, border=5, proportion=0) vbox.Add(funcSizer, flag=wx.EXPAND | wx.ALL, border=5, proportion=0) vbox.Add(wx.StaticLine(self.createMaskPanel, -1, size=(380, -1)), flag=wx.ALL | wx.ALIGN_CENTRE_HORIZONTAL, border=10, proportion=0) vbox.Add(wholebrainMaskSizer, flag=wx.EXPAND | wx.ALL, border=5, proportion=0) vbox.Add(wx.StaticLine(self.createMaskPanel, -1, size=(380, -1)), flag=wx.ALL | wx.ALIGN_CENTRE_HORIZONTAL, border=10, proportion=0) vbox.Add(mniMaskSizer, flag=wx.EXPAND | wx.ALL, border=5, proportion=0) vbox.Add(wx.StaticLine(self.createMaskPanel, -1, size=(380, -1)), flag=wx.ALL | wx.ALIGN_CENTRE_HORIZONTAL, border=10, proportion=0) vbox.Add(submitSizer, flag=wx.EXPAND | wx.ALL, border=10, proportion=0) # update appearance of "transform MNI mask..." 
options self.updateTransformMaskOptsVisibility() # set the top level sizer to control the master panel self.createMaskPanel.SetSizer(vbox) vbox.Fit(self) # center the frame on the screen self.Centre() ### (VIEW) -- subbox creation methods ------------------------------------- def createLogoBox(self): """ draw the logo box at top of GUI """ logoSizer = wx.BoxSizer(wx.HORIZONTAL) # add the logo logoBMP = wx.Bitmap(join(self.createMaskGUI_dir, 'images/createMaskLogo.bmp')) logoImg = wx.StaticBitmap(self.createMaskPanel, -1, logoBMP) # add image to sizer for this box logoSizer.Add(logoImg, flag=wx.ALIGN_CENTRE_HORIZONTAL | wx.EXPAND, proportion=0) return logoSizer def createFuncBox(self): """ draw the box for inputing the 4D func image """ funcSizer = wx.GridBagSizer(vgap=5, hgap=5) # add text, entry, and change button funcLabel = wx.StaticText(self.createMaskPanel, -1, size=(80,-1), style=wx.ALIGN_RIGHT, label='4D FUNC:') self.funcEntry = wx.TextCtrl(self.createMaskPanel, -1, size=(self.pathEntryW, -1), style=wx.TE_LEFT, value=self.GUI_settings['subjFunc']) self.funcChangeBtn = wx.Button(self.createMaskPanel, -1, size=(70,-1), label='change') self.funcChangeBtn.Bind(wx.EVT_BUTTON, self.onChangeFunc) # add widgets to sizer funcSizer.Add(funcLabel, pos=(0,0), span=(1,1), border=5, flag=wx.ALL) funcSizer.Add(self.funcEntry, pos=(0,1), span=(1,2), border=5, flag=wx.EXPAND | wx.ALL) funcSizer.Add(self.funcChangeBtn, pos=(0,3), span=(1,1), border=5, flag=wx.ALL) funcSizer.AddGrowableCol(1,1) return funcSizer def createBrainMaskBox(self): """ draw the box with checkbox for whole brain mask """ brainMaskSizer = wx.GridBagSizer(vgap=5, hgap=5) # add checkbox and text emptySpace = wx.StaticText(self.createMaskPanel, -1, label=" ", size=(100, -1)) self.brainMaskCheckBox = wx.CheckBox(self.createMaskPanel, -1, style=wx.CHK_2STATE | wx.ALIGN_RIGHT, label='Create FUNC whole-brain mask') self.brainMaskCheckBox.SetValue(self.GUI_settings['createFuncBrainMask']) self.brainMaskCheckBox.Bind(wx.EVT_CHECKBOX, self.onBrainMaskToggled) brainMaskSizer.Add(emptySpace, pos=(0,0), span=(1,1), border=5, flag=wx.EXPAND | wx.ALL) brainMaskSizer.Add(self.brainMaskCheckBox, pos=(0,1), span=(1,1), border=5, flag=wx.ALIGN_RIGHT | wx.ALL) return brainMaskSizer def createMniMaskBox(self): """ draw box with all settings for MNI mask transformation """ mniMaskSizer = wx.GridBagSizer(vgap=5, hgap=5) # transform mni mask checkbox row self.transformMaskCheckBox = wx.CheckBox(self.createMaskPanel, -1, style=wx.CHK_2STATE | wx.ALIGN_CENTER_HORIZONTAL, label='Transform MNI mask to FUNC') self.transformMaskCheckBox.SetValue(self.GUI_settings['transformMaskToFunc']) self.transformMaskCheckBox.Bind(wx.EVT_CHECKBOX, self.onTransformMaskToggled) mniMaskSizer.Add(self.transformMaskCheckBox, pos=(0,1), span=(1,1), border=5, flag=wx.ALIGN_CENTER_HORIZONTAL | wx.ALL) # hi-res anat input row self.anatLabel = wx.StaticText(self.createMaskPanel, -1, size=(100,-1), style=wx.ALIGN_RIGHT, label='hi-res ANAT:') self.anatEntry = wx.TextCtrl(self.createMaskPanel, -1, size=(self.pathEntryW, -1), style=wx.TE_LEFT, value=self.GUI_settings['subjFunc']) self.anatChangeBtn = wx.Button(self.createMaskPanel, -1, size=(70,-1), label='change') self.anatChangeBtn.Bind(wx.EVT_BUTTON, self.onChangeAnat) mniMaskSizer.Add(self.anatLabel, pos=(1,0), span=(1,1), border=5, flag=wx.ALL) mniMaskSizer.Add(self.anatEntry, pos=(1,1), span=(1,2), border=5, flag=wx.EXPAND | wx.ALL) mniMaskSizer.Add(self.anatChangeBtn, pos=(1,3), span=(1,1), border=5, flag=wx.ALL) # skull strip 
row self.skullStripCheckBox = wx.CheckBox(self.createMaskPanel, -1, style=wx.CHK_2STATE) self.skullStripCheckBox.SetValue(self.GUI_settings['skullStrip']) self.skullStripCheckBox.Bind(wx.EVT_CHECKBOX, self.onSkullStripToggled) self.skullStripText = wx.StaticText(self.createMaskPanel, -1, style=wx.ALIGN_RIGHT, label='Skull strip?') mniMaskSizer.Add(self.skullStripText, pos=(2,1), span=(1,1), border=5, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.TOP) mniMaskSizer.Add(self.skullStripCheckBox, pos=(2,2), span=(1,1), border=5, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALL) # MNI standard row self.mniStdLabel = wx.StaticText(self.createMaskPanel, -1, size=(100,-1), style=wx.ALIGN_RIGHT, label='MNI standard:') self.mniStdEntry = wx.TextCtrl(self.createMaskPanel, -1, size=(self.pathEntryW, -1), style=wx.TE_LEFT, value=self.GUI_settings['MNI_standard']) self.mniStdChangeBtn = wx.Button(self.createMaskPanel, -1, size=(70,-1), label='change') self.mniStdChangeBtn.Bind(wx.EVT_BUTTON, self.onChangeMniStd) mniMaskSizer.Add(self.mniStdLabel, pos=(3,0), span=(1,1), border=5, flag=wx.ALL) mniMaskSizer.Add(self.mniStdEntry, pos=(3,1), span=(1,2), border=5, flag=wx.EXPAND | wx.ALL) mniMaskSizer.Add(self.mniStdChangeBtn, pos=(3,3), span=(1,1), border=5, flag=wx.ALL) # MNI mask row self.mniMaskLabel = wx.StaticText(self.createMaskPanel, -1, size=(100,-1), style=wx.ALIGN_RIGHT, label='MNI mask:') self.mniMaskEntry = wx.TextCtrl(self.createMaskPanel, -1, size=(self.pathEntryW, -1), style=wx.TE_LEFT, value=self.GUI_settings['MNI_mask']) self.mniMaskChangeBtn = wx.Button(self.createMaskPanel, -1, size=(70,-1), label='change') self.mniMaskChangeBtn.Bind(wx.EVT_BUTTON, self.onChangeMniMask) mniMaskSizer.Add(self.mniMaskLabel, pos=(4,0), span=(1,1), border=5, flag=wx.ALL) mniMaskSizer.Add(self.mniMaskEntry, pos=(4,1), span=(1,2), border=5, flag=wx.EXPAND | wx.ALL) mniMaskSizer.Add(self.mniMaskChangeBtn, pos=(4,3), span=(1,1), border=5, flag=wx.ALL) # Output prefix row self.outputPrefixLabel = wx.StaticText(self.createMaskPanel, -1, size=(100,-1), style=wx.ALIGN_RIGHT, label='Output Prefix:') self.outputPrefixEntry = wx.TextCtrl(self.createMaskPanel, -1, size=(180, -1), style=wx.TE_LEFT, value=self.GUI_settings['outputPrefix']) mniMaskSizer.Add(self.outputPrefixLabel, pos=(5,0), span=(1,1), border=5, flag=wx.ALL) mniMaskSizer.Add(self.outputPrefixEntry, pos=(5,1), span=(1,2), border=5, flag=wx.ALL) # add widgets to sizer mniMaskSizer.AddGrowableCol(1,1) return mniMaskSizer def createSubmitBox(self): """ create the submit button box """ submitSizer = wx.BoxSizer(wx.VERTICAL) # divider bmp = wx.Bitmap(join(self.createMaskGUI_dir, 'images/headerThin.bmp')) headerImg = wx.StaticBitmap(self.createMaskPanel, -1, bmp) submitSizer.Add(headerImg, proportion=0, flag=wx.ALIGN_CENTRE_HORIZONTAL | wx.TOP) btnSize = (200, 20) submitBtn = wx.Button(self.createMaskPanel, -1, label='Submit', size=btnSize) submitBtn.Bind(wx.EVT_BUTTON, self.onSubmit) submitSizer.Add(submitBtn, proportion=0, border=5, flag=wx.ALIGN_CENTRE_HORIZONTAL | wx.ALL) return submitSizer ### (CONTROL) -- Event Handling and User Interaction ---------------------- def onChangeFunc(self, e): """ open a file dialog for selecting the input 4D func file """ # get current value from GUI currentFunc = self.funcEntry.GetValue() if os.path.exists(currentFunc): startDir = os.path.split(currentFunc)[0] else: startDir = self.pynealDir wildcard = '*.gz' funcPath = self.openFileDlg(msg="Choose a 4D func nifti (.nii.gz) to use as reference", wildcard=wildcard, 
startDir=startDir) # update widgets if funcPath is not None: if funcPath != self.GUI_settings['subjFunc']: # set the new path self.GUI_settings['subjFunc'] = funcPath self.funcEntry.SetValue(self.GUI_settings['subjFunc']) def onBrainMaskToggled(self, e): self.GUI_settings['createFuncBrainMask'] = self.brainMaskCheckBox.GetValue() def onTransformMaskToggled(self, e): self.GUI_settings['transformMaskToFunc'] = self.transformMaskCheckBox.GetValue() # update appearance of transform mni mask options self.updateTransformMaskOptsVisibility() def onChangeAnat(self, e): """ open a file dialog for selecting the hi-res ANAT file """ # get current value from GUI currentAnat = self.anatEntry.GetValue() if os.path.exists(currentAnat): startDir = os.path.split(currentAnat)[0] else: startDir = self.pynealDir wildcard = '*.gz' anatPath = self.openFileDlg(msg="Choose hi-res ANAT (.nii.gz) for this subject", wildcard=wildcard, startDir=startDir) # update widgets if anatPath is not None: if anatPath != self.GUI_settings['subjAnat']: # set the new path self.GUI_settings['subjAnat'] = anatPath self.anatEntry.SetValue(self.GUI_settings['subjAnat']) def onSkullStripToggled(self, e): self.GUI_settings['skullStrip'] = self.skullStripCheckBox.GetValue() def onChangeMniStd(self, e): """ open a file dialog for selecting new MNI standard """ # get current value from GUI currentMniStd = self.mniStdEntry.GetValue() if os.path.exists(currentMniStd): startDir = os.path.split(currentMniStd)[0] else: startDir = self.MNI_standardsDir wildcard = '*.gz' mniStdPath = self.openFileDlg(msg="Choose the MNI standard (.nii.gz) with same dims/orientation as mask", wildcard=wildcard, startDir=startDir) # update widgets if mniStdPath is not None: if mniStdPath != self.GUI_settings['MNI_standard']: # set the new mask path self.GUI_settings['MNI_standard'] = mniStdPath self.mniStdEntry.SetValue(self.GUI_settings['MNI_standard']) def onChangeMniMask(self, e): """ open a file dialog for selecting new MNI mask """ # get current value from GUI currentMniMask = self.mniMaskEntry.GetValue() if os.path.exists(currentMniMask): startDir = os.path.split(currentMniMask)[0] else: startDir = self.masksDir wildcard = '*.gz' maskPath = self.openFileDlg(msg="Choose the MNI-space mask (.nii.gz)", wildcard=wildcard, startDir=startDir)
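# --- Added illustration (not from the original source) ---------------------
# Every handler above funnels through self.openFileDlg, whose definition falls
# outside this excerpt. A minimal sketch of what such a helper could look
# like, assuming a plain wx.FileDialog; the signature (msg, wildcard,
# startDir) is inferred from the call sites and the real method may differ.
def openFileDlg(self, msg='Choose a file', wildcard='*.gz', startDir=''):
    """ show a file-open dialog; return the chosen path, or None on cancel """
    path = None
    dlg = wx.FileDialog(self.createMaskPanel, message=msg,
                        defaultDir=startDir, wildcard=wildcard,
                        style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
    if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
    dlg.Destroy()
    return path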
#!/usr/bin/env python __author__ = '<NAME> <<EMAIL>>' from mock import Mock, sentinel, patch, MagicMock from gevent.event import Event from gevent import spawn from gevent.queue import Queue import Queue as PQueue import time from nose.plugins.attrib import attr from pyon.util.unit_test import PyonTestCase from pyon.core import bootstrap from pyon.core.bootstrap import CFG from pyon.net.channel import BaseChannel, SendChannel, RecvChannel, BidirClientChannel, SubscriberChannel, ChannelClosedError, ServerChannel, ChannelError, ChannelShutdownMessage, ListenChannel, PublisherChannel from pyon.net.transport import NameTrio, BaseTransport, AMQPTransport from pyon.util.int_test import IonIntegrationTestCase @attr('UNIT') class TestBaseChannel(PyonTestCase): def test_init(self): ch = BaseChannel() self.assertIsNone(ch._close_callback) ch = BaseChannel(close_callback=sentinel.closecb) self.assertEquals(ch._close_callback, sentinel.closecb) def test_declare_exchange_point(self): # make sure no xp param results in assertion ch = BaseChannel() self.assertRaises(AssertionError, ch._declare_exchange, None) transport = Mock() ch.on_channel_open(transport) ch._declare_exchange('hello') self.assertTrue(transport.declare_exchange_impl.called) self.assertIn('hello', transport.declare_exchange_impl.call_args[0]) self.assertIn('exchange_type', transport.declare_exchange_impl.call_args[1]) self.assertIn('durable', transport.declare_exchange_impl.call_args[1]) self.assertIn('auto_delete', transport.declare_exchange_impl.call_args[1]) @patch('pyon.net.channel.log', Mock()) # to avoid having to put it in signature def test_close(self): # without close callback transport = Mock() ch = BaseChannel() ch.on_channel_open(transport) ch._fsm.current_state = ch.S_ACTIVE ch.close() transport.close.assert_called_once_with() def test_close_with_callback(self): # with close callback cbmock = Mock() ch = BaseChannel(close_callback=cbmock) ch._fsm.current_state = ch.S_ACTIVE ch.close() cbmock.assert_called_once_with(ch) def test_on_channel_open(self): ch = BaseChannel() transport = Mock() ch.on_channel_open(transport) transport.add_on_close_callback.assert_called_once_with(ch.on_channel_close) self.assertEquals(ch._transport, transport) def test_on_channel_close(self): ch = BaseChannel() ch.on_channel_open(Mock()) ch._transport.channel_number = 1 ch.on_channel_close(ch, 0, 'hi') self.assertIsNone(ch._transport) def test_on_channel_closed_with_error_callback(self): ch = BaseChannel() ch.on_channel_open(Mock()) ch._transport.channel_number = 1 closemock = Mock() ch.set_closed_error_callback(closemock) ch.on_channel_close(ch, 1, 'hi') closemock.assert_called_once_with(ch, 1, 'hi') @patch('pyon.net.channel.log') def test_on_channel_close_with_error_in_error_callback(self, logmock): ch = BaseChannel() ch.on_channel_open(Mock()) ch._transport.channel_number = 1 closemock = Mock() closemock.side_effect = StandardError ch.set_closed_error_callback(closemock) ch.on_channel_close(ch, 1, 'hi') self.assertEquals(logmock.warn.call_count, 1) def test_get_channel_id(self): ch = BaseChannel() self.assertTrue(ch.get_channel_id() is None) ch.on_channel_open(Mock()) self.assertEquals(ch.get_channel_id(), ch._transport.channel_number) def test__ensure_transport(self): ch = BaseChannel() with self.assertRaises(ChannelError): with ch._ensure_transport(): pass @attr('UNIT') class TestSendChannel(PyonTestCase): def setUp(self): self.ch = SendChannel() def test_connect(self): self.ch.connect(NameTrio('xp', 'key'))
self.assertTrue(hasattr(self.ch._send_name, 'exchange')) self.assertTrue(hasattr(self.ch._send_name, 'queue')) self.assertEquals(self.ch._send_name.exchange, 'xp') self.assertEquals(self.ch._send_name.queue, 'key') self.assertEquals(self.ch._exchange, 'xp') def test_send(self): _sendmock = Mock() self.ch._send = _sendmock np = NameTrio('xp', 'key') self.ch.connect(np) self.ch.send('data', {'header':sentinel.headervalue}) _sendmock.assert_called_once_with(np, 'data', headers={'header':sentinel.headervalue}) def test__send(self): transport = Mock() transport.channel_number = sentinel.channel_number self.ch.on_channel_open(transport) # test sending in params self.ch._send(NameTrio('xp', 'namen'), 'daten') # get our props self.assertTrue(transport.publish_impl.called) self.assertIn('exchange', transport.publish_impl.call_args[1]) self.assertIn('routing_key', transport.publish_impl.call_args[1]) self.assertIn('body', transport.publish_impl.call_args[1]) self.assertIn('immediate', transport.publish_impl.call_args[1]) self.assertIn('mandatory', transport.publish_impl.call_args[1]) self.assertIn('properties', transport.publish_impl.call_args[1]) props = transport.publish_impl.call_args[1].get('properties') self.assertEquals(props, {}) # try another call to _send with a header self.ch._send(NameTrio('xp', 'namen'), 'daten', headers={'custom':'val'}) # make sure our property showed up props = transport.publish_impl.call_args[1].get('properties') self.assertIn('custom', props) self.assertEquals(props['custom'], 'val') @attr('UNIT') class TestRecvChannel(PyonTestCase): def setUp(self): self.ch = RecvChannel() def _create_channel(self): """ Test helper method, creates mocked up broker interaction. """ ch = RecvChannel() ch._declare_exchange = Mock() ch._declare_queue = Mock() ch._declare_queue.return_value = sentinel.anon_queue ch._bind = Mock() return ch def test_setup_listener(self): # sub in mocks for _declare_exchange, _declare_queue, _bind mxp = Mock() mdq = Mock() mdq.return_value = sentinel.anon_queue mb = Mock() def create_channel(): ch = RecvChannel() ch._declare_exchange = mxp ch._declare_queue = mdq ch._bind = mb return ch ch = create_channel() self.assertFalse(ch._setup_listener_called) # call setup listener, defining xp, queue, binding ch.setup_listener(NameTrio(sentinel.xp, sentinel.queue, sentinel.binding)) self.assertTrue(hasattr(ch, '_recv_name')) self.assertTrue(hasattr(ch._recv_name, 'exchange')) self.assertTrue(hasattr(ch._recv_name, 'queue')) self.assertEquals(ch._recv_name.exchange, sentinel.xp) self.assertEquals(ch._recv_name.queue, sentinel.queue) mxp.assert_called_once_with(sentinel.xp) mdq.assert_called_once_with(sentinel.queue) mb.assert_called_once_with(sentinel.binding) # you can only call setup_listener once self.assertTrue(ch._setup_listener_called) # calling it again does nothing, does not touch anything ch.setup_listener(NameTrio(sentinel.xp2, sentinel.queue2)) self.assertTrue(hasattr(ch._recv_name, 'exchange')) self.assertTrue(hasattr(ch._recv_name, 'queue')) self.assertEquals(ch._recv_name.exchange, sentinel.xp) self.assertEquals(ch._recv_name.queue, sentinel.queue) mxp.assert_called_once_with(sentinel.xp) mdq.assert_called_once_with(sentinel.queue) mb.assert_called_once_with(sentinel.binding) # call setup listener, passing a custom bind this time ch = create_channel() ch.setup_listener(NameTrio(sentinel.xp2, sentinel.queue2), binding=sentinel.binding) mxp.assert_called_with(sentinel.xp2) mdq.assert_called_with(sentinel.queue2) mb.assert_called_with(sentinel.binding) 
# call setup_listener, use anonymous queue name and no binding (will get return value we set above) ch = create_channel() ch.setup_listener(NameTrio(sentinel.xp3)) mxp.assert_called_with(sentinel.xp3) mdq.assert_called_with(None) mb.assert_called_with(sentinel.anon_queue) # call setup_listener with anon queue name but with binding ch = create_channel() ch.setup_listener(NameTrio(sentinel.xp4), binding=sentinel.binding2) mxp.assert_called_with(sentinel.xp4) mdq.assert_called_with(None) mb.assert_called_with(sentinel.binding2) def test_setup_listener_existing_recv_name(self): ch = self._create_channel() recv_name = NameTrio(sentinel.xp, sentinel.queue, sentinel.binding) ch._recv_name = recv_name ch.setup_listener() self.assertEquals(ch._recv_name, recv_name) def test_setup_listener_existing_recv_name_with_differing_name(self): ch = self._create_channel() recv_name = NameTrio(sentinel.xp, sentinel.queue, sentinel.binding) ch._recv_name = recv_name ch.setup_listener(name=NameTrio(sentinel.xp, sentinel.queue, sentinel.notbinding)) self.assertNotEquals(ch._recv_name, recv_name) self.assertEquals(ch._recv_name.exchange, sentinel.xp) self.assertEquals(ch._recv_name.queue, sentinel.queue) self.assertEquals(ch._recv_name.binding, sentinel.notbinding) def test__destroy_queue_no_recv_name(self): self.assertRaises(AssertionError, self.ch.destroy_listener) def test__destroy_queue(self): self.ch._recv_name = NameTrio(sentinel.xp, sentinel.queue) self.ch.on_channel_open(Mock(BaseTransport)) self.ch.destroy_listener() self.assertTrue(self.ch._transport.delete_queue_impl.called) self.assertIn('queue', self.ch._transport.delete_queue_impl.call_args[1]) self.assertIn(sentinel.queue, self.ch._transport.delete_queue_impl.call_args[1].itervalues()) def test_destroy_listener(self): m = Mock() self.ch._destroy_queue = m self.ch.destroy_listener() m.assert_called_once_with() def test__destroy_binding_no_recv_name_or_binding(self): self.assertRaises(AssertionError, self.ch._destroy_binding) def test__destroy_binding(self): self.ch._recv_name = NameTrio(sentinel.xp, sentinel.queue) self.ch._recv_binding = sentinel.binding self.ch.on_channel_open(Mock(BaseTransport)) self.ch._destroy_binding() self.assertTrue(self.ch._transport.unbind_impl.called) self.assertIn('queue', self.ch._transport.unbind_impl.call_args[1]) self.assertIn('exchange', self.ch._transport.unbind_impl.call_args[1]) self.assertIn('binding', self.ch._transport.unbind_impl.call_args[1]) self.assertIn(sentinel.queue, self.ch._transport.unbind_impl.call_args[1].itervalues()) self.assertIn(sentinel.xp, self.ch._transport.unbind_impl.call_args[1].itervalues()) self.assertIn(sentinel.binding, self.ch._transport.unbind_impl.call_args[1].itervalues()) def test_start_consume(self): transport = MagicMock() self.ch.on_channel_open(transport) self.ch._fsm.current_state = self.ch.S_ACTIVE transport.start_consume_impl.return_value = sentinel.consumer_tag # set up recv name for queue self.ch._recv_name = NameTrio(sentinel.xp, sentinel.queue) self.ch.start_consume() self.assertTrue(self.ch._consuming) self.assertEquals(self.ch._fsm.current_state, self.ch.S_ACTIVE) self.assertEquals(self.ch._consumer_tag, sentinel.consumer_tag) transport.start_consume_impl.assert_called_once_with(self.ch._on_deliver, queue=sentinel.queue, no_ack=self.ch._consumer_no_ack, exclusive=self.ch._consumer_exclusive) def test_start_consume_already_started(self): self.ch._on_start_consume = Mock() self.ch._consuming = True self.ch.start_consume() # noops due to consuming flag already on 
self.assertFalse(self.ch._on_start_consume.called) @patch('pyon.net.channel.log') def test_start_consume_with_consumer_tag_and_auto_delete(self, mocklog): transport = AMQPTransport(Mock()) self.ch.on_channel_open(transport) self.ch._fsm.current_state = self.ch.S_ACTIVE # set up recv name for queue self.ch._recv_name = NameTrio(sentinel.xp, sentinel.queue) self.ch._consumer_tag = sentinel.consumer_tag self.ch._queue_auto_delete = True self.ch.start_consume() self.assertTrue(mocklog.warn.called) def test_stop_consume(self): transport = MagicMock() self.ch.on_channel_open(transport) # pretend we're consuming self.ch._fsm.current_state = self.ch.S_ACTIVE self.ch._consuming = True # set a sentinel as our consumer tag self.ch._consumer_tag = sentinel.consumer_tag # now make the call self.ch.stop_consume() self.assertEquals(self.ch._fsm.current_state, self.ch.S_ACTIVE) self.assertFalse(self.ch._consuming) self.assertTrue(transport.stop_consume_impl.called) self.assertIn(sentinel.consumer_tag, transport.stop_consume_impl.call_args[0]) def test_stop_consume_havent_started(self): self.ch._on_stop_consume = Mock() self.ch.stop_consume() self.assertFalse(self.ch._on_stop_consume.called) def test_stop_consume_raises_warning_with_auto_delete(self): transport = AMQPTransport(Mock()) transport.stop_consume_impl = Mock() self.ch.on_channel_open(transport) #transport.channel_number = sentinel.channel_number self.ch._consumer_tag = sentinel.consumer_tag self.ch._recv_name = NameTrio(sentinel.ex, sentinel.queue, sentinel.binding) self.ch._fsm.current_state = self.ch.S_ACTIVE self.ch._consuming = True #self.ch._ensure_transport = MagicMock() self.ch._queue_auto_delete = True self.ch.stop_consume() self.assertTrue(self.ch._transport.stop_consume_impl.called) self.assertIn(self.ch._consumer_tag, self.ch._transport.stop_consume_impl.call_args[0]) def test_recv(self): # replace recv_queue with a mock obj rqmock = Mock(spec=RecvChannel.SizeNotifyQueue) self.ch._recv_queue = rqmock rqmock.get.return_value = sentinel.recv m = self.ch.recv() self.assertEquals(m, sentinel.recv) self.assertTrue(rqmock.get.called) def test_recv_shutdown(self): # replace recv_queue with a mock obj rqmock = Mock(spec=RecvChannel.SizeNotifyQueue) self.ch._recv_queue = rqmock rqmock.get.return_value = ChannelShutdownMessage() self.assertRaises(ChannelClosedError, self.ch.recv) @patch('pyon.net.channel.BaseChannel') @patch('pyon.net.channel.ChannelShutdownMessage') @patch('pyon.net.channel.log', Mock()) # to avoid having to put it in signature def test_close_impl(self, mockshutdown, mockbasechannel): # no auto stop consuming, no auto delete of queue without recv_name set # should have a shutdown message inserted mockrq = Mock(spec=RecvChannel.SizeNotifyQueue) self.ch._recv_queue = mockrq self.ch.close_impl() # odd test quirk: have to assert mockshutdown was called once here (by close_impl), # before I can test that put was called with it, because I have to call it again just # to get the return value of it.
mockshutdown.assert_called_once_with() mockrq.put.assert_called_once_with(mockshutdown()) mockbasechannel.close_impl.assert_called_once_with(self.ch) def test_declare_queue(self): self.ch.on_channel_open(Mock(BaseTransport)) # needs a recv name self.ch._recv_name = (NameTrio(str(sentinel.xp))) qd = self.ch._declare_queue(str(sentinel.queue)) # can't join a sentinel self.assertTrue(self.ch._transport.declare_queue_impl.called) self.assertIn('queue', self.ch._transport.declare_queue_impl.call_args[1]) self.assertIn('auto_delete', self.ch._transport.declare_queue_impl.call_args[1]) self.assertIn('durable', self.ch._transport.declare_queue_impl.call_args[1]) composed = ".".join([str(sentinel.xp), str(sentinel.queue)]) self.assertIn(composed, self.ch._transport.declare_queue_impl.call_args[1].itervalues()) self.assertIn(self.ch.queue_auto_delete, self.ch._transport.declare_queue_impl.call_args[1].itervalues()) self.assertIn(self.ch.queue_durable, self.ch._transport.declare_queue_impl.call_args[1].itervalues()) # should have set recv_name self.assertTrue(hasattr(self.ch._recv_name, 'exchange')) self.assertTrue(hasattr(self.ch._recv_name, 'queue')) self.assertEquals(self.ch._recv_name.exchange, str(sentinel.xp)) # we passed in str() versions self.assertEquals(self.ch._recv_name.queue, self.ch._transport.declare_queue_impl()) self.assertEquals(qd, self.ch._transport.declare_queue_impl()) def test__bind_no_name(self): self.assertRaises(AssertionError, self.ch._bind, sentinel.binding) def test__bind(self): self.ch._recv_name = NameTrio(sentinel.xp, sentinel.queue) self.ch.on_channel_open(Mock()) self.ch._bind(sentinel.binding) self.assertTrue(self.ch._transport.bind_impl.called) self.assertIn('queue', self.ch._transport.bind_impl.call_args[1]) self.assertIn('exchange', self.ch._transport.bind_impl.call_args[1]) self.assertIn('binding', self.ch._transport.bind_impl.call_args[1]) self.assertIn(sentinel.queue, self.ch._transport.bind_impl.call_args[1].itervalues()) self.assertIn(sentinel.xp, self.ch._transport.bind_impl.call_args[1].itervalues()) self.assertIn(sentinel.binding, self.ch._transport.bind_impl.call_args[1].itervalues()) def test__on_deliver(self): # mock up the method frame (delivery_tag is really only one we care about) m = Mock() m.consumer_tag = sentinel.consumer_tag m.delivery_tag = sentinel.delivery_tag m.redelivered = sentinel.redelivered m.exchange = sentinel.exchange m.routing_key = sentinel.routing_key # mock up the header-frame h = Mock() h.headers = { 'this_exists': sentinel.exists } # use a mock for the recv queue rqmock = Mock(spec=RecvChannel.SizeNotifyQueue) self.ch._recv_queue = rqmock # now we can call! 
self.ch._on_deliver(sentinel.chan, m, h, sentinel.body) # assert the call rqmock.put.assert_called_once_with((sentinel.body, h.headers, sentinel.delivery_tag)) # assert the headers look ok self.assertIn(sentinel.exists, rqmock.put.call_args[0][0][1].itervalues()) def test_ack(self): transport = Mock() transport.channel_number = sentinel.channel_number self.ch.on_channel_open(transport) self.ch.ack(sentinel.delivery_tag) transport.ack_impl.assert_called_once_with(sentinel.delivery_tag) def test_reject(self): transport = Mock() transport.channel_number = sentinel.channel_number self.ch.on_channel_open(transport) self.ch.reject(sentinel.delivery_tag, requeue=True) transport.reject_impl.assert_called_once_with(sentinel.delivery_tag, requeue=True) def test_reset(self): self.ch.reset() self.assertEquals(self.ch._fsm.current_state, self.ch.S_INIT) def test_reset_when_consuming(self): # have to setup a lot here, can't just mock # _on_stop_consume because the FSM holds onto it transport = MagicMock() self.ch.on_channel_open(transport) # pretend we're consuming self.ch._fsm.current_state = self.ch.S_ACTIVE self.ch._consuming = True # set a sentinel as our consumer tag self.ch._consumer_tag = sentinel.consumer_tag self.ch.reset() self.assertEquals(self.ch._fsm.current_state, self.ch.S_ACTIVE) self.assertTrue(transport.stop_consume_impl.called) def test_get_stats(self): transport = Mock() self.ch.on_channel_open(transport) self.ch._recv_name = NameTrio(sentinel.ex, sentinel.queue) self.ch.get_stats() self.ch._transport.get_stats_impl.assert_called_once_with(queue=sentinel.queue) def test_purge(self): transport = Mock() self.ch.on_channel_open(transport) self.ch._recv_name = NameTrio(sentinel.ex, sentinel.queue) self.ch._purge() self.ch._transport.purge_impl.assert_called_once_with(queue=sentinel.queue) @attr('UNIT') @patch('pyon.net.channel.SendChannel') class TestPublisherChannel(PyonTestCase): # @TODO: have to do this because i'm patching the class, anything to be done? def test_verify_service(self, mocksendchannel): PyonTestCase.test_verify_service(self) def test_init(self, mocksendchannel): pubchan = PublisherChannel() def test_send_no_name(self, mocksendchannel): pubchan = PublisherChannel() self.assertRaises(AssertionError, pubchan.send, sentinel.data) def test_send(self, mocksendchannel): depmock = Mock() pubchan = PublisherChannel() pubchan._declare_exchange = depmock pubchan._send_name = NameTrio(sentinel.xp, sentinel.routing_key) pubchan.send(sentinel.data) depmock.assert_called_once_with(sentinel.xp) mocksendchannel.send.assert_called_once_with(pubchan, sentinel.data, headers=None) @attr('UNIT') @patch('pyon.net.channel.SendChannel') class TestBidirClientChannel(PyonTestCase): # @TODO: have to do this because i'm patching the class, anything to be done? def test_verify_service(self, mocksendchannel): PyonTestCase.test_verify_service(self) def setUp(self): self.ch = BidirClientChannel() def test__send_with_reply_to(self, mocksendchannel):
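# --- Added illustration (not from the original source) ---------------------
# The RecvChannel tests above all assert one contract: _on_deliver puts a
# (body, headers, delivery_tag) tuple on the recv queue, recv() blocks on that
# queue, and a ChannelShutdownMessage sentinel surfaces as ChannelClosedError.
# A minimal sketch of just that contract, reusing the Queue and channel
# imports at the top of this file; pyon's real RecvChannel adds an FSM,
# ack/reject, consumer management, and transport plumbing on top.
class _RecvContractSketch(object):
    def __init__(self):
        self._recv_queue = Queue()   # gevent queue, as imported above

    def _on_deliver(self, chan, method_frame, header_frame, body):
        # same tuple shape asserted in test__on_deliver
        self._recv_queue.put((body, header_frame.headers, method_frame.delivery_tag))

    def recv(self):
        msg = self._recv_queue.get()
        if isinstance(msg, ChannelShutdownMessage):
            raise ChannelClosedError('channel is closed')   # as test_recv_shutdown expects
        return msg

    def close_impl(self):
        # mirror test_close_impl: closing inserts the shutdown sentinel
        self._recv_queue.put(ChannelShutdownMessage())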
import script_bash import script_briteonyx from throw_out_your_templates_3_core_visitor_map import VisitorMap from structure_bash import * from structure_briteonyx import * class Script(script_briteonyx.Script): def __init__(self, content): script_briteonyx.Script.__init__(self) self._content = content def build(): return Script([ source_header(), '''# NOTE: Assumes this project has been activated using the BriteOnyx framework. # NOTE: We MUST NOT EVER 'exit' during BriteOnyx bootstrap or activation #################################################################################################### # NOTE: Uncomment the following two lines for debugging # set -o verbose # set -o xtrace # TODO: SOMEDAY: Add inverse commands to isolate debugging #################################################################################################### # Declare needed functionality to support the BriteOnyx framework # NOTE: We use the 'bo' prefix by convention for all our BriteOnyx support functions. #################################################################################################### # Functions in this section should NOT call functions from following sections boLog () { # Log the message $1 to STDERR # NOTE: Should only be called from this script # $1 = message echo -e "$1" >&2 } && export -f boLog boNodeCanonical () { # Return the canonical pathname for file system node $1 # NOTE: Must be called via command substitution, e.g.: # "$(boNodeCanonical '$1')" [[ $# -eq 1 ]] || return 100 # $1 = pathname of file system node declare Result # NOTE: This call to "readlink" is not supported on Apple Mac OS X, so deal with it... Result="$(readlink -m $1)" [[ $? -eq 0 ]] && echo "$Result" && return 0 [[ "$1" = "." ]] && echo "$PWD" && return 0 echo "$1" } && export -f boNodeCanonical boTrace () { # Trace message $1 # $1 = message [[ -n "$BO_Trace" ]] && boLog "TRACE: $1" } && export -f boTrace boTraceEntry () { # Trace the entry of execution into caller with source location name $1 and line $2 called with # argument count $3 and arguments $4 [[ $# -eq 4 ]] || return 100 # $1 = caller source location name ($FUNCNAME or $0) # $2 = caller source location line ($LINENO) # $3 = caller argument count ($#) # $4 = caller arguments ($*) boTrace "'$1:$2' called with $3 args: '$4'" } && export -f boTraceEntry boTraceValue () { # Trace value $2 described as $1 [[ $# -eq 2 ]] || return 100 # $1 = description of value # $2 = value boTrace "$1 = '$2'" } && export -f boTraceValue boTraceVariable () { # Trace environment variable $1 [[ $# -eq 1 ]] || return 100 # $1 = name of environment variable declare -r Name="$1" declare -r Value="${!Name}" boTraceValue "Variable '$Name'" "$Value" } && export -f boTraceVariable #################################################################################################### # Functions in this section should NOT call functions from following sections boDirectoryExists () { boNodeIsDirectory "$1" } && export -f boDirectoryExists boFileExists () { boNodeIsFile "$1" } && export -f boFileExists boNodeExists () { # Return whether node $1 exists [[ $# -eq 1 ]] || return 100 # $1 = node pathname [[ -e "$1" ]] } && export -f boNodeExists boNodeIsDirectory () { # Return whether node $1 is a directory [[ $# -eq 1 ]] || return 100 # $1 = node pathname [[ -d "$1" ]] } && export -f boNodeIsDirectory boNodeIsFile () { # Return whether node $1 is a file [[ $# -eq 1 ]] || return 100 # $1 = node pathname [[ -f "$1" ]] } && export -f boNodeIsFile boVariableIsMissing () { # Return 
whether environment variable $1 is missing (undefined or empty) [[ $# -eq 1 ]] || return 100 # $1 = name of environment variable declare -r Name="$1" declare -r Value="${!Name}" [[ -z "$Value" ]] } && export -f boVariableIsMissing #################################################################################################### # Functions in this section should NOT call functions from following sections boLogDebug () { boLog "DEBUG: $1" } && export -f boLogDebug boLogError () { boLog "ERROR: $1" } && export -f boLogError boLogFatal () { boLog "FATAL: $1" } && export -f boLogFatal boLogInfo () { boLog "INFO: $1" } && export -f boLogInfo boLogWarn () { boLog "WARN: $1" } && export -f boLogWarn #################################################################################################### # Functions in this section should NOT call functions from following sections boAbort () { boTraceEntry "$FUNCNAME" "$LINENO" $# "$*" # Abort execution due to previous command's status $3 while reporting fatal log message $5 # (including source location name $1 and line $2) and propagating outgoing status code $4 # TODO: Rename to boFail? [[ $# -eq 5 ]] || return 100 # $1 = caller source location name ($FUNCNAME or $0) # $2 = caller source location line ($LINENO) # $3 = incoming status code from previous command ($?, non-zero) # $4 = outgoing status code (repeat $? unless overriding) # $5 = message [[ "$3" -eq 0 ]] && return 100 boLogFatal "ABORT: Status $3 at '$1:$2' -> status $4: $5" return "$4" } && export -f boAbort boFailed () { boTraceEntry "$FUNCNAME" "$LINENO" $# "$*" # Log failed execution due to previous command's status $3 as reported at source location name # $1 and line $2, then propagate the failed status [[ $# -eq 3 ]] || return 100 # $1 = caller source location name ($FUNCNAME or $0) # $2 = caller source location line ($LINENO) # $3 = incoming status code from previous command ($?, non-zero) [[ "$3" -eq 0 ]] && return 100 boLogFatal "FAILED: Status $3 at '$1:$2'" return "$3" } && export -f boFailed #################################################################################################### # Functions in this section should NOT call functions from following sections boArgsRequire () { boTraceEntry "$FUNCNAME" "$LINENO" $# "$*" # Require that the actual argument count $3 equal the expected argument count $4 in the caller # with source location name $1 and line $2 [[ $# -eq 4 ]] || return 100 # $1 = caller source location name ($FUNCNAME or $0) # $2 = caller source location line ($LINENO) # $3 = actual argument count ($#) # $4 = expected argument count declare -r Msg="Expected $4 arguments but got $3!" [[ $3 -eq $4 ]] || boAbort "$1" "$2" $? 100 "$Msg" || return $? } && export -f boArgsRequire #################################################################################################### # Functions in this section should NOT call functions from following sections boDirectoryCreate () { boTraceEntry "$FUNCNAME" "$LINENO" $# "$*" # Create directory $1, if it does not already exist boArgsRequire "$FUNCNAME" "$LINENO" $# 1 || return $? # $1 = directory pathname declare Msg="Directory '$1' already exists, skipping creation." boNodeIsDirectory "$1" && boLogDebug "$Msg" && return $? Msg="Unable to create directory '$1', failed!" mkdir -p "$1" || boAbort "$FUNCNAME" "$LINENO" $? 100 "$Msg" || return $? boDirectoryRequire "$1" || boAbort "$FUNCNAME" "$LINENO" $? 100 "$Msg" || return $? 
} && export -f boDirectoryCreate boDirectoryRequire () { boTraceEntry "$FUNCNAME" "$LINENO" $# "$*" # Require directory $1, abort if it is missing boArgsRequire "$FUNCNAME" "$LINENO" $# 1 || return $? # $1 = pathname of required directory boNodeIsDirectory "$1" && return $? Msg="Directory '$1' is required but is missing!" boNodeExists "$1" || boAbort "$FUNCNAME" "$LINENO" $? 100 "$Msg" || return $? Msg="Directory '$1' is required but is blocked by a non-directory!" boAbort "$FUNCNAME" "$LINENO" 100 100 "$Msg" || return $? } && export -f boDirectoryRequire boFileRequire () { boTraceEntry "$FUNCNAME" "$LINENO" $# "$*" # Require that file $1 exists, abort if it is missing # TODO: Should we check other characteristics like readability or executability? boArgsRequire "$FUNCNAME" "$LINENO" $# 1 || return $? # $1 = required script file pathname declare -r Msg="File '$1' is required but missing!" boNodeIsFile "$1" || boAbort "$FUNCNAME" "$LINENO" $? 100 "$Msg" || return $? } && export -f boFileRequire boScriptRequire () { # Require that script file $1 exists, abort if it is missing # TODO: Should we check other characteristics like readability or executability? boFileRequire "$1" || boFailed "$FUNCNAME" "$LINENO" $? || return $? } && export -f boScriptRequire boVariableRequire () { boTraceEntry "$FUNCNAME" "$LINENO" $# "$*" # Require environment variable $1, abort if it is missing boArgsRequire "$FUNCNAME" "$LINENO" $# 1 || return $? # $1 = name of required environment variable declare -r Msg="Variable '$1' is required but is undefined or empty!" ! boVariableIsMissing "$1" || boAbort "$FUNCNAME" "$LINENO" $? 100 "$Msg" || return $? } && export -f boVariableRequire #################################################################################################### # Functions in this section should NOT
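# --- Added illustration (not from the original source) --------------------
# How a caller is meant to compose the helpers above: every step chains
# '|| boAbort ... || return $?' (or boFailed) so a failure is logged and the
# status propagates without ever calling 'exit', per the note at the top.
# 'boFileCopy' is hypothetical, written only to show the idiom.
boFileCopy () {
    boTraceEntry "$FUNCNAME" "$LINENO" $# "$*"
    # Copy file $1 to destination $2
    boArgsRequire "$FUNCNAME" "$LINENO" $# 2 || return $?
    boFileRequire "$1" || boFailed "$FUNCNAME" "$LINENO" $? || return $?
    declare -r Msg="Unable to copy '$1' to '$2'!"
    cp "$1" "$2" || boAbort "$FUNCNAME" "$LINENO" $? 100 "$Msg" || return $?
} && export -f boFileCopy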
# ' % kmergrammar # ' % <NAME> mm2842 # ' % 15th May 2017 # ' # Introduction # ' Some of the code below is still under active development # ' ## Required libraries # + name = 'import_libraries', echo=False import os import sys import numpy as np import pandas as pd import sqlalchemy import logging import time from math import log import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from itertools import product from sklearn import metrics from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.externals import joblib # + name = 'hello_world', echo=False def hello_world(): print("Aksunai qanuipit!") def createKmerSet(kmersize): """ write all possible kmers :param kmersize: integer, e.g., 8 :return uniq_kmers: list of sorted unique kmers """ kmerSet = set() nucleotides = ["a", "c", "g", "t"] kmerall = product(nucleotides, repeat=kmersize) for i in kmerall: kmer = ''.join(i) kmerSet.add(kmer) uniq_kmers = sorted(list(kmerSet)) return uniq_kmers def compute_kmer_entropy(kmer): """ compute shannon entropy for each kmer :param kmer: string :return entropy: float """ prob = [float(kmer.count(c)) / len(kmer) for c in dict.fromkeys(list(kmer))] entropy = - sum([p * log(p) / log(2.0) for p in prob]) return round(entropy, 2) def make_stopwords(kmersize): """ write filtered out kmers :param kmersize: integer, e.g., 8 :return stopwords: list of sorted low-complexity kmers """ kmersize_filter = {5: 1.3, 6: 1.3, 7: 1.3, 8: 1.3, 9: 1.3, 10: 1.3} limit_entropy = kmersize_filter.get(kmersize) kmerSet = set() nucleotides = ["a", "c", "g", "t"] kmerall = product(nucleotides, repeat=kmersize) for n in kmerall: kmer = ''.join(n) if compute_kmer_entropy(kmer) < limit_entropy: kmerSet.add(make_newtoken(kmer)) else: continue stopwords = sorted(list(kmerSet)) return stopwords def createNewtokenSet(kmersize): """ write all possible newtokens :param kmersize: integer, e.g., 8 :return uniq_newtokens: list of sorted unique newtokens """ newtokenSet = set() uniq_kmers = createKmerSet(kmersize) for kmer in uniq_kmers: newtoken = make_newtoken(kmer) newtokenSet.add(newtoken) uniq_newtokens = sorted(list(newtokenSet)) return uniq_newtokens def make_newtoken(kmer): """ write a collapsed kmer and its reverse complement as a single newtoken :param kmer: string e.g., "AG" :return newtoken: string e.g., "agnct" :param kmer: string e.g., "CT" (reverse complement of "AG") :return newtoken: string e.g., "agnct" (same token for both strands) """ kmer = str(kmer).lower() newtoken = "n".join(sorted([kmer, kmer.translate(str.maketrans('tagc', 'atcg'))[::-1]])) return newtoken def write_ngrams(sequence): """ write a bag of newtokens of size n :param sequence: string e.g., "ATCG" :param (intern) kmerlength e.g., 2 :return newtoken_string: string e.g., "atnat gantc cgncg" """ seq = str(sequence).lower() finalstart = (len(seq) - kmerlength) + 1 allkmers = [seq[start:(start + kmerlength)] for start in range(0, finalstart)] tokens = [make_newtoken(kmer) for kmer in allkmers if len(kmer) == kmerlength and "n" not in kmer] newtoken_string = " ".join(tokens) return newtoken_string def save_plot_prc(precision, recall, avg_prec, figure_file, name): """ make plot for precision recall :param precision: precision :param recall: recall :param avg_prec: avg_prec :param figure_file: figure_file :param name: name :return plot precision recall curve """ plt.clf() title = 'Precision Recall Curve - double strand ' + name plt.title(title) plt.plot(recall, precision, label='Precision = %0.2f' % avg_prec) plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('Recall') plt.ylabel('Precision') plt.savefig(figure_file) def save_plot_roc(false_positive_rate, true_positive_rate, roc_auc, figure_file, name): """ make plot for roc_auc :param false_positive_rate: false_positive_rate :param true_positive_rate: true_positive_rate :param roc_auc: roc_auc :param figure_file: figure_file :param name: name :return roc_auc """ plt.clf() title = 'Receiver Operating Characteristic - double strand ' + name plt.title(title) plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.2f' % roc_auc) plt.legend(loc='lower right') plt.plot([0, 1], [0, 1], 'r--') plt.xlim([-0.1, 1.2]) plt.ylim([-0.1, 1.2]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.savefig(figure_file) if sys.argv[1] == "-help": print( "Usage: python kgrammar_bag-of-k-mer_training_testing.py [kmersize, integer] [mode filtered, 'True', or 'False' (i.e., mode full)] [dataset_name, string]") print("Example: python kgrammar_bag-of-k-mer_training_testing.py 8 False FEA4") quit() else: kmersize = sys.argv[1] # e.g 8 if sys.argv[2] == 'True': filtered = True full = False mode = "_mode_filtered_" elif sys.argv[2] == 'False': filtered = False full = True mode = "_mode_full_" dataset_name = sys.argv[3] # e.g "KN1" kmerlength = int(kmersize) newtoken_size = 1 + (kmerlength * 2) pathname = os.path.dirname(sys.argv[0]) WORKING_DIR = os.path.abspath(pathname) all_tokens = createNewtokenSet(kmerlength) if kmerlength > 4: stpwrds = make_stopwords(kmerlength) else: filtered = False full = True mode = "_mode_full_" print("for k < 5 only full mode is available!") expected_tokens = len(all_tokens) run_id = str(int(time.time())) file_name = WORKING_DIR + '/output/bag-of-k-mers/' + dataset_name + '/kgrammar_bag-of-k-mers_model_' + run_id + '_' + dataset_name + '_' + str( kmerlength) + '_' + mode + '.txt' logging.basicConfig(level=logging.INFO, filename=file_name, filemode="a+", format="%(asctime)-15s %(levelname)-8s %(message)s") logging.info("kmer_grammar_bag-of-k-mers RUN ID") logging.info(run_id) logging.info("WORKING_DIR") logging.info(WORKING_DIR) logging.info("input: kmerlength") logging.info(str(kmersize)) logging.info("input: dataset") logging.info(str(dataset_name)) logging.info("input: filtered") logging.info(filtered) inengine = 'sqlite:///' + WORKING_DIR + '/input_databases/' + dataset_name + '/data_model.db' dbcon = sqlalchemy.create_engine(inengine) logging.info(inengine) print('*' * 80) print("Kgrammer run id: ", run_id) print("-d %s -k %d -filtered %s" % (dataset_name, kmerlength, str(sys.argv[2]))) trainquery = "SELECT * FROM train ORDER BY RANDOM()" dftrain = pd.read_sql_query(trainquery, dbcon) dftrain.columns = ["chr_num", "left_idx", "right_idx", "dna_string", "bound"] print("training set is ready") testquery = "SELECT * FROM test ORDER BY RANDOM()" dftest = pd.read_sql_query(testquery, dbcon) dftest.columns = ["chr_num", "left_idx", "right_idx", "dna_string", "bound"] print("test set is ready") print("Collecting tokens") dftrain["tokens"] = dftrain["dna_string"].apply(write_ngrams) dftest["tokens"] = dftest["dna_string"].apply(write_ngrams) train_tokens = dftrain["tokens"].tolist() test_tokens = dftest["tokens"].tolist() print("Collecting labels") train_labels = dftrain["bound"].tolist() test_labels = dftest["bound"].tolist() unique_train_labels = len(list(set(train_labels))) unique_test_labels = len(list(set(test_labels))) # Check that labels are as many as expected for binary 
classification if unique_train_labels < 2 or unique_test_labels < 2: print("ERROR: Expected 2 train and test labels. Got %d train labels and %d test labels" % ( unique_train_labels, unique_test_labels)) logging.info("Unique train labels = %d" % unique_train_labels) logging.info("Unique test labels = %d" % unique_test_labels) print("log file: " + file_name) quit() Y_DEV = np.asarray(train_labels) Y_holdout = np.asarray(test_labels) print("Building a vocabulary from tokens") tmpvectorizer = TfidfVectorizer(min_df=1, max_df=1.0, sublinear_tf=True, use_idf=True) X_TFIDF_ALL = tmpvectorizer.fit_transform(all_tokens) # newtoken sequences to numeric index. vcblry = tmpvectorizer.get_feature_names() if full: print("keeping all low-complexity k-mers") kmer_names = vcblry feature_names = np.asarray(kmer_names) # key transformation to use the fancy index into the report else: print("removing %d low-complexity k-mers" % len(stpwrds)) kmer_names = [x for x in vcblry if x not in stpwrds] feature_names = np.asarray(kmer_names) # key transformation to use the fancy index into the report # Check that tokens are no more than expected (about math.pow(4, kmerlength) / 2 after collapsing reverse complements) if len(kmer_names) > expected_tokens: print("ERROR: Expected %d tokens. Obtained %d tokens" % (expected_tokens, len(kmer_names))) logging.info("Expecting %d tokens" % expected_tokens) logging.info("Feature index contains %d tokens" % len(kmer_names)) logging.info("ERROR: expected %d tokens, got %d tokens" % (expected_tokens, len(kmer_names))) logging.info("ERROR: More features than expected!") print("log file: " + file_name) quit() else: print("Expected %d tokens. Obtained %d tokens" % (expected_tokens, len(kmer_names))) logging.info("Feature index contains %d tokens" % len(kmer_names)) print("Extracting features from the training data using TfidfVectorizer") vectorizer = TfidfVectorizer(min_df=1, max_df=1.0, sublinear_tf=True, use_idf=True, vocabulary=kmer_names) # vectorizer for kmer frequencies X_TFIDF_DEV = vectorizer.fit_transform(train_tokens) print("train_samples: %d, n_features: %d" % X_TFIDF_DEV.shape) print("Positive n_labels: %d Negative n_labels: %d" % (train_labels.count(0), train_labels.count(1))) logging.info("Train dataset") logging.info("n_samples: %d, n_features: %d" % X_TFIDF_DEV.shape) logging.info("Positive n_labels: %d Negative n_labels: %d" % (train_labels.count(0), train_labels.count(1))) print("Extracting features from the holdout data using TfidfVectorizer") X_TFIDF_test = vectorizer.transform(test_tokens) # transform (not fit_transform) so the holdout set reuses the IDF weights learned on the training set print("test_samples: %d, n_features: %d" % X_TFIDF_test.shape) print("Positive n_labels: %d Negative n_labels: %d" % (test_labels.count(0), test_labels.count(1))) logging.info("Test dataset") logging.info("n_samples: %d, n_features: %d" % X_TFIDF_test.shape) logging.info("Positive n_labels: %d Negative n_labels: %d" % (test_labels.count(0), test_labels.count(1))) print("Fitting a LogisticRegression (LR) model to the training set") TFIDF_LR = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False) logging.info(TFIDF_LR) TFIDF_LR.fit(X_TFIDF_DEV, Y_DEV) print("Predicting labels for holdout set") LR_hold_TFIDF_pred = TFIDF_LR.predict(X_TFIDF_test) # y_pred LR_hold_TFIDF_prob = TFIDF_LR.predict_proba(X_TFIDF_test)[:, 1] # y_score print("Evaluating model") print(metrics.classification_report(Y_holdout,
LR_hold_TFIDF_pred)) # y_true, y_pred print("Accuracy") print(metrics.accuracy_score(Y_holdout, LR_hold_TFIDF_pred)) # y_true, y_pred logging.info("LR evaluation") logging.info(metrics.classification_report(Y_holdout, LR_hold_TFIDF_pred)) # y_true, y_pred logging.info("Accuracy") logging.info(metrics.accuracy_score(Y_holdout, LR_hold_TFIDF_pred)) # y_true, y_pred logging.info("ROC_AUC") logging.info(metrics.roc_auc_score(Y_holdout, LR_hold_TFIDF_prob)) # y_true, y_score fpr, tpr, thresholds = metrics.roc_curve(Y_holdout, LR_hold_TFIDF_prob, pos_label=1) roc_auc = metrics.auc(fpr, tpr) roc_figure_file = WORKING_DIR + "/output/bag-of-k-mers/" + dataset_name + "/kgrammar_bag-of-k-mer_model_roc_" + mode + dataset_name + "_" + kmersize + "_" + run_id + ".png" save_plot_roc(fpr, tpr, roc_auc, roc_figure_file, dataset_name) precision, recall, thresholds = metrics.precision_recall_curve(Y_holdout, LR_hold_TFIDF_prob, pos_label=1) avg_prc = metrics.average_precision_score(Y_holdout, LR_hold_TFIDF_prob) prc_figure_file = WORKING_DIR + "/output/bag-of-k-mers/" + dataset_name + "/kgrammar_bag-of-k-mer_model_prc" + mode + dataset_name + "_" + kmersize + "_" + run_id + ".png" save_plot_prc(precision, recall, avg_prc, prc_figure_file, dataset_name) # Export the kmer weights from the LR classifier to a sqlite3 database if hasattr(TFIDF_LR, 'coef_'): top = np.argsort(TFIDF_LR.coef_[0])[-5:] # select the top 5 indices bottom = np.argsort(TFIDF_LR.coef_[0])[:5] # select the bottom 5 indices logging.info("database table LR_results") logging.info("top 5 positive kmers") logging.info(" ".join([i.split('n')[0].upper() for i in feature_names[top]])) logging.info(" ".join([i.split('n')[1].upper() for i in feature_names[top]])) logging.info("top 5 negative kmers") logging.info(" ".join([i.split('n')[0].upper() for i in feature_names[bottom]])) logging.info(" ".join([i.split('n')[1].upper() for i in feature_names[bottom]])) print("Saving data to database table LR_results") print('*' * 80) print("%s: %s" % ("pos kmers", " ".join([i.split('n')[0].upper() for i in feature_names[top]]))) print("%s: %s" % ("pos kmers", " ".join([i.split('n')[1].upper() for i in feature_names[top]]))) print() # making room print("%s: %s"
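# --- Added illustration (not from the original source) ---------------------
# Hand-checked examples of the strand-collapsing token scheme defined in
# make_newtoken/write_ngrams above. write_ngrams reads the module-level
# `kmerlength`, so it is pinned to 2 purely for this demo.
kmerlength = 2
assert make_newtoken("AT") == "atnat"                          # palindromic: its own reverse complement
assert make_newtoken("AG") == make_newtoken("CT") == "agnct"   # a strand pair collapses to one token
assert write_ngrams("ATCG") == "atnat gantc cgncg"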
a name for this space group, returns True or False. The space group name can be in several forms: the short name, the longer PDB-style name, or the space group number. """ if name == self.short_name: return True if name == self.pdb_name: return True if name == self.point_group_name: return True if name == self.number: return True return False def iter_equivalent_positions(self, vec): """Iterate the symmetry equivalent positions of the argument vector. The vector must already be in fractional coordinates, and the symmetry equivalent vectors are also in fractional coordinates. """ for symop in self.symop_list: yield symop(vec) ## spacegroup definitions sg1 = SpaceGroup( number = 1, num_sym_equiv = 1, num_primitive_sym_equiv = 1, short_name = "P1", point_group_name = "PG1", crystal_system = "TRICLINIC", pdb_name = "P 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0)]) sg2 = SpaceGroup( number = 2, num_sym_equiv = 2, num_primitive_sym_equiv = 2, short_name = "P-1", point_group_name = "PG1bar", crystal_system = "TRICLINIC", pdb_name = "P -1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_mZ, Tr_0_0_0)]) sg3 = SpaceGroup( number = 3, num_sym_equiv = 2, num_primitive_sym_equiv = 2, short_name = "P2", point_group_name = "PG2", crystal_system = "MONOCLINIC", pdb_name = "P 1 2 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_0_0)]) sg4 = SpaceGroup( number = 4, num_sym_equiv = 2, num_primitive_sym_equiv = 2, short_name = "P21", point_group_name = "PG2", crystal_system = "MONOCLINIC", pdb_name = "P 1 21 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_12_0)]) sg5 = SpaceGroup( number = 5, num_sym_equiv = 4, num_primitive_sym_equiv = 2, short_name = "C2", point_group_name = "PG2", crystal_system = "MONOCLINIC", pdb_name = "C 1 2 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_0_0), SymOp(Rot_X_Y_Z, Tr_12_12_0), SymOp(Rot_mX_Y_mZ, Tr_12_12_0)]) sg6 = SpaceGroup( number = 6, num_sym_equiv = 2, num_primitive_sym_equiv = 2, short_name = "Pm", point_group_name = "PGm", crystal_system = "MONOCLINIC", pdb_name = "P 1 m 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_0_0)]) sg7 = SpaceGroup( number = 7, num_sym_equiv = 2, num_primitive_sym_equiv = 2, short_name = "Pc", point_group_name = "PGm", crystal_system = "MONOCLINIC", pdb_name = "P 1 c 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_0_12)]) sg8 = SpaceGroup( number = 8, num_sym_equiv = 4, num_primitive_sym_equiv = 2, short_name = "Cm", point_group_name = "PGm", crystal_system = "MONOCLINIC", pdb_name = "C 1 m 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_0_0), SymOp(Rot_X_Y_Z, Tr_12_12_0), SymOp(Rot_X_mY_Z, Tr_12_12_0)]) sg9 = SpaceGroup( number = 9, num_sym_equiv = 4, num_primitive_sym_equiv = 2, short_name = "Cc", point_group_name = "PGm", crystal_system = "MONOCLINIC", pdb_name = "C 1 c 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_0_12), SymOp(Rot_X_Y_Z, Tr_12_12_0), SymOp(Rot_X_mY_Z, Tr_12_12_12)]) sg10 = SpaceGroup( number = 10, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "P2/m", point_group_name = "PG2/m", crystal_system = "MONOCLINIC", pdb_name = "P 1 2/m 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_0_0), SymOp(Rot_mX_mY_mZ, Tr_0_0_0)]) sg11 = SpaceGroup( number = 11, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "P21/m", point_group_name = "PG2/m", crystal_system = "MONOCLINIC", pdb_name = "P 
1 21/m 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_12_0), SymOp(Rot_mX_mY_mZ, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_12_0)]) sg12 = SpaceGroup( number = 12, num_sym_equiv = 8, num_primitive_sym_equiv = 4, short_name = "C2/m", point_group_name = "PG2/m", crystal_system = "MONOCLINIC", pdb_name = "C 1 2/m 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_0_0), SymOp(Rot_mX_mY_mZ, Tr_0_0_0), SymOp(Rot_X_Y_Z, Tr_12_12_0), SymOp(Rot_X_mY_Z, Tr_12_12_0), SymOp(Rot_mX_Y_mZ, Tr_12_12_0), SymOp(Rot_mX_mY_mZ, Tr_12_12_0)]) sg13 = SpaceGroup( number = 13, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "P2/c", point_group_name = "PG2/m", crystal_system = "MONOCLINIC", pdb_name = "P 1 2/c 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_0_12), SymOp(Rot_mX_mY_mZ, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_0_12)]) sg14 = SpaceGroup( number = 14, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "P21/c", point_group_name = "PG2/m", crystal_system = "MONOCLINIC", pdb_name = "P 1 21/c 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_mZ, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_12_12), SymOp(Rot_X_mY_Z, Tr_0_12_12)]) sg15 = SpaceGroup( number = 15, num_sym_equiv = 8, num_primitive_sym_equiv = 4, short_name = "C2/c", point_group_name = "PG2/m", crystal_system = "MONOCLINIC", pdb_name = "C 1 2/c 1", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_0_12), SymOp(Rot_mX_mY_mZ, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_0_12), SymOp(Rot_X_Y_Z, Tr_12_12_0), SymOp(Rot_mX_Y_mZ, Tr_12_12_12), SymOp(Rot_mX_mY_mZ, Tr_12_12_0), SymOp(Rot_X_mY_Z, Tr_12_12_12)]) sg16 = SpaceGroup( number = 16, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "P222", point_group_name = "PG222", crystal_system = "ORTHORHOMBIC", pdb_name = "P 2 2 2", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_0_0), SymOp(Rot_X_mY_mZ, Tr_0_0_0)]) sg17 = SpaceGroup( number = 17, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "P2221", point_group_name = "PG222", crystal_system = "ORTHORHOMBIC", pdb_name = "P 2 2 21", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_12), SymOp(Rot_mX_Y_mZ, Tr_0_0_12), SymOp(Rot_X_mY_mZ, Tr_0_0_0)]) sg18 = SpaceGroup( number = 18, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "P21212", point_group_name = "PG222", crystal_system = "ORTHORHOMBIC", pdb_name = "P 21 21 2", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_12_12_0), SymOp(Rot_X_mY_mZ, Tr_12_12_0)]) sg19 = SpaceGroup( number = 19, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "P212121", point_group_name = "PG222", crystal_system = "ORTHORHOMBIC", pdb_name = "P 21 21 21", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_12_0_12), SymOp(Rot_mX_Y_mZ, Tr_0_12_12), SymOp(Rot_X_mY_mZ, Tr_12_12_0)]) sg20 = SpaceGroup( number = 20, num_sym_equiv = 8, num_primitive_sym_equiv = 4, short_name = "C2221", point_group_name = "PG222", crystal_system = "ORTHORHOMBIC", pdb_name = "C 2 2 21", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_12), SymOp(Rot_mX_Y_mZ, Tr_0_0_12), SymOp(Rot_X_mY_mZ, Tr_0_0_0), SymOp(Rot_X_Y_Z, Tr_12_12_0), SymOp(Rot_mX_mY_Z, Tr_12_12_12), SymOp(Rot_mX_Y_mZ, Tr_12_12_12), SymOp(Rot_X_mY_mZ, Tr_12_12_0)]) sg21 = SpaceGroup( number = 21, num_sym_equiv = 8, num_primitive_sym_equiv = 4, short_name = "C222", point_group_name = 
"PG222", crystal_system = "ORTHORHOMBIC", pdb_name = "C 2 2 2", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_0_0), SymOp(Rot_X_mY_mZ, Tr_0_0_0), SymOp(Rot_X_Y_Z, Tr_12_12_0), SymOp(Rot_mX_mY_Z, Tr_12_12_0), SymOp(Rot_mX_Y_mZ, Tr_12_12_0), SymOp(Rot_X_mY_mZ, Tr_12_12_0)]) sg22 = SpaceGroup( number = 22, num_sym_equiv = 16, num_primitive_sym_equiv = 4, short_name = "F222", point_group_name = "PG222", crystal_system = "ORTHORHOMBIC", pdb_name = "F 2 2 2", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_0_0), SymOp(Rot_X_mY_mZ, Tr_0_0_0), SymOp(Rot_X_Y_Z, Tr_0_12_12), SymOp(Rot_mX_mY_Z, Tr_0_12_12), SymOp(Rot_mX_Y_mZ, Tr_0_12_12), SymOp(Rot_X_mY_mZ, Tr_0_12_12), SymOp(Rot_X_Y_Z, Tr_12_0_12), SymOp(Rot_mX_mY_Z, Tr_12_0_12), SymOp(Rot_mX_Y_mZ, Tr_12_0_12), SymOp(Rot_X_mY_mZ, Tr_12_0_12), SymOp(Rot_X_Y_Z, Tr_12_12_0), SymOp(Rot_mX_mY_Z, Tr_12_12_0), SymOp(Rot_mX_Y_mZ, Tr_12_12_0), SymOp(Rot_X_mY_mZ, Tr_12_12_0)]) sg23 = SpaceGroup( number = 23, num_sym_equiv = 8, num_primitive_sym_equiv = 4, short_name = "I222", point_group_name = "PG222", crystal_system = "ORTHORHOMBIC", pdb_name = "I 2 2 2", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_0), SymOp(Rot_X_mY_mZ, Tr_0_0_0), SymOp(Rot_mX_Y_mZ, Tr_0_0_0), SymOp(Rot_X_Y_Z, Tr_12_12_12), SymOp(Rot_mX_mY_Z, Tr_12_12_12), SymOp(Rot_X_mY_mZ, Tr_12_12_12), SymOp(Rot_mX_Y_mZ, Tr_12_12_12)]) sg24 = SpaceGroup( number = 24, num_sym_equiv = 8, num_primitive_sym_equiv = 4, short_name = "I212121", point_group_name = "PG222", crystal_system = "ORTHORHOMBIC", pdb_name = "I 21 21 21", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_12_0_12), SymOp(Rot_mX_Y_mZ, Tr_0_12_12), SymOp(Rot_X_mY_mZ, Tr_12_12_0), SymOp(Rot_X_Y_Z, Tr_12_12_12), SymOp(Rot_mX_mY_Z, Tr_0_12_0), SymOp(Rot_mX_Y_mZ, Tr_12_0_0), SymOp(Rot_X_mY_mZ, Tr_0_0_12)]) sg25 = SpaceGroup( number = 25, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "Pmm2", point_group_name = "PGmm2", crystal_system = "ORTHORHOMBIC", pdb_name = "P m m 2", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_0_0), SymOp(Rot_mX_Y_Z, Tr_0_0_0)]) sg26 = SpaceGroup( number = 26, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "Pmc21", point_group_name = "PGmm2", crystal_system = "ORTHORHOMBIC", pdb_name = "P m c 21", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_12), SymOp(Rot_X_mY_Z, Tr_0_0_12), SymOp(Rot_mX_Y_Z, Tr_0_0_0)]) sg27 = SpaceGroup( number = 27, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "Pcc2", point_group_name = "PGmm2", crystal_system = "ORTHORHOMBIC", pdb_name = "P c c 2", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_0_12), SymOp(Rot_mX_Y_Z, Tr_0_0_12)]) sg28 = SpaceGroup( number = 28, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "Pma2", point_group_name = "PGmm2", crystal_system = "ORTHORHOMBIC", pdb_name = "P m a 2", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_12_0_0), SymOp(Rot_mX_Y_Z, Tr_12_0_0)]) sg29 = SpaceGroup( number = 29, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "Pca21", point_group_name = "PGmm2", crystal_system = "ORTHORHOMBIC", pdb_name = "P c a 21", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_12), SymOp(Rot_X_mY_Z, Tr_12_0_0), SymOp(Rot_mX_Y_Z, Tr_12_0_12)]) sg30 = SpaceGroup( number = 30, 
num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "Pnc2", point_group_name = "PGmm2", crystal_system = "ORTHORHOMBIC", pdb_name = "P n c 2", symop_list = [ SymOp(Rot_X_Y_Z, Tr_0_0_0), SymOp(Rot_mX_mY_Z, Tr_0_0_0), SymOp(Rot_X_mY_Z, Tr_0_12_12), SymOp(Rot_mX_Y_Z, Tr_0_12_12)]) sg31 = SpaceGroup( number = 31, num_sym_equiv = 4, num_primitive_sym_equiv = 4, short_name = "Pmn21", point_group_name = "PGmm2", crystal_system = "ORTHORHOMBIC", pdb_name = "P m n 21", symop_list = [
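# --- Editor's sketch (hedged, not part of the module above): the SpaceGroup tables pair
# Rot_* rotation matrices with Tr_* fractional translations (e.g. Tr_0_12_12 reads as
# (0, 1/2, 1/2)). Assuming that convention, a SymOp maps a fractional coordinate as
# x' = R @ x + t, wrapped back into the unit cell. The helper below is hypothetical.
import numpy as np

def apply_symop(rot, tr, frac_xyz):
    """Apply one symmetry operator to a fractional coordinate."""
    return (np.asarray(rot) @ np.asarray(frac_xyz) + np.asarray(tr)) % 1.0

# Example: the P21/c operator (-x, y+1/2, -z+1/2), i.e. SymOp(Rot_mX_Y_mZ, Tr_0_12_12)
# from sg14 above
rot_mX_Y_mZ = np.diag([-1.0, 1.0, -1.0])
tr_0_12_12 = np.array([0.0, 0.5, 0.5])
print(apply_symop(rot_mX_Y_mZ, tr_0_12_12, [0.1, 0.2, 0.3]))  # -> [0.9 0.7 0.2]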
extent=[0, 2, 0, 1], cmap=cmap_u) figure.plot_panel_imshow(dns_target[0, :, :, 0], 0, 3, vmin=0.4, vmax=1.2, origin='upper', extent=[0, 2, 0, 1], cmap=cmap_u) figure.plot_panel_imshow(lr_target[0, :, :, 1], 1, 0, vmin=-0.1, vmax=0.1, origin='upper', extent=[0, 2, 0, 1], cmap=cmap_v) figure.plot_panel_imshow(hr_target[0, :, :, 1], 1, 1, vmin=-0.1, vmax=0.1, origin='upper', extent=[0, 2, 0, 1], cmap=cmap_v) figure.plot_panel_imshow(hr_predic[0, :, :, 1], 1, 2, vmin=-0.1, vmax=0.1, origin='upper', extent=[0, 2, 0, 1], cmap=cmap_v) figure.plot_panel_imshow(dns_target[0, :, :, 1], 1, 3, vmin=-0.1, vmax=0.1, origin='upper', extent=[0, 2, 0, 1], cmap=cmap_v) figure.set_labels([None, '$y/h$'], 0, 0, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/h$', '$y/h$'], 1, 0, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/h$', None], 1, 1, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/h$', None], 1, 2, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/h$', None], 1, 3, labelpad=[None, None], flip = [False, False]) figure.set_ticks([[], [0, 1]], 0, 0) figure.set_ticks([[0, 1, 2], [0, 1]], 1, 0) figure.set_ticks([[], []], 0, 1) figure.set_ticks([[0, 1, 2], []], 1, 1) figure.set_ticks([[], []], 0, 2) figure.set_ticks([[0, 1, 2], []], 1, 2) figure.set_ticks([[], []], 0, 3) figure.set_ticks([[0, 1, 2], []], 1, 3) figure.set_tick_params(0, 0, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 0, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(0, 1, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 1, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(0, 2, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 2, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(0, 3, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 3, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) caxu = figure.fig.add_axes([0.92, 0.55, 0.015, 0.28]) caxv = figure.fig.add_axes([0.92, 0.16, 0.015, 0.28]) figure.fig.colorbar(figure.im[0], cax=caxu, orientation='vertical', shrink=1.0, extendfrac=0, ticks=[0.4, 0.8, 1.2]) figure.fig.colorbar(figure.im[4], cax=caxv, orientation='vertical', shrink=1.0, extendfrac=0, ticks=[-0.1, 0, 0.1]) elif case == 'pinball': hr_target[np.repeat(fl_target, 2, axis=3) == 0] = np.nan """ TheArtist environment """ # Initialize class figure = TheArtist() # Initialize figure figure.generate_figure_environment(cols=4, rows=2, fig_width_pt=510, ratio=0.5, regular=True) Res = 25 xmin = -5 ymin = -4 + 4 / Res xhr = xhr / Res + xmin yhr = yhr / Res + ymin """ Define colormaps """ # Streamwise velocity cmap_u = matplotlib.cm.get_cmap("viridis").copy() 
# Wall-normal velocity cmap_v = matplotlib.cm.get_cmap("RdYlBu_r").copy() # Assign black to NaN values cmap_u.set_bad(color='k') cmap_v.set_bad(color='k') figure.plot_panel_imshow(lr_target[0, :, :, 0], 0, 0, vmin=-1.5, vmax=1.5, origin='upper', extent=[xhr.min(), xhr.max(), yhr.min(), yhr.max()], cmap=cmap_u) figure.plot_panel_imshow(hr_target[0, :, :, 0], 0, 1, vmin=-1.5, vmax=1.5, origin='upper', extent=[xhr.min(), xhr.max(), yhr.min(), yhr.max()], cmap=cmap_u) figure.plot_panel_imshow(hr_predic[0, :, :, 0], 0, 2, vmin=-1.5, vmax=1.5, origin='upper', extent=[xhr.min(), xhr.max(), yhr.min(), yhr.max()], cmap=cmap_u) figure.plot_panel_imshow(dns_target[0, :, :, 0], 0, 3, vmin=-1.5, vmax=1.5, origin='upper', extent=[xhr.min(), xhr.max(), yhr.min(), yhr.max()], cmap=cmap_u) figure.plot_panel_imshow(lr_target[0, :, :, 1], 1, 0, vmin=-0.7, vmax=0.7, origin='upper', extent=[xhr.min(), xhr.max(), yhr.min(), yhr.max()], cmap=cmap_v) figure.plot_panel_imshow(hr_target[0, :, :, 1], 1, 1, vmin=-0.7, vmax=0.7, origin='upper', extent=[xhr.min(), xhr.max(), yhr.min(), yhr.max()], cmap=cmap_v) figure.plot_panel_imshow(hr_predic[0, :, :, 1], 1, 2, vmin=-0.7, vmax=0.7, origin='upper', extent=[xhr.min(), xhr.max(), yhr.min(), yhr.max()], cmap=cmap_v) figure.plot_panel_imshow(dns_target[0, :, :, 1], 1, 3, vmin=-0.7, vmax=0.7, origin='upper', extent=[xhr.min(), xhr.max(), yhr.min(), yhr.max()], cmap=cmap_v) figure.set_labels([None, '$y/D$'], 0, 0, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/D$', '$y/D$'], 1, 0, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/D$', None], 1, 1, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/D$', None], 1, 2, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/D$', None], 1, 3, labelpad=[None, None], flip = [False, False]) for idx in range(4): for idy in range(2): draw_circle1 = plt.Circle((- 1.299, 0), 0.5, edgecolor=None, facecolor='gray') draw_circle2 = plt.Circle((0, 0.75), 0.5, edgecolor=None, facecolor='gray') draw_circle3 = plt.Circle((0, -0.75), 0.5, edgecolor=None, facecolor='gray') figure.axs[idy, idx].add_artist(draw_circle1) figure.axs[idy, idx].add_artist(draw_circle2) figure.axs[idy, idx].add_artist(draw_circle3) figure.set_ticks([[], [-4, 0, 4]], 0, 0) figure.set_ticks([[-5, 0, 5, 10, 15], [-4, 0, 4]], 1, 0) figure.set_ticks([[], []], 0, 1) figure.set_ticks([[-5, 0, 5, 10, 15], []], 1, 1) figure.set_ticks([[], []], 0, 2) figure.set_ticks([[-5, 0, 5, 10, 15], []], 1, 2) figure.set_ticks([[], []], 0, 3) figure.set_ticks([[-5, 0, 5, 10, 15], []], 1, 3) figure.set_tick_params(0, 0, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 0, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(0, 1, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 1, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(0, 2, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 2, axis="both", direction="in", which="both", pad=4, bottom=True, 
top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(0, 3, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 3, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) caxu = figure.fig.add_axes([0.92, 0.57, 0.015, 0.24]) caxv = figure.fig.add_axes([0.92, 0.18, 0.015, 0.24]) figure.fig.colorbar(figure.im[0], cax=caxu, orientation='vertical', shrink=1.0, extendfrac=0, ticks=[-1.5, 0, 1.5]) figure.fig.colorbar(figure.im[4], cax=caxv, orientation='vertical', shrink=1.0, extendfrac=0, ticks=[-0.7, 0, 0.7]) elif case == 'exptbl': hr_target[np.repeat(fl_target, 2, axis=3) == 0] = np.nan """ TheArtist environment """ # Initialize class figure = TheArtist() # Initialize figure figure.generate_figure_environment(cols=3, rows=2, fig_width_pt=472, ratio=1.0, regular=True) """ Define colormaps """ # Streamwise velocity cmap_u = matplotlib.cm.get_cmap("viridis").copy() # Wall-normal velocity cmap_v = matplotlib.cm.get_cmap("RdYlBu_r").copy() # Assign black to NaN values cmap_u.set_bad(color='k') cmap_v.set_bad(color='k') figure.plot_panel_imshow(lr_target[0, :, :, 0], 0, 0, vmin=0.4, vmax=1.2, origin='upper', extent=[0, 1, 0, 1], cmap=cmap_u) figure.plot_panel_imshow(hr_target[0, :, :, 0], 0, 1, vmin=0.4, vmax=1.2, origin='upper', extent=[0, 1, 0, 1], cmap=cmap_u) figure.plot_panel_imshow(hr_predic[0, :, :, 0], 0, 2, vmin=0.4, vmax=1.2, origin='upper', extent=[0, 1, 0, 1], cmap=cmap_u) figure.plot_panel_imshow(lr_target[0, :, :, 1], 1, 0, vmin=-0.1, vmax=0.1, origin='upper', extent=[0, 1, 0, 1], cmap=cmap_v) figure.plot_panel_imshow(hr_target[0, :, :, 1], 1, 1, vmin=-0.1, vmax=0.1, origin='upper', extent=[0, 1, 0, 1], cmap=cmap_v) figure.plot_panel_imshow(hr_predic[0, :, :, 1], 1, 2, vmin=-0.1, vmax=0.1, origin='upper', extent=[0, 1, 0, 1], cmap=cmap_v) figure.set_labels([None, '$y/h$'], 0, 0, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/h$', '$y/h$'], 1, 0, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/h$', None], 1, 1, labelpad=[None, None], flip = [False, False]) figure.set_labels(['$x/h$', None], 1, 2, labelpad=[None, None], flip = [False, False]) figure.set_ticks([[], [0, 1]], 0, 0) figure.set_ticks([[0, 1], [0, 1]], 1, 0) figure.set_ticks([[], []], 0, 1) figure.set_ticks([[0, 1], []], 1, 1) figure.set_ticks([[], []], 0, 2) figure.set_ticks([[0, 1], []], 1, 2) figure.set_tick_params(0, 0, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 0, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(0, 1, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 1, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(0, 2, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(1, 2, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, 
left=True, right=False, labelbottom=True, labelleft=True, length=0) caxu = figure.fig.add_axes([0.92, 0.55, 0.015, 0.28]) caxv = figure.fig.add_axes([0.92, 0.16, 0.015, 0.28]) figure.fig.colorbar(figure.im[0], cax=caxu, orientation='vertical', shrink=1.0, extendfrac=0, ticks=[0.4, 0.8, 1.2]) figure.fig.colorbar(figure.im[4], cax=caxv, orientation='vertical', shrink=1.0, extendfrac=0, ticks=[-0.1, 0, 0.1]) elif case == 'sst': hr_target[fl_target == 0] = np.nan """ TheArtist environment """ # Initialize class figure = TheArtist() # Initialize figure figure.generate_figure_environment(cols=4, rows=1, fig_width_pt=472, ratio=0.6, regular=True) """ Define colormaps """ # Streamwise velocity cmap_t = matplotlib.cm.get_cmap("plasma").copy() # Assign black to NaN values cmap_t.set_bad(color='w') figure.plot_panel_imshow(lr_target[0, :, :, 0], 0, 0, vmin=0, vmax=30, origin='lower', extent=[0, 360, -90, 90], cmap=cmap_t) figure.plot_panel_imshow(hr_target[0, :, :, 0], 0, 1, vmin=0, vmax=30, origin='lower', extent=[0, 360, -90, 90], cmap=cmap_t) figure.plot_panel_imshow(hr_predic[0, :, :, 0], 0, 2, vmin=0, vmax=30, origin='lower', extent=[0, 360, -90, 90], cmap=cmap_t) figure.plot_panel_imshow(dns_target[0, :, :, 0], 0, 3, vmin=0, vmax=30, origin='lower', extent=[0, 360, -90, 90], cmap=cmap_t) figure.set_labels(['Longitude', 'Latitude'], 0, 0, labelpad=[None, None], flip = [False, False]) figure.set_labels(['Longitude', None], 0, 1, labelpad=[None, None], flip = [False, False]) figure.set_labels(['Longitude', None], 0, 2, labelpad=[None, None], flip = [False, False]) figure.set_labels(['Longitude', None], 0, 3, labelpad=[None, None], flip = [False, False]) figure.set_ticks([[0, 180, 360], [-90, 0, 90]], 0, 0) figure.set_ticks([[0, 180, 360], []], 0, 1) figure.set_ticks([[0, 180, 360], []], 0, 2) figure.set_ticks([[0, 180, 360], []], 0, 3) figure.set_tick_params(0, 0, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(0, 1, axis="both", direction="in", which="both", pad=4, bottom=True, top=False, left=True, right=False, labelbottom=True, labelleft=True, length=0) figure.set_tick_params(0, 2, axis="both", direction="in", which="both",
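# --- Editor's refactor sketch (hedged): every panel above receives the same
# set_tick_params keyword arguments, one verbose call per (row, col). Assuming TheArtist
# keeps its axes grid addressable by (row, col) as those calls suggest, a small loop
# removes the duplication:
from itertools import product

TICK_STYLE = dict(axis="both", direction="in", which="both", pad=4,
                  bottom=True, top=False, left=True, right=False,
                  labelbottom=True, labelleft=True, length=0)

def style_all_panels(figure, n_rows, n_cols):
    # apply the shared tick style to every panel in the grid
    for row, col in product(range(n_rows), range(n_cols)):
        figure.set_tick_params(row, col, **TICK_STYLE)

# e.g. style_all_panels(figure, 2, 4) for the two-row, four-column layouts above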
that a valid # NuFlavIntGroup is specified (i.e., the target NuFlavIntGroup doesn't # span multiple NuFlavIntGroups). all_keys = list(args[:-1]) new_val = deepcopy(args[-1]) tgt_grp = NuFlavIntGroup(all_keys) for (flavints, key) in self.flavints_to_keys: match = False # Identical match to existing NuFlavIntGroup if tgt_grp == flavints: all_keys[0] = key match = True if match: branch_keys = all_keys[:-1] node_key = all_keys[-1] lvl = self for k in branch_keys: lvl = dict.__getitem__(lvl, k) old_val = dict.__getitem__(lvl, node_key) dict.__setitem__(lvl, node_key, new_val) try: self.validate(self) except: dict.__setitem__(lvl, node_key, old_val) raise return # If you get this far, no match was found raise ValueError( 'Could not set data for NuFlavInt(Group) %s; valid' ' NuFlavInt(Group)s for this object are: %s' % (str(tgt_grp), '. '.join([str(nfig) for nfig in self.grouped])) ) def get(self, *args): """Get data corresponding to a NuFlavInt or NuFlavIntGroup that comprises a subset of a NuFlavIntGroup represented in this container. * If `arg` is a NuFlavInt object or a string convertible to one, the branch whose NuFlavIntGroup contains this NuFlavInt is returned * If `arg` is a NuFlavIntGroup object or a string convertible to one and if it is a subset of a NuFlavIntGroup within this object, the corresponding node is returned * Subsequent `arg`s are treated as integer or string indices in sub-structures within the NuFlavIntGroup branch * If the NuFlavInt or NuFlavIntGroup corresponding to `arg` is not a subset of a single NuFlavIntGroups in this object, an exception is raised """ #with BarSep('_'): all_keys = list(args) #print('all_keys:', all_keys) tgt_grp = NuFlavIntGroup(all_keys[0]) #print('tgt_grp0:', tgt_grp) #print('flavints_to_keys:', self.flavints_to_keys) for (flavints, key) in self.flavints_to_keys: #print('flavints:', flavints, 'type:', type(flavints)) #print('key:', key, 'type:', type(key)) match = False # Identical if tgt_grp == flavints: all_keys[0] = key match = True #print('found exact match:', tgt_grp, '==', flavints) # Requested flavints are strict subset elif not tgt_grp - flavints: all_keys[0] = key match = True #print('found subset match:', tgt_grp, 'in', flavints) logging.debug('Requesting data for subset (%s) of' ' grouping %s', str(tgt_grp), str(flavints)) # Get it if match: branch_keys = all_keys[:-1] node_key = all_keys[-1] lvl = self for k in branch_keys: lvl = dict.__getitem__(lvl, k) #print('node_key:', node_key, 'type:', type(node_key)) return deepcopy(dict.__getitem__(lvl, node_key)) # If you get this far, no match was found raise ValueError('Could not locate data for group %s' % str(tgt_grp)) def xlateGroupsStr(val): """Translate a ","-separated string into separate `NuFlavIntGroup`s. val ","-delimited list of valid NuFlavIntGroup strings, e.g.: "nuall_nc,nue,numu_cc+numubar_cc" Note that specifying NO interaction type results in both interaction types being selected, e.g. "nue" implies "nue_cc+nue_nc". For other details of how the substrings are interpreted, see docs for NuFlavIntGroup. returns: grouped, ungrouped grouped, ungrouped lists of NuFlavIntGroups; the first will have more than one flavint in each NuFlavIntGroup whereas the second will have just one flavint in each NuFlavIntGroup. Either list can be of 0-length. This function does not enforce mutual-exclusion on flavints in the various flavint groupings, but does list any flavints not grouped together in the `ungrouped` return arg. Mutual exclusion can be enforced through set operations upon return. 
""" # What flavints to group together grouped = [NuFlavIntGroup(s) for s in re.split('[,;]', val)] # Find any flavints not included in the above groupings all_flavints = set(ALL_NUFLAVINTS) all_grouped_flavints = set(NuFlavIntGroup(grouped)) ungrouped = [NuFlavIntGroup(k) for k in sorted(all_flavints.difference(all_grouped_flavints))] return grouped, ungrouped # pylint: disable=line-too-long def test_IntType(): """IntType unit tests""" #========================================================================== # Test IntType #========================================================================== ref = CC assert IntType('\n\t _cc \n') == ref try: IntType('numubarcc') except ValueError: pass else: raise Exception() assert IntType(1) == ref assert IntType(1.0) == ref assert IntType(CC) == ref assert IntType(NuFlavInt('numubarcc')) == ref for int_code in [1, 2]: IntType(int_code) IntType(float(int_code)) logging.info('<< PASS : test_IntType >>') # pylint: disable=line-too-long def test_NuFlav(): """NuFlav unit tests""" all_f_codes = [12, -12, 14, -14, 16, -16] #========================================================================== # Test NuFlav #========================================================================== ref = NuFlav('numu') assert ref.code == 14 assert (-ref).code == -14 assert ref.bar_code == 1 assert (-ref).bar_code == -1 assert ref.particle assert not (-ref).particle assert not ref.antiparticle assert (-ref).antiparticle #assert NuFlav('\n\t _ nu_ mu_ cc\n\t\r') == ref #assert NuFlav('numucc') == ref assert NuFlav(14) == ref assert NuFlav(14.0) == ref assert NuFlav(NuFlav('numu')) == ref assert NuFlav(NuFlavInt('numucc')) == ref assert NuFlav(NuFlavInt('numunc')) == ref for f in all_f_codes: NuFlav(f) NuFlav(float(f)) for (f, bnb) in product(['e', 'mu', 'tau'], ['', 'bar']): NuFlav('nu_' + f + '_' + bnb) logging.info('<< PASS : test_NuFlav >>') # pylint: disable=line-too-long def test_NuFlavInt(): """NuFlavInt unit tests""" all_f_codes = [12, -12, 14, -14, 16, -16] all_i_codes = [1, 2] #========================================================================== # Test NuFlavInt #========================================================================== try: NuFlavInt('numu') except ValueError: pass # Equality fi_comb = [fic for fic in product(all_f_codes, all_i_codes)] for (fi0, fi1) in product(fi_comb, fi_comb): if fi0 == fi1: assert NuFlavInt(fi0) == NuFlavInt(fi1) else: assert NuFlavInt(fi0) != NuFlavInt(fi1) assert NuFlavInt((12, 1)) != 'xyz' # Sorting: this is my desired sort order nfl0 = [NUECC, NUEBARCC, NUMUCC, NUMUBARCC, NUTAUCC, NUTAUBARCC, NUENC, NUEBARNC, NUMUNC, NUMUBARNC, NUTAUNC, NUTAUBARNC] nfl1 = deepcopy(nfl0) np.random.shuffle(nfl1) nfl_sorted = sorted(nfl1) assert all([v0 == nfl_sorted[n] for n, v0 in enumerate(nfl0)]), str(nfl_sorted) assert len(nfl0) == len(nfl_sorted) # Test NuFlavInt instantiation _ = NuFlav('nue') _ = IntType('cc') _ = IntType('nc') _ = NuFlav('nuebar') flavs = list(ALL_NUFLAVS) flavs.extend(['nue', 'numu', 'nutau', 'nu_e', 'nu e', 'Nu E', 'nuebar', 'nu e bar']) flavs.extend(all_f_codes) _ = NuFlavInt('nuebarnc') # Instantiate with combinations of flavs and int types for f, i in product(flavs, [1, 2, 'cc', 'nc', CC, NC]): ref = NuFlavInt(f, i) assert NuFlavInt((f, i)) == ref assert NuFlavInt(flav=f, int_type=i) == ref if isinstance(f, str) and isinstance(i, str): assert NuFlavInt(f+i) == ref assert NuFlavInt(f + '_' + i) == ref assert NuFlavInt(f + ' ' + i) == ref # Instantiate with already-instantiated `NuFlavInt`s assert 
NuFlavInt(NUECC) == NuFlavInt('nuecc') assert NuFlavInt(NUEBARNC) == NuFlavInt('nuebarnc') # test negating flavint nk = NuFlavInt('numucc') assert -nk == NuFlavInt('numubarcc') logging.info('<< PASS : test_NuFlavInt >>') # pylint: disable=line-too-long def test_NuFlavIntGroup(): """NuFlavIntGroup unit tests""" all_f_codes = [12, -12, 14, -14, 16, -16] all_i_codes = [1, 2] #========================================================================== # Test NuFlavIntGroup #========================================================================== fi_comb = [fic for fic in product(all_f_codes, all_i_codes)] nfl0 = [NuFlavInt(fic) for fic in fi_comb] nfl1 = [NuFlavInt(fic) for fic in fi_comb] nfl_sorted = sorted(nfl1) nkg0 = NuFlavIntGroup(nfl0) nkg1 = NuFlavIntGroup(nfl_sorted) assert nkg0 == nkg1 assert nkg0 != 'xyz' assert nkg0 != 'xyz' # Test inputs assert NuFlavIntGroup('nuall,nuallbar').flavs == tuple([NuFlav(c) for c in all_f_codes]), str(NuFlavIntGroup('nuall,nuallbar').flavs) # # Test NuFlavIntGroup instantiation # nue = NuFlav('nue') numu = NuFlav('numu') nue_cc = NuFlavInt('nue_cc') nue_nc = NuFlavInt('nue_nc') # Empty args NuFlavIntGroup() NuFlavIntGroup([]) # String flavor promoted to CC+NC assert set(NuFlavIntGroup('nue').flavints) == set((nue_cc, nue_nc)) # NuFlav promoted to CC+NC assert set(NuFlavIntGroup(nue).flavints) == set((nue_cc, nue_nc)) # List of single flav str same as above assert set(NuFlavIntGroup(['nue']).flavints) == set((nue_cc, nue_nc)) # List of single flav same as above assert set(NuFlavIntGroup([nue]).flavints) == set((nue_cc, nue_nc)) # Single flavint spec assert set(NuFlavIntGroup(nue_cc).flavints) == set((nue_cc,)) # Str with single flavint spec assert set(NuFlavIntGroup('nue_cc').flavints) == set((nue_cc,)) # List of single str containing single flavint spec assert set(NuFlavIntGroup(['nue_cc']).flavints) == set((nue_cc,)) # Multiple flavints as *args assert set(NuFlavIntGroup(nue_cc, nue_nc).flavints) == set((nue_cc, nue_nc)) # List of flavints assert set(NuFlavIntGroup([nue_cc, nue_nc]).flavints) == set((nue_cc, nue_nc)) # List of single str containing multiple flavints spec assert set(NuFlavIntGroup(['nue_cc,nue_nc']).flavints) == set((nue_cc, nue_nc)) # List of str containing flavints spec assert set(NuFlavIntGroup(['nue_cc', 'nue_nc']).flavints) == set((nue_cc, nue_nc)) # Another NuFlavIntGroup assert set(NuFlavIntGroup(NuFlavIntGroup(nue_cc, nue_nc)).flavints) == set((nue_cc, nue_nc)) # Addition of flavints promoted to NuFlavIntGroup assert nue_cc + nue_nc == NuFlavIntGroup(nue) # Addition of flavs promoted to NuFlavIntGroup including both CC & NC assert nue + numu == NuFlavIntGroup(nue, numu) # Test remove nkg = NuFlavIntGroup('nue_cc+numucc') nkg.remove(NuFlavInt((12, 1))) assert nkg == NuFlavIntGroup('numucc') # Test del nkg = NuFlavIntGroup('nue_cc+numucc') del nkg[0] assert nkg == NuFlavIntGroup('numucc') # Equivalent object when converting to string and back to NuFlavIntGroup from # that string for n in range(1, len(ALL_NUFLAVINTS)+1): logging.debug('NuFlavIntGroup --> str --> NuFlavIntGroup, n = %d', n) for comb in combinations(ALL_NUFLAVINTS, n): ref = NuFlavIntGroup(comb) assert ref == NuFlavIntGroup(str(ref)) # Ordering desired_order = [ NuFlavIntGroup(s) for s in [ 'nuecc', 'nuebarcc', 'numucc', 'numubarcc', 'nutaucc', 'nutaubarcc', 'nuallnc', 'nuallbarnc' ] ] groups = flavintGroupsFromString('nuallnc, nuallbarnc') assert groups == desired_order,
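# --- Editor's usage sketch for xlateGroupsStr above (hedged: the expected output is
# read off its docstring, and the import path is a guess based on the PISA project
# layout; adjust to wherever this module actually lives):
# from pisa.utils.flavInt import xlateGroupsStr, NuFlavIntGroup
#
# grouped, ungrouped = xlateGroupsStr('nuall_nc,nue')
# # grouped   -> [NuFlavIntGroup('nuall_nc'), NuFlavIntGroup('nue_cc+nue_nc')]
# # ungrouped -> one single-flavint NuFlavIntGroup per flavint left over
# #              (numu/nutau CC channels, antiparticles, nuebar NC, ...)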
import copy import warnings import re import numpy as np import pandas as pd import matplotlib as mpl from matplotlib import pyplot as plt import fitgrid # enforce some common structure for summary dataframes # scraped out of different fit objects. # _TIME is a placeholder and replaced by the grid.time value on the fly INDEX_NAMES = ['_TIME', 'model', 'beta', 'key'] # each model, beta combination has all these values, # some are per-beta, some are per-model KEY_LABELS = [ '2.5_ci', '97.5_ci', 'AIC', 'DF', 'Estimate', 'P-val', 'SE', 'SSresid', 'T-stat', 'has_warning', 'logLike', 'sigma2', ] # special treatment for per-model values ... broadcast to all params PER_MODEL_KEY_LABELS = ['AIC', 'SSresid', 'has_warning', 'logLike', 'sigma2'] def summarize( epochs_fg, modeler, LHS, RHS, parallel=True, n_cores=4, **kwargs ): """Fit the data with one or more model formulas and return summary information. Convenience wrapper, useful for keeping memory use manageable when gathering betas and fit measures for a stack of models. Parameters ---------- epochs_fg : fitgrid.epochs.Epochs as returned by `fitgrid.epochs_from_dataframe()` or `fitgrid.from_hdf()`, *NOT* a `pandas.DataFrame`. modeler : {'lm', 'lmer'} class of model to fit, `lm` for OLS, `lmer` for linear mixed-effects. Note: the RHS formula language must match the modeler. LHS : list of str the data columns to model RHS : model formula or list of model formulas to fit see the Python package `patsy` docs for the `lm` formula language and the R library `lme4` docs for the `lmer` formula language. parallel : bool n_cores : int number of cores to use. See what works, but follow the golden rule if running on a shared machine. **kwargs : key=value arguments passed to the modeler, optional Returns ------- summary_df : `pandas.DataFrame` indexed by `timestamp`, `model_formula`, `beta`, and `key`, where the keys are `ll.l_ci`, `uu.u_ci`, `AIC`, `DF`, `Estimate`, `P-val`, `SE`, `T-stat`, `has_warning`, `logLike`.
Examples -------- >>> lm_formulas = [ '1 + fixed_a + fixed_b + fixed_a:fixed_b', '1 + fixed_a + fixed_b', '1 + fixed_a', '1 + fixed_b', '1', ] >>> lm_summary_df = fitgrid.utils.summarize( epochs_fg, 'lm', LHS=['MiPf', 'MiCe', 'MiPa', 'MiOc'], RHS=lm_formulas, parallel=True, n_cores=4 ) >>> lmer_formulas = [ '1 + fixed_a + (1 + fixed_a | random_a) + (1 | random_b)', '1 + fixed_a + (1 | random_a) + (1 | random_b)', '1 + fixed_a + (1 | random_a)', ] >>> lmer_summary_df = fitgrid.utils.summarize( epochs_fg, 'lmer', LHS=['MiPf', 'MiCe', 'MiPa', 'MiOc'], RHS=lmer_formulas, parallel=True, n_cores=12, REML=False ) """ warnings.warn( 'fitgrid summaries are in early days, subject to change', FutureWarning ) # modicum of guarding msg = None if isinstance(epochs_fg, pd.DataFrame): msg = ( "Convert dataframe to fitgrid epochs with " "fitgrid.epochs_from_dataframe()" ) elif not isinstance(epochs_fg, fitgrid.epochs.Epochs): msg = f"epochs_fg must be a fitgrid.Epochs not {type(epochs_fg)}" if msg is not None: raise TypeError(msg) # select modeler if modeler == 'lm': _modeler = fitgrid.lm _scraper = _lm_get_summaries_df elif modeler == 'lmer': _modeler = fitgrid.lmer _scraper = _lmer_get_summaries_df else: raise ValueError("modeler must be 'lm' or 'lmer'") # promote RHS scalar str to singleton list RHS = np.atleast_1d(RHS).tolist() # loop through model formulas fitting and scraping summaries summaries = [] for _rhs in RHS: summaries.append( _scraper( _modeler( epochs_fg, LHS=LHS, RHS=_rhs, parallel=parallel, n_cores=n_cores, **kwargs, ) ) ) summary_df = pd.concat(summaries) _check_summary_df(summary_df, epochs_fg) return summary_df # ------------------------------------------------------------ # private-ish summary helpers for scraping summary info from fits # ------------------------------------------------------------ def _check_summary_df(summary_df, fg_obj): # check the fg_obj.time has propagated to the summary and the # rest of the index is OK. fg_obj can be fitgrid.Epochs, # LMGrid or LMERGrid, they all have a time attribute assert any( [ isinstance(fg_obj, fgtype) for fgtype in [ fitgrid.epochs.Epochs, fitgrid.fitgrid.LMFitGrid, fitgrid.fitgrid.LMERFitGrid, ] ] ) if not ( summary_df.index.names == [fg_obj.time] + INDEX_NAMES[1:] and all(summary_df.index.levels[-1] == KEY_LABELS) ): raise ValueError( "uh oh ... 
fitgrid summary dataframe bug, please post an issue" ) def _update_INDEX_NAMES(lxgrid, index_names): """use the grid time column name for the summary index""" assert index_names[0] == '_TIME' _index_names = copy.copy(index_names) _index_names[0] = lxgrid.time return _index_names def _lm_get_summaries_df(fg_ols, ci_alpha=0.05): """scrape fitgrid.LMFitgrid OLS info into a tidy dataframe Parameters ---------- fg_ols : fitgrid.LMFitGrid ci_alpha : float {.05} alpha for confidence interval Returns ------- summaries_df : pd.DataFrame index.names = [`_TIME`, `model`, `beta`, `key`] where `_TIME` is the `fg_ols.time` and columns are the `fg_ols` columns Notes ----- The `summaries_df` row and column indexes are munged to match fitgrid.lmer._get_summaries_df() """ # set time column from the grid, always index.names[0] _index_names = _update_INDEX_NAMES(fg_ols, INDEX_NAMES) _time = _index_names[0] # grab and tidy the formula RHS rhs = fg_ols.tester.model.formula.split('~')[1].strip() rhs = re.sub(r"\s+", " ", rhs) # fitgrid returns them in the last column of the index param_names = fg_ols.params.index.get_level_values(-1).unique() # fetch a master copy of the model info model_vals = [] model_key_attrs = [ ("DF", "df_resid"), ("AIC", "aic"), ("logLike", 'llf'), ("SSresid", 'ssr'), ("sigma2", 'mse_resid'), ] for (key, attr) in model_key_attrs: vals = getattr(fg_ols, attr).copy() if vals is None: raise AttributeError(f"model: {rhs} attribute: {attr}") vals['key'] = key model_vals.append(vals) # statsmodels result wrappers have different versions of llf! aics = (-2 * fg_ols.llf) + 2 * (fg_ols.df_model + fg_ols.k_constant) if not np.allclose(fg_ols.aic, aics): msg = ( "uh oh ...statsmodels OLS aic and llf calculations have changed." " please report an issue to fitgrid" ) raise ValueError(msg) # build model has_warnings with False for ols; use a name that does not # shadow the warnings module warnings_df = pd.DataFrame( np.zeros(model_vals[0].shape).astype('bool'), columns=model_vals[0].columns, index=model_vals[0].index, ) warnings_df['key'] = 'has_warning' model_vals.append(warnings_df) model_vals = pd.concat(model_vals) # constants across the model model_vals['model'] = rhs # replicate the model info for each beta # ... horribly redundant but mighty handy when slicing later pmvs = [] for p in param_names: pmv = model_vals.copy() # pmv['param'] = p pmv['beta'] = p pmvs.append(pmv) pmvs = ( pd.concat(pmvs).reset_index().set_index(_index_names) ) # INDEX_NAMES) # look up the param_name specific info for this bundle summaries = [] # select model point estimates mapped like so (key, OLS_attribute) sv_attrs = [ ('Estimate', 'params'), # coefficient value ('SE', 'bse'), ('P-val', 'pvalues'), ('T-stat', 'tvalues'), ] for idx, (key, attr) in enumerate(sv_attrs): attr_vals = getattr(fg_ols, attr).copy() # ! 
don't mod the _grid if attr_vals is None: raise AttributeError(f"not found: {attr}") attr_vals.index.set_names('beta', level=-1, inplace=True) attr_vals['model'] = rhs attr_vals['key'] = key # update list of beta bundles summaries.append( attr_vals.reset_index().set_index(_index_names) ) # INDEX_NAMES)) # special handling for confidence interval ci_bounds = [ f"{bound:.1f}_ci" for bound in [100 * (1 + (b * (1 - ci_alpha))) / 2.0 for b in [-1, 1]] ] cis = fg_ols.conf_int(alpha=ci_alpha) cis.index = cis.index.rename([_time, 'beta', 'key']) cis.index = cis.index.set_levels(ci_bounds, 'key') cis['model'] = rhs summaries.append(cis.reset_index().set_index(_index_names)) summaries_df = pd.concat(summaries) # add the parameter model info summaries_df = pd.concat([summaries_df, pmvs]).sort_index().astype(float) _check_summary_df(summaries_df, fg_ols) return summaries_df def _lmer_get_summaries_df(fg_lmer): """scrape a single model fitgrid.LMERFitGrid into a standard summary format Note: some values are fitgrid attributes (via pymer), others are derived Parameters ---------- fg_lmer : fitgrid.LMERFitGrid """ def scrape_sigma2(fg_lmer): # sigma2 is extracted from fg_lmer.ranef_var ... # residuals should be in the last row of ranef_var at each Time ranef_var = fg_lmer.ranef_var # set the None index names assert ranef_var.index.names == [fg_lmer.time, None, None] ranef_var.index.set_names([fg_lmer.time, 'key', 'value'], inplace=True) assert 'Residual' == ranef_var.index.get_level_values(1).unique()[-1] assert all( ['Name', 'Var', 'Std'] == ranef_var.index.get_level_values(2).unique() ) # slice out the Residual Variance at each time point # and drop all but the Time indexes to make Time x Chan sigma2 = ranef_var.query( 'key=="Residual" and value=="Var"' ).reset_index(['key', 'value'], drop=True) return sigma2 # set time column from the grid, always index.names[0] _index_names = _update_INDEX_NAMES(fg_lmer, INDEX_NAMES) _time = _index_names[0] # look these up directly pymer_attribs = ['AIC', 'has_warning', 'logLike'] # x=lmer_fg: calculate or extract from other attributes derived_attribs = { # since pymer4 0.7.1 the Lmer model.resid are renamed # model.residuals and come back as a well-behaved # dataframe of floats rather than rpy2 objects "SSresid": lambda lmer: lmer.residuals.apply(lambda x: x ** 2) .groupby([fg_lmer.time]) .sum(), 'sigma2': lambda x: scrape_sigma2(x), } # grab and tidy the formula RHS from the first grid cell rhs = fg_lmer.tester.formula.split('~')[1].strip() rhs = re.sub(r"\s+", "", rhs) # coef estimates and stats ... these are 2-D summaries_df = fg_lmer.coefs.copy() # don't mod the original summaries_df.index.names = [_time, 'beta', 'key'] summaries_df = summaries_df.query("key != 'Sig'") # drop the stars summaries_df.index = summaries_df.index.remove_unused_levels() summaries_df.insert(0, 'model', rhs) summaries_df.set_index('model', append=True, inplace=True) summaries_df.reset_index(['key', 'beta'], inplace=True)
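# --- Editor's usage sketch for summarize() (hedged: channel and formula names are
# placeholders; the call shape follows the docstring Examples above):
# import fitgrid
# epochs_fg = fitgrid.epochs_from_dataframe(...)  # see the fitgrid docs
# summary_df = fitgrid.utils.summarize(
#     epochs_fg, 'lm',
#     LHS=['channel_a', 'channel_b'],
#     RHS=['1 + condition', '1'],
#     parallel=False, n_cores=1,
# )
# # summary_df is indexed by (time, model, beta, key), keys as in KEY_LABELS above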
import logging from cobra.core.dictlist import DictList from cobrakbase.core.kbaseobject import KBaseObjectBase, KBaseObject from cobrakbase.core.utils import seed_coefficients_to_string from cobrakbase.core.utils import get_str, get_int, get_id_from_ref logger = logging.getLogger(__name__) class KBaseFBASolution(KBaseObjectBase): def get_reaction_variable_by_id(self, rxn_id): for v in self.data['FBAReactionVariables']: v_id = v['modelreaction_ref'].split('/')[-1] if rxn_id == v_id: return v return None @property def objective_value(self): return self.data['objectiveValue'] class KBaseFBAModelCompartment(KBaseObjectBase): pass class KBaseFBAModelMetabolite(KBaseObjectBase): @property def formula(self): return get_str('formula', None, self.data) @property def charge(self): return get_int('charge', 0, self.data) @property def compartment(self): return self.data['modelcompartment_ref'].split('/')[-1] @property def annotation(self): annotation = {} if 'dblinks' in self.data: for db in self.data['dblinks']: if db == "BiGG2": annotation["bigg.metabolite"] = self.data['dblinks'][db][0] if db == "LigandCompound": annotation["kegg.compound"] = self.data['dblinks'][db][0] if db == "ChEBI": annotation["chebi"] = self.data['dblinks'][db][0] if db == "MetaNetX": annotation["metanetx.chemical"] = self.data['dblinks'][db][0] if db == "MetaCyc": annotation["biocyc"] = self.data['dblinks'][db][0] if db == "ModelSeed": annotation["seed.compound"] = self.data['dblinks'][db][0] if db == "HMDB": annotation["hmdb"] = self.data['dblinks'][db][0] return annotation def get_seed_id(self): return self.data['compound_ref'].split('/')[-1] def get_reference(self, database): if 'dblinks' in self.data and database in self.data['dblinks']: if not len(self.data['dblinks'][database]) == 1: print('bad!', self.data['dblinks'][database]) return self.data['dblinks'][database][0] return None def get_references(self, database): # return the full list of references for the database if 'dblinks' in self.data and database in self.data['dblinks']: return self.data['dblinks'][database] return None def get_original_id(self): if 'string_attributes' in self.data and 'original_id' in self.data['string_attributes']: return self.data['string_attributes']['original_id'] return None def __str__(self): return self.id class KBaseFBAModelReaction: def __init__(self, json=None, dblinks=None, name="reaction", id="rxn00000", model=None, direction='=', maxforflux=1000000, maxrevflux=1000000, modelReactionReagents=None): # direction/maxforflux/maxrevflux/modelReactionReagents were previously read from # undefined names when json was None; they are now parameters with defaults, and the # template reaction_ref reuses `id` where an undefined `reaction` was referenced self.model = model if json is not None: self.data = json else: self.data = { 'aliases': [], 'dblinks': dblinks or {}, 'direction': direction, 'edits': {}, 'gapfill_data': {}, 'id': id, 'maxforflux': maxforflux, 'maxrevflux': maxrevflux, 'modelReactionProteins': [ {'complex_ref': '~/template/complexes/name/cpx00700', 'modelReactionProteinSubunits': [ {'feature_refs': ['~/genome/features/id/b3177'], 'note': '', 'optionalSubunit': 0, 'role': 'Dihydropteroate synthase (EC 2.5.1.15)', 'triggering': 1}], 'note': '', 'source': ''}], 'modelReactionReagents': modelReactionReagents or [], 'modelcompartment_ref': '~/modelcompartments/id/c0', 'name': name, 'numerical_attributes': {}, 'probability': 0, 'protons': 0, 'reaction_ref': '~/template/reactions/id/' + id, 'string_attributes': {} } @property def bounds(self): bounds = self.get_bounds() return (bounds[0], bounds[1]) @property def compartments(self): return set(map(lambda m : m.compartment, self.metabolites)) @property def metabolites(self): metabolites = {} stoichiometry = self.stoichiometry if self.model is not None: found = set() 
for cpd_id in stoichiometry: if self.model.metabolites.has_id(cpd_id): metabolite = self.model.metabolites.get_by_id(cpd_id) metabolites[metabolite] = stoichiometry[metabolite.id] found.add(metabolite.id) #for metabolite in self.model.metabolites: # if metabolite.id in stoichiometry.keys(): # found.add(metabolite.id) # metabolites[metabolite] = stoichiometry[metabolite.id] missing = set(stoichiometry) - found if len(missing) > 0: logger.warning('undeclared metabolites: %s', missing) else: logger.warning('model not assigned') return metabolites @property def gene_reaction_rule(self): gpr = self.get_gpr() return self.get_gpr_string(gpr) @property def direction(self): lb, ub = self.get_reaction_constraints() if lb < 0 and ub > 0: return '=' elif lb == 0 and ub > 0: return '>' elif lb < 0 and ub == 0: return '<' return '?' @property def annotation(self): annotation = {} if 'dblinks' in self.data: for db in self.data['dblinks']: if db == "BiGG": annotation["bigg.reaction"] = self.data['dblinks'][db][0] if db == "LigandReaction": annotation["kegg.reaction"] = self.data['dblinks'][db][0] if db == "MetaCyc": annotation["biocyc"] = self.data['dblinks'][db][0] if db == "ModelSeedReaction": annotation["seed.reaction"] = self.data['dblinks'][db][0] return annotation def get_reaction_constraints_from_direction(self): if 'direction' in self.data: if self.data['direction'] == '>': return 0, 1000 elif self.data['direction'] == '<': return -1000, 0 else: return -1000, 1000 return None, None def get_reaction_constraints(self): # TODO: clean this function up if 'maxrevflux' in self.data and 'maxforflux' in self.data: if self.data['maxrevflux'] == 1000000 and self.data['maxforflux'] == 1000000 and 'direction' in self.data: return self.get_reaction_constraints_from_direction() return -1 * self.data['maxrevflux'], self.data['maxforflux'] return -1000, 1000 def get_reference(self, database): if 'dblinks' in self.data and database in self.data['dblinks']: if not len(self.data['dblinks'][database]) == 1: print('bad!', self.data['dblinks'][database]) return self.data['dblinks'][database][0] return None def get_references(self, database): # return the full list of references for the database if 'dblinks' in self.data and database in self.data['dblinks']: return self.data['dblinks'][database] return None def get_original_id(self): if 'string_attributes' in self.data and 'original_id' in self.data['string_attributes']: return self.data['string_attributes']['original_id'] return None @property def id(self): return self.data['id'] @property def name(self): return self.data['name'] @property def stoichiometry(self): s = {} for o in self.data['modelReactionReagents']: c_id = o['modelcompound_ref'].split('/')[-1] value = o['coefficient'] s[c_id] = value return s def get_bounds(self): if 'maxrevflux' not in self.data or 'maxforflux' not in self.data: import math from modelseedpy.core.msmodel import get_reaction_constraints_from_direction lb, ub = get_reaction_constraints_from_direction(self.data['direction']) return math.fabs(lb), math.fabs(ub), self.data['direction'] maxrevflux = self.data['maxrevflux'] maxforflux = self.data['maxforflux'] direction = '=' if maxrevflux == 0 and maxforflux > 0: direction = '>' elif maxrevflux > 0 and maxforflux == 0: direction = '<' elif maxrevflux == 0 and maxforflux == 0: direction = '0' return maxrevflux, maxforflux, direction def get_gpr_string(self, gpr): ors = [] for ands in gpr: a = [] for g in ands: a.append(g) ors.append(" and ".join(a)) 
gpr_string = "(" + (") or (".join(ors)) + ")" if gpr_string == "()": return "" #if gpr_string.startswith("(") and gpr_string.endswith(")"): # gpr_string = gpr_string[1:-1].strip() return gpr_string def get_gpr(self): gpr = [] for mrp in self.data['modelReactionProteins']: #print(mrp.keys()) gpr_and = set() for mrps in mrp['modelReactionProteinSubunits']: #print(mrps.keys()) for feature_ref in mrps['feature_refs']: gpr_and.add(get_id_from_ref(feature_ref)) if len(gpr_and) > 0: gpr.append(gpr_and) return gpr def __str__(self): direction = self.direction op = '<?>' if direction == '=': op = '<=>' elif direction == '>': op = '-->' elif direction == '<': op = '<--' else: op = '<?>' eq = seed_coefficients_to_string(self.data['modelReactionReagents'], op) return '{}: {}'.format(self.id, eq) class KBaseFBAModel(KBaseObject): def __init__(self, data=None, info=None, args=None): super().__init__(data, info, args, 'KBaseFBA.FBAModel') self.metabolites = DictList() model_compounds = [] ids = set() for o in self.data['modelcompounds']: if o['id'] not in ids: ids.add(o['id']) model_compounds.append(o) self.data['modelcompounds'] = model_compounds self.update_indexes() def update_indexes(self): if self.data is not None: self.metabolites += [KBaseFBAModelMetabolite(i) for i in self.get_metabolites()] def get_compartments(self): return self.data['modelcompartments'] def get_reactions(self): if self.data == None or not 'modelreactions' in self.data: return [] return self.data['modelreactions'] def get_metabolites(self): if self.data == None or not 'modelcompounds' in self.data: return [] return self.data['modelcompounds'] def get_compartment(self, id): for c in self.get_compartments(): if c['id'] == id: return c return None def get_metabolite(self, id): for m in self.get_metabolites(): if m['id'] == id: return KBaseFBAModelMetabolite(m) return None def get_metabolite_degree(self, cpd_id): return len(self.find_reaction_by_compound_id(cpd_id)) def build_peakstring(self,link,additional_peak_hash = {}): peakstring_hash = dict() for met in self.modelcompounds: if hasattr(met,'dblinks'): if link in met.dblinks: array = met.dblinks[link] for form in array: if form not in peakstring_hash: peakstring_hash[form] = list() peakstring_hash[form].append(met.id) for peak in additional_peak_hash: if peak not in peakstring_hash: peakstring_hash[peak] = list() for cpd in additional_peak_hash[peak]: if cpd not in peakstring_hash[peak]: peakstring_hash[peak].append(cpd) print(len(peakstring_hash)) peakstring = "" for form in peakstring_hash: peakstring += form+":1" for cpd in peakstring_hash[form]: peakstring += ":"+cpd peakstring += ";" return peakstring @property def reactions(self): return [KBaseFBAModelReaction(i, model=self) for i in self.get_reactions()] def get_reaction(self, id): for r in self.get_reactions(): if r['id'] == id: return KBaseFBAModelReaction(r) return None def add_reaction(self, rxn): #validate self.data['modelreactions'].append(rxn) return -1 def add_metabolite(self, cpd, force=False): #validate if not force: #validate 1 self.data['modelcompounds'].append(cpd) return -1 def find_compound_ids_by_compartment(self, id): ids = set() for m in self.get_metabolites(): if m['modelcompartment_ref'].endswith(id): ids.add(m['id']) return ids def find_compound_id_from_oid(self, oid): for m in self.get_metabolites(): sid = None if 'string_attributes' in m and 'original_id' in m['string_attributes']: sid = m['string_attributes']['original_id'] if sid == oid: return m['id'] return None def 
find_reaction_id_from_oid(self, oid): for r in self.get_reactions(): sid = None if 'string_attributes' in r and 'original_id' in r['string_attributes']: sid = r['string_attributes']['original_id'] if sid == oid: return r['id'] return None def find_reaction_by_compound_id(self, modelcompound_id): rxn_list = [] for modelreaction in self.data['modelreactions']: #print(modelreaction.keys()) contains = False for mrr in modelreaction['modelReactionReagents']: if modelcompound_id in mrr['modelcompound_ref']: contains = True if contains: rxn_list.append(modelreaction) return rxn_list def delete_compartment(self, id, store=True): c = self.get_compartment(id) if not c == None: if store and not 'modelcompartments_removed' in self.data: self.data['modelcompartments_removed'] = {} if store: self.data['modelcompartments_removed'][c['id']] = c ids = self.find_compound_ids_by_compartment(id) print(c['id'], 'removing compounds:', len(ids)) self.delete_compounds(ids, store) compartments = [o for o in self.get_compartments() if not o['id'] == id] self.data['modelcompartments'] = compartments return c def delete_compounds(self, c_ids, store=True): metabolites = [] if store and not 'modelcompounds_removed' in
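# --- Editor's usage sketch (hedged): a minimal hand-built reaction dict carrying only
# the keys that KBaseFBAModelReaction above actually reads; real KBase objects have
# many more fields.
demo_rxn = {
    'id': 'rxn00001_c0',
    'name': 'demo reaction',
    'direction': '>',
    'maxforflux': 1000000,
    'maxrevflux': 1000000,
    'modelReactionProteins': [],
    'modelReactionReagents': [
        {'modelcompound_ref': '~/modelcompounds/id/cpd00001_c0', 'coefficient': -1},
        {'modelcompound_ref': '~/modelcompounds/id/cpd00002_c0', 'coefficient': 1},
    ],
    'dblinks': {},
}
rxn = KBaseFBAModelReaction(json=demo_rxn)
print(rxn.stoichiometry)  # {'cpd00001_c0': -1, 'cpd00002_c0': 1}
print(rxn.direction)      # '>' (the sentinel 1000000 fluxes defer to 'direction')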
the same entity ID and timestamp. - Source is modified during an import. This includes adding, updating, or removing source data and/or metadata. Examples of updating metadata include but are not limited to changing storage location, storage class, or retention policy. - Online serving cluster is under-provisioned. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest`): The request object. Request message for [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. entity_type (:class:`str`): Required. The resource name of the EntityType grouping the Features for which values are being imported. Format: ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse` Response message for [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([entity_type]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = featurestore_service.ImportFeatureValuesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if entity_type is not None: request.entity_type = entity_type # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.import_feature_values, default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("entity_type", request.entity_type),) ), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, featurestore_service.ImportFeatureValuesResponse, metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, ) # Done; return the response. return response async def batch_read_feature_values( self, request: featurestore_service.BatchReadFeatureValuesRequest = None, *, featurestore: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Batch reads Feature values from a Featurestore. This API enables batch reading Feature values, where each read instance in the batch may read Feature values of entities from one or more EntityTypes. 
Point-in-time correctness is guaranteed for Feature values of each read instance as of each instance's read timestamp. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest`): The request object. Request message for [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. (- Next Id: 6 -) featurestore (:class:`str`): Required. The resource name of the Featurestore from which to query Feature values. Format: ``projects/{project}/locations/{location}/featurestores/{featurestore}`` This corresponds to the ``featurestore`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse` Response message for [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([featurestore]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = featurestore_service.BatchReadFeatureValuesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if featurestore is not None: request.featurestore = featurestore # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.batch_read_feature_values, default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("featurestore", request.featurestore),) ), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, featurestore_service.BatchReadFeatureValuesResponse, metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, ) # Done; return the response. return response async def export_feature_values( self, request: featurestore_service.ExportFeatureValuesRequest = None, *, entity_type: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Exports Feature values from all the entities of a target EntityType. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest`): The request object. Request message for [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. entity_type (:class:`str`): Required. The resource name of the EntityType from which to export Feature values. 
Format: ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` Response message for [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([entity_type]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = featurestore_service.ExportFeatureValuesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if entity_type is not None: request.entity_type = entity_type # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.export_feature_values, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("entity_type", request.entity_type),) ), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, featurestore_service.ExportFeatureValuesResponse, metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, ) # Done; return the response. return response async def search_features( self, request: featurestore_service.SearchFeaturesRequest = None, *, location: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchFeaturesAsyncPager: r"""Searches Features matching a query in a given project. Args: request (:class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest`): The request object. Request message for [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. location (:class:`str`): Required. The resource name of the Location to search Features. Format: ``projects/{project}/locations/{location}`` This corresponds to the ``location`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesAsyncPager: Response message for [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([location]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = featurestore_service.SearchFeaturesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if location is not None: request.location = location # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.search_features, default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("location", request.location),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.SearchFeaturesAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done;
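# --- Editor's usage sketch (hedged: project/location are placeholders; the async client
# class ships with the google-cloud-aiplatform v1beta1 generated surface used above):
import asyncio
from google.cloud.aiplatform_v1beta1.services.featurestore_service import (
    FeaturestoreServiceAsyncClient,
)

async def main():
    client = FeaturestoreServiceAsyncClient()
    pager = await client.search_features(
        location="projects/my-project/locations/us-central1",  # placeholder
    )
    async for feature in pager:  # the pager resolves additional pages lazily
        print(feature.name)

# asyncio.run(main())  # requires application default credentials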
import json import os import re import sys import time import urllib from urllib.parse import quote_plus import pymongo from BaseColor.base_colors import red, yellow, cyan, green, hred, blue, hgreen from pymongo.errors import OperationFailure from tqdm import tqdm from MongodbDuplicateChecker.gears import printer, gen_hash class MongodbDuplicateChecker(object): def __init__(self, args): self.default_mos_path = 'mongodb_server.json' mos_file = self._mos_path(args) mos = self._get_mos(mos_file) mongodb_uri = self._get_uri(mos) self.client = pymongo.MongoClient(mongodb_uri) mongodb = self._db_name(mos) self.db = self.client[mongodb] self.collection = self._get_collection(mos) self.db_set = self.db[self.collection] self.check_keys = self._get_check_keys(mos) def start(self): print() printer('system start', fill_with='=', alignment='m') printer(f'processing target [ {hred(self.collection)} ]') db_data = self.db_set.find({}, {x: 1 for x in self.check_keys}, no_cursor_timeout=True, batch_size=1000) self._process(db_data=db_data) printer(f'[ {yellow(self.collection)} ] duplicate check done') def _process(self, db_data): warn1 = hred(" Data is INVALUABLE! ") warn2 = red(" Please make sure you fully understand what you are doing in the following steps! ") print() printer("$", fill_with='$', alignment='m', msg_head_tail=['', '']) printer(warn1, fill_with='$', alignment='m', msg_head_tail=['', '']) printer(warn2, fill_with='$', alignment='m', msg_head_tail=['', '']) printer("$", fill_with='$', alignment='m', msg_head_tail=['', '']) print() time.sleep(3) counter = set() del_set = set() del_success = 0 total = 0 printer('start checking') t = tqdm(total=self.db_set.count_documents({})) for data in db_data: d_lis = [] for x in self.check_keys: value = eval("data.get('" + "', {}).get('".join(x.split('.')) + "', '')") if value: d_lis.append(str(value)) if d_lis: d_str = '-'.join(d_lis) if d_str not in counter: counter.add(d_str) else: del_set.add(data.get("_id")) t.update() total += 1 t.close() printer('check done') duplicate_count = len(del_set) if duplicate_count > 0: printer(f"done! total: [ {yellow(total)} ], duplicate data: [ {red(duplicate_count)} ]") del_sta = input('Do you want to delete them all? (y/N): ').lower() if del_sta == 'y': tq = tqdm(total=duplicate_count) for dt in del_set: try: self.db_set.delete_one({"_id": dt}) del_success += 1 except Exception as E: printer(f"delete err!
{E}") continue tq.update() tq.close() printer(f'delete success: [ {red(del_success)} ]') else: printer(f'total: [ {blue(total)} ], no duplicate data found') def _get_check_keys(self, mos): check_keys = mos.get('check_keys') if check_keys: return check_keys doc = self.db_set.find_one() doc = dict(doc) if doc else dict() keys_lis = self._get_key_path(doc) print() printer(f'keys in [ {yellow(self.collection)} ]', fill_with='*', alignment='m') for i, name in enumerate(keys_lis): printer(f"[ {red(i)} ]: {yellow(name)}") printer('', fill_with='*') print(f"input the {red('nums')} of the keys to check duplicate(such as: 1,2,3)") print("empty to cancel") sel = input(red(" 'i' ") + "to manual input: ").strip() if sel == 'i': input_keys = input("MANUAL INPUT(use ',' to separate each keys: key1,key2.key_lv2,key3): ").strip() input_keys = [x.strip() for x in input_keys.split(',')] if input_keys else [] if not input_keys: print("wrong input!") exit(0) return input_keys if sel: sel = re.findall(r'\d+', sel) sel = [int(x) for x in sel if int(x) in range(len(keys_lis))] if sel else [] return [keys_lis[x] for x in sel] else: ch = input('wrong input, do you want to input again?(y/n)').lower() if ch == 'y': self._get_check_keys(mos) else: exit(0) def _get_key_path(self, dic, key_up='', sep='.'): """ 递归获取多层字典的所有的 key, 可以以指定的分割符组合 :param dic: 源字典 :param key_up: 上层键, 第一次传入是空字符 :param sep: 上下层的键的分割符, 默认是 . :return: 返回键列表 """ se = list() for k, v in dic.items(): i_k = "{}{}{}".format(key_up, sep, k) if key_up else k if isinstance(v, dict): se.extend(self._get_key_path(v, i_k, sep)) else: se.append(i_k) return se def _get_collection(self, mos): names = mos.get('collection') if names: return names names = self.db.list_collection_names(include_system_collections=False) print() printer(f'{yellow("collection")} names:', fill_with='*', alignment='m', msg_head_tail=['*', '*']) for i, name in enumerate(names): printer(f"[ {red(i)} ]: {yellow(name)}") printer('', fill_with='*') sel = input(f"chose the {red('num')} of the collection's name to process: ") sel = re.findall(r'\d+', sel) sel = int(sel[0]) if sel else None if sel not in [x for x in range(len(names))]: raise KeyboardInterrupt('wrong input ! ') return names[sel] def _db_name(self, mos): names = mos.get('db') if names: return names try: names = self.client.list_database_names() self._save_into_file(mos) except OperationFailure: printer('the mongodb setting maybe wrong! please check it and restart') for k, v in mos.items(): printer(red(f"{k}: {v}")) sys.exit(1) print() printer(f"{yellow('database')} names:", fill_with='*', alignment='m', msg_head_tail=['*', '*']) for i, name in enumerate(names): printer(f"[ {red(i)} ]: {yellow(name)}") printer('', fill_with='*') sel = input(f"input the {red('num')} of the database's name to process: ") sel = re.findall(r'\d+', sel) sel = int(sel[0]) if sel else None if sel not in [x for x in range(len(names))]: print("wrong input ! 
") exit(0) return names[sel] def _mos_path(self, args): args = args or self.default_mos_path return args def _get_mos(self, mos_file): if os.path.exists(mos_file): with open(mos_file, 'r') as rf: mos_lis_raw = json.loads(rf.read()) if isinstance(mos_lis_raw, dict): mos_lis_raw = [mos_lis_raw] print() printer(f'servers found [ {red(mos_file)} ]:', fill_with='*', alignment='m') for i, dic in enumerate(mos_lis_raw): mos_info = f"{dic.get('host')}:{dic.get('port')}, {dic.get('name')}, {dic.get('source')}" printer(f"[ {red(i)} ]: {yellow(mos_info)}") printer('', fill_with='*') print('chose the num of the server') selection = input('Or input "m" to new setting: ').strip() if not selection: print("wrong input") exit(0) if selection == 'm': return self._get_new_server_setting() selection = re.findall(r'\d+', selection) selection = int(selection[0]) if selection else None if selection not in [x for x in range(len(mos_lis_raw))]: raise KeyboardInterrupt('wrong input! ') mos_temp = mos_lis_raw[selection] return mos_temp else: print() printer('mongodb_servers file not exits! ') print(f"INPUT [ {red('p')} ] setting file path") inp = input(f'INPUT [ {red("m")} ] manual setting').lower() if inp == 'p': m_path = input('path to mongodb setting: ') return self._get_mos(m_path) elif inp == 'm': return self._get_new_server_setting() else: raise KeyboardInterrupt('wrong input! ') def _get_new_server_setting(self): m_server = input('server(127.0.0.1): ') m_port = input('port(27017): ') or '0' m_name = input('user(root): ') m_pwd = input('password(<PASSWORD>): ') m_source = input('source database(admin): ') m_dic = {"host": m_server or '1192.168.127.12', "port": int(m_port) or 27017, "name": m_name or 'root', "password": m_pwd or '<PASSWORD>', "source": m_source or 'admin' } return m_dic def _save_into_file(self, dic): with open(self.default_mos_path, 'w+') as wf: in_str = json.dumps([dic]) wf.write(in_str) @staticmethod def _get_uri(mos): uri = 'mongodb://{}:{}@{}:{}/{}'.format( mos['name'], mos['password'], mos['host'], mos['port'], mos['source']) return uri def __del__(self): printer('system exit', fill_with='=') try: self.client.close() except: pass class MongodbCopy(object): def __init__(self, args=None): self.default_mos_path = self._mos_path(args, '') self.save_mos_filename = 'mongocopy' self.fromdb_collection = None self.from_db_name = None self.fromdb_client = None self.fromdb = None self.fromdb_set = None self.f_db_signature = None self.todb_collection = None self.todb_client = None self.todb = None self.todb_set = None self.condition = {} self.t_db_signature = None self.tdb_str = None self.f_mos = self._get_from_mos() self.t_mos = self._get_to_mos() self.filter = self._get_filter() self._save_mos(self.f_mos, self.t_mos) def start_copy(self): try: printer('scanning documents ...') t_count = self.todb_set.count_documents({}) f_count = self.fromdb_set.count_documents(self.condition) sk_str = f'skip documents?\n' sk_str += f" source db [ {red(f_count)} ]\n" sk_str += f" target db [ {red(t_count)} ]\n" sk_str += f"empty to set as [ {yellow(t_count)} ]: " f_skip = input(sk_str) or str(t_count) f_skip = int(f_skip) if f_skip and isinstance(f_skip, str) and f_skip.isdigit() else 0 to_data_set = set() self.t_db_signature += f".{self.todb_collection}" self.f_db_signature += f".{self.fromdb_collection}" print() cp_msg = f"copy start" printer(cp_msg, alignment='m', msg_head_tail=['', ''], fill_with='*') printer(f"from [ {blue(self.f_db_signature)} ] to [ {green(self.t_db_signature)} ]", alignment='m', 
msg_head_tail=['', '']) if self.filter: printer('filter keys: ') for fk in self.filter: printer(f" {red(fk)}") printer('', msg_head_tail=['', '']) to_data = self.todb_set.find({}, {x: 1 for x in self.filter}, no_cursor_timeout=True, batch_size=1000) printer(f'preparing target [ {self.tdb_str} ] data') tt = tqdm(total=t_count) for data in to_data: tt.update() try: d_lis = [] for x in self.filter: value = eval("data.get('" + "', {}).get('".join(x.split('.')) + "', '')") if value: d_lis.append(str(value)) if d_lis: to_data_set.add(gen_hash(json.dumps(d_lis))) except: pass tt.close() time.sleep(0.1) printer('target ready') if self.filter: from_data = self.fromdb_set.find(self.condition, {x: 1 for x in self.filter}, no_cursor_timeout=True, batch_size=500, skip=f_skip) else: from_data = self.fromdb_set.find(self.condition, no_cursor_timeout=True, batch_size=500, skip=f_skip) printer('start copy, it may take a long time, you can take a nap or do something else...') time.sleep(0.1) t = tqdm(total=f_count-f_skip) count = 0 dup_count = 0 for fd in from_data: try: d_key = '' d_key_l = [] for x in self.filter: value = eval("fd.get('" + "', {}).get('".join(x.split('.')) + "', '')") if value: d_key_l.append(str(value)) if d_key_l: d_key = gen_hash(json.dumps(d_key_l)) except: d_key = gen_hash(json.dumps([x for x in fd.values()])) if d_key not in to_data_set: try: self.todb_set.insert_one(fd) count += 1 except Exception as E: pass else: dup_count += 1 t.update() t.close() time.sleep(0.1) printer('process done') printer(f'copy done! total: [ {yellow(f_count-f_skip)} ], success: [ {green(count)} ], duplicate [ {red(dup_count)} ], skip [ {cyan(f_skip)} ]') except KeyboardInterrupt: self.end() def _get_filter(self): filter_ori = self.t_mos.get('filter') i_info = "input check keys (input 'i' to direct insert, empty to show all keys): " if filter_ori: i_info = f"input check keys ({filter_ori}, input 'i' to direct insert, empty to show all keys): " sel = filter_ori or input(i_info) if sel.lower() == 'i': return {} if sel: try:
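# --- Side note (illustrative helper, not part of the module above) ----------
# Both classes read dotted key paths by building an eval() string such as
# "data.get('a', {}).get('b', '')". A reduce-based walk gives the same result
# without eval; get_nested is a hypothetical name for such a helper.
from functools import reduce


def get_nested(doc, dotted_key, default=''):
    # Walk every path segment except the last through nested dicts.
    keys = dotted_key.split('.')
    parent = reduce(lambda d, k: d.get(k, {}) if isinstance(d, dict) else {},
                    keys[:-1], doc)
    return parent.get(keys[-1], default) if isinstance(parent, dict) else default


doc = {'meta': {'id': 42}, 'name': 'x'}
assert get_nested(doc, 'meta.id') == 42
assert get_nested(doc, 'meta.missing') == ''
assert get_nested(doc, 'name') == 'x'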
#!/usr/bin/env python # /*************************************************************************** # MavLink LoRa node (ROS) upload mission example script # Copyright (c) 2018, <NAME> <<EMAIL>> <<EMAIL>> # SDU UAS Center, http://sdu.dk/uas # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# **************************************************************************** ''' Script to upload mission for healthdrone test flights Revision 2019-09-18 FMA First published version ''' # imports import rospy import time import std_msgs.msg._Empty # mission messages from mavlink_lora.msg import mavlink_lora_mission_list from mavlink_lora.msg import mavlink_lora_mission_item_int, mavlink_lora_mission_ack, mavlink_lora_command_set_mode # parameters for mission upload mavlink_lora_pub_topic = '/mavlink_interface/mission/mavlink_upload_mission' mavlink_lora_set_mode_pub_topic = '/mavlink_interface/command/set_mode' mavlink_lora_clear_all = '/mavlink_interface/mission/mavlink_clear_all' time_start = 0 time_end = 0 class UploadMissionNode: def __init__(self): self.lat = 0 self.lon = 0 self.alt = 0 self.way_lat = [] self.way_lon = [] self.way_alt = [] # self.load_mission() self.static_alt = 32 # pubs self.mavlink_msg_pub = rospy.Publisher(mavlink_lora_pub_topic, mavlink_lora_mission_list, queue_size=0) self.mavlink_set_mode_pub = rospy.Publisher(mavlink_lora_set_mode_pub_topic, mavlink_lora_command_set_mode, queue_size=0) self.mavlink_clear_mission = rospy.Publisher(mavlink_lora_clear_all, std_msgs.msg.Empty, queue_size=0) def send_mavlink_set_mode(self, mode, custom_mode, custom_sub_mode): # make msg and publish it msg = mavlink_lora_command_set_mode() msg.mode = mode msg.custom_mode = custom_mode msg.custom_sub_mode = custom_sub_mode self.mavlink_set_mode_pub.publish(msg) def get_current_mission(self): lat = [] lon = [] for idx in range(len(self.way_lat)): lat.append(self.way_lat[idx]/10000000) lon.append(self.way_lon[idx]/10000000) return lat, lon def load_mission_from_path(self, path, alt): self.way_lat = [] self.way_lon = [] self.way_alt = [] for points in path: self.way_lat.append(int(float(points[1]) * 10000000)) self.way_lon.append(int(float(points[0]) * 10000000)) self.way_alt.append(int(alt)) def load_mission(self): file = open('gc_functions/mission_1.txt', 'r') self.way_lat = [] self.way_lon = [] self.way_alt = [] for line in file.readlines(): o, t, th = line.split(" ") self.way_lat.append(int(float(o) * 10000000)) self.way_lon.append(int(float(t) * 10000000)) self.way_alt.append(int(th)) def load_mission2(self): file = open('gc_functions/mission_2.txt', 'r') self.way_lat = [] self.way_lon = [] self.way_alt = [] for line in file.readlines(): o, t, th = line.split(" ") self.way_lat.append(int(float(o) * 10000000)) self.way_lon.append(int(float(t) * 10000000)) self.way_alt.append(int(th)) def set_waypoints(self, lat, lon, alt, sequ): way = mavlink_lora_mission_item_int() way.target_system = 0 way.target_component = 0 way.seq = sequ way.frame = 6 way.command = 16 way.param1 = 0 way.param2 = 5 way.param3 = 0 way.x = lat way.y = lon way.z = alt way.autocontinue = 1 return way def upload_mission(self, current_lat, current_lon): self.lat = int(current_lat * 10000000) self.lon = int(current_lon * 10000000) if self.lat == 0 and self.lon == 0: print("No GPS Mission not uploaded") return seq = 0 missionlist = mavlink_lora_mission_list() # Takeoff takeoff = mavlink_lora_mission_item_int() takeoff.target_system = 0 takeoff.target_component = 0 takeoff.seq = seq takeoff.frame = 6 # global pos, relative alt_int takeoff.command = 22 takeoff.x = self.lat takeoff.y = self.lon takeoff.z = self.static_alt takeoff.param1 = 5 takeoff.current = 1 takeoff.autocontinue = 1 missionlist.waypoints.append(takeoff) for i in range(len(self.way_alt)): seq = i + 1 wp = self.set_waypoints(self.way_lat[i], 
self.way_lon[i], self.way_alt[i], seq) missionlist.waypoints.append(wp) landing = mavlink_lora_mission_item_int() landing.target_system = 0 landing.target_component = 0 landing.seq = seq + 1 landing.frame = 6 # global pos, relative alt_int landing.command = 21 landing.param1 = 5 # abort alt landing.param2 = 0 # precision landing. 0 = normal landing landing.x = self.lat landing.y = self.lon landing.z = 10 landing.autocontinue = 0 missionlist.waypoints.append(landing) self.mavlink_msg_pub.publish(missionlist) def go_to_loiter_mode(self): self.send_mavlink_set_mode(1, 4, 3) def update_mission(self, current_lat, current_lon, current_alt, sim): # Put uav in loiter mode self.send_mavlink_set_mode(1, 4, 3) # Clear mission # msg = std_msgs.msg.Empty() # self.mavlink_clear_mission.publish(msg) # Upload mission self.lat = int(current_lat * 10000000) self.lon = int(current_lon * 10000000) if self.lat == 0 and self.lon == 0: print("No GPS Mission not uploaded") return seq = 0 missionlist = mavlink_lora_mission_list() # if sim: # print("Added speed points x2") # speed = mavlink_lora_mission_item_int() # speed.target_system = 0 # speed.target_component = 0 # speed.seq = seq # Sequence in the list. Starts from 0 and every item increments by one # speed.frame = 2 # mission command frame # speed.command = 178 # change speed id # speed.param1 = 0 # air_speed # speed.param2 = 4 # m/s # speed.param3 = -1 # no change # speed.param4 = 0 # absolute or relative. relative = 1 # speed.autocontinue = 1 # automatic continue to next waypoint when this is reached # missionlist.waypoints.append(speed) # seq += 1 # # speed1 = mavlink_lora_mission_item_int() # speed1.target_system = 0 # speed1.target_component = 0 # speed1.seq = seq # Sequence in the list. Starts from 0 and every item increments by one # speed1.frame = 2 # mission command frame # speed1.command = 178 # change speed id # speed1.param1 = 0 # air_speed # speed1.param2 = 5 # m/s # speed1.param3 = -1 # no change # speed1.param4 = 0 # absolute or relative. relative = 1 # speed1.autocontinue = 1 # automatic continue to next waypoint when this is reached # missionlist.waypoints.append(speed1) # seq += 1 # Add a Takeoff point print("Added TakeOff") takeoff = mavlink_lora_mission_item_int() takeoff.target_system = 0 takeoff.target_component = 0 takeoff.seq = seq takeoff.frame = 6 # global pos, relative alt_int takeoff.command = 22 takeoff.x = self.lat takeoff.y = self.lon takeoff.z = self.static_alt takeoff.param1 = 5 takeoff.current = 1 takeoff.autocontinue = 1 missionlist.waypoints.append(takeoff) seq += 1 for i in range(len(self.way_alt)): wp = self.set_waypoints(self.way_lat[i], self.way_lon[i], self.way_alt[i], seq) missionlist.waypoints.append(wp) seq += 1 landing = mavlink_lora_mission_item_int() landing.target_system = 0 landing.target_component = 0 landing.seq = seq landing.frame = 6 # global pos, relative alt_int landing.command = 21 landing.param1 = 20 # abort alt landing.param2 = 0 # precision landing. 
0 = normal landing landing.x = self.way_lat[len(self.way_lat)-1] landing.y = self.way_lon[len(self.way_lon)-1] landing.z = 32 landing.autocontinue = 0 missionlist.waypoints.append(landing) self.mavlink_msg_pub.publish(missionlist) def land(self, current_lat, current_lon): # Upload mission self.lat = int(current_lat * 10000000) self.lon = int(current_lon * 10000000) if self.lat == 0 and self.lon == 0: print("No GPS Mission not uploaded") return seq = 0 missionlist = mavlink_lora_mission_list() takeoff = mavlink_lora_mission_item_int() takeoff.target_system = 0 takeoff.target_component = 0 takeoff.seq = seq takeoff.frame = 6 # global pos, relative alt_int takeoff.command = 22 takeoff.x = self.lat takeoff.y = self.lon takeoff.z = self.static_alt takeoff.param1 = 5 takeoff.current = 1 takeoff.autocontinue = 1 missionlist.waypoints.append(takeoff) seq += 1 landing = mavlink_lora_mission_item_int() landing.target_system = 0 landing.target_component = 0 landing.seq = seq landing.frame = 6 # global pos, relative alt_int landing.command = 21 landing.param1 = 20 # abort alt landing.param2 = 0 # precision landing. 0 = normal landing landing.x = self.lat landing.y = self.lon landing.z = 32 landing.autocontinue = 0 missionlist.waypoints.append(landing) self.mavlink_msg_pub.publish(missionlist) def land_takeoff(self, current_lat, current_lon): # Upload mission self.lat = int(current_lat * 10000000) self.lon = int(current_lon * 10000000) if self.lat == 0 and self.lon == 0: print("No GPS Mission not uploaded") return seq = 0 missionlist = mavlink_lora_mission_list() takeoff = mavlink_lora_mission_item_int() takeoff.target_system = 0 takeoff.target_component = 0 takeoff.seq = seq takeoff.frame = 6 # global pos, relative alt_int takeoff.command = 21 takeoff.x = self.lat takeoff.y = self.lon takeoff.z = 5 takeoff.param1 = 5 takeoff.current = 1 takeoff.autocontinue = 1 missionlist.waypoints.append(takeoff) seq += 1 landing = mavlink_lora_mission_item_int() landing.target_system = 0 landing.target_component = 0 landing.seq = seq landing.frame = 6 # global pos, relative alt_int landing.command = 21 landing.param1 = 20 # abort alt landing.param2 = 0 # precision landing. 0 = normal landing
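# --- Usage sketch (hypothetical driver, not part of the original script) ----
# How the node would typically be driven: initialize ROS, load a mission file,
# give the publishers a moment to connect, then upload. The coordinates are
# illustrative placeholders for a real telemetry fix; rospy and time are
# already imported at the top of this script.
if __name__ == '__main__':
    rospy.init_node('upload_mission_node')
    node = UploadMissionNode()
    node.load_mission()       # reads gc_functions/mission_1.txt
    time.sleep(1)             # allow subscribers to attach before publishing
    node.upload_mission(current_lat=55.4719, current_lon=10.3248)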
from functools import reduce from itertools import chain from .extraction import order_nodes, is_gap, is_copy, HeadDeps, ModDeps, make_functor, get_argument from .graphutils import * from .milltypes import (polarize_and_index_many, polarize_and_index, WordType, EmptyType, DiamondType, ModalType, PolarizedType, BoxType, FunctorType, depolarize) from .transformations import _cats_of_type AxiomLinks = Set[Tuple[int, int]] def unwrap(wordtype: WordType) -> WordType: return wordtype.content if isinstance(wordtype, DiamondType) else wordtype def is_functor(wordtype: WordType) -> bool: return isinstance(wordtype, FunctorType) or isinstance(wordtype, ModalType) and is_functor(wordtype.content) def get_result(wordtype: WordType) -> WordType: if isinstance(wordtype, FunctorType): return wordtype.result if isinstance(wordtype, ModalType) and isinstance(wordtype.content, FunctorType): return wordtype.content.result raise ProofError(f'{wordtype} has no result') def match(links: AxiomLinks, positive: WordType, negative: WordType) -> AxiomLinks: if positive != negative: raise ProofError(f'Formulas are not equal.\t{positive}\t{negative}.') if any(map(lambda x: not is_indexed(x), [positive, negative])): raise ProofError(f'Input formulas are not fully indexed.\t{positive}\t{negative}.') if isinstance(positive, PolarizedType) and isinstance(negative, PolarizedType): if not positive.polarity: raise ProofError(f'Positive formula has negative index.\t{positive}\t{negative}.') if negative.polarity: raise ProofError(f'Negative formula has positive index.\t{positive}\t{negative}.') if positive.index in set(map(fst, links)): raise ProofError(f'Positive formula already assigned.\n{positive}\n{negative}\n{links}.') if negative.index in set(map(snd, links)): raise ProofError(f'Negative formula already assigned.\t{positive}\t{negative}.') links = links.union({(positive.index, negative.index)}) elif is_functor(negative) and is_functor(positive): links = match(links, unwrap(get_argument(negative)), unwrap(get_argument(positive))) links = match(links, unwrap(get_result(positive)), unwrap(get_result(negative))) else: raise ProofError(f'Unexpected types {positive} {negative}.') return links def merge_proof(core: AxiomLinks, local: AxiomLinks) -> AxiomLinks: for k, v in local: if k in set(map(fst, core)) and (k, v) not in core: raise ProofError('Positive formula already assigned in core proof.\t{}\t{}\t{}'.format(k, v, core)) if v in set(map(snd, core)) and (k, v) not in core: raise ProofError('Negative formula already assigned in core proof.\t{}\t{}\t{}'.format(k, v, core)) core = core.union({(k, v)}) return core def merge_proofs(locals_: List[AxiomLinks], core: Optional[AxiomLinks] = None) -> AxiomLinks: if core is None: return reduce(merge_proof, locals_) if locals_ else set() return reduce(merge_proof, locals_, core) def make_links(dag: DAG[str, str]) -> AxiomLinks: if not dag.edges: return set() deannotate_dag(dag) idx = annotate_leaves(dag) add_ghost_nodes(dag) core_proof = iterate_simple_fringe(dag) delete_ghost_nodes(dag) crd_proof = match_copies_with_crds(dag) gap_proof = match_copied_gaps_with_crds(dag) root_type = dag.attribs[fst(list(dag.get_roots()))]['type'] idx, conclusion = polarize_and_index(root_type.depolarize(), False, idx) core_proof = match(core_proof, root_type, conclusion) proof = merge_proofs([core_proof, crd_proof, gap_proof]) correctness_check(proof, idx) return proof def correctness_check(proof: AxiomLinks, idx: int) -> None: positives = set(map(fst, proof)) negatives = set(map(snd, proof)) if 
set.union(positives, negatives) != set(range(idx)): raise ProofError('Unmatched types.') def iterate_simple_fringe(dag: DAG[str, str]) -> AxiomLinks: proof: AxiomLinks = set() while True: temp = annotate_simple_branches(dag) if temp is None: break proof = merge_proof(proof, temp) return proof def annotate_simple_branches(dag: DAG[str, str]) -> Optional[AxiomLinks]: parents = get_simple_branches(dag) if not parents: return None temp = list(map(lambda parent: annotate_simple_branch(dag, parent), parents)) branch_proofs, branch_outputs = list(zip(*temp)) update_types(dag, parents, branch_outputs) return merge_proofs(branch_proofs) def annotate_simple_branch(dag: DAG[str, str], parent: str) -> Tuple[AxiomLinks, WordType]: def simplify_crd(crd_type: WordType, arg_types_: List[WordType]) -> WordType: xs = isolate_xs(crd_type) result = last_instance_of(crd_type) if len(set(map(depolarize, arg_types_))) > 1: raise ProofError('Non polymorphic conjunction.') if arg_types == xs: return crd_type else: xs = list(map(get_result, xs)) result = get_result(result) crd_type = reduce(lambda res_, arg_: FunctorType(DiamondType(arg_, 'cnj'), res_), reversed(xs), result) return simplify_crd(crd_type, arg_types_) def is_gap_copy_parent(edge_: Edge[str, str]) -> bool: return edge_.dep in HeadDeps and is_copy(dag, edge_.target) and is_gap(dag, edge_.target, HeadDeps) outgoing = dag.outgoing(parent) outgoing = set(filter(lambda edge: not is_copy(dag, edge.target) or is_gap_copy_parent(edge), outgoing)) heads = list(filter(lambda edge: edge.dep in HeadDeps and not isinstance(dag.attribs[edge.target]['type'], EmptyType), outgoing)) if len(heads) != 1: raise ProofError(f'Too many heads: {heads}.') head = fst(heads) outgoing = set(filter(lambda edge: edge.dep not in HeadDeps, outgoing)) modding_edges = list(filter(lambda edge: edge.dep in ModDeps, outgoing)) arg_edges = list(filter(lambda edge: edge.dep not in ModDeps, outgoing)) mods = order_nodes(dag, list(map(lambda edge: edge.target, modding_edges))) args = order_nodes(dag, list(map(lambda edge: edge.target, arg_edges))) arg_edges = sorted(arg_edges, key=lambda x: args.index(x.target)) arg_types = list(map(lambda edge: get_simple_argument(dag, edge.target), arg_edges)) arg_deps = list(map(lambda edge: edge.dep, arg_edges)) mod_types = list(map(lambda n: get_simple_argument(dag, n), mods)) if dag.attribs[parent]['cat'] == 'conj': branch_output = simplify_crd(dag.attribs[head.target]['type'], arg_types) else: branch_output = get_simple_functor(dag, head.target) arg_proof, branch_output = align_args(branch_output, arg_types, arg_deps) mod_proof, branch_output = align_mods(branch_output, mod_types) return merge_proof(arg_proof, mod_proof), branch_output def align_args(functor: WordType, argtypes: List[WordType], deps: List[str]) -> Tuple[AxiomLinks, WordType]: def color_fold(functor_: WordType) -> List[Tuple[str, WordType]]: def step(x: WordType) -> Optional[Tuple[Tuple[str, WordType], WordType]]: if is_functor(x): arg, res = get_argument(x), get_result(x) if isinstance(arg, DiamondType): return (arg.modality, arg.content), res if isinstance(x, BoxType) and x.modality == 'det': return ('np_hd', arg), res return None return list(unfoldr(step, functor_)) def make_pairs(argdeps_: List[Tuple[str, WordType]], functor_argdeps_: List[Tuple[str, WordType]]) \ -> Tuple[List[Tuple[WordType, WordType]], List[Tuple[str, WordType]]]: ret = [] rem = [] for neg in functor_argdeps_: if neg in argdeps_: pos = argdeps_.pop(argdeps_.index(neg)) ret.append((snd(pos), snd(neg))) else: 
rem.append(neg) return ret, rem def match_args(proof_: AxiomLinks, pair: Tuple[WordType, WordType]) -> AxiomLinks: return match(proof_, fst(pair), snd(pair)) proof: Set[Tuple[int, int]] = set() if argtypes: functor_argcolors = color_fold(functor) functor_argcolors = list(filter(lambda ac: fst(ac) not in ModDeps, functor_argcolors)) material_argcolors = list(zip(deps, argtypes)) pairs, rem = make_pairs(material_argcolors, functor_argcolors) proof = reduce(match_args, pairs, proof) return proof, reduce(lambda x, y: make_functor(argument=y[1], dep=y[0], result=x), rem, get_functor_result(functor)) return proof, functor def align_mods(mod_input: WordType, mods: List[WordType]) -> Tuple[AxiomLinks, WordType]: def match_modchain(proof_: AxiomLinks, modpair: Tuple[WordType, WordType]) -> AxiomLinks: prev = fst(modpair) curr = snd(modpair) if is_functor(prev) and is_functor(curr): return match(proof_, get_result(prev), get_argument(curr)) raise ProofError(f'Modifiers {prev}, {curr} are not FunctorTypes.') proof: Set[Tuple[int, int]] = set() if mods: mod_output = get_result(last(mods)) proof = match(proof, mod_input, get_argument(fst(mods))) zipped_mods = list(zip(mods, mods[1:])) proof = reduce(match_modchain, zipped_mods, proof) return proof, mod_output return proof, mod_input def match_copies_with_crds(dag: DAG[Node, Any]) -> AxiomLinks: def get_copy_color(copy_: Node) -> str: incoming_ = set(map(lambda inc_: inc_.dep, dag.incoming(copy_))) assert len(incoming_) == 1 return fst(list(incoming_)) copies = list(filter(lambda node: is_copy(dag, node) and not is_gap(dag, node, HeadDeps), dag.nodes)) copy_colors = list(map(lambda copy: get_copy_color(copy), copies)) copy_types: List[WordType] = list(map(lambda copy: dag.attribs[copy]['type'], copies)) conjunction_hierarchies: List[List[Tuple[Node, List[Optional[Node]]]]] conjunction_hierarchies = list(map(lambda node: participating_conjunctions(dag, node), copies)) proofs = list(map(lambda ch, ct, cc: intra_crd_match(dag, ch, ct, cc), conjunction_hierarchies, copy_types, copy_colors)) proof = merge_proofs(proofs) crds = list(map(lambda ch: get_crd_type(dag, fst(fst(ch))), conjunction_hierarchies)) results = list(map(lambda crd: last_instance_of(crd), crds)) matches = list(map(lambda res, ct, cc: identify_missing(res, ct, cc), results, copy_types, copy_colors)) return reduce(lambda proof_, pair: match(proof_, fst(pair), snd(pair)), zip(copy_types, matches), proof) def participating_conjunctions(dag: DAG[Node, str], node: Node, exclude_heads: bool = False) -> \ List[Tuple[Node, List[Optional[Node]]]]: def impose_order(conjunctions_: List[Node]) -> List[Tuple[Node, List[Optional[Node]]]]: def link_daughter_with_conjs(daughter: Node, conjs: List[Node]) -> Nodes: return set(filter(lambda conj_: dag.exists_path(daughter, conj_) or daughter == conj_, conjs)) def filter_maximally_linked(candidates: Nodes) -> Nodes: return set(filter(lambda candidate: not any(map(lambda contestant: len(dag.distinct_paths_to(contestant, node)) > len(dag.distinct_paths_to(candidate, node)), candidates)), candidates)) def select_lowest(candidates: Nodes) -> Optional[Node]: filtered = set(filter(lambda candidate: not any(map(lambda contestant: contestant in dag.points_to(candidate), candidates)), candidates)) return fst(list(filtered)) if filtered else None # each conj paired with its daughters daughters: List[Tuple[Node, List[Node]]] daughters = list(map(lambda conj: (conj, get_conjunction_daughters(dag, conj)), conjunctions)) # each conj daughter also paired with the set of 
conjunctions it covers paired: List[Tuple[Node, List[Nodes]]] paired = list(map(lambda pair: (fst(pair), list(map(lambda daughter: link_daughter_with_conjs(daughter, conjunctions_), snd(pair)))), daughters)) maximal: List[Tuple[Node, List[Nodes]]] maximal = list(map(lambda pair: (fst(pair), list(map(lambda daughter_group: filter_maximally_linked(daughter_group), snd(pair)))), paired)) lowest = list(map(lambda pair: (fst(pair), list(map(lambda daughter_group: select_lowest(daughter_group), snd(pair)))), maximal)) return lowest incoming = list(filter(lambda edge: edge.dep not in HeadDeps or not exclude_heads, dag.incoming(node))) parents = set(map(lambda edge: edge.source, incoming)) top = dag.first_common_predecessor(parents) if top is None or dag.attribs[top]['cat'] != 'conj': raise ProofError('Top is not a conj or no top.') conjunctions = _cats_of_type(dag, 'conj', dag.points_to(top).intersection(dag.pointed_by(node)).union({top})) conjunctions = set(filter(lambda conj: len(dag.distinct_paths_to(conj, node)) > 1, conjunctions)) return sorted(impose_order(list(conjunctions)), key=lambda pair: fst(pair) == top, reverse=True) def intra_crd_match(dag: DAG[Node, str], hierarchy: List[Tuple[Node, List[Optional[Node]]]], copy_type: WordType, copy_color: str) -> AxiomLinks: isolated: List[Tuple[List[WordType], List[Optional[Node]]]] isolated = list(map(lambda pair: (isolate_xs(get_crd_type(dag, fst(pair))), snd(pair)), hierarchy)) zipped: List[List[Tuple[WordType, Optional[Node]]]] zipped = [list(zip(*x)) for x in isolated] filtered: List[Tuple[WordType, Optional[Node]]] filtered = list(filter(lambda pair: snd(pair) is not None, chain.from_iterable(zipped))) paired: List[Tuple[WordType, WordType]] paired = list(map(lambda pair: (fst(pair), last_instance_of(get_crd_type(dag, snd(pair)))), filtered)) matches = list(map(lambda pair: (identify_missing(fst(pair), copy_type, copy_color), identify_missing(snd(pair), copy_type, copy_color)), paired)) return reduce(lambda proof_, pair: match(proof_, fst(pair), snd(pair)), matches, set()) def match_copied_gaps_with_crds(dag: DAG[str, str]) -> AxiomLinks: def extract_color(type_: WordType) -> str: if is_functor(type_): arg = get_argument(type_) if is_functor(arg): if isinstance(arg, BoxType): return arg.modality arg_of_arg = get_argument(arg) if isinstance(arg_of_arg, DiamondType): return arg_of_arg.modality raise ProofError(f'Expected {type_} to be a higher order functor.') def extract_nested_argument(type_: WordType) -> WordType: if is_functor(type_): arg = get_argument(type_) if is_functor(arg): return unwrap(get_argument(arg)) raise ProofError(f'Expected {type_} to be a higher order functor.') gaps = list(filter(lambda node: is_copy(dag, node) and is_gap(dag, node, HeadDeps), dag.nodes)) gap_types: List[WordType] = list(map(lambda node: dag.attribs[node]['type'], gaps)) gap_colors = list(map(lambda
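# --- Toy model (own minimal classes, not this module's WordType hierarchy) --
# The match() function above pairs positive and negative occurrences of atomic
# formulas into axiom links, flipping polarity on functor arguments. A minimal
# self-contained sketch of that core idea:
from dataclasses import dataclass
from typing import Set, Tuple, Union


@dataclass(frozen=True)
class Atom:          # stands in for PolarizedType
    index: int
    polarity: bool   # True = positive occurrence


@dataclass(frozen=True)
class Arrow:         # stands in for FunctorType
    argument: 'TType'
    result: 'TType'


TType = Union[Atom, Arrow]
Links = Set[Tuple[int, int]]


def toy_match(links: Links, pos: TType, neg: TType) -> Links:
    if isinstance(pos, Atom) and isinstance(neg, Atom):
        assert pos.polarity and not neg.polarity
        return links | {(pos.index, neg.index)}
    if isinstance(pos, Arrow) and isinstance(neg, Arrow):
        # arguments flip polarity: the negative functor supplies the positive one
        links = toy_match(links, neg.argument, pos.argument)
        return toy_match(links, pos.result, neg.result)
    raise ValueError('mismatched shapes')


# (np -> s) matched against itself with opposite polarities:
pos = Arrow(Atom(0, False), Atom(1, True))
neg = Arrow(Atom(2, True), Atom(3, False))
print(sorted(toy_match(set(), pos, neg)))   # [(1, 3), (2, 0)]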
# -*- coding: utf-8 -*- ############################################################################### # This file is part of metalibm (https://github.com/kalray/metalibm) ############################################################################### # MIT License # # Copyright (c) 2018 Kalray # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ############################################################################### # last-modified: Oct 5th, 2018 # # Description: Meta-implementation of floating-point division ############################################################################### import sys import sollya from sollya import Interval S2 = sollya.SollyaObject(2) from metalibm_core.core.attributes import ML_Debug from metalibm_core.core.ml_operations import * from metalibm_core.core.ml_formats import * from metalibm_core.core.precisions import ML_CorrectlyRounded from metalibm_core.code_generation.c_code_generator import CCodeGenerator from metalibm_core.code_generation.generic_processor import GenericProcessor from metalibm_core.code_generation.code_object import CodeObject from metalibm_core.code_generation.code_function import CodeFunction from metalibm_core.code_generation.code_constant import C_Code from metalibm_core.core.ml_optimization_engine import OptimizationEngine from metalibm_core.core.polynomials import * from metalibm_core.core.ml_table import ML_NewTable from metalibm_core.core.ml_function import ML_FunctionBasis from metalibm_core.code_generation.gappa_code_generator import GappaCodeGenerator from metalibm_core.utility.gappa_utils import execute_gappa_script_extract from metalibm_core.utility.ml_template import ML_NewArgTemplate, DefaultArgTemplate from metalibm_core.utility.debug_utils import debug_multi class NR_Iteration(object): """ Newton-Raphson iteration generator """ def __init__(self, approx, divisor, force_fma=False): """ @param approx initial approximation of 1.0 / @p divisor @param divisor reciprocal input @param force_fma force the use of Fused Multiply and Add """ self.approx = approx self.divisor = divisor self.force_fma = force_fma if force_fma: self.error = FusedMultiplyAdd(divisor, approx, 1.0, specifier=FusedMultiplyAdd.SubtractNegate) self.new_approx = FusedMultiplyAdd(self.error, self.approx, self.approx, specifier=FusedMultiplyAdd.Standard) else: self.error = 1 - divisor * approx self.new_approx = self.approx + self.error * self.approx def get_hint_rules(self, gcg, gappa_code, exact): divisor =
self.divisor.get_handle().get_node() approx = self.approx.get_handle().get_node() new_approx = self.new_approx.get_handle().get_node() Attributes.set_default_precision(ML_Exact) if self.force_fma: rule0 = FusedMultiplyAdd(divisor, approx, 1.0, specifier = FusedMultiplyAdd.SubtractNegate) else: rule0 = 1.0 - divisor * approx rule1 = 1.0 - divisor * (approx - exact) - 1.0 rule2 = new_approx - exact subrule = approx * (2 - divisor * approx) rule3 = (new_approx - subrule) - (approx - exact) * (approx - exact) * divisor if self.force_fma: new_error = FusedMultiplyAdd(divisor, approx, 1.0, specifier = FusedMultiplyAdd.SubtractNegate) rule4 = FusedMultiplyAdd(new_error, approx, approx) else: rule4 = approx + (1 - divisor * approx) * approx Attributes.unset_default_precision() # registering hints gcg.add_hint(gappa_code, rule0, rule1) gcg.add_hint(gappa_code, rule2, rule3) gcg.add_hint(gappa_code, subrule, rule4) def dividend_mult(div_approx, inv_approx, dividend, divisor, index): """ Second part of iteration to converge to dividend / divisor from inv_approx ~ 1 / divisor and div_approx ~ dividend / divisor """ # yerr = dividend - div_approx * divisor yerr = FMSN(div_approx, divisor, dividend) yerr.set_attributes(tag="yerr%d" % index, debug=debug_multi) # new_div = div_approx + yerr * inv_approx new_div = FMA(yerr, inv_approx, div_approx) new_div.set_attributes(tag="new_div%d" % index, debug=debug_multi) return new_div def compute_reduced_reciprocal(init_approx, vy, num_iteration): """ Compute the correctly rounded approximation of 1.0 / vy using @p init_approx as starting point and execution @p num_iteration Newton-Raphson iteration(s) """ current_approx = init_approx inv_iteration_list = [] # compute precision (up to accuracy) approximation of 1 / _vy for i in range(num_iteration): new_iteration = NR_Iteration(current_approx, vy, force_fma=True) #False if (i != num_iteration - 1) else True) inv_iteration_list.append(new_iteration) current_approx = new_iteration.new_approx current_approx.set_attributes(tag="iter_%d" % i, debug=debug_multi) # multiplication correction iteration # to get correctly rounded full division _vx / _vy current_approx.set_attributes(tag = "final_recp_approx", debug=debug_multi) return inv_iteration_list, current_approx def compute_reduced_division(vx, vy, recp_approx): """ From an initial accurate approximation @p recp_approx of 1.0 / vy, computes an approximation to accuracy @p accuracy of vx / vy """ # vx and vy are assumed to be in [1, 2[ # which means vx / vy is in [0.5, 2] Attributes.set_default_rounding_mode(ML_RoundToNearest) Attributes.set_default_silent(True) # multiplication correction iteration # to get correctly rounded full division _vx / _vy current_div_approx = vx * recp_approx num_dividend_mult_iteration = 1 for i in range(num_dividend_mult_iteration): current_div_approx = dividend_mult(current_div_approx, recp_approx, vx, vy, i) # last iteration yerr_last = FMSN(current_div_approx, vy, vx) #, clearprevious = True) Attributes.unset_default_rounding_mode() Attributes.unset_default_silent() last_div_approx = FMA( yerr_last, recp_approx, current_div_approx, rounding_mode=ML_GlobalRoundMode) yerr_last.set_attributes(tag = "yerr_last", debug=debug_multi) result = last_div_approx return yerr_last, result def scaling_div_result(div_approx, scaling_ex, scaling_factor_y, precision): """ Reconstruct division result from approximation of scaled inputs vx was scaled by scaling_factor_x = 2**-ex vy was scaled by scaling_factor_y = 2**-ey so real result is = div_approx * 
scaling_factor_y / scaling_factor_x = div_approx * 2**(-ey + ex) """ # To avoid overflow / underflow when computing 2**(-ey + ex) # the scaling could be performed in 2 steps # 1. multiplying by 2**-ey # 2. multiplying by 2**ex unscaling_ex = ExponentInsertion(scaling_ex, precision=precision) unscaled_result = div_approx * unscaling_ex * scaling_factor_y unscaled_result.set_attributes(debug=debug_multi, tag="unscaled_result") return unscaled_result def subnormalize_result(recp_approx, div_approx, ex, ey, yerr_last, precision): """ If the result of the division is subnormal, an extended approximation of division must first be obtained and then subnormalize to ensure correct rounding """ # TODO: fix extended precision determination extended_precision = { ML_Binary64: ML_DoubleDouble, ML_Binary32: ML_SingleSingle, }[precision] # we make an extra step in extended precision ext_pre_result = FMA(yerr_last, recp_approx, div_approx, precision=extended_precision, tag="ext_pre_result") # subnormalize the result according to final result exponent subnormal_pre_result_ext = SpecificOperation( ext_pre_result, ex - ey, precision=extended_precision, specifier=SpecificOperation.Subnormalize, tag="subnormal_pre_result", debug=debug_multi) subnormal_pre_result = subnormal_pre_result_ext.hi sub_scale_factor = ex - ey subnormal_result = subnormal_pre_result * ExponentInsertion(sub_scale_factor, precision=precision) return subnormal_result def bit_match(fp_optree, bit_id, likely = False, **kwords): return NotEqual(BitLogicAnd(TypeCast(fp_optree, precision = ML_Int64), 1 << bit_id), 0, likely = likely, **kwords) def extract_and_inject_sign(sign_source, sign_dest, int_precision = ML_Int64, fp_precision = ML_Binary64, **kwords): int_sign_dest = sign_dest if isinstance(sign_dest.get_precision(), ML_Fixed_Format) else TypeCast(sign_dest, precision = int_precision) return TypeCast(BitLogicOr(BitLogicAnd(TypeCast(sign_source, precision = int_precision), 1 << (sign_source.precision.bit_size - 1)), int_sign_dest), precision = fp_precision) class ML_Division(ML_FunctionBasis): function_name = "ml_div" arity = 2 def __init__(self, args=DefaultArgTemplate): # initializing base class ML_FunctionBasis.__init__(self, args=args) self.num_iter = args.num_iter @staticmethod def get_default_args(**args): """ Generate a default argument structure set specifically for the Hyperbolic Cosine """ default_div_args = { "precision": ML_Binary32, "accuracy": ML_CorrectlyRounded, "target": GenericProcessor(), "output_file": "my_div.c", "function_name": "my_div", "language": C_Code, "num_iter": 3, "passes": ["beforecodegen:expand_multi_precision"], "vector_size": 1, "arity": ML_Division.arity, } default_div_args.update(args) return DefaultArgTemplate(**default_div_args) def generate_scheme(self): # We wish to compute vx / vy vx = self.implementation.add_input_variable("x", self.precision) vy = self.implementation.add_input_variable("y", self.precision) # maximum exponent magnitude (to avoid overflow/ underflow during # intermediary computations max_exp_mag = self.precision.get_emax() - 3 int_prec = self.precision.get_integer_format() exact_ex = ExponentExtraction(vx, tag = "exact_ex", precision=int_prec, debug=debug_multi) exact_ey = ExponentExtraction(vy, tag = "exact_ey", precision=int_prec, debug=debug_multi) ex = Max(Min(exact_ex, max_exp_mag), -max_exp_mag, tag="ex") ey = Max(Min(exact_ey, max_exp_mag), -max_exp_mag, tag="ey") Attributes.set_default_rounding_mode(ML_RoundToNearest) Attributes.set_default_silent(True) # computing the 
inverse square root init_approx = None scaling_factor_x = ExponentInsertion(-ex, tag="sfx_ei", precision=self.precision, debug=debug_multi) scaling_factor_y = ExponentInsertion(-ey, tag="sfy_ei", precision=self.precision, debug=debug_multi) # scaled version of vx and vy scaled_vx = vx * scaling_factor_x scaled_vy = vy * scaling_factor_y scaled_vx.set_attributes(tag="scaled_vx", debug=debug_multi) scaled_vy.set_attributes(tag="scaled_vy", debug=debug_multi) # We need a first approximation to 1 / scaled_vy dummy_seed = ReciprocalSeed(EmptyOperand(precision=self.precision), precision=self.precision) if self.processor.is_supported_operation(dummy_seed): init_approx = ReciprocalSeed(scaled_vy, precision=self.precision, tag="init_approx", debug=debug_multi) else: # generate tabulated version of seed raise NotImplementedError current_approx_std = init_approx # correctly-rounded inverse computation num_iteration = self.num_iter Attributes.unset_default_rounding_mode() Attributes.unset_default_silent() # check if inputs are zeros x_zero = Test(vx, specifier=Test.IsZero, likely=False, precision=ML_Bool) y_zero = Test(vy, specifier=Test.IsZero, likely=False, precision=ML_Bool) comp_sign = Test(vx, vy, specifier=Test.CompSign, tag = "comp_sign", debug = debug_multi ) # check if divisor is NaN y_nan = Test(vy, specifier=Test.IsNaN, likely=False, precision=ML_Bool) # check if inputs are signaling NaNs x_snan = Test(vx, specifier=Test.IsSignalingNaN, likely=False, precision=ML_Bool) y_snan = Test(vy, specifier=Test.IsSignalingNaN, likely=False, precision=ML_Bool) # check if inputs are infinities x_inf = Test(vx, specifier=Test.IsInfty, likely=False, tag="x_inf", precision=ML_Bool) y_inf = Test(vy, specifier=Test.IsInfty, likely=False, tag="y_inf", debug=debug_multi, precision=ML_Bool) scheme = None gappa_vx, gappa_vy = None, None # gappa_init_approx = None # gappa_current_approx = None # initial reciprocal approximation of 1.0 / scaled_vy inv_iteration_list, recp_approx = compute_reduced_reciprocal(init_approx, scaled_vy, self.num_iter)
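# --- Numeric sketch (plain floats, not metalibm operation nodes) ------------
# NR_Iteration above emits the classic Newton-Raphson reciprocal refinement:
# e = 1 - y*x ; x' = x + e*x, which roughly squares the relative error per
# step. A quick demonstration with ordinary Python floats:
def nr_reciprocal(y: float, x0: float, iterations: int = 3) -> float:
    """Refine an initial estimate x0 of 1/y."""
    x = x0
    for _ in range(iterations):
        e = 1.0 - y * x     # residual; computed with an FMA in the real scheme
        x = x + e * x       # quadratic-convergence update
    return x


y = 1.37
print(nr_reciprocal(y, 0.7), 1.0 / y)   # both ~= 0.72992700729927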
classes in a discrete dataset """ return dict(is_logits=self.is_logits, num_classes=self.num_classes, soft_interpolation=self.soft_interpolation) @property def subclass(self): """Specifies the primary subclass of an instance of DatasetBuilder that can be instantiated on its own using self.rebuild_dataset and is typically either DiscreteDataset or ContinuousDataset """ return DiscreteDataset def __init__(self, *args, is_logits=False, num_classes=2, soft_interpolation=0.6, **kwargs): """Initialize a model-based optimization dataset and prepare that dataset by loading that dataset from disk and modifying its distribution Arguments: *args: list a list of positional arguments passed to the super class constructor of the DiscreteDataset class, which typically includes a list of x shards and y shards; see dataset_builder.py is_logits: bool a value that indicates whether the design values contained in the model-based optimization dataset have already been converted to logits and need not be converted again num_classes: int an integer representing the number of classes in the distribution that the integer data points are sampled from which cannot be None and must also be greater than 1 soft_interpolation: float a floating point hyper parameter used when converting design values from integers to a floating point representation as logits, which interpolates between a uniform and dirac distribution 1.0 = dirac, 0.0 -> uniform **kwargs: dict additional keyword arguments which are used to parameterize the data set generation process, including which shard files are used if multiple sets of data set shard files can be loaded """ # set a hyper parameter that controls the conversion from # integers to floating point logits for the dataset self.soft_interpolation = soft_interpolation self.num_classes = num_classes self.is_logits = is_logits # initialize the dataset using the method in the base class super(DiscreteDataset, self).__init__(*args, **kwargs) def batch_transform(self, x_batch, y_batch, return_x=True, return_y=True): """Apply a transformation to batches of samples from a model-based optimization data set, including sub sampling and normalization and potentially other user defined transformations Arguments: x_batch: np.ndarray a numpy array representing a batch of design values sampled from a model-based optimization data set y_batch: np.ndarray a numpy array representing a batch of prediction values sampled from a model-based optimization data set return_x: bool a boolean indicator that specifies whether the generator yields design values at every iteration; note that at least one of return_x and return_y must be set to True return_y: bool a boolean indicator that specifies whether the generator yields prediction values at every iteration; note that at least one of return_x and return_y must be set to True Returns: x_batch: np.ndarray a numpy array representing a batch of design values sampled from a model-based optimization data set y_batch: np.ndarray a numpy array representing a batch of prediction values sampled from a model-based optimization data set """ # convert the design values from integers to logits if self.is_logits and return_x \ and np.issubdtype(x_batch.dtype, np.integer): x_batch = self.to_logits(x_batch) # convert the design values from logits to integers elif not self.is_logits and return_x \ and np.issubdtype(x_batch.dtype, np.floating): x_batch = self.to_integers(x_batch) # return processed batches of designs and predictions return super(DiscreteDataset, self).batch_transform(
x_batch, y_batch, return_x=return_x, return_y=return_y) def update_x_statistics(self): """A helpful function that calculates the mean and standard deviation of the designs and predictions in a model-based optimization dataset either iteratively or all at once using numpy """ # handle corner case when we need statistics but they were # not computed yet and the dataset is currently mapped to integers original_is_logits = self.is_logits self.is_logits = True super(DiscreteDataset, self).update_x_statistics() self.is_logits = original_is_logits def rebuild_dataset(self, x_shards, y_shards, visible_mask): """Initialize a model-based optimization dataset and prepare that dataset by loading that dataset from disk and modifying its distribution of designs and predictions Arguments: x_shards: Union[ np.ndarray, RemoteResource, Iterable[np.ndarray], Iterable[RemoteResource]] a single shard or a list of shards representing the design values in a model-based optimization dataset; shards are loaded lazily if RemoteResource otherwise loaded in memory immediately y_shards: Union[ np.ndarray, RemoteResource, Iterable[np.ndarray], Iterable[RemoteResource]] a single shard or a list of shards representing prediction values in a model-based optimization dataset; shards are loaded lazily if RemoteResource otherwise loaded in memory immediately visible_mask: np.ndarray a numpy array of shape [dataset_size] containing boolean entries specifying which samples are visible in the provided Iterable Returns: dataset: DatasetBuilder an instance of a data set builder subclass containing a copy of all statistics associated with this dataset """ # build the dataset using the super class method dataset = super(DiscreteDataset, self)\ .rebuild_dataset(x_shards, y_shards, visible_mask) # carry over the shape and the data type of the designs dataset.input_shape = self.input_shape dataset.input_size = self.input_size dataset.input_dtype = self.input_dtype # potentially convert the dataset from integers to logits dataset.is_logits = self.is_logits return dataset def map_normalize_x(self): """a function that standardizes the design values 'x' to have zero empirical mean and unit empirical variance in the dataset """ # check that the dataset is in a form that supports normalization if not self.is_logits: raise ValueError("cannot normalize discrete design values") # call the normalization method of the super class super(DiscreteDataset, self).map_normalize_x() def normalize_x(self, x): """a function that standardizes the design values 'x' to have zero empirical mean and unit empirical variance Arguments: x: np.ndarray a design value represented as a numpy array potentially given as a batch of designs which shall be normalized according to dataset statistics Returns: x: np.ndarray a design value represented as a numpy array potentially given as a batch of designs which has been normalized using dataset statistics """ # check that the dataset is in a form that supports normalization if not np.issubdtype(x.dtype, np.floating): raise ValueError("cannot normalize discrete design values") # call the normalization method of the super class return super(DiscreteDataset, self).normalize_x(x) def map_denormalize_x(self): """a function that un-standardizes the design values 'x' which have zero empirical mean and unit empirical variance in the dataset """ # check that the dataset is in a form that supports denormalization if not self.is_logits: raise ValueError("cannot denormalize discrete design values") # call the normalization method of 
the super class super(DiscreteDataset, self).map_denormalize_x() def denormalize_x(self, x): """a function that un-standardizes the design values 'x' which have zero empirical mean and unit empirical variance Arguments: x: np.ndarray a design value represented as a numpy array potentially given as a batch of designs which shall be denormalized according to dataset statistics Returns: x: np.ndarray a design value represented as a numpy array potentially given as a batch of designs which has been denormalized using dataset statistics """ # check that the dataset is in a form that supports denormalization if not np.issubdtype(x.dtype, np.floating): raise ValueError("cannot denormalize discrete design values") # call the normalization method of the super class return super(DiscreteDataset, self).denormalize_x(x) def to_logits(self, x): """A helper function that accepts design values represented as a numpy array of integers as input and converts them to floating point logits of a certain probability distribution Arguments: x: np.ndarray a numpy array containing design values represented as integers which are going to be converted into a floating point representation of a certain probability distribution Returns: x: np.ndarray a numpy array containing design values represented as floating point numbers which have be converted from integer samples of a certain probability distribution """ # check that the input format is correct if not np.issubdtype(x.dtype, np.integer): raise ValueError("cannot convert non-integers to logits") # convert the integers to one hot vectors one_hot_x = one_hot(x, self.num_classes) # build a uniform distribution to interpolate between uniform_prior = np.full_like(one_hot_x, 1 / float(self.num_classes)) # interpolate between a dirac distribution and a uniform prior soft_x = self.soft_interpolation * one_hot_x + ( 1.0 - self.soft_interpolation) * uniform_prior # convert to log probabilities x = np.log(soft_x) # remove one degree of freedom caused by \sum_i p_i = 1.0 return (x[:, :, 1:] - x[:, :, :1]).astype(np.float32) def to_integers(self, x): """A helper function that accepts design values represented as a numpy array of floating point logits as input and converts them to integer representing the max of the distribution Arguments: x: np.ndarray a numpy
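# --- Standalone sketch (illustrative re-implementation, not the class's own
# methods): the integer -> logits conversion described in to_logits above,
# plus the inverse mapping, as plain numpy functions.
import numpy as np


def to_logits(x, num_classes, soft_interpolation=0.6):
    one_hot_x = np.eye(num_classes)[x]                    # dirac distribution per token
    uniform = np.full_like(one_hot_x, 1.0 / num_classes)  # uniform prior
    soft = soft_interpolation * one_hot_x + (1.0 - soft_interpolation) * uniform
    log_p = np.log(soft)
    # remove one degree of freedom caused by sum_i p_i = 1
    return (log_p[..., 1:] - log_p[..., :1]).astype(np.float32)


def to_integers(logits):
    # class 0 corresponds to an implicit zero logit; argmax over all classes
    full = np.concatenate([np.zeros_like(logits[..., :1]), logits], axis=-1)
    return np.argmax(full, axis=-1).astype(np.int32)


x = np.array([[0, 2, 1], [1, 1, 0]])
assert np.array_equal(to_integers(to_logits(x, num_classes=3)), x)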
import time from datetime import datetime from dateutil.tz import tzlocal import decimal D = decimal.Decimal import sys import logging from operator import itemgetter from . import (config, exceptions, bitcoin) b26_digits = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # Obsolete in Python 3.4, with enum module. BET_TYPE_NAME = {0: 'BullCFD', 1: 'BearCFD', 2: 'Equal', 3: 'NotEqual'} BET_TYPE_ID = {'BullCFD': 0, 'BearCFD': 1, 'Equal': 2, 'NotEqual': 3} def rowtracer(cursor, sql): dictionary = {} description = cursor.getdescription() for i in range(len(description)): dictionary[description[i][0]] = sql[i] return dictionary def bitcoind_check (db): # Check blocktime of last block to see if Bitcoind is running behind. block_count = bitcoin.rpc('getblockcount', []) block_hash = bitcoin.rpc('getblockhash', [block_count]) block = bitcoin.rpc('getblock', [block_hash]) time_behind = time.time() - block['time'] # How reliable is the block time?! if time_behind > 60 * 60 * 2: # Two hours. raise exceptions.BitcoindError('Bitcoind is running about {} seconds behind.'.format(round(time_behind))) def database_check (db): # Check Counterparty database to see if the counterpartyd server has caught up with Bitcoind. cursor = db.cursor() TRIES = 7 for i in range(TRIES): try: cursor.execute('''SELECT * FROM blocks ORDER BY block_index ASC''') except Exception: # TODO raise exceptions.DatabaseError('Counterparty database does not exist. Run the server command to create it.') last_block = cursor.fetchall()[-1] if last_block['block_index'] == bitcoin.rpc('getblockcount', []): cursor.close() return time.sleep(1) raise exceptions.DatabaseError('Counterparty database is behind Bitcoind. Is the counterpartyd server running?') def do_order_by(results, order_by, order_dir): if not len(results) or not order_by: #empty results, or not ordering return results assert isinstance(results, list) and isinstance(results[0], dict) if order_by not in results[0]: raise KeyError("Specified order_by property '%s' does not exist in returned data" % order_by) if order_dir not in ('asc', 'desc'): raise Exception("Invalid order_dir: '%s'. 
Must be 'asc' or 'desc'" % order_dir) return sorted(results, key=itemgetter(order_by), reverse=order_dir=='desc') def get_limit_to_blocks(start_block, end_block, col_names=['block_index',]): if (start_block is not None and not isinstance(start_block, int)) \ or (end_block is not None and not isinstance(end_block, int)): raise ValueError("start_block and end_block must be either an integer, or None") assert isinstance(col_names, list) and len(col_names) in [1, 2] if start_block is None and end_block is None: return '' elif len(col_names) == 1: col_name = col_names[0] if start_block and end_block: block_limit_clause = " WHERE %s >= %s AND %s <= %s" % (col_name, start_block, col_name, end_block) elif start_block: block_limit_clause = " WHERE %s >= %s" % (col_name, start_block) elif end_block: block_limit_clause = " WHERE %s <= %s" % (col_name, end_block) else: #length of 2 if start_block and end_block: block_limit_clause = " WHERE (%s >= %s OR %s >= %s) AND (%s <= %s OR %s <= %s)" % ( col_names[0], start_block, col_names[1], start_block, col_names[0], end_block, col_names[1], end_block) elif start_block: block_limit_clause = " WHERE %s >= %s OR %s >= %s" % ( col_names[0], start_block, col_names[1], start_block) elif end_block: block_limit_clause = " WHERE %s <= %s OR %s <= %s" % ( col_names[0], end_block, col_names[1], end_block) return block_limit_clause def short (string): if len(string) == 64: length = 8 elif len(string) == 128: length = 16 short = string[:length] + '...' + string[-length:] return short def isodt (epoch_time): return datetime.fromtimestamp(epoch_time, tzlocal()).isoformat() def get_time_left (unmatched, block_index=None): """order or bet""" """zero time left means it expires *this* block; that is, expire when strictly less than 0""" if not block_index: block_index = bitcoin.rpc('getblockcount', []) return unmatched['block_index'] + unmatched['expiration'] - block_index def get_order_match_time_left (matched, block_index=None): """order_match or bet_match""" if not block_index: block_index = bitcoin.rpc('getblockcount', []) tx0_time_left = matched['tx0_block_index'] + matched['tx0_expiration'] - block_index tx1_time_left = matched['tx1_block_index'] + matched['tx1_expiration'] - block_index return min(tx0_time_left, tx1_time_left) def valid_asset_name (asset_name): if asset_name in ('BTC', 'XCP'): return True if len(asset_name) < 4: return False for c in asset_name: if c not in b26_digits: return False return True def get_asset_id (asset): if not valid_asset_name(asset): raise exceptions.AssetError('Invalid asset name.') if asset == 'BTC': return 0 elif asset == 'XCP': return 1 # Convert the Base 26 string to an integer. n = 0 s = asset for c in s: n *= 26 if c not in b26_digits: raise exceptions.InvalidBase26Error('Not an uppercase ASCII character:', c) digit = b26_digits.index(c) n += digit # Minimum of four letters long. if not n > 26**3: raise exceptions.AssetError('Invalid asset name.') return n def get_asset_name (asset_id): if asset_id == 0: return 'BTC' elif asset_id == 1: return 'XCP' # Minimum of four letters long. if not asset_id > 26**3: raise exceptions.AssetError('Invalid asset name.') # Divide that integer into Base 26 string. res = [] n = asset_id while n > 0: n, r = divmod (n, 26) res.append(b26_digits[r]) asset = ''.join(res[::-1]) if not valid_asset_name(asset): raise exceptions.AssetError('Invalid asset name.') return asset def debit (db, address, asset, amount): debit_cursor = db.cursor() assert asset != 'BTC' # Never BTC. 
assert type(amount) == int if asset == 'BTC': raise exceptions.BalanceError('Cannot debit bitcoins from a Counterparty address!') balances = get_balances(db, address=address, asset=asset) if not len(balances) == 1: old_balance = 0 else: old_balance = balances[0]['amount'] assert type(old_balance) == int if old_balance >= amount: balance = round(old_balance - amount) balance = min(balance, config.MAX_INT) debit_cursor.execute('''UPDATE balances \ SET amount=? \ WHERE (address=? and asset=?)''', (balance, address, asset)) validity = 'Valid' else: validity = 'Invalid: insufficient funds' # Record debit. logging.debug('Debit: {} of {} from {}'.format(devise(db, amount, asset, 'output'), asset, address)) debit_cursor.execute('''INSERT INTO debits( address, asset, amount) VALUES(?,?,?)''', (address, asset, amount) ) debit_cursor.close() return validity def credit (db, address, asset, amount): credit_cursor = db.cursor() assert asset != 'BTC' # Never BTC. assert type(amount) == int balances = get_balances(db, address=address, asset=asset) if len(balances) != 1: assert balances == [] credit_cursor.execute('''INSERT INTO balances( address, asset, amount) VALUES(?,?,?)''', (address, asset, amount) ) else: old_balance = balances[0]['amount'] assert type(old_balance) == int balance = round(old_balance + amount) balance = min(balance, config.MAX_INT) credit_cursor.execute('''UPDATE balances SET amount=? \ WHERE (address=? and asset=?)''', (balance, address, asset)) # Record credit. logging.debug('Credit: {} of {} to {}'.format(devise(db, amount, asset, 'output'), asset, address)) credit_cursor.execute('''INSERT INTO credits( address, asset, amount) VALUES(?,?,?)''', (address, asset, amount) ) credit_cursor.close() def devise (db, quantity, asset, dest, divisible=None): FOUR = D(10) ** -4 EIGHT = D(10) ** -8 quantity = D(quantity) if asset in ('leverage', 'price', 'odds', 'value'): if dest == 'output': return quantity.quantize(FOUR) elif dest == 'input': # Hackish if asset == 'leverage': return round(quantity) else: return float(quantity) if asset in ('fee_multiplier',): return D(quantity / D(1e8)).quantize(FOUR) if divisible == None: if asset in ('BTC', 'XCP'): divisible = True else: issuances = get_issuances(db, validity='Valid', asset=asset) if not issuances: raise exceptions.AssetError('No such asset: {}'.format(asset)) divisible = issuances[0]['divisible'] if divisible: if dest == 'output': quantity = D(quantity / config.UNIT).quantize(EIGHT) if quantity == quantity.to_integral(): return str(float(quantity)) # For divisible assets, display the decimal point. 
else: return str(quantity.quantize(EIGHT).normalize()) elif dest == 'input': quantity = D(quantity * config.UNIT).quantize(EIGHT) if quantity == quantity.to_integral(): return int(quantity) else: raise exceptions.QuantityError('Divisible assets have only eight decimal places of precision.') else: return quantity.quantize(EIGHT) else: if quantity != round(quantity): raise exceptions.QuantityError('Fractional quantities of indivisible assets.') return round(quantity) def get_debits (db, address=None, asset=None, order_by=None, order_dir='asc'): """This does not include BTC.""" cursor = db.cursor() cursor.execute('''SELECT * FROM debits''') debits = [] for debit in cursor.fetchall(): if address and debit['address'] != address: continue if asset != None and debit['asset'] != asset: continue debits.append(dict(debit)) cursor.close() return do_order_by(debits, order_by, order_dir) def get_credits (db, address=None, asset=None, order_by=None, order_dir='asc'): """This does not include BTC.""" cursor = db.cursor() cursor.execute('''SELECT * FROM credits''') credits = [] for credit in cursor.fetchall(): if address and credit['address'] != address: continue if asset != None and credit['asset'] != asset: continue credits.append(dict(credit)) cursor.close() return do_order_by(credits, order_by, order_dir) def get_balances (db, address=None, asset=None, order_by=None, order_dir='asc'): """This should never be used to check Bitcoin balances.""" cursor = db.cursor() cursor.execute('''SELECT * FROM balances''') balances = [] for balance in cursor.fetchall(): if address and balance['address'] != address: continue if asset != None and balance['asset'] != asset: continue if asset == 'BTC': raise Exception balances.append(dict(balance)) cursor.close() return do_order_by(balances, order_by, order_dir) def get_sends (db, validity=None, source=None, destination=None, order_by='tx_index', order_dir='asc', start_block=None, end_block=None): cursor = db.cursor() cursor.execute('''SELECT * FROM sends%s''' % get_limit_to_blocks(start_block, end_block)) sends = [] for send in cursor.fetchall(): if validity and send['validity'] != validity: continue if source and send['source'] != source: continue if destination and send['destination'] != destination: continue sends.append(dict(send)) cursor.close() return do_order_by(sends, order_by, order_dir) def get_orders (db,
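# --- Illustrative aside: a self-contained sketch of the base-26 asset-name codec that
# get_asset_id()/get_asset_name() implement above. 'BADC' is an arbitrary example name;
# the n > 26**3 check mirrors the minimum-of-four-letters rule enforced there.
B26_DIGITS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

def encode_b26(name):
    n = 0
    for c in name:
        n = n * 26 + B26_DIGITS.index(c)  # most significant letter first
    return n

def decode_b26(n):
    chars = []
    while n > 0:
        n, r = divmod(n, 26)
        chars.append(B26_DIGITS[r])
    return ''.join(reversed(chars))

assert encode_b26('BADC') == 17656 and decode_b26(17656) == 'BADC'  # round trip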
import trio from typing import Type, List as PyList, Set, Dict from typing import Optional from itertools import zip_longest from e2db.models import ( Fork, BeaconState, Validator, ValidatorStatus, ValidatorEpochBalance, ValidatorOddBalance, BeaconBlockBody, BeaconBlock, SignedBeaconBlock, Eth1Data, Eth1BlockVote, ProposerSlashing, ProposerSlashingInclusion, AttesterSlashing, AttesterSlashingInclusion, AttestationData, IndexedAttestation, PendingAttestation, DepositData, Deposit, DepositInclusion, SignedVoluntaryExit, SignedVoluntaryExitInclusion, Checkpoint, format_epoch, BitsAttestation, CanonBeaconBlock, CanonBeaconState, CanonBeaconEpoch, StakingStats, Base ) from eth2spec.phase0 import spec import eth2fastspec from sqlalchemy_mate import ExtendedBase from sqlalchemy.orm import Session from sqlalchemy.dialects import postgresql import traceback def upsert_all(session: Session, table: Type[Base], data: PyList[ExtendedBase]): if session.bind.dialect.name == "postgresql": # Special postgres dialect upsert, to avoid talking to the database for every value individually values = list(map(lambda x: x.to_dict(), data)) insert_stmt = postgresql.insert(table.__table__).values(values) pk = insert_stmt.table.primary_key update_columns = {col.name: col for col in insert_stmt.excluded if col.name not in pk} update_stmt = insert_stmt.on_conflict_do_update( index_elements=pk, set_=update_columns, ) session.execute(update_stmt) else: # slower, but for sqlite it's fast anyway table: ExtendedBase # noinspection PyTypeChecker table.upsert_all(session, data) def upsert(session: Session, inst: ExtendedBase): # noinspection PyTypeChecker upsert_all(session, inst.__class__, [inst]) def store_state(session: Session, state: spec.BeaconState): state_root = state.hash_tree_root() eth1_data = state.eth1_data eth1_data_root = eth1_data.hash_tree_root() upsert(session, Eth1Data( data_root=eth1_data_root, deposit_root=eth1_data.deposit_root, deposit_count=eth1_data.deposit_count, block_hash=eth1_data.block_hash, )) fork = state.fork upsert(session, Fork( current_version=fork.current_version, previous_version=fork.previous_version, epoch=fork.epoch, )) prev_just_ch = state.previous_justified_checkpoint prev_just_ch_root = prev_just_ch.hash_tree_root() upsert(session, Checkpoint( checkpoint_root=prev_just_ch_root, epoch=prev_just_ch.epoch, block_root=prev_just_ch.root, )) curr_just_ch = state.current_justified_checkpoint curr_just_ch_root = curr_just_ch.hash_tree_root() upsert(session, Checkpoint( checkpoint_root=curr_just_ch_root, epoch=curr_just_ch.epoch, block_root=curr_just_ch.root, )) finalized_ch = state.finalized_checkpoint finalized_ch_root = finalized_ch.hash_tree_root() upsert(session, Checkpoint( checkpoint_root=finalized_ch_root, epoch=finalized_ch.epoch, block_root=finalized_ch.root, )) header = state.latest_block_header.copy() if header.state_root == spec.Bytes32(): header.state_root = state_root header_root = header.hash_tree_root() upsert(session, BeaconState( state_root=state_root, latest_block_root=header_root, slot=state.slot, eth1_data_root=eth1_data_root, fork=fork.current_version, eth1_deposit_index=state.eth1_deposit_index, validators_root=state.validators.hash_tree_root(), balances=state.balances.hash_tree_root(), total_slashings=spec.Gwei(sum(state.slashings.readonly_iter())), prev_epoch_att_count=len(state.previous_epoch_attestations), curr_epoch_att_count=len(state.current_epoch_attestations), justification_bits=''.join('1' if state.justification_bits[i] else '0' for i in 
range(spec.JUSTIFICATION_BITS_LENGTH)), prev_just_checkpoint=prev_just_ch_root, curr_just_checkpoint=curr_just_ch_root, finalized_checkpoint=finalized_ch_root, )) def store_validator_all(session: Session, curr_state: spec.BeaconState, is_canon: bool): header = curr_state.latest_block_header.copy() header.state_root = curr_state.hash_tree_root() block_root = header.hash_tree_root() slot = curr_state.slot result_validators = [] result_validator_statuses = [] result_balances = [] epoch = spec.compute_epoch_at_slot(slot) for i, (v, b) in enumerate(zip(curr_state.validators.readonly_iter(), curr_state.balances.readonly_iter())): # Create new validator result_validators.append(Validator( intro_block_root=block_root, validator_index=i, intro_slot=slot, pubkey=v.pubkey, withdrawal_credentials=v.withdrawal_credentials, )) # Update validator status if it's a new or changed validator result_validator_statuses.append(ValidatorStatus( intro_block_root=block_root, validator_index=i, intro_slot=slot, effective_balance=v.effective_balance, slashed=bool(v.slashed), activation_eligibility_epoch=format_epoch(v.activation_eligibility_epoch), activation_epoch=format_epoch(v.activation_epoch), exit_epoch=format_epoch(v.exit_epoch), withdrawable_epoch=format_epoch(v.withdrawable_epoch), )) # And its balance if canon and epoch if is_canon: result_balances.append(ValidatorEpochBalance( epoch=epoch, validator_index=i, balance=b, eff_balance=v.effective_balance, )) if len(result_validators) > 0: upsert_all(session, Validator, result_validators) if len(result_validator_statuses) > 0: upsert_all(session, ValidatorStatus, result_validator_statuses) if len(result_balances): upsert_all(session, ValidatorEpochBalance, result_balances) def store_validator_diff(session: Session, prev_state: spec.BeaconState, curr_state: spec.BeaconState, is_canon: bool): header = curr_state.latest_block_header.copy() header.state_root = curr_state.hash_tree_root() block_root = header.hash_tree_root() slot = curr_state.slot if prev_state.validators.hash_tree_root() != curr_state.validators.hash_tree_root(): prev: Optional[spec.Validator] curr: Optional[spec.Validator] print("checking validators diff") # First put them in lists to avoid len() lookups reducing it to O(n^2) performance. 
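# Note on the diff below: zip_longest pads the (shorter) previous list with None, so
# prev is None marks a brand-new validator while prev != curr marks a changed one; the
# validator registry is append-only, which is why curr is asserted to never be None.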
prev_vals = list(prev_state.validators.readonly_iter()) curr_vals = list(curr_state.validators.readonly_iter()) result_validators = [] result_validator_statuses = [] for i, (prev, curr) in enumerate(zip_longest(prev_vals, curr_vals)): assert curr is not None if prev is None: # Create new validator result_validators.append(Validator( intro_block_root=block_root, validator_index=i, intro_slot=slot, pubkey=curr.pubkey, withdrawal_credentials=curr.withdrawal_credentials, )) if prev is None or prev != curr: # Update validator status if it's a new or changed validator result_validator_statuses.append(ValidatorStatus( intro_block_root=block_root, validator_index=i, intro_slot=slot, effective_balance=curr.effective_balance, slashed=bool(curr.slashed), activation_eligibility_epoch=format_epoch(curr.activation_eligibility_epoch), activation_epoch=format_epoch(curr.activation_epoch), exit_epoch=format_epoch(curr.exit_epoch), withdrawable_epoch=format_epoch(curr.withdrawable_epoch), )) if len(result_validators) > 0: print("Upserting validators") upsert_all(session, Validator, result_validators) if len(result_validator_statuses) > 0: print("Upserting validator statuses") upsert_all(session, ValidatorStatus, result_validator_statuses) if is_canon: if slot % spec.SLOTS_PER_EPOCH == 0: epoch = spec.compute_epoch_at_slot(slot) curr_bal: spec.Gwei curr_val: spec.Validator print("checking epoch balances diff") curr_bals = list(curr_state.balances.readonly_iter()) curr_vals = list(curr_state.validators.readonly_iter()) result_balances = [] for i, (curr_bal, curr_val) in enumerate(zip(curr_bals, curr_vals)): result_balances.append(ValidatorEpochBalance( epoch=epoch, validator_index=i, balance=curr_bal, eff_balance=curr_val.effective_balance, )) upsert_all(session, ValidatorEpochBalance, result_balances) elif prev_state.balances.hash_tree_root() != curr_state.balances.hash_tree_root(): prev_bals = list(prev_state.balances.readonly_iter()) curr_bals = list(curr_state.balances.readonly_iter()) result_balances = [] for i, (prev, curr) in enumerate(zip_longest(prev_bals, curr_bals)): if prev is None or prev != curr: # Only track changes, and key by block-root, to be able to reorg without overwrite/deletes. result_balances.append(ValidatorOddBalance( intro_block_root=block_root, intro_slot=slot, validator_index=i, balance=curr, )) if len(result_balances) > 0: print("Upserting odd validator balances") upsert_all(session, ValidatorOddBalance, result_balances) def store_staking_stats(session: Session, post_state: spec.BeaconState, post_epc: eth2fastspec.EpochsContext): # Calculate the staking data as if it were an epoch process pre-computation epoch_process = eth2fastspec.prepare_epoch_process_state(post_epc, post_state) # Compute the live stake stats from the epoch process statuses. # The prev epoch was already computed (since it's used for the rewards/penalties part of the transition), # but we want live stats as well. 
curr_source_unsl_stake, curr_target_unsl_stake, curr_head_unsl_stake = 0, 0, 0 for status in epoch_process.statuses: if eth2fastspec.has_markers(status.flags, eth2fastspec.FLAG_PREV_SOURCE_ATTESTER | eth2fastspec.FLAG_UNSLASHED): curr_source_unsl_stake += status.validator.effective_balance if eth2fastspec.has_markers(status.flags, eth2fastspec.FLAG_PREV_TARGET_ATTESTER): curr_target_unsl_stake += status.validator.effective_balance if eth2fastspec.has_markers(status.flags, eth2fastspec.FLAG_PREV_HEAD_ATTESTER): curr_head_unsl_stake += status.validator.effective_balance upsert(session, StakingStats( state_root=post_state.hash_tree_root(), slot=post_state.slot, total_active_stake=epoch_process.total_active_stake, prev_unslashed_source_stake=epoch_process.prev_epoch_unslashed_stake.source_stake, prev_unslashed_target_stake=epoch_process.prev_epoch_unslashed_stake.target_stake, prev_unslashed_head_stake=epoch_process.prev_epoch_unslashed_stake.head_stake, curr_unslashed_source_stake=curr_source_unsl_stake, curr_unslashed_target_stake=curr_target_unsl_stake, curr_unslashed_head_stake=curr_head_unsl_stake, prev_epoch_target_stake=epoch_process.prev_epoch_target_stake, curr_epoch_target_stake=epoch_process.curr_epoch_target_stake, active_validators=epoch_process.active_validators, )) def store_block(session: Session, post_state: spec.BeaconState, signed_block: spec.SignedBeaconBlock): block = signed_block.message block_root = block.hash_tree_root() body = block.body # Eth1 eth1_data = body.eth1_data eth1_data_root = eth1_data.hash_tree_root() upsert(session, Eth1Data( data_root=eth1_data_root, deposit_root=eth1_data.deposit_root, deposit_count=eth1_data.deposit_count, block_hash=eth1_data.block_hash, )) upsert(session, Eth1BlockVote( beacon_block_root=block_root, slot=block.slot, eth1_data_root=eth1_data_root, proposer_index=block.proposer_index, )) def handle_header(block: spec.BeaconBlockHeader): upsert(session, BeaconBlock( block_root=block.hash_tree_root(), slot=block.slot, proposer_index=block.proposer_index, parent_root=block.parent_root, state_root=block.state_root, body_root=block.body_root, )) def handle_signed_header(signed_block: spec.SignedBeaconBlockHeader): upsert(session, SignedBeaconBlock( root=signed_block.hash_tree_root(), signature=signed_block.signature, block_root=signed_block.message.hash_tree_root(), )) # Ugly but effective: collect operations, ensuring they are unique first, and then upsert as batch. 
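# The Set containers below (checkpoints, attestation data, indexed attestations)
# deduplicate objects that are shared between attestations and attester slashings,
# so each distinct record is upserted exactly once per block.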
# Proposer slashings proposer_slashing: spec.ProposerSlashing result_prop_slashing = [] result_prop_slashing_inc = [] for i, proposer_slashing in enumerate(body.proposer_slashings.readonly_iter()): handle_header(proposer_slashing.signed_header_1.message) handle_header(proposer_slashing.signed_header_2.message) handle_signed_header(proposer_slashing.signed_header_1) handle_signed_header(proposer_slashing.signed_header_2) result_prop_slashing.append(ProposerSlashing( root=proposer_slashing.hash_tree_root(), signed_header_1=proposer_slashing.signed_header_1.hash_tree_root(), signed_header_2=proposer_slashing.signed_header_2.hash_tree_root(), )) result_prop_slashing_inc.append(ProposerSlashingInclusion( intro_block_root=block_root, intro_index=i, root=proposer_slashing.hash_tree_root(), )) if len(result_prop_slashing) > 0: upsert_all(session, ProposerSlashing, result_prop_slashing) if len(result_prop_slashing_inc) > 0: upsert_all(session, ProposerSlashingInclusion, result_prop_slashing_inc) result_checkpoints: Set[spec.Checkpoint] = set() result_att_datas: Set[spec.AttestationData] = set() def handle_att_data(data: spec.AttestationData): result_checkpoints.add(data.source) result_checkpoints.add(data.target) result_att_datas.add(data) result_indexed_atts: Set[spec.IndexedAttestation] = set() bits_to_indexed: Dict[spec.Root, spec.Root] = dict() def handle_indexed_att(indexed: spec.IndexedAttestation): result_indexed_atts.add(indexed) # Attester slashings attester_slashing: spec.AttesterSlashing result_att_slashing = [] result_att_slashing_inc = [] for i, attester_slashing in enumerate(body.attester_slashings.readonly_iter()): handle_att_data(attester_slashing.attestation_1.data) handle_att_data(attester_slashing.attestation_2.data) handle_indexed_att(attester_slashing.attestation_1) handle_indexed_att(attester_slashing.attestation_2) result_att_slashing.append(AttesterSlashing( root=attester_slashing.hash_tree_root(), attestation_1=attester_slashing.attestation_1.hash_tree_root(), attestation_2=attester_slashing.attestation_2.hash_tree_root(), )) result_att_slashing_inc.append(AttesterSlashingInclusion( intro_block_root=block_root, intro_index=i, root=attester_slashing.hash_tree_root(), )) # Attestations attestation: spec.Attestation result_pending_atts: PyList[spec.IndexedAttestation] = [] for i, attestation in enumerate(body.attestations.readonly_iter()): data = attestation.data handle_att_data(data) indexed = spec.get_indexed_attestation(post_state, attestation) bits_to_indexed[spec.Root(attestation.hash_tree_root())] = spec.Root(indexed.hash_tree_root()) handle_indexed_att(indexed) result_pending_atts.append(indexed) if len(result_checkpoints) > 0: upsert_all(session, Checkpoint, [ Checkpoint( checkpoint_root=ch.hash_tree_root(), epoch=ch.epoch, block_root=ch.root, ) for ch in result_checkpoints ]) if len(result_att_datas) > 0: upsert_all(session, AttestationData, [ AttestationData( att_data_root=data.hash_tree_root(), slot=data.slot, index=data.index, beacon_block_root=data.beacon_block_root, source=data.source.hash_tree_root(), target=data.target.hash_tree_root(), ) for data in result_att_datas ]) if len(bits_to_indexed) > 0: upsert_all(session, BitsAttestation, [ BitsAttestation( bits_attestation_root=attestation_root, indexed_attestation_root=indexed_root, ) for attestation_root, indexed_root in bits_to_indexed.items() ]) if len(result_indexed_atts) > 0: upsert_all(session, IndexedAttestation, [ IndexedAttestation( indexed_attestation_root=indexed.hash_tree_root(), attesting_indices=', 
'.join(map(str, indexed.attesting_indices.readonly_iter())), data=indexed.data.hash_tree_root(), signature=indexed.signature, ) for indexed in result_indexed_atts ]) if len(result_pending_atts) > 0: upsert_all(session, PendingAttestation, [ PendingAttestation( intro_block_root=block_root, intro_index=i, indexed_att=indexed.hash_tree_root(), inclusion_delay=block.slot - indexed.data.slot, proposer_index=block.proposer_index, ) for i, indexed in enumerate(result_pending_atts) ]) # After inserting the attestations, do the attester slashings (attestations may be foreign key) if len(result_att_slashing) > 0: upsert_all(session, AttesterSlashing, result_att_slashing) if len(result_att_slashing_inc) > 0: upsert_all(session, AttesterSlashingInclusion, result_att_slashing_inc) # Deposits deposit: spec.Deposit pre_dep_count = post_state.eth1_deposit_index - len(body.deposits) result_dep_datas: Set[spec.DepositData] = set() result_deps: PyList[Deposit] = [] result_dep_incl: PyList[DepositInclusion] = [] for i, deposit in enumerate(body.deposits.readonly_iter()): data = deposit.data dep_data_root = data.hash_tree_root() result_dep_datas.add(data) result_deps.append(Deposit( root=deposit.hash_tree_root(), deposit_index=pre_dep_count + i, dep_tree_root=post_state.eth1_data.deposit_root, data=dep_data_root, )) result_dep_incl.append(DepositInclusion( intro_block_root=block_root, intro_index=i, root=deposit.hash_tree_root(), )) if len(result_dep_datas) > 0: upsert_all(session, DepositData, [ DepositData( data_root=data.hash_tree_root(), pubkey=data.pubkey, withdrawal_credentials=data.withdrawal_credentials, amount=data.amount, signature=data.signature, ) for data in result_dep_datas ]) if len(result_deps) > 0: upsert_all(session, Deposit, result_deps) if len(result_dep_incl) > 0: upsert_all(session, DepositInclusion, result_dep_incl) # Voluntary Exits sig_vol_exit: spec.SignedVoluntaryExit vol_exits = list(body.voluntary_exits.readonly_iter()) if len(vol_exits) > 0: upsert_all(session, SignedVoluntaryExit, [ SignedVoluntaryExit( root=sig_vol_exit.hash_tree_root(), epoch=sig_vol_exit.message.epoch, validator_index=sig_vol_exit.message.validator_index, signature=sig_vol_exit.signature, ) for sig_vol_exit in vol_exits ]) upsert_all(session, SignedVoluntaryExitInclusion, [ SignedVoluntaryExitInclusion( intro_block_root=block_root, intro_index=i, root=sig_vol_exit.hash_tree_root(), ) for i, sig_vol_exit in enumerate(vol_exits) ]) # The body upsert(session, BeaconBlockBody( body_root=body.hash_tree_root(), randao_reveal=body.randao_reveal, eth1_data_root=body.eth1_data.hash_tree_root(), graffiti=body.graffiti, # Operations proposer_slashings_count=len(body.proposer_slashings), attester_slashings_count=len(body.attester_slashings), attestations_count=len(body.attestations), deposits_count=len(body.deposits), voluntary_exits_count=len(body.voluntary_exits), )) # The block itself upsert(session, BeaconBlock( block_root=block_root, slot=block.slot, proposer_index=block.proposer_index, parent_root=block.parent_root, state_root=block.state_root, body_root=body.hash_tree_root(), )) # Block signature upsert(session, SignedBeaconBlock( root=signed_block.hash_tree_root(), signature=signed_block.signature, block_root=block_root, )) def calc_beacon_proposer_index(state: BeaconState, slot: spec.Slot) -> spec.ValidatorIndex: epoch = spec.compute_epoch_at_slot(slot) seed = spec.hash(spec.get_seed(state, epoch, spec.DOMAIN_BEACON_PROPOSER) + spec.int_to_bytes(state.slot, length=8)) indices = 
spec.get_active_validator_indices(state, epoch) return spec.compute_proposer_index(state, indices, seed) def store_canon_chain(session: Session, post: spec.BeaconState, signed_block: Optional[spec.SignedBeaconBlock]): proposer_index: spec.ValidatorIndex if signed_block is not None: block = signed_block.message assert post.slot == block.slot upsert(session, CanonBeaconBlock( slot=block.slot, block_root=block.hash_tree_root(), )) proposer_index = block.proposer_index else: proposer_index = calc_beacon_proposer_index(post, post.slot) upsert(session, CanonBeaconState( slot=post.slot, state_root=post.hash_tree_root(), proposer_index=proposer_index, empty_slot=(signed_block is None) )) if post.slot % spec.SLOTS_PER_EPOCH == 0: upsert(session, CanonBeaconEpoch( state_root=post.hash_tree_root(), epoch=spec.compute_epoch_at_slot(post.slot), )) async def ev_eth2_state_loop(session: Session, recv: trio.MemoryReceiveChannel): prev_state: spec.BeaconState state: spec.BeaconState block: Optional[spec.SignedBeaconBlock] epoch_process: Optional[eth2fastspec.EpochProcess] async for (prev_state, post_state, block, prev_epc, post_epc,
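# --- Illustrative aside: a minimal, self-contained sketch of the batched PostgreSQL
# upsert pattern used by upsert_all() above -- a single INSERT ... ON CONFLICT (pk)
# DO UPDATE statement for the whole batch instead of one round trip per row. The
# `kv_store` table is a hypothetical example, not one of the e2db models.
from sqlalchemy import Column, MetaData, String, Table
from sqlalchemy.dialects import postgresql as pg

metadata = MetaData()
kv_store = Table(
    'kv_store', metadata,
    Column('key', String, primary_key=True),
    Column('value', String),
)

def upsert_rows(session, rows):
    # rows: list of dicts, e.g. [{'key': 'a', 'value': '1'}, ...]
    insert_stmt = pg.insert(kv_store).values(rows)
    pk = insert_stmt.table.primary_key
    # update every non-key column from the EXCLUDED pseudo-row on conflict
    update_columns = {col.name: col for col in insert_stmt.excluded if col.name not in pk}
    session.execute(insert_stmt.on_conflict_do_update(index_elements=pk, set_=update_columns))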
self._margin_unsup)] if self.monitor_unsup_outputs: im = np.hstack( (montage(self.fake_out_unsup[0].asnumpy()[:9, 0]), montage(self.fake_out_unsup_aug[0].asnumpy()[:9, 0]), montage( np.abs( self.fake_out_unsup[0].asnumpy()[:9, 0] - self.fake_out_unsup_aug[0].asnumpy()[:9, 0])))) [plt.imsave('%s/ep%04d_%02d_%d' % ( self.result_folder_figure_train_unsup, self.current_epoch, self.current_it, i), im) for i in range(1)] def optimize_D(self): if hasattr(self, 'A_rp_unsup'): # choose unsup data if avail. tmp_input = self.A_rp_unsup else: tmp_input = self.A_rp fake_out = [self.netG(A_rp) for A_rp in tmp_input] fake_out = [fo[0] if self.lambda_aux > 0 else fo for fo in fake_out] if hasattr(self, 'wp_unsup'): tmp_wp = self.wp_unsup else: tmp_wp = self.wp fake_out = [nd.where(wp, fo, wp - 1) for wp, fo in zip(tmp_wp, fake_out)] fake_concat = [self.image_pool.query(nd.concat(A_rp, fo, dim=1)) for A_rp, fo in zip(self.A_rp, fake_out)] with autograd.record(): # Train with fake image # Use image pooling to utilize history images output = [self.netD(fc) for fc in fake_concat] fake_label = [nd.zeros_like(op) for op in output] err_DB_fake = [self.criterionGAN(op, fl) for op, fl in zip(output, fake_label)] [self.metric.update([fl, ], [op, ]) for fl, op in zip(fake_label, output)] # self.metric.update([fake_label[0], ], [output[0], ]) # Train with real image real_concat = [nd.concat(A_rp, _C, dim=1) for A_rp, _C in zip(self.A_rp, self.C)] output = [self.netD(rc) for rc in real_concat] real_label = [nd.ones_like(op) for op in output] err_DB_real = [self.criterionGAN(op, rl) for op, rl in zip(output, real_label)] self.err_DB = [(edb + edf) * 0.5 for edb, edf in zip(err_DB_real, err_DB_fake)] [self.metric.update([rl, ], [op, ]) for rl, op in zip(real_label, output)] for err_DB in self.err_DB: err_DB.backward() # with amp.scale_loss(self.err_DB, self.trainerD) as scaled_loss: # autograd.backward(scaled_loss) self.trainerD.step(self.batch_size) def create_net(self, upscale_factor=1): from mxnet.gluon import nn import mxnet.gluon.contrib.nn as contrib_nn def conv_factory(opts, num_filters, kernel_size, stride=1, group=1): """A convenience function for convolution with BatchNorm & activation""" pad = int((kernel_size - 1) / 2) out = nn.HybridSequential() out.add(nn.BatchNorm()) if opts.activation == 'leaky': out.add(nn.LeakyReLU(opts.alpha)) else: out.add(nn.Activation(opts.activation)) out.add(nn.Conv2D(channels=num_filters, kernel_size=(kernel_size, kernel_size), strides=(stride, stride), use_bias=opts.use_bias, padding=(pad, pad), groups=group)) return out class Options: """""" def __init__(self): super(Options, self).__init__() self.activation = 'relu' self.use_bias = False class SuperResolutionNet(gluon.HybridBlock): def __init__(self, upscale_factor, opts): super(SuperResolutionNet, self).__init__() with self.name_scope(): self.conv1 = conv_factory(opts, num_filters=64, kernel_size=5, stride=1) self.conv2 = conv_factory(opts, num_filters=64, kernel_size=3, stride=1) self.conv3 = conv_factory(opts, num_filters=32, kernel_size=3, stride=1) self.conv4 = conv_factory(opts, num_filters=upscale_factor ** 2, kernel_size=3, stride=1) self.pxshuf = contrib_nn.PixelShuffle2D(upscale_factor) def hybrid_forward(self, F, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = F.tanh(self.pxshuf(x)) return x return SuperResolutionNet(upscale_factor, opts=Options()) def optimize_G(self): """Optimize generator""" if np.array( [self.lambda_C, self.lambda_D, self.lambda_consistency, self.lambda_unsup, 
self.lambda_embedding_unsup, self.lambda0, self.lambda_CL, self.lambda_aux]).sum() == 0: # No extra loss with autograd.record(): self.fake_out = [self.netG(A_rp) for A_rp in self.A_rp] self.loss_true_density_train = [self.trueDensity_train(fake_out, C, m, margin) for C, fake_out, m, margin in zip(self.C, self.fake_out, self.m, self._margin)] self.loss_G = self.loss_true_density_train [loss_G.backward() for loss_G in self.loss_G] else: with autograd.record(): self.fake_out = [self.netG(A_rp) for A_rp in self.A_rp] # Supervised learning self.var0 = [nd.square(coef) for coef in self.netG.coef_G._data] self.loss_true_density_train = [self.trueDensity_train(fake_out, C, m, margin) for fake_out, C, m, margin in zip(self.fake_out, self.C, self.m, self._margin)] self.loss_G = [((1 / var) * l + nd.log(var)) for l, var in zip(self.loss_true_density_train, self.var0)] ############################### Consistency Loss ############################### if self.lambda_consistency > 0: fake_out_T2 = [self.netG(A_rp) for A_rp in [nd.concat(A_rp[:, 0:1], nd.zeros_like(A_rp[:, 0:1]), dim=1) for A_rp in self.A_rp]] # masked out ADC channel fake_out_ADC = [self.netG(A_rp) for A_rp in [nd.concat(nd.zeros_like(A_rp[:, 1:2]), A_rp[:, 1:2], dim=1) for A_rp in self.A_rp]] # masked out T2 channel self.loss_consistency_train = [self.density_corr(_fake_out_T2, _fake_out_ADC, wp) for _fake_out_T2, _fake_out_ADC, wp in zip(fake_out_T2, fake_out_ADC, self.wp)] self.var1 = [nd.square(coef) for coef in self.netG.coef_consistency._data] self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in zip(self.loss_G, self.loss_consistency_train, self.var1)] ############################### Correlation Loss ############################### if self.lambda_C > 0: self.var2 = [nd.square(coef) for coef in self.netG.coef_C._data] self.loss_corr_train = [self.density_corr(fake_out, C, m) for C, fake_out, m in zip(self.C, self.fake_out, self.m)] self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in zip(self.loss_G, self.loss_corr_train, self.var2)] ############################### Unsupervised learning ############################### if self.lambda_unsup > 0: self.compare_unsup() self.var3 = [nd.square(coef) for coef in self.netG.coef_unsup._data] self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in zip(self.loss_G, self.unsup_loss, self.var3)] ############################## Feature Comparision ############################### if self.lambda_D > 0: self.var4 = [nd.square(coef) for coef in self.netG.coef_D._data] self.loss_features = [self.feature_difference( nd.L2Normalization(self.D_features(nd.where(m, C, m - 1))), nd.L2Normalization(self.D_features(nd.where(m, fake_out, m - 1))), nd.ones((C.shape[0]), ctx=C.context) ).mean() for m, C, fake_out in zip(self.m, self.C, self.fake_out)] self.loss_G = [l0 + ((1 / var) * l1 * .1 + nd.log(var)) for l0, l1, var in zip(self.loss_G, self.loss_features, self.var4)] ############################### Classification Loss ############################### if self.lambda_CL > 0: qml_pred = [-nd.abs((fake_out - qml) * m) for fake_out, qml, m in zip(self.fake_out, self.qml, self.m)] self.var5 = [nd.square(coef) for coef in self.netG.coef_CL._data] self.loss_CL = [self.cross_entropy(_qml_pred, qml_gt[:, 0], m) for _qml_pred, qml_gt, m in zip(qml_pred, self.qml_gt, self.m)] self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in zip(self.loss_G, self.loss_CL, self.var5)] ############################### Unsupervised embedding learning 
############################### if self.lambda_embedding_unsup > 0: self.compare_embedding_unsup() self.var6 = [nd.square(coef) for coef in self.netG.coef_embedding_unsup._data] self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in zip(self.loss_G, self.unsup_embedding_loss, self.var6)] ################################### GAN ################################### # if self.lambda0 > 0: # fake_concat = [nd.concat(A_rp, fake_out, dim=1) for A_rp, fake_out in zip(self.A_rp, self.fake_out)] # output = [self.netD(_fake_concat) for _fake_concat in fake_concat] # self.wp_rz = [self.resize_wp(wp, _output) for wp, _output in zip(self.wp, output)] # real_label = [nd.ones(op.shape, ctx=_ctx) for op, _ctx in zip(output, self.ctx)] # self.realistic_loss = [self.criterionGAN(_output, _real_label, wp_rz) for _output, _real_label, wp_rz in # zip(output, real_label, self.wp_rz)] # else: # self.realistic_loss = [nd.zeros_like(l) for l in self.loss_true_density_train] # # self.loss_G = [realistic_loss * self.lambda0 + loss_true_density_train * self.lambda1 for # realistic_loss, loss_true_density_train in # zip(self.realistic_loss, self.loss_true_density_train)] ############################### Aux Out ############################### # if self.lambda_aux > 0: # aux_lbs = self.A_unsup if self.reconstruct_input else self.wp # self.aux_loss = [self.aux_fn(aux_out, aux_lb, wp_unsup) for aux_out, aux_lb, wp_unsup in # zip(self.aux_out, aux_lbs, self.wp_unsup)] # else: # self.aux_loss = [nd.zeros_like(loss_G) for loss_G in self.loss_G] # Final generator loss [loss_G.backward() for loss_G in self.loss_G] self.trainerG.step(1, ignore_stale_grad=False) if self.use_l_coefs: self.trainer_coefs.step(1, ignore_stale_grad=False) [self.save_training_outputs(self.A_rp[i], self.fake_out[i], self.C[i], self.m[i], prefix='', suffix='_%02d_%d' % (self.current_it, i)) if self.monitor_training_outputs else None for i in range(len(self.ctx))] def update_running_loss(self, first_iter=False, num_batch=None): """Compute running loss""" if num_batch is None: if first_iter: loss_fields = [field for field in self.__dict__.keys() if ('loss' in field) or ('err' in field)] self.running_loss_fields = ['running_' + field for field in loss_fields] [self.__setattr__(field, 0.) for field in self.running_loss_fields] for loss_field in self.running_loss_fields: _loss = nd.concatenate(list(self.__getattribute__(loss_field.replace('running_', '')))) self.__setattr__(loss_field, (self.__getattribute__(loss_field) + _loss.mean().asscalar())) else: for loss_field in self.running_loss_fields: self.__setattr__(loss_field, (self.__getattribute__(loss_field) / num_batch)) def update_mxboard(self, sw, epoch, val_data=None): """ SHOW STATS AND IMAGES ON TENSORBOARD. 
THIS SHOULD BE RUN AFTER RUNNING UPDATE_RUNNING_LOSS """ for loss_field in self.running_loss_fields: _loss = self.__getattribute__(loss_field) _loss = _loss.mean().asscalar() if isinstance(_loss, nd.NDArray) else _loss.mean() if 'loss_true_density' in loss_field: # True density sw.add_scalar('loss/true_density_loss', _loss, global_step=epoch) else: # GAN loss loss_type = loss_field.split('_')[0] + '_' + \ loss_field.split('_')[1] + '_' + \ loss_field.split('_')[2] # sw.add_scalar('loss/' + loss_type, {loss_field: _loss}, global_step=epoch) sw.add_scalar('loss/' + loss_type, _loss, global_step=epoch) if hasattr(self, 'running_loss_true_density_val'): sw.add_scalar('loss/true_density_loss_val', self.running_loss_true_density_val, global_step=epoch) metric_list = metrics.update_mxboard_metric_multi_maps( sw, data=val_data, global_step=epoch, metric_names=['r_whole', 'l1_whole', 'ssim_whole', ], prefix='validation_', num_input_channels=self.n_A_channel_idx, c_thr=self.C_thr, density_range=self.density_range, root=self.root) # 'r', 'l1', 'ssim', 'nmi', # if hasattr(self, 'current_margin'): # if self.current_it >= (self.total_iter * .5): # if metric_list['l1_whole_EPI'] >= .07: # exit() sw.add_scalar('loss_margin', self.current_margin, global_step=epoch) ####################################### # Map input data to 0 - 1 for c in range(val_data[0].shape[1]): val_data[0][:, c] = (val_data[0][:, c] - val_data[0][:, c].min()) / ( val_data[0][:, c].max() - val_data[0][:, c].min()) * val_data[4][:, 0] """ MULTIPLE CHANNELS OF EACH IMAGE ARE SPLIT INTO SEPARATE IMAGES """ _val_data = [] for i in range(len(val_data)): for j in range(val_data[i].shape[1]): _val_data.append(val_data[i][:, j:j + 1]) ####################################### """ NORM TO 0-1 RANGE IF NECESSARY """ if self.to_11: # Normalize image from [-1, 1] to [0, 1] for i in range(self.n_A_channel_idx, self.n_A_channel_idx + self.num_channels_out * 2): # prediction and label _val_data[i] = self.normalize_01(_val_data[i], [-1, 1]) * _val_data[-1] ####################################### """ SAVE FIRST IMAGE TO FOLDER & UPDATE BEST METRICS """ to_save_montage = self.update_best_metrics(metric_list) print(self.best_metrics) if to_save_montage: np.save(f'{self.result_folder_figure_val}_data_{self.current_it}.npy', _val_data) self.save_montage_im(_val_data) ####################################### """ DROP LAST CHANNEL (WP) IN _val_data BECAUSE IT IS NO LONGER
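# --- Illustrative aside: the uncertainty-based loss weighting that optimize_G() applies
# to every auxiliary term above, isolated into a plain function. Each task loss L_i is
# scaled by 1/coef_i^2 and regularized by log(coef_i^2), so the per-task weights can be
# learned jointly with the network; `losses` and `coefs` are assumed lists of NDArrays.
from mxnet import nd

def combine_weighted_losses(losses, coefs):
    total = None
    for loss, coef in zip(losses, coefs):
        var = nd.square(coef)                    # sigma^2, kept positive by squaring
        term = (1.0 / var) * loss + nd.log(var)  # weighted loss + regularizer
        total = term if total is None else total + term
    return total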
#!/usr/bin/python # # Functions for x843 encoded objects # # @author <NAME> # @email <EMAIL> # from __future__ import unicode_literals, division, absolute_import, print_function import datetime, sys if sys.version_info < (3,): from urllib import quote as urlquote from urllib2 import ( Request as URLRequest, urlopen ) else: from urllib.parse import quote as urlquote from urllib.request import ( Request as URLRequest, urlopen ) from . import cert, error, util from base64 import b64encode from .asn1crypto import core, algos, ocsp, x509 from .verify import verify as verifyocsp from .crypto.rsa import VerificationError from .env import * class OCSPRequest(ocsp.OCSPRequest): @staticmethod def fromBitString(req): "Converts the ASN.1 encoded bitstring into an ASN.1 object" return OCSPRequest().load(req) @staticmethod def fromPEMFile(reqFile): return OCSPRequest.fromBitString(util.fromPEMFile(reqFile)) @staticmethod def fromDERFile(reqFile): return OCSPRequest.fromBitString(util.fromDERFile(reqFile)) def toBitString(self): return self.dump() def writeDERFile(self, file=sys.stdout): util.encodeDERFile(self.toBitString(), file=file) def writePEMFile(self, file=sys.stdout): util.encodePEMFile(self.toBitString(), file=file, header='-----BEGIN OCSP REQUEST-----', footer='-----END OCSP REQUEST-----') def getRequests(self): """ :return: The requests in the format: (issuer_name_hash, issuer_key_hash, serial_number) """ requests = list() for req in self['tbs_request']['request_list']: _nameHash = req['req_cert']['issuer_name_hash'].native _keyHash = req['req_cert']['issuer_key_hash'].native _serial = req['req_cert']['serial_number'].native requests.append((_nameHash, _keyHash, _serial)) return requests def getNonce(self): """ :return: The nonce bitstring """ if self.nonce_value: return self.nonce_value.native return None def prettify(self, space=4, file=sys.stdout): util.prettify('OCSPRequest', self, space=space, file=file) def printCLI(self, prefix='', space=4, file=sys.stdout): padding = ' '*space print('{}Requests'.format(prefix), file=file) for req in self.getRequests(): print('{}{}Issuer Name Hash:'.format(prefix, padding), req[0].encode('hex'), file=file) print('{}{}Issuer Key Hash:'.format(prefix, padding), req[1].encode('hex'), file=file) print('{}{}Serial Number: {:X}'.format(prefix, padding, req[2]), file=file) print('{}{}--'.format(prefix, padding), file=file) if self.getNonce(): print('{}Nonce:'.format(prefix), self.getNonce().encode('hex'), file=file) class OCSPRequestBuilder: "Structure for building an ASN.1 OCSPRequest object" def __init__(self): self.requestList = ocsp.Requests() self.requestExtns = None self.nonce = None def addRequest(self, cacert, serial): hashAlgo = algos.DigestAlgorithm() hashAlgo['algorithm'] = algos.DigestAlgorithmId(u'sha1') certId = ocsp.CertId() certId['hash_algorithm'] = hashAlgo certId['issuer_name_hash'] = cacert.getNameSha1() certId['issuer_key_hash'] = cacert.getKeySha1() certId['serial_number'] = core.Integer(serial) req = ocsp.Request() req['req_cert'] = certId self.requestList[len(self.requestList)] = req def generateNonce(self): if not self.requestExtns: self.requestExtns = ocsp.TBSRequestExtensions() if not self.nonce: self.nonce = util.generateNonceSecure(20) extn = ocsp.TBSRequestExtension() extn['extn_id'] = ocsp.TBSRequestExtensionId(u'nonce') extn['critical'] = True extn['extn_value'] = self.nonce self.requestExtns[len(self.requestExtns)] = extn return self.nonce def build(self): assert len(self.requestList) > 0, 'Must provide at least 1 request' 
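# build() below assembles the accumulated Requests (plus the optional nonce extension)
# into a TBSRequest and wraps that in the outer OCSPRequest structure.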
tbsReq = ocsp.TBSRequest() tbsReq['request_list'] = self.requestList if self.requestExtns: tbsReq['request_extensions'] = self.requestExtns ocspReq = OCSPRequest() ocspReq['tbs_request'] = tbsReq return ocspReq class Response(ocsp.SingleResponse): "Structure defining an ASN.1 OCSP `SingleResponse`" @staticmethod def fromBitString(res): "Converts the ASN.1 encoded bitstring into an ASN.1 object" return Response().load(res) @staticmethod def fromPEMFile(resFile): assert False, 'This object cannot be read from PEM format' @staticmethod def fromDERFile(resFile): return Response.fromBitString(util.fromDERFile(resFile)) def toBitString(self): return self.dump() def writeDERFile(self, file=sys.stdout): util.encodeDERFile(self.toBitString(), file=file) def writePEMFile(self, file=sys.stdout): assert False, 'This object cannot be written in PEM format' def getCertId(self): """ :return: The CertId in the format: (issuer_name_hash, issuer_key_hash, serial_number) """ _nameHash = self['cert_id']['issuer_name_hash'].native _keyHash = self['cert_id']['issuer_key_hash'].native _serial = self['cert_id']['serial_number'].native return (_nameHash, _keyHash, _serial) def getCertStatus(self): """ :return: good, revoked, or unknown """ _status = self['cert_status'].chosen if isinstance(_status, ocsp.RevokedInfo): return 'revoked' return self['cert_status'].chosen.native def getRevokedInfo(self): """ :return: None or the Revoked Info in the format: (DateTime of revocation, reason) """ _status = self['cert_status'].chosen if not isinstance(_status, ocsp.RevokedInfo): return None _time = _status['revocation_time'].native _reason = _status['revocation_reason'] if _reason.native: _reason = _reason.human_friendly else: _reason = None return (_time, _reason) def getThisUpdate(self): """ :return: DateTime of the This Update time """ return self['this_update'].native def getNextUpdate(self): """ :return: DateTime of the Next Update time """ return self['next_update'].native def isValid(self): """ :return: True or an error """ _now = datetime.datetime.now(util.UTC()) _thisUpdate = self.getThisUpdate() _nextUpdate = self.getNextUpdate() if not _thisUpdate or _thisUpdate > _now: raise error.ThisUpdateError() if not _nextUpdate or _nextUpdate < _now: raise error.NextUpdateError() status = self.getCertStatus() if not status or status == 'unknown': raise error.UnknownStatusError() if status == 'revoked': raise error.RevokedStatusError(self.getRevokedInfo()[1]) return True def prettify(self, space=4, file=sys.stdout): util.prettify('OCSPSingleResponse', self, space=space, file=file) def printCLI(self, prefix='', space=4, file=sys.stdout): padding = ' '*space certId = self.getCertId() print('{}{}Issuer Name Hash:'.format(prefix, padding), certId[0].encode('hex'), file=file) print('{}{}Issuer Key Hash:'.format(prefix, padding), certId[1].encode('hex'), file=file) print('{}{}Serial Number: {:X}'.format(prefix, padding, certId[2]), file=file) print('{}{}Cert Status:'.format(prefix, padding), self.getCertStatus(), file=file) revokedInfo = self.getRevokedInfo() if revokedInfo: print('{}{}Revoked Info:'.format(prefix, padding), revokedInfo[0], '-', revokedInfo[1], file=file) print('{}{}This Update:'.format(prefix, padding), self.getThisUpdate(), file=file) print('{}{}Next Update:'.format(prefix, padding), self.getNextUpdate(), file=file) try: self.isValid() print('{}{}Valid: True'.format(prefix, padding), file=file) except error.Error as e: print('{}{}Valid:'.format(prefix, padding), str(e), file=file) 
print('{}{}--'.format(prefix, padding), file=file) class OCSPResponse(ocsp.OCSPResponse): "Structure defining an ASN.1 OCSPResponse object" @staticmethod def fromBitString(res): "Converts the ASN.1 encoded bitstring into an ASN.1 object" return OCSPResponse().load(res) @staticmethod def fromPEMFile(resFile): return OCSPResponse.fromBitString(util.fromPEMFile(resFile)) @staticmethod def fromDERFile(resFile): return OCSPResponse.fromBitString(util.fromDERFile(resFile)) def toBitString(self): return self.dump() def writeDERFile(self, file=sys.stdout): util.encodeDERFile(self.toBitString(), file=file) def writePEMFile(self, file=sys.stdout): util.encodePEMFile(self.toBitString(), file=file, header='-----BEGIN OCSP RESPONSE-----', footer='-----END OCSP RESPONSE-----') def getStatus(self): """ :return: 'successful' 'malformed_request' 'internal_error' 'try_later' 'sign_required' 'unauthorized' """ return self['response_status'].native.encode('utf8') def getResponderId(self): """ :return: String of the Responder ID """ _choice = self.response_data['responder_id'].chosen if isinstance(_choice, x509.Name): return ', '.join(['{}={}'.format(a, b) for a, b in cert.parseNameObject(_choice)]) else: return _choice.native.encode('hex') def getProducedAt(self): """ :return: DateTime of the Produced At time """ return self.response_data['produced_at'].native def getNonce(self): """ :return: BitString of the nonce """ if self.nonce_value: return self.nonce_value.native return None def getResponses(self): """ :return: A list of Response() """ responses = list() for res in self.response_data['responses']: responses.append(Response.load(res.dump())) return responses def getSigningCertificate(self): """ :return: None or a List containing the OCSP Signing certificate chain """ ret = list() _certchain = self.basic_ocsp_response['certs'] if not _certchain: return None for c in _certchain: ret.append(cert.Certificate().load(c.dump())) if len(ret) == 0: return None return ret def getSignatureAlgorithm(self): """ :return: None or a string of the signature algorithm """ _signedDigestAlgo = self.basic_ocsp_response['signature_algorithm'] if not _signedDigestAlgo: return None _sigOid = _signedDigestAlgo['algorithm'] if not _sigOid: return None return _sigOid.map(_sigOid.dotted).encode('utf8') def getSignature(self): """ :return: None or a BitString of the signature """ _signature = self.basic_ocsp_response['signature'] if not _signature: return None return _signature.native def isValid(self, filter=None): """ :return: True or an error """ for res in self.getResponses(): certId = res.getCertId() if not filter or certId in filter: res.isValid() self.verifySignature() return True def verifySignature(self): """ :return: None or the signing certificate """ return verifySignature(self) def prettify(self, space=4, file=sys.stdout): util.prettify('OCSPResponse', self, space=space, file=file) def printCLI(self, filter=None, prefix='', space=4, file=sys.stdout, issuer=None): """ filter list of certIDs: (issuer_name_hash, issuer_key_hash, serial) space indent space size (default: 4) file file to print to (default: stdout) """ padding = ' '*space status = self.getStatus() print('{}Response Status:'.format(prefix), status, file=file) if status == 'successful': print('{}Responder ID:'.format(prefix), self.getResponderId(), file=file) print('{}Responses'.format(prefix), file=file) for res in self.getResponses(): certId = res.getCertId() if not filter or certId in filter: res.printCLI(prefix=prefix, space=space, file=file) if 
self.getNonce(): print('{}Nonce:'.format(prefix), self.getNonce().encode('hex'), file=file) # Signature print('{}Signature Algorithm:'.format(prefix), self.getSignatureAlgorithm(), file=file) try: signer = self.verifySignature() print('{}Signature Verify:'.format(prefix), bool(signer), '-', signer.getSubject(), file=file) except error.InvalidOCSPSignature as e: print('{}Signature Verify:'.format(prefix), str(e), file=file) # Signing certificate _certificates = self.getSigningCertificate() if not _certificates: return print('{}OCSP-Signing Certificate Chain'.format(prefix), file=file) for c in _certificates: print('{}{}Issuer:'.format(prefix, padding), c.getIssuer(), file=file) print('{}{}Subject:'.format(prefix, padding), c.getSubject(), file=file) print('{}{}Not Before:'.format(prefix, padding), c.getNotBefore(), file=file) print('{}{}Not After:'.format(prefix, padding), c.getNotAfter(), file=file) try: c.isValid() print('{}{}Valid: True'.format(prefix, padding), file=file) except error.Error as e: print('{}{}Valid:'.format(prefix, padding), str(e), file=file) if issuer: try: c.verifySignature(issuer) print('{}{}Signature Verify: True'.format(prefix, padding), file=file) except error.InvalidIssuerSignature as e: print('{}{}Signature Verify: False -'.format(prefix, padding), str(e), file=file) print('{}{}--'.format(prefix, padding), file=file) def buildOCSPRequest(*reqs, **kwargs): """ reqs (cert, serial), ... kwargs 'nonce' True to generate a nonce :return: OCSPRequest object """ ocspBuilder = OCSPRequestBuilder() for c, serial in reqs: ocspBuilder.addRequest(c, int(serial)) if 'nonce' in kwargs and kwargs['nonce']: ocspBuilder.generateNonce() return ocspBuilder.build() def ocspPostRequest(url, ocspReq, timeout=1): if url[-1] == '/': url = url[:-1] data = ocspReq if isinstance(ocspReq, OCSPRequest): data = ocspReq.toBitString() headers = { 'Content-Type' : 'application/ocsp-request', 'Accept' : 'application/ocsp-response', 'User-Agent' : USER_AGENT } req = URLRequest(url, data=data, headers=headers) res = urlopen(req, timeout=timeout) if res: return OCSPResponse.fromBitString(res.read()) return None def ocspGetRequest(url, ocspReq, timeout=1): if url[-1] == '/': url = url[:-1] data = ocspReq if isinstance(ocspReq, OCSPRequest): data = ocspReq.toBitString() headers = { 'Accept' : 'application/ocsp-response', 'User-Agent' : USER_AGENT } encoded = urlquote(b64encode(data), safe='') # urlquote is the imported quote(); percent-encode '+', '/' and '=' in the base64 blob req = URLRequest('{}/{}'.format(url, encoded), headers=headers) res = urlopen(req, timeout=timeout) if res: return OCSPResponse.fromBitString(res.read()) return None def verifySignature(ocspRes): """ :return: The signing certificate or an error """ _algo = ocspRes.getSignatureAlgorithm() _sig = ocspRes.getSignature() if not _algo or not _sig: raise error.InvalidOCSPSignature() _msg = ocspRes.basic_ocsp_response['tbs_response_data'].dump() _certificates = ocspRes.getSigningCertificate() if not _certificates: raise error.InvalidOCSPSignature() for c in _certificates: _key = c.getPublicKey() try: if bool(verifyocsp(_algo, _msg, _sig, _key)): return c except
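# --- Illustrative aside: a hedged usage sketch of the helpers above; `ca_cert` (a
# cert.Certificate for the issuing CA), the serial number and the responder URL are
# placeholders supplied by the caller.
def check_revocation_example(ca_cert, serial, responder_url):
    # build a nonce-protected request for a single certificate and POST it
    ocsp_req = buildOCSPRequest((ca_cert, serial), nonce=True)
    ocsp_res = ocspPostRequest(responder_url, ocsp_req, timeout=5)
    # isValid() checks freshness and status of every single response and then
    # verifies the responder signature, raising an error.* exception on failure
    return ocsp_res is not None and ocsp_res.isValid()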
GRR server URL. grr_auth: Tuple containing a (username, password) combination. hunt_id: ID of GRR hunt to retrieve results from. approvers: list of GRR approval recipients. verbose: toggle for verbose output. """ super(GRRHuntDownloader, self).__init__( reason, grr_server_url, grr_auth, approvers=approvers, verbose=verbose) self.hunt_id = hunt_id self._hunt = self.grr_api.Hunt(self.hunt_id).Get() @staticmethod def launch_collector( reason, grr_server_url, grr_auth, hunt_id, approvers='', verbose=False): """Downloads the files found during a given GRR Hunt. Args: reason: justification for GRR access. grr_server_url: GRR server URL. grr_auth: Tuple containing a (username, password) combination. hunt_id: GRR Hunt id to download files from. approvers: comma-separated list of GRR approval recipients. verbose: toggle for verbose output. Returns: A list containing the started Hunt downloader collector. """ approvers = [approver for approver in approvers.split(',') if approver] hunt_downloader = GRRHuntDownloader( reason, grr_server_url, grr_auth, hunt_id, approvers, verbose) hunt_downloader.start() return [hunt_downloader] class GRRHostCollector(BaseCollector): """Collect artifacts with GRR. Attributes: output_path: Path to where to store collected items. grr_api: GRR HTTP API client. host: Target of GRR collection. reason: Justification for GRR access. approvers: list of GRR approval recipients. """ _CHECK_APPROVAL_INTERVAL_SEC = 10 _CHECK_FLOW_INTERVAL_SEC = 10 _CLIENT_ID_REGEX = re.compile(r'^c\.[0-9a-f]{16}$', re.IGNORECASE) def __init__( self, hostname, reason, grr_server_url, grr_auth, approvers=None, verbose=False, keepalive=False): """Initializes a GRR collector. Args: hostname: hostname of machine. reason: justification for GRR access. grr_server_url: GRR server URL. grr_auth: Tuple containing a (username, password) combination. approvers: list of GRR approval recipients. verbose: toggle for verbose output. keepalive: toggle for scheduling a KeepAlive flow. """ super(GRRHostCollector, self).__init__(verbose=verbose) if approvers is None: approvers = [] self.output_path = tempfile.mkdtemp() self.grr_api = grr_api.InitHttp(api_endpoint=grr_server_url, auth=grr_auth) self.host = hostname self.reason = reason self.approvers = approvers self._client_id = self._GetClientId(hostname) self._client = None self.keepalive = keepalive self.flow_id = None def collect(self): """Collect artifacts. Not implemented, as this is an abstract class. """ raise NotImplementedError def _GetClientId(self, hostname): """Search GRR by hostname and get the latest active client. Args: hostname: hostname to search for. Returns: str: ID of most recently active client. Raises: RuntimeError: if no client ID found for hostname. 
""" if self._CLIENT_ID_REGEX.match(hostname): return hostname # Search for the hostname in GRR syslog.syslog('Searching for client') self.console_out.VerboseOut('Searching for client: {0:s}'.format(hostname)) search_result = self.grr_api.SearchClients(hostname) result = {} for client in search_result: client_id = client.client_id client_fqdn = client.data.os_info.fqdn client_last_seen_at = client.data.last_seen_at if hostname.lower() in client_fqdn.lower(): result[client_id] = client_last_seen_at if not result: raise RuntimeError('Could not get client_id for {0:s}'.format(hostname)) active_client_id = sorted(result, key=result.get, reverse=True)[0] last_seen_timestamp = result[active_client_id] # Remove microseconds and create datetime object last_seen_datetime = datetime.datetime.utcfromtimestamp( last_seen_timestamp / 1000000) # Timedelta between now and when the client was last seen, in minutes. # First, count total seconds. This will return a float. last_seen_seconds = ( datetime.datetime.utcnow() - last_seen_datetime).total_seconds() last_seen_minutes = int(round(last_seen_seconds)) / 60 syslog.syslog('{0:s}: Found active client'.format(active_client_id)) self.console_out.VerboseOut( 'Found active client: {0:s}'.format(active_client_id)) self.console_out.VerboseOut( 'Client last seen: {0:s} ({1:d} minutes ago)'.format( last_seen_datetime.strftime('%Y-%m-%dT%H:%M:%S+0000'), last_seen_minutes)) return active_client_id def _GetClient(self, client_id, reason, approvers): """Get GRR client dictionary and make sure valid approvals exist. Args: client_id: GRR client ID. reason: justification for GRR access. approvers: comma-separated list of GRR approval recipients. Returns: GRR API Client object Raises: ValueError: if no approvals exist and no approvers are specified. """ client = self.grr_api.Client(client_id) self.console_out.VerboseOut('Checking for client approval') try: client.ListFlows() except grr_errors.AccessForbiddenError: syslog.syslog('{0:s}: No valid client approval found'.format(client_id)) self.console_out.VerboseOut('No valid client approval found') if not approvers: raise ValueError( 'GRR client needs approval but no approvers specified ' '(hint: use --approvers)') self.console_out.VerboseOut( 'Client approval request sent to: {0:s} (reason: {1:s})'.format( approvers, reason)) self.console_out.VerboseOut( 'Waiting for approval (this can take a while...)') # Send a request for approval and wait until there is a valid one # available in GRR. client.CreateApproval(reason=reason, notified_users=approvers) syslog.syslog('{0:s}: Request for client approval sent'.format(client_id)) while True: try: client.ListFlows() break except grr_errors.AccessForbiddenError: time.sleep(self._CHECK_APPROVAL_INTERVAL_SEC) syslog.syslog('{0:s}: Client approval is valid'.format(client_id)) self.console_out.VerboseOut('Client approval is valid') return client.Get() def _LaunchFlow(self, name, args): """Create specified flow, setting KeepAlive if requested. Args: name: string containing flow name. args: proto (*FlowArgs) for type of flow, as defined in GRR flow proto. 
Returns: string containing ID of launched flow """ # Start the flow and get the flow ID flow = self._client.CreateFlow(name=name, args=args) flow_id = flow.flow_id syslog.syslog('Flow {0:s}: Scheduled'.format(flow_id)) self.console_out.VerboseOut('Flow {0:s}: Scheduled'.format(flow_id)) if self.keepalive: flow_name = 'KeepAlive' flow_args = flows_pb2.KeepAliveArgs() keepalive_flow = self._client.CreateFlow(name=flow_name, args=flow_args) syslog.syslog('KeepAlive scheduled') self.console_out.VerboseOut( 'KeepAlive Flow:{0:s} scheduled'.format(keepalive_flow.flow_id)) return flow_id def _AwaitFlow(self, flow_id): """Awaits flow completion. Args: flow_id: string containing ID of flow to await. Raises: RuntimeError: if flow error encountered. """ # Wait for the flow to finish self.console_out.VerboseOut('Flow {0:s}: Waiting to finish'.format(flow_id)) while True: try: status = self._client.Flow(flow_id).Get().data except grr_errors.UnknownError: raise RuntimeError( 'Unable to stat flow {0:s} for host {1:s}'.format( flow_id, self.host)) state = status.state if state == flows_pb2.FlowContext.ERROR: # TODO(jbn): If one artifact fails, what happens? Test. raise RuntimeError( 'Flow {0:s}: FAILED! Backtrace from GRR:\n\n{1:s}'.format( flow_id, status.context.backtrace)) elif state == flows_pb2.FlowContext.TERMINATED: syslog.syslog('Flow {0:s}: Complete'.format(flow_id)) self.console_out.VerboseOut( 'Flow {0:s}: Finished successfully'.format(flow_id)) break time.sleep(self._CHECK_FLOW_INTERVAL_SEC) # Download the files collected by the flow syslog.syslog('Flow {0:s}: Downloading artifacts'.format(flow_id)) self.console_out.VerboseOut( 'Flow {0:s}: Downloading artifacts'.format(flow_id)) collected_file_path = self._DownloadFiles(flow_id) if collected_file_path: syslog.syslog('Flow {0:s}: Downloaded artifacts'.format(flow_id)) self.console_out.VerboseOut( 'Flow {0:s}: Downloaded: {1:s}'.format(flow_id, collected_file_path)) def PrintStatus(self): """Print status of flow. Raises: RuntimeError: if error encountered getting flow data. """ self._client = self._GetClient(self._client_id, self.reason, self.approvers) try: status = self._client.Flow(self.flow_id).Get().data except grr_errors.UnknownError: raise RuntimeError( 'Unable to stat flow {0:s} for host {1:s}'.format( self.flow_id, self.host)) state = status.state if state == flows_pb2.FlowContext.ERROR: msg = 'ERROR' elif state == flows_pb2.FlowContext.TERMINATED: msg = 'Complete' elif state == flows_pb2.FlowContext.RUNNING: msg = 'Running...' self.console_out.StdOut( 'Status of flow {0:s}: {1:s}\n'.format(self.flow_id, msg)) def _DownloadFiles(self, flow_id): """Download files from the specified flow. Args: flow_id: GRR flow ID. Returns: str: path of downloaded files. 
""" if not os.path.isdir(self.output_path): os.makedirs(self.output_path) output_file_path = os.path.join( self.output_path, '.'.join((flow_id, 'zip'))) if os.path.exists(output_file_path): self.console_out.StdOut( '{0:s} already exists: Skipping'.format(output_file_path)) return None flow = self._client.Flow(flow_id) file_archive = flow.GetFilesArchive() file_archive.WriteToFile(output_file_path) # Unzip archive for processing and remove redundant zip with zipfile.ZipFile(output_file_path) as archive: archive.extractall(path=self.output_path) os.remove(output_file_path) return output_file_path @property def collection_name(self): """Name for the collection of collected artifacts.""" collection_name = self._client.data.os_info.fqdn self.console_out.VerboseOut( 'Artifact collection name: {0:s}'.format(collection_name)) return self._client.data.os_info.fqdn @staticmethod def launch_collector( hosts, flow_id, reason, grr_server_url, grr_auth, artifact_list='', file_list='', use_tsk=False, approvers='', verbose=False, keepalive=False, status=False): """Launches a series of GRR Artifact collectors. Iterates over the values of hosts and starts a GRRArtifactCollector for each. Args: hosts: List of strings representing hosts to collect artifacts from. flow_id: ID of GRR flow to retrieve artifacts from. reason: justification for GRR access (usually a SEM ID). grr_server_url: GRR server URL. grr_auth: Tuple containing a (username, password) combination. artifact_list: comma-separated list of GRR-defined artifacts. file_list: comma-separated list of GRR file paths. use_tsk: toggle for use_tsk flag on GRR flow. approvers: comma-separated list of GRR approval recipients. verbose: toggle for verbose output. keepalive: toggle for scheduling a KeepAlive flow. status: print the status of each collector. Returns: A list of started collectors for each path """ collectors = [] approvers = [approver for approver in approvers.split(',') if approver] file_list = [item for item in file_list.split(',') if item] artifact_list = [item for item in artifact_list.split(',') if item] for hostname in hosts.split(','): host_collectors = [] # Launch artifact collector if artifacts present or if no file/flow passed if artifact_list or not (file_list or flow_id): host_collectors.append( GRRArtifactCollector( hostname, reason, grr_server_url, grr_auth, artifact_list, use_tsk, approvers, verbose=verbose, keepalive=keepalive)) if file_list: host_collectors.append( GRRFileCollector( hostname, reason, grr_server_url, grr_auth, file_list, approvers, verbose=verbose, keepalive=keepalive)) if flow_id: host_collectors.append( GRRFlowCollector( hostname, reason, grr_server_url, grr_auth, flow_id, approvers, verbose=verbose)) for collector in host_collectors: if flow_id and status: collector.PrintStatus() else: collector.start() collectors.append(collector) return collectors class GRRArtifactCollector(GRRHostCollector): """Artifact collector for GRR flows. Attributes: artifacts: comma-separated list of GRR-defined artifacts. use_tsk: Toggle for use_tsk flag on GRR flow. reason: Justification for GRR access. approvers: list of GRR approval recipients. 
""" _DEFAULT_ARTIFACTS_LINUX = [ 'LinuxAuditLogs', 'LinuxAuthLogs', 'LinuxCronLogs', 'LinuxWtmp', 'AllUsersShellHistory', 'ZeitgeistDatabase' ] _DEFAULT_ARTIFACTS_DARWIN = [ 'OSXAppleSystemLogs', 'OSXAuditLogs', 'OSXBashHistory', 'OSXInstallationHistory', 'OSXInstallationLog', 'OSXInstallationTime', 'OSXLaunchAgents', 'OSXLaunchDaemons', 'OSXMiscLogs', 'OSXRecentItems', 'OSXSystemLogs', 'OSXUserApplicationLogs', 'OSXQuarantineEvents' ] _DEFAULT_ARTIFACTS_WINDOWS = [ 'WindowsAppCompatCache', 'WindowsEventLogs', 'WindowsPrefetchFiles', 'WindowsScheduledTasks', 'WindowsSearchDatabase', 'WindowsSuperFetchFiles', 'WindowsSystemRegistryFiles', 'WindowsUserRegistryFiles', 'WindowsXMLEventLogTerminalServices' ] def __init__( self, hostname, reason, grr_server_url, grr_auth, artifacts=None, use_tsk=False,
<gh_stars>0 """Testing the Decorators that check a functions input or output.""" import typing from asyncio import AbstractEventLoop import numpy as np import pandas as pd import pytest from pandera import ( Check, Column, DataFrameSchema, DateTime, Field, Float, Int, SchemaModel, String, check_input, check_io, check_output, check_types, errors, ) from pandera.engines.pandas_engine import Engine from pandera.typing import DataFrame, Index, Series try: from typing import Literal # type: ignore except ImportError: # Remove this after dropping python 3.6 from typing_extensions import Literal # type: ignore def test_check_function_decorators() -> None: """ Tests 5 different methods that are common across the @check_input and @check_output decorators. """ in_schema = DataFrameSchema( { "a": Column( Int, [ Check(lambda x: x >= 1, element_wise=True), Check(lambda s: s.mean() > 0), ], ), "b": Column( String, Check(lambda x: x in ["x", "y", "z"], element_wise=True), ), "c": Column( DateTime, Check( lambda x: pd.Timestamp("2018-01-01") <= x, element_wise=True, ), ), "d": Column( Float, Check(lambda x: np.isnan(x) or x < 3, element_wise=True), nullable=True, ), }, ) out_schema = DataFrameSchema( { "e": Column(String, Check(lambda s: s == "foo")), "f": Column( String, Check(lambda x: x in ["a", "b"], element_wise=True) ), } ) # case 1: simplest path test - df is first argument and function returns # single dataframe as output. @check_input(in_schema) @check_output(out_schema) def test_func1(dataframe, x): # pylint: disable=W0613 # disables unused-arguments because handling the second argument is # what is being tested. return dataframe.assign(e="foo", f=["a", "b", "a"]) # case 2: input and output validation using positional arguments @check_input(in_schema, 1) @check_output(out_schema, 0) def test_func2(x, dataframe): return dataframe.assign(e="foo", f=["a", "b", "a"]), x # case 3: dataframe to validate is called as a keyword argument and the # output is in a dictionary @check_input(in_schema, "in_dataframe") @check_output(out_schema, "out_dataframe") def test_func3(x, in_dataframe=None): return { "x": x, "out_dataframe": in_dataframe.assign(e="foo", f=["a", "b", "a"]), } # case 4: dataframe is a positional argument but the obj_getter in the # check_input decorator refers to the argument name of the dataframe @check_input(in_schema, "dataframe") @check_output(out_schema) def test_func4(x, dataframe): # pylint: disable=W0613 # disables unused-arguments because handling the second argument is # what is being tested. return dataframe.assign(e="foo", f=["a", "b", "a"]) df = pd.DataFrame( { "a": [1, 2, 3], "b": ["x", "y", "z"], "c": [ pd.Timestamp("2018-01-01"), pd.Timestamp("2018-01-03"), pd.Timestamp("2018-01-02"), ], "d": [np.nan, 1.0, 2.0], } ) # call function with a dataframe passed as a positional argument df = test_func1(df, "foo") assert isinstance(df, pd.DataFrame) # call function with a dataframe passed as a first keyword argument df = test_func1(dataframe=df, x="foo") assert isinstance(df, pd.DataFrame) # call function with a dataframe passed as a second keyword argument df = test_func1(x="foo", dataframe=df) assert isinstance(df, pd.DataFrame) df, x = test_func2("foo", df) assert x == "foo" assert isinstance(df, pd.DataFrame) result = test_func3("foo", in_dataframe=df) assert result["x"] == "foo" assert isinstance(df, pd.DataFrame) # case 5: even if the pandas object to validate is called as a positional # argument, the check_input decorator should still be able to handle # it. 
result = test_func3("foo", df) assert result["x"] == "foo" assert isinstance(df, pd.DataFrame) df = test_func4("foo", df) assert x == "foo" assert isinstance(df, pd.DataFrame) def test_check_function_decorator_errors() -> None: """Test that the check_input and check_output decorators error properly.""" # case 1: checks that the input and output decorators error when different # types are passed in and out @check_input(DataFrameSchema({"column1": Column(Int)})) @check_output(DataFrameSchema({"column2": Column(Float)})) def test_func(df): return df with pytest.raises( errors.SchemaError, match=r"^error in check_input decorator of function", ): test_func(pd.DataFrame({"column2": ["a", "b", "c"]})) with pytest.raises( errors.SchemaError, match=r"^error in check_input decorator of function", ): test_func(df=pd.DataFrame({"column2": ["a", "b", "c"]})) with pytest.raises( errors.SchemaError, match=r"^error in check_output decorator of function", ): test_func(pd.DataFrame({"column1": [1, 2, 3]})) # case 2: check that if the input decorator refers to an index that's not # in the function signature, it will fail in a way that's easy to interpret @check_input(DataFrameSchema({"column1": Column(Int)}), 1) def test_incorrect_check_input_index(df): return df with pytest.raises( IndexError, match=r"^error in check_input decorator of function" ): test_incorrect_check_input_index(pd.DataFrame({"column1": [1, 2, 3]})) def test_check_input_method_decorators() -> None: """Test the check_input and check_output decorator behaviours when the dataframe is changed within the function being checked""" in_schema = DataFrameSchema({"column1": Column(String)}) out_schema = DataFrameSchema({"column2": Column(Int)}) dataframe = pd.DataFrame({"column1": ["a", "b", "c"]}) def _transform_helper(df): return df.assign(column2=[1, 2, 3]) class TransformerClass: """Contains functions with different signatures representing the way that the decorators can be called.""" # pylint: disable=E0012,C0111,C0116,W0613,R0201 # disables missing-function-docstring as this is a factory method # disables unused-arguments because handling the second argument is # what is being tested and this is intentional. # disables no-self-use because having TransformerClass with functions # is cleaner. 
@check_input(in_schema) @check_output(out_schema) def transform_first_arg(self, df): return _transform_helper(df) @check_input(in_schema) @check_output(out_schema) def transform_first_arg_with_two_func_args(self, df, x): return _transform_helper(df) @check_input(in_schema, 0) @check_output(out_schema) def transform_first_arg_with_list_getter(self, df): return _transform_helper(df) @check_input(in_schema, 1) @check_output(out_schema) def transform_secord_arg_with_list_getter(self, x, df): return _transform_helper(df) @check_input(in_schema, "df") @check_output(out_schema) def transform_secord_arg_with_dict_getter(self, x, df): return _transform_helper(df) def _assert_expectation(result_df): assert isinstance(result_df, pd.DataFrame) assert "column2" in result_df.columns transformer = TransformerClass() # call method with a dataframe passed as a positional argument _assert_expectation(transformer.transform_first_arg(dataframe)) # call method with a dataframe passed as a first keyword argument _assert_expectation(transformer.transform_first_arg(df=dataframe)) # call method with a dataframe passed as a second keyword argument _assert_expectation( transformer.transform_first_arg_with_two_func_args( x="foo", df=dataframe ) ) _assert_expectation( transformer.transform_first_arg_with_list_getter(dataframe) ) _assert_expectation( transformer.transform_secord_arg_with_list_getter(None, dataframe) ) _assert_expectation( transformer.transform_secord_arg_with_dict_getter(None, dataframe) ) def test_check_io() -> None: # pylint: disable=too-many-locals """Test that check_io correctly validates/invalidates data.""" schema = DataFrameSchema({"col": Column(Int, Check.gt(0))}) @check_io(df1=schema, df2=schema, out=schema) def simple_func(df1, df2): return df1.assign(col=df1["col"] + df2["col"]) @check_io(df1=schema, df2=schema) def simple_func_no_out(df1, df2): return df1.assign(col=df1["col"] + df2["col"]) @check_io(out=(1, schema)) def output_with_obj_getter(df): return None, df @check_io(out=[(0, schema), (1, schema)]) def multiple_outputs_tuple(df): return df, df @check_io( out=[(0, schema), ("foo", schema), (lambda x: x[2]["bar"], schema)] ) def multiple_outputs_dict(df): return {0: df, "foo": df, 2: {"bar": df}} @check_io(df=schema, out=schema, head=1) def validate_head(df): return df @check_io(df=schema, out=schema, tail=1) def validate_tail(df): return df @check_io(df=schema, out=schema, sample=1, random_state=100) def validate_sample(df): return df @check_io(df=schema, out=schema, lazy=True) def validate_lazy(df): return df @check_io(df=schema, out=schema, inplace=True) def validate_inplace(df): return df df1 = pd.DataFrame({"col": [1, 1, 1]}) df2 = pd.DataFrame({"col": [2, 2, 2]}) invalid_df = pd.DataFrame({"col": [-1, -1, -1]}) expected = pd.DataFrame({"col": [3, 3, 3]}) for fn, valid, invalid, out in [ (simple_func, [df1, df2], [invalid_df, invalid_df], expected), (simple_func_no_out, [df1, df2], [invalid_df, invalid_df], expected), (output_with_obj_getter, [df1], [invalid_df], (None, df1)), (multiple_outputs_tuple, [df1], [invalid_df], (df1, df1)), ( multiple_outputs_dict, [df1], [invalid_df], {0: df1, "foo": df1, 2: {"bar": df1}}, ), (validate_head, [df1], [invalid_df], df1), (validate_tail, [df1], [invalid_df], df1), (validate_sample, [df1], [invalid_df], df1), (validate_lazy, [df1], [invalid_df], df1), (validate_inplace, [df1], [invalid_df], df1), ]: result = fn(*valid) # type: ignore[operator] if isinstance(result, pd.Series): assert (result == out).all() if isinstance(result, 
pd.DataFrame): assert (result == out).all(axis=None) else: assert result == out expected_error = ( errors.SchemaErrors if fn is validate_lazy else errors.SchemaError ) with pytest.raises(expected_error): fn(*invalid) # type: ignore[operator] # invalid out schema types for out_schema in [1, 5.0, "foo", {"foo": "bar"}, ["foo"]]: # mypy finds correctly the wrong usage @check_io(out=out_schema) # type: ignore[arg-type] def invalid_out_schema_type(df): return df with pytest.raises((TypeError, ValueError)): invalid_out_schema_type(df1) @pytest.mark.parametrize( "obj_getter", [1.5, 0.1, ["foo"], {1, 2, 3}, {"foo": "bar"}] ) def test_check_input_output_unrecognized_obj_getter(obj_getter) -> None: """ Test that check_input and check_output raise correct errors on unrecognized dataframe object getters """ schema = DataFrameSchema({"column": Column(int)}) @check_input(schema, obj_getter) def test_check_input_fn(df): return df @check_output(schema, obj_getter) def test_check_output_fn(df): return df for fn in [test_check_input_fn, test_check_output_fn]: with pytest.raises(TypeError): fn(pd.DataFrame({"column": [1, 2, 3]})) @pytest.mark.parametrize( "out,error,msg", [ (1, TypeError, None), (1.5, TypeError, None), ("foo", TypeError, None), (["foo"], ValueError, "too many values to unpack"), ( (None, "foo"), AttributeError, "'str' object has no attribute 'validate'", ), ( [(None, "foo")], AttributeError, "'str' object has no attribute 'validate'", ), ], ) def test_check_io_unrecognized_obj_getter(out, error, msg) -> None: """ Test that check_io raise correct errors on unrecognized decorator arguments """ @check_io(out=out) def test_check_io_fn(df): return df with pytest.raises(error, match=msg): test_check_io_fn(pd.DataFrame({"column": [1, 2, 3]})) # required to be a global: see # https://pydantic-docs.helpmanual.io/usage/postponed_annotations/ class OnlyZeroesSchema(SchemaModel): # pylint:disable=too-few-public-methods """Schema with a single column containing zeroes.""" a: Series[int] = Field(eq=0) def test_check_types_arguments() -> None: """Test that check_types forwards key-words arguments to validate.""" df = pd.DataFrame({"a": [0, 0]}) @check_types() def transform_empty_parenthesis( df: DataFrame[OnlyZeroesSchema], ) -> DataFrame[OnlyZeroesSchema]: # pylint: disable=unused-argument return df transform_empty_parenthesis(df) @check_types(head=1) def transform_head( df: DataFrame[OnlyZeroesSchema], # pylint: disable=unused-argument ) -> DataFrame[OnlyZeroesSchema]: return pd.DataFrame({"a": [0, 0]}) transform_head(df) @check_types(tail=1) def
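# ---------------------------------------------------------------------------
# The test above is truncated mid-definition. A plausible completion of the
# tail case, mirroring the head=1 test earlier in the file (the function name
# and body below are assumptions following that pattern, not recovered
# source):
#
#     @check_types(tail=1)
#     def transform_tail(
#         df: DataFrame[OnlyZeroesSchema],  # pylint: disable=unused-argument
#     ) -> DataFrame[OnlyZeroesSchema]:
#         # with tail=1 only the last row of the output is validated
#         return pd.DataFrame({"a": [0, 0]})
#
#     transform_tail(df)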
""" {This script plots SMF and BMF from all 3 surveys} """ # Libs from cosmo_utils.utils.stats_funcs import Stats_one_arr from cosmo_utils.utils import work_paths as cwpaths from collections import OrderedDict import matplotlib.pyplot as plt from matplotlib import rc import pandas as pd import numpy as np import argparse import math __author__ = '{<NAME>}' rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=10) rc('text', usetex=True) def read_catl(path_to_file, survey): """ Reads survey catalog from file Parameters ---------- path_to_file: string Path to survey catalog file survey: string Name of survey Returns --------- catl: pandas dataframe Survey catalog with grpcz, abs rmag and stellar mass limits volume: float Volume of survey cvar: float Cosmic variance of survey """ if survey == 'eco': columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag', 'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s', 'fc', 'grpmb', 'grpms'] # 13878 galaxies eco_buff = pd.read_csv(path_to_file,delimiter=",", header=0, \ usecols=columns) if h == 1.0: volume = 151829.26 # Survey volume without buffer [Mpc/h]^3 in h=1.0 cz_measurement = eco_buff.grpcz.values elif h == 0.7: #Survey volume without buffer [Mpc/h]^3 volume = 151829.26 * 2.915 # convert from h = 1.0 to 0.7 cz_measurement = eco_buff.cz.values cvar = 0.125 if mass == 'smf': # 6456 galaxies catl = eco_buff.loc[(cz_measurement >= 3000) & \ (cz_measurement <= 7000) & (eco_buff.absrmag.values <= -17.33) & \ (eco_buff.logmstar.values >= 8.9)] elif mass == 'bmf': # Removing stellar mass cut catl = eco_buff.loc[(cz_measurement >= 3000) & \ (cz_measurement <= 7000) & (eco_buff.absrmag.values <= -17.33)] elif survey == 'resolvea' or survey == 'resolveb': columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag', 'logmstar', 'logmgas', 'grp', 'grpn', 'grpnassoc', 'logmh', 'logmh_s', 'fc', 'grpmb', 'grpms', 'f_a', 'f_b'] # 2286 galaxies resolve_live18 = pd.read_csv(path_to_file, delimiter=",", header=0, \ usecols=columns) if h == 1.0: volume = 13172.384 # Survey volume without buffer [Mpc/h]^3 cz_measurement = resolve_live18.grpcz.values elif h == 0.7: #Survey volume without buffer [Mpc/h]^3 volume = 13172.384 * 2.915 # convert from h = 1.0 to 0.7 cz_measurement = resolve_live18.cz.values cvar = 0.30 if survey == 'resolvea': if mass == 'smf': catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) & \ (cz_measurement > 4500) & (cz_measurement < 7000) & \ (resolve_live18.absrmag.values < -17.33) & \ (resolve_live18.logmstar.values >= 8.9)] elif mass == 'bmf': catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) & \ (cz_measurement > 4500) & (cz_measurement < 7000) & \ (resolve_live18.absrmag.values < -17.33)] elif survey == 'resolveb': if mass == 'smf': # 487 - cz, 369 - grpcz catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) & \ (cz_measurement > 4500) & (cz_measurement < 7000) & \ (resolve_live18.absrmag.values < -17) & \ (resolve_live18.logmstar.values >= 8.7)] elif mass == 'bmf': catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) & \ (cz_measurement > 4500) & (cz_measurement < 7000) & \ (resolve_live18.absrmag.values < -17)] if h == 1.0: volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3 elif h == 0.7: #Survey volume without buffer [Mpc/h]^3 volume = 4709.8373 * 2.915 # convert from h = 1.0 to 0.7 cvar = 0.58 return catl, volume, cvar def diff_smf(mstar_arr, volume, cvar_err): """ Calculates differential stellar mass function Parameters ---------- mstar_arr: numpy array Array of stellar 
masses volume: float Volume of survey or simulation cvar_err: float Cosmic variance of survey h1_bool: boolean True if units of masses are h=1, False if units of masses are not h=1 Returns --------- maxis: array Array of x-axis mass values phi: array Array of y-axis values err_tot: array Array of error values per bin bins: array Array of bin edge values """ if h == 1.0: logmstar_arr = np.log10((10**mstar_arr) / 2.041) bin_num = 12 elif h == 0.7: logmstar_arr = mstar_arr bin_num = 16 if survey == 'eco' or survey == 'resolvea': bins = np.linspace(8.9, 11.8, bin_num) print("{0} : {1}".format(survey,len(logmstar_arr[logmstar_arr>=8.9]))) elif survey == 'resolveb': bins = np.linspace(8.7, 11.8, bin_num) print("{0} : {1}".format(survey,len(logmstar_arr[logmstar_arr>=8.7]))) # Unnormalized histogram and bin edges phi, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins dm = edg[1] - edg[0] # Bin width maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers # Normalized to volume and bin width err_poiss = np.sqrt(phi) / (volume * dm) err_cvar = cvar_err #/ (volume * dm) print('Poisson error: {0}'.format(err_poiss)) err_tot = np.sqrt(err_cvar**2 + err_poiss**2) phi = phi / (volume * dm) # not a log quantity err_cvar = err_cvar*phi print('Cosmic variance error: {0}'.format(err_cvar)) return maxis, phi, err_tot, bins def calc_bary(mstar_arr, mgas_arr): """ Calculates baryonic mass from stellar and gas mass Parameters ---------- mstar_arr: numpy array Array of stellar masses mgass_arr: numpy array Array of gas masses Returns --------- logmbary: numpy array Array of baryonic masses bin_num: int Number of bins to use """ if h == 1.0: logmbary = np.log10(((10**mstar_arr) + (10**mgas_arr)) / 2.041) bin_num = 12 elif h == 0.7: logmbary = np.log10((10**mstar_arr) + (10**mgas_arr)) bin_num = 16 return logmbary, bin_num def diff_bmf(logmbary_arr, volume, cvar_err, bin_num): """ Calculates differential baryonic mass function Parameters ---------- mass_arr: numpy array Array of baryonic masses volume: float Volume of survey cvar_err: float Cosmic variance of survey bin_num: int Number of bins to use Returns --------- maxis: array Array of x-axis mass values phi: array Array of y-axis values err_tot: array Array of error values per bin bins: array Array of bin edge values """ # Unnormalized histogram and bin edges if survey == 'eco' or survey == 'resolvea': bins = np.linspace(9.4,12.0,bin_num) print("{0} : {1}".format(survey,len(logmbary_arr[logmbary_arr>=9.4]))) if survey == 'resolveb': bins = np.linspace(9.1,12.0,bin_num) print("{0} : {1}".format(survey,len(logmbary_arr[logmbary_arr>=9.1]))) phi, edg = np.histogram(logmbary_arr, bins=bins) # paper used 17 bins dm = edg[1] - edg[0] # Bin width maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. 
bin centers # Normalized to volume and bin width err_poiss = np.sqrt(phi) / (volume * dm) err_cvar = cvar_err #/ (volume * dm) print('Poisson error: {0}'.format(err_poiss)) err_tot = np.sqrt(err_cvar**2 + err_poiss**2) phi = phi / (volume * dm) # not a log quantity err_cvar = phi * err_cvar print('Cosmic variance error: {0}'.format(err_cvar)) err_tot = err_cvar * phi return maxis, phi, err_tot, bins def plot_massfunc(maxis_70, phi_70, err_70, maxis_100, phi_100, err_100): """ Plot SMF from data, best fit param values and param values corresponding to 68th percentile 1000 lowest chi^2 values Parameters ---------- maxis_70: array Array of x-axis mass values for data SMF assuming h=0.7 phi_70: array Array of y-axis values for data SMF assuming h=0.7 err_70: array Array of error values per bin of data SMF assuming h=0.7 maxis_100: array Array of x-axis mass values for data SMF assuming h=1.0 phi_100: array Array of y-axis values for data SMF assuming h=1.0 err_100: array Array of error values per bin of data SMF assuming h=1.0 Returns --------- Nothing; SMF plot is saved in figures repository """ if survey == 'resolvea': line_label = 'RESOLVE-A' elif survey == 'resolveb': line_label = 'RESOLVE-B' elif survey == 'eco': line_label = 'ECO' fig1 = plt.figure(figsize=(10,10)) plt.plot(maxis_70,phi_70,'k-') plt.fill_between(maxis_70,phi_70-err_70,phi_70+err_70,color='g',alpha=0.3) plt.errorbar(maxis_70,phi_70,yerr=err_70,color='k',fmt='-s',ecolor='k',\ markersize=4,capsize=5,capthick=0.5,label='{0} h=0.7'.format(line_label),\ zorder=10) plt.plot(maxis_100,phi_100,'k--') plt.fill_between(maxis_100,phi_100-err_100,phi_100+err_100,color='b',alpha=0.3) plt.errorbar(maxis_100,phi_100,yerr=err_100,color='k',fmt='--s',ecolor='k',\ markersize=4,capsize=5,capthick=0.5,label='{0} h=1.0'.format(line_label),\ zorder=10) plt.yscale('log') plt.ylim(10**-5,10**-1) if mass == 'smf': plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h^{-2}} \right]$', fontsize=15) # if h == 0.7: # plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h_{70}}^{-2} \right]$', fontsize=15) # elif h == 1.0: # plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h_{100}}^{-2} \right]$', fontsize=15) elif mass == 'bmf': plt.xlabel(r'\boldmath$\log_{10}\ M_{bary} \left[\mathrm{M_\odot}\, \mathrm{h^{-2}} \right]$', fontsize=15) # if h == 0.7: # plt.xlabel(r'\boldmath$\log_{10}\ M_{bary} \left[\mathrm{M_\odot}\, \mathrm{h_{70}}^{-2} \right]$', fontsize=15) # elif h == 1.0: # plt.xlabel(r'\boldmath$\log_{10}\ M_{bary} \left[\mathrm{M_\odot}\, \mathrm{h_{100}}^{-2} \right]$', fontsize=15) plt.ylabel(r'\boldmath$\Phi \left[\mathrm{dex}^{-1}\,\mathrm{Mpc}^{-3}\,\mathrm{h^{3}} \right]$', fontsize=15) handles, labels = plt.gca().get_legend_handles_labels() by_label = OrderedDict(zip(labels, handles)) plt.legend(by_label.values(), by_label.keys(), loc='best',prop={'size': 10}) plt.show() # plt.savefig(path_to_figures + '{0}_{1}.png'.format(mass,survey)) def plot_smf_bmf(maxis_smf, maxis_bmf, phi_smf, phi_bmf, err_smf, err_bmf): """ Plot SMF and BMF from data Parameters ---------- maxis_smf: array Array of x-axis mass values for data SMF assuming h=1.0 phi_smf: array Array of y-axis values for data SMF assuming h=1.0 err_smf: array Array of error values per bin of data SMF assuming h=1.0 maxis_bmf: array Array of x-axis mass values for data BMF assuming h=1.0 phi_bmf: array Array of y-axis values for data BMF assuming h=1.0 err_bmf: array Array of error values per bin of data BMF
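# ---------------------------------------------------------------------------
# Worked numeric sketch (not in the original script) of the constants and the
# normalization used above. The magic numbers are just powers of h = 0.7:
# volumes scale as h**-3 (0.7**-3 ~ 2.915) and stellar masses as h**-2
# (0.7**-2 ~ 2.041). The toy counts below are arbitrary.
import numpy as np

counts = np.array([120, 80, 30, 8])          # galaxies per mass bin (toy)
volume = 151829.26 * (1 / 0.7) ** 3          # ECO volume converted to h=0.7
dm = (11.8 - 8.9) / 15                       # bin width in dex (16 edges)

phi = counts / (volume * dm)                 # [dex^-1 Mpc^-3]
err_poiss = np.sqrt(counts) / (volume * dm)  # Poisson error per bin
err_cvar = 0.125 * phi                       # fractional cosmic variance -> absolute
err_tot = np.sqrt(err_poiss**2 + err_cvar**2)  # combine in quadrature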
status" ) # TenderMultipleLotAuctionResourceTest def get_tender_lots_auction(self): self.app.authorization = ("Basic", ("auction", "")) response = self.app.get("/tenders/{}/auction".format(self.tender_id), status=403) self.assertEqual(response.status, "403 Forbidden") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], "Can't get auction info in current ({}) tender status".format(self.forbidden_auction_actions_status), ) self.set_status("active.auction") response = self.app.get("/tenders/{}/auction".format(self.tender_id)) self.assertEqual(response.status, "200 OK") self.assertEqual(response.content_type, "application/json") auction = response.json["data"] self.assertNotEqual(auction, self.initial_data) self.assertIn("dateModified", auction) self.assertIn("minimalStep", auction) self.assertIn("lots", auction) self.assertIn("items", auction) self.assertNotIn("procuringEntity", auction) self.assertNotIn("tenderers", auction["bids"][0]) self.assertEqual( auction["bids"][0]["lotValues"][0]["value"]["amount"], self.initial_bids[0]["lotValues"][0]["value"]["amount"] ) self.assertEqual( auction["bids"][1]["lotValues"][0]["value"]["amount"], self.initial_bids[1]["lotValues"][0]["value"]["amount"] ) self.assertEqual( auction["bids"][0]["lotValues"][1]["value"]["amount"], self.initial_bids[0]["lotValues"][1]["value"]["amount"] ) self.assertEqual( auction["bids"][1]["lotValues"][1]["value"]["amount"], self.initial_bids[1]["lotValues"][1]["value"]["amount"] ) self.set_status("active.qualification") response = self.app.get("/tenders/{}/auction".format(self.tender_id), status=403) self.assertEqual(response.status, "403 Forbidden") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], "Can't get auction info in current (active.qualification) tender status", ) def post_tender_lots_auction(self): self.app.authorization = ("Basic", ("auction", "")) lot_id = self.initial_lots[0]["id"] response = self.app.post_json("/tenders/{}/auction".format(self.tender_id), {"data": {}}, status=403) self.assertEqual(response.status, "403 Forbidden") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], "Can't report auction results in current ({}) tender status".format(self.forbidden_auction_actions_status), ) # should not affect changing status if self.initial_data["procurementMethodType"] in ("belowThreshold", "simple.defense"): with change_auth(self.app, ("Basic", ("token", ""))): self.app.post_json( f"/tenders/{self.tender_id}/complaints", {"data": test_draft_claim}, ) self.set_status("active.auction") response = self.app.post_json( "/tenders/{}/auction".format(self.tender_id), {"data": {"bids": [{"invalid_field": "invalid_value"}]}}, status=422, ) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"], [{"description": {"invalid_field": "Rogue field"}, "location": "body", "name": "bids"}], ) patch_data = { "bids": [ { "id": self.initial_bids[-1]["id"], "lotValues": [{"value": {"amount": 409, "currency": "UAH", "valueAddedTaxIncluded": True}}], } ] } response = self.app.post_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual( 
response.json["errors"][0]["description"], ["Number of auction results did not match the number of tender bids"] ) update_patch_data(self, patch_data, key="lotValues", start=-2, interval=-1) patch_data["bids"][-1]["id"] = "some_id" response = self.app.post_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual(response.json["errors"][0]["description"], {"id": ["Hash value is wrong length."]}) patch_data["bids"][-1]["id"] = "00000000000000000000000000000000" response = self.app.post_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual(response.json["errors"][0]["description"], ["Auction bids should be identical to the tender bids"]) # patch_data["bids"][-1]["id"] = self.initial_bids[0]["id"] patch_data["bids"] = [{"lotValues": [{}, {}, {}]} for b in self.initial_bids] response = self.app.post_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], ["Number of lots of auction results did not match the number of tender lots"], ) patch_data["bids"] = [{"lotValues": [{"relatedLot": lot_id}, {"relatedLot": lot_id}]} for b in self.initial_bids] response = self.app.post_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") # self.assertEqual(response.json['errors'][0]["description"], [{u'lotValues': [{u'relatedLot': [u'relatedLot should be one of lots of bid']}]}]) self.assertEqual( response.json["errors"][0]["description"], ['Auction bid.lotValues should be identical to the tender bid.lotValues'] ) num = 0 for lot in self.initial_lots: patch_data["bids"] = [{"lotValues": [{"value": {"amount": 10 ** num}} for _ in b["lotValues"]]} for b in self.initial_bids] num += 1 response = self.app.post_json(f"/tenders/{self.tender_id}/auction/{lot['id']}", {"data": patch_data}) self.assertEqual(response.status, "200 OK") self.assertEqual(response.content_type, "application/json") tender = response.json["data"] for b in tender["bids"]: self.assertEqual(b["lotValues"][0]["value"]["amount"], 1) self.assertEqual(b["lotValues"][1]["value"]["amount"], 10) self.assertEqual("active.qualification", tender["status"]) self.assertIn("tenderers", tender["bids"][0]) self.assertIn("name", tender["bids"][0]["tenderers"][0]) # self.assertIn(tender["awards"][0]["id"], response.headers['Location']) self.assertEqual(tender["awards"][0]["bid_id"], self.initial_bids[0]["id"]) self.assertEqual(tender["awards"][0]["value"]["amount"], 1) self.assertEqual(tender["awards"][0]["suppliers"], self.initial_bids[0]["tenderers"]) response = self.app.post_json("/tenders/{}/auction".format(self.tender_id), {"data": patch_data}, status=403) self.assertEqual(response.status, "403 Forbidden") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], "Can't report auction results in current (active.qualification) tender status", ) def post_tender_lots_auction_weighted_value(self): if 
self.tender_class.procurementMethodType.default not in ("openua", "openeu", "simple.defense"): self.skipTest("weightedValue is not implemented") self.app.authorization = ("Basic", ("auction", "")) self.set_status("active.auction") patch_data = {"bids": []} update_patch_data(self, patch_data, key="lotValues", with_weighted_value=True) for bid in patch_data["bids"]: bid["lotValues"] = [bid["lotValues"][0].copy() for i in self.initial_lots] for lot in self.initial_lots: response = self.app.post_json( "/tenders/{}/auction/{}".format(self.tender_id, lot["id"]), {"data": patch_data} ) self.assertEqual(response.status, "200 OK") self.assertEqual(response.content_type, "application/json") tender = response.json["data"] first_bid_weighted_amount = tender["bids"][0]["lotValues"][0]["weightedValue"]["amount"] last_bid_weighted_amount = tender["bids"][-1]["lotValues"][0]["weightedValue"]["amount"] first_bid_patch_weighted_amount = patch_data["bids"][0]["lotValues"][0]["weightedValue"]["amount"] last_bid_patch_weighted_amount = patch_data["bids"][-1]["lotValues"][0]["weightedValue"]["amount"] self.assertEqual(first_bid_weighted_amount, last_bid_patch_weighted_amount) self.assertEqual(last_bid_weighted_amount, first_bid_patch_weighted_amount) self.assertEqual("active.qualification", tender["status"]) self.assertEqual(tender["awards"][0]["weightedValue"]["amount"], first_bid_patch_weighted_amount) def patch_tender_lots_auction(self): self.app.authorization = ("Basic", ("auction", "")) lot_id = self.initial_lots[0]["id"] response = self.app.patch_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": {}}, status=403) self.assertEqual(response.status, "403 Forbidden") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], "Can't update auction urls in current ({}) tender status".format(self.forbidden_auction_actions_status), ) self.set_status("active.auction") self.check_chronograph() response = self.app.patch_json( f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": {"bids": [{"invalid_field": "invalid_value"}]}}, status=422, ) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"], [{"description": {"invalid_field": "Rogue field"}, "location": "body", "name": "bids"}], ) patch_data = { "auctionUrl": "http://auction-sandbox.openprocurement.org/tenders/{}".format(self.tender_id), "bids": [ { "id": b["id"], "participationUrl": "http://auction-sandbox.openprocurement.org/tenders/id", } for b in self.initial_bids ], } response = self.app.patch_json(f"/tenders/{self.tender_id}/auction", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"], [ { "description": [{"participationUrl": ["url should be posted for each lot of bid"]}], "location": "body", "name": "bids", } ], ) del patch_data["bids"][0]["participationUrl"] patch_data["bids"][0]["lotValues"] = [ { "participationUrl": "http://auction-sandbox.openprocurement.org/tenders/{}?key_for_bid={}".format( self.tender_id, self.initial_bids[0]["id"] ) } ] patch_data = { "lots": [{"auctionUrl": "http://auction.openprocurement.org/tenders/id"}], "bids": [ {"lotValues": [{"participationUrl": "http://auction.openprocurement.org/id"} for v in b["lotValues"]]} for b in self.initial_bids ], } response = 
self.app.patch_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"], [{'location': 'body', 'name': 'lots', 'description': ['Number of lots did not match the number of tender lots']}] ) patch_data["lots"].append({}) patch_data["bids"][1]["id"] = "some_id" response = self.app.patch_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual(response.json["errors"][0]["description"], {"id": ["Hash value is wrong length."]}) patch_data["bids"][1]["id"] = "00000000000000000000000000000000" response = self.app.patch_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual(response.json["errors"][0]["description"], ["Auction bids should be identical to the tender bids"]) patch_data["bids"][1]["id"] = self.initial_bids[0]["id"] patch_data["lots"][1]["id"] = "00000000000000000000000000000000" response = self.app.patch_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual(response.json["errors"][0]["description"], ["Auction lots should be identical to the tender lots"]) patch_data = { "lots": [{"auctionUrl": "http://auction.openprocurement.org/tenders/id"}, {}], "bids": [ {"lotValues": [{"participationUrl": "http://auction.openprocurement.org/id"}, {}, {}]} for b in self.initial_bids ], } response = self.app.patch_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}, status=422) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], ["Number of lots of auction results did not match the number of tender lots"], ) for bid in patch_data["bids"]: bid["lotValues"] = [bid["lotValues"][0].copy() for i in self.initial_lots] response = self.app.patch_json("/tenders/{}/auction".format(self.tender_id), {"data": patch_data}, status=422) self.assertEqual( response.json["errors"][0], {"location": "body", "name": "bids", "description": [ {"participationUrl": ["url should be posted for each lot of bid"]}]} ) for lot in self.initial_lots: patch_data = { "lots": [ {"auctionUrl": f"http://auction.prozorro.gov.ua/{l['id']}"} if l["id"] == lot["id"] else {} for l in self.initial_lots ], "bids": [ {"lotValues": [ {"participationUrl": f"http://auction.prozorro.gov.ua/{v['relatedLot']}"} if v["relatedLot"] == lot["id"] else {} for v in b["lotValues"] ]} for b in self.initial_bids ] } response = self.app.patch_json("/tenders/{}/auction/{}".format(self.tender_id, lot["id"]), {"data": patch_data}) self.assertEqual(response.status, "200 OK") self.assertEqual(response.content_type, "application/json") resp = response.json["data"] for bid in resp["bids"]: for l in bid["lotValues"]: self.assertEqual(l["participationUrl"], f"http://auction.prozorro.gov.ua/{l['relatedLot']}") for l in resp["lots"]: self.assertEqual(l["auctionUrl"], f"http://auction.prozorro.gov.ua/{l['id']}") self.app.authorization 
= ("Basic", ("token", "")) cancellation = dict(**test_cancellation) cancellation.update({ "status": "active", "cancellationOf": "lot", "relatedLot": self.initial_lots[0]["id"], }) if RELEASE_2020_04_19 > get_now(): response = self.app.post_json("/tenders/{}/cancellations".format(self.tender_id), {"data": cancellation}) self.assertEqual(response.status, "201 Created") self.app.authorization = ("Basic", ("auction", "")) response = self.app.patch_json( "/tenders/{}/auction/{}".format(self.tender_id, self.initial_lots[0]["id"]), {"data": patch_data}, status=403 ) self.assertEqual(response.status, "403 Forbidden") self.assertEqual(response.content_type, "application/json") self.assertEqual(response.json["errors"][0]["description"], "Can update auction urls only in active lot status") def post_tender_lots_auction_document(self): self.app.authorization = ("Basic", ("auction", "")) lot_id = self.initial_lots[0]["id"] response = self.app.post( "/tenders/{}/documents".format(self.tender_id), upload_files=[("file", "name.doc", b"content")], status=403 ) self.assertEqual(response.status, "403 Forbidden") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], "Can't add document in current ({}) tender status".format( self.forbidden_auction_document_create_actions_status ), ) self.set_status("active.auction") response = self.app.post( "/tenders/{}/documents".format(self.tender_id), upload_files=[("file", "name.doc", b"content")] ) self.assertEqual(response.status, "201 Created") self.assertEqual(response.content_type, "application/json") doc_id = response.json["data"]["id"] key = response.json["data"]["url"].split("?")[-1].split("=")[-1] response = self.app.patch_json( "/tenders/{}/documents/{}".format(self.tender_id, doc_id), {"data": {"documentOf": "lot", "relatedItem": self.initial_lots[0]["id"]}}, ) self.assertEqual(response.status, "200 OK") self.assertEqual(response.content_type, "application/json") self.assertEqual(response.json["data"]["documentOf"], "lot") self.assertEqual(response.json["data"]["relatedItem"], self.initial_lots[0]["id"]) patch_data = {"bids": [ { "lotValues": [ {"relatedLot": i["id"]} for i in self.initial_lots ], } for b in self.initial_bids ]} response = self.app.post_json(f"/tenders/{self.tender_id}/auction/{lot_id}", {"data": patch_data}) self.assertEqual(response.status, "200 OK") self.assertEqual(response.content_type, "application/json") response = self.app.put( "/tenders/{}/documents/{}".format(self.tender_id, doc_id), upload_files=[("file", "name.doc", b"content_with_names")], ) self.assertEqual(response.status, "200 OK") self.assertEqual(response.content_type, "application/json") self.assertEqual(doc_id, response.json["data"]["id"]) key2 = response.json["data"]["url"].split("?")[-1].split("=")[-1] self.assertNotEqual(key, key2) self.set_status("complete") response = self.app.post( "/tenders/{}/documents".format(self.tender_id), upload_files=[("file", "name.doc", b"content")], status=403 ) self.assertEqual(response.status, "403 Forbidden") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], "Can't add document in current (complete) tender status" ) # TenderFeaturesAuctionResourceTest def get_tender_auction_feature(self): self.app.authorization = ("Basic", ("auction", "")) response = self.app.get("/tenders/{}/auction".format(self.tender_id), status=403) self.assertEqual(response.status, "403 Forbidden") 
self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], "Can't get auction info in current ({}) tender status".format(self.forbidden_auction_actions_status), ) self.set_status("active.auction") response = self.app.get("/tenders/{}/auction".format(self.tender_id)) self.assertEqual(response.status, "200 OK") self.assertEqual(response.content_type, "application/json") auction = response.json["data"] self.assertNotEqual(auction, self.initial_data) self.assertIn("dateModified", auction) self.assertIn("minimalStep", auction) self.assertNotIn("procuringEntity", auction) self.assertNotIn("tenderers", auction["bids"][0]) self.assertEqual(auction["bids"][0]["value"]["amount"], self.initial_bids[0]["value"]["amount"]) self.assertEqual(auction["bids"][1]["value"]["amount"], self.initial_bids[1]["value"]["amount"]) self.assertIn("features", auction) self.assertIn("parameters", auction["bids"][0]) def post_tender_auction_feature(self): self.app.authorization = ("Basic", ("auction", "")) response = self.app.patch_json("/tenders/{}/auction".format(self.tender_id), {"data": {}}, status=403) self.assertEqual(response.status, "403 Forbidden") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"][0]["description"], "Can't update auction urls in current ({}) tender status".format(self.forbidden_auction_actions_status), ) self.set_status("active.auction") response = self.app.post_json( "/tenders/{}/auction".format(self.tender_id), {"data": {"bids": [{"invalid_field": "invalid_value"}]}}, status=422, ) self.assertEqual(response.status, "422 Unprocessable Entity") self.assertEqual(response.content_type, "application/json") self.assertEqual( response.json["errors"], [{"description": {"invalid_field": "Rogue field"}, "location": "body", "name": "bids"}], ) patch_data = { "bids": [ { "id": self.initial_bids[-1]["id"], "value": {"amount": 459, "currency": "UAH", "valueAddedTaxIncluded": True},
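# ---------------------------------------------------------------------------
# Shape of the per-lot auction results payload exercised by the tests above,
# reduced to a sketch (identifiers are placeholders; `initial_bids`,
# `tender_id` and `lot_id` stand for fixtures available in the test class):
patch_data = {
    "bids": [
        {
            "id": bid["id"],
            "lotValues": [
                {"value": {"amount": 479, "currency": "UAH",
                           "valueAddedTaxIncluded": True}}
                for _ in bid["lotValues"]
            ],
        }
        for bid in initial_bids
    ]
}
# One POST per lot: /tenders/{tender_id}/auction/{lot_id}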
255)) g = max(0, min(g + brightness, 255)) b = max(0, min(b + brightness, 255)) return r, g, b elif isinstance(brightness, float) and -1. <= brightness <= 1.: if brightness < 0: r1 = g1 = b1 = 0 else: r1 = g1 = b1 = 255 r = int(r + (r1 - r) * abs(brightness)) g = int(g + (g1 - g) * abs(brightness)) b = int(b + (b1 - b) * abs(brightness)) return r, g, b else: return color return int(r), int(g), int(b) def get_color2(self, item): if item.find(":") == -1: return self._palette[self._scheme][item] else: split = item.split(":") cadd = lambda c, d: (c[0] + d[0], c[1] + d[1], c[2] + d[2]) if split[0] == "darker": return max(cadd(self.get_color(split[1]), (-20, -20, -20)), (0, 0, 0)) if split[0] == "dark": return max(cadd(self.get_color(split[1]), (-40, -40, -40)), (0, 0, 0)) if split[0] == "lighter": return min(cadd(self.get_color(split[1]), (20, 20, 20)), (250, 250, 250)) if split[0] == "light": return min(cadd(self.get_color(split[1]), (40, 40, 40)), (250, 250, 250)) if split[0] == "transparent": return self.get_color(split[1]) + (int(split[2].rstrip("%")) / 100,) def __getitem__(self, item): # type: (str) -> tuple """Mapping emulation. Operates similarly with get_color() """ return self.get_color(item) @staticmethod def load_from_file(path): # type: (str) -> GUI.ColorPalette """Loads a ColorPalette from a json file. """ f = open(path, "rU") colordata = json.load(f) toreturn = GUI.ColorPalette() for key in dict(colordata).keys(): toreturn._palette[key] = colordata.get(key) f.close() return toreturn @staticmethod def html_to_rgb(colorstring): # type: (str) -> Tuple[int, int, int] """Converts a web format rgb color (`#RRGGGBB`) into a 3-int tuple rgb color """ colorstring = colorstring.strip().strip('#') if len(colorstring) != 6: raise ValueError("input #{} is not in #RRGGBB format".format(colorstring)) elif False in map(lambda ch: ch.lower in '0123456789abcdef', colorstring): raise ValueError("invalid #RRGGBB format (unexpected character)") rgb = colorstring[:2], colorstring[2:4], colorstring[4:] return tuple(int(n, 16) for n in rgb) @staticmethod def rgb_to_html(rgb_tuple): # type: (Tuple[int, int, int]) -> str return '#{:02X}{:02X}{:02X}'.format(*rgb_tuple) # NOTE: new in 1.01 - Event base class for LongClickEvent and IntermediateUpdateEvent class Event(object): """Base class for LongClickEvent and IntermediateUpdateEvent events. It does nothing. """ __slots__ = () class LongClickEvent(Event): """Represents a long screen touch or mouse button press. """ def __init__(self, mouse_down): # type: (pygame.event.Event) -> None """LongClickEvent instance initializer. :param mouse_down: the corresponding pygame.MOUSEBUTTONDOWN event instance. """ self.mouse_down = mouse_down # type: pygame.event.Event self.mouse_down_time = datetime.now() # type: datetime.time self.mouse_up = None # type: pygame.event.Event self.mouse_up_time = None # type: datetime.time self.intermediate_points = [] # type: list self.pos = self.mouse_down.pos # type: tuple def intermediate_update(self, mouse_move): # type: (pygame.event.Event) -> None """Captures and updates the mouse movement path during a long click event. :param mouse_move: the corresponding pygame.MOUSEMOTION event instance. """ if self.mouse_up is None and ( len(self.intermediate_points) == 0 or mouse_move.pos != self.intermediate_points[-1]): self.intermediate_points.append(mouse_move.pos) def end(self, mouse_up): # type: (pygame.event.Event) -> None """Terminates the long click event processing. 
        :param mouse_up: the corresponding pygame.MOUSEBUTTONUP event instance.
        """
        self.mouse_up = mouse_up
        self.mouse_up_time = datetime.now()
        self.pos = self.mouse_up.pos

    # NOTE: new in 1.01 - getLatestUpdate() turned into a property.
    @property
    def latest_update(self):
        # type: () -> Tuple[int, int]
        """Gets the last mouse position during the long click event.

        If there's not a position, the current position is returned.
        """
        if len(self.intermediate_points) == 0:
            return self.pos
        else:
            return self.intermediate_points[-1]

    def is_valid_longclick(self, time=300):
        # type: (int) -> bool
        """Checks timestamps against a time interval.

        :param time: the time interval in milliseconds.
        """
        delta = self.mouse_up_time - self.mouse_down_time
        return (delta.microseconds / 1000) >= time


class IntermediateUpdateEvent(Event):
    """Represents an intermediate pointer-position update emitted while a
    long click is still in progress.
    """

    # NOTE: new in 1.01 - __slots__ added, to reduce memory usage.
    __slots__ = 'pos', 'source_event'

    def __init__(self, pos, src):
        # type: (Tuple[int, int], pygame.event.Event) -> None
        """IntermediateUpdateEvent instance initializer.

        :param pos: a mouse position.
        :param src: the source event.
        """
        self.pos = pos  # type: tuple
        self.source_event = src  # type: pygame.event.Event


class EventQueue(object):
    """Represents a queue of events to be processed (or handled).
    """

    # NOTE: new in 1.01 - __slots__ added, to reduce memory usage.
    __slots__ = ('events',)

    def __init__(self):
        # type: () -> None
        """EventQueue instance initializer.
        """
        self.events = []  # type: list

    # NOTE: new in 1.01 - the empty property.
    @property
    def empty(self):
        # type: () -> bool
        """True if there is no events in the queue, False otherwise.
        """
        return len(self.events) == 0

    # NOTE: new in 1.01 - the tail property.
    @property
    def tail(self):
        # type: () -> Any
        """Gets or sets the last event of the queue (returns None if its empty).
        """
        if not self.empty:
            return self.events[-1]
        return None

    @tail.setter
    def tail(self, value):
        # type: (Any) -> None
        if not self.empty:
            self.events[-1] = value

    def check(self):
        # type: () -> None
        """Updates the event queue.
        """
        for event in pygame.event.get():
            empty = len(self.events) == 0
            if event.type == pygame.QUIT:
                State.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                self.events.append(GUI.LongClickEvent(event))
            if (event.type == pygame.MOUSEMOTION and not empty and
                    isinstance(self.events[-1], GUI.LongClickEvent)):
                self.events[-1].intermediate_update(event)
            if (event.type == pygame.MOUSEBUTTONUP and not empty and
                    isinstance(self.events[-1], GUI.LongClickEvent)):
                self.events[-1].end(event)
                # Collapse presses that were too short to count as long
                # clicks into plain MOUSEBUTTONUP events.
                if not self.events[-1].is_valid_longclick():
                    self.events[-1] = self.events[-1].mouse_up

    def get_latest(self):
        # type: () -> GUI.Event
        """Removes and returns the last event of the queue.
""" if self.empty: return None return self.events.pop() def remove_event(self, ev): # type: (GUI.Event) -> None """Removes the given event from the queue. """ if ev in self.events: self.events.remove(ev) @property def latest_complete(self): # type: () -> Event """Gets the last complete event from the queue or None if the queue is empty. The event returned is removed from the queue. """ for event in reversed(self.events[:]): if isinstance(event, GUI.LongClickEvent): if event.mouse_up is None: self.events.remove(event) return event else: return GUI.IntermediateUpdateEvent(self.tail.latest_update, self.tail) else: self.events.remove(event) return event return None def clear(self): # type: () -> None """Removes all events from the queue. """ del self.events[:] # remember to replace this by list.clear() in Python3 # NOTE: New on 1.01 - A namedtuple enumerating the component event types. CompEvt = namedtuple( "CompEvt", "on_click on_longclick on_intermediate_updt".split() )("onClick", "onLongClick", "onIntermediateUpdate") # Note: new on 1.01 - A namedtuple enumerating the component event data. CompEvtData = namedtuple( "CompEvtData", "on_click_data on_longclick_data on_intermediate_updt_data".split() )("onClickData", "onLongClickData", "onIntermediateUpdateData") class Component(object): """Component is the base class of ui elements of the GUI toolkit. """ def __init__(self, position, **data): # type: (Tuple[int, int], ...) -> None """Component instance initializer. :param position: the component position. :param data: optional set of keyword arguments related to the component. """ self.position = list(deepcopy(position)) self.event_bindings = {comp_evt: None for comp_evt in GUI.CompEvt} self.event_data = {evt_data: None for evt_data in GUI.CompEvtData} self.data = data self.surface = data.get("surface", None) self.border = 0 self.border_color = (0, 0, 0) self.resizable = data.get("resizable", False) self.originals = [list(deepcopy(position)), data.get("width", data["surface"].get_width() if data.get("surface", False) is not False else 0), data.get("height", data["surface"].get_height() if data.get("surface", False) is not False else 0) ] self.width = self.originals[1] self.height = self.originals[2] self.computed_width = 0 self.computed_height = 0 self.computed_position = [0, 0] self.rect = pygame.Rect(self.computed_position, (self.computed_width, self.computed_height)) self._inner_click_coordinates = (-1, -1) self.inner_offset = [0, 0] self.internal_click_overrides = {} self.set_dimensions() for comp_evt in GUI.CompEvt: self.event_bindings[comp_evt] = data.get(comp_evt) for comp_data in GUI.CompEvtData: self.event_data[comp_data] = data.get(comp_data) if "border" in data: self.border = int(data["border"]) self.border_color = data.get("borderColor", state.color_palette.get_color(GUI.Palette.background)) def _percent_to_pix(self, value, scale): # type: (str, int) -> int """Converts a percentage value (as str) to a pixel value (as int). """ return int(int(value.rstrip("%")) * scale) def set_dimensions(self): old_surface = self.surface.copy() if self.surface is not None else None # type: pygame.Surface if self.data.get("fixedSize", False): self.computed_width = self.data.get("width") self.computed_height = self.data.get("height") self.rect = pygame.Rect(self.computed_position, (self.computed_width, self.computed_height)) self.surface = pygame.Surface((self.computed_width, self.computed_height),
- # osid.learning.ObjectiveRequisiteSession.get_all_requisite_objectives return self._get_provider_session('objective_requisite_session').get_all_requisite_objectives(*args, **kwargs) def get_dependent_objectives(self, *args, **kwargs): """Pass through to provider ObjectiveRequisiteSession.get_dependent_objectives""" # Implemented from kitosid template for - # osid.learning.ObjectiveRequisiteSession.get_dependent_objectives return self._get_provider_session('objective_requisite_session').get_dependent_objectives(*args, **kwargs) def is_objective_required(self, *args, **kwargs): """Pass through to provider ObjectiveRequisiteSession.is_objective_required""" # Implemented from kitosid template for - # osid.learning.ObjectiveRequisiteSession.is_objective_required return self._get_provider_session('objective_requisite_session').is_objective_required(*args, **kwargs) def get_equivalent_objectives(self, *args, **kwargs): """Pass through to provider ObjectiveRequisiteSession.get_equivalent_objectives""" # Implemented from kitosid template for - # osid.learning.ObjectiveRequisiteSession.get_equivalent_objectives return self._get_provider_session('objective_requisite_session').get_equivalent_objectives(*args, **kwargs) ## # The following methods are from osid.learning.ObjectiveRequisiteAssignmentSession def can_assign_requisites(self): """Pass through to provider ObjectiveRequisiteAssignmentSession.can_assign_requisites""" # Implemented from kitosid template for - # osid.learning.ObjectiveRequisiteAssignmentSession.can_assign_requisites return self._get_provider_session('objective_requisite_assignment_session').can_assign_requisites() def assign_objective_requisite(self, *args, **kwargs): """Pass through to provider ObjectiveRequisiteAssignmentSession.assign_objective_requisite""" # Implemented from kitosid template for - # osid.learning.ObjectiveRequisiteAssignmentSession.assign_objective_requisite return self._get_provider_session('objective_requisite_assignment_session').assign_objective_requisite(*args, **kwargs) def unassign_objective_requisite(self, *args, **kwargs): """Pass through to provider ObjectiveRequisiteAssignmentSession.unassign_objective_requisite""" # Implemented from kitosid template for - # osid.learning.ObjectiveRequisiteAssignmentSession.unassign_objective_requisite return self._get_provider_session('objective_requisite_assignment_session').unassign_objective_requisite(*args, **kwargs) def assign_equivalent_objective(self, *args, **kwargs): """Pass through to provider ObjectiveRequisiteAssignmentSession.assign_equivalent_objective""" # Implemented from kitosid template for - # osid.learning.ObjectiveRequisiteAssignmentSession.assign_equivalent_objective return self._get_provider_session('objective_requisite_assignment_session').assign_equivalent_objective(*args, **kwargs) def unassign_equivalent_objective(self, *args, **kwargs): """Pass through to provider ObjectiveRequisiteAssignmentSession.unassign_equivalent_objective""" # Implemented from kitosid template for - # osid.learning.ObjectiveRequisiteAssignmentSession.unassign_equivalent_objective return self._get_provider_session('objective_requisite_assignment_session').unassign_equivalent_objective(*args, **kwargs) ## # The following methods are from osid.learning.ActivityLookupSession def can_lookup_activities(self): """Pass through to provider ActivityLookupSession.can_lookup_activities""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.can_lookup_resources_template return 
self._get_provider_session('activity_lookup_session').can_lookup_activities() def use_comparative_activity_view(self): """Pass through to provider ActivityLookupSession.use_comparative_activity_view""" self._object_views['activity'] = COMPARATIVE # self._get_provider_session('activity_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_comparative_activity_view() except AttributeError: pass def use_plenary_activity_view(self): """Pass through to provider ActivityLookupSession.use_plenary_activity_view""" self._object_views['activity'] = PLENARY # self._get_provider_session('activity_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_plenary_activity_view() except AttributeError: pass def get_activity(self, *args, **kwargs): """Pass through to provider ActivityLookupSession.get_activity""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resource_template return self._get_provider_session('activity_lookup_session').get_activity(*args, **kwargs) def get_activities_by_ids(self, *args, **kwargs): """Pass through to provider ActivityLookupSession.get_activities_by_ids""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resources_by_ids_template return self._get_provider_session('activity_lookup_session').get_activities_by_ids(*args, **kwargs) def get_activities_by_genus_type(self, *args, **kwargs): """Pass through to provider ActivityLookupSession.get_activities_by_genus_type""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resources_by_genus_type_template return self._get_provider_session('activity_lookup_session').get_activities_by_genus_type(*args, **kwargs) def get_activities_by_parent_genus_type(self, *args, **kwargs): """Pass through to provider ActivityLookupSession.get_activities_by_parent_genus_type""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type_template return self._get_provider_session('activity_lookup_session').get_activities_by_parent_genus_type(*args, **kwargs) def get_activities_by_record_type(self, *args, **kwargs): """Pass through to provider ActivityLookupSession.get_activities_by_record_type""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resources_by_record_type_template return self._get_provider_session('activity_lookup_session').get_activities_by_record_type(*args, **kwargs) def get_activities_for_objective(self, *args, **kwargs): """Pass through to provider ActivityLookupSession.get_activities_for_objective""" # Implemented from kitosid template for - # osid.resource.ActivityLookupSession.get_activities_for_objective return self._get_provider_session('activity_lookup_session').get_activities_for_objective(*args, **kwargs) def get_activities_for_objectives(self, *args, **kwargs): """Pass through to provider ActivityLookupSession.get_activities_for_objectives""" # Implemented from kitosid template for - # osid.resource.ActivityLookupSession.get_activities_for_objectives return self._get_provider_session('activity_lookup_session').get_activities_for_objectives(*args, **kwargs) def get_activities_by_asset(self, *args, **kwargs): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs)) def get_activities_by_assets(self, *args, **kwargs): 
"""Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs)) def get_activities(self): """Pass through to provider ActivityLookupSession.get_activities""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resources_template return self._get_provider_session('activity_lookup_session').get_activities() activities = property(fget=get_activities) ## # The following methods are from osid.learning.ActivityQuerySession def can_search_activities(self): """Pass through to provider ActivityQuerySession.can_search_activities""" # Implemented from kitosid template for - # osid.resource.ResourceQuerySession.can_search_resources_template return self._get_provider_session('activity_query_session').can_search_activities() def get_activity_query(self): """Pass through to provider ActivityQuerySession.get_activity_query""" # Implemented from kitosid template for - # osid.resource.ResourceQuerySession.get_item_query_template return self._get_provider_session('activity_query_session').get_activity_query() activity_query = property(fget=get_activity_query) def get_activities_by_query(self, *args, **kwargs): """Pass through to provider ActivityQuerySession.get_activities_by_query""" # Implemented from kitosid template for - # osid.resource.ResourceQuerySession.get_items_by_query_template return self._get_provider_session('activity_query_session').get_activities_by_query(*args, **kwargs) ## # The following methods are from osid.learning.ActivityAdminSession def can_create_activities(self): """Pass through to provider ActivityAdminSession.can_create_activities""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.can_create_resources return self._get_provider_session('activity_admin_session').can_create_activities() def can_create_activity_with_record_types(self, *args, **kwargs): """Pass through to provider ActivityAdminSession.can_create_activity_with_record_types""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.can_create_resource_with_record_types return self._get_provider_session('activity_admin_session').can_create_activity_with_record_types(*args, **kwargs) def get_activity_form_for_create(self, *args, **kwargs): """Pass through to provider ActivityAdminSession.get_activity_form_for_create""" # Implemented from - # osid.learning.ActivityAdminSession.get_activity_form_for_create_template return self._get_provider_session('activity_admin_session').get_activity_form_for_create(*args, **kwargs) def create_activity(self, *args, **kwargs): """Pass through to provider ActivityAdminSession.create_activity""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.create_resource return self._get_provider_session('activity_admin_session').create_activity(*args, **kwargs) def can_update_activities(self): """Pass through to provider ActivityAdminSession.can_update_activities""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.can_update_resources return self._get_provider_session('activity_admin_session').can_update_activities() def get_activity_form_for_update(self, *args, **kwargs): """Pass through to provider ActivityAdminSession.get_activity_form_for_update""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.get_resource_form_for_update return self._get_provider_session('activity_admin_session').get_activity_form_for_update(*args, **kwargs) def 
get_activity_form(self, *args, **kwargs): """Pass through to provider ActivityAdminSession.get_activity_form_for_update""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.get_resource_form_for_update # This method might be a bit sketchy. Time will tell. if isinstance(args[-1], list) or 'activity_record_types' in kwargs: return self.get_activity_form_for_create(*args, **kwargs) else: return self.get_activity_form_for_update(*args, **kwargs) def duplicate_activity(self, activity_id): # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.get_resource_form_for_update return self._get_provider_session('activity_admin_session').duplicate_activity(activity_id) def update_activity(self, *args, **kwargs): """Pass through to provider ActivityAdminSession.update_activity""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.update_resource # Note: The OSID spec does not require returning updated object return self._get_provider_session('activity_admin_session').update_activity(*args, **kwargs) def save_activity(self, activity_form, *args, **kwargs): """Pass through to provider ActivityAdminSession.update_activity""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.update_resource if activity_form.is_for_update(): return self.update_activity(activity_form, *args, **kwargs) else: return self.create_activity(activity_form, *args, **kwargs) def can_delete_activities(self): """Pass through to provider ActivityAdminSession.can_delete_activities""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.can_delete_resources return self._get_provider_session('activity_admin_session').can_delete_activities() def delete_activity(self, *args, **kwargs): """Pass through to provider ActivityAdminSession.delete_activity""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.delete_resource self._get_provider_session('activity_admin_session').delete_activity(*args, **kwargs) def can_manage_activity_aliases(self): """Pass through to provider ActivityAdminSession.can_manage_activity_aliases""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.can_manage_resource_aliases_template return self._get_provider_session('activity_admin_session').can_manage_activity_aliases() def alias_activity(self, *args, **kwargs): """Pass through to provider ActivityAdminSession.alias_activity""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.alias_resources self._get_provider_session('activity_admin_session').alias_activity(*args, **kwargs) ## # The following methods are from osid.learning.ProficiencyLookupSession def can_lookup_proficiencies(self): """Pass through to provider ProficiencyLookupSession.can_lookup_proficiencies""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.can_lookup_resources_template return self._get_provider_session('proficiency_lookup_session').can_lookup_proficiencies() def use_comparative_proficiency_view(self): """Pass through to provider ProficiencyLookupSession.use_comparative_proficiency_view""" self._object_views['proficiency'] = COMPARATIVE # self._get_provider_session('proficiency_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_comparative_proficiency_view() except AttributeError: pass def use_plenary_proficiency_view(self): """Pass through to provider 
ProficiencyLookupSession.use_plenary_proficiency_view""" self._object_views['proficiency'] = PLENARY # self._get_provider_session('proficiency_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_plenary_proficiency_view() except AttributeError: pass def use_effective_proficiency_view(self): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services') def use_any_effective_proficiency_view(self): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services') def get_proficiency(self, *args, **kwargs): """Pass through to provider ProficiencyLookupSession.get_proficiency""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resource_template return self._get_provider_session('proficiency_lookup_session').get_proficiency(*args, **kwargs) def get_proficiencies_by_ids(self, *args, **kwargs): """Pass through to provider ProficiencyLookupSession.get_proficiencies_by_ids""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resources_by_ids_template return self._get_provider_session('proficiency_lookup_session').get_proficiencies_by_ids(*args, **kwargs) def get_proficiencies_by_genus_type(self, *args, **kwargs): """Pass through to provider ProficiencyLookupSession.get_proficiencies_by_genus_type""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resources_by_genus_type_template return self._get_provider_session('proficiency_lookup_session').get_proficiencies_by_genus_type(*args, **kwargs) def get_proficiencies_by_parent_genus_type(self, *args, **kwargs): """Pass through to provider ProficiencyLookupSession.get_proficiencies_by_parent_genus_type""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type_template return self._get_provider_session('proficiency_lookup_session').get_proficiencies_by_parent_genus_type(*args, **kwargs) def get_proficiencies_by_record_type(self, *args, **kwargs): """Pass through to provider ProficiencyLookupSession.get_proficiencies_by_record_type""" # Implemented from kitosid template for - # osid.resource.ResourceLookupSession.get_resources_by_record_type_template return self._get_provider_session('proficiency_lookup_session').get_proficiencies_by_record_type(*args, **kwargs) def get_proficiencies_on_date(self, *args, **kwargs): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs)) def get_proficiencies_by_genus_type_on_date(self, *args, **kwargs): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs)) def get_proficiencies_for_objective(self, *args, **kwargs): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs)) def get_proficiencies_for_objective_on_date(self, *args, **kwargs): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs)) def get_proficiencies_by_genus_type_for_objective(self, *args, **kwargs): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs)) def 
get_proficiencies_by_genus_type_for_objective_on_date(self, *args, **kwargs): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs)) def get_proficiencies_for_objectives(self, *args, **kwargs): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs)) def get_proficiencies_for_resource(self, *args, **kwargs): """Pass through to provider ProficiencyLookupSession.get_proficiencies_for_resource""" # Implemented from kitosid template for - # osid.relationship.RelationshipLookupSession.get_relationships_for_source_template return self._get_provider_session('proficiency_lookup_session').get_proficiencies_for_resource(*args, **kwargs) def get_proficiencies_for_resource_on_date(self, *args, **kwargs): """Pass through to provider ProficiencyLookupSession.get_proficiencies_for_resource_on_date""" # Implemented from kitosid template for - # osid.relationship.RelationshipLookupSession.get_relationships_for_source_on_date_template return self._get_provider_session('proficiency_lookup_session').get_proficiencies_for_resource_on_date(*args, **kwargs) def get_proficiencies_by_genus_type_for_resource(self, *args, **kwargs): """Pass through to provider unimplemented""" raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs='
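# ---------------------------------------------------------------------------
# Hedged usage sketch (not from the original module): how the pass-through
# facade above is typically driven. The manager `lm`, `objective_id`, and
# the helper name are hypothetical; the create-vs-update dispatch keys off
# ActivityForm.is_for_update(), as implemented in save_activity() earlier
# in this class.
def create_named_activity(lm, objective_id, name):
    form = lm.get_activity_form_for_create(objective_id, [])  # no record types
    form.display_name = name
    # form.is_for_update() is False here, so save_activity() routes the
    # call to create_activity() on the provider session.
    return lm.save_activity(form)
# ---------------------------------------------------------------------------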
<filename>landlibrary/importers/FAOLEX/faolex-2017/utils.py #!/usr/bin/python # -*- coding: UTF-8 -*- def getUNM49code(label): return { 'Africa': "002", 'Americas': "019", 'Asia': "142", 'Caribbean': "029", 'Central Africa': "017", 'Central America': "013", 'Central Asia': "143", 'CIS (Commonwealth of Independent States)': ['ARM', 'BLR', 'KAZ', 'KGZ', 'MDA', 'RUS', 'TJK', 'TKM', 'UKR', 'UZB'], 'Eastern Africa': "014", 'Eastern Asia': "030", 'Eastern Europe': "151", 'Europe': "150", 'European Union Countries': ["AUT", "BEL", "BGR", "CYP", "CZE", "DEU", "DNK", "ESP", "EST", "FIN", "FRA", "GBR", "GRC", "HRV", "HUN", "IRL", "ITA", "LTU", "LUX", "LVA", "MLT", "NLD", "POL", "PRT", "ROU", "SVK", "SVN", "SWE"], 'Landlocked Developing Countries': ["AFG","ARM","AZE","BDI","BFA","BOL","BTN","BWA","CAF","ETH","KAZ","KGZ","LAO", "LSO","MDA","MKD","MLI","MNG","MWI","NER","NPL","PRY","RWA","SSD","SWZ","TCD","TJK","TKM","UGA","UZB","ZMB","ZWE"], 'Least Developed Countries': None, 'North America': "021", 'North Asia': ['RUS'], 'Northern Africa': "015", 'Northern Europe': "154", 'Oceania': "009", 'Small Island Developing States': ["ABW", "AIA", "ASM", "ATG", "BHR", "BHS", "BLZ", "BRB", "COK", "COM", "CPV", "CUB", "CUW", "DMA", "DOM", "FJI", "FSM", "GNB", "GRD", "GUM", "GUY", "HTI", "JAM", "KIR", "KNA", "LCA", "MDV", "MHL", "MNP", "MSR", "NCL", "NIU", "NRU", "PLW", "PNG", "PRI", "PYF", "SGP", "SLB", "STP", "SUR", "SXM", "SYC", "TLS", "TON", "TTO", "TUV", "VCT", "VGB", "VIR", "VUT", "WSM"], 'South America': "005", 'South-Eastern Asia': "035", 'Southern Africa': "018", 'Southern Asia': "034", 'Southern Europe': "039", 'Western Africa': "011", 'Western Asia': "145", 'Western Europe': "155", }[label] def getISO639_1code(label): return { "Albanian" : "sq", "Arabic" : "ar", "Azerbaijani" : "az", "Bosnian" : "bs", "Bulgarian" : "bg", "Catalan" : "ca", "Chinese" : "zh", "Chineset" : "zh", "Croatian" : "hr", "Czech" : "cs", "Danish" : "da", "Dutch" : "nl", "English" : "en", "Farsi" : "fa", "French" : "fr", "German" : "de", "Greek" : "el", "Hungarian" : "hu", "Indonesian" : "id", "Italian" : "it", "Macedonian" : "mk", "Montenegrin" : "sr", "Norwegian" : "no", "Polish" : "pl", "Portuguese" : "pt", "Russian" : "ru", "Serbian" : "sr", "Slovak" : "sk", "Slovenian" : "sl", "Spanish" : "es", "Swedish" : "sv", "Turkish" : "tr", "Ukrainian" : "uk", }[label] def get_publisher(label): return { "www.legis-palop.org" : u"Projecto Apoio ao Desenvolvimento dos Sistemas Judiciários", "www.qbz.gov.al" : u"Government of Albania", "www.qpz.gov.al" : u"Government of Albania", "www.bopa.ad" : u"Government of Andorra", "www.laws.gov.ag" : u"Government of Antigua and Barbuda", "www.ris.bka.gv.at" : u"Austrian Federal Government", "www.e-qanun.az" : u"Government of Azerbaijan", "http://www.presidence.gov.bi/" : u"Government of Burundi", "www.assemblee.bi" : u"Government of Burundi", "www.just.fgov.be" : u"Government of Belgium", "www.droit-afrique" : u"Droit-Afrique", "www.legiburkina.bf" : u"Government of Burkina Faso", "bdlaws.minlaw.gov.bd" : u"Government of Bangladesh", "www.parliament.bg" : u"Government of Bulgaria", "www.legalaffairs.gov.bh" : u"Government of Bahrain", "www.bahamas.gov.bs" : u"Government of the Bahamas", "www.skupstinabd.ba" : u"Government of Bosnia and Herzegovina", "http://president.gov.by" : u"Government of Belarus", "pravo.by" : u"Government of Belarus", "www.bermudalaws.bm" : u"Government of Bermuda", "www.planalto.gov.br" : u"Government of Brazil", "www.senado.gov.br" : u"Government of Brazil", 
"www.caricomlaw.org" : u"Legal Services of the Caribbean Community", "www.agc.gov.bn" : u"Government of the Sultanate of Brunei Darussalam", "www.nab.gov.bt" : u"Royal Government of Bhutan", "www.laws.gov.bw" : u"Government of Botswana", "www.gc.ca" : u"Government of Canada", "www.admin.ch" : u"Government of Switzerland", "www.gov.cn" : u"Government of the People's Republic of China", "www.leganet.cd" : u"Partenariat pour le développement social", "www.paclii.org" : u"Pacific Islands Legal Information Institute", "www.imprenta.gov.co" : u"Government of Colombia", "www.legis-palop.org" : u"Projecto Apoio ao Desenvolvimento dos Sistemas Judiciários", "www.gaceta.go.cr" : u"Government of Costa Rica", "www.imprentanacional.go.cr" : u"Government of Costa Rica", "www.gacetaoficial.cu" : u"Government of Cuba", "www.gaceteaoficial.cu" : u"Government of Cuba", "www.gov.ky" : u"Government of the Cayman Islands", "www.cylaw.org" : u"CyLaw", "www.bgbl.de" : u"Government of Germany", "www.retsinfo.dk" : u"Government of Denmark", "www.retsinformation.dk" : u"Government of Denmark", "www.suprema.gov.do" : u"Government of the Dominican Republic", "www.joradp.dz" : u"Government of Algeria", "www.registroficial.gob.ec" : u"Government of Ecuador", "www.registroficial.gob.ecu" : u"Government of Ecuador", "www.registroficial.gov.ec" : u"Government of Ecuador", "www.tribunalconstitucional.gov.ec" : u"Government of Ecuador", "www.boe.es" : u"Government of Spain", "www.ethiopar.net" : u"Government of Ethiopia", "www.finlex.fi" : u"FINLEX", "www.fiji.gov.fj" : u"Government of Fiji", "www.paclii.org" : u"Pacific Islands Legal Information Institute", "www.legifrance.gouv.fr" : u"Government of France", "www.opsi.gov.uk" : u"Government of the United Kingdom", "www.scotland-legislation.hmso.gov.uk" : u"Government of the United Kingdom", "www.epa.gov.gh" : u"Environmental Protection Agency Ghana", "www.nawasa.gd" : u"Grenada National Water & Sewerage Authority", "www.guamcourts.org" : u"Government of Guam", "www.guamlegislature.com" : u"Government of Guam", "legalaffairs.gov.gy" : u"Government of Guyana", "www.legalaffairs.gov.gy" : u"Government of Guyana", "www.hah.hr" : u"Government of Croatia", "www.nn.hr" : u"Government of Croatia", "http://njt.hu/" : u"Government of Hungary", "www.magyarorszag.hu" : u"Government of Hungary", "http://www.indolaw.org/" : u"Government of Indonesia", "www.downtoearth-indonesia.org" : u"Government of Indonesia", "www.indolaw.org" : u"Government of Indonesia", "india.gov.in" : u"Government of India", "www.commonlii.org" : u"Government of India", "www.india.gov.in" : u"Government of India", "www.agriculture.gov.ie" : u"Government of Ireland", "www.bailii.org" : u"British and Irish Legal Information Institute", "www.bailli.org" : u"British and Irish Legal Information Institute", "www.irishstatutebook.ie" : u"Government of Ireland", "www.oireachtas.ie" : u"Government of Ireland", "http://rc.majlis.ir/fa" : u"Government of Ireland", "http://www.dastour.ir/" : u"Government of Ireland", "www.japarliament.gov.jm" : u"Government of Jamaica", "www.jerseylaw.je" : u"Jersey Legal Information Board", "www.japaneselawtranslation.go.jp" : u"Government of Japan", "http://adilet.zan.kz" : u"Government of the Republic of Kazakhstan", "http://www.government.kz" : u"Government of the Republic of Kazakhstan", "zher.kz" : u"Government of the Republic of Kazakhstan", "kenyalaw.org" : u"Kenya Law", "www.kenyalaw.org" : u"Kenya Law", "http://cbd.minjust.gov.kg" : u"Government of Kyrgyzstan", 
"www.minjust.gov.kg" : u"Government of Kyrgyzstan", "www.klri.re.kr" : u"Government of South Korea", "http://erml.moe.gov.lb" : u"Government of Lebanon", "http://jo.pcm.gov.lb" : u"Government of Lebanon", "www.moe.gov.lb" : u"Government of Lebanon", "fornis.net" : u"Forestry Research Network of Sub-Saharan Africa", "legislature.gov.lr" : u"Government of Republic of Liberia", "www.fda.gov.lr" : u"Forestry Development Authority (FDA) - Republic of Liberia", "www.gesetze.li" : u"Government of Liechtenstein", "www.commonlii.org" : u"Commonwealth Legal Information Institute", "www.documents.gov.lk" : u"Government of Sri Lanka", "www.legilux.lu" : u"Government of Luxembourg", "http://www.sgg.gov.ma/Législation/BulletinsOfficiels.aspx" : u"Government of Morocco", "www.sgg.gov.ma" : u"Government of Morocco", "http://www.cnlegis.gov.mg/" : u"Government of Madagascar", "www.assemblee-nationale.mg" : u"Government of Madagascar", "dof.gob.mx/" : u"Government of Mexico", "www.vanuatu.usp.ac.fj" : u"University of the South Pacific", "http://mali.eregulations.org" : u"Government of Mali", "http://www.journal-officiel.ml/1/56/fr/journal-officiel/le-journal-officiel.html" : u"Government of Mali", "http://www.sgg-mali.ml/JO/2016/mali-jo-2016-16.pdf" : u"Government of Mali", "www.journal-officiel.ml/1/56/fr/journal-officiel/le-journal-officiel.html" : u"Government of Mali", "www.sgg-mali.ml" : u"Government of Mali", "www.doi.gov.mt" : u"Government of Malta", "www.mrt.gov.me" : u"Government of Montenegro", "www.cnmileg.gov.mp" : u"Government of the Northern Mariana Islands", "www.legis-palop.org" : u"Projecto Apoio ao Desenvolvimento dos Sistemas Judiciários", "http://www.rimgerddes.org/" : u"Groupe d'Etudes et de Recherches sur la Démocratie et le Développement Economique et Social en Mauritanie", "agc.gov.ms" : u"Government of Montserrat", "montserrat.worldlegislation.com" : u"Government of Montserrat", "attorneygeneral.govmu.org" : u"Government of Mauritius", "mauritiusassembly.govmu.org" : u"Government of Mauritius", "supremecourt.intnet.mu" : u"Government of Mauritius", "www.africanlawlibrary.net" : u"African Law Library", "www.gov.mu" : u"Government of Mauritius", "www.malawilii.org" : u"Malawi Legal Information Institute", "www.cljlaw.com" : u"CLJ Legal Network Malaysia", "www.lac.org.na" : u"Legal Assistance Center Namibia", "www.parliament.gov.na" : u"Government of the Republic of Namibia", "www.saflii.org" : u"South African Legal Information Institute", "www.droit-afrique.com" : u"Droit-Afrique", "www.aksgonline.com" : u"Government of Nigeria", "www.oyohouseofassembly.org" : u"Government of Nigeria", "www.placng.org" : u"Government of Nigeria", "www.asamblea.gob.ni" : u"Government of Nicaragua", "www.lagaceta.gob.ni" : u"Government of Nicaragua", "www.vanuatu.usp.ac.fj" : u"University of the South Pacific", "www.overheid.nl" : u"Government of the Netherlands", "www.lovdata.no" : u"Government of Norway", "www.moad.gov.np" : u"Government of Nepal", "www.legislation.govt.nz" : u"Government of New Zealand", "www.asamblea.gob.pa" : u"Government of Panama", "www.gaceetaoficial.gob.pa" : u"Government of Panama", "www.gacetaoficial.gob.pa" : u"Government of Panama", "www.minjus.gob.pe" : u"Government of Peru", "www.chanrobles.com" : u"<NAME>", "www.denr.gov.ph" : u"Government of Philippines", "www.gov.ph" : u"Government of Philippines", "www.paclii.org" : u"Pacific Islands Legal Information Institute", "www.pacli.org" : u"Pacific Islands Legal
), "AL12Ons" : ( 24252, 24253 ), "AL12AgeRec" : ( 24253, 24255 ), "AL12Rec" : ( 24255, 24256 ), "AL12c" : ( 24256, 24257 ), "AL13" : ( 24257, 24258 ), "AL13AgeOns" : ( 24258, 24260 ), "AL13Ons" : ( 24260, 24261 ), "AL13AgeRec" : ( 24261, 24263 ), "AL13Rec" : ( 24263, 24264 ), "AL13b" : ( 24264, 24265 ), "AL14" : ( 24265, 24266 ), "AL14AgeOns" : ( 24266, 24268 ), "AL14Ons" : ( 24268, 24269 ), "AL14AgeRec" : ( 24269, 24271 ), "AL14Rec" : ( 24271, 24272 ), "AL14b" : ( 24272, 24273 ), "AL15" : ( 24273, 24274 ), "AL15a" : ( 24274, 24275 ), "AL15AgeOns" : ( 24275, 24277 ), "AL15Ons" : ( 24277, 24278 ), "AL15AgeRec" : ( 24278, 24280 ), "AL15Rec" : ( 24280, 24281 ), "AL16" : ( 24281, 24282 ), "AL16a" : ( 24282, 24283 ), "AL16b" : ( 24283, 24286 ), "AL16b1" : ( 24286, 24287 ), "AL16AgeOns" : ( 24287, 24289 ), "AL16Ons" : ( 24289, 24290 ), "AL16AgeRec" : ( 24290, 24292 ), "AL16Rec" : ( 24292, 24293 ), "AL16d" : ( 24293, 24294 ), "AL17" : ( 24294, 24295 ), "AL17AgeOns" : ( 24295, 24297 ), "AL17Ons" : ( 24297, 24298 ), "AL17AgeRec" : ( 24298, 24300 ), "AL17Rec" : ( 24300, 24301 ), "AL17b" : ( 24301, 24304 ), "AL17b1" : ( 24304, 24305 ), "AL18" : ( 24305, 24306 ), "AL18a" : ( 24306, 24307 ), "AL18AgeOns" : ( 24307, 24309 ), "AL18Ons" : ( 24309, 24310 ), "AL18AgeRec" : ( 24310, 24312 ), "AL18Rec" : ( 24312, 24313 ), "AL18c" : ( 24313, 24314 ), "AL19" : ( 24314, 24315 ), "AL19AgeOns" : ( 24315, 24317 ), "AL19Ons" : ( 24317, 24318 ), "AL19AgeRec" : ( 24318, 24320 ), "AL19Rec" : ( 24320, 24321 ), "AL20" : ( 24321, 24322 ), "AL20AgeOns" : ( 24322, 24324 ), "AL20Ons" : ( 24324, 24325 ), "AL21" : ( 24325, 24326 ), "AL21Another" : ( 24326, 24327 ), "AL21a_Specify" : ( 24327, 24407 ), "AL21a_Code" : ( 24407, 24426 ), "AL21Another2" : ( 24426, 24427 ), "AL21a_Specify2" : ( 24427, 24507 ), "AL21a_Code2" : ( 24507, 24526 ), "AL21Another3" : ( 24526, 24527 ), "AL21a_Specify3" : ( 24527, 24607 ), "AL21a_Code3" : ( 24607, 24626 ), "AL21Another4" : ( 24626, 24627 ), "AL21a_Specify4" : ( 24627, 24707 ), "AL21a_Code4" : ( 24707, 24726 ), "AL21_SPECIFY" : ( 24726, 24951 ), "AL21AgeOns" : ( 24951, 24953 ), "AL21Ons" : ( 24953, 24954 ), "AL21AgeRec" : ( 24954, 24956 ), "AL21Rec" : ( 24956, 24957 ), "AL21c" : ( 24957, 24958 ), "AL21d" : ( 24958, 24959 ), "AL21d_SPECIFY" : ( 24959, 25184 ), "AL22" : ( 25184, 25185 ), "AL22AgeOns" : ( 25185, 25187 ), "AL22Ons" : ( 25187, 25188 ), "AL22AgeRec" : ( 25188, 25190 ), "AL22Rec" : ( 25190, 25191 ), "AL22b" : ( 25191, 25193 ), "AL22b1" : ( 25193, 25194 ), "AL22c" : ( 25194, 25195 ), "AL23" : ( 25195, 25196 ), "AL23b" : ( 25196, 25198 ), "AL23AgeOns" : ( 25198, 25200 ), "AL23Ons" : ( 25200, 25201 ), "AL23AgeRec" : ( 25201, 25203 ), "AL23Rec" : ( 25203, 25204 ), "AL23b1" : ( 25204, 25205 ), "AL23c" : ( 25205, 25206 ), "AL24" : ( 25206, 25207 ), "AL24b" : ( 25207, 25209 ), "AL24AgeOns" : ( 25209, 25211 ), "AL24Ons" : ( 25211, 25212 ), "AL24AgeRec" : ( 25212, 25214 ), "AL24Rec" : ( 25214, 25215 ), "AL24b1" : ( 25215, 25216 ), "AL24c" : ( 25216, 25217 ), "AL25" : ( 25217, 25218 ), "AL25AgeOns" : ( 25218, 25220 ), "AL25Ons" : ( 25220, 25221 ), "AL25AgeRec" : ( 25221, 25223 ), "AL25Rec" : ( 25223, 25224 ), "AL25b" : ( 25224, 25225 ), "AL26a1" : ( 25225, 25226 ), "AL26a2" : ( 25226, 25227 ), "AL26a3" : ( 25227, 25228 ), "AL26a4" : ( 25228, 25229 ), "AL26a5" : ( 25229, 25230 ), "AL26a6" : ( 25230, 25231 ), "AL26a7" : ( 25231, 25232 ), "AL26a8" : ( 25232, 25233 ), "AL1_ao26" : ( 25233, 25235 ), "AL2_ao26" : ( 25235, 25237 ), "AL3_ao26" : ( 25237, 25239 ), "AL4_ao26" : ( 25239, 25241 ), 
"AL5_ao26" : ( 25241, 25243 ), "AL6_ao26" : ( 25243, 25245 ), "AL7_ao26" : ( 25245, 25247 ), "AL8_ao26" : ( 25247, 25249 ), "AL26b1" : ( 25249, 25250 ), "AL26b2" : ( 25250, 25251 ), "AL26b3" : ( 25251, 25252 ), "AL26b4" : ( 25252, 25253 ), "AL26b5" : ( 25253, 25254 ), "AL26b6" : ( 25254, 25255 ), "AL26b7" : ( 25255, 25256 ), "AL26b8" : ( 25256, 25257 ), "AL26AgeRec" : ( 25257, 25259 ), "AL26Rec" : ( 25259, 25260 ), "AL26c" : ( 25260, 25261 ), "AL27" : ( 25261, 25262 ), "AL27AgeOns" : ( 25262, 25264 ), "AL27Ons" : ( 25264, 25265 ), "AL27AgeRec" : ( 25265, 25267 ), "AL27Rec" : ( 25267, 25268 ), "AL27b" : ( 25268, 25269 ), "AL27c" : ( 25269, 25270 ), "AL28" : ( 25270, 25271 ), "AL28AgeOns" : ( 25271, 25273 ), "AL28Ons" : ( 25273, 25274 ), "AL28AgeRec" : ( 25274, 25276 ), "AL28Rec" : ( 25276, 25277 ), "AL28b" : ( 25277, 25279 ), "AL28b1" : ( 25279, 25280 ), "AL28c" : ( 25280, 25281 ), "AL29" : ( 25281, 25282 ), "AL29AgeOns" : ( 25282, 25284 ), "AL29Ons" : ( 25284, 25285 ), "AL29AgeRec" : ( 25285, 25287 ), "AL29Rec" : ( 25287, 25288 ), "AL29b" : ( 25288, 25290 ), "AL29b1" : ( 25290, 25291 ), "AL29c" : ( 25291, 25292 ), "AL31_1" : ( 25292, 25293 ), "AL31_2" : ( 25293, 25294 ), "AL31_3" : ( 25294, 25295 ), "AL31_4" : ( 25295, 25296 ), "AL31_5" : ( 25296, 25297 ), "AL31_6" : ( 25297, 25298 ), "AL31_7" : ( 25298, 25299 ), "AL31_7SPECIFY" : ( 25299, 25379 ), "AL31AgeOns" : ( 25379, 25381 ), "AL31Ons" : ( 25381, 25382 ), "AL31b" : ( 25382, 25383 ), "AL32" : ( 25383, 25384 ), "AL32_specify1" : ( 25384, 25464 ), "AL32_code1" : ( 25464, 25467 ), "AL32_specify2" : ( 25467, 25547 ), "AL32_code2" : ( 25547, 25550 ), "AL32AgeOns" : ( 25550, 25552 ), "AL32Ons" : ( 25552, 25553 ), "AL32Another" : ( 25553, 25554 ), "AL32AgeRec" : ( 25554, 25556 ), "AL32Rec" : ( 25556, 25557 ), "AL32c" : ( 25557, 25558 ), "AL33_1" : ( 25558, 25559 ), "AL33_2" : ( 25559, 25560 ), "AL33_3" : ( 25560, 25561 ), "AL33_4" : ( 25561, 25562 ), "AL33_5" : ( 25562, 25563 ), "AL33a" : ( 25563, 25564 ), "AL33AgeOns" : ( 25564, 25566 ), "AL33Ons" : ( 25566, 25567 ), "AL33AgeRec" : ( 25567, 25569 ), "AL33Rec" : ( 25569, 25570 ), "AL34" : ( 25570, 25571 ), "AL34AgeOns" : ( 25571, 25573 ), "AL34Ons" : ( 25573, 25574 ), "AL35" : ( 25574, 25575 ), "AL35AgeOns" : ( 25575, 25577 ), "AL35Ons" : ( 25577, 25578 ), "al37aQsx" : ( 25578, 25579 ), "al37aQsx2" : ( 25579, 25580 ), "al37aQsx3" : ( 25580, 25581 ), "al37aQsx4" : ( 25581, 25582 ), "al37aQsx5" : ( 25582, 25583 ), "al37aQsx6" : ( 25583, 25584 ), "al37aQsx7" : ( 25584, 25585 ), "al37aQsx8" : ( 25585, 25586 ), "al37aQsx9" : ( 25586, 25587 ), "al37aQsx10" : ( 25587, 25588 ), "AL37_1" : ( 25588, 25589 ), "AL37_2" : ( 25589, 25590 ), "AL37_3" : ( 25590, 25591 ), "AL37_4" : ( 25591, 25592 ), "AL37_5" : ( 25592, 25593 ), "AL37_6" : ( 25593, 25594 ), "AL37_7" : ( 25594, 25595 ), "AL37_8" : ( 25595, 25596 ), "AL37_9" : ( 25596, 25597 ), "AL37_10" : ( 25597, 25598 ), "AL37a1" : ( 25598, 25599 ), "AL37a2"
2 * np.cos(2 * theta) + 2 * np.cos(2 * phi) + 14 ) / 16 assert np.allclose(res, expected, atol=tol, rtol=0) def test_pauliz_hadamard(self, theta, phi, varphi, tol): """Test that a tensor product involving PauliZ and PauliY and hadamard works correctly""" dev = qml.device("default.qubit.tf", wires=3) obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2) dev.reset() dev.apply( [ qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.RX(varphi, wires=[2]), qml.CNOT(wires=[0, 1]), qml.CNOT(wires=[1, 2]) ], obs.diagonalizing_gates() ) res = dev.var(obs) expected = ( 3 + np.cos(2 * phi) * np.cos(varphi) ** 2 - np.cos(2 * theta) * np.sin(varphi) ** 2 - 2 * np.cos(theta) * np.sin(phi) * np.sin(2 * varphi) ) / 4 assert np.allclose(res, expected, atol=tol, rtol=0) def test_hermitian(self, theta, phi, varphi, tol): """Test that a tensor product involving qml.Hermitian works correctly""" dev = qml.device("default.qubit.tf", wires=3) A = np.array( [ [-6, 2 + 1j, -3, -5 + 2j], [2 - 1j, 0, 2 - 1j, -5 + 4j], [-3, 2 + 1j, 0, -4 + 3j], [-5 - 2j, -5 - 4j, -4 - 3j, -6], ] ) obs = qml.PauliZ(0) @ qml.Hermitian(A, wires=[1, 2]) dev.apply( [ qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.RX(varphi, wires=[2]), qml.CNOT(wires=[0, 1]), qml.CNOT(wires=[1, 2]) ], obs.diagonalizing_gates() ) res = dev.var(obs) expected = ( 1057 - np.cos(2 * phi) + 12 * (27 + np.cos(2 * phi)) * np.cos(varphi) - 2 * np.cos(2 * varphi) * np.sin(phi) * (16 * np.cos(phi) + 21 * np.sin(phi)) + 16 * np.sin(2 * phi) - 8 * (-17 + np.cos(2 * phi) + 2 * np.sin(2 * phi)) * np.sin(varphi) - 8 * np.cos(2 * theta) * (3 + 3 * np.cos(varphi) + np.sin(varphi)) ** 2 - 24 * np.cos(phi) * (np.cos(phi) + 2 * np.sin(phi)) * np.sin(2 * varphi) - 8 * np.cos(theta) * ( 4 * np.cos(phi) * ( 4 + 8 * np.cos(varphi) + np.cos(2 * varphi) - (1 + 6 * np.cos(varphi)) * np.sin(varphi) ) + np.sin(phi) * ( 15 + 8 * np.cos(varphi) - 11 * np.cos(2 * varphi) + 42 * np.sin(varphi) + 3 * np.sin(2 * varphi) ) ) ) / 16 assert np.allclose(res, expected, atol=tol, rtol=0) ##################################################### # QNode-level integration tests ##################################################### class TestQNodeIntegration: """Integration tests for default.qubit.tf. 
This test ensures it integrates properly with the PennyLane UI, in particular the new QNode.""" def test_defines_correct_capabilities(self): """Test that the device defines the right capabilities""" dev = qml.device("default.qubit.tf", wires=1) cap = dev.capabilities() capabilities = { "model": "qubit", "supports_finite_shots": True, "supports_tensor_observables": True, "returns_probs": True, "returns_state": True, "supports_reversible_diff": False, "supports_inverse_operations": True, "supports_analytic_computation": True, "passthru_interface": 'tf', "passthru_devices": { "tf": "default.qubit.tf", "autograd": "default.qubit.autograd", "jax": "default.qubit.jax", }, } assert cap == capabilities def test_load_tensornet_tf_device(self): """Test that the tensor network plugin loads correctly""" dev = qml.device("default.qubit.tf", wires=2) assert dev.num_wires == 2 assert dev.shots == 1000 assert dev.analytic assert dev.short_name == "default.qubit.tf" assert dev.capabilities()["passthru_interface"] == "tf" def test_qubit_circuit(self, tol): """Test that the tensor network plugin provides correct result for a simple circuit using the old QNode.""" p = tf.Variable(0.543) dev = qml.device("default.qubit.tf", wires=1) @qml.qnode(dev, interface="tf") def circuit(x): qml.RX(x, wires=0) return qml.expval(qml.PauliY(0)) expected = -tf.math.sin(p) assert isinstance(circuit, qml.qnodes.PassthruQNode) assert np.isclose(circuit(p), expected, atol=tol, rtol=0) def test_correct_state(self, tol): """Test that the device state is correct after applying a quantum function on the device""" dev = qml.device("default.qubit.tf", wires=2) state = dev.state expected = np.array([1, 0, 0, 0]) assert np.allclose(state, expected, atol=tol, rtol=0) @qml.qnode(dev, interface="tf", diff_method="backprop") def circuit(): qml.Hadamard(wires=0) qml.RZ(np.pi / 4, wires=0) return qml.expval(qml.PauliZ(0)) circuit() state = dev.state amplitude = np.exp(-1j * np.pi / 8) / np.sqrt(2) expected = np.array([amplitude, 0, np.conj(amplitude), 0]) assert np.allclose(state, expected, atol=tol, rtol=0) @pytest.mark.parametrize("theta", [0.5432, -0.232]) @pytest.mark.parametrize("op,func", single_qubit_param) def test_one_qubit_param_gates(self, theta, op, func, init_state, tol): """Test the integration of the one-qubit single parameter rotations by passing a TF data structure as a parameter""" dev = qml.device("default.qubit.tf", wires=1) state = init_state(1) @qml.qnode(dev, interface='tf') def circuit(params): qml.QubitStateVector(state, wires=[0]) op(params[0], wires=[0]) return qml.expval(qml.PauliZ(0)) # Pass a TF Variable to the qfunc params = tf.Variable(np.array([theta])) circuit(params) res = dev.state expected = func(theta) @ state assert np.allclose(res.numpy(), expected, atol=tol, rtol=0) @pytest.mark.parametrize("theta", [0.5432, 4.213]) @pytest.mark.parametrize("op,func", two_qubit_param) def test_two_qubit_param_gates(self, theta, op, func, init_state, tol): """Test the integration of the two-qubit single parameter rotations by passing a TF data structure as a parameter""" dev = qml.device("default.qubit.tf", wires=2) state = init_state(2) @qml.qnode(dev, interface='tf') def circuit(params): qml.QubitStateVector(state, wires=[0,1]) op(params[0], wires=[0, 1]) return qml.expval(qml.PauliZ(0)) # Pass a TF Variable to the qfunc params = tf.Variable(np.array([theta])) circuit(params) res = dev.state expected = func(theta) @ state assert np.allclose(res.numpy(), expected, atol=tol, rtol=0) def 
test_controlled_rotation_integration(self, init_state, tol): """Test the integration of the two-qubit controlled rotation by passing a TF data structure as a parameter""" dev = qml.device("default.qubit.tf", wires=2) a = 1.7 b = 1.3432 c = -0.654 state = init_state(2) @qml.qnode(dev, interface='tf') def circuit(params): qml.QubitStateVector(state, wires=[0,1]) qml.CRot(params[0], params[1], params[2], wires=[0,1]) return qml.expval(qml.PauliZ(0)) # Pass a TF Variable to the qfunc params = tf.Variable(np.array([a,b,c])) circuit(params) res = dev.state expected = CRot3(a, b, c) @ state assert np.allclose(res.numpy(), expected, atol=tol, rtol=0) class TestPassthruIntegration: """Tests for integration with the PassthruQNode""" def test_jacobian_variable_multiply(self, tol): """Test that jacobian of a QNode with an attached default.qubit.tf device gives the correct result in the case of parameters multiplied by scalars""" x = tf.Variable(0.43316321) y = tf.Variable(0.2162158) z = tf.Variable(0.75110998) dev = qml.device("default.qubit.tf", wires=1) @qml.qnode(dev, interface="tf", diff_method="backprop") def circuit(p): qml.RX(3 * p[0], wires=0) qml.RY(p[1], wires=0) qml.RX(p[2] / 2, wires=0) return qml.expval(qml.PauliZ(0)) with tf.GradientTape() as tape: res = circuit([x, y, z]) expected = tf.math.cos(3 * x) * tf.math.cos(y) * tf.math.cos(z / 2) - tf.math.sin( 3 * x ) * tf.math.sin(z / 2) assert np.allclose(res, expected, atol=tol, rtol=0) res = tf.concat(tape.jacobian(res, [x, y, z]), axis=0) expected = np.array( [ -3 * ( tf.math.sin(3 * x) * tf.math.cos(y) * tf.math.cos(z / 2) + tf.math.cos(3 * x) * tf.math.sin(z / 2) ), -tf.math.cos(3 * x) * tf.math.sin(y) * tf.math.cos(z / 2), -0.5 * ( tf.math.sin(3 * x) * tf.math.cos(z / 2) + tf.math.cos(3 * x) * tf.math.cos(y) * tf.math.sin(z / 2) ), ] ) assert np.allclose(res, expected, atol=tol, rtol=0) def test_jacobian_repeated(self, tol): """Test that jacobian of a QNode with an attached default.qubit.tf device gives the correct result in the case of repeated parameters""" x = 0.43316321 y = 0.2162158 z = 0.75110998 p = tf.Variable([x, y, z]) dev = qml.device("default.qubit.tf", wires=1) @qml.qnode(dev, interface="tf", diff_method="backprop") def circuit(x): qml.RX(x[1], wires=0) qml.Rot(x[0], x[1], x[2], wires=0) return qml.expval(qml.PauliZ(0)) with tf.GradientTape() as tape: res = circuit(p) expected = np.cos(y) ** 2 - np.sin(x) * np.sin(y) ** 2 assert np.allclose(res, expected, atol=tol, rtol=0) res = tape.jacobian(res, p) expected = np.array( [-np.cos(x) * np.sin(y) ** 2, -2 * (np.sin(x) + 1) * np.sin(y) * np.cos(y), 0] ) assert np.allclose(res, expected, atol=tol, rtol=0) def test_jacobian_agrees_backprop_parameter_shift(self, tol): """Test that jacobian of a QNode with an attached default.qubit.tf device gives the correct result with respect to the parameter-shift method""" p = np.array([0.43316321, 0.2162158, 0.75110998, 0.94714242]) def circuit(x): for i in range(0, len(p), 2): qml.RX(x[i], wires=0) qml.RY(x[i + 1], wires=1) for i in range(2): qml.CNOT(wires=[i, i + 1]) return qml.expval(qml.PauliZ(0)), qml.var(qml.PauliZ(1)) dev1 = qml.device("default.qubit.tf", wires=3) dev2 = qml.device("default.qubit.tf", wires=3) circuit1 = qml.QNode(circuit, dev1, diff_method="backprop", interface="tf") circuit2 = qml.QNode(circuit, dev2, diff_method="parameter-shift") p_tf = tf.Variable(p) with tf.GradientTape() as tape: res = circuit1(p_tf) assert np.allclose(res, circuit2(p), atol=tol, rtol=0) res = tape.jacobian(res, p_tf) assert np.allclose(res, 
circuit2.jacobian([p]), atol=tol, rtol=0) def test_state_differentiability(self, tol): """Test that the device state can be differentiated""" dev = qml.device("default.qubit.tf", wires=1) @qml.qnode(dev, diff_method="backprop", interface="tf") def circuit(a): qml.RY(a, wires=0) return qml.expval(qml.PauliZ(0)) a = tf.Variable(0.54) with tf.GradientTape() as tape: circuit(a) res = tf.abs(dev.state) ** 2 res = res[1] - res[0] grad = tape.gradient(res, a) expected = tf.sin(a) assert np.allclose(grad, expected, atol=tol, rtol=0) def test_prob_differentiability(self, tol): """Test that the device probability can be differentiated""" dev = qml.device("default.qubit.tf", wires=2) @qml.qnode(dev, diff_method="backprop", interface="tf") def circuit(a, b): qml.RX(a, wires=0) qml.RY(b, wires=1) qml.CNOT(wires=[0, 1]) return qml.probs(wires=[1]) a = tf.Variable(0.54) b = tf.Variable(0.12) with tf.GradientTape() as tape: # get the probability of wire 1 prob_wire_1 = circuit(a, b)[0] # compute Prob(|1>_1) - Prob(|0>_1) res = prob_wire_1[1] - prob_wire_1[0] expected = -tf.cos(a) * tf.cos(b)
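# ---------------------------------------------------------------------------
# Hedged sketch (not one of the original tests): the backprop pattern these
# tests exercise. With diff_method="backprop" on default.qubit.tf, a QNode
# is differentiated directly through tf.GradientTape rather than via
# parameter shifts.
import pennylane as qml
import tensorflow as tf

dev = qml.device("default.qubit.tf", wires=1)

@qml.qnode(dev, interface="tf", diff_method="backprop")
def example_circuit(a):
    qml.RX(a, wires=0)
    return qml.expval(qml.PauliZ(0))

a = tf.Variable(0.3)
with tf.GradientTape() as tape:
    out = example_circuit(a)        # <Z> = cos(a)
grad = tape.gradient(out, a)        # d<Z>/da = -sin(a)
# ---------------------------------------------------------------------------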
[]: user = funs.user_check(booster, booster.guild) for i in server['boost']['reward']: user['inv'].append(funs.creat_item(booster.guild.id, i)) funs.user_update(booster.id, booster.guild, 'inv', user['inv']) if after.premium_since != None and before.premium_since != after.premium_since and before.premium_since != None: await on_nitro_boost(before) #log try: server = servers.find_one({"server": after.guild.id}) if server['mod']['log_channel'] != {}: counter = 0 log = server['mod']['log_channel']['logging'] channel = await self.bot.fetch_channel(server['mod']['log_channel']['channel']) emb = discord.Embed(title = f'Обновление пользователя', description = f'Пользователь {before.mention} был обновлён', color=0xE28112 ) if 'member_status' in log or 'all' in log or 'member' in log: if before.status != after.status: emb.add_field(name = ' | Пользователь обновил статус', value = f'Изначальный статус: `{ before.status}`\nСейчас статус: `{after.status }`', inline = True) counter += 1 if 'member_nick' in log or 'all' in log or 'member' in log: if before.nick != after.nick: emb.add_field(name = ' | Обновление никнейма пользователя', value = f'Изначальный ник: `{ before.nick}`\nСейчас ник: `{after.nick}`', inline = True) counter += 1 if 'member_roles' in log or 'all' in log or 'member' in log: if before.roles != after.roles: drf = list( (set(after.roles) | set(before.roles)) - (set(after.roles) & set(before.roles)) ) ddr = [] #удалённые роли adr = [] #добавленые роли for i in drf: if i in before.roles and i not in after.roles: ddr.append(i) if i in after.roles and i not in before.roles: adr.append(i) if ddr != []: text = '' for i in ddr: text += f'{i.mention} ' emb.add_field(name = ' | Удалённые роли у пользователя', value = text, inline = True) counter += 1 if adr != []: text = '' for i in adr: text += f'{i.mention} ' emb.add_field(name = ' | Добавленные роли у пользователя', value = text, inline = True) counter += 1 if 'member_top_role' in log or 'all' in log or 'member' in log: if before.top_role != after.top_role: emb.add_field(name = ' | Высшая роль пользователя изменилась', value = f'Изначальный роль: `{ before.top_role}`\nСейчас роль: `{after.top_role}`', inline = True) counter += 1 if counter != 0: await channel.send(embed = emb) except Exception: pass #log @commands.Cog.listener() async def on_guild_channel_update(self, before, after): try: if type(before) == discord.channel.CategoryChannel: words = ["категории", 'Категория', 'была обновлена.'] else: words = ['канала', 'Канал', 'был обновлён.'] server = servers.find_one({"server": before.guild.id}) if server['mod']['log_channel'] != {}: counter = 0 log = server['mod']['log_channel']['logging'] channel = await self.bot.fetch_channel(server['mod']['log_channel']['channel']) emb = discord.Embed(title = f'Обновление {words[0]}', description = f'{words[1]} {before.mention} {words[2]}', color=0xE28112 ) if 'channel_name' in log or 'all' in log or 'channel' in log: if before.name != after.name: emb.add_field(name = ' | Обновление названия', value = f'Изначальное название: `{ before.name }`\nСейчас называется: `{ after.name }`', inline = True) counter += 1 if 'channel_category' in log or 'all' in log or 'channel' in log: if before.category != after.category: emb.add_field(name = ' | Обновление категории', value = f'Изначальная категория: `{ before.category }`\nСейчас в категории: `{ after.category }`', inline = True) counter += 1 if 'channel_rights' in log or 'all' in log or 'channel' in log: if before.overwrites != after.overwrites: dr = {} for i in 
dict(before.overwrites): n = [] for b in dict(before.overwrites)[i]: n.append(b) dr.update({str(i.id): n}) dr2 = {} for i in dict(after.overwrites): n = [] for b in dict(after.overwrites)[i]: n.append(b) dr2.update({str(i.id): n}) afd = {} for x in dr2: md = {} for nx in dr2[str(x)]: md.update({ str(list(nx)[0]) : list(nx)[1]}) afd.update({x: md}) drf = {} #совпадения двух словарей ddr = [] #удалённые роли adr = {} #добавленые роли for key in dr: try: if dr2[key] != dr[key]: drf.update({key: list((set(dr2[key]) | set(dr[key])) - (set(dr2[key]) & set(dr[key]))) }) except KeyError: ddr.append(key) #добавленные роли for key2 in dr2: try: dr[key2] except KeyError: for l in dr2[key2]: if list(l)[1] != None: try: adr[key2].append( list(l)) except KeyError: adr.update({ key2: [ list(l) ] }) if adr != {}: text = '' op = '' for i in adr: if before.guild.get_member(int(i)) != None: memb = before.guild.get_member(int(i)) text += f'{memb.mention} ' op == 'пользователя' if before.guild.get_role(int(i)) != None: rol = before.guild.get_role(int(i)) text += f'{rol.mention} ' op = 'роли' for n in adr[i]: text += f'| `{n[0]}` {n[1]}\n' emb.add_field(name = f' | Добавление прав для {op}', value = text.replace('True','<:n:869159450588635196>').replace('False','<:f:869169592201777224>'), inline = True) counter += 1 if ddr != []: text = '' counter2 = 0 op = '' for i in ddr: if before.guild.get_member(int(i)) != None: memb = before.guild.get_member(int(i)) text += f'{memb.mention}\n' op == 'пользователя' if before.guild.get_role(int(i)) != None: rol = before.guild.get_role(int(i)) text += f'{rol.mention}\n' op = 'роли' emb.add_field(name = f' | Удаление прав у {op}', value = text, inline = True) counter += 1 if drf != {}: text = '' op = '' for i in drf: if before.guild.get_member(int(i)) != None: memb = before.guild.get_member(int(i)) text += f'Пользователь {memb.mention} \n' if before.guild.get_role(int(i)) != None: rol = before.guild.get_role(int(i)) text += f'Роль {rol.mention} \n' counter2 = 0 for n in drf[i]: if afd[i][n[0]] != n[1]: counter2 += 1 text += f'{n[1]} ➜ {afd[i][n[0]]} | `{n[0]}`\n' emb.add_field(name = f' | Имзенение прав', value = f'{text}'.replace('True','<:n:869159450588635196>').replace('False','<:f:869169592201777224>').replace('None','<:m:869169622618873906>'), inline = True) counter += 1 if 'channel_roles' in log or 'all' in log or 'channel' in log: if before.changed_roles != after.changed_roles: nd = '' yd = '' for i in before.changed_roles: if i not in after.changed_roles: nd = i.mention for i in after.changed_roles: if i not in before.changed_roles: yd = i.mention if yd != '': emb.add_field(name = ' | Роль добавлена в права доступа', value = f'{yd}', inline = True) counter += 1 if nd != '': emb.add_field(name = ' | Роль убрана из прав доступа', value = f'{nd}', inline = True) counter += 1 if 'channel_permissions_synced' in log or 'all' in log or 'channel' in log: if before.permissions_synced != after.permissions_synced: if after.permissions_synced == True: words = 'Права канала были синхронизированы с категорией' else: words = 'Права канала более не синхронизированы с категорией' emb.add_field(name = ' | Синхронизация', value = words, inline = True) counter += 1 if 'channel_position' in log or 'all' in log or 'channel' in log: if before.position != after.position: emb.add_field(name = f' | Измениение позиции {words[0]}', value = f'Изначальная позиция: {before.position}\nСейчас позиция: {after.position}', inline = True) counter += 1 if type(after) == discord.channel.TextChannel: if 
'channel_slowmode' in log or 'all' in log or 'channel' in log: if before.slowmode_delay != after.slowmode_delay: emb.add_field(name = f' | Медленный режим изменён', value = f'Изначальное ожидание: {funs.time_end(before.slowmode_delay)}\nСейчас ожидание: {funs.time_end(after.slowmode_delay)}', inline = True) counter += 1 if 'channel_topic' in log or 'all' in log or 'channel' in log: if before.topic != after.topic: emb.add_field(name = f' | Изменение темы', value = f'Изначальная тема: `{before.topic}`\nСейчас тема: `{after.topic}`', inline = True) counter += 1 if 'channel_nsfw' in log or 'all' in log or 'channel' in log: if before.is_nsfw() != after.is_nsfw(): emb.add_field(name = f' | Изменение nsfw', value = f'Изначально: {before.is_nsfw()}\nСейчас: {after.is_nsfw()}'.replace('True','<:n:869159450588635196>').replace('False','<:f:869169592201777224>'), inline = True) counter += 1 if type(after) in [discord.channel.VoiceChannel, discord.channel.StageChannel]: if 'channel_bitrate' in log or 'all' in log or 'channel' in log: if before.bitrate != after.bitrate: emb.add_field(name = f' | Изменение битрейта', value = f'Изначальный битрейт: `{before.bitrate}`\nСейчас битрейт: `{after.bitrate}`', inline = True) counter += 1 if 'channel_rtc_region' in log or 'all' in log or 'channel' in log: if before.rtc_region != after.rtc_region: emb.add_field(name = f' | Изменение региона', value = f'Изначальный регион: `{before.rtc_region}`\nСейчас регион: `{after.rtc_region}`', inline = True) counter += 1 if type(after) == discord.channel.VoiceChannel: if 'channel_user_limit' in log or 'all' in log or 'channel' in log: if before.user_limit != after.user_limit: emb.add_field(name = f' | Изменение лимита', value = f'Изначальный лимит: `{before.user_limit}`\nСейчас лимит: `{after.user_limit}`', inline = True) counter += 1 if counter != 0: await channel.send(embed = emb) except Exception: pass @commands.Cog.listener() async def on_guild_channel_create(self, channel): try: server = servers.find_one({"server": channel.guild.id}) if server['mod']['log_channel'] != {}: if 'channel_create' in server['mod']['log_channel']['logging'] or 'all' in server['mod']['log_channel']['logging'] or 'channel' in server['mod']['log_channel']['logging']: dr = {} drf = {} # non-default overwrites on the new channel for i in dict(channel.overwrites): n = [] for b in dict(channel.overwrites)[i]: n.append(b) dr.update({str(i.id): n}) for key in dr: for l in dr[key]: if list(l)[1] != None: try: drf[key].append( list(l)) except KeyError: drf.update({ key: [ list(l) ] }) text = '' for i in drf: if channel.guild.get_member(int(i)) != None: memb
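
# --- Illustrative aside (not part of the cog above): the member- and
# channel-update handlers find changed roles/overwrites with a symmetric
# set difference. A minimal, dependency-free sketch of that technique,
# with plain strings standing in for discord.Role objects:
def diff_roles(before_roles, after_roles):
    """Return (removed, added) between two role lists."""
    before_set, after_set = set(before_roles), set(after_roles)
    changed = (before_set | after_set) - (before_set & after_set)
    removed = [r for r in changed if r in before_set and r not in after_set]
    added = [r for r in changed if r in after_set and r not in before_set]
    return removed, added

# Usage: mirrors the ddr/adr split in the member-update handler.
removed, added = diff_roles(['member', 'muted'], ['member', 'booster'])
assert removed == ['muted'] and added == ['booster']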
return subclass(*args_, **kwargs_) if DeleteDangerousGoodsHandlingUnitReply.subclass: return DeleteDangerousGoodsHandlingUnitReply.subclass(*args_, **kwargs_) else: return DeleteDangerousGoodsHandlingUnitReply(*args_, **kwargs_) factory = staticmethod(factory) def get_ns_prefix_(self): return self.ns_prefix_ def set_ns_prefix_(self, ns_prefix): self.ns_prefix_ = ns_prefix def get_HighestSeverity(self): return self.HighestSeverity def set_HighestSeverity(self, HighestSeverity): self.HighestSeverity = HighestSeverity def get_Notifications(self): return self.Notifications def set_Notifications(self, Notifications): self.Notifications = Notifications def add_Notifications(self, value): self.Notifications.append(value) def insert_Notifications_at(self, index, value): self.Notifications.insert(index, value) def replace_Notifications_at(self, index, value): self.Notifications[index] = value def get_TransactionDetail(self): return self.TransactionDetail def set_TransactionDetail(self, TransactionDetail): self.TransactionDetail = TransactionDetail def get_Version(self): return self.Version def set_Version(self, Version): self.Version = Version def get_CompletedShipmentDetail(self): return self.CompletedShipmentDetail def set_CompletedShipmentDetail(self, CompletedShipmentDetail): self.CompletedShipmentDetail = CompletedShipmentDetail def validate_NotificationSeverityType(self, value): result = True # Validate type NotificationSeverityType, a restriction on xs:string. if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None: if not isinstance(value, str): lineno = self.gds_get_node_lineno_() self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, }) return False value = value enumerations = ['ERROR', 'FAILURE', 'NOTE', 'SUCCESS', 'WARNING'] if value not in enumerations: lineno = self.gds_get_node_lineno_() self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on NotificationSeverityType' % {"value" : encode_str_2_3(value), "lineno": lineno} ) result = False return result def hasContent_(self): if ( self.HighestSeverity is not None or self.Notifications or self.TransactionDetail is not None or self.Version is not None or self.CompletedShipmentDetail is not None ): return True else: return False def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DeleteDangerousGoodsHandlingUnitReply', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('DeleteDangerousGoodsHandlingUnitReply') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None and name_ == 'DeleteDangerousGoodsHandlingUnitReply': name_ = self.original_tagname_ if UseCapturedNS_ and self.ns_prefix_: namespaceprefix_ = self.ns_prefix_ + ':' showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DeleteDangerousGoodsHandlingUnitReply') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='DeleteDangerousGoodsHandlingUnitReply', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_)) else: outfile.write('/>%s' % 
(eol_, )) def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DeleteDangerousGoodsHandlingUnitReply'): pass def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DeleteDangerousGoodsHandlingUnitReply', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.HighestSeverity is not None: namespaceprefix_ = self.HighestSeverity_nsprefix_ + ':' if (UseCapturedNS_ and self.HighestSeverity_nsprefix_) else '' showIndent(outfile, level, pretty_print) outfile.write('<%sHighestSeverity>%s</%sHighestSeverity>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.HighestSeverity), input_name='HighestSeverity')), namespaceprefix_ , eol_)) for Notifications_ in self.Notifications: namespaceprefix_ = self.Notifications_nsprefix_ + ':' if (UseCapturedNS_ and self.Notifications_nsprefix_) else '' Notifications_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Notifications', pretty_print=pretty_print) if self.TransactionDetail is not None: namespaceprefix_ = self.TransactionDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.TransactionDetail_nsprefix_) else '' self.TransactionDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='TransactionDetail', pretty_print=pretty_print) if self.Version is not None: namespaceprefix_ = self.Version_nsprefix_ + ':' if (UseCapturedNS_ and self.Version_nsprefix_) else '' self.Version.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Version', pretty_print=pretty_print) if self.CompletedShipmentDetail is not None: namespaceprefix_ = self.CompletedShipmentDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.CompletedShipmentDetail_nsprefix_) else '' self.CompletedShipmentDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='CompletedShipmentDetail', pretty_print=pretty_print) def build(self, node, gds_collector_=None): self.gds_collector_ = gds_collector_ if SaveElementTreeNode: self.gds_elementtree_node_ = node already_processed = set() self.ns_prefix_ = node.prefix self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_) return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None): if nodeName_ == 'HighestSeverity': value_ = child_.text value_ = self.gds_parse_string(value_, node, 'HighestSeverity') value_ = self.gds_validate_string(value_, node, 'HighestSeverity') self.HighestSeverity = value_ self.HighestSeverity_nsprefix_ = child_.prefix # validate type NotificationSeverityType self.validate_NotificationSeverityType(self.HighestSeverity) elif nodeName_ == 'Notifications': obj_ = Notification.factory(parent_object_=self) obj_.build(child_, gds_collector_=gds_collector_) self.Notifications.append(obj_) obj_.original_tagname_ = 'Notifications' elif nodeName_ == 'TransactionDetail': obj_ = TransactionDetail.factory(parent_object_=self) obj_.build(child_, gds_collector_=gds_collector_) self.TransactionDetail = obj_ obj_.original_tagname_ = 'TransactionDetail' elif nodeName_ == 'Version': obj_ = VersionId.factory(parent_object_=self) obj_.build(child_, gds_collector_=gds_collector_) self.Version = obj_ obj_.original_tagname_ = 'Version' elif nodeName_ == 'CompletedShipmentDetail': obj_ = 
CompletedDangerousGoodsShipmentDetail.factory(parent_object_=self) obj_.build(child_, gds_collector_=gds_collector_) self.CompletedShipmentDetail = obj_ obj_.original_tagname_ = 'CompletedShipmentDetail' # end class DeleteDangerousGoodsHandlingUnitReply class DeleteDangerousGoodsHandlingUnitRequest(GeneratedsSuper): __hash__ = GeneratedsSuper.__hash__ subclass = None superclass = None def __init__(self, WebAuthenticationDetail=None, ClientDetail=None, TransactionDetail=None, Version=None, UploadId=None, HandlingUnitGroupId=None, TrackingNumbers=None, gds_collector_=None, **kwargs_): self.gds_collector_ = gds_collector_ self.gds_elementtree_node_ = None self.original_tagname_ = None self.parent_object_ = kwargs_.get('parent_object_') self.ns_prefix_ = None self.WebAuthenticationDetail = WebAuthenticationDetail self.WebAuthenticationDetail_nsprefix_ = None self.ClientDetail = ClientDetail self.ClientDetail_nsprefix_ = None self.TransactionDetail = TransactionDetail self.TransactionDetail_nsprefix_ = None self.Version = Version self.Version_nsprefix_ = None self.UploadId = UploadId self.UploadId_nsprefix_ = None self.HandlingUnitGroupId = HandlingUnitGroupId self.HandlingUnitGroupId_nsprefix_ = None if TrackingNumbers is None: self.TrackingNumbers = [] else: self.TrackingNumbers = TrackingNumbers self.TrackingNumbers_nsprefix_ = None def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: subclass = getSubclassFromModule_( CurrentSubclassModule_, DeleteDangerousGoodsHandlingUnitRequest) if subclass is not None: return subclass(*args_, **kwargs_) if DeleteDangerousGoodsHandlingUnitRequest.subclass: return DeleteDangerousGoodsHandlingUnitRequest.subclass(*args_, **kwargs_) else: return DeleteDangerousGoodsHandlingUnitRequest(*args_, **kwargs_) factory = staticmethod(factory) def get_ns_prefix_(self): return self.ns_prefix_ def set_ns_prefix_(self, ns_prefix): self.ns_prefix_ = ns_prefix def get_WebAuthenticationDetail(self): return self.WebAuthenticationDetail def set_WebAuthenticationDetail(self, WebAuthenticationDetail): self.WebAuthenticationDetail = WebAuthenticationDetail def get_ClientDetail(self): return self.ClientDetail def set_ClientDetail(self, ClientDetail): self.ClientDetail = ClientDetail def get_TransactionDetail(self): return self.TransactionDetail def set_TransactionDetail(self, TransactionDetail): self.TransactionDetail = TransactionDetail def get_Version(self): return self.Version def set_Version(self, Version): self.Version = Version def get_UploadId(self): return self.UploadId def set_UploadId(self, UploadId): self.UploadId = UploadId def get_HandlingUnitGroupId(self): return self.HandlingUnitGroupId def set_HandlingUnitGroupId(self, HandlingUnitGroupId): self.HandlingUnitGroupId = HandlingUnitGroupId def get_TrackingNumbers(self): return self.TrackingNumbers def set_TrackingNumbers(self, TrackingNumbers): self.TrackingNumbers = TrackingNumbers def add_TrackingNumbers(self, value): self.TrackingNumbers.append(value) def insert_TrackingNumbers_at(self, index, value): self.TrackingNumbers.insert(index, value) def replace_TrackingNumbers_at(self, index, value): self.TrackingNumbers[index] = value def hasContent_(self): if ( self.WebAuthenticationDetail is not None or self.ClientDetail is not None or self.TransactionDetail is not None or self.Version is not None or self.UploadId is not None or self.HandlingUnitGroupId is not None or self.TrackingNumbers ): return True else: return False def export(self, outfile, level, namespaceprefix_='', namespacedef_='', 
name_='DeleteDangerousGoodsHandlingUnitRequest', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('DeleteDangerousGoodsHandlingUnitRequest') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None and name_ == 'DeleteDangerousGoodsHandlingUnitRequest': name_ = self.original_tagname_ if UseCapturedNS_ and self.ns_prefix_: namespaceprefix_ = self.ns_prefix_ + ':' showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DeleteDangerousGoodsHandlingUnitRequest') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='DeleteDangerousGoodsHandlingUnitRequest', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DeleteDangerousGoodsHandlingUnitRequest'): pass def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DeleteDangerousGoodsHandlingUnitRequest', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.WebAuthenticationDetail is not None: namespaceprefix_ = self.WebAuthenticationDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.WebAuthenticationDetail_nsprefix_) else '' self.WebAuthenticationDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='WebAuthenticationDetail', pretty_print=pretty_print) if self.ClientDetail is not None: namespaceprefix_ = self.ClientDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.ClientDetail_nsprefix_) else '' self.ClientDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='ClientDetail', pretty_print=pretty_print) if self.TransactionDetail is not None: namespaceprefix_ = self.TransactionDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.TransactionDetail_nsprefix_) else '' self.TransactionDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='TransactionDetail', pretty_print=pretty_print) if self.Version is not None: namespaceprefix_ = self.Version_nsprefix_ + ':' if (UseCapturedNS_ and self.Version_nsprefix_) else '' self.Version.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Version', pretty_print=pretty_print) if self.UploadId is not None: namespaceprefix_ = self.UploadId_nsprefix_ + ':' if (UseCapturedNS_ and self.UploadId_nsprefix_) else '' showIndent(outfile, level, pretty_print) outfile.write('<%sUploadId>%s</%sUploadId>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.UploadId), input_name='UploadId')), namespaceprefix_ , eol_)) if self.HandlingUnitGroupId is not None: namespaceprefix_ = self.HandlingUnitGroupId_nsprefix_ + ':' if (UseCapturedNS_ and self.HandlingUnitGroupId_nsprefix_) else '' showIndent(outfile, level, pretty_print) outfile.write('<%sHandlingUnitGroupId>%s</%sHandlingUnitGroupId>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.HandlingUnitGroupId), input_name='HandlingUnitGroupId')), namespaceprefix_ , eol_)) for TrackingNumbers_ in self.TrackingNumbers: namespaceprefix_ = self.TrackingNumbers_nsprefix_ + ':' if (UseCapturedNS_ and 
self.TrackingNumbers_nsprefix_) else '' showIndent(outfile, level, pretty_print) outfile.write('<%sTrackingNumbers>%s</%sTrackingNumbers>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(TrackingNumbers_), input_name='TrackingNumbers')), namespaceprefix_ , eol_)) def build(self, node, gds_collector_=None): self.gds_collector_ = gds_collector_ if SaveElementTreeNode: self.gds_elementtree_node_ = node already_processed = set() self.ns_prefix_ = node.prefix self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_) return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None): if nodeName_ == 'WebAuthenticationDetail': obj_ = WebAuthenticationDetail.factory(parent_object_=self) obj_.build(child_, gds_collector_=gds_collector_) self.WebAuthenticationDetail = obj_ obj_.original_tagname_ = 'WebAuthenticationDetail' elif nodeName_ == 'ClientDetail': obj_ = ClientDetail.factory(parent_object_=self) obj_.build(child_, gds_collector_=gds_collector_) self.ClientDetail = obj_ obj_.original_tagname_ = 'ClientDetail' elif nodeName_ == 'TransactionDetail': obj_ = TransactionDetail.factory(parent_object_=self) obj_.build(child_, gds_collector_=gds_collector_) self.TransactionDetail = obj_ obj_.original_tagname_ = 'TransactionDetail' elif nodeName_ == 'Version': obj_ = VersionId.factory(parent_object_=self) obj_.build(child_, gds_collector_=gds_collector_) self.Version = obj_ obj_.original_tagname_ = 'Version' elif nodeName_ == 'UploadId': value_ = child_.text value_ = self.gds_parse_string(value_, node, 'UploadId') value_ = self.gds_validate_string(value_, node, 'UploadId') self.UploadId = value_ self.UploadId_nsprefix_ = child_.prefix elif nodeName_ == 'HandlingUnitGroupId':
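
# --- Illustrative aside: a hedged sketch of driving the generated bindings
# above. The module name `ship_service` is an assumption -- substitute
# whatever module name generateDS produced for your WSDL.
import sys
import ship_service  # hypothetical generated module containing the classes above

req = ship_service.DeleteDangerousGoodsHandlingUnitRequest.factory()
req.set_UploadId('UPLOAD-ID-PLACEHOLDER')
req.set_HandlingUnitGroupId('GROUP-ID-PLACEHOLDER')
req.add_TrackingNumbers('000000000000')  # placeholder tracking number
# export() walks the hasContent_/exportChildren machinery defined above
# and writes the XML element tree to any file-like object:
req.export(sys.stdout, 0)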
# GMLC-1-4-2/battery_interface -- src/fleets/fuel_cell_fleet/fuelcell_fleet.py # -*- coding: utf-8 -*- # !/usr/bin/env python3 """ @authors: <EMAIL>, <EMAIL>, Description: This class implements the FuelCell FleetInterface to integrate with a fleet of FuelCells """ import sys from os.path import dirname, abspath, join from warnings import simplefilter, filterwarnings, warn import configparser from datetime import datetime from numpy import polyfit, convolve, RankWarning, log, exp, ndarray from pandas import read_csv from scipy.optimize import fsolve from matplotlib.pyplot import figure, subplot2grid sys.path.insert(0, dirname(dirname(dirname(abspath(__file__))))) from fleet_interface import FleetInterface from fleet_response import FleetResponse from csv import writer import csv from grid_info_artificial_inertia import GridInfo from utils import ensure_ddir simplefilter('ignore', RankWarning) filterwarnings("ignore", category=RuntimeWarning) if sys.version_info >= (3,6,7): from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() class FuelCellFleet(FleetInterface): """ FuelCell Fleet Class """ def __init__(self, grid_info, mdl_config="config.ini", mdl_type="FuelCell", **kwargs): """ :param grid_info: GridInfo object derived from the GridInfo Class :param mdl_config: File location for config.ini :param mdl_type: Model parameters to be loaded from config.ini :param kwargs: """ # Load the config file that has model parameters self.base_path = dirname(abspath(__file__)) self.config = configparser.ConfigParser() self.config.read(join(self.base_path, mdl_config)) # Read the model parameters from config file with defaults as fallback self.fc_model_name = self.config.get(mdl_type, "Name", fallback="Default FuelCell Fleet") self.fc_T = float(self.config.get(mdl_type, "T", fallback=70)) self.fc_Ts = float(self.config.get(mdl_type, "Ts", fallback=25)) self.fc_Pe = float(self.config.get(mdl_type, "Pe", fallback=101000)) self.fc_Pe_out = float(self.config.get(mdl_type, "Pe_out", fallback=50000000)) self.fc_size = float(self.config.get(mdl_type, "Fc_size", fallback=1.6)) self.fc_A = float(self.config.get(mdl_type, "A", fallback=200)) self.fc_Nc = float(self.config.get(mdl_type, "Nc", fallback=20)) self.fc_F = float(self.config.get(mdl_type, "F", fallback=96485.34)) self.fc_ne0 = float(self.config.get(mdl_type, "ne0", fallback=2)) self.fc_charge_lvl_min = float(self.config.get(mdl_type, "charge_lvl_min", fallback=19)) self.fc_charge_lvl_max = float(self.config.get(mdl_type, "charge_lvl_max", fallback=95)) self.fc_DG_25 = float(self.config.get(mdl_type, "DG_25", fallback=237100.0)) self.fc_DG_200 = float(self.config.get(mdl_type, "DG_200", fallback=220370.0)) self.fc_DH = float(self.config.get(mdl_type, "DH", fallback=286000.0)) self.fc_R = float(self.config.get(mdl_type, "R", fallback=8.31447)) self.fc_Nfc = float(self.config.get(mdl_type, "Nfc", fallback=50)) self.fc_V_tank = float(self.config.get(mdl_type, "V_tank", fallback=0.3)) self.fc_Nt = float(self.config.get(mdl_type, "Nt", fallback=8)) self.fc_I_initial = int(self.config.get(mdl_type, "I_initial", fallback=0)) self.fc_I_final = int(self.config.get(mdl_type, "I_final", fallback=200)) self.fc_I_step = int(self.config.get(mdl_type, "I_step", fallback=1)) self.fc_alpha = float(self.config.get(mdl_type, "alpha", fallback=0.5)) self.fc_a = float(self.config.get(mdl_type, "a0", fallback=3e-5)) self.fc_b = float(self.config.get(mdl_type, "b", fallback=8e-6)) self.fc_i_n =
float(self.config.get(mdl_type, "i_n", fallback=-0.002)) self.fc_i_0 = float(self.config.get(mdl_type, "i_0", fallback=6.7e-5)) self.fc_i_L = float(self.config.get(mdl_type, "i_L", fallback=0.9)) self.fc_R_tot = float(self.config.get(mdl_type, "R_tot", fallback=-0.030)) self.fc_Patm = float(self.config.get(mdl_type, "Patm", fallback=101000)) self.fc_Tau_e = float(self.config.get(mdl_type, "Tau_e", fallback=80)) self.fc_Lambda = float(self.config.get(mdl_type, "Lambda", fallback=0.00333)) self.fc_pH2 = float(self.config.get(mdl_type, "pH2", fallback=0.5)) self.fc_pO2 = float(self.config.get(mdl_type, "pO2", fallback=0.8)) self.fc_x01 = float(self.config.get(mdl_type, "x0_1", fallback=0.7)) self.fc_x02 = float(self.config.get(mdl_type, "x0_2", fallback=80)) self.fc_Pmin_fleet = float(self.config.get(mdl_type, "Pmin_fleet", fallback=30)) self.fc_Pmax_fleet = float(self.config.get(mdl_type, "Pmax_fleet", fallback=130)) self.fc_At = float(self.config.get(mdl_type, "At", fallback=47)) self.fc_len = float(self.config.get(mdl_type, "len", fallback=2.54)) self.fc_Phi_0 = float(self.config.get(mdl_type, "Phi_0", fallback=5.9e-5)) self.fc_E_phi = float(self.config.get(mdl_type, "E_phi", fallback=42.7)) self.fc_b1 = float(self.config.get(mdl_type, "b1", fallback=1.55e-5)) self.fc_LHV_H2 = float(self.config.get(mdl_type, "LHV_H2", fallback=120000)) self.fc_ser_wght = float(self.config.get(mdl_type, "service_weight", fallback=1.0)) self.is_P_priority = self.bool_check(self.config.get(mdl_type, "is_P_priority", fallback=True)) self.FW21_Enabled = self.bool_check(self.config.get(mdl_type, "FW21_Enabled", fallback=False)) self.is_autonomous = self.bool_check(self.config.get(mdl_type, "is_autonomous", fallback=False)) self.fc_db_UF = float(self.config.get(mdl_type, "db_UF", fallback=0.36)) self.fc_db_OF = float(self.config.get(mdl_type, "db_OF", fallback=0.36)) self.fc_k_UF = float(self.config.get(mdl_type, "k_UF", fallback=0.05)) self.fc_k_OF = float(self.config.get(mdl_type, "k_OF", fallback=0.05)) self.__P_pre = float(self.config.get(mdl_type, "P_pre", fallback=-1.0)) # Establish the grid locations that the fuel cell fleet is connected in the grid if isinstance(grid_info, GridInfo): self.grid = grid_info else: # Set services to false if GridInfo is not called correctly during fleet init. self.is_P_priority = True self.is_autonomous = False self.FW21_Enabled = False warn("GridInfo() needs to be loaded with AI data. " "Please check your FuelCell fleet initialization code!! " "Fleet services will be set to False") # Pre-load a base profile for the Electrolyer based on price-demand response curve self.fc_pbase = join(self.base_path, self.config.get(mdl_type, "power_data", fallback="Pbase.txt")) if self.config.sections()[0] != mdl_type: print("Error reading config.ini file for model:"+"\t"*3+"%s [FAIL]!!" 
% mdl_type) print("Model found in config.ini file:"+"\t"*5+"%s!!\n" "Default modelling parameters will be used!!\n" % self.config.sections()[0]) else: print("Model parameters found for:"+"\t"*5+"%s [OKAY]\n" % self.config.sections()[0]) # Compute state parameters for the FuelCell fleet model self.f = None self.__Vi = self.__Ir = self.__Vr_age = self.__Ir_age = self.__p_base = 0.0 self.__V_act = self.__Ii = self.__Vr = self.__Ir_age = self.__id0 = 0.0 self.__expo = self.__conv = self.__ne = self.__ne_age = 0.0 self.__ed_cell = self.__e_cell = self.__p_tot_ideal = self.__p_tot_age = 0.0 self.__Qh2_m = self.__Qh2_m_age = 0.0 self.__m_dotH2 = self.__m_dotH2_age = 0.0 self.lka_h2 = self.__Phi = 0.0 self.__eta_ds = 0.0 self.__inc = 0 self.fleet_rating = self.fc_Nfc*self.fc_size # Set the operating range for the FuelCell default fleet - 6kW <= Pfc <= 240kW self.P_opt = lambda p_req, p_min, p_max: max(p_min, min(p_max, p_req)) # Operating temperature of FuelCell in deg. C. self.fc_T += 273.15 # Min. state of charge in the hydrogen tank self.min_charge = self.fc_charge_lvl_min*1e-2*self.fc_Pe_out # Max. state of charge in the hydrogen tank self.max_charge = self.fc_charge_lvl_max*1e-2*self.fc_Pe_out # Initial state of charge (pressure) in the tank w/ and w/o aging self.P_tank_ideal = self.P_tank_age = self.soc_ideal = self.soc_age = self.max_charge # Initial number of H2 moles in the tank w/ and w/o aging self.ni = (self.P_tank_ideal*self.fc_V_tank/self.fc_R/(self.fc_Ts+273.15))*self.fc_Nt # initial moles w/ and w/o ageing self.moles_ideal = self.moles_age = self.ni self.DG = self.fc_DG_200+(200-self.fc_T+273.15)/175*(self.fc_DG_25-self.fc_DG_200) self.V_rev = self.DG/self.fc_ne0/self.fc_F # Thermo-neutral voltage self.Vtn = self.fc_DH/self.fc_ne0/self.fc_F # (A) self.b1 = self.fc_b/self.fc_A # (A) self.i_n1 = self.fc_i_n*self.fc_A # (A) self.i_01 = self.fc_i_0*self.fc_A # (Ohm) self.R_tot1 = self.fc_R_tot/self.fc_A self.fc_At *= self.fc_Nt self.fc_len /= 1e3 # Fit the base power profile for FuelCell self.p = self.fit_pdat(self.fc_pbase, 5 * 60) # Output metrics dataframe self.metrics = [['ts', 'Vr_ideal', 'Vr_age', 'ne_ideal', 'ne_age', 'Soc_ideal', 'Soc_age', 'Lka_H2', 'nds']] def process_request(self, fleet_request): resp = self.fc_model(fleet_request.ts_req, fleet_request.sim_step, fleet_request.P_req, False, fleet_request.start_time) return resp def forecast(self, requests): soc_state, soc_state_age, state = self.soc_ideal, self.soc_age, self.__inc resp = [self.fc_model(req.ts_req, req.sim_step, req.P_req, True, req.start_time) for req in requests] self.soc_ideal, self.soc_age, self.__inc = soc_state, soc_state_age, state return resp def fc_model(self, ts, sim_step, Preq, forecast=False, start_time=None): """ :param ts: Request created for current time-step: datetime :param sim_step: Request for simulation time-step: timedelta object :param Preq: Request for current real power request :param forecast: Returns fleet response forecast :param start_time: Request for current real power request :return resp: Fleet response object """ if self.P_tank_ideal <= self.min_charge: pass else: # Create base power profile if isinstance(self.p, ndarray): self.__p_base = sum([self.p[j] * (self.__inc + 1) ** (21 - j) for j in range(22)]) else: self.__p_base = self.p # Ptogrid: Pbase for None or zero requests since FC is a source if Preq is None or float(Preq) == 0.0: Preq = self.__p_base # Ptogrid: Preq+Pbase for +ve or -ve requests else: Preq += self.__p_base # Compute the power generated by the fleet 
self.__fc_p_calc(Preq) if self.FW21_Enabled and self.is_autonomous: # all in kW Preq, self.f = self.frequency_watt(p_pre=self.__p_tot_ideal, p_avl=self.fc_Pmax_fleet, p_min=self.fc_Pmin_fleet, ts=ts, start_time=start_time) self.__p_tot_ideal = Preq # Update SOC self.__soc_calc() self.__inc += 1 # Response # Power injected is positive resp = FleetResponse() resp.ts = ts resp.sim_step = sim_step resp.C = 0 resp.dT_hold_limit = 0 resp.E = self.soc_ideal resp.Eff_charge = 1.0 resp.Eff_discharge = self.__eta_ds resp.P_dot_down = 0 resp.P_dot_up = 0 resp.P_togrid = self.__p_tot_ideal resp.P_togrid_max = self.fc_Pmax_fleet resp.P_togrid_min = self.fc_Pmin_fleet resp.P_service = resp.P_togrid - self.__p_base resp.P_service_max = self.fc_Pmax_fleet-self.__p_base resp.P_service_min = self.__p_base-self.fc_Pmin_fleet resp.Q_dot_down = 0 resp.Q_dot_up = 0 resp.Q_service = 0 resp.Q_service_max = 0 resp.Q_service_min = 0 resp.Q_togrid = 0 resp.Q_togrid_max = 0 resp.Q_togrid_min = 0 resp.T_restore = 0 resp.P_base = self.__p_base resp.Q_base = 0 resp.Strike_price = 0 resp.SOC_cost = 0 # Impact metrics if not forecast: self.metrics.append([str(ts), str(self.__Vr), str(self.__Vr_age), str(self.__ne), str(self.__ne_age), str(self.soc_ideal * 1e2), str(self.soc_age * 1e2), str(self.lka_h2), str(self.__eta_ds * 1e2)]) # Print Soc every 5 secs. if self.__inc % 5000 == 0: print("Soc:%4.2f%%" % (resp.E*1e2)) return resp def output_metrics(self, filename): base_path = dirname(abspath(__file__)) with open(join(base_path, str(filename)+'.csv'), 'w', newline='') as out: write = writer(out) write.writerows(self.metrics) print("Impact metrics created has been created"+"\t"*5+" [OKAY]\n") def output_impact_metrics(self, service_name): metrics_dir = join(dirname(dirname(dirname(abspath(__file__)))), 'integration_test', service_name) ensure_ddir(metrics_dir) metrics_filename = 'ImpactMetrics_' + service_name + '_FuelCell' + '_' + datetime.now().strftime('%Y%m%dT%H%M') + '.csv' with open(join(metrics_dir, metrics_filename), 'w') as csvfile: writer = csv.writer(csvfile) writer.writerows(self.metrics) def assigned_service_kW(self): return self.fc_ser_wght*self.fleet_rating @staticmethod def __vifc_calc(x, Pri, V_rev, T, R, alpha, F, i_n, i_0, R_tot, a, b, pH2, pO2): Vact = (R*T/alpha/F)*log((x[1]+i_n)/i_0) Vohm = (x[1] + i_n)*R_tot Vcon = a*exp(b*x[1]) E_cell = V_rev+R*T/2/F*log(pH2*pO2**0.5) fb = list([x[0]-E_cell+Vact+Vohm-Vcon]) fb.append(x[0]*x[1]-Pri) return fb def __fc_p_calc(self, p_req): # Check for operating range of the fleet Preq = float(self.P_opt(abs(p_req), self.fc_Pmin_fleet, self.fc_Pmax_fleet)) Pr = abs(Preq) * 1e3 / self.fc_Nfc / self.fc_Nc # Watts # Compute voltage and current for 1 FuelCell stack self.__Vi, self.__Ir = fsolve(self.__vifc_calc, [self.fc_x01, self.fc_x02], args=(Pr, self.V_rev, self.fc_T, self.fc_R, self.fc_alpha, self.fc_F, self.i_n1, self.i_01, self.R_tot1, self.fc_a, self.b1, self.fc_pH2, self.fc_pO2)) self.__Vact = (self.fc_R
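
# --- Illustrative aside: the fleet clamps every power request into
# [Pmin_fleet, Pmax_fleet] via the nested min/max lambda defined in
# __init__ above. A standalone sketch of just that clamp (the 30/130 kW
# limits mirror the config fallbacks):
P_opt = lambda p_req, p_min, p_max: max(p_min, min(p_max, p_req))

assert P_opt(10.0, 30.0, 130.0) == 30.0    # below range -> clamped to the floor
assert P_opt(75.0, 30.0, 130.0) == 75.0    # inside range -> passed through
assert P_opt(500.0, 30.0, 130.0) == 130.0  # above range -> clamped to the ceiling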
p_after.''' p = self parent_v = p_after._parentVnode() # Returns None if p.v is None # Init the ivars. p.stack = p_after.stack[:] p._childIndex = p_after._childIndex + 1 # Set the links. child = p.v n = p_after._childIndex + 1 child._addLink(n, parent_v, adjust=adjust) #@+node:ekr.20080416161551.215: *4* p._linkAsNthChild def _linkAsNthChild(self, parent, n, adjust=True): '''(low-level position method) Link self as the n'th child of the parent.''' p = self parent_v = parent.v # Init the ivars. p.stack = parent.stack[:] p.stack.append((parent_v, parent._childIndex),) p._childIndex = n child = p.v child._addLink(n, parent_v, adjust=adjust) #@+node:ekr.20080416161551.216: *4* p._linkAsRoot def _linkAsRoot(self, oldRoot): """Link self as the root node.""" p = self assert(p.v) hiddenRootNode = p.v.context.hiddenRootNode # if oldRoot: oldRootNode = oldRoot.v # else: oldRootNode = None # Init the ivars. p.stack = [] p._childIndex = 0 parent_v = hiddenRootNode child = p.v if not oldRoot: parent_v.children = [] child._addLink(0, parent_v) return p #@+node:ekr.20080416161551.212: *4* p._parentVnode def _parentVnode(self): '''Return the parent VNode. Return the hiddenRootNode if there is no other parent.''' p = self if p.v: data = p.stack and p.stack[-1] if data: v, junk = data return v else: return p.v.context.hiddenRootNode else: return None #@+node:ekr.20131219220412.16582: *4* p._relinkAsCloneOf def _relinkAsCloneOf(self, p2): '''A low-level method to replace p.v by a p2.v.''' p = self v, v2 = p.v, p2.v parent_v = p._parentVnode() if not parent_v: g.internalError('no parent_v', p) return if parent_v.children[p._childIndex] == v: parent_v.children[p._childIndex] = v2 v2.parents.append(parent_v) # p.v no longer truly exists. # p.v = p2.v else: g.internalError( 'parent_v.children[childIndex] != v', p, parent_v.children, p._childIndex, v) #@+node:ekr.20080416161551.217: *4* p._unlink def _unlink(self): '''Unlink the receiver p from the tree.''' p = self; n = p._childIndex parent_v = p._parentVnode() # returns None if p.v is None child = p.v assert(p.v) assert(parent_v) # Delete the child. if (0 <= n < len(parent_v.children) and parent_v.children[n] == child ): # This is the only call to v._cutlink. child._cutLink(n, parent_v) else: self.badUnlink(parent_v, n, child) #@+node:ekr.20090706171333.6226: *5* p.badUnlink def badUnlink(self, parent_v, n, child): if 0 <= n < len(parent_v.children): g.trace('**can not happen: children[%s] != p.v' % (n)) g.trace('parent_v.children...\n', g.listToString(parent_v.children)) g.trace('parent_v', parent_v) g.trace('parent_v.children[n]', parent_v.children[n]) g.trace('child', child) g.trace('** callers:', g.callers()) if g.app.unitTesting: assert False, 'children[%s] != p.v' else: g.trace('**can not happen: bad child index: %s, len(children): %s' % ( n, len(parent_v.children))) g.trace('parent_v.children...\n', g.listToString(parent_v.children)) g.trace('parent_v', parent_v, 'child', child) g.trace('** callers:', g.callers()) if g.app.unitTesting: assert False, 'bad child index: %s' % (n) #@+node:ekr.20080416161551.199: *3* p.moveToX #@+at These routines change self to a new position "in place". # That is, these methods must _never_ call p.copy(). # # When moving to a nonexistent position, these routines simply set p.v = None, # leaving the p.stack unchanged. This allows the caller to "undo" the effect of # the invalid move by simply restoring the previous value of p.v. 
# # These routines all return self on exit so the following kind of code will work: # after = p.copy().moveToNodeAfterTree() #@+node:ekr.20080416161551.200: *4* p.moveToBack def moveToBack(self): """Move self to its previous sibling.""" p = self; n = p._childIndex parent_v = p._parentVnode() # Returns None if p.v is None. # Do not assume n is in range: this is used by positionExists. if parent_v and p.v and 0 < n <= len(parent_v.children): p._childIndex -= 1 p.v = parent_v.children[n - 1] else: p.v = None return p #@+node:ekr.20080416161551.201: *4* p.moveToFirstChild def moveToFirstChild(self): """Move a position to it's first child's position.""" p = self if p.v and p.v.children: p.stack.append((p.v, p._childIndex),) p.v = p.v.children[0] p._childIndex = 0 else: p.v = None return p #@+node:ekr.20080416161551.202: *4* p.moveToLastChild def moveToLastChild(self): """Move a position to it's last child's position.""" p = self if p.v and p.v.children: p.stack.append((p.v, p._childIndex),) n = len(p.v.children) p.v = p.v.children[n - 1] p._childIndex = n - 1 else: p.v = None return p #@+node:ekr.20080416161551.203: *4* p.moveToLastNode def moveToLastNode(self): """Move a position to last node of its tree. N.B. Returns p if p has no children.""" p = self # Huge improvement for 4.2. while p.hasChildren(): p.moveToLastChild() return p #@+node:ekr.20080416161551.204: *4* p.moveToNext def moveToNext(self): """Move a position to its next sibling.""" p = self; n = p._childIndex parent_v = p._parentVnode() # Returns None if p.v is None. if not p.v: g.trace('no p.v:', p, g.callers()) if p.v and parent_v and len(parent_v.children) > n + 1: p._childIndex = n + 1 p.v = parent_v.children[n + 1] else: p.v = None return p #@+node:ekr.20080416161551.205: *4* p.moveToNodeAfterTree def moveToNodeAfterTree(self): """Move a position to the node after the position's tree.""" p = self while p: if p.hasNext(): p.moveToNext() break p.moveToParent() return p #@+node:ekr.20080416161551.206: *4* p.moveToNthChild def moveToNthChild(self, n): p = self if p.v and len(p.v.children) > n: p.stack.append((p.v, p._childIndex),) p.v = p.v.children[n] p._childIndex = n else: p.v = None return p #@+node:ekr.20080416161551.207: *4* p.moveToParent def moveToParent(self): """Move a position to its parent position.""" p = self if p.v and p.stack: p.v, p._childIndex = p.stack.pop() else: p.v = None return p #@+node:ekr.20080416161551.208: *4* p.moveToThreadBack def moveToThreadBack(self): """Move a position to it's threadBack position.""" p = self if p.hasBack(): p.moveToBack() p.moveToLastNode() else: p.moveToParent() return p #@+node:ekr.20080416161551.209: *4* p.moveToThreadNext def moveToThreadNext(self): """Move a position to threadNext position.""" p = self if p.v: if p.v.children: p.moveToFirstChild() elif p.hasNext(): p.moveToNext() else: p.moveToParent() while p: if p.hasNext(): p.moveToNext() break #found p.moveToParent() # not found. return p #@+node:ekr.20080416161551.210: *4* p.moveToVisBack & helper def moveToVisBack(self, c): """Move a position to the position of the previous visible node.""" p = self limit, limitIsVisible = c.visLimit() while p: # Short-circuit if possible. 
back = p.back() if back and back.hasChildren() and back.isExpanded(): p.moveToThreadBack() elif back: p.moveToBack() else: p.moveToParent() # Same as p.moveToThreadBack() if p: if limit: done, val = self.checkVisBackLimit(limit, limitIsVisible, p) if done: return val # A position or None if p.isVisible(c): return p return p #@+node:ekr.20090715145956.6166: *5* checkVisBackLimit def checkVisBackLimit(self, limit, limitIsVisible, p): '''Return done, p or None''' c = p.v.context if limit == p: if limitIsVisible and p.isVisible(c): return True, p else: return True, None elif limit.isAncestorOf(p): return False, None else: return True, None #@+node:ekr.20080416161551.211: *4* p.moveToVisNext & helper def moveToVisNext(self, c): """Move a position to the position of the next visible node.""" p = self limit, limitIsVisible = c.visLimit() while p: if p.hasChildren(): if p.isExpanded(): p.moveToFirstChild() else: p.moveToNodeAfterTree() elif p.hasNext(): p.moveToNext() else: p.moveToThreadNext() if p: if limit and self.checkVisNextLimit(limit,p): return None if p.isVisible(c): return p.copy() return p #@+node:ekr.20090715145956.6167: *5* checkVisNextLimit def checkVisNextLimit(self, limit, p): '''Return True is p is outside limit of visible nodes.''' return limit != p and not limit.isAncestorOf(p) #@+node:ekr.20150316175921.6: *4* p.safeMoveToThreadNext def safeMoveToThreadNext(self): ''' Move a position to threadNext position. Issue an error if any vnode is an ancestor of itself. ''' p = self if p.v: child_v = p.v.children and p.v.children[0] if child_v: for parent in p.self_and_parents(): if child_v == parent.v: g.app.structure_errors += 1 g.error('vnode: %s is its own parent' % child_v) # Allocating a new vnode would be difficult. # Just remove child_v from parent.v.children. parent.v.children = [ v2 for v2 in parent.v.children if not v2 == child_v] if parent.v in child_v.parents: child_v.parents.remove(parent.v) # Try not to hang. p.moveToParent() break elif child_v.fileIndex == parent.v.fileIndex: g.app.structure_errors += 1 g.error('duplicate gnx: %r v: %s parent: %s' % ( child_v.fileIndex, child_v, parent.v)) child_v.fileIndex = g.app.nodeIndices.getNewIndex(v=child_v) assert child_v.gnx != parent.v.gnx # Should be ok to continue. p.moveToFirstChild() break else: p.moveToFirstChild() elif p.hasNext(): p.moveToNext() else: p.moveToParent() while p: if p.hasNext(): p.moveToNext() break # found p.moveToParent() # not found. return p #@+node:ekr.20150316175921.7: *5* p.checkChild #@+node:ekr.20040303175026: *3* p.Moving, Inserting, Deleting, Cloning, Sorting #@+node:ekr.20040303175026.8: *4* p.clone def clone(self): """Create a clone of back. Returns the newly created position.""" p = self p2 = p.copy() # Do *not* copy the VNode! p2._linkAfter(p) # This should "just work" return p2 #@+node:ekr.20040117171654: *4* p.copy def copy(self): """"Return an independent copy of a position.""" return Position(self.v, self._childIndex, self.stack) #@+node:ekr.20040303175026.9: *4* p.copyTreeAfter, copyTreeTo # These used by unit tests, by the group_operations plugin, # and by the files-compare-leo-files command. # To do: use v.copyTree instead. def copyTreeAfter(self, copyGnxs=False): '''Copy
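
# --- Illustrative aside (not Leo's API): a toy model of the
# (vnode, childIndex) stack that the moveToX methods above maintain.
# An invalid move clears p.v but leaves the stack intact, so the caller
# can undo it by restoring p.v, exactly as the comments above describe.
class ToyVNode:
    def __init__(self, name, children=None):
        self.name, self.children = name, children or []

class ToyPosition:
    def __init__(self, v):
        self.v, self._childIndex, self.stack = v, 0, []
    def moveToFirstChild(self):
        if self.v and self.v.children:
            self.stack.append((self.v, self._childIndex))
            self.v, self._childIndex = self.v.children[0], 0
        else:
            self.v = None  # invalid move: clear v, keep the stack
        return self
    def moveToParent(self):
        if self.v and self.stack:
            self.v, self._childIndex = self.stack.pop()
        else:
            self.v = None
        return self

root = ToyVNode('root', [ToyVNode('child')])
p = ToyPosition(root).moveToFirstChild()
assert p.v.name == 'child'
assert p.moveToParent().v is root and p.stack == []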
# src/relstorage/cache/lru_cffiring.py # -*- coding: utf-8 -*- ############################################################################## # # Copyright (c) 2016 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """ Segmented LRU implementations. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import itertools from zope import interface from relstorage._compat import OID_OBJECT_MAP_TYPE as OidOMap from relstorage.cache.interfaces import IGenerationalLRUCache from relstorage.cache.interfaces import IGeneration from relstorage.cache.interfaces import ILRUEntry from relstorage.cache.interfaces import GenerationalCacheBase from . import _cache_ring try: izip = itertools.izip except AttributeError: # Python 3 izip = zip ffi = _cache_ring.ffi # pylint:disable=no-member _FFI_RING = _cache_ring.lib # pylint:disable=no-member _ring_move_to_head = _FFI_RING.rsc_ring_move_to_head _ring_del = _FFI_RING.rsc_ring_del _ring_add = _FFI_RING.rsc_ring_add ffi_new = ffi.new ffi_new_handle = ffi.new_handle ffi_from_handle = ffi.from_handle _lru_update_mru = _FFI_RING.rsc_update_mru _lru_probation_on_hit = _FFI_RING.rsc_probation_on_hit _eden_add = _FFI_RING.rsc_eden_add _lru_on_hit = _FFI_RING.rsc_on_hit _lru_age_lists = _FFI_RING.rsc_age_lists _eden_add_many = _FFI_RING.rsc_eden_add_many @interface.implementer(IGenerationalLRUCache) class CFFICache(GenerationalCacheBase): # Should we allocate some nodes in a contiguous block on startup? # NOTE: For large cache sizes, this can be slow. It actually makes # the zodbshootout 'cold' tests look bad (for small object counts # especially, or large cache sizes) because when zodbshootout clears caches, # our implementation throws this object all away, and then allocates again. # Meanwhile, all the old objects have to be GC'd. _preallocate_entries = True # If so, how many? Try to get enough to fill the cache assuming objects are # this size on average _preallocate_avg_size = 512 # But no more than this number. _preallocate_max_count = 150000 # 8 MB array _dict_type = OidOMap @classmethod def create_generations(cls, eden_limit=0, protected_limit=0, probation_limit=0, key_weight=len, value_weight=len): cffi_cache = ffi_new("RSCache*") generations = {} for klass, limit in ((Eden, eden_limit), (Protected, protected_limit), (Probation, probation_limit)): generation = klass(limit, cffi_cache, key_weight, value_weight) setattr(cffi_cache, generation.__name__, generation.ring_home) generations[generation.__name__] = generation return generations def __init__(self, byte_limit, key_weight=len, value_weight=len, empty_value=(b'', 0)): # This holds all the ring entries, no matter which ring they are in. # We experimented with using OOBTree and LOBTree for the type # of self.data. The OOBTree has a similar but slightly slower # performance profile (as would be expected given the big-O # complexity) as a dict, but very large ones can't be pickled # in a single shot!
The LOBTree works faster and uses less # memory than the OOBTree or the dict *if* all the keys are # integers (which they are now). Plus the LOBTrees # are slower on PyPy than its own dict specializations. We # were hoping to be able to write faster pickles with large # BTrees, but since that's not the case, we abandoned the # idea. # # Maybe a two-level index, like fsIndex? self.data = self._dict_type() self.get = self.data.get generations = self.create_generations( eden_limit=int(byte_limit * self._gen_eden_pct), protected_limit=int(byte_limit * self._gen_protected_pct), probation_limit=int(byte_limit * self._gen_probation_pct), key_weight=key_weight, value_weight=value_weight ) super(CFFICache, self).__init__(byte_limit, generations['eden'], generations['protected'], generations['probation']) self.cffi_cache = self.eden.cffi_cache # Setup the shared data structures for the generations node_free_list = self._make_node_free_list(empty_value) for ring in self.generations[1:]: setattr(ring, 'node_free_list', node_free_list) def _make_node_free_list(self, empty_value): "Create the node free list and preallocate any desired entries" node_free_list = [] if self._preallocate_entries: needed_entries = self.limit // self._preallocate_avg_size entry_count = min(self._preallocate_max_count, needed_entries) node_free_list = self.eden.init_node_free_list(entry_count, empty_value) return node_free_list # mapping operations, operating on user-level key/value pairs. def __iter__(self): return iter(self.data) def __contains__(self, key): return key in self.data def __setitem__(self, key, value): entry = self.get(key) if entry is not None: # This bumps its frequency, and potentially ejects other items. self.update_MRU(entry, value) else: # New values have a frequency of 1 and might evict other # items. self.add_MRU(key, value) assert key in self def __delitem__(self, key): entry = self.data[key] del self.data[key] self.generations[entry.cffi_entry.r_parent].remove(entry) def __getitem__(self, key): entry = self.get(key) if entry is not None: self.on_hit(entry) return entry.value # Cache-specific operations. def get_from_key_or_backup_key(self, pref_key, backup_key): entry = self.get(pref_key) if entry is None: entry = self.get(backup_key) if entry is not None: # Swap the key (which we assume has the same weight). 
entry.key = pref_key del self.data[backup_key] self.data[pref_key] = entry if entry is not None: self.on_hit(entry) return entry.value def peek(self, key): entry = self.get(key) if entry is not None: return entry.value def age_frequencies(self): _lru_age_lists(self.cffi_cache) age_lists = age_frequencies # BWC def add_MRU(self, key, value): item, evicted_items = self.eden.add_MRU(key, value) for k, _ in evicted_items: del self.data[k] self.data[key] = item return item def add_MRUs(self, ordered_keys, return_count_only=False): added_entries = self.eden.add_MRUs(ordered_keys) for entry in added_entries: self.data[entry.key] = entry return added_entries if not return_count_only else len(added_entries) def update_MRU(self, entry, value): evicted_items = self.generations[entry.cffi_entry.r_parent].update_MRU(entry, value) for k, _ in evicted_items: del self.data[k] def on_hit(self, entry): self.generations[entry.cffi_entry.r_parent].on_hit(entry) @property def size(self): return self.eden.size + self.protected.size + self.probation.size @property def weight(self): return self.size def __len__(self): return len(self.eden) + len(self.protected) + len(self.probation) def stats(self): return { 'eden_stats': self.eden.stats(), 'prot_stats': self.protected.stats(), 'prob_stats': self.probation.stats(), } def entries(self): return getattr(self.data, 'itervalues', self.data.values)() @interface.implementer(ILRUEntry) class CacheRingEntry(object): """ The Python-level objects holding the Python-level key and value. """ __slots__ = ( 'key', 'value', 'weight', 'cffi_ring_node', 'cffi_ring_handle', 'cffi_entry', # This is an owning pointer that is allocated when we # are imported from a persistent file. It keeps a whole array alive '_cffi_owning_node' ) def __init__(self, key, value, weight, node=None): self.key = key self.value = value self._cffi_owning_node = None # Passing the string is faster than passing a cdecl because we # have the string directly in bytecode without a lookup if node is None: node = ffi_new('RSRingNode*') self.cffi_ring_node = node # Directly setting attributes is faster than the initializer node.user_data = self.cffi_ring_handle = ffi_new_handle(self) entry = self.cffi_entry = node.u.entry entry.frequency = 1 # We denormalize len to avoid accessing through CFFI (but it is needed # by the C code). self.weight = entry.weight = weight def reset(self, key, value, weight): self.key = key self.value = value entry = self.cffi_entry entry.frequency = 1 self.weight = entry.weight = weight def reset_for_free_list(self): """ Put this node into an invalid state, representing that it should not be in a ring, but just the free list. You must call `reset` to use this node again. """ self.key = self.value = self.weight = None self.cffi_entry.r_parent = 0 # make sure we can't dereference a generation frequency = property(lambda self: self.cffi_entry.frequency, lambda self, nv: setattr(self.cffi_entry, 'frequency', nv)) def set_value(self, value, weight): old_value = self.value self.value = value if value == old_value: # don't go across the CFFI bridge to set the weight # if we don't have to. 
return self.weight = self.cffi_entry.weight = weight # We don't implement __len__---we want people to access .len # directly to avoid the function call as it showed up in benchmarks def __repr__(self): return ("<%s key=%r f=%d size=%d>" % (type(self).__name__, self.key, self.frequency, self.weight)) def _mutates_free_list(func): @functools.wraps(func) def mutates(self, *args, **kwargs): try: return func(self, *args, **kwargs) finally: self._mutated_free_list = True # Now replace ourself with a "bound function" on the instance # so our overhead somewhat goes away setattr(self, func.__name__, lambda *args, **kwargs: func(self, *args, **kwargs)) return mutates @interface.implementer(IGeneration) class Generation(object): # For the bulk insertion method add_MRUs in the eden generation, we need # to know whether or not the node_free_list we have is still the original # contiguous array that can be passed to C. _mutated_free_list = False # The CFFI pointer to the RSCache structure. It should be shared # among all the rings of the cache. cffi_cache = None # The list of free CacheRingNode objects. It should be shared # among all the rings of a cache. node_free_list = () PARENT_CONST = 0 def __init__(self, limit, cffi_cache, key_weight=len, value_weight=len): self.limit = limit self.key_weight = key_weight self.value_weight = value_weight self.cffi_cache = cffi_cache node = self.ring_home = ffi.new("RSRing") node.r_next = node node.r_prev = node node.u.head.max_weight = limit node.u.head.generation = self.PARENT_CONST self.node_free_list = [] def init_node_free_list(self, entry_count, empty_value): assert not self.node_free_list assert not self._mutated_free_list keys_and_values = itertools.repeat(('', empty_value), entry_count) _, nodes = self._preallocate_entries(keys_and_values, entry_count) self.node_free_list.extend(nodes) return self.node_free_list def _preallocate_entries(self, ordered_keys_and_values, count=None): """ Create and
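
# --- Illustrative aside: CFFICache fronts the three CFFI rings with a plain
# dict index and dispatches hit/miss in __setitem__/__getitem__. A tiny
# pure-Python sketch of just that dispatch (generations and eviction elided;
# dicts stand in for CacheRingEntry objects):
class TinyFrequencyCache:
    def __init__(self):
        self.data = {}
        self.get = self.data.get  # same bound-method shortcut as above
    def __setitem__(self, key, value):
        entry = self.get(key)
        if entry is not None:
            entry['value'] = value  # update_MRU path: existing key, bump frequency
            entry['frequency'] += 1
        else:
            self.data[key] = {'value': value, 'frequency': 1}  # add_MRU path
    def __getitem__(self, key):
        entry = self.data[key]
        entry['frequency'] += 1     # on_hit path
        return entry['value']

c = TinyFrequencyCache()
c[1] = b'state'
assert c[1] == b'state' and c.data[1]['frequency'] == 2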
outfile.write('gain=%e,\n' % (self.gain,)) if self.bias is not None and 'bias' not in already_processed: already_processed.add('bias') showIndent(outfile, level) outfile.write('bias=%e,\n' % (self.bias,)) def exportLiteralChildren(self, outfile, level, name_): pass def build(self, node): already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('gain', node) if value is not None and 'gain' not in already_processed: already_processed.add('gain') try: self.gain = float(value) except ValueError as exp: raise ValueError('Bad float/double attribute (gain): %s' % exp) value = find_attr_value_('bias', node) if value is not None and 'bias' not in already_processed: already_processed.add('bias') try: self.bias = float(value) except ValueError as exp: raise ValueError('Bad float/double attribute (bias): %s' % exp) def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class reflectance class thermal_const(GeneratedsSuper): subclass = None superclass = None def __init__(self, k1=None, k2=None): self.k1 = _cast(float, k1) self.k2 = _cast(float, k2) pass def factory(*args_, **kwargs_): if thermal_const.subclass: return thermal_const.subclass(*args_, **kwargs_) else: return thermal_const(*args_, **kwargs_) factory = staticmethod(factory) def get_k1(self): return self.k1 def set_k1(self, k1): self.k1 = k1 def get_k2(self): return self.k2 def set_k2(self, k2): self.k2 = k2 def hasContent_(self): if ( ): return True else: return False def export(self, outfile, level, namespace_='', name_='thermal_const', namespacedef_='', pretty_print=True): # Check if we are at the root level and output the XML header if level == 0: outfile.write('<?xml version="1.0"?>\n') outfile.write('\n') if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(outfile, level, pretty_print) # Check if we are at the root level and output attributes first before namespacedef if level == 0: outfile.write('<%s%s' % (namespace_, name_)) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='thermal_const') outfile.write('%s' % (namespacedef_ and ' ' + namespacedef_ or '')) else: outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='thermal_const') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespace_='', name_='thermal_const', pretty_print=pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='thermal_const'): if self.k1 is not None and 'k1' not in already_processed: already_processed.add('k1') outfile.write(' k1="%s"' % self.gds_format_double(self.k1, input_name='k1')) if self.k2 is not None and 'k2' not in already_processed: already_processed.add('k2') outfile.write(' k2="%s"' % self.gds_format_double(self.k2, input_name='k2')) def exportChildren(self, outfile, level, namespace_='', name_='thermal_const', fromsubclass_=False, pretty_print=True): pass def exportLiteral(self, outfile, level, name_='thermal_const'): level += 1 already_processed = set() self.exportLiteralAttributes(outfile, level,
already_processed, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, already_processed, name_): if self.k1 is not None and 'k1' not in already_processed: already_processed.add('k1') showIndent(outfile, level) outfile.write('k1=%e,\n' % (self.k1,)) if self.k2 is not None and 'k2' not in already_processed: already_processed.add('k2') showIndent(outfile, level) outfile.write('k2=%e,\n' % (self.k2,)) def exportLiteralChildren(self, outfile, level, name_): pass def build(self, node): already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('k1', node) if value is not None and 'k1' not in already_processed: already_processed.add('k1') try: self.k1 = float(value) except ValueError as exp: raise ValueError('Bad float/double attribute (k1): %s' % exp) value = find_attr_value_('k2', node) if value is not None and 'k2' not in already_processed: already_processed.add('k2') try: self.k2 = float(value) except ValueError as exp: raise ValueError('Bad float/double attribute (k2): %s' % exp) def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class thermal_const class solar_angles(GeneratedsSuper): subclass = None superclass = None def __init__(self, zenith=None, azimuth=None, units=None): self.zenith = _cast(float, zenith) self.azimuth = _cast(float, azimuth) self.units = _cast(None, units) pass def factory(*args_, **kwargs_): if solar_angles.subclass: return solar_angles.subclass(*args_, **kwargs_) else: return solar_angles(*args_, **kwargs_) factory = staticmethod(factory) def get_zenith(self): return self.zenith def set_zenith(self, zenith): self.zenith = zenith def get_azimuth(self): return self.azimuth def set_azimuth(self, azimuth): self.azimuth = azimuth def get_units(self): return self.units def set_units(self, units): self.units = units def validate_angleType(self, value): # Validate type angleType, a restriction on xs:float. pass def validate_projectionUnitsType(self, value): # Validate type projectionUnitsType, a restriction on xs:string.
pass def hasContent_(self): if ( ): return True else: return False def export(self, outfile, level, namespace_='', name_='solar_angles', namespacedef_='', pretty_print=True): # Check if we are at the root level and output the XML header if level == 0: outfile.write('<?xml version="1.0"?>\n') outfile.write('\n') if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(outfile, level, pretty_print) # Check if we are at the root level and output attributes first before namespacedef if level == 0: outfile.write('<%s%s' % (namespace_, name_)) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='solar_angles') outfile.write('%s' % (namespacedef_ and ' ' + namespacedef_ or '')) else: outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='solar_angles') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespace_='', name_='solar_angles', pretty_print=pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='solar_angles'): if self.zenith is not None and 'zenith' not in already_processed: already_processed.add('zenith') outfile.write(' zenith="%s"' % self.gds_format_float(self.zenith, input_name='zenith')) if self.azimuth is not None and 'azimuth' not in already_processed: already_processed.add('azimuth') outfile.write(' azimuth="%s"' % self.gds_format_float(self.azimuth, input_name='azimuth')) if self.units is not None and 'units' not in already_processed: already_processed.add('units') outfile.write(' units=%s' % (quote_attrib(self.units), )) def exportChildren(self, outfile, level, namespace_='', name_='solar_angles', fromsubclass_=False, pretty_print=True): pass def exportLiteral(self, outfile, level, name_='solar_angles'): level += 1 already_processed = set() self.exportLiteralAttributes(outfile, level, already_processed, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, already_processed, name_): if self.zenith is not None and 'zenith' not in already_processed: already_processed.add('zenith') showIndent(outfile, level) outfile.write('zenith=%f,\n' % (self.zenith,)) if self.azimuth is not None and 'azimuth' not in already_processed: already_processed.add('azimuth') showIndent(outfile, level) outfile.write('azimuth=%f,\n' % (self.azimuth,)) if self.units is not None and 'units' not in already_processed: already_processed.add('units') showIndent(outfile, level) outfile.write('units="%s",\n' % (self.units,)) def exportLiteralChildren(self, outfile, level, name_): pass def build(self, node): already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('zenith', node) if value is not None and 'zenith' not in already_processed: already_processed.add('zenith') try: self.zenith = float(value) except ValueError, exp: raise ValueError('Bad float/double attribute (zenith): %s' % exp) self.validate_angleType(self.zenith) # validate type angleType value = find_attr_value_('azimuth', node) if value is not None and 'azimuth' not in 
already_processed: already_processed.add('azimuth') try: self.azimuth = float(value) except ValueError, exp: raise ValueError('Bad float/double attribute (azimuth): %s' % exp) self.validate_angleType(self.azimuth) # validate type angleType value = find_attr_value_('units', node) if value is not None and 'units' not in already_processed: already_processed.add('units') self.units = value self.validate_projectionUnitsType(self.units) # validate type projectionUnitsType def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class solar_angles class view_angles(GeneratedsSuper): subclass = None superclass = None def __init__(self, zenith=None, azimuth=None, units=None): self.zenith = _cast(float, zenith) self.azimuth = _cast(float, azimuth) self.units = _cast(None, units) pass def factory(*args_, **kwargs_): if view_angles.subclass: return view_angles.subclass(*args_, **kwargs_) else: return view_angles(*args_, **kwargs_) factory = staticmethod(factory) def get_zenith(self): return self.zenith def set_zenith(self, zenith): self.zenith = zenith def get_azimuth(self): return self.azimuth def set_azimuth(self, azimuth): self.azimuth = azimuth def get_units(self): return self.units def set_units(self, units): self.units = units def validate_angleType(self, value): # Validate type angleType, a restriction on xs:float. pass def validate_projectionUnitsType(self, value): # Validate type projectionUnitsType, a restriction on xs:string. pass def hasContent_(self): if ( ): return True else: return False def export(self, outfile, level, namespace_='', name_='view_angles', namespacedef_='', pretty_print=True): # Check if we are at the root level and output the XML header if level == 0: outfile.write('<?xml version="1.0"?>\n') outfile.write('\n') if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(outfile, level, pretty_print) # Check if we are at the root level and output attributes first before namespacedef if level == 0: outfile.write('<%s%s' % (namespace_, name_)) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='view_angles') outfile.write('%s' % (namespacedef_ and ' ' + namespacedef_ or '')) else: outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='view_angles') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespace_='', name_='view_angles', pretty_print=pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='view_angles'): if self.zenith is not None and 'zenith' not in already_processed: already_processed.add('zenith') outfile.write(' zenith="%s"' % self.gds_format_float(self.zenith, input_name='zenith')) if
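# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings above): how a
# generateDS-style class such as thermal_const is typically round-tripped.
# Assumes the usual generated helpers (find_attr_value_, Tag_pattern_, etc.)
# are in scope; the helper name below is hypothetical. Python 2, matching the
# ``except ValueError, exp`` syntax used by the classes above.
import sys
from xml.etree import ElementTree as etree_

def parse_thermal_const_sketch(xml_text):
    # Parse a fragment such as '<thermal_const k1="607.76" k2="1260.56"/>'
    # and rebuild the object through the generated build() method.
    node = etree_.fromstring(xml_text)
    obj = thermal_const.factory()
    obj.build(node)
    return obj

# obj = parse_thermal_const_sketch('<thermal_const k1="607.76" k2="1260.56"/>')
# obj.export(sys.stdout, 0)  # re-serialises the element with its attributes
# ---------------------------------------------------------------------------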
#!/usr/bin/env python3

"""
collection of functions for NISAR geocode workflow
"""

import numpy as np
import pathlib
import shutil
import time

import h5py
import journal

import isce3
from pybind_nisar.products.readers import SLC
from pybind_nisar.workflows import h5_prep, gpu_check
from pybind_nisar.workflows.geocode_insar_runconfig import \
    GeocodeInsarRunConfig
from pybind_nisar.workflows.h5_prep import add_radar_grid_cubes_to_hdf5
from pybind_nisar.workflows.yaml_argparse import YamlArgparse


def run(cfg, runw_hdf5, output_hdf5):
    """
    Run geocode insar on user specified hardware

    Parameters
    ----------
    cfg : dict
        Dictionary containing run configuration
    runw_hdf5 : str
        Path to input RUNW HDF5
    output_hdf5 : str
        Path to output GUNW HDF5
    """
    use_gpu = gpu_check.use_gpu(cfg['worker']['gpu_enabled'],
                                cfg['worker']['gpu_id'])

    if use_gpu:
        # Set the current CUDA device.
        device = isce3.cuda.core.Device(cfg['worker']['gpu_id'])
        isce3.cuda.core.set_device(device)

        gpu_run(cfg, runw_hdf5, output_hdf5)
    else:
        cpu_run(cfg, runw_hdf5, output_hdf5)


def get_shadow_input_output(scratch_path, freq, dst_freq_path):
    """
    Create input raster object and output dataset path for shadow layover

    Parameters
    ----------
    scratch_path : pathlib.Path
        Scratch path to shadow layover mask rasters
    freq : str
        Frequency, A or B, of shadow layover mask raster
    dst_freq_path : str
        HDF5 path to destination frequency group of geocoded shadow layover

    Returns
    -------
    input_raster : isce3.io.Raster
        Shadow layover input raster object
    dataset_path : str
        HDF5 path to geocoded shadow layover dataset
    """
    raster_ref = scratch_path / 'rdr2geo' / f'freq{freq}' / 'mask.rdr'
    input_raster = isce3.io.Raster(str(raster_ref))

    # access the HDF5 dataset for layover shadow mask
    dataset_path = f"{dst_freq_path}/interferogram/layoverShadowMask"

    return input_raster, dataset_path


def get_input_output(src_freq_path, dst_freq_path, pol, runw_hdf5,
                     dataset_name):
    """
    Create input raster object and output dataset path for datasets other
    than the layover shadow mask

    Parameters
    ----------
    src_freq_path : str
        HDF5 path to input frequency group of input dataset
    dst_freq_path : str
        HDF5 path to input frequency group of output dataset
    pol : str
        Polarization of input dataset
    runw_hdf5 : str
        Path to input RUNW HDF5
    dataset_name : str
        Name of dataset to be geocoded

    Returns
    -------
    input_raster : isce3.io.Raster
        Input raster object
    dataset_path : str
        HDF5 path to geocoded output dataset
    """
    if dataset_name in ['alongTrackOffset', 'slantRangeOffset']:
        src_group_path = f'{src_freq_path}/pixelOffsets/{pol}'
        dst_group_path = f'{dst_freq_path}/pixelOffsets/{pol}'
    else:
        src_group_path = f'{src_freq_path}/interferogram/{pol}'
        dst_group_path = f'{dst_freq_path}/interferogram/{pol}'

    # prepare input raster
    input_raster_str = (f"HDF5:{runw_hdf5}:/{src_group_path}/{dataset_name}")
    input_raster = isce3.io.Raster(input_raster_str)

    # access the HDF5 dataset for a given frequency and pol
    dataset_path = f"{dst_group_path}/{dataset_name}"

    return input_raster, dataset_path


def get_offset_radar_grid(offset_cfg, radar_grid_slc):
    '''
    Create radar grid object for offset datasets

    Parameters
    ----------
    offset_cfg : dict
        Dictionary containing offset run configuration
    radar_grid_slc : SLC
        Object containing SLC properties
    '''
    # Define margin used during dense offsets execution
    margin = max(offset_cfg['margin'],
                 offset_cfg['gross_offset_range'],
                 offset_cfg['gross_offset_azimuth'])
    # If not allocated, determine shape of the offsets
    if offset_cfg['offset_length'] is None:
        length_margin = 2 * margin + \
                        2 * offset_cfg['half_search_azimuth'] + \
                        offset_cfg['window_azimuth']
        offset_cfg['offset_length'] = (radar_grid_slc.length -
                                       length_margin) // offset_cfg['skip_azimuth']
    if offset_cfg['offset_width'] is None:
        width_margin = 2 * margin + \
                       2 * offset_cfg['half_search_range'] + \
                       offset_cfg['window_range']
        # Width is decimated by the range skip; the original read
        # skip_azimuth here, which looks like a copy-paste slip.
        offset_cfg['offset_width'] = (radar_grid_slc.width -
                                      width_margin) // offset_cfg['skip_range']
    # Determine the starting range and sensing start for the offset radar grid
    offset_starting_range = radar_grid_slc.starting_range + \
        (offset_cfg['start_pixel_range'] + offset_cfg['window_range'] // 2) \
        * radar_grid_slc.range_pixel_spacing
    offset_sensing_start = radar_grid_slc.sensing_start + \
        (offset_cfg['start_pixel_azimuth'] + offset_cfg['window_azimuth'] // 2) \
        / radar_grid_slc.prf
    # Range spacing for offsets
    offset_range_spacing = radar_grid_slc.range_pixel_spacing * offset_cfg['skip_range']
    offset_prf = radar_grid_slc.prf / offset_cfg['skip_azimuth']
    # Create offset radar grid
    radar_grid = isce3.product.RadarGridParameters(offset_sensing_start,
                                                   radar_grid_slc.wavelength,
                                                   offset_prf,
                                                   offset_starting_range,
                                                   offset_range_spacing,
                                                   radar_grid_slc.lookside,
                                                   offset_cfg['offset_length'],
                                                   offset_cfg['offset_width'],
                                                   radar_grid_slc.ref_epoch)
    return radar_grid


def add_radar_grid_cube(cfg, freq, radar_grid, orbit, dst_h5):
    '''
    Write radar grid cube to HDF5

    Parameters
    ----------
    cfg : dict
        Dictionary containing run configuration
    freq : str
        Frequency in HDF5 to add cube to
    radar_grid : isce3.product.radar_grid
        Radar grid of current frequency of datasets other than offset and
        shadow layover datasets
    orbit : isce3.core.orbit
        Orbit object of current SLC
    dst_h5 : str
        Path to output GUNW HDF5
    '''
    radar_grid_cubes_geogrid = cfg['processing']['radar_grid_cubes']['geogrid']
    radar_grid_cubes_heights = cfg['processing']['radar_grid_cubes']['heights']
    threshold_geo2rdr = cfg["processing"]["geo2rdr"]["threshold"]
    iteration_geo2rdr = cfg["processing"]["geo2rdr"]["maxiter"]
    ref_hdf5 = cfg["InputFileGroup"]["InputFilePath"]
    slc = SLC(hdf5file=ref_hdf5)

    # get doppler centroid
    cube_geogrid_param = isce3.product.GeoGridParameters(
        start_x=radar_grid_cubes_geogrid.start_x,
        start_y=radar_grid_cubes_geogrid.start_y,
        spacing_x=radar_grid_cubes_geogrid.spacing_x,
        spacing_y=radar_grid_cubes_geogrid.spacing_y,
        width=int(radar_grid_cubes_geogrid.width),
        length=int(radar_grid_cubes_geogrid.length),
        epsg=radar_grid_cubes_geogrid.epsg)

    cube_group_path = '/science/LSAR/GUNW/metadata/radarGrid'

    native_doppler = slc.getDopplerCentroid(frequency=freq)
    grid_zero_doppler = isce3.core.LUT2d()
    '''
    The native-Doppler LUT bounds error is turned off to compute cube values
    outside radar-grid boundaries
    '''
    native_doppler.bounds_error = False
    add_radar_grid_cubes_to_hdf5(dst_h5, cube_group_path,
                                 cube_geogrid_param, radar_grid_cubes_heights,
                                 radar_grid, orbit, native_doppler,
                                 grid_zero_doppler, threshold_geo2rdr,
                                 iteration_geo2rdr)


def cpu_run(cfg, runw_hdf5, output_hdf5):
    """
    Geocode RUNW products on CPU

    Parameters
    ----------
    cfg : dict
        Dictionary containing run configuration
    runw_hdf5 : str
        Path to input RUNW HDF5
    output_hdf5 : str
        Path to output GUNW HDF5
    """
    # pull parameters from cfg
    ref_hdf5 = cfg["InputFileGroup"]["InputFilePath"]
    freq_pols = cfg["processing"]["input_subset"]["list_of_frequencies"]
    geogrids = cfg["processing"]["geocode"]["geogrids"]
    dem_file = cfg["DynamicAncillaryFileGroup"]["DEMFile"]
    threshold_geo2rdr = cfg["processing"]["geo2rdr"]["threshold"]
    iteration_geo2rdr = cfg["processing"]["geo2rdr"]["maxiter"]
    lines_per_block = \
cfg["processing"]["blocksize"]["y"] dem_block_margin = cfg["processing"]["dem_margin"] az_looks = cfg["processing"]["crossmul"]["azimuth_looks"] rg_looks = cfg["processing"]["crossmul"]["range_looks"] interp_method = cfg["processing"]["geocode"]["interp_method"] gunw_datasets = cfg["processing"]["geocode"]["datasets"] scratch_path = pathlib.Path(cfg['ProductPathGroup']['ScratchPath']) offset_cfg = cfg["processing"]["dense_offsets"] slc = SLC(hdf5file=ref_hdf5) info_channel = journal.info("geocode.run") info_channel.log("starting geocode") # NISAR products are always zero doppler grid_zero_doppler = isce3.core.LUT2d() # set defaults shared by both frequencies dem_raster = isce3.io.Raster(dem_file) epsg = dem_raster.get_epsg() proj = isce3.core.make_projection(epsg) ellipsoid = proj.ellipsoid # init geocode object geo = isce3.geocode.GeocodeFloat32() # init geocode members orbit = slc.getOrbit() geo.orbit = orbit geo.ellipsoid = ellipsoid geo.doppler = grid_zero_doppler geo.threshold_geo2rdr = threshold_geo2rdr geo.numiter_geo2rdr = iteration_geo2rdr geo.dem_block_margin = dem_block_margin geo.lines_per_block = lines_per_block geo.data_interpolator = interp_method t_all = time.time() with h5py.File(output_hdf5, "a") as dst_h5: for freq in freq_pols.keys(): pol_list = freq_pols[freq] radar_grid_slc = slc.getRadarGrid(freq) if az_looks > 1 or rg_looks > 1: radar_grid_mlook = radar_grid_slc.multilook(az_looks, rg_looks) geo_grid = geogrids[freq] geo.geogrid( geo_grid.start_x, geo_grid.start_y, geo_grid.spacing_x, geo_grid.spacing_y, geo_grid.width, geo_grid.length, geo_grid.epsg, ) src_freq_path = f"/science/LSAR/RUNW/swaths/frequency{freq}" dst_freq_path = f"/science/LSAR/GUNW/grids/frequency{freq}" # flag to ensure layover shadown only geocoded once per freq # layover shadow has no polarization skip_layover_shadow = False for pol in pol_list: # iterate over key: dataset name value: bool flag to perform geocode for dataset_name, geocode_this_dataset in gunw_datasets.items(): if not geocode_this_dataset: continue if dataset_name == "layoverShadowMask" and skip_layover_shadow: continue # Create radar grid for the offsets (and dataset path) if (dataset_name == "layoverShadowMask"): input_raster, dataset_path = get_shadow_input_output( scratch_path, freq, dst_freq_path) skip_layover_shadow = True else: input_raster, dataset_path = get_input_output(src_freq_path, dst_freq_path, pol, runw_hdf5, dataset_name) if dataset_name in ['alongTrackOffset', 'slantRangeOffset']: radar_grid = get_offset_radar_grid(offset_cfg, radar_grid_slc) geo.data_interpolator = interp_method # prepare input raster elif (dataset_name == "layoverShadowMask"): geo.data_interpolator = 'NEAREST' radar_grid = radar_grid_slc else: geo.data_interpolator = interp_method # Assign correct radar grid if az_looks > 1 or rg_looks > 1: radar_grid = radar_grid_mlook else: radar_grid = radar_grid_slc geocoded_dataset = dst_h5[dataset_path] # Construct the output ratster directly from HDF5 dataset geocoded_raster = isce3.io.Raster( f"IH5:::ID={geocoded_dataset.id.id}".encode("utf-8"), update=True, ) geo.geocode( radar_grid=radar_grid, input_raster=input_raster, output_raster=geocoded_raster, dem_raster=dem_raster, output_mode=isce3.geocode.GeocodeOutputMode.INTERP) del geocoded_raster # spec for NISAR GUNW does not require freq B so skip radar cube if freq.upper() == 'B': continue add_radar_grid_cube(cfg, freq, radar_grid, orbit, dst_h5) t_all_elapsed = time.time() - t_all info_channel.log(f"Successfully ran geocode in {t_all_elapsed:.3f} 
seconds") def gpu_run(cfg, runw_hdf5, output_hdf5): """ Geocode RUNW products on GPU Parameters ---------- cfg : dict Dictionary containing run configuration runw_hdf5 : str Path input RUNW HDF5 output_hdf5 : str Path to output GUNW HDF5 """ t_all = time.time() # Extract parameters from cfg dictionary dem_block_margin = cfg["processing"]["dem_margin"] ref_hdf5 = cfg["InputFileGroup"]["InputFilePath"] dem_file = cfg["DynamicAncillaryFileGroup"]["DEMFile"] freq_pols = cfg["processing"]["input_subset"]["list_of_frequencies"] geogrids = cfg["processing"]["geocode"]["geogrids"] lines_per_block = cfg["processing"]["blocksize"]["y"] interp_method = cfg["processing"]["geocode"]["interp_method"] gunw_datasets = cfg["processing"]["geocode"]["datasets"] az_looks = cfg["processing"]["crossmul"]["azimuth_looks"] rg_looks = cfg["processing"]["crossmul"]["range_looks"] offset_cfg = cfg["processing"]["dense_offsets"] scratch_path = pathlib.Path(cfg['ProductPathGroup']['ScratchPath']) if interp_method == 'BILINEAR': interp_method = isce3.core.DataInterpMethod.BILINEAR if interp_method == 'BICUBIC': interp_method = isce3.core.DataInterpMethod.BICUBIC if interp_method == 'NEAREST': interp_method = isce3.core.DataInterpMethod.NEAREST if interp_method == 'BIQUINTIC': interp_method = isce3.core.DataInterpMethod.BIQUINTIC info_channel = journal.info("geocode.run") info_channel.log("starting geocode") # Init frequency independent objects slc = SLC(hdf5file=ref_hdf5) grid_zero_doppler = isce3.core.LUT2d() dem_raster = isce3.io.Raster(dem_file) with h5py.File(output_hdf5, "a", libver='latest', swmr=True) as dst_h5: # Loop over frequencies for freq in freq_pols.keys(): geogrid = geogrids[freq] # Create frequency based radar grid radar_grid = slc.getRadarGrid(freq) if az_looks > 1 or rg_looks > 1: # Multilook radar grid if needed radar_grid = radar_grid.multilook(az_looks, rg_looks) # Create radar grid geometry used by most datasets rdr_geometry = isce3.container.RadarGeometry(radar_grid, slc.getOrbit(), grid_zero_doppler) # Create geocode object other than offset and shadow layover datasets geocode_obj = isce3.cuda.geocode.Geocode(geogrid, rdr_geometry, dem_raster, dem_block_margin, lines_per_block, interp_method, invalid_value=np.nan) ''' connectedComponents raster has type unsigned char and an invalid value of NaN becomes 0 which conflicts with 0 being used to indicate an unmasked value/pixel. 255 is chosen as it is the most distant value from components assigned in ascending order [0, 1, ...) ''' geocode_conn_comp_obj = isce3.cuda.geocode.Geocode(geogrid,
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:

# Copyright 2014-2021 <NAME> (The Compiler) <<EMAIL>>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser.  If not, see <https://www.gnu.org/licenses/>.

"""Misc. utilities related to Qt.

Module attributes:
    MAXVALS: A dictionary of C/Qt types (as string) mapped to their maximum
             value.
    MINVALS: A dictionary of C/Qt types (as string) mapped to their minimum
             value.
    MAX_WORLD_ID: The highest world ID allowed by QtWebEngine.
"""

import io
import operator
import contextlib
from typing import (TYPE_CHECKING, BinaryIO, IO, Iterator, Optional, Union,
                    Tuple, cast)

from PyQt5.QtCore import (qVersion, QEventLoop, QDataStream, QByteArray,
                          QIODevice, QFileDevice, QSaveFile, QT_VERSION_STR,
                          PYQT_VERSION_STR, QObject, QUrl)
from PyQt5.QtGui import QColor

try:
    from PyQt5.QtWebKit import qWebKitVersion
except ImportError:  # pragma: no cover
    qWebKitVersion = None  # type: ignore[assignment]  # noqa: N816

if TYPE_CHECKING:
    from PyQt5.QtWebKit import QWebHistory
    from PyQt5.QtWebEngineWidgets import QWebEngineHistory

from qutebrowser.misc import objects
from qutebrowser.utils import usertypes, utils


MAXVALS = {
    'int': 2 ** 31 - 1,
    'int64': 2 ** 63 - 1,
}

MINVALS = {
    'int': -(2 ** 31),
    'int64': -(2 ** 63),
}


class QtOSError(OSError):

    """An OSError triggered by a QIODevice.

    Attributes:
        qt_errno: The error attribute of the given QFileDevice, if applicable.
    """

    def __init__(self, dev: QIODevice, msg: str = None) -> None:
        if msg is None:
            msg = dev.errorString()

        self.qt_errno: Optional[QFileDevice.FileError] = None
        if isinstance(dev, QFileDevice):
            msg = self._init_filedev(dev, msg)

        super().__init__(msg)

    def _init_filedev(self, dev: QFileDevice, msg: str) -> str:
        self.qt_errno = dev.error()
        filename = dev.fileName()
        msg += ": {!r}".format(filename)
        return msg


def version_check(version: str,
                  exact: bool = False,
                  compiled: bool = True) -> bool:
    """Check if the Qt runtime version is the version supplied or newer.

    Args:
        version: The version to check against.
        exact: if given, check with == instead of >=
        compiled: Set to False to not check the compiled version.
    """
    if compiled and exact:
        raise ValueError("Can't use compiled=True with exact=True!")

    parsed = utils.VersionNumber.parse(version)
    op = operator.eq if exact else operator.ge
    result = op(utils.VersionNumber.parse(qVersion()), parsed)
    if compiled and result:
        # qVersion() ==/>= parsed, now check if QT_VERSION_STR ==/>= parsed.
        result = op(utils.VersionNumber.parse(QT_VERSION_STR), parsed)
    if compiled and result:
        # Finally, check PYQT_VERSION_STR as well.
result = op(utils.VersionNumber.parse(PYQT_VERSION_STR), parsed) return result MAX_WORLD_ID = 256 def is_new_qtwebkit() -> bool: """Check if the given version is a new QtWebKit.""" assert qWebKitVersion is not None return (utils.VersionNumber.parse(qWebKitVersion()) > utils.VersionNumber.parse('538.1')) def is_single_process() -> bool: """Check whether QtWebEngine is running in single-process mode.""" if objects.backend == usertypes.Backend.QtWebKit: return False assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend args = objects.qapp.arguments() return '--single-process' in args def check_overflow(arg: int, ctype: str, fatal: bool = True) -> int: """Check if the given argument is in bounds for the given type. Args: arg: The argument to check ctype: The C/Qt type to check as a string. fatal: Whether to raise exceptions (True) or truncate values (False) Return The truncated argument if fatal=False The original argument if it's in bounds. """ maxval = MAXVALS[ctype] minval = MINVALS[ctype] if arg > maxval: if fatal: raise OverflowError(arg) return maxval elif arg < minval: if fatal: raise OverflowError(arg) return minval else: return arg class Validatable(utils.Protocol): """An object with an isValid() method (e.g. QUrl).""" def isValid(self) -> bool: ... def ensure_valid(obj: Validatable) -> None: """Ensure a Qt object with an .isValid() method is valid.""" if not obj.isValid(): raise QtValueError(obj) def check_qdatastream(stream: QDataStream) -> None: """Check the status of a QDataStream and raise OSError if it's not ok.""" status_to_str = { QDataStream.Ok: "The data stream is operating normally.", QDataStream.ReadPastEnd: ("The data stream has read past the end of " "the data in the underlying device."), QDataStream.ReadCorruptData: "The data stream has read corrupt data.", QDataStream.WriteFailed: ("The data stream cannot write to the " "underlying device."), } if stream.status() != QDataStream.Ok: raise OSError(status_to_str[stream.status()]) _QtSerializableType = Union[ QObject, QByteArray, QUrl, 'QWebEngineHistory', 'QWebHistory' ] def serialize(obj: _QtSerializableType) -> QByteArray: """Serialize an object into a QByteArray.""" data = QByteArray() stream = QDataStream(data, QIODevice.WriteOnly) serialize_stream(stream, obj) return data def deserialize(data: QByteArray, obj: _QtSerializableType) -> None: """Deserialize an object from a QByteArray.""" stream = QDataStream(data, QIODevice.ReadOnly) deserialize_stream(stream, obj) def serialize_stream(stream: QDataStream, obj: _QtSerializableType) -> None: """Serialize an object into a QDataStream.""" # pylint: disable=pointless-statement check_qdatastream(stream) stream << obj # type: ignore[operator] check_qdatastream(stream) def deserialize_stream(stream: QDataStream, obj: _QtSerializableType) -> None: """Deserialize a QDataStream into an object.""" # pylint: disable=pointless-statement check_qdatastream(stream) stream >> obj # type: ignore[operator] check_qdatastream(stream) @contextlib.contextmanager def savefile_open( filename: str, binary: bool = False, encoding: str = 'utf-8' ) -> Iterator[IO]: """Context manager to easily use a QSaveFile.""" f = QSaveFile(filename) cancelled = False try: open_ok = f.open(QIODevice.WriteOnly) if not open_ok: raise QtOSError(f) dev = cast(BinaryIO, PyQIODevice(f)) if binary: new_f: IO = dev else: new_f = io.TextIOWrapper(dev, encoding=encoding) yield new_f new_f.flush() except: f.cancelWriting() cancelled = True raise finally: commit_ok = f.commit() if not commit_ok and not 
cancelled: raise QtOSError(f, msg="Commit failed!") def qcolor_to_qsscolor(c: QColor) -> str: """Convert a QColor to a string that can be used in a QStyleSheet.""" ensure_valid(c) return "rgba({}, {}, {}, {})".format( c.red(), c.green(), c.blue(), c.alpha()) class PyQIODevice(io.BufferedIOBase): """Wrapper for a QIODevice which provides a python interface. Attributes: dev: The underlying QIODevice. """ def __init__(self, dev: QIODevice) -> None: super().__init__() self.dev = dev def __len__(self) -> int: return self.dev.size() def _check_open(self) -> None: """Check if the device is open, raise ValueError if not.""" if not self.dev.isOpen(): raise ValueError("IO operation on closed device!") def _check_random(self) -> None: """Check if the device supports random access, raise OSError if not.""" if not self.seekable(): raise OSError("Random access not allowed!") def _check_readable(self) -> None: """Check if the device is readable, raise OSError if not.""" if not self.dev.isReadable(): raise OSError("Trying to read unreadable file!") def _check_writable(self) -> None: """Check if the device is writable, raise OSError if not.""" if not self.writable(): raise OSError("Trying to write to unwritable file!") def open(self, mode: QIODevice.OpenMode) -> contextlib.closing: """Open the underlying device and ensure opening succeeded. Raises OSError if opening failed. Args: mode: QIODevice::OpenMode flags. Return: A contextlib.closing() object so this can be used as contextmanager. """ ok = self.dev.open(mode) if not ok: raise QtOSError(self.dev) return contextlib.closing(self) def close(self) -> None: """Close the underlying device.""" self.dev.close() def fileno(self) -> int: raise io.UnsupportedOperation def seek(self, offset: int, whence: int = io.SEEK_SET) -> int: self._check_open() self._check_random() if whence == io.SEEK_SET: ok = self.dev.seek(offset) elif whence == io.SEEK_CUR: ok = self.dev.seek(self.tell() + offset) elif whence == io.SEEK_END: ok = self.dev.seek(len(self) + offset) else: raise io.UnsupportedOperation("whence = {} is not " "supported!".format(whence)) if not ok: raise QtOSError(self.dev, msg="seek failed!") return self.dev.pos() def truncate(self, size: int = None) -> int: raise io.UnsupportedOperation @property def closed(self) -> bool: return not self.dev.isOpen() def flush(self) -> None: self._check_open() self.dev.waitForBytesWritten(-1) def isatty(self) -> bool: self._check_open() return False def readable(self) -> bool: return self.dev.isReadable() def readline(self, size: Optional[int] = -1) -> bytes: self._check_open() self._check_readable() if size is None or size < 0: qt_size = 0 # no maximum size elif size == 0: return b'' else: qt_size = size + 1 # Qt also counts the NUL byte buf: Union[QByteArray, bytes, None] = None if self.dev.canReadLine(): buf = self.dev.readLine(qt_size) elif size is None or size < 0: buf = self.dev.readAll() else: buf = self.dev.read(size) if buf is None: raise QtOSError(self.dev) if isinstance(buf, QByteArray): # The type (bytes or QByteArray) seems to depend on what data we # feed in... buf = buf.data() return buf def seekable(self) -> bool: return not self.dev.isSequential() def tell(self) -> int: self._check_open() self._check_random() return self.dev.pos() def writable(self) -> bool: return self.dev.isWritable() def write( # type: ignore[override] self, data: Union[bytes, bytearray] ) -> int: self._check_open() self._check_writable() num = self.dev.write(data) if num == -1 or num < len(data): raise QtOSError(self.dev)
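# ---------------------------------------------------------------------------
# Illustrative sketch (not part of qtutils): a typical round-trip through the
# serialize()/deserialize() helpers defined above, using QUrl, which
# implements the QDataStream stream operators. The helper name is
# hypothetical.
from PyQt5.QtCore import QUrl

def _roundtrip_url_sketch() -> QUrl:
    src = QUrl('https://example.com/')
    data = serialize(src)    # QByteArray written via QDataStream
    dst = QUrl()
    deserialize(data, dst)   # streams the bytes back into dst
    assert dst == src
    return dst
# ---------------------------------------------------------------------------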
#!/usr/bin/env python3

# Intended to be run from the chord-fork folder.
# cd Error-Ranking/chord-fork
# ./scripts/bnet/driver.py pjbench/ftp/chord_output_mln-datarace-problem/bnet/noaugment/bnet-dict.out \
#                          pjbench/ftp/chord_output_mln-datarace-problem/bnet/noaugment/factor-graph.fg \
#                          pjbench/ftp/chord_output_mln-datarace-problem/base_queries.txt \
#                          pjbench/ftp/chord_output_mln-datarace-oracle/oracle_queries.txt

# Accepts human-readable commands from stdin, and passes them to LibDAI/wrapper.cpp, thus acting as a convenient driver.
# Arguments:
# 1. Dictionary file for the bayesian network, named-dict.out, produced by cons_all2bnet.py. This is to translate
#    commands, such as "O racePairs_cs(428,913) true" to the format accepted by LibDAI/wrapper.cpp, such as
#    "O 38129 true".
# 2. Factor graph, factor-graph.fg
# 3. Base queries file, base_queries.txt. This need not be the full list of base queries produced by Chord, but could
#    instead be any subset of it, such as the alarms reported by the upper oracle.
# 4. Oracle queries file, oracle_queries.txt. Needed while producing combined.out.

import graph
import logging
import subprocess
import sys
import time
import re

dictFileName = sys.argv[1]
fgFileName = sys.argv[2]
baseQueriesFileName = sys.argv[3]
oracleQueriesFileName = sys.argv[4]
wrapperExecutable = sys.argv[5]
consFileName = sys.argv[6] if len(sys.argv) > 6 else None
oldLabelsFileName = sys.argv[7] if len(sys.argv) > 7 else None
printGraph = bool(sys.argv[8]) if len(sys.argv) > 8 else False

logging.basicConfig(level=logging.INFO, \
                    format="[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s", \
                    datefmt="%H:%M:%S")

########################################################################################################################
# 1. Setup

# 1a. Populate bayesian network node dictionary
bnetDict = {}
for line in open(dictFileName):
    line = line.strip()
    if len(line) == 0: continue
    components = [ c.strip() for c in line.split(': ') if len(c.strip()) > 0 ]
    assert len(components) == 2
    bnetDict[components[1]] = components[0]

# 1b. Initialize set of labelled tuples (to confirm that tuples are not being relabelled), and populate the set of
#     alarms in the ground truth.
labelledTuples = {}
oracleQueries = set([ line.strip() for line in open(oracleQueriesFileName) if len(line.strip()) > 0 ])
baseQueries = set([ line.strip() for line in open(baseQueriesFileName) if len(line.strip()) > 0 ])
oldLabels = set([ line.strip() for line in open(oldLabelsFileName) if len(line.strip()) > 0]) \
            if oldLabelsFileName != None else set()

assert(oracleQueries.issubset(baseQueries))
assert(baseQueries.issubset(set(bnetDict.keys())))

logging.info('Populated {} oracle queries.'.format(len(oracleQueries)))
logging.info('Populated {} base queries.'.format(len(baseQueries)))
logging.info('Loaded {} old labels'.format(len(oldLabels)))

# 1c. Setup graph for visualization (optional)
# if consFileName is not None:
#     network = graph.build_graph(consFileName, baseQueries, fmt='compressed')
#     graph.prepare_visualization(network['graph'], baseQueries, oldLabels, oracleQueries)
#     logging.info('Prepared Visualization.')

########################################################################################################################
# 2. 
Start LibDAI/wrapper.cpp, and interact with the user with subprocess.Popen([wrapperExecutable, fgFileName], \ stdin=subprocess.PIPE, \ stdout=subprocess.PIPE, \ universal_newlines=True) as wrapperProc: def execWrapperCmd(fwdCmd): logging.info('Driver to wrapper: ' + fwdCmd) print(fwdCmd, file=wrapperProc.stdin) wrapperProc.stdin.flush() response = wrapperProc.stdout.readline().strip() logging.info('Wrapper to driver: ' + response) return response def observe(t, value): assert t not in labelledTuples, 'Attempting to relabel alarm {0}'.format(t) if not value == (t in oracleQueries): logging.warning('Labelling alarm {0} with value {1}, which does not match ground truth.'.format(t, value)) fwdCmd = 'O {0} {1}'.format(bnetDict[t], 'true' if value else 'false') execWrapperCmd(fwdCmd) labelledTuples[t] = value def getRankedAlarms(): alarmList = [] for t in baseQueries: index = bnetDict[t] response = float(execWrapperCmd('Q {0}'.format(index))) alarmList.append((t, response)) def getLabelInt(t): return 0 if t not in labelledTuples else 1 if labelledTuples[t] else -1 return sorted(alarmList, key=lambda rec: (-getLabelInt(rec[0]), -rec[1], rec[0])) def getInversionCount(alarmList): numInversions = 0 numFalse = 0 for t, confidence in alarmList: if t in oracleQueries: numInversions = numInversions + numFalse else: numFalse = numFalse + 1 return numInversions def getAlpha(confidence): if confidence > 0.75: return '{:02X}'.format(int(255 * 1.0)) elif confidence > 0.5: return '{:02X}'.format(int(255 * 0.75)) elif confidence > 0.25: return '{:02X}'.format(int(255 * 0.5)) else: return '{:02X}'.format(int(255 * 0.25)) def printNetwork(outFile, latestLabel=None): alarmList = getRankedAlarms() name2idx = network['name2idx'] v_prop = network['graph'].vertex_properties['info'] v_color = network['graph'].vertex_properties['color'] v_shape = network['graph'].vertex_properties['shape'] for t, confidence in alarmList: v_shape[name2idx[t]] = 'circle' if t == latestLabel: v_color[name2idx[t]] = 'green' elif t in oracleQueries: v_color[name2idx[t]] = 'red' elif t not in labelledTuples: alpha = getAlpha(confidence) v_color[name2idx[t]] = '#0000FF' + alpha elif not labelledTuples[t]: v_color[name2idx[t]] = 'black' # negative label graph.draw(network['graph'], outFile) def printRankedAlarms(outFile): alarmList = getRankedAlarms() print('Rank\tConfidence\tGround\tLabel\tComments\tTuple', file=outFile) index = 0 for t, confidence in alarmList: index = index + 1 ground = 'TrueGround' if t in oracleQueries else 'FalseGround' label = 'Unlabelled' if t not in labelledTuples else \ 'PosLabel' if labelledTuples[t] else \ 'NegLabel' print('{0}\t{1}\t{2}\t{3}\tSPOkGoodGood\t{4}'.format(index, confidence, ground, label, t), file=outFile) def runAlarmCarousel(dfile, tolerance, minIters, maxIters, histLength, statsFile, combinedPrefix, combinedSuffix): assert 0 < tolerance and tolerance < 1 assert 0 < histLength and histLength < minIters and minIters < maxIters numTrue = 0 numFalse = 0 with open('{0}{1}.{2}'.format(combinedPrefix, 'init', combinedSuffix), 'w') as outFile: execWrapperCmd('BP {0} {1} {2} {3}'.format(tolerance, minIters, maxIters, histLength)) printRankedAlarms(outFile) if consFileName is not None and printGraph: outFile = '{0}{1}.{2}.svg'.format(combinedPrefix, 'init', combinedSuffix) printNetwork(outFile) graph.print_node_id(network['graph'], '{}init.{}.map'.format(combinedPrefix, combinedSuffix)) numMasked = 0 for oldLabel in oldLabels: # not necessarily queries logging.info('Masking: O {0} False'.format(oldLabel)) 
observe(oldLabel, False) numMasked = numMasked + 1 logging.info('Carousel start! {} alarms masked'.format(numMasked)) print('Tuple\tConfidence\tGround\tNumTrue\tNumFalse\tFraction\tInversionCount\tYetToConvergeFraction\tTime(s)', file=statsFile) lastTime = time.time() latestLabel = None while baseQueries - set(labelledTuples.keys()): yetToConvergeFraction = float(execWrapperCmd('BP {0} {1} {2} {3}'.format(tolerance, minIters, maxIters, histLength))) rankedAlarmList = getRankedAlarms() unlabelledAlarms = [ (t, confidence) for t, confidence in rankedAlarmList if t not in labelledTuples ] t0, conf0 = unlabelledAlarms[0] ground = 'TrueGround' if t0 in oracleQueries else 'FalseGround' if t0 in oracleQueries: numTrue = numTrue + 1 else: numFalse = numFalse + 1 fraction = numTrue / (numTrue + numFalse) inversionCount = getInversionCount(rankedAlarmList) thisTime = int(time.time() - lastTime) lastTime = time.time() print('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}'.format(t0, conf0, ground, numTrue, numFalse, fraction, \ inversionCount, yetToConvergeFraction, thisTime), \ file=statsFile) statsFile.flush() outFileName = '{0}{1}.{2}'.format(combinedPrefix, numTrue + numFalse - 1, combinedSuffix) with open(outFileName, 'w') as outFile: printRankedAlarms(outFile) if consFileName is not None and printGraph: outFile = outFileName + '.svg' printNetwork(outFile, latestLabel=latestLabel) logging.info('Setting tuple {0} to value {1}'.format(t0, t0 in oracleQueries)) observe(t0, t0 in oracleQueries) if t0 not in oracleQueries and t0 in dfile: for td in dfile[t0]: observe(td, False) latestLabel = t0 if numTrue == len(oracleQueries): break logging.info('Awaiting command') for command in sys.stdin: command = command.strip() logging.info('Read command {0}'.format(command)) components = [ c.strip() for c in re.split(' |\t', command) if len(c.strip()) > 0 ] if len(components) == 0: continue cmdType = components[0] components = components[1:] if cmdType == 'Q': # 2a. Marginal probability query. # Syntax: Q t. # Output: t belief(t). t = components[0] fwdCmd = 'Q {0}'.format(bnetDict[t]) print('{0} {1}'.format(t, float(execWrapperCmd(fwdCmd)))) elif cmdType == 'FQ': # 2b. Factor marginal. # Syntax: FQ f i. # Output: belief(f, i). # Note: No encoding or decoding is performed for this command. It is intended to be used by em.py, which can # do these things on its own. print(float(execWrapperCmd(command))) elif cmdType == 'BP': # 2c. Run belief propagation. # Syntax: BP tolerance minIters maxIters histLength. # Output: 'converged' if belief propagation converged, or 'diverged' otherwise. tolerance = float(components[0]) minIters = int(components[1]) maxIters = int(components[2]) histLength = int(components[3]) assert 0 < tolerance and tolerance < 1 assert 0 < histLength and histLength < minIters and minIters < maxIters print(execWrapperCmd('BP {0} {1} {2} {3}'.format(tolerance, minIters, maxIters, histLength))) elif cmdType == 'OO': # 2d. Observe oracle data. Read tuple and infer value from oracle_queries.txt # Syntax: OO t. # Output: 'O t value'. Value assigned to the tuple. Merely an acknowledgment that the command was received. t = components[0] value = t in oracleQueries observe(t, value) print('O {0} {1}'.format(t, 'true' if value else 'false')) elif cmdType == 'O': # 2e. Observe oracle data. # Syntax: O t value. # Output: 'O t value'. Merely an acknowledgment that the command was received. 
t = components[0] assert components[1] == 'true' or components[1] == 'false' value = (components[1] == 'true') observe(t, value) print('O {0} {1}'.format(t, 'true' if value else 'false')) elif cmdType == 'P': # 2f. Printing ranked list of alarms to file # Syntax: P filename. # Output: Ranked list of alarms, in the format of combined.out. Printed to filename. Acknowledgment printed # to stdout. outFileName = components[0] with open(outFileName, 'w') as outFile: printRankedAlarms(outFile) print('P {0}'.format(outFileName)) elif cmdType == 'HA': # 2g. Get the alarm with the highest ranking and maximum confidence. # Syntax: HA. # Output: A tuple t alarmList = getRankedAlarms() topAlarm, confidence = alarmList[0] groundTruth = 'TrueGround' if topAlarm in oracleQueries else 'FalseGround' print('{0} {1} {2}'.format(topAlarm, confidence, groundTruth)) elif cmdType == 'AC': # 2h. Run alarm carousel # Syntax: AC dfilename tolerance minIters maxIters histLength statsFileName combinedPrefix combinedSuffix. # Output: Alarm carousel statistics, in the format of stats.txt, printed to statsFileName. Static
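# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the driver above): getInversionCount()
# counts, for every true alarm, the number of false alarms ranked above it,
# so 0 means a perfect ranking. A self-contained restatement with toy data;
# the names are hypothetical.
def count_inversions_sketch(ranked, truth):
    inversions = 0
    num_false = 0
    for alarm, _confidence in ranked:
        if alarm in truth:
            inversions += num_false  # every false alarm ranked above this true one
        else:
            num_false += 1
    return inversions

# count_inversions_sketch([('a', 0.9), ('b', 0.8), ('c', 0.7)], {'c'}) == 2
# ---------------------------------------------------------------------------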
# -*- coding: utf-8 -*-

def heat_losses_and_heat_loss_parameter(
        solid_door_net_area,
        solid_door_u_value,
        semi_glazed_door_net_area,
        semi_glazed_door_u_value,
        window_net_area,
        window_u_value,
        roof_window_net_area,
        roof_window_u_value,
        basement_floor_net_area,
        basement_floor_u_value,
        basement_floor_heat_capacity,
        ground_floor_net_area,
        ground_floor_u_value,
        ground_floor_heat_capacity,
        exposed_floor_net_area,
        exposed_floor_u_value,
        exposed_floor_heat_capacity,
        basement_wall_gross_area,
        basement_wall_opening,
        basement_wall_u_value,
        basement_wall_heat_capacity,
        external_wall_gross_area,
        external_wall_opening,
        external_wall_u_value,
        external_wall_heat_capacity,
        roof_gross_area,
        roof_opening,
        roof_u_value,
        roof_heat_capacity,
        party_wall_net_area,
        party_wall_u_value,
        party_wall_heat_capacity,
        party_floor_net_area,
        party_floor_heat_capacity,
        party_ceiling_net_area,
        party_ceiling_heat_capacity,
        internal_wall_net_area,
        internal_wall_heat_capacity,
        internal_floor_net_area,
        internal_floor_heat_capacity,
        internal_ceiling_net_area,
        internal_ceiling_heat_capacity,
        total_floor_area,
        thermal_bridges_appendix_k,
        effective_air_change_rate,
        dwelling_volume
        ):
    """Calculates the heat losses and the heat loss parameter, Section 3.

    :param solid_door_net_area: See (26), in m2.
    :type solid_door_net_area: float

    :param solid_door_u_value: See (26), in W/m2K.
    :type solid_door_u_value: float or None

    :param semi_glazed_door_net_area: See (26a), in m2.
    :type semi_glazed_door_net_area: float

    :param semi_glazed_door_u_value: See (26a), in W/m2K.
    :type semi_glazed_door_u_value: float or None

    :param window_net_area: See (27), in m2.
    :type window_net_area: float

    :param window_u_value: See (27), in W/m2K.
        For windows and roof windows, use effective window U-value.
        Calculated using formula 1/[(1/U-value)+0.04] as given in
        paragraph 3.2.
    :type window_u_value: float or None

    :param roof_window_net_area: See (27a), in m2.
        For windows and roof windows, use effective window U-value.
        Calculated using formula 1/[(1/U-value)+0.04] as given in
        paragraph 3.2.
    :type roof_window_net_area: float

    :param roof_window_u_value: See (27a), in W/m2K.
    :type roof_window_u_value: float or None

    :param basement_floor_net_area: See (28), in m2.
    :type basement_floor_net_area: float

    :param basement_floor_u_value: See (28), in W/m2K.
    :type basement_floor_u_value: float or None

    :param basement_floor_heat_capacity: See (28), in kJ/m2K.
    :type basement_floor_heat_capacity: float or None

    :param ground_floor_net_area: See (28a), in m2.
    :type ground_floor_net_area: float

    :param ground_floor_u_value: See (28a), in W/m2K.
    :type ground_floor_u_value: float or None

    :param ground_floor_heat_capacity: See (28a), in kJ/m2K.
    :type ground_floor_heat_capacity: float or None

    :param exposed_floor_net_area: See (28b), in m2.
    :type exposed_floor_net_area: float

    :param exposed_floor_u_value: See (28b), in W/m2K.
    :type exposed_floor_u_value: float or None

    :param exposed_floor_heat_capacity: See (28b), in kJ/m2K.
    :type exposed_floor_heat_capacity: float or None

    :param basement_wall_gross_area: See (29), in m2.
    :type basement_wall_gross_area: float

    :param basement_wall_opening: See (29), in m2.
    :type basement_wall_opening: float

    :param basement_wall_u_value: See (29), in W/m2K.
    :type basement_wall_u_value: float or None

    :param basement_wall_heat_capacity: See (29), in kJ/m2K.
    :type basement_wall_heat_capacity: float or None

    :param external_wall_gross_area: See (29a), in m2.
:type external_wall_gross_area: float :param external_wall_opening: See (29a), in m2. :type external_wall_opening: float :param external_wall_u_value: See (29a), in W/m2K. :type external_wall_u_value: float or None :param external_wall_heat_capacity: See (29a), in kJ/m2K. :type external_wall_heat_capacity: float or None :param roof_gross_area: See (30), in m2. :type roof_gross_area: float :param roof_opening: See (30), in m2. :type roof_opening: float :param roof_u_value: See (30), in W/m2K. :type roof_u_value: float or None :param roof_heat_capacity: See (30), in kJ/m2K. :type roof_heat_capacity: float or None :param party_wall_net_area: See (32), in m2. :type party_wall_net_area: float :param party_wall_u_value: See (32), in W/m2K. :type party_wall_u_value: float or None :param party_wall_heat_capacity: See (32), in kJ/m2K. :type party_wall_heat_capacity: float or None :param party_floor_net_area: See (32a), in m2. :type party_floor_net_area: float :param party_floor_heat_capacity: See (32a), in kJ/m2K. :type party_floor_heat_capacity: float or None :param party_ceiling_net_area: See (32b), in m2. :type party_ceiling_net_area: float :param party_ceiling_heat_capacity: See (32b), in kJ/m2K. :type party_ceiling_heat_capacity: float or None :param internal_wall_net_area: See (32c), in m2. :type internal_wall_net_area: float :param internal_wall_heat_capacity: See (32c), in kJ/m2K. :type internal_wall_heat_capacity: float or None :param internal_floor_net_area: See (32d), in m2. :type internal_floor_net_area: float :param internal_floor_heat_capacity: See (32d), in kJ/m2K. :type internal_floor_heat_capacity: float or None :param internal_ceiling_net_area: See (32e), in m2. :type internal_ceiling_net_area: float :param internal_ceiling_heat_capacity: See (32e), in kJ/m2K. :type internal_ceiling_heat_capacity: float or None :param total_floor_area: See (4). :type total_floor_area: float :param thermal_bridges_appendix_k: in W/K. The transmission heat loss coefficient due to non-repeating thermal bridges as calculated using Appendix K. If None, then a simplified calculation is done in this module. :type thermal_bridges_appendix_k: float or None :param effective_air_change_rate: See (25). :type effective_air_change_rate: list (of floats) :param dwelling_volume: See (5), in m3. :type dwelling_volume: float :return: A dictionary with keys of ( solid_floor_UA, semi_glazed_door_UA, window_UA, roof_window_UA, basement_floor_UA, basement_floor_Ak, ground_floor_UA, ground_floor_Ak, exposed_floor_UA, exposed_floor_Ak, basement_wall_net_area, basement_wall_UA, basement_wall_Ak, external_wall_net_area, external_wall_UA, external_wall_Ak, roof_net_area, roof_UA, roof_Ak, total_area_of_external_elements, party_wall_UA, party_wall_Ak, party_floor_Ak, party_ceiling_Ak, internal_wall_Ak, internal_floor_Ak, internal_ceiling_Ak, fabric_heat_loss, heat_capacity, thermal_mass_parameter, thermal_bridges, total_fabric_heat_loss, ventilation_heat_loss_calculated_monthly, heat_transfer_coefficient, average_heat_transfer_coefficient, heat_loss_parameter, average_heat_loss_parameter ) - **solid_floor_UA** (`float`): See (26), in W/K. - **semi_glazed_door_UA** (`float`): See (26a), in W/K. - **window_UA** (`float`): See (27), in W/K. - **roof_window_UA** (`float`): See (27a), in W/K. - **basement_floor_UA** (`float`): See (28), in W/K. - **basement_floor_Ak** (`float`): See (28), in kJ/K. - **ground_floor_UA** (`float`): See (28a), in W/K. - **ground_floor_Ak** (`float`): See (28a), in kJ/K. 
- **exposed_floor_UA** (`float`): See (28b), in W/K. - **exposed_floor_Ak** (`float`): See (28b), in kJ/K. - **basement_wall_net_area** (`float`): See (29), in m2. - **basement_wall_UA** (`float`): See (29), in W/K. - **basement_wall_Ak** (`float`): See (29), in kJ/K. - **external_wall_net_area** (`float`): See (29a), in m2. - **external_wall_UA** (`float`): See (29a), in W/K. - **external_wall_Ak** (`float`): See (29a), in kJ/K. - **roof_net_area** (`float`): See (30), in m2. - **roof_UA** (`float`): See (30), in W/K. - **roof_Ak** (`float`): See (30), in kJ/K. - **total_area_of_external_elements** (`float`): See (31), in m2. - **party_wall_UA** (`float`): See (32), in W/K. - **party_wall_Ak** (`float`): See (32), in kJ/K. - **party_floor_Ak** (`float`): See (32a), in kJ/K. - **party_ceiling_Ak** (`float`): See (32b), in kJ/K. - **internal_wall_Ak** (`float`): See (32c), in kJ/K. - **internal_floor_Ak** (`float`): See (32d), in kJ/K. - **internal_ceiling_Ak** (`float`): See (32e), in kJ/K. - **fabric_heat_loss** (`float`): See (33), in W/K. - **heat_capacity** (`float`): See (34), in kJ/K. - **thermal_mass_parameter** (`float`): See (35), in kJ/m2K. - **thermal_bridges** (`float`): See (36), in W/K. - **total_fabric_heat_loss** (`float`): See (37), in W/K. - **ventilation_heat_loss_calculated_monthly** (`list` (`float`): See (82), in W/K. - **heat_transfer_coefficient** (`list` (`float`): See (39), in W/K. - **average_heat_transfer_coefficient** (`float`): See (39), in W/K. - **heat_loss_parameter** (`list` (`float`): See (40), in W/m2K. - **average_heat_loss_parameter** (`float`): See (40), in W/m2K. :rtype: dict """ if solid_door_net_area==0: solid_floor_UA = 0 else: solid_floor_UA = solid_door_net_area * solid_door_u_value if semi_glazed_door_net_area==0: semi_glazed_door_UA = 0 else: semi_glazed_door_UA = semi_glazed_door_net_area * semi_glazed_door_u_value if window_net_area==0: window_UA = 0 else: window_UA = window_net_area * window_u_value if roof_window_net_area==0: roof_window_UA = 0 else: roof_window_UA = roof_window_net_area * roof_window_u_value if basement_floor_net_area==0: basement_floor_UA = 0 basement_floor_Ak = 0 else: basement_floor_UA = basement_floor_net_area * basement_floor_u_value basement_floor_Ak = basement_floor_net_area * basement_floor_heat_capacity if ground_floor_net_area==0: ground_floor_UA=0 ground_floor_Ak=0 else: ground_floor_UA = ground_floor_net_area * ground_floor_u_value ground_floor_Ak = ground_floor_net_area * ground_floor_heat_capacity if exposed_floor_net_area==0: exposed_floor_UA=0 exposed_floor_Ak=0 else: exposed_floor_UA = exposed_floor_net_area * exposed_floor_u_value exposed_floor_Ak = exposed_floor_net_area * exposed_floor_heat_capacity basement_wall_net_area = basement_wall_gross_area - basement_wall_opening if basement_wall_net_area==0: basement_wall_UA=0 basement_wall_Ak=0 else: basement_wall_UA = basement_wall_net_area * basement_wall_u_value basement_wall_Ak= basement_wall_net_area * basement_wall_heat_capacity external_wall_net_area = external_wall_gross_area - external_wall_opening if external_wall_net_area==0: external_wall_UA=0 external_wall_Ak=0 else: external_wall_UA = external_wall_net_area * external_wall_u_value external_wall_Ak = external_wall_net_area * external_wall_heat_capacity roof_net_area = roof_gross_area - roof_opening if roof_net_area==0: roof_UA = 0 roof_Ak = 0 else: roof_UA = roof_net_area * roof_u_value roof_Ak = roof_net_area * roof_heat_capacity total_area_of_external_elements = ( solid_door_net_area + 
semi_glazed_door_net_area + window_net_area + roof_window_net_area + basement_floor_net_area + ground_floor_net_area + exposed_floor_net_area + basement_wall_net_area + external_wall_net_area + roof_net_area ) if party_wall_net_area==0: party_wall_UA = 0 else: party_wall_UA = party_wall_net_area * party_wall_u_value if party_wall_net_area==0: party_wall_Ak = 0 else: party_wall_Ak = party_wall_net_area * party_wall_heat_capacity if party_floor_net_area==0: party_floor_Ak = 0 else: party_floor_Ak = party_floor_net_area * party_floor_heat_capacity if party_ceiling_net_area==0: party_ceiling_Ak = 0 else: party_ceiling_Ak = party_ceiling_net_area * party_ceiling_heat_capacity if internal_wall_net_area==0: internal_wall_Ak = 0 else: internal_wall_Ak = internal_wall_net_area * internal_wall_heat_capacity if internal_floor_net_area==0: internal_floor_Ak = 0 else: internal_floor_Ak = internal_floor_net_area * internal_floor_heat_capacity if internal_ceiling_net_area==0: internal_ceiling_Ak = 0 else: internal_ceiling_Ak = internal_ceiling_net_area * internal_ceiling_heat_capacity fabric_heat_loss = ( solid_floor_UA + semi_glazed_door_UA + window_UA + roof_window_UA +
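# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the SAP module above): every element in
# the function follows the same pattern, "net area x U-value" for the heat
# loss term (UA, in W/K) and "net area x heat capacity" for the thermal mass
# term (A*k, in kJ/K), with the zero-area guard keeping a None U-value for an
# absent element from raising. Toy numbers; the helper name is hypothetical.
def element_ua_and_ak_sketch(net_area, u_value, heat_capacity):
    if net_area == 0:
        return 0, 0
    return net_area * u_value, net_area * heat_capacity

# A 60 m2 external wall with U = 0.3 W/m2K and k = 190 kJ/m2K:
# element_ua_and_ak_sketch(60, 0.3, 190) -> (18.0, 11400)
# ---------------------------------------------------------------------------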
= str(info[7]).decode('utf-8') index -= 1 dtime = self.convertTime(ftime) # self.table.setItem(index - 1, 0, QtGui.QTableWidgetItem(f)) self.table.setItem(index, 0, QtGui.QTableWidgetItem(f)) self.table.setItem(index, 1, QtGui.QTableWidgetItem(p)) sizeitem = QtGui.QTableWidgetItem(size+" ") if int(size) > 100*1024*1024: sizeitem.setTextColor(Qt.Qt.red) detection = self.convertSocre2Rank(score) self.table.setItem(index, 4, detection) sizeitem.setTextAlignment(Qt.Qt.AlignRight|Qt.Qt.AlignVCenter) self.table.setItem(index, 5, QtGui.QTableWidgetItem(mark)) self.table.setItem(index, 2, QtGui.QTableWidgetItem(sizeitem)) self.table.setItem(index, 3, QtGui.QTableWidgetItem(ftype)) self.table.setItem(index, 6, dtime) self.table.setItem(index, 7, QtGui.QTableWidgetItem(md5)) self.table.setItem(index, 8, QtGui.QTableWidgetItem(fid)) ''' 转换score数值与评判结果 ''' def convertSocre2Rank(self, score): if score >= 15: detection = QtGui.QTableWidgetItem(u"危险 - " + str(score)) detection.setTextColor(Qt.Qt.red) if score >= 10 and score < 15: detection = QtGui.QTableWidgetItem(u"可疑 - " + str(score)) detection.setTextColor(Qt.Qt.darkRed) if score >= 5 and score < 10: detection = QtGui.QTableWidgetItem(u"常规 - " + str(score)) detection.setTextColor(Qt.Qt.darkYellow) if score < 5 and score >=0: detection = QtGui.QTableWidgetItem(u"安全 - " + str(score)) detection.setTextColor(Qt.Qt.green) if score < 0: detection = QtGui.QTableWidgetItem(u" 不支持类型 ") detection.setTextColor(Qt.Qt.blue) detection.setTextAlignment(Qt.Qt.AlignCenter) return detection ''' 转换时间函数 ''' def convertTime(self, intime): nowtime = time.time() localtime = time.localtime(intime) midnight = nowtime - nowtime % 86400 + time.timezone if intime < midnight: outtime = time.strftime(' %Y-%m-%d ', localtime) else: outtime = time.strftime('%H:%M:%S', localtime) outtime = QtGui.QTableWidgetItem(outtime) outtime.setTextAlignment(Qt.Qt.AlignCenter) return outtime ''' checkbox事件 @flag: 标记全选与其他 ''' def checkBoxEvent(self, flag): ruleslist = [self.cbrpe, self.cbrcryp, self.cbrpack, self.cbrself, self.cbrwl] typeslist = [self.cbtpe, self.cbtofs, self.cbtsh, self.cbtzip, self.cbtmda, self.cbtasm] if flag == 0: # 对应rule全选操作 if self.cbrall.isChecked(): print "all rules selected" for i in ruleslist: i.setCheckState(Qt.Qt.Checked) else: for i in ruleslist: i.setCheckState(Qt.Qt.Unchecked) elif flag == 1: # 对应type全选操作 if self.cbtall.isChecked(): print "all type selected" for i in typeslist: i.setCheckState(Qt.Qt.Checked) else: for i in typeslist: i.setCheckState(Qt.Qt.Unchecked) else: if self.cbrall.isChecked() or self.cbtall.isChecked(): if flag < 7: self.cbrall.setCheckState(Qt.Qt.Unchecked) else: self.cbtall.setCheckState(Qt.Qt.Unchecked) policy = self.getScanPolicy() if set(self.rulelist).issubset(set(policy)): self.cbrall.setCheckState(Qt.Qt.Checked) if set(self.typelist).issubset(set(policy)): self.cbtall.setCheckState(Qt.Qt.Checked) ''' 获取数据库设置及扫描文件类型策略 判断checkbox勾选情况 默认使用内置规则检测pe文件 return policy列表 ''' def getScanPolicy(self): policy = [] if self.cbrall.isChecked(): policy.append("0") if self.cbtall.isChecked(): policy.append("1") if self.cbrpe.isChecked(): policy.append("2") if self.cbrcryp.isChecked(): policy.append("3") if self.cbrpack.isChecked(): policy.append("4") if self.cbrself.isChecked(): policy.append("5") if self.cbrwl.isChecked(): policy.append("6") if self.cbtpe.isChecked(): policy.append("7") if self.cbtofs.isChecked(): policy.append("8") if self.cbtsh.isChecked(): policy.append("9") if self.cbtzip.isChecked(): policy.append("10") if 
self.cbtmda.isChecked(): policy.append("11") if self.cbtasm.isChecked(): policy.append("12") return policy ''' 菜单栏点击事件响应函数 ''' def menuBarOperate(self, index): print index if 1 == index: dialog = self.setdialog dialog.setWindowFlags(Qt.Qt.WindowStaysOnTopHint) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap("./UILib/icons/setting_icon.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off) dialog.setWindowIcon(icon) dialog.show() if 2 == index: dialog = self.mlvddialog dialog.show() if 3 == index: dialog = self.wtldialog dialog.setWindowFlags(Qt.Qt.WindowStaysOnTopHint) # icon = QtGui.QIcon() # icon.addPixmap(QtGui.QPixmap(".\\UILib\\icons\\setting_icon.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off) # dialog.setWindowIcon(icon) dialog.show() if 4 == index: QtGui.QMessageBox.about(self, u"版本信息", u"软件版本:V0.24\n编译日期:2017年5月24日\nPython版本:2.7.1\n运行平台:ArchLinux 4.10.11\ncommit:270c2cee19e95b792300f7a39249f8b15ea9b421") if 6 == index: dialog = self.authorinfo icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap("./UILib/icons/pk.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) dialog.setWindowIcon(icon) dialog.show() ''' 右键菜单生成函数 仍需完善策略 ''' def generateMenu(self, pos): # 未选中元素时无法使用右键菜单 print pos # 原始坐标 row_num = [] # 右键操作列索引列表 rid_num = [] # 右键id索引列表 for i in self.table.selectionModel().selection().indexes(): row_num.append(i.row()) rid_num.append(int(self.table.item(i.row(), 8).text())) row_num = list(set(row_num)) rid_num = list(set(rid_num)) # print row_num # print len(row_num) # 未选中任何一行 if len(row_num) < 1: return # 选中多行 elif len(row_num) > 1: print u"多选" mumenu = QtGui.QMenu() muitem1 = mumenu.addAction(QtGui.QIcon("./UILib/icons/drescan_icon.png"), u"重新扫描") if 0 == FlagSet.scandoneflag: muitem1.setEnabled(False) maction = mumenu.exec_(self.table.mapToGlobal(pos)) if maction == muitem1: print "get clicked" self.filenum = len(row_num) rescanfiles = ("rescan", rid_num) print rescanfiles # 直接连接control中的scanfile线程 self.rule, useless = self.prevScanPrepare() self.filesThread = ScanFile(rescanfiles, self.rule) self.filesThread.fileSignal.connect(self.updateRescanInfo) # 连到更行重新函数中 self.filesThread.smsgSignal.connect(self.updateStatusBar) self.filesThread.flogSignal.connect(self.showFileAnalyzLog) self.filesThread.start() # 选中一行 else: row_num = row_num[0] menu = QtGui.QMenu() item1 = menu.addAction(QtGui.QIcon("./UILib/icons/detail_icon.png"), u"详细信息") # (u"详细信息") item2 = menu.addAction(QtGui.QIcon("./UILib/icons/drescan_icon.png"), u"重新扫描") advmenu = menu.addMenu(QtGui.QIcon("./UILib/icons/robot_icon.png"), u"机器学习") item3 = advmenu.addAction(QtGui.QIcon("./UILib/icons/img_icon.png"), u"二进制图像") item4 = advmenu.addAction(QtGui.QIcon("./UILib/icons/code_icon.png"), u"操作码分类") item5 = menu.addAction(QtGui.QIcon("./UILib/icons/mark_icon.png"), u"文件位置") item6 = menu.addAction(QtGui.QIcon("./UILib/icons/user_icon.png"), u"用户标记") item7 = menu.addAction(QtGui.QIcon("./UILib/icons/upload_icon.png"), u"上传样本") fname = self.table.item(row_num, 0).text() fpath = self.table.item(row_num, 1).text() ffull = os.path.join(str(fpath), str(fname)) # 文件绝对路径 fmd5 = self.table.item(row_num, 7).text() if 0 == FlagSet.scandoneflag: item2.setEnabled(False) item6.setEnabled(False) # if 'PE32' not in self.table.item(row_num, 3).text() and 'executable' not in self.table.item(row_num, 3).text(): # 更改为png图片及可执行文件都触发 ext = os.path.splitext(str(fname))[1] t_type = self.table.item(row_num, 3).text() if 'PE32' not in t_type and 'MS-DOS' not in t_type and 'png' not in ext: item3.setEnabled(False) if not str(self.table.item(row_num, 
    '''
    Context-menu builder.
    The policy here still needs refinement.
    '''
    def generateMenu(self, pos):
        # the context menu is unavailable when nothing is selected
        print pos  # raw coordinates
        row_num = []  # row indexes under the right click
        rid_num = []  # record ids under the right click
        for i in self.table.selectionModel().selection().indexes():
            row_num.append(i.row())
            rid_num.append(int(self.table.item(i.row(), 8).text()))
        row_num = list(set(row_num))
        rid_num = list(set(rid_num))
        # print row_num
        # print len(row_num)
        # no row selected
        if len(row_num) < 1:
            return
        # multiple rows selected
        elif len(row_num) > 1:
            print u"multi-select"
            mumenu = QtGui.QMenu()
            muitem1 = mumenu.addAction(QtGui.QIcon("./UILib/icons/drescan_icon.png"), u"Rescan")
            if 0 == FlagSet.scandoneflag:
                muitem1.setEnabled(False)
            maction = mumenu.exec_(self.table.mapToGlobal(pos))
            if maction == muitem1:
                print "get clicked"
                self.filenum = len(row_num)
                rescanfiles = ("rescan", rid_num)
                print rescanfiles
                # connect directly to the ScanFile thread in control
                self.rule, useless = self.prevScanPrepare()
                self.filesThread = ScanFile(rescanfiles, self.rule)
                self.filesThread.fileSignal.connect(self.updateRescanInfo)  # hook up the rescan-update slot
                self.filesThread.smsgSignal.connect(self.updateStatusBar)
                self.filesThread.flogSignal.connect(self.showFileAnalyzLog)
                self.filesThread.start()
        # single row selected
        else:
            row_num = row_num[0]
            menu = QtGui.QMenu()
            item1 = menu.addAction(QtGui.QIcon("./UILib/icons/detail_icon.png"), u"Details")
            item2 = menu.addAction(QtGui.QIcon("./UILib/icons/drescan_icon.png"), u"Rescan")
            advmenu = menu.addMenu(QtGui.QIcon("./UILib/icons/robot_icon.png"), u"Machine learning")
            item3 = advmenu.addAction(QtGui.QIcon("./UILib/icons/img_icon.png"), u"Binary image")
            item4 = advmenu.addAction(QtGui.QIcon("./UILib/icons/code_icon.png"), u"Opcode classification")
            item5 = menu.addAction(QtGui.QIcon("./UILib/icons/mark_icon.png"), u"File location")
            item6 = menu.addAction(QtGui.QIcon("./UILib/icons/user_icon.png"), u"User mark")
            item7 = menu.addAction(QtGui.QIcon("./UILib/icons/upload_icon.png"), u"Upload sample")
            fname = self.table.item(row_num, 0).text()
            fpath = self.table.item(row_num, 1).text()
            ffull = os.path.join(str(fpath), str(fname))  # absolute file path
            fmd5 = self.table.item(row_num, 7).text()
            if 0 == FlagSet.scandoneflag:
                item2.setEnabled(False)
                item6.setEnabled(False)
            # if 'PE32' not in self.table.item(row_num, 3).text() and 'executable' not in self.table.item(row_num, 3).text():
            # changed so that both png images and executables trigger it
            ext = os.path.splitext(str(fname))[1]
            t_type = self.table.item(row_num, 3).text()
            if 'PE32' not in t_type and 'MS-DOS' not in t_type and 'png' not in ext:
                item3.setEnabled(False)
            if not str(self.table.item(row_num, 0).text()).endswith('.asm'):
                item4.setEnabled(False)
            action = menu.exec_(self.table.mapToGlobal(pos))
            if action == item1:
                # print u'Option one selected; current row text is:', self.table.item(row_num, 0).text()
                print ffull
                filedetail = self.detailDialog
                filedetail.getFileName(ffull, fmd5)
                filedetail.setWindowFlags(Qt.Qt.WindowStaysOnTopHint)
                icon = QtGui.QIcon()
                icon.addPixmap(QtGui.QPixmap("./UILib/icons/detail_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
                filedetail.setWindowIcon(icon)
                filedetail.show()
            elif action == item2:
                self.filenum = 1
                rescan = []
                reid = int(self.table.item(row_num, 8).text())
                rescan.append(reid)
                rescanfiles = ("rescan", rescan)
                print rescanfiles
                # connect directly to the ScanFile thread in control
                self.rule, useless = self.prevScanPrepare()
                self.filesThread = ScanFile(rescanfiles, self.rule)
                self.filesThread.fileSignal.connect(self.updateRescanInfo)  # hook up the rescan-update slot
                self.filesThread.smsgSignal.connect(self.updateStatusBar)
                self.filesThread.flogSignal.connect(self.showFileAnalyzLog)
                self.filesThread.start()
            elif action == item3:
                print "going to create a pe image"
                malimgclass = self.malimgDialog
                malimgclass.getFileName(ffull)
                malimgclass.setWindowFlags(Qt.Qt.WindowStaysOnTopHint)
                icon = QtGui.QIcon()
                icon.addPixmap(QtGui.QPixmap("./UILib/icons/img_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
                malimgclass.setWindowIcon(icon)
                malimgclass.show()
            elif action == item4:
                print "going to analyse an asm file"
                opcodeclass = self.opcodeDialog
                opcodeclass.getFileName(ffull)
                opcodeclass.setWindowFlags(Qt.Qt.WindowStaysOnTopHint)
                icon = QtGui.QIcon()
                icon.addPixmap(QtGui.QPixmap("./UILib/icons/code_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
                opcodeclass.setWindowIcon(icon)
                opcodeclass.show()
            elif action == item5:
                print u"open the file's location"
                fname = self.table.item(row_num, 0).text()
                fpath = self.table.item(row_num, 1).text()
                # ffull = os.path.join(str(fpath).encode('cp936'), str(fname).encode('cp936'))
                # open the folder only
                # os.startfile(fpath)
                # open the file itself - use with care
                # os.startfile(ffull)
                # open the folder and select the file
                print str(ffull).encode('cp936')
                estr = 'explorer /select,' + str(ffull).encode('cp936')
                print os.system(estr)
            elif action == item6:
                # make the item editable
                self.table.editItem(self.table.item(row_num, 5))
                self.enter = row_num
                self.ui.statusbar.showMessage(u"Press Enter after editing to update the mark")
            elif action == item7:
                # without a database:
                # if the same file is opened twice in a row, keep the contents,
                # otherwise call the clear method
                flist = self.flist  # file-name list
                flist.append(fmd5)
                print flist
                dialog = self.uploadDialog
                dialog.getFilename(ffull)
                if len(flist) == 2:
                    if flist[0] != flist[1]:
                        dialog.clearFileData()
                    del flist[0]
                print flist
                dialog.setWindowFlags(Qt.Qt.WindowStaysOnTopHint)
                icon = QtGui.QIcon()
                icon.addPixmap(QtGui.QPixmap("./UILib/icons/upload_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
                dialog.setWindowIcon(icon)
                dialog.show()
            else:
                return

    '''
    Header click handler.
    index: the index of the clicked header section
    '''
    def tableHeaderEvent(self, index):
        if 0 == FlagSet.scansqlcount:
            return
        self.table.horizontalHeader().setSortIndicatorShown(True)
        if 0 == index:
            print u"sort by file name"
            self.table.sortByColumn(index)
        elif 1 == index:
            print u"sort by file path"
            self.table.sortByColumn(index)
        elif 2 == index:
            print u"sort by file size (Bytes)"
            sortflag = self.table.horizontalHeader().sortIndicatorOrder()
            print sortflag
            self.sortByFileSize(sortflag)
        elif 3 == index:
            print u"sort by file type"
            self.table.sortByColumn(index)
        elif 4 == index:
            print u"sort by scan result"
            self.table.sortByColumn(index)
        elif 5 == index:
            print u"sort by mark"
            self.table.sortByColumn(index)
        elif 6 == index:
            print u"sort by analysis date"
            self.table.sortByColumn(index)
        elif 7 == index:
            print u"sort by MD5"
            self.table.sortByColumn(index)
        else:
            self.table.horizontalHeader().setSortIndicatorShown(False)
            pass
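    # Illustrative sketch (not called by the GUI): sortByFileSize below goes
    # back to SQLite for ordering, presumably because the size column is
    # stored as display text and a plain text sort would misplace it. A
    # minimal demonstration of the lexicographic-vs-numeric pitfall:
    def _size_sort_demo(self):
        sizes = ["99", "100", "1024"]
        assert sorted(sizes) == ["100", "1024", "99"]           # lexicographic
        assert sorted(sizes, key=int) == ["99", "100", "1024"]  # numeric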
    '''
    Header-click helper: sort by file size via the database.
    '''
    def sortByFileSize(self, flag):
        try:
            sqlconn = sqlite3.connect("../db/fileinfo.db")
        except sqlite3.Error, e:
            print "sqlite connect failed", "\n", e.args[0]
        sqlcursor = sqlconn.cursor()
        if 0 == flag:
            sqlcursor.execute("select * from base_info where md5 not NULL order by size")
        elif 1 == flag:
            sqlcursor.execute("select * from base_info where md5 not NULL order by size desc")
        else:
            print "sort flag error, quit..."
            sqlconn.close()
            return
        sqlconn.commit()
        i = 0
        for raw in sqlcursor:
            i += 1
            self.updateFromDBInit(i, raw)
        sqlconn.close()

    '''
    Clear the table widget.
    '''
    def clearTableWidget(self):
        print self.table.rowCount()
        print "clear tablewidget"
        self.table.setRowCount(0)
        self.table.clearContents()
        self.rowindex = 0  # new rows start from the first line again
        self.ui.progressBar.setValue(0)  # reset the progress bar
        self.ui.statusbar.showMessage(u"Display list cleared")
        try:
            sqlconn = sqlite3.connect("../db/fileinfo.db")
        except sqlite3.Error, e:
            print "sqlite connect failed", "\n", e.args[0]
        sqlcursor = sqlconn.cursor()
        sqlcursor.execute("delete from base_info where id >= 0")
        sqlconn.commit()
        sqlconn.close()
        FlagSet.scansqlcount = 0  # reset the global counter flag
        self.table.horizontalHeader().setSortIndicatorShown(False)

    '''
    Save the current tab to the history database.
    '''
    def store2DataBaseByDate(self):
        tday = datetime.today()
        y = str(tday.year)
        m = str(tday.month)
        d = str(tday.day)
        src = "../db/fileinfo.db"
        dst = "../db/history/" + y + m + d + ".db"
        print src, dst
        shutil.copy(src, dst)
        if os.path.isfile(dst):
            self.ui.statusbar.showMessage(u"This page has been saved to " + dst)

    # ------------------------------tab2-----------------------------
    '''
    File-analysis log display.
    @flag: marker - not used yet
    @msg: the received content
    '''
    def showFileAnalyzLog(self, flag, msg):
        nowtime = time.localtime(time.time())
        nowtime = time.strftime('%Y-%m-%d %H:%M:%S', nowtime)
        nowtime = "[" + nowtime + "] "
        self.text.append(nowtime + msg)

    '''
    Clear the analysis history.
    '''
    def clearAnalysisLog(self):
        self.text.clear()
        self.showFileAnalyzLog(4, " clear all information")

    '''
    Save the analysis log.
    Written to file with a QTextStream.
    '''
    def saveAnalysisLog(self):
        tday = datetime.today()
        y = str(tday.year)
        m = str(tday.month)
        d = str(tday.day)
        H = str(tday.hour)
        M = str(tday.minute)
        S = str(tday.second)
        name = "../log/{}-{}-{}_{}-{}-{}.analy".format(y, m, d, H, M, S)
        try:
            ftmp = QtCore.QFile(name)
            ftmp.open(QtCore.QIODevice.WriteOnly)
            stream = QtCore.QTextStream(ftmp)
            slog = self.text.toPlainText()
            stream << slog
            self.ui.statusbar.showMessage(u"Scan report saved")
        except IOError, e:
            print e.args[0]

    # ------------------------------tab3-----------------------------
    '''
    Show the analysis history.
    Responds to the calendar widget.
    '''
    def getCalenderDate(self):
        qdate = self.calender.selectedDate()
        y = str(qdate.year())
        m = str(qdate.month())
        d = str(qdate.day())
        flogs = "File analysis records for {}-{}-{}".format(y, m, d)
        fname = "../db/history/" + y + m + d + ".db"
        self.historydb = fname
        if not os.path.isfile(fname):
            self.ui.statusbar.showMessage(u"No analysis history for this date")
            self.ui.PB_SelectHistory.setEnabled(False)
            self.table2.setRowCount(0)
            self.table2.clearContents()
            return
        self.ui.PB_SelectHistory.setEnabled(True)
        try:
            sqlconn = sqlite3.connect(fname)
        except sqlite3.Error, e:
            print "sqlite connect failed", "\n", e.args[0]
        self.ui.statusbar.showMessage(unicode(flogs))
        sqlcursor = sqlconn.cursor()
        sqlcursor.execute("select * from base_info where md5 not NULL")
        sqlconn.commit()
        i = 0
        for raw in sqlcursor:
            i += 1
            self.updateFromHistoryDB(i, raw)
        sqlconn.close()
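    # Illustrative sketch (not called by the GUI): store2DataBaseByDate and
    # getCalenderDate above name history snapshots str(year)+str(month)+str(day)
    # without zero padding, so "2017111" could mean 2017-1-11 or 2017-11-1.
    # A zero-padded variant would be unambiguous, though existing snapshots
    # would need renaming before switching:
    def _history_db_name_sketch(self, tday):
        return "../db/history/{:04d}{:02d}{:02d}.db".format(tday.year, tday.month, tday.day)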
    '''
    Show the contents of a history database.
    @index: table index
    @msg: one query record
    '''
    def updateFromHistoryDB(self, index, msg):
        info = msg
        # index = info[0] + 1  # index marker
        self.table2.setRowCount(index)
        p, f = os.path.split(str(info[1]).decode('utf-8'))
        size = str(info[2])
        ftype = str(info[3])
        ftime = float(info[4])
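    # Illustrative sketch (not called by the GUI): the midnight cutoff that
    # convertTime uses above -- timestamps from earlier today render as
    # HH:MM:SS, anything older renders as a date. nowtime is injectable here
    # purely to make the cutoff easy to exercise.
    def _format_time_sketch(self, intime, nowtime=None):
        if nowtime is None:
            nowtime = time.time()
        # seconds-since-epoch of local midnight: strip the fractional UTC day,
        # then shift by the local timezone offset
        midnight = nowtime - nowtime % 86400 + time.timezone
        if intime < midnight:
            return time.strftime(' %Y-%m-%d ', time.localtime(intime))
        return time.strftime('%H:%M:%S', time.localtime(intime))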
#!/usr/bin/env python3 """Core options v1 to v2 converter Just run this script as follows, to convert 'libretro_core_options.h' & 'Libretro_coreoptions_intl.h' to v2: python3 "/path/to/v1_to_v2_converter.py" "/path/to/where/libretro_core_options.h & Libretro_coreoptions_intl.h/are" The original files will be preserved as *.v1 """ import core_option_regex as cor import os import sys import glob def create_v2_code_file(struct_text, file_name): def replace_option(option_match): _offset = option_match.start(0) if option_match.group(3): res = option_match.group(0)[:option_match.end(2) - _offset] + ',\n NULL' + \ option_match.group(0)[option_match.end(2) - _offset:option_match.end(3) - _offset] + \ 'NULL,\n NULL,\n ' + option_match.group(0)[option_match.end(3) - _offset:] else: return option_match.group(0) return res comment_v1 = '/*\n' \ ' ********************************\n' \ ' * VERSION: 1.3\n' \ ' ********************************\n' \ ' *\n' \ ' * - 1.3: Move translations to libretro_core_options_intl.h\n' \ ' * - libretro_core_options_intl.h includes BOM and utf-8\n' \ ' * fix for MSVC 2010-2013\n' \ ' * - Added HAVE_NO_LANGEXTRA flag to disable translations\n' \ ' * on platforms/compilers without BOM support\n' \ ' * - 1.2: Use core options v1 interface when\n' \ ' * RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION is >= 1\n' \ ' * (previously required RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION == 1)\n' \ ' * - 1.1: Support generation of core options v0 retro_core_option_value\n' \ ' * arrays containing options with a single value\n' \ ' * - 1.0: First commit\n' \ '*/\n' comment_v2 = '/*\n' \ ' ********************************\n' \ ' * VERSION: 2.0\n' \ ' ********************************\n' \ ' *\n' \ ' * - 2.0: Add support for core options v2 interface\n' \ ' * - 1.3: Move translations to libretro_core_options_intl.h\n' \ ' * - libretro_core_options_intl.h includes BOM and utf-8\n' \ ' * fix for MSVC 2010-2013\n' \ ' * - Added HAVE_NO_LANGEXTRA flag to disable translations\n' \ ' * on platforms/compilers without BOM support\n' \ ' * - 1.2: Use core options v1 interface when\n' \ ' * RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION is >= 1\n' \ ' * (previously required RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION == 1)\n' \ ' * - 1.1: Support generation of core options v0 retro_core_option_value\n' \ ' * arrays containing options with a single value\n' \ ' * - 1.0: First commit\n' \ '*/\n' p_intl = cor.p_intl p_set = cor.p_set new_set = 'static INLINE void libretro_set_core_options(retro_environment_t environ_cb,\n' \ ' bool *categories_supported)\n' \ '{\n' \ ' unsigned version = 0;\n' \ '#ifndef HAVE_NO_LANGEXTRA\n' \ ' unsigned language = 0;\n' \ '#endif\n' \ '\n' \ ' if (!environ_cb || !categories_supported)\n' \ ' return;\n' \ '\n' \ ' *categories_supported = false;\n' \ '\n' \ ' if (!environ_cb(RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION, &version))\n' \ ' version = 0;\n' \ '\n' \ ' if (version >= 2)\n' \ ' {\n' \ '#ifndef HAVE_NO_LANGEXTRA\n' \ ' struct retro_core_options_v2_intl core_options_intl;\n' \ '\n' \ ' core_options_intl.us = &options_us;\n' \ ' core_options_intl.local = NULL;\n' \ '\n' \ ' if (environ_cb(RETRO_ENVIRONMENT_GET_LANGUAGE, &language) &&\n' \ ' (language < RETRO_LANGUAGE_LAST) && (language != RETRO_LANGUAGE_ENGLISH))\n' \ ' core_options_intl.local = options_intl[language];\n' \ '\n' \ ' *categories_supported = environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS_V2_INTL,\n' \ ' &core_options_intl);\n' \ '#else\n' \ ' *categories_supported = 
environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS_V2,\n' \ ' &options_us);\n' \ '#endif\n' \ ' }\n' \ ' else\n' \ ' {\n' \ ' size_t i, j;\n' \ ' size_t option_index = 0;\n' \ ' size_t num_options = 0;\n' \ ' struct retro_core_option_definition\n' \ ' *option_v1_defs_us = NULL;\n' \ '#ifndef HAVE_NO_LANGEXTRA\n' \ ' size_t num_options_intl = 0;\n' \ ' struct retro_core_option_v2_definition\n' \ ' *option_defs_intl = NULL;\n' \ ' struct retro_core_option_definition\n' \ ' *option_v1_defs_intl = NULL;\n' \ ' struct retro_core_options_intl\n' \ ' core_options_v1_intl;\n' \ '#endif\n' \ ' struct retro_variable *variables = NULL;\n' \ ' char **values_buf = NULL;\n' \ '\n' \ ' /* Determine total number of options */\n' \ ' while (true)\n' \ ' {\n' \ ' if (option_defs_us[num_options].key)\n' \ ' num_options++;\n' \ ' else\n' \ ' break;\n' \ ' }\n' \ '\n' \ ' if (version >= 1)\n' \ ' {\n' \ ' /* Allocate US array */\n' \ ' option_v1_defs_us = (struct retro_core_option_definition *)\n' \ ' calloc(num_options + 1, sizeof(struct retro_core_option_definition));\n' \ '\n' \ ' /* Copy parameters from option_defs_us array */\n' \ ' for (i = 0; i < num_options; i++)\n' \ ' {\n' \ ' struct retro_core_option_v2_definition *option_def_us = &option_defs_us[i];\n' \ ' struct retro_core_option_value *option_values = option_def_us->values;\n' \ ' struct retro_core_option_definition *option_v1_def_us = &option_v1_defs_us[i];\n' \ ' struct retro_core_option_value *option_v1_values = option_v1_def_us->values;\n' \ '\n' \ ' option_v1_def_us->key = option_def_us->key;\n' \ ' option_v1_def_us->desc = option_def_us->desc;\n' \ ' option_v1_def_us->info = option_def_us->info;\n' \ ' option_v1_def_us->default_value = option_def_us->default_value;\n' \ '\n' \ ' /* Values must be copied individually... */\n' \ ' while (option_values->value)\n' \ ' {\n' \ ' option_v1_values->value = option_values->value;\n' \ ' option_v1_values->label = option_values->label;\n' \ '\n' \ ' option_values++;\n' \ ' option_v1_values++;\n' \ ' }\n' \ ' }\n' \ '\n' \ '#ifndef HAVE_NO_LANGEXTRA\n' \ ' if (environ_cb(RETRO_ENVIRONMENT_GET_LANGUAGE, &language) &&\n' \ ' (language < RETRO_LANGUAGE_LAST) && (language != RETRO_LANGUAGE_ENGLISH) &&\n' \ ' options_intl[language])\n' \ ' option_defs_intl = options_intl[language]->definitions;\n' \ '\n' \ ' if (option_defs_intl)\n' \ ' {\n' \ ' /* Determine number of intl options */\n' \ ' while (true)\n' \ ' {\n' \ ' if (option_defs_intl[num_options_intl].key)\n' \ ' num_options_intl++;\n' \ ' else\n' \ ' break;\n' \ ' }\n' \ '\n' \ ' /* Allocate intl array */\n' \ ' option_v1_defs_intl = (struct retro_core_option_definition *)\n' \ ' calloc(num_options_intl + 1, sizeof(struct retro_core_option_definition));\n' \ '\n' \ ' /* Copy parameters from option_defs_intl array */\n' \ ' for (i = 0; i < num_options_intl; i++)\n' \ ' {\n' \ ' struct retro_core_option_v2_definition *option_def_intl = &option_defs_intl[i];\n' \ ' struct retro_core_option_value *option_values = option_def_intl->values;\n' \ ' struct retro_core_option_definition *option_v1_def_intl = &option_v1_defs_intl[i];\n' \ ' struct retro_core_option_value *option_v1_values = option_v1_def_intl->values;\n' \ '\n' \ ' option_v1_def_intl->key = option_def_intl->key;\n' \ ' option_v1_def_intl->desc = option_def_intl->desc;\n' \ ' option_v1_def_intl->info = option_def_intl->info;\n' \ ' option_v1_def_intl->default_value = option_def_intl->default_value;\n' \ '\n' \ ' /* Values must be copied individually... 
*/\n' \ ' while (option_values->value)\n' \ ' {\n' \ ' option_v1_values->value = option_values->value;\n' \ ' option_v1_values->label = option_values->label;\n' \ '\n' \ ' option_values++;\n' \ ' option_v1_values++;\n' \ ' }\n' \ ' }\n' \ ' }\n' \ '\n' \ ' core_options_v1_intl.us = option_v1_defs_us;\n' \ ' core_options_v1_intl.local = option_v1_defs_intl;\n' \ '\n' \ ' environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS_INTL, &core_options_v1_intl);\n' \ '#else\n' \ ' environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS, option_v1_defs_us);\n' \ '#endif\n' \ ' }\n' \ ' else\n' \ ' {\n' \ ' /* Allocate arrays */\n' \ ' variables = (struct retro_variable *)calloc(num_options + 1,\n' \ ' sizeof(struct retro_variable));\n' \ ' values_buf = (char **)calloc(num_options, sizeof(char *));\n' \ '\n' \ ' if (!variables || !values_buf)\n' \ ' goto error;\n' \ '\n' \ ' /* Copy parameters from option_defs_us array */\n' \ ' for (i = 0; i < num_options; i++)\n' \ ' {\n' \ ' const char *key = option_defs_us[i].key;\n' \ ' const char *desc = option_defs_us[i].desc;\n' \ ' const char *default_value = option_defs_us[i].default_value;\n' \ ' struct retro_core_option_value *values = option_defs_us[i].values;\n' \ ' size_t buf_len = 3;\n' \ ' size_t default_index = 0;\n' \ '\n' \ ' values_buf[i] = NULL;\n' \ '\n' \ ' if (desc)\n' \ ' {\n' \ ' size_t num_values = 0;\n' \ '\n' \ ' /* Determine number of values */\n' \ ' while (true)\n' \ ' {\n' \ ' if (values[num_values].value)\n' \ ' {\n' \ ' /* Check if this is the default value */\n' \ ' if (default_value)\n' \ ' if (strcmp(values[num_values].value, default_value) == 0)\n' \ ' default_index = num_values;\n' \ '\n' \ ' buf_len += strlen(values[num_values].value);\n' \ ' num_values++;\n' \ ' }\n' \ ' else\n' \ ' break;\n' \ ' }\n' \ '\n' \ ' /* Build values string */\n' \ ' if (num_values > 0)\n' \ ' {\n' \ ' buf_len += num_values - 1;\n' \ ' buf_len += strlen(desc);\n' \ '\n' \ ' values_buf[i] = (char *)calloc(buf_len, sizeof(char));\n' \ ' if (!values_buf[i])\n' \ ' goto error;\n' \ '\n' \ ' strcpy(values_buf[i], desc);\n' \ ' strcat(values_buf[i], "; ");\n' \ '\n' \ ' /* Default value goes first */\n' \ ' strcat(values_buf[i], values[default_index].value);\n' \ '\n' \ ' /* Add remaining values */\n' \ ' for (j = 0; j < num_values; j++)\n' \ ' {\n' \ ' if (j != default_index)\n' \ ' {\n' \ ' strcat(values_buf[i], "|");\n' \
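# A toy illustration (invented pattern and input, not from the converter
# above) of the offset arithmetic replace_option() relies on: regex group
# positions are absolute within the searched string, so they must be rebased
# against match.start(0) before slicing match.group(0) to splice new fields
# into the struct text.
import re

def _splice_after_group1(match):
    offset = match.start(0)
    whole = match.group(0)
    cut = match.end(1) - offset
    return whole[:cut] + ", NULL" + whole[cut:]

assert re.sub(r'\{ ("key"), ("value") \}', _splice_after_group1,
              '{ "key", "value" }') == '{ "key", NULL, "value" }'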
# Copyright (C) 2001 <NAME> # # This library is free software; you can redistribute it and/or # modify it under the terms of version 2.1 of the GNU Lesser General Public # License as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ Test cases for ldaptor.protocols.pureldap module. """ import six from twisted.trial import unittest from ldaptor.protocols import pureldap, pureber def s(*l): """Join all members of list to a byte string. Integer members are chr()ed""" r = b'' for e in l: if isinstance(e, int): e = six.int2byte(e) r = r + e return r def l(s): """Split a byte string to ord's of chars.""" return [six.byte2int([x]) for x in s] class KnownValues(unittest.TestCase): knownValues=( # class, args, kwargs, expected_result (pureldap.LDAPModifyRequest, [], { "object": 'cn=foo, dc=example, dc=com', "modification": [ pureber.BERSequence([ pureber.BEREnumerated(0), pureber.BERSequence([ pureldap.LDAPAttributeDescription('bar'), pureber.BERSet([ pureldap.LDAPString('a'), pureldap.LDAPString('b'), ]), ]), ]), ], }, None, [0x66, 50] + ([0x04, 0x1a] + l(b"cn=foo, dc=example, dc=com") + [0x30, 20] + ([0x30, 18] + ([0x0a, 0x01, 0x00] + [0x30, 13] + ([0x04, len(b"bar")] + l(b"bar") + [0x31, 0x06] + ([0x04, len(b"a")] + l(b"a") + [0x04, len(b"b")] + l(b"b")))))) ), (pureldap.LDAPModifyRequest, [], { "object": 'cn=foo, dc=example, dc=com', "modification": [ pureber.BERSequence([ pureber.BEREnumerated(1), pureber.BERSequence([ pureber.BEROctetString('bar'), pureber.BERSet([]), ]), ]), ], }, None, [0x66, 0x2c] + ([0x04, 0x1a] + l(b"cn=foo, dc=example, dc=com") + [0x30, 0x0e] + ([0x30, 0x0c] + ([0x0a, 0x01, 0x01] + [0x30, 0x07] + ([0x04, 0x03] + l(b"bar") + [0x31, 0x00])))) ), (pureldap.LDAPFilter_not, [], { "value": pureldap.LDAPFilter_present("foo"), }, pureldap.LDAPBERDecoderContext_Filter(fallback=pureber.BERDecoderContext()), [0xa2, 0x05] + [0x87] + [len(b"foo")] + l(b"foo")), (pureldap.LDAPFilter_or, [], { "value": [pureldap.LDAPFilter_equalityMatch( attributeDesc=pureldap.LDAPAttributeDescription(value='cn'), assertionValue=pureldap.LDAPAssertionValue(value='foo')), pureldap.LDAPFilter_equalityMatch( attributeDesc=pureldap.LDAPAttributeDescription(value='uid'), assertionValue=pureldap.LDAPAssertionValue(value='foo')), ] }, pureldap.LDAPBERDecoderContext_Filter(fallback=pureber.BERDecoderContext()), [0xa1, 23] + [0xa3, 9] + [0x04] + [len(b"cn")] + l(b"cn") + [0x04] + [len(b"foo")] + l(b"foo") + [0xa3, 10] + [0x04] + [len(b"uid")] + l(b"uid") + [0x04] + [len(b"foo")] + l(b"foo"), ), (pureldap.LDAPFilter_and, [], { "value": [pureldap.LDAPFilter_equalityMatch( attributeDesc=pureldap.LDAPAttributeDescription(value='cn'), assertionValue=pureldap.LDAPAssertionValue(value='foo')), pureldap.LDAPFilter_equalityMatch( attributeDesc=pureldap.LDAPAttributeDescription(value='uid'), assertionValue=pureldap.LDAPAssertionValue(value='foo')), ] }, pureldap.LDAPBERDecoderContext_Filter(fallback=pureber.BERDecoderContext()), [0xa0, 23] + [0xa3, 9] + [0x04] + [len(b"cn")] + l(b"cn") + [0x04] + [len(b"foo")] + l(b"foo") + [0xa3, 10] + 
[0x04] + [len(b"uid")] + l(b"uid") + [0x04] + [len(b"foo")] + l(b"foo"), ), (pureldap.LDAPModifyDNRequest, [], {'entry': 'cn=foo,dc=example,dc=com', 'newrdn': 'uid=bar', 'deleteoldrdn': 0, }, None, [0x6c, 0x26] + [0x04] + [len(b"cn=foo,dc=example,dc=com")] + l(b"cn=foo,dc=example,dc=com") + [0x04] + [len(b"uid=bar")] + l(b"uid=bar") + [0x01, 0x01, 0x00]), (pureldap.LDAPModifyDNRequest, [], {'entry': 'cn=aoue,dc=example,dc=com', 'newrdn': 'uid=aoue', 'deleteoldrdn': 0, 'newSuperior': 'ou=People,dc=example,dc=com', }, None, [0x6c, 69] + [0x04] + [len(b"cn=aoue,dc=example,dc=com")] + l(b"cn=aoue,dc=example,dc=com") + [0x04] + [len(b"uid=aoue")] + l(b"uid=aoue") + [0x01, 0x01, 0x00] + [0x80] + [len(b"ou=People,dc=example,dc=com")] + l(b"ou=People,dc=example,dc=com")), (pureldap.LDAPSearchRequest, [], {'baseObject': 'dc=yoja,dc=example,dc=com', }, None, [0x63, 57] + [0x04] + [len(b'dc=yoja,dc=example,dc=com')] + l(b'dc=yoja,dc=example,dc=com') # scope + [0x0a, 1, 2] # derefAliases + [0x0a, 1, 0] # sizeLimit + [0x02, 1, 0] # timeLimit + [0x02, 1, 0] # typesOnly + [0x01, 1, 0] # filter + [135, 11] + l(b'objectClass') # attributes + [48, 0] ), (pureldap.LDAPUnbindRequest, [], {}, None, [0x42, 0x00] ), (pureldap.LDAPSearchResultReference, [], {'uris': [pureldap.LDAPString(b'ldap://example.com/dc=foo,dc=example,dc=com'), pureldap.LDAPString(b'ldap://example.com/dc=bar,dc=example,dc=com')] }, None, [0x73, 90] + [0x04] + [len(b'ldap://example.com/dc=foo,dc=example,dc=com')] + l(b'ldap://example.com/dc=foo,dc=example,dc=com') + [0x04] + [len(b'ldap://example.com/dc=bar,dc=example,dc=com')] + l(b'ldap://example.com/dc=bar,dc=example,dc=com'), ), (pureldap.LDAPSearchResultDone, [], {'resultCode': 0, }, None, [0x65, 0x07] # resultCode + [0x0a, 0x01, 0x00] # matchedDN + [0x04] + [len(b'')] + l(b'') # errorMessage + [0x04] + [len(b'')] + l(b'') # referral, TODO + [] ), (pureldap.LDAPSearchResultDone, [], {'resultCode': 0, 'matchedDN': 'dc=foo,dc=example,dc=com', }, None, [0x65, 31] # resultCode + [0x0a, 0x01, 0x00] # matchedDN + [0x04] + [len(b'dc=foo,dc=example,dc=com')] + l(b'dc=foo,dc=example,dc=com') # errorMessage + [0x04] + [len(b'')] + l(b'') # referral, TODO + [] ), (pureldap.LDAPSearchResultDone, [], {'resultCode': 0, 'matchedDN': 'dc=foo,dc=example,dc=com', 'errorMessage': 'the foobar was fubar', }, None, [0x65, 51] # resultCode + [0x0a, 0x01, 0x00] # matchedDN + [0x04] + [len(b'dc=foo,dc=example,dc=com')] + l(b'dc=foo,dc=example,dc=com') # errorMessage + [0x04] + [len(b'the foobar was fubar')] + l(b'the foobar was fubar',) # referral, TODO + [] ), (pureldap.LDAPSearchResultDone, [], {'resultCode': 0, 'errorMessage': 'the foobar was fubar', }, None, [0x65, 27] # resultCode + [0x0a, 0x01, 0x00] # matchedDN + [0x04] + [len(b'')] + l(b'') # errorMessage + [0x04] + [len(b'the foobar was fubar')] + l(b'the foobar was fubar',) # referral, TODO + [] ), (pureldap.LDAPMessage, [], {'id': 42, 'value': pureldap.LDAPBindRequest(), }, pureldap.LDAPBERDecoderContext_TopLevel( inherit=pureldap.LDAPBERDecoderContext_LDAPMessage( fallback=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()), inherit=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()))), [0x30, 12] # id + [0x02, 0x01, 42] # value + l(pureldap.LDAPBindRequest().toWire()) ), (pureldap.LDAPControl, [], {'controlType': '1.2.3.4', }, None, [0x30, 9] # controlType + [0x04, 7] + l(b"1.2.3.4") ), (pureldap.LDAPControl, [], {'controlType': '1.2.3.4', 'criticality': True, }, None, [0x30, 12] # controlType + [0x04, 7] + 
l(b"1.2.3.4") # criticality + [0x01, 1, 0xFF] ), (pureldap.LDAPControl, [], {'controlType': '1.2.3.4', 'criticality': True, 'controlValue': 'silly', }, None, [0x30, 19] # controlType + [0x04, 7] + l(b"1.2.3.4") # criticality + [0x01, 1, 0xFF] # controlValue + [0x04, len(b"silly")] + l(b"silly") ), (pureldap.LDAPMessage, [], {'id': 42, 'value': pureldap.LDAPBindRequest(), 'controls': [ ('1.2.3.4', None, None), ('2.3.4.5', False), ('3.4.5.6', True, b'\x00\x01\x02\xFF'), ('4.5.6.7', None, b'\x00\x01\x02\xFF'), ], }, pureldap.LDAPBERDecoderContext_TopLevel( inherit=pureldap.LDAPBERDecoderContext_LDAPMessage( fallback=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()), inherit=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()))), [0x30, 76] # id + [0x02, 0x01, 42] # value + l(pureldap.LDAPBindRequest().toWire()) # controls + l(pureldap.LDAPControls(value=[ pureldap.LDAPControl(controlType='1.2.3.4'), pureldap.LDAPControl(controlType='2.3.4.5', criticality=False), pureldap.LDAPControl(controlType='3.4.5.6', criticality=True, controlValue=b'\x00\x01\x02\xFF'), pureldap.LDAPControl(controlType='4.5.6.7', criticality=None, controlValue=b'\x00\x01\x02\xFF'), ]).toWire()), ), (pureldap.LDAPFilter_equalityMatch, [], {'attributeDesc': pureldap.LDAPAttributeDescription('cn'), 'assertionValue': pureldap.LDAPAssertionValue('foo'), }, pureldap.LDAPBERDecoderContext_Filter( fallback=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()), inherit=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext())), [0xa3, 9] + ([0x04, 2] + l(b'cn') + [0x04, 3] + l(b'foo')) ), (pureldap.LDAPFilter_or, [[pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('cn'), assertionValue=pureldap.LDAPAssertionValue('foo')), pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('uid'), assertionValue=pureldap.LDAPAssertionValue('foo')), pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('mail'), assertionValue=pureldap.LDAPAssertionValue('foo')), pureldap.LDAPFilter_substrings(type='mail', substrings=[pureldap.LDAPFilter_substrings_initial('foo@')]), ]], {}, pureldap.LDAPBERDecoderContext_Filter( fallback=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()), inherit=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext())), [0xA1, 52] + ([0xa3, 9] + ([0x04, 2] + l(b'cn') + [0x04, 3] + l(b'foo')) + [0xa3, 10] + ([0x04, 3] + l(b'uid') + [0x04, 3] + l(b'foo')) + [0xa3, 11] + ([0x04, 4] + l(b'mail') + [0x04, 3] + l(b'foo')) + [0xa4, 14] + ([0x04, 4] + l(b'mail') + [0x30, 6] + ([0x80, 4] + l(b'foo@')))) ), (pureldap.LDAPSearchRequest, [], {'baseObject': 'dc=example,dc=com', 'scope': pureldap.LDAP_SCOPE_wholeSubtree, 'derefAliases': pureldap.LDAP_DEREF_neverDerefAliases, 'sizeLimit': 1, 'timeLimit': 0, 'typesOnly': False, 'filter': pureldap.LDAPFilter_or([ pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('cn'), assertionValue=pureldap.LDAPAssertionValue('foo')), pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('uid'), assertionValue=pureldap.LDAPAssertionValue('foo')), pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('mail'), assertionValue=pureldap.LDAPAssertionValue('foo')), pureldap.LDAPFilter_substrings(type='mail', substrings=[pureldap.LDAPFilter_substrings_initial('foo@')]), ]), 'attributes': [''], }, pureldap.LDAPBERDecoderContext_LDAPMessage( 
fallback=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()), inherit=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext())), [0x63, 92] + ([0x04, 17] + l(b'dc=example,dc=com') + [0x0a, 1, 0x02] + [0x0a, 1, 0x00] + [0x02, 1, 0x01] + [0x02, 1, 0x00] + [0x01, 1, 0x00] + [0xA1, 52] + ([0xa3, 9] + ([0x04, 2] + l(b'cn') + [0x04, 3] + l(b'foo')) + [0xa3, 10] + ([0x04, 3] + l(b'uid') + [0x04, 3] + l(b'foo')) + [0xa3, 11] + ([0x04, 4] + l(b'mail') + [0x04, 3] + l(b'foo')) + [0xa4, 14] + ([0x04, 4] + l(b'mail') + [0x30, 6] + ([0x80, 4] + l(b'foo@')))) + [0x30, 2] + ([0x04, 0]) ) ), (pureldap.LDAPMessage, [], {'id': 1, 'value': pureldap.LDAPSearchRequest( baseObject='dc=example,dc=com', scope=pureldap.LDAP_SCOPE_wholeSubtree, derefAliases=pureldap.LDAP_DEREF_neverDerefAliases, sizeLimit=1, timeLimit=0, typesOnly=False, filter=pureldap.LDAPFilter_or([ pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('cn'), assertionValue=pureldap.LDAPAssertionValue('foo')), pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('uid'), assertionValue=pureldap.LDAPAssertionValue('foo')), pureldap.LDAPFilter_equalityMatch(attributeDesc=pureldap.LDAPAttributeDescription('mail'), assertionValue=pureldap.LDAPAssertionValue('foo')), pureldap.LDAPFilter_substrings(type='mail', substrings=[pureldap.LDAPFilter_substrings_initial('foo@')]), ]), attributes=[''], ), }, pureldap.LDAPBERDecoderContext_TopLevel( inherit=pureldap.LDAPBERDecoderContext_LDAPMessage( fallback=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()), inherit=pureldap.LDAPBERDecoderContext(fallback=pureber.BERDecoderContext()))), [0x30, 97] # id + [0x02, 1, 1] # value + [0x63, 92] + ([0x04, 17] + l(b'dc=example,dc=com') + [0x0a, 1, 0x02] + [0x0a, 1, 0x00] + [0x02, 1, 0x01] + [0x02, 1, 0x00] + [0x01, 1, 0x00] + [0xA1, 52] + ([0xa3, 9] + ([0x04, 2] + l(b'cn') + [0x04, 3] + l(b'foo')) + [0xa3, 10] + ([0x04, 3] + l(b'uid') + [0x04, 3] + l(b'foo')) + [0xa3, 11] + ([0x04, 4] + l(b'mail') + [0x04, 3] + l(b'foo')) + [0xa4, 14] + ([0x04, 4] + l(b'mail') + [0x30, 6] + ([0x80, 4] + l(b'foo@')))) + [0x30, 2] + ([0x04, 0]) ) ), (pureldap.LDAPExtendedRequest, [], {'requestName': '42.42.42', 'requestValue': 'foo', }, None, [0x40|0x20|23, 1+1+8+1+1+3] + ([0x80|0] + [len(b'42.42.42')] + l(b'42.42.42')) + ([0x80|1] + [len(b'foo')] + l(b'foo')) ), (pureldap.LDAPExtendedRequest, [], {'requestName': '42.42.42', 'requestValue': None, }, None, [0x40|0x20|23, 1+1+8] + ([0x80|0] + [len(b'42.42.42')] + l(b'42.42.42')) ), (pureldap.LDAPExtendedResponse, [], {'resultCode': 49, 'matchedDN': 'foo', 'errorMessage': 'bar', 'responseName': None, 'response': None, }, None, [0x40|0x20|24, 3+2+3+2+3, 0x0a, 1, 49, 0x04,
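# The expected byte sequences in knownValues above follow BER tag-length-value
# framing: one tag octet, one length octet, then the value bytes. Only the
# short length form (values shorter than 128 bytes) appears in these vectors.
# A minimal sketch of that framing, mirroring the [0x04, len(b'foo')] + l(b'foo')
# idiom used in the test data; the helper name is invented for illustration
# and reuses the s() helper defined above:
def ber_tlv_short(tag, value):
    """BER short-form TLV: tag octet, length octet (< 0x80), value bytes."""
    assert len(value) < 0x80
    return s(tag, len(value), value)

assert ber_tlv_short(0x04, b'foo') == b'\x04\x03foo'  # an OCTET STRING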
with self.out: clear_output() date_selected = self.qgrid_obj.get_selected_df().reset_index()["rev_time"].iloc[0] editor_selected = self.qgrid_obj.get_selected_df().reset_index()["editor_id"].iloc[0] editor_name = self.qgrid_obj.get_selected_df().reset_index()["editor"].iloc[0] page_title = self.all_tokens["article_title"].unique()[0] display(md("Loading revisions info...")) second_df = self.revision_manager.get_main(date_selected, editor_selected, self.current_freq) clear_output() display(md(f"Within **{self.current_freq}** timeframe, you have selected **{editor_name}** (id: {editor_selected})")) display(HTML(f"The revisions fall in <a href='https://{self.lng}.wikipedia.org/w/index.php?date-range-to={date_selected}&tagfilter=&title={page_title}&action=history' target='_blank'>{date_selected}</a>")) second_df.rename({"main_opponent": "main_op", "stopwords_ratio": "SW_ratio", "productivity": "prod"}, axis=1, inplace=True) columns_set = {"rev_time": {"width": 165}, "rev_id": {"width": 85}, "adds": {"width": 50}, "dels": {"width": 50}, "reins": {"width": 50}, "prod": {"width": 50, "toolTip": "productivity"}, "conflict": {"width": 70}, "SW_ratio": {"width": 82, "toolTip": "stopwords ratio"}, "main_op": {"width": 80, "toolTip": "main opponent"}, "min_react": {"width": 132, "toolTip": "min reaction time"}, "Damaging": {"width": 92}, "Goodfaith": {"width": 90}} self.second_qgrid = qgrid.show_grid(second_df, grid_options={'forceFitColumns': True, 'syncColumnCellResize': True}, column_definitions=columns_set) display(self.second_qgrid) self.out2 = Output() display(self.out2) self.second_qgrid.observe(self.on_select_revision, names=['_selected_rows']) def listen(self, _range1, _range2, granularity): "Listener." if (len(str(_range1.year)) < 4) | (len(str(_range2.year)) < 4): return display(md("Please enter the correct date!")) if _range1 > _range2: return display(md("Please enter the correct date!")) else: df = self.df[(self.df.rev_time.dt.date >= _range1) & (self.df.rev_time.dt.date <= _range2)] df_from_agg = self._get_ratios(df, freq=granularity) df_from_agg = df_from_agg.rename({"editor_str": "editor_id"}, axis=1) df_display = self._merge_main(df_from_agg, freq=granularity) df_display["conflict"] = (df_display["conflict"] / df_display["elegibles"]).fillna(0) df_display["main_opponent"] = df_display["main_opponent"].replace(self.names_id) df_display.rename({"main_opponent": "main_op", "stopwords_ratio": "SW_ratio", "revisions": "revs", "productivity": "prod"}, axis=1, inplace=True) displayed = df_display[["rev_time", "editor", "adds", "dels", "reins", "prod", "conflict", "SW_ratio", "main_op", "avg_reac_time", "revs", "editor_id"]].set_index("rev_time").sort_index(ascending=False) columns_set = {"rev_time": {"width": 90}, "editor": {"width": 85}, "adds": {"width": 50}, "dels": {"width": 50}, "reins": {"width": 50}, "prod": {"width": 50, "toolTip": "productivity"}, "conflict": {"width": 70}, "SW_ratio": {"width": 80, "toolTip": "stopwords ratio"}, "main_op": {"width": 90, "toolTip": "main opponent"}, "avg_reac_time": {"width": 125, "toolTip": "average reaction time"}, "revs": {"width": 45, "toolTip": "revisions"}, "editor_id": {"width": 80}} self.qgrid_obj = qgrid.show_grid(displayed, grid_options={'forceFitColumns':True}, column_definitions=columns_set) display(self.qgrid_obj) self.out = Output() display(self.out) self.current_freq = granularity if self.search_widget != None: self.qgrid_obj.observe(self.on_select_change, names=['_selected_rows']) class RevisionsManager: """ Called in 
EditorsListener. Calculate several metrics for the revisions of a selected editor ... Attributes: ----------- agg_actions (pd.DataFrame): global variable agg_actions, i.e. the dataframe storing actions, conflicts, stopwords etc. of each revision. all_elegibles (pd.DataFrame): elegibles actions (including stopwords) from ConflictManager. all_tokens (pd.DataFrame): actions occurring on all tokens, including stopwords, from ConflictManager. opponents_info (pd.DataFrame): Opponents information derived from EditorsListener.__calculate(). lng (str): language from {'en', 'de'} """ def __init__(self, agg, all_elegibles, all_tokens, opponents_info, lng): self.agg_actions = agg self.names_dict = agg[["editor_str", "editor"]].drop_duplicates().set_index("editor_str")["editor"].to_dict() self.all_elegibles = all_elegibles self.all_tokens = all_tokens self.opponents_info = opponents_info self.lng = lng def get_main(self, selected_date, selected_editor, freq): """ Called in EditorsListener.on_select_change(). Merge several tables with different metrics. ... Parameters: ----------- selected_date (datetime.date): Selected date. selected_editor (str): editor id freq (str): {"Monthly", "Weekly", "Daily"} ... Returns: -------- df_merge2 (pd.DataFrame): the second table in A2.1. """ agg = self._add_revision_id() filtered_df = self._get_filtered_df(agg, selected_date, selected_editor, freq).reset_index(drop=True) df_ratios = self._get_ratios(filtered_df).reset_index() df_opponents = self._get_rev_conflict_reac(df_ratios) df_merge1 = df_ratios.merge(df_opponents, on="rev_id", how="left") df_ores = self._get_ores(df_merge1) df_merge2 = df_merge1.merge(df_ores, on="rev_id", how="left").set_index("rev_time") return df_merge2 def _add_revision_id(self): """ Add revision id to aggregation dataframe. ... Returns: -------- agg_with_revs (pd.DataFrame): agg_actions with an additional revision id column. """ # Drop duplicated revisions no_dup_actions = merged_tokens_and_elegibles(self.all_elegibles, self.all_tokens, drop=True) # Only take rev_time and revision columns. no_dup_actions["rev_time"] = no_dup_actions["rev_time"].astype("datetime64[ns]") revs_to_merged = no_dup_actions[["rev_time", "revision"]] # Merge aggregation table and revision time/id table. agg_with_revs = self.agg_actions.merge(revs_to_merged, on="rev_time", how="left") #agg_with_revs.insert(1, "rev_id", agg_with_revs["revision"]) agg_with_revs = agg_with_revs.drop("revision", axis=1).sort_values("rev_time").reset_index(drop=True) return agg_with_revs def _get_filtered_df(self, df, input_date, editor, freq): """ Filter the aggregation dataframe using input year/month, editor and granularity. ... Parameters: ----------- df (pd.DataFrame): aggregation dataframe with revision ids. input_date (datetime.date): rev_time in selected row ... Returns: ----------- filtered_df (pd.DataFrame): revisions with some agg metrics in a particular time frame. """ # Decompose inputs years = df["rev_time"].dt.year months = df["rev_time"].dt.month days = df["rev_time"].dt.day # Create some masks. mask_year = years == input_date.year mask_month = months == input_date.month mask_editor = df["editor_str"] == editor # Filter by granularities. 
if freq == "Monthly": filtered_df = df.loc[mask_year & mask_month & mask_editor] elif freq == "Weekly": date_diff = input_date - df["rev_time"].dt.date mask_within_week = (date_diff <= timedelta(days=6)) & (timedelta(days=0) <= date_diff) filtered_df = df.loc[mask_within_week & mask_editor] else: mask_day = days == input_date.day filtered_df = df.loc[mask_year & mask_month & mask_day & mask_editor] return filtered_df def _get_ratios(self, filtered_agg): """ Calculate ratios like productivity, stopwords ratio. Also standardize conflict score using elegible actions. ... Parameters: ----------- filtered_agg (pd.DataFrame): output of get_filtered_df() method. ... Returns: ----------- to_display (pd.DataFrame): dataframe with several additional ratio columns. """ filtered_agg["productivity"] = filtered_agg["total_surv_48h"] / filtered_agg["total"] filtered_agg["stopwords_ratio"] = filtered_agg["total_stopword_count"] / filtered_agg["total"] filtered_agg["conflict"] = filtered_agg["conflict"] / filtered_agg["elegibles"] filtered_agg["conflict"] = filtered_agg["conflict"].fillna(0) to_display = filtered_agg[["rev_time", "rev_id", "adds", "dels", "reins", "productivity", "conflict", "stopwords_ratio"]].set_index("rev_time") return to_display def _get_most_conflict_from_rev(self, rev_df): """ Called in get_rev_conflict_reac() method. Analyse the main opponent and min reaction time for each revision. ... Parameters: ----------- rev_df (pd.DataFrame): actions without stopwords of a particular revision. ... Returns: ----------- main_opponent (str): main opponent's name. min_react (str): minimum reaction time. """ # Sort conflict score in desc order and find the fastest response. sort_rev = rev_df.sort_values("conflict", ascending=False) min_react = str(sort_rev.iloc[0]["time_diff"]) # Find the main opponent. rev_id = rev_df["revision"].unique()[0] editor = rev_df["editor"].unique()[0] period = rev_df["rev_time"].astype("datetime64[ns]").dt.to_period('M').unique()[0] mask_date = self.opponents_info["edit_time"].astype("datetime64[ns]").dt.to_period('M') == period mask_editor = self.opponents_info["idx_editor"] == editor opponent_info = self.opponents_info[mask_date & mask_editor] opponent_info = opponent_info[opponent_info["revision"] == rev_id] main_opponent_id = opponent_info.groupby(["editor"]).agg({"conflict": "sum"}).sort_values("conflict", ascending=False).iloc[0].name main_opponent = self.names_dict[main_opponent_id] return main_opponent, min_react def _get_rev_conflict_reac(self, df_agg): """ Get main opponent and minimum reaction time of each given revision. ... Parameters: ----------- df_agg (pd.DataFrame): output of get_ratios() method. ... Returns: ----------- rev_conflicts (pd.DataFrame): dataframe with main opponent and min reaction time """ # Revisions array. #df_agg = df_agg.loc[~(df_agg["conflict"] == 0)] second_revs = df_agg["rev_id"].values # Only consider non-stopwords. rev_conflicts = pd.DataFrame(columns=["rev_id", "main_opponent", "min_react"]) actions_exc_stop = remove_stopwords(merged_tokens_and_elegibles(self.all_elegibles, self.all_tokens), self.lng).reset_index(drop=True) # Mark the last day of each time frame. mark_last_day(actions_exc_stop) # Also consider the first deletion (that is not considered in elegible actions). fill_first_out(actions_exc_stop) # For each revision analyse the main opponent using get_most_conflict_from_rev method. 
for idx, rev in enumerate(second_revs): some_rev = actions_exc_stop[actions_exc_stop["revision"] == rev] some_rev = some_rev.dropna(subset=["conflict"]) if len(some_rev) != 0: self.problem_rev = some_rev rev_conflicts.loc[idx] = [rev] + list(self._get_most_conflict_from_rev(some_rev)) return rev_conflicts def _split_arr(self, arr, threshold=50): return [arr[i: i + threshold] for i in range(0, len(arr), threshold)] def _get_ores(self, merge1): """ Get Goodfaith and Damaging scores from ORES API. ... Parameters: ----------- merge1 (pd.DataFrame): df_merge1 in get_main() method. ... Returns: ----------- ores_df (pd.DataFrame): df storing scores. """ # Revsion list revs_list = merge1["rev_id"].values # Use ORES API ores_dv = ORESDV(ORESAPI(lng=self.lng)) if len(revs_list) > 50: revs_container = [] for chunk in self._split_arr(revs_list): chunk_df = ores_dv.get_goodfaith_damage(chunk) revs_container.append(chunk_df) ores_df = pd.concat(revs_container).reset_index(drop=True) else: ores_df = ores_dv.get_goodfaith_damage(revs_list) return ores_df class RankedEditorsListener: """Class for ranking editors by 48-hour survival actions. ... Attributes: ----------- df (pd.DataFrame): stores all survival actions for each editor (if "Unregistered", replace it by IP.) """ def __init__(self, agg): # Specify unregistered id. surv_total = agg[["rev_time", "editor_str", "editor", "total_surv_48h"]] new_editor = pd.DataFrame(np.where(surv_total["editor"] == "Unregistered", surv_total["editor_str"], surv_total["editor"]), columns=["editor"]) surv_total = pd.concat([surv_total[["rev_time", "total_surv_48h"]], new_editor], axis=1) self.df = surv_total def listen(self, _range1, _range2, granularity, top): "Listener." df_time = self.df[(self.df.rev_time.dt.date >= _range1) & (self.df.rev_time.dt.date <= _range2)].reset_index(drop=True) # Get top editors list. group_only_surv = df_time.groupby("editor")\ .agg({"total_surv_48h": "sum"}).sort_values("total_surv_48h", ascending=False).reset_index() editors_top = list(group_only_surv.iloc[0:top,:]["editor"]) # For displaying group_to_display = group_only_surv.iloc[0:top,:].reset_index(drop=True) group_to_display["rank"] = group_to_display.index + 1 group_to_display = group_to_display.rename({"editor": "editor", "total_surv_48h": "total 48h-survival actions"}, axis=1).set_index("rank") # Sort over time group_surv = df_time.groupby(["rev_time", "editor"]).agg({"total_surv_48h": "sum"}).reset_index() group_surv = group_surv.sort_values(["rev_time", "total_surv_48h"], ascending=(True, False)) # Pick up top 5/10/20 editors. 
mask_inlist = group_surv["editor"].isin(editors_top) group_surv_top = group_surv.loc[mask_inlist] merge_df = group_surv[["rev_time"]].merge(group_surv_top[["rev_time", "editor", "total_surv_48h"]], how="left").reset_index(drop=True) #Table self.qgrid_obj = qgrid.show_grid(group_to_display, grid_options={"minVisibleRows": 2}) display(self.qgrid_obj) # Generate pivot table pivoted = merge_df.pivot(index="rev_time", columns="editor", values="total_surv_48h") pivot_table = pd.DataFrame(pivoted.to_records()) if "nan" in pivot_table.columns: pivot_table = pivot_table.drop("nan", axis=1).fillna(0) else: pivot_table = pivot_table.fillna(0) cols = list(pivot_table.columns) cols.remove("rev_time") agg_dict = {editor: "sum" for editor in cols} if granularity != "Timestamp (Revision)": group_pivot = pivot_table.groupby(pd.Grouper(key="rev_time", freq=granularity[0])).agg(agg_dict).reset_index() else: group_pivot = pivot_table group_pivot = group_pivot[["rev_time"] + editors_top] fig = go.Figure() for editor
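# Illustrative sketch (not used above): RevisionsManager._get_ores batches
# revision ids through _split_arr because it queries the ORES scoring API in
# chunks of at most 50 revisions, then reassembles a single frame with
# pd.concat. fetch_scores below stands in for the real ORESDV(ORESAPI(...))
# wrapper; the function name is invented for this sketch.
def batched_ores_scores(rev_ids, fetch_scores, threshold=50):
    chunks = [rev_ids[i: i + threshold] for i in range(0, len(rev_ids), threshold)]
    frames = [fetch_scores(chunk) for chunk in chunks]
    return pd.concat(frames).reset_index(drop=True) if frames else pd.DataFrame()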
start to avoid redirects LOGIN_URL = '/' LOGIN_REDIRECT_URL = '/' LOGOUT_REDIRECT_URL = '/' # When logging in with browser ID, a username is created automatically. # In the case of duplicates, the process is recursive up to this number # of times. MAX_GEN_USERNAME_TRIES = 50 # Email settings ADDONS_EMAIL = "Mozilla Add-ons <<EMAIL>>" DEFAULT_FROM_EMAIL = ADDONS_EMAIL # Email goes to the console by default. s/console/smtp/ for regular delivery EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # Please use all lowercase for the QA allow list. EMAIL_QA_ALLOW_LIST = env.list('EMAIL_QA_ALLOW_LIST', default=()) # Please use all lowercase for the deny_list. EMAIL_DENY_LIST = env.list('EMAIL_DENY_LIST', default=('<EMAIL>',)) # URL for Add-on Validation FAQ. VALIDATION_FAQ_URL = ('https://wiki.mozilla.org/Add-ons/Reviewers/Guide/' 'AddonReviews#Step_2:_Automatic_validation') SHIELD_STUDIES_SUPPORT_URL = 'https://support.mozilla.org/kb/shield' # Celery CELERY_BROKER_URL = env( 'CELERY_BROKER_URL', default=os.environ.get( 'CELERY_BROKER_URL', 'amqp://olympia:olympia@localhost:5672/olympia')) CELERY_BROKER_CONNECTION_TIMEOUT = 0.1 CELERY_BROKER_HEARTBEAT = 60 * 15 CELERY_TASK_DEFAULT_QUEUE = 'default' CELERY_RESULT_BACKEND = env( 'CELERY_RESULT_BACKEND', default=os.environ.get( 'CELERY_RESULT_BACKEND', 'redis://localhost:6379/1')) CELERY_TASK_IGNORE_RESULT = True CELERY_SEND_TASK_ERROR_EMAILS = True CELERY_WORKER_HIJACK_ROOT_LOGGER = False # Testing responsiveness without rate limits. CELERY_WORKER_DISABLE_RATE_LIMITS = True # Only serialize celery tasks using JSON. CELERY_ACCEPT_CONTENT = ['json'] CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' # When testing, we always want tasks to raise exceptions. Good for sanity. CELERY_TASK_EAGER_PROPAGATES = True # Time in seconds before celery.exceptions.SoftTimeLimitExceeded is raised. # The task can catch that and recover but should exit ASAP. Note that there is # a separate, shorter timeout for validation tasks. CELERY_TASK_SOFT_TIME_LIMIT = 60 * 30 # List of modules that contain tasks and that wouldn't be autodiscovered by # celery. Typically, it's either `tasks` modules from something not in # INSTALLED_APPS, or modules not called `tasks`. CELERY_IMPORTS = ( 'olympia.lib.crypto.tasks', 'olympia.lib.es.management.commands.reindex', 'olympia.stats.management.commands.index_stats', ) CELERY_TASK_QUEUES = ( Queue('addons', routing_key='addons'), Queue('adhoc', routing_key='adhoc'), Queue('amo', routing_key='amo'), Queue('bandwagon', routing_key='bandwagon'), Queue('cron', routing_key='cron'), Queue('crypto', routing_key='crypto'), Queue('default', routing_key='default'), Queue('devhub', routing_key='devhub'), Queue('images', routing_key='images'), Queue('priority', routing_key='priority'), Queue('ratings', routing_key='ratings'), Queue('reviewers', routing_key='reviewers'), Queue('search', routing_key='search'), Queue('stats', routing_key='stats'), Queue('tags', routing_key='tags'), Queue('users', routing_key='users'), Queue('zadmin', routing_key='zadmin'), ) # We have separate celeryds for processing devhub & images as fast as possible # Some notes: # - always add routes here instead of @task(queue=<name>) # - when adding a queue, be sure to update deploy.py so that it gets restarted CELERY_TASK_ROUTES = { # Priority. # If your tasks need to be run as soon as possible, add them here so they # are routed to the priority queue. 
'olympia.addons.tasks.index_addons': {'queue': 'priority'}, 'olympia.addons.tasks.unindex_addons': {'queue': 'priority'}, 'olympia.blocklist.tasks.process_blocklistsubmission': { 'queue': 'priority' }, 'olympia.blocklist.tasks.import_block_from_blocklist': { 'queue': 'priority' }, 'olympia.blocklist.tasks.delete_imported_block_from_blocklist': { 'queue': 'priority' }, 'olympia.blocklist.tasks.upload_filter': { 'queue': 'priority' }, 'olympia.versions.tasks.generate_static_theme_preview': { 'queue': 'priority' }, # Other queues we prioritize below. # 'Default' queue. 'celery.accumulate': {'queue': 'default'}, 'celery.backend_cleanup': {'queue': 'default'}, 'celery.chain': {'queue': 'default'}, 'celery.chord': {'queue': 'default'}, 'celery.chunks': {'queue': 'default'}, 'celery.group': {'queue': 'default'}, 'celery.map': {'queue': 'default'}, 'celery.starmap': {'queue': 'default'}, # AMO Devhub. 'olympia.devhub.tasks.check_for_api_keys_in_file': {'queue': 'devhub'}, 'olympia.devhub.tasks.create_initial_validation_results': { 'queue': 'devhub' }, 'olympia.devhub.tasks.forward_linter_results': {'queue': 'devhub'}, 'olympia.devhub.tasks.get_preview_sizes': {'queue': 'devhub'}, 'olympia.devhub.tasks.handle_file_validation_result': {'queue': 'devhub'}, 'olympia.devhub.tasks.handle_upload_validation_result': { 'queue': 'devhub' }, 'olympia.devhub.tasks.revoke_api_key': {'queue': 'devhub'}, 'olympia.devhub.tasks.send_welcome_email': {'queue': 'devhub'}, 'olympia.devhub.tasks.submit_file': {'queue': 'devhub'}, 'olympia.devhub.tasks.validate_file': {'queue': 'devhub'}, 'olympia.devhub.tasks.validate_upload': {'queue': 'devhub'}, 'olympia.files.tasks.repack_fileupload': {'queue': 'devhub'}, 'olympia.scanners.tasks.run_customs': {'queue': 'devhub'}, 'olympia.scanners.tasks.run_wat': {'queue': 'devhub'}, 'olympia.scanners.tasks.run_yara': {'queue': 'devhub'}, 'olympia.scanners.tasks.call_mad_api': {'queue': 'devhub'}, # Activity (goes to devhub queue). 'olympia.activity.tasks.process_email': {'queue': 'devhub'}, # This is currently used only by validation tasks. # This puts the chord_unlock task on the devhub queue. Which means anything # that uses chord() or group() must also be running in this queue or must # be on a worker that listens to the same queue. 'celery.chord_unlock': {'queue': 'devhub'}, # Images. 
'olympia.users.tasks.resize_photo': {'queue': 'images'}, 'olympia.devhub.tasks.recreate_previews': {'queue': 'images'}, 'olympia.devhub.tasks.resize_icon': {'queue': 'images'}, 'olympia.devhub.tasks.resize_preview': {'queue': 'images'}, # AMO 'olympia.amo.tasks.delete_anonymous_collections': {'queue': 'amo'}, 'olympia.amo.tasks.delete_logs': {'queue': 'amo'}, 'olympia.amo.tasks.send_email': {'queue': 'amo'}, 'olympia.amo.tasks.set_modified_on_object': {'queue': 'amo'}, 'olympia.amo.tasks.sync_object_to_basket': {'queue': 'amo'}, 'olympia.blocklist.tasks.cleanup_old_files': {'queue': 'amo'}, # Addons 'olympia.addons.tasks.add_dynamic_theme_tag': {'queue': 'addons'}, 'olympia.addons.tasks.delete_addons': {'queue': 'addons'}, 'olympia.addons.tasks.delete_preview_files': {'queue': 'addons'}, 'olympia.addons.tasks.version_changed': {'queue': 'addons'}, 'olympia.files.tasks.hide_disabled_files': {'queue': 'addons'}, 'olympia.versions.tasks.delete_preview_files': {'queue': 'addons'}, 'olympia.git.tasks.continue_git_extraction': {'queue': 'addons'}, 'olympia.git.tasks.extract_versions_to_git': {'queue': 'addons'}, 'olympia.git.tasks.on_extraction_error': {'queue': 'addons'}, 'olympia.git.tasks.remove_git_extraction_entry': {'queue': 'addons'}, # Additional image processing tasks that aren't as important go in the # addons queue to leave the 'devhub' queue free to process validations etc. 'olympia.addons.tasks.extract_colors_from_static_themes': { 'queue': 'addons' }, 'olympia.devhub.tasks.pngcrush_existing_preview': {'queue': 'addons'}, 'olympia.devhub.tasks.pngcrush_existing_icons': {'queue': 'addons'}, 'olympia.addons.tasks.recreate_theme_previews': {'queue': 'addons'}, # Adhoc # A queue to be used for one-off tasks that could be resource intensive. 'olympia.files.tasks.extract_optional_permissions': {'queue': 'adhoc'}, # Crons 'olympia.addons.tasks.update_addon_average_daily_users': {'queue': 'cron'}, 'olympia.addons.tasks.update_addon_hotness': {'queue': 'cron'}, 'olympia.addons.tasks.update_addon_weekly_downloads': {'queue': 'cron'}, 'olympia.addons.tasks.update_appsupport': {'queue': 'cron'}, # Bandwagon 'olympia.bandwagon.tasks.collection_meta': {'queue': 'bandwagon'}, # Reviewers 'olympia.reviewers.tasks.recalculate_post_review_weight': { 'queue': 'reviewers' }, # Crypto 'olympia.lib.crypto.tasks.sign_addons': {'queue': 'crypto'}, # Search 'olympia.lib.es.management.commands.reindex.create_new_index': { 'queue': 'search' }, 'olympia.lib.es.management.commands.reindex.delete_indexes': { 'queue': 'search' }, 'olympia.lib.es.management.commands.reindex.flag_database': { 'queue': 'search' }, 'olympia.lib.es.management.commands.reindex.unflag_database': { 'queue': 'search' }, 'olympia.lib.es.management.commands.reindex.update_aliases': { 'queue': 'search' }, 'olympia.addons.tasks.find_inconsistencies_between_es_and_db': { 'queue': 'search' }, # Ratings 'olympia.ratings.tasks.addon_bayesian_rating': {'queue': 'ratings'}, 'olympia.ratings.tasks.addon_rating_aggregates': {'queue': 'ratings'}, 'olympia.ratings.tasks.update_denorm': {'queue': 'ratings'}, # Stats 'olympia.stats.tasks.index_download_counts': {'queue': 'stats'}, # Tags 'olympia.tags.tasks.update_all_tag_stats': {'queue': 'tags'}, 'olympia.tags.tasks.update_tag_stat': {'queue': 'tags'}, # Users 'olympia.accounts.tasks.primary_email_change_event': {'queue': 'users'}, 'olympia.users.tasks.delete_photo': {'queue': 'users'}, 'olympia.users.tasks.update_user_ratings_task': {'queue': 'users'}, 'olympia.accounts.tasks.delete_user_event': 
{'queue': 'users'}, # Zadmin 'olympia.scanners.tasks.run_yara_query_rule': {'queue': 'zadmin'}, 'olympia.scanners.tasks.run_yara_query_rule_on_versions_chunk': { 'queue': 'zadmin' }, 'olympia.scanners.tasks.mark_yara_query_rule_as_completed_or_aborted': { 'queue': 'zadmin' }, 'olympia.zadmin.tasks.celery_error': {'queue': 'zadmin'}, } # See PEP 391 for formatting help. LOGGING = { 'version': 1, 'filters': {}, 'disable_existing_loggers': False, 'formatters': { 'json': { '()': olympia.core.logger.JsonFormatter, 'logger_name': 'http_app_addons' }, }, 'handlers': { 'mozlog': { 'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'json' }, 'null': { 'class': 'logging.NullHandler', }, 'statsd': { 'level': 'ERROR', 'class': 'django_statsd.loggers.errors.StatsdHandler', }, }, 'root': {'handlers': ['mozlog'], 'level': logging.INFO}, 'loggers': { 'amqp': { 'handlers': ['null'], 'level': logging.WARNING, 'propagate': False }, 'caching': { 'handlers': ['mozlog'], 'level': logging.ERROR, 'propagate': False }, 'caching.invalidation': { 'handlers': ['null'], 'level': logging.INFO, 'propagate': False }, 'celery.worker.strategy': { 'handlers': ['mozlog'], 'level': logging.WARNING, 'propagate': False }, 'django': { 'handlers': ['statsd'], 'level': logging.ERROR, 'propagate': True, }, # Django CSRF related warnings 'django.security.csrf': { 'handlers': ['mozlog'], 'level': logging.WARNING, 'propagate': True }, 'elasticsearch': { 'handlers': ['null'], 'level': logging.INFO, 'propagate': False, }, 'filtercascade': { 'handlers': ['mozlog'], # Ignore INFO or DEBUG from filtercascade, it logs too much. 'level': logging.WARNING, 'propagate': False, }, 'mohawk.util': { 'handlers': ['mozlog'], # Ignore INFO or DEBUG from mohawk.util, it logs too much. 'level': logging.WARNING, 'propagate': False, }, 'newrelic': { 'handlers': ['mozlog'], 'level': logging.WARNING, 'propagate': False, }, 'parso': { 'handlers': ['null'], 'level': logging.INFO, 'propagate': False }, 'post_request_task': { 'handlers': ['mozlog'], # Ignore INFO or DEBUG from post-request-task, it logs too much. 'level': logging.WARNING, 'propagate': False, }, 'sentry_sdk': { 'handlers': ['mozlog'], 'level': logging.WARNING, 'propagate': False }, 'rdflib': { 'handlers': ['null'], 'level': logging.INFO, 'propagate': False, }, 'request': { 'handlers': ['mozlog'], 'level': logging.WARNING, 'propagate': False }, 'z.celery': { 'handlers': ['statsd'], 'level': logging.ERROR, 'propagate': True, }, 'z.pool': { 'handlers': ['mozlog'], 'level': logging.ERROR, 'propagate': False }, }, } # CSP Settings PROD_CDN_HOST = 'https://addons.cdn.mozilla.net' ANALYTICS_HOST = 'https://www.google-analytics.com' CSP_REPORT_URI = '/__cspreport__' CSP_REPORT_ONLY = False CSP_EXCLUDE_URL_PREFIXES = () # NOTE: CSP_DEFAULT_SRC MUST be set otherwise things not set # will default to being open to anything. CSP_DEFAULT_SRC = ( "'self'", ) CSP_BASE_URI = ( "'self'", # Required for the legacy discovery pane. 'https://addons.mozilla.org', ) CSP_CONNECT_SRC = ( "'self'", 'https://sentry.prod.mozaws.net', ANALYTICS_HOST, PROD_CDN_HOST, ) CSP_FORM_ACTION = ( "'self'", 'https://developer.mozilla.org', ) CSP_FONT_SRC = ( "'self'", PROD_CDN_HOST, ) CSP_CHILD_SRC = ( "'self'", 'https://www.google.com/recaptcha/', 'https://www.recaptcha.net/recaptcha/', ) CSP_FRAME_SRC = CSP_CHILD_SRC CSP_IMG_SRC = ( "'self'", 'data:', # Used in inlined mobile css. 'blob:', # Needed for image uploads. PROD_CDN_HOST, 'https://static.addons.mozilla.net', # CDN origin server. 
'https://sentry.prod.mozaws.net', ) CSP_MEDIA_SRC = ( 'https://videos.cdn.mozilla.net', ) CSP_OBJECT_SRC = ("'none'",) CSP_SCRIPT_SRC = ( 'https://www.google-analytics.com/analytics.js', 'https://www.google.com/recaptcha/', 'https://www.recaptcha.net/recaptcha/', 'https://www.gstatic.com/recaptcha/', 'https://www.gstatic.cn/recaptcha/', PROD_CDN_HOST, ) CSP_STYLE_SRC = ( "'self'", "'unsafe-inline'", PROD_CDN_HOST, ) RESTRICTED_DOWNLOAD_CSP = { 'DEFAULT_SRC': "'none'", 'BASE_URI': "'none'", 'FORM_ACTION': "'none'", 'OBJECT_SRC': "'none'", 'FRAME_ANCESTORS': "'none'", 'REPORT_URI': CSP_REPORT_URI } # Should robots.txt deny everything or disallow a calculated list of URLs we # don't want to be crawled? Default is true, allow everything, toggled to # False on -dev and stage. # Also see http://www.google.com/support/webmasters/bin/answer.py?answer=93710 ENGAGE_ROBOTS = True # Read-only mode setup. READ_ONLY = env.bool('READ_ONLY', default=False) # Turn on read-only mode in local_settings.py by putting this line # at the VERY BOTTOM: read_only_mode(globals()) def read_only_mode(env): env['READ_ONLY'] = True # Replace the default (master) db with a slave connection. if not env.get('REPLICA_DATABASES'): raise Exception('We need at least one slave database.') slave = env['REPLICA_DATABASES'][0] env['DATABASES']['default'] = env['DATABASES'][slave] # No sessions without the database, so disable auth. env['AUTHENTICATION_BACKENDS'] = ('olympia.users.backends.NoAuthForYou',) # Uploaded file limits MAX_ICON_UPLOAD_SIZE = 4 * 1024 * 1024 MAX_IMAGE_UPLOAD_SIZE = 4 * 1024 * 1024 MAX_VIDEO_UPLOAD_SIZE = 4 * 1024 * 1024 MAX_PHOTO_UPLOAD_SIZE = MAX_ICON_UPLOAD_SIZE MAX_STATICTHEME_SIZE = 7 * 1024 * 1024 MAX_ZIP_UNCOMPRESSED_SIZE = 200 * 1024 * 1024 # File uploads should have -rw-r--r-- permissions in order to be served by # nginx later one. The 0o prefix is intentional, this is an octal value. FILE_UPLOAD_PERMISSIONS = 0o644 # RECAPTCHA: overload the following key settings in local_settings.py # with your keys. # Old recaptcha V1 RECAPTCHA_PUBLIC_KEY
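# Illustrative, hypothetical local_settings.py tail (kept commented out here,
# since running it inside this module would flip these settings): as the
# comment above notes, read_only_mode() mutates the settings dict in place,
# so it must run after DATABASES and REPLICA_DATABASES are final -- it
# repoints 'default' at the first replica and swaps in the deny-all auth
# backend.
#
#   REPLICA_DATABASES = ['replica']        # aliases must be keys of DATABASES
#   DATABASES['replica'] = DATABASES['default'].copy()
#   ...                                    # every other override goes first
#   read_only_mode(globals())              # at the very bottom, as documented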
(QPoint)""" cursor = self.cursorForPosition(coordinates) cursor.select(QTextCursor.BlockUnderCursor) return to_text_string(cursor.selectedText()).replace(u'\u2029', '') def get_word_at(self, coordinates): """Return word at *coordinates* (QPoint)""" cursor = self.cursorForPosition(coordinates) cursor.select(QTextCursor.WordUnderCursor) return to_text_string(cursor.selectedText()) def get_block_indentation(self, block_nb): """Return line indentation (character number)""" text = to_text_string(self.document().findBlockByNumber(block_nb).text()) text = text.replace("\t", " "*self.tab_stop_width_spaces) return len(text)-len(text.lstrip()) def get_selection_bounds(self): """Return selection bounds (block numbers)""" cursor = self.textCursor() start, end = cursor.selectionStart(), cursor.selectionEnd() block_start = self.document().findBlock(start) block_end = self.document().findBlock(end) return sorted([block_start.blockNumber(), block_end.blockNumber()]) #------Text selection def has_selected_text(self): """Returns True if some text is selected""" return bool(to_text_string(self.textCursor().selectedText())) def get_selected_text(self): """ Return text selected by current text cursor, converted in unicode Replace the unicode line separator character \u2029 by the line separator characters returned by get_line_separator """ return to_text_string(self.textCursor().selectedText()).replace(u"\u2029", self.get_line_separator()) def remove_selected_text(self): """Delete selected text""" self.textCursor().removeSelectedText() def replace(self, text, pattern=None): """Replace selected text by *text* If *pattern* is not None, replacing selected text using regular expression text substitution""" cursor = self.textCursor() cursor.beginEditBlock() if pattern is not None: seltxt = to_text_string(cursor.selectedText()) cursor.removeSelectedText() if pattern is not None: text = re.sub(to_text_string(pattern), to_text_string(text), to_text_string(seltxt)) cursor.insertText(text) cursor.endEditBlock() #------Find/replace def find_multiline_pattern(self, regexp, cursor, findflag): """Reimplement QTextDocument's find method Add support for *multiline* regular expressions""" pattern = to_text_string(regexp.pattern()) text = to_text_string(self.toPlainText()) try: regobj = re.compile(pattern) except sre_constants.error: return if findflag & QTextDocument.FindBackward: # Find backward offset = min([cursor.selectionEnd(), cursor.selectionStart()]) text = text[:offset] matches = [_m for _m in regobj.finditer(text, 0, offset)] if matches: match = matches[-1] else: return else: # Find forward offset = max([cursor.selectionEnd(), cursor.selectionStart()]) match = regobj.search(text, offset) if match: pos1, pos2 = match.span() fcursor = self.textCursor() fcursor.setPosition(pos1) fcursor.setPosition(pos2, QTextCursor.KeepAnchor) return fcursor def find_text(self, text, changed=True, forward=True, case=False, words=False, regexp=False): """Find text""" cursor = self.textCursor() findflag = QTextDocument.FindFlag() if not forward: findflag = findflag | QTextDocument.FindBackward if case: findflag = findflag | QTextDocument.FindCaseSensitively moves = [QTextCursor.NoMove] if forward: moves += [QTextCursor.NextWord, QTextCursor.Start] if changed: if to_text_string(cursor.selectedText()): new_position = min([cursor.selectionStart(), cursor.selectionEnd()]) cursor.setPosition(new_position) else: cursor.movePosition(QTextCursor.PreviousWord) else: moves += [QTextCursor.End] if not regexp: text = re.escape(to_text_string(text)) 
if QT55_VERSION: pattern = QRegularExpression(r"\b{}\b".format(text) if words else text) if case: pattern.setPatternOptions( QRegularExpression.CaseInsensitiveOption) else: pattern = QRegExp(r"\b{}\b".format(text) if words else text, Qt.CaseSensitive if case else Qt.CaseInsensitive, QRegExp.RegExp2) for move in moves: cursor.movePosition(move) if regexp and '\\n' in text: # Multiline regular expression found_cursor = self.find_multiline_pattern(pattern, cursor, findflag) else: # Single line find: using the QTextDocument's find function, # probably much more efficient than ours found_cursor = self.document().find(pattern, cursor, findflag) if found_cursor is not None and not found_cursor.isNull(): self.setTextCursor(found_cursor) return True return False def is_editor(self): """Needs to be overloaded in the codeeditor where it will be True""" return False def get_number_matches(self, pattern, source_text='', case=False): """Get the number of matches for the searched text.""" pattern = to_text_string(pattern) if not pattern: return 0 if not source_text: source_text = to_text_string(self.toPlainText()) try: if case: regobj = re.compile(pattern) else: regobj = re.compile(pattern, re.IGNORECASE) except sre_constants.error: return number_matches = 0 for match in regobj.finditer(source_text): number_matches += 1 return number_matches def get_match_number(self, pattern, case=False): """Get number of the match for the searched text.""" position = self.textCursor().position() source_text = self.get_text(position_from='sof', position_to=position) match_number = self.get_number_matches(pattern, source_text=source_text, case=case) return match_number # --- Numpy matrix/array helper / See 'spyder/widgets/arraybuilder.py' def enter_array_inline(self): """ """ self._enter_array(True) def enter_array_table(self): """ """ self._enter_array(False) def _enter_array(self, inline): """ """ offset = self.get_position('cursor') - self.get_position('sol') rect = self.cursorRect() dlg = NumpyArrayDialog(self, inline, offset) # TODO: adapt to font size x = rect.left() x = x - 14 y = rect.top() + (rect.bottom() - rect.top())/2 y = y - dlg.height()/2 - 3 pos = QPoint(x, y) pos = self.calculate_real_position(pos) dlg.move(self.mapToGlobal(pos)) # called from editor if self.is_editor(): python_like_check = self.is_python_like() suffix = '\n' # called from a console else: python_like_check = True suffix = '' if python_like_check and dlg.exec_(): text = dlg.text() + suffix if text != '': cursor = self.textCursor() cursor.beginEditBlock() cursor.insertText(text) cursor.endEditBlock() class TracebackLinksMixin(object): """ """ QT_CLASS = None go_to_error = None def __init__(self): self.__cursor_changed = False self.setMouseTracking(True) #------Mouse events def mouseReleaseEvent(self, event): """Go to error""" self.QT_CLASS.mouseReleaseEvent(self, event) text = self.get_line_at(event.pos()) if get_error_match(text) and not self.has_selected_text(): if self.go_to_error is not None: self.go_to_error.emit(text) def mouseMoveEvent(self, event): """Show Pointing Hand Cursor on error messages""" text = self.get_line_at(event.pos()) if get_error_match(text): if not self.__cursor_changed: QApplication.setOverrideCursor(QCursor(Qt.PointingHandCursor)) self.__cursor_changed = True event.accept() return if self.__cursor_changed: QApplication.restoreOverrideCursor() self.__cursor_changed = False self.QT_CLASS.mouseMoveEvent(self, event) def leaveEvent(self, event): """If cursor has not been restored yet, do it now""" if 
self.__cursor_changed: QApplication.restoreOverrideCursor() self.__cursor_changed = False self.QT_CLASS.leaveEvent(self, event) class GetHelpMixin(object): def __init__(self): self.help = None self.help_enabled = False def set_help(self, help_plugin): """Set Help DockWidget reference""" self.help = help_plugin def set_help_enabled(self, state): self.help_enabled = state def inspect_current_object(self): text = '' text1 = self.get_text('sol', 'cursor') tl1 = re.findall(r'([a-zA-Z_]+[0-9a-zA-Z_\.]*)', text1) if tl1 and text1.endswith(tl1[-1]): text += tl1[-1] text2 = self.get_text('cursor', 'eol') tl2 = re.findall(r'([0-9a-zA-Z_\.]+[0-9a-zA-Z_\.]*)', text2) if tl2 and text2.startswith(tl2[0]): text += tl2[0] if text: self.show_object_info(text, force=True) def show_object_info(self, text, call=False, force=False): """Show signature calltip and/or docstring in the Help plugin""" text = to_text_string(text) # Show docstring help_enabled = self.help_enabled or force if force and self.help is not None: self.help.dockwidget.setVisible(True) self.help.dockwidget.raise_() if help_enabled and (self.help is not None) and \ (self.help.dockwidget.isVisible()): # Help widget exists and is visible if hasattr(self, 'get_doc'): self.help.set_shell(self) else: self.help.set_shell(self.parent()) self.help.set_object_text(text, ignore_unknown=False) self.setFocus() # if help was not at top level, raising it to # top will automatically give it focus because of # the visibility_changed signal, so we must give # focus back to shell # Show calltip if call and self.calltips: # Display argument list if this is a function call iscallable = self.iscallable(text) if iscallable is not None: if iscallable: arglist = self.get_arglist(text) name = text.split('.')[-1] argspec = signature = '' if isinstance(arglist, bool): arglist = [] if arglist: argspec = '(' + ''.join(arglist) + ')' else: doc = self.get__doc__(text) if doc is not None: # This covers cases like np.abs, whose docstring is # the same as np.absolute and because of that a # proper signature can't be obtained correctly argspec = getargspecfromtext(doc) if not argspec: signature = getsignaturefromtext(doc, name) if argspec or signature: if argspec: tiptext = name + argspec else: tiptext = signature self.show_calltip(_("Arguments"), tiptext, signature=True, color='#2D62FF') def get_last_obj(self, last=False): """ Return the last valid object on the current line """ return getobj(self.get_current_line_to_cursor(), last=last) class SaveHistoryMixin(object): INITHISTORY = None SEPARATOR = None HISTORY_FILENAMES = [] append_to_history = None def __init__(self, history_filename=''): self.history_filename = history_filename self.create_history_filename() def create_history_filename(self): """Create history_filename with INITHISTORY if it doesn't exist.""" if self.history_filename and not osp.isfile(self.history_filename): encoding.writelines(self.INITHISTORY, self.history_filename) def add_to_history(self, command): """Add command to history""" command = to_text_string(command) if command in ['', '\n'] or command.startswith('Traceback'): return if command.endswith('\n'): command = command[:-1] self.histidx = None if len(self.history) > 0 and self.history[-1] == command: return self.history.append(command) text = os.linesep + command # When the first entry will be written in history file, # the separator will be append first: if self.history_filename not in self.HISTORY_FILENAMES: self.HISTORY_FILENAMES.append(self.history_filename) text = self.SEPARATOR + text 
encoding.write(text, self.history_filename, mode='ab') if self.append_to_history is not None: self.append_to_history.emit(self.history_filename, text) class BrowseHistoryMixin(object): def __init__(self): self.history = [] self.histidx = None self.hist_wholeline = False def clear_line(self): """Clear current line (without clearing console prompt)""" self.remove_text(self.current_prompt_pos, 'eof') def browse_history(self, backward): """Browse history""" if self.is_cursor_before('eol') and self.hist_wholeline: self.hist_wholeline = False tocursor = self.get_current_line_to_cursor() text, self.histidx = self.find_in_history(tocursor, self.histidx, backward) if text is not None: if self.hist_wholeline: self.clear_line() self.insert_text(text) else: cursor_position = self.get_position('cursor') # Removing text from cursor to the end of the line self.remove_text('cursor', 'eol') # Inserting history text self.insert_text(text) self.set_cursor_position(cursor_position) def find_in_history(self, tocursor, start_idx, backward): """Find text 'tocursor' in history, from index 'start_idx'""" if start_idx is None: start_idx = len(self.history) # Finding text in history step = -1 if backward else 1 idx = start_idx if len(tocursor) == 0 or self.hist_wholeline: idx += step if idx >= len(self.history) or len(self.history) == 0: return "", len(self.history) elif idx < 0: idx = 0 self.hist_wholeline = True return self.history[idx], idx else: for index in range(len(self.history)): idx = (start_idx+step*(index+1)) % len(self.history) entry = self.history[idx] if entry.startswith(tocursor):
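# A self-contained sketch of the history prefix-search behaviour implemented
# by find_in_history() above, operating on a plain list of strings. The
# helper name and the example history are illustrative, not taken from Spyder.
def find_history_prefix(history, tocursor, start_idx, backward):
    """Return (entry, index) of the next history entry starting with
    *tocursor*, scanning circularly from *start_idx* in the given direction."""
    if start_idx is None:
        start_idx = len(history)
    step = -1 if backward else 1
    for offset in range(len(history)):
        idx = (start_idx + step * (offset + 1)) % len(history)
        if history[idx].startswith(tocursor):
            return history[idx], idx
    return None, start_idx  # no entry matched the prefix

# find_history_prefix(['ls', 'cat f', 'cd /tmp'], 'c', None, backward=True)
# -> ('cd /tmp', 2)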
<reponame>baishancloud/dkit import mock import os import sys import time import unittest import urlparse from BaseHTTPServer import BaseHTTPRequestHandler from BaseHTTPServer import HTTPServer from pykit import redisutil from pykit import threadutil from pykit import utdocker from pykit import ututil from pykit import utfjson dd = ututil.dd redis_tag = 'daocloud.io/redis:3.2.3' redis_port = 6379 class TestRedis(unittest.TestCase): @classmethod def setUpClass(cls): utdocker.pull_image(redis_tag) def setUp(self): self.containers = [ ('redis-0', redis_tag, '192.168.52.40'), ('redis-1', redis_tag, '192.168.52.41'), ] # for single redis cases: self.docker_name = 'redis-0' self.ip = '192.168.52.40' self.is_child = False utdocker.create_network() for args in self.containers: utdocker.start_container(*(args + ('',))) dd('started redis in docker: ' + repr(args)) redisutil.wait_serve((args[2], redis_port)) self.rcl = redisutil.get_client((self.ip, redis_port)) def tearDown(self): # do not tear down in child process if self.is_child: return for args in self.containers: dd('remove_container: ' + args[0]) utdocker.remove_container(args[0]) def test_set_get(self): hname = 'foo' rst = self.rcl.hset(hname, 'a', '1') dd('hset rst:', rst) rst = self.rcl.hget(hname, 'a') dd('hget rst:', rst) self.assertEqual('1', rst) def test_recreate_client(self): hname = 'foo' pid = os.fork() dd('my pid:', os.getpid()) if pid == 0: # child self.is_child = True rcl = redisutil.get_client((self.ip, redis_port)) rcl.hset(hname, 'a', '5') child_exit() else: # parent os.waitpid(pid, 0) dd('child pid quit: ' + repr(pid)) rst = self.rcl.hget(hname, 'a') dd('redis hget rst:', rst) self.assertEqual('5', rst) def test_separated_connection_after_fork(self): hname = 'foo' pid = os.fork() dd('my pid:', os.getpid()) n = 10240 if pid == 0: # child self.rcl = redisutil.get_client((self.ip, redis_port)) # for both parent and child process for i in range(n): self.rcl.hset(hname, 'x' + str(i), 'foobarjfkdslafjdasklfdjsaklfdsa' + str(i)) self.rcl.hget(hname, 'x' + str(i - 1)) if i % 1024 == 0: dd('pid:', os.getpid(), ' finished ', i, ' set/get') if pid == 0: # child self.is_child = True child_exit() else: # parent os.waitpid(pid, 0) dd('child pid quit: ' + repr(pid)) # no error raised with 2 process hget/hset, it is ok def test_2_ip_port(self): rcl = [] for name, tag, ip in self.containers: rcl.append(redisutil.get_client((ip, redis_port))) rcl[0].hset('foo', 'foo', '100') rcl[1].hset('foo', 'foo', '101') rst = rcl[0].hget('foo', 'foo') dd('rst got from first redis:', rst) self.assertEqual('100', rst) def test_normalize_ip_port(self): self.assertEqual(('127.0.0.1', 1234), redisutil.normalize_ip_port(1234)) self.assertEqual(('1.2.3.4', 1234), redisutil.normalize_ip_port(('1.2.3.4', 1234))) def test_redis_channel(self): c = redisutil.RedisChannel((self.ip, redis_port), '/foo', 'client') s = redisutil.RedisChannel((self.ip, redis_port), '/foo', 'server') rst = s.peek_msg() dd('server peek: ', rst) self.assertEqual(None, rst) c.send_msg('c2s') s.send_msg('s2c') # peek does not remove message for ii in range(2): rst = s.peek_msg() dd('server peek: ', rst) self.assertEqual('c2s', rst) rst = c.peek_msg() dd('client peek: ', rst) self.assertEqual('s2c', rst) rst = s.recv_msg() dd('server recv: ', rst) self.assertEqual('c2s', rst, 'server pop message from client') rst = s.peek_msg() dd('server peek: ', rst) self.assertEqual(None, rst, 'no more message in channel') # recv_last_msg for ii in range(10): s.send_msg('s2c' + str(ii)) rst = 
c.recv_last_msg() dd('client recv last:', rst) self.assertEqual('s2c9', rst) rst = c.peek_msg() self.assertEqual( None, rst, 'all messages should have been read, after recv_last_msg()') def test_list_redis_channel(self): ca = redisutil.RedisChannel((self.ip, redis_port), '/foo/a', 'client') sb = redisutil.RedisChannel((self.ip, redis_port), '/foo/b', 'server') rst = ca.list_channel('/foo/') dd('before write, list_channel:', rst) self.assertEqual([], rst, 'can not list list before any message sent') ca.send_msg(1) sb.send_msg(2) rst = ca.list_channel('/foo/') dd('after write, list_channel:', rst) self.assertEqual(['/foo/a', '/foo/b'], rst) # empty channel then channel can not be seen. redisutil.RedisChannel((self.ip, redis_port), '/foo/a', 'server').recv_msg() redisutil.RedisChannel((self.ip, redis_port), '/foo/b', 'client').recv_msg() rst = ca.list_channel('/foo/') self.assertEqual([], rst) def test_tuple_channel(self): c = redisutil.RedisChannel((self.ip, redis_port), '/foo/a', 'client') s = redisutil.RedisChannel( (self.ip, redis_port), ('foo', 'a'), 'server') c.send_msg(1) rst = s.recv_msg() dd('server recv:', rst) self.assertEqual(1, rst) def test_channel_timeout(self): ca = redisutil.RedisChannel((self.ip, redis_port), '/foo/a', 'client', timeout=1) ca.send_msg(1) self.assertEqual(['/foo/a'], ca.list_channel('/foo/')) time.sleep(1.5) self.assertEqual([], ca.list_channel('/foo/')) def test_brecv_message(self): c = redisutil.RedisChannel((self.ip, redis_port), '/foo', 'client') s = redisutil.RedisChannel((self.ip, redis_port), '/foo', 'server') c.send_msg('aa') self.assertEqual('aa', s.brecv_msg(timeout=1)) self.assertEqual(None, s.brecv_msg(timeout=1)) def _send_msg(): time.sleep(0.5) c.send_msg('bar') threadutil.start_daemon_thread(target=_send_msg) self.assertEqual('bar', s.brecv_msg(timeout=1)) def test_brecv_last_message(self): c = redisutil.RedisChannel((self.ip, redis_port), '/foo', 'client') s = redisutil.RedisChannel((self.ip, redis_port), '/foo', 'server') c.send_msg('aa') c.send_msg('bb') self.assertEqual('bb', s.brecv_last_msg(timeout=1)) self.assertEqual(None, s.brecv_last_msg(timeout=1)) def _send_msg(): time.sleep(0.5) c.send_msg('cc') threadutil.start_daemon_thread(target=_send_msg) self.assertEqual('cc', s.brecv_last_msg(timeout=1)) def test_rpeek_message(self): c = redisutil.RedisChannel((self.ip, redis_port), '/foo', 'client') s = redisutil.RedisChannel((self.ip, redis_port), '/foo', 'server') self.assertEqual(None, c.rpeek_msg()) self.assertEqual(None, s.rpeek_msg()) c.send_msg('c2s1') c.send_msg('c2s2') s.send_msg('s2c1') s.send_msg('s2c2') # rpeek does not remove message for ii in range(2): self.assertEqual('c2s2', s.rpeek_msg()) self.assertEqual('s2c2', c.rpeek_msg()) class TestRedisProxyClient(unittest.TestCase): response = {} request = {} access_key = 'test_accesskey' secret_key = 'test_secretkey' def setUp(self): self.addr = ('127.0.0.1', 22038) self.proxy_addr = ('127.0.0.1', 22039) self.response['http-status'] = 200 def _start_http_svr(): self.http_server = HTTPServer(self.addr, HttpHandle) self.http_server.serve_forever() def _start_proxy_http_svr(): self.proxy_http_server = HTTPServer(self.proxy_addr, HttpHandle) self.proxy_http_server.serve_forever() threadutil.start_daemon_thread(_start_http_svr) threadutil.start_daemon_thread(_start_proxy_http_svr) time.sleep(0.1) self.cli = redisutil.RedisProxyClient([self.addr]) self.n = self.cli.n self.w = self.cli.w self.r = self.cli.r def tearDown(self): self.http_server.shutdown() self.http_server.server_close() 
self.proxy_http_server.shutdown() self.proxy_http_server.server_close() def test_proxy_get(self): cli = redisutil.RedisProxyClient([self.addr], proxy_hosts=[[self.proxy_addr]]) res = cli.get("foo") time.sleep(0.1) exp_res = {'foo': 1, 'bar': 2} self.assertDictEqual(exp_res, res) self.http_server.shutdown() self.http_server.server_close() res = cli.get("foo") time.sleep(0.1) self.assertDictEqual(exp_res, res) self.response['http-status'] = 404 self.assertRaises(redisutil.KeyNotFoundError, cli.get, 'foo') self.response['http-status'] = 500 self.assertRaises(redisutil.ServerResponseError, cli.get, 'foo') self.proxy_http_server.shutdown() self.proxy_http_server.server_close() self.assertRaises(redisutil.SendRequestError, cli.get, 'foo') def test_proxy_set(self): cli = redisutil.RedisProxyClient([self.addr], proxy_hosts=[[self.proxy_addr]]) # expect no exception cli.set("foo", "bar") self.response['http-status'] = 500 self.assertRaises(redisutil.ServerResponseError, cli.set, 'foo', 'bar') self.response['http-status'] = 200 self.proxy_http_server.shutdown() self.proxy_http_server.server_close() self.assertRaises(redisutil.SendRequestError, cli.set, 'foo', 'bar') self.http_server.shutdown() self.http_server.server_close() self.assertRaises(redisutil.SendRequestError, cli.set, 'foo', 'bar') def test_send_request_failed(self): # close http server self.tearDown() self.assertRaises(redisutil.SendRequestError, self.cli.get, 'foo') self.assertRaises(redisutil.SendRequestError, self.cli.set, 'foo', 'bar') self.assertRaises(redisutil.SendRequestError, self.cli.hget, 'foo', 'bar') self.assertRaises(redisutil.SendRequestError, self.cli.hset, 'foo', 'bar', 'xx') self.assertRaises(redisutil.SendRequestError, self.cli.hkeys, 'foo') self.assertRaises(redisutil.SendRequestError, self.cli.hvals, 'foo') self.assertRaises(redisutil.SendRequestError, self.cli.hgetall, 'foo') self.assertRaises(redisutil.SendRequestError, self.cli.delete, 'foo') self.assertRaises(redisutil.SendRequestError, self.cli.hdel, 'foo', 'bar') def test_not_found(self): self.response['http-status'] = 404 self.assertRaises(redisutil.KeyNotFoundError, self.cli.get, 'foo') self.assertRaises(redisutil.KeyNotFoundError, self.cli.hget, 'foo', 'bar') self.assertRaises(redisutil.KeyNotFoundError, self.cli.hkeys, 'foo') self.assertRaises(redisutil.KeyNotFoundError, self.cli.hvals, 'foo') self.assertRaises(redisutil.KeyNotFoundError, self.cli.hgetall, 'foo') def test_server_response_error(self): cases = ( 201, 302, 403, 500, ) for status in cases: self.response['http-status'] = status self.assertRaises(redisutil.ServerResponseError, self.cli.get, 'foo') self.assertRaises(redisutil.ServerResponseError, self.cli.hget, 'foo', 'bar') self.assertRaises(redisutil.ServerResponseError, self.cli.set, 'foo', 'val') self.assertRaises(redisutil.ServerResponseError, self.cli.hset, 'foo', 'bar', 'val') self.assertRaises(redisutil.ServerResponseError, self.cli.hkeys, 'foo') self.assertRaises(redisutil.ServerResponseError, self.cli.hvals, 'foo') self.assertRaises(redisutil.ServerResponseError, self.cli.hgetall, 'foo') self.assertRaises(redisutil.ServerResponseError, self.cli.delete, 'foo') self.assertRaises(redisutil.ServerResponseError, self.cli.hdel, 'foo', 'bar') def test_get(self): cases = ( ('foo', None), ('bar', 1), ('123', 2), ('foobar123', 3) ) for key, retry_cnt in cases: res = self.cli.get(key, retry_cnt) time.sleep(0.1) exp_path = '{ver}/GET/{k}'.format(ver=self.cli.ver, k=key) self.assertEqual(exp_path, TestRedisProxyClient.request['req-path']) exp_qs = 
'n=3&w=2&r=2' self.assertIn(exp_qs, TestRedisProxyClient.request['req-qs']) exp_res = { 'foo': 1, 'bar': 2, } self.assertDictEqual(exp_res, res) def test_hget(self): cases = ( ('hname1', 'hval1'), ('hname2', 'hval2'), ('hname3', 'hval3'), ('hname4', 'hval4'), ) for hname, hkey in cases: res = self.cli.hget(hname, hkey) time.sleep(0.1) exp_path = '{ver}/HGET/{hn}/{hk}'.format(ver=self.cli.ver, hn=hname, hk=hkey) self.assertEqual(exp_path, TestRedisProxyClient.request['req-path']) exp_qs = 'n=3&w=2&r=2' self.assertIn(exp_qs, TestRedisProxyClient.request['req-qs']) exp_res = { 'foo': 1, 'bar': 2, } self.assertDictEqual(exp_res, res) def test_set(self): cases = ( ('key1', 'val1', None), ('key2', 'val2', 1), ('key3', 'val3', 2), ('key4', 'val4', 3), ('key5', 11, 4), ('key6', 22, 5), ) for key, val, expire in cases: self.cli.set(key, val, expire) time.sleep(0.1) exp_path = '{ver}/SET/{k}'.format(ver=self.cli.ver, k=key) self.assertEqual(exp_path, TestRedisProxyClient.request['req-path']) exp_qs = 'n=3&w=2&r=2' if expire is not None: exp_qs += '&expire={e}'.format(e=expire) self.assertIn(exp_qs, TestRedisProxyClient.request['req-qs']) self.assertEqual(utfjson.dump(val), TestRedisProxyClient.request['req-body']) def test_hset(self): cases = ( ('hname1', 'key1', 'val1', None), ('hname2', 'key2', 'val2', 1), ('hname3', 'key3', 'val3', 2), ('hname4', 'key4', 'val4', 3), ('hname5', 'key5', 11, 4), ('hname6', 'key6', 22, 5), ) for hname, key, val, expire in cases: self.cli.hset(hname, key, val, expire=expire) time.sleep(0.1) exp_path = '{ver}/HSET/{hn}/{hk}'.format(ver=self.cli.ver, hn=hname, hk=key) self.assertEqual(exp_path, TestRedisProxyClient.request['req-path']) exp_qs = 'n=3&w=2&r=2' if expire is not None: exp_qs += '&expire={e}'.format(e=expire) self.assertIn(exp_qs, TestRedisProxyClient.request['req-qs']) self.assertEqual(utfjson.dump(val), TestRedisProxyClient.request['req-body']) def test_hkeys_hvals_hgetall(self): cases = ( 'hname1', 'hname2', 'hname3', 'hname4', ) for hname in cases: self.cli.hkeys(hname) time.sleep(0.1) exp_path = '{ver}/HKEYS/{hn}'.format(ver=self.cli.ver, hn=hname) self.assertEqual(exp_path, TestRedisProxyClient.request['req-path']) exp_qs = 'n=3&w=2&r=2' self.assertIn(exp_qs, TestRedisProxyClient.request['req-qs']) self.cli.hvals(hname) time.sleep(0.1) exp_path = '{ver}/HVALS/{hn}'.format(ver=self.cli.ver, hn=hname) self.assertEqual(exp_path, TestRedisProxyClient.request['req-path']) exp_qs = 'n=3&w=2&r=2' self.assertIn(exp_qs, TestRedisProxyClient.request['req-qs']) self.cli.hgetall(hname) time.sleep(0.1) exp_path = '{ver}/HGETALL/{hn}'.format(ver=self.cli.ver, hn=hname) self.assertEqual(exp_path, TestRedisProxyClient.request['req-path']) exp_qs = 'n=3&w=2&r=2' self.assertIn(exp_qs, TestRedisProxyClient.request['req-qs']) def test_retry(self): # close http server self.tearDown() cases = ( (None, 4), (0, 4), (1, 8), (2, 12), (3, 16), (4, 20), ) sess = {'run_times': 0} def _mock_for_retry(req): sess['run_times'] += 1 return req for retry_cnt, exp_cnt in cases: sess['run_times'] = 0 with mock.patch('pykit.redisutil.RedisProxyClient._sign_req', side_effect=_mock_for_retry): try: self.cli.get('foo', retry=retry_cnt) except: pass try: self.cli.hget('foo', 'bar', retry=retry_cnt) except: pass try: self.cli.set('foo', 'val', retry=retry_cnt) except: pass try: self.cli.hset('foo', 'bar', 'val', retry=retry_cnt) except: pass self.assertEqual(exp_cnt, sess['run_times']) def test_delete(self): cases = ( ('foo', None), ('bar', 1), ('123', 2), ('foobar123', 3), ) for key, retry_cnt 
in cases: self.cli.delete(key, retry_cnt) time.sleep(0.1) exp_path = '{ver}/DEL/{key}'.format(ver=self.cli.ver, key=key) self.assertEqual(exp_path, TestRedisProxyClient.request['req-path']) exp_qs = 'n=3&w=2&r=2' self.assertIn(exp_qs, TestRedisProxyClient.request['req-qs']) def test_hdel(self): cases = ( ('hashname', 'key0', None), ('hashname', 'key1', 1), ('hashname', 'key2', 2), ('hashname', 'key3', 3), ) for hname, key, retry_cnt in cases: self.cli.hdel(hname, key, retry_cnt) time.sleep(0.1) exp_path = '{ver}/HDEL/{hname}/{key}'.format(ver=self.cli.ver, hname=hname, key=key) self.assertEqual(exp_path, TestRedisProxyClient.request['req-path']) exp_qs = 'n=3&w=2&r=2' self.assertIn(exp_qs, TestRedisProxyClient.request['req-qs']) class HttpHandle(BaseHTTPRequestHandler): def do_PUT(self): length = int(self.headers.getheader('Content-Length'))
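# Judging from the paths and query strings asserted in the tests above, the
# proxy speaks a small REST-style scheme. A hedged sketch of how such request
# URLs could be composed; the helper name and defaults are assumptions, not
# part of pykit.redisutil.
def build_proxy_url(ver, op, args, n=3, w=2, r=2, expire=None):
    """Compose a path like '<ver>/HSET/<hname>/<hkey>?n=3&w=2&r=2',
    mirroring the exp_path/exp_qs values checked in the tests."""
    path = '/'.join([str(ver), op] + [str(a) for a in args])
    qs = 'n={n}&w={w}&r={r}'.format(n=n, w=w, r=r)
    if expire is not None:
        qs += '&expire={e}'.format(e=expire)
    return path + '?' + qs

# build_proxy_url('v1', 'SET', ['key1'], expire=5)
# -> 'v1/SET/key1?n=3&w=2&r=2&expire=5'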
List window or another save file's List window. Open creates a new List window for the file, or raises its window if the file is already open. A save file's list window allows all main window actions listed above, except for Load. For example, saved messages can be viewed, deleted, replied to, or forwarded, from the file's list window. Operations are mapped to the local mail save file, instead of the server's inbox. Saved messages may also be saved: to move mails from one save file to another, Save and then Delete from the source file's window. You do not need to connect to a server to process save files offline: click the Open button in the main list window. In a save-file list window, a Quit erases that window only; a Delete removes the message from the local save file, not from a server. Save-file list windows are automatically updated when new mails are saved to the corresponding file anywhere in the GUI. The sent-mail file may also be opened and processed as a normal save-mail file, with Open.

Note that Save buttons in list windows save the full message text (including its headers, and a message separator). To save just the main text part of a message being viewed or composed, either use the Save button in the text editor component at the bottom of a view or edit window, or select the "Split" action button of view windows. Saves in the text editor component can be useful to save a draft of the mail being composed to a temporary file; it can later be pasted into a compose window if needed. To save attachments, see the next section.

New in 2.1: local save-file Open and Delete requests are threaded to avoid blocking the GUI during loads and deletes of large files. Because of this, a loaded file's index may not appear in its List window immediately. Similarly, when new mails are saved or messages are sent, there may be a delay before the corresponding local file's List window is updated, if it is currently open. As a status indication, the window's title changes to "Loading..." on loads and "Deleting..." during deletes, and is reset to the file's name after the thread exits (the server window uses pop-ups for status indication, because the delay is longer, and there is progress to display). Eventually, either the index will appear and its window will be raised, or an error message will pop up. Save-file loads and deletes are not allowed to overlap with each other for a given file, but may overlap with server transfers and operations on other open files.

Note: save-file Save operations are still not threaded, and may pause the GUI momentarily when saving very many large mails. This is normally not noticeable, because unlike Open and Delete, saves simply append to the save-file, and do not reload its content. To avoid pauses completely, though, don't save very many large mails in a single operation.

Also note: the current implementation loads the entire save-mail file into memory when opened. Because of this, save-mail files are limited in size, depending upon your computer. To avoid consuming too much memory, you should try to keep your save files relatively small (at the least, smaller than your computer's available memory). As a rule of thumb, organize your saved mails by categories into many small files, instead of a few large files.

4) VIEWING TEXT AND ATTACHMENTS

PyMailGUI's view windows use a text-oriented display. When a mail is viewed, its main text is displayed in the View window.
This text is taken from the entire body of a simple message, or the first text part of a multipart MIME message. To extract the main message text, PyMailGUI looks for plain text, then HTML, and then text of any other kind. If no such text content is found, nothing is displayed in the view window, but parts may be opened manually with the "Split" button (and quick-access part buttons in 2.1, described below). If the body of a simple message is of HTML type, or an HTML part is used as the main message text, a web browser is popped up as an alternative display for the main message text, if verified by the user (the mailconfig module can be used to bypass the verification; see ahead). This is equivalent to opening the HTML part with the "Split" button, but is initiated automatically for the main message text's HTML. If a simple message is something other than text or HTML, its content must be opened manually with Split.

When viewing mails, messages with multipart attachments are prefixed with a "*" in list windows. "Parts" and "Split" buttons appear in all View windows. Message parts are defined as follows:

- For simple messages, the message body is considered to be the sole part of the message.
- For multipart messages, the message parts list includes the main message text, as well as all attachments.

In both cases, message parts may be saved and opened with the "Split" button. For simple messages, the message body may be saved with Split, as well as the Save button in the view window's text editor.

To process multipart messages:

- Use "Parts" to display the names of all message parts, including any attachments, in a pop-up dialog.
- Use "Split" to view message parts: all mail parts are first saved to a selected directory, and any well-known and generally safe part files are opened automatically, but only if verified by the user.
- See also the note below about 2.1 quick-access buttons, for an alternative to the Parts/Split interface on View windows.

For "Split", select a local directory to save parts to. After the save, text parts open in the TextEditor GUI, HTML and multimedia types open in a web browser, and common Windows document types (e.g., .doc and .xls files) open via the Windows registry entry for the filename extension. For safety, unknown types and executable program parts are never run automatically; even Python programs are displayed as source text only (save the code to run manually). Web browsers on some platforms may open multimedia types (image, audio, video) in specific content handler programs (e.g., MediaPlayer, image viewers). No other types of attachments are ever opened, and attachments are never opened without user verification (or mailconfig.py authorization in 2.1, described below). Browse the parts save directory to open other parts manually.

To avoid scrolling for very long lines (sometimes sent by HTML-based mailers), the main text part of a message is automatically wrapped for easy viewing. Long lines are split up at the first delimiter found before a fixed column, when viewed, replied, or forwarded. The wrapping column may be configured or disabled in the mailconfig module (see ahead). Text lines are never automatically wrapped when sent; users or recipients should manage line length in composed mails.

New in 2.1: View windows also have up to a fixed maximum number of quick-access buttons for attached message parts.
They are alternatives to Split: selecting an attachment's button automatically extracts, saves, and opens that single attachment directly, without Split directory and pop-up dialogs (a temporary directory is used). The maximum number of part buttons to display per view window can be set in the mailconfig.py user settings module (described ahead). For mails with more than the maximum number of attachments, a '...' button is added which simply runs Split to save and open any additional attachments.

Also in 2.1, two settings in the mailconfig.py module (see section 10) can be used to control how PyMailGUI opens parts in the GUI:

- okayToOpenParts: controls whether part opens are allowed at all
- verifyPartOpens: controls whether to ask before each part is opened.

Both are used for View window Split actions and part quick-access buttons. If okayToOpenParts is False, quick-access part buttons will not appear in the GUI, and Split saves parts in a directory but does not open them. verifyPartOpens is used by both Split and quick-access part buttons: if False, part buttons open parts immediately, and Split opens all known-type parts automatically after they are saved (unknown types and executables are never opened). An additional setting in this module, verifyHTMLTextOpen, controls verification of opening a web browser on an HTML main text part
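The selection order described above for the main message text, plain text first, then HTML, then any other text type, maps naturally onto the standard library's email package. The following is a hedged sketch of that search, not PyMailGUI's actual code:

import email

def find_main_text(msg):
    """Return (content_type, payload) for the part a viewer would display:
    text/plain if present, else text/html, else any text/* part."""
    for wanted in ('text/plain', 'text/html'):
        for part in msg.walk():
            if part.get_content_type() == wanted:
                return wanted, part.get_payload(decode=True)
    for part in msg.walk():
        if part.get_content_maintype() == 'text':
            return part.get_content_type(), part.get_payload(decode=True)
    return None, None  # nothing displayable: open parts manually with Split

# usage: ctype, text = find_main_text(email.message_from_string(raw_mail))
# decode=True returns the raw payload bytes, undoing any transfer encoding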
array, Training data, if return_data is True X_test: numpy array, Test data, if return_data is True and test_size > 0 subj_train: numpy array, Training subjects, if return_data is True subj_test: numpy array, Test subjects, if return_data is True and test_size > 0 """ clinical_prefix = "bloc-clinical_score-" roi_prefix = "bloc-t1w_roi-" subject_column_name = "participant_id" path = os.path.join(datasetdir, "rois_X_train.npy") meta_path = os.path.join(datasetdir, "rois_X_train.tsv") path_test = None meta_path_test = None if test_size > 0: path_test = os.path.join(datasetdir, "rois_X_test.npy") meta_path_test = os.path.join(datasetdir, "rois_X_test.tsv") if not os.path.isfile(path): data = pd.read_csv(files["stratification"], sep="\t") roi_mapper = pd.read_csv(files["rois_mapper"], sep="\t") # ROI selection roi_label_range = pd.Series([False] * len(roi_mapper)) for roi_type in roi_types: if roi_type == "cortical": roi_label_range = roi_label_range | ( (roi_mapper["labels"] > 11000) & (roi_mapper["labels"] < 13000)) elif roi_type == "subcortical": roi_label_range = roi_label_range | ( roi_mapper["labels"] > 13000) elif roi_type == "other": roi_label_range = roi_label_range | ( roi_mapper["labels"] < 11000) else: raise ValueError("Roi types must be either 'cortical', \ 'subcortical' or 'other'") roi_labels = roi_mapper.loc[roi_label_range, "labels"] # Feature selection features_list = [] for column in data.columns: if column.startswith(roi_prefix): roi = int(column.split(":")[1].split("_")[0]) metric = column.split("-")[-1] if roi in roi_labels.values and metric in metrics: features_list.append(column.replace(roi_prefix, "")) data_train = apply_qc(data, clinical_prefix, qc).sort_values( subject_column_name) data_train.columns = [elem.replace(roi_prefix, "") for elem in data_train.columns] X_train = data_train[features_list].copy() # Splits in train and test and removes nans if test_size > 0: X_train, X_test, data_train, data_test = train_test_split( X_train, data_train, test_size=test_size, random_state=seed) na_idx_test = (X_test.isna().sum(1) == 0) X_test = X_test[na_idx_test] data_test = data_test[na_idx_test] subj_test = data_test[subject_column_name].values na_idx_train = (X_train.isna().sum(1) == 0) X_train = X_train[na_idx_train] data_train = data_train[na_idx_train] subj_train = data_train[subject_column_name].values cols = X_train.columns # Correction for site effects if adjust_sites: for metric in metrics: adjuster = fortin_combat() features = [feature for feature in features_list if metric in feature] X_train[features] = adjuster.fit_transform( X_train[features], data_train[["{}{}".format( clinical_prefix, site_column_name)]], data_train[["{}{}".format(clinical_prefix, f) for f in residualize_by["discrete"]]], data_train[["{}{}".format(clinical_prefix, f) for f in residualize_by["continuous"]]]) _path = os.path.join( datasetdir, "rois_combat_{0}.pkl".format(metric)) with open(_path, "wb") as of: pickle.dump(adjuster, of) if test_size > 0: X_test[features] = adjuster.transform( X_test[features], data_test[["{}{}".format( clinical_prefix, site_column_name)]], data_test[["{}{}".format(clinical_prefix, f) for f in residualize_by["discrete"]]], data_test[["{}{}".format(clinical_prefix, f) for f in residualize_by["continuous"]]]) # Standardizes if z_score: scaler = RobustScaler() X_train = scaler.fit_transform(X_train) _path = os.path.join(datasetdir, "rois_scaler.pkl") with open(_path, "wb") as f: pickle.dump(scaler, f) if test_size > 0: X_test = scaler.transform(X_test) else: X_train = 
X_train.values if test_size > 0: X_test = X_test.values # Residualizes and scales if residualize_by is not None or len(residualize_by) > 0: regressor = LinearRegression() y_train = np.concatenate([ data_train[["{}{}".format(clinical_prefix, f) for f in residualize_by["continuous"]]].values, OneHotEncoder(sparse=False).fit_transform( data_train[["{}{}".format(clinical_prefix, f) for f in residualize_by["discrete"]]]) ], axis=1) regressor.fit(y_train, X_train) X_train = X_train - regressor.predict(y_train) _path = os.path.join(datasetdir, "rois_residualizer.pkl") with open(_path, "wb") as f: pickle.dump(regressor, f) if test_size > 0: y_test = np.concatenate([ data_test[[ "{}{}".format(clinical_prefix, f) for f in residualize_by["continuous"]]].values, OneHotEncoder(sparse=False).fit_transform( data_test[["{}{}".format(clinical_prefix, f) for f in residualize_by["discrete"]]]) ], axis=1) X_test = X_test - regressor.predict(y_test) # Return data and subjects X_train_df = pd.DataFrame(data=X_train, columns=cols) X_train_df.insert(0, subject_column_name, subj_train) X_test_df = None if test_size > 0: X_test_df = pd.DataFrame(data=X_test, columns=cols) X_test_df.insert(0, subject_column_name, subj_test) # Saving np.save(path, X_train) X_train_df.to_csv(meta_path, index=False, sep="\t") if test_size > 0: np.save(path_test, X_test) X_test_df.to_csv(meta_path_test, index=False, sep="\t") if return_data: X_train = np.load(path) subj_train = pd.read_csv(meta_path, sep="\t")[ subject_column_name].values X_test, subj_test = (None, None) if test_size > 0: X_test = np.load(path_test) subj_test = pd.read_csv(meta_path_test, sep="\t")[ subject_column_name].values return X_train, X_test, subj_train, subj_test else: return Item(train_input_path=path, test_input_path=path_test, train_metadata_path=meta_path, test_metadata_path=meta_path_test) return fetch_rois def fetch_surface_wrapper(hemisphere, datasetdir=SAVING_FOLDER, files=FILES, cohort=COHORT_NAME, site_column_name="t1:site", defaults=DEFAULTS["surface"]): """ Fetcher wrapper for surface data Parameters ---------- hemisphere: string name of the hemisphere data fetcher, one of "rh" or "lh" datasetdir: string, default SAVING_FOLDER path to the folder in which to save the data files: dict, default FILES contains the paths to the different files cohort: string, default COHORT_NAME, name of the cohort site_columns_name: string, default "t1:site" name of the column containing the site of MRI acquisition defaults: dict, default DEFAULTS default values for the wrapped function Returns ------- fetcher: function corresponding fetcher """ assert(hemisphere in ["rh", "lh"]) fetcher_name = "fetcher_surface_{}_{}".format(hemisphere, cohort) # @Fetchers.register def fetch_surface( metrics=defaults["metrics"], test_size=defaults["test_size"], seed=defaults["seed"], return_data=defaults["return_data"], z_score=defaults["z_score"], adjust_sites=defaults["adjust_sites"], residualize_by=defaults["residualize_by"], qc=defaults["qc"]): """ Fetches and preprocesses surface data Parameters ---------- metrics: list of strings, see defaults metrics to fetch test_size: float, default 0.2 proportion of the dataset to keep for testing. Preprocessing models will only be fitted on the training part and applied to the test set. You can specify not to use a testing set by setting it to 0 seed: int, default 42 random seed to split the data into train / test return_data: bool, default False If false, saves the data in the specified folder, and return the path. 
Otherwise, returns the preprocessed data and the corresponding subjects z_score: bool, default True whether or not to transform the data into z_scores, meaning standardizing and scaling it adjust_sites: bool, default True whether or not to correct site effects via the Combat algorithm residualize_by: dict, see default variables to residualize the data. Two keys, "continuous" and "discrete", and the values are a list of the variable names qc: dict, see default keys are the names of the features to control, values are the requirements on their values (see the function apply_qc) Returns ------- item: namedtuple a named tuple containing 'train_input_path', 'train_metadata_path', and 'test_input_path', 'test_metadata_path' if test_size > 0 X_train: numpy array, Training data, if return_data is True X_test: numpy array, Test data, if return_data is True and test_size > 0 subj_train: numpy array, Training subjects, if return_data is True subj_test: numpy array, Test subjects, if return_data is True and test_size > 0 """ clinical_prefix = "bloc-clinical_score-" surf_prefix = "bloc-t1w_hemi-{}_metric".format(hemisphere) data = pd.read_csv(files["clinical_surface"], sep="\t").drop( columns=["bloc-t1w_hemi-lh_metric-area", "bloc-t1w_hemi-rh_metric-area"]) # Feature selection features_list = [] for metric in metrics: for column in data.columns: if column.startswith(surf_prefix): m = column.split('-')[-1] if m == metric: features_list.append(column) data_train = apply_qc(data, clinical_prefix, qc).sort_values( "participant_id") # Loads surface data n_vertices = len( surface_loader(data_train[features_list[0]].iloc[0]).get_data()) X_train = np.zeros((len(data_train), n_vertices, len(features_list))) for i in range(len(data_train)): for j, feature in enumerate(features_list): path = data_train[feature].iloc[i] if not pd.isnull([path]): X_train[i, :, j] = surface_loader( path).get_data().squeeze() # Splits in train and test and removes nans if test_size > 0: X_train, X_test, data_train, data_test = train_test_split( X_train, data_train, test_size=test_size, random_state=seed) na_idx_test = (np.isnan(X_test).sum((1, 2)) == 0) X_test = X_test[na_idx_test] data_test = data_test[na_idx_test] if return_data: subj_test = data_test["participant_id"].values na_idx_train = (np.isnan(X_train).sum((1, 2)) == 0) X_train = X_train[na_idx_train] data_train = data_train[na_idx_train] if return_data: subj_train = data_train["participant_id"].values # Applies feature-wise preprocessing for i, feature in enumerate(features_list): # Correction for site effects if adjust_sites: non_zeros_idx = (X_train[:, :, i] > 0).sum(0) >= 1 adjuster = fortin_combat() X_train[:, non_zeros_idx, i] = adjuster.fit_transform( X_train[:, non_zeros_idx, i], data_train[["{}{}".format( clinical_prefix, site_column_name)]], data_train[["{}{}".format(clinical_prefix, f) for f in residualize_by["discrete"]]], data_train[["{}{}".format(clinical_prefix, f) for f in residualize_by["continuous"]]]) path = os.path.join( datasetdir, "surface_{}_combat_feature{}.pkl".format(hemisphere, i)) with open(path, "wb") as f: pickle.dump(adjuster, f) if test_size > 0: X_test[:, non_zeros_idx, i] = adjuster.transform( X_test[:, non_zeros_idx, i], data_test[["{}{}".format( clinical_prefix, site_column_name)]], data_test[["{}{}".format(clinical_prefix, f) for f in residualize_by["discrete"]]], data_test[["{}{}".format(clinical_prefix, f) for f in residualize_by["continuous"]]]) # Standardizes and scales if z_score: scaler = RobustScaler() X_train[:, :, i] =
scaler.fit_transform(X_train[:, :, i]) path = os.path.join( datasetdir, "surface_{}_scaler_feature{}.pkl".format(hemisphere, i)) with open(path, "wb") as f: pickle.dump(scaler, f) if test_size > 0: X_test[:, :, i] = scaler.transform(X_test[:, :, i]) # Residualizes if residualize_by is not None or len(residualize_by) > 0: regressor = LinearRegression() y_train = np.concatenate([ data_train[["{}{}".format(clinical_prefix, f) for f in residualize_by["continuous"]]].values, OneHotEncoder(sparse=False).fit_transform( data_train[["{}{}".format(clinical_prefix, f) for f in residualize_by["discrete"]]]) ], axis=1) regressor.fit(y_train, X_train[:, :, i]) X_train[:, :, i] = X_train[:, :, i] - regressor.predict( y_train) path = os.path.join( datasetdir, "surface_{}_residualizer_feature{}.pkl".format( hemisphere, i)) with open(path, "wb") as f: pickle.dump(regressor, f) if test_size > 0: y_test = np.concatenate([ data_test[["{}{}".format(clinical_prefix, f) for f in residualize_by["continuous"]] ].values, OneHotEncoder(sparse=False).fit_transform( data_test[["{}{}".format(clinical_prefix, f) for f in residualize_by["discrete"]]]) ], axis=1) X_test[:, :, i] = X_test[:, :, i] - regressor.predict( y_test) # Returns data and subjects if return_data: if test_size > 0: return X_train, X_test, subj_train, subj_test return X_train, subj_train # Saving path = os.path.join( datasetdir, "surface_{}_X_train.npy".format(hemisphere)) np.save(path, X_train) if test_size > 0: path_test
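# The residualization used by both the ROI and surface fetchers reduces to:
# one-hot encode the discrete covariates, stack them with the continuous
# ones, fit a linear model on the training data, and subtract its
# predictions. A condensed sketch with illustrative names; it mirrors the
# calls above, including OneHotEncoder(sparse=False) from older scikit-learn.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import OneHotEncoder

def residualize(X, continuous, discrete, regressor=None):
    """Remove covariate effects from X of shape (n_samples, n_features).
    Pass the regressor fitted on the training set to transform test data."""
    design = np.concatenate(
        [continuous, OneHotEncoder(sparse=False).fit_transform(discrete)],
        axis=1)
    if regressor is None:  # fit on training data only
        regressor = LinearRegression().fit(design, X)
    return X - regressor.predict(design), regressor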
from anytree import NodeMixin import os import shutil import numpy as np import traceback from numpy import inf from .data import energy_path, energy_path_from_p_state from .util import set_output_folder from .chain_io import chain_write_between from datetime import datetime import json class GNEB_Node(NodeMixin): """A class that represents a GNEB calculation on a single chain. Can spawn children if cutting of the chain becomes necessary.""" def __init__(self, name, input_file, output_folder, initial_chain_file=None, gneb_workflow_log_file=None, parent=None, children=None): from spirit import simulation, io """Constructor.""" self.chain_file : str = "" self.initial_chain_file : str = "" self.input_file : str = "" self.gneb_workflow_log_file : str = "" self.current_energy_path = object() self.n_iterations_check = 5000 self.n_checks_save = 3 self.total_iterations = 0 self.target_noi = 10 self.noi = -1 self.convergence = 1e-4 self.path_shortening_constant = -1 self.max_total_iterations = -1 self.output_folder = "" self.output_tag = "" self.allow_split = True self.moving_endpoints = None self.translating_endpoints = None self.delta_Rx_left = 1.0 self.delta_Rx_right = 1.0 self.state_prepare_callback = None self.gneb_step_callback = None self.exit_callback = None self.before_gneb_callback = None self.before_llg_callback = None self.intermediate_minima = [] self.child_indices = [] self.image_types = [] self.history = [] self.solver_llg = simulation.SOLVER_LBFGS_OSO self.solver_gneb = simulation.SOLVER_VP_OSO self.chain_write_fileformat = io.FILEFORMAT_OVF_BIN # private fields self._ci = False self._converged = False self._log = True # flag to disable/enable logging # Create output folder if not os.path.exists(output_folder): os.makedirs(output_folder) self.output_folder = output_folder # The current chain is always saved here self.chain_file = output_folder + "/chain.ovf" # If an initial chain is specified we copy it to the output folder if(initial_chain_file): if not os.path.exists(initial_chain_file): raise Exception("Initial chain file ({}) does not exist!".format(initial_chain_file)) self.initial_chain_file = initial_chain_file shutil.copyfile(initial_chain_file, self.chain_file) self.input_file = input_file self.name = name self.parent = parent # If no log file has been specified we put one in the output folder if not gneb_workflow_log_file: self.gneb_workflow_log_file = self.output_folder + "/workflow_log.txt" else: self.gneb_workflow_log_file = gneb_workflow_log_file if children: self.children = children # Log the node creation creation_msg = "Creating new GNEB_Node '{}'".format(name) if(parent): creation_msg += ", parent '{}'".format(parent.name) if(children): creation_msg += ", children: " for c in children: creation_msg += "{} ".format(c.name) self.log(creation_msg) if(initial_chain_file): self.log("Initial chain file: {}".format(initial_chain_file)) def setup_plot_callbacks(self): """Sets up callbacks such that the path is plotted.""" from spirit import simulation, chain from spirit.parameters import gneb from spirit_extras import plotting, data import matplotlib.pyplot as plt def mark_climbing_image(p_state, gnw, ax): """Helper function that marks the climbing image in a plot.""" import numpy as np image_types = np.array([gneb.get_climbing_falling(p_state, i) for i in range(chain.get_noi(p_state))]) idx_climbing_list = np.array(range(chain.get_noi(p_state)))[image_types == gneb.IMAGE_CLIMBING] if(len(idx_climbing_list) == 0): return idx_climbing = idx_climbing_list[0] E0 = 
gnw.current_energy_path.total_energy[-1] ax.plot( gnw.current_energy_path.reaction_coordinate[idx_climbing], gnw.current_energy_path.total_energy[idx_climbing] - E0, marker="^", color="red" ) def before_gneb_cb(gnw, p_state): # gneb.set_image_type_automatically(p_state) simulation.start(p_state, simulation.METHOD_GNEB, self.solver_gneb, n_iterations=1) gnw.current_energy_path = data.energy_path_from_p_state(p_state) plotting.plot_energy_path(gnw.current_energy_path, plt.gca()) mark_climbing_image(p_state, gnw, plt.gca()) plt.savefig(gnw.output_folder + "/path_{}_initial.png".format(gnw.total_iterations)) plt.close() self.before_gneb_callback = before_gneb_cb def step_cb(gnw, p_state): import numpy as np gnw.update_energy_path(p_state) plotting.plot_energy_path(gnw.current_energy_path, plt.gca()) mark_climbing_image(p_state, gnw, plt.gca()) plt.savefig(gnw.output_folder + "/path_{}.png".format(gnw.total_iterations)) plt.close() plt.plot( np.asarray(gnw.history)[:,0], np.asarray(gnw.history)[:,1] ) plt.xlabel("Total iterations") plt.ylabel("Max. torque [meV]") plt.yscale("log") plt.savefig(gnw.output_folder + "/convergence.png") plt.close() self.gneb_step_callback = step_cb def exit_cb(gnw, p_state): plotting.plot_energy_path(gnw.current_energy_path, plt.gca()) mark_climbing_image(p_state, gnw, plt.gca()) plt.savefig(gnw.output_folder + "/path_{}_final.png".format(gnw.total_iterations)) plt.close() self.exit_callback = exit_cb def change_output_folder(self, new_output_folder, log_file=None): if os.path.exists(new_output_folder): raise Exception("Cannot change to new_output_folder, that already exists!") if self.parent == None: shutil.copytree(self.output_folder, new_output_folder) # Take care of all files we might need log_file = new_output_folder + "/workflow_log.txt" self.output_folder = new_output_folder self.chain_file = self.output_folder + "/chain.ovf" self.gneb_workflow_log_file = log_file if os.path.exists(self.initial_chain_file): self.initial_chain_file = self.output_folder + "/root_initial_chain.ovf" self.log("Changed output folder to {}".format(new_output_folder)) for i,c in enumerate(self.children): child_output_folder = self.output_folder + "/{}".format(i) c.change_output_folder(child_output_folder, log_file) def collect_chain(self, output_file): from spirit import state, io, chain self.log("Collecting chain in file {}".format(output_file)) if os.path.exists(output_file): os.remove(output_file) def helper(p_state, node): node.log(" collecting...") # Make sure the number of images matches our current simulation state chain.image_to_clipboard(p_state) noi = io.n_images_in_file(p_state, node.chain_file) chain.set_length(p_state, noi) io.chain_read(p_state, node.chain_file) noi = chain.get_noi(p_state) i = 0 while i < noi: # First we check if this images is within any of the ranges covered by the children is_child = False for idx_c, (i1, i2) in enumerate(node.child_indices): if i>=i1 and i<=i2: is_child = True idx_child = idx_c break # If the current idx is covered by a child node, we open up another level of recursion, else we append the image if is_child: helper(p_state, node.children[idx_child]) # After collecting the child we advance the current iteration idx, so that we continue one past the last child index i = node.child_indices[idx_child][1] + 1 # We also need to read the chain file again to return to our previous state chain.image_to_clipboard(p_state) chain.set_length(p_state, noi) io.chain_read(p_state, node.chain_file) else: io.image_append(p_state, output_file, 
idx_image=i) i += 1 with state.State(self.input_file) as p_state: self._prepare_state(p_state) helper(p_state, self) self.log("Done collecting chain in file") def to_json(self): json_file = self.output_folder + "/node.json" self.log("Saving to {}".format(json_file)) node_dict = dict( name = str(self.name), chain_file = str(self.chain_file), initial_chain_file = str(self.initial_chain_file), input_file = str(self.input_file), gneb_workflow_log_file = str(self.gneb_workflow_log_file), n_iterations_check = int(self.n_iterations_check), n_checks_save = int(self.n_checks_save), total_iterations = int(self.total_iterations), target_noi = int(self.target_noi), convergence = float(self.convergence), max_total_iterations = int(self.max_total_iterations), output_folder = str(self.output_folder), output_tag = str(self.output_tag), child_indices = [(int(c1), int(c2)) for c1, c2 in self.child_indices], allow_split = bool(self.allow_split), path_shortening_constant = float(self.path_shortening_constant), moving_endpoints = bool(self.moving_endpoints), delta_Rx_left = float(self.delta_Rx_left), delta_Rx_right = float(self.delta_Rx_right), image_types = [ [int(t[0]), int(t[1]) ] for t in self.image_types], solver_llg = int(self.solver_llg), solver_gneb = int(self.solver_gneb), fileformat = int(self.chain_write_fileformat) ) with open(json_file, "w") as f: f.write(json.dumps(node_dict, indent=4)) for c in self.children: c.to_json() def history_to_file(self, path): np.savetxt(path, self.history, header="iteration, max. torque" ) @staticmethod def from_json(json_file, parent=None, children=None): with open(json_file, "r") as f: data = json.load(f) name = data["name"] input_file = data["input_file"] gneb_workflow_log_file = data["gneb_workflow_log_file"] output_folder = data["output_folder"] result = GNEB_Node(name, input_file, output_folder, None, gneb_workflow_log_file, parent=parent, children=children) result.n_iterations_check = data.get("n_iterations_check", result.n_iterations_check) result.n_checks_save = data.get("n_checks_save", result.n_checks_save) result.total_iterations = data.get("total_iterations", result.total_iterations) result.target_noi = data.get("target_noi", result.target_noi) result.convergence = data.get("convergence", result.convergence) result.max_total_iterations = data.get("max_total_iterations", result.max_total_iterations) result.chain_file = data.get("chain_file", result.chain_file) result.initial_chain_file = data.get("initial_chain_file", result.initial_chain_file) result.output_tag = data.get("output_tag", result.output_tag) result.allow_split = data.get("allow_split", result.allow_split) result.child_indices = data.get("child_indices", result.child_indices) result.moving_endpoints = data.get("moving_endpoints", result.moving_endpoints) result.image_types = data.get("image_types", result.image_types) result.delta_Rx_left = data.get("delta_Rx_left", result.delta_Rx_left) result.delta_Rx_right = data.get("delta_Rx_right", result.delta_Rx_right) result.path_shortening_constant = data.get("path_shortening_constant", result.path_shortening_constant) result.solver_llg = data.get("solver_llg", result.solver_llg) result.solver_gneb = data.get("solver_gneb", result.solver_gneb) result.chain_write_fileformat = data.get("fileformat", result.chain_write_fileformat) result.log("Created from json file {}".format(json_file)) for i in range(len(result.child_indices)): new_json_file = result.output_folder + "/{}/node.json".format(i) GNEB_Node.from_json(new_json_file, parent=result) return result def log(self,
message): """Append a message with date/time information to the log file.""" if not self._log: # if log flag is not set, do nothing return now = datetime.now() current_time = now.strftime("%m/%d/%Y, %H:%M:%S") log_string = "{} [{:^35}] : {}".format(current_time, self.name, message) with open(self.gneb_workflow_log_file, "a") as f: print(log_string, file=f) def enable_log(self): self._log = True def disable_log(self): self._log = False def update_energy_path(self, p_state=None): """Updates the current energy path. If p_state is given we just use that, otherwise we have to construct it first""" self._log = False if p_state: self.current_energy_path = energy_path_from_p_state(p_state) self.noi = self.current_energy_path.noi() else: from spirit import state, chain, simulation with state.State(self.input_file) as p_state: chain.update_data(p_state) self._prepare_state(p_state) simulation.start(p_state, simulation.METHOD_GNEB, self.solver_gneb, n_iterations=1) # One iteration of GNEB to get interpolated quantities self.update_energy_path(p_state) self._log = True def check_for_minima(self): """ Checks the chain for intermediate minima. """ self.intermediate_minima = [] e = self.current_energy_path.total_energy for i in range(1, len(e) - 1): # Leave out the first and the last energy if(e[i-1] > e[i] and e[i+1] > e[i]): self.intermediate_minima.append(i) if(len(self.intermediate_minima) > 0): self.log("Found intermediate minima at: {}".format(self.intermediate_minima)) def save_chain(self, p_state): """Saves the chain and overwrites the chain_file""" from spirit import io self.log("Writing chain to {}".format(self.chain_file)) io.chain_write(p_state, self.chain_file, fileformat=self.chain_write_fileformat) def backup_chain(self, path): """Saves the chain to a file""" from spirit import state, io with state.State(self.input_file) as p_state: self._prepare_state(p_state) self.log("Writing chain to {}".format(path)) io.chain_write(p_state, path, fileformat=self.chain_write_fileformat) def add_child(self, i1, i2, p_state=None): def _helper(p_state): self.log("Adding child with indices {} and {}".format(i1, i2)) # Attributes that change due to tree structure child_name = self.name + "_{}".format(len(self.children)) child_input_file = self.input_file child_output_folder = self.output_folder + "/{}".format(len(self.children)) self.children += (GNEB_Node(name = child_name, input_file = child_input_file, output_folder = child_output_folder, gneb_workflow_log_file=self.gneb_workflow_log_file, parent = self), ) self.children[-1].current_energy_path = self.current_energy_path.split(i1, i2+1) # Copy the other attributes self.children[-1].target_noi = self.target_noi self.children[-1].convergence = self.convergence self.children[-1].path_shortening_constant = self.path_shortening_constant self.children[-1].max_total_iterations = self.max_total_iterations self.children[-1].state_prepare_callback = self.state_prepare_callback self.children[-1].gneb_step_callback = self.gneb_step_callback self.children[-1].exit_callback = self.exit_callback self.children[-1].before_gneb_callback = self.before_gneb_callback self.children[-1].before_llg_callback = self.before_llg_callback self.children[-1].n_iterations_check = self.n_iterations_check self.children[-1].n_checks_save = self.n_checks_save self.children[-1].allow_split = self.allow_split self.children[-1].chain_write_fileformat
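The check_for_minima logic above is the rule the workflow uses to detect intermediate minima along the energy path (which the surrounding code can then use, e.g., to split the chain into child nodes). A minimal, self-contained sketch of that rule; the function name and toy data here are illustrative only:

# Sketch of the rule implemented by check_for_minima above: image i is an
# intermediate minimum when both neighbours lie higher in energy.
def find_intermediate_minima(total_energy):
    minima = []
    for i in range(1, len(total_energy) - 1):  # endpoints are never counted
        if total_energy[i - 1] > total_energy[i] and total_energy[i + 1] > total_energy[i]:
            minima.append(i)
    return minima

# Toy path: a dip between two barriers -> image 3 is an intermediate minimum.
energies = [0.0, 1.2, 0.9, 0.4, 1.1, 0.2]
assert find_intermediate_minima(energies) == [3]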
1 else: result = re.search('ControlBool_OneShot_('+args.scalerkey+'[a-zA-Z0-9_\-]*) = 0x([A-F0-9]{5})', line) if (result is not None): scalers[result.group(1)]['OneShot']=(result.group(2)) fpgavarCount += 1 else: result = re.search('IndicatorArrayU32_Counter_('+args.scalerkey+'[a-zA-Z0-9_\-]*) = 0x([A-F0-9]{5})', line) if (result is not None): scalers[result.group(1)]['Counters']=(result.group(2)) fpgavarCount += 1 else: result = re.search('IndicatorArrayU32Size_Counter_('+args.scalerkey+'[a-zA-Z0-9_\-]*) = ([0-9]+)', line) if (result is not None): scalers[result.group(1)]['Number of Counters']=(result.group(2)) fpgavarCount += 1 else: result = re.search('IndicatorBool_Done_('+args.scalerkey+'[a-zA-Z0-9_\-]*) = 0x([A-F0-9]{5})', line) if (result is not None): scalers[result.group(1)]['Done']=(result.group(2)) fpgavarCount += 1 # Extracting waveform data result = re.search('IndicatorArray(I8|U8|I16|U16|I32|U32|I64|U64|Sgl)_('+args.waveformkey+'[a-zA-Z0-9_\-]*) = 0x([A-F0-9]{5})', line, flags=re.IGNORECASE) if (result is not None): waveforms[result.group(2)]['Address']=(result.group(3)) if (result.group(1) == 'I8'): waveforms[result.group(2)]['Type']=('I08') csvwf[result.group(2)]['TypeEPICS']='CHAR' else : if (result.group(1) == 'U8'): waveforms[result.group(2)]['Type']=('U08') csvwf[result.group(2)]['TypeEPICS']='UCHAR' else: waveforms[result.group(2)]['Type']=(result.group(1).upper()) if (result.group(1) == 'U16'): csvwf[result.group(2)]['TypeEPICS']='USHORT' else: if (result.group(1) == 'I16'): csvwf[result.group(2)]['TypeEPICS']='SHORT' else: if (result.group(1) == 'I32'): csvwf[result.group(2)]['TypeEPICS']='LONG' else: if (result.group(1) == 'U32'): csvwf[result.group(2)]['TypeEPICS'] = 'ULONG' else: if (result.group(1) == 'Sgl'): csvwf[result.group(2)]['TypeEPICS'] = 'FLOAT' else : csvwf[result.group(2)]['TypeEPICS'] = 'DOUBLE' fpgavarCount += 1 else: result = re.search('IndicatorArray(I8|U8|I16|U16|I32|U32|I64|U64|Sgl)Size_('+args.waveformkey+'[a-zA-Z0-9_\-]*) = ([0-9]+)', line, flags=re.IGNORECASE) if (result is not None): waveforms[result.group(2)]['Size']=(result.group(3)) fpgavarCount += 1 # Extracting Settings result = re.search('_Signature\s*=\s*\"([A-F0-9]{32})\"', line) if (result is not None): settings['Signature']=(result.group(1)) else: result = re.search('_Bitfile \"([a-zA-Z0-9_\-]+.lvbitx)\"', line) if (result is not None): settings['Bitfile Name']=(result.group(1)) if not (args.extract) : copyfile("{0}/{1}".format(args.src,result.group(1)), "{0}/{1}".format(args.dst,result.group(1))) copyfile("{0}/{1}".format(args.src,result.group(1)), "{0}/reference/{1}".format(args.dst,result.group(1))) else: # Extracting BI, BO, AI, AO, AI FXP, AO FXP, MBBI, and MBBO #Extracting BI Vector if exists result = re.search('IndicatorU64_('+args.bikey+'[a-zA-Z0-9_\-]*) = 0x([A-F0-9]{5})', line, flags=re.IGNORECASE) if (result is not None): biaddr["BI_VECTOR"]=(result.group(2)) if (int(args.binum) != 0): bidict = { i : "DI{0}".format(i) for i in range(int(args.binum)) } fpgavarCount += int(args.binum) else: print(colored("WARNING: Found BI vector in header file, and yet binum input parameter was set to 0. 
Is this intentional?",'red')) else: # Extract boolean indicator (BI) result = re.search('IndicatorBool_('+args.bikey+'[a-zA-Z0-9_\-]*) = 0x([A-F0-9]{5})', line, flags=re.IGNORECASE) if (result is not None): biaddr[result.group(1)]=(result.group(2)) fpgavarCount += 1 else: # Extract Single precision floating point indicator (AI) result = re.search('IndicatorSgl_('+args.aikey+'[a-zA-Z0-9_\-]*) = 0x([A-F0-9]{5})', line, flags=re.IGNORECASE) if (result is not None): aiaddr[result.group(1)]=(result.group(2)) fpgavarCount += 1 else: # Extract Single precision floating point control (AO) result = re.search('ControlSgl_('+args.aokey+'[a-zA-Z0-9_\-]*) = 0x([A-F0-9]{5})', line, flags=re.IGNORECASE) if (result is not None): aoaddr[result.group(1)]=(result.group(2)) fpgavarCount += 1 else: # Extract boolean control (BO) result = re.search('ControlBool_('+args.bokey+'[a-zA-Z0-9_\-]*) = 0x([A-F0-9]{5})', line, flags=re.IGNORECASE) if (result is not None): boaddr[result.group(1)]=(result.group(2)) fpgavarCount += 1 else: # Extract fixed point indicator (AI) result = re.search('IndicatorU64_('+args.fxpkey+'([a-zA-Z0-9_\-])*) = 0x([A-F0-9]{5})', line, flags=re.IGNORECASE) if (result is not None): aiaddr["FXP_"+result.group(1)]=(result.group(3)) fpgavarCount += 1 else: # Extract fixed point control (AO) result = re.search('ControlU64_('+args.fxpkey+'([a-zA-Z0-9_\-])*) = 0x([A-F0-9]{5})', line, flags=re.IGNORECASE) if (result is not None): aoaddr["FXP_"+result.group(1)]=(result.group(3)) fpgavarCount += 1 else: # Extract MBB indicator (MBBI) result = re.search('IndicatorU16_('+args.mbbikey+'([a-zA-Z0-9_\-])*) = 0x([A-F0-9]{5})', line, flags=re.IGNORECASE) if (result is not None): mbbiaddr[result.group(1)]=(result.group(3)) fpgavarCount += 1 else: # Extract MBB control (MBBO) result = re.search('ControlU16_('+args.mbbokey+'([a-zA-Z0-9_\-])*) = 0x([A-F0-9]{5})', line, flags=re.IGNORECASE) if (result is not None): mbboaddr[result.group(1)]=(result.group(3)) fpgavarCount += 1 #process RT variables if enabled if (args.useSM): rtlist = [rt.rstrip() for rt in open('{}/RT.list'.format(args.src))] result = re.search('smsize=([0-9]*)', rtlist[0]) if (result is not None): settings['Shared Memory Size'] = result.group(1) rtlist.pop(0) else: print (colored("Did not find the shared memory size in the RT file. 
Using size set by script {}...".format(args.smsize), 'red')) for i, val in enumerate(rtlist): result = re.search('RT_MBI', val) if (result is not None): mbbiaddr[val]=i rtvarCount += 1 else: result = re.search('RT_MBO', val) if (result is not None): mbboaddr[val]=i rtvarCount += 1 else: result = re.search('RT_[A-Z0-9]{3}_AO', val) if (result is not None): aoaddr[val]=i rtvarCount += 1 else: result = re.search('RT_BOL_BO', val) if (result is not None): boaddr[val]=i rtvarCount += 1 else: result = re.search('RT_[A-Z0-9]{3}_AI', val) if (result is not None): aiaddr[val]=i rtvarCount += 1 else: result = re.search('RT_BOL_BI', val) if (result is not None): biaddr[val]=i rtvarCount += 1 else: result = re.search('(RT_(I08|U08|I16|U16|I32|U32|I64|U64|SGL|DBL|BOL)_WF[a-zA-Z0-9_\-]*)[\s*]([0-9]+)', val) if (result is not None): waveforms[result.group(1)]['Address']=i waveforms[result.group(1)]['Size']=result.group(3) waveforms[result.group(1)]['Type']=result.group(2).upper() if (result.group(2) == 'I08'): csvwf[result.group(1)]['TypeEPICS']='CHAR' else : if (result.group(2) == 'U08'): csvwf[result.group(1)]['TypeEPICS']='UCHAR' else: if (result.group(2) == 'U16'): csvwf[result.group(1)]['TypeEPICS']='USHORT' else: if (result.group(2) == 'I16'): csvwf[result.group(1)]['TypeEPICS']='SHORT' else: if (result.group(2) == 'I32'): csvwf[result.group(1)]['TypeEPICS']='LONG' else: if (result.group(2) == 'U32'): csvwf[result.group(1)]['TypeEPICS'] = 'ULONG' else: if (result.group(2) == 'SGL'): csvwf[result.group(1)]['TypeEPICS'] = 'FLOAT' else : if (result.group(2) == 'BOL'): csvwf[result.group(1)]['TypeEPICS']='CHAR' else: csvwf[result.group(1)]['TypeEPICS'] = 'DOUBLE' rtvarCount += 1 else: result = re.search('RT_STI', val) if (result is not None): strinaddr[val]=i rtvarCount += 1 else: result = re.search('RT_STO', val) if (result is not None): stroutaddr[val]=i rtvarCount += 1 else: print("Found {} in RT.list file, but could not classify it.".format(val)) print( "{} RT variables processed\n{} FPGA addresses extracted".format(rtvarCount, fpgavarCount)) # All tasks when extraction is not chosen if not (args.extract) : # Use *.csv file with open("{0}/{1}".format(args.src, args.cfgcsv) , "r") as f: lines = f.readlines() current = "None" for index, val in enumerate(lines): val = val.strip() removedDelimiter = val.replace(dlm , "") removedDelimiter = removedDelimiter.replace(" ", "") if (not removedDelimiter): continue lineSplit = val.split(dlm) result = re.search('^AI INI NAME', lineSplit[0]) if (result is not None): current = "AI" continue else : result = re.search('^BI INI NAME', lineSplit[0]) if (result is not None): current = "BI" continue else : result = re.search('^BO INI NAME', lineSplit[0]) if (result is not None): current = "BO" continue else : result = re.search('^AO INI NAME', lineSplit[0]) if (result is not None): current = "AO" continue else : result = re.search('^SCALER INI NAME', lineSplit[0]) if (result is not None): current = "SCALER" continue else : result = re.search('^WAVEFORM INI NAME', lineSplit[0]) if (result is not None): current = "WAVEFORM" continue else : result = re.search('^MBBI INI NAME', lineSplit[0]) if (result is not None): current = "MBBI" continue else : result = re.search('^MBBO INI NAME', lineSplit[0]) if (result is not None): current = "MBBO" continue else : result = re.search('^STRINGIN INI NAME', lineSplit[0]) if (result is not None): current = "STRINGIN" continue else : result = re.search('^STRINGOUT INI NAME', lineSplit[0]) if (result is not None): current = "STRINGOUT" 
continue if (current == 'AI'): #AI INI NAME,AI DB NAME,AI DESCRIPTION,AI Sign(FXP),AI Word Length(FXP),AI INTEGER LENGTH(FXP), SCAN # 0 1 2 3 4 5 6 #EGU, , PREC, HIHI, HIGH, LOW, LOLO, HHSV, HSV, LSV, LLSV, HYST, DISABLE # 7 8 9 10 11 12 13 14 15 16 17 18 csvai[lineSplit[0]]['EQ'] = lineSplit[1] csvai[lineSplit[0]]['DESC'] = lineSplit[2] csvai[lineSplit[0]]['SCAN'] = lineSplit[6] csvai[lineSplit[0]]['DISABLE'] = lineSplit[18] if not csvai[lineSplit[0]]['DISABLE']: print (colored("Error with CSV entry {0} line {1}. Disable is not set. Exiting...".format(lineSplit[0], index+1), 'red')) sys.exit() if (csvai[lineSplit[0]]['DISABLE'] == '0') : csvai[lineSplit[0]]['EGU'] = lineSplit[7] csvai[lineSplit[0]]['PREC'] = lineSplit[8] csvai[lineSplit[0]]['HIHI'] = float(lineSplit[9]) csvai[lineSplit[0]]['HIGH'] = float(lineSplit[10]) csvai[lineSplit[0]]['LOW'] = float(lineSplit[11]) csvai[lineSplit[0]]['LOLO'] = float(lineSplit[12]) csvai[lineSplit[0]]['HHSV'] = lineSplit[13].upper() csvai[lineSplit[0]]['HSV'] = lineSplit[14].upper() csvai[lineSplit[0]]['LSV'] = lineSplit[15].upper() csvai[lineSplit[0]]['LLSV'] = lineSplit[16].upper() csvai[lineSplit[0]]['HYST'] = float(lineSplit[17]) result = re.search('FXP_', lineSplit[0]) if (result is not None): try: csvai[lineSplit[0]]['Sign'] = int(lineSplit[3]) csvai[lineSplit[0]]['Word Length'] = int(lineSplit[4]) csvai[lineSplit[0]]['Integer Word Length'] = int(lineSplit[5]) fxps[lineSplit[0]]['Sign'] = int(lineSplit[3]) fxps[lineSplit[0]]['Word Length'] = int(lineSplit[4]) fxps[lineSplit[0]]['Integer Word Length'] = int(lineSplit[5]) except (ValueError) as err: print (colored("Error with CSV entry {0} line {1}".format(lineSplit[0], index+1), 'red')) print(colored(err, 'red')) sys.exit() else: if (current == 'AO'): #AO INI NAME,AO DB NAME,AO DESCRIPTION,AO Sign(FXP),AO Word Length(FXP),AO INTEGER LENGTH(FXP), AUTOSAVE, PINI, INIT VAL # 0 1 2 3 4 5 6 7 8 #EGU, PREC, HIHI, HIGH, LOW, LOLO, HHSV, HSV, LSV, LLSV, HYST, IVOA, IVOV, DISABLE # 9 10 11 12 13 14 15 16 17 18 19 20 21 22 csvao[lineSplit[0]]['EQ'] = lineSplit[1] csvao[lineSplit[0]]['DESC'] = lineSplit[2] csvao[lineSplit[0]]['AUTOSAVE'] = int(lineSplit[6]) csvao[lineSplit[0]]['PINI'] = int(lineSplit[7]) csvao[lineSplit[0]]['INIT VAL'] = float(lineSplit[8]) csvao[lineSplit[0]]['DISABLE'] = lineSplit[22] if not csvao[lineSplit[0]]['DISABLE']: print (colored("Error with CSV entry {0} line {1}. Disable is not set. Exiting...".format(lineSplit[0], index+1), 'red')) sys.exit() if (csvao[lineSplit[0]]['DISABLE'] == '0') : csvao[lineSplit[0]]['EGU'] = lineSplit[9] csvao[lineSplit[0]]['PREC'] = lineSplit[10] csvao[lineSplit[0]]['HIHI'] = float(lineSplit[11]) csvao[lineSplit[0]]['HIGH'] = float(lineSplit[12]) csvao[lineSplit[0]]['LOW'] = float(lineSplit[13]) csvao[lineSplit[0]]['LOLO'] = float(lineSplit[14]) csvao[lineSplit[0]]['HHSV'] =
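The nested if/else ladders above (both in the header-file branch and in the RT.list branch) implement one fixed mapping from LabVIEW FPGA data types to EPICS record types. A table-driven sketch of the same mapping; the function name is illustrative, and the fallback to DOUBLE matches the final else-branches above:

# Equivalent table-driven version of the nested type ladders above.
# Keys are the FPGA types matched by the regexes; values are the EPICS
# types written into csvwf. Anything unlisted (e.g. I64/U64) falls
# through to DOUBLE, exactly as in the else-branches.
EPICS_TYPE_MAP = {
    'I8': 'CHAR',  'I08': 'CHAR',  'BOL': 'CHAR',
    'U8': 'UCHAR', 'U08': 'UCHAR',
    'I16': 'SHORT', 'U16': 'USHORT',
    'I32': 'LONG',  'U32': 'ULONG',
    'SGL': 'FLOAT', 'Sgl': 'FLOAT',
}

def epics_type(fpga_type):
    return EPICS_TYPE_MAP.get(fpga_type, 'DOUBLE')

assert epics_type('U16') == 'USHORT'
assert epics_type('I64') == 'DOUBLE'  # 64-bit ints fall through to DOUBLE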
################################################################################# # Copyright 2020 <NAME> # Contrib.: T.P. Correa # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. See the License for # the specific language governing permissions and limitations under the License. ################################################################################# ## Contributor list: ## 2020 - <NAME> - https://github.com/rftafas ## 2020 - <NAME> - https://github.com/tpcorrea import sys import os from datetime import datetime import hdltools.vhdl_gen as vhdl import hdltools.pkgvhdl_gen as pkgvhdl import math import random from mdutils.mdutils import MdUtils indent = vhdl.indent RegisterTypeSet = {"ReadOnly", "ReadWrite", "SplitReadWrite", "Write2Clear", "Write2Pulse"} vunitPort = ( "aclk", "--areset_n", "awaddr", "--awprot", "awvalid", "awready", "wdata", "wstrb", "wvalid", "wready", "bresp", "bvalid", "bready", "araddr", "--arprot", "arvalid", "arready", "rdata", "rresp", "rvalid", "rready" ) testBenchCode = """ S_AXI_ACLK <= not S_AXI_ACLK after 10 ns; main : process variable rdata_v : std_logic_vector(C_S_AXI_DATA_WIDTH-1 downto 0) := (others=>'0'); begin test_runner_setup(runner, runner_cfg); S_AXI_ARESETN <= '0'; wait until rising_edge(S_AXI_ACLK); wait until rising_edge(S_AXI_ACLK); S_AXI_ARESETN <= '1'; wait until rising_edge(S_AXI_ACLK); wait until rising_edge(S_AXI_ACLK); while test_suite loop if run("Sanity check for system.") then report "System Sane. Begin tests."; check_passed(result("Sanity check for system.")); elsif run("Simple Run Test") then wait for 100 us; check_passed(result("Simple Run Test Pass.")); elsif run("Read Only Test") then --read_only_tag elsif run("Read and Write Test") then --read_write_tag elsif run("Split Read Write Test") then --split_read_write_tag elsif run("Write to Clear Test") then --write_to_clear_tag elsif run("Write to Pulse Test") then --write_to_pulse_tag elsif run("External Clear Test") then --external_clear_tag end if; end loop; test_runner_cleanup(runner); -- Simulation ends here end process; test_runner_watchdog(runner, 101 us); """ templateScript = ''' from os.path import join, dirname import sys try: from vunit import VUnit except: print("Please, install vunit_hdl with 'pip install vunit_hdl'") print("Also, make sure to have either GHDL or Modelsim installed.") exit() root = dirname(__file__) vu = VUnit.from_argv() vu.add_osvvm() vu.add_verification_components() try: expert = vu.add_library("expert") expert.add_source_files(join(root, "stdexpert/src/*.vhd")) except: print("Missing std_logic_expert. 
Please, run:") print("git clone https://github.com/rftafas/stdexpert.git") exit() lib = vu.add_library("<name>") lib.add_source_files(join(root, "./*.vhd")) test_tb = lib.entity("<name>_tb") test_tb.scan_tests_from_file(join(root, "<name>_tb.vhd")) test_tb.add_config( name="run_time", generics=dict(run_time=100) ) vu.main() ''' TemplateCode = """ ------------------------------------------------------------------------------------------------ -- I/O Connections assignments ------------------------------------------------------------------------------------------------ S_AXI_AWREADY <= awready_s; S_AXI_WREADY <= wready_s; S_AXI_BRESP <= bresp_s; S_AXI_BVALID <= bvalid_s; S_AXI_ARREADY <= arready_s; S_AXI_RRESP <= rresp_s; S_AXI_RVALID <= rvalid_s; ------------------------------------------------------------------------------------------------ --write ------------------------------------------------------------------------------------------------ waddr_p : process(S_AXI_ARESETN, S_AXI_ACLK) begin if S_AXI_ARESETN = '0' then awready_s <= '1'; awaddr_s <= (others => '1'); elsif rising_edge(S_AXI_ACLK) then if S_AXI_AWVALID = '1' then awready_s <= '0'; awaddr_s <= S_AXI_AWADDR; elsif (S_AXI_BREADY = '1' and bvalid_s = '1') then awready_s <= '1'; elsif wtimeout_s = '1' then awready_s <= '1'; end if; end if; end process; wdata_p : process (S_AXI_ACLK) begin if S_AXI_ARESETN = '0' then wready_s <= '1'; elsif rising_edge(S_AXI_ACLK) then if S_AXI_WVALID = '1' then wready_s <= '0'; elsif (S_AXI_BREADY = '1' and bvalid_s = '1') then wready_s <= '1'; elsif wtimeout_s = '1' then wready_s <= '1'; end if; end if; end process; wreg_en_p : process (S_AXI_ACLK) variable lock_v : std_logic; begin if S_AXI_ARESETN = '0' then regwrite_en <= '0'; lock_v := '0'; elsif rising_edge(S_AXI_ACLK) then if lock_v = '1' then regwrite_en <= '0'; if (S_AXI_BREADY = '1' and bvalid_s = '1') then lock_v := '0'; end if; elsif (wready_s = '0' or S_AXI_WVALID = '1' ) and ( awready_s = '0' or S_AXI_AWVALID = '1' )then regwrite_en <= '1'; lock_v := '1'; elsif wtimeout_s = '1' then regwrite_en <= '0'; lock_v := '0'; end if; end if; end process; wresp_p : process (S_AXI_ACLK) begin if S_AXI_ARESETN = '0' then bvalid_s <= '0'; bresp_s <= "00"; elsif rising_edge(S_AXI_ACLK) then if (wready_s = '1' and awready_s = '1' ) then bvalid_s <= '1'; bresp_s <= "00"; bresp_timer_sr <= ( 0 => '1', others=>'0' ); elsif wtimeout_s = '1' then bvalid_s <= '1'; bresp_s <= "10"; bresp_timer_sr <= ( 0 => '1', others=>'0' ); elsif bvalid_s = '1' then bresp_timer_sr <= bresp_timer_sr(14 downto 0) & bresp_timer_sr(15); if S_AXI_BREADY = '1' or bresp_timer_sr(15) = '1' then bvalid_s <= '0'; bresp_s <= "00"; bresp_timer_sr <= ( 0 => '1', others=>'0' ); end if; end if; end if; end process; wtimer_p : process (S_AXI_ACLK) begin if S_AXI_ARESETN = '0' then wtimeout_s <= '0'; elsif rising_edge(S_AXI_ACLK) then wtimeout_s <= wtimeout_sr(15); if wready_s = '1' or awready_s = '1' then wtimeout_sr <= ( 0 => '1', others=>'0'); elsif wready_s = '1' and awready_s = '1' then wtimeout_sr <= (others=>'0'); else wtimeout_sr <= wtimeout_sr(14 downto 0) & wtimeout_sr(15); end if; end if; end process; wreg_p : process (S_AXI_ACLK) variable loc_addr : INTEGER; begin if S_AXI_ARESETN = '0' then regwrite_s <= (others => (others => '0')); elsif rising_edge(S_AXI_ACLK) then loc_addr := to_integer(awaddr_s(C_S_AXI_ADDR_WIDTH - 1 downto C_S_AXI_ADDR_LSB)); for j in regwrite_s'range loop for k in C_S_AXI_DATA_WIDTH - 1 downto 0 loop if regclear_s(j)(k) = '1' then regwrite_s(j)(k) <= 
'0'; elsif regwrite_en = '1' then if j = loc_addr then if S_AXI_WSTRB(k/8) = '1' then regwrite_s(j)(k) <= S_AXI_WDATA(k); end if; end if; end if; end loop; end loop; end if; end process; ------------------------------------------------------------------------------------------------ --Read ------------------------------------------------------------------------------------------------ raddr_p : process (S_AXI_ARESETN, S_AXI_ACLK) begin if S_AXI_ARESETN = '0' then arready_s <= '1'; regread_en <= '0'; araddr_s <= (others => '1'); elsif rising_edge(S_AXI_ACLK) then if S_AXI_ARVALID = '1' then arready_s <= '0'; araddr_s <= S_AXI_ARADDR; regread_en <= '1'; elsif rvalid_s = '1' and S_AXI_RREADY = '1' then arready_s <= '1'; regread_en <= '0'; elsif rtimeout_s = '1' then arready_s <= '1'; regread_en <= '0'; else regread_en <= '0'; end if; end if; end process; --AXI uses same channel for data and response. --one can consider that AXI-S RRESP is sort of TUSER. rresp_rdata_p : process (S_AXI_ARESETN, S_AXI_ACLK) begin if S_AXI_ARESETN = '0' then rvalid_s <= '0'; rresp_s <= "00"; elsif rising_edge(S_AXI_ACLK) then if regread_en = '1' then --there is an address waiting for us. rvalid_s <= '1'; rresp_s <= "00"; -- 'OKAY' response elsif S_AXI_RREADY = '1' then --Read data is accepted by the master rvalid_s <= '0'; elsif rtimeout_s = '1' then --when does it time out? when, after doing my part, the master does not respond --with RREADY, meaning it hasn't read my data. rvalid_s <= '0'; rresp_s <= "10"; -- No one is expected to read this. Debug only. else rvalid_s <= '0'; rresp_s <= "00"; -- No one is expected to read this. Debug only. end if; end if; end process; rtimer_p : process (S_AXI_ACLK) begin if S_AXI_ARESETN = '0' then rtimeout_s <= '0'; elsif rising_edge(S_AXI_ACLK) then rtimeout_s <= rtimeout_sr(15); if S_AXI_RREADY = '1' then rtimeout_sr <= ( 0 => '1', others=>'0'); elsif rvalid_s = '1' then rtimeout_sr <= rtimeout_sr(14 downto 0) & rtimeout_sr(15); end if; end if; end process; --get data from ports to bus read_reg_p : process( S_AXI_ACLK ) is variable loc_addr : integer; variable reg_tmp : reg_t := (others => (others => '0')); variable reg_lock : reg_t := (others => (others => '0')); begin if (S_AXI_ARESETN = '0') then reg_tmp := (others => (others => '0')); reg_lock := (others => (others => '0')); S_AXI_RDATA <= (others => '0'); elsif (rising_edge (S_AXI_ACLK)) then for j in regread_s'range loop for k in regread_s(0)'range loop if regclear_s(j)(k) = '1' then reg_tmp(j)(k) := '0'; reg_lock(j)(k) := '0'; elsif regset_s(j)(k) = '1' then reg_tmp(j)(k) := '1'; reg_lock(j)(k) := '1'; elsif reg_lock(j)(k) = '0' then reg_tmp(j)(k) := regread_s(j)(k); end if; end loop; end loop; loc_addr := to_integer(araddr_s(C_S_AXI_ADDR_WIDTH-1 downto C_S_AXI_ADDR_LSB)); if regread_en = '1' then S_AXI_RDATA <= reg_tmp(loc_addr); end if; end if; end process; """ def byte_enable_vector(start,end,size): vector = "\"" tmp1 = math.floor(start/8) tmp2 = math.floor(end/8) j = math.ceil(size/8)-1 while j >= 0: if (j < tmp1 or j > tmp2): vector += "0" else: vector += "1" j -= 1 vector += "\"" return vector def random_vector(length): bits = "01" vector = "\"" j = 0 while j < length: vector += random.choice(bits) j += 1 vector += "\"" return vector def random_bit(): bits = "01" vector = "'" vector += random.choice(bits) vector += "'" return vector def GetDirection(type): if type in ("ReadOnly", "Write2Clear", "SplitReadWrite"): return "in" else:
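The byte_enable_vector helper above computes the write-strobe mask the generated VHDL uses, so that only the byte lanes touched by a register field are enabled. A standalone restatement with a worked example; the condition is condensed but behaviorally identical to the helper above:

import math

# Re-stating byte_enable_vector from above so this snippet runs standalone.
def byte_enable_vector(start, end, size):
    vector = "\""
    tmp1 = math.floor(start / 8)   # first byte lane touched by the field
    tmp2 = math.floor(end / 8)     # last byte lane touched by the field
    j = math.ceil(size / 8) - 1    # emit the most significant lane first
    while j >= 0:
        vector += "1" if tmp1 <= j <= tmp2 else "0"
        j -= 1
    vector += "\""
    return vector

# Bits 8..23 of a 32-bit register live in byte lanes 1 and 2, so the
# generated VHDL strobe constant enables only the two middle lanes.
assert byte_enable_vector(8, 23, 32) == '"0110"'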
0 DummyHandler2.get.assert_called_with( 'GET', self.hmc, '/api/cpcs/1', tuple('1'), True) assert DummyHandler2.delete.called == 0 def test_post_cpcs(self): # the function to be tested result = self.urihandler.post(self.hmc, '/api/cpcs', {}, True, True) assert result == self.new_cpc assert DummyHandler1.get.called == 0 DummyHandler1.post.assert_called_with( 'POST', self.hmc, '/api/cpcs', tuple(), {}, True, True) assert DummyHandler2.get.called == 0 assert DummyHandler2.delete.called == 0 def test_delete_cpc2(self): # the function to be tested self.urihandler.delete(self.hmc, '/api/cpcs/2', True) assert DummyHandler1.get.called == 0 assert DummyHandler1.post.called == 0 assert DummyHandler2.get.called == 0 DummyHandler2.delete.assert_called_with( 'DELETE', self.hmc, '/api/cpcs/2', tuple('2'), True) def standard_test_hmc(): """ Return a FakedHmc object that is prepared with a few standard resources for testing. """ hmc_resources = { 'consoles': [ { 'properties': { 'name': 'fake_console_name', }, 'users': [ { 'properties': { 'object-id': 'fake-user-oid-1', 'name': 'fake_user_name_1', 'description': 'User #1', 'type': 'system-defined', }, }, ], 'user_roles': [ { 'properties': { 'object-id': 'fake-user-role-oid-1', 'name': 'fake_user_role_name_1', 'description': 'User Role #1', 'type': 'system-defined', }, }, ], 'user_patterns': [ { 'properties': { 'element-id': 'fake-user-pattern-oid-1', 'name': 'fake_user_pattern_name_1', 'description': 'User Pattern #1', 'pattern': 'fake_user_name_*', 'type': 'glob-like', 'retention-time': 0, 'user-template-uri': '/api/users/fake-user-oid-1', }, }, ], 'password_rules': [ { 'properties': { 'element-id': 'fake-password-rule-oid-1', 'name': 'fake_password_rule_name_1', 'description': 'Password Rule #1', 'type': 'system-defined', }, }, ], 'tasks': [ { 'properties': { 'element-id': 'fake-task-oid-1', 'name': 'fake_task_name_1', 'description': 'Task #1', }, }, { 'properties': { 'element-id': 'fake-task-oid-2', 'name': 'fake_task_name_2', 'description': 'Task #2', }, }, ], 'ldap_server_definitions': [ { 'properties': { 'element-id': 'fake-ldap-srv-def-oid-1', 'name': 'fake_ldap_srv_def_name_1', 'description': 'LDAP Srv Def #1', 'primary-hostname-ipaddr': '10.11.12.13', }, }, ], } ], 'cpcs': [ { 'properties': { 'object-id': '1', 'name': 'cpc_1', 'dpm-enabled': False, 'description': 'CPC #1 (classic mode)', 'status': 'operating', }, 'lpars': [ { 'properties': { 'object-id': '1', 'name': 'lpar_1', 'description': 'LPAR #1 in CPC #1', 'status': 'not-activated', }, }, ], 'reset_activation_profiles': [ { 'properties': { 'name': 'r1', 'description': 'Reset profile #1 in CPC #1', }, }, ], 'image_activation_profiles': [ { 'properties': { 'name': 'i1', 'description': 'Image profile #1 in CPC #1', }, }, ], 'load_activation_profiles': [ { 'properties': { 'name': 'L1', 'description': 'Load profile #1 in CPC #1', }, }, ], }, { 'properties': { 'object-id': '2', 'name': 'cpc_2', 'dpm-enabled': True, 'description': 'CPC #2 (DPM mode)', 'status': 'active', }, 'partitions': [ { 'properties': { 'object-id': '1', 'name': 'partition_1', 'description': 'Partition #1 in CPC #2', 'status': 'stopped', 'hba-uris': [], # updated automatically 'nic-uris': [], # updated automatically 'virtual-function-uris': [], # updated autom. 
}, 'hbas': [ { 'properties': { 'element-id': '1', 'name': 'hba_1', 'description': 'HBA #1 in Partition #1', 'adapter-port-uri': '/api/adapters/2/storage-ports/1', 'wwpn': 'CFFEAFFE00008001', 'device-number': '1001', }, }, ], 'nics': [ { 'properties': { 'element-id': '1', 'name': 'nic_1', 'description': 'NIC #1 in Partition #1', 'network-adapter-port-uri': '/api/adapters/3/network-ports/1', 'device-number': '2001', }, }, ], 'virtual_functions': [ { 'properties': { 'element-id': '1', 'name': 'vf_1', 'description': 'VF #1 in Partition #1', 'device-number': '3001', }, }, ], }, ], 'adapters': [ { 'properties': { 'object-id': '1', 'name': 'osa_1', 'description': 'OSA #1 in CPC #2', 'adapter-family': 'osa', 'network-port-uris': [], # updated automatically 'status': 'active', 'adapter-id': 'BEF', }, 'ports': [ { 'properties': { 'element-id': '1', 'name': 'osa_1_port_1', 'description': 'Port #1 of OSA #1', }, }, ], }, { 'properties': { 'object-id': '2', 'name': 'fcp_2', 'description': 'FCP #2 in CPC #2', 'adapter-family': 'ficon', 'storage-port-uris': [], # updated automatically 'adapter-id': 'CEF', }, 'ports': [ { 'properties': { 'element-id': '1', 'name': 'fcp_2_port_1', 'description': 'Port #1 of FCP #2', }, }, ], }, { 'properties': { 'object-id': '2a', 'name': 'fcp_2a', 'description': 'FCP #2a in CPC #2', 'adapter-family': 'ficon', 'storage-port-uris': [], # updated automatically 'adapter-id': 'CEE', }, 'ports': [ { 'properties': { 'element-id': '1', 'name': 'fcp_2a_port_1', 'description': 'Port #1 of FCP #2a', }, }, ], }, { 'properties': { 'object-id': '3', 'name': 'roce_3', 'description': 'ROCE #3 in CPC #2', 'adapter-family': 'roce', 'network-port-uris': [], # updated automatically 'adapter-id': 'DEF', }, 'ports': [ { 'properties': { 'element-id': '1', 'name': 'roce_3_port_1', 'description': 'Port #1 of ROCE #3', }, }, ], }, { 'properties': { 'object-id': '4', 'name': 'crypto_4', 'description': 'Crypto #4 in CPC #2', 'adapter-family': 'crypto', 'adapter-id': 'EEF', 'detected-card-type': 'crypto-express-5s', 'crypto-number': 7, 'crypto-type': 'accelerator', }, }, ], 'virtual_switches': [ { 'properties': { 'object-id': '1', 'name': 'vswitch_osa_1', 'description': 'Vswitch for OSA #1 in CPC #2', }, }, ], }, ], } hmc = FakedHmc('fake-hmc', '2.13.1', '1.8') hmc.add_resources(hmc_resources) return hmc, hmc_resources class TestGenericGetPropertiesHandler(object): """All tests for class GenericGetPropertiesHandler.""" def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)', GenericGetPropertiesHandler), ) self.urihandler = UriHandler(self.uris) def test_get(self): # the function to be tested: cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True) exp_cpc1 = { 'object-id': '1', 'object-uri': '/api/cpcs/1', 'class': 'cpc', 'parent': None, 'name': 'cpc_1', 'dpm-enabled': False, 'is-ensemble-member': False, 'description': 'CPC #1 (classic mode)', 'status': 'operating', } assert cpc1 == exp_cpc1 def test_get_error_offline(self): self.hmc.disable() with pytest.raises(ConnectionError): # the function to be tested: self.urihandler.get(self.hmc, '/api/cpcs/1', True) class _GenericGetUpdatePropertiesHandler(GenericGetPropertiesHandler, GenericUpdatePropertiesHandler): pass class TestGenericUpdatePropertiesHandler(object): """All tests for class GenericUpdatePropertiesHandler.""" def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/cpcs/([^/]+)', _GenericGetUpdatePropertiesHandler), ) self.urihandler = 
UriHandler(self.uris) def test_update_verify(self): update_cpc1 = { 'description': 'CPC #1 (updated)', } # the function to be tested: resp = self.urihandler.post(self.hmc, '/api/cpcs/1', update_cpc1, True, True) assert resp is None cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True) assert cpc1['description'] == 'CPC #1 (updated)' def test_post_error_offline(self): self.hmc.disable() update_cpc1 = { 'description': 'CPC #1 (updated)', } with pytest.raises(ConnectionError): # the function to be tested: self.urihandler.post(self.hmc, '/api/cpcs/1', update_cpc1, True, True) class TestGenericDeleteHandler(object): """All tests for class GenericDeleteHandler.""" def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/console/ldap-server-definitions/([^/]+)', GenericDeleteHandler), ) self.urihandler = UriHandler(self.uris) def test_delete(self): uri = '/api/console/ldap-server-definitions/fake-ldap-srv-def-oid-1' # the function to be tested: ret = self.urihandler.delete(self.hmc, uri, True) assert ret is None # Verify it no longer exists: with pytest.raises(KeyError): self.hmc.lookup_by_uri(uri) def test_delete_error_offline(self): self.hmc.disable() uri = '/api/console/ldap-server-definitions/fake-ldap-srv-def-oid-1' with pytest.raises(ConnectionError): # the function to be tested: self.urihandler.delete(self.hmc, uri, True) class TestVersionHandler(object): """All tests for class VersionHandler.""" def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/version', VersionHandler), ) self.urihandler = UriHandler(self.uris) def test_get_version(self): # the function to be tested: resp = self.urihandler.get(self.hmc, '/api/version', True) api_major, api_minor = self.hmc.api_version.split('.') exp_resp = { 'hmc-name': self.hmc.hmc_name, 'hmc-version': self.hmc.hmc_version, 'api-major-version': int(api_major), 'api-minor-version': int(api_minor), } assert resp == exp_resp class TestConsoleHandler(object): """All tests for class ConsoleHandler.""" def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/console', ConsoleHandler), ) self.urihandler = UriHandler(self.uris) # Note: There is no test_list() function because there is no List # operation for Console resources. 
def test_get(self): # the function to be tested: console = self.urihandler.get(self.hmc, '/api/console', True) exp_console = { 'object-uri': '/api/console', 'name': 'fake_console_name', 'class': 'console', 'parent': None, } assert console == exp_console class TestConsoleRestartHandler(object): """All tests for class ConsoleRestartHandler.""" def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/console', ConsoleHandler), (r'/api/console/operations/restart', ConsoleRestartHandler), ) self.urihandler = UriHandler(self.uris) def test_restart_success(self): body = { 'force': False, } # the function to be tested: resp = self.urihandler.post( self.hmc, '/api/console/operations/restart', body, True, True) assert self.hmc.enabled assert resp is None def test_restart_error_not_found(self): # Remove the faked Console object self.hmc.consoles.remove(None) body = { 'force': False, } with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post( self.hmc, '/api/console/operations/restart', body, True, True) exc = exc_info.value assert exc.reason == 1 class TestConsoleShutdownHandler(object): """All tests for class ConsoleShutdownHandler.""" def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/console', ConsoleHandler), (r'/api/console/operations/shutdown', ConsoleShutdownHandler), ) self.urihandler = UriHandler(self.uris) def test_shutdown_success(self): body = { 'force': False, } # the function to be tested: resp = self.urihandler.post( self.hmc, '/api/console/operations/shutdown', body, True, True) assert not self.hmc.enabled assert resp is None def test_shutdown_error_not_found(self): # Remove the faked Console object self.hmc.consoles.remove(None) body = { 'force': False, } with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post( self.hmc, '/api/console/operations/shutdown', body, True, True) exc = exc_info.value assert exc.reason == 1 class TestConsoleMakePrimaryHandler(object): """All tests for class ConsoleMakePrimaryHandler.""" def setup_method(self): self.hmc, self.hmc_resources = standard_test_hmc() self.uris = ( (r'/api/console', ConsoleHandler), (r'/api/console/operations/make-primary', ConsoleMakePrimaryHandler), ) self.urihandler = UriHandler(self.uris) def test_make_primary_success(self): # the function to be tested: resp = self.urihandler.post( self.hmc, '/api/console/operations/make-primary', None, True, True) assert self.hmc.enabled assert resp is None def test_make_primary_error_not_found(self): # Remove the faked Console object self.hmc.consoles.remove(None) body = { 'force': False, } with pytest.raises(InvalidResourceError) as exc_info: # the function to be tested: self.urihandler.post( self.hmc, '/api/console/operations/make-primary', body, True, True) exc = exc_info.value
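The handler tests above all follow one pattern: a table of (URI regex, handler class) pairs is passed to UriHandler, which matches the request URI and forwards the call together with the captured groups (hence the tuple('1') seen in the assertions). A stripped-down sketch of that dispatch idea; this is not the real zhmcclient.mock implementation, only its shape, and the class names here are illustrative:

import re

# Minimal illustration of the regex-table dispatch the tests above rely on.
class MiniUriHandler:
    def __init__(self, uris):
        self.uris = [(re.compile(pattern + '$'), handler)
                     for pattern, handler in uris]

    def get(self, hmc, uri, logon_required):
        for pattern, handler in self.uris:
            m = pattern.match(uri)
            if m:
                # captured groups become the handler's uri_parms tuple
                return handler.get('GET', hmc, uri, m.groups(), logon_required)
        raise KeyError("no handler for %s" % uri)

class EchoHandler:
    @staticmethod
    def get(method, hmc, uri, uri_parms, logon_required):
        return {'object-id': uri_parms[0], 'object-uri': uri}

handler = MiniUriHandler([(r'/api/cpcs/([^/]+)', EchoHandler)])
assert handler.get(None, '/api/cpcs/1', True) == {
    'object-id': '1', 'object-uri': '/api/cpcs/1'}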
with site specified tempfile.tempdir = '/output' blast_tmpfile = tempfile.NamedTemporaryFile() #f = open('/output/blast_tmp', 'wb') if id_pos_res == {}: for id in id_seq: wr = '>'+id+'\n'+id_seq[id]+'\n' blast_tmpfile.write(wr) else: for id in id_pos_res: try: wr = '>'+id+'\n'+id_seq[id]+'\n' blast_tmpfile.write(wr) #print(wr) #f.write(wr) except: sys.stderr.write("No sequence available for '%s'\n"%id) blast_tmpfile.flush() blastDB = os.path.join(datadir, "%s.protein.sequences.fa"%(organism)).replace(' ', '\\ ') # print("blastdb path is" + blastDB) # Check if blast database is actually initialized, if not: do it if not os.path.isfile(blastDB+'.pin'): #print("Is this it??") command = "%s/formatdb -i %s"%(blastDir.rsplit("/", 1)[0], blastDB) sys.stderr.write("Looks like blast database is not initialized, trying to run:\n%s\n"%command) myPopen(command) command = "%s -a %s -p blastp -e 1e-10 -m 8 -d %s -i %s | sort -k12nr" % ( blastDir, number_of_processes, blastDB, blast_tmpfile.name) #print(blastDir, blastDB, '/output/blast_tmp') #if os.path.isfile(blast_tmpfile.name): #print('File exists') # to save time - jhkim if fast and os.path.isfile(fn_blast_output): #print('fast') blast_out = ReadLines(fn_blast_output) else: #print('mopen') blast_out = myPopen(command) if leave_intermediates: WriteString(fn_blast_output, "".join(blast_out)) #print(blast_out) for line in blast_out: tokens = line.split('\t') incoming = tokens[0] if incoming not in incoming2string: # get rid of organism prefix string = tokens[1].replace("%s."%organism, "") identity = float(tokens[2]) evalue = float(tokens[-2]) if string in string2incoming: sys.stderr.write('Best hit for '+incoming+' is not reciprocal\n') if identity < 90: sys.stderr.write('Best hit for '+incoming+' has only '+fpformat.fix(identity, 2)+' %identity\n') if evalue > 1e-40: sys.stderr.write('Best hit for '+incoming+' has high E-value '+fpformat.sci(evalue, 2)+' \n') if incoming in incoming2string: incoming2string[incoming][string] = True else: incoming2string[incoming] = { string: True } if string in string2incoming: string2incoming[string][incoming] = True else: string2incoming[string] = { incoming: True } else: pass return incoming2string, string2incoming # Random mapping of incoming identifiers to string def mapRandom(id_seq): sys.stderr.write("Mapping random\n") import random incoming2string = {} string2incoming = {} stringIDs = [] file = open('%s/%s.protein.sequences.fa'%(datadir,organism), 'r') data = file.readlines() file.close() for line in data: if line[0] == '>': name = line[1:-1] stringIDs.append(name) string2incoming[name] = {} max = len(stringIDs) - 1 for incoming in id_seq: if incoming not in incoming2string: int = random.randint(0, max) string = stringIDs[int] incoming2string[incoming] = { string: True } if string in string2incoming: string2incoming[string][incoming] = True else: string2incoming[string] = { incoming: True } return incoming2string, string2incoming # In case we run NetworKIN on the same sequence set we use in STRING, we can skip the mapping by blast def mapOne2one(id_seq): sys.stderr.write("Mapping one2one\n") incoming2string = {} string2incoming = {} for incoming in id_seq: incoming2string[incoming] = {incoming: True} string2incoming[incoming] = {incoming: True} return incoming2string, string2incoming # In case we have a better mapping then we can expect from blasting, we can use an external file to do so # file format: # incoming ID -> STRING ID def mapFromFile(filename): sys.stderr.write("Mapping using external mapping 
file\n") incoming2string = {} string2incoming = {} command = "cat %s"%(filename) try: mappingFile = myPopen(command) except: sys.stderr.write("Going to sleep, crashed with '%s'\n"%command) time.sleep(3600) for line in mappingFile: if(re.match('^#',line)): continue line = line.strip() tokens = line.split('\t') incoming = tokens[0] string = tokens[1] if incoming in incoming2string: incoming2string[incoming][string] = True else: incoming2string[incoming] = { string: True } if string in string2incoming: string2incoming[string][incoming] = True else: string2incoming[string] = { incoming: True } return incoming2string, string2incoming # Load the precalculated STRING network file def loadSTRINGdata(string2incoming, datadir, number_of_processes): #command = 'gzip -cd %s/%s.bestpath.tsv.gz'%(datadir, organism) fn_bestpath = "%s/%s.string_000_%04d_%04d.tsv.gz" % (os.path.join(datadir, "bestpath"), organism, dPenalty[organism]["hub penalty"], dPenalty[organism]["length penalty"]) if not os.path.isfile(fn_bestpath): sys.stderr.write("Best path file does not exist: %s" % fn_bestpath) ''' command = "gzip -cd %s" % fn_bestpath try: data = myPopen(command) except: sys.stderr.write("Error loading STRING data using '%s', sleeping fo 1h.\n"%command) time.sleep(3600) ''' tree_pred_string_data = {} # for memory efficiency import gzip f = gzip.open(fn_bestpath) #for line in data: while (True): line = f.readline() if line == '': break line = line.strip() tokens = line.split('\t') if len(tokens) == 8: (tree, group, name, string1, string2, stringscore, stringscore_indirect, path) = tokens elif len(tokens) == 7: (tree, group, name, string1, string2, stringscore, stringscore_indirect) = tokens path = "" # path to itself, we will miss the path information elif len(tokens) == 6: (name, string1, string2, stringscore, stringscore_indirect, path) = tokens elif len(tokens) == 5: (name, string1, string2, stringscore, stringscore_indirect) = tokens path = "" # path to itself, we will miss the path information if string2 in string2incoming: if string2 in tree_pred_string_data: tree_pred_string_data[string2][string1] = {"_name": name} else: tree_pred_string_data[string2] = {string1: {"_name": name}} if options.path == "direct": tree_pred_string_data[string2][string1]["_score"] = float(stringscore) elif options.path == "indirect": tree_pred_string_data[string2][string1]["_score"] = float(stringscore_indirect) # Use indirect path else: raise "Path information should be either direct or indirect." 
tree_pred_string_data[string2][string1]["_path"] = path else: pass f.close() return tree_pred_string_data def InsertValueIntoMultiLevelDict(d, keys, value): for i in range(len(keys)-1): if not d.has_key(keys[i]): d[keys[i]] = {} d = d[keys[i]] if not d.has_key(keys[-1]): d[keys[-1]] = [] d[keys[-1]].append(value) def ReadGroup2DomainMap(path_group2domain_map): map_group2domain = {} # KIN group name f = open(path_group2domain_map, "rU") for line in f.readlines(): tokens = line.split() InsertValueIntoMultiLevelDict(map_group2domain, tokens[:2], tokens[2]) f.close() return map_group2domain def SetValueIntoMultiLevelDict(d, keys, value): for i in range(len(keys)-1): if not d.has_key(keys[i]): d[keys[i]] = {} d = d[keys[i]] if d.has_key(keys[-1]) and type(d[keys[-1]]) != type(value): sys.stderr.write("Caution: multi-dict already has a value; trying to assign a value of a different type\n") pass if d.has_key(keys[-1]): if d[keys[-1]] != value: sys.stderr.write("This operation replaces a value (%s)" % " ".join(map(str, keys))) d[keys[-1]] = value def printResult(id_pos_tree_pred, tree_pred_string_data, incoming2string, string_alias, string_desc, organism, mode, dir_likelihood_conversion_tbl, map_group2domain): ALPHA = ALPHAS[organism] species = dSpeciesName[organism] dLRConvTbl = {} for fname in glob.glob(os.path.join(dir_likelihood_conversion_tbl, "conversion_tbl_*_smooth*")): netphorest_or_string, species_of_conversion_table, tree, player_name = re.findall("conversion_tbl_([a-z]+)_smooth_([a-z]+)_([A-Z0-9]+)_([a-zA-Z0-9_/-]+)", os.path.basename(os.path.splitext(fname)[0]))[0] #species, tree, player_name = os.path.basename(os.path.splitext(fname)[0]).rsplit('_', 3)[1:] if species_of_conversion_table != species: continue conversion_tbl = ReadConversionTableBin(fname) SetValueIntoMultiLevelDict(dLRConvTbl, [species_of_conversion_table, tree, player_name, netphorest_or_string], conversion_tbl) if options.verbose: sys.stderr.write("Conversion table %s %s %s %s\n" % (species_of_conversion_table, tree, player_name, netphorest_or_string) ) # For each ID in NetPhorest for id in id_pos_tree_pred: # We have a mapping to STRING if id in incoming2string: # For each predicted position for pos in id_pos_tree_pred[id]: # For each of the trees (KIN, SH2 etc.) 
for tree in id_pos_tree_pred[id][pos]: score_results = {} # For each single classifier for pred in id_pos_tree_pred[id][pos][tree]: # For each mapped sequence for string1 in incoming2string[id]: if string1 in string_alias: bestName1 = string_alias[string1] else: bestName1 = '' if string1 in string_desc: desc1 = string_desc[string1] else: desc1 = '' if string1 in tree_pred_string_data: (res, peptide, netphorestScore) = id_pos_tree_pred[id][pos][tree][pred] for string2 in tree_pred_string_data[string1]: if string2 in string_alias: bestName2 = string_alias[string2] else: bestName2 = '' if string2 in string_desc: desc2 = string_desc[string2] else: desc2 = '' stringScore = tree_pred_string_data[string1][string2]["_score"] path = tree_pred_string_data[string1][string2]["_path"] name = tree_pred_string_data[string1][string2]["_name"] # string2 = kinase #sys.stderr.write("%s %s %s\n" % (tree, pred, name)) if not map_group2domain.has_key(tree) or not map_group2domain[tree].has_key(pred) or not name in map_group2domain[tree][pred]: if options.string_for_uncovered: if species == "human": if tree in ["1433", "BRCT", "WW", "PTB", "WD40", "FHA"]: conversion_tbl_string = dLRConvTbl[species]["SH2"]["general"]["string"] else: conversion_tbl_string = dLRConvTbl[species][tree]["general"]["string"] elif species == "yeast": conversion_tbl_string = dLRConvTbl[species][tree]["general"]["string"] else: raise "This species is not supported" likelihood_netphorest = 1 likelihood_string = ConvertScore2L(stringScore, conversion_tbl_string) unified_likelihood = likelihood_netphorest * likelihood_string networkinScore = unified_likelihood # NetworKIN result result = id+'\t'+res+str(pos)+'\t'+tree+'\t'+pred+'\t'+name+'\t'+ \ fpformat.fix(networkinScore,4)+'\t'+"N/A"+'\t'+fpformat.fix(stringScore,4)+'\t'+ \ string1+'\t'+string2+'\t'+bestName1+'\t'+bestName2+'\t'+desc1+'\t'+desc2+'\t'+peptide+'\t'+path+'\n' else: continue else: #sys.stderr.write("%s %s\n" % (string1, string2)) if species == "human": if tree in ["1433", "BRCT", "WW", "PTB", "WD40", "FHA"]: conversion_tbl_netphorest = dLRConvTbl[species]["SH2"]["general"]["netphorest"] conversion_tbl_string = dLRConvTbl[species]["SH2"]["general"]["string"] else: if dLRConvTbl[species][tree].has_key(name): conversion_tbl_netphorest = dLRConvTbl[species][tree][name]["netphorest"] conversion_tbl_string = dLRConvTbl[species][tree][name]["string"] else: conversion_tbl_netphorest = dLRConvTbl[species][tree]["general"]["netphorest"] conversion_tbl_string = dLRConvTbl[species][tree]["general"]["string"] elif species == "yeast": if dLRConvTbl[species][tree].has_key(name): conversion_tbl_netphorest = dLRConvTbl[species][tree][name]["netphorest"] conversion_tbl_string = dLRConvTbl[species][tree][name]["string"] else: conversion_tbl_netphorest = dLRConvTbl[species][tree]["general"]["netphorest"] conversion_tbl_string = dLRConvTbl[species][tree]["general"]["string"] else: raise "This species is not supported" likelihood_netphorest = ConvertScore2L(netphorestScore, conversion_tbl_netphorest) likelihood_string = ConvertScore2L(stringScore, conversion_tbl_string) unified_likelihood = likelihood_netphorest * likelihood_string networkinScore = unified_likelihood #networkinScore = pow(stringScore, ALPHA)*pow(netphorestScore, 1-ALPHA) # NetworKIN result result = id+'\t'+res+str(pos)+'\t'+tree+'\t'+pred+'\t'+name+'\t'+ \ fpformat.fix(networkinScore,4)+'\t'+fpformat.fix(netphorestScore,4)+'\t'+fpformat.fix(stringScore,4)+'\t'+ \ 
string1+'\t'+string2+'\t'+bestName1+'\t'+bestName2+'\t'+desc1+'\t'+desc2+'\t'+peptide+'\t'+path+'\n' if networkinScore not in score_results: score_results[networkinScore] = [] score_results[networkinScore].append(result) if mode == 'network': highestScore = sorted(score_results.keys(), reverse=True)[0] if len(score_results[highestScore]) > 1: index = random.randint(0,len(score_results[highestScore])-1) sys.stdout.write((score_results[highestScore][index])) else: sys.stdout.write((score_results[highestScore][0])) pass else: for score in sorted(score_results.keys(), reverse=True): sys.stdout.write("".join(score_results[score])) else: pass return #MAIN def Main(): sys.stderr.write("Reading fasta input file\n") id_seq = readFasta(fastafile) #print('0000') #print(id_seq) if options.verbose: sys.stderr.write("%s sequences loaded\n"%len(id_seq.keys())) if sitesfile: sys.stderr.write("Reading phosphosite file\n") input_type = CheckInputType(sitesfile) if input_type == NETWORKIN_SITE_FILE: id_pos_res = readPhosphoSites(sitesfile) elif input_type == PROTEOME_DISCOVERER_SITE_FILE: id_pos_res = readPhosphoSitesProteomeDiscoverer(fn_fasta, sitesfile) elif input_type == MAX_QUANT_DIRECT_OUTPUT_FILE: id_pos_res = readPhosphoSitesMaxQuant(sitesfile) else: id_pos_res = {} #print("ID POS RES") #print(id_pos_res) sys.stderr.write("Loading aliases and descriptions\n") (string_alias, string_desc) = readAliasFiles(args[0], options.datadir); if organism == "9606": path_group2domain_map = os.path.join(options.datadir, "group_human_protein_name_map.tsv") elif organism == "4932": path_group2domain_map = os.path.join(options.datadir, "group_yeast_KIN.tsv") map_group2domain = ReadGroup2DomainMap(path_group2domain_map) #print(map_group2domain) # Default way of mapping using BLAST incoming2string, string2incoming = mapPeptides2STRING(blastDir, organism, fastafile.name, id_pos_res, id_seq, options.threads, options.datadir, options.fast, options.leave) #print(string2incoming) # Hack for random mapping to proteins #incoming2string, string2incoming = mapRandom(id_seq) # Use if a mapping file for the input can be provided #incoming2string, string2incoming = mapFromFile("/home/red1/hhorn/projects2/2012_03_22_Jesper/ensmusp_ensp.tsv") # Used if only ensembl of the same version used #incoming2string, string2incoming = mapOne2one(id_seq) # Load the STRING network data sys.stderr.write("Loading STRING network\n") tree_pred_string_data = loadSTRINGdata(string2incoming, options.datadir, options.threads) #print('String data') #print(tree_pred_string_data) # Run NetPhorest sys.stderr.write("Running NetPhorest") netphorestTmpFiles = runNetPhorest(id_seq, id_pos_res, options.compress, options.threads, options.active_threads, options.fast, options.leave) sys.stderr.write('\n') #print('--------------') # print(len(netphorestTmpFiles), netphorestTmpFiles[0].name) #print('---------------') # Writing result to STDOUT sys.stderr.write("Writing results\n") sys.stdout.write("#Name\tPosition\tTree\tNetPhorest Group\tKinase/Phosphatase/Phospho-binding domain\tNetworKIN score\tNetPhorest probability\tSTRING score\tTarget STRING ID\tKinase/Phosphatase/Phospho-binding domain STRING ID\tTarget description\tKinase/Phosphatase/Phospho-binding domain description\tTarget Name\tKinase/Phosphatase/Phospho-binding domain Name\tPeptide sequence window\tIntermediate nodes\n") for i in range(len(netphorestTmpFiles)): id_pos_tree_pred = parseNetphorestFile(netphorestTmpFiles[i].name, id_pos_res, options.compress) if options.path == "direct": 
dir_likelihood_conversion_tbl = os.path.join(options.datadir, "likelihood_conversion_table_direct") elif options.path == "indirect": dir_likelihood_conversion_tbl = os.path.join(options.datadir, "likelihood_conversion_table_indirect") else: raise "Path information should be either direct or indirect." printResult(id_pos_tree_pred, tree_pred_string_data, incoming2string, string_alias, string_desc, args[0], options.mode, dir_likelihood_conversion_tbl, map_group2domain) return if __name__ == '__main__': #BLAST try: blastDir = os.environ['BLAST_PATH'] except: blastDir="" #NETPHOREST try: netphorest_bin = os.environ['NETPHOREST_PATH'] except: netphorest_bin="" usage = "usage: %prog [options] organism FASTA-file [sites-file]" parser = OptionParser(usage=usage, version="%prog 3.0") parser.add_option("-n", "--netphorest", dest="netphorest_bin", default=netphorest_bin, help="set the location of the NetPhorest binary, overwrites the 'NETPHOREST_PATH' environment variable. [ENV: %default]") parser.add_option("-b", "--blast", dest="blast", default=blastDir, help="set the directory for the BLAST binaries (formatdb and blastall), overwrites the 'BLAST_PATH' environment variable. [ENV: %default]") parser.add_option("-m", "--mode", dest="mode", default=False, help="if set to 'network', gives only the one best-scoring result for each site. In case of multiple candidate kinases with the same score, the selection happens randomly. [default: %default]") parser.add_option("-p", "--path", dest="path", default="direct", help="select which STRING score is combined with the NetPhorest probability: 'direct' uses the direct-path score, 'indirect' uses the indirect-path score. [default: %default]") parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="print
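The scoring step in printResult above combines the two evidence sources multiplicatively: networkinScore = likelihood_netphorest * likelihood_string, where each raw score is first mapped to a likelihood through a conversion table via ConvertScore2L. Since ReadConversionTableBin and ConvertScore2L are not shown in this file, the sketch below assumes a table of (raw score, likelihood) pairs read off a smoothed calibration curve and interpolated linearly; the table values are made up for illustration:

# Hedged sketch of the score unification above. The real conversion-table
# format is an ASSUMPTION here: (raw_score, likelihood) pairs, linearly
# interpolated between the tabulated points and clamped at the ends.
def convert_score_to_likelihood(score, table):
    table = sorted(table)
    if score <= table[0][0]:
        return table[0][1]
    if score >= table[-1][0]:
        return table[-1][1]
    for (s0, l0), (s1, l1) in zip(table, table[1:]):
        if s0 <= score <= s1:
            return l0 + (l1 - l0) * (score - s0) / (s1 - s0)

netphorest_tbl = [(0.0, 0.1), (0.5, 1.0), (1.0, 5.0)]  # illustrative values
string_tbl = [(0.0, 0.2), (1.0, 4.0)]                   # illustrative values

likelihood_netphorest = convert_score_to_likelihood(0.75, netphorest_tbl)  # 3.0
likelihood_string = convert_score_to_likelihood(0.5, string_tbl)           # 2.1
networkin_score = likelihood_netphorest * likelihood_string                # 6.3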
:param callback function: The callback function for asynchronous request. (optional) :param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required) :param SNMPCommunityAddDescriptorList body: (required) :param str controller: Controller selection :param bool verbose_error_response: :return: ReturnCodeWithRefList If the method is called asynchronously, returns the request thread. :raises: ValueError If the required params are not provided or if the response data format is unknown. TypeError: When the data type of response data is different from what we are expecting ApiException: Occurs when we get a HTTP error code (422 and above). """ all_params = ['system_id', 'body', 'controller', 'verbose_error_response'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method symbol_add_snmp_community" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'system_id' is set if ('system_id' not in params) or (params['system_id'] is None): raise ValueError("Missing the required parameter `system_id` when calling `symbol_add_snmp_community`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `symbol_add_snmp_community`") resource_path = '/storage-systems/{system-id}/symbol/addSNMPCommunity'.replace('{format}', 'json') path_params = {} if 'system_id' in params: path_params['system-id'] = params['system_id'] query_params = {} if 'controller' in params: query_params['controller'] = params['controller'] if 'verbose_error_response' in params: query_params['verboseErrorResponse'] = params['verbose_error_response'] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['basicAuth'] response = self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ReturnCodeWithRefList', auth_settings=auth_settings, callback=params.get('callback')) return response def symbol_add_snmp_trap_destination(self, system_id, body, **kwargs): """ This procedure is used to add an SNMP trap destination. Documented return codes: ok, snmpIncompatibleIpv4Address, snmpIncompatibleIpv6Address. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.symbol_add_snmp_trap_destination(system_id, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. 
(required) :param SNMPTrapDestinationAddDescriptorList body: (required) :param str controller: Controller selection :param bool verbose_error_response: :return: ReturnCodeWithRefList If the method is called asynchronously, returns the request thread. :raises: ValueError If the required params are not provided or if the response data format is unknown. TypeError: When the data type of response data is different from what we are expecting ApiException: Occurs when we get a HTTP error code (422 and above). """ all_params = ['system_id', 'body', 'controller', 'verbose_error_response'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method symbol_add_snmp_trap_destination" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'system_id' is set if ('system_id' not in params) or (params['system_id'] is None): raise ValueError("Missing the required parameter `system_id` when calling `symbol_add_snmp_trap_destination`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `symbol_add_snmp_trap_destination`") resource_path = '/storage-systems/{system-id}/symbol/addSNMPTrapDestination'.replace('{format}', 'json') path_params = {} if 'system_id' in params: path_params['system-id'] = params['system_id'] query_params = {} if 'controller' in params: query_params['controller'] = params['controller'] if 'verbose_error_response' in params: query_params['verboseErrorResponse'] = params['verbose_error_response'] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['basicAuth'] response = self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ReturnCodeWithRefList', auth_settings=auth_settings, callback=params.get('callback')) return response def symbol_adopt_all_drives(self, system_id, **kwargs): """ This procedure caused the storage array to \"adopt\" all foreign drives that are eligible to be adopted. Adoption means accepting or incorporating elements of a foreign drive's configuration database into that of the recipient array. It must be possible for the storage array to match the adoption-candidate drives with ones that are currently being tracked as \"not present.\" In addition, several other technical criteria must be met in order for the adoption to succeed. Documented return codes: ok, error, noDrivesAdopted, someDrivesAdopted. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.symbol_adopt_all_drives(system_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str system_id: The unique identifier of the storage-system. 
This may be the id or the WWN. (required) :param str controller: Controller selection :param bool verbose_error_response: :return: str If the method is called asynchronously, returns the request thread. :raises: ValueError If the required params are not provided or if the response data format is unknown. TypeError: When the data type of response data is different from what we are expecting ApiException: Occurs when we get a HTTP error code (422 and above). """ all_params = ['system_id', 'controller', 'verbose_error_response'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method symbol_adopt_all_drives" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'system_id' is set if ('system_id' not in params) or (params['system_id'] is None): raise ValueError("Missing the required parameter `system_id` when calling `symbol_adopt_all_drives`") resource_path = '/storage-systems/{system-id}/symbol/adoptAllDrives'.replace('{format}', 'json') path_params = {} if 'system_id' in params: path_params['system-id'] = params['system_id'] query_params = {} if 'controller' in params: query_params['controller'] = params['controller'] if 'verbose_error_response' in params: query_params['verboseErrorResponse'] = params['verbose_error_response'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['basicAuth'] response = self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='str', auth_settings=auth_settings, callback=params.get('callback')) return response def symbol_adopt_drive(self, system_id, body, **kwargs): """ This procedure caused the storage array to \"adopt\" a foreign drive that is eligible to be adopted. Adoption means accepting or incorporating elements of a foreign drive's configuration database into that of the recipient array. It must be possible for the storage array to match the adoption-candidate drive with one that is currently being tracked as \"not present.\" In addition, several other technical criteria must be met in order for the adoption to succeed. Documented return codes: ok, error. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.symbol_adopt_drive(system_id, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required) :param str body: A SYMbol reference to the drive that is to be adopted. (required) :param str controller: Controller selection :param bool verbose_error_response: :return: str If the method is called asynchronously, returns the request thread. :raises: ValueError If the required params are
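# Usage sketch for the generated SYMbol endpoints above. `api` stands in for an
# instance of the generated API client class (its constructor is not shown in
# this excerpt), and the system id is a placeholder; the call pattern itself
# follows the docstrings above.
from pprint import pprint

def callback_function(response):
    # Invoked from the request thread once the asynchronous call completes.
    pprint(response)

# Synchronous call: blocks until the SYMbol procedure returns its return code.
result = api.symbol_adopt_all_drives('system-id-or-wwn')

# Asynchronous call: returns the request thread immediately; the result is
# delivered to the callback instead.
thread = api.symbol_adopt_all_drives('system-id-or-wwn',
                                     controller='a',  # optional controller selection
                                     callback=callback_function)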
# IliasDownloaderUniMA/IliasDL.py #!/usr/bin/env python3 from requests import session, get, ConnectionError from bs4 import BeautifulSoup from urllib.parse import urljoin from pathlib import Path as plPath from dateparser import parse as parsedate from datetime import datetime from multiprocessing.pool import ThreadPool import math import os import shutil import re class IliasDownloaderUniMA(): """ Base class """ base_url = "https://ilias.uni-mannheim.de/" desktop_url = "https://ilias.uni-mannheim.de/ilias.php?baseClass=ilPersonalDesktopGUI" def __init__(self): """ Constructs a new instance. """ self.current_semester_pattern = self.getCurrentSemester() self.courses = [] self.to_scan = [] self.files = [] self.params = { 'num_scan_threads' : 5, 'num_download_threads': 5, 'download_path': os.getcwd(), 'tutor_mode': False, 'verbose' : False } self.session = None self.login_soup = None self.background_task_files = [] self.background_tasks_to_clean = [] self.external_scrapers = [] def getCurrentSemester(self): d = datetime.now() if d.month in range(2, 8): return rf"\((FSS|ST) {d.year}\)" else: return rf"\((HWS|WT) {d.year}\)" def setParam(self, param, value): """ Sets a parameter. :param param: The parameter we want to alter :type param: str :param value: The new parameter value :type value: str or int """ if param in ['num_scan_threads', 'num_download_threads']: if type(value) is int: self.params[param] = value if param == 'download_path': if os.path.isdir(value): self.params[param] = value if param == 'verbose': if type(value) is bool: self.params[param] = value if param == 'tutor_mode': if type(value) is bool: self.params[param] = value def createIliasUrl(self, iliasid): """ Creates an ilias url from the ilias ref id. :param iliasid: The ilias ref id :type iliasid: int :returns: feasible url :rtype: str """ return self.base_url + "ilias.php?ref_id=" + str(iliasid) \ + "&cmd=frameset" + "&cmdClass=ilrepositorygui" \ + "&cmdNode=vi" + "&baseClass=ilrepositorygui" def login(self, login_id, login_pw): """ Create a requests session and log into ilias.uni-mannheim.de. :param login_id: the uni-id :type login_id: str :param login_pw: the password :type login_pw: str :raises TypeError: """ if type(login_id) is not str or type(login_pw) is not str: raise TypeError("...") # User data and user-agent data = {'username': login_id, 'password': login_pw} head = { 'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) "\ + "AppleWebKit/537.36 (KHTML, like Gecko) " \ + "Chrome/56.0.2924.87 Safari/537.36", 'Connection': 'keep-alive' } self.session = session() self.login_soup = BeautifulSoup(self.session.get("https://cas.uni-mannheim.de/cas/login").content, "lxml") form_data = self.login_soup.select('form[action^="/cas/login"] input') data.update({inp["name"]: inp["value"] for inp in form_data if inp["name"] not in data}) self.session.post('https://cas.uni-mannheim.de/cas/login', data=data, headers=head) self.login_soup = BeautifulSoup(self.session.get(self.base_url).content, "lxml") # Login successful? if not self.login_soup.find("a", {'id' : 'mm_desktop'}): raise ConnectionError("Couldn't log into ILIAS. Make sure your provided uni-id and the password are correct.") def addCourse(self, iliasid, course_name=None): """ Adds a course to the courses list. 
:param iliasid: the ilias ref id of the course :type iliasid: int """ url = self.createIliasUrl(iliasid) if not course_name: soup = BeautifulSoup(self.session.get(url).content, "lxml") course_name = soup.select_one("#mainscrolldiv > ol > li:nth-child(3) > a").text if (course_name := re.sub(r"\[.*\] ", "", course_name)): self.courses += [{'name' : course_name, 'url': url}] def addCourses(self, *iliasids): """ Adds multiple courses to the courses list. :param iliasids: the ilias ref ids of the courses :type iliasids: list """ for iliasid in iliasids: self.addCourse(iliasid) def addAllSemesterCourses(self, semester_pattern=None, exclude_ids=[]): """ Extracts the user's subscribed courses of the specified semester and adds them to the course list. :param semester_pattern: semester or regex for semester :type semester_pattern: string :param exclude_ids: optional ilias ids to ignore :type exclude_ids: list """ if semester_pattern is None: semester_pattern = self.current_semester_pattern # Performance gain in case of many courses semester_compiled = re.compile(semester_pattern) extract_compiled = re.compile(r"ref_id=(\d+)") for course in self.login_soup.find_all("a", "il_ContainerItemTitle"): course_name = course.text if semester_compiled.search(course_name): url = course["href"] if (match := extract_compiled.search(url)): iliasid = int(match.group(1)) if iliasid not in exclude_ids: self.addCourse(iliasid, course_name) def _determineItemType(self, url): if "target=file" in url: return "file" elif "calldirectlink" in url: return "link" elif "showThreads" in url: return "forum" elif "showOverview" in url: return "task" elif "ilHTLMPresentationGUI" in url: return "lernmaterialien" else: return "folder" def _parseFileProperties(self, bs_item): """ Tries to parse the file's size, modification date and the file ending. Note: there are some cases where Ilias doesn't provide a modification date and/or a file size. :param bs_item: The beautifulsoup item :type bs_item: bs4.element.Tag :returns: file ending, file size, file modification date :rtype: tuple """ props = bs_item.find_all('span', 'il_ItemProperty') p = [i for i in props if len(i.text.split()) > 0 and "Version" not in i.text] # Parse the file ending if len(p[0].text.split()) > 1: file_ending = "" else: file_ending = "." + p[0].text.split()[0] # Parse the file size (normalized to MB) if len(p) > 1: size_tmp = p[1].text.lower().replace(".","").replace(",", ".").split() size = float(size_tmp[0]) if size_tmp[1] == "kb": size *= 1e-3 elif size_tmp[1] == "bytes": size *= 1e-6 else: size = math.nan # Parse the modification date if len(p) > 2: mod_date = parsedate(p[2].text) else: mod_date = datetime.fromisoformat('2000-01-01') return file_ending, size, mod_date def parseVideos(self, mc_soup): # Checks if there's a video inside the mediacontainer: if (vsoup := mc_soup.find('video', {"class": "ilPageVideo"})): if (v_src := vsoup.find('source')['src']): v_url = urljoin(self.base_url, v_src) v_name = re.search(r"mobs/mm_\d+/(.*)\?il_wac_token.*", v_src).group(1) try: v_size = float(self.session.head(v_url).headers['Content-Length']) * 1e-6 except Exception: v_size = math.nan # The HEAD request misses the 'last-modified' key, so it's not # possible to get the mod date from there :( v_mod_date = datetime.fromisoformat('2000-01-01') return v_name, v_size, v_mod_date, v_url else: return None def scanMediaContainer(self, course_name, file_path, soup): """ Scans videos on the top of the course inside the MediaContainer and adds them to the 'files' list. 
:param soup: The soup :type soup: { type_description } """ if self.params['verbose']: print(f"Scanning Videos...") for mc in soup.find_all("figure", {"class": "ilc_media_cont_MediaContainer"}): if (video := self.parseVideos(mc)): v_name, v_size, v_mod_date, v_url = video self.files += [{ 'course': course_name, 'type': 'file', 'name': v_name, 'size': v_size, 'mod-date': v_mod_date, 'url': v_url, 'path': file_path }] def scanContainerList(self, course_name, file_path, soup): """ Scans the soup object for links inside the ContainerList and adds them to the list 'to_scan'. See _determineItemType() for the possible types of links. :param soup: :type soup: bs4.BeautifulSoup """ for i in (items := soup.find_all("div", "il_ContainerListItem")): if (subitem := i.find('a', href=True)): el_url = urljoin(self.base_url, subitem['href']) el_name = subitem.text el_type = self._determineItemType(el_url) if el_type == "file": ending, size, mod_date = self._parseFileProperties(i) self.files += [{ 'course': course_name, 'type': el_type, 'name': el_name + ending, 'size': size, 'mod-date': mod_date, 'url': el_url, 'path': file_path }] elif el_type in ["folder", "task", "lernmaterialien"]: self.to_scan += [{ 'type': el_type, 'name': el_name, 'url': el_url }] def scanFolder(self, course_name, url_to_scan): """ Scans a folder. :param course_name: The name of the course the folder belongs to :type course_name: str :param url_to_scan: The url to scan :type url_to_scan: str """ url = urljoin(self.base_url, url_to_scan) soup = BeautifulSoup(self.session.get(url).content, "lxml") file_path = course_name + "/" + "/".join(soup.find("body").find("ol").text.split("\n")[4:-1]) + "/" file_path = file_path.replace(":", " - ") if self.params['verbose']: print(f"Scanning Folder...\n{file_path}\n{url}") print("-------------------------------------------------") self.scanMediaContainer(course_name, file_path, soup) self.scanContainerList(course_name, file_path, soup) def scanTaskUnit(self, course_name, url_to_scan): """ Scans a task unit. :param course_name: The name of the course the Task belongs to :type course_name: str :param url_to_scan: The url to scan :type url_to_scan: str """ url = urljoin(self.base_url, url_to_scan) soup = BeautifulSoup(self.session.get(url).content, "lxml") task_unit_name = soup.find("a", {"class" : "ilAccAnchor"}).text file_path = course_name + "/" + "Aufgaben/" + task_unit_name + "/" file_path = file_path.replace(":", " - ") task_items = soup.find("div", {"id":"infoscreen_section_1"}).find_all("div", "form-group") if self.params['verbose']: print(f"Scanning TaskUnit...\n{file_path}\n{url}") print("-------------------------------------------------") for i in task_items: el_url = urljoin(self.base_url, i.find('a')['href']) el_name = i.find("div", 'il_InfoScreenProperty').text el_type = 'file' file_mod_date = datetime.fromisoformat('2000-01-01') file_size = math.nan self.files += [{ 'course': course_name, 'type': el_type, 'name': el_name, 'size': file_size, 'mod-date': file_mod_date, 'url': el_url, 'path': file_path }] # Now scan the submissions if self.params['tutor_mode']: self.scanTaskUnitSubmissions(course_name, file_path, soup) def scanTaskUnitSubmissions(self, course_name, file_path, soup): form_data = { 'user_login': '', 'cmd[downloadSubmissions]': 'Alle Abgaben herunterladen' } # Deadline finished? 
deadline = soup.select_one('#infoscreen_section_2 > div:nth-child(2) > div.il_InfoScreenPropertyValue.col-xs-9').text if (deadline_time := parsedate(deadline)) < datetime.now(): # Access to the submissions? if (tab_grades := soup.select_one('#tab_grades > a')): tab_grades_url = urljoin(self.base_url, tab_grades['href']) submissions_soup = BeautifulSoup(self.session.get(tab_grades_url).content, "lxml") form_action_url = urljoin(self.base_url, submissions_soup.find('form', {'id': 'ilToolbar'})['action']) # Post form data r = self.session.post(form_action_url, data=form_data) el_name = submissions_soup.select_one('#il_mhead_t_focus').text.replace("\n", "") + ".zip" # Add backgroundtask file to list, we parse the download links # later from the background tasks tab from the page header self.background_task_files += [{ 'course': course_name, 'type': 'file', 'name': el_name, 'size': math.nan, 'mod-date': deadline_time, #'url': dl_url, 'path': file_path }] def searchBackgroundTaskFile(self, el_name): # # TO DO: Cleanup!!! # for idx, f in enumerate(self.background_task_files): f["name"] = f["name"].encode() f["name"] = f["name"].replace('ü'.encode(), b'ue') f["name"] = f["name"].replace('Ü'.encode(), b'Ue') f["name"] = f["name"].replace('ä'.encode(), b'ae') f["name"] = f["name"].replace('Ä'.encode(), b'Ae') f["name"] = f["name"].replace('ö'.encode(), b'oe') f["name"] = f["name"].replace('Ö'.encode(), b'Oe') f["name"] = f["name"].replace('ß'.encode(), b'ss') f["name"] = f["name"].decode('utf-8') if f["name"] == el_name: return self.background_task_files.pop(idx) def parseBackgroundTasks(self): # time.sleep(5) # Not really needed? # Reload ilias main page to parse the background tasks bar on the top desktop_soup = BeautifulSoup(self.session.get(self.desktop_url).content, "lxml") tasks_tab_url = urljoin(self.base_url, desktop_soup.select_one('#mm_tb_background_tasks')['refresh-uri']) tasks_tab_soup = BeautifulSoup(self.session.get(tasks_tab_url).content, "lxml") # Extract the items for i in tasks_tab_soup.find_all('div', {'class': 'il-item-task'}): # Extract the download url and the remove url dl, rm = i.find_all('button', {'class': 'btn btn-default'}) dl_url = urljoin(self.base_url, dl['data-action']) rm_url = urljoin(self.base_url, rm['data-action']) self.background_tasks_to_clean.append(rm_url) # Add file to downloads el_name = i.find('div', {'class' : 'il-item-task-title'}).text.replace("\n", "") + ".zip" if (bt := self.searchBackgroundTaskFile(el_name)): self.files += [{ 'course': bt['course'], 'type': 'file', 'name': el_name, 'size': bt['size'], 'mod-date': bt['mod-date'], 'url': dl_url, 'path': bt['path'] }] def scanLernmaterial(self, course_name, url_to_scan): pass # ... to do ... def scanHelper(self, course_name, el): if len(self.to_scan) > 0: self.to_scan.pop() if el['type'] == "folder": self.scanFolder(course_name, el['url']) if el['type'] == "task": self.scanTaskUnit(course_name, el['url']) elif el['type'] == 'lernmaterialien': self.scanLernmaterial(course_name, el['url']) def searchForFiles(self, course_name): """ Scans an ilias url and all nested subfolders for files :param arg: url for the "dateien" folder :type arg: str """ while len(self.to_scan) > 0: results = ThreadPool(self.params['num_scan_threads']).imap_unordered(lambda x: self.scanHelper(course_name, x), self.to_scan) for r in results: pass def addExternalScraper(self, scraper, *args): self.external_scrapers.append({'fun' : scraper, 'args': args}) def scanCourses(self): """ Scans all courses inside the instance's courses list. 
""" for course in self.courses: self.to_scan += [{ 'type' : 'folder', 'name': course['name'], 'url': course['url'] }] print(f"Scanning {course['name']} with {self.params['num_scan_threads']} Threads....") self.searchForFiles(course['name']) # External Scrapers for d in self.external_scrapers: print(f"Scanning {d['args'][0]} with the external Scraper....") self.files += d['fun'](*d['args']) def downloadFile(self, file): """ Downloads a file. :param file: The file we want do download :type file: dict :returns: { description_of_the_return_value } :rtype: { return_type_description } """ file_dl_path = os.path.join(self.params['download_path'],file['path'], file['name']) file_mod_date = file['mod-date'].timestamp() size = file['size'] # Does the file already exists locally and is the newest version? if os.path.exists(file_dl_path) and file_mod_date < os.path.getmtime(file_dl_path): return else: # Download the file r = self.session.get(file['url'], stream=True) if r.status_code == 200: try: with open(file_dl_path, 'wb') as f: print(f"Downloading {file['course']}: {file['name']} ({size:.1f} MB)...") shutil.copyfileobj(r.raw, f) except Exception as e: return e def downloadAllFiles(self): """ Downloads all files inside the instance's files list. """ # Scan all files self.scanCourses() if self.params['tutor_mode']: # Parse the background tasks, i.e. add them to the download files self.parseBackgroundTasks() # Check the file paths paths = list(set([os.path.join(self.params['download_path'],f['path']) for f in self.files])) for p in paths: if not plPath(p).exists(): plPath(p).mkdir(parents=True, exist_ok=True) # Download all files for r in
import os import re import logging from nonebot import permission as perm from nonebot import Message as M from nonebot import MessageSegment as MS from AzusaBot import config from Azusa.utils import Timer from Azusa.data import groupdict, frienddict, SIGNAL from Azusa.middleware import on_command, CommandGroup, on_websocket_connect, on_message, scheduled_job from Azusa.exceptions import PixivError, InfoNotFoundError, RetryExhaustedError from ._pixiv import pixiv_image __plugin_name__ = 'P站插件' __plugin_usage__ = 'P站相关功能实现,查询详细帮助请使用指令“pixiv帮助”' _pixiv_instance = None _timer = Timer() logger = logging.getLogger('Azusa.pixiv') pixiv_command = CommandGroup('pixiv', logger=logger, only_to_me=False, privileged=True) @on_websocket_connect(logger=logger, checkfunc=lambda event: _pixiv_instance is None, wait_for=lambda : SIGNAL['coolq_directory'][1] == True) async def init_pixiv_instance(event, bot): username = bot.config.PIXIV_USERNAME password = bot.config.PIXIV_PASSWORD proxy_url = bot.config.PIXIV_PROXY_URL imagepath = os.path.join(SIGNAL['coolq_directory'][0], 'data', 'image', 'pixiv') if SIGNAL['coolq_directory'][0] else '' global _pixiv_instance _pixiv_instance = pixiv_image(username, password, proxy_url, imagepath) _pixiv_instance.set_pagesize(20) # Control commands # Toggle R18 and R18G @pixiv_command.command('enabler18', checkfunc=lambda session: _pixiv_instance is not None, aliases=('允许R18'), permission=perm.GROUP_ADMIN | perm.PRIVATE_FRIEND, privileged=False) async def enabler18(session, bot): selfid = session.self_id if session.event['message_type'] == 'group': groupid = session.event['group_id'] groupdict[selfid][groupid]['mods_config']['pixiv']['allowr18'] = True await session.send(M(f'群{groupid}已允许R18图像')) else: userid = session.event['user_id'] frienddict[selfid][userid]['mods_config']['pixiv']['allowr18'] = True await session.send(M(f'用户{userid}已允许R18图像')) @pixiv_command.command('disabler18', checkfunc=lambda session: _pixiv_instance is not None, aliases=('禁止R18'), permission=perm.GROUP_ADMIN | perm.PRIVATE_FRIEND, privileged=False) async def disabler18(session, bot): selfid = session.self_id if session.event['message_type'] == 'group': groupid = session.event['group_id'] groupdict[selfid][groupid]['mods_config']['pixiv']['allowr18'] = False await session.send(M(f'群{groupid}已禁止R18图像')) else: userid = session.event['user_id'] frienddict[selfid][userid]['mods_config']['pixiv']['allowr18'] = False await session.send(M(f'用户{userid}已禁止R18图像')) @pixiv_command.command('enabler18g', checkfunc=lambda session: _pixiv_instance is not None, aliases=('允许R18G'), permission=perm.GROUP_ADMIN | perm.PRIVATE_FRIEND, privileged=False) async def enabler18g(session, bot): selfid = session.self_id if session.event['message_type'] == 'group': groupid = session.event['group_id'] groupdict[selfid][groupid]['mods_config']['pixiv']['allowr18g'] = True await session.send(M(f'群{groupid}已允许R18G图像')) else: userid = session.event['user_id'] frienddict[selfid][userid]['mods_config']['pixiv']['allowr18g'] = True await session.send(M(f'用户{userid}已允许R18G图像')) @pixiv_command.command('disabler18g', checkfunc=lambda session: _pixiv_instance is not None, aliases=('禁止R18G'), permission=perm.GROUP_ADMIN | perm.PRIVATE_FRIEND, privileged=False) async def disabler18g(session, bot): selfid = session.self_id if session.event['message_type'] == 'group': groupid = session.event['group_id'] groupdict[selfid][groupid]['mods_config']['pixiv']['allowr18g'] = False await session.send(M(f'群{groupid}已禁止R18G图像')) else: userid = session.event['user_id'] 
frienddict[selfid][userid]['mods_config']['pixiv']['allowr18g'] = False await session.send(M(f'用户{userid}已禁止R18G图像')) # Decide whether the plugin is allowed to run async def _check(session) -> bool: if _pixiv_instance is None: return False if session.event['message_type'] == 'group': if groupdict[session.self_id][session.event['group_id']]['mods_config']['pixiv']['disable']: return False else: if session.event['sub_type'] == 'group': return True elif frienddict[session.self_id][session.event['user_id']]['mods_config']['pixiv']['disable']: return False return True # Refresh the login token async def refreshtoken(): if _timer.run: if _timer.running: _timer.stop() # Log in again to refresh the token if more than an hour has passed between two runs if _timer.elapsed > 3600: await _pixiv_instance.login() logger.info('pixiv refresh token') _timer.reset() _timer.start() else: await _pixiv_instance.login() logger.info('pixiv first login') _timer.start() # Get the R18 and R18G permission values async def _getperm(session) -> tuple: """ Get the r18 and r18g permissions. """ if session.event['message_type'] == 'group': r18 = groupdict[session.self_id][session.event['group_id']]['mods_config']['pixiv']['allowr18'] r18g = groupdict[session.self_id][session.event['group_id']]['mods_config']['pixiv']['allowr18g'] else: if session.event['sub_type'] == 'group': r18 = r18g = True else: r18 = frienddict[session.self_id][session.event['user_id']]['mods_config']['pixiv']['allowr18'] r18g = frienddict[session.self_id][session.event['user_id']]['mods_config']['pixiv']['allowr18g'] return r18, r18g # Common helper functions # pid argument parser async def common_pid_parser(session): """ pid argument parser: extracts a single pid argument. """ paramList = session.current_arg_text.strip().split(' ') if paramList[0] == '': paramList = [] if session.is_first_run: try: session.state['pid'] = int(paramList[0]) except (IndexError, ValueError): pass elif not paramList: session.pause(M('请输入有效的pid')) else: try: session.state['pid'] = int(paramList[0]) except ValueError: session.pause(M('参数必须是数字')) # uid argument parser async def common_uid_parser(session): """ uid argument parser: extracts a single uid argument. """ paramList = session.current_arg_text.strip().split(' ') if paramList[0] == '': paramList = [] if session.is_first_run: try: session.state['uid'] = int(paramList[0]) except (IndexError, ValueError): pass elif not paramList: session.pause(M('请输入有效的uid')) else: try: session.state['uid'] = int(paramList[0]) except ValueError: session.pause(M('参数必须是数字')) # Parser for the type, page, minimum-bookmarks, original-image and multi-image arguments re_page_object = re.compile(r'页\d+') re_minbookmarks_object = re.compile(r'\d+收藏|收藏\d+') async def common_type_page_minbookmarks_original_multiimage_parser(session): ''' Parser for the type, page, minimum-bookmarks, original-image and multi-image arguments; the defaults are 1, False and False respectively. ''' stripped_args = session.current_arg_text.strip() re_page = re_page_object.search(stripped_args) re_minbookmarks = re_minbookmarks_object.search(stripped_args) session.state['type'] = None if '全部' in stripped_args else 'manga' if '漫画' in stripped_args else 'illust' session.state['page'] = int(re_page.group(0)[1:]) if re_page else 1 session.state['min_bookmarks'] = int(re_minbookmarks.group(0).replace('收藏', '')) if re_minbookmarks else 1 session.state['original'] = True if '原图' in stripped_args else False session.state['multiimage'] = True if '多图' in stripped_args else False # Generic multi-image message sender async def common_multiimage_msgsender(session, pids: tuple, original: bool=False, multiimage: bool=False): ''' Generic multi-image message sender. Parameters: session pids: tuple of illustration pids original: whether to send original-quality images multiimage: whether to send all pages of a multi-page work ''' msg = M() msglimit = 20 msgcurrent = 0 useb64 = False if session.self_id == SIGNAL['MainQQ'] else True for illust in _pixiv_instance.getpics(pids, useb64): msgcurrent += illust['count'] if 
multiimage else 1 if msgcurrent > msglimit: await session.send(msg) msgcurrent = illust['count'] if multiimage else 1 msg.clear() msg.append(MS.text('-' * 20 + '\n')) msg.append(MS.text(f'作品名:{illust["title"]}\n')) msg.append(MS.text(f'PID:{illust["id"]}\n')) if SIGNAL['RegisteredQQ'][session.self_id]['coolq_edition'] == 'pro': files = illust['files']['original'] if original else illust['files']['large'] multiimage = session.state['multiimage'] if multiimage: for file in files: msg.append(MS.image(file)) msg.append(MS.text('\n')) else: msg.append(MS.image(files[0])) msg.append(MS.text('\n')) if msg: await session.send(msg) # 通用多用户消息发送函数 async def common_multiuser_msgsender(session, uids: tuple): """ 通用多用户消息发送函数。 参数: session uids: 作者uid组成的元组 """ msg = M() msglimit = 20 msgcurrent = 0 useb64 = False if session.self_id == SIGNAL['MainQQ'] else True for user in _pixiv_instance.getusers(uids, useb64): msgcurrent += 1 if msgcurrent > msglimit: await session.send(msg) msgcurrent = 1 msg.clear() msg.append(MS.text('-' * 20 + '\n')) msg.append(MS.text(f'用户名:{user["user"]["name"]}\n')) msg.append(MS.text(f'用户ID:{user["user"]["id"]}\n')) if SIGNAL['RegisteredQQ'][session.self_id]['coolq_edition'] == 'pro' and user['user']['profile_image']: msg.append(MS.text('用户头像:')) msg.append(MS.image(user['user']['profile_image'])) msg.append(MS.text('\n')) if msg: await session.send(msg) # PID搜索 # PID搜索 @pixiv_command.command('pid_search_detail', checkfunc=_check, aliases=('PID搜索')) async def pid_search_detail(session, bot): original = session.state['original'] multiimage = session.state['multiimage'] pid = session.get('pid', prompt='请输入想查询的PID') r18, r18g = await _getperm(session) useb64 = False if session.self_id == SIGNAL['MainQQ'] else True await session.send(M(f'开始搜索图片{pid}')) try: await refreshtoken() pids = await _pixiv_instance.illust_detail(pid, original_image=original, multiimage=multiimage, allowr18=r18, allowr18g=r18g) except InfoNotFoundError: await session.send(M('未查询到信息')) except PixivError as e: await session.send(M('插件出错')) raise e else: msg = M() for illust in _pixiv_instance.getpics(pids, useb64): msg.append(MS.text(f'作品名: {illust["title"]}\n')) msg.append(MS.text(f'作品ID: {illust["id"]}\n')) msg.append(MS.text(f'作品类型: {illust["type"]}\n')) msg.append(MS.text(f'作品页数: {illust["count"]}\n')) msg.append(MS.text(f'作品创作时间(JST): {illust["time"]}\n')) msg.append(MS.text(f'作品标签: {",".join([i["name"] + "(" + i["translated_name"] + ")" if i["translated_name"] is not None else i["name"] for i in illust["tags"]])}\n')) msg.append(MS.text(f'作者名: {illust["author_name"]}\n')) msg.append(MS.text(f'作者ID: {illust["author_id"]}\n')) msg.append(MS.text(f'阅览人数: {illust["views"]}\n')) msg.append(MS.text(f'收藏数: {illust["bookmarks"]}\n')) msg.append(MS.text(f'评论数: {illust["comments"]}\n')) if SIGNAL['RegisteredQQ'][session.self_id]['coolq_edition'] == 'pro': if ('R-18' in str(illust['tags']) or 'R18' in str(illust['tags'])) and not r18: break elif ('R-18G' in str(illust['tags']) or 'R18G' in str(illust['tags'])) and not r18g: break msglimit = 20 msgcurrent = 0 files = illust['files']['original'] if original else illust['files']['large'] if multiimage: for file in files: msgcurrent += 1 if msgcurrent >= msglimit: await session.send(msg) msgcurrent = 1 msg.clear() msg.append(MS.image(file)) else: msg.append(MS.image(files[0])) if msg: await session.send(msg) finally: await session.send(M(f'搜索图片{pid}完毕')) @pid_search_detail.args_parser async def pid_search_detail_parser(session): await common_pid_parser(session) if 
session.is_first_run: await common_type_page_minbookmarks_original_multiimage_parser(session) # 评论区 @pixiv_command.command('pid_search_comment', checkfunc=_check, aliases=('PID搜索评论区')) async def pid_search_comment(session, bot): pid = session.get('pid', prompt='请输入想查询的PID') await session.send(M(f'开始搜索作品{pid}的评论区')) try: await refreshtoken() comments = await _pixiv_instance.illust_comments(pid) except InfoNotFoundError: await session.send(M('未查询到信息')) except PixivError as e: await session.send(M('插件出错')) raise e else: msg = M() msglimit = 10 msgcurrent = 0 for comment in comments: msgcurrent += 1 if msgcurrent > msglimit: await session.send(msg) msgcurrent = 1 msg.clear() msg.append(MS.text(comment)) if msg: await session.send(msg) finally: await session.send(M(f'搜索作品{pid}评论区完毕')) @pid_search_comment.args_parser async def pid_search_comment_parser(session): await common_pid_parser(session) # 推荐与相关搜索 # 推荐 @pixiv_command.command('recommend', checkfunc=_check, aliases=('PIXIV推荐')) async def recommend(session, bot): type = session.state['type'] page = session.state['page'] minbookmarks = session.state['min_bookmarks'] original = session.state['original'] multiimage = session.state['multiimage'] r18, r18g = await _getperm(session) await session.send(M(f'开始搜索推荐第{page}页数据')) try: await refreshtoken() pids = await _pixiv_instance.illust_recommended(type=type, page=page, min_bookmarks=minbookmarks, original_image=original, multiimage=multiimage, allowr18=r18, allowr18g=r18g) except InfoNotFoundError: await session.send(M('未查询到信息')) except PixivError as e: await session.send(M('插件出错')) raise e else: await common_multiimage_msgsender(session, pids, original, multiimage) finally: await session.send(M(f'搜索推荐第{page}页完毕')) @recommend.args_parser async def recommend_parser(session): await common_type_page_minbookmarks_original_multiimage_parser(session) # 作品相关搜索 @pixiv_command.command('pid_search_relate', checkfunc=_check, aliases=('PID搜索相关')) async def pid_search_relate(session, bot): type = session.state['type'] page = session.state['page'] minbookmarks = session.state['min_bookmarks'] original = session.state['original'] multiimage = session.state['multiimage'] pid = session.get('pid', prompt='请输入想查询的PID') r18, r18g = await _getperm(session) await session.send(M(f'开始搜索作品{pid}相关图片第{page}页数据')) try: await refreshtoken() pids = await _pixiv_instance.illust_related(pid=pid, type=type, page=page, min_bookmarks=minbookmarks, original_image=original, multiimage=multiimage, allowr18=r18, allowr18g=r18g) except InfoNotFoundError: await session.send(M('未查询到信息')) except PixivError as e: await session.send(M('插件出错')) raise e else: await common_multiimage_msgsender(session, pids, original, multiimage) finally: await session.send(M(f'搜索作品{pid}相关图片第{page}页完毕')) @pid_search_relate.args_parser async def pid_search_relate_parser(session): await common_pid_parser(session) if session.is_first_run: await common_type_page_minbookmarks_original_multiimage_parser(session) # 关键词搜索 # 关键词通用查找函数 async def common_keywords_search(session, search_target: str): type = session.state['type'] page = session.state['page'] minbookmarks = session.state['min_bookmarks'] original = session.state['original'] multiimage = session.state['multiimage'] keywords = session.get('keywords', prompt='输入关键词') r18, r18g = await _getperm(session) await session.send(M(f'开始搜索关键词“{keywords}”第{page}页数据')) try: await refreshtoken() pids = await _pixiv_instance.search_illust(keywords, type=type, page=page, search_target=search_target, min_bookmarks=minbookmarks, 
original_image=original, multiimage=multiimage, allowr18=r18, allowr18g=r18g) except InfoNotFoundError: await session.send(M('未查询到信息')) except PixivError as e: await session.send(M('插件出错')) raise e else: await common_multiimage_msgsender(session, pids, original, multiimage) finally: await session.send(M(f'搜索关键词“{keywords}”第{page}页完毕')) # 关键词通用查找函数解析器 re_keywords_search_parser_object = re.compile(r'全部|漫画|插画|原图|多图|页\d+|\d+收藏|收藏\d+') async def common_keywords_search_parser(session): stripped_args = session.current_arg_text.strip() stripped_args = re_keywords_search_parser_object.sub('', stripped_args).strip() if session.is_first_run: await common_type_page_minbookmarks_original_multiimage_parser(session) if stripped_args: session.state['keywords'] = stripped_args elif not stripped_args: session.pause(M('关键词不能为空')) else: session.state['keywords'] = stripped_args @pixiv_command.command('keywords_search_partial', checkfunc=_check, aliases=('标签搜索')) async def keywords_search_partial(session, bot): await common_keywords_search(session, search_target='partial_match_for_tags') @keywords_search_partial.args_parser async def keywords_search_partial_parser(session): await common_keywords_search_parser(session) @pixiv_command.command('keywords_search_exact', checkfunc=_check, aliases=('精确标签搜索')) async def keywords_search_exact(session, bot): await common_keywords_search(session, search_target='exact_match_for_tags') @keywords_search_exact.args_parser async def keywords_search_exact_parser(session): await common_keywords_search_parser(session) @pixiv_command.command('keywords_search_title', checkfunc=_check, aliases=('标题搜索')) async def keywords_search_title(session, bot): await common_keywords_search(session, search_target='title_and_caption') @keywords_search_title.args_parser async def keywords_search_title_parser(session): await common_keywords_search_parser(session) # UID搜索 # 查询用户详细信息 @pixiv_command.command('uid_search_detail', checkfunc=_check, aliases=('UID搜索')) async def uid_search_detail(session, bot): uid = session.get('uid', prompt='请输入想查询的UID') await session.send(M(f'开始搜索用户{uid}的详细信息')) try: await refreshtoken() uids = await _pixiv_instance.user_detail(uid) except InfoNotFoundError: await session.send(M('未查询到信息')) except PixivError as e: await session.send(M('插件出错')) raise e else: msg = M() useb64 = False if session.self_id == SIGNAL['MainQQ'] else True for user in _pixiv_instance.getusers(uids, useb64): msg.append(MS.text(f'用户名:{user["user"]["name"]}\n')) msg.append(MS.text(f'用户ID:{user["user"]["id"]}\n')) d = { 'user': { 'profile_image': '用户头像' if 'profile_image' in user['user'].keys() else '', 'comment': '自我介绍' if 'comment' in user['user'].keys() else '', }, 'profile': { 'webpage': '网站' if 'webpage' in user['profile'].keys() else '', 'gender': '性别'
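# Behavior sketch for the argument regexes defined in the parsers above: they
# pick the page number and the minimum-bookmarks value out of a raw command
# string (the sample string is made up).
import re
re_page_object = re.compile(r'页\d+')
re_minbookmarks_object = re.compile(r'\d+收藏|收藏\d+')
sample = '漫画 页3 1000收藏 原图 多图'
page = int(re_page_object.search(sample).group(0)[1:])                                # -> 3
min_bookmarks = int(re_minbookmarks_object.search(sample).group(0).replace('收藏', ''))  # -> 1000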
# cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py """ CISCO_IPSLA_ECHO_MIB This MIB module defines the templates for IP SLA operations of ICMP echo, UDP echo and TCP connect. The ICMP echo operation measures end\-to\-end response time between a Cisco router and any IP enabled device by computing the time taken between sending an ICMP echo request message to the destination and receiving an ICMP echo reply. The UDP echo operation measures end\-to\-end response time between a Cisco router and any IP enabled device by computing the time taken between sending an UDP echo request message to the destination and receiving an UDP echo reply. The TCP connect operation measures end\-to\-end response time between a Cisco router and any IP enabled device by computing the time taken to perform a TCP connect operation. """ from collections import OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class CISCOIPSLAECHOMIB(Entity): """ .. attribute:: cipslaicmpechotmpltable A table that contains ICMP echo template definitions **type**\: :py:class:`CipslaIcmpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable>` .. attribute:: cipslaudpechotmpltable A table that contains UDP echo template specific definitions **type**\: :py:class:`CipslaUdpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable>` .. attribute:: cipslatcpconntmpltable A table that contains TCP connect template specific definitions **type**\: :py:class:`CipslaTcpConnTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable>` """ _prefix = 'CISCO-IPSLA-ECHO-MIB' _revision = '2007-08-16' def __init__(self): super(CISCOIPSLAECHOMIB, self).__init__() self._top_entity = None self.yang_name = "CISCO-IPSLA-ECHO-MIB" self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("cipslaIcmpEchoTmplTable", ("cipslaicmpechotmpltable", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable)), ("cipslaUdpEchoTmplTable", ("cipslaudpechotmpltable", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable)), ("cipslaTcpConnTmplTable", ("cipslatcpconntmpltable", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable))]) self._leafs = OrderedDict() self.cipslaicmpechotmpltable = CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable() self.cipslaicmpechotmpltable.parent = self self._children_name_map["cipslaicmpechotmpltable"] = "cipslaIcmpEchoTmplTable" self.cipslaudpechotmpltable = CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable() self.cipslaudpechotmpltable.parent = self self._children_name_map["cipslaudpechotmpltable"] = "cipslaUdpEchoTmplTable" self.cipslatcpconntmpltable = CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable() self.cipslatcpconntmpltable.parent = self self._children_name_map["cipslatcpconntmpltable"] = "cipslaTcpConnTmplTable" self._segment_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(CISCOIPSLAECHOMIB, [], name, value) class CipslaIcmpEchoTmplTable(Entity): """ A table that contains ICMP echo template definitions. .. 
attribute:: cipslaicmpechotmplentry A row entry representing an IPSLA ICMP echo template **type**\: list of :py:class:`CipslaIcmpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry>` """ _prefix = 'CISCO-IPSLA-ECHO-MIB' _revision = '2007-08-16' def __init__(self): super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, self).__init__() self.yang_name = "cipslaIcmpEchoTmplTable" self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("cipslaIcmpEchoTmplEntry", ("cipslaicmpechotmplentry", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry))]) self._leafs = OrderedDict() self.cipslaicmpechotmplentry = YList(self) self._segment_path = lambda: "cipslaIcmpEchoTmplTable" self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, [], name, value) class CipslaIcmpEchoTmplEntry(Entity): """ A row entry representing an IPSLA ICMP echo template. .. attribute:: cipslaicmpechotmplname (key) This field is used to specify the ICMP echo template name **type**\: str **length:** 1..64 .. attribute:: cipslaicmpechotmpldescription This field is used to provide description for the ICMP echo template **type**\: str **length:** 0..128 .. attribute:: cipslaicmpechotmplsrcaddrtype An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaIcmpEchoTmplSrcAddr object **type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>` .. attribute:: cipslaicmpechotmplsrcaddr A string which specifies the IP address of the source **type**\: str **length:** 0..255 .. attribute:: cipslaicmpechotmpltimeout Specifies the duration to wait for a IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout **type**\: int **range:** 0..604800000 **units**\: milliseconds .. attribute:: cipslaicmpechotmplverifydata When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size **type**\: bool .. attribute:: cipslaicmpechotmplreqdatasize This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' IP SLA request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value **type**\: int **range:** 0..16384 **units**\: octets .. attribute:: cipslaicmpechotmpltos This object represents the type of service octet in an IP header **type**\: int **range:** 0..255 .. attribute:: cipslaicmpechotmplvrfname This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing table for this operation **type**\: str **length:** 0..32 .. attribute:: cipslaicmpechotmplthreshold This object defines an administrative threshold limit. 
If the IP SLA operation time exceeds this limit and if the condition specified in cipslaIcmpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted **type**\: int **range:** 0..2147483647 **units**\: milliseconds .. attribute:: cipslaicmpechotmplhistlives The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection **type**\: int **range:** 0..2 .. attribute:: cipslaicmpechotmplhistbuckets The maximum number of history buckets to record. This value is set to the number of operations to keep per lifetime. After cipslaIcmpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaIcmpEchoTmplHistBuckets buckets are retained **type**\: int **range:** 1..60 .. attribute:: cipslaicmpechotmplhistfilter Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaIcmpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded **type**\: :py:class:`CipslaIcmpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter>` .. attribute:: cipslaicmpechotmplstatshours The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection **type**\: int **range:** 0..25 **units**\: hours .. attribute:: cipslaicmpechotmpldistbuckets The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaIcmpEchoTmplStatsNumDistBuckets will be kept. The last cipslaIcmpEchoTmplStatsNumDistBucket will contain all entries from its distribution interval start point to infinity **type**\: int **range:** 1..20 .. attribute:: cipslaicmpechotmpldistinterval The statistical distribution buckets interval. Distribution Bucket Example\: cipslaIcmpEchoTmplDistBuckets = 5 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaIcmpEchoTmplDistBuckets = 1 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaIcmpEchoTmplDistInterval does not apply when cipslaIcmpEchoTmplDistBuckets is one **type**\: int **range:** 1..100 **units**\: milliseconds .. attribute:: cipslaicmpechotmplstoragetype The storage type of this conceptual row **type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>` .. attribute:: cipslaicmpechotmplrowstatus The status of the conceptual ICMP echo template control row. 
When the status is active, all the read\-create objects in that row can be modified **type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>` """ _prefix = 'CISCO-IPSLA-ECHO-MIB' _revision = '2007-08-16' def __init__(self): super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, self).__init__() self.yang_name = "cipslaIcmpEchoTmplEntry" self.yang_parent_name = "cipslaIcmpEchoTmplTable" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['cipslaicmpechotmplname'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('cipslaicmpechotmplname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplName'), ['str'])), ('cipslaicmpechotmpldescription', (YLeaf(YType.str, 'cipslaIcmpEchoTmplDescription'), ['str'])), ('cipslaicmpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])), ('cipslaicmpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaIcmpEchoTmplSrcAddr'), ['str'])), ('cipslaicmpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTimeOut'), ['int'])), ('cipslaicmpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaIcmpEchoTmplVerifyData'), ['bool'])), ('cipslaicmpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplReqDataSize'), ['int'])), ('cipslaicmpechotmpltos', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTOS'), ['int'])), ('cipslaicmpechotmplvrfname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplVrfName'), ['str'])),
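# Usage sketch for the generated model above: building an in-memory ICMP echo
# template row. Pushing it to a device would additionally need a ydk service
# provider and CRUD service, which are outside this excerpt; all values are
# placeholders.
icmp_mib = CISCOIPSLAECHOMIB()
tmpl = CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry()
tmpl.cipslaicmpechotmplname = 'icmp-echo-probe-1'  # key, 1..64 characters
tmpl.cipslaicmpechotmpltimeout = 5000              # milliseconds, range 0..604800000
tmpl.cipslaicmpechotmplverifydata = True           # compare reply data with the request
icmp_mib.cipslaicmpechotmpltable.cipslaicmpechotmplentry.append(tmpl)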
BazaarNPCFlip("Spooky Bait", 10.0), BazaarNPCFlip("Spiked Bait", 20.0), BazaarNPCFlip("Blessed Bait", 42.0), BazaarNPCFlip("Ice Bait", 3.0), BazaarNPCFlip("Whale Bait", 80.0), #################################### ######### Oddities ######### #################################### # Potatos, compactors, fuels BazaarNPCFlip("Hot Potato Book", 13000.0), BazaarNPCFlip("Compactor", 640.0), BazaarNPCFlip("Super Compactor 3000", 50000.0), BazaarNPCFlip("Enchanted Lava Bucket", 50000.0), # EXP bottles BazaarNPCFlip("Experience Bottle", 5.0), BazaarNPCFlip("Grand Experience Bottle", 480.0), BazaarNPCFlip("Titanic Experience Bottle", 5000.0), BazaarNPCFlip("Colossal Experience Bottle", 5000.0, id="COLOSSAL_EXP_BOTTLE"), # =================================================================================== # | NPC > Craft to Bazaar | # =================================================================================== # Adventurer NPCCraftBazaarFlip( "Rotten Flesh", "Enchanted Rotten Flesh", "ROTTEN_FLESH_4", 160, "ENCHANTED_ROTTEN_FLESH", 8, "Adventurer in Bazaar Alley", ), NPCCraftBazaarFlip( "Bone", "Enchanted Bone", "BONE_5", 160, "ENCHANTED_BONE", 8, "Adventurer in Bazaar Alley", ), NPCCraftBazaarFlip( "Gunpowder", "Enchanted Gunpowder", "SULPHUR_4", 160, "ENCHANTED_GUNPOWDER", 10, "Adventurer in Bazaar Alley", ), NPCCraftBazaarFlip( "String", "Enchanted String", "STRING_4", 192, "ENCHANTED_STRING", 10, "Adventurer in Bazaar Alley", ), NPCCraftBazaarFlip( "Slimeball", "Enchanted Slimeball", "SLIME_BALL_5", 160, "ENCHANTED_SLIME_BALL", 14, "Adventurer in Bazaar Alley", ), # Lumber merchant NPCCraftBazaarFlip( "Oak Wood", "Enchanted Oak Wood", "LOG_6", 160, "ENCHANTED_OAK_LOG", 5, "Lumber Merchant in Bazaar Alley", ), NPCCraftBazaarFlip( "Dark Oak Wood", "Enchanted Dark Oak Wood", "LOG_2:1_5", 160, "ENCHANTED_DARK_OAK_LOG", 5, "Lumber Merchant in Bazaar Alley", ), NPCCraftBazaarFlip( "Jungle Wood", "Enchanted Jungle Wood", "LOG:3_6", 160, "ENCHANTED_JUNGLE_LOG", 5, "Lumber Merchant in Bazaar Alley", ), NPCCraftBazaarFlip( "Acacia Wood", "Enchanted Acacia Wood", "LOG_2_6", 160, "ENCHANTED_ACACIA_LOG", 5, "Lumber Merchant in Bazaar Alley", ), NPCCraftBazaarFlip( "Spruce Wood", "Enchanted Spruce Wood", "LOG:1_5", 160, "ENCHANTED_SPRUCE_LOG", 5, "Lumber Merchant in Bazaar Alley", ), NPCCraftBazaarFlip( "Birch Wood", "Enchanted Birch Wood", "LOG:2_7", 160, "ENCHANTED_BIRCH_LOG", 5, "Lumber Merchant in Bazaar Alley", ), # Mine merchant NPCCraftBazaarFlip( "Cobblestone", "Enchanted Cobblestone", "COBBLESTONE_4", 160, "ENCHANTED_COBBLESTONE", 3, "Mine Merchant nearby the Coal Mine", ), NPCCraftBazaarFlip( "Coal", "Enchanted Coal", "COAL_4", 160, "ENCHANTED_COAL", 4, "Mine Merchant nearby the Coal Mine", ), # Farm merchant NPCCraftBazaarFlip( "Melon Slice", "Enchanted Melon", "MELON_4", 160, "ENCHANTED_MELON", 3, "Farm Merchant in the Central Hub", ), NPCCraftBazaarFlip( "Wheat", "Hay Bale", "WHEAT_-1", 9, "HAY_BLOCK", 2.33, "Farm Merchant in the Central Hub", ), NPCCraftBazaarFlip( "Potato", "Enchanted Potato", "POTATO_ITEM_4", 160, "ENCHANTED_POTATO", 2.33, "Farm Merchant in the Central Hub", ), NPCCraftBazaarFlip( "Carrot", "Enchanted Carrot", "CARROT_ITEM_4", 160, "ENCHANTED_CARROT", 2.33, "Farm Merchant in the Central Hub", ), NPCCraftBazaarFlip( "Cocoa Beans", "Enchanted Cocoa Bean", "INK_SACK:3_4", 160, "ENCHANTED_COCOA", 5, "Farm Merchant in the Central Hub", ), NPCCraftBazaarFlip( "Sugar Cane", "Enchanted Sugar", "SUGAR_CANE_4", 160, "ENCHANTED_SUGAR", 5, "Farm Merchant in the Central Hub", ), 
NPCCraftBazaarFlip( "Pumpkin", "Enchanted Pumpkin", "PUMPKIN_3", 160, "ENCHANTED_PUMPKIN", 8, "Farm Merchant in the Central Hub", ), NPCCraftBazaarFlip( "Red Mushroom", "Enchanted Red Mushroom", "MUSHROOM_COLLECTION_6", 160, "ENCHANTED_RED_MUSHROOM", 12, "Farm Merchant in the Central Hub", ), NPCCraftBazaarFlip( "Brown Mushroom", "Enchanted Brown Mushroom", "MUSHROOM_COLLECTION_6", 160, "ENCHANTED_BROWN_MUSHROOM", 12, "Farm Merchant in the Central Hub", ), # Alchemist NPCCraftBazaarFlip( "Nether Wart", "Enchanted Nether Wart", "NETHER_STALK_4", 160, "ENCHANTED_NETHER_STALK", 10, "Alchemist in the Purple House", ), NPCCraftBazaarFlip( "Rabbit's Foot", "Enchanted Rabbit Foot", "RABBIT_4", 160, "ENCHANTED_RABBIT_FOOT", 10, "Alchemist in the Purple House", ), NPCCraftBazaarFlip( "Spider Eye", "Enchanted Spider Eye", "SPIDER_EYE_4", 160, "ENCHANTED_SPIDER_EYE", 12, "Alchemist in the Purple House", ), NPCCraftBazaarFlip( "Magma Cream", "Enchanted Magma Cream", "MAGMA_CREAM_5", 160, "ENCHANTED_MAGMA_CREAM", 20, "Alchemist in the Purple House", ), NPCCraftBazaarFlip( "Ghast Tear", "Enchanted Ghast Tear", "GHAST_TEAR_4", 5, "ENCHANTED_GHAST_TEAR", 200, "Alchemist in the Purple House", ), # Fish NPCCraftBazaarFlip( "Raw Fish", "Enchanted Raw Fish", "RAW_FISH_6", 160, "ENCHANTED_RAW_FISH", 20, "Fish Merchant nearby the Pond", ), NPCCraftBazaarFlip( "Pufferfish", "Enchanted Pufferfish", "RAW_FISH:3_2", 160, "ENCHANTED_PUFFERFISH", 40, "Fish Merchant nearby the Pond", ), NPCCraftBazaarFlip( "Raw Salmon", "Enchanted Raw Salmon", "RAW_FISH:1_4", 160, "ENCHANTED_RAW_SALMON", 30, "Fish Merchant nearby the Pond", ), NPCCraftBazaarFlip( "Clownfish", "Enchanted Clownfish", "RAW_FISH:2_-1", 160, "ENCHANTED_CLOWNFISH", 100, "Fish Merchant nearby the Pond", ), # Builder NPCCraftBazaarFlip( "Sand", "Enchanted Sand", "SAND_5", 160, "ENCHANTED_SAND", 1, "Builder in the Builder's House", ), NPCCraftBazaarFlip( "Ice", "Enchanted Ice", "ICE_4", 160, "ENCHANTED_ICE", 1, "Builder in the Builder's House", ), NPCCraftBazaarFlip( "Ice", "Packed Ice", "ICE_-1", 9, "PACKED_ICE", 1, "Builder in the Builder's House", ), NPCCraftBazaarFlip( "Packed Ice", "Enchanted Packed Ice", "ICE_7", 160, "ENCHANTED_PACKED_ICE", 9, "Builder in the Builder's House", ), NPCCraftBazaarFlip( "Quartz Block", "Enchanted Quartz", "QUARTZ_4", 40, "ENCHANTED_QUARTZ", 50, "Builder in the Builder's House", ), NPCCraftBazaarFlip( "Quartz Block", "Enchanted Quartz", "QUARTZ_4", 40, "ENCHANTED_QUARTZ", 32, "Old Builder in the Builder's House", ), NPCCraftBazaarFlip( "Redstone", "Enchanted Redstone", "REDSTONE_4", 160, "ENCHANTED_REDSTONE", 4, "Mad Redstone Engineer in the Builder's House", ), # Pat and Gold Mine NPCCraftBazaarFlip( "Flint", "Enchanted Flint", "GRAVEL_5", 160, "ENCHANTED_FLINT", 6, "Pat in the Graveyard House", ), NPCCraftBazaarFlip( "Iron Ingot", "Enchanted Iron", "IRON_INGOT_4", 160, "ENCHANTED_IRON", 5, "Iron Forger outside the Gold Mine", ), NPCCraftBazaarFlip( "Gold Ingot", "Enchanted Gold", "GOLD_INGOT_4", 160, "ENCHANTED_GOLD", 5.5, "Gold Forger outside the Gold Mine", ), # The End NPCCraftBazaarFlip( "End Stone", "Enchanted End Stone", "ENDER_STONE_4", 160, "ENCHANTED_ENDSTONE", 10, "Pearl Dealer on the End Island", ), NPCCraftBazaarFlip( "Obsidian", "Enchanted Obsidian", "OBSIDIAN_4", 160, "ENCHANTED_OBSIDIAN", 50, "Pearl Dealer on the End Island", ), # =================================================================================== # | Bazaar to Craft to Bazaar | # 
=================================================================================== # Wheat BazaarCraftBazaarFlip("Wheat", 60, "WHEAT_5", "Enchanted Bread"), BazaarCraftBazaarFlip("Wheat", 9, "WHEAT_-1", "Hay Bale"), BazaarCraftBazaarFlip("Hay Bale", 144, "WHEAT_7", "Enchanted Hay Bale"), BazaarCraftBazaarFlip("Wheat", 1296, "WHEAT_7", "Enchanted Hay Bale"), BazaarCraftBazaarFlip( "Wheat", 186624, "WHEAT_11", "Tightly-Tied Hay Bale", crafted_id="TIGHTLY_TIED_HAY_BALE", ), BazaarCraftBazaarFlip( "Hay Bale", 20736, "WHEAT_11", "Tightly-Tied Hay Bale", crafted_id="TIGHTLY_TIED_HAY_BALE", ), BazaarCraftBazaarFlip( "Enchanted Hay Bale", 144, "WHEAT_11", "Tightly-Tied Hay Bale", crafted_id="TIGHTLY_TIED_HAY_BALE", ), BazaarCraftBazaarFlip("Seeds", 160, "WHEAT_-1", "Enchanted Seeds"), # Carrots BazaarCraftBazaarFlip("Carrot", 160, "CARROT_ITEM_4", "Enchanted Carrot"), BazaarCraftBazaarFlip( "Enchanted Carrot", 64, "CARROT_ITEM_4", "Enchanted Carrot on a Stick", source_name_2="Oak Wood", craft_cost_2=1, ), BazaarCraftBazaarFlip( "Enchanted Carrot", 128, "CARROT_ITEM_7", "Enchanted Golden Carrot", source_name_2="Carrot", craft_cost_2=32, source_name_3="Gold Ingot", craft_cost_3=29, ), # Potatoes BazaarCraftBazaarFlip("Potato", 160, "POTATO_ITEM_4", "Enchanted Potato"), BazaarCraftBazaarFlip("Potato", 25600, "POTATO_ITEM_7", "Enchanted Baked Potato"), BazaarCraftBazaarFlip("Enchanted Potato", 160, "POTATO_ITEM_7", "Enchanted Baked Potato"), # Pumpkins BazaarCraftBazaarFlip("Pumpkin", 160, "PUMPKIN_3", "Enchanted Pumpkin"), BazaarCraftBazaarFlip("Pumpkin", 25600, "PUMPKIN_10", "Polished Pumpkin"), BazaarCraftBazaarFlip("Enchanted Pumpkin", 160, "PUMPKIN_10", "Polished Pumpkin"), # Melons BazaarCraftBazaarFlip("Melon", 160, "MELON_4", "Enchanted Melon"), BazaarCraftBazaarFlip( "Melon", 256, "MELON_5", "Enchanted Glistering Melon", source_name_2="Gold Ingot", craft_cost_2=228, ), BazaarCraftBazaarFlip("Melon", 25600, "MELON_6", "Enchanted Melon Block"), BazaarCraftBazaarFlip("Enchanted Melon", 160, "MELON_6", "Enchanted Melon Block"), # Mushrooms BazaarCraftBazaarFlip("Red Mushroom", 160, "MUSHROOM_COLLECTION_6", "Enchanted Red Mushroom"), BazaarCraftBazaarFlip("Red Mushroom", 9, "MUSHROOM_COLLECTION_5", "Red Mushroom Block"), BazaarCraftBazaarFlip( "Red Mushroom", 5184, "MUSHROOM_COLLECTION_8", "Enchanted Red Mushroom Block" ), BazaarCraftBazaarFlip( "Red Mushroom Block", 576, "MUSHROOM_COLLECTION_8", "Enchanted Red Mushroom Block" ), BazaarCraftBazaarFlip("Brown Mushroom", 160, "MUSHROOM_COLLECTION_6", "Enchanted Brown Mushroom"), BazaarCraftBazaarFlip("Brown Mushroom", 9, "MUSHROOM_COLLECTION_5", "Brown Mushroom Block"), BazaarCraftBazaarFlip( "Brown Mushroom", 5184, "MUSHROOM_COLLECTION_8", "Enchanted Brown Mushroom Block" ), BazaarCraftBazaarFlip( "Brown Mushroom Block", 576, "MUSHROOM_COLLECTION_8", "Enchanted Brown Mushroom Block" ), # Cocoa BazaarCraftBazaarFlip("Cocoa Beans", 160, "INK_SACK:3_4", "Enchanted Cocoa Bean"), BazaarCraftBazaarFlip( "Enchanted Cocoa Bean", 128, "INK_SACK:3_6", "Enchanted Cookie", source_name_2="Wheat", craft_cost_2=32, ), # Cacti BazaarCraftBazaarFlip( "Cactus", 160, "CACTUS_4", "Enchanted Cactus Green", source_name_2="Coal", craft_cost_2=20 ), BazaarCraftBazaarFlip( "Cactus", 25600, "CACTUS_8", "Enchanted Cactus", source_name_2="Coal", craft_cost_2=3200 ), BazaarCraftBazaarFlip("Enchanted Cactus Green", 160, "CACTUS_8", "Enchanted Cactus"), # Sugar Cane BazaarCraftBazaarFlip("Sugar Cane", 160, "SUGAR_CANE_4", "Enchanted Sugar"), BazaarCraftBazaarFlip("Sugar Cane", 
192, "SUGAR_CANE_5", "Enchanted Paper"), BazaarCraftBazaarFlip("Sugar Cane", 25600, "SUGAR_CANE_8", "Enchanted Sugar Cane"), BazaarCraftBazaarFlip("Enchanted Sugar", 160, "SUGAR_CANE_8", "Enchanted Sugar Cane"), # Cow BazaarCraftBazaarFlip("Leather", 576, "LEATHER_6", "Enchanted Leather"), BazaarCraftBazaarFlip("Raw Beef", 160, "LEATHER_4", "Enchanted Raw Beef"), # Pork BazaarCraftBazaarFlip("Raw Porkchop", 160, "PORK_4", "Enchanted Pork"), BazaarCraftBazaarFlip("Raw Porkchop", 25600, "PORK_7", "Enchanted Grilled Pork"), BazaarCraftBazaarFlip("Enchanted Pork", 160, "PORK_7", "Enchanted Grilled Pork"), # Chicken BazaarCraftBazaarFlip("Raw Chicken", 160, "RAW_CHICKEN_4", "Enchanted Raw Chicken"), BazaarCraftBazaarFlip( "Enchanted Egg", 1, "RAW_CHICKEN_7", "Enchanted Cake", source_name_2="Enchanted Sugar", craft_cost_2=2, source_name_3="Wheat", craft_cost_3=3, ), BazaarCraftBazaarFlip("Enchanted Egg", 144, "RAW_CHICKEN_9", "Super Enchanted Egg"), BazaarCraftBazaarFlip("Feather", 160, "FEATHER_5", "Enchanted Feather"), # Mutton BazaarCraftBazaarFlip("Mutton", 160, "MUTTON_5", "Enchanted Mutton"), BazaarCraftBazaarFlip("Mutton", 25600, "MUTTON_8", "Enchanted Cooked Mutton"), BazaarCraftBazaarFlip("Enchanted Mutton", 160, "MUTTON_8", "Enchanted Cooked Mutton"), # Rabbit BazaarCraftBazaarFlip("Raw Rabbit", 160, "RAW_RABBIT_-1", "Enchanted Raw Rabbit"), BazaarCraftBazaarFlip("Rabbit's Foot", 160, "RAW_RABBIT_4", "Enchanted Rabbit Foot"), BazaarCraftBazaarFlip("Rabbit Hide", 576, "RAW_RABBIT_6", "Enchanted Rabbit Hide"), # Nether Wart BazaarCraftBazaarFlip("Nether Wart", 160, "NETHER_STALK_4", "Enchanted Nether Wart"), BazaarCraftBazaarFlip("Nether Wart", 25600, "NETHER_STALK_12", "Mutant Nether Wart", crafted_id="MUTANT_NETHER_STALK"), BazaarCraftBazaarFlip("Enchanted Nether Wart", 160, "NETHER_STALK_12", "Mutant Nether Wart", crafted_id="MUTANT_NETHER_STALK"), #################################### ######### Mining ######### #################################### # Cobblestone BazaarCraftBazaarFlip("Cobblestone", 160, "COBBLESTONE_4", "Enchanted Cobblestone"), # Coal BazaarCraftBazaarFlip("Coal", 160, "COAL_4", "Enchanted Coal"), BazaarCraftBazaarFlip( "Coal", 20480, "COAL_5", "Enchanted Charcoal", source_name_2="Oak Wood", craft_cost_2=32 ), BazaarCraftBazaarFlip( "Enchanted Coal", 128, "COAL_5", "Enchanted Charcoal", source_name_2="Oak Wood", craft_cost_2=32 ), BazaarCraftBazaarFlip("Coal", 25600, "COAL_7", "Enchanted Block Of Coal", crafted_id="ENCHANTED_COAL_BLOCK"), BazaarCraftBazaarFlip("Enchanted Coal", 160, "COAL_7", "Enchanted Block Of Coal", crafted_id="ENCHANTED_COAL_BLOCK"), # Iron BazaarCraftBazaarFlip("Iron
EXTENSION_PUBLISHER_NAME, EXTENSION_IMAGE_TYPE, EXTENSION_IMAGE_VERSION) # TODO: doesn't belong here # Get Container Service[get] # result = self.mgmt_client.container_services.get(resource_group.name, CONTAINER_SERVICE_NAME) # Get a dedicated host.[get] result = self.mgmt_client.dedicated_hosts.get(resource_group.name, HOST_GROUP_NAME, HOST_NAME) # Get a gallery image.[get] result = self.mgmt_client.gallery_images.get(resource_group.name, GALLERY_NAME, IMAGE_NAME) # Lists all available virtual machine sizes to which the specified virtual machine can be resized[get] result = self.mgmt_client.virtual_machines.list_available_sizes(resource_group.name, VIRTUAL_MACHINE_NAME) # # Get information about a disk encryption set.[get] # result = self.mgmt_client.disk_encryption_sets.get(resource_group.name, DISK_ENCRYPTION_SET_NAME) # Get a Virtual Machine.[get] result = self.mgmt_client.virtual_machines.get(resource_group.name, VIRTUAL_MACHINE_NAME) # Get a virtual machine scale set (TODO: need swagger file) result = self.mgmt_client.virtual_machine_scale_sets.get(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME) # Get virtual machine scale set os upgrade history (TODO: need swagger file) result = self.mgmt_client.virtual_machine_scale_sets.get_os_upgrade_history(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME) # Get instance view of virtual machine scale set (TODO: need swagger file) result = self.mgmt_client.virtual_machine_scale_sets.get_instance_view(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME) # Get virtual machine extension (TODO: need swagger file) result = self.mgmt_client.virtual_machine_extensions.get(resource_group.name, VIRTUAL_MACHINE_NAME, VIRTUAL_MACHINE_EXTENSION_NAME) # Get virtual machine scale set extension (TODO: need swagger file) result = self.mgmt_client.virtual_machine_scale_set_extensions.get(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME, VMSS_EXTENSION_NAME) # Get VMSS vm extension (TODO: need swagger file) # result = self.mgmt_client.virtual_machine_scale_set_vm_extensions.get(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME, INSTANCE_ID, VIRTUAL_MACHINE_EXTENSION_NAME) # List VMSS vm extensions (TODO: need swagger file) # result = self.mgmt_client.virtual_machine_scale_set_vm_extensions.list(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME, INSTANCE_ID) # TODO: it has a bug that doesn't send request and always returns [].
# List virtual machine scale set vms (TODO: need swagger file) result = self.mgmt_client.virtual_machine_scale_set_vms.list(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME) # INSTANCE_ID_1 = result.current_page[0].instance_id # INSTANCE_ID_2 = result.current_page[1].instance_id # INSTANCE_ID = INSTANCE_ID_1 # Get virtual machine scale set vm instance view (TODO: need swagger file) # result = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME, INSTANCE_ID) # Get virtual machine scale set vm (TODO: need swagger file) # result = self.mgmt_client.virtual_machine_scale_set_vms.get(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME, INSTANCE_ID) # INSTANCE_VM_1 = result # Create VMSS vm extension (TODO: need swagger file) # BODY = { # "location": "eastus", # "auto_upgrade_minor_version": True, # "publisher": "Microsoft.Azure.NetworkWatcher", # "virtual_machine_extension_type": "NetworkWatcherAgentWindows", # "type_handler_version": "1.4", # } # result = self.mgmt_client.virtual_machine_scale_set_vm_extensions.create_or_update(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME, INSTANCE_ID, VIRTUAL_MACHINE_EXTENSION_NAME, BODY) # result = result.result() # List virtual machine extensions (TODO: need swagger file) result = self.mgmt_client.virtual_machine_extensions.list(resource_group.name, VIRTUAL_MACHINE_NAME) # List virtual machine scale set extension (TODO: need swagger file) result = self.mgmt_client.virtual_machine_scale_set_extensions.list(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME) # List Virtual Machine images (TODO: need swagger file) result = self.mgmt_client.virtual_machine_images.list(AZURE_LOCATION, PUBLISHER_NAME, OFFER, SKUS) # List Virtual Machine image offers (TODO: need swagger file) result = self.mgmt_client.virtual_machine_images.list_offers(AZURE_LOCATION, PUBLISHER_NAME) # List Virtual Machine image publishers (TODO: need swagger file) result = self.mgmt_client.virtual_machine_images.list_publishers(AZURE_LOCATION) # List Virtual Machine image skus (TODO: need swagger file) result = self.mgmt_client.virtual_machine_images.list_skus(AZURE_LOCATION, PUBLISHER_NAME, OFFER) # List Virtual Machine extension image types (TODO: need swagger file) result = self.mgmt_client.virtual_machine_extension_images.list_types(AZURE_LOCATION, EXTENSION_PUBLISHER_NAME) # List Virtual Machine extension image versions (TODO: need swagger file) result = self.mgmt_client.virtual_machine_extension_images.list_versions(AZURE_LOCATION, EXTENSION_PUBLISHER_NAME, EXTENSION_IMAGE_TYPE) # List gallery Applications in a gallery.[get] result = self.mgmt_client.gallery_applications.list_by_gallery(resource_group.name, GALLERY_NAME) # List gallery images in a gallery.[get] result = self.mgmt_client.gallery_images.list_by_gallery(resource_group.name, GALLERY_NAME) # Get a dedicated host group.[get] result = self.mgmt_client.dedicated_host_groups.get(resource_group.name, HOST_GROUP_NAME) # Get information about a snapshot.[get] result = self.mgmt_client.snapshots.get(resource_group.name, SNAPSHOT_NAME) # Get a gallery.[get] result = self.mgmt_client.galleries.get(resource_group.name, GALLERY_NAME) # VirtualMachineRunCommandGet[get] RUN_COMMAND_NAME = "RunPowerShellScript" result = self.mgmt_client.virtual_machine_run_commands.get(AZURE_LOCATION, RUN_COMMAND_NAME) # TODO: doesn't belong here # # List Container Services by Resource Group[get] # result = self.mgmt_client.container_services.list_by_resource_group(resource_group.name) #
List proximity placement groups in a resource group.[get] result = self.mgmt_client.proximity_placement_groups.list_by_resource_group(resource_group.name) # Get information about a virtual machine image.[get] result = self.mgmt_client.images.get(resource_group.name, IMAGE_NAME) # Get information about a managed disk.[get] result = self.mgmt_client.disks.get(resource_group.name, DISK_NAME) # Get availability set (TODO: need swagger file) result = self.mgmt_client.availability_sets.get(resource_group.name, AVAILABILITY_SET_NAME) # TODO: The entity was not found in this Azure location. # Get virtual machine scale set latest rolling upgrade (TODO: need swagger file) # result = self.mgmt_client.virtual_machine_scale_set_rolling_upgrades.get_latest(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME) # List all disk encryption sets in a resource group.[get] result = self.mgmt_client.disk_encryption_sets.list_by_resource_group(resource_group.name) # List virtual machine scale sets in a resource group (TODO: need swagger file) result = self.mgmt_client.virtual_machine_scale_sets.list(resource_group.name) # List all virtual machine scale sets (TODO: need swagger file) result = self.mgmt_client.virtual_machine_scale_sets.list_all() # List virtual machine scale sets skus (TODO: need swagger file) result = self.mgmt_client.virtual_machine_scale_sets.list_skus(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME) # List the virtual machines (TODO: need swagger file) result = self.mgmt_client.virtual_machines.list(resource_group.name) # List all virtual machines (TODO: need swagger file) result = self.mgmt_client.virtual_machines.list_all() # Lists all the virtual machines under the specified subscription for the specified location.[get] result = self.mgmt_client.virtual_machines.list_by_location(AZURE_LOCATION) # List virtual machine sizes (TODO: need swagger file) result = self.mgmt_client.virtual_machine_sizes.list(AZURE_LOCATION) # List dedicated host groups in a resource group (TODO: need swagger file) result = self.mgmt_client.dedicated_host_groups.list_by_resource_group(resource_group.name) # List galleries in a resource group.[get] result = self.mgmt_client.galleries.list_by_resource_group(resource_group.name) # List all snapshots in a resource group.[get] result = self.mgmt_client.snapshots.list_by_resource_group(resource_group.name) # List all virtual machine images in a resource group.[get] result = self.mgmt_client.images.list_by_resource_group(resource_group.name) # List all managed disks in a resource group.[get] result = self.mgmt_client.disks.list_by_resource_group(resource_group.name) # List dedicated hosts in host group (TODO: need swagger file) result = self.mgmt_client.dedicated_hosts.list_by_host_group(resource_group.name, HOST_GROUP_NAME) # VirtualMachineRunCommandList[get] result = self.mgmt_client.virtual_machine_run_commands.list(AZURE_LOCATION) # TODO: dont belong here # # List Container Services[get] # result = self.mgmt_client.container_services.list() # List proximity placement groups in a subscription. 
[get] result = self.mgmt_client.proximity_placement_groups.list_by_subscription() # List all disk encryption sets in a subscription.[get] result = self.mgmt_client.disk_encryption_sets.list() # List dedicated host groups in a subscription (TODO: need swagger file) result = self.mgmt_client.dedicated_host_groups.list_by_subscription() # List availability sets in a subscription.[get] result = self.mgmt_client.availability_sets.list_by_subscription() # List availability sets (TODO: need swagger file) result = self.mgmt_client.availability_sets.list(resource_group.name) # List availability sets available sizes (TODO: need swagger file) result = self.mgmt_client.availability_sets.list_available_sizes(resource_group.name, AVAILABILITY_SET_NAME) # List galleries in a subscription.[get] result = self.mgmt_client.galleries.list() # List all snapshots in a subscription.[get] result = self.mgmt_client.snapshots.list() # List all virtual machine images in a subscription.[get] result = self.mgmt_client.images.list() # List all managed disks in a subscription.[get] result = self.mgmt_client.disks.list() # List usage (TODO: need swagger file) result = self.mgmt_client.usage.list(AZURE_LOCATION) # List operations (TODO: need swagger file) result = self.mgmt_client.operations.list() # Lists all available Resource SKUs[get] result = self.mgmt_client.resource_skus.list() # # Lists all available Resource SKUs for the specified region[get] # result = self.mgmt_client.resource_skus.list() # Update a dedicated host group.[put] BODY = { "tags": { "department": "finance" }, "platform_fault_domain_count": "3" } result = self.mgmt_client.dedicated_host_groups.update(resource_group.name, HOST_GROUP_NAME, BODY) # Update a snapshot.[patch] BODY = { "creation_data": { "create_option": "Copy", "source_uri": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Compute/disks/" + DISK_NAME } } result = self.mgmt_client.snapshots.update(resource_group.name, SNAPSHOT_NAME, BODY) result = result.result() # TODO: not finished # # Update VirtualMachineScaleSet VM extension.[patch] # BODY = { # "properties": { # "auto_upgrade_minor_version": True, # "publisher": "extPublisher", # "type": "extType", # "type_handler_version": "1.2", # "settings": { # "user_name": "<EMAIL>" # } # } # } # result = self.mgmt_client.virtual_machine_scale_set_vm_extensions.update(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME, VIRTUAL_MACHINE_NAME, EXTENSION_NAME, BODY) # result = result.result() # TODO: not finished # Update a simple gallery Application Version.[patch] # BODY = { # "properties": { # "publishing_profile": { # "source": { # "file_name": "package.zip", # "media_link": "https://mystorageaccount.blob.core.windows.net/mycontainer/package.zip?{sasKey}" # }, # "target_regions": [ # { # "name": "eastus", # "regional_replica_count": "1", # "storage_account_type": "Standard_LRS" # } # ], # "replica_count": "1", # "end_of_life_date": "2019-07-01T07:00:00Z", # "storage_account_type": "Standard_LRS" # } # } # } # result = self.mgmt_client.gallery_application_versions.update(resource_group.name, GALLERY_NAME, APPLICATION_NAME, VERSION_NAME, BODY) # result = result.result() # Start an extension rolling upgrade.[post] result = self.mgmt_client.virtual_machine_scale_set_rolling_upgrades.start_extension_upgrade(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME) result = result.result() # TODO: msrestazure.azure_exceptions.CloudError: Azure Error: MaxUnhealthyInstancePercentExceededInRollingUpgrade # Message:
Rolling Upgrade failed after exceeding the MaxUnhealthyInstancePercent value defined in the RollingUpgradePolicy. 100% of instances are in an unhealthy state after being upgraded - more than the threshold of 20% configured in the RollingUpgradePolicy. The most impactful error is: Instance found to be unhealthy or unreachable. For details on rolling upgrades, use http://aka.ms/AzureVMSSRollingUpgrade # Start vmss os upgrade (TODO: need swagger file) # result = self.mgmt_client.virtual_machine_scale_set_rolling_upgrades.start_os_upgrade(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME) # result = result.result() # Cancel vmss upgrade (TODO: need swagger file) # result = self.mgmt_client.virtual_machine_scale_set_rolling_upgrades.cancel(resource_group.name, VIRTUAL_MACHINE_SCALE_SET_NAME) # result = result.result() # Update disk.[patch] (TODO: need swagger file) BODY = { "disk_size_gb": "200" } result = self.mgmt_client.disks.update(resource_group.name, DISK_NAME, BODY) result = result.result() # Grant access disk (TODO: need swagger file) ACCESS = "Read" DURATION_IN_SECONDS = 1800
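# Hedged sketch (not part of the original test): assuming the track-1 azure-mgmt-compute client used throughout this file, Disks.grant_access accepts the access level and duration as flattened arguments and returns a poller, so the constants defined above would be consumed roughly as follows. # result = self.mgmt_client.disks.grant_access(resource_group.name, DISK_NAME, ACCESS, DURATION_IN_SECONDS) # result = result.result()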
try: from io import BytesIO except ImportError as e: from StringIO import StringIO as BytesIO try: import cPickle as pickle except ImportError as e: import pickle import functools from mango import mpi haveMpi4py = mpi.haveMpi4py import scipy as sp import scipy.optimize logger, rootLogger = mpi.getLoggers(__name__) class DistributedMetricEvaluator(object): """ Wrapper class for functions which are evaluated by combining (MPI-reducing) a result from each MPI process. See also the :func:`distributed_minimize` function. Example:: import mango.mpi import mango.optimize import scipy as sp import scipy.optimize def my_func(x): return (mango.mpi.world.Get_rank()+1) * (x * x + x + 1) # dfunc sums/reduces (rank+1)*(x*x + x + 1) from all processes to the dfunc.root process dfunc = mango.optimize.DistributedMetricEvaluator(my_func) if (dfunc.comm.Get_rank() == dfunc.root): x0 = 8 res = scipy.optimize.minimize(dfunc, x0, method="Powell") dfunc.rootTerminate() print("res.x = %s, res.fun = %s" % (res.x, res.fun)) else: dfunc.waitForEvaluate() """ #: Instruction to evaluate local MPI function value, see :meth:`waitForEvaluate` EVALUATE = 0 #: Instruction to terminate the wait-loop in :meth:`waitForEvaluate` TERMINATE = 1 def __init__(self, funcObj, comm=mpi.world, root=0): self.funcObj = funcObj self.comm = comm self.root = root self.x = None def __getattr__(self, name): """ Python magic to forward attribute access to self.funcObj """ return getattr(self.funcObj, name) def evaluate(self, x): """ Evaluate the function at :samp:`x` on the local MPI process. """ self.x = x return self.funcObj(x) def calcReduction(self, localVal): """ Converts the *local* result :samp:`{localVal}` returned from the :meth:`evaluate` method to a MPI-reduced result. :type localVal: reducable :obj:`object` :param localVal: Object returned from the :meth:`evaluate` method. """ if (self.comm != None): metricVal = self.comm.reduce(localVal, root=self.root, op=mpi.SUM) else: metricVal = localVal return metricVal def rootEvaluate(self, x): """ Broadcasts :samp:`x` to all processes and then does local evaluation (:samp:`self.evaluate(x)`). MPI reduces (:samp:`op=mpi.SUM`) the results from all MPI processes and returns the reduced result. Should only be called by the :samp:`self.root`-rank process. :type x: broadcastable :obj:`object` :param x: Function parameter. :return: Reduction of the :meth:`evaluate` values from MPI processes. """ if (self.comm != None): (instr, x) = self.comm.bcast((self.EVALUATE, x), root=self.root) metricVal = self.evaluate(x) metricVal = self.calcReduction(metricVal) return metricVal def waitForEvaluate(self): """ Loops waiting for an :samp:`self.EVALUATE` broadcast of the :samp:`x` parameter from the :samp:`self.root`-rank process. Exits the loop when a :samp:`(self.TERMINATE,x)` broadcast is received. Should only be called by the non-:samp:`self.root`-rank MPI processes. """ (instr, x) = self.comm.bcast(root=self.root) while (instr != self.TERMINATE): metricVal = self.evaluate(x) metricVal = self.calcReduction(metricVal) (instr, x) = self.comm.bcast(root=self.root) def rootTerminate(self): """ Issues a :samp:`(self.TERMINATE,x)` broadcast so other processes exit the :meth:`waitForEvaluate` loop. Should only be called from the :samp:`self.root`-rank process. """ if (self.comm != None): (instr, x) = self.comm.bcast((self.TERMINATE, None), root=self.root) def __call__(self, x): """ Method which makes this object behave like a function, should only be called from the :samp:`self.root`-rank process.
""" if (self.comm != None): if (self.root == self.comm.Get_rank()): metricVal = self.rootEvaluate(x) else: raise RuntimeError("__call__ method called from non-root process, rank=%s, self.root=%s" % (self.comm.Get_rank(), self.root)) else: metricVal = self.rootEvaluate(x) return metricVal def distributed_minimize(fun, *args, **kwargs): """ Wrapper for the :func:`scipy.optimize.minimize` function for distributed function evaluation on multiple MPI processors. The :samp:`{fun}` argument should be an instance of :obj:`DistributedMetricEvaluator`. This function should be called from all MPI processes. :type fun: :obj:`DistributedMetricEvaluator` :param fun: Function which is to be minimized. :type bcastres: :obj:`bool` :param bcastres: If :samp:`True` (default is :samp:`True`) the result object (:obj:`scipy.optimize.OptimizeResult`) returned by :func:`scipy.optimize.minimize` is broadcast from the :samp:`{fun}.root` process to all other :samp:`{fun}.comm` MPI processes. If :samp:`False` only the :samp:`{fun}.root` process returns the result and other processes return the :samp:`None` value. Example:: import mango.mpi import scipy as sp import mango.optimize def my_func(x): return (mango.mpi.world.Get_rank()+1) * (x * x + x + 1) # dfunc sums/reduces (rank+1)*(x*x + x + 1) from all processes to the dfunc.root process dfunc = mango.optimize.DistributedMetricEvaluator(my_func) x0 = 8 res = mango.optimize.minimize(dfunc, x0, method="Powell") print("res.x = %s, res.fun = %s" % (res.x, res.fun)) """ res = None if ((fun.comm == None) or ((fun.comm != None) and (fun.root == fun.comm.Get_rank()))): res = sp.optimize.minimize(fun, *args, **kwargs) fun.rootTerminate() else: fun.waitForEvaluate() if ((fun.comm != None) and (("bcastres" not in kwargs.keys()) or kwargs["bcastres"])): res = fun.comm.bcast(res, fun.root) return res class OptimizeResult: """ Optimization result object returned by SingleStartOptimizer.optimize method. """ def __init__(self): self.optim = None self.start = None self.cost = None self.numIterations = None self.numFuncEvals = None self.startIndex = None def getStartIndex(self): return self.startIndex def getNumIterations(self): return self.numIterations def getNumFuncEvals(self): return self.numFuncEvals def getStart(self): return self.start def getOptim(self): return self.optim def getCost(self): return self.cost def __str__(self): optimFmtStr = "%16.8f" startFmtStr = "%16.8f" if (hasattr(self.optim, "__len__")): optimStr = "(" optimStr += optimFmtStr % self.optim[0] for comp in self.optim[1:]: optimStr += optimFmtStr % comp optimStr += ")" else: optimStr = optimFmtStr % self.optim if (hasattr(self.start, "__len__")): startStr = "(" startStr += startFmtStr % self.start[0] for comp in self.start[1:]: startStr += startFmtStr % comp startStr += ")" else: startStr = startFmtStr % self.start return \ ( "cost=%16.8f,optim=%s,start=%s,startIdx=%6d,numFuncEval=%8d,numIterations=%6d" % ( self.cost, optimStr, startStr, self.startIndex, self.numFuncEvals, self.numIterations ) ) class SingleStartOptimizer: """ Base class for single start optimizers. """ def __init__(self): pass def cmpOptimizeResult(self, r0, r1): """ Compare two OptimizeResult objects, used for sorting list of OptimizeResult objects. 
""" d = r0.cost - r1.cost if (d < 0): return -1 elif (d > 0): return 1 return 0 def optimize(self, startPrms): raise NotImplementedError("Derived class does not implement optimize method") def minimize(self, startPrms): return self.optimize(startPrms) class MultiStartOptimizer: """ Runs an optimization, using multiple starting values, in an attempt to find a global minimum. Can take advantage of mpi4py as long as the start-parameter-objects can be pickled (and subsequently sent to remote MPI processes). The self.rootRank master MPI process uses asynchronous communication to hand out individual (single-start) optimization jobs to remote worker MPI processes. """ def __init__(self, singleStartOptimizer, rootRank=0, mpiComm=None): """ Initialise. :type singleStartOptimizer: SingleStartOptimizer :param singleStartOptimizer: An object which can perform an optimization of a single start-parameter object. :type rootRank: int :param rootRank: Rank of the process which controls the handing out of individual (single-start) optimization jobs """ self.ssOptimizer = singleStartOptimizer self.prmIterator = None self.rootRank = rootRank self.terminatePrmArray = None self.byteArrayRecvSize = 2**19 self.resultList = None if (mpi.haveMpi4py and (mpiComm == None)): mpiComm = mpi.world self.mpiComm = mpiComm self.numMpiProcsForAsync = 4 self.START_PARAM_TAG = 0 self.OPTIM_RESULT_TAG = 1 def setNumMpiProcessesForAsyncProcessing(self, numMpiProcs): """ Set the number of MPI processes which triggers the use of asynchronous master/slave processing of the multi-start optimizations. """ self.numMpiProcsForAsync = numMpiProcs def broadcast(self, prmIterator): if ((self.mpiComm == None) or (self.mpiComm.Get_rank() == self.rootRank)): self.prmIterator = prmIterator else: self.prmIterator = None if (self.mpiComm != None): self.prmIterator = self.mpiComm.bcast(self.prmIterator, root=self.rootRank) def broadcastResultList(self): if (self.mpiComm != None): self.resultList = self.mpiComm.bcast(self.resultList, root=self.rootRank) def pickleToByteArray(self, obj): bytesIO = BytesIO() pickle.dump(obj, bytesIO) return bytearray(bytesIO.getvalue()) def unpickleFromByteArray(self, ba): bytesIO = BytesIO(ba) return pickle.load(bytesIO) def doAsyncMpiMasterSends(self): if (self.mpiComm.Get_rank() == self.rootRank): optimReqDict = dict() SREQ = 0 SBUF = 1 RREQ = 2 RBUF = 3 worldSz = self.mpiComm.Get_size() workerSz = worldSz-1 for rank in range(1, worldSz): optimReqDict[rank] = [None, bytearray(), None, bytearray()] prmIdx = 0 startRankIdx = 0 resultList = [] completeCount = 0 numOptimsPerMpiProc = len(self.prmIterator) for startPrm in self.prmIterator: foundWorker = False while (not foundWorker): for rankIdx in range(startRankIdx, workerSz): rank = rankIdx + 1 optimReq = optimReqDict[rank] if ((optimReq[RREQ] == None) or (optimReq[RREQ].Test())): if ((optimReq[RREQ] != None)): result = self.unpickleFromByteArray(optimReq[RBUF]) if (result.optim != None): resultList.append(result) completeCount += 1 logger.info("Completed %5d of %5d optimizations." 
% (completeCount, numOptimsPerMpiProc)) if (len(resultList) > 0): resultList.sort(key=functools.cmp_to_key(self.ssOptimizer.cmpOptimizeResult)) logger.info("Best result so far:\n%s" % (str(resultList[0]),)) foundWorker = True optimReq[SBUF] = self.pickleToByteArray([prmIdx, startPrm]) optimReq[RBUF] = bytearray(self.byteArrayRecvSize) optimReq[SREQ] = self.mpiComm.Isend(optimReq[SBUF], dest = rank, tag = self.START_PARAM_TAG) optimReq[RREQ] = self.mpiComm.Irecv(optimReq[RBUF], source = rank, tag = self.OPTIM_RESULT_TAG) startRankIdx = rankIdx+1 break startRankIdx = 0 prmIdx += 1 waitingForOptimResult = True while (waitingForOptimResult): waitingForOptimResult = False for rankIdx in range(0, workerSz): rank = rankIdx + 1 optimReq = optimReqDict[rank] if (optimReq[SREQ] == None):
: A Scikit-learn model instance. Returns ------- kernel_kwargs : Dictionary Get the respective kernel type of the SVM model. """ kernel_kwargs = dict() if model.kernel == 'linear': kernel_kwargs['LinearKernelType'] = pml.LinearKernelType(description='Linear Kernel Type') elif model.kernel == 'poly': kernel_kwargs['PolynomialKernelType'] = pml.PolynomialKernelType(description='Polynomial Kernel type', gamma="{:.16f}".format(model._gamma), coef0="{:.16f}".format(model.coef0), degree=model.degree) elif model.kernel == 'rbf': kernel_kwargs['RadialBasisKernelType'] = pml.RadialBasisKernelType(description='Radial Basis Kernel Type', gamma="{:.16f}".format(model._gamma)) elif model.kernel == 'sigmoid': kernel_kwargs['SigmoidKernelType'] = pml.SigmoidKernelType(description='Sigmoid Kernel Type', gamma="{:.16f}".format(model._gamma), coef0="{:.16f}".format(model.coef0)) else: raise NotImplementedError("{} kernel is not implemented!".format(model.kernel)) return kernel_kwargs def get_supportVectorMachine(model): """ It returns the SupportVectorMachine element. Parameters ---------- model : A Scikit-learn model instance. Returns ------- support_vector_machines : List Get the Support Vector Machine element which contains targetCategory, alternateTargetCategory, SupportVectors, Coefficients """ support_vector_machines = list() if model.__class__.__name__ in ['SVR','OneClassSVM']: support_vector = list() for sv in model.support_: support_vector.append(pml.SupportVector(vectorId=sv)) support_vectors = pml.SupportVectors(SupportVector=support_vector) coefficient = list() absoValue = model.intercept_[0] if model.dual_coef_.__class__.__name__ != 'csr_matrix': for coef in model.dual_coef_: for num in coef: coefficient.append(pml.Coefficient(value="{:.16f}".format(num))) else: dual_coefficient = model.dual_coef_.data for num in dual_coefficient: coefficient.append(pml.Coefficient(value="{:.16f}".format(num))) coeff = pml.Coefficients(absoluteValue=absoValue, Coefficient=coefficient) support_vector_machines.append(pml.SupportVectorMachine(SupportVectors=support_vectors, Coefficients=coeff)) else: support_vector_locs = np.cumsum(np.hstack([[0], model.n_support_])) n_class = model.dual_coef_.shape[0] + 1 coef_abs_val_index = 0 for class1 in range(n_class): sv1 = model.support_[support_vector_locs[class1]:support_vector_locs[class1 + 1]] for class2 in range(class1 + 1, n_class): svs = list() coefs = list() sv2 = model.support_[support_vector_locs[class2]:support_vector_locs[class2 + 1]] svs.append((list(sv1) + list(sv2))) alpha1 = model.dual_coef_[class2 - 1, support_vector_locs[class1]:support_vector_locs[class1 + 1]] alpha2 = model.dual_coef_[class1, support_vector_locs[class2]:support_vector_locs[class2 + 1]] coefs.append((list(alpha1) + list(alpha2))) all_svs = list() for sv in (svs[0]): all_svs.append(pml.SupportVector(vectorId=sv)) all_coefs = list() for coef in (coefs[0]): all_coefs.append(pml.Coefficient(value="{:.16f}".format(coef))) coef_abs_value = model.intercept_[coef_abs_val_index] coef_abs_val_index += 1 if len(model.classes_) == 2: support_vector_machines.append( pml.SupportVectorMachine( targetCategory=model.classes_[class1], alternateTargetCategory=model.classes_[class2], SupportVectors=pml.SupportVectors(SupportVector=all_svs), Coefficients=pml.Coefficients(absoluteValue="{:.16f}".format(coef_abs_value), Coefficient=all_coefs) ) ) else: support_vector_machines.append( pml.SupportVectorMachine( targetCategory=model.classes_[class2], alternateTargetCategory=model.classes_[class1],
SupportVectors=pml.SupportVectors(SupportVector=all_svs), Coefficients=pml.Coefficients(absoluteValue="{:.16f}".format(coef_abs_value), Coefficient=all_coefs) ) ) return support_vector_machines def get_tree_models(model, derived_col_names, col_names, target_name, mining_imp_val,categoric_values,tasktype): """ It returns the TreeModel element of the model. Parameters ---------- model : A Scikit-learn model instance. derived_col_names : Contains column names after preprocessing. col_names : List Contains list of feature/column names. target_name : String Name of the Target column. mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value Returns ------- tree_models : List Get the TreeModel element. """ model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val,categoric_values) tree_models = list() tree_models.append(pml.TreeModel( modelName=model.__class__.__name__, Node=get_node(model, derived_col_names), taskType=tasktype, **model_kwargs )) return tree_models def get_neural_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype): """ It returns the NeuralNetwork element of the model. Parameters ---------- model : A Scikit-learn model instance. derived_col_names : List Contains column names after preprocessing. col_names : List Contains list of feature/column names. target_name : String Name of the Target column. mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value. Returns ------- neural_model : List Model attributes for PMML file. """ model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val,categoric_values) neural_model = list() neural_model.append(pml.NeuralNetwork( modelName=model.__class__.__name__, threshold='0', altitude='1.0', activationFunction=get_funct(model), NeuralInputs = get_neuron_input(derived_col_names), NeuralLayer = get_neural_layer(model, derived_col_names, target_name)[0], NeuralOutputs = get_neural_layer(model, derived_col_names, target_name)[1], **model_kwargs )) return neural_model def get_funct(sk_model): """ It returns the activation function of the model. Parameters ---------- sk_model : A Scikit-learn model instance. Returns ------- a_fn : String Returns the activation function. """ a_fn = sk_model.activation if a_fn =='relu': a_fn = 'rectifier' return a_fn def get_regrs_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype): """ It returns the RegressionModel element of the model. Parameters ---------- model : A Scikit-learn model instance. derived_col_names : List Contains column names after preprocessing. col_names : List Contains list of feature/column names. target_name : String Name of the Target column.
mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value categoric_values : tuple Contains Categorical attribute names and its values Returns ------- regrs_models : List Returns a regression model of the respective model """ model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val, categoric_values) if model.__class__.__name__ not in ['LinearRegression','LinearSVR']: model_kwargs['normalizationMethod'] = 'logit' regrs_models = list() regrs_models.append(pml.RegressionModel( modelName=model.__class__.__name__, RegressionTable=get_regrs_tabl(model, derived_col_names, target_name, categoric_values), taskType=tasktype, **model_kwargs )) return regrs_models def get_regrs_tabl(model, feature_names, target_name, categoric_values): """ It returns the RegressionTable element of the model. Parameters ---------- model : A Scikit-learn model instance. feature_names : List Contains column names after preprocessing. target_name : String Name of the Target column. categoric_values : tuple Contains Categorical attribute names and its values Returns ------- merge : List Returns a list of RegressionTable elements. """ merge = list() if hasattr(model, 'intercept_'): func_name = get_mining_func(model) inter = model.intercept_ model_coef = model.coef_ merge = list() target_classes = target_name row_idx = 0 if not hasattr(inter, '__iter__') or model.__class__.__name__ in ['LinearRegression','LinearSVR']: inter = np.array([inter]) target_classes = [target_classes] model_coef = np.ravel(model_coef) model_coef = model_coef.reshape(1, model_coef.shape[0]) target_cat = None else: target_classes = model.classes_ max_target_index = len(target_classes) - 1 target_cat = target_classes[max_target_index] if len(inter) == 1: regr_predictor = get_regr_predictors(model_coef, row_idx, feature_names, categoric_values) merge.append( pml.RegressionTable( intercept="{:.16f}".format(inter.item()), targetCategory=target_cat, NumericPredictor=regr_predictor ) ) if func_name != 'regression': merge.append( pml.RegressionTable( intercept="0.0", targetCategory=target_classes[0] ) ) else: for tgname, tg_idx in zip(np.unique(target_classes), range(len(np.unique(target_classes)))): row_idx = tg_idx regr_predictors = get_regr_predictors(model_coef, row_idx, feature_names, categoric_values) merge.append( pml.RegressionTable( intercept="{:.16f}".format(inter[tg_idx]), targetCategory=tgname, NumericPredictor=regr_predictors ) ) else: if len(model.classes_) == 2: merge.append( pml.RegressionTable( NumericPredictor=[pml.NumericPredictor(coefficient='1.0',name=feature_names[0])], intercept='0.0', targetCategory=str(model.classes_[-1]) ) ) merge.append( pml.RegressionTable(intercept='0.0', targetCategory=str(model.classes_[0])) ) else: for feat_idx in range(len(feature_names)): merge.append( pml.RegressionTable( NumericPredictor=[pml.NumericPredictor(coefficient='1.0',name=feature_names[feat_idx])], intercept='0.0', targetCategory=str(model.classes_[feat_idx]) ) ) return merge def get_node(model, features_names, main_model=None): """ It returns the Node element of the model. Parameters ---------- model : An instance of the estimator of the tree object. features_names : List Contains the list of feature/column name. main_model : A Scikit-learn model instance. Returns ------- _getNode : Get all the underlying Nodes.
""" tree = model.tree_ node_samples = tree.n_node_samples if main_model and main_model.__class__.__name__ == 'RandomForestClassifier': classes = main_model.classes_ elif hasattr(model,'classes_'): classes = model.classes_ tree_leaf = -1 def _getNode(idx,parent=None, cond=None): simple_pred_cond = None if cond: simple_pred_cond = cond node = pml.Node(id=idx, recordCount=float(tree.n_node_samples[idx])) if simple_pred_cond: node.SimplePredicate = simple_pred_cond else: node.True_ = pml.True_() if tree.children_left[idx] != tree_leaf: fieldName = features_names[tree.feature[idx]] prnt = None if model.__class__.__name__ == "ExtraTreeRegressor": prnt = parent + 1 simplePredicate = pml.SimplePredicate(field=fieldName, operator="lessOrEqual", value="{:.16f}".format(tree.threshold[idx])) left_child = _getNode(tree.children_left[idx],prnt, simplePredicate) simplePredicate = pml.SimplePredicate(field=fieldName, operator="greaterThan", value="{:.16f}".format(tree.threshold[idx])) right_child = _getNode(tree.children_right[idx],prnt, simplePredicate) node.add_Node(left_child) node.add_Node(right_child) else: nodeValue = list(tree.value[idx][0]) lSum = float(sum(nodeValue)) if model.__class__.__name__ == 'DecisionTreeClassifier': probs = [x / lSum for x in nodeValue] score_dst = [] for i in range(len(probs)): score_dst.append(pml.ScoreDistribution(confidence=probs[i], recordCount=float(nodeValue[i]), value=classes[i])) node.ScoreDistribution = score_dst node.score = classes[probs.index(max(probs))] else: if model.__class__.__name__ == "ExtraTreeRegressor": nd_sam=node_samples[int(idx)] node.score = "{:.16f}".format(parent+avgPathLength(nd_sam)) else: node.score="{:.16f}".format(lSum) return node if model.__class__.__name__ == "ExtraTreeRegressor": return _getNode(0,0) else: return _getNode(0) def avgPathLength(n): if n<=1.0: return 1.0 return 2.0*(math.log(n-1.0)+0.57721566) - 2.0*((n-1.0)/n) def get_output(model, target_name): """ It returns the output element of the model. Parameters ---------- model : A Scikit-learn model instance. target_name : String Name of the Target column. Returns ------- Output : Get the Output element. """ mining_func = get_mining_func(model) output_fields = list() if not has_target(model): output_fields.append(pml.OutputField( name='predicted', feature="predictedValue", optype="categorical", dataType="double" )) else: alt_target_name = 'predicted_' + target_name if mining_func == 'classification': for cls in model.classes_: output_fields.append(pml.OutputField( name='probability_' + str(cls), feature="probability", optype="continuous", dataType="double", value=str(cls) )) output_fields.append(pml.OutputField( name=alt_target_name, feature="predictedValue", optype="categorical", dataType="string")) else: output_fields.append(pml.OutputField( name=alt_target_name, feature="predictedValue", optype="continuous", dataType="double")) return pml.Output(OutputField=output_fields) def get_mining_func(model): """ It returns the name of the mining function of the model. Parameters ---------- model : A Scikit-learn model instance. 
Returns ------- func_name : String Returns the function name of the model """ if not hasattr(model, 'classes_'): if hasattr(model,'n_clusters'): func_name = 'clustering' else: func_name = 'regression' else: if isinstance(model.classes_, np.ndarray): func_name = 'classification' else: func_name = 'regression' return func_name def get_mining_schema(model, feature_names, target_name, mining_imp_val, categoric_values): """ It returns the Mining Schema of the model. Parameters ---------- model : A Scikit-learn model instance. feature_names : List Contains the list of feature/column name. target_name : String Name of the Target column. mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value. Returns ------- MiningSchema : Get the MiningSchema element """ if mining_imp_val: mining_attributes = mining_imp_val[0] mining_strategy = mining_imp_val[1] mining_replacement_val = mining_imp_val[2] n_features = len(feature_names) features_pmml_optype = ['continuous'] * n_features features_pmml_utype = ['active'] * n_features target_pmml_utype = 'target' mining_func = get_mining_func(model) if mining_func == 'classification': target_pmml_optype = 'categorical' elif mining_func == 'regression': target_pmml_optype = 'continuous' mining_flds = list() mining_name_stored = list() # handling impute pre processing if mining_imp_val: for mining_item, mining_idx
self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height) def is_partly_within_image(self, image): """Estimate whether the BB is at least partially inside the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. Returns ------- bool ``True`` if the bounding box is at least partially inside the image area. ``False`` otherwise. """ shape = normalize_shape(image) height, width = shape[0:2] eps = np.finfo(np.float32).eps img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps) return self.intersection(img_bb) is not None def is_out_of_image(self, image, fully=True, partly=False): """Estimate whether the BB is partially/fully outside of the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. fully : bool, optional Whether to return ``True`` if the bounding box is fully outside of the image area. partly : bool, optional Whether to return ``True`` if the bounding box is at least partially outside of the image area. Returns ------- bool ``True`` if the bounding box is partially/fully outside of the image area, depending on defined parameters. ``False`` otherwise. """ if self.is_fully_within_image(image): return False elif self.is_partly_within_image(image): return partly return fully @ia.deprecated(alt_func="BoundingBox.clip_out_of_image()", comment="clip_out_of_image() has exactly the same " "interface.") def cut_out_of_image(self, *args, **kwargs): return self.clip_out_of_image(*args, **kwargs) def clip_out_of_image(self, image): """Clip off all parts of the BB box that are outside of the image. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use for the clipping of the bounding box. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. Returns ------- imgaug.augmentables.bbs.BoundingBox Bounding box, clipped to fall within the image dimensions. """ shape = normalize_shape(image) height, width = shape[0:2] assert height > 0, ( "Expected image with height>0, got shape %s." % (image.shape,)) assert width > 0, ( "Expected image with width>0, got shape %s." % (image.shape,)) eps = np.finfo(np.float32).eps x1 = np.clip(self.x1, 0, width - eps) x2 = np.clip(self.x2, 0, width - eps) y1 = np.clip(self.y1, 0, height - eps) y2 = np.clip(self.y2, 0, height - eps) return self.copy( x1=x1, y1=y1, x2=x2, y2=y2, label=self.label ) # TODO convert this to x/y params? def shift(self, top=None, right=None, bottom=None, left=None): """Move this bounding box along the x/y-axis. Parameters ---------- top : None or int, optional Amount of pixels by which to shift this object *from* the top (towards the bottom). right : None or int, optional Amount of pixels by which to shift this object *from* the right (towards the left). bottom : None or int, optional Amount of pixels by which to shift this object *from* the bottom (towards the top). left : None or int, optional Amount of pixels by which to shift this object *from* the left (towards the right). Returns ------- imgaug.augmentables.bbs.BoundingBox Shifted bounding box.
""" top = top if top is not None else 0 right = right if right is not None else 0 bottom = bottom if bottom is not None else 0 left = left if left is not None else 0 return self.copy( x1=self.x1+left-right, x2=self.x2+left-right, y1=self.y1+top-bottom, y2=self.y2+top-bottom ) # TODO add explicit test for zero-sized BBs (worked when tested by hand) def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1, copy=True, raise_if_out_of_image=False, thickness=None): """Draw the bounding box on an image. Parameters ---------- image : (H,W,C) ndarray The image onto which to draw the bounding box. Currently expected to be ``uint8``. color : iterable of int, optional The color to use, corresponding to the channel layout of the image. Usually RGB. alpha : float, optional The transparency of the drawn bounding box, where ``1.0`` denotes no transparency and ``0.0`` is invisible. size : int, optional The thickness of the bounding box in pixels. If the value is larger than ``1``, then additional pixels will be added around the bounding box (i.e. extension towards the outside). copy : bool, optional Whether to copy the input image or change it in-place. raise_if_out_of_image : bool, optional Whether to raise an error if the bounding box is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. thickness : None or int, optional Deprecated. Returns ------- (H,W,C) ndarray(uint8) Image with bounding box drawn on it. """ if thickness is not None: ia.warn_deprecated( "Usage of argument 'thickness' in BoundingBox.draw_on_image() " "is deprecated. The argument was renamed to 'size'.") size = thickness if raise_if_out_of_image and self.is_out_of_image(image): raise Exception( "Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f " "on image with shape %s." % ( self.x1, self.y1, self.x2, self.y2, image.shape)) result = np.copy(image) if copy else image if isinstance(color, (tuple, list)): color = np.uint8(color) for i in range(size): y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int # When y values get into the range (H-0.5, H), the *_int functions # round them to H. That is technically sensible, but in the case # of drawing means that the border lies just barely outside of # the image, making the border disappear, even though the BB is # fully inside the image. Here we correct for that because of # beauty reasons. Same is the case for x coordinates. if self.is_fully_within_image(image): y1 = np.clip(y1, 0, image.shape[0]-1) y2 = np.clip(y2, 0, image.shape[0]-1) x1 = np.clip(x1, 0, image.shape[1]-1) x2 = np.clip(x2, 0, image.shape[1]-1) y = [y1-i, y1-i, y2+i, y2+i] x = [x1-i, x2+i, x2+i, x1-i] rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape) if alpha >= 0.99: result[rr, cc, :] = color else: if ia.is_float_array(result): # TODO use blend_alpha here result[rr, cc, :] = ( (1 - alpha) * result[rr, cc, :] + alpha * color) result = np.clip(result, 0, 255) else: input_dtype = result.dtype result = result.astype(np.float32) result[rr, cc, :] = ( (1 - alpha) * result[rr, cc, :] + alpha * color) result = np.clip(result, 0, 255).astype(input_dtype) return result # TODO add tests for pad and pad_max def extract_from_image(self, image, pad=True, pad_max=None, prevent_zero_size=True): """Extract the image pixels within the bounding box. This function will zero-pad the image if the bounding box is partially/fully outside of the image. 
Parameters ---------- image : (H,W) ndarray or (H,W,C) ndarray The image from which to extract the pixels within the bounding box. pad : bool, optional Whether to zero-pad the image if the object is partially/fully outside of it. pad_max : None or int, optional The maximum number of pixels that may be zero-padded on any side, i.e. if this has value ``N`` the total maximum of added pixels is ``4*N``. This option exists to prevent extremely large images as a result of single points being moved very far away during augmentation. prevent_zero_size : bool, optional Whether to prevent the height or width of the extracted image from becoming zero. If this is set to ``True`` and the height or width of the bounding box is below ``1``, the height/width will be increased to ``1``. This can be useful to prevent problems, e.g. with image saving or plotting. If it is set to ``False``, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H'`` or ``W'`` potentially being 0. Returns ------- (H',W') ndarray or (H',W',C) ndarray Pixels within the bounding box. Zero-padded if the bounding box is partially/fully outside of the image. If `prevent_zero_size` is activated, it is guaranteed that ``H'>0`` and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``. """ pad_top = 0 pad_right = 0 pad_bottom = 0 pad_left = 0 height,
<reponame>BBN-Q/pyWRspice # Copyright (c) 2019 <NAME> - Quantum Group """ Python wrapper for WRspice class RawFile: Read and store WRspice output data into Variable structures. class WRWrapper: Run WRspice script via WRspice simulator. """ import numpy as np import pandas as pd import os, tempfile, time, datetime import uuid, itertools, logging, subprocess import multiprocessing as mp logging.basicConfig(level=logging.WARNING) # Get the run_parallel.py file dir_path = os.path.dirname(os.path.realpath(__file__)) fexec = os.path.join(dir_path,"data","run_parallel.py") #------------------------------------------ # Wrapper for convenient parallel loops #------------------------------------------ class WRWrapper: """ Wrapper for WRspice simulator. script: Declare the script with python format strings. '{output_file}' should be written by the script in the .control block. Any other keywords (which become mandatory) are added as named slots in the format string. source: WRspice .cir source file work_dir: Working directory. If None, use a temporary one. command: location of the wrspice exec file, depending on specific system: For Unix systems, it is likely "/usr/local/xictools/bin/wrspice" For Windows, it is likely "C:/usr/local/xictools/bin/wrspice.bat" """ def __init__(self, script=None, source=None, work_dir=None, command="/usr/local/xictools/bin/wrspice"): self.script = script if source is not None: self.get_script(source) if work_dir is None: self.work_dir = tempfile.TemporaryDirectory().name else: self.work_dir = work_dir if not os.path.exists(self.work_dir): os.mkdir(self.work_dir) self.command = backslash(command) def _new_fname(self, prefix="",suffix=""): """ Create a temporary file in the temporary folder """ return backslash(os.path.join(self.work_dir, prefix+str(uuid.uuid4())+suffix)) def get_script(self,fname): """ Get WRspice script from .cir file """ with open(fname,'r') as f: lines = f.readlines() self.script = "".join(lines) def fullpath(self,fname): """ Return the full path of a filename relative to working directory """ return backslash(os.path.join(self.work_dir,fname)) def render(self,script,kwargs): """ Render a script by formatting it with kwargs then write into a file Return circuit and output file names """ if "circuit_file" not in kwargs.keys(): kwargs["circuit_file"] = self._new_fname("tmp_script_",".cir") if "output_file" not in kwargs.keys() or kwargs["output_file"] in [None, ""]: kwargs["output_file"] = self._new_fname("tmp_output_",".raw") # Render rendered_script = script.format(**kwargs) with open(kwargs["circuit_file"],'w') as f: f.write(rendered_script) return kwargs["circuit_file"], kwargs["output_file"] def run(self,*script,read_raw=True,save_file=False,**kwargs): """ Execute the script, return output data from WRspice script: (Optional) WRspice script to be simulated read_raw: if True, read resulting raw data into memory save_file: if False and if read_raw, remove circuit and output files kwargs: keyword arguments to be passed to self.script """ if len(script)>0: # Assume the first argument is the script self.script = script[0] cir_fname, out_fname = self.render(self.script,kwargs) run_file(cir_fname,command=self.command) if read_raw: output = RawFile(out_fname, binary=True) if (not save_file): os.remove(cir_fname) os.remove(out_fname) else: output = out_fname return output def get_fconfig(self,fname="simconfig"): """ Generate a config file for parallel simulation """ now = datetime.datetime.now().strftime("_%Y%m%d_%H%M%S") fconfig = self.fullpath(fname + now 
+ ".csv") comments = ''.join(["# To run manually: python %s %s --processes=<num>\n" %(fexec,fconfig), "#%s -b \n" %self.command]) with open(fconfig,'w') as f: logging.info("Write configuration file: %s" %fconfig) f.write(comments) return fconfig def prepare_parallel(self, *script, **params): """ Write script files to prepare for the actual parallel simulation execution Return: a config file containing information of the simulation """ if len(script)>0: # Assume the first argument is the script self.script = script[0] # Disintegrate the parameters (dict) into iterative and non-iterative parts iter_params = {} # iterative params kws = {} for k,v in params.items(): if (not isinstance(v,str)) and hasattr(v,'__iter__'): # if param value is a list iter_params[k] = v else: kws[k] = v param_vals = list(itertools.product(*[iter_params[k] for k in iter_params.keys()])) # Write circuit files circuit_fnames = [] all_params = [] for i,vals in enumerate(param_vals): kws_cp = kws.copy() for pname,val in zip(iter_params.keys(), vals): kws_cp[pname] = val # Make sure they run separate script files if "circuit_file" not in kws_cp.keys(): kws_cp["circuit_file"] = self.fullpath("tmp_circuit_%d.cir" %i) else: kws_cp["circuit_file"] = self.fullpath(kws_cp["circuit_file"][:-4] + "_%d.cir" %i) if "output_file" not in kws_cp.keys() or kws_cp["output_file"] in [None,'']: kws_cp["output_file"] = self.fullpath("tmp_output_%d.raw" %i) else: kws_cp["output_file"] = self.fullpath(kws_cp["output_file"][:-4] + "_%d.raw" %i) circuit_fname, output_fname = self.render(self.script,kws_cp) circuit_fnames.append(circuit_fname) all_params.append(kws_cp) # Write config file fconfig = self.get_fconfig() df = pd.DataFrame(all_params) df.to_csv(fconfig,mode='a',index=False) return fconfig def remove_fconfig(self,fconfig,files=["circuit_file","output_file","config"]): """ Clean up the simulation files on local and remote locations based on the information in the fconfig file """ # Get simulation file names df = pd.read_csv(fconfig,skiprows=2) fend = os.path.join(os.path.dirname(fconfig),"finish_" + os.path.basename(fconfig)[:-4] + ".txt") all_files = [fend] filetypes = files.copy() if "config" in files: filetypes.pop(filetypes.index("config")) all_files.append(fconfig) for k in filetypes: all_files += list(df[k]) # Remove all of them logging.info("Remove files in %s" %files) for fname in all_files: os.remove(fname) def get_results(self,fconfig,timeout=20,read_raw=False): """ Get simulation results from server fconfig: the config file generated by self.prepare_parallel timeout (seconds): Maximum time to wait until the simulation finishes read_raw: If True, import raw files into memory; otherwise, return filenames only """ # First check if the simulation has finished t0 = time.time() t1 = time.time() fend = os.path.join(os.path.dirname(fconfig),"finish_" + os.path.basename(fconfig)[:-4] + ".txt") while t1-t0 < timeout: if os.path.exists(fend): break else: time.sleep(10) t1 = time.time() if not os.path.exists(fend): logging.error("Timeout: Simulation is not done yet. 
Try again later.") return None df = pd.read_csv(fconfig,skiprows=2) fnames = np.array(df["output_file"]) # Get output files from server if read_raw: results = [RawFile(fname,binary=True) for fname in fnames] else: results = fnames df["result"] = results return df def reshape_results(self,df,params): """ Reshape the results df: results DataFrame as returned by self.get_results params: simulated script parameters """ # Get iterative parameters iter_params = {} for k,v in params.items(): if (not isinstance(v,str)) and hasattr(v,'__iter__'): # if param value is a list iter_params[k] = v param_vals = list(itertools.product(*[iter_params[k] for k in iter_params.keys()])) dims = [len(v) for v in iter_params.values() if len(v)>1] data = np.array(df["result"]).reshape(dims) param_vals = np.array(param_vals).reshape(dims+[len(iter_params)]).T param_out = {} for i,pname in enumerate(iter_params.keys()): param_out[pname] = param_vals[i].T return param_out, data def run_fconfig(self,fconfig,processes=16): """ Run simulation in parallel based on information from fconfig """ # Simulate in parallel cmd = "python %s %s --processes=%d" %(fexec,fconfig,processes) logging.info("Run simulation: %s" %cmd) t1 = time.time() with subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, shell=False, stderr=subprocess.PIPE, env=os.environ.copy()) as process: t2 = time.time() proc_stds = process.communicate() # Get output messages proc_stdout = proc_stds[0].strip() msg = proc_stdout.decode('ascii') proc_stderr = proc_stds[1].strip() msg_err = proc_stderr.decode('ascii') if len(msg_err)>0 : print("WRspice ERROR when running: %s" %fin) print(msg_err) logging.debug(msg) logging.info("Finished execution. Time elapsed: %.1f seconds" %(t2-t1)) def run_parallel(self,*script,read_raw=True,processes=mp.cpu_count()//2,save_file=True,reshape=True,**params): """ Use multiprocessing to run in parallel script (optional): WRspice script to be simulated. processes: number of parallel processes if save_file==False: remove all relevant simulation files after execution (only if read_raw==True) if reshape==False: return output data as a pandas DataFrame if read_raw==True: import raw file into memory, otherise provide the list of output raw filenames """ fconfig = self.prepare_parallel(*script,**params) self.run_fconfig(fconfig,processes=processes) # Get output files back to local df = self.get_results(fconfig,read_raw=read_raw) if df is None: return df # Delete files if necessary if (not save_file) and read_raw: logging.debug("Remove temporary files") self.remove_fconfig(fconfig) if reshape: return self.reshape_results(df,params) else: return df def run_adaptive(self,*script,func=None,refine_func=None,max_num_points=100,processes=16,criterion="difference",**params): """ Run multiprocessing simulation witht adaptive repeats script: (Optional) WRspice script to be simulated. 
func: Function to calculate the desired output to be evaluated for repetition refine_func: Criterion function to determine the next points to run max_num_points: Maximum number of points """ if func is None or refine_func is None: raise ValueError("Refine functions not determined.") if len(script)>0: # Assume the first argument is the script self.script = script[0] # params has param name and value list # if dim(param)==0 (string or scalar), not include in the iteration iter_params = {} kws = {} for k,v in params.items(): if (not isinstance(v,str)) and hasattr(v,'__iter__'): iter_params[k] = v else: kws[k] = v new_points = np.array(list(itertools.product(*[v for v in iter_params.values()]))).flatten() num = int(len(new_points)/len(iter_params)) new_points = new_points.reshape(num,len(iter_params)) results_all = [] points_all = None while num <= max_num_points: """ Execute the simulations in parallel """ with mp.Pool(processes=processes) as pool: results = [] for i,vals in enumerate(new_points): kws_cp = kws.copy() for pname,val in zip(iter_params.keys(), vals): kws_cp[pname] = val logging.debug("Start to execute %d-th processes with parameters: %s" %(i+1,kws_cp)) results.append(pool.apply_async(self.run, (self.script,), kws_cp)) results = [result.get() for result in results] results_all += results if points_all is None: points_all = new_points else: points_all = np.concatenate([points_all,new_points],axis=0) # import ipdb; ipdb.set_trace() new_points = refine_func(points_all,func(points_all,results_all),criterion=criterion) num += len(new_points) # Return results and points results_all = np.array(results_all) param_out = {} points = points_all.T for i,pname in enumerate(iter_params.keys()): param_out[pname] = points[i].T return param_out, results_all #------------------------------ # Execute wrspice script #------------------------------ def backslash(path): """ Convert the path to backslash-seperated one """ p = str(path) ps =
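
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original source). It assumes
# WRspice is installed at the default `command` path; the deck and the `r1`
# slot below are made up for demonstration. Per render() above, any format
# slot other than {circuit_file}/{output_file} becomes a mandatory keyword.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_script = (
        "* RC transient\n"
        "R1 1 0 {r1}\n"
        "C1 1 0 1u\n"
        ".tran 1u 1m\n"
        ".control\n"
        "run\n"
        "write {output_file} v(1)\n"
        ".endc\n"
    )
    wrapper = WRWrapper(script=demo_script)
    # Single run: extra format slots are filled from **kwargs.
    data = wrapper.run(r1="1k", save_file=False)
    # Sweep: list-valued kwargs are expanded into a parallel batch, and the
    # results are reshaped against the swept parameter values.
    params, results = wrapper.run_parallel(r1=["1k", "2k", "5k"], processes=3)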
= {} result_obj_dict = copy.deepcopy(read_result) result_obj_dict.update(obj_dict) if result_obj_dict.get('multi_policy_service_chains_enabled'): import_export_targets = result_obj_dict.get('route_target_list') or {} import_targets = result_obj_dict.get('import_route_target_list') or {} export_targets = result_obj_dict.get('export_route_target_list') or {} import_targets_set = set(import_targets.get('route_target') or []) export_targets_set = set(export_targets.get('route_target') or []) targets_in_both_import_and_export = \ import_targets_set.intersection(export_targets_set) if ((import_export_targets.get('route_target') or []) or targets_in_both_import_and_export): msg = "Multi policy service chains are not supported, " msg += "with both import export external route targets" return (False, (409, msg)) return (True, '') @classmethod def _check_net_mode_for_flat_ipam(cls, obj_dict, db_dict): net_mode = None vn_props = None if 'virtual_network_properties' in obj_dict: vn_props = obj_dict['virtual_network_properties'] elif db_dict: vn_props = db_dict.get('virtual_network_properties') if vn_props: net_mode = vn_props.get('forwarding_mode') if net_mode != 'l3': return (False, "flat-subnet is allowed only with l3 network") else: return (True, "") @classmethod def _check_ipam_network_subnets(cls, obj_dict, db_conn, vn_uuid, db_dict=None): # if Network has subnets in network_ipam_refs, it should refer to # atleast one ipam with user-defined-subnet method. If network is # attached to all "flat-subnet", vn can not have any VnSubnetType cidrs if (('network_ipam_refs' not in obj_dict) and ('virtual_network_properties' in obj_dict)): # it is a network update without any changes in network_ipam_refs # but changes in virtual_network_properties # we need to read ipam_refs from db_dict and for any ipam if # if subnet_method is flat-subnet, network_mode should be l3 if db_dict is None: return (True, 200, '') else: db_ipam_refs = db_dict.get('network_ipam_refs') or [] ipam_with_flat_subnet = False ipam_uuid_list = [ipam['uuid'] for ipam in db_ipam_refs] if not ipam_uuid_list: return (True, 200, '') ok, ipam_lists = db_conn.dbe_list('network_ipam', obj_uuids=ipam_uuid_list) if not ok: return (ok, 500, 'Error in dbe_list: %s' % pformat(ipam_lists)) for ipam in ipam_lists: if 'ipam_subnet_method' in ipam: subnet_method = ipam['ipam_subnet_method'] ipam_with_flat_subnet = True break if ipam_with_flat_subnet: (ok, result) = cls._check_net_mode_for_flat_ipam(obj_dict, db_dict) if not ok: return (ok, 400, result) # validate ipam_refs in obj_dict either update or remove ipam_refs = obj_dict.get('network_ipam_refs') or [] ipam_subnets_list = [] ipam_with_flat_subnet = False for ipam in ipam_refs: ipam_fq_name = ipam['to'] ipam_uuid = ipam.get('uuid') if not ipam_uuid: ipam_uuid = db_conn.fq_name_to_uuid('network_ipam', ipam_fq_name) (ok, ipam_dict) = db_conn.dbe_read( obj_type='network_ipam', obj_ids={'uuid': ipam_uuid}) if not ok: return (ok, 400, ipam_dict) subnet_method = ipam_dict.get('ipam_subnet_method') if subnet_method is None: subnet_method = 'user-defined-subnet' if subnet_method == 'flat-subnet': ipam_with_flat_subnet = True subnets_list = cls.addr_mgmt._ipam_to_subnets(ipam_dict) if not subnets_list: subnets_list = [] ipam_subnets_list = ipam_subnets_list + subnets_list # get user-defined-subnet information to validate # that there is cidr configured if subnet_method is 'flat-subnet' vnsn = ipam['attr'] ipam_subnets = vnsn['ipam_subnets'] if (subnet_method == 'flat-subnet'): for ipam_subnet in 
ipam_subnets: subnet_dict = ipam_subnet.get('subnet') if subnet_dict: ip_prefix = subnet_dict.get('ip_prefix') if ip_prefix is not None: return (False, 400, "with flat-subnet, network can not have user-defined subnet") if subnet_method == 'user-defined-subnet': (ok, result) = cls.addr_mgmt.net_check_subnet(ipam_subnets) if not ok: return (ok, 409, result) if (db_conn.update_subnet_uuid(ipam_subnets)): db_conn.config_log( 'AddrMgmt: subnet uuid is updated for vn %s' % (vn_uuid), level=SandeshLevel.SYS_DEBUG) # end of ipam in ipam_refs if ipam_with_flat_subnet: (ok, result) = cls._check_net_mode_for_flat_ipam(obj_dict, db_dict) if not ok: return (ok, 400, result) (ok, result) = cls.addr_mgmt.net_check_subnet_quota(db_dict, obj_dict, db_conn) if not ok: return (ok, vnc_quota.QUOTA_OVER_ERROR_CODE, result) vn_subnets_list = cls.addr_mgmt._vn_to_subnets(obj_dict) if not vn_subnets_list: vn_subnets_list = [] (ok, result) = cls.addr_mgmt.net_check_subnet_overlap(vn_subnets_list, ipam_subnets_list) if not ok: return (ok, 400, result) return (True, 200, '') # end _check_ipam_network_subnets @classmethod def pre_dbe_create(cls, tenant_name, obj_dict, db_conn): (ok, response) = cls._is_multi_policy_service_chain_supported(obj_dict) if not ok: return (ok, response) is_shared = obj_dict.get('is_shared') # neutorn <-> vnc sharing if obj_dict['perms2']['global_access'] == PERMS_RWX: obj_dict['is_shared'] = True elif is_shared: obj_dict['perms2']['global_access'] = PERMS_RWX # TODO(ethuleau): As we keep the virtual network ID allocation in # schema and in the vnc API for one release overlap to # prevent any upgrade issue, we still authorize to # set or update the virtual network ID until release # (3.2 + 1) # # Does not authorize to set the virtual network ID as it's allocated # # by the vnc server # if obj_dict.get('virtual_network_network_id') is not None: # return (False, (403, "Cannot set the virtual network ID")) if obj_dict.get('virtual_network_network_id') is None: # Allocate virtual network ID vn_id = cls.vnc_zk_client.alloc_vn_id(':'.join(obj_dict['fq_name'])) def undo_vn_id(): cls.vnc_zk_client.free_vn_id(vn_id) return True, "" get_context().push_undo(undo_vn_id) obj_dict['virtual_network_network_id'] = vn_id + 1 # Changing RPF default if configured if cls.rpf_default is not None: vnp = obj_dict.get('virtual_network_properties') if vnp is None: vnp = {'rpf': cls.rpf_default} else: rpf = vnp.get('rpf') if rpf is None: vnp['rpf'] = cls.rpf_default obj_dict['virtual_network_properties'] = vnp vn_uuid = obj_dict.get('uuid') (ok, return_code, result) = cls._check_ipam_network_subnets(obj_dict, db_conn, vn_uuid) if not ok: return (ok, (return_code, result)) ok, error = cls._check_route_targets(obj_dict, db_conn) if not ok: return False, error (ok, error) = cls._check_provider_details(obj_dict, db_conn, True) if not ok: return (False, (400, error)) ipam_refs = obj_dict.get('network_ipam_refs') or [] try: cls.addr_mgmt.net_create_req(obj_dict) #for all ipams which are flat, we need to write a unique id as # subnet uuid for all cidrs in flat-ipam for ipam in ipam_refs: ipam_fq_name = ipam['to'] ipam_uuid = ipam.get('uuid') if not ipam_uuid: ipam_uuid = db_conn.fq_name_to_uuid('network_ipam', ipam_fq_name) (ok, ipam_dict) = db_conn.dbe_read( obj_type='network_ipam', obj_ids={'uuid': ipam_uuid}) if not ok: return (ok, (400, ipam_dict)) subnet_method = ipam_dict.get('ipam_subnet_method') if (subnet_method != None and subnet_method == 'flat-subnet'): subnet_dict = {} flat_subnet_uuid = str(uuid.uuid4()) 
subnet_dict['subnet_uuid'] = flat_subnet_uuid ipam['attr']['ipam_subnets'] = [subnet_dict] def undo(): cls.addr_mgmt.net_delete_req(obj_dict) return True, "" get_context().push_undo(undo) except Exception as e: return (False, (500, str(e))) return True, "" # end pre_dbe_create @classmethod def post_dbe_create(cls, tenant_name, obj_dict, db_conn): api_server = db_conn.get_api_server() # Create native/vn-default routing instance ri_fq_name = obj_dict['fq_name'][:] ri_fq_name.append(obj_dict['fq_name'][-1]) ri_obj = RoutingInstance( parent_type='virtual-network', fq_name=ri_fq_name, routing_instance_is_default=True) api_server.internal_request_create( 'routing-instance', ri_obj.serialize_to_json()) return True, '' # end post_dbe_create @classmethod def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs): if ((fq_name == cfgm_common.IP_FABRIC_VN_FQ_NAME) or (fq_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME)): # Ignore ip-fabric subnet updates return True, "" # neutron <-> vnc sharing try: global_access = obj_dict['perms2']['global_access'] except KeyError: global_access = None is_shared = obj_dict.get('is_shared') if global_access is not None or is_shared is not None: if global_access is not None and is_shared is not None: if is_shared != (global_access != 0): error = "Inconsistent is_shared (%s a) and global_access (%s)" % (is_shared, global_access) return (False, (400, error)) elif global_access is not None: obj_dict['is_shared'] = (global_access != 0) else: ok, result = cls.dbe_read(db_conn, 'virtual_network', id, obj_fields=['perms2']) if not ok: return ok, result obj_dict['perms2'] = result['perms2'] obj_dict['perms2']['global_access'] = PERMS_RWX if is_shared else 0 ok, error = cls._check_route_targets(obj_dict, db_conn) if not ok: return False, error (ok, error) = cls._check_provider_details(obj_dict, db_conn, False) if not ok: return (False, (409, error)) ok, read_result = cls.dbe_read(db_conn, 'virtual_network', id) if not ok: return ok, read_result # TODO(ethuleau): As we keep the virtual network ID allocation in # schema and in the vnc API for one release overlap to # prevent any upgrade issue, we still authorize to # set or update the virtual network ID until release # (3.2 + 1) # new_vn_id = obj_dict.get('virtual_network_network_id') # # Does not authorize to update the virtual network ID as it's allocated # # by the vnc server # if (new_vn_id is not None and # new_vn_id != read_result.get('virtual_network_network_id')): # return (False, (403, "Cannot update the virtual network ID")) (ok, response) = cls._is_multi_policy_service_chain_supported(obj_dict, read_result) if not ok: return (ok, response) (ok, return_code, result) = cls._check_ipam_network_subnets(obj_dict, db_conn, id, read_result) if not ok: return (ok, (return_code, result)) (ok, result) = cls.addr_mgmt.net_check_subnet_delete(read_result, obj_dict) if not ok: return (ok, (409, result)) ipam_refs = obj_dict.get('network_ipam_refs') or [] if ipam_refs: (ok, result) = cls.addr_mgmt.net_validate_subnet_update(read_result, obj_dict) if not ok: return (ok, (400, result)) try: cls.addr_mgmt.net_update_req(fq_name, read_result, obj_dict, id) #update link with a subnet_uuid if ipam in read_result or obj_dict # does not have it already for ipam in ipam_refs: ipam_fq_name = ipam['to'] ipam_uuid = db_conn.fq_name_to_uuid('network_ipam', ipam_fq_name) (ok, ipam_dict) = db_conn.dbe_read( obj_type='network_ipam', obj_ids={'uuid': ipam_uuid}) if not ok: return (ok, (409, ipam_dict)) subnet_method = 
ipam_dict.get('ipam_subnet_method') if (subnet_method != None and subnet_method == 'flat-subnet'): vnsn_data = ipam.get('attr') or {} ipam_subnets = vnsn_data.get('ipam_subnets') or [] if (len(ipam_subnets) == 1): continue if (len(ipam_subnets) == 0): subnet_dict = {} flat_subnet_uuid = str(uuid.uuid4()) subnet_dict['subnet_uuid'] = flat_subnet_uuid ipam['attr']['ipam_subnets'].insert(0, subnet_dict) def undo(): # failed => update with flipped values for db_dict and req_dict cls.addr_mgmt.net_update_req(fq_name, obj_dict, read_result, id) # end undo get_context().push_undo(undo) except Exception as e: return (False, (500, str(e))) return True,
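
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the source). The pre_dbe_* hooks above share
# one return convention: (True, '') on success and (False, (http_code,
# message)) on failure. Below is a minimal validator in that style, walking
# the same network_ipam_refs layout used above; the rule it enforces is
# hypothetical.
# ---------------------------------------------------------------------------
def _check_ipam_refs_resolvable(obj_dict):
    for ipam in obj_dict.get('network_ipam_refs') or []:
        # Each ref must be resolvable either by uuid or by fq_name ('to').
        if not ipam.get('uuid') and not ipam.get('to'):
            msg = "network_ipam ref needs a uuid or a fq_name ('to')"
            return (False, (400, msg))
    return (True, '')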
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import print_function, division, absolute_import import logging import numpy import sys from collections import OrderedDict from six.moves import zip import numpy as np import Ska.Numpy from Chandra.Time import DateTime import Ska.tdb from . import units MODULE = sys.modules[__name__] logger = logging.getLogger('engarchive') class NoValidDataError(Exception): pass class DataShapeError(Exception): pass def quality_index(dat, colname): """Return the index for `colname` in `dat`""" colname = colname.split(':')[0] return list(dat.dtype.names).index(colname) def numpy_converter(dat): return Ska.Numpy.structured_array(dat, colnames=dat.dtype.names) def convert(dat, content): # Zero-length file results in `dat is None` if dat is None: raise NoValidDataError try: converter = getattr(MODULE, content.lower()) except AttributeError: converter = numpy_converter return converter(dat) def generic_converter(prefix=None, add_quality=False, aliases=None): """Convert an input FITS recarray assuming that it has a TIME column. If ``add_prefix`` is set then add ``content_`` as a prefix to the data column names. If ``add_quality`` is set then add a QUALITY column with all values False. """ def _convert(dat): colnames = dat.dtype.names colnames_out = [x.upper() for x in colnames] if aliases: colnames_out = [aliases.get(x, x).upper() for x in colnames_out] if prefix: # Note to self: never change an enclosed reference, i.e. don't do # prefix = prefix.upper() + '_' # You will lose an hour again figuring this out if so. PREFIX = prefix.upper() + '_' colnames_out = [(x if x in ('TIME', 'QUALITY') else PREFIX + x) for x in colnames_out] arrays = [dat.field(x) for x in colnames] if add_quality: descrs = [(x,) + y[1:] for x, y in zip(colnames_out, dat.dtype.descr)] quals = numpy.zeros((len(dat), len(colnames) + 1), dtype=numpy.bool) descrs += [('QUALITY', numpy.bool, (len(colnames) + 1,))] arrays += [quals] else: descrs = [(name, array.dtype.str, array.shape[1:]) for name, array in zip(colnames_out, arrays)] return numpy.rec.fromarrays(arrays, dtype=descrs) return _convert def get_bit_array(dat, in_name, out_name, bit_index): bit_indexes = [int(bi) for bi in bit_index.split(',')] bit_index = max(bit_indexes) if dat[in_name].shape[1] < bit_index: raise DataShapeError('column {} has shape {} but need at least {}' .format(in_name, dat[in_name].shape[1], bit_index + 1)) if len(bit_indexes) > 1: mult = 1 out_array = np.zeros(len(dat), dtype=np.uint32) # no more than 32 bit indexes for bit_index in reversed(bit_indexes): # Note: require casting mult and 0 to uint32 because recent versions of numpy # disallow in-place adding of int64 to uint32. out_array += np.where(dat[in_name][:, bit_index], np.uint32(mult), np.uint32(0)) mult *= 2 else: try: tscs = Ska.tdb.msids[out_name].Tsc scs = {tsc['LOW_RAW_COUNT']: tsc['STATE_CODE'] for tsc in tscs} except (KeyError, AttributeError): scs = ['OFF', 'ON '] # CXC telemetry stores state code vals with trailing spaces so all match # in length. Annoying, but reproduce this here for consistency so # fetch Msid.raw_vals does the right thing. max_len = max(len(sc) for sc in scs.values()) fmtstr = '{:' + str(max_len) + 's}' scs = [fmtstr.format(val) for key, val in scs.items()] out_array = np.where(dat[in_name][:, bit_index], scs[1], scs[0]) return out_array def generic_converter2(msid_cxc_map, default_dtypes=None): """Convert an input FITS recarray assuming that it has a TIME column. 
Use the ``msid_cxc_map`` to define the list of output eng archive MSIDs (keys) and the corresponding colnames in the CXC archive FITS file (values). The CXC values can contain an optional bit specifier in the form <colname>:<N> where N is the bit selector referenced from 0 as the leftmost bit. :param msid_cxc_map: dict of out_name => in_name mapping """ def _convert(dat): # Make quality bool array with entries for TIME, QUALITY, then all other cols out_names = ['TIME', 'QUALITY'] + list(msid_cxc_map.keys()) out_quality = np.zeros(shape=(len(dat), len(out_names)), dtype=np.bool) out_arrays = {'TIME': dat['TIME'], 'QUALITY': out_quality} for out_name, in_name in msid_cxc_map.items(): if ':' in in_name: in_name, bit_index = in_name.split(':') out_array = get_bit_array(dat, in_name, out_name, bit_index) quality = dat['QUALITY'][:, quality_index(dat, in_name)] else: if in_name in dat.dtype.names: out_array = dat[in_name] quality = dat['QUALITY'][:, quality_index(dat, in_name)] else: # Handle column that is intermittently available in `dat` by using the # supplied default dtype. Quality is True (missing) everywhere. out_array = np.zeros(shape=len(dat), dtype=default_dtypes[out_name]) quality = True assert out_array.ndim == 1 out_arrays[out_name] = out_array out_quality[:, out_names.index(out_name)] = quality out = Ska.Numpy.structured_array(out_arrays, out_names) return out return _convert orbitephem0 = generic_converter('orbitephem0', add_quality=True) lunarephem0 = generic_converter('lunarephem0', add_quality=True) solarephem0 = generic_converter('solarephem0', add_quality=True) orbitephem1 = generic_converter('orbitephem1', add_quality=True) lunarephem1 = generic_converter('lunarephem1', add_quality=True) solarephem1 = generic_converter('solarephem1', add_quality=True) angleephem = generic_converter(add_quality=True) def parse_alias_str(alias_str, invert=False): aliases = OrderedDict() for line in alias_str.strip().splitlines(): cxcmsid, msid = line.split()[:2] if invert: aliases[msid] = cxcmsid else: aliases[cxcmsid] = msid return aliases ALIASES = {'simdiag': """ RAMEXEC 3SDSWELF SEA CSC Exectuting from RAM DSTACKPTR 3SDPSTKP SEA Data Stack Ptr TSCEDGE 3SDTSEDG TSC Tab Edge Detection Flags FAEDGE 3SDFAEDG FA Tab Edge Detection Flags MJFTIME 3SDMAJFP Major Frame Period Time Measured by SEA MRMDEST 3SDRMOVD Most Recent Motor Move Destination TSCTABADC 3SDTSTSV TSC Tab Position Sensor A/D converter FATABADC 3SDFATSV FA Tab Position Sensor A/D Converter AGRNDADC 3SDAGV Analog Ground A/D Converter Reading P15VADC 3SDP15V +15V Power Supply A/D Converter Reading P5VADC 3SDP5V +5V Power Supply A/D Converter Reading N15VADC 3SDM15V -15V Power Supply A/D Converter Reading FLEXATEMPADC 3SDFLXAT Flexture A Thermistor A/D Converter FLEXBTEMPADC 3SDFLXBT Flexture B Thermistor A/D Converter FLEXCTEMPADC 3SDFLXCT Flexture C Thermistor A/D Converter TSCMTRTEMPADC 3SDTSMT TSC Motor Thermistor A/D Converter FAMTRTEMPADC 3SDFAMT FA Motor Thermistor A/D Converter PSUTEMPADC 3SDPST SEA Power Supply Thermistor A/D Converter BOXTEMPADC 3SDBOXT SEA Box Thermistor A/D Converter RAMFAILADDR 3SDRMFAD RAM Most Recent detected Fail Address TSCTABWID 3SDTSTBW TSC Most Recent detected Tab Width FATABWID 3SDFATBW FA Most Recent detected Tab Width SYNCLOSS 3SDSYRS Process Reset Due Synchronization Loss WARMRESET 3SDWMRS Processor Warm Reset TSCHISTO 3SDTSP TSC Most Recent PWM Histogram FAHISTO 3SDFAP FA Most Recent PWM Histogram INVCMDCODE 3SDINCOD SEA Invalid CommandCode """, 'sim_mrg': """ TLMUPDATE 3SEATMUP "Telemtry 
Update Flag" SEAIDENT 3SEAID "SEA Identification Flag" SEARESET 3SEARSET "SEA Reset Flag" PROMFAIL 3SEAROMF "SEA PROM Checksum Flag" INVCMDGROUP 3SEAINCM "SEA Invalid Command Group Flag" TSCMOVING 3TSCMOVE "TSC In Motion Flag" FAMOVING 3FAMOVE "FA In Motion Flag" FAPOS 3FAPOS "FA Position" TSCPOS 3TSCPOS "TSC Postion" PWMLEVEL 3MRMMXMV "Max Power Motor Volt recent move" LDRTMECH 3LDRTMEK "Last Detected Reference Mechanism Tab" LDRTNUM 3LDRTNO "Last Detected Reference Tab Number" LDRTRELPOS 3LDRTPOS "Last Detected Reference Relative Postion" FLEXATEMP 3FAFLAAT "Flexture A Temperature" FLEXBTEMP 3FAFLBAT "Flexture B Temperature" FLEXCTEMP 3FAFLCAT "Flexture C Temperature" TSCMTRTEMP 3TRMTRAT "TSC Motor Temperature" FAMTRTEMP 3FAMTRAT "FA Motor Temperature" PSUTEMP 3FAPSAT "SEA Power Supply Temperature" BOXTEMP 3FASEAAT "SEA Box Temperature" STALLCNT 3SMOTSTL "SEA Motor Stall Counter" TAB2AUTOPOS 3STAB2EN "SEA Tab 2 Auto Position Update Status" MTRDRVRLY 3SMOTPEN "SEA Motor Driver Power Relay status" MTRSELRLY 3SMOTSEL "SEA Motor Selection Relay Status" HTRPWRRLY 3SHTREN "SEA Heater Power Relay Status" RAMFAIL 3SEARAMF "SEA RAM Failure Detected Flag" MTROVRCCNT 3SMOTOC "Motor Drive Overcurrent Counter" PENDCMDCNT 3SPENDC "SEA Pending Command Count" FLEXATSET 3SFLXAST "Flexture A Temperature Setpoint" FLEXBTSET 3SFLXBST "Flexture B Temperature Setpoint" FLEXCTSET 3SFLXCST "Flexture C Temperature Setpoint" """, 'hrc0ss': """ TLEVART 2TLEV1RT VLEVART 2VLEV1RT SHEVART 2SHEV1RT TLEVART 2TLEV2RT VLEVART 2VLEV2RT SHEVART 2SHEV2RT """, 'hrc0hk': """ SCIDPREN:0,1,2,3,8,9,10 HRC_SS_HK_BAD P24CAST:7 224PCAST P15CAST:7 215PCAST N15CAST:7 215NCAST SPTPAST 2SPTPAST SPBPAST 2SPBPAST IMTPAST 2IMTPAST IMBPAST 2IMBPAST MTRSELCT:3 2NYMTAST MTRSELCT:4 2PYMTAST MTRSELCT:5 2CLMTAST MTRSELCT:6 2DRMTAST MTRSELCT:7 2ALMTAST MTRSTATR:0 2MSMDARS MTRSTATR:1 2MDIRAST MTRSTATR:2 2MSNBAMD MTRSTATR:3 2MSNAAMD MTRSTATR:4 2MSLBAMD MTRSTATR:5 2MSLAAMD MTRSTATR:6 2MSPRAMD MTRSTATR:7 2MSDRAMD MTRCMNDR:0 2MCMDARS MTRCMNDR:2 2MCNBAMD MTRCMNDR:3 2MCNAAMD MTRCMNDR:4 2MCLBAMD MTRCMNDR:5 2MCLAAMD MTRCMNDR:6 2MCPRAMD MTRCMNDR:7 2MDRVAST SCTHAST 2SCTHAST MTRITMP:1 2SMOIAST MTRITMP:2 2SMOTAST MTRITMP:5 2DROTAST MTRITMP:6 2DROIAST MLSWENBL:3 2SFLGAST MLSWENBL:4 2OSLSAST MLSWENBL:5 2OPLSAST MLSWENBL:6 2CSLSAST MLSWENBL:7 2CPLSAST MLSWSTAT:2 2OSLSADT MLSWSTAT:3 2OSLSAAC MLSWSTAT:4 2OPLSAAC MLSWSTAT:5 2CSLSADT MLSWSTAT:6 2CSLSAAC MLSWSTAT:7 2CPLSAAC FCPUAST 2FCPUAST FCPVAST 2FCPVAST CBHUAST 2CBHUAST CBLUAST 2CBLUAST CBHVAST 2CBHVAST CBLVAST 2CBLVAST WDTHAST 2WDTHAST SCIDPREN:4 2CLMDAST SCIDPREN:5 2FIFOAVR SCIDPREN:6 2OBNLASL SCIDPREN:7 2SPMDASL SCIDPREN:11 2EBLKAVR SCIDPREN:12 2CBLKAVR SCIDPREN:13 2ULDIAVR SCIDPREN:14 2WDTHAVR SCIDPREN:15 2SHLDAVR HVPSSTAT:0 2SPONST HVPSSTAT:1 2SPCLST HVPSSTAT:2 2S1ONST HVPSSTAT:3 2IMONST HVPSSTAT:4 2IMCLST HVPSSTAT:5 2S2ONST S1HVST 2S1HVST S2HVST 2S2HVST C05PALV 2C05PALV C15PALV 2C15PALV C15NALV 2C15NALV C24PALV 2C24PALV IMHVLV 2IMHVLV IMHBLV 2IMHBLV SPHVLV 2SPHVLV SPHBLV 2SPHBLV S1HVLV 2S1HVLV S2HVLV 2S2HVLV PRBSCR 2PRBSCR PRBSVL 2PRBSVL ULDIALV 2ULDIALV LLDIALV 2LLDIALV FEPRATM 2FEPRATM CALPALV 2CALPALV GRDVALV 2GRDVALV RSRFALV 2RSRFALV SPINATM 2SPINATM IMINATM 2IMINATM LVPLATM 2LVPLATM SPHVATM 2SPHVATM IMHVATM 2IMHVATM SMTRATM 2SMTRATM FE00ATM 2FE00ATM CE00ATM 2CE00ATM CE01ATM 2CE01ATM """, } CXC_TO_MSID = {key: parse_alias_str(val) for key, val in ALIASES.items()} MSID_TO_CXC = {key: parse_alias_str(val, invert=True) for key, val
<filename>trac/versioncontrol/admin.py # -*- coding: utf-8 -*- # # Copyright (C) 2008 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.com/license.html. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://trac.edgewall.org/. import os.path import sys from genshi.builder import tag from trac.admin import IAdminCommandProvider, IAdminPanelProvider from trac.config import ListOption from trac.core import * from trac.perm import IPermissionRequestor from trac.util import as_bool, is_path_below from trac.util.compat import any from trac.util.text import breakable_path, normalize_whitespace, print_table, \ printout from trac.util.translation import _, ngettext, tag_ from trac.versioncontrol import DbRepositoryProvider, RepositoryManager, \ is_default from trac.web.chrome import Chrome, add_notice, add_warning class VersionControlAdmin(Component): """trac-admin command provider for version control administration.""" implements(IAdminCommandProvider, IPermissionRequestor) # IAdminCommandProvider methods def get_admin_commands(self): yield ('changeset added', '<repos> <rev> [rev] [...]', """Notify trac about changesets added to a repository This command should be called from a post-commit hook. It will trigger a cache update and notify components about the addition. """, self._complete_repos, self._do_changeset_added) yield ('changeset modified', '<repos> <rev> [rev] [...]', """Notify trac about changesets modified in a repository This command should be called from a post-revprop hook after revision properties like the commit message, author or date have been changed. It will trigger a cache update for the given revisions and notify components about the change. """, self._complete_repos, self._do_changeset_modified) yield ('repository list', '', 'List source repositories', None, self._do_list) yield ('repository resync', '<repos> [rev]', """Re-synchronize trac with repositories When [rev] is specified, only that revision is synchronized. Otherwise, the complete revision history is synchronized. Note that this operation can take a long time to complete. If synchronization gets interrupted, it can be resumed later using the `sync` command. To synchronize all repositories, specify "*" as the repository. """, self._complete_repos, self._do_resync) yield ('repository sync', '<repos> [rev]', """Resume synchronization of repositories It works like `resync`, except that it doesn't clear the already synchronized changesets, so it's a better way to resume an interrupted `resync`. See `resync` help for detailed usage. 
""", self._complete_repos, self._do_sync) def get_reponames(self): rm = RepositoryManager(self.env) return [reponame or '(default)' for reponame in rm.get_all_repositories()] def _complete_repos(self, args): if len(args) == 1: return self.get_reponames() def _do_changeset_added(self, reponame, *revs): if is_default(reponame): reponame = '' rm = RepositoryManager(self.env) rm.notify('changeset_added', reponame, revs) def _do_changeset_modified(self, reponame, *revs): if is_default(reponame): reponame = '' rm = RepositoryManager(self.env) rm.notify('changeset_modified', reponame, revs) def _do_list(self): rm = RepositoryManager(self.env) values = [] for (reponame, info) in sorted(rm.get_all_repositories().iteritems()): alias = '' if 'alias' in info: alias = info['alias'] or '(default)' values.append((reponame or '(default)', info.get('type', ''), alias, info.get('dir', ''))) print_table(values, [_('Name'), _('Type'), _('Alias'), _('Directory')]) def _sync(self, reponame, rev, clean): rm = RepositoryManager(self.env) if reponame == '*': if rev is not None: raise TracError(_('Cannot synchronize a single revision ' 'on multiple repositories')) repositories = rm.get_real_repositories() else: if is_default(reponame): reponame = '' repos = rm.get_repository(reponame) if repos is None: raise TracError(_("Repository '%(repo)s' not found", repo=reponame or '(default)')) if rev is not None: repos.sync_changeset(rev) printout(_('%(rev)s resynced on %(reponame)s.', rev=rev, reponame=repos.reponame or '(default)')) return repositories = [repos] for repos in sorted(repositories, key=lambda r: r.reponame): printout(_('Resyncing repository history for %(reponame)s... ', reponame=repos.reponame or '(default)')) repos.sync(self._sync_feedback, clean=clean) for cnt, in self.env.db_query( "SELECT count(rev) FROM revision WHERE repos=%s", (repos.id,)): printout(ngettext('%(num)s revision cached.', '%(num)s revisions cached.', num=cnt)) printout(_('Done.')) def _sync_feedback(self, rev): sys.stdout.write(' [%s]\r' % rev) sys.stdout.flush() def _do_resync(self, reponame, rev=None): self._sync(reponame, rev, clean=True) def _do_sync(self, reponame, rev=None): self._sync(reponame, rev, clean=False) # IPermissionRequestor methods def get_permission_actions(self): return [('VERSIONCONTROL_ADMIN', ['BROWSER_VIEW', 'CHANGESET_VIEW', 'FILE_VIEW', 'LOG_VIEW'])] class RepositoryAdminPanel(Component): """Web admin panel for repository administration.""" implements(IAdminPanelProvider) allowed_repository_dir_prefixes = ListOption('versioncontrol', 'allowed_repository_dir_prefixes', '', doc="""Comma-separated list of allowed prefixes for repository directories when adding and editing repositories in the repository admin panel. If the list is empty, all repository directories are allowed. 
(''since 0.12.1'')""") # IAdminPanelProvider methods def get_admin_panels(self, req): if 'VERSIONCONTROL_ADMIN' in req.perm('admin', 'versioncontrol/repository'): yield ('versioncontrol', _('Version Control'), 'repository', _('Repositories')) def render_admin_panel(self, req, category, page, path_info): # Retrieve info for all repositories rm = RepositoryManager(self.env) all_repos = rm.get_all_repositories() db_provider = self.env[DbRepositoryProvider] if path_info: # Detail view reponame = path_info if not is_default(path_info) else '' info = all_repos.get(reponame) if info is None: raise TracError(_("Repository '%(repo)s' not found", repo=path_info)) if req.method == 'POST': if req.args.get('cancel'): req.redirect(req.href.admin(category, page)) elif db_provider and req.args.get('save'): # Modify repository changes = {} valid = True for field in db_provider.repository_attrs: value = normalize_whitespace(req.args.get(field)) if (value is not None or field == 'hidden') \ and value != info.get(field): changes[field] = value if 'dir' in changes and not \ self._check_dir(req, changes['dir']): valid = False if valid and changes: db_provider.modify_repository(reponame, changes) add_notice(req, _('Your changes have been saved.')) name = req.args.get('name') resync = tag.tt('trac-admin $ENV repository resync ' '"%s"' % (name or '(default)')) if 'dir' in changes: msg = tag_('You should now run %(resync)s to ' 'synchronize Trac with the repository.', resync=resync) add_notice(req, msg) elif 'type' in changes: msg = tag_('You may have to run %(resync)s to ' 'synchronize Trac with the repository.', resync=resync) add_notice(req, msg) if name and name != path_info and not 'alias' in info: cset_added = tag.tt('trac-admin $ENV changeset ' 'added "%s" $REV' % (name or '(default)')) msg = tag_('You will need to update your ' 'post-commit hook to call ' '%(cset_added)s with the new ' 'repository name.', cset_added=cset_added) add_notice(req, msg) if valid: req.redirect(req.href.admin(category, page)) Chrome(self.env).add_wiki_toolbars(req) data = {'view': 'detail', 'reponame': reponame} else: # List view if req.method == 'POST': # Add a repository if db_provider and req.args.get('add_repos'): name = req.args.get('name') type_ = req.args.get('type') # Avoid errors when copy/pasting paths dir = normalize_whitespace(req.args.get('dir', '')) if name is None or type_ is None or not dir: add_warning(req, _('Missing arguments to add a ' 'repository.')) elif self._check_dir(req, dir): try: db_provider.add_repository(name, dir, type_) except self.env.db_exc.IntegrityError: name = name or '(default)' raise TracError(_('The repository "%(name)s" ' 'already exists.', name=name)) name = name or '(default)' add_notice(req, _('The repository "%(name)s" has been ' 'added.', name=name)) resync = tag.tt('trac-admin $ENV repository resync ' '"%s"' % name) msg = tag_('You should now run %(resync)s to ' 'synchronize Trac with the repository.', resync=resync) add_notice(req, msg) cset_added = tag.tt('trac-admin $ENV changeset ' 'added "%s" $REV' % name) msg = tag_('You should also set up a post-commit hook ' 'on the repository to call %(cset_added)s ' 'for each committed changeset.', cset_added=cset_added) add_notice(req, msg) req.redirect(req.href.admin(category, page)) # Add a repository alias elif db_provider and req.args.get('add_alias'): name = req.args.get('name') alias = req.args.get('alias') if name is not None and alias is not None: try: db_provider.add_alias(name, alias) except self.env.db_exc.IntegrityError: raise 
TracError(_('The alias "%(name)s" already ' 'exists.', name=name or '(default)')) add_notice(req, _('The alias "%(name)s" has been ' 'added.', name=name or '(default)')) req.redirect(req.href.admin(category, page)) add_warning(req, _('Missing arguments to add an ' 'alias.')) # Refresh the list of repositories elif req.args.get('refresh'): req.redirect(req.href.admin(category, page)) # Remove repositories elif db_provider and req.args.get('remove'): sel = req.args.getlist('sel') if sel: for name in sel: db_provider.remove_repository(name) add_notice(req, _('The selected repositories have ' 'been removed.')) req.redirect(req.href.admin(category, page)) add_warning(req, _('No repositories were selected.')) data = {'view': 'list'} # Find repositories that are editable db_repos = {} if db_provider is not None: db_repos = dict(db_provider.get_repositories()) # Prepare common rendering data repositories = dict((reponame, self._extend_info(reponame, info.copy(), reponame in db_repos)) for (reponame, info) in all_repos.iteritems()) types = sorted([''] + rm.get_supported_types()) data.update({'types': types, 'default_type': rm.repository_type, 'repositories': repositories}) return 'admin_repositories.html', data def _extend_info(self, reponame, info, editable): """Extend repository info for rendering.""" info['name'] = reponame if info.get('dir') is not None: info['prettydir'] = breakable_path(info['dir']) or '' info['hidden'] = as_bool(info.get('hidden')) info['editable'] = editable if not info.get('alias'): try: repos = RepositoryManager(self.env).get_repository(reponame) youngest_rev = repos.get_youngest_rev() info['rev'] = youngest_rev info['display_rev'] = repos.display_rev(youngest_rev) except Exception: pass return info def _check_dir(self, req, dir): """Check that a repository directory is valid, and add a warning message if not. """ if not os.path.isabs(dir): add_warning(req, _('The repository directory must be an absolute ' 'path.')) return False prefixes = [os.path.join(self.env.path, prefix) for prefix in self.allowed_repository_dir_prefixes] if prefixes and not any(is_path_below(dir, prefix) for prefix in prefixes): add_warning(req, _('The repository directory must be located ' 'below one of the following directories: '
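
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the source). _check_dir() above accepts a
# repository directory only if it is absolute and, when
# allowed_repository_dir_prefixes is non-empty, located below one of the
# configured prefixes (each resolved against the environment path). A
# standalone restatement of that rule, reusing is_path_below from trac.util:
# ---------------------------------------------------------------------------
def dir_is_allowed(env_path, allowed_prefixes, directory):
    if not os.path.isabs(directory):
        return False
    prefixes = [os.path.join(env_path, prefix) for prefix in allowed_prefixes]
    return not prefixes or any(is_path_below(directory, prefix)
                               for prefix in prefixes)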
from __future__ import annotations import abc import re from numbers import Number from typing import Awaitable # noqa: F401 from typing import ( TYPE_CHECKING, Any, Callable, Generic, List, Optional, Sequence, Type, TypeVar, Union, overload, ) from zucker.filtering import NegatableFilter, NullishFilter, NumericFilter, ValuesFilter from zucker.utils import ApiType, JsonType if TYPE_CHECKING: from zucker.model.module import AsyncModule, BaseModule, SyncModule, UnboundModule NativeType = TypeVar("NativeType") GetType = TypeVar("GetType") ModuleType = TypeVar("ModuleType", bound="BaseModule") SetType = TypeVar("SetType") Self = TypeVar("Self", bound="Field[Any, Any]") AnyModule = Union["SyncModule", "AsyncModule", "UnboundModule"] class Field(Generic[ModuleType, GetType], abc.ABC): """Base class for all fields. This class accepts type variables which resemble the native type that is returned when the field accessed, for example as ``record.some_value`` where ``some_value`` is the field instance. The generic arguments give the return type of that call, depending on whether the field is placed in a synchronous or an asynchronous module. """ def __init__(self, api_name: Optional[str] = None): self._api_name = api_name self._name: Optional[str] = "test" # Check the name to make sure it's valid. self.name # noqa self._name = None def __set_name__(self, owner: ModuleType, name: str) -> None: self._name = name # Check the name to make sure it's valid. self.name # noqa @overload def __get__(self: Self, instance: ModuleType, owner: Type[BaseModule]) -> GetType: ... @overload def __get__(self: Self, instance: None, owner: Type[BaseModule]) -> Self: ... def __get__( self: Self, instance: Union[ModuleType, None], owner: Type[BaseModule], ) -> Union[GetType, Self]: from ..module import BaseModule if isinstance(instance, BaseModule): # Here, the field is accessed as a property on a record, like this: # the_first_name = record.first_name # In this case, the actual field type determines which data is returned. value = self._get_value(instance) # This type is ignored because because MyPy cannot parse generic self # argument types and just flat-out ignores them currently. Hence, # self._get_value() is actually an Any (since self is a 'Self' which is # Field[Any] or some subtype). # See also: https://github.com/python/mypy/issues/2354 return value # type: ignore elif instance is None: # The other case is when the field gets referenced directly on the class, # for example when building queries: # module.filter(Lead.name == "Apple") # Here we return the field itself again, because then we can build filters # and use other APIs from the class. return self else: raise AttributeError( f"Field() objects should only created inside bound modules - got " f"{instance!r}" ) @property def name(self) -> str: if self._api_name is not None: result = self._api_name elif self._name is None: raise RuntimeError( "Could not retrieve the field's model name. Check for the correct " "Field() usage - otherwise this is a bug." ) else: result = self._name if not isinstance(result, str): raise TypeError(f"field name must be a string, got {result!r}") if result == "" or " " in result: raise ValueError("field name may not be empty and must not contain spaces") return result @abc.abstractmethod def _get_value(self, record: ModuleType) -> GetType: ... class MutableField( Generic[ModuleType, GetType, SetType], Field[ModuleType, GetType], abc.ABC, ): """Base class for fields that are mutable. 
Being "mutable" means that users can not only read values, but also write (and save) new values for the field. Since setting could potentially take a different type, there are two generic arguments for this field. For example, a date field could return native date objects but additionally accept strings for setting. """ def __set__(self, instance: ModuleType, value: SetType) -> None: from ..module import BaseModule if not isinstance(instance, BaseModule): raise AttributeError self._set_value(instance, value) @abc.abstractmethod def _set_value(self, record: ModuleType, value: SetType) -> None: ... class ScalarField( Generic[NativeType, ApiType], Field[AnyModule, NativeType], abc.ABC, ): """Scalar fields are fields that also support some basic filtering operations. :param validators: Validators can be provided to make sure that data meets specific requirements. These will be checked both for incoming values from the server and for any value that is set on the field, before writing changes. A validator may either be a function or a regular expression object. The former will be called with the value to be checked as the single argument and should raise a :exc:`ValueError` when validation fails. The latter will pass the check if the entire value matches the provided regular expression. .. note:: A few notes to keep in mind when using validators: 1. The default strategy for validating regular expressions will coerce the incoming type to a string. That means that -- for example -- the number ``0xff`` *will* match the expression ``2..``, because the string representation is ``255``. 2. Validators are always evaluated on the api data type. That means that they are run *after* serializing any user input. """ def __init__( self, api_name: Optional[str] = None, *, validators: Optional[ Sequence[Union[re.Pattern[str], Callable[[ApiType], None]]] ] = None, ): super().__init__(api_name=api_name) self._validators: List[Callable[[ApiType], None]] = [] for validator in validators or []: if isinstance(validator, re.Pattern): def validate(value: ApiType) -> None: assert isinstance(validator, re.Pattern) if not validator.fullmatch(str(value)): raise ValueError(f"pattern did not match: {validator.pattern}") self._validators.append(validate) elif callable(validator): self._validators.append(validator) else: raise TypeError( f"validators must be regular expression pattern objects or " f"callables, got {type(validator)!r}" ) ################################## # Getting / setting field values # ################################## def _get_value(self, record: BaseModule) -> NativeType: raw_value = record.get_data(self.name) if raw_value is None: raise AttributeError( f"Trying to access an undefined field {self.name!r} in record " f"{type(record)!r}. Either add the field to the module " f"definition or check for the correct spelling." ) for validate in self._validators: validate(raw_value) # type: ignore return self.load_value(raw_value) @abc.abstractmethod def load_value(cls, raw_value: JsonType) -> NativeType: """Load a value from the API into a native data type. :param raw_value: Response from the API for this field. This will be a JSON primitive, which should either be returned as-is (where appropriate) or converted into a native Python data type. :returns: A Python data type for this field. """ @abc.abstractmethod def serialize(cls, value: Union[NativeType, ApiType]) -> ApiType: """Serialize a native data type into something the API can take back for saving. This method also supports "serializing" api types. 
In this case implementors are advised to verify the input's validity and return it as-is. :param value: Native or API data type for this field. :returns: An API-compatible data type. """ ################### # Filter building # ################### def __eq__( # type: ignore[override] self, other: Optional[Union[NativeType, ApiType]] ) -> NegatableFilter[Any]: """Filter for exact values of this field. Depending on type of the given value, this is is equivalent to one of the other filtering methods: >>> Person.name == "Ben" # Is the same as Person.name.values("Ben") >>> Person.age == 3 # Is the same as Person.age.values(3) >>> Person.supervisor == None # Is the same as Person.supervisor.null() """ if other is None: return self.null() else: return self.values(other) def __ne__( # type: ignore[override] self, other: Optional[Union[NativeType, ApiType]] ) -> NegatableFilter[Any]: """Inverse of the ``==`` filter operator. Use the ``!=`` operator to exclude specific values: >>> Person.name != "Ben" # Is the same as ~(Person.name.values("Ben")) >>> Person.supervisor != None # Is the same as ~(Person.supervisor.null()) """ return ~(self.__eq__(other)) def values(self, *values: Union[NativeType, ApiType]) -> ValuesFilter[ApiType]: """Filter for exact values of this field. Most basic use for this filter is finding objects by value. The filter >>> Person.name.values("Ben") will return objects who's name is 'Ben'. This filter takes one or more arguments. It matches entries where this set directly contains the field's value, for example: >>> Person.name.values("Paul", "Spencer") will match objects who's name is either 'Paul' or 'Spencer'. Inverting this filter yields a 'not-equal' filter, for example: >>> ~Person.name.values("Mike") This query will match all objects where the name is not equal to 'Mike'. The above examples are also available as a shorthand through the equals operator (although you can only check for a single value here): >>> Person.name == "Ben" >>> Person.name != "Ben" """ return ValuesFilter( self.name, *tuple(self.serialize(raw_value) for raw_value in values) ) def null(self) -> NullishFilter: """Filter for whether the field is null. Use the filter like this: >>> Person.employer.null() This will return objects where the 'employer' field is not set. To find only objects where a field
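
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the source): a minimal concrete ScalarField
# and the filter operators documented above. `StringField` is hypothetical;
# only ScalarField and the filter semantics come from this module.
# ---------------------------------------------------------------------------
class StringField(ScalarField[str, str]):
    """Strings pass through unchanged in both directions."""

    def load_value(self, raw_value):
        return str(raw_value)

    def serialize(self, value):
        return str(value)


# Filters build lazily from field instances; a bound module would consume
# them via its filter() API, e.g. module.filter(Lead.name == "Apple").
last_name = StringField(api_name="last_name")
by_name = last_name.values("Paul", "Spencer")  # ValuesFilter
no_name = last_name == None                    # NullishFilter  # noqa: E711
not_mike = ~last_name.values("Mike")           # negated ValuesFilter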
"@com_github_buildbarn_bb_remote_execution//:patches/com_github_hanwen_go_fuse_v2/fuse-712-for-invalidation.diff", "@com_github_buildbarn_bb_remote_execution//:patches/com_github_hanwen_go_fuse_v2/notify-testability.diff", ], sum = "h1:+32ffteETaLYClUj0a3aHjZ1hOPxxaNEHiZiujuDaek=", version = "v2.1.0", ) go_repository( name = "com_github_hpcloud_tail", importpath = "github.com/hpcloud/tail", sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=", version = "v1.0.0", ) go_repository( name = "com_github_jmespath_go_jmespath", importpath = "github.com/jmespath/go-jmespath", sum = "h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=", version = "v0.4.0", ) go_repository( name = "com_github_jmespath_go_jmespath_internal_testify", importpath = "github.com/jmespath/go-jmespath/internal/testify", sum = "h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=", version = "v1.5.1", ) go_repository( name = "com_github_jpillora_backoff", importpath = "github.com/jpillora/backoff", sum = "h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=", version = "v1.0.0", ) go_repository( name = "com_github_json_iterator_go", importpath = "github.com/json-iterator/go", sum = "h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=", version = "v1.1.12", ) go_repository( name = "com_github_julienschmidt_httprouter", importpath = "github.com/julienschmidt/httprouter", sum = "h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=", version = "v1.3.0", ) go_repository( name = "com_github_jung_kurt_gofpdf", importpath = "github.com/jung-kurt/gofpdf", sum = "h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0=", version = "v1.0.3-0.20190309125859-24315acbbda5", ) go_repository( name = "com_github_kballard_go_shellquote", importpath = "github.com/kballard/go-shellquote", sum = "h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=", version = "v0.0.0-20180428030007-95032a82bc51", ) go_repository( name = "com_github_kisielk_errcheck", importpath = "github.com/kisielk/errcheck", sum = "h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=", version = "v1.5.0", ) go_repository( name = "com_github_kisielk_gotool", importpath = "github.com/kisielk/gotool", sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=", version = "v1.0.0", ) go_repository( name = "com_github_klauspost_compress", importpath = "github.com/klauspost/compress", sum = "h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=", version = "v1.15.1", ) go_repository( name = "com_github_konsorten_go_windows_terminal_sequences", importpath = "github.com/konsorten/go-windows-terminal-sequences", sum = "h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=", version = "v1.0.3", ) go_repository( name = "com_github_kr_logfmt", importpath = "github.com/kr/logfmt", sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=", version = "v0.0.0-20140226030751-b84e30acd515", ) go_repository( name = "com_github_kr_pretty", importpath = "github.com/kr/pretty", sum = "h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=", version = "v0.3.0", ) go_repository( name = "com_github_kr_pty", importpath = "github.com/kr/pty", sum = "h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=", version = "v1.1.1", ) go_repository( name = "com_github_kr_text", importpath = "github.com/kr/text", sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=", version = "v0.2.0", ) go_repository( name = "com_github_kylelemons_godebug", importpath = "github.com/kylelemons/godebug", sum = "h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=", version = "v0.0.0-20170820004349-d65d576e9348", ) go_repository( name = "com_github_lazybeaver_xorshift", importpath = 
"github.com/lazybeaver/xorshift", sum = "h1:TfmftEfB1zJiDTFi3Qw1xlbEbfJPKUhEDC19clfBMb8=", version = "v0.0.0-20170702203709-ce511d4823dd", ) go_repository( name = "com_github_mattn_go_colorable", importpath = "github.com/mattn/go-colorable", sum = "h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=", version = "v0.1.8", ) go_repository( name = "com_github_mattn_go_isatty", importpath = "github.com/mattn/go-isatty", sum = "h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=", version = "v0.0.12", ) go_repository( name = "com_github_matttproud_golang_protobuf_extensions", importpath = "github.com/matttproud/golang_protobuf_extensions", sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=", version = "v1.0.1", ) go_repository( name = "com_github_modern_go_concurrent", importpath = "github.com/modern-go/concurrent", sum = "h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=", version = "v0.0.0-20180306012644-bacd9c7ef1dd", ) go_repository( name = "com_github_modern_go_reflect2", importpath = "github.com/modern-go/reflect2", sum = "h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=", version = "v1.0.2", ) go_repository( name = "com_github_mwitkow_go_conntrack", importpath = "github.com/mwitkow/go-conntrack", sum = "h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=", version = "v0.0.0-20190716064945-2f068394615f", ) go_repository( name = "com_github_nxadm_tail", importpath = "github.com/nxadm/tail", sum = "h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=", version = "v1.4.8", ) go_repository( name = "com_github_oneofone_xxhash", importpath = "github.com/OneOfOne/xxhash", sum = "h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=", version = "v1.2.2", ) go_repository( name = "com_github_onsi_ginkgo", importpath = "github.com/onsi/ginkgo", sum = "h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=", version = "v1.16.4", ) go_repository( name = "com_github_onsi_gomega", importpath = "github.com/onsi/gomega", sum = "h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=", version = "v1.16.0", ) go_repository( name = "com_github_opentracing_opentracing_go", importpath = "github.com/opentracing/opentracing-go", sum = "h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=", version = "v1.1.0", ) go_repository( name = "com_github_phpdave11_gofpdf", importpath = "github.com/phpdave11/gofpdf", sum = "h1:KPKiIbfwbvC/wOncwhrpRdXVj2CZTCFlw4wnoyjtHfQ=", version = "v1.4.2", ) go_repository( name = "com_github_phpdave11_gofpdi", importpath = "github.com/phpdave11/gofpdi", sum = "h1:o61duiW8M9sMlkVXWlvP92sZJtGKENvW3VExs6dZukQ=", version = "v1.0.13", ) go_repository( name = "com_github_pkg_diff", importpath = "github.com/pkg/diff", sum = "h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=", version = "v0.0.0-20210226163009-20ebb0f2a09e", ) go_repository( name = "com_github_pkg_errors", importpath = "github.com/pkg/errors", sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=", version = "v0.9.1", ) go_repository( name = "com_github_pmezard_go_difflib", importpath = "github.com/pmezard/go-difflib", sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=", version = "v1.0.0", ) go_repository( name = "com_github_prometheus_client_golang", importpath = "github.com/prometheus/client_golang", sum = "h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=", version = "v1.12.1", ) go_repository( name = "com_github_prometheus_client_model", importpath = "github.com/prometheus/client_model", sum = "h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=", version = "v0.2.0", ) go_repository( name = "com_github_prometheus_common", importpath = "github.com/prometheus/common", 
sum = "h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=", version = "v0.32.1", ) go_repository( name = "com_github_prometheus_procfs", importpath = "github.com/prometheus/procfs", sum = "h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=", version = "v0.7.3", ) go_repository( name = "com_github_rogpeppe_fastuuid", importpath = "github.com/rogpeppe/fastuuid", sum = "h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=", version = "v1.2.0", ) go_repository( name = "com_github_rogpeppe_go_internal", importpath = "github.com/rogpeppe/go-internal", sum = "h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=", version = "v1.8.1", ) go_repository( name = "com_github_ruudk_golang_pdf417", importpath = "github.com/ruudk/golang-pdf417", sum = "h1:K1Xf3bKttbF+koVGaX5xngRIZ5bVjbmPnaxE/dR08uY=", version = "v0.0.0-20201230142125-a7e3863a1245", ) go_repository( name = "com_github_sergi_go_diff", importpath = "github.com/sergi/go-diff", sum = "h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=", version = "v1.1.0", ) go_repository( name = "com_github_sirupsen_logrus", importpath = "github.com/sirupsen/logrus", sum = "h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=", version = "v1.6.0", ) go_repository( name = "com_github_spaolacci_murmur3", importpath = "github.com/spaolacci/murmur3", sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=", version = "v0.0.0-20180118202830-f09979ecbc72", ) go_repository( name = "com_github_spf13_pflag", importpath = "github.com/spf13/pflag", sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=", version = "v1.0.5", ) go_repository( name = "com_github_stretchr_objx", importpath = "github.com/stretchr/objx", sum = "h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As=", version = "v0.3.0", ) go_repository( name = "com_github_stretchr_testify", importpath = "github.com/stretchr/testify", sum = "h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=", version = "v1.7.1", ) go_repository( name = "com_github_yuin_goldmark", importpath = "github.com/yuin/goldmark", sum = "h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM=", version = "v1.4.1", ) go_repository( name = "com_google_cloud_go", importpath = "cloud.google.com/go", sum = "h1:i2ukt/HTgcBhgL1J0Dx9w7gb5oCe7zWEcumzQSh+9I4=", version = "v0.100.1", ) go_repository( name = "com_shuralyov_dmitri_go_generated", importpath = "dmitri.shuralyov.com/go/generated", sum = "h1:aVJ7xNKqudwDKOM6ynz6Y/Ii1iIgiB8YgsPU5mKWYa0=", version = "v0.0.0-20211227232225-c5b6cf572ec5", ) go_repository( name = "com_shuralyov_dmitri_gpu_mtl", importpath = "dmitri.shuralyov.com/gpu/mtl", sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=", version = "v0.0.0-20190408044501-666a987793e9", ) go_repository( name = "ht_sr_git_sbinet_gg", importpath = "git.sr.ht/~sbinet/gg", sum = "h1:LNhjNn8DerC8f9DHLz6lS0YYul/b602DUxDgGkd/Aik=", version = "v0.3.1", ) go_repository( name = "in_gopkg_alecthomas_kingpin_v2", importpath = "gopkg.in/alecthomas/kingpin.v2", sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=", version = "v2.2.6", ) go_repository( name = "in_gopkg_check_v1", importpath = "gopkg.in/check.v1", sum = "h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=", version = "v1.0.0-20190902080502-41f04d3bba15", ) go_repository( name = "in_gopkg_errgo_v2", importpath = "gopkg.in/errgo.v2", sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=", version = "v2.1.0", ) go_repository( name = "in_gopkg_fsnotify_v1", importpath = "gopkg.in/fsnotify.v1", sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=", version = "v1.4.7", ) go_repository( name = "in_gopkg_tomb_v1", importpath = 
"gopkg.in/tomb.v1", sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=", version = "v1.0.0-20141024135613-dd632973f1e7", ) go_repository( name = "in_gopkg_yaml_v2", importpath = "gopkg.in/yaml.v2", sum = "h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=", version = "v2.4.0", ) go_repository( name = "in_gopkg_yaml_v3", importpath = "gopkg.in/yaml.v3", sum = "h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=", version = "v3.0.0-20210107192922-496545a6307b", ) go_repository( name = "io_k8s_sigs_yaml", importpath = "sigs.k8s.io/yaml", sum = "h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=", version = "v1.3.0", ) go_repository( name = "io_opencensus_go", importpath = "go.opencensus.io", sum = "h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=", version = "v0.23.0", ) go_repository( name = "io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc", importpath = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", sum = "h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c=", version = "v0.29.0", ) go_repository( name = "io_opentelemetry_go_contrib_propagators_b3", importpath = "go.opentelemetry.io/contrib/propagators/b3", sum = "h1:wDb2ct7xMzossYpx44w81skxkEyeT2IRnBgYKqyEork=", version = "v1.4.0", ) go_repository( name = "io_opentelemetry_go_otel", build_file_proto_mode = "disable", importpath = "go.opentelemetry.io/otel", sum = "h1:DhCU8oR2sJH9rfnwPdoV/+BJ7UIN5kXHL8DuSGrPU8E=", version = "v1.5.0", ) go_repository( name = "io_opentelemetry_go_otel_exporters_jaeger", importpath = "go.opentelemetry.io/otel/exporters/jaeger", sum = "h1:ZR7nhLSfLufS5AHk/iN11Q+W9XYwsJrVZ1Frb833d+Y=", version = "v1.5.0", ) go_repository( name = "io_opentelemetry_go_otel_exporters_otlp_internal_retry", importpath = "go.opentelemetry.io/otel/exporters/otlp/internal/retry", sum = "h1:lC0ldaVQwBpO1G5IaOYRbBCa67h6ioGkK6qYkqZbYOI=", version = "v1.5.0", ) go_repository( name = "io_opentelemetry_go_otel_exporters_otlp_otlptrace", importpath = "go.opentelemetry.io/otel/exporters/otlp/otlptrace", sum = "h1:Arn+HOtC6neocvr6J4ykfILvtiSwoDkkLFMaVLFKBnY=", version = "v1.5.0", ) go_repository( name = "io_opentelemetry_go_otel_metric", importpath = "go.opentelemetry.io/otel/metric", sum = "h1:t+5EioN8YFXQ2EH+1j6FHCKMUj+57zIDSnSGr/mWuug=", version = "v0.17.0", ) go_repository( name = "io_opentelemetry_go_otel_oteltest", importpath = "go.opentelemetry.io/otel/oteltest", sum = "h1:TyAihUowTDLqb4+m5ePAsR71xPJaTBJl4KDArIdi9k4=", version = "v0.17.0", ) go_repository( name = "io_opentelemetry_go_otel_sdk", importpath = "go.opentelemetry.io/otel/sdk", sum = "h1:QKhWBbcOC9fDCZKCfPFjWTWpfIlJR+i9xiUDYrLVmZs=", version = "v1.5.0", ) go_repository( name = "io_opentelemetry_go_otel_trace", importpath = "go.opentelemetry.io/otel/trace", sum = "h1:AKQZ9zJsBRFAp7zLdyGNkqG2rToCDIt3i5tcLzQlbmU=", version = "v1.5.0", ) go_repository( name = "io_opentelemetry_go_proto_otlp", importpath = "go.opentelemetry.io/proto/otlp", sum = "h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c=", version = "v0.12.0", ) go_repository( name = "io_rsc_pdf", importpath = "rsc.io/pdf", sum = "h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=", version = "v0.1.1", ) go_repository( name = "org_gioui", importpath = "gioui.org", sum = "h1:K72hopUosKG3ntOPNG4OzzbuhxGuVf06fa2la1/H/Ho=", version = "v0.0.0-20210308172011-57750fc8a0a6", ) go_repository( name = "org_golang_google_api", importpath = "google.golang.org/api", sum = "h1:n2bqqK895ygnBpdPDYetfy23K7fJ22wsrZKCyfuRkkA=", version = "v0.63.0", ) go_repository( name = "org_golang_google_appengine", 
importpath = "google.golang.org/appengine", sum = "h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=", version = "v1.6.7", ) go_repository( name = "org_golang_google_genproto", importpath = "google.golang.org/genproto", sum = "h1:ErU+UA6wxadoU8nWrsy5MZUVBs75K17zUCsUCIfrXCE=", version = "v0.0.0-20220314164441-57ef72a4c106", ) go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable", importpath = "google.golang.org/grpc", sum = "h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=", version = "v1.45.0", ) go_repository( name = "org_golang_google_grpc_cmd_protoc_gen_go_grpc", importpath = "google.golang.org/grpc/cmd/protoc-gen-go-grpc", sum = "h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE=", version = "v1.1.0", ) go_repository( name = "org_golang_google_protobuf", build_extra_args = [ "-exclude=**/testdata", ], importpath = "google.golang.org/protobuf", sum = "h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=", version = "v1.27.1", ) go_repository( name = "org_golang_x_crypto", importpath = "golang.org/x/crypto", sum = "h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=", version = "v0.0.0-20200622213623-75b288015ac9", ) go_repository( name = "org_golang_x_exp", importpath = "golang.org/x/exp", sum = "h1:n9HxLrNxWWtEb1cA950nuEEj3QnKbtsCJ6KjcgisNUs=", version = "v0.0.0-20191002040644-a1355ae1e2c3", ) go_repository( name = "org_golang_x_image", importpath = "golang.org/x/image", sum = "h1:TcHcE0vrmgzNH1v3ppjcMGbhG5+9fMuvOmUYwNEF4q4=", version = "v0.0.0-20220302094943-723b81ca9867", ) go_repository( name = "org_golang_x_lint", importpath = "golang.org/x/lint", sum = "h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=", version = "v0.0.0-20210508222113-6edffad5e616", ) go_repository( name = "org_golang_x_mobile", importpath = "golang.org/x/mobile", sum = "h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=", version = "v0.0.0-20190719004257-d2bd2a29d028", ) go_repository( name = "org_golang_x_mod", importpath = "golang.org/x/mod", sum = "h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=", version = "v0.5.1", ) go_repository( name = "org_golang_x_net", importpath = "golang.org/x/net", sum = "h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=", version = "v0.0.0-20220225172249-27dd8689420f", ) go_repository( name = "org_golang_x_oauth2", importpath = "golang.org/x/oauth2", sum = "h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM=", version = "v0.0.0-20220309155454-6242fa91716a", ) go_repository( name = "org_golang_x_sync", importpath = "golang.org/x/sync", sum = "h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=", version = "v0.0.0-20210220032951-036812b2e83c", ) go_repository( name = "org_golang_x_sys", importpath = "golang.org/x/sys", sum = "h1:Fm4IcnUL803i92qDlmB0obyHmosDrxZWxJL3gIeNqOw=", version = "v0.0.0-20220317061510-51cd9980dadf",
drive firmware information. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_drives_drive_firmware(lnn, driveid, async_req=True) >>> result = thread.get() :param async_req bool :param int lnn: (required) :param str driveid: (required) :return: DrivesDriveFirmware If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_drives_drive_firmware_with_http_info(lnn, driveid, **kwargs) # noqa: E501 else: (data) = self.get_drives_drive_firmware_with_http_info(lnn, driveid, **kwargs) # noqa: E501 return data def get_drives_drive_firmware_with_http_info(self, lnn, driveid, **kwargs): # noqa: E501 """get_drives_drive_firmware # noqa: E501 Retrieve drive firmware information. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_drives_drive_firmware_with_http_info(lnn, driveid, async_req=True) >>> result = thread.get() :param async_req bool :param int lnn: (required) :param str driveid: (required) :return: DrivesDriveFirmware If the method is called asynchronously, returns the request thread. """ all_params = ['lnn', 'driveid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_drives_drive_firmware" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'lnn' is set if ('lnn' not in params or params['lnn'] is None): raise ValueError("Missing the required parameter `lnn` when calling `get_drives_drive_firmware`") # noqa: E501 # verify the required parameter 'driveid' is set if ('driveid' not in params or params['driveid'] is None): raise ValueError("Missing the required parameter `driveid` when calling `get_drives_drive_firmware`") # noqa: E501 collection_formats = {} path_params = {} if 'lnn' in params: path_params['Lnn'] = params['lnn'] # noqa: E501 if 'driveid' in params: path_params['Driveid'] = params['driveid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['basicAuth'] # noqa: E501 return self.api_client.call_api( '/platform/7/cluster/nodes/{Lnn}/drives/{Driveid}/firmware', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DrivesDriveFirmware', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_node_drive(self, node_drive_id, lnn, **kwargs): # noqa: E501 """get_node_drive # noqa: E501 Retrieve drive information. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_node_drive(node_drive_id, lnn, async_req=True) >>> result = thread.get() :param async_req bool :param str node_drive_id: Retrieve drive information. (required) :param int lnn: (required) :param float timeout: Request timeout :return: NodeDrives If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_node_drive_with_http_info(node_drive_id, lnn, **kwargs) # noqa: E501 else: (data) = self.get_node_drive_with_http_info(node_drive_id, lnn, **kwargs) # noqa: E501 return data def get_node_drive_with_http_info(self, node_drive_id, lnn, **kwargs): # noqa: E501 """get_node_drive # noqa: E501 Retrieve drive information. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_node_drive_with_http_info(node_drive_id, lnn, async_req=True) >>> result = thread.get() :param async_req bool :param str node_drive_id: Retrieve drive information. (required) :param int lnn: (required) :param float timeout: Request timeout :return: NodeDrives If the method is called asynchronously, returns the request thread. """ all_params = ['node_drive_id', 'lnn', 'timeout'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_node_drive" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'node_drive_id' is set if ('node_drive_id' not in params or params['node_drive_id'] is None): raise ValueError("Missing the required parameter `node_drive_id` when calling `get_node_drive`") # noqa: E501 # verify the required parameter 'lnn' is set if ('lnn' not in params or params['lnn'] is None): raise ValueError("Missing the required parameter `lnn` when calling `get_node_drive`") # noqa: E501 if 'timeout' in params and params['timeout'] > 4294967295: # noqa: E501 raise ValueError("Invalid value for parameter `timeout` when calling `get_node_drive`, must be a value less than or equal to `4294967295`") # noqa: E501 if 'timeout' in params and params['timeout'] < 0: # noqa: E501 raise ValueError("Invalid value for parameter `timeout` when calling `get_node_drive`, must be a value greater than or equal to `0`") # noqa: E501 collection_formats = {} path_params = {} if 'node_drive_id' in params: path_params['NodeDriveId'] = params['node_drive_id'] # noqa: E501 if 'lnn' in params: path_params['Lnn'] = params['lnn'] # noqa: E501 query_params = [] if 'timeout' in params: query_params.append(('timeout', params['timeout'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['basicAuth'] # noqa: E501 return self.api_client.call_api( '/platform/7/cluster/nodes/{Lnn}/drives/{NodeDriveId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, 
response_type='NodeDrives', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_node_driveconfig(self, lnn, **kwargs): # noqa: E501 """get_node_driveconfig # noqa: E501 View a node's drive subsystem XML configuration file. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_node_driveconfig(lnn, async_req=True) >>> result = thread.get() :param async_req bool :param int lnn: (required) :param float timeout: Request timeout :return: NodeDriveconfig If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_node_driveconfig_with_http_info(lnn, **kwargs) # noqa: E501 else: (data) = self.get_node_driveconfig_with_http_info(lnn, **kwargs) # noqa: E501 return data def get_node_driveconfig_with_http_info(self, lnn, **kwargs): # noqa: E501 """get_node_driveconfig # noqa: E501 View a node's drive subsystem XML configuration file. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_node_driveconfig_with_http_info(lnn, async_req=True) >>> result = thread.get() :param async_req bool :param int lnn: (required) :param float timeout: Request timeout :return: NodeDriveconfig If the method is called asynchronously, returns the request thread. """ all_params = ['lnn', 'timeout'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_node_driveconfig" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'lnn' is set if ('lnn' not in params or params['lnn'] is None): raise ValueError("Missing the required parameter `lnn` when calling `get_node_driveconfig`") # noqa: E501 if 'timeout' in params and params['timeout'] > 4294967295: # noqa: E501 raise ValueError("Invalid value for parameter `timeout` when calling `get_node_driveconfig`, must be a value less than or equal to `4294967295`") # noqa: E501 if 'timeout' in params and params['timeout'] < 0: # noqa: E501 raise ValueError("Invalid value for parameter `timeout` when calling `get_node_driveconfig`, must be a value greater than or equal to `0`") # noqa: E501 collection_formats = {} path_params = {} if 'lnn' in params: path_params['Lnn'] = params['lnn'] # noqa: E501 query_params = [] if 'timeout' in params: query_params.append(('timeout', params['timeout'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['basicAuth'] # noqa: E501 return self.api_client.call_api( '/platform/7/cluster/nodes/{Lnn}/driveconfig', 'GET', path_params, query_params, header_params, body=body_params, 
post_params=form_params, files=local_var_files, response_type='NodeDriveconfig', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_node_drives(self, lnn, **kwargs): # noqa: E501 """get_node_drives # noqa: E501 List the drives on this node. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_node_drives(lnn, async_req=True) >>> result = thread.get() :param async_req bool :param int lnn: (required) :param float timeout: Request timeout :return: NodeDrives If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] =
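# ---------------------------------------------------------------------------
# Hedged usage sketch for the generated methods above. The package and class
# names (isi_sdk, Configuration, ApiClient, ClusterNodesApi) are assumptions
# about the generated swagger-codegen layout, not confirmed by this excerpt.
# ---------------------------------------------------------------------------
# import isi_sdk
#
# config = isi_sdk.Configuration()
# config.host = "https://cluster.example.com:8080"
# config.username = "root"      # 'basicAuth' per the auth_settings above
# config.password = "secret"
#
# api = isi_sdk.ClusterNodesApi(isi_sdk.ApiClient(config))
#
# # Synchronous call (the default):
# firmware = api.get_drives_drive_firmware(1, "1")
#
# # Asynchronous call: returns a thread whose .get() yields the result.
# thread = api.get_node_drives(1, timeout=30.0, async_req=True)
# drives = thread.get()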
'LOCAL' : [], } ), ] def __init__(self, target=None, new_subscribed_library=None, subscribed_library=None, location=None, vcenter=None, placement=None, ): """ :type target: :class:`Subscriptions.CreateSpecSubscribedLibrary.Target` :param target: Specifies whether the target subscribed library should be newly created or an existing subscribed library should be used. This attribute was added in vSphere API 6.7.2. :type new_subscribed_library: :class:`Subscriptions.CreateSpecNewSubscribedLibrary` :param new_subscribed_library: Specification for creating a new subscribed library associated with the subscription. This attribute was added in vSphere API 6.7.2. This attribute is optional and it is only relevant when the value of ``target`` is :attr:`Subscriptions.CreateSpecSubscribedLibrary.Target.CREATE_NEW`. :type subscribed_library: :class:`str` :param subscribed_library: Identifier of the existing subscribed library to associate with the subscription. Only the subscribed libraries for which :attr:`SubscriptionInfo.subscription_url` property is set to the :attr:`PublishInfo.publish_url` of the published library can be associated with the subscription. This attribute was added in vSphere API 6.7.2. When clients pass a value of this class as a parameter, the attribute must be an identifier for the resource type: ``com.vmware.content.Library``. When methods return a value of this class as a return value, the attribute will be an identifier for the resource type: ``com.vmware.content.Library``. This attribute is optional and it is only relevant when the value of ``target`` is :attr:`Subscriptions.CreateSpecSubscribedLibrary.Target.USE_EXISTING`. :type location: :class:`Subscriptions.Location` :param location: Location of the subscribed library relative to the published library. This attribute was added in vSphere API 6.7.2. :type vcenter: :class:`Subscriptions.CreateSpecVcenter` :param vcenter: Specification for the subscribed library's vCenter Server instance. This attribute was added in vSphere API 6.7.2. This attribute is optional and it is only relevant when the value of ``location`` is :attr:`Subscriptions.Location.REMOTE`. :type placement: :class:`Subscriptions.CreateSpecPlacement` or ``None`` :param placement: Placement specification for the virtual machine template library items on the subscribed library. This attribute was added in vSphere API 6.7.2. This attribute is currently required. In future, if this is None, the system will attempt to choose a suitable placement specification for the virtual machine template items; if a placement specification cannot be chosen, publish of virtual machine template items will fail. """ self.target = target self.new_subscribed_library = new_subscribed_library self.subscribed_library = subscribed_library self.location = location self.vcenter = vcenter self.placement = placement VapiStruct.__init__(self) class Target(Enum): """ The ``Subscriptions.CreateSpecSubscribedLibrary.Target`` class defines the options related to the target subscribed library which will be associated with the subscription. This enumeration was added in vSphere API 6.7.2. .. note:: This class represents an enumerated type in the interface language definition. The class contains class attributes which represent the values in the current version of the enumerated type. Newer versions of the enumerated type may contain new values. To use new values of the enumerated type in communication with a server that supports the newer version of the API, you instantiate this class. 
See :ref:`enumerated type description page <enumeration_description>`. """ CREATE_NEW = None """ Create a new subscribed library. This class attribute was added in vSphere API 6.7.2. """ USE_EXISTING = None """ Use the specified existing subscribed library. This class attribute was added in vSphere API 6.7.2. """ def __init__(self, string): """ :type string: :class:`str` :param string: String value for the :class:`Target` instance. """ Enum.__init__(string) Target._set_values([ Target('CREATE_NEW'), Target('USE_EXISTING'), ]) Target._set_binding_type(type.EnumType( 'com.vmware.content.library.subscriptions.create_spec_subscribed_library.target', Target)) CreateSpecSubscribedLibrary._set_binding_type(type.StructType( 'com.vmware.content.library.subscriptions.create_spec_subscribed_library', { 'target': type.ReferenceType(__name__, 'Subscriptions.CreateSpecSubscribedLibrary.Target'), 'new_subscribed_library': type.OptionalType(type.ReferenceType(__name__, 'Subscriptions.CreateSpecNewSubscribedLibrary')), 'subscribed_library': type.OptionalType(type.IdType()), 'location': type.ReferenceType(__name__, 'Subscriptions.Location'), 'vcenter': type.OptionalType(type.ReferenceType(__name__, 'Subscriptions.CreateSpecVcenter')), 'placement': type.OptionalType(type.ReferenceType(__name__, 'Subscriptions.CreateSpecPlacement')), }, CreateSpecSubscribedLibrary, False, None)) class CreateSpec(VapiStruct): """ The ``Subscriptions.CreateSpec`` class defines the information required to create a new subscription of the published library. This class was added in vSphere API 6.7.2. .. tip:: The arguments are used to initialize data attributes with the same names. """ def __init__(self, subscribed_library=None, ): """ :type subscribed_library: :class:`Subscriptions.CreateSpecSubscribedLibrary` :param subscribed_library: Specification for the subscribed library to be associated with the subscription. This attribute was added in vSphere API 6.7.2. """ self.subscribed_library = subscribed_library VapiStruct.__init__(self) CreateSpec._set_binding_type(type.StructType( 'com.vmware.content.library.subscriptions.create_spec', { 'subscribed_library': type.ReferenceType(__name__, 'Subscriptions.CreateSpecSubscribedLibrary'), }, CreateSpec, False, None)) class Summary(VapiStruct): """ The ``Subscriptions.Summary`` class contains commonly used information about the subscription. This class was added in vSphere API 6.7.2. .. tip:: The arguments are used to initialize data attributes with the same names. """ def __init__(self, subscription=None, subscribed_library=None, subscribed_library_name=None, subscribed_library_vcenter_hostname=None, ): """ :type subscription: :class:`str` :param subscription: Identifier of the subscription. This attribute was added in vSphere API 6.7.2. When clients pass a value of this class as a parameter, the attribute must be an identifier for the resource type: ``com.vmware.content.library.Subscriptions``. When methods return a value of this class as a return value, the attribute will be an identifier for the resource type: ``com.vmware.content.library.Subscriptions``. :type subscribed_library: :class:`str` :param subscribed_library: Identifier of the subscribed library. This attribute was added in vSphere API 6.7.2. When clients pass a value of this class as a parameter, the attribute must be an identifier for the resource type: ``com.vmware.content.Library``. 
When methods return a value of this class as a return value, the attribute will be an identifier for the resource type: ``com.vmware.content.Library``. :type subscribed_library_name: :class:`str` :param subscribed_library_name: Name of the subscribed library. This attribute was added in vSphere API 6.7.2. :type subscribed_library_vcenter_hostname: :class:`str` or ``None`` :param subscribed_library_vcenter_hostname: Hostname of the vCenter instance where the subscribed library exists. This attribute was added in vSphere API 6.7.2. This attribute is unset if the subscribed library is on the same vCenter Server instance as the published library. """ self.subscription = subscription self.subscribed_library = subscribed_library self.subscribed_library_name = subscribed_library_name self.subscribed_library_vcenter_hostname = subscribed_library_vcenter_hostname VapiStruct.__init__(self) Summary._set_binding_type(type.StructType( 'com.vmware.content.library.subscriptions.summary', { 'subscription': type.IdType(resource_types='com.vmware.content.library.Subscriptions'), 'subscribed_library': type.IdType(resource_types='com.vmware.content.Library'), 'subscribed_library_name': type.StringType(), 'subscribed_library_vcenter_hostname': type.OptionalType(type.StringType()), }, Summary, False, None)) class UpdateSpecVcenter(VapiStruct): """ The ``Subscriptions.UpdateSpecVcenter`` class defines information about the vCenter Server instance where the subscribed library associated with the subscription exists. The ``Subscriptions.UpdateSpecVcenter`` class is only applicable to subscribed library which exists on remote vCenter Server instance. This class was added in vSphere API 6.7.2. .. tip:: The arguments are used to initialize data attributes with the same names. """ def __init__(self, hostname=None, https_port=None, ): """ :type hostname: :class:`str` or ``None`` :param hostname: The hostname of the subscribed library's vCenter Server. This attribute was added in vSphere API 6.7.2. If None, the value is unchanged. :type https_port: :class:`long` or ``None`` :param https_port: The HTTPS port of the vCenter Server instance where the subscribed library exists. This attribute was added in vSphere API 6.7.2. If None, the value is unchanged. """ self.hostname = hostname self.https_port = https_port VapiStruct.__init__(self) UpdateSpecVcenter._set_binding_type(type.StructType( 'com.vmware.content.library.subscriptions.update_spec_vcenter', { 'hostname': type.OptionalType(type.StringType()), 'https_port': type.OptionalType(type.IntegerType()), }, UpdateSpecVcenter, False, None)) class UpdateSpecPlacement(VapiStruct): """ The ``Subscriptions.UpdateSpecPlacement`` class defines the placement information for the subscribed library's virtual machine template library items. Storage location of the virtual machine template items is defined by the subscribed library's storage backing. This placement information needs to be compatible with the subscribed library's storage backing. The ``Subscriptions.UpdateSpecPlacement`` class is only applicable for the newly published virtual machine template library items of the subscribed library. Existing items will not be moved. This class was added in vSphere API 6.7.2. .. tip:: The arguments are used to initialize data attributes with the same names. 
""" def __init__(self, folder=None, cluster=None, resource_pool=None, host=None, network=None, ): """ :type folder: :class:`str` or ``None`` :param folder: Virtual machine folder into which the virtual machine template should be placed. This attribute was added in vSphere API 6.7.2. When clients pass a value of this class as a parameter, the attribute must be an identifier for the resource type: ``Folder:VCenter``. When methods return a value of this class as a return value, the attribute will be an identifier for the resource type: ``Folder:VCenter``. This attribute is currently required. In future, if this is None, the system will attempt to choose a suitable folder for the virtual machine template; if a folder cannot be chosen, publishing a virtual machine template item will fail. :type cluster: :class:`str` or ``None`` :param cluster: Cluster onto which the virtual machine template should be placed. If ``cluster`` and ``resourcePool`` are both specified, ``resourcePool`` must belong to ``cluster``. If ``cluster`` and ``host`` are both
import sys
import os
import argparse
import xml.etree.ElementTree as ET


def flipBoundingBox(x_min, y_min, x_max, y_max, img_width, img_height):
    '''
    Flips coordinates on the vertical axis.
    Assumes x_min, y_min, x_max, y_max start from 0.
    Assumes all parameters are numbers.
    '''
    x_min_flip = (img_width-1) - x_min
    y_min_flip = y_min
    x_max_flip = (img_width-1) - x_max
    y_max_flip = y_max

    # After the flip, point_min is at the upper right corner
    # and point_max at the lower left corner.
    # Extract the new upper left and lower right corners.
    new_x_min = x_max_flip
    new_y_min = y_min_flip
    new_x_max = x_min_flip
    new_y_max = y_max_flip

    return new_x_min, new_y_min, new_x_max, new_y_max


def flipBoundingBox2(x_min, y_min, x_max, y_max, img_width, img_height):
    '''
    Flips coordinates on the vertical axis.
    Assumes x_min, y_min, x_max, y_max start from 1.
    Assumes all parameters are numbers.
    The PASCAL VOC dataset uses this coordinate system.
    '''
    x_min_flip = img_width - x_min + 1
    y_min_flip = y_min
    x_max_flip = img_width - x_max + 1
    y_max_flip = y_max

    # After the flip, point_min is at the upper right corner
    # and point_max at the lower left corner.
    # Extract the new upper left and lower right corners.
    new_x_min = x_max_flip
    new_y_min = y_min_flip
    new_x_max = x_min_flip
    new_y_max = y_max_flip

    return new_x_min, new_y_min, new_x_max, new_y_max


def flipBoundingBoxTest():
    x1, y1, x2, y2 = flipBoundingBox(10, 20, 40, 100, 256, 128)
    print(x1, y1, x2, y2)
    print("expect 215 20 245 100")


def flipBBInVocXml(file_name, out_file):
    #print("flipBBInVocXml", file_name, out_file)
    if os.path.isfile(file_name):
        tree = ET.parse(file_name)
        root = tree.getroot()

        # get image size
        size = root.find('size')
        if size is None:
            print(file_name)
            print("Size not found")
            return

        width = size.find('width')
        if width is None:
            print(file_name)
            print("Width not found")
            return
        img_width = int(width.text)

        height = size.find('height')
        if height is None:
            print(file_name)
            print("Height not found")
            return
        img_height = int(height.text)

        # Processing bounding boxes
        '''
        <bndbox>
            <xmin>237</xmin>
            <ymin>209</ymin>
            <xmax>299</xmax>
            <ymax>268</ymax>
        </bndbox>
        '''
        for box in root.iter('bndbox'):
            x_min = int(box[0].text)
            y_min = int(box[1].text)
            x_max = int(box[2].text)
            y_max = int(box[3].text)
            x1, y1, x2, y2 = flipBoundingBox2(x_min, y_min, x_max, y_max, img_width, img_height)
            box[0].text = str(x1)
            box[1].text = str(y1)
            box[2].text = str(x2)
            box[3].text = str(y2)

        tree.write(out_file)
    else:
        print(file_name, "not found")


def stream(voc_index_file):
    with open(voc_index_file) as fp:
        line = fp.readline()
        while line:
            yield line.rstrip()
            line = fp.readline()


def readComp4DetTest(file_name):
    with open(file_name) as fp:
        line = fp.readline()
        while line:
            array = line.rstrip().split(' ')
            if len(array) == 6:
                yield {'name': array[0],
                       'bbox': {'xmin': float(array[1]),
                                'ymin': float(array[2]),
                                'xmax': float(array[3]),
                                'ymax': float(array[4])}}
            else:
                print("Unknown data:", line.rstrip())
            line = fp.readline()


def boundCoord(x, limit):
    # Convert a floating point pixel coordinate to an integer and bound it by limit.
    # labelimg coordinates are 1..544 (start from 1)
    x1 = int(round(x))
    if x1 < 1:
        x1 = 1
    if x1 > limit:
        x1 = limit
    return x1


def boundImageCoord(x, y, image_width, image_height):
    x1 = boundCoord(x, image_width)
    y1 = boundCoord(y, image_height)
    return x1, y1


def readAllComp4DetTest(file_name, label, db, threshold=0.24, image_width=544, image_height=544):
    '''
    db {'file name' : [ { 'name': str,
                          'bndbox': {'xmin': float, 'ymin': float,
                                     'xmax': float, 'ymax': float} },
                        [...], [...] ] }
    '''
    with open(file_name) as fp:
        line = fp.readline()
        cnt = 1
        while line:
            array = line.rstrip().split(' ')
            if len(array) == 6:
                thresh = float(array[1])
                if thresh > threshold:
                    image_name = array[0]
                    xmin = float(array[2])
                    ymin = float(array[3])
                    xmin, ymin = boundImageCoord(xmin, ymin, image_width, image_height)
                    xmax = float(array[4])
                    ymax = float(array[5])
                    xmax, ymax = boundImageCoord(xmax, ymax, image_width, image_height)
                    if image_name not in db:
                        db[image_name] = []
                    db[image_name].append({'name': label,
                                           'bndbox': {'xmin': xmin, 'ymin': ymin,
                                                      'xmax': xmax, 'ymax': ymax}})
            else:
                print("Unknown data:", line.rstrip())
            line = fp.readline()
            cnt += 1
    print(file_name, "has", cnt, "detections.")
    return db


def readComp4DetTest_TEST(file_name):
    gen = readComp4DetTest(file_name)
    for x in gen:
        print(x)


def setIndexPrefix(index, prefix):
    data = index.split('_')
    size = len(data)
    if size == 1:
        new_str = prefix + index[1:]
        return new_str
    elif size == 2:
        mov = data[0]
        number = data[1]
        new_number = prefix + number[1:]
        new_str = mov + "_" + new_number
        return new_str
    else:
        print("Unsupported VOC index: {}".format(index))
        return ""


def changePrefixInVocIndex(file_name, prefix):
    '''
    Generator
    Read each line in the file and do the following:
    Input:  7785_0002001
    Output: 7785_[prefix]002001
    '''
    gen = stream(file_name)
    for line in gen:
        data = line.split('_')
        mov = data[0]
        number = data[1]
        new_number = prefix + number[1:]
        yield line, mov + "_" + new_number


def addPrefixInVocIndex(file_name, offset):
    '''
    Generator
    Read each line in the file and do the following:
    Input:  7785_X002001
    Output: 7785_[X+offset]002001
    '''
    gen = stream(file_name)
    for line in gen:
        data = line.split('_')
        mov = data[0]
        number = data[1]
        base = int(number[:1])
        prefix = str(base + offset)
        new_number = prefix + number[1:]
        yield line, mov + "_" + new_number


def gen_list_expo(file_name):
    gen = stream(file_name)
    for in_file in gen:
        print("{}".format(in_file))
    gen = changePrefixInVocIndex(file_name, "1")
    for in_file, out_file in gen:
        print("{}".format(out_file))
    gen = changePrefixInVocIndex(file_name, "2")
    for in_file, out_file in gen:
        print("{}".format(out_file))


def gen_anno_expo(file_name):
    print("#!/bin/sh")
    gen = changePrefixInVocIndex(file_name, "1")
    for in_file, out_file in gen:
        print("\\cp {}.xml {}.xml".format(in_file, out_file))
    gen = changePrefixInVocIndex(file_name, "2")
    for in_file, out_file in gen:
        print("\\cp {}.xml {}.xml".format(in_file, out_file))


def do_expo(file_name):
    print("#!/bin/sh")
    gen = changePrefixInVocIndex(file_name, "1")
    for in_file, out_file in gen:
        print("exposure -a -50 {}.jpg {}.jpg".format(in_file, out_file))
    gen = changePrefixInVocIndex(file_name, "2")
    for in_file, out_file in gen:
        print("exposure -a 50 {}.jpg {}.jpg".format(in_file, out_file))


def add_jpg_ext(file_name):
    gen = stream(file_name)
    for in_file in gen:
        print("{}.jpg".format(in_file))


def cp(file_name):
    print("#!/bin/sh")
    gen = stream(file_name)
    for in_file in gen:
        #print("\\cp ~/data/VOC/VOCdevkit/VOC2018/JPEGImages/{}.jpg .".format(in_file))
        #print("\\cp {}.jpg mov-7785-1080x1080/".format(in_file))
        print("\\cp {}.jpg ~/data/VOC/VOCdevkit/VOC2000/JPEGImages/".format(in_file))


def flop(file_name):
    print("#!/bin/sh")
    gen = addPrefixInVocIndex(file_name, 3)
    for in_file, out_file in gen:
        print("convert {}.jpg -flop {}.jpg".format(in_file, out_file))


def flop_anno(file_name):
    gen = addPrefixInVocIndex(file_name, 3)
    for in_file, out_file in gen:
        flipBBInVocXml(in_file + ".xml", out_file + ".xml")


def ln(file_name):
    print("#!/bin/sh")
    gen = stream(file_name)
    for in_file in gen:
        print("ln -s ../{}.jpg {}.jpg".format(in_file, in_file))


def createAnnoXMLObj(name, image_ext, image_width=544, image_height=544):
    '''
    name: e.g. IMG_6148
    image_ext: extension of the image file, e.g. .jpg
    '''
    root = ET.Element('annotation')
    ET.SubElement(root, 'folder').text = 'JPEGImages'
    ET.SubElement(root, 'filename').text = name + image_ext
    ET.SubElement(root, 'path').text = name
    source = ET.SubElement(root, 'source')
    ET.SubElement(source, 'database').text = 'Unknown'
    size = ET.SubElement(root, 'size')
    ET.SubElement(size, 'width').text = str(image_width)
    ET.SubElement(size, 'height').text = str(image_height)
    ET.SubElement(size, 'depth').text = '3'
    ET.SubElement(root, 'segmented').text = '0'
    '''
    # Write to file
    tree = ET.ElementTree(root)
    filename = name + '.xml'
    with open(filename, 'w', encoding='utf-8') as file:
        tree.write(file, encoding='unicode')
    '''
    return root


def addBBoxToXMLObj(anno_obj, label, bbox):
    '''
    anno_obj: ElementTree Element, modified in place.
    bbox: Bounding box in this format:
        { 'xmin' : int, 'ymin' : int, 'xmax' : int, 'ymax' : int }
    '''
    obj = ET.SubElement(anno_obj, 'object')
    ET.SubElement(obj, 'name').text = label
    ET.SubElement(obj, 'pose').text = 'Unspecified'
    ET.SubElement(obj, 'truncated').text = '0'
    ET.SubElement(obj, 'difficult').text = '0'
    bndbox = ET.SubElement(obj, 'bndbox')
    ET.SubElement(bndbox, 'xmin').text = str(bbox['xmin'])
    ET.SubElement(bndbox, 'ymin').text = str(bbox['ymin'])
    ET.SubElement(bndbox, 'xmax').text = str(bbox['xmax'])
    ET.SubElement(bndbox, 'ymax').text = str(bbox['ymax'])


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("file")
    args = parser.parse_args()

    # One-off invocations, kept commented for reference:
    #flipBBInVocXml(args.file, "output.xml")
    #gen = changePrefixInVocIndex(args.file, "X")
    #gen = addPrefixInVocIndex(args.file, 3)
    #gen = stream(args.file)
    #for item in gen:
    #    print(item)
    #for in_file, out_file in gen:
    #    print(in_file, out_file)
    #    flipBBInVocXml(in_file + ".xml", out_file + ".xml")
    #cp(args.file)
    #ln(args.file)
    #do_expo(args.file)
    #gen_anno_expo(args.file)
    #gen_list_expo(args.file)
    #flop_anno(args.file)
    #flop(args.file)
    #gen_tar_list(args.file)
    #add_jpg_ext(args.file)

    ## results file to XML
    #readComp4DetTest_TEST(args.file)
    #anno = createAnnoXMLObj(args.file, '.jpg', 544, 544)
    #print(ET.tostring(anno))
    #with open('output.xml', 'w') as f:
    #    ET.ElementTree(anno).write(f)
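    # Hedged worked example of the XML helpers above (the file name, label,
    # and box values are made up for illustration; a 544x544 image is assumed):
    #anno = createAnnoXMLObj('IMG_0001', '.jpg', 544, 544)
    #addBBoxToXMLObj(anno, 'person', {'xmin': 100, 'ymin': 120, 'xmax': 200, 'ymax': 300})
    #ET.ElementTree(anno).write('IMG_0001.xml')
    # Mirroring the same box for a horizontally flipped (1-based, VOC) image:
    #print(flipBoundingBox2(100, 120, 200, 300, 544, 544))  # -> (345, 120, 445, 300)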
<reponame>cbwang505/DIYDynamoRIO import time import Queue import bisect import logging import weakref import threading import collections from lighthouse.util.misc import * from lighthouse.util.disassembler import disassembler logger = logging.getLogger("Lighthouse.Metadata") #------------------------------------------------------------------------------ # Metadata #------------------------------------------------------------------------------ # # To aid in performance, Lighthouse lifts and indexes an in-memory limited # representation of the disassembler's open database. This is commonly # referred to as 'metadata' throughout this codebase. # # Once built, the lifted metadata cache stands completely independent of # the disassembler. This effectively eliminates the need for Lighthouse to # communicate with the underlying disassembler / API (which is slow) when # mapping coverage, or doing coverage composition logic. # # With this model, we have been able to move the heavy director based # coverage composition logic to python-only threads without disrupting the # user, or IDA. (added in v0.4.0) # # However, there are two main caveats of this model - # # 1. The cached 'metadata' representation may not always be true to state # of the database. For example, if the user defines/undefines functions, # the metadata cache will not be aware of such changes. # # Lighthouse will try to update the director's metadata cache when # applicable, but there are instances when it will be in the best # interest of the user to manually trigger a refresh of the metadata. # # 2. Building the metadata comes with an upfront cost, but this cost has # been reduced as much as possible. For example, generating metadata for # a database with ~17k functions, ~95k nodes (basic blocks), and ~563k # instructions takes only ~6 seconds. # # This will be negligible for small-medium sized databases, but may still # be jarring for larger databases. # # Ultimately, this model provides us a more responsive user experience at # the expense of the occasional inaccuracies that can be corrected by # reasonably low cost refresh. # #------------------------------------------------------------------------------ # Database Metadata #------------------------------------------------------------------------------ class DatabaseMetadata(object): """ Database level metadata cache. """ def __init__(self): # name & imagebase of the executable this metadata is based on self.filename = "" self.imagebase = -1 # database metadata cache status self.cached = False # the cache of key database structures self.nodes = {} self.functions = {} self.instructions = [] # internal members to help index & navigate the cached metadata self._stale_lookup = False self._name2func = {} self._last_node = [] # HACK: blank iterable for now self._node_addresses = [] self._function_addresses = [] # placeholder attribute for disassembler event hooks self._rename_hooks = None # metadata callbacks (see director for more info) self._function_renamed_callbacks = [] # asynchronous metadata collection thread self._refresh_worker = None self._stop_threads = False def terminate(self): """ Cleanup & terminate the metadata object. 
""" self.abort_refresh(join=True) if self._rename_hooks: self._rename_hooks.unhook() #-------------------------------------------------------------------------- # Providers #-------------------------------------------------------------------------- def get_instructions_slice(self, start_address, end_address): """ Get the instructions addresses that fall within a given range. """ index_start = bisect.bisect_left(self.instructions, start_address) index_end = bisect.bisect_left(self.instructions, end_address) return self.instructions[index_start:index_end] def get_node(self, address): """ Get the node (basic block) metadata for a given address. """ assert not self._stale_lookup, "Stale metadata is unsafe to use..." # fast path, effectively a LRU cache of 1 ;P if address in self._last_node: return self._last_node # # use the lookup lists to do a 'fuzzy' lookup of the given address, # locating the index of the closest known node address (rounding down) # index = bisect.bisect_right(self._node_addresses, address) - 1 node_metadata = self.nodes.get(self._node_addresses[index], None) # # if the given address does not fall within the selected node (or the # node simply does not exist), then we have no match/metadata to return # if not (node_metadata and address in node_metadata): return None # # if the selected node metadata contains the given target address, it # is a positive hit and we should cache this node (in last_node) for # faster consecutive lookups # self._last_node = node_metadata # return the located node_metadata return node_metadata def get_function(self, address): """ Get the function metadata for a given address. """ node_metadata = self.get_node(address) if not node_metadata: return None return node_metadata.function def get_function_by_name(self, function_name): """ Get the function metadata for a given function name. """ try: return self.functions[self._name2func[function_name]] except (IndexError, KeyError): return None def get_function_by_index(self, index): """ Get the function metadata for a given function index. """ try: return self.functions[self._function_addresses[index]] except (IndexError, KeyError): return None def get_function_index(self, address): """ Get the function index for a given address. """ return self._function_addresses.index(address) def get_closest_function(self, address): """ Get the function metadata for the function closest to the give address. """ # sanity check if not self._function_addresses: return None # get the closest insertion point of the given address index = bisect.bisect_left(self._function_addresses, address) # the given address is a min, return the first known function if index == 0: return self.functions[self._function_addresses[0]] # given address is a max, return the last known function if index == len(self._function_addresses): return self.functions[self._function_addresses[-1]] # select the two candidate addresses before = self._function_addresses[index - 1] after = self._function_addresses[index] # return the function closest to the given address if after - address < address - before: return self.functions[after] else: return self.functions[before] def flatten_blocks(self, basic_blocks): """ Flatten a list of basic blocks (address, size) to instruction addresses. This function provides a way to convert a list of (address, size) basic block entries into a list of individual instruction (or byte) addresses based on the current metadata. 
""" output = [] for address, size in basic_blocks: instructions = self.get_instructions_slice(address, address+size) output.extend(instructions) return output def flatten_blocks_funtion_trace(self, basic_blocks): """ Flatten a list of basic blocks (address, size) to instruction addresses. This function provides a way to convert a list of (address, size) basic block entries into a list of individual instruction (or byte) addresses based on the current metadata. """ output = {} for address, size ,func_id in basic_blocks: instructions = self.get_instructions_slice(address, address+size) if not output[func_id]: output[func_id]=(instructions) else: output[func_id].extend(instructions) return output def is_big(self): """ Return a bool indicating whether we think the database is 'big'. """ return len(self.functions) > 50000 #-------------------------------------------------------------------------- # Refresh #-------------------------------------------------------------------------- def refresh(self, function_addresses=None, progress_callback=None): """ Request an asynchronous refresh of the database metadata. TODO/FUTURE: we should make a synchronous refresh available """ assert self._refresh_worker == None, 'Refresh already running' result_queue = Queue.Queue() # # reset the async abort/stop flag that can be used used to cancel the # ongoing refresh task # self._stop_threads = False # # kick off an asynchronous metadata collection task # self._refresh_worker = threading.Thread( target=self._async_refresh, args=(result_queue, function_addresses, progress_callback,) ) self._refresh_worker.start() # # immediately return a queue to the caller which it can use to listen # on and wait for a refresh completion message # return result_queue def abort_refresh(self, join=False): """ Abort an asynchronous refresh. To guarantee an asynchronous refresh has been canceled, the caller can optionally wait for the result_queue from refresh() to return 'None'. Alternatively, the `join` parameter can be set to `True`, making this function block until the refresh is canceled. """ # # the refresh worker (if it exists) can be ripped away at any time. # take a local reference to avoid a double fetch problems # worker = self._refresh_worker # # if there is no worker present or running (cleaning up?) there is # nothing for us to abort. Simply reset the abort flag (just in case) # and return immediately # if not (worker and worker.is_alive()): self._stop_threads = False self._refresh_worker = None return # signal the worker thread to stop self._stop_threads = True # if requested, don't return until the worker thread has stopped... if join: worker.join() def _refresh_instructions(self): """ Refresh the list of database instructions (from function metadata). """ instructions = [] for function_metadata in self.functions.itervalues(): instructions.extend(function_metadata.instructions) instructions = list(set(instructions)) instructions.sort() # commit the updated instruction list self.instructions = instructions def _refresh_lookup(self): """ Refresh the internal fast lookup address lists. Fast lookup lists are simply sorted address lists of function metadata, node metadata, or possibly other forms of metadata (in the future). We create sorted lists of metadata object addresses so that we can use them for fast, fuzzy address lookup (eg, bisect). c.f: - get_node(ea) - get_function(ea) """ self._last_node = [] self._name2func = { f.name: f.address for f in
<filename>zerver/tests/test_auth_backends.py # -*- coding: utf-8 -*- from django.conf import settings from django.http import HttpResponse from django.test import TestCase, override_settings from django_auth_ldap.backend import _LDAPUser from django.test.client import RequestFactory from typing import Any, Callable, Dict, Optional, Text from builtins import object from oauth2client.crypt import AppIdentityError from django.core import signing from django.core.urlresolvers import reverse import jwt import mock import re from zerver.forms import HomepageForm from zerver.lib.actions import do_deactivate_realm, do_deactivate_user, \ do_reactivate_realm, do_reactivate_user from zerver.lib.initial_password import initial_password from zerver.lib.session_user import get_session_dict_user from zerver.lib.test_classes import ( ZulipTestCase, ) from zerver.models import \ get_realm, get_user_profile_by_email, email_to_username, UserProfile, \ PreregistrationUser, Realm from confirmation.models import Confirmation from zproject.backends import ZulipDummyBackend, EmailAuthBackend, \ GoogleMobileOauth2Backend, ZulipRemoteUserBackend, ZulipLDAPAuthBackend, \ ZulipLDAPUserPopulator, DevAuthBackend, GitHubAuthBackend, ZulipAuthMixin, \ dev_auth_enabled, password_auth_enabled, github_auth_enabled, \ SocialAuthMixin, AUTH_BACKEND_NAME_MAP from zerver.views.auth import maybe_send_to_registration from social_core.exceptions import AuthFailed from social_django.strategy import DjangoStrategy from social_django.storage import BaseDjangoStorage from social_core.backends.github import GithubOrganizationOAuth2, GithubTeamOAuth2, \ GithubOAuth2 from six.moves import urllib from six.moves.http_cookies import SimpleCookie import ujson from zerver.lib.test_helpers import MockLDAP class AuthBackendTest(TestCase): def verify_backend(self, backend, good_args=None, good_kwargs=None, bad_kwargs=None, email_to_username=None): # type: (Any, List[Any], Dict[str, Any], Dict[str, Any], Callable[[Text], Text]) -> None if good_args is None: good_args = [] if good_kwargs is None: good_kwargs = {} email = u"<EMAIL>" user_profile = get_user_profile_by_email(email) username = email if email_to_username is not None: username = email_to_username(email) # If bad_kwargs was specified, verify auth fails in that case if bad_kwargs is not None: self.assertIsNone(backend.authenticate(username, **bad_kwargs)) # Verify auth works result = backend.authenticate(username, *good_args, **good_kwargs) self.assertEqual(user_profile, result) # Verify auth fails with a deactivated user do_deactivate_user(user_profile) self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs)) # Reactivate the user and verify auth works again do_reactivate_user(user_profile) result = backend.authenticate(username, *good_args, **good_kwargs) self.assertEqual(user_profile, result) # Verify auth fails with a deactivated realm do_deactivate_realm(user_profile.realm) self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs)) # Verify auth works again after reactivating the realm do_reactivate_realm(user_profile.realm) result = backend.authenticate(username, *good_args, **good_kwargs) self.assertEqual(user_profile, result) # ZulipDummyBackend isn't a real backend so the remainder # doesn't make sense for it if isinstance(backend, ZulipDummyBackend): return # Verify auth fails if the auth backend is disabled on server with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipDummyBackend',)): 
self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs)) # Verify auth fails if the auth backend is disabled for the realm for backend_name in AUTH_BACKEND_NAME_MAP.keys(): if isinstance(backend, AUTH_BACKEND_NAME_MAP[backend_name]): break index = getattr(user_profile.realm.authentication_methods, backend_name).number user_profile.realm.authentication_methods.set_bit(index, False) user_profile.realm.save() self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs)) user_profile.realm.authentication_methods.set_bit(index, True) user_profile.realm.save() def test_dummy_backend(self): # type: () -> None self.verify_backend(ZulipDummyBackend(), good_kwargs=dict(use_dummy_backend=True), bad_kwargs=dict(use_dummy_backend=False)) def setup_subdomain(self, user_profile): # type: (UserProfile) -> None realm = user_profile.realm realm.string_id = 'zulip' realm.save() def test_email_auth_backend(self): # type: () -> None email = "<EMAIL>" user_profile = get_user_profile_by_email(email) password = "<PASSWORD>" user_profile.set_password(password) user_profile.save() self.setup_subdomain(user_profile) self.verify_backend(EmailAuthBackend(), bad_kwargs=dict(password=''), good_kwargs=dict(password=password)) # Subdomain is ignored when feature is not enabled self.verify_backend(EmailAuthBackend(), good_kwargs=dict(password=password, realm_subdomain='acme', return_data=dict())) with self.settings(REALMS_HAVE_SUBDOMAINS=True): # With subdomains, authenticating with the right subdomain # works; using the wrong subdomain doesn't self.verify_backend(EmailAuthBackend(), good_kwargs=dict(password=password, realm_subdomain='zulip', return_data=dict()), bad_kwargs=dict(password=password, realm_subdomain='acme', return_data=dict())) # Things work normally in the event that we're using a # non-subdomain login page, even if subdomains are enabled self.verify_backend(EmailAuthBackend(), bad_kwargs=dict(password="<PASSWORD>"), good_kwargs=dict(password=password)) def test_email_auth_backend_disabled_password_auth(self): # type: () -> None email = u"<EMAIL>" user_profile = get_user_profile_by_email(email) password = "<PASSWORD>" user_profile.set_password(password) user_profile.save() # Verify if a realm has password auth disabled, correct password is rejected with mock.patch('zproject.backends.password_auth_enabled', return_value=False): self.assertIsNone(EmailAuthBackend().authenticate(email, password)) @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleMobileOauth2Backend',)) def test_google_backend(self): # type: () -> None email = "<EMAIL>" backend = GoogleMobileOauth2Backend() payload = dict(email_verified=True, email=email) user_profile = get_user_profile_by_email(email) self.setup_subdomain(user_profile) with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload): self.verify_backend(backend) # With REALMS_HAVE_SUBDOMAINS off, subdomain is ignored with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload): self.verify_backend(backend, good_kwargs=dict(realm_subdomain='acme')) with self.settings(REALMS_HAVE_SUBDOMAINS=True): # With subdomains, authenticating with the right subdomain # works; using the wrong subdomain doesn't with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload): self.verify_backend(backend, good_kwargs=dict(realm_subdomain="zulip"), bad_kwargs=dict(realm_subdomain='acme')) # Verify valid_attestation parameter is set correctly unverified_payload = 
dict(email_verified=False) with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=unverified_payload): ret = dict() # type: Dict[str, str] result = backend.authenticate(return_data=ret) self.assertIsNone(result) self.assertFalse(ret["valid_attestation"]) nonexistent_user_payload = dict(email_verified=True, email="<EMAIL>") with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=nonexistent_user_payload): ret = dict() result = backend.authenticate(return_data=ret) self.assertIsNone(result) self.assertTrue(ret["valid_attestation"]) with mock.patch('apiclient.sample_tools.client.verify_id_token', side_effect=AppIdentityError): ret = dict() result = backend.authenticate(return_data=ret) self.assertIsNone(result) @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',)) def test_ldap_backend(self): # type: () -> None email = "<EMAIL>" password = "<PASSWORD>" user_profile = get_user_profile_by_email(email) self.setup_subdomain(user_profile) backend = ZulipLDAPAuthBackend() # Test LDAP auth fails when LDAP server rejects password with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn', side_effect=_LDAPUser.AuthenticationFailed("Failed")), ( mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), ( mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs', return_value=dict(full_name=['Hamlet']))): self.assertIsNone(backend.authenticate(email, password)) # For this backend, we mock the internals of django_auth_ldap with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), ( mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), ( mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs', return_value=dict(full_name=['Hamlet']))): self.verify_backend(backend, good_kwargs=dict(password=password)) with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), ( mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), ( mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs', return_value=dict(full_name=['Hamlet']))): self.verify_backend(backend, good_kwargs=dict(password=password, realm_subdomain='acme')) with self.settings(REALMS_HAVE_SUBDOMAINS=True): # With subdomains, authenticating with the right subdomain # works; using the wrong subdomain doesn't with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), ( mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), ( mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs', return_value=dict(full_name=['Hamlet']))): self.verify_backend(backend, bad_kwargs=dict(password=password, realm_subdomain='acme'), good_kwargs=dict(password=password, realm_subdomain='zulip')) def test_devauth_backend(self): # type: () -> None self.verify_backend(DevAuthBackend()) @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)) def test_remote_user_backend(self): # type: () -> None self.setup_subdomain(get_user_profile_by_email(u'<EMAIL>')) self.verify_backend(ZulipRemoteUserBackend(), good_kwargs=dict(realm_subdomain='acme')) with self.settings(REALMS_HAVE_SUBDOMAINS=True): # With subdomains, authenticating with the right subdomain # works; using the wrong subdomain doesn't self.verify_backend(ZulipRemoteUserBackend(), good_kwargs=dict(realm_subdomain='zulip'), bad_kwargs=dict(realm_subdomain='acme')) @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)) def 
test_remote_user_backend_sso_append_domain(self): # type: () -> None self.setup_subdomain(get_user_profile_by_email(u'<EMAIL>')) with self.settings(SSO_APPEND_DOMAIN='zulip.com'): self.verify_backend(ZulipRemoteUserBackend(), email_to_username=email_to_username, good_kwargs=dict(realm_subdomain='acme')) with self.settings(REALMS_HAVE_SUBDOMAINS=True): # With subdomains, authenticating with the right subdomain # works; using the wrong subdomain doesn't with self.settings(SSO_APPEND_DOMAIN='zulip.com'): self.verify_backend(ZulipRemoteUserBackend(), email_to_username=email_to_username, good_kwargs=dict(realm_subdomain='zulip'), bad_kwargs=dict(realm_subdomain='acme')) @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)) def test_github_backend(self): # type: () -> None email = '<EMAIL>' self.setup_subdomain(get_user_profile_by_email(email)) good_kwargs = dict(response=dict(email=email), return_data=dict(), realm_subdomain='acme') self.verify_backend(GitHubAuthBackend(), good_kwargs=good_kwargs, bad_kwargs=dict()) with self.settings(REALMS_HAVE_SUBDOMAINS=True): # With subdomains, authenticating with the right subdomain # works; using the wrong subdomain doesn't good_kwargs = dict(response=dict(email=email), return_data=dict(), realm_subdomain='zulip') bad_kwargs = dict(response=dict(email=email), return_data=dict(), realm_subdomain='acme') self.verify_backend(GitHubAuthBackend(), good_kwargs=good_kwargs, bad_kwargs=bad_kwargs) class SocialAuthMixinTest(ZulipTestCase): def test_social_auth_mixing(self): # type: () -> None mixin = SocialAuthMixin() with self.assertRaises(NotImplementedError): mixin.get_email_address() with self.assertRaises(NotImplementedError): mixin.get_full_name() class GitHubAuthBackendTest(ZulipTestCase): def setUp(self): # type: () -> None self.email = '<EMAIL>' self.name = 'Hamlet' self.backend = GitHubAuthBackend() self.backend.strategy = DjangoStrategy(storage=BaseDjangoStorage()) self.user_profile = get_user_profile_by_email(self.email) self.user_profile.backend = self.backend rf = RequestFactory() request = rf.get('/complete') request.session = {} request.get_host = lambda: 'acme.testserver' request.user = self.user_profile self.backend.strategy.request = request def do_auth(self, *args, **kwargs): # type: (*Any, **Any) -> UserProfile with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)): return self.backend.authenticate(*args, **kwargs) def test_github_auth_enabled(self): # type: () -> None with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)): self.assertTrue(github_auth_enabled()) def test_full_name_with_missing_key(self): # type: () -> None self.assertEqual(self.backend.get_full_name(), '') def test_github_backend_do_auth_without_subdomains(self): # type: () -> None with mock.patch('social_core.backends.github.GithubOAuth2.do_auth', side_effect=self.do_auth), \ mock.patch('zerver.views.auth.login'): response = dict(email=self.email, name=self.name) result = self.backend.do_auth(response=response) self.assertNotIn('subdomain=1', result.url) def test_github_backend_do_auth_with_non_existing_subdomain(self): # type: () -> None with mock.patch('social_core.backends.github.GithubOAuth2.do_auth', side_effect=self.do_auth): with self.settings(REALMS_HAVE_SUBDOMAINS=True): self.backend.strategy.session_set('subdomain', 'test') response = dict(email=self.email, name=self.name) result = self.backend.do_auth(response=response) self.assertIn('subdomain=1', result.url) def 
test_github_backend_do_auth_with_subdomains(self): # type: () -> None with mock.patch('social_core.backends.github.GithubOAuth2.do_auth', side_effect=self.do_auth): with self.settings(REALMS_HAVE_SUBDOMAINS=True): self.backend.strategy.session_set('subdomain', 'zulip') response = dict(email=self.email, name=self.name) result = self.backend.do_auth(response=response) self.assertEqual('http://zulip.testserver/accounts/login/subdomain/', result.url) def test_github_backend_do_auth_for_default(self): # type: () -> None with mock.patch('social_core.backends.github.GithubOAuth2.do_auth', side_effect=self.do_auth), \ mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result: response = dict(email=self.email, name=self.name) self.backend.do_auth('fake-access-token', response=response) kwargs = {'realm_subdomain': 'acme', 'response': response, 'return_data': {}} result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs) def test_github_backend_do_auth_for_team(self): # type: () -> None with mock.patch('social_core.backends.github.GithubTeamOAuth2.do_auth', side_effect=self.do_auth), \ mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result: response = dict(email=self.email, name=self.name) with self.settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp'): self.backend.do_auth('fake-access-token', response=response) kwargs = {'realm_subdomain': 'acme', 'response': response, 'return_data': {}} result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs) def test_github_backend_do_auth_for_team_auth_failed(self): # type: () -> None with mock.patch('social_core.backends.github.GithubTeamOAuth2.do_auth', side_effect=AuthFailed('Not found')), \ mock.patch('logging.info'), \ mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result: response = dict(email=self.email, name=self.name) with self.settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp'): self.backend.do_auth('fake-access-token', response=response) kwargs = {'realm_subdomain': 'acme', 'response': response, 'return_data': {}} result.assert_called_with(None, 'fake-access-token', **kwargs) def test_github_backend_do_auth_for_org(self): # type: () -> None with mock.patch('social_core.backends.github.GithubOrganizationOAuth2.do_auth', side_effect=self.do_auth), \ mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result: response = dict(email=self.email, name=self.name) with self.settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip'): self.backend.do_auth('fake-access-token', response=response) kwargs = {'realm_subdomain': 'acme', 'response': response, 'return_data': {}} result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs) def test_github_backend_do_auth_for_org_auth_failed(self): # type: () -> None with mock.patch('social_core.backends.github.GithubOrganizationOAuth2.do_auth', side_effect=AuthFailed('Not found')), \ mock.patch('logging.info'), \ mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result: response = dict(email=self.email, name=self.name) with self.settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip'): self.backend.do_auth('fake-access-token', response=response) kwargs = {'realm_subdomain': 'acme', 'response': response, 'return_data': {}} result.assert_called_with(None, 'fake-access-token', **kwargs) def test_github_backend_authenticate_nonexisting_user(self): # type: () -> None with mock.patch('zproject.backends.get_user_profile_by_email', side_effect=UserProfile.DoesNotExist("Do not exist")): response = 
dict(email=self.email, name=self.name) return_data = dict() # type: Dict[str, Any] user = self.backend.authenticate(return_data=return_data, response=response) self.assertIs(user, None) self.assertTrue(return_data['valid_attestation']) def test_github_backend_inactive_user(self): # type: () -> None def do_auth_inactive(*args, **kwargs): # type: (*Any, **Any) -> UserProfile return_data = kwargs['return_data'] return_data['inactive_user'] = True return self.user_profile with mock.patch('zerver.views.auth.login_or_register_remote_user') as result, \ mock.patch('social_core.backends.github.GithubOAuth2.do_auth', side_effect=do_auth_inactive): response = dict(email=self.email, name=self.name) user = self.backend.do_auth(response=response) result.assert_not_called() self.assertIs(user, None) def test_github_backend_new_user(self): # type: () -> None rf = RequestFactory() request = rf.get('/complete') request.session = {} request.user = self.user_profile self.backend.strategy.request = request def do_auth(*args, **kwargs): # type: (*Any, **Any) -> UserProfile return_data = kwargs['return_data'] return_data['valid_attestation'] = True return None with mock.patch('social_core.backends.github.GithubOAuth2.do_auth', side_effect=do_auth): response = dict(email='<EMAIL>', name='Ghost') result = self.backend.do_auth(response=response) self.assert_in_response('action="/register/"', result) self.assert_in_response('Your email address does not correspond to any ' 'existing organization.', result) def test_login_url(self): #
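# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original suite): the GitHub backend
# tests above lean on mock.patch(..., side_effect=...) to swap a backend's
# do_auth for a local fake. The same pattern in isolation; Backend and
# fake_do_auth are hypothetical stand-ins for GithubOAuth2 and the tests'
# side-effect helpers.
import mock

class Backend(object):
    def do_auth(self, access_token, **kwargs):
        raise RuntimeError('would hit the network in a real run')

def fake_do_auth(access_token, **kwargs):
    # mimic the pattern above: record attestation data, return no user
    kwargs.get('return_data', {})['valid_attestation'] = True
    return None

with mock.patch.object(Backend, 'do_auth', side_effect=fake_do_auth):
    ret = {}
    assert Backend().do_auth('fake-access-token', return_data=ret) is None
    assert ret == {'valid_attestation': True}
# ---------------------------------------------------------------------------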
# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Tests for the Inspur InStorage volume driver.""" import re from oslo_concurrency import processutils from oslo_utils import units import six from cinder import exception from cinder import utils from cinder.volume.drivers.inspur.instorage import instorage_const from cinder.volume.drivers.inspur.instorage import instorage_fc from cinder.volume.drivers.inspur.instorage import instorage_iscsi MCS_POOLS = ['openstack', 'openstack1'] def get_test_pool(get_all=False): if get_all: return MCS_POOLS else: return MCS_POOLS[0] class FakeInStorageMCSFcDriver(instorage_fc.InStorageMCSFCDriver): def __init__(self, *args, **kwargs): super(FakeInStorageMCSFcDriver, self).__init__(*args, **kwargs) def set_fake_storage(self, fake): self.fake_storage = fake def _run_ssh(self, cmd, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) return ret class FakeInStorageMCSISCSIDriver(instorage_iscsi.InStorageMCSISCSIDriver): def __init__(self, *args, **kwargs): super(FakeInStorageMCSISCSIDriver, self).__init__(*args, **kwargs) def set_fake_storage(self, fake): self.fake_storage = fake def _run_ssh(self, cmd, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) return ret class FakeInStorage(object): def __init__(self, pool_name): self._flags = {'instorage_mcs_volpool_name': pool_name} self._volumes_list = {} self._hosts_list = {} self._mappings_list = {} self._lcmappings_list = {} self._lcconsistgrp_list = {} self._rcrelationship_list = {} self._partnership_list = {} self._partnershipcandidate_list = {} self._system_list = {'instorage-mcs-sim': {'id': '0123456789ABCDEF', 'name': 'instorage-mcs-sim'}, 'aux-mcs-sim': {'id': '<KEY>', 'name': 'aux-mcs-sim'}} self._other_pools = {'openstack2': {}, 'openstack3': {}} self._next_cmd_error = { 'lsportip': '', 'lsfabric': '', 'lsiscsiauth': '', 'lsnodecanister': '', 'mkvdisk': '', 'lsvdisk': '', 'lslcmap': '', 'prestartlcmap': '', 'startlcmap': '', 'rmlcmap': '', 'lslicense': '', 'lsguicapabilities': '', 'lshost': '', 'lsrcrelationship': '' } self._errors = { 'CMMVC5701E': ('', 'CMMVC5701E No object ID was specified.'), 'CMMVC6035E': ('', 'CMMVC6035E The action failed as the ' 'object already exists.'), 'CMMVC5753E': ('', 'CMMVC5753E The specified object does not ' 'exist or is not a suitable candidate.'), 'CMMVC5707E': ('', 'CMMVC5707E Required parameters are missing.'), 'CMMVC6581E': ('', 'CMMVC6581E The command has failed because ' 'the maximum number of allowed iSCSI ' 'qualified names (IQNs) has been reached, ' 'or the IQN is already assigned or is not ' 'valid.'), 'CMMVC5754E': ('', 'CMMVC5754E The specified object does not ' 'exist, or the name supplied does not meet ' 'the naming rules.'), 'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping was ' 'not created because the VDisk is already ' 'mapped to a host.'), 'CMMVC5879E': 
('', 'CMMVC5879E The VDisk-to-host mapping was ' 'not created because a VDisk is already ' 'mapped to this host with this SCSI LUN.'), 'CMMVC5840E': ('', 'CMMVC5840E The virtual disk (VDisk) was ' 'not deleted because it is mapped to a ' 'host or because it is part of a LocalCopy ' 'or Remote Copy mapping, or is involved in ' 'an image mode migrate.'), 'CMMVC6527E': ('', 'CMMVC6527E The name that you have entered ' 'is not valid. The name can contain letters, ' 'numbers, spaces, periods, dashes, and ' 'underscores. The name must begin with a ' 'letter or an underscore. The name must not ' 'begin or end with a space.'), 'CMMVC5871E': ('', 'CMMVC5871E The action failed because one or ' 'more of the configured port names is in a ' 'mapping.'), 'CMMVC5924E': ('', 'CMMVC5924E The LocalCopy mapping was not ' 'created because the source and target ' 'virtual disks (VDisks) are different sizes.'), 'CMMVC6303E': ('', 'CMMVC6303E The create failed because the ' 'source and target VDisks are the same.'), 'CMMVC7050E': ('', 'CMMVC7050E The command failed because at ' 'least one node in the I/O group does not ' 'support compressed VDisks.'), 'CMMVC6430E': ('', 'CMMVC6430E The command failed because the ' 'target and source managed disk groups must ' 'be different.'), 'CMMVC6353E': ('', 'CMMVC6353E The command failed because the ' 'copy specified does not exist.'), 'CMMVC6446E': ('', 'The command failed because the managed disk ' 'groups have different extent sizes.'), # Catch-all for invalid state transitions: 'CMMVC5903E': ('', 'CMMVC5903E The LocalCopy mapping was not ' 'changed because the mapping or consistency ' 'group is another state.'), 'CMMVC5709E': ('', 'CMMVC5709E [-%(VALUE)s] is not a supported ' 'parameter.'), 'CMMVC5982E': ('', 'CMMVC5982E The operation was not performed ' 'because it is not valid given the current ' 'relationship state.'), 'CMMVC5963E': ('', 'CMMVC5963E No direction has been defined.'), } self._lc_transitions = {'begin': {'make': 'idle_or_copied'}, 'idle_or_copied': {'prepare': 'preparing', 'delete': 'end', 'delete_force': 'end'}, 'preparing': {'flush_failed': 'stopped', 'wait': 'prepared'}, 'end': None, 'stopped': {'prepare': 'preparing', 'delete_force': 'end'}, 'prepared': {'stop': 'stopped', 'start': 'copying'}, 'copying': {'wait': 'idle_or_copied', 'stop': 'stopping'}, # Assume the worst case where stopping->stopped # rather than stopping idle_or_copied 'stopping': {'wait': 'stopped'}, } self._lc_cg_transitions = {'begin': {'make': 'empty'}, 'empty': {'add': 'idle_or_copied'}, 'idle_or_copied': {'prepare': 'preparing', 'delete': 'end', 'delete_force': 'end'}, 'preparing': {'flush_failed': 'stopped', 'wait': 'prepared'}, 'end': None, 'stopped': {'prepare': 'preparing', 'delete_force': 'end'}, 'prepared': {'stop': 'stopped', 'start': 'copying', 'delete_force': 'end', 'delete': 'end'}, 'copying': {'wait': 'idle_or_copied', 'stop': 'stopping', 'delete_force': 'end', 'delete': 'end'}, # Assume the case where stopping->stopped # rather than stopping idle_or_copied 'stopping': {'wait': 'stopped'}, } self._rc_transitions = {'inconsistent_stopped': {'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'inconsistent_copying': { 'wait': 'consistent_synchronized', 'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'consistent_synchronized': { 'start': 'consistent_synchronized', 'stop': 'consistent_stopped', 'stop_access': 'idling', 'delete': 'end', 'delete_force': 'end'}, 
'consistent_stopped': {'start': 'consistent_synchronized', 'stop': 'consistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'end': None, 'idling': { 'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'stop_access': 'idling', 'delete': 'end', 'delete_force': 'end'}, } def _state_transition(self, function, lcmap): if (function == 'wait' and 'wait' not in self._lc_transitions[lcmap['status']]): return ('', '') if lcmap['status'] == 'copying' and function == 'wait': if lcmap['copyrate'] != '0': if lcmap['progress'] == '0': lcmap['progress'] = '50' else: lcmap['progress'] = '100' lcmap['status'] = 'idle_or_copied' return ('', '') else: try: curr_state = lcmap['status'] lcmap['status'] = self._lc_transitions[curr_state][function] return ('', '') except Exception: return self._errors['CMMVC5903E'] def _lc_cg_state_transition(self, function, lc_consistgrp): if (function == 'wait' and 'wait' not in self._lc_transitions[lc_consistgrp['status']]): return ('', '') try: curr_state = lc_consistgrp['status'] new_state = self._lc_cg_transitions[curr_state][function] lc_consistgrp['status'] = new_state return ('', '') except Exception: return self._errors['CMMVC5903E'] # Find an unused ID @staticmethod def _find_unused_id(d): ids = [] for v in d.values(): ids.append(int(v['id'])) ids.sort() for index, n in enumerate(ids): if n > index: return six.text_type(index) return six.text_type(len(ids)) # Check if name is valid @staticmethod def _is_invalid_name(name): if re.match(r'^[a-zA-Z_][\w._-]*$', name): return False return True # Convert argument string to dictionary @staticmethod def _cmd_to_dict(arg_list): no_param_args = [ 'autodelete', 'bytes', 'compressed', 'force', 'nohdr', 'nofmtdisk', 'async', 'access', 'start' ] one_param_args = [ 'chapsecret', 'cleanrate', 'copy', 'copyrate', 'delim', 'intier', 'filtervalue', 'grainsize', 'hbawwpn', 'host', 'iogrp', 'iscsiname', 'mdiskgrp', 'name', 'rsize', 'scsi', 'size', 'source', 'target', 'unit', 'vdisk', 'warning', 'wwpn', 'primary', 'consistgrp', 'master', 'aux', 'cluster', 'linkbandwidthmbits', 'backgroundcopyrate' ] no_or_one_param_args = [ 'autoexpand', ] # Handle the special case of lsnode which is a two-word command # Use the one word version of the command internally if arg_list[0] in ('mcsinq', 'mcsop'): if arg_list[1] == 'lsnode': if len(arg_list) > 4: # e.g. mcsinq lsnode -delim ! <node id> ret = {'cmd': 'lsnode', 'node_id': arg_list[-1]} else: ret = {'cmd': 'lsnodecanister'} else: ret = {'cmd': arg_list[1]} arg_list.pop(0) else: ret = {'cmd': arg_list[0]} skip = False for i in range(1, len(arg_list)): if skip: skip = False continue # Check for a quoted command argument for volumes and strip # quotes so that the simulater can match it later. Just # match against test naming convensions for now. 
if arg_list[i][0] == '"' and ('volume' in arg_list[i] or 'snapshot' in arg_list[i]): arg_list[i] = arg_list[i][1:-1] if arg_list[i][0] == '-': if arg_list[i][1:] in no_param_args: ret[arg_list[i][1:]] = True elif arg_list[i][1:] in one_param_args: ret[arg_list[i][1:]] = arg_list[i + 1] skip = True elif arg_list[i][1:] in no_or_one_param_args: if i == (len(arg_list) - 1) or arg_list[i + 1][0] == '-': ret[arg_list[i][1:]] = True else: ret[arg_list[i][1:]] = arg_list[i + 1] skip = True else: raise exception.InvalidInput( reason='unrecognized argument %s' % arg_list[i]) else: ret['obj'] = arg_list[i] return ret @staticmethod def _print_info_cmd(rows, delim=' ', nohdr=False, **kwargs): """Generic function for printing information.""" if nohdr: del rows[0] for index in range(len(rows)): rows[index] = delim.join(rows[index]) return ('%s' % '\n'.join(rows), '') @staticmethod def _print_info_obj_cmd(header, row, delim=' ',
serial Atom serial number 12 - 16 Integer serial Serial number of bonded atom _ 17 - 21 Integer serial Serial number of bonded atom | 22 - 26 Integer serial Serial number of bonded atom +-- optional 27 - 31 Integer serial Serial number of bonded atom _| """ def parseLines(self): """ does the dirty work parsing header and models """ inside = False currModel = [] i,j = self._kwslicing # model state of the molecule self.currentModel = 0 modelId = 0 for idx, raw in enumerate(self.text): l = raw[i:j] if not l in self.kw: self.header.append(idx) continue if l == 'ENDMDL': inside = False currModel.append(idx) self.modelSet[modelId] = currModel modelId += 1 currModel = [] #print "FLUSHING", currModel, modelId elif l == 'MODEL ': inside = True currModel = [idx] elif l == 'CONECT': pass #bonds = self._parseConect(raw) #self.conect.append(bonds) else: currModel.append(idx) if len(currModel): self.modelSet[modelId] = currModel def setState(self, model=None, altMode=None, altResidueMode={}): """ define the state of the multistructure""" if model: self.currentModel = model self.setAltMode(altMode) self.setResidueAltMode(altResidueMode) #self.generateOBMol() def setAltMode(self, altMode='a'): """ define the default alternate conformation mode for residues that do not have a specific setting in self.altResidueMode """ if not altMode: return self.altMode = altMode.upper() def setResidueAltMode(self, altResidueMode={}): """ set the residue-specific alternate residue mode""" if altResidueMode == {}: self.altresidueMode = {} return for k,v in altResidueMode: self.altResidueMode[k] = v def generateOBMol(self, raw): """ create the OBMolecule from the current model""" self.mol = ob.OBMol() conv = ob.OBConversion() conv.SetInFormat('pdb') conv.ReadString(self.mol, raw) return self.mol def getHeader(self): """ return PDB header information """ return [self.text[i] for i in self.header] def parseAtoms(self): """ process all atoms found in each model""" #self.graph = [] # XXX kw = self.coordKw # ('ATOM ', 'HETATM ') i,j = self._kwslicing for mIdx, model in self.modelSet.items(): self.currentModel = mIdx self.altResidues[mIdx] = {} structure = {} # scan all atom lines in the model for lineIdx in model: raw = self.text[lineIdx] if raw[i:j] in kw: self.addAtomToStructure(structure, lineIdx) # cleanup the model cleanStructure = self.cleanStructure(structure) # store the model self.modelSet[mIdx] = cleanStructure # reset model to the begin self.currentModel = 0 # DEBUG if 0: for ch, re in self.modelSet[mIdx].items(): print "CH[%s]" % ch for nu, inf in re.items(): for k,v in inf.items(): print "\t", k, v #x ADD CHECK FOR COVALENT LIGANDS! #IT SHOULD BE A FLAG SO WHEN MIGRATING SUGARS, WILL BE CONSIDERED COVALENT LIGANDS @property def modelCount(self): """ return the number of models found in the structure""" return len(self.modelSet) @property def getAltResidues(self): """ return the number of models found in the structure""" return sorted(self.altResidues[self.currentModel].keys()) @ property def chains(self): return sorted(self.modelSet[self.currentModel].keys()) def addAtomToStructure(self, structure, lineIdx): """ take care of creating/adding atoms, book-keeping...""" chainInfo, resInfo, atInfo = self.atomInfo(lineIdx) resName = resInfo['name'] resNum = resInfo['num'] chain = chainInfo['name'] #resKey = "%s%s" % (resName, resNum) # it is possible that an alternate location defines a different residue! 
# therefore the key is going to be the sequence number and not the NameNumber resKey = "%d" % (resNum) residues = structure.setdefault(chain, {}) atoms = residues.setdefault(resKey, [] ) atoms.append(atInfo) def cleanStructure(self, structure): """process each residue for alt states""" for chain, residue in structure.items(): for res, atoms in residue.items(): newResAtoms = self.compactAtoms(atoms) structure[chain][res] = newResAtoms return structure def compactAtoms(self, atoms): """ generate the structure to create requested alt conformations """ altResidues = self.altResidues[self.currentModel] common = [] altAtoms = {} for a in atoms: if a['isAlt']: altLabel = a['alt'] # register residue in list of alt residues altList = altResidues.setdefault(a['resNum'], []) if not altLabel in altList: altList.append(altLabel) # register atoms to the specific alt group altGroup = altAtoms.setdefault(altLabel, []) altGroup.append(a) else: common.append(a) return {'common': common, 'alternate': altAtoms} def getStructure(self, chains=[]): """ generate OBMol molecule structure (optionally, containing only selected chains """ raw = "".join( self.getStructureState(chains=chains)) #open('DEBUG_XX.pdb','w').write(raw) return self.generateOBMol(raw) def getStructureState(self, chains=[]): #, altMode='A', model='all', altResidueMode=[]): """ create a representation o the structure using the current model (self.model), alternate locations per residue (self.altResidueMode) and default (self.altMode) """ out = [] currModel = self.modelSet[self.currentModel] for modelChain, residues in currModel.items(): if chains and not modelChain in chains: continue for resNum in sorted(residues.keys()): resAtoms = residues[resNum] resState = self.getResidueState(resNum, resAtoms) out += resState return [self.text[i] for i in out] def sortedResidues(self, residues): """ return a tuple with """ out = [] keys = residues.keys() nameNum = [ (x[0:3], int(x[3:])) for x in keys ] nameNum.sort(key=itemgetter(1)) for name, num in nameNum: k = "%s%d" % (name, num) out.append( (k, residues[k]) ) return out def getResidueState(self, resNum, resAtoms): """ extract residue atoms in the specified altModes (default or specific)""" # common atoms with no alternate conformations out = [ x['lineIdx'] for x in resAtoms['common'] ] altMode = self.altResidueMode.setdefault(resNum, self.altMode) if altMode in resAtoms['alternate']: for a in resAtoms['alternate'][altMode]: out.append(a['lineIdx']) return out def atomInfo(self, lineIdx): """ extract information from the ATOM/HETATM line""" # atom information s = self.text[lineIdx] atName = s[12:16].strip() atNum = int(s[6:11]) alt = s[16].strip() isAlt = bool(len(alt)) #occupancy = float(s[54:60].strip()) #temp = float(s[60:66].strip()) #element = s[76:78] #coord = map(float, ( s[30:38], s[38:46], s[46:54]) ) # remove the alt location label self.text[lineIdx] = s[:16] + " "+ s[17:] # chain chain = s[21] #segment = s[72:76] chainInfo = { 'name': chain} # residue resNum = int(s[22:26].strip()) resName = s[17:20].strip() resId = "%s:%s%d" % (chain, resName, resNum) resInfo = {'name': resName, 'num': resNum, 'resId':resId} # atom atInfo = { 'isAlt' : isAlt, 'atomName': atName, 'atomNum': atNum, 'lineIdx' : lineIdx, 'resId': resId, 'alt':alt, 'resNum':resNum, #'segment':segment, #'element':element, #'occupancy': occupancy, 'temp':temp, 'coord':coord, } return chainInfo, resInfo, atInfo def _parseConect(self, string): """ do we need the re-numbering book-keeping after alt identification?""" pass class 
KamajiInterface: def __init__(self, mol, perceiveBondOrders=True): """ """ self.mol = mol self.classes = {} maxLenType = 0 #self._bondOrder = numpy.zeros( (mol._ag.numBonds(),), dtype = 'int') self._bondOrder = {} self._types = numpy.zeros((mol._ag.numAtoms(),), dtype='S5') self._hbTypes = numpy.zeros((mol._ag.numAtoms(),), dtype='uint8') #self._types = np.chararray(shape=(len(mol._ag),), dtype='|S5') self._bCounter = 0 self._aCounter = 0 self.hasProtein = False self.hasDna = False self.hasRna = False self.hasLigand = False self.hasWater = False self.hasOther = False self.kamaji = Kamaji() self.proteinTrees = [] self.dnaTrees = [] self.rnaTrees = [] self.ligandTrees = [] self.waterTrees = [] self.otherTrees = [] self._other = [ 'salt', 'genericMetal', 'additives' ] self._modifiers = ['glycosylation', 'modifier' ] for chId in numpy.unique(self.mol._ag.getChids()): chain = self.mol.select('chain "%c"'%chId) obmol = obi.ProdyToOBMol(chain, title='%s: chain %s'%(mol.name, chId)) if perceiveBondOrders: obmol.PerceiveBondOrders() self.kamaji.setMolecule(obmol) # get atom type for atm in ob.OBMolAtomIter(obmol): self._types[self._aCounter] = atm.GetType() maxLenType = max(maxLenType, len(atm.GetType())) self._hbTypes[self._aCounter] = 1 * atm.IsHbondAcceptor() + 2 * atm.IsHbondDonor() self._aCounter += 1 # get bond order for bond in ob.OBMolBondIter(obmol): i = bond.GetBeginAtomIdx()-1 j = bond.GetEndAtomIdx()-1 self._bondOrder['%d %d'%(i,j)] = bond.GetBondOrder() #self._bondOrder[self._bCounter] = bond.GetBondOrder() self._bCounter += 1 self.classes[chId] = {} self.kamaji.setMolecule(obmol) #, pdbinfo = self.mol.pdbInfo) self.classes[chain] = {} kst = self.kamaji.structureTypes tree = None # XXX this code should be modified to allow multiple types per chain (i.e., protein DNA)? # XXX ask David # XXX if so, the following if/elif should be converted into a for loop + break if 'protein' in kst: self.hasProtein = True protein = self.classes[chain]['protein'] = self.kamaji.structureTypes['protein'] tree = self.generateBiopolymerTree(chId, protein, 'protein') elif 'dna' in kst: self.hasDna = True dna = self.classes[chain]['dna'] = self.kamaji.structureTypes['dna'] tree = self.generateBiopolymerTree(chId, dna, 'dna') elif 'rna' in kst: self.hasRna = True rna = self.classes[chain]['rna'] = self.kamaji.structureTypes['rna'] tree = self.generateBiopolymerTree(chId, rna, 'rna') if tree: # self.parseModifiers(tree) self.parseCofactors(tree) self.parseWater(tree) if 'ligand' in kst: self.hasLigand = True ligand = self.classes[chain]['ligand'] = self.kamaji.structureTypes['ligand'] self.generateLigandTree(chId, ligand) self.parseOther(chId) self.compactTrees() #print 'ATOMTYPES', self._types #print 'bondORders', self._bondOrder #print 'HBDONORS', self._hbTypes #for i in self._hbTypes: # print i #print "MAXTYPE", maxLenType def getBondOrders(self): """ return the bond order""" return self._bondOrder def getAtomTypes(self): """ return atom types""" return self._types def getHBTypes(self): """ return h-bond types 0 : no hydrogen bond 1 : hydrogen bond acceptor 2 : hydrogen bond donor 3 : hydrogen bond acceptor/donor """ return self._hbTypes def compactTrees(self): """ create
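# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original file): the grouping rule that
# compactAtoms() above implements, reduced to its core. The dicts are
# minimal hypothetical stand-ins for the parser's atInfo records.
atoms = [
    {'isAlt': False, 'alt': '',  'resNum': 42, 'atomName': 'N'},
    {'isAlt': True,  'alt': 'A', 'resNum': 42, 'atomName': 'CA'},
    {'isAlt': True,  'alt': 'B', 'resNum': 42, 'atomName': 'CA'},
]
common = [a for a in atoms if not a['isAlt']]
alternate = {}
for a in atoms:
    if a['isAlt']:
        # one bucket per alternate-location label, as compactAtoms() does
        alternate.setdefault(a['alt'], []).append(a)
compact = {'common': common, 'alternate': alternate}
# getResidueState() later emits 'common' plus exactly one alternate bucket,
# chosen per residue via altResidueMode, falling back to the default altMode.
# ---------------------------------------------------------------------------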
# -*- coding: utf-8 -*- """This module defines the differentiable function class.""" from typing import Union, List, Optional, Tuple import abc import numpy as np class DiffFunction(abc.ABC): """An abstract class representing a differentiable scalar function. Supports Numpy broadcasting. Defaults to using finite difference for derivative calculation. Parameters ---------- input_ranges : List[Tuple[Optional[float], Optional[float]]] input ranges. delta_list : Optional[List[float]] a list of finite difference step size for each input. If None, finite difference will be disabled. """ def __init__(self, input_ranges, delta_list=None): # type: (List[Tuple[Optional[float], Optional[float]]], Optional[List[float]]) -> None # error checking self._ndim = len(input_ranges) if delta_list is not None and len(delta_list) != self._ndim: raise ValueError('finite difference list length inconsistent.') self._input_ranges = input_ranges self.delta_list = delta_list @property def input_ranges(self): # type: () -> List[Tuple[Optional[float], Optional[float]]] return self._input_ranges @property def ndim(self): # type: () -> int """Number of input dimensions.""" return self._ndim @abc.abstractmethod def __call__(self, xi): """Interpolate at the given coordinates. Numpy broadcasting rules apply. Parameters ---------- xi : array_like The coordinates to evaluate, with shape (..., ndim) Returns ------- val : np.multiarray.ndarray The interpolated values at the given coordinates. """ raise NotImplementedError('Not implemented') def get_input_range(self, idx): # type: (int) -> Tuple[Optional[float], Optional[float]] """Returns the input range of the given dimension.""" return self._input_ranges[idx] def deriv(self, xi, j): """Calculate the derivative at the given coordinates with respect to input j. Numpy broadcasting rules apply. Parameters ---------- xi : array_like The coordinates to evaluate, with shape (..., ndim) j : int input index. Returns ------- val : np.multiarray.ndarray The derivatives at the given coordinates. """ return self._fd(xi, j, self.delta_list[j]) def jacobian(self, xi): """Calculate the Jacobian at the given coordinates. Numpy broadcasting rules apply. If finite difference step sizes are not specified, will call deriv() in a for loop to compute the Jacobian. Parameters ---------- xi : array_like The coordinates to evaluate, with shape (..., ndim) Returns ------- val : np.multiarray.ndarray The Jacobian matrices at the given coordinates. """ if self.delta_list: return self._fd_jacobian(xi, self.delta_list) else: xi = np.asarray(xi, dtype=float) ans = np.empty(xi.shape) for n in range(self.ndim): ans[..., n] = self.deriv(xi, n) return ans def _fd(self, xi, idx, delta): """Calculate the derivative along the given index using central finite difference. Parameters ---------- xi : array_like The coordinates to evaluate, with shape (..., ndim) idx : int The index to calculate the derivative on. delta : float The finite difference step size. Returns ------- val : np.multiarray.ndarray The derivatives at the given coordinates. 
""" if idx < 0 or idx >= self.ndim: raise ValueError('Invalid derivative index: %d' % idx) xi = np.asarray(xi, dtype=float) if xi.shape[-1] != self.ndim: raise ValueError("The requested sample points xi have dimension %d, " "but this interpolator has dimension %d" % (xi.shape[-1], self.ndim)) # use broadcasting to evaluate two points at once xtest = np.broadcast_to(xi, (2,) + xi.shape).copy() xtest[0, ..., idx] += delta / 2.0 xtest[1, ..., idx] -= delta / 2.0 val = self(xtest) ans = (val[0] - val[1]) / delta # type: np.ndarray if ans.size == 1 and not np.isscalar(ans): return ans[0] return ans def _fd_jacobian(self, xi, delta_list): """Calculate the Jacobian matrix using central finite difference. Parameters ---------- xi : array_like The coordinates to evaluate, with shape (..., ndim) delta_list : List[float] list of finite difference step sizes for each input. Returns ------- val : np.multiarray.ndarray The Jacobian matrices at the given coordinates. """ xi = np.asarray(xi, dtype=float) if xi.shape[-1] != self.ndim: raise ValueError("The requested sample points xi have dimension %d, " "but this interpolator has dimension %d" % (xi.shape[-1], self.ndim)) # use broadcasting to evaluate all points at once xtest = np.broadcast_to(xi, (2 * self.ndim,) + xi.shape).copy() for idx, delta in enumerate(delta_list): xtest[2 * idx, ..., idx] += delta / 2.0 xtest[2 * idx + 1, ..., idx] -= delta / 2.0 val = self(xtest) ans = np.empty(xi.shape) for idx, delta in enumerate(delta_list): ans[..., idx] = (val[2 * idx, ...] - val[2 * idx + 1, ...]) / delta return ans def transform_input(self, amat, bmat): # type: (np.multiarray.ndarray, np.multiarray.ndarray) -> DiffFunction """Returns f(Ax + B), where f is this function and A, B are matrices. Parameters ---------- amat : np.multiarray.ndarray the input transform matrix. bmat : np.multiarray.ndarray the input shift matrix. Returns ------- dfun : DiffFunction a scalar differential function. 
""" return InLinTransformFunction(self, amat, bmat) def __add__(self, other): # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction if isinstance(other, DiffFunction): return SumDiffFunction(self, other, f2_sgn=1.0) elif isinstance(other, float) or isinstance(other, int): return ScaleAddFunction(self, other, 1.0) elif isinstance(other, np.ndarray): return ScaleAddFunction(self, np.asscalar(other), 1.0) else: raise NotImplementedError('Unknown type %s' % type(other)) def __radd__(self, other): # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction return self.__add__(other) def __sub__(self, other): # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction if isinstance(other, DiffFunction): return SumDiffFunction(self, other, f2_sgn=-1.0) elif isinstance(other, float) or isinstance(other, int): return ScaleAddFunction(self, -other, 1.0) elif isinstance(other, np.ndarray): return ScaleAddFunction(self, -np.asscalar(other), 1.0) else: raise NotImplementedError('Unknown type %s' % type(other)) def __rsub__(self, other): # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction if isinstance(other, DiffFunction): return SumDiffFunction(other, self, f2_sgn=-1.0) elif isinstance(other, float) or isinstance(other, int): return ScaleAddFunction(self, other, -1.0) elif isinstance(other, np.ndarray): return ScaleAddFunction(self, np.asscalar(other), -1.0) else: raise NotImplementedError('Unknown type %s' % type(other)) def __mul__(self, other): # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction if isinstance(other, DiffFunction): return ProdFunction(self, other) elif isinstance(other, float) or isinstance(other, int): return ScaleAddFunction(self, 0.0, other) elif isinstance(other, np.ndarray): return ScaleAddFunction(self, 0.0, np.asscalar(other)) else: raise NotImplementedError('Unknown type %s' % type(other)) def __rmul__(self, other): # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction return self.__mul__(other) def __pow__(self, other): # type: (Union[float, int, np.multiarray.ndarray]) -> DiffFunction if isinstance(other, float) or isinstance(other, int): return PwrFunction(self, other, scale=1.0) elif isinstance(other, np.ndarray): return PwrFunction(self, np.asscalar(other), scale=1.0) else: raise NotImplementedError('Unknown type %s' % type(other)) def __div__(self, other): # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction if isinstance(other, DiffFunction): return DivFunction(self, other) elif isinstance(other, float) or isinstance(other, int): return ScaleAddFunction(self, 0.0, 1.0 / other) elif isinstance(other, np.ndarray): return ScaleAddFunction(self, 0.0, 1.0 / np.asscalar(other)) else: raise NotImplementedError('Unknown type %s' % type(other)) def __truediv__(self, other): # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction return self.__div__(other) def __rdiv__(self, other): # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction if isinstance(other, DiffFunction): return DivFunction(other, self) elif isinstance(other, float) or isinstance(other, int): return PwrFunction(self, -1.0, scale=other) elif isinstance(other, np.ndarray): return PwrFunction(self, -1.0, scale=np.asscalar(other)) else: raise NotImplementedError('Unknown type %s' % type(other)) def __rtruediv__(self, other): # type: (Union[DiffFunction, float, int, 
np.multiarray.ndarray]) -> DiffFunction return self.__rdiv__(other) def __neg__(self): # type: () -> DiffFunction return ScaleAddFunction(self, 0.0, -1.0) class InLinTransformFunction(DiffFunction): """A DiffFunction where the input undergoes a linear transformation first. This function computes f(Ax + B), where A and B are matrices. Parameters ---------- f1 : DiffFunction the parent function. amat : np.multiarray.ndarray the input transform matrix. bmat : np.multiarray.ndarray the input shift matrix. """ def __init__(self, f1, amat, bmat): # type: (DiffFunction, np.multiarray.ndarray, np.multiarray.ndarray) -> None if amat.shape[0] != f1.ndim or bmat.shape[0] != f1.ndim: raise ValueError('amat/bmat number of rows must be %d' % f1.ndim) if len(bmat.shape) != 1: raise ValueError('bmat must be 1 dimension.') # domain of f(Ax+B) cannot be represented by input ranges. super(InLinTransformFunction, self).__init__([(None, None)] * amat.shape[1], delta_list=None) self._f1 = f1 self._amat = amat self._bmat = bmat.reshape(-1, 1) def _get_arg(self, xi): xi = np.asarray(xi) xi_shape = xi.shape my_ndim = self.ndim if xi_shape[-1] != my_ndim: raise ValueError('Last dimension must have size %d' % my_ndim) xi = xi.reshape(-1, my_ndim) return (self._amat.dot(xi.T) + self._bmat).T, xi_shape def __call__(self, xi): farg, xi_shape = self._get_arg(xi) result = self._f1(farg) if np.isscalar(result): return result return result.reshape(xi_shape[:-1]) def deriv(self, xi, j): jmat = self.jacobian(xi) return jmat[..., 0, j] def jacobian(self, xi): farg, xi_shape = self._get_arg(xi) jmat = self._f1.jacobian(farg).dot(self._amat) shape_trunc = xi_shape[:-1] # type: Tuple[int, ...] return jmat.reshape(shape_trunc + (1, self.ndim)) class ScaleAddFunction(DiffFunction): """A DiffFunction multiply by a scalar then added to a scalar. Parameters ---------- f1 : DiffFunction the first function. adder : float constant to add. scaler : float constant to multiply. """ def __init__(self, f1, adder, scaler): # type: (DiffFunction, float, float) -> None DiffFunction.__init__(self, f1.input_ranges, delta_list=None)
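# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original file): the central-difference
# scheme DiffFunction._fd() uses, applied to a plain function. With
# f(x, y) = x**2 + 3*y, the derivative w.r.t. x at (2, 1) is exactly 4.
import numpy as np

def f(xi):
    xi = np.asarray(xi, dtype=float)
    return xi[..., 0] ** 2 + 3.0 * xi[..., 1]

xi = np.array([2.0, 1.0])
delta = 1e-6
# evaluate both shifted points in one vectorized call, as _fd() does
xtest = np.broadcast_to(xi, (2,) + xi.shape).copy()
xtest[0, ..., 0] += delta / 2.0
xtest[1, ..., 0] -= delta / 2.0
val = f(xtest)
deriv = (val[0] - val[1]) / delta   # ~4.0, up to floating-point error
# ---------------------------------------------------------------------------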
in s or 'r_H+' in s: reax.loc[i, s] = reax.loc[i, s]*int(r_multiplier ) if ('ox_' in s and 'coeff' in s) or ('rR_' in s and 'coeff' in s) or ('pO_' in s and 'coeff' in s) or 'o_H2O' in s or 'o_H+' in s: reax.loc[i, s] = reax.loc[i, s]*int(o_multiplier) reax['H+'] = reax['r_H+'] + reax['o_H+'] reax['protons'] = 'H+' reax['H2O'] = reax['r_H2O'] + reax['o_H2O'] reax['water'] = 'H2O' reax.drop(columns = ['r_H2O', 'r_H+', 'r_-', 'r_+', 'r_e-', 'o_H2O', 'o_H+', 'o_-', 'o_+', 'o_e-'],axis = 1, inplace = True) count = 0 for i in db_names: db_names[count] = 'start'+i+'end' count += 1 real_reax = reax.replace(formulas,db_names) real_reax['rO_coeff'] = real_reax['rO_coeff'].astype('float') real_reax['rR_coeff'] = real_reax['rR_coeff'].astype('float') real_reax['pO_coeff'] = real_reax['pO_coeff'].astype('float') real_reax['pR_coeff'] = real_reax['pR_coeff'].astype('float') count = 0 rxn_count = [] rxn_number = [] for i in real_reax['Reaction']: if i not in rxn_count: rxn_count.append(i) rxn_number.append(real_reax['Names'][count]+ '_'+str(count)) else: rxn_number.append(real_reax['Names'][count]+ '_'+str(count)+'_sub') count += 1 real_reax.insert(0, 'Reaction Number', rxn_number) real_reax.insert(1, 'electrons', electrons) lst3 = [] #list of reaction numbers lst4 = [] #list of reactions with issues for i in range(0, len(real_reax['Reaction'])): if real_reax['Reaction'][i] not in lst3: lst3.append(real_reax['Reaction'][i]) for j in lst3: #looping through reaction numbers first_e = real_reax.loc[real_reax['Reaction'] == j]['electrons'].reset_index(drop=True)[0] for k in real_reax.loc[real_reax['Reaction'] == j]['electrons'].reset_index(drop=True): if k != first_e: if j not in lst4: lst4.append(j) for l in lst4: print(real_reax.loc[real_reax['Reaction'] == l]) real_reax.drop(labels='Reaction', axis=1, inplace = True) real_reax.drop(columns = 'Names', inplace = True) pairs = real_reax['redox_pair'] real_reax.drop(columns = 'redox_pair', inplace = True) # 2-16-2022 CHANGES START HERE for i in range(0, len(real_reax['Reaction Number'])): for j in range(2, len(real_reax.columns)): if str(real_reax.iloc[i, j]) == 'nan': real_reax.iloc[i, j] = '' test_df = real_reax.copy(deep=True) count = 0 for i in range(0, len(test_df['Reaction Number'])): coefficients = [] species = [] for j in range(2, len(test_df.columns)): if count % 2 == 0: if test_df.iloc[i, j] != '': test_df.iloc[i, j] = round(test_df.iloc[i, j], 14) if test_df.iloc[i, j] == 0 or test_df.iloc[i, j] == 0.0: test_df.iloc[i, j] = '' test_df.iloc[i, j+1] = '' coefficient = test_df.iloc[i, j] coefficients.append(test_df.iloc[i, j]) if count % 2 != 0: if test_df.iloc[i, j] != '': test_df.iloc[i, j] = str(test_df.iloc[i, j]).split('start')[1].split('end')[0] compound = test_df.iloc[i, j] if compound in species: og_location = species.index(compound) df_location = 3+og_location*2 df_location_coeff = df_location - 1 old_coeff = coefficients[og_location] new_coeff = coefficient + old_coeff test_df.iloc[i, j] = '' test_df.iloc[i, j-1] = '' df_value = test_df.iloc[i, df_location] #values from first occurence remaining test_df.iloc[i, df_location_coeff] = new_coeff species.append(compound) count+=1 for m in range(0, 7): for i in range(0, len(test_df['Reaction Number'])): line = [] for j in range(2, len(test_df.columns)): line.append(test_df.iloc[i, j]) if test_df.iloc[i, j] != '' and test_df.iloc[i, j-2] =='': test_df.iloc[i, j-2] = test_df.iloc[i, j] test_df.iloc[i, j] = '' file = test_df.to_csv(sep='\t', header=False, index=False, line_terminator='\n') file = 
file.split("\n") #not sure if I should keep this newlines = [] for line in file: line = line.strip() newlines.append(line) self.affinity_energy_reactions_raw = "\n".join(newlines) df_rxn = pd.DataFrame([x.split('\t') for x in self.affinity_energy_reactions_raw.split('\n')]) df_rxn.columns = df_rxn.columns.map(str) df_rxn = df_rxn.rename(columns={"0": "reaction_name", "1": "mol_e-_transferred_per_mol_rxn"}) df_rxn.insert(1, 'redox_pairs', all_reax['pairs']) df_rxn = df_rxn.set_index("reaction_name") df_rxn = df_rxn[df_rxn['mol_e-_transferred_per_mol_rxn'].notna()] self.affinity_energy_reactions_table = df_rxn prev_was_coeff = False n = 1 for col in self.affinity_energy_reactions_table.iloc[:, 2:].columns: if not prev_was_coeff: new_col_name = "coeff_"+str(n) prev_was_coeff = True else: new_col_name = "species_"+str(n) prev_was_coeff = False n += 1 self.affinity_energy_reactions_table = self.affinity_energy_reactions_table.rename(columns={col: new_col_name}) nonsub_reaction_names = [name for name in self.affinity_energy_reactions_table.index if "_sub" not in name[-4:]] if self.verbose != 0: print("{} redox reactions have been generated.".format(len(nonsub_reaction_names))) def show_redox_reactions(self, formatted=True, charge_sign_at_end=False, hide_subreactions=True, simplify=True, show=True): """ Show a table of redox reactions generated with the function `make_redox_reactions`. Parameters ---------- formatted : bool, default True Should reactions be formatted for html output? charge_sign_at_end : bool, default False Display charge with sign after the number (e.g. SO4 2-)? Ignored if `formatted` is False. hide_subreactions : bool, default True Hide subreactions? show : bool, default False Show the table of reactions? Ignored if not run in a Jupyter notebook. Returns ---------- A pandas dataframe containing balanced redox reactions written in full. 
""" self.affinity_energy_formatted_reactions = copy.copy(self.affinity_energy_reactions_table.iloc[:, 0:1]) df = copy.copy(self.affinity_energy_reactions_table) if simplify: main_rxn_names = df.loc[[ind for ind in df.index if "_sub" not in ind[-4:]]].index df = df.iloc[[i-1 for i in range(0, len(df.index)) if "_sub" not in df.index[i][-4:]]] self.affinity_energy_formatted_reactions = copy.copy(df.iloc[:, 0:1]) reactions = [] for irow in range(0, df.shape[0]): redox_pair = df.loc[df.index[irow], "redox_pairs"] oxidant_1 = self.half_cell_reactions.loc[self.half_cell_reactions.index[redox_pair[0]], "Oxidant_1"] oxidant_2 = self.half_cell_reactions.loc[self.half_cell_reactions.index[redox_pair[0]], "Oxidant_2"] oxidant_3 = self.half_cell_reactions.loc[self.half_cell_reactions.index[redox_pair[0]], "Oxidant_3"] reductant_1 = self.half_cell_reactions.loc[self.half_cell_reactions.index[redox_pair[1]], "Reductant_1"] reductant_2 = self.half_cell_reactions.loc[self.half_cell_reactions.index[redox_pair[1]], "Reductant_2"] oxidants = [ox for ox in [oxidant_1, oxidant_2, oxidant_3] if str(ox) != 'nan'] reductants = [rd for rd in [reductant_1, reductant_2] if str(rd) != 'nan'] if len(oxidants) > 1: oxidant_sigma_needed = True else: oxidant_sigma_needed = False if len(reductants) > 1: reductant_sigma_needed = True else: reductant_sigma_needed = False rxn_row = df.iloc[irow, 2:] rxn = rxn_row[rxn_row.notna()] coeffs = copy.copy(rxn[::2]).tolist() names = copy.copy(rxn[1::2]).tolist() if oxidant_sigma_needed or reductant_sigma_needed: reactant_names = [names[i] for i in range(0, len(names)) if float(coeffs[i]) < 0] for sp in reactant_names: if sp in oxidants and oxidant_sigma_needed: i = names.index(sp) names[i] = u"\u03A3"+sp if sp in reductants and reductant_sigma_needed: if u"\u03A3"+sp not in names: i = names.index(sp) names[i] = u"\u03A3"+sp react_grid = pd.DataFrame({"coeff":coeffs, "name":names}) react_grid["coeff"] = pd.to_numeric(react_grid["coeff"]) react_grid = react_grid.astype({'coeff': 'float'}) reactants = " + ".join([(str(-int(react_grid["coeff"][i]) if react_grid["coeff"][i].is_integer() else -react_grid["coeff"][i])+" " if -react_grid["coeff"][i] != 1 else "") + react_grid["name"][i] for i in range(0, len(react_grid["name"])) if react_grid["coeff"][i] < 0]) products = " + ".join([(str(int(react_grid["coeff"][i]) if react_grid["coeff"][i].is_integer() else react_grid["coeff"][i])+" " if react_grid["coeff"][i] != 1 else "") + react_grid["name"][i] for i in range(0, len(react_grid["name"])) if react_grid["coeff"][i] > 0]) if formatted: reactants = " + ".join([format_coeff(react_grid["coeff"][i]) + html_chemname_format_AqEquil(react_grid["name"][i], charge_sign_at_end=charge_sign_at_end) for i in range(0, len(react_grid["name"])) if react_grid["coeff"][i] < 0]) products = " + ".join([format_coeff(react_grid["coeff"][i]) + html_chemname_format_AqEquil(react_grid["name"][i], charge_sign_at_end=charge_sign_at_end) for i in range(0, len(react_grid["name"])) if react_grid["coeff"][i] > 0]) reaction = reactants + " = " + products reactions.append(reaction) self.affinity_energy_formatted_reactions["reaction"] = reactions[1:] + reactions[:1] # because reactions got rotated with respect to reaction names, rotate the other way self.affinity_energy_formatted_reactions.index = main_rxn_names else: reactions = [] for irow in range(0, df.shape[0]): redox_pair = df.loc[self.affinity_energy_reactions_table.index[irow], "redox_pairs"] oxidant = redox_pair[0] reductant = redox_pair[1] rxn_row = df.iloc[irow, 
2:] rxn = rxn_row[rxn_row.notna()] coeffs = copy.copy(rxn[::2]).tolist() names = copy.copy(rxn[1::2]).tolist() react_grid = pd.DataFrame({"coeff":coeffs, "name":names}) react_grid["coeff"] = pd.to_numeric(react_grid["coeff"]) react_grid = react_grid.astype({'coeff': 'float'}) reactants = " + ".join([(str(-int(react_grid["coeff"][i]) if react_grid["coeff"][i].is_integer() else -react_grid["coeff"][i])+" " if -react_grid["coeff"][i] != 1 else "") + react_grid["name"][i] for i in range(0, len(react_grid["name"])) if react_grid["coeff"][i] < 0]) products = " + ".join([(str(int(react_grid["coeff"][i]) if react_grid["coeff"][i].is_integer() else react_grid["coeff"][i])+" " if react_grid["coeff"][i] != 1 else "") + react_grid["name"][i] for i in range(0, len(react_grid["name"])) if react_grid["coeff"][i] > 0]) if formatted: reactants = " + ".join([format_coeff(react_grid["coeff"][i]) + html_chemname_format_AqEquil(react_grid["name"][i], charge_sign_at_end=charge_sign_at_end) for i in range(0, len(react_grid["name"])) if react_grid["coeff"][i] < 0]) products = " + ".join([format_coeff(react_grid["coeff"][i]) + html_chemname_format_AqEquil(react_grid["name"][i], charge_sign_at_end=charge_sign_at_end) for i in range(0, len(react_grid["name"])) if react_grid["coeff"][i] > 0]) reaction = reactants + " = " + products reactions.append(reaction) self.affinity_energy_formatted_reactions["reaction"] = reactions df_out = copy.copy(self.affinity_energy_formatted_reactions) if hide_subreactions and not simplify: df_out = self.affinity_energy_formatted_reactions.loc[[ind for ind in self.affinity_energy_formatted_reactions.index if "_sub" not in ind[-4:]]] if isnotebook() and show: display(HTML(df_out.to_html(escape=False))) return df_out def compare(*args): """ Combine two or more speciations into a single speciation object for comparison. The speciation object returned by this function can produce scatterplots, barplots, and mass contribution plots, and contains a report that can be browsed with `lookup`. See documentation for the functions in the `Speciation` class for more detail. Parameters ---------- *args : two or more objects of class `Speciation` to compare Returns ---------- An object of class `Speciation`. """ if all(["mass_contribution" in a.__dict__.keys() for a in args]): allow_mass_contribution = True mass_contribution_breaks = [] else: allow_mass_contribution = False for i,sp in enumerate(args): if i == 0: sp_total = copy.deepcopy(sp) sp_total.sample_data = None if allow_mass_contribution: mass_contribution_breaks.append(0) else: sp_i = copy.deepcopy(sp) if allow_mass_contribution: mass_contribution_breaks.append(sp_total.report.shape[0]) sp_total.report = pd.concat([sp_total.report, sp_i.report], axis=0, sort=False) sp_total.report.index = sp_total.report.index + ("_"+sp_total.report.groupby(level=0).cumcount().astype(str)).replace('_0','') if
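# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original file): the reaction-string
# convention show_redox_reactions() applies above. Negative coefficients are
# reactants, positive are products, integer-valued coefficients print without
# a decimal, and a coefficient of 1 is omitted. The species below are an
# arbitrary balanced example (sulfide oxidation), not output of the class.
coeffs = [-1.0, -2.0, 1.0, 2.0]
names = ['H2S', 'O2', 'SO4-2', 'H+']

def _side(sign):
    terms = []
    for c, n in zip(coeffs, names):
        if sign * c > 0:
            mag = abs(c)
            mag = int(mag) if mag.is_integer() else mag
            terms.append('{} {}'.format(mag, n) if mag != 1 else n)
    return ' + '.join(terms)

reaction = _side(-1) + ' = ' + _side(+1)
# -> 'H2S + 2 O2 = SO4-2 + 2 H+'
# ---------------------------------------------------------------------------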
<gh_stars>1-10 #!/usr/bin/env python """ Control agents and related resources. @see https://confluence.oceanobservatories.org/display/CIDev/R2+Agent+Use+Guide See help below for available options and arguments. Invoke via command line like this: bin/pycc -x ion.agents.agentctrl.AgentControl instrument='CTDPF' op=start bin/pycc -x ion.agents.agentctrl.AgentControl resource_id='uuid' op=start activate=False bin/pycc -x ion.agents.agentctrl.AgentControl agent_name='uuid' op=stop bin/pycc -x ion.agents.agentctrl.AgentControl platform='uuid' op=start recurse=True bin/pycc -x ion.agents.agentctrl.AgentControl platform='uuid' op=config_instance cfg=file.csv recurse=True bin/pycc -x ion.agents.agentctrl.AgentControl instrument='CTDPF' op=recover_data recover_start=0.0 recover_end=1.0 bin/pycc -x ion.agents.agentctrl.AgentControl preload_id='CP02PMUI-WF001_PD' op=start and others (see below and Confluence page) TODO: - Force terminate agents and clean up - Change owner of resource - Change contact info, metadata of resource based on spreadsheet """ __author__ = '<NAME>, <NAME>, <NAME>, <NAME>' import csv import datetime import os import shutil import time from pyon.agent.agent import ResourceAgentClient, ResourceAgentEvent from pyon.core.object import IonObjectBase from pyon.public import RT, log, PRED, OT, ImmediateProcess, BadRequest, NotFound, LCS, AS, EventPublisher, dict_merge, IonObject from ion.core.includes.mi import DriverEvent from ion.services.dm.inventory.dataset_management_service import DatasetManagementService from ion.services.sa.observatory.deployment_util import DeploymentUtil from ion.services.sa.observatory.observatory_util import ObservatoryUtil from ion.services.sa.observatory.deployment_util import DeploymentUtil from ion.util.parse_utils import parse_dict, get_typed_value from ion.util.datastore.resuse import ResourceUseInfo from interface.objects import AgentCommand, Site, TemporalBounds, AgentInstance, AgentDefinition, Device from interface.objects import DeviceModel, ProcessStateEnum, DataProductTypeEnum from interface.services.sa.idata_acquisition_management_service import DataAcquisitionManagementServiceProcessClient from interface.services.sa.idata_product_management_service import DataProductManagementServiceProcessClient from interface.services.sa.iinstrument_management_service import InstrumentManagementServiceProcessClient from interface.services.sa.iobservatory_management_service import ObservatoryManagementServiceProcessClient from interface.services.coi.iidentity_management_service import IdentityManagementServiceProcessClient from interface.services.coi.iorg_management_service import OrgManagementServiceProcessClient from interface.services.dm.ipubsub_management_service import PubsubManagementServiceProcessClient ARG_HELP = { "help": "prints help for operation", "instrument": "name or resource uuid of instrument device", "platform": "name or resource uuid of platform device", "device_name": "name or resource uuid of a device resource", "resource_id": "resource uuid for any resource in the system", "agent_name": "name of agent instance. Resource ids (uuid) can be used instead of names.", "preload_id": "preload (PRE) id or comma separated list of preload ids of resources", "recurse": "if True, execute op child devices/sites if existing", "fail_fast": "if True, exit after the first exception. 
Otherwise log errors only", "recover_start": "floating point string representing seconds since 1900-01-01 00:00:00 (NTP64 Epoch)", "recover_end": "floating point string representing seconds since 1900-01-01 00:00:00 (NTP64 Epoch)", "force": "if True, ignore some warning conditions and move on or clear up", "autoclean": "if True, try to clean up resources directly after failed operations", "verbose": "if True, log more messages for detailed steps", "dryrun": "if True, log attempted actions but don't execute them (use verbose=True to see many details)", "clone_id": "provides suffix for a cloned preload id, e.g. CP02PMUI-WP001_PD -> CP02PMUI-WP001_PD_CLONE1", "attr_key": "provides the name of an attribute to set", "attr_value": "provides the value of an attribute to set", "cfg": "name of a CSV file with lookup values", "activate": "if True, puts agent into streaming mode after start (default: True)", "lcstate": "target lcstate", "availability": "target availability state", "facility": "a facility (Org) identified by governance name, preload id, name or uuid", "role": "a user role identified by governance name, preload id, name or uuid within the facility (Org)", "agent": "comma separated list of preload ids of agent definitions", "user": "a user or actor identified by name, preload id or uuid", "validity": "validity in seconds for authentication information starting now", "token": "a token secret string", } RES_ARG_LIST = ["resource_id", "preload_id"] DEV_ARG_LIST = RES_ARG_LIST + ["instrument", "platform", "device_name", "agent_name"] COMMON_ARG_LIST = ["help", "recurse", "fail_fast", "force", "verbose", "dryrun"] OP_HELP = [ ("start_agent", dict( alias=["start", "load"], opmsg="Start agent instance", opmsg_ext=["must provide a device or agent name or id", "option: don't put it into streaming mode", "option: provide start and stop date for instrument agent reachback recover"], args=DEV_ARG_LIST + ["activate"] + COMMON_ARG_LIST)), ("start", dict( opmsg="Alias for start_agent")), ("load", dict( opmsg="Alias for start_agent")), ("stop_agent", dict( alias=["stop"], opmsg="Stop agent instance", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("stop", dict( opmsg="Alias for stop_agent")), ("configure_instance", dict( opmsg="Update the AgentInstance driver_config using a config CSV lookup file", args=DEV_ARG_LIST + ["cfg"] + COMMON_ARG_LIST)), ("set_attributes", dict( opmsg="Update resource attributes using a CSV lookup file", args=DEV_ARG_LIST + ["cfg"] + COMMON_ARG_LIST)), ("activate_persistence", dict( opmsg="Activate persistence for the data products of the device", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("suspend_persistence", dict( opmsg="Suspend persistence for the data products of the device", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("cleanup_persistence", dict( opmsg="Delete remnant persistent records about activated persistence for a device in the system", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("cleanup_agent", dict( opmsg="Delete remnant persistent records about running agents for a device in the system", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("recover_data", dict( opmsg="Issue data reachback command for instrument agents", opmsg_ext=["requires start and stop dates"], args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("set_calibration", dict( opmsg="Add or replace calibration information for a device and its data products", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("clear_saved_state", dict( opmsg="Clear out the saved_agent_state in agent instance resource", args=DEV_ARG_LIST + COMMON_ARG_LIST)),
("clear_status", dict( opmsg="Clear out the device status", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("set_lcstate", dict( opmsg="Set resource lifecycle state", args=RES_ARG_LIST + ["lcstate"] + COMMON_ARG_LIST)), ("set_availability", dict( opmsg="Set resource availability state", args=RES_ARG_LIST + ["availability"] + COMMON_ARG_LIST)), ("share_resource", dict( opmsg="Share resource in given facility", args=RES_ARG_LIST + ["facility"] + COMMON_ARG_LIST)), ("unshare_resource", dict( opmsg="Remove resource from given facility", args=RES_ARG_LIST + ["facility"] + COMMON_ARG_LIST)), ("enroll_member", dict( opmsg="Add user as member to a facility (Org)", args=RES_ARG_LIST + ["facility"] + COMMON_ARG_LIST)), ("remove_member", dict( opmsg="Remove user as member of a facility (Org)", args=RES_ARG_LIST + ["facility"] + COMMON_ARG_LIST)), ("grant_role", dict( opmsg="For given user, grant a role in a facility (Org)", args=RES_ARG_LIST + ["facility", "role"] + COMMON_ARG_LIST)), ("revoke_role", dict( opmsg="For given user, revoke a role in a facility (Org)", args=RES_ARG_LIST + ["facility", "role"] + COMMON_ARG_LIST)), ("create_commitment", dict( opmsg="Set a commitment for a user and a resource in a facility (Org)", args=RES_ARG_LIST + ["facility"] + COMMON_ARG_LIST)), ("retire_commitment", dict( opmsg="Retire a commitment for a user and a resource in a facility (Org)", args=RES_ARG_LIST + ["facility"] + COMMON_ARG_LIST)), ("set_owner", dict( opmsg="Set the owner user/actor for given resource, replacing current owner if existing", args=RES_ARG_LIST + ["user"] + COMMON_ARG_LIST)), ("set_agentdef", dict( opmsg="Reassigns the agent definition", args=RES_ARG_LIST + ["agent"] + COMMON_ARG_LIST)), ("create_dataset", dict( opmsg="Create Dataset resource and coverage for a device, but don't activate ingestion worker", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("delete_dataset", dict( opmsg="Remove Dataset resource and coverage for a device", opmsg_ext=["can also be called on a Dataset resource directly"], args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("delete_all_data", dict( opmsg="Remove all device related DataProduct, StreamDefinition, Dataset, resources and coverages", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("delete_all_device", dict( opmsg="Remove all device related resources and all from delete_all_data", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("delete_all_device", dict( opmsg="Remove all device related resources and all from delete_all_data", args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("delete_site", dict( opmsg="Remove site resources", args=RES_ARG_LIST + COMMON_ARG_LIST)), ("activate_deployment", dict( opmsg="Activate a deployment", opmsg_ext=["if a device is provided, activate the current available deployment"], args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("deactivate_deployment", dict( opmsg="Deactivate a deployment", opmsg_ext=["if a device is provided, deactivate the current deployment"], args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("clone_device", dict( opmsg="Clone a device into a new device with similar associations, new agent instance and new data products.", opmsg_ext=["the clone_id determines the PRE id suffix used to uniquely identify cloned resources", "a CSV file can provide attribute values for the cloned resources"], args=DEV_ARG_LIST + COMMON_ARG_LIST)), ("clone_deployment", dict( opmsg="Clone a deployment into a new deployment with similar associations.", opmsg_ext=["the clone_id determines the PRE id suffix used to uniquely identify cloned resources", "a CSV file can provide attribute values for the cloned 
resources"], args=RES_ARG_LIST + COMMON_ARG_LIST)), ("generate_auth_token", dict( opmsg="Generates a temporary authentication token for user with given validity (default: 1 day)", args=["user", "validity"])), ("cancel_auth_token", dict( opmsg="Invalidates given authentication token", args=["token"])), ("list_persistence", dict( opmsg="Prints a report of currently active persistence", args=["verbose"])), ("list_agents", dict( opmsg="Prints a report of currently active agents", args=["verbose"])), ("list_containers", dict( opmsg="Prints a report of currently active containers", args=["verbose"])), ("list_services", dict( opmsg="Prints a report of currently active services", args=["verbose"])), ("show_use", dict( opmsg="Shows associations and attributes for a resource", args=DEV_ARG_LIST + ["verbose"])), ("show_dataset", dict( opmsg="Prints a report about a dataset (coverage)", args=RES_ARG_LIST + ["verbose"])), ("set_sys_attribute", dict( opmsg="Sets a system attribute, such as MI version in the directory", opmsg_ext=["must provide attr_key and attr_value"], args=RES_ARG_LIST + ["attr_key", "attr_value"])), ("help", dict( opmsg="Lists available operations", args=["verbose"])), ] class AgentControl(ImmediateProcess): def on_start(self): log.info("======================== OOINet AgentControl ========================") self.rr = self.container.resource_registry self.op = self.CFG.get("op", "start") # Map op aliases to internal names self.op = dict(start="start_agent", load="start_agent", stop="stop_agent").get(self.op, self.op) if not self.op or self.op.startswith("_") or not hasattr(self, self.op): raise BadRequest("Operation %s unknown", self.op) log.info("OPERATION: %s", self.op) dataset_name = self.CFG.get("dataset", None) device_name = self.CFG.get("device_name", None) or self.CFG.get("instrument", None) or self.CFG.get("platform", None) or self.CFG.get("resource_id", None) agent_name = self.CFG.get("agent_name", None) resource_name = dataset_name or device_name or agent_name self.recurse = self.CFG.get("recurse", False) self.fail_fast
None and solver.is_software != software: return False if vfyc is not None and solver.is_vfyc != vfyc: return False if flux_biases is not None and solver.has_flux_biases != flux_biases: return False if num_qubits is not None: if isinstance(num_qubits, list) and len(num_qubits) == 2: min_qubits, max_qubits = num_qubits else: min_qubits, max_qubits = num_qubits, num_qubits if min_qubits is not None and solver.num_qubits < min_qubits: return False if max_qubits is not None and max_qubits < solver.num_qubits: return False return True solvers = list(filter(predicate, self.get_solvers(refresh=refresh).values())) solvers.sort(key=attrgetter('id')) return solvers def get_solver(self, name=None, refresh=False): """Load the configuration for a single solver. Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}` is a URL configured for the client, and returns a :class:`.Solver` instance that can be used to submit sampling problems to the D-Wave API and retrieve results. Args: name (str): ID of the requested solver. ``None`` returns the default solver. If default solver is not configured, ``None`` returns the first available solver in ``Client``'s class (QPU/software/base). refresh (bool): Return solver from cache (if cached with ``get_solvers()``), unless set to ``True``. Returns: :class:`.Solver` Examples: This example creates two solvers for a client instantiated from a local system's auto-detected default configuration file, which configures a connection to a D-Wave resource that provides two solvers. The first uses the default solver, the second explicitly selects another solver. >>> from dwave.cloud import Client >>> client = Client.from_config() >>> client.get_solvers() # doctest: +SKIP {u'2000Q_ONLINE_SOLVER1': <dwave.cloud.solver.Solver at 0x7e84fd0>, u'2000Q_ONLINE_SOLVER2': <dwave.cloud.solver.Solver at 0x7e84828>} >>> solver1 = client.get_solver() # doctest: +SKIP >>> solver2 = client.get_solver('2000Q_ONLINE_SOLVER2') # doctest: +SKIP >>> solver1.id # doctest: +SKIP u'2000Q_ONLINE_SOLVER1' >>> solver2.id # doctest: +SKIP u'2000Q_ONLINE_SOLVER2' >>> # code that uses client >>> client.close() # doctest: +SKIP """ _LOGGER.debug("Looking for solver: %s", name) if name is None: if self.default_solver: name = self.default_solver else: # get the first appropriate solver try: return self.solvers()[0] except IndexError: raise SolverError("No solvers available this client can handle") with self._solvers_lock: if refresh or name not in self._solvers: try: response = self.session.get( posixpath.join(self.endpoint, 'solvers/remote/{}/'.format(name))) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError if response.status_code == 404: raise KeyError("No solver with the name {} was available".format(name)) response.raise_for_status() solver = Solver(self, data=response.json()) if solver.id != name: raise InvalidAPIResponseError( "Asked for solver named {!r}, got {!r}".format(name, solver.id)) self._solvers[name] = solver return self._solvers[name] def _submit(self, body, future): """Enqueue a problem for submission to the server. This method is thread safe. """ self._submission_queue.put(self._submit.Message(body, future)) _submit.Message = collections.namedtuple('Message', ['body', 'future']) def _do_submit_problems(self): """Pull problems from the submission queue and submit them. Note: This method is always run inside of a daemon thread. 
""" try: while True: # Pull as many problems as we can, block on the first one, # but once we have one problem, switch to non-blocking then # submit without blocking again. # `None` task is used to signal thread termination item = self._submission_queue.get() if item is None: break ready_problems = [item] while len(ready_problems) < self._SUBMIT_BATCH_SIZE: try: ready_problems.append(self._submission_queue.get_nowait()) except queue.Empty: break # Submit the problems _LOGGER.debug("Submitting %d problems", len(ready_problems)) body = '[' + ','.join(mess.body for mess in ready_problems) + ']' try: try: response = self.session.post(posixpath.join(self.endpoint, 'problems/'), body) except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() response.raise_for_status() message = response.json() _LOGGER.debug("Finished submitting %d problems", len(ready_problems)) except BaseException as exception: _LOGGER.debug("Submit failed for %d problems", len(ready_problems)) if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) for mess in ready_problems: mess.future._set_error(exception, sys.exc_info()) self._submission_queue.task_done() continue # Pass on the information for submission, res in zip(ready_problems, message): self._handle_problem_status(res, submission.future) self._submission_queue.task_done() # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except BaseException as err: _LOGGER.exception(err) def _handle_problem_status(self, message, future): """Handle the results of a problem submission or results request. This method checks the status of the problem and puts it in the correct queue. Args: message (dict): Update message from the SAPI server wrt. this problem. future `Future`: future corresponding to the problem Note: This method is always run inside of a daemon thread. """ try: status = message['status'] _LOGGER.debug("Handling response for %s with status %s", message['id'], status) _LOGGER.trace("Handling response: %r", message) # The future may not have the ID set yet with future._single_cancel_lock: # This handles the case where cancel has been called on a future # before that future received the problem id if future._cancel_requested: if not future._cancel_sent and status == self.STATUS_PENDING: # The problem has been canceled but the status says its still in queue # try to cancel it self._cancel(message['id'], future) # If a cancel request could meaningfully be sent it has been now future._cancel_sent = True # Set the id field in the future future.id = message['id'] future.remote_status = status if not future.time_received and message.get('submitted_on'): future.time_received = parse_datetime(message['submitted_on']) if not future.time_solved and message.get('solved_on'): future.time_solved = parse_datetime(message['solved_on']) if not future.eta_min and message.get('earliest_estimated_completion'): future.eta_min = parse_datetime(message['earliest_estimated_completion']) if not future.eta_max and message.get('latest_estimated_completion'): future.eta_max = parse_datetime(message['latest_estimated_completion']) if status == self.STATUS_COMPLETE: # TODO: find a better way to differentiate between # `completed-on-submit` and `completed-on-poll`. # Loading should happen only once, not every time when response # doesn't contain 'answer'. 
# If the message is complete, forward it to the future object if 'answer' in message: future._set_message(message) # If the problem is complete, but we don't have the result data # put the problem in the queue for loading results. else: self._load(future) elif status in self.ANY_STATUS_ONGOING: # If the response is pending add it to the queue. self._poll(future) elif status == self.STATUS_CANCELLED: # If canceled return error future._set_error(CanceledFutureError()) else: # Return an error to the future object future._set_error(SolverFailureError(message.get('error_message', 'An unknown error has occurred.'))) except Exception as error: # If there were any unhandled errors we need to release the # lock in the future, otherwise deadlock occurs. future._set_error(error, sys.exc_info()) def _cancel(self, id_, future): """Enqueue a problem to be canceled. This method is thread safe. """ self._cancel_queue.put((id_, future)) def _do_cancel_problems(self): """Pull ids from the cancel queue and submit them. Note: This method is always run inside of a daemon thread. """ try: while True: # Pull as many problems as we can, block when none are available. # `None` task is used to signal thread termination item = self._cancel_queue.get() if item is None: break item_list = [item] while True: try: item_list.append(self._cancel_queue.get_nowait()) except queue.Empty: break # Submit the problems, attach the ids as a json list in the # body of the delete query. try: body = [item[0] for item in item_list] try: self.session.delete(posixpath.join(self.endpoint, 'problems/'), json=body) except requests.exceptions.Timeout: raise RequestTimeout except Exception as err: for _, future in item_list: if future is not None: future._set_error(err, sys.exc_info()) # Mark all the ids as processed regardless of success or failure. [self._cancel_queue.task_done() for _ in item_list] # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except Exception as err: _LOGGER.exception(err) def _poll(self, future): """Enqueue a problem to poll the server for status.""" if future._poll_backoff is None: # on first poll, start with minimal back-off future._poll_backoff = self._POLL_BACKOFF_MIN # if we have ETA of results, schedule the first poll for then if future.eta_min: at = datetime_to_timestamp(future.eta_min) else: at = time.time() + future._poll_backoff else: # update exponential poll back-off, clipped to a range future._poll_backoff = \ max(self._POLL_BACKOFF_MIN, min(future._poll_backoff * 2, self._POLL_BACKOFF_MAX)) # for poll priority we use timestamp of next scheduled poll at = time.time() + future._poll_backoff now = utcnow() future_age = (now - future.time_created).total_seconds() _LOGGER.debug("Polling scheduled at %.2f with %.2f sec new back-off for: %s (age: %.2f sec)", at, future._poll_backoff, future.id, future_age) # don't enqueue for next poll if polling_timeout is exceeded by then future_age_on_next_poll = future_age + (at - datetime_to_timestamp(now)) if self.polling_timeout is not None and future_age_on_next_poll > self.polling_timeout: _LOGGER.debug("Polling timeout exceeded before next poll: %.2f sec > %.2f sec, aborting polling!", future_age_on_next_poll, self.polling_timeout) raise PollingTimeout self._poll_queue.put((at, future)) def _do_poll_problems(self): """Poll the server for the status of a set of problems. Note: This method is always run inside of a daemon thread.
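# ---------------------------------------------------------------------------
# Aside: the back-off schedule _poll() implements above, in isolation --
# double the delay on each poll, clipped to a [min, max] range. The constants
# here are illustrative, not the client's actual _POLL_BACKOFF_* values.
POLL_BACKOFF_MIN, POLL_BACKOFF_MAX = 0.05, 60.0

def next_backoff(current):
    if current is None:                 # first poll: start at the minimum
        return POLL_BACKOFF_MIN
    return max(POLL_BACKOFF_MIN, min(current * 2, POLL_BACKOFF_MAX))

delay, schedule = None, []
for _ in range(12):
    delay = next_backoff(delay)
    schedule.append(round(delay, 2))
print(schedule)  # 0.05, 0.1, 0.2, ... clipped at 60.0
# ---------------------------------------------------------------------------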
<reponame>ralbuyeh-figure/saint import torch from torch import nn from models import SAINT from data_openml import data_prep_openml,task_dset_ids,DataSetCatCon import argparse from torch.utils.data import DataLoader import torch.optim as optim from utils import count_parameters, classification_scores, mean_sq_error from augmentations import embed_data_mask from augmentations import add_noise import os import numpy as np class FakeParser: pass # Driver's code opt = FakeParser() opt.dset_id = 1461 opt.task = "binary" opt.attentiontype = "colrow" opt.cont_embeddings = "MLP" opt.vision_dset = False opt.embedding_size = 32 opt.transformer_depth = 6 opt.attention_heads = 8 opt.attention_dropout = 0.1 opt.ff_dropout = 0.1 opt.attentiontype = "colrow" opt.optimizer = "AdamW" #parser = argparse.ArgumentParser() #parser.add_argument('--dset_id', required=True, type=int) #parser.add_argument('--vision_dset', action = 'store_true') #parser.add_argument('--task', required=True, type=str,choices = ['binary','multiclass','regression']) #parser.add_argument('--cont_embeddings', default='MLP', type=str,choices = ['MLP','Noemb','pos_singleMLP']) #parser.add_argument('--embedding_size', default=32, type=int) #parser.add_argument('--transformer_depth', default=6, type=int) #parser.add_argument('--attention_heads', default=8, type=int) #parser.add_argument('--attention_dropout', default=0.1, type=float) #parser.add_argument('--ff_dropout', default=0.1, type=float) #parser.add_argument('--attentiontype', default='colrow', type=str,choices = ['col','colrow','row','justmlp','attn','attnmlp']) #parser.add_argument('--optimizer', default='AdamW', type=str,choices = ['AdamW','Adam','SGD']) opt.scheduler = "cosine" #parser.add_argument('--scheduler', default='cosine', type=str,choices = ['cosine','linear']) opt.lr = 0.0001 #parser.add_argument('--lr', default=0.0001, type=float) opt.epochs = 100 #parser.add_argument('--epochs', default=100, type=int) opt.batchsize = 256 #parser.add_argument('--batchsize', default=256, type=int) opt.savemodelroot = "./bestmodels" #parser.add_argument('--savemodelroot', default='./bestmodels', type=str) opt.run_name = "testrun" #parser.add_argument('--run_name', default='testrun', type=str) opt.set_seed = 1 #parser.add_argument('--set_seed', default= 1 , type=int) opt.dset_seed = 5 #parser.add_argument('--dset_seed', default= 5 , type=int) opt.active_log = True #parser.add_argument('--active_log', action = 'store_true') opt.pretrain = True #parser.add_argument('--pretrain', action = 'store_true') opt.pretrain_epochs = 50 #parser.add_argument('--pretrain_epochs', default=50, type=int) opt.pt_tasks = ["contrastive", "denoising"] #parser.add_argument('--pt_tasks', default=['contrastive','denoising'], type=str,nargs='*',choices = ['contrastive','contrastive_sim','denoising']) opt.pt_aug = [] #parser.add_argument('--pt_aug', default=[], type=str,nargs='*',choices = ['mixup','cutmix']) opt.pt_aug_lam = 0.1 #parser.add_argument('--pt_aug_lam', default=0.1, type=float) opt.mixup_lam = 0.3 #parser.add_argument('--mixup_lam', default=0.3, type=float) opt.train_mask_prob = 0 #parser.add_argument('--train_mask_prob', default=0, type=float) opt.mask_prob = 0 #parser.add_argument('--mask_prob', default=0, type=float) opt.ssl_avail_y = 0 #parser.add_argument('--ssl_avail_y', default= 0, type=int) opt.pt_projhead_style = "diff" #parser.add_argument('--pt_projhead_style', default='diff', type=str,choices = ['diff','same','nohead']) opt.nce_temp = 0.7 #parser.add_argument('--nce_temp', default=0.7, 
type=float) opt.lam0 = 0.5 opt.lam1 = 10 opt.lam2 = 1 opt.lam3 = 10 opt.final_mlp_style = "sep" #parser.add_argument('--lam0', default=0.5, type=float) #parser.add_argument('--lam1', default=10, type=float) #parser.add_argument('--lam2', default=1, type=float) #parser.add_argument('--lam3', default=10, type=float) #parser.add_argument('--final_mlp_style', default='sep', type=str,choices = ['common','sep']) #opt = parser.parse_args() modelsave_path = os.path.join(os.getcwd(),opt.savemodelroot,opt.task,str(opt.dset_id),opt.run_name) if opt.task == 'regression': opt.dtask = 'reg' else: opt.dtask = 'clf' device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(f"Device is {device}.") torch.manual_seed(opt.set_seed) os.makedirs(modelsave_path, exist_ok=True) if opt.active_log: import wandb if opt.pretrain: wandb.init(project="saint_v2_all", group =opt.run_name ,name = f'pretrain_{opt.task}_{str(opt.attentiontype)}_{str(opt.dset_id)}_{str(opt.set_seed)}') else: if opt.task=='multiclass': wandb.init(project="saint_v2_all_kamal", group =opt.run_name ,name = f'{opt.task}_{str(opt.attentiontype)}_{str(opt.dset_id)}_{str(opt.set_seed)}') else: wandb.init(project="saint_v2_all", group =opt.run_name ,name = f'{opt.task}_{str(opt.attentiontype)}_{str(opt.dset_id)}_{str(opt.set_seed)}') #### gutting the data preprocessing import openml import numpy as np from sklearn.preprocessing import LabelEncoder import pandas as pd from torch.utils.data import Dataset ds_id = opt.dset_id seed = opt.dset_seed task = opt.task datasplit=[.65, .15, .2] np.random.seed(seed) dataset = openml.datasets.get_dataset(ds_id) X, y, categorical_indicator, attribute_names = dataset.get_data(dataset_format="dataframe", target=dataset.default_target_attribute) # x is a pandas dataframe with a bunch of features, mixed categorical and continuous # y is a pandas series with distinct categories, '1' and '2' # categorical indicator is a list of booleans where the value corresponds to the column index # attribute names is like above but with names in place of boolean if ds_id == 42178: categorical_indicator = [True, False, True, True, False, True, True, True, True, True, True, True, True, True, True, True, True, False, False] tmp = [x if (x != ' ') else '0' for x in X['TotalCharges'].tolist()] X['TotalCharges'] = [float(i) for i in tmp] y = y[X.TotalCharges != 0] X = X[X.TotalCharges != 0] X.reset_index(drop=True, inplace=True) print(y.shape, X.shape) if ds_id in [42728, 42705, 42729, 42571]: # import ipdb; ipdb.set_trace() X, y = X[:50000], y[:50000] X.reset_index(drop=True, inplace=True) categorical_columns = X.columns[list(np.where(np.array(categorical_indicator) == True)[0])].tolist() # this just specifies the categorical column names cont_columns = list(set(X.columns.tolist()) - set(categorical_columns)) # this is the continuous columns: the feature names left after removing the categorical columns cat_idxs = list(np.where(np.array(categorical_indicator) == True)[0]) # indexes of categorical columns..
for some reason con_idxs = list(set(range(len(X.columns))) - set(cat_idxs)) # indexes of continuous columns for col in categorical_columns: X[col] = X[col].astype("object") # converting all the categoricals to type object X["Set"] = np.random.choice(["train", "valid", "test"], p=datasplit, size=(X.shape[0],)) # apply train test val flag train_indices = X[X.Set == "train"].index valid_indices = X[X.Set == "valid"].index test_indices = X[X.Set == "test"].index # int64 index of the corresponding indices to flag X = X.drop(columns=['Set']) # drop that flag column temp = X.fillna("MissingValue") # fillna as other... nan_mask = temp.ne("MissingValue").astype(int) # returns a dataframe of 1s where value is not missing value... so mostly a matrix of 1s. cat_dims = [] for col in categorical_columns: # X[col] = X[col].cat.add_categories("MissingValue") X[col] = X[col].fillna("MissingValue") l_enc = LabelEncoder() X[col] = l_enc.fit_transform(X[col].values) cat_dims.append(len(l_enc.classes_)) # apply arbitrary integer values to categorical columns... # cat dims is the number of distinct categories for each categorical column # watch out here, they're not really being mindful of leakage. for col in cont_columns: # X[col].fillna("MissingValue",inplace=True) X[col] = X[col].fillna(X.loc[train_indices, col].mean()) # mean impute each continuous column with its training-split mean, so no val/test statistics leak into the fill values y = y.values if task != 'regression': l_enc = LabelEncoder() y = l_enc.fit_transform(y) # label encoding the y vector to be 0s and 1s.. def data_split(X, y, nan_mask, indices): x_d = { 'data': X.values[indices], 'mask': nan_mask.values[indices] } if x_d['data'].shape != x_d['mask'].shape: raise ValueError('Shape of data not same as that of nan mask!')
y_d = { 'data': y[indices].reshape(-1, 1) } return x_d, y_d # above function returns x_d which is a dictionary of numpy array of x data values, and then the mask, and then # row filtered based on an index X_train, y_train = data_split(X, y, nan_mask, train_indices) X_valid, y_valid = data_split(X, y, nan_mask, valid_indices) X_test, y_test = data_split(X, y, nan_mask, test_indices) train_mean, train_std = np.array(X_train['data'][:, con_idxs], dtype=np.float32).mean(0), np.array( X_train['data'][:, con_idxs], dtype=np.float32).std(0) train_std = np.where(train_std < 1e-6, 1e-6, train_std) # import ipdb; ipdb.set_trace() ####DONE print('Downloading and processing the dataset, it might take some time.') #cat_dims, cat_idxs, con_idxs, X_train, y_train, X_valid, y_valid, X_test, y_test, train_mean, train_std = data_prep_openml(opt.dset_id, opt.dset_seed,opt.task, datasplit=[.65, .15, .2]) # I scrubbed the above line because I gutted it in the above section continuous_mean_std = np.array([train_mean,train_std]).astype(np.float32) ##### Setting some hyperparams based on inputs and dataset _,nfeat = X_train['data'].shape # this just gets the number of features in the x matrix if nfeat > 100: opt.embedding_size = min(8,opt.embedding_size) opt.batchsize = min(64, opt.batchsize) if opt.attentiontype != 'col': opt.transformer_depth = 1 opt.attention_heads = min(4,opt.attention_heads) opt.attention_dropout = 0.8 opt.embedding_size = min(32,opt.embedding_size) opt.ff_dropout = 0.8 print(nfeat,opt.batchsize) print(opt) if opt.active_log: wandb.config.update(opt) ##### gutting the datasetcatcon class # # class DataSetCatCon(Dataset): # # def __init__(self, X, Y, cat_cols, task='clf', continuous_mean_std=None): # # _X = X_train # # that data dict thing # _Y = y_train # # the y dict # _cat_cols = cat_idxs # # indices of categorical columns # _task = opt.dtask # # 'clf' in our case # _continuous_mean_std = continuous_mean_std # # _cat_cols = list(_cat_cols) # # redundant # _X_mask = _X['mask'].copy() # # getting the mask of that data dict # _X = _X['data'].copy() # # getting the X element of that data dict # _con_cols = list(set(np.arange(_X.shape[1])) - set(_cat_cols)) # # the continuous column indices # _X1 = _X[:, _cat_cols].copy().astype(np.int64) # categorical columns # # broken off categorical columns # _X2 = _X[:, _con_cols].copy().astype(np.float32) # numerical columns # # broken off numerical columns # # _X1_mask = _X_mask[:, _cat_cols].copy().astype(np.int64) # categorical columns # # broken off categorical missing value mask # # _X2_mask = _X_mask[:, _con_cols].copy().astype(np.int64) # numerical columns # # broken off numerical missing value mask # # if task == 'clf': # _y = _Y['data'] # .astype(np.float32) # else: # _y = _Y['data'].astype(np.float32) # # just grabbing that y vector # # _cls = np.zeros_like(_y, dtype=int) # # get a bunch of zeros in the same dimensionality as y vector # # _cls_mask = np.ones_like(_y, dtype=int) # # get a bunch of ones in same dimensionality as y vector # # # if continuous_mean_std is not None: # _mean, _std = continuous_mean_std # _X2 = (_X2 - _mean) / _std # z normalize only the continuous x columns # # # def __len__(self): # return len(self.y) # # # def __getitem__(self, idx): # # X1 has categorical data, X2 has continuous # return np.concatenate((self.cls[idx], self.X1[idx])), self.X2[idx], self.y[idx], np.concatenate( # (self.cls_mask[idx], self.X1_mask[idx])), self.X2_mask[idx] # note that they are not converting it to a torch tensor.. 
they must do it at some point... train_ds = DataSetCatCon(X_train, y_train, cat_idxs,opt.dtask,continuous_mean_std) trainloader = DataLoader(train_ds, batch_size=opt.batchsize, shuffle=True,num_workers=4) # a single element looks like this: # (array([0, 4, 1, 2, 0, 1, 0, 2, 8, 3]), # this one is like the categorical vector, with a leading zero appended to it. Andrew suggests it might be the token they mention in the paper # array([ 1.5994084 , 0.25852484, -1.2972844 , 0.01338016, -0.5672942 , # -0.41616383, -0.2361183 ], dtype=float32), # this one is the continuous element of the vector # array([0]), # this is the y-element of a vector # array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), # │broken off categorical missing value mask with a leading 1 appended to it. that's because that 0 element is not missing # array([1, 1, 1, 1, 1, 1, 1])) # broken off numerical missing value mask valid_ds = DataSetCatCon(X_valid, y_valid, cat_idxs,opt.dtask, continuous_mean_std) validloader = DataLoader(valid_ds, batch_size=opt.batchsize, shuffle=False,num_workers=4) test_ds = DataSetCatCon(X_test, y_test, cat_idxs,opt.dtask, continuous_mean_std) testloader = DataLoader(test_ds, batch_size=opt.batchsize, shuffle=False,num_workers=4) if opt.task == 'regression': y_dim = 1 else: y_dim = len(np.unique(y_train['data'][:,0])) cat_dims = np.append(np.array([1]),np.array(cat_dims)).astype(int) #Appending 1 for CLS token, this is later used to generate embeddings. # note in
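# ---------------------------------------------------------------------------
# Aside: what the CLS prepend in DataSetCatCon amounts to, sketched
# standalone -- a constant token (category 0, never "missing") is stacked in
# front of each row's categorical codes. Shapes and values are illustrative.
import numpy as np

X1 = np.array([[4, 1, 2], [3, 0, 1]], dtype=np.int64)   # categorical codes
cls = np.zeros((X1.shape[0], 1), dtype=np.int64)        # CLS token column
cls_mask = np.ones_like(cls)                            # always present
X1_mask = np.ones_like(X1)

x_categ = np.concatenate([cls, X1], axis=1)             # token goes first
mask = np.concatenate([cls_mask, X1_mask], axis=1)
print(x_categ)  # [[0 4 1 2], [0 3 0 1]]
print(mask)     # all ones
# ---------------------------------------------------------------------------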
<reponame>dacut/ionosphere-repository #!/usr/bin/env python3 """ Ionosphere repository build orchestration. """ from enum import Enum from logging import basicConfig as log_config, getLogger, DEBUG from os import link, lstat, mkdir, walk from os.path import abspath, exists, join as path_join, split as path_split from platform import machine, system as system_name from re import compile as re_compile from shutil import copy2, rmtree from sys import argv from tempfile import mkdtemp from threading import Condition, local from typing import ( Any, Callable, Dict, Iterable, List, NamedTuple, Optional, Sequence) from urllib.parse import unquote_plus as url_unquote_plus, urlparse import docker from docker.errors import BuildError, ContainerError from docker.models.images import Image import requests import yaml # pylint: disable=invalid-name,too-many-instance-attributes,too-many-arguments READ_BUFFER_SIZE = 1 << 20 LOG_STRIP_PATTERN = re_compile(r"^(?:[ \n]*\n)?((?:.|\n+.)*)(?:\n[ \n]*)?$") log = getLogger("ionosphere.repobuild") class Package: """ Data model for a software package. """ def __init__(self, name: str, version: str, download_url: str, dependencies: Dict[str, str]) -> None: """ Create a new Package. """ super(Package, self).__init__() self.name = name self.version = version self.download_url = download_url self.dependencies = dict(dependencies) @property def source_archive_name(self) -> str: """ The base filename of the source archive. """ return path_split( url_unquote_plus(urlparse(self.resolved_download_url).path))[-1] @property def resolved_download_url(self) -> str: """ The download_url resolved with variables replaced. """ arch = machine() return self.download_url.format( Arch=arch, Architecture=arch, Name=self.name, System=system_name(), Version=self.version) @classmethod def from_yaml_config(cls, config: Dict[str, Any]) -> "Package": name = config["Name"] version = config["Version"] download_url = config["URL"] dependencies: Dict[str, str] = config.get("Dependencies", {}) return cls(name=name, version=version, download_url=download_url, dependencies=dependencies) class PackageType(Enum): """ Type of package being produced (RPM or DEB). """ RPM = "rpm" DEB = "deb" class PlatformInfo(NamedTuple): """ Variables for a given platform to build for. """ name: str arch: str source_docker_image: str package_type: PackageType class Platform(Enum): """ Platforms we know how to build against. """ amzn1_x86_64 = PlatformInfo( "amzn1", "x86_64", "amazonlinux:1", PackageType.RPM) amzn2_x86_64 = PlatformInfo( "amzn2", "x86_64", "amazonlinux:2", PackageType.RPM) el7_x86_64 = PlatformInfo( "el7", "x86_64", "centos:7", PackageType.RPM) ubuntu_1604_x86_64 = PlatformInfo( "ubuntu-xenial", "amd64", "ubuntu:16.04", PackageType.DEB) ubuntu_1804_x86_64 = PlatformInfo( "ubuntu-bionic", "amd64", "ubuntu:18.04", PackageType.DEB) ubuntu_1810_x86_64 = PlatformInfo( "ubuntu-cosmic", "amd64", "ubuntu:18.10", PackageType.DEB) ubuntu_1904_x86_64 = PlatformInfo( "ubuntu-disco", "amd64", "ubuntu:19.04", PackageType.DEB) @property def os_name(self) -> str: """ The short OS name (amzn1, amzn2, el7, etc.); used in RPM suffixes. """ return self.value.name # pylint: disable=no-member @property def arch(self) -> str: """ The processor architecture; used in package names (OS-specific; notably RPM-based OSes use x86_64, while DEB-based OSes use amd64). """ return self.value.arch # pylint: disable=no-member @property def source_docker_image(self) -> str: """ The Docker image used for building. 
""" return self.value.source_docker_image # pylint: disable=no-member @property def package_type(self) -> PackageType: """ The type of packages (RPM, DEB) used by the OS. """ return self.value.package_type # pylint: disable=no-member @property def dockerfile_template(self) -> str: """ The name of the docker template used to build the Docker image. """ return f"{self.os_name}.dockerfile" class SourcePackageState(Enum): """ State of a source package download. """ InProgress = 1 Downloaded = 2 Failed = 3 class PackageBuild: """ Build orchestration for a single package. """ thread_local = local() source_state: Dict[str, SourcePackageState] = {} download_cond = Condition() def __init__( self, package: Package, platform: Platform, build_root: str, package_root: str, remove_build_dir: bool = True) -> None: """ Create a new PackageBuild instance. This also creates the temporary build directory used for creating the Docker image. """ super(PackageBuild, self).__init__() self.package = package self.platform = platform self.build_root = build_root self.package_root = package_root self.package_dir = path_join(package_root, package.name) self.build_dir = abspath(mkdtemp( prefix=(f'{package.name.replace("/", "-")}-' f'{package.version.replace("/", "-")}-{platform.name}-' f'{platform.arch}'), dir=build_root)) self.remove_build_dir = remove_build_dir self.staged = False self.image: Optional[Image] = None def __del__(self): """ Perform cleanup operations on the PackageBuild instance. If remove_build_dir is set, this will remove the build directory created during initialization. """ if self.remove_build_dir: rmtree(self.build_dir) @property def docker(self): """ A thread-local Docker client. """ try: result = PackageBuild.thread_local.docker except AttributeError: result = PackageBuild.thread_local.docker = \ docker.from_env(timeout=300) return result @property def source_archive_path(self) -> str: """ The full path to the downloaded source archive. """ return path_join(self.package_dir, self.package.source_archive_name) def download_source_package(self) -> bool: """ Download the package if it does not already exist. The return value indicates if a download was made. """ while True: with self.download_cond: state = self.source_state.get(self.package.name) if state is None or state == SourcePackageState.Failed: # No download in progress; do it. self.source_state[self.package.name] = \ SourcePackageState.InProgress break if state == SourcePackageState.Downloaded: # Already downloaded. return False # Wait until we get a notification about the download, then # try again. assert state == SourcePackageState.InProgress self.download_cond.wait() # If we broke out of the wait loop, we own downloading the package. try: if not exists(self.package_dir): mkdir(self.package_dir) log.debug("Downloading %s", self.package.resolved_download_url) with requests.get( self.package.resolved_download_url, stream=True) as req: with open(self.source_archive_path, "wb") as fd: for chunk in req.iter_content(chunk_size=READ_BUFFER_SIZE): fd.write(chunk) with self.download_cond: self.source_state[self.package.name] = ( SourcePackageState.Downloaded) return True except: # noqa with self.download_cond: self.source_state[self.package.name] = ( SourcePackageState.Failed) raise @property def staged_archive(self) -> str: """ The path to the staged source archive. """ return path_join(self.build_dir, self.package.source_archive_name) @property def staged_dockerfile(self) -> str: """ The path to the staged Dockerfile. 
""" return path_join(self.build_dir, "Dockerfile") @property def buildargs(self) -> Dict[str, str]: """ The build arguments to pass to Docker while building the image. """ return { "ARCH": self.platform.arch, "OS_NAME": self.platform.os_name, "PACKAGE": self.package.name, "REGION": "us-west-2", "REL": "0", "SOURCE_ARCHIVE": self.package.source_archive_name, "VERSION": self.package.version, } def stage_files(self) -> None: """ Copy files from the source directory to the staging directory. """ self.download_source_package() # Are the package directory and build directory on the same filesystem? # If so, just create hard-links to save on disk space and run faster. package_dev = lstat(self.package.name).st_dev build_dev = lstat(self.build_dir).st_dev if package_dev == build_dev: log.debug("Package directory and build directory reside on the " "same filesystem; using link to copy files") copy_function: Callable[[str, str], Any] = link else: log.debug("Package directory and build directory reside on " "different filesystems (%d vs %d); using copy2 to " "copy files", package_dev, build_dev) copy_function = copy2 # Copy the bits in the package directory so they're available to the # Docker builder. log.debug("Copying (recursively) %s to %s", self.package.name, self.build_dir) # We can't use copytree here -- it insists on build_dir not existing, # which is problematic for us. package_source = abspath(self.package.name) for source_dir, subdirs, filenames in walk(package_source): target_dir = path_join( self.build_dir, source_dir[len(package_source):].lstrip("/")) for filename in filenames: source_path = path_join(source_dir, filename) target_path = path_join(target_dir, filename) log.debug("Copying %s to %s", source_path, target_path) copy_function(source_path, target_path) for subdir in subdirs: target_path = path_join(target_dir, subdir) log.debug("Creating %s", target_path) mkdir(target_path) # Copy the package itself. log.debug("Copying %s to %s", self.source_archive_path, self.staged_archive) copy_function( abspath(self.source_archive_path), self.staged_archive) # Copy the Dockerfile. log.debug("Copying %s to %s", self.platform.dockerfile_template, self.staged_dockerfile) copy_function( abspath(self.platform.dockerfile_template), self.staged_dockerfile) self.staged = True def build(self) -> None: """ Perform the build. """ if not self.staged: raise ValueError("Files have not been staged for building") try: self.image, build_logs = self.docker.images.build( dockerfile=self.staged_dockerfile, path=self.build_dir, rm=True, buildargs=self.buildargs) except BuildError as e: log.error("Failed to build %s-%s (%s %s): %s", self.package.name, self.package.version, self.platform.os_name, self.platform.arch, e) self.handle_log_entries(e.build_log) raise self.handle_log_entries(build_logs) def export(self, dest_root: str) -> None: """ Export files from the build. 
""" if self.image is None: raise ValueError("Image has not been built") try: logs = self.docker.containers.run( self.image.id, volumes={dest_root: {"bind": "/export", "mode": "rw"}}, detach=False, stdout=True, stderr=True, remove=True) except ContainerError as e: log.error("Failed to export %s-%s (%s %s): %s", self.package.name, self.package.version, self.platform.os_name, self.platform.arch, e) stdout = getattr(e, "stdout", None) if stdout: m = LOG_STRIP_PATTERN.match(stdout.decode("utf-8")) assert m log.info(" %s", m.group(1)) stderr = getattr(e, "stderr", None) if stderr: m = LOG_STRIP_PATTERN.match(stderr.decode("utf-8")) assert m log.error(" %s", m.group(1)) raise log.debug("Build logs: %s", logs) @staticmethod def handle_log_entries(logs: Iterable[Dict[str, Any]]) -> Dict[str, Any]: """ Emit log entries to the log. If a log entry has a stream key, it is emitted as an INFO event. If a log entry has an errorDetail key, it is emitted as an ERROR event. If a log entry has an aux key, it is emitted as an INFO event and the dictionary contents are saved and returned. """ result: Dict[str, Any] = {} for log_entry in logs: if "stream" in log_entry: m = LOG_STRIP_PATTERN.match(log_entry["stream"]) assert m, "m failed to match %r" % log_entry["stream"] log.info(" %s", m.group(1)) if "errorDetail" in log_entry: m = LOG_STRIP_PATTERN.match(log_entry["errorDetail"]["message"]) assert m log.error(" %s", m.group(1)) if "aux" in log_entry: for key, value in log_entry["aux"].items(): result[key] = value log.info(" %s: %s", key, value) return
""" Record all instance methods in a 2 level dictionary so we can go through them and see which are overloaded { className: {methodName: [methodSpec, methodSpec, methodSpec]}} """ methodList = self.overloadedInstanceMethods.setdefault(methodSpec.name, []) methodList.append(methodSpec) def cullOverloadedMethods(self): """ Find all the entries that have multiple indexes for the same method name Get rid of all others. Do this for class methods and instance methods """ self.overloadedClassMethods = FFIOverload.cullOverloadedMethods(self.overloadedClassMethods) self.overloadedInstanceMethods = FFIOverload.cullOverloadedMethods(self.overloadedInstanceMethods) def filterOutStaticMethods(self): """ Run through the list of instance methods and filter out the ones that are static class methods. We can tell this because they do not have a this pointer in their arg list. Those methods that are static are then placed in a new staticMethods list and the ones that are left are stored back in the instanceMethods list. We are avoiding modifying the instanceMethods list in place while traversing it. Do not check upcast or downcast methods because we know they are not static. """ newInstanceMethods = [] for method in self.instanceMethods: if method.isStatic(): self.staticMethods.append(method) else: newInstanceMethods.append(method) self.instanceMethods = newInstanceMethods def recordOverloadedMethods(self): """ Record all the methods in dictionaries based on method name so we can see if they are overloaded """ classMethods = self.constructors + self.staticMethods if self.destructor: classMethods = classMethods + [self.destructor] for method in classMethods: self.recordClassMethod(method) instanceMethods = (self.instanceMethods + self.upcastMethods + self.downcastMethods) for method in instanceMethods: self.recordInstanceMethod(method) def hasMethodNamed(self, methodName): for method in (self.constructors + [self.destructor] + self.instanceMethods + self.upcastMethods + self.downcastMethods + self.staticMethods): if (method and (method.name == methodName)): return 1 return 0 def copyParentMethods(self, file, nesting): """ At multiple inheritance nodes, copy all the parent methods into this class and call them after upcasting us to that class """ if (len(self.parentTypes) >= 2 or \ (len(self.parentTypes) == 1 and self.hasMethodNamed('upcastTo' + self.parentTypes[0].foreignTypeName))): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Upcast inherited instance method wrappers #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') for parentType in self.parentTypes: parentList = [parentType] self.copyParentMethodsRecursively(parentList, file, nesting) def inheritsMethodNamed(self, parentList, methodName): """ returns true if the named method is a method on this class, or on any parent class except the last one in the list. 
""" if self.hasMethodNamed(methodName): return 1 for pi in range(len(parentList) - 1): if parentList[pi].hasMethodNamed(methodName): return 1 return 0 def copyParentMethodsRecursively(self, parentList, file, nesting): """ Copy all the parents instance methods Do not copy functions if this class already has a function with that name We need to recurse up the hierarchy copying all our parents nodes all the way up the tree stopping either at the top, or at another MI node that has already copied his parent's methods in Note: Do not copy the downcast methods """ parent = parentList[-1] if (len(parent.parentTypes) > 0): recurse = 1 else: recurse = 0 for method in parent.instanceMethods: if not self.inheritsMethodNamed(parentList, method.name): # with downcast for all instance methods that are not themselves upcasts method.generateInheritedMethodCode(self, parentList, file, nesting, 1) # Also duplicate the overloaded method dispatch functions, if # we don't already have any matching methods by this name. for methodSpecList in parent.overloadedInstanceMethods.values(): if not self.inheritsMethodNamed(parentList, methodSpecList[0].name): treeColl = FFIOverload.FFIMethodArgumentTreeCollection(self, methodSpecList) treeColl.generateCode(file, nesting) # Copy all the parents upcast methods so we transitively pick them up for method in parent.upcastMethods: if not self.inheritsMethodNamed(parentList, method.name): # no downcast for all instance methods that are themselves upcasts # that would cause an infinite loop method.generateInheritedMethodCode(self, parentList, file, nesting, 0) # Now recurse up the hierarchy until we get to a node that is itself # a multiple inheritance node and stop there because he will have already # copied all his parent functions in if recurse: for parentType in parent.parentTypes: newParentList = parentList[:] newParentList.append(parentType) self.copyParentMethodsRecursively(newParentList, file, nesting) def generateOverloadedMethods(self, file, nesting): """ Generate code for all the overloaded methods of this class """ if (len(self.overloadedClassMethods.values()) or len(self.overloadedInstanceMethods.values())): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Overloaded methods #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') # Overload all the class and instance methods for methodSpecList in (self.overloadedClassMethods.values() + self.overloadedInstanceMethods.values()): treeColl = FFIOverload.FFIMethodArgumentTreeCollection(self, methodSpecList) treeColl.generateCode(file, nesting) def generateGlobalCode(self, dir, extensionsDir): """ Generate shadow class code for this type. We make our own file form our foreignTypeName and put it in the dir passed in. 
""" fileName = self.foreignTypeName + '.py' fileName1 = self.foreignTypeName + '1.py' file = open(os.path.join(dir, fileName), 'w') indent(file, 0, FFIConstants.generatedHeader) self.outputBaseImports(file) self.generateCode1(file, 0, extensionsDir) file.close() file = open(os.path.join(dir, fileName1), 'w') indent(file, 0, FFIConstants.generatedHeader) #self.outputBaseImports(file) self.generateCode2(file, 0, extensionsDir, self.foreignTypeName) file.close() # Copy in any extensions we may have #self.copyExtensions(extensionsDir, file, 0) #self.outputClassFooter(file) file.close() def generateCode(self, file, nesting, extensionsDir=None): self.recordOverloadedMethods() self.cullOverloadedMethods() self.outputImports(file, nesting) self.outputClassHeader(file, nesting) self.outputClassComment(file, nesting) self.outputClassCModules(file, nesting) self.outputNestedTypes(file, nesting) indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Constructors #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') self.outputBaseConstructor(file, nesting) if self.constructors: for method in self.constructors: method.generateConstructorCode(self, file, nesting) else: self.outputEmptyConstructor(file, nesting) indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Destructor #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') self.outputBaseDestructor(file, nesting) if self.destructor: self.destructor.generateDestructorCode(self, file, nesting) # If you have no destructor, inherit one ########################## ## Extension methods moved up locally if extensionsDir: self.copyExtensions(extensionsDir, file, 0) ########################## ## import return types returnTypeModules = self.getReturnTypeModules() if len(returnTypeModules): for moduleName in returnTypeModules: indent(file, nesting, 'import ' + moduleName + '\n') ################################ if len(self.staticMethods): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Static Methods #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') for method in self.staticMethods: method.generateStaticCode(self, file, nesting) if len(self.instanceMethods): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Instance methods #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') for method in self.instanceMethods: method.generateMethodCode(self, file, nesting) if len(self.upcastMethods): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Upcast methods #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') for method in self.upcastMethods: method.generateUpcastMethodCode(self, file, nesting) if len(self.downcastMethods): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Downcast methods #\n') indent(file, nesting+1, 
'##################################################\n') indent(file, nesting+1, '\n') for method in self.downcastMethods: method.generateDowncastMethodCode(self, file, nesting) # Copy in all our parent nodes (only does work if we are an MI node) self.copyParentMethods(file, nesting) self.generateOverloadedMethods(file, nesting) def generateCode1(self, file, nesting, extensionsDir=None): self.recordOverloadedMethods() self.cullOverloadedMethods() self.outputImports(file, nesting) self.outputClassHeader(file, nesting) self.outputClassComment(file, nesting) self.outputClassCModules(file, nesting) self.outputNestedTypes(file, nesting) indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Constructors #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') self.outputBaseConstructor(file, nesting) if self.constructors: for method in self.constructors: method.generateConstructorCode(self, file, nesting) else: self.outputEmptyConstructor(file, nesting) indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Destructor #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') self.outputBaseDestructor(file, nesting) if self.destructor: self.destructor.generateDestructorCode(self, file, nesting) # If you have no destructor, inherit one ########################## ## Extension methods moved up locally if extensionsDir: self.copyExtensions(extensionsDir, file, 0) def generateCode2(self, file, nesting, extensionsDir, file1module): indent(file, nesting, 'from ' + file1module + ' import *\n') ########################## ## import return types returnTypeModules = self.getReturnTypeModules() if len(returnTypeModules): for moduleName in returnTypeModules: indent(file, nesting, 'import ' + moduleName + '\n') ################################ if len(self.staticMethods): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Static Methods #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') for method in self.staticMethods: method.generateStaticCode(self, file, nesting) if len(self.instanceMethods): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Instance methods #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') for method in self.instanceMethods: method.generateMethodCode(self, file, nesting) if len(self.upcastMethods): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Upcast methods #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') for method in self.upcastMethods: method.generateUpcastMethodCode(self, file, nesting) if len(self.downcastMethods): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Downcast methods #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') for method in self.downcastMethods: method.generateDowncastMethodCode(self, 
file, nesting) # Copy in all our parent nodes (only does work if we are an MI node) self.copyParentMethods(file, nesting) self.generateOverloadedMethods(file, nesting) def outputNestedTypes(self, file, nesting): if (len(self.nestedTypes) > 0): indent(file, nesting+1, '\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '# Nested Types #\n') indent(file, nesting+1, '##################################################\n') indent(file, nesting+1, '\n') # Output code in this same file for all our nested types
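# A minimal, self-contained sketch of the two-module layout that the two-file writer at the start of
# this section produces via generateCode1/generateCode2: `<Type>.py` holds the class shell,
# constructors, destructor and extensions, while `<Type>1.py` star-imports it and appends the
# remaining methods. Names here are hypothetical; note the `with` blocks also avoid the stray
# second file.close() visible in the writer above.
import os

def write_divided(out_dir, type_name, class_src, methods_src):
    # First file: the class shell (roughly what generateCode1 emits).
    with open(os.path.join(out_dir, type_name + '.py'), 'w') as f:
        f.write(class_src)
    # Second file: the method bodies, star-importing the first module
    # (roughly what generateCode2 emits via its file1module argument).
    with open(os.path.join(out_dir, type_name + '1.py'), 'w') as f:
        f.write('from %s import *\n' % type_name)
        f.write(methods_src)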
k here #replay buffer order: state, action, state_action_grad, reward, reward_grad, next_state, done, param_grad # assert len(self.replay_memory_buffer)==self.horizon policy_gradient = [] # print(len(self.replay_memory_buffer)) for index, value in enumerate(self.replay_memory_buffer): #index from 0 to horizon -1 state_action_grad = value[2] reward_grad = value[4] qp_param_grad = value[7] # print("grad",reward_grad @ state_action_grad @ qp_param_grad) policy_gradient.append( reward_grad @ state_action_grad @ qp_param_grad ) # print("policy gradient",policy_gradient[-1]) policy_gradient = np.asarray(policy_gradient) policy_gradient = np.sum(policy_gradient,axis = 0) # print(policy_gradient) print(f"alpha_nominal:{self.actor.alpha_nominal}, k:{self.actor.k_nominal}") self.actor.alpha_nominal = self.actor.alpha_nominal + self.beta*policy_gradient[0] self.actor.k_nominal = self.actor.k_nominal + self.beta*policy_gradient[1] # print("beta",self.beta) # clipping > 0 if self.actor.alpha_nominal < 0: self.actor.alpha_nominal = 0 if self.actor.k_nominal < 0: self.actor.k_nominal = 0 # print(f"policy_gradient:{policy_gradient}, alpha_nominal:{self.actor.alpha_nominal}, k_nominal:{self.actor.k_nominal}") def learn_and_update_weights_by_multiple_shooting(self): # Update alpha and k here #replay buffer order: state, action, state_action_grad, reward, reward_grad, next_state, done, param_grad # assert len(self.replay_memory_buffer)==self.horizon policy_gradient = [] # print(len(self.replay_memory_buffer)) for index, value in enumerate(self.replay_memory_buffer): #index from 0 to horizon -1 state_action_grad = value[2] reward_grad = value[4] qp_param_grad = value[7] # print("grad",reward_grad @ state_action_grad @ qp_param_grad) policy_gradient.append( reward_grad @ state_action_grad @ qp_param_grad ) # print("policy gradient",policy_gradient[-1]) policy_gradient = np.asarray(policy_gradient) policy_gradient = np.sum(policy_gradient,axis = 0) # print(policy_gradient) # print(f"alpha_nominal:{self.actor.alpha_nominal}, k:{self.actor.k_nominal}") self.actor.alpha1_nominal = self.actor.alpha1_nominal + self.beta*policy_gradient[0] self.actor.alpha2_nominal = self.actor.alpha2_nominal + self.beta*policy_gradient[1] self.actor.alpha3_nominal = self.actor.alpha3_nominal + self.beta*policy_gradient[2] self.actor.k_nominal = self.actor.k_nominal + self.beta*policy_gradient[3] # print("beta",self.beta) # clipping > 0 if self.actor.alpha1_nominal < 0: self.actor.alpha1_nominal = 0 if self.actor.alpha2_nominal < 0: self.actor.alpha2_nominal = 0 if self.actor.alpha3_nominal < 0: self.actor.alpha3_nominal = 0 if self.actor.k_nominal < 0: self.actor.k_nominal = 0 # print(f"policy_gradient:{policy_gradient}, alpha1_nom:{self.actor.alpha_nominal}, alpha2_nom:{self.actor.alpha2_nominal}, alpha3_nom:{self.actor.alpha3_nominal} k_nominal:{self.actor.k_nominal}") def policy(self,follower,target): solved, U, param_grad, delta = self.actor.policy(follower,target) return solved, U, param_grad, delta def train(args): # Exploration Parameters epsilon = 0.4 epsilon_decay = 0.999 epsilon_min = 0.05 epsilon2 = 0.4 epsilon2_decay = 0.999 epsilon2_min = 0.1#0.05 timestep_list = [] avg_timestep_list = [] reward_list = [] dt = 0.01 for ep in range(args.total_episodes): plt.ion() reward_episode = [] if ep % args.plot_freq ==0: fig = plt.figure() ax = plt.axes(xlim=(0,10),ylim=(-5,5)) lines, = ax.plot([],[],'o-') areas, = ax.fill([],[],'r',alpha=0.1) bodyF = ax.scatter([],[],c='r',s=10) bodyT = ax.scatter([],[],c='g',s=10) 
ax.set_xlabel("X") ax.set_ylabel("Y") ax.set_aspect(1) if args.movie==True: frames = [] # for storing the generated images # state = env.reset() episodic_reward = 0 timestep = 0 max_action = np.array([4,4]).reshape(-1,1) min_action = np.array([-4,-4]).reshape(-1,1) agent = policy_learning_agent(gamma = args.gamma, #discount factor lr_actor = args.lr_actor, #actor learning rate batch_size = args.batch_size, buffer_capacity = args.buffer_capacity, alpha = args.alpha,#2.0, k = args.k, #0.1, beta = args.lr_actor, max_action=max_action, min_action=min_action) agentF = Unicycle2D(np.array([0,0.2,0]),dt,3,FoV,max_D,min_D) agentT = SingleIntegrator(np.array([1,0]),dt,ax,0) TX_prev = agentT.X FX_prev = agentF.X TX = agentT.X FX = agentF.X state = np.array([FX[0,0],FX[1,0],FX[2,0],TX[0,0],TX[1,0]]) N = 3 GP_list = build_GP_model(N) gp_state = np.array([FX[2,0]])/np.pi*10.0 t = 0 t_plot = [] obs_d1 = [] obs_d2 = [] obs_d3 = [] true_d1 = [] true_d2 = [] true_d3 = [] pred_d1 = [] pred_d2 = [] pred_d3 = [] replay_buffer_gp_x = []#deque(maxlen = batch_size) replay_buffer_d1 = []#deque(maxlen = batch_size) replay_buffer_d2 = []#deque(maxlen = batch_size) replay_buffer_d3 = []#deque(maxlen = batch_size) reward_horizon = 0 reward_moving_avg = [] alphas = [] alpha1s = [] alpha2s = [] alpha3s = [] ks = [] deltas = [] h1s = [] h2s = [] h3s = [] TXs = [] actions = [] metadata = dict(title='Movie Test', artist='Matplotlib',comment='Movie support!') writer = FFMpegWriter(fps=15, metadata=metadata) with writer.saving(fig, args.movie_name, 100): # if 1: for st in range(args.max_steps): # for each step, move 10 times?? # re-initialize everything if horizon reached if st % args.horizon == 0: reward_horizon = 0 # agent.replay_memory_buffer = deque(maxlen = args.buffer_capacity) epsilon = epsilon*epsilon_decay if epsilon<epsilon_min: epsilon = epsilon_min epsilon2 = epsilon2*epsilon2_decay if epsilon2<epsilon2_min: epsilon2 = epsilon2_min # uL = 0.1 # vL = 0.2*np.sin(np.pi*t/5) # 0.1 # uL = 0.5 # vL = 2.6*np.sin(np.pi*t) # 0.1 # 1.2 uL = 1.0 vL = 12*np.sin(np.pi*t*4) # 0.1 # 1.2 # Exploration vs Exploitation rand_sample = np.random.random() if 0:#and_sample<epsilon: found_valid = False action = np.array([0,0]).reshape(-1,1) param_grad = np.array([[0,0],[0,0]]) solved = False else: solved, action, param_grad, delta = agent.policy(agentF,agentT) if solved==False: print("************ ERROR: problem infeasible **************") break; # exit() # Predict GP dynamics if t==0: d1 = 0; d2 = 0; d3 = 0 else: d, d_std = predict_GP_dynamics( GP_list,N,agentF.X[2,0] ) d1 = d[0]; d2 = d[1]; d3 = d[2] replay_buffer_gp_x.append(agentF.X[2,0]) # pred_d1.append(d1); pred_d2.append(d2); pred_d3.append(d3) ## actual # agentF.f_corrected = np.array([agentF.d1*np.cos(agentF.X[2][0]),agentF.d2*np.sin(agentF.X[2][0]),agentF.d3]).reshape(-1,1) ## predicted agentF.f_corrected = np.array([d1*np.cos(agentF.X[2][0]),d2*np.sin(agentF.X[2][0]),d3]).reshape(-1,1) ## ignored # agentF.f_corrected = np.array([0*np.cos(agentF.X[2][0]),0*np.sin(agentF.X[2][0]),0]).reshape(-1,1) # Propagate state state_action_grad = agentF.g + agentF.g_corrected U_L = np.array([uL,vL]).reshape(-1,1) FX = agentF.step(action.reshape(-1,1)) TX = agentT.step(uL,vL) # get observed disturbance if (np.abs(np.cos(FX_prev[2,0]))>0.01): d1_obs = ( (FX[0,0]-FX_prev[0,0])/dt - action[0,0]*np.cos(FX_prev[2,0]) )/np.cos(FX_prev[2,0]) else: d1_obs = 0 if (np.abs(np.sin(FX_prev[2,0]))>0.01): d2_obs = ( (FX[1,0]-FX_prev[1,0])/dt - action[0,0]*np.sin(FX_prev[2,0]) )/np.sin(FX_prev[2,0]) 
else: d2_obs = 0 d3_obs = wrap_angle(FX[2,0]-FX_prev[2,0])/dt - action[1,0] obs_d1.append(d1_obs); obs_d2.append(d2_obs); obs_d3.append(d3_obs) replay_buffer_d1.append(d1_obs) replay_buffer_d2.append(d2_obs) replay_buffer_d3.append(d3_obs) # s = time.time() update_GP_dynamics(GP_list,replay_buffer_gp_x, replay_buffer_d1, 0, replay_buffer_gp_x[-1]) update_GP_dynamics(GP_list,replay_buffer_gp_x, replay_buffer_d2, 1, replay_buffer_gp_x[-1]) update_GP_dynamics(GP_list,replay_buffer_gp_x, replay_buffer_d3, 2, replay_buffer_gp_x[-1]) # print("update time",time.time()-s) # Compute reward # print(f"fx:{agentF.X}, tx:{agentT.X}") reward, reward_grad, h1, h2, h3 = agentF.compute_reward(agentT) reward_horizon += reward reward_episode.append(reward) reward_prev = reward episodic_reward += reward # add to buffer next_state = np.array([FX[0,0],FX[1,0],FX[2,0],TX[0,0],TX[1,0]]) # animation plot if ep % args.plot_freq ==0: lines, areas, bodyF = agentF.render(lines,areas,bodyF) bodyT = agentT.render(bodyT) fig.canvas.draw() fig.canvas.flush_events() if args.movie: writer.grab_frame() if reward<0: done = True else: done = False agent.add_to_replay_memory(state, action, state_action_grad, reward, reward_grad, next_state, done, param_grad) # Update loop variables state = next_state timestep += 1 TX_prev = TX FX_prev = FX t += dt alphas.append(agent.actor.alpha_nominal) alpha1s.append(agent.actor.alpha1_nominal) alpha2s.append(agent.actor.alpha2_nominal) alpha3s.append(agent.actor.alpha3_nominal) ks.append(agent.actor.k_nominal) t_plot.append(t) deltas.append(delta) h1s.append(h1) h2s.append(h2) h3s.append(h3) TXs.append(TX) actions.append(action) if done: print("Became Unsafe") break if (st+1) % args.horizon == 0 and st>=(args.buffer_capacity-1) and args.train==True: # print(len(agent.replay_memory_buffer)) reward_moving_avg.append(reward_horizon) agent.learn_and_update_weights_by_multiple_shooting() # print(reward_episode) # print(reward_moving_avg) return reward_episode, reward_moving_avg, alphas, ks, t_plot, deltas, h1s, h2s, h3s, TXs, actions, alpha1s, alpha2s, alpha3s # print("became unsafe") # reward_list.append(reward_episode) # print("Episode Reward: ",episodic_reward) # plt.ioff() # plt.figure() # plt.plot(reward_moving_avg) # plt.title("moving reward") # plt.figure() # plt.plot(reward_episode) # plt.title("reward") # plt.show() import argparse parser = argparse.ArgumentParser(description='td3') parser.add_argument('--env-name', default="BipedalWalkerHardcore-v3") parser.add_argument('--rl-name', default="td3") parser.add_argument('--gamma', type=float, default=0.99,metavar='G',help='discounted factor') parser.add_argument('--tau', type=float, default=0.01, metavar='G',help='target smoothing coefficient(τ)') parser.add_argument('--lr_actor', type=float, default=0.03, metavar='G',help='learning rate of actor') #0.003 parser.add_argument('--lr-critic', type=float, default=0.03, metavar='G',help='learning rate of critic') #0.003 parser.add_argument('--plot_freq', type=float, default=1, metavar='G',help='plotting frequency') parser.add_argument('--seed', type=int, default=123456, metavar='N',help='random seed (default: 123456)') parser.add_argument('--batch-size', type=int, default=10, metavar='N', help='batch size (default: 256)') #100 parser.add_argument('--buffer-capacity', type=int, default=20, metavar='N', help='buffer_capacity') #10 parser.add_argument('--max-steps', type=int, default=200, metavar='N',help='maximum number of steps of each episode') #70 parser.add_argument('--total-episodes', 
type=int, default=1, metavar='N',help='total training episodes') #1000 parser.add_argument('--policy-freq', type=int, default=500, metavar='N',help='update frequency of target network ') parser.add_argument('--start-timestep', type=int, default=10000, metavar='N',help='number of steps using random policy') parser.add_argument('--horizon', type=int, default=2, metavar='N',help='RL time horizon') #3 parser.add_argument('--alpha', type=float, default=0.15, metavar='G',help='CBF parameter') #0.003 parser.add_argument('--k', type=float, default=0.1, metavar='G',help='CLF parameter') #0.003 parser.add_argument('--train', type=float, default=True, metavar='G',help='CLF parameter') #0.003 parser.add_argument('--movie', type=float, default=True, metavar='G',help='CLF parameter') #0.003 parser.add_argument('--movie_name', default="test_temp.mp4") args = parser.parse_args("") Alphas = [0.15]#[0.0] #0.15 #0.115 Ks = [0.1] #0.1 #2.0 Trains = [True, False] # Betas = [-0.5,-0.2, -0.05, -0.03, 0, 0.03, 0.05, 0.2, 0.5] # Betas = [0.4, 0] Betas = [0.8, 0.0] # Betas = [0.2, 0.4, 0.6, 0.8] movie_names = ['Adaptive.mp4','Non-Adaptive.mp4'] reward_episodes = [] reward_horizons = [] # plt.figure() figure1, axis1 = plt.subplots(2, 2) # plt.figure() figure2, axis2 = plt.subplots(1, 1) # plt.figure() figure3, axis3 = plt.subplots(1, 1) figure4, axis4 = plt.subplots(2, 1) figure5, axis5 = plt.subplots(2, 1) colors = ['purple','maroon','red','salmon','k','yellow','yellowgreen','darkgreen','teal'] colors = ['r','g','b','k'] colors2 = colors.copy() colors2.reverse() index = 0 for Alpha in Alphas: for K in Ks: for Beta in Betas: args.alpha = Alpha args.k = K args.lr_actor = Beta args.train = True # args.movie_name = movie_names[index] if index==0: name = 'Adaptive Parameter' else: name = 'Constant parameter' # name = 'test' episode_reward, moving_reward, alphas, ks, t_plot, deltas, h1s, h2s, h3s, TXs, actions, alpha1s, alpha2s, alpha3s = train(args) # Reward Plot axis2.plot(t_plot,episode_reward,c = colors[index],label = name) # Parameter Plot axis1[0,0].plot(t_plot,alpha1s,c = colors[index],label = name) axis1[1,0].plot(t_plot,alpha2s,c = colors[index],label = name) axis1[0,1].plot(t_plot,alpha3s,c = colors[index],label = name) axis1[1,1].plot(t_plot,ks,c = colors[index],label = name) # axis2[1].plot(t_plot,deltas,c = 'r',label='slack') if index == 0: mark = '--' if index == 1: mark = '.' # Barrier Function Plots axis3.plot(t_plot,h1s,('g' + mark),label = 'h1 '+ name) # style = colors[index]+'.' # print(style) axis3.plot(t_plot,h2s,('b'+mark),label = 'h2 ' + name) axis3.plot(t_plot,h3s,('k'+mark),label = 'h3 ' + name) # Target Movement Plot axis4[0].plot(t_plot,[x[0] for x in TXs],c = 'r',label='X') axis4[1].plot(t_plot,[x[1] for x in TXs],c = 'g',label='Y') #
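# Both learn_and_update_weights_by_* methods above implement the same chain rule: for each buffered
# step, d(reward)/d(params) = reward_grad @ state_action_grad @ qp_param_grad; the per-step gradients
# are summed over the horizon, a gradient-ascent step of size beta is taken, and the parameters are
# clipped at zero. A minimal numpy sketch of that update (array shapes below are illustrative
# assumptions, not taken from the code):
import numpy as np

def update_params(params, replay_buffer, beta):
    # replay buffer order, as documented above: state, action,
    # state_action_grad, reward, reward_grad, next_state, done, param_grad
    grads = [value[4] @ value[2] @ value[7] for value in replay_buffer]
    g = np.sum(np.asarray(grads), axis=0)  # sum over the horizon
    params = params + beta * g             # ascent on the summed reward
    return np.maximum(params, 0.0)         # clip: alpha*, k must stay >= 0

# toy check: reward_grad (5,), state_action_grad (5,2), qp_param_grad (2,4)
step = (None, None, np.ones((5, 2)), None, np.ones(5), None, False,
        np.ones((2, 4)))
print(update_params(np.zeros(4), [step] * 3, beta=0.8))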
<reponame>AlanConstantino/scuffed-space-cadet import pygame, sys, random, time # helper function that calls kill method on entities passed def kill_all(entities): for entity in entities: entity.kill() # initialize pygame.init() pygame.font.init() pygame.mixer.init() # Window title pygame.display.set_caption('Scuffed Space Cadet') # resolution SCREEN_WIDTH = 800 SCREEN_HEIGHT = 1080 SCREEN = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT)) # sound volume MASTER_VOLUME = 0.5 SFX_VOLUME = 0.5 BGM_VOLUME = 0.8 # seconds till a new enemy spawns SECONDS_ENEMY_SPAWN = 3 before = time.time() # for delta time calculations prev_time = time.time() dt = 0 # FPS clock = pygame.time.Clock() FPS = 60 # assets ENEMIES = [ './assets/images/enemy1.png', # red enemy './assets/images/enemy2.png', # white enemy './assets/images/ship7.png', # green enemy ] PLAYER_IMG = './assets/images/player.png' BULLET_IMG = './assets/images/bullet2.png' ALIEN_IMG = './assets/images/alien.png' # alien boss ship # Sound FX Path BGM = './assets/sound/music/ror1-monsoon.wav' ALIEN_HOVERING_SOUND = './assets/sound/sfx/alien-ship-engine.ogg' SCORE_SOUND = './assets/sound/sfx/bonus.wav' DEATH_SOUND = './assets/sound/sfx/death.wav' EXPLOSION_SOUND = './assets/sound/sfx/explosion.ogg' BOSS_GUN_SOUND = './assets/sound/sfx/laserLarge_001.ogg' PLAYER_GUN_SOUND = './assets/sound/sfx/laserSmall2.ogg' ENEMY_GUN_SOUND = './assets/sound/sfx/laserSmall4.ogg' WIN_SOUND = './assets/sound/sfx/win.wav' # audio channels pygame.mixer.set_reserved(1) channel1 = pygame.mixer.Channel(0) # boss ship engine sound channel2 = pygame.mixer.Channel(1) # unused channel3 = pygame.mixer.Channel(2) # unused channel4 = pygame.mixer.Channel(3) # unused channel5 = pygame.mixer.Channel(4) # unused channel6 = pygame.mixer.Channel(5) # unused channel7 = pygame.mixer.Channel(6) # unused channel8 = pygame.mixer.Channel(7) # unused # BGM music pygame.mixer.music.load(BGM) pygame.mixer.music.play(-1) pygame.mixer.music.set_volume(MASTER_VOLUME * BGM_VOLUME) # the player is the spaceship class Spaceship(pygame.sprite.Sprite): def __init__(self, picture_path, speed = 550): super().__init__() self.prev_speed = 0 self.is_invincible = False # boolean, player is either invincible or he is not self.last_invincible = 0 # last time the player went invincible self.seconds_invincible = 2 # how long invincibility will last self.speed = speed self.image = pygame.transform.scale(pygame.image.load(picture_path).convert(), (50, 50)) # size of image self.rect = self.image.get_rect() # size of hurtbox self.rect.center = [(SCREEN_WIDTH * 0.5), (SCREEN_HEIGHT * 0.9)] # ship's spawn point self.lives = 4 self.score = 0 self.is_alive = True self.has_won = False self.has_lost = False self.last_shot = time.time() self.shot_delay = 0.03 self.damage = 10 self.sounds = { 'explode': { 'has_played': False, 'sound': pygame.mixer.Sound(EXPLOSION_SOUND), }, 'shoot': { 'has_played': False, 'sound': pygame.mixer.Sound(PLAYER_GUN_SOUND), }, 'score': { 'has_played': False, 'sound': pygame.mixer.Sound(SCORE_SOUND) }, 'death': { 'has_played': False, 'sound': pygame.mixer.Sound(DEATH_SOUND), }, 'win': { 'has_played': False, 'sound': pygame.mixer.Sound(WIN_SOUND), }, } # setting volume level for sounds for data in self.sounds: self.sounds[data]['sound'].set_volume(MASTER_VOLUME * SFX_VOLUME) def shoot(self): bullet = Bullet(BULLET_IMG, self, 1800, 'up', (self.rect.width * 0.5), 0) # what the bullet looks like ALL_SPRITES.add(bullet) # adding bullet to ALL_SPRITES group player_bullet_group.add(bullet) # 
added bullet to bullet group def start_invincibility(self): self.is_invincible = True self.last_invincible = time.time() def stop_invincibility(): self.is_invincible = False self.last_invincible = 0 def update(self, dt): pressed_keys = pygame.key.get_pressed() time_to_remove_i_frames = (time.time() - self.last_invincible) >= self.seconds_invincible # removes player's invincibility if self.is_invincible and time_to_remove_i_frames: self.is_invincible = False # self.last_invincible = time.time() # move up if self.rect.top > 0: if pressed_keys[pygame.K_UP] or pressed_keys[pygame.K_w]: self.rect.y -= self.speed * dt # updates original y value of rect # self.rect.move_ip(0, -self.speed * dt) # modifies original rect by moving it in place # self.rect = self.rect.move(0, -self.speed * dt) # creates a new rect and reassigns old rect to newly created rect # move down if self.rect.bottom < SCREEN_HEIGHT: if pressed_keys[pygame.K_DOWN] or pressed_keys[pygame.K_s]: self.rect.y += self.speed * dt # self.rect.move_ip(0, self.speed * dt) # self.rect = self.rect.move(0, self.speed * dt) # move left if self.rect.left > 0: if pressed_keys[pygame.K_LEFT] or pressed_keys[pygame.K_a]: self.rect.x -= self.speed * dt # self.rect.move_ip(-self.speed * dt, 0) # self.rect = self.rect.move(-self.speed * dt, 0) # move right if self.rect.right < SCREEN_WIDTH: if pressed_keys[pygame.K_RIGHT] or pressed_keys[pygame.K_d]: self.rect.x += self.speed * dt # self.rect.move_ip(self.speed * dt, 0) # self.rect = self.rect.move(self.speed * dt, 0) # shoot when space is pressed if pressed_keys[pygame.K_SPACE]: now = time.time() time_to_shoot = (now - self.last_shot) >= self.shot_delay if time_to_shoot: channel1.play(self.sounds['shoot']['sound'], 0) self.sounds['shoot']['has_played'] = True self.shoot() self.last_shot = now def draw(self, surface): surface.blit(self.image, self.rect) class Bullet(pygame.sprite.Sprite): def __init__(self, picture_path, entity, speed = 800, direction = 'up', x_off = 0, y_off = 0): super().__init__() self.prev_speed = 0 self.speed = speed self.image = pygame.transform.scale(pygame.image.load(picture_path).convert(), (5, 20)) self.rect = self.image.get_rect() self.rect.center = [entity.rect.x + x_off, entity.rect.y + y_off] self.direction = direction def update(self, dt): # shoot bullet upwards if self.direction == 'up': self.rect.move_ip(0, -self.speed * dt) # shoot bullet downwards if self.direction == 'down': self.rect.move_ip(0, self.speed * dt) # remove bullet if top of screen is reached # player can only shoot upward if self.rect.y < 0: self.kill() # remove bullet if bottom of screen is reached # enemies can only shoot downward if self.rect.y > SCREEN_HEIGHT: self.kill() def draw(self, surface): surface.blit(self.image, self.rect) class Enemyship(pygame.sprite.Sprite): def __init__(self, picture_path): super().__init__() self.prev_speed = 0 self.speed = random.randint(200, 450) self.ammo = random.randint(1, 6) self.image = pygame.transform.scale(pygame.transform.flip(pygame.image.load(picture_path).convert(), False, True), (60, 60)) # image self.rect = self.image.get_rect(w = 60, h = 30) # hurtbox self.rect.center = [random.randint(50, SCREEN_WIDTH - 50), -10] # ship's spawn point self.last_shot = time.time() self.shot_delay = 0.2 self.sounds = { 'shoot': pygame.mixer.Sound(ENEMY_GUN_SOUND), } for data in self.sounds: self.sounds[data].set_volume(MASTER_VOLUME * SFX_VOLUME) def shoot(self): now = time.time() time_to_shoot = (now - self.last_shot) >= self.shot_delay if time_to_shoot: 
self.sounds['shoot'].play() new_bullet = Bullet(BULLET_IMG, self, 600, 'down', (self.rect.width * 0.5), (self.rect.height * 0.5) + 50) enemy_bullet_group.add(new_bullet) ALL_SPRITES.add(new_bullet) self.ammo -= 1 self.last_shot = time.time() def update(self, dt): self.rect.move_ip(0, self.speed * dt) if self.ammo > 0: self.shoot() if self.rect.bottom > SCREEN_HEIGHT: self.rect.x = random.randint(0, SCREEN_WIDTH - 50) self.rect.y = 0 self.kill() def draw(self, surface): surface.blit(self.image, self.rect) class Boss(pygame.sprite.Sprite): def __init__(self, picture_path, speed = 300, ammo = random.randint(10, 20)): super().__init__() self.prev_speed = 0 self.speed = speed self.ammo = ammo self.image = pygame.transform.scale(pygame.image.load(picture_path).convert(), (300, 125)) # image self.rect = self.image.get_rect() # hurtbox self.rect.center = [SCREEN_WIDTH * 0.5, 150] # ship's spawn point self.health = 10000 self.is_alive = False self.last_shot = time.time() self.shot_delay = 0.5 self.sounds = { 'shoot': pygame.mixer.Sound(BOSS_GUN_SOUND), } for data in self.sounds: self.sounds[data].set_volume(MASTER_VOLUME * SFX_VOLUME) def shoot(self): rand_fire = random.randint(5, 10) rand_operation = random.randint(0, 1) operations = ['add', 'subtract'] for i in range(rand_fire): fire = 0 if operations[rand_operation] == 'add': x_rand = (self.rect.width * 0.5) + random.randint(0, 100) y_rand = random.randint(int(self.rect.height * 0.5), self.rect.height) fire = Bullet(BULLET_IMG, self, random.randint(800, 1500), 'down', x_rand, y_rand) boss_bullet_group.add(fire) if operations[rand_operation] == 'subtract': x_rand = (self.rect.width * 0.5) - random.randint(0, 100) y_rand = random.randint(int(self.rect.height * 0.5), self.rect.height) fire = Bullet(BULLET_IMG, self, random.randint(800, 1500), 'down', x_rand, y_rand) boss_bullet_group.add(fire) ALL_SPRITES.add(fire) self.sounds['shoot'].play() def update(self, dt): self.rect.move_ip(self.speed * dt, 0) time_to_shoot = (time.time() - self.last_shot) >= self.shot_delay if time_to_shoot: self.shoot() self.last_shot = time.time() # bounce off left wall if self.rect.left >= 0: self.speed *= -1 # bounce off right wall if self.rect.right <= SCREEN_WIDTH: self.speed *= -1 def draw(self, surface): surface.blit(self.image, self.rect) # Class wrapper for writing Text HUDs class TextHUD: def __init__(self, size = 32, text = '', font = 'freesansbold.ttf', pos = (0, 0), color1 = (255, 255, 255), color2 = None): self.size = size self.font = pygame.font.Font(font, size) self.text = self.font.render(str(text), True, color1, color2) self.pos = (abs(pos[0] - size), abs(pos[1] - size)) self.show = True def set_visibility(self, visibility): self.show = visibility def update(self, text, anti_alias = True, color1 = (255, 255, 255), color2 = None): if self.show == True: self.text = self.font.render('{}'.format(str(text)), True, color1, color2) def draw(self, surface): if self.show == True: surface.blit(self.text, self.pos) # to store all sprites for easy manipulation ALL_SPRITES = pygame.sprite.Group() # Groups player_group = pygame.sprite.GroupSingle() # player boss_group = pygame.sprite.GroupSingle() # boss enemy_group = pygame.sprite.Group() # enemies player_bullet_group = pygame.sprite.Group() # player bullets boss_bullet_group = pygame.sprite.Group() # boss bullets enemy_bullet_group = pygame.sprite.Group() # enemy bullets # Player player = Spaceship(PLAYER_IMG) player_group.add(player) ALL_SPRITES.add(player) # Alien Boss boss = Boss(ALIEN_IMG) spawn_regular_enemies 
= True SPAWN_BOSS_SCORE = 10000 # boss sound boss_sound = pygame.mixer.Sound(ALIEN_HOVERING_SOUND) channel1.play(boss_sound, -1) channel1.pause() # HUD center = (int(SCREEN_WIDTH * 0.5), int(SCREEN_HEIGHT * 0.5)) lives_HUD = TextHUD(text = 'x{}'.format(player.lives), pos = (SCREEN_WIDTH - 20, 50)) score_HUD = TextHUD(text = 'Score: {}'.format(0), pos = (45, 50)) boss_health_HUD = TextHUD(text = '{:,}/10,000'.format(10000), pos = (SCREEN_WIDTH * 0.5 - 60, 50)) # game state text to be displayed on screen at a later time win_text = TextHUD(text = 'YOU WIN', pos = center) game_over_text = TextHUD(text = 'YOU DIED', pos = center, color1 = (220, 20, 60)) paused_text = TextHUD(text = 'PAUSED', pos = center, color1 = (255, 255, 255)) # container for player and boss hud ALL_HUDS = [] ALL_HUDS.append(lives_HUD) ALL_HUDS.append(score_HUD) ALL_HUDS.append(boss_health_HUD) # container for text screens ALL_TEXT = [] ALL_TEXT.append(win_text) ALL_TEXT.append(game_over_text) ALL_TEXT.append(paused_text) PAUSED = False # main game loop while True: clock.tick(FPS) SCREEN.fill((0, 0, 0)) # black pygame.time.delay(10) # delta time now = time.time() dt = now - prev_time prev_time = now score_HUD.update(text = 'Score: {}'.format(player.score)) #################### ###### CHECKS ######
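# The main loop above recomputes dt = now - prev_time each frame, and every update() scales movement
# by it, so speeds are expressed in pixels per second rather than pixels per frame. A minimal,
# pygame-free sketch of that frame-rate-independent movement pattern:
import time

prev_time = time.time()
y = 100.0
speed = 550             # pixels per second, as in Spaceship

for _ in range(3):      # stand-in for the `while True:` game loop
    time.sleep(1 / 60)  # stand-in for clock.tick(FPS)
    now = time.time()
    dt = now - prev_time    # seconds elapsed since the previous frame
    prev_time = now
    y -= speed * dt         # e.g. "move up" while W/UP is held
print(y)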
# -*- coding: utf-8 -*- """PanSTARRS 1 bands from Mega-Cam gen1 bands.""" __author__ = "<NAME>" __credits__ = [ "http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html" ] __all__ = [ "U_MP9301", "G_MP9401", "R_MP9601", "I_MP9701", "Z_MP9801", "UmG", "UmR", "UmI", "UmZ", "GmR", "GmI", "GmZ", "RmI", "RmZ", "ImZ", ] ############################################################################# # IMPORTS import warnings from ....units import mag as MAG from ....units import quantity_io from ..data import read_MegaCamGen1_from_PS1 ############################################################################# # PARAMETERS data = read_MegaCamGen1_from_PS1() ############################################################################# # CODE ############################################################################# @quantity_io() def U_MP9301(ps, **kw) -> MAG: r"""Convert Pan-STARRS1 bands to CFHT U-MP9301 band. Parameters ---------- ps: astropy Table need: g col either: i, g-i col g: str g column name (default 'g') i: str i column name (default 'i') gmi: str g-i column name (default 'g-i') Returns ------- U_MP9301 : Quantity array_like CFHT u-band Notes ----- The conversion is:: u_CFHT = g_PS + .523 - .343 gmi + 2.44 gmi^2 - .998 gmi^3 where `gmi = g_PS - i_PS` in the range `0.3 < gmi < 1.5 [mag]`. filter transformations from `Pan-STARRS to MegaCam plots. Top row, 1st plot <http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html>`_. """ g, i = kw.get("g", "g"), kw.get("i", "i") gmi = kw.get("gmi", "g-i") c = data.loc["U_MP9301"] if gmi in ps.colnames: gmi = ps[gmi] else: gmi = ps[g] - ps[i] ind = (c["lb"] < gmi) & (gmi < c["ub"]) if not all(ind): warnings.warn("MCg1.U: not all .3 mag < (g-i)_ps < 1.5 mag") g_ps = ps[g] u_cfht = ( g_ps + c["c0"] + (c["c1"] * gmi) + (c["c2"] * gmi ** 2) + (c["c3"] * gmi ** 3) ) return u_cfht # /def @quantity_io() def G_MP9401(ps, **kw) -> MAG: r"""Convert Pan-STARRS1 bands to CFHT G-MP9401 band. Parameters ---------- ps: astropy Table need: g col either: i, g-i col g: str (default 'g') g column name i: str (default 'i') i column name gmi: str (default 'g-i') g-i column name Returns ------- G_MP9401 : Quantity array_like CFHT g-band Notes ----- .. math:: g_{CFHT} = g_{PS} -.001 - .004 gmi - .0056 gmi^2 + .00292 gmi^3 where :math:`gmi \equiv g_{PS}-i_{PS}` in the range :math:`-1 \rm{mag} < g-i < 4 \rm{mag}` filter transformations from `Pan-STARRS to MegaCam plots. Top row, 2nd plot <http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html>`_. """ g, i = kw.get("g", "g"), kw.get("i", "i") gmi = kw.get("gmi", "g-i") if gmi in ps.colnames: gmi = ps[gmi] else: gmi = ps[g] - ps[i] ind = (-1.0 * MAG < gmi) & (gmi < 4 * MAG) if not all(ind): warnings.warn("MCg1.G: not all -1 mag < (g-i)_ps < 4 mag") c0 = -0.001 * MAG c1 = -0.004 c2 = -0.0056 / MAG c3 = 0.00292 / MAG ** 2 g_ps = ps[g] g_cfht = g_ps + c0 + (c1 * gmi) + (c2 * gmi ** 2) + (c3 * gmi ** 3) return g_cfht # /def @quantity_io() def R_MP9601(ps, **kw) -> MAG: r"""Convert Pan-STARRS1 bands to CFHT R-MP9601 band. Parameters ---------- ps: astropy Table need: r, col either: (g & i), g-i col g: str (default 'g') g column name r: str (default 'r') r column name i: str (default 'i') i column name gmi: str (default 'g-i') g-i column name Returns ------- R_MP9601 : Quantity array_like CFHT r-band Notes ----- .. 
math:: r_{CFHT} = r_{PS} + .002 - .017 gmi + .00554 gmi^2 - .000692 gmi^3 where :math:`gmi \equiv g_{PS}-i_{PS}` in the range :math:`-1 \rm{mag} < g-i < 4 \rm{mag}` filter transformations from `Pan-STARRS to MegaCam plots. Top row, 3rd plot <http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html>`_. """ g, r, i = kw.get("g", "g"), kw.get("r", "r"), kw.get("i", "i") gmi = kw.get("gmi", "g-i") if gmi in ps.colnames: gmi = ps[gmi] else: gmi = ps[g] - ps[i] ind = (-1.0 * MAG < gmi) & (gmi < 4 * MAG) if not all(ind): warnings.warn("MCg1.R: not all -1 mag < (g-i)_ps < 4 mag") c0 = 0.002 * MAG c1 = -0.017 c2 = 0.00554 / MAG c3 = -0.000692 / MAG ** 2 r_ps = ps[r] r_cfht = r_ps + c0 + (c1 * gmi) + (c2 * gmi ** 2) + (c3 * gmi ** 3) return r_cfht # /def @quantity_io() def I_MP9701(ps, **kw) -> MAG: r"""Convert Pan-STARRS1 bands to CFHT I-MP9701 band. Parameters ---------- ps: astropy Table need: i col either: g, g-i col g: str (default 'g') g column name i: str (default 'i') i column name gmi: str (default 'g-i') g-i column name Returns ------- I_MP9701 : Quantity array_like CFHT i-band Notes ----- .. math:: i_{CFHT} = i_{PS} + .001 - .021 gmi + .00398 gmi^2 - .00369 gmi^3 where :math:`gmi \equiv g_{PS}-i_{PS}` in the range :math:`-1 \rm{mag} < g-i < 4 \rm{mag}` filter transformations from `Pan-STARRS to MegaCam plots. 2nd row, 1st plot <http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html>`_. """ g, i = kw.get("g", "g"), kw.get("i", "i") gmi = kw.get("gmi", "g-i") if gmi in ps.colnames: gmi = ps[gmi] else: gmi = ps[g] - ps[i] ind = (-1.0 * MAG < gmi) & (gmi < 4 * MAG) if not all(ind): warnings.warn("MCg1.I: not all -1 mag < (g-i)_ps < 4 mag") c0 = 0.001 * MAG c1 = -0.021 c2 = 0.00398 / MAG c3 = -0.00369 / MAG ** 2 i_ps = ps[i] i_cfht = i_ps + c0 + (c1 * gmi) + (c2 * gmi ** 2) + (c3 * gmi ** 3) return i_cfht # /def @quantity_io() def Z_MP9801(ps, **kw) -> MAG: r"""Convert Pan-STARRS1 bands to CFHT Z-MP9801 band. Parameters ---------- ps: astropy Table need: z col either: (g & i), g-i col g: str (default 'g') g column name i: str (default 'i') i column name z: str (default 'z') z column name gmi: str (default 'g-i') g-i column name Returns ------- Z_MP9801 : Quantity array_like CFHT z-band Notes ----- .. math:: z_{CFHT} = z_{PS} -.009 - .029 gmi + .012 gmi^2 - .00367 gmi^3 where :math:`gmi \equiv g_{PS}-i_{PS}` in the range :math:`-1 \rm{mag} < g-i < 4 \rm{mag}` filter transformations from `Pan-STARRS to MegaCam plots. 2nd row, 2nd plot <http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html>`_. """ g, i, z = kw.get("g", "g"), kw.get("i", "i"), kw.get("z", "z") gmi = kw.get("gmi", "g-i") if gmi in ps.colnames: gmi = ps[gmi] else: gmi = ps[g] - ps[i] ind = (-1.0 * MAG < gmi) & (gmi < 4 * MAG) if not all(ind): warnings.warn("MCg1.Z: not all -1 mag < (g-i)_ps < 4 mag") c0 = -0.009 * MAG c1 = -0.029 c2 = 0.012 / MAG c3 = -0.00367 / MAG ** 2 z_ps = ps[z] z_cfht = z_ps + c0 + (c1 * gmi) + (c2 * gmi ** 2) + (c3 * gmi ** 3) return z_cfht # /def ############################################################################# # Colors @quantity_io() def UmG(ps, **kw) -> MAG: """U-G color. Parameters ---------- ps : astropy.table.Table need arguments for u(g)-band functions kwargs passes to U & G-band functions Returns ------- U-G color : Quantity array_like """ return U_MP9301(ps, **kw) - G_MP9401(ps, **kw) # /def @quantity_io() def UmR(ps, **kw) -> MAG: """U-R color. 
Parameters ---------- ps : astropy.table.Table need arguments for u(r)-band functions kwargs passes to U & R-band functions Returns ------- U-G color : Quantity array_like """ return U_MP9301(ps, **kw) - R_MP9601(ps, **kw) # /def @quantity_io() def UmI(ps, **kw) -> MAG: """U-I color. Parameters ---------- ps : astropy.table.Table need arguments for u(i)-band functions kwargs passes to U & I-band functions Returns ------- U-I color : Quantity array_like """ return U_MP9301(ps, **kw) - I_MP9701(ps, **kw) # /def @quantity_io() def UmZ(ps, **kw) -> MAG: """U-Z color. Parameters ---------- ps : astropy.table.Table need arguments for u(z)-band functions kwargs passes to U & Z-band functions Returns ------- U-Z color : Quantity array_like """ return U_MP9301(ps, **kw) - Z_MP9801(ps, **kw) # /def @quantity_io() def GmR(ps, **kw) -> MAG: """G-R color. Parameters ---------- ps : astropy.table.Table need arguments for g(r)-band functions kwargs passes to G & R-band functions Returns ------- G-R color : Quantity array_like """ return G_MP9401(ps, **kw) - R_MP9601(ps, **kw) # /def @quantity_io() def GmI(ps, **kw) -> MAG: """G-I color. Parameters ---------- ps : astropy.table.Table need arguments for g(i)-band functions kwargs passes to G & I-band functions Returns ------- G-I color : Quantity array_like """ return G_MP9401(ps, **kw) - I_MP9701(ps, **kw) # /def @quantity_io() def GmZ(ps, **kw) -> MAG: """G-Z color. Parameters ---------- ps : astropy.table.Table need arguments for g(z)-band functions kwargs passes to G & Z-band functions Returns ------- G-Z
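# The U_MP9301 docstring above gives the conversion
#   u_CFHT = g_PS + 0.523 - 0.343*gmi + 2.44*gmi^2 - 0.998*gmi^3,
# valid for 0.3 < g-i < 1.5 mag. A standalone sketch of the same polynomial with plain floats
# (no Quantity/units machinery), usable as a quick sanity check of the module above:
import warnings
import numpy as np

def u_cfht_from_ps(g_ps, i_ps):
    g_ps, i_ps = np.asarray(g_ps), np.asarray(i_ps)
    gmi = g_ps - i_ps
    if np.any((gmi <= 0.3) | (gmi >= 1.5)):
        warnings.warn("not all 0.3 mag < (g-i)_ps < 1.5 mag")
    return g_ps + 0.523 - 0.343 * gmi + 2.44 * gmi**2 - 0.998 * gmi**3

print(u_cfht_from_ps([15.1, 16.0], [14.6, 15.2]))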
# -*- coding: utf-8 -*- from __future__ import absolute_import ########################################################################## # # Redmine to JIRA Importers plugin # # Copyright (C) 2018, <NAME> # # config.py - Core application configuration settings # ########################################################################## # Jira has a limit of 10 MB for import files, so you may need to export and # import in groups, especially if using a file that contains pretty-printed # JSON # command line filtering examples # redmine2jira export --filter='status_id=*&sort=id' -v /tmp/all_issues_debug.json # redmine2jira export --filter='status_id=*&sort=id&offset=1000&limit=5' -p -v /tmp/all_issues_debug.json # # As well as this file, review the following methods in issues.py to fix issues with invalid Textile # site_specific_journal_fixups # site_specific_description_fixups # ######## # Core # ######## # The export feature of this tool offers a way to filter issues # via a query string. Among the several filter parameters there's # one by issue ID(s). # However, depending on the Redmine version, it might not be # available in the Redmine REST API. # As it's not known which specific version introduced it, # the tool embeds a mechanism to guess whether it's available or not. # If those details are not known, we suggest leaving the # CHECK_ISSUE_ID_FILTER_AVAILABILITY setting at its default value: # every time the 'issue_id' filter is used the tool will automatically: # # - Verify its availability # - Prompt the result of the verification # - Propose to change the ISSUE_ID_FILTER_AVAILABLE setting # according to the result # - Propose to disable CHECK_ISSUE_ID_FILTER_AVAILABILITY # to avoid performing the same check again in the future # # For instance, it may be useful to re-enable CHECK_ISSUE_ID_FILTER_AVAILABILITY # if the Redmine instance version changed. # CHECK_ISSUE_ID_FILTER_AVAILABILITY = True ISSUE_ID_FILTER_AVAILABLE = True # Flag to include journals in the issues export. # As the export of the journals of an issue is a complex feature # and comes with several requirements on configuration settings, # it needs to be explicitly enabled by the final user. # EXPORT_ISSUE_JOURNALS = False # # Resource mappings # # Here follows a bunch of settings related to mappings used during the # issues export to statically map Redmine resource instances to Jira ones. # # Each setting is valid within the context of a specific mapping between # a Redmine resource type and a Jira one. It's worth mentioning that those # resource type mappings consist of One-to-Many relationships: a Redmine # resource type may be mapped to one or more Jira resource types. # # The naming convention for these settings is the following: # # REDMINE_{RESOURCE_TYPE}_JIRA_{RESOURCE_TYPE}_MAPPINGS # # where the two "RESOURCE_TYPE" placeholders respectively refer to the # Redmine and Jira resource types where the mappings of resources apply.
# These settings are declared as Python dictionaries where the keys are the # "identifying names" of the instances of the related Redmine resource types, # and the values may be one of: # # - "Identifying names" of the mapped Jira resource instances # - Tuples composed of: # - Internal ID of the mapped Jira resource instance # - "Identifying name" of the mapped Jira resource instance # # The second, more complex form, which contains Jira resources' internal IDs, # is necessary only if issue journals need to be included in the export, # as that feature needs those IDs to properly generate Jira history items. # The most effective (and official) method to retrieve Jira resources' # internal IDs is via the Jira REST API. Nonetheless, calling those APIs may # not be feasible for everyone, for several reasons. So we added a short # handbook in the tool documentation with a list of useful APIs to # retrieve those IDs: # # https://redmine2jira.readthedocs.io/en/latest/appendixes.html#jira-rest-api-handbook # # In both forms, by "identifying names" we mean strings that # "uniquely" identify each instance of a specific resource type, in addition # to its internal ID number. # As each resource type, whether it belongs to Redmine or Jira, always has # at least one field designated to contain such an identifying name, for each # one of its instances, we will specify, in a comment before each dictionary # setting, which of them are involved in the mapping: # # Example: # # {redmine_resource_type}.{identifying_field} ==> # {jira_resource_type}.{identifying_field} # # Furthermore, some of the mappings defined via these settings are valid # only "on a per-project basis". # As both Redmine and Jira are project management tools, it's natural to # state that for both of them the "Project" is one of the main resource types # around which they are built, like any other software tool belonging # to this category. In turn, there are some resource types whose instances # are defined only within the context of a specific "Project" instance. # In such cases our dictionary keys become the "identifying names" of # Redmine projects ("identifier" field), and the values the final mappings # with respect to the resource type instances: # # Example: # # 'my-cool-project': { # 'My Redmine instance name': (10, 'My Jira instance name'), # ... # } # ... # # As we assume a Redmine project has a One-to-One mapping with a Jira project, # it would be pointless to specify the latter. # Therefore, for mappings defined on a per-project basis we extend the above # syntax as follows: # # {redmine_project}.{redmine_resource_type}.{identifying_field} ==> # {jira_project}.{jira_resource_type}.{identifying_field} # # The configuration of all the following mappings entirely depends on the # specific use case, hence all the dictionaries hereby defined are empty. # That's because they are meant to be properly overridden in the # `config_local.py` file, according to the actual configuration of both # the Redmine and Jira instances being dealt with. # # In case the tool detects a needed mapping that is missing at runtime, it # will prompt the user to input one, which will be retained for the whole session.
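# A small helper (hypothetical, not part of redmine2jira) showing how the two documented value
# forms can be consumed uniformly: plain strings carry only the Jira name, while tuples carry
# (internal ID, name). Example values are taken from the comments above:
def jira_id_and_name(mapping_value):
    if isinstance(mapping_value, tuple):
        internal_id, name = mapping_value
        return internal_id, name
    return None, mapping_value  # 1st form carries no internal ID

assert jira_id_and_name('ronny.james.dio') == (None, 'ronny.james.dio')
assert jira_id_and_name((123, 'MCP')) == (123, 'MCP')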
# # # Relationship.Name ==> Link.name # # REDMINE_RELATIONSHIP_FIELD_JIRA_LINK_FIELD_MAPPINGS = { 'blocks': 'blocks', 'copied from': 'cloners', 'copied to': 'cloners', 'copied_to': 'cloners', 'duplicates': 'duplicates', 'duplicated by': 'duplicates', 'precedes': 'precedes', 'related to': 'relates', 'relates': 'relates' # precedes - Links issues to define an "order", where A needs to be # completed x days before B can be started on # If B follows A, you can't give B # a starting date equal or less # than the ending date of A. # follows - Reciprocal of precedes. # If issue B follows A (ex A ends the 21/04 and B begins the 22/04) # and you add +2 day at the ending date of A, # the starting and ending dates of B will be +2 too. # copied to - Reciprocal of copied from. } # # User.Login ==> User.Username # # NOTE: The concept of "Jira user" is also extended to Jira Service Desk # "portal only customers". # REDMINE_USER_JIRA_USER_MAPPINGS = { # # Example: # # 'ozzy.osbourne: 'ronny.james.dio', # 1st form # ... # 'alice.cooper': ('dave.grohl', 'dave.grohl'), # 2nd form # ... 'admin': 'admin', 'andrewy': 'andrewy', 'brucef': 'brucef', 'chrisw': 'chrisw', 'Craig': 'craigr', 'clintone': 'clintone', 'davek': 'davek', 'davidb': 'davidb', 'dylan': 'dylan', 'edisonh': 'edisonh', 'graemef': 'graemef', 'jimw': 'jimw', 'mafdyb': 'mafdyb', 'MafdyB': 'mafdyB', 'markk': 'markk', 'ravic': 'ravic', 'redbug': 'redbug', 'nathanm': 'nathanm', 'Nathanm': 'nathanm', } # # Group.Name ==> User.Username # # The only relations between issues and groups is via the "Assignee" # field, and only if issue assignment to groups is explicitly allowed # in the Redmine instance settings. # However, as Jira does not (and will not) support issue assignment to groups # (https://jira.atlassian.com/browse/JRASERVER-1397) one possible mapping # is from external system group names to Jira usernames. # It's worth to check out the section "Managing Issues via a User Account" # in the following KB article: # # https://confluence.atlassian.com/jira/how-do-i-assign-issues-to-multiple-users-207489749.html#HowdoIassignissuestomultipleusers-ManagingIssuesviaaUserAccount # # NOTE: The concept of "Jira user" is also extended to Jira Service Desk # "portal only customers". # REDMINE_GROUP_JIRA_USER_MAPPINGS = { # # Example: # # 'qa-leads': 'johann.sebastian.bach', # 1st form # ... # 'lead-developers': ('linus.torvalds', 'linus.torvalds'), # 2nd form # ... 'developers': 'markk', 'Support Staff': 'andrewy', 'Support-Staff': 'andrewy', } # # Project.Identifier ==> Project.Key # REDMINE_PROJECT_JIRA_PROJECT_MAPPINGS = { # # Example: # # 'my-very-cool-project': 'MVCP', # 1st form # ... # 'my-cool-project': (123, 'MCP'), # 2nd form # ... 
# Map redmine project to Jira project key 'administration': 'AD', 'autonesting-improvements': 'AUTONESTIM', 'database-improvement': 'DB', 'fabtech2011': 'PC', 'kmc': 'KMC', 'kmcapp': 'KMCAP', 'kmcweb': 'KMCWEB', 'machine-integration-primecutne': 'MACINTEGRA', 'machines': 'TC', 'oldissues': 'OI', 'osk': 'OSK', 'pc-plate-dragger': 'PCPLAT', 'primecut': 'PC', 'primecut3': 'PC3', 'primecutne-doc': 'PC', 'primecut-pipe-support': 'PCP', 'primeshape': 'PS', 'pug-part-unloader': 'PUGUNLOAD', 'software-department': 'SW', 'solid-import': 'IMPORTSLID', 'support': 'SUP', 'tc7': 'TC7', 'tc6': 'TC6', 'tc-plate-dragger': 'TCPLATE', 'TouchCut <-> PrimecutNE': 'MACINTEGRA', 'touchcut': 'TC', 'touchmill': 'TM', 'towerp': 'PCTOWER', 'tubenc': 'TNC' } # # Tracker.Name ==> Issue_Type.Name # REDMINE_TRACKER_JIRA_ISSUE_TYPE_MAPPINGS = { # # Example: # # 'Nonconformity': 'Incident', # 1st form # ... # 'Defect': (8, 'Bug'), #
<gh_stars>0 # -*- coding: utf-8 -*- """ /*************************************************************************** Name : paradosiakoi oikismoi Description : paradosiakoi oikismoi aigaiou Date : 16/Feb/14 copyright : (C) 2014 by <NAME> email : <EMAIL> ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ # Import the PyQt and QGIS libraries from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4.QtXml import * from PyQt4 import QtSql from qgis.core import * import ConfigParser # Initialize Qt resources from file resources.py import resources_rc # Import the code for the dialog from paradosiakoioikismoiDialog import paradosiakoioikismoiDialog from NomothesiaDialog import NomothesiaDialog import sys,os #from sys import settrace #sys.path.append("/usr/share/eclipse/dropins/pydev/eclipse/plugins/org.python.pydev_3.3.3.201401272249/pysrc/") #from pydevd import * import tempfile import time class paradosiakoioikismoi: def __init__(self, iface): # Save reference to the QGIS interface self.iface = iface self.config = ConfigParser.ConfigParser() self.config.read(os.path.join(os.path.dirname(os.path.realpath(__file__)),"settings.cfg")) self.read() #postgis layers self.postgislayers={ "vwoikismoi": ['geometry', u'Οικισμοί', 'pkuid'], "vworiaoikismon": ['geometry', u'Όρια παραδοσιακών οικισμών','pkuid'] } dbase = QtSql.QSqlDatabase.addDatabase(self.dbdriver, "myconnection" ) dbase.setHostName(self.host) dbase.setPort(int(self.port)) dbase.setDatabaseName( self.dbname) dbase.setUserName(self.username) dbase.setPassword( self.password) print "DB open state:", dbase.isOpen () def generateReport(self): try: ###GET DATA listviewindex =self.dlg.ui.listViewOikismoi.selectionModel().selection() myindex = self.dlg.proxyModeloikismoi.index(listviewindex.indexes()[0].row(),4) #4= index of column NAME in model oikismosname = self.dlg.proxyModeloikismoi.data(myindex,0) filename = QFileDialog.getSaveFileName(self.dlg, u"Δημιουργία αναφοράς", "", 'pdf(*.pdf)') if filename != "": QApplication.setOverrideCursor(QCursor(Qt.WaitCursor)) if filename.split(".")[-1]!="pdf": filename = filename + ".pdf" listviewindex =self.dlg.ui.listViewDhmoi.selectionModel().selection() myindex = self.dlg.modeldimoi.index(listviewindex.indexes()[0].row(),4) #4= index of column NAME in model dhmosname = self.dlg.modeldimoi.data(myindex,0) listviewindex =self.dlg.ui.listViewNomoi.selectionModel().selection() myindex = self.dlg.model.index(listviewindex.indexes()[0].row(),4) #4= index of column NAME in model nomosname = self.dlg.model.data(myindex,0) #===================================================================== # tableviewindexes = self.dlg.ui.tableViewNomothesia.selectionModel() # μπορεί να υπάρχουν πολλές επιλογές/indexes # print len(tableviewindexes.indexes()) # for i in tableviewindexes.indexes(): # print i #===================================================================== #myindex = self.proxyModelNomothesia.index(tableviewindex.indexes()[0].row(), 9) mapRenderer = self.iface.mapCanvas().mapRenderer() c = QgsComposition(mapRenderer) c.setPlotStyle(QgsComposition.Print) 
composerMap = QgsComposerMap(c, 135,102,150,100) composerMap.setFrameEnabled(True) composerMap.setFrameOutlineWidth(0.1) c.addItem(composerMap) #title composerLabel_title = QgsComposerLabel(c) composerLabel_title.setText(u"ΠΑΡΑΔΟΣΙΑΚΟΙ ΟΙΚΙΣΜΟΙ ΣΤΟ ΑΙΓΑΙΟ") composerLabel_title.setFont(QFont("Courier", 16, QFont.Bold)) composerLabel_title.setHAlign(Qt.AlignHCenter) composerLabel_title.setVAlign(Qt.AlignVCenter) composerLabel_title.setItemPosition(0,5,c.paperWidth(),10) composerLabel_title.setBackgroundColor (QColor(200,200,200,125)) composerLabel_title.setFrameEnabled(True) composerLabel_title.setFrameOutlineWidth(0.1) c.addItem(composerLabel_title) #date now = time.strftime("%d/%m/%Y") composerLabel_date = QgsComposerLabel(c) composerLabel_date.setText(u"Ημερομηνία:%s" %now ) composerLabel_date.setFont(QFont("Times", 9)) composerLabel_date.adjustSizeToText() composerLabel_date.setHAlign(Qt.AlignLeft) composerLabel_date.setVAlign(Qt.AlignVCenter) #composerLabel_date.setBackgroundColor (QColor(200,200,200,125)) composerLabel_date.setItemPosition(3,17) #composerLabel_date.setFrameEnabled(True) #composerLabel_date.setFrameOutlineWidth(0.1) c.addItem(composerLabel_date) #oikismos composerLabel_Oikismos = QgsComposerLabel(c) composerLabel_Oikismos.setText(u"ΟΙΚΙΣΜΟΣ:") composerLabel_Oikismos.setFont(QFont("Times", 12, QFont.Bold)) #composerLabel_Oikismos.adjustSizeToText() composerLabel_Oikismos.setHAlign(Qt.AlignLeft) composerLabel_Oikismos.setVAlign(Qt.AlignVCenter) composerLabel_Oikismos.setBackgroundColor (QColor(200,200,200,125)) composerLabel_Oikismos.setItemPosition(7,32,50,10) composerLabel_Oikismos.setFrameEnabled(True) composerLabel_Oikismos.setFrameOutlineWidth(0.1) c.addItem(composerLabel_Oikismos) #dhmos composerLabel_Dhmos= QgsComposerLabel(c) composerLabel_Dhmos.setText(u"ΔΗΜΟΣ/ΚΟΙΝΟΤΗΤΑ:") composerLabel_Dhmos.adjustSizeToText() composerLabel_Dhmos.setHAlign(Qt.AlignLeft) composerLabel_Dhmos.setVAlign(Qt.AlignVCenter) composerLabel_Dhmos.setBackgroundColor (QColor(200,200,200,125)) composerLabel_Dhmos.setItemPosition(7,42,50,10) composerLabel_Dhmos.setFrameEnabled(True) composerLabel_Dhmos.setFrameOutlineWidth(0.1) c.addItem(composerLabel_Dhmos) ##nomos composerLabel_Nomos = QgsComposerLabel(c) composerLabel_Nomos.setText(u"ΝΟΜΟΣ:") composerLabel_Nomos.adjustSizeToText() composerLabel_Nomos.setHAlign(Qt.AlignLeft) composerLabel_Nomos.setVAlign(Qt.AlignVCenter) composerLabel_Nomos.setBackgroundColor (QColor(200,200,200,125)) composerLabel_Nomos.setItemPosition(7,52,50,10) composerLabel_Nomos.setFrameEnabled(True) composerLabel_Nomos.setFrameOutlineWidth(0.1) c.addItem(composerLabel_Nomos) up=72 #nomothesia_label composerLabel_nomothesia_label = QgsComposerLabel(c) composerLabel_nomothesia_label.setText(u"Σχετική νομοθεσία") composerLabel_nomothesia_label.setFont(QFont("Times", 12, QFont.Bold)) #composerLabel_Oikismos.adjustSizeToText() composerLabel_nomothesia_label.setHAlign(Qt.AlignHCenter) composerLabel_nomothesia_label.setVAlign(Qt.AlignVCenter) composerLabel_nomothesia_label.setBackgroundColor (QColor(200,200,200,125)) composerLabel_nomothesia_label.setItemPosition(7,up,120,7) #LEFT,UP, WIDTH,HEIGHT composerLabel_nomothesia_label.setFrameEnabled(True) composerLabel_nomothesia_label.setFrameOutlineWidth(0.1) c.addItem(composerLabel_nomothesia_label) up+=7 #nomothesia_label2 composerLabel_nomothesia_label2 = QgsComposerLabel(c) composerLabel_nomothesia_label2.setText(u"Νομοθεσία") composerLabel_nomothesia_label2.setFont(QFont("Times", 10, QFont.Bold)) 
composerLabel_nomothesia_label2.setHAlign(Qt.AlignHCenter) composerLabel_nomothesia_label2.setVAlign(Qt.AlignVCenter) composerLabel_nomothesia_label2.setBackgroundColor (QColor(200,200,200,125)) composerLabel_nomothesia_label2.setItemPosition(7,up,50,7) #LEFT,UP, WIDTH,HEIGHT composerLabel_nomothesia_label2.setFrameEnabled(True) composerLabel_nomothesia_label2.setFrameOutlineWidth(0.1) c.addItem(composerLabel_nomothesia_label2) #nomothesia_label3 composerLabel_nomothesia_label3 = QgsComposerLabel(c) composerLabel_nomothesia_label3.setText(u"Λεπτομέρειες") composerLabel_nomothesia_label3.setFont(QFont("Times", 10, QFont.Bold)) composerLabel_nomothesia_label3.setHAlign(Qt.AlignHCenter) composerLabel_nomothesia_label3.setVAlign(Qt.AlignVCenter) composerLabel_nomothesia_label3.setBackgroundColor (QColor(200,200,200,125)) composerLabel_nomothesia_label3.setItemPosition(57,79,70,7) #LEFT,UP, WIDTH,HEIGHT composerLabel_nomothesia_label3.setFrameEnabled(True) composerLabel_nomothesia_label3.setFrameOutlineWidth(0.1) c.addItem(composerLabel_nomothesia_label3) #map_label composerLabel_map_label = QgsComposerLabel(c) composerLabel_map_label.setText(u"Απόσπασμα χάρτη") composerLabel_map_label.setFont(QFont("Times", 12, QFont.Bold)) composerLabel_map_label.setHAlign(Qt.AlignHCenter) composerLabel_map_label.setVAlign(Qt.AlignVCenter) composerLabel_map_label.setBackgroundColor (QColor(200,200,200,125)) composerLabel_map_label.setItemPosition(135,95,150,7) #LEFT,UP, WIDTH,HEIGHT composerLabel_map_label.setFrameEnabled(True) composerLabel_map_label.setFrameOutlineWidth(0.1) c.addItem(composerLabel_map_label) #scalebar item = QgsComposerScaleBar(c) item.setFont(QFont("Times", 9)) item.setStyle('Numeric') # optionally modify the style item.setComposerMap(composerMap) item.applyDefaultSize() item.setItemPosition(265,192,18,6) c.addItem(item) #dimografikoi_label composerLabel_dhnografikoi_label = QgsComposerLabel(c) composerLabel_dhnografikoi_label.setText(u"Δημογραφικοί δείκτες") composerLabel_dhnografikoi_label.setFont(QFont("Times", 12, QFont.Bold)) composerLabel_dhnografikoi_label.setHAlign(Qt.AlignHCenter) composerLabel_dhnografikoi_label.setVAlign(Qt.AlignVCenter) composerLabel_dhnografikoi_label.setBackgroundColor (QColor(200,200,200,125)) composerLabel_dhnografikoi_label.setItemPosition(135,32,150,7) #LEFT,UP, WIDTH,HEIGHT composerLabel_dhnografikoi_label.setFrameEnabled(True) composerLabel_dhnografikoi_label.setFrameOutlineWidth(0.1) c.addItem(composerLabel_dhnografikoi_label) uplabels=39 dimograf_labels=[u'Μόνιμος πληθυσμός', u'Αναλογία Ανδρών-Γυναικών', u'Δείκτης Αντικ.Παραγωγικού πληθυσμού',u'Δείκτης γήρανσης', u'Δείκτης εξάρτησης', u'Αναλογία ηλικιωμένων προς δυνητικά ενεργό πληθυσμό', u'Αναλογία των ατόμων άνω των 65 ετών επί του συνολικού πληθυσμού'] for label in dimograf_labels: composerLabel_dimograf_label = QgsComposerLabel(c) composerLabel_dimograf_label.setText(label) composerLabel_dimograf_label.setFont(QFont("Times", 10)) composerLabel_dimograf_label.setHAlign(Qt.AlignHCenter) composerLabel_dimograf_label.setVAlign(Qt.AlignVCenter) composerLabel_dimograf_label.setItemPosition(135,uplabels,120,7) composerLabel_dimograf_label.setFrameEnabled(True) composerLabel_dimograf_label.setFrameOutlineWidth(0.1) c.addItem(composerLabel_dimograf_label) uplabels+=7 #dimografkoi values for widget in self.dlg.ui.tab_2.children(): if isinstance(widget, QLineEdit): print "linedit: %s - %s" %(widget.objectName(),widget.text()) composerLabel_dimograf_value = QgsComposerLabel(c) value=widget.text() if 
type(value)==QPyNullVariant: value = '-' composerLabel_dimograf_value .setText(value) composerLabel_dimograf_value .setHAlign(Qt.AlignHCenter) composerLabel_dimograf_value .setVAlign(Qt.AlignVCenter) if widget.objectName()=="lineEdit_Pop": composerLabel_dimograf_value .setItemPosition(255,39,30,7) if widget.objectName()=="lineEdit_mw": composerLabel_dimograf_value .setItemPosition(255,46,30,7) if widget.objectName()=="lineEdit_dapp": composerLabel_dimograf_value .setItemPosition(255,53,30,7) if widget.objectName()=="lineEdit_dgp": composerLabel_dimograf_value .setItemPosition(255,60,30,7) if widget.objectName()=="lineEdit_de": composerLabel_dimograf_value .setItemPosition(255,67,30,7) if widget.objectName()=="lineEdit_ilikiomenoi1": composerLabel_dimograf_value .setItemPosition(255,74,30,7) if widget.objectName()=="lineEdit_ilikiomenoi2": composerLabel_dimograf_value .setItemPosition(255,81,30,7) composerLabel_dimograf_value .setFrameEnabled(True) composerLabel_dimograf_value .setFrameOutlineWidth(0.1) c.addItem(composerLabel_dimograf_value ) uplabels+=7 ##oikismos_value composerLabel_OikismosValue = QgsComposerLabel(c) composerLabel_OikismosValue.setText(oikismosname) composerLabel_OikismosValue.adjustSizeToText() composerLabel_OikismosValue.setHAlign(Qt.AlignLeft) composerLabel_OikismosValue.setVAlign(Qt.AlignVCenter) composerLabel_OikismosValue.setItemPosition(57,32,30,10) composerLabel_OikismosValue.setFrameEnabled(True) composerLabel_OikismosValue.setFrameOutlineWidth(0.1) c.addItem(composerLabel_OikismosValue) ##dhmos_value composerLabel_dhmosValue = QgsComposerLabel(c) composerLabel_dhmosValue.setText(dhmosname) composerLabel_dhmosValue.adjustSizeToText() composerLabel_dhmosValue.setHAlign(Qt.AlignLeft) composerLabel_dhmosValue.setVAlign(Qt.AlignVCenter) composerLabel_dhmosValue.setItemPosition(57,42,30,10) composerLabel_dhmosValue.setFrameEnabled(True) composerLabel_dhmosValue.setFrameOutlineWidth(0.1) c.addItem(composerLabel_dhmosValue) ##nomos_value composerLabel_NomosValue = QgsComposerLabel(c) composerLabel_NomosValue.setText(nomosname) composerLabel_NomosValue.adjustSizeToText() composerLabel_NomosValue.setHAlign(Qt.AlignLeft) composerLabel_NomosValue.setVAlign(Qt.AlignVCenter) composerLabel_NomosValue.setItemPosition(57,52,30,10) composerLabel_NomosValue.setFrameEnabled(True) composerLabel_NomosValue.setFrameOutlineWidth(0.1) c.addItem(composerLabel_NomosValue) #nomothesia, add values for each nomothesia mymodel = self.dlg.ui.tableViewNomothesia.model(); up+=7 for i in range(mymodel.rowCount()): composerLabel_Nomosthesia = QgsComposerLabel(c) composerLabel_Nomosthesia.setText(mymodel.data(mymodel.index(i, 2))) composerLabel_Nomosthesia.setFont(QFont("Times", 10)) composerLabel_Nomosthesia.setHAlign(Qt.AlignHCenter) composerLabel_Nomosthesia.setVAlign(Qt.AlignVCenter) composerLabel_Nomosthesia.setItemPosition(7,up,50,7) composerLabel_Nomosthesia.setFrameEnabled(True) composerLabel_Nomosthesia.setFrameOutlineWidth(0.1) composerLabel_Details = QgsComposerLabel(c) composerLabel_Details = QgsComposerLabel(c) details=mymodel.data(mymodel.index(i, 7)) if type(details)==QPyNullVariant: details = '-' composerLabel_Details.setText(details) composerLabel_Details.setHAlign(Qt.AlignHCenter) composerLabel_Details.setVAlign(Qt.AlignVCenter) composerLabel_Details.setItemPosition(57,up,70,7) composerLabel_Details.setFrameEnabled(True) composerLabel_Details.setFrameOutlineWidth(0.1) c.addItem(composerLabel_Nomosthesia) c.addItem(composerLabel_Details) up+=7 printer = QPrinter() 
printer.setOutputFormat(QPrinter.PdfFormat) printer.setOutputFileName(filename) printer.setPaperSize(QSizeF(c.paperWidth(), c.paperHeight()), QPrinter.Millimeter) printer.setFullPage(True) printer.setColorMode(QPrinter.Color) printer.setResolution(c.printResolution()) pdfPainter = QPainter(printer) paperRectMM = printer.pageRect(QPrinter.Millimeter) paperRectPixel = printer.pageRect(QPrinter.DevicePixel) c.render(pdfPainter, paperRectPixel, paperRectMM) pdfPainter.end() QApplication.restoreOverrideCursor() QMessageBox.information(self.dlg, u"Ενημέρωση",u"Η εξαγωγή της αναφοράς ολοκληρώθηκε!") except IndexError: QMessageBox.information(None,u"Ενημέρωση!",u"Επιλέξτε οικισμό από την αντίστοιχη λίστα!") return def zoomToOrio(self): """zoom to orio""" try: self.layers = self.iface.mapCanvas().layers() self.layerOrio=self.getlayerbyName(self.layers,self.postgislayers.get('vworiaoikismon')[1]) self.iface.setActiveLayer(self.layerOrio) tableviewindex = self.dlg.ui.tableViewNomothesia.selectionModel().selection()#μπορεί να υπάρχουν πολλές επιλογές/indexes myindex = self.dlg.proxyModelNomothesia.index(tableviewindex.indexes()[0].row(),10) idnom = str(self.dlg.proxyModelNomothesia.data(self.dlg.proxyModelNomothesia.index(myindex.row(),10),0)) self.zoomTo(self.layerOrio, "idnom", idnom) except IndexError: QMessageBox.information(None,u"Ενημέρωση!",u"Παρακαλώ επιλέξτε νομοθεσία") def zoomToOikismoi(self, myview, mymodel, layername, searchfield): try: self.layers = self.iface.mapCanvas().layers() self.layerOikismoi=self.getlayerbyName(self.layers,self.postgislayers.get(layername)[1]) self.iface.setActiveLayer(self.layerOikismoi) listviewindex =myview.selectionModel().selection() myindex = mymodel.index(listviewindex.indexes()[0].row(),6) #6= index of column KODIKOS in model KODIKOS = mymodel.data(myindex,0) self.zoomTo(self.layerOikismoi, searchfield, KODIKOS) except IndexError: QMessageBox.information(None,u"Ενημέρωση!",u"Παρακαλώ επιλέξτε έναν οικισμό από την αντίστοιχη λίστα") def zoomTo(self, layer, searchfield, value): try: self.it = layer.getFeatures( QgsFeatureRequest().setFilterExpression ( u'"%s" = \'%s\'' % (searchfield,str(value)) ) ) print u'"%s" = \'%s\'' % (searchfield,str(value)) if len(list(self.it) )==0: QMessageBox.information(None,u"Ενημέρωση!",u"Δεν βρέθηκε το αντικείμενο στο θεματικό επίπεδο:%s!" %layer.name()) return del self.it self.it = layer.getFeatures( QgsFeatureRequest().setFilterExpression (u'"%s" = \'%s\'' % (searchfield,str(value)) ) ) layer.setSelectedFeatures( [ f.id() for f in self.it ] ) self.canvas.zoomToSelected() except AttributeError: QMessageBox.information(None,u"Ενημέρωση!",u"Δεν βρέθηκε το απαραίτητο θεματικό επίπεδο ή δεν είναι ενεργό") def AddVectorLayer(self, layername): self.layers = self.iface.mapCanvas().layers()#get all layers in mapcanvas self.layerexist=self.getlayerbyName(self.layers , self.postgislayers.get(layername)[1]) #check if layer already exist if self.layerexist: #if layer already exist, msgbox and exit reply = QMessageBox.question(None, u'Ερώτηση', u"Το θεματικό επίπεδο %s υπάρχει ήδη. 
\n Να προστεθεί;" %self.postgislayers.get(layername)[1], QMessageBox.Yes | QMessageBox.No, QMessageBox.No) if reply == QMessageBox.Yes: pass else: return #else add postgis layer and assign some style to oikismoi vlayer= self.AddPostgisLayer(layername, self.postgislayers.get(layername)[0], self.postgislayers.get(layername)[1], self.postgislayers.get(layername)[2]) if vlayer.name() == u"Οικισμοί": oikismoi = { '1': ('#f00', u'Παραδοσιακοί οικισμοί στο Αιγαίο'), '0': ('#0f0', u'Οικισμοί') } # create a category for each item in animals categories = [] for typos, (color, label) in oikismoi.items(): symbol = QgsSymbolV2.defaultSymbol(vlayer.geometryType()) symbol.setColor(QColor(color)) category = QgsRendererCategoryV2(typos, symbol, label) categories.append(category) # create the renderer and assign it to a layer field = 'PARADOSIAKOS' # field name renderer = QgsCategorizedSymbolRendererV2(field, categories) vlayer.setRendererV2(renderer) #Finally add layer to map and toc QgsMapLayerRegistry.instance().addMapLayer(vlayer, True) self.iface.mapCanvas().refresh() ### def AddPostgisLayer(self, tablename, geometrycolumn,layername, keycolumn ): """Adds postgis layers """ #if not exist, add it uri = QgsDataSourceURI() # set host name, port, database name, username and password uri.setConnection(self.host, str(self.port), self.dbname, self.username,self.password) # set database schema, table name, geometry column and optionaly # subset (WHERE clause) uri.setDataSource(self.schema, tablename, geometrycolumn,'', str(keycolumn)) vlayer = QgsVectorLayer(uri.uri(), layername, "postgres") return vlayer def AddWMSLayer(self): """Adds WMS layers """ uri = QgsDataSourceURI() #uri = "http://gis.ktimanet.gr/wms/wmsopen/wmsserver.aspx" self.urlwithParams='crs=EPSG:4326&dpiMode=7&featureCount=10&format=image/jpeg&layers=KTBASEMAP&styles=&url=http://gis.ktimanet.gr/wms/wmsopen/wmsserver.aspx' rlayer = QgsRasterLayer(self.urlwithParams, u"Ορθοφωτοχάρτες (Εθνικό κτηματολόγιο)", "wms") if not rlayer.isValid(): print "Layer failed to load!" QMessageBox.information(None,u"Ενημέρωση!",u"Η προσθήκη του θεματικού επιπέδου απέτυχε") QgsMapLayerRegistry.instance().addMapLayer(rlayer, True) self.iface.mapCanvas().refresh() return rlayer def getlayerbyName(self,layers, name): """Pass a list with all mapcanvas layers, return a layer or None if not found """ for layer in layers: if layer.name()==name: return layer ######################################init############################################## def initGui(self): # Create action that will start plugin configuration self.action = QAction(QIcon(":/plugins/paradosiakoioikismoi/icons/icon.png"), u"Φόρμα αναζήτησης παραδοσιακών οικισμών", self.iface.mainWindow()) self.action2 = QAction(QIcon(":/plugins/paradosiakoioikismoi/icons/icon3.png"), u"Φόρμα αναζήτησης με βάση την νομοθεσία", self.iface.mainWindow()) # connect the action to the run
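# Aside: the select-and-zoom pattern used by zoomTo above, condensed.
# A minimal sketch against the QGIS 2.x API this plugin targets; the
# iface/layer/field/value arguments are placeholders supplied by the caller.
from qgis.core import QgsFeatureRequest

def zoom_to_matching(iface, layer, field, value):
    expr = u'"%s" = \'%s\'' % (field, value)
    ids = [f.id() for f in layer.getFeatures(QgsFeatureRequest().setFilterExpression(expr))]
    if not ids:
        return False  # nothing matched; the caller can show a message box
    layer.setSelectedFeatures(ids)  # QGIS 2 selection API, as used in zoomTo above
    iface.mapCanvas().zoomToSelected(layer)
    return True
# Unlike zoomTo above, this materialises the feature ids once instead of
# running getFeatures() twice (once to count matches, once to select them).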
<filename>Speedo/plugins/stickers.py import asyncio import io import math import os import random import textwrap import urllib.request from os import remove from PIL import Image, ImageDraw, ImageFont from telethon import events from telethon.errors.rpcerrorlist import YouBlockedUserError from telethon.tl.functions.messages import GetStickerSetRequest from telethon.tl.types import DocumentAttributeFilename, DocumentAttributeSticker, InputStickerSetID, MessageMediaPhoto, InputMessagesFilterDocument from . import * KANGING_STR = [ "Using Witchery to kang this sticker...", "Plagiarising hehe...", "Inviting this sticker over to my pack...", "Kanging this sticker...", "Hey that's a nice sticker!\nMind if I kang?!..", "hehe me stel ur stikér\nhehe.", "Ay look over there (☉。☉)!→\nWhile I kang this...", "Roses are red violets are blue, kanging this sticker so my pacc looks cool", "Imprisoning this sticker...", "Mr.Steal Your Sticker is stealing this sticker... ", "Hey! That's my sticker. Lemme get it back...", "Turn around, Go straight and f*ck off...", ] Speedo = Config.STICKER_PACKNAME @speedo.on(Speedo_cmd(outgoing=True, pattern="kang")) @speedo.on(sudo_cmd(pattern="kang", allow_sudo=True)) async def kang(args): user = await bot.get_me() if not user.username: user.username = user.first_name message = await args.get_reply_message() photo = None emojibypass = False is_anim = False emoji = None if message and message.media: if isinstance(message.media, MessageMediaPhoto): speedo = await eor(args, f"`{random.choice(KANGING_STR)}`") photo = io.BytesIO() photo = await bot.download_media(message.photo, photo) elif "image" in message.media.document.mime_type.split("/"): speedo = await eor(args, f"`{random.choice(KANGING_STR)}`") photo = io.BytesIO() await bot.download_file(message.media.document, photo) if ( DocumentAttributeFilename(file_name="sticker.webp") in message.media.document.attributes ): emoji = message.media.document.attributes[1].alt emojibypass = True elif "tgsticker" in message.media.document.mime_type: speedo = await eor(args, f"`{random.choice(KANGING_STR)}`") await bot.download_file(message.media.document, "AnimatedSticker.tgs") attributes = message.media.document.attributes for attribute in attributes: if isinstance(attribute, DocumentAttributeSticker): emoji = attribute.alt emojibypass = True is_anim = True photo = 1 else: await eod(args, "`Unsupported File!`") return else: await eod(args, "`I can't kang that...`") return if photo: splat = args.text.split() if not emojibypass: emoji = "😎" pack = 1 if len(splat) == 3: pack = splat[2] # User sent both emoji = splat[1] elif len(splat) == 2: if splat[1].isnumeric(): # User wants to push into different pack, but is okay with # thonk as emote. pack = int(splat[1]) else: # User sent just custom emote, wants to push to default # pack emoji = splat[1] packname = f"{user.username}_Hellbot_{pack}" packnick = ( f"{Speedo} Vol.{pack}" if Speedo else f"@{user.username}'s Speedo Vol.{pack}" ) cmd = "/newpack" file = io.BytesIO() if not is_anim: image = await resize_photo(photo) file.name = "sticker.png" image.save(file, "PNG") else: packname += "_anim" packnick += " (Animated)" cmd = "/newanimated" response = urllib.request.urlopen( urllib.request.Request(f"http://t.me/addstickers/{packname}") ) htmlstr = response.read().decode("utf8").split("\n") if ( " A <strong>Telegram</strong> user has created the <strong>Sticker&nbsp;Set</strong>." 
not in htmlstr ): async with bot.conversation("Stickers") as conv: await conv.send_message("/addsticker") await conv.get_response() # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) await conv.send_message(packname) x = await conv.get_response() while "120" in x.text: pack += 1 packname = f"{user.username}_by_{user.username}_{pack}" packnick = ( f"{Speedo} Vol.{pack}" if Speedo else f"@{user.username}'s Speedo Vol.{pack}" ) await speedo.edit( "`Switching to Pack " + str(pack) + " due to insufficient space`" ) await conv.send_message(packname) x = await conv.get_response() if x.text == "Invalid pack selected.": await conv.send_message(cmd) await conv.get_response() # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) await conv.send_message(packnick) await conv.get_response() # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) if is_anim: await conv.send_file("AnimatedSticker.tgs") remove("AnimatedSticker.tgs") else: file.seek(0) await conv.send_file(file, force_document=True) await conv.get_response() await conv.send_message(emoji) # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) await conv.get_response() await conv.send_message("/publish") if is_anim: await conv.get_response() await conv.send_message(f"<{packnick}>") # Ensure user doesn't get spamming notifications await conv.get_response() await bot.send_read_acknowledge(conv.chat_id) await conv.send_message("/skip") # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) await conv.get_response() await conv.send_message(packname) # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) await conv.get_response() # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) await speedo.edit( f"`Sticker added in a Different Pack !\ \nThis Pack is Newly created!\ \nYour pack can be found [here](t.me/addstickers/{packname})", parse_mode="md", ) return if is_anim: await conv.send_file("AnimatedSticker.tgs") remove("AnimatedSticker.tgs") else: file.seek(0) await conv.send_file(file, force_document=True) rsp = await conv.get_response() if "Sorry, the file type is invalid." in rsp.text: await speedo.edit( "`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`" ) return await conv.send_message(emoji) # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) await conv.get_response() await conv.send_message("/done") await conv.get_response() # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) else: await speedo.edit("`Preparing a new pack....`") async with bot.conversation("Stickers") as conv: await conv.send_message(cmd) await conv.get_response() # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) await conv.send_message(packnick) await conv.get_response() # Ensure user doesn't get spamming notifications await bot.send_read_acknowledge(conv.chat_id) if is_anim: await conv.send_file("AnimatedSticker.tgs") remove("AnimatedSticker.tgs") else: file.seek(0) await conv.send_file(file, force_document=True) rsp = await conv.get_response() if "Sorry, the file type is invalid." 
in rsp.text:
    await speedo.edit(
        "`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
    )
    return
await conv.send_message(emoji)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message("/publish")
if is_anim:
    await conv.get_response()
    await conv.send_message(f"<{packnick}>")
# Ensure user doesn't get spamming notifications
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message("/skip")
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message(packname)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await speedo.edit(
    f"⚡** This Sticker iz [kanged](t.me/addstickers/{packname}) successfully to your pack **⚡",
    parse_mode="md",
)


async def resize_photo(photo):
    """Resize the given photo to fit within 512x512."""
    image = Image.open(photo)
    maxsize = (512, 512)
    # Bug fix: the original tested `(image.width and image.height) < 512`,
    # which only ever compared the height; both dimensions must be under
    # 512 before scaling up.
    if image.width < 512 and image.height < 512:
        size1 = image.width
        size2 = image.height
        if image.width > image.height:
            scale = 512 / size1
            size1new = 512
            size2new = size2 * scale
        else:
            scale = 512 / size2
            size1new = size1 * scale
            size2new = 512
        size1new = math.floor(size1new)
        size2new = math.floor(size2new)
        sizenew = (size1new, size2new)
        image = image.resize(sizenew)
    else:
        image.thumbnail(maxsize)
    return image


@speedo.on(admin_cmd(outgoing=True, pattern="stkrinfo"))
@speedo.on(sudo_cmd(pattern="stkrinfo", allow_sudo=True))
async def get_pack_info(event):
    if not event.is_reply:
        await edit_or_reply(event, "`I can't fetch info from black hole!!!`")
        return
    rep_msg = await event.get_reply_message()
    if not rep_msg.document:
        await edit_or_reply(event, "`Reply to a sticker to get the pack details`")
        return
    try:
        stickerset_attr = rep_msg.document.attributes[1]
        await edit_or_reply(event, "`Fetching details of the sticker pack, please wait..`")
    except BaseException:
        await edit_or_reply(event, "`This is not a sticker. Reply to a sticker.`")
        return
    if not isinstance(stickerset_attr, DocumentAttributeSticker):
        await edit_or_reply(event, "`This is not a sticker.
Reply to a sticker.`") return get_stickerset = await bot( GetStickerSetRequest( InputStickerSetID( id=stickerset_attr.stickerset.id, access_hash=stickerset_attr.stickerset.access_hash, ) ) ) pack_emojis = [] for document_sticker in get_stickerset.packs: if document_sticker.emoticon not in pack_emojis: pack_emojis.append(document_sticker.emoticon) OUTPUT = ( f"🔹 **Sticker Title:** `{get_stickerset.set.title}\n`" f"🔸 **Sticker Short Name:** `{get_stickerset.set.short_name}`\n" f"🔹 **Official:** `{get_stickerset.set.official}`\n" f"🔸 **Archived:** `{get_stickerset.set.archived}`\n" f"🔹 **Stickers In Pack:** `{len(get_stickerset.packs)}`\n" f"🔸 **Emojis In Pack:**\n{' '.join(pack_emojis)}" ) await edit_or_reply(event, OUTPUT) @speedo.on(Speedo_cmd(pattern=r"delst ?(.*)", outgoing=True)) @speedo.on(sudo_cmd(pattern=r"delst ?(.*)", allow_sudo=True)) async def _(event): if event.fwd_from: return if not event.reply_to_msg_id: await event.edit("`Reply to any user's message.`") return reply_message = await event.get_reply_message() chat = "@Stickers" reply_message.sender if reply_message.sender.bot: await edit_or_reply(event, "`Reply to actual user's message.`") return await event.edit("🥴 `Deleting sticker...`") async with bot.conversation(chat) as conv: try: response = conv.wait_event( events.NewMessage(incoming=True, from_users=429000) ) await conv.send_message("/delsticker") await conv.get_response() await asyncio.sleep(2) await bot.forward_messages(chat, reply_message) response = await response except YouBlockedUserError: await event.reply("Please unblock @Stickers and try again") return if response.text.startswith("Sorry, I can't do this, it seems that you are not the owner of the relevant pack."): await event.edit("**🥴 Nashe me hai kya lawde!!**" ) elif response.text.startswith("You don't have any sticker packs yet. You can create one using the /newpack command."): await event.edit("**😪 You don't have any sticker pack to delete stickers.** \n\n@Stickers :- 'Pehle Pack Bna Lamde 🤧'") elif response.text.startswith("Please send me the sticker."): await event.edit("**😪 Nashe me hai kya lawde**") elif response.text.startswith("Invalid pack selected."): await event.edit("**😪 Nashe me hai kya lawde**") else: await event.edit("**😐 Deleted that replied sticker, it will stop being available to Telegram users within about an hour.**") @speedo.on(Speedo_cmd(pattern=r"editst ?(.*)", outgoing=True)) @speedo.on(sudo_cmd(pattern=r"editst ?(.*)", allow_sudo=True)) async def _(event): if event.fwd_from: return if not event.reply_to_msg_id: await event.edit("`Reply to any user's message.`") return reply_message = await event.get_reply_message() speedo = event.pattern_match.group(1) chat = "@Stickers" reply_message.sender if reply_message.sender.bot: await edit_or_reply(event, "`Reply to actual user's message.`") return await event.edit("📝 `Editing sticker emoji...`") if speedo == "": await event.edit("**🤧 Nashe me hai kya lawde**") else: async with bot.conversation(chat) as conv: try: response = conv.wait_event( events.NewMessage(incoming=True, from_users=429000) ) await conv.send_message(f"/editsticker") await conv.get_response() await asyncio.sleep(2) await bot.forward_messages(chat, reply_message) await conv.get_response() await asyncio.sleep(2) await conv.send_message(f"{speedo}") response =
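# A stripped-down sketch of the @Stickers conversation skeleton the kang
# handler above is built on: every step is a send_message/get_response pair,
# with send_read_acknowledge to keep the bot chat marked as read. The real
# flow also handles /publish, the icon prompt and the short-name prompt, as
# the full handler shows; the function and argument names are illustrative.
async def add_to_pack(bot, packname, sticker_file, emoji):
    async with bot.conversation("Stickers") as conv:
        await conv.send_message("/addsticker")
        await conv.get_response()
        await bot.send_read_acknowledge(conv.chat_id)
        await conv.send_message(packname)
        await conv.get_response()
        sticker_file.seek(0)
        await conv.send_file(sticker_file, force_document=True)  # PNG must go as a document
        rsp = await conv.get_response()
        if "Sorry, the file type is invalid." in rsp.text:
            raise ValueError(rsp.text)  # let the caller report the failure
        await conv.send_message(emoji)
        await conv.get_response()
        await conv.send_message("/done")
        await conv.get_response()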
problem? Maybe not.')
    tty = ''

    release_message = RELEASE_MESSAGE_TEMPLATE.format(release_version)
    if changelog_lines:
        release_message += '\n\nChangelog Details:'
        for line in changelog_lines:
            release_message += '\n' + line.strip()

    cmd = ['git', 'tag', '-a', release_version, '-m', release_message]
    if overwrite:
        cmd.append('-f')

    signed = False
    if gpg:
        sign_with_key = _prompt(
            'GPG is installed on your system. Would you like to sign the release tag with your GitHub committer email '
            'GPG key? (y/N/[alternative key ID]):',
        ).lower() or INSTRUCTION_NO
        if sign_with_key == INSTRUCTION_YES:
            cmd.append('-s')
        elif sign_with_key != INSTRUCTION_NO:
            cmd.extend(['-u', sign_with_key])
        if sign_with_key != INSTRUCTION_NO:
            signed = True
            try:
                subprocess.check_output(
                    ['git', 'config', '--global', 'gpg.program', gpg],
                )
            except subprocess.CalledProcessError as e:
                raise ReleaseFailure(
                    'Failed to configure Git+GPG. Something is not right. Aborting.\n{code}: {output}'.format(
                        code=e.returncode,
                        output=e.output.decode('utf8'),
                    )
                )
    else:
        _standard_output('GPG is not installed on your system. Will not sign the release tag.')

    try:
        result = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            env=dict(os.environ, GPG_TTY=tty),
        ).decode('utf8')
    except subprocess.CalledProcessError as e:
        result = '`git` command exit code {code} - {output}'.format(code=e.returncode, output=e.output.decode('utf8'))

    if result:
        if 'unable to sign the tag' in result:
            raise ReleaseFailure(
                'Failed tagging branch due to error signing the tag. Perhaps you need to create a code-signing key, or '
                'the alternate key ID you specified was incorrect?\n\n'
                'Suggestions:\n'
                ' - Generate a key with `{gpg} --gen-key` (GPG v1) or `{gpg} --full-gen-key` (GPG v2) (and use 4096)\n'
                ' - It is not enough for the key email to match your committer email; the full display name must '
                'match, too (e.g. "First Last <<EMAIL>>")\n'
                ' - If the key display name does not match the committer display name, use the alternate key ID\n'
                'Error output: {output}'.format(gpg=gpg, output=result)
            )
        raise ReleaseFailure('Failed tagging branch: {}'.format(result))

    if signed:
        try:
            subprocess.check_call(
                ['git', 'tag', '-v', release_version],
                stdout=sys.stdout,
                stderr=sys.stderr,
            )
        except subprocess.CalledProcessError:
            raise ReleaseFailure(
                'Successfully created a signed release tag, but failed to verify its signature. Something is not right.'
) _verbose_output(verbose, 'Finished tagging branch.') def _commit_release_changes(release_version, changelog_lines, verbose): _verbose_output(verbose, 'Committing release changes...') files_to_commit = [VERSION_FILENAME, CHANGELOG_FILENAME] + _get_extra_files_to_commit() _verbose_output(verbose, 'Staging changes for files {}.'.format(files_to_commit)) try: result = subprocess.check_output( ['git', 'add'] + files_to_commit, stderr=subprocess.STDOUT, ) except subprocess.CalledProcessError as e: result = '`git` command exit code {code} - {output}'.format(code=e.returncode, output=e.output.decode('utf8')) if result: raise ReleaseFailure('Failed staging release files for commit: {}'.format(result)) release_message = [RELEASE_MESSAGE_TEMPLATE.format(release_version)] if changelog_lines: release_message.append('\nChangelog Details:') for line in changelog_lines: release_message.append(line.strip()) subprocess.check_call( ['git', 'commit', '-m', '\n'.join(release_message)], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Finished releasing changes.') def _push_release_changes(release_version, branch_name, verbose): try: if USE_TAG: message = 'Push release changes and tag to remote origin (branch "{}")? (y/N/rollback):' else: message = 'Push release changes to remote origin (branch "{}")? (y/N/rollback):' push = _prompt(message, branch_name).lower() except KeyboardInterrupt: push = INSTRUCTION_ROLLBACK if push == INSTRUCTION_YES: _verbose_output(verbose, 'Pushing changes to remote origin...') subprocess.check_call( ['git', 'push', 'origin', '{0}:{0}'.format(branch_name)], stdout=sys.stdout, stderr=sys.stderr, ) if USE_TAG: # push the release tag subprocess.check_call( ['git', 'push', 'origin', release_version], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Finished pushing changes to remote origin.') return PUSH_RESULT_PUSHED elif push == INSTRUCTION_ROLLBACK: _standard_output('Rolling back local release commit and tag...') if USE_PULL_REQUEST: _checkout_branch(verbose, BRANCH_MASTER) _delete_branch(verbose, branch_name) else: _delete_last_commit(verbose) if USE_TAG: _delete_local_tag(release_version, verbose) _verbose_output(verbose, 'Finished rolling back local release commit.') return PUSH_RESULT_ROLLBACK else: _standard_output('Not pushing changes to remote origin!') if USE_TAG: _print_output( COLOR_RED_BOLD, 'Make sure you remember to explicitly push {branch} and the tag ' '(or revert your local changes if you are trying to cancel)! ' 'You can push with the following commands:\n' ' git push origin {branch}:{branch}\n' ' git push origin "{tag}"\n', branch=branch_name, tag=release_version, ) else: _print_output( COLOR_RED_BOLD, 'Make sure you remember to explicitly push {branch} (or revert your local changes if you are ' 'trying to cancel)! 
You can push with the following command:\n' ' git push origin {branch}:{branch}\n', branch=branch_name, ) return PUSH_RESULT_NO_ACTION def _get_last_commit_hash(verbose): _verbose_output(verbose, 'Getting last commit hash...') commit_hash = subprocess.check_output( ['git', 'log', '-n', '1', '--pretty=format:%H'], stderr=sys.stderr, ).decode('utf8').strip() _verbose_output(verbose, 'Last commit hash is {}.', commit_hash) return commit_hash def _get_commit_subject(commit_hash, verbose): _verbose_output(verbose, 'Getting commit message for hash {}...', commit_hash) message = subprocess.check_output( ['git', 'log', '-n', '1', '--pretty=format:%s', commit_hash], stderr=sys.stderr, ).decode('utf8').strip() _verbose_output(verbose, 'Commit message for hash {hash} is "{value}".', hash=commit_hash, value=message) return message def _get_branch_name(verbose): _verbose_output(verbose, 'Determining current Git branch name.') branch_name = subprocess.check_output( ['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stderr=sys.stderr, ).decode('utf8').strip() _verbose_output(verbose, 'Current Git branch name is {}.', branch_name) return branch_name def _create_branch(verbose, branch_name): _verbose_output(verbose, 'Creating branch {branch}...', branch=branch_name) subprocess.check_call( ['git', 'checkout', '-b', branch_name], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Done creating branch {}.', branch_name) def _create_local_tracking_branch(verbose, branch_name): """Create a local tracking branch of origin/<branch_name>. Returns True if successful, False otherwise. """ _verbose_output( verbose, 'Creating local branch {branch} set up to track remote branch {branch} from \'origin\'...', branch=branch_name ) success = True try: subprocess.check_call( ['git', 'checkout', '--track', 'origin/{}'.format(branch_name)], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Done creating branch {}.', branch_name) except subprocess.CalledProcessError: _verbose_output(verbose, 'Creating branch {} failed.', branch_name) success = False return success def _checkout_branch(verbose, branch_name): _verbose_output(verbose, 'Checking out branch {branch}...', branch=branch_name) subprocess.check_call( ['git', 'checkout', branch_name], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Done checking out branch {}.', branch_name) def _delete_branch(verbose, branch_name): _verbose_output(verbose, 'Deleting branch {branch}...', branch=branch_name) subprocess.check_call( ['git', 'branch', '-D', branch_name], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Done deleting branch {}.', branch_name) def _is_branch_on_remote(verbose, branch_name): _verbose_output(verbose, 'Checking if branch {} exists on remote...', branch_name) result = subprocess.check_output( ['git', 'ls-remote', '--heads', 'origin', branch_name], stderr=sys.stderr, ).decode('utf8').strip() on_remote = branch_name in result _verbose_output( verbose, 'Result of on-remote check for branch {branch_name} is {result}.', branch_name=branch_name, result=on_remote, ) return on_remote def _create_branch_from_tag(verbose, tag_name, branch_name): _verbose_output(verbose, 'Creating branch {branch} from tag {tag}...', branch=branch_name, tag=tag_name) subprocess.check_call( ['git', 'checkout', 'tags/{}'.format(tag_name), '-b', branch_name], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Done creating branch {}.', branch_name) def _push_branch(verbose, branch_name): _verbose_output(verbose, 
'Pushing branch {} to remote.', branch_name) subprocess.check_call( ['git', 'push', 'origin', '{0}:{0}'.format(branch_name)], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Done pushing branch {}.', branch_name) def _fetch_tags(verbose): _verbose_output(verbose, 'Fetching all remote tags...') subprocess.check_call( ['git', 'fetch', '--tags'], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Done fetching tags.') def _get_tag_list(verbose): _verbose_output(verbose, 'Parsing list of local tags...') result = subprocess.check_output( ['git', 'tag', '--list'], stderr=sys.stderr, ).decode('utf8').strip().split() _verbose_output(verbose, 'Result of tag list parsing is {}.', result) return result def _does_tag_exist_locally(release_version, verbose): _verbose_output(verbose, 'Checking if tag {} exists locally...', release_version) result = subprocess.check_output( ['git', 'tag', '--list', release_version], stderr=sys.stderr, ).decode('utf8').strip() exists = release_version in result _verbose_output(verbose, 'Result of exists check for tag {tag} is {result}.', tag=release_version, result=exists) return exists def _is_tag_on_remote(release_version, verbose): _verbose_output(verbose, 'Checking if tag {} was pushed to remote...', release_version) result = subprocess.check_output( ['git', 'ls-remote', '--tags', 'origin', release_version], stderr=sys.stderr, ).decode('utf8').strip() on_remote = release_version in result _verbose_output( verbose, 'Result of on-remote check for tag {tag} is {result}.', tag=release_version, result=on_remote, ) return on_remote def _get_remote_branches_with_commit(commit_hash, verbose): _verbose_output(verbose, 'Checking if commit {} was pushed to any remote branches...', commit_hash) result = subprocess.check_output( ['git', 'branch', '-r', '--contains', commit_hash], stderr=sys.stderr, ).decode('utf8').strip() on_remote = [] for line in result.splitlines(): line = line.strip() if line.startswith('origin/') and not line.startswith('origin/HEAD'): on_remote.append(line) _verbose_output( verbose, 'Result of on-remote check for commit {hash} is {remote}.', hash=commit_hash, remote=on_remote, ) return on_remote def _delete_local_tag(tag_name, verbose): _verbose_output(verbose, 'Deleting local tag {}...', tag_name) subprocess.check_call( ['git', 'tag', '-d', tag_name], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Finished deleting local tag {}.', tag_name) def _delete_remote_tag(tag_name, verbose): _verbose_output(verbose, 'Deleting remote tag {}...', tag_name) subprocess.check_call( ['git', 'push', 'origin', ':refs/tags/{}'.format(tag_name)], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Finished deleting remote tag {}.', tag_name) def _delete_last_commit(verbose): _verbose_output(verbose, 'Deleting last commit, assumed to be for version and changelog files...') extra_files = _get_extra_files_to_commit() subprocess.check_call( ['git', 'reset', '--soft', 'HEAD~1'], stdout=sys.stdout, stderr=sys.stderr, ) subprocess.check_call( ['git', 'reset', 'HEAD', VERSION_FILENAME, CHANGELOG_FILENAME] + extra_files, stdout=sys.stdout, stderr=sys.stderr, ) subprocess.check_call( ['git', 'checkout', '--', VERSION_FILENAME, CHANGELOG_FILENAME] + extra_files, stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Finished deleting last commit.') def _revert_remote_commit(release_version, commit_hash, branch_name, verbose): _verbose_output(verbose, 'Rolling back release commit on remote branch "{}"...', 
branch_name) subprocess.check_call( ['git', 'revert', '--no-edit', '--no-commit', commit_hash], stdout=sys.stdout, stderr=sys.stderr, ) release_message = 'REVERT: {}'.format(RELEASE_MESSAGE_TEMPLATE.format(release_version)) subprocess.check_call( ['git', 'commit', '-m', release_message], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Pushing changes to remote branch "{}"...', branch_name) subprocess.check_call( ['git', 'push', 'origin', '{0}:{0}'.format(branch_name)], stdout=sys.stdout, stderr=sys.stderr, ) _verbose_output(verbose, 'Finished rolling back release commit.') def _import_version_or_exit(): if VERSION_FILE_IS_TXT: # if there is version.txt, use that with codecs.open(VERSION_FILENAME, 'rb', encoding='utf8') as version_txt: return version_txt.read() try: return __import__('{}.version'.format(MODULE_NAME), fromlist=[str('__version__')]).__version__ except ImportError as e: import pprint _error_output_exit( 'Could not import `__version__` from `{module}.version`. Error was "ImportError: {err}." Path is:\n{path}', module=MODULE_NAME, err=e.args[0], path=pprint.pformat(sys.path), ) except AttributeError as e: _error_output_exit('Could not retrieve `__version__`
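# The core of the tagging flow above, minus the prompting and error-message
# scaffolding: create an annotated (optionally signed) tag, then verify the
# signature immediately so a bad key configuration fails fast. A sketch;
# the function name is illustrative.
import subprocess

def tag_and_verify(version, message, sign=False):
    cmd = ['git', 'tag', '-a', version, '-m', message]
    if sign:
        cmd.append('-s')  # sign with the configured GPG key
    subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    if sign:
        # `git tag -v` exits non-zero when the signature does not check out
        subprocess.check_call(['git', 'tag', '-v', version])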
#!/usr/bin/env python3

# Copyright (c) 2019 <NAME>, <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# TLS parameters (numbers used below variously) are found at:
# https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml

import traceback
import os,sys,argparse,re,random,time
import pyshark

# structures/function to handle (the bits we care about from) a TLS session
# given we may have large inputs, we wanna be less wasteful of memory
# so we'll use classes for this


class TLSSession():
    __slots__ = [
        'sess_id', 'fname', 'version', 'start_time', 'end_time', 'timestamp',
        'src', 'sport', 'dst', 'dport', 'certsize', 'cvsize', 'chsize',
        'shsize', 'chtime', 'rttest', 'min_pdu', 'max_pdu', 'num_sizes',
        's_psizes', 's_delays', 'd_psizes', 'd_delays', 'channel',
        'instrument', 'sortstr'
    ]

    def __init__(self, fname='', ver='', stime=0, tstamp='0', src='', sport='', dst='', dport=''):
        self.sess_id = random.getrandbits(32)
        self.fname = fname              # file name in which packet was seen
        self.version = ver              # TLS version from the 1st relevant packet we see
        self.start_time = stime         # file-relative start time of session
        self.end_time = 0               # file-relative end time of session
        self.timestamp = float(tstamp)  # absolute (epoch) timestamp of session start
        self.src = src                  # client IP (v4 or v6)
        self.sport = sport              # client port
        self.dst = dst                  # server IP
        self.dport = dport              # server port
        self.certsize = 0               # cert size if seen in h/s
        self.cvsize = 0                 # cert verify size if seen in h/s
        self.chsize = 0                 # ClientHello size
        self.shsize = 0                 # ServerHello size
        self.chtime = 0                 # for Estimated RTT record time of ClientHello
        self.rttest = 0                 # Estimated RTT based on gap between ClientHello and ServerHello timing
        self.min_pdu = sys.maxsize
        self.max_pdu = 0
        self.num_sizes = 0
        self.s_psizes = []              # list of APDU sizes from src, 0 is 1st, 1 2nd seen etc.
        self.s_delays = []              # list of relative time offsets from session start
        self.d_psizes = []              # list of APDU sizes from dst, 0 is 1st, 1 2nd seen etc.
self.d_delays=[] # list of relative time offsets from session start self.channel=0 # used in Tls2Music only (so far) self.instrument=0 # used in Tls2Music only (so far) self.sortstr=src+":"+sport+"->"+dst+":"+dport def __str__(self): return "ID: " + str(self.sess_id) + " V:" + self.version + "\n" + \ " started: " + str(self.start_time) + " ended: " + str(self.end_time) + " tstamp: " + str(self.timestamp) + "\n" + \ " file: " + self.fname + "\n" + \ "\t" + self.src + ":" + self.sport + "->" + self.dst + ":" + self.dport + "\n" + \ "\t" + "CH size: " + str(self.chsize) + " SH size: " + str(self.shsize) + "\n" + \ "\t" + "Estimated RTT: " + str(self.rttest) + "\n" + \ "\t" + "Cert size: " + str(self.certsize) + " CV size (proxy): " + str(self.cvsize) + "\n" + \ "\t" + "Min PDU: " + str(self.min_pdu) + " Max PDU: " + str(self.max_pdu) + " Num sizes: " + str(self.num_sizes) + "\n" + \ "\t" + "number tx'd: " + str(len(self.s_psizes)) + " rx'd: " + str(len(self.d_psizes)) + "\n" + \ "\t" + "source packet sizes: " + str(self.s_psizes) + "\n"+ \ "\t" + "source packet times: " + str(["%.3f" % v for v in self.s_delays]) + "\n" + \ "\t" + "dest packet sizes: " + str(self.d_psizes) + "\n" + \ "\t" + "dest packet times: " + str(["%.3f" % v for v in self.d_delays]) + "\n" + \ "\t" + "channel " + str(self.channel) + " instrument: " + str(self.instrument) def add_apdu(self,size,pkttime,pstamp,src): #print ("type(pstamp): " + str(type(pstamp)) + " type(self.timestamp): " + str(type(self.timestamp))) tdiff=float(pstamp)-self.timestamp msecs=tdiff*1000 isize=int(size) if src==True: self.s_psizes.append(isize) slen=len(self.s_delays) if slen>0 and self.s_delays[slen-1]>msecs: print("Oddity: src going backwards in time to " + str(msecs) + " from " + str(self) + " tstamp: " + str(pstamp)) self.s_delays.append(float(msecs)) elif src==False: self.d_psizes.append(isize) dlen=len(self.d_delays) if dlen>0 and self.d_delays[dlen-1]>msecs: print("Oddity: dest going backwards in time to " + str(msecs) + " from "+ str(self) + " tstamp: " + str(pstamp)) self.d_delays.append(float(msecs)) else: raise ValueError('Bad (non-boolean) given to add_apdu') if isize < self.min_pdu: self.min_pdu=isize if isize > self.max_pdu: self.max_pdu=isize # probably v. 
slow but we don't get huge numbers of packets/session # so likely not a big deal self.num_sizes=len(set(self.s_psizes+self.d_psizes)) def note_chsize(self,cs): self.chsize=cs def note_shsize(self,ss): self.shsize=ss def note_end(self,pkttime): self.end_time=pkttime # used for sorting sessions def get_sortstr(s): return s.sortstr def sess_find(fname,sessions,ver,ptime,ptstamp,src,sport,dst,dport): #print ("Checking for " + src + ":" + sport + " to/from " + dst + ":" + dport + "|") for s in sessions: #print("Considering: " + str(s)) if s.fname==fname and s.src==dst and s.sport==dport and s.dst==src and s.dport==sport: #print("Matched reverse") return s elif s.fname==fname and s.src==src and s.sport==sport and s.dst==dst and s.dport==dport: #print("Matched forward") return s # otherwise make a new one # TODO: extend/parameterise this set of known server ports sometime if dport=="443" or dport=="853" or dport=="993": #sys.stderr.write("New Session option 1: " + sport + "->" + dport + "\n") s=TLSSession(fname,ver,ptime,ptstamp,src,sport,dst,dport) sessions.append(s) return s elif sport=="443" or sport=="853" or sport=="993": #sys.stderr.write("New Session option 2: " + sport + "->" + dport + "\n") s=TLSSession(fname,ver,ptime,ptstamp,dst,dport,src,sport) sessions.append(s) return s else: # take 'em as they come s=TLSSession(fname,ver,ptime,ptstamp,src,sport,dst,dport) #print("New Session option 3: " + sport + "->" + dport + "Session ID: " + str(s.sess_id)) sessions.append(s) return s def analyse_pcaps(flist,sessions,verbose): # iterate through each file, gathering our stats for fname in flist: if verbose: print("Processing " + fname) try: f = pyshark.FileCapture(fname,display_filter='ssl') chtime=0 for pkt in f: src="" if 'ip' in pkt: src=pkt.ip.src dst=pkt.ip.dst elif 'ipv6' in pkt: src=pkt.ipv6.src dst=pkt.ipv6.dst else: sys.stderr.write("No sender!\n"); sys.stderr.write(str(dir(pkt))+"\n") sys.stderr.write(str(pkt)+"\n") continue if 'tcp' in pkt: dport=pkt.tcp.dstport sport=pkt.tcp.srcport else: sys.stderr.write("Not a TCP packet!"+"\n") sys.stderr.write(str(dir(pkt))+"\n") sys.stderr.write(str(pkt)+"\n") continue if 'ssl' not in pkt: #print ("Skipping non SSL packet from " + src) continue if not (hasattr(pkt.ssl,'record_content_type') or hasattr(pkt.ssl,'record_opaque_type')): #print("Skipping SSL packet with nonsense content") continue ver='unknown' if hasattr(pkt.ssl,'record_version'): ver=pkt.ssl.record_version # see if this is a known session or not this_sess=sess_find(fname,sessions,ver,pkt.sniff_time,pkt.sniff_timestamp,src,sport,dst,dport) if hasattr(pkt.ssl,'record_content_type') and pkt.ssl.record_content_type=="20": # print("ChangeCipherSpec") pass elif hasattr(pkt.ssl,'record_content_type') and pkt.ssl.record_content_type=="21": # TODO: if we get two of these, one from each side, that may be good reason to # think that's the end of this TLS session, but maybe more checking is # needed, we'll see... 
# print("EncryptedAlert at " + str(pkt.sniff_time) + " for: " + str(this_sess)) this_sess.note_end(pkt.sniff_time) pass elif hasattr(pkt.ssl,'record_content_type') and pkt.ssl.record_content_type=="22": # handshake if hasattr(pkt.ssl,'handshake_type'): if pkt.ssl.handshake_type=="1": #print("ClientHello for " + str(this_sess.sess_id)) this_sess.note_chsize(pkt.ssl.record_length) this_sess.chtime=pkt.sniff_time pass elif pkt.ssl.handshake_type=="2": #print("ServerHello") this_sess.note_shsize(pkt.ssl.record_length) if this_sess.chtime==0: this_sess.rttest=-1 else: td=pkt.sniff_time-this_sess.chtime this_sess.rttest=int(td.total_seconds()*1000) pass elif pkt.ssl.handshake_type=="4": #print("NewSessionTicket") pass elif pkt.ssl.handshake_type=="11": #print("Certificate") this_sess.certsize=pkt.ssl.record_length # If RSA: # Use the server cert modulus size as a proxy for what # would be the size of a TLS1.3 CertificateVerify # Modulus format here is of the form "00:aa:bb..." # So we want to loose the colons (1/3 of length) # then divide by 2 to get octets # then add 10 which'd be the overhead for a TLS1.3 CertificateVerify # So bottom line is divide by 3, then add 10 # and "//" is integer divide for python 3 if hasattr(pkt.ssl,'pkcs1_modulus'): mlen=len(pkt.ssl.pkcs1_modulus) mlen=(mlen//3)+10 if this_sess.cvsize==0: this_sess.cvsize=mlen else: # Don't think this should happen, but who knows... # If it does, better we know sys.stderr.write("Re-setting cvsize for " + str(this_sess.sess_id) + \ " from: " + str(this_sess.cvsize) + \ " to: " + str(mlen) + "\n" ) this_sess.cvsize=mlen elif hasattr(pkt.ssl,'pkcs1_ecparameters') and hasattr(pkt.ssl,'x509af_subjectpublickey'): # same encoding
<gh_stars>0 import package.data_input as di import ete3 import math import package.instance as inst import package.params as pm import numpy as np import matplotlib try: import tkinter except: matplotlib.use('Qt5Agg', warn=False, force=True) import matplotlib.pyplot as plt import palettable as pal import pprint as pp import pandas as pd import package.auxiliar as aux import os import package.superdict as sd import package.tuplist as tl import package.nodes as nd import package.nodes_checks as nc import package.geometry as geom class Solution(inst.Instance): def __init__(self, input_data, solution_data): super().__init__(input_data) # self.sol_data = solution_data if len(solution_data) == 0: self.trees = [] return self.trees = [] defects = input_data['defects'] data_byPlate = solution_data.index_by_property('PLATE_ID', get_list=True) for pos, plate in enumerate(data_byPlate): tree = self.get_tree_from_solution_data(plate) tree.add_feature('DEFECTS', defects.get(pos, [])) self.trees.append(tree) self.order_all_children() @staticmethod def get_tree_from_solution_data(solution_data): parent_child = [(int(v['PARENT']), int(k), 1) for k, v in solution_data.items() if not math.isnan(v['PARENT'])] for p, c, d in parent_child: if p == c: raise ValueError('parent cannot be the same node!') if len(parent_child) == 0: # this means we have a one-node tree: name = [*solution_data.keys()][0] tree = ete3.Tree(name=name) else: tree = ete3.Tree.from_parent_child_table(parent_child) # add info about each node for node in tree.traverse(): for k, v in solution_data[node.name].items(): if math.isnan(v): node.add_feature(k, v) else: node.add_feature(k, int(v)) return tree def draw(self, pos=0, *attributes): node = self.trees[pos] nd.draw(node, *attributes) return def draw_interactive(self, pos=0): return self.trees[pos].show() @staticmethod def search_case_in_options(path): try: options = di.load_data(path + 'options.json') except FileNotFoundError: return None else: return options.get('case_name', None) @classmethod def from_io_files(cls, case_name=None, path=pm.PATHS['checker_data'], solutionfile="solution"): if case_name is None: case_name = cls.search_case_in_options(path) if case_name is None: raise ImportError('case_name is None and options.json is not available') input_data = di.get_model_data(case_name, path) solution = di.get_model_solution(case_name, path, filename=solutionfile) return cls(input_data, solution) @classmethod def from_input_files(cls, case_name=None, path=pm.PATHS['data'], **kwargs): if case_name is None: case_name = cls.search_case_in_options(path) if case_name is None: raise ImportError('case_name is None and options.json is not available') return cls(di.get_model_data(case_name, path), **kwargs) def get_cuts(self): # for each node, we get the cut that made it. # there's always one of the children that has no cut # each cut wll have as property the first and second piece pass def order_all_children(self): for tree in self.trees: nd.order_children(tree) def get_pieces_by_type(self, solution, by_plate=False, pos=None, min_type=0): """ Gets the solution pieces indexed by the TYPE. :param by_plate: when active it returns a dictionary indexed by the plates. So it's {PLATE_0: {0: leaf0, 1: leaf1}, PLATE_1: {}} :param pos: get an individual plate marked by pos :param filter_type: if True: gets only demanded items. 
If not: returns all plates :param solution: if given it's evaluated instead of self.trees :return: {0: leaf0, 1: leaf1} """ if pos is None: leaves = [leaf for tree in solution for leaf in nd.get_node_leaves(tree, min_type)] else: leaves = [leaf for leaf in nd.get_node_leaves(solution[pos], min_type)] if not by_plate: return {int(leaf.TYPE): leaf for leaf in leaves} leaves_by_plate = sd.SuperDict({tree.PLATE_ID: {} for tree in solution}) for leaf in leaves: leaves_by_plate[leaf.PLATE_ID][int(leaf.TYPE)] = leaf if pos is None: return leaves_by_plate return leaves_by_plate[pos] def get_plate_production(self): return [(leaf.WIDTH, leaf.HEIGHT) for tree in self.trees for leaf in tree.get_leaves()] def count_errors(self): checks = self.check_all() return len([i for k, v in checks.items() for i in v]) def check_all(self): func_list = { 'overlapping': self.check_overlapping , 'sequence': self.check_sequence , 'defects': self.check_defects , 'demand': self.check_demand_satisfied , 'ch_size': self.check_nodes_fit , 'inside': self.check_parent_of_children , 'cuts': self.check_cuts_number , 'max_cut': self.check_max_cut , 'position': self.check_nodes_inside_jumbo , 'types': self.check_wrong_type , 'ch_order': self.check_children_order , 'node_size': self.check_sizes , 'waste_size': self.check_waste_size } result = {k: v() for k, v in func_list.items()} return {k: v for k, v in result.items() if len(v) > 0} def check_consistency(self): func_list = { 'ch_size': self.check_nodes_fit , 'inside': self.check_parent_of_children , 'cuts': self.check_cuts_number , 'types': self.check_wrong_type , 'ch_order': self.check_children_order , 'node_size': self.check_sizes , 'only_child': self.check_only_child } result = {k: v() for k, v in func_list.items()} return {k: v for k, v in result.items() if len(v) > 0} def check_only_child(self): return [a for t in self.trees for a in nc.check_only_child(t)] def check_overlapping(self): solution = self.trees plate_leaves = self.get_pieces_by_type(by_plate=True, solution=solution) overlapped = [] for plate, leaves in plate_leaves.items(): for k1, leaf1 in leaves.items(): point1 = {'X': leaf1.X, 'Y': leaf1.Y} point2 = {'X': leaf1.X + leaf1.WIDTH, 'Y': leaf1.Y + leaf1.HEIGHT} for k2, leaf2 in leaves.items(): square = nd.node_to_square(leaf2) if geom.point_in_square(point1, square) or \ geom.point_in_square(point2, square): overlapped.append((leaf1, leaf2)) return overlapped def get_previous_nodes(self, solution=None, type_node_dict=None): """ :param solution: forest: a list of trees. :return: """ if type_node_dict is None or solution is not None: if solution is None: solution = self.trees type_node_dict = self.get_pieces_by_type(solution=solution) prev_items = self.get_previous_items() prev_nodes = {} for k, v in prev_items.items(): prev_nodes[type_node_dict[k]] = [] for i in v: prev_nodes[type_node_dict[k]].append(type_node_dict[i]) return sd.SuperDict(prev_nodes) def check_sequence(self, solution=None, type_node_dict=None): wrong_order = [] n_prec = self.get_previous_nodes(solution=solution, type_node_dict=type_node_dict) for node, prec_nodes in n_prec.items(): for prec_node in prec_nodes: # prec is in a previous plate: correct if node.PLATE_ID > prec_node.PLATE_ID: continue # if prec is in the next plate: very incorrect if node.PLATE_ID < prec_node.PLATE_ID: wrong_order.append((node, prec_node)) continue # if we're here, they are in the same plate/ tree. 
# we find the common ancestor and check which node's # ancestors appear first if nd.check_node_order(node, prec_node): wrong_order.append((node, prec_node)) return tl.TupList(wrong_order) def check_defects(self, solution=None): """ :return: [(node, defect), ()] """ node_defect = self.get_nodes_defects(solution) return [(node, defect) for node, defect in node_defect if node.TYPE >= 0] def get_nodes_defects(self, solution=None): # A defect can be in more than one node/ if solution is None: solution = self.trees defect_node = [] defects_by_plate = self.get_defects_per_plate() # if solution is None: # a = 1 # pass for tree in solution: if tree.PLATE_ID not in defects_by_plate: continue for defect in defects_by_plate[tree.PLATE_ID]: nodes = nd.search_nodes_of_defect(tree, defect) assert nodes is not None, 'defect {} doesnt have node'.format(defect) defect_node.extend((node, defect) for node in nodes) return defect_node def check_waste_size(self, solution=None): min_waste = self.get_param('minWaste') if solution is None: solution = self.trees bad_wastes = [] for tree in solution: wastes = nd.get_node_leaves(tree, type_options=[-1, -3]) bad_wastes.extend([w for w in wastes if 0 < w.WIDTH < min_waste or 0 < w.HEIGHT < min_waste]) return bad_wastes def check_space_usage(self, solution=None): # if solution is None: # solution = self.trees return self.calculate_objective(solution, discard_empty_trees=True) # return sum(self.calculate_residual_plate(tree)*(pos+1)**4 for pos, tree in enumerate(solution)) / \ # (self.get_param('widthPlates') * len(solution)**4) # sum(nd.get_node_position_cost(n, self.get_param('widthPlates')) for tree in solution # for n in nd.get_node_leaves(tree, type_options=[-1, -3])) / \ # ((self.get_param('widthPlates') * len(solution))**2 *self.get_param('widthPlates')*self.get_param('heightPlates')) def calculate_residual_plate(self, node): waste = nd.find_waste(node, child=True) if waste is None: return 0 return waste.WIDTH def check_nodes_inside_jumbo(self): w, h = self.get_param('widthPlates'), self.get_param('heightPlates') plate_square = {'DL': {'X': 0, 'Y': 0}, 'UR': {'X': w, 'Y': h}} bad_position = [] for tree in self.trees: for node in tree.iter_leaves(): square = nd.node_to_square(node) if geom.square_inside_square(square, plate_square, both_sides=False): continue bad_position.append(node) return bad_position def check_cuts_number(self): """ This checks if the CUT property of each node really corresponds with the node's level. :return: """ return [a for t in self.trees for a in nc.check_cuts_number(t)] def check_max_cut(self): """ check that the maximum achieved level is 4 :return: """ levels = {} for tree in self.trees: levels.update(nd.assign_cut_numbers(tree, update=False)) return [(node, level) for node, level in levels.items() if level > 4 or\ (level == 4 and len(node.get_sisters()) > 1)] def check_wrong_type(self): return [a for t in self.trees for a in nc.check_wrong_type(t)] def check_nodes_fit(self): return [a for t in self.trees for a in nc.check_nodes_fit(t)] def check_children_order(self): # This checks that the order of the children # follows the positions. # meaining: if children A is before B # it is lower or more to the left return [a for t in self.trees for a in nc.check_children_order(t)] def check_sizes(self): return [a for t in self.trees for a in nc.check_sizes(t)] def check_parent_of_children(self): """ We want to check if each node is inside its parent. 
:return: """ return [a for t in self.trees for a in nc.check_parent_of_children(t)] def check_demand_satisfied(self): demand = self.get_batch() produced = [] pieces = self.get_pieces_by_type(solution=self.trees) for k, leaf in pieces.items(): item = demand.get(k, None) # there's no demand for this item code if item is None: continue plate1 = item['WIDTH_ITEM'], item['LENGTH_ITEM'] plate2 = leaf.WIDTH, leaf.HEIGHT if geom.plate_inside_plate(plate1, plate2): produced.append(k) return np.setdiff1d([*demand], produced) def calculate_objective(self, solution=None, discard_empty_trees=False): if solution is None: solution = self.trees if not solution: return None
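# Usage sketch for the checker above: load a case and run the full battery.
# The case name is an illustrative placeholder; from_io_files defaults to
# pm.PATHS['checker_data'] for the path.
if __name__ == '__main__':
    sol = Solution.from_io_files(case_name='A1')
    for check, offenders in sol.check_all().items():
        print(check, len(offenders))   # e.g. "overlapping 2", "defects 1"
    print('total errors:', sol.count_errors())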
# symmetric matrix
check = check_symmetric(Anxn)
print('This is {} that the matrix is symmetric'.format(check))

# Plot a histogram of the matrix entries against the normal density.
# https://www.tutorialspoint.com/python_data_science/python_normal_distribution.htm
mu, sig = cp.mean(Anxn), cp.std(Anxn)
print('For n = {0}, mu = {1}, sig = {2}'.format(n, mu, sig))
mu = cp.asnumpy(mu)
sigma = cp.asnumpy(sig)
s = cp.asnumpy(cp.squeeze(Anxn))
plt.figure()
count, bins, ignored = plt.hist(s, density=True)
plt.plot(bins,
         1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(bins - mu) ** 2 / (2 * sigma ** 2)),
         linewidth=3, color='y')
plt.show()


def eig_histogram(n, mat_gen, title, exclude_largest=False):
    """Generate an n-by-n random matrix with mat_gen, compute its
    eigenvalues with CuPy, and plot their histogram."""
    m = mat_gen(n)
    w, v = cupy_eig(m)
    if exclude_largest:
        # Drop the leading eigenvalue (the one with the largest magnitude)
        # before computing the histogram.
        idx = cp.argmax(abs(w))
        w = np.delete(cp.asnumpy(w), cp.asnumpy(idx))
    mu = cp.asnumpy(cp.mean(m))
    sigma = cp.asnumpy(cp.std(m))
    plt.figure()
    count, bins, ignored = plt.hist(cp.asnumpy(w), bins=30)
    # plt.plot(bins, 1 / (sigma * np.sqrt(2 * np.pi))
    #          * np.exp(-(bins - mu) ** 2 / (2 * sigma ** 2)), linewidth=3, color='y')
    plt.title('{} n = {}'.format(title, n))
    plt.show()
    print('For n = {0}, mu = {1}, sig = {2}'.format(n, mu, sigma))
    print('For n = {0}, eigval = \n{1}'.format(n, w))


# 3c) Create a matrix m = rand_mat_gauss(n) with n = 1000, use CuPy to
#     compute its eigenvalues, and plot their histogram.
# 3d) Repeat 3c with n = 2000 and n = 4000.
for n in (1000, 2000, 4000):
    eig_histogram(n, rand_mat_gauss, '3c)' if n == 1000 else '3d)')
    print('The distribution of the eigenvalues follows a smooth curve in the histogram\n')

# What features are independent of n?
print('As shown in the plots, the range of the eigenvalues grows with n.\n'
      'The shape of the distribution of the eigenvalues is independent of n.\n'
      'The leading eigenvalues are proportional to n.')

# Problem 4
# 4a) Repeat problem 3, but with matrix entries chosen uniformly from
#     {-1, 1} instead of from a normal distribution. For this problem,
#     exclude the largest eigenvalue before computing the histogram.
for n in (1000, 2000, 4000):
    eig_histogram(n, rand_mat_plusminus, '4a)', exclude_largest=True)

# When the distribution from which the elements are chosen is changed,
# what is preserved and what changes?
print('As shown in the histograms, the range of the eigenvalues grows as n increases,\n'
      'but the shape of the distribution does not change much.')

# 4b) Again explore the distribution of the eigenvalues and the dependence
#     on n. When the distribution is changed this time, what is preserved
#     and what changes?
for n in (1000, 2000, 4000):
    eig_histogram(n, rand_mat_uniform, '4b)', exclude_largest=True)
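# Note: the generators used above (rand_mat_gauss, rand_mat_plusminus,
# rand_mat_uniform) and the helpers check_symmetric / cupy_eig are defined
# earlier in this assignment. For reference, a minimal CuPy sketch that is
# consistent with how they are called here; the original definitions and
# normalizations may differ:
import cupy as cp

def check_symmetric(a, tol=1e-8):
    # True if the matrix equals its transpose up to a tolerance.
    return bool(cp.allclose(a, a.T, atol=tol))

def rand_mat_gauss(n):
    # Symmetric matrix with normally distributed entries
    # (one common symmetrization; it halves the off-diagonal variance).
    a = cp.random.standard_normal((n, n))
    return (a + a.T) / 2

def rand_mat_plusminus(n):
    # Symmetric matrix with entries drawn from {-1, +1}; mirroring the
    # upper triangle keeps every entry in {-1, +1}.
    a = 2 * cp.random.randint(0, 2, size=(n, n)) - 1
    return cp.triu(a) + cp.triu(a, 1).T

def rand_mat_uniform(n):
    # Symmetric matrix with entries uniform on [0, 1).
    a = cp.random.random((n, n))
    return (a + a.T) / 2

def cupy_eig(m):
    # Eigenvalues and eigenvectors of a symmetric matrix on the GPU.
    return cp.linalg.eigh(m)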
controlled state evolutions

    Saves a :class:`Propagator` and a list of cost functions
    $(k_0, \dots, k_T)$ of equal length. As a callable, implements the map
    $$k^{F, A}\colon (s_0, (\xi_t)_{t=1}^{T+1}) \mapsto \sum_{t=0}^T k_t(s_t, A_t(s_t))$$
    where $(A_0,\dots, A_T)$ are the control functions saved by the
    :class:`Propagator` and $(s_t)_{t=0}^T$ is the state evolution as
    computed by the :class:`Propagator`.
    """

    def __init__(self,
                 propagator: Propagator,
                 cost_functions: Sequence[Optional[Any]]) -> None:
        r"""
        Construct a :class:`CostToGo` object from a :class:`Propagator` and
        a list of cost functions

        Must provide a list ``cost_functions`` of cost functions compatible
        in length with ``propagator``.

        Parameters
        ----------
        propagator :
            The possibly zero-length Propagator
        cost_functions :
            The possibly empty sequence of cost functions

        Raises
        ------
        ValueError
            Raised if the lengths of ``propagator`` and ``cost_functions``
            differ.
        """
        super(CostToGo, self).__init__()

        if not len(propagator) == len(cost_functions):
            raise ValueError("Length mismatch.")

        self.propagator = propagator
        self._cost_functions = ModuleList(*cost_functions)

    @classmethod
    def from_steps(cls, number_of_steps: int) -> CostToGo:
        """
        Construct an empty :class:`CostToGo` object of a certain length.

        Parameters
        ----------
        number_of_steps :
            The length, in terms of a number of steps (see :func:`from_steps`).

        Returns
        -------
        CostToGo
            The :class:`CostToGo` object of the specified length.
        """
        cost_functions = [None] * (number_of_steps + 1)
        propagator = Propagator.from_steps(number_of_steps)
        return CostToGo(propagator, cost_functions)

    def __repr__(self,
                 optimizer: Optional[torch.optim.Optimizer] = None,
                 include_id: bool = False,
                 _print_width: int = _PRINT_WIDTH) -> str:
        TIME_COL_WIDTH = 6
        COL_WIDTH = int((_print_width - TIME_COL_WIDTH) / 3 - 1)
        desc_width = (COL_WIDTH
                      # Spacing left and right within cell:
                      - 2
                      # opt state indicator counts 2 and wants spacer:
                      - (2 if optimizer is not None else 0))

        repr_lines = []
        repr_lines.append("CostToGo(")
        repr_lines.append("|".join([
            f"{'time' : ^{TIME_COL_WIDTH}}",
            f"{'state_func' : ^{COL_WIDTH}}",
            f"{'control_func' : ^{COL_WIDTH}}",
            f"{'cost_func' : ^{COL_WIDTH}}"
        ]))
        repr_lines.append("=" * (TIME_COL_WIDTH + 3 * (1 + COL_WIDTH)))

        for time, (state_func, control_func, cost_func) in enumerate(zip(
            [None] + list(self.state_functions),
            list(self.control_functions) + [None],
            list(self.cost_functions) + [None]
        )):
            opt_state, desc_state = _info(state_func, optimizer=optimizer,
                                          include_id=include_id, width=desc_width)
            opt_control, desc_control = _info(control_func, optimizer=optimizer,
                                              include_id=include_id, width=desc_width)
            opt_cost, desc_cost = _info(cost_func, optimizer=optimizer,
                                        include_id=include_id, width=desc_width)

            cell_time = f" {time} "
            cell_state = f"{desc_state : ^{desc_width}}"
            cell_control = f"{desc_control : ^{desc_width}}"
            cell_cost = f"{desc_cost : ^{desc_width}}"

            if optimizer is not None:
                cell_state = opt_state + " " + cell_state
                cell_control = opt_control + " " + cell_control
                cell_cost = opt_cost + " " + cell_cost

            # Adjustments:
            if time == 0:
                cell_state = ""
            if time == len(self):
                cell_time = f"({time})"
                cell_control = ""
                cell_cost = ""

            repr_lines.append(" ".join([
                f"{cell_time : >{TIME_COL_WIDTH}}",
                f"{cell_state : ^{COL_WIDTH}}",
                f"{cell_control : ^{COL_WIDTH}}",
                f"{cell_cost : ^{COL_WIDTH}}"
            ]))

        repr_lines.append(")")
        return "\n".join(repr_lines)

    def descr(self, optimizer=None, include_id=True):
        print(self.__repr__(optimizer=optimizer, include_id=include_id))

    def __setattr__(self, name: str, value: Any) -> None:
        if name == 'propagator' and hasattr(self, 'propagator'):
            warnings.warn('Change state and cost functions directly')
            return
        else:
            super().__setattr__(name, value)

    @property
    def state_functions(self) -> MutableSequence[Optional[Any]]:
        r"""
        The mutable, zero-based sequence of state functions
        $(F_0,\dots, F_T)$ as saved by :attr:`CostToGo.propagator`.

        To manipulate state functions, access the contents of this sequence.
        Immutable as a property in the sense that any attempt to replace the
        sequence by another object will be ignored.

        Returns
        -------
        MutableSequence[Optional[Any]]
            The state functions.
        """
        return self.propagator.state_functions

    @state_functions.setter
    def state_functions(self, value: Any) -> None:
        self.propagator.state_functions = value

    @property
    def control_functions(self) -> MutableSequence[Optional[Any]]:
        r"""
        The mutable, zero-based sequence of control functions
        $(A_0,\dots, A_T)$ as saved by :attr:`CostToGo.propagator`.

        To manipulate control functions, access the contents of this
        sequence. Immutable as a property in the sense that any attempt to
        replace the sequence by another object will be ignored.

        Returns
        -------
        MutableSequence[Optional[Any]]
            The control functions.
        """
        return self.propagator.control_functions

    @control_functions.setter
    def control_functions(self, value: Any) -> None:
        self.propagator.control_functions = value

    @property
    def cost_functions(self) -> MutableSequence[Optional[Any]]:
        r"""
        The mutable, zero-based sequence of cost functions $(k_0,\dots, k_T)$.

        To manipulate cost functions, access the contents of this sequence.
        Immutable as a property in the sense that any attempt to replace the
        sequence by another object will be ignored.

        Returns
        -------
        MutableSequence[Optional[object]]
            The mutable sequence of cost functions.
        """
        return self._cost_functions

    @cost_functions.setter
    def cost_functions(self, value: Any) -> None:
        warnings.warn("To change cost functions, access list contents. Ignored.")

    def __getitem__(self, key: Union[int, slice]) -> CostToGo:
        r"""
        Return a sub-:class:`CostToGo` of ``self``

        If ``self`` has the state functions $F=(F_0,\dots, F_T)$, the
        control functions $A=(A_0,\dots, A_T)$ and the cost functions
        $k=(k_0,\dots, k_T)$ and ``key`` specifies the subset
        $I=\{t_0,\dots, t_k\}$ of $\{0, \dots, T\}$, then return the
        :class:`CostToGo` given by the state functions
        $(F_{t_0},\dots, F_{t_k})$, the control functions
        $(A_{t_0}, \dots, A_{t_k})$ and the cost functions
        $(k_{t_0},\dots, k_{t_k})$.

        Parameters
        ----------
        key : Union[int, slice]
            Specifies the subrange; ``int``'s are singleton ranges

        Returns
        -------
        CostToGo
            The sub-:class:`CostToGo`

        Raises
        ------
        KeyError
            If ``key`` does not specify a valid subrange
        """
        if isinstance(key, int):
            return CostToGo(
                propagator=self.propagator[key],
                cost_functions=[self.cost_functions[key]]
            )
        elif isinstance(key, slice):
            return CostToGo(
                propagator=self.propagator[key],
                cost_functions=self.cost_functions[key]
            )
        else:
            raise KeyError("Query just like a list.")

    def __setitem__(self, key: Union[int, slice], value: CostToGo) -> None:
        if isinstance(value, CostToGo):
            self.state_functions[key] = value.state_functions
            self.control_functions[key] = value.control_functions
            self.cost_functions[key] = value.cost_functions
        else:
            raise ValueError("Cannot assign given value.")

    def __len__(self) -> int:
        r"""
        The length of ``self`` as a :class:`CostToGo`

        If ``self`` has the state functions $F=(F_0,\dots, F_T)$, the
        control functions $A=(A_0,\dots, A_T)$ and the cost functions
        $k=(k_0,\dots, k_T)$, then the length of ``self`` is $T+1$.

        Returns
        -------
        int
            The length, $T+1$
        """
        assert len(self.propagator) == len(self.cost_functions)
        return len(self.cost_functions)

    def steps(self) -> int:
        r"""
        The number of steps of ``self`` as a cost-to-go

        If ``self`` has the state functions $F=(F_0,\dots, F_T)$, the
        control functions $A=(A_0,\dots, A_T)$ and the cost functions
        $k=(k_0,\dots, k_T)$, then the number of steps of ``self`` is $T$.

        Returns
        -------
        int
            The number of steps, $T$
        """
        return len(self) - 1

    def __add__(self, other: CostToGo) -> CostToGo:
        r"""
        Add, i.e. concatenate, two :class:`CostToGo` objects.

        If ``self`` has the state functions $F=(F_0,\dots, F_T)$, the
        control functions $A=(A_0,\dots, A_T)$ and the cost functions
        $k=(k_0,\dots, k_T)$ and ``other`` is as well a :class:`CostToGo`
        with the state functions $G=(G_0,\dots, G_S)$, the control functions
        $B=(B_0,\dots, B_S)$ and the cost functions $l=(l_0,\dots, l_S)$,
        then return the concatenated $(T+S+1)$-step :class:`CostToGo` given
        by the state functions $(F_0, \dots, F_T, G_0, \dots, G_S)$, the
        control functions $(A_0,\dots, A_T, B_0,\dots, B_S)$ and the cost
        functions $(k_0,\dots, k_T, l_0,\dots, l_S)$.

        Parameters
        ----------
        other : CostToGo
            The :class:`CostToGo` to be appended on the right

        Returns
        -------
        CostToGo
            The concatenation of ``self`` and ``other``

        Raises
        ------
        TypeError
            Raised if ``other`` is not a :class:`CostToGo`
        """
        if isinstance(other, CostToGo):
            return CostToGo(
                self.propagator + other.propagator,
                list(self.cost_functions) + list(other.cost_functions)
            )
        else:
            raise TypeError("Can only add `CostToGo`'s")

    def forward(self,
                initial_state: Optional[torch.Tensor] = None,
                random_effects: Optional[Sequence[Optional[torch.Tensor]]] = None) -> torch.Tensor | int:
        states, controls, _ = self.propagator.propagate(
            initial_state,
            random_effects
        )

        cost = 0
        for step, cost_func in enumerate(self.cost_functions):
            if cost_func is not None:
                cost_args = []
                if states[step] is not None:
                    cost_args.append(states[step])
                if controls[step] is not None:
                    cost_args.append(controls[step])
                # if random_effects[step] is not None:
                #     cost_args.append(random_effects[step])
                cost_step = cost_func(*cost_args)
                if cost_step.dim() <= 1:
                    cost_step = cost_step.expand(1, *[cost_step.size() or [1]])
                cost = cost + cost_step

        return cost

    def plot_state_component_range(
        self,
        *component_ranges: Sequence[torch.Tensor],
        plot_component_index: int = 0,
        random_effects: Optional[List[Optional[torch.Tensor]]] = None,
        versus: Optional[CostToGo] = None,
        plot_size=(8, 5)
    ) -> plt.Axes:
        component_ranges = list(component_ranges)

        # Make range of scalars
        for component_range in component_ranges:
            if isinstance(component_range, torch.Tensor):
                component_range.squeeze()  # Does this even do anything? TODO No!
            else:
                for component in component_range:
                    component.squeeze()

        # `component_range` will have plot component removed:
        plot_component_range = component_ranges.pop(plot_component_index)
        length_plot_range = len(plot_component_range)

        states_range = map(
            torch.Tensor.cpu,
            plot_component_range.squeeze()
        )
        states_range = np.array(list(map(
            torch.Tensor.numpy,
            states_range
        )))

        if isinstance(versus, CostToGo):
            versus = [versus]
        if versus is None:
            versus = []
        cost_to_gos = [self] + versus

        number_plots = int(np.prod(list(map(len, component_ranges))))
        fig, axs = plt.subplots(nrows=number_plots, squeeze=False)
        fig_size = (plot_size[0],) + (number_plots * plot_size[1],)
        fig.set_size_inches(fig_size)

        with torch.no_grad(), ExitStack() as stack:
            cost_to_gos = [stack.enter_context(_evaluating(cost_to_go))
                           for cost_to_go in cost_to_gos]
            for i, fixed_comps in enumerate(it.product(*component_ranges)):
                fixed_states = list(fixed_comps)
                # The following inserts the plot components between the
                # other components at the right index:
                states
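# To make the CostToGo interface above concrete, a small hypothetical usage
# sketch. It relies only on the methods shown here (from_steps, slicing,
# concatenation, in-place editing of cost_functions), assumes Propagator
# supports the indexing and concatenation referenced above, and assumes cost
# functions are torch.nn.Modules with the k_t(state, control) signature
# suggested by forward(); QuadraticCost is illustrative, not part of the
# library:
import torch

class QuadraticCost(torch.nn.Module):
    # Illustrative k_t(s_t, a_t) = |s_t|^2 + |a_t|^2, one value per sample.
    def forward(self, state, control):
        return (state ** 2).sum(-1) + (control ** 2).sum(-1)

ctg = CostToGo.from_steps(3)            # empty cost-to-go with T = 3
assert len(ctg) == 4 and ctg.steps() == 3

# Cost functions are edited in place; the property setter ignores attempts
# to replace the sequence wholesale.
ctg.cost_functions[0] = QuadraticCost()

# Sub-cost-to-gos and concatenation round-trip:
head, tail = ctg[:2], ctg[2:]
assert len(head + tail) == len(ctg)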
6.36674450874035)**2 - 1) * (0.0001 + 0.9999 * m.b221) + 0.00544122020134202 * m.b221 <= 0.00544122020134202) m.e409 = Constraint(expr= ((-m.x853 / (0.0001 + 0.9999 * m.b222) + 8.48963678940351)**2 + (-m.x854 / (0.0001 + 0.9999 * m.b222) + 5.48402210821562)**2 - 1) * (0.0001 + 0.9999 * m.b222) + 0.0101148431299391 * m.b222 <= 0.0101148431299391) m.e410 = Constraint(expr= ((-m.x855 / (0.0001 + 0.9999 * m.b223) + 6.03160481727421)**2 + (-m.x856 / (0.0001 + 0.9999 * m.b223) + 1.276552921829)**2 - 1) * (0.0001 + 0.9999 * m.b223) + 0.00370098440339957 * m.b223 <= 0.00370098440339957) m.e411 = Constraint(expr= ((-m.x857 / (0.0001 + 0.9999 * m.b224) + 6.90662266429413)**2 + (-m.x858 / (0.0001 + 0.9999 * m.b224) + 8.78931785429954)**2 - 1) * (0.0001 + 0.9999 * m.b224) + 0.012395354497085 * m.b224 <= 0.012395354497085) m.e412 = Constraint(expr= ((-m.x859 / (0.0001 + 0.9999 * m.b225) + 3.71078725250056)**2 + (-m.x860 / (0.0001 + 0.9999 * m.b225) + 4.66415664133948)**2 - 1) * (0.0001 + 0.9999 * m.b225) + 0.00345242992082719 * m.b225 <= 0.00345242992082719) m.e413 = Constraint(expr= ((-m.x861 / (0.0001 + 0.9999 * m.b226) + 0.107239833314705)**2 + (-m.x862 / (0.0001 + 0.9999 * m.b226) + 9.26202525440128)**2 - 1) * (0.0001 + 0.9999 * m.b226) + 0.00847966121950165 * m.b226 <= 0.00847966121950165) m.e414 = Constraint(expr= ((-m.x863 / (0.0001 + 0.9999 * m.b227) + 8.04739319923014)**2 + (-m.x864 / (0.0001 + 0.9999 * m.b227) + 3.59305161391794)**2 - 1) * (0.0001 + 0.9999 * m.b227) + 0.00766705572032937 * m.b227 <= 0.00766705572032937) m.e415 = Constraint(expr= ((-m.x865 / (0.0001 + 0.9999 * m.b228) + 5.28411061001579)**2 + (-m.x866 / (0.0001 + 0.9999 * m.b228) + 9.36648183133357)**2 - 1) * (0.0001 + 0.9999 * m.b228) + 0.0114652806835583 * m.b228 <= 0.0114652806835583) m.e416 = Constraint(expr= ((-m.x867 / (0.0001 + 0.9999 * m.b229) + 1.06171481272944)**2 + (-m.x868 / (0.0001 + 0.9999 * m.b229) + 3.65844432398946)**2 - 1) * (0.0001 + 0.9999 * m.b229) + 0.00135114532152998 * m.b229 <= 0.00135114532152998) m.e417 = Constraint(expr= ((-m.x869 / (0.0001 + 0.9999 * m.b230) + 9.05099582793631)**2 + (-m.x870 / (0.0001 + 0.9999 * m.b230) + 3.98215513024397)**2 - 1) * (0.0001 + 0.9999 * m.b230) + 0.00967780849586488 * m.b230 <= 0.00967780849586488) m.e418 = Constraint(expr= ((-m.x871 / (0.0001 + 0.9999 * m.b231) + 9.08772707393705)**2 + (-m.x872 / (0.0001 + 0.9999 * m.b231) + 3.01388234113091)**2 - 1) * (0.0001 + 0.9999 * m.b231) + 0.00906702701365492 * m.b231 <= 0.00906702701365492) m.e419 = Constraint(expr= ((-m.x873 / (0.0001 + 0.9999 * m.b232) + 8.83910845171846)**2 + (-m.x874 / (0.0001 + 0.9999 * m.b232) + 8.26686922455514)**2 - 1) * (0.0001 + 0.9999 * m.b232) + 0.0145470964997138 * m.b232 <= 0.0145470964997138) m.e420 = Constraint(expr= ((-m.x875 / (0.0001 + 0.9999 * m.b233) + 3.41090111187451)**2 + (-m.x876 / (0.0001 + 0.9999 * m.b233) + 6.48985541572597)**2 - 1) * (0.0001 + 0.9999 * m.b233) + 0.00527524697120144 * m.b233 <= 0.00527524697120144) m.e421 = Constraint(expr= ((-m.x877 / (0.0001 + 0.9999 * m.b234) + 1.22004189182266)**2 + (-m.x878 / (0.0001 + 0.9999 * m.b234) + 2.52276995748249)**2 - 1) * (0.0001 + 0.9999 * m.b234) + 0.000685287047617839 * m.b234 <= 0.000685287047617839) m.e422 = Constraint(expr= ((-m.x879 / (0.0001 + 0.9999 * m.b235) + 3.94802045665365)**2 + (-m.x880 / (0.0001 + 0.9999 * m.b235) + 6.40339456473198)**2 - 1) * (0.0001 + 0.9999 * m.b235) + 0.00555903274777947 * m.b235 <= 0.00555903274777947) m.e423 = Constraint(expr= ((-m.x881 / (0.0001 + 0.9999 * m.b236) + 4.9104301706676)**2 + 
(-m.x882 / (0.0001 + 0.9999 * m.b236) + 3.1538945754034)**2 - 1) * (0.0001 + 0.9999 * m.b236) + 0.00330593754537616 * m.b236 <= 0.00330593754537616) m.e424 = Constraint(expr= ((-m.x883 / (0.0001 + 0.9999 * m.b237) + 3.23176530143453)**2 + (-m.x884 / (0.0001 + 0.9999 * m.b237) + 7.03644502956294)**2 - 1) * (0.0001 + 0.9999 * m.b237) + 0.00589558656176173 * m.b237 <= 0.00589558656176173) m.e425 = Constraint(expr= ((-m.x885 / (0.0001 + 0.9999 * m.b238) + 0.39249886515424)**2 + (-m.x886 / (0.0001 + 0.9999 * m.b238) + 1.58871179600639)**2 - 1) * (0.0001 + 0.9999 * m.b238) + 0.000167806052991721 * m.b238 <= 0.000167806052991721) m.e426 = Constraint(expr= ((-m.x887 / (0.0001 + 0.9999 * m.b239) + 4.42690021019308)**2 + (-m.x888 / (0.0001 + 0.9999 * m.b239) + 8.74244441962606)**2 - 1) * (0.0001 + 0.9999 * m.b239) + 0.00950277799012584 * m.b239 <= 0.00950277799012584) m.e427 = Constraint(expr= ((-m.x889 / (0.0001 + 0.9999 * m.b240) + 2.59382124128511)**2 + (-m.x890 / (0.0001 + 0.9999 * m.b240) + 9.98780114819833)**2 - 1) * (0.0001 + 0.9999 * m.b240) + 0.0105484080407694 * m.b240 <= 0.0105484080407694) m.e428 = Constraint(expr= m.b211 + m.b212 + m.b213 + m.b214 + m.b215 + m.b216 + m.b217 + m.b218 + m.b219 + m.b220 + m.b221 + m.b222 + m.b223 + m.b224 + m.b225 + m.b226 + m.b227 + m.b228 + m.b229 + m.b230 + m.b231 + m.b232 + m.b233 + m.b234 + m.b235 + m.b236 + m.b237 + m.b238 + m.b239 + m.b240 == 1) m.e429 = Constraint(expr= ((-m.x891 / (0.0001 + 0.9999 * m.b241) + 2.84746945184196)**2 + (-m.x892 / (0.0001 + 0.9999 * m.b241) + 6.74779851669768)**2 - 1) * (0.0001 + 0.9999 * m.b241) + 0.00526408671011206 * m.b241 <= 0.00526408671011206) m.e430 = Constraint(expr= ((-m.x893 / (0.0001 + 0.9999 * m.b242) + 6.63261133839597)**2 + (-m.x894 / (0.0001 + 0.9999 * m.b242) + 7.58930814907984)**2 - 1) * (0.0001 + 0.9999 * m.b242) + 0.0100589131347908 * m.b242 <= 0.0100589131347908) m.e431 = Constraint(expr= ((-m.x895 / (0.0001 + 0.9999 * m.b243) + 2.78008351564232)**2 + (-m.x896 / (0.0001 + 0.9999 * m.b243) + 5.62089512941063)**2 - 1) * (0.0001 + 0.9999 * m.b243) + 0.00383233264097783 * m.b243 <= 0.00383233264097783) m.e432 = Constraint(expr= ((-m.x897 / (0.0001 + 0.9999 * m.b244) + 3.36345404799715)**2 + (-m.x898 / (0.0001 + 0.9999 * m.b244) + 5.61693710344635)**2 - 1) * (0.0001 + 0.9999 * m.b244) + 0.00418628055570607 * m.b244 <= 0.00418628055570607) m.e433 = Constraint(expr= ((-m.x899 / (0.0001 + 0.9999 * m.b245) + 1.46574263591158)**2 + (-m.x900 / (0.0001 + 0.9999 * m.b245) + 0.14249643071642)**2 - 1) * (0.0001 + 0.9999 * m.b245) + 0.000116870670749595 * m.b245 <= 0.000116870670749595) m.e434 = Constraint(expr= ((-m.x901 / (0.0001 + 0.9999 * m.b246) + 6.59824950993792)**2 + (-m.x902 / (0.0001 + 0.9999 * m.b246) + 1.91438252349491)**2 - 1) * (0.0001 + 0.9999 * m.b246) + 0.00462017570416588 * m.b246 <= 0.00462017570416588) m.e435 = Constraint(expr= ((-m.x903 / (0.0001 + 0.9999 * m.b247) + 2.02515523263325)**2 + (-m.x904 / (0.0001 + 0.9999 * m.b247) + 3.62141731303869)**2 - 1) * (0.0001 + 0.9999 * m.b247) + 0.00162159170714382 * m.b247 <= 0.00162159170714382) m.e436 = Constraint(expr= ((-m.x905 / (0.0001 + 0.9999 * m.b248) + 8.97152199819966)**2 + (-m.x906 / (0.0001 + 0.9999 * m.b248) + 8.78133845105015)**2 - 1) * (0.0001 + 0.9999 * m.b248) + 0.0156600111956072 * m.b248 <= 0.0156600111956072) m.e437 = Constraint(expr= ((-m.x907 / (0.0001 + 0.9999 * m.b249) + 1.87485423601802)**2 + (-m.x908 / (0.0001 + 0.9999 * m.b249) + 8.96274662253461)**2 - 1) * (0.0001 + 0.9999 * m.b249) + 0.00828459054260703 * m.b249 <= 
0.00828459054260703) m.e438 = Constraint(expr= ((-m.x909 / (0.0001 + 0.9999 * m.b250) + 7.52025674966412)**2 + (-m.x910 / (0.0001 + 0.9999 * m.b250) + 8.80268658264247)**2 - 1) * (0.0001 + 0.9999 * m.b250) + 0.0133041552653103 * m.b250 <= 0.0133041552653103) m.e439 = Constraint(expr= ((-m.x911 / (0.0001 + 0.9999 * m.b251) + 3.85704114235832)**2 + (-m.x912 / (0.0001 + 0.9999 * m.b251) + 6.36674450874035)**2 - 1) * (0.0001 + 0.9999 * m.b251) + 0.00544122020134202 * m.b251 <= 0.00544122020134202) m.e440 = Constraint(expr= ((-m.x913 / (0.0001 + 0.9999 * m.b252) + 8.48963678940351)**2 + (-m.x914 / (0.0001 + 0.9999 * m.b252) + 5.48402210821562)**2 - 1) * (0.0001 + 0.9999 * m.b252) + 0.0101148431299391 * m.b252 <= 0.0101148431299391) m.e441 = Constraint(expr= ((-m.x915 / (0.0001 + 0.9999 * m.b253) + 6.03160481727421)**2 + (-m.x916 / (0.0001 + 0.9999 * m.b253) + 1.276552921829)**2 - 1) * (0.0001 + 0.9999 * m.b253) + 0.00370098440339957 * m.b253 <= 0.00370098440339957) m.e442 = Constraint(expr= ((-m.x917 / (0.0001 + 0.9999 * m.b254) + 6.90662266429413)**2 + (-m.x918 / (0.0001 + 0.9999 * m.b254) + 8.78931785429954)**2 - 1) * (0.0001 + 0.9999 * m.b254) + 0.012395354497085 * m.b254 <= 0.012395354497085) m.e443 = Constraint(expr= ((-m.x919 / (0.0001 + 0.9999 * m.b255) + 3.71078725250056)**2 + (-m.x920 / (0.0001 + 0.9999 * m.b255) + 4.66415664133948)**2 - 1) * (0.0001 + 0.9999 * m.b255) + 0.00345242992082719 * m.b255 <= 0.00345242992082719) m.e444 = Constraint(expr= ((-m.x921 / (0.0001 + 0.9999 * m.b256) + 0.107239833314705)**2 +
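# Every m.eNNN constraint above instantiates the same template: a hull
# reformulation of "if binary b is selected, the point (x, y) lies in the
# unit circle around fixed coordinates (cx, cy)", with m.e428 selecting
# exactly one indicator per block. A hypothetical sketch of a generator
# producing constraints of this shape; names and data are illustrative,
# not the original model generator:
from pyomo.environ import Binary, ConcreteModel, Constraint, Var

# Illustrative circle centers; in the generated model these are the
# hard-coded constants paired with each m.xNNN / m.bNNN.
centers = [(3.85704114235832, 6.36674450874035),
           (8.48963678940351, 5.48402210821562)]

m = ConcreteModel()
for i, (cx, cy) in enumerate(centers):
    m.add_component('x%d' % i, Var())
    m.add_component('y%d' % i, Var())
    m.add_component('b%d' % i, Var(within=Binary))
    x, y, b = (getattr(m, name % i) for name in ('x%d', 'y%d', 'b%d'))
    # The disaggregated point is scaled by the indicator, so the circle
    # constraint binds only when b = 1.
    scale = 0.0001 + 0.9999 * b
    # The generated tolerances are consistent with
    # eps = (cx**2 + cy**2 - 1) * 0.0001, which makes x = y = 0 with
    # b = 0 exactly feasible.
    eps = (cx ** 2 + cy ** 2 - 1) * 0.0001
    m.add_component('e%d' % i, Constraint(
        expr=((-x / scale + cx) ** 2 + (-y / scale + cy) ** 2 - 1) * scale
             + eps * b <= eps))

# Exactly one disjunct per block (cf. m.e428):
m.pick_one = Constraint(expr=sum(getattr(m, 'b%d' % i)
                                 for i in range(len(centers))) == 1)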
import os
import time
import json
import zipfile
import StringIO
import datetime
import traceback

import requests

from collections import OrderedDict, defaultdict

from sqlalchemy import distinct, func, or_, not_
from sqlalchemy.orm import joinedload

from flask import Blueprint, make_response, render_template, request, flash, redirect, url_for, jsonify, Response, current_app
from flask_wtf import Form
from flask_wtf.file import FileField
from flask_admin.form import Select2Field
from flask_cors import cross_origin
from wtforms.fields.html5 import URLField
from wtforms.validators import url, required

from appcomposer.db import db
from appcomposer.models import TranslatedApp, TranslationUrl, TranslationBundle, RepositoryApp, ActiveTranslationMessage, TranslationMessageHistory
from appcomposer.login import requires_golab_login, current_golab_user
from appcomposer.translator.mongodb_pusher import retrieve_mongodb_contents, retrieve_mongodb_apps, retrieve_mongodb_urls, retrieve_mongodb_app, retrieve_mongodb_translation_url
from appcomposer.languages import obtain_groups, obtain_languages
from appcomposer.translator.extractors import extract_local_translations_url, extract_messages_from_translation
from appcomposer.translator.ops import add_full_translation_to_app, get_latest_synchronizations
from appcomposer.translator.utils import bundle_to_xml, bundle_to_jquery_i18n, bundle_to_json, bundle_to_graasp_json, bundle_to_properties, url_to_filename, messages_to_xml, NO_CATEGORY, NO_TOOL
from appcomposer.translator.suggestions import translate_texts
from appcomposer.utils import public
from appcomposer.languages import LANGUAGES_PER_NAME, LANGUAGE_NAMES_PER_CODE, WRONG_LANGUAGES_PER_CORRECT_NAME, WRONG_LANGUAGES, LANGUAGE_THRESHOLD, sort_languages, guess_default_language

import flask_cors.core as cors_core
cors_core.debugLog = lambda *args, **kwargs : None

translator_dev_blueprint = Blueprint('translator_dev', __name__, static_folder = '../../translator3/dist/', static_url_path = '/web')


@translator_dev_blueprint.route('/supported_languages.json')
@public
@cross_origin()
def supported_languages():
    languages = sorted([ (name, code) for name, code in LANGUAGES_PER_NAME.items() if not '_' in code ], lambda (name1, code1), (name2, code2) : cmp(name1, name2))
    visible_languages = [ key.split('_')[0] for key in obtain_languages().keys() ]
    return jsonify(languages=languages, golab_languages=visible_languages, mappings=WRONG_LANGUAGES_PER_CORRECT_NAME)


@translator_dev_blueprint.route('/supported_languages.html')
@public
@cross_origin()
def supported_languages_human():
    languages = sorted([ (name, code) for name, code in LANGUAGES_PER_NAME.items() if not '_' in code ], lambda (name1, code1), (name2, code2) : cmp(name1, name2))
    visible_languages = [ key.split('_')[0] for key in obtain_languages().keys() ]
    return render_template("translator/supported_languages.html", languages=languages, wrong=WRONG_LANGUAGES_PER_CORRECT_NAME, visible_languages=visible_languages)


# @translator_dev_blueprint.route('/languages/apps.json')
# @public
# def languages_apps():
#     from appcomposer.translator.tasks import GOLAB_REPO
#     apps = db.session.query(RepositoryApp).filter_by(repository=GOLAB_REPO).filter(not_(RepositoryApp.external_id.like('%-%'))).all()
#     by_repo = {
#         # id: [lang1, lang2...]
# } # for app in apps: # app_languages = [] # for lang in app.languages: # app_languages.append(lang.language.language.split('_')[0]) # by_repo[app.external_id] = app_languages # return jsonify(by_repo) # # @translator_dev_blueprint.route('/languages/labs.json') # @public # def languages_labs(): # from appcomposer.translator.tasks import GOLAB_REPO # labs = db.session.query(RepositoryApp).filter_by(repository=GOLAB_REPO).filter(RepositoryApp.external_id.like('%-%')).all() # by_repo = { # # id: [lang1, lang2...] # } # for lab in labs: # external_id = lab.external_id.split('-')[0] # lab_languages = set(by_repo.get(external_id, [])) # for lang in lab.languages: # lab_languages.add(lang.language.language.split('_')[0]) # by_repo[external_id] = list(lab_languages) # return jsonify(by_repo) @translator_dev_blueprint.route('/languages.json') @public def languages_labs(): from appcomposer.translator.tasks import GOLAB_REPO labs = db.session.query(RepositoryApp).filter_by(repository=GOLAB_REPO).all() by_repo = { # id: [lang1, lang2...] } default_level = 0.7 provided_level = request.args.get('level') level = default_level if provided_level: try: level = float(provided_level) except: pass for lab in labs: external_id = lab.external_id.split('-')[0] lab_languages = set(by_repo.get(external_id, [])) # Add all the languages supported by the original provider for lang in lab.languages: lang_pack = lang.language.language.split('_') language_name = lang_pack[0] language_country = lang_pack[1] if lang.language.language.lower() == 'zh_all': # Exception language_country = 'CN' if language_country.upper() == 'ALL': lab_languages.add(language_name) else: lab_languages.add('{}_{}'.format(language_name, language_country)) # Add all the languages supported by the translators for lang, translation_level in json.loads(lab.translation_percent or '{}').items(): if translation_level > level: lang_pack = lang.split('_') language_name = lang_pack[0] if len(lang_pack) > 1: language_country = lang_pack[1] else: language_country = 'ALL' if language_name.lower() == 'zh' and language_country.upper() == 'ALL': language_country = 'CN' if language_country.upper() == 'ALL': lang_record = language_name else: lang_record = '{}_{}'.format(language_name, language_country) lab_languages.add(lang_record) by_repo[external_id] = list(lab_languages) # Some labs do not report what language they support. Assume English for lang_pack in by_repo.values(): if len(lang_pack) == 0: lang_pack.append('en') return jsonify(by_repo) @translator_dev_blueprint.route('/changes.json') @public @cross_origin() def translation_changes(): try: r = requests.get("https://www.golabz.eu/rest/labs/retrieve.json") r.raise_for_status() labs = r.json() except: return "Error accessing https://www.golabz.eu/rest/labs/retrieve.json", 500 from appcomposer.translator.tasks import GOLAB_REPO repository_apps = db.session.query(RepositoryApp).filter_by(repository=GOLAB_REPO).filter(RepositoryApp.app_link.like('https://www.golabz.eu/lab%'), or_(RepositoryApp.translation_percent != None, RepositoryApp.original_translations != None)).all() automatic_urls = {} for translated_app in db.session.query(TranslatedApp).filter(TranslatedApp.url.in_([ repo_app.url for repo_app in repository_apps ])).all(): automatic_urls[translated_app.url] = translated_app.translation_url.automatic repository_apps_by_external_id = defaultdict(list) # { # id: [ repository_app1, repository_app2, repository_app3 ... 
] # } for repository_app in repository_apps: external_id = repository_app.external_id.split('-')[0] repository_apps_by_external_id[external_id].append(repository_app) threshold = request.args.get('threshold', 100 * LANGUAGE_THRESHOLD) try: threshold = float(threshold) except (ValueError, TypeError): threshold = 100 * LANGUAGE_THRESHOLD threshold = threshold / 100.0 show_names = request.args.get('show_names', 'false').lower() == 'true' show_urls = request.args.get('show_urls', 'false').lower() == 'true' show_total = request.args.get('show_total', 'false').lower() == 'true' changes = { # If there is a change (removal, addition), it lists it like this: # identifier: [ lang1, lang2, lang3 ] } total_changes = { # If there is an addition, it lists it like this: # identifier: { # 'additions' : [lang1, lang2] # 'subtractions' : [lang3, lang4] # } } visible_languages = [ key.split('_')[0] for key in obtain_languages().keys() ] for lab in labs: external_id = lab.get('id') appcomposer_languages = set() for repo_app in repository_apps_by_external_id.get(external_id, []): # If it is not automatic we should not count it (only the original translations) if automatic_urls.get(repo_app.url, True): translation_percent = json.loads(repo_app.translation_percent or "{}") for lang, value in translation_percent.items(): if value >= threshold: # lang should be 'en'; not 'en_ALL_ALL' lang = lang.split('_')[0] appcomposer_languages.add(lang) for lang in (repo_app.original_translations or '').split(','): if lang: lang = lang.split('_')[0] if lang in visible_languages: appcomposer_languages.add(lang) lab_languages = lab.get('lab_languages', []) golabz_languages = set() for language in lab_languages: # If the language is in WRONG_LANGUAGES, take it; otherwise keep it language = WRONG_LANGUAGES.get(language, language) if language in LANGUAGES_PER_NAME: lang_code = LANGUAGES_PER_NAME[language] golabz_languages.add(lang_code) # If there are changes and there are appcomposer languages if len(appcomposer_languages) > 0: additions = appcomposer_languages - golabz_languages subtractions = golabz_languages - appcomposer_languages if subtractions or additions: identifier = external_id if show_urls: repo_apps = repository_apps_by_external_id.get(external_id, []) if repo_apps: identifier = repo_apps[0].app_link elif show_names: repo_apps = repository_apps_by_external_id.get(external_id, []) if repo_apps: identifier = repo_apps[0].name total_changes[identifier] = {} if subtractions: total_changes[identifier]['subtractions'] = list(subtractions) if additions: total_changes[identifier]['additions'] = list(additions) changes[identifier] = [] for lang_code in appcomposer_languages: display_name = LANGUAGE_NAMES_PER_CODE.get(lang_code, lang_code) display_name = WRONG_LANGUAGES_PER_CORRECT_NAME.get(display_name, [ display_name ])[0] changes[identifier].append(display_name) changes[identifier] = sort_languages(changes[identifier]) response = dict(changes=changes) if show_total: response['total_changes'] = total_changes return jsonify(**response) TARGET_CHOICES = [] TARGETS = obtain_groups() for target_code in sorted(TARGETS): TARGET_CHOICES.append((target_code, TARGETS[target_code])) LANGUAGE_CHOICES = [] LANGUAGES = obtain_languages() for lang_code in sorted(LANGUAGES): LANGUAGE_CHOICES.append((lang_code, LANGUAGES[lang_code])) class UploadForm(Form): url = URLField(u"App URL", validators=[url(), required()]) language = Select2Field(u"Language", choices = LANGUAGE_CHOICES, validators = [ required() ]) target = Select2Field(u"Target 
age", choices = TARGET_CHOICES, validators = [ required() ], default = "ALL") opensocial_xml = FileField(u'OpenSocial XML file', validators = [required()]) @translator_dev_blueprint.route('/upload/', methods = ('GET', 'POST')) @requires_golab_login def translation_upload(): default_language = guess_default_language() if default_language: form = UploadForm(language = default_language) else: form = UploadForm() if form.validate_on_submit(): errors = False app_url = form.url.data try: translation_url, original_messages, metadata = extract_local_translations_url(app_url) except Exception as e: traceback.print_exc() form.url.errors = [unicode(e)] errors = True xml_contents = form.opensocial_xml.data.read() if isinstance(xml_contents, str): xml_contents = unicode(xml_contents, 'utf8') try: translated_messages, metadata = extract_messages_from_translation(translation_url, xml_contents) except Exception as e: traceback.print_exc() form.opensocial_xml.errors = [unicode(e)] errors = True if not errors: language = form.language.data target = form.target.data add_full_translation_to_app(current_golab_user().email, app_url, translation_url, metadata, language, target, translated_messages, original_messages, from_developer = False) from appcomposer.translator.tasks import synchronize_apps_cache_wrapper synchronize_apps_cache_wrapper.delay("upload") flash("Contents successfully added") return render_template('translator/translations_upload.html', form=form) @translator_dev_blueprint.route('/') @public def translations(): return render_template("translator/translations.html") @translator_dev_blueprint.route('/users') def translation_users_old(): return redirect(url_for('translator_stats.translation_users')) @translator_dev_blueprint.route('/sync/', methods = ['GET', 'POST']) @requires_golab_login def sync_translations(): since_id = request.args.get('since') if since_id: try: since_id = int(since_id) except ValueError: since_id = None latest_synchronizations = get_latest_synchronizations() finished = False for latest_synchronization in latest_synchronizations: if latest_synchronization['id'] > since_id and latest_synchronization['end'] is not None: finished = True break if latest_synchronizations: latest_id = latest_synchronizations[0]['id'] else: latest_id = 0 if request.method == 'POST': from appcomposer.translator.tasks import synchronize_apps_no_cache_wrapper synchronize_apps_no_cache_wrapper.delay("manual sync request") submitted = True return redirect(url_for('.sync_translations', since = latest_id)) else: submitted = False return render_template("translator/sync.html", latest_synchronizations = latest_synchronizations, since_id = since_id, submitted = submitted, current_datetime = datetime.datetime.utcnow(), finished = finished) @translator_dev_blueprint.route('/sync/debug/') def sync_debug(): # Just in case the debug value changes during the load of modules if not current_app.debug: return "Not in debug mode!" 
now = datetime.datetime.utcnow() t0 = time.time() from appcomposer.translator.translation_listing import synchronize_apps_no_cache synchronize_apps_no_cache("sync debug") tf = time.time() return "<html><body>synchronization process finished (%.2f seconds): %s </body></html>" % (tf - t0, now) @translator_dev_blueprint.route('/urls/') @public def translations_urls(): urls = {} for db_url in db.session.query(TranslationUrl).options(joinedload('bundles')): urls[db_url.url] = [] for bundle in db_url.bundles: urls[db_url.url].append({ 'from_developer' : bundle.from_developer, 'target' : bundle.target, 'lang' : bundle.language, }) return render_template("translator/translations_urls.html", urls = urls) def _sort_dicts_by_datetime(dictionary): all_values = [ (key, value) for key, value in dictionary.iteritems() ] all_values.sort(lambda (k1, v1), (k2, v2) : cmp(v1.get('last_change'), v2.get('last_change')), reverse = True) new_dict = OrderedDict() for key, value in all_values: new_dict[key] = value return new_dict def _dict2sorted_list(dictionary, key_name = 'id'): all_values = [ (key, value) for key, value in dictionary.iteritems() ] all_values.sort(lambda (k1, v1), (k2, v2) : cmp(v1.get('last_change'), v2.get('last_change')), reverse = True) sorted_list = [] for key, value in all_values: value[key_name] = key sorted_list.append(value) return sorted_list SITE_ROOT = '.' @translator_dev_blueprint.route('/apps/') @public def translations_apps(): # Takes 1ms to load these two files. And putting it here is better for being able to change the code dynamically apps_angular_code = open(os.path.join(SITE_ROOT,
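# As a usage illustration, a client can hit the change-detection endpoint
# defined above with a custom coverage threshold. The host and URL prefix
# below are placeholders (they depend on where this blueprint is
# registered); the query parameters are the ones parsed by
# translation_changes():
import requests

resp = requests.get(
    'https://appcomposer.example.org/translator/dev/changes.json',
    params={'threshold': 80, 'show_names': 'true', 'show_total': 'true'})
resp.raise_for_status()
for lab, langs in resp.json()['changes'].items():
    print('%s -> %s' % (lab, ', '.join(langs)))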
has length: {}".format(len(description))) def _convert_spark_dtype_to_hive_dtype(spark_dtype): """ Helper function to convert a spark data type into a hive datatype Args: :spark_dtype: the spark datatype to convert Returns: the hive datatype or None Raises: :SparkToHiveSchemaConversionError: if there was an error converting a spark datatype to a hive datatype """ if type(spark_dtype) is dict: if spark_dtype[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE].lower() == constants.SPARK_CONFIG.SPARK_ARRAY: return spark_dtype[ constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE].upper() + "<" + \ _convert_spark_dtype_to_hive_dtype( spark_dtype[constants.SPARK_CONFIG.SPARK_SCHEMA_ELEMENT_TYPE]) + ">" if spark_dtype[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE].lower() == constants.SPARK_CONFIG.SPARK_STRUCT: struct_nested_fields = list(map( lambda field: field[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_NAME] + constants.DELIMITERS.COLON_DELIMITER + _convert_spark_dtype_to_hive_dtype( field[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE]), spark_dtype[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELDS])) return spark_dtype[ constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE].upper() + "<" + \ constants.DELIMITERS.COMMA_DELIMITER.join(struct_nested_fields) + ">" if spark_dtype.upper() in constants.HIVE_CONFIG.HIVE_DATA_TYPES: return spark_dtype.upper() if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_LONG_TYPE: return constants.HIVE_CONFIG.HIVE_BIGINT_TYPE if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_SHORT_TYPE: return constants.HIVE_CONFIG.HIVE_INT_TYPE if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_BYTE_TYPE: return constants.HIVE_CONFIG.HIVE_CHAR_TYPE if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_INTEGER_TYPE: return constants.HIVE_CONFIG.HIVE_INT_TYPE if constants.SPARK_CONFIG.SPARK_DECIMAL_TYPE in spark_dtype.lower(): return spark_dtype.upper() raise SparkToHiveSchemaConversionError("Dataframe data type: {} not recognized.".format(spark_dtype)) def _convert_spark_dtype_to_mysql_dtype(spark_dtype): """ Helper function to convert a spark data type into a mysql datatype Args: :spark_dtype: the spark datatype to convert Returns: the mysql datatype or None Raises: :SparkToMySQLSchemaConversionError: if there was an error converting a spark datatype to a mysql datatype """ if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_LONG_TYPE: return constants.MYSQL_CONFIG.MYSQL_BIGINT_TYPE if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_SHORT_TYPE: return constants.MYSQL_CONFIG.MYSQL_BYTE_TYPE if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_BYTE_TYPE: return constants.MYSQL_CONFIG.MYSQL_CHAR_TYPE if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_INTEGER_TYPE \ or spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_INT_TYPE: return constants.MYSQL_CONFIG.MYSQL_INTEGER_TYPE if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_BIGINT_TYPE: return constants.MYSQL_CONFIG.MYSQL_BIGINT_TYPE if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_SMALLINT_TYPE: return constants.MYSQL_CONFIG.MYSQL_SMALLINT_TYPE if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_STRING_TYPE: return constants.MYSQL_CONFIG.MYSQL_VARCHAR_1000_TYPE if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_BINARY_TYPE: return constants.MYSQL_CONFIG.MYSQL_BLOB_TYPE if constants.SPARK_CONFIG.SPARK_STRUCT in spark_dtype.lower(): return constants.MYSQL_CONFIG.MYSQL_BLOB_TYPE if constants.SPARK_CONFIG.SPARK_ARRAY in spark_dtype.lower(): return constants.MYSQL_CONFIG.MYSQL_BLOB_TYPE if 
constants.SPARK_CONFIG.SPARK_VECTOR in spark_dtype.lower(): return constants.MYSQL_CONFIG.MYSQL_BLOB_TYPE if spark_dtype.lower() in constants.MYSQL_CONFIG.MYSQL_DATA_TYPES: return spark_dtype.lower() if spark_dtype.upper() in constants.MYSQL_CONFIG.MYSQL_DATA_TYPES: return spark_dtype.upper() raise SparkToMySQLSchemaConversionError("Dataframe data type: {} not recognized.".format(spark_dtype)) def _structure_cluster_analysis_json(cluster_analysis_dict): """ Converts the dict/json returned by spark cluster analysis into the correct format that the backend expects in the REST call Args: :cluster_analysis_dict: the raw data Returns: the formatted data """ data_points = [] clusters = [] for key, value in cluster_analysis_dict[constants.FEATURE_STORE.CLUSTERING_ANALYSIS_FEATURES_COLUMN].items(): try: first_dim = float(value[constants.FEATURE_STORE.CLUSTERING_ANALYSIS_ARRAY_COLUMN][0]) second_dim = float(value[constants.FEATURE_STORE.CLUSTERING_ANALYSIS_ARRAY_COLUMN][1]) if math.isnan(first_dim): first_dim = 0.0 if math.isnan(second_dim): second_dim = 0.0 except ValueError: first_dim = 0.0 second_dim = 0.0 data_point = { constants.REST_CONFIG.JSON_CLUSTERING_ANALYSIS_DATA_POINT_NAME: str(key), constants.REST_CONFIG.JSON_CLUSTERING_ANALYSIS_FIRST_DIMENSION: first_dim, constants.REST_CONFIG.JSON_CLUSTERING_ANALYSIS_SECOND_DIMENSION: second_dim, } data_points.append(data_point) for key, value in cluster_analysis_dict[constants.FEATURE_STORE.CLUSTERING_ANALYSIS_CLUSTERS_OUTPUT_COLUMN].items(): try: cluster_val = int(value) if math.isnan(cluster_val): cluster_val = -1 except ValueError: cluster_val = -1 cluster = { constants.REST_CONFIG.JSON_CLUSTERING_ANALYSIS_DATA_POINT_NAME: str(key), constants.REST_CONFIG.JSON_CLUSTERING_ANALYSIS_CLUSTER: cluster_val } clusters.append(cluster) cluster_analysis_json_dict = { constants.REST_CONFIG.JSON_CLUSTERING_ANALYSIS_DATA_POINTS: data_points, constants.REST_CONFIG.JSON_CLUSTERING_ANALYSIS_CLUSTERS: clusters } return cluster_analysis_json_dict def _structure_descriptive_stats_json(descriptive_stats_list): """ Converts the dict/json returned by spark descriptive statistics into the correct format that the backend expects in the REST call Args: :descriptive_stats_list: raw data Returns: the formatted data """ descriptive_stats_list = list(map(lambda x: json.loads(x), descriptive_stats_list)) descriptive_stats = [] for key in descriptive_stats_list[0]: if not key == constants.FEATURE_STORE.DESCRIPTIVE_STATS_SUMMARY_COL: metric_values = [] for ds in descriptive_stats_list: if key in ds: try: stat_value = float(ds[key]) if math.isnan(stat_value): stat_value = None except ValueError: stat_value = None metric_value = { constants.FEATURE_STORE.DESCRIPTIVE_STATS_METRIC_NAME_COL: ds[ constants.FEATURE_STORE.DESCRIPTIVE_STATS_SUMMARY_COL], constants.FEATURE_STORE.DESCRIPTIVE_STATS_VALUE_COL: stat_value } metric_values.append(metric_value) descriptive_stat = { constants.REST_CONFIG.JSON_DESCRIPTIVE_STATS_FEATURE_NAME: key, constants.REST_CONFIG.JSON_DESCRIPTIVE_STATS_METRIC_VALUES: metric_values } descriptive_stats.append(descriptive_stat) desc_stats_json_dict = { constants.REST_CONFIG.JSON_DESCRIPTIVE_STATS: descriptive_stats } return desc_stats_json_dict def _structure_feature_histograms_json(feature_histogram_list): """ Converts the dict/json returned by spark histogram computation into the correct format that the backend expects in the REST call Args: :feature_histogram_list: the raw data Returns: the formatted data """ feature_distributions = [] for dist in 
feature_histogram_list: frequency_distribution = [] for bin, freq in dist[constants.FEATURE_STORE.HISTOGRAM_FREQUENCY].items(): try: freq_val = int(freq) if math.isnan(freq_val): freq_val = 0 except ValueError: freq_val = 0 histogram_bin = { constants.REST_CONFIG.JSON_HISTOGRAM_BIN: str(bin), constants.REST_CONFIG.JSON_HISTOGRAM_FREQUENCY: freq_val } frequency_distribution.append(histogram_bin) feature_distribution = { constants.REST_CONFIG.JSON_HISTOGRAM_FEATURE_NAME: dist[constants.FEATURE_STORE.HISTOGRAM_FEATURE], constants.REST_CONFIG.JSON_HISTOGRAM_FREQUENCY_DISTRIBUTION: frequency_distribution } feature_distributions.append(feature_distribution) feature_distributions_dict = { constants.REST_CONFIG.JSON_HISTOGRAM_FEATURE_DISTRIBUTIONS: feature_distributions } return feature_distributions_dict def _structure_feature_corr_json(feature_corr_dict): """ Converts the dict/json returned by spark correlation analysis into the correct format that the backend expects in the REST call Args: :feature_corr_dict: the raw data Returns: the formatted data """ feature_correlations = [] for key, value in feature_corr_dict.items(): correlation_values = [] for key1, value1 in value.items(): try: corr = float(value1) if math.isnan(corr): corr = 0.0 except ValueError: corr = 0.0 correlation_value = { constants.REST_CONFIG.JSON_CORRELATION_FEATURE_NAME: str(key1), constants.REST_CONFIG.JSON_CORRELATION: corr } correlation_values.append(correlation_value) feature_correlation = { constants.REST_CONFIG.JSON_CORRELATION_FEATURE_NAME: str(key), constants.REST_CONFIG.JSON_CORRELATION_VALUES: correlation_values } feature_correlations.append(feature_correlation) correlation_matrix_dict = { constants.REST_CONFIG.JSON_FEATURE_CORRELATIONS: feature_correlations } return correlation_matrix_dict def _do_get_project_featurestore(): """ Gets the project's featurestore name (project_featurestore) Returns: the project's featurestore name """ project_name = hdfs.project_name() featurestore_name = project_name.lower() + constants.FEATURE_STORE.FEATURESTORE_SUFFIX return featurestore_name def _do_get_project_training_datasets_sink(): """ Gets the project's default location for storing training datasets in HopsFS Returns: the project's default hopsfs location for storing training datasets """ project_name = hdfs.project_name() training_datasets_sink = project_name + constants.FEATURE_STORE.TRAINING_DATASETS_SUFFIX return training_datasets_sink def _visualization_validation_warning(): """ Checks whether the user is trying to do visualization inside a livy session and prints a warning message if the user is trying to plot inside the livy session. Returns: None """ if constants.ENV_VARIABLES.LIVY_VERSION_ENV_VAR in os.environ: _log("Visualizations are not supported in Livy Sessions. 
" "Use %%local and %matplotlib inline to access the " "python kernel from PySpark notebooks") def _matplotlib_magic_reminder(): """ Prints a reminder message to the user to enable matplotlib inline when plotting inside Jupyter notebooks Returns: None """ _log("Remember to add %%matplotlib inline when doing visualizations in Jupyter notebooks") def _is_hive_enabled(spark): """ Checks whether Hive is enabled for a given spark session\ Args: :spark: the spark session to verify Returns: true if hive is enabled, otherwise false """ return _get_spark_sql_catalog_impl(spark) == constants.SPARK_CONFIG.SPARK_SQL_CATALOG_HIVE def _get_spark_sql_catalog_impl(spark): """ Gets the sparkSQL catalog implementatin of a given spark session Args: :spark: the spark session to get the SQL catalog implementation of Returns: the sparkSQL catalog implementation of the spark session """ return dict(spark.sparkContext._conf.getAll())[constants.SPARK_CONFIG.SPARK_SQL_CATALOG_IMPLEMENTATION] def _get_on_demand_featuregroup_type_info(featurestore_metadata): """ Gets the type information for an on-demand feature group that the backend expects Args: :featurestore_metadata: metadata of the featurestore Returns: the type information of the feature group, tuple of (type, dtotype) """ featuregroup_type = featurestore_metadata.settings.on_demand_featuregroup_type featuregroup_type_dto = featurestore_metadata.settings.on_demand_featuregroup_dto_type return featuregroup_type, featuregroup_type_dto def _get_cached_featuregroup_type_info(featurestore_metadata): """ Gets the type information for a cached feature group that the backend expects Args: :featurestore_metadata: metadata of the featurestore Returns: the type information of the feature group, tuple of (type, dtotype) """ featuregroup_type = featurestore_metadata.settings.cached_featuregroup_type featuregroup_type_dto = featurestore_metadata.settings.cached_featuregroup_dto_type return featuregroup_type, featuregroup_type_dto def _get_training_dataset_type_info(featurestore_metadata, external=False): """ Gets the type information of a training datasetthat the backend expects Args: :featurestore_metadata: metadata of the featurestore :external: whether it is an external featuregroup or not Returns: the type information of the training dataset, tuple of (type, dtotype) """ if external: training_dataset_type = featurestore_metadata.settings.external_training_dataset_type training_dataset_type_dto = featurestore_metadata.settings.external_training_dataset_dto_type else: training_dataset_type = featurestore_metadata.settings.hopsfs_training_dataset_type training_dataset_type_dto = featurestore_metadata.settings.hopsfs_training_dataset_dto_type return training_dataset_type, training_dataset_type_dto def _get_hopsfs_training_dataset_path(training_dataset_name, hdfs_store_path, data_format): """ Utility function for getting the hopsfs path of a training dataset in the feature store Args: :training_dataset_name: name of the training dataset :hdfs_store_path: the hdfs path to the dataset where all the training datasets are stored :data_format: data format of the training datataset Return: the hdfs path to the training dataset """ hdfs_path = hdfs_store_path + \ constants.DELIMITERS.SLASH_DELIMITER + training_dataset_name if data_format == constants.FEATURE_STORE.TRAINING_DATASET_IMAGE_FORMAT: hdfs_path = hdfs_store_path # abspath means "hdfs://namenode:port/ is preprended path = util.abspath(hdfs_path) return path def _get_external_training_dataset_path(training_dataset_name, 
training_dataset_version, bucket, path): """ Utility function for getting the S3 path of a training dataset in the feature store Args: :training_dataset_name: name of the training dataset :training_dataset_version: version of the training dataset :bucket: the s3 bucket :path: user-supplied path Returns: the s3 path to the training dataset """ if path is None or path is "": path = "" if constants.S3_CONFIG.S3_FILE_PREFIX not in bucket: path = constants.S3_CONFIG.S3_FILE_PREFIX path = path + bucket + constants.DELIMITERS.SLASH_DELIMITER + constants.S3_CONFIG.S3_TRAINING_DATASETS_FOLDER \ + constants.DELIMITERS.SLASH_DELIMITER + _get_table_name(training_dataset_name, training_dataset_version) else: if constants.S3_CONFIG.S3_FILE_PREFIX not in bucket: path = constants.S3_CONFIG.S3_FILE_PREFIX + bucket + constants.DELIMITERS.SLASH_DELIMITER + path else: path = bucket + constants.DELIMITERS.SLASH_DELIMITER + path path = path + constants.DELIMITERS.SLASH_DELIMITER + _get_table_name(training_dataset_name, training_dataset_version) return path def _setup_s3_credentials_for_spark(access_key, secret_key, spark): """ Registers the access key and secret key environment variables for writing to S3 with Spark Args: :access_key: the S3 access key ID :secret_key: the S3 secret key :spark: the spark session Returns: None """ sc = spark.sparkContext sc._jsc.hadoopConfiguration().set(constants.S3_CONFIG.S3_ACCESS_KEY_ENV, access_key) sc._jsc.hadoopConfiguration().set(constants.S3_CONFIG.S3_SECRET_KEY_ENV, secret_key) def _get_bucket_path(bucket, dataset_path): """ Utility function for getting the S3 path of a feature
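# Usage illustration for the schema converters above, assuming the stock
# Hops constants (e.g. SPARK_SCHEMA_FIELD_TYPE = 'type',
# SPARK_SCHEMA_ELEMENT_TYPE = 'elementType', SPARK_ARRAY = 'array',
# SPARK_LONG_TYPE = 'long'); the exact constant values are assumptions:
print(_convert_spark_dtype_to_hive_dtype('long'))
# -> 'BIGINT'
print(_convert_spark_dtype_to_hive_dtype({'type': 'array',
                                          'elementType': 'long'}))
# -> 'ARRAY<BIGINT>' (nested types recurse on their element type)
print(_convert_spark_dtype_to_mysql_dtype('string'))
# -> a VARCHAR(1000)-style MySQL column type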
that we have three windings... for w in range(3): # Instanciate a DiTTo Winding object try: api_winding = Winding(model) except: raise ValueError("Unable to instanciate Winding DiTTo object.") # Set the base voltage # We assume that 1st winding is primary, 2nd secondary, and third tertiary try: if w == 0: api_winding.nominal_voltage = ( float(settings["primarybasevoltage"]) * 10 ** 3 ) # DiTTo in volt if w == 1: api_winding.nominal_voltage = ( float(settings["secondarybasevoltage"]) * 10 ** 3 ) # DiTTo in volt if w == 2: api_winding.nominal_voltage = ( float(settings["tertiarybasevoltage"]) * 10 ** 3 ) # DiTTo in volt except: pass # Set the rated power try: if w == 0: api_winding.rated_power = ( float(settings["primaryratedcapacity"]) * 10 ** 3 ) # DiTTo in volt ampere if w == 1: api_winding.rated_power = ( float(settings["secondaryratedcapacity"]) * 10 ** 3 ) # DiTTo in volt ampere if w == 2: api_winding.rated_power = ( float(settings["tertiaryratedcapacity"]) * 10 ** 3 ) # DiTTo in volt ampere except: pass # Create the phase windings for p in phases: # Instanciate a PhaseWinding DiTTo object try: api_phase_winding = PhaseWinding(model) except: raise ValueError( "Unable to instanciate PhaseWinding DiTTo object." ) # Set the phase try: api_phase_winding.phase = p except: pass # Set the tap position try: if w == 0: api_phase_winding.tap_position = int( settings["primaryfixedtapsetting"] ) if w == 1: api_phase_winding.tap_position = int( settings["secondaryfixedtapsetting"] ) if w == 2: api_phase_winding.tap_position = int( settings["tertiaryfixedtapsetting"] ) except: pass # Add the phase winding object to the winding api_winding.phase_windings.append(api_phase_winding) # Add the winding object to the transformer api_transformer.windings.append(api_winding) # Handle two windings transformers if settings["type"] == "transformer": if settings["eqid"] in self.transformers: transformer_data = self.transformers[settings["eqid"]] else: transformer_data = self.transformers["DEFAULT"] # Resistance # # Note: Imported from Julietta's code # Z1 = float(transformer_data["z1"]) Z0 = float(transformer_data["z0"]) XR = float(transformer_data["xr"]) XR0 = float(transformer_data["xr0"]) if XR == 0: R1 = 0 X1 = 0 else: R1 = Z1 / math.sqrt(1 + XR * XR) X1 = Z1 / math.sqrt(1 + 1 / (XR * XR)) if XR0 == 0: R0 = 0 X0 = 0 else: R0 = Z0 / math.sqrt(1 + XR0 * XR0) X0 = Z0 / math.sqrt(1 + 1 / (XR0 * XR0)) complex0 = complex(R0, X0) complex1 = complex(R1, X1) matrix = np.array( [[complex0, 0, 0], [0, complex1, 0], [0, 0, complex1]] ) a = 1 * cmath.exp(2 * math.pi * 1j / 3) T = np.array([[1.0, 1.0, 1.0], [1.0, a * a, a], [1.0, a, a * a]]) T_inv = np.linalg.inv(T) Zabc = T * matrix * T_inv Z_perc = Zabc.item((0, 0)) R_perc = Z_perc.real / 2.0 xhl = Z_perc.imag # Check if it's an LTC # if "isltc" in transformer_data and transformer_data["isltc"]: # Instanciate a Regulator DiTTo object try: api_regulator = Regulator(model) except: raise ValueError( "Unable to instanciate Regulator DiTTo object." 
) try: api_regulator.name = "Reg_" + settings["sectionid"] except: pass api_regulator.feeder_name = self.section_feeder_mapping[sectionID] try: api_regulator.connected_transformer = api_transformer.name except: raise ValueError("Unable to connect LTC to transformer") taps = float(transformer_data["taps"]) lowerbandwidth = float(transformer_data["lowerbandwidth"]) upperbandwidth = float(transformer_data["upperbandwidth"]) minreg_range = int(float(settings["maxbuck"])) maxreg_range = int(float(settings["maxboost"])) setpoint = float(settings["setpoint"]) ct = int(float(settings["ct"])) pt = int(float(settings["pt"])) center_bandwidth = upperbandwidth - lowerbandwidth api_regulator.ltc = 1 api_regulator.highstep = minreg_range api_regulator.lowstep = maxreg_range api_regulator.pt_ratio = pt api_regulator.ct_ratio = ct api_regulator.setpoint = setpoint api_regulator.center_bandwidth = center_bandwidth api_regulator.bandwidth = ( upperbandwidth + lowerbandwidth ) # ie. use the average bandwidth. The upper and lower are typically the same # TODO: Add unit checking. These units are in percentages. Need to be updated to be in Volts for consistency (BUG in cyme writer too) # TODO: Decide whether or not to put parameters in for the regulator range, and what units they should be. try: api_transformer.reactances = [float(xhl)] except: pass # Here we know that we have two windings... for w in range(2): # Instanciate a Winding DiTTo object try: api_winding = Winding(model) except: raise ValueError("Unable to instanciate Winding DiTTo object.") # Set the rated power try: if w == 0: api_winding.rated_power = ( float(transformer_data["kva"]) * 10 ** 3 ) # DiTTo in volt ampere if w == 1: api_winding.rated_power = ( float(transformer_data["kva"]) * 10 ** 3 ) # DiTTo in volt ampere except: pass # Set the nominal voltage try: if w == 0: api_winding.nominal_voltage = ( float(transformer_data["kvllprim"]) * 10 ** 3 ) # DiTTo in volt if w == 1: api_winding.nominal_voltage = ( float(transformer_data["kvllsec"]) * 10 ** 3 ) # DiTTo in volt except: pass # Connection configuration try: api_winding.connection_type = self.transformer_connection_configuration_mapping( transformer_data["conn"], w ) except: pass # Resistance try: api_winding.resistance = R_perc except: pass # For each phase... for p in phases: # Instanciate a PhaseWinding DiTTo object try: api_phase_winding = PhaseWinding(model) except: raise ValueError( "Unable to instanciate PhaseWinding DiTTo object." ) # Set the phase try: api_phase_winding.phase = p except: pass # Add the phase winding object to the winding api_winding.phase_windings.append(api_phase_winding) # Add the winding object to the transformer api_transformer.windings.append(api_winding) # Handle Grounding transformers if settings["type"] == "grounding_transformer": if settings["equipmentid"] in self.grounding_transformers: transformer_data = self.grounding_transformers[ settings["equipmentid"] ] else: transformer_data = {} # Here we know that we have two windings... 
for w in range(2): # Instanciate a Winding DiTTo object try: api_winding = Winding(model) except: raise ValueError("Unable to instanciate Winding DiTTo object.") # Set the rated power try: if w == 0: api_winding.rated_power = ( float(transformer_data["ratedcapacity"]) * 10 ** 3 ) # DiTTo in volt ampere if w == 1: api_winding.rated_power = ( float(transformer_data["ratedcapacity"]) * 10 ** 3 ) # DiTTo in volt ampere except: pass # Set the nominal voltage try: if w == 0: api_winding.nominal_voltage = ( float(transformer_data["ratedvoltage"]) * 10 ** 3 ) # DiTTo in volt if w == 1: api_winding.nominal_voltage = ( float(transformer_data["ratedvoltage"]) * 10 ** 3 ) # DiTTo in volt except: pass # Set the connection configuration try: api_winding.connection_type = self.connection_configuration_mapping( transformer_data["conn"] ) except: pass # For each phase... for p in phases: # Instanciate a PhaseWinding DiTTo object try: api_phase_winding = PhaseWinding(model) except: raise ValueError( "Unable to instanciate PhaseWinding DiTTo object." ) # Set the phase try: api_phase_winding.phase = p except: pass # Add the phase winding object to the winding api_winding.phase_windings.append(api_phase_winding) # Add the winding object to the transformer api_transformer.windings.append(api_winding) # Add the transformer object to the list of transformers self._transformers.append(api_transformer) if not sectionID in self.section_duplicates: self.section_duplicates[sectionID] = [] self.section_duplicates[sectionID].append(api_transformer) return 1 def parse_regulators(self, model): """Parse the regulators from CYME to DiTTo. .. note:: In CYME a regulator does not have to be associated with a transformer (as it is the case for OpenDSS for example). In addition, a regulator can monitor multiple phases. The parser should create the transformers and create separate regulator objects for different phases. """ # Instanciate the list in which we store the DiTTo regulator objects self._regulators = [] mapp_regulators = { "id": 0, "type": 1, "kva": 2, "kva_1": 3, "kva_2": 4, "kva_3": 5, "kva_4": 6, "kvln": 7, "forwardbandwidth": 11, "bandwidth": 11, # For old CYME version 'forwardbandwidth' is just 'bandwidth' "ct": 13, "pt": 14, } mapp_regulator_settings = { "sectionid": 0, "eqid": 2, "coordx": 7, "coordy": 8, "phaseon": 9, "ct": 12, "pt": 13, "vseta": 16, "vsetb": 17, "vsetc": 18, "bandwidtha": 25, "bandwidthb": 26, "bandwidthc": 27, "tapa": 28, "tapb": 29, "tapc": 30, "conn": 31, } self.settings = {} self.regulators = {} ##################################################### # # # NETWORK FILE # # # ##################################################### # # Open the network file self.get_file_content("network") # Loop over the network file for line in self.content: self.settings.update( self.parser_helper( line, ["regulator_settings"], [ "sectionid", "eqid", "coordx", "coordy", "phaseon", "ct", "pt", "vseta", "vsetb", "vsetc", "bandwidtha", "bandwidthb", "bandwidthc", "tapa", "tapb", "tapc", "conn", ], mapp_regulator_settings, ) ) ##################################################### # # # EQUIPMENT FILE # # # ##################################################### # # Open the network file self.get_file_content("equipment") # Loop over the network file for line in self.content: self.regulators.update( self.parser_helper( line, ["regulator"], [ "id", "type", "kva", "kva_1", "kva_2", "kva_3", "kva_4", "kvln", "forwardbandwidth", "bandwidth", "ct", "pt", ],