hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a034bfab9b2d31d840360d866d7e73747d53c4f
| 397
|
py
|
Python
|
setup.py
|
mahi045/ApproxASP
|
8a453b16a87690275a89b58558c768c3f4c0568c
|
[
"MIT"
] | 3
|
2019-08-05T20:35:41.000Z
|
2020-09-30T15:10:59.000Z
|
setup.py
|
mahi045/ApproxASP
|
8a453b16a87690275a89b58558c768c3f4c0568c
|
[
"MIT"
] | null | null | null |
setup.py
|
mahi045/ApproxASP
|
8a453b16a87690275a89b58558c768c3f4c0568c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Packaging script for xorro, which extends ASP with parity constraints."""
from setuptools import setup

setup(
    name='xorro',
    version='1.0',
    description='Extending ASP with parity constraints.',
    author='Flavio Everardo',
    license='MIT',
    packages=['xorro'],
    test_suite='xorro.tests',
    zip_safe=False,
    # Installs the `xorro` command, wired to xorro.main().
    entry_points={
        'console_scripts': [
            'xorro = xorro:main',
        ],
    },
)
| 18.904762
| 59
| 0.564232
|
4a034dc86bb01c971fc4e15cc21e84d2eb8add57
| 4,803
|
py
|
Python
|
ask-smapi-model/ask_smapi_model/v1/skill/nlu/annotation_sets/annotation_set.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-smapi-model/ask_smapi_model/v1/skill/nlu/annotation_sets/annotation_set.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-smapi-model/ask_smapi_model/v1/skill/nlu/annotation_sets/annotation_set.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_smapi_model.v1.skill.nlu.annotation_sets.annotation_set_entity import AnnotationSetEntity
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class AnnotationSet(AnnotationSetEntity):
    """An NLU annotation set, extending the base entity with its identifier.

    :param locale:
    :type locale: (optional) str
    :param name: Name of the NLU annotation set
    :type name: (optional) str
    :param number_of_entries: Number of entries which represents number of utterances in each NLU annotation set content
    :type number_of_entries: (optional) int
    :param updated_timestamp: The latest updated timestamp for the NLU annotation set
    :type updated_timestamp: (optional) datetime
    :param annotation_id: Identifier of the NLU annotation set.
    :type annotation_id: (optional) str
    """
    # Attribute name -> serialized type name.
    deserialized_types = {
        'locale': 'str',
        'name': 'str',
        'number_of_entries': 'int',
        'updated_timestamp': 'datetime',
        'annotation_id': 'str'
    }  # type: Dict

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'locale': 'locale',
        'name': 'name',
        'number_of_entries': 'numberOfEntries',
        'updated_timestamp': 'updatedTimestamp',
        'annotation_id': 'annotationId'
    }  # type: Dict

    supports_multiple_types = False

    def __init__(self, locale=None, name=None, number_of_entries=None, updated_timestamp=None, annotation_id=None):
        # type: (Optional[str], Optional[str], Optional[int], Optional[datetime], Optional[str]) -> None
        """Create the annotation set; shared fields are delegated to the base class.

        :param locale:
        :type locale: (optional) str
        :param name: Name of the NLU annotation set
        :type name: (optional) str
        :param number_of_entries: Number of entries which represents number of utterances in each NLU annotation set content
        :type number_of_entries: (optional) int
        :param updated_timestamp: The latest updated timestamp for the NLU annotation set
        :type updated_timestamp: (optional) datetime
        :param annotation_id: Identifier of the NLU annotation set.
        :type annotation_id: (optional) str
        """
        self.__discriminator_value = None  # type: str

        super(AnnotationSet, self).__init__(locale=locale, name=name, number_of_entries=number_of_entries, updated_timestamp=updated_timestamp)
        self.annotation_id = annotation_id

    @staticmethod
    def _plain(item):
        """Reduce a single value to a plain (JSON-friendly) representation."""
        if hasattr(item, "to_dict"):
            return item.to_dict()
        if isinstance(item, Enum):
            return item.value
        return item

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {}  # type: Dict
        for attr in self.deserialized_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [self._plain(element) for element in value]
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: self._plain(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        return isinstance(other, AnnotationSet) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not (self == other)
| 36.112782
| 143
| 0.624818
|
4a034e0d32f56a384d4162bcb2b9a507c290b9c0
| 127
|
py
|
Python
|
ex008.py
|
carlosvcerqueira/Projetos-Python
|
2fb91a0be8abf436af6a1d57fb2a1eafd0d30394
|
[
"MIT"
] | null | null | null |
ex008.py
|
carlosvcerqueira/Projetos-Python
|
2fb91a0be8abf436af6a1d57fb2a1eafd0d30394
|
[
"MIT"
] | null | null | null |
ex008.py
|
carlosvcerqueira/Projetos-Python
|
2fb91a0be8abf436af6a1d57fb2a1eafd0d30394
|
[
"MIT"
] | null | null | null |
# Read a distance in metres and report it in centimetres and millimetres.
metres = float(input('Uma distância em metros: '))
centimetres = metres * 100
millimetres = metres * 1000
print(f'{metres}m possuem {centimetres} cm e {millimetres} mm.')
| 25.4
| 52
| 0.543307
|
4a034e586a5e7fdf9ea4c99226e61774d3ff1918
| 7,565
|
py
|
Python
|
graphsense/model/block_tx_utxo.py
|
graphsense/graphsense-python
|
c0dafc97a04bc3dbf0caf08a981bb591bd1e430a
|
[
"MIT"
] | 9
|
2020-11-26T12:26:36.000Z
|
2022-02-07T22:08:16.000Z
|
graphsense/model/block_tx_utxo.py
|
graphsense/graphsense-python
|
c0dafc97a04bc3dbf0caf08a981bb591bd1e430a
|
[
"MIT"
] | 14
|
2020-11-17T13:28:08.000Z
|
2022-01-24T09:21:43.000Z
|
graphsense/model/block_tx_utxo.py
|
graphsense/graphsense-python
|
c0dafc97a04bc3dbf0caf08a981bb591bd1e430a
|
[
"MIT"
] | 3
|
2022-02-03T09:24:27.000Z
|
2022-02-16T10:13:55.000Z
|
"""
GraphSense API
GraphSense API # noqa: E501
The version of the OpenAPI document: 0.5
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from graphsense.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the Values model on demand and inject it into module globals.

    NOTE(review): this deferred-import pattern is emitted by the OpenAPI
    generator, presumably to break circular imports between generated
    models -- confirm against the generator templates.
    """
    from graphsense.model.values import Values
    globals()['Values'] = Values
class BlockTxUtxo(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes in this model.
    allowed_values = {
    }

    # No length/range/regex validations in this model.
    validations = {
    }

    # None: unknown JSON keys are not accepted as additional properties.
    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # lazy_import() resolves the Values model only when the types are
        # first requested (cached_property caches the resulting dict).
        lazy_import()
        return {
            'currency_type': (str,),  # noqa: E501
            'no_inputs': (int,),  # noqa: E501
            'no_outputs': (int,),  # noqa: E501
            'total_input': (Values,),  # noqa: E501
            'total_output': (Values,),  # noqa: E501
            'tx_hash': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model does not participate in polymorphic dispatch.
        return None

    # Python attribute name -> JSON key (identical for this model).
    attribute_map = {
        'currency_type': 'currency_type',  # noqa: E501
        'no_inputs': 'no_inputs',  # noqa: E501
        'no_outputs': 'no_outputs',  # noqa: E501
        'total_input': 'total_input',  # noqa: E501
        'total_output': 'total_output',  # noqa: E501
        'tx_hash': 'tx_hash',  # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes set directly on the instance rather
    # than routed through the model's _data_store.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, no_inputs, no_outputs, total_input, total_output, tx_hash, *args, **kwargs):  # noqa: E501
        """BlockTxUtxo - a model defined in OpenAPI

        Args:
            no_inputs (int):
            no_outputs (int):
            total_input (Values):
            total_output (Values):
            tx_hash (str):

        Keyword Args:
            currency_type (str): defaults to "utxo"  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # NOTE: .get (not .pop) leaves 'currency_type' in kwargs, so the
        # trailing loop re-assigns the same value via setattr -- harmless.
        currency_type = kwargs.get('currency_type', "utxo")
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Positional arguments beyond the declared ones are rejected.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.currency_type = currency_type
        self.no_inputs = no_inputs
        self.no_outputs = no_outputs
        self.total_input = total_input
        self.total_output = total_output
        self.tx_hash = tx_hash
        # Remaining kwargs: either silently discarded (when the configuration
        # says to drop unknown keys and no additional properties are allowed)
        # or stored on the instance.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 38.596939
| 113
| 0.579379
|
4a034ea1e48775e22a5ef8882fc5894b1cc89041
| 754
|
py
|
Python
|
kensu/utils/dsl/ended_builder_element.py
|
vidma/kensu-py
|
aae1e04373f03c988d55772fde6563de3ca9f375
|
[
"Apache-2.0"
] | 16
|
2021-04-28T13:22:41.000Z
|
2022-03-02T10:45:19.000Z
|
kensu/utils/dsl/ended_builder_element.py
|
vidma/kensu-py
|
aae1e04373f03c988d55772fde6563de3ca9f375
|
[
"Apache-2.0"
] | 12
|
2021-05-17T08:06:42.000Z
|
2022-02-28T22:43:04.000Z
|
kensu/utils/dsl/ended_builder_element.py
|
vidma/kensu-py
|
aae1e04373f03c988d55772fde6563de3ca9f375
|
[
"Apache-2.0"
] | 5
|
2021-04-27T15:02:16.000Z
|
2021-10-15T16:07:21.000Z
|
from kensu.client import SchemaLineageDependencyDef
class EndedBuilderElement(object):
    """Wrapper around a finished lineage builder, exposing its endpoints."""

    def __init__(self, builder):
        """Keep the builder and mirror its endpoints as direct attributes."""
        self.builder = builder
        self.input = builder.input
        self.input_schema = builder.input_schema
        self.output = builder.output
        self.output_schema = builder.output_schema

    def toSchemaLineageDependencyDef(self):
        """Convert the builder's state into a SchemaLineageDependencyDef."""
        source = self.builder
        return SchemaLineageDependencyDef(
            from_schema_ref=source.input_schema.to_ref(),
            to_schema_ref=source.output_schema.to_ref(),
            column_data_dependencies=source.data,
            column_control_dependencies=source.control)
| 47.125
| 93
| 0.645889
|
4a034ed50d7d127dad326456e281646aaf7746e3
| 31,419
|
py
|
Python
|
sdk/python/pulumi_aws/imagebuilder/image.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/imagebuilder/image.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/imagebuilder/image.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ImageArgs', 'Image']
@pulumi.input_type
class ImageArgs:
    # Auto-generated by the Pulumi Terraform Bridge (tfgen); input arguments
    # for creating an imagebuilder.Image resource.
    def __init__(__self__, *,
                 image_recipe_arn: pulumi.Input[str],
                 infrastructure_configuration_arn: pulumi.Input[str],
                 distribution_configuration_arn: Optional[pulumi.Input[str]] = None,
                 enhanced_image_metadata_enabled: Optional[pulumi.Input[bool]] = None,
                 image_tests_configuration: Optional[pulumi.Input['ImageImageTestsConfigurationArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing an Image resource.
        :param pulumi.Input[str] image_recipe_arn: Amazon Resource Name (ARN) of the Image Builder Image Recipe.
        :param pulumi.Input[str] infrastructure_configuration_arn: Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration.
        :param pulumi.Input[str] distribution_configuration_arn: Amazon Resource Name (ARN) of the Image Builder Distribution Configuration.
        :param pulumi.Input[bool] enhanced_image_metadata_enabled: Whether additional information about the image being created is collected. Defaults to `true`.
        :param pulumi.Input['ImageImageTestsConfigurationArgs'] image_tests_configuration: Configuration block with image tests configuration. Detailed below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the Image Builder Image. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        pulumi.set(__self__, "image_recipe_arn", image_recipe_arn)
        pulumi.set(__self__, "infrastructure_configuration_arn", infrastructure_configuration_arn)
        # Optional arguments are only recorded when explicitly supplied.
        if distribution_configuration_arn is not None:
            pulumi.set(__self__, "distribution_configuration_arn", distribution_configuration_arn)
        if enhanced_image_metadata_enabled is not None:
            pulumi.set(__self__, "enhanced_image_metadata_enabled", enhanced_image_metadata_enabled)
        if image_tests_configuration is not None:
            pulumi.set(__self__, "image_tests_configuration", image_tests_configuration)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="imageRecipeArn")
    def image_recipe_arn(self) -> pulumi.Input[str]:
        """
        Amazon Resource Name (ARN) of the Image Builder Image Recipe.
        """
        return pulumi.get(self, "image_recipe_arn")

    @image_recipe_arn.setter
    def image_recipe_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "image_recipe_arn", value)

    @property
    @pulumi.getter(name="infrastructureConfigurationArn")
    def infrastructure_configuration_arn(self) -> pulumi.Input[str]:
        """
        Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration.
        """
        return pulumi.get(self, "infrastructure_configuration_arn")

    @infrastructure_configuration_arn.setter
    def infrastructure_configuration_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "infrastructure_configuration_arn", value)

    @property
    @pulumi.getter(name="distributionConfigurationArn")
    def distribution_configuration_arn(self) -> Optional[pulumi.Input[str]]:
        """
        Amazon Resource Name (ARN) of the Image Builder Distribution Configuration.
        """
        return pulumi.get(self, "distribution_configuration_arn")

    @distribution_configuration_arn.setter
    def distribution_configuration_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "distribution_configuration_arn", value)

    @property
    @pulumi.getter(name="enhancedImageMetadataEnabled")
    def enhanced_image_metadata_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether additional information about the image being created is collected. Defaults to `true`.
        """
        return pulumi.get(self, "enhanced_image_metadata_enabled")

    @enhanced_image_metadata_enabled.setter
    def enhanced_image_metadata_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enhanced_image_metadata_enabled", value)

    @property
    @pulumi.getter(name="imageTestsConfiguration")
    def image_tests_configuration(self) -> Optional[pulumi.Input['ImageImageTestsConfigurationArgs']]:
        """
        Configuration block with image tests configuration. Detailed below.
        """
        return pulumi.get(self, "image_tests_configuration")

    @image_tests_configuration.setter
    def image_tests_configuration(self, value: Optional[pulumi.Input['ImageImageTestsConfigurationArgs']]):
        pulumi.set(self, "image_tests_configuration", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key-value map of resource tags for the Image Builder Image. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _ImageState:
    # Auto-generated by the Pulumi Terraform Bridge (tfgen); all properties
    # are optional because this type describes observed/looked-up state.
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 date_created: Optional[pulumi.Input[str]] = None,
                 distribution_configuration_arn: Optional[pulumi.Input[str]] = None,
                 enhanced_image_metadata_enabled: Optional[pulumi.Input[bool]] = None,
                 image_recipe_arn: Optional[pulumi.Input[str]] = None,
                 image_tests_configuration: Optional[pulumi.Input['ImageImageTestsConfigurationArgs']] = None,
                 infrastructure_configuration_arn: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 os_version: Optional[pulumi.Input[str]] = None,
                 output_resources: Optional[pulumi.Input[Sequence[pulumi.Input['ImageOutputResourceArgs']]]] = None,
                 platform: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Image resources.
        :param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the image.
        :param pulumi.Input[str] date_created: Date the image was created.
        :param pulumi.Input[str] distribution_configuration_arn: Amazon Resource Name (ARN) of the Image Builder Distribution Configuration.
        :param pulumi.Input[bool] enhanced_image_metadata_enabled: Whether additional information about the image being created is collected. Defaults to `true`.
        :param pulumi.Input[str] image_recipe_arn: Amazon Resource Name (ARN) of the Image Builder Image Recipe.
        :param pulumi.Input['ImageImageTestsConfigurationArgs'] image_tests_configuration: Configuration block with image tests configuration. Detailed below.
        :param pulumi.Input[str] infrastructure_configuration_arn: Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration.
        :param pulumi.Input[str] name: Name of the AMI.
        :param pulumi.Input[str] os_version: Operating System version of the image.
        :param pulumi.Input[Sequence[pulumi.Input['ImageOutputResourceArgs']]] output_resources: List of objects with resources created by the image.
        :param pulumi.Input[str] platform: Platform of the image.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the Image Builder Image. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        :param pulumi.Input[str] version: Version of the image.
        """
        # Each state field is only recorded when explicitly supplied.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if date_created is not None:
            pulumi.set(__self__, "date_created", date_created)
        if distribution_configuration_arn is not None:
            pulumi.set(__self__, "distribution_configuration_arn", distribution_configuration_arn)
        if enhanced_image_metadata_enabled is not None:
            pulumi.set(__self__, "enhanced_image_metadata_enabled", enhanced_image_metadata_enabled)
        if image_recipe_arn is not None:
            pulumi.set(__self__, "image_recipe_arn", image_recipe_arn)
        if image_tests_configuration is not None:
            pulumi.set(__self__, "image_tests_configuration", image_tests_configuration)
        if infrastructure_configuration_arn is not None:
            pulumi.set(__self__, "infrastructure_configuration_arn", infrastructure_configuration_arn)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if os_version is not None:
            pulumi.set(__self__, "os_version", os_version)
        if output_resources is not None:
            pulumi.set(__self__, "output_resources", output_resources)
        if platform is not None:
            pulumi.set(__self__, "platform", platform)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        Amazon Resource Name (ARN) of the image.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter(name="dateCreated")
    def date_created(self) -> Optional[pulumi.Input[str]]:
        """
        Date the image was created.
        """
        return pulumi.get(self, "date_created")

    @date_created.setter
    def date_created(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "date_created", value)

    @property
    @pulumi.getter(name="distributionConfigurationArn")
    def distribution_configuration_arn(self) -> Optional[pulumi.Input[str]]:
        """
        Amazon Resource Name (ARN) of the Image Builder Distribution Configuration.
        """
        return pulumi.get(self, "distribution_configuration_arn")

    @distribution_configuration_arn.setter
    def distribution_configuration_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "distribution_configuration_arn", value)

    @property
    @pulumi.getter(name="enhancedImageMetadataEnabled")
    def enhanced_image_metadata_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether additional information about the image being created is collected. Defaults to `true`.
        """
        return pulumi.get(self, "enhanced_image_metadata_enabled")

    @enhanced_image_metadata_enabled.setter
    def enhanced_image_metadata_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enhanced_image_metadata_enabled", value)

    @property
    @pulumi.getter(name="imageRecipeArn")
    def image_recipe_arn(self) -> Optional[pulumi.Input[str]]:
        """
        Amazon Resource Name (ARN) of the Image Builder Image Recipe.
        """
        return pulumi.get(self, "image_recipe_arn")

    @image_recipe_arn.setter
    def image_recipe_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image_recipe_arn", value)

    @property
    @pulumi.getter(name="imageTestsConfiguration")
    def image_tests_configuration(self) -> Optional[pulumi.Input['ImageImageTestsConfigurationArgs']]:
        """
        Configuration block with image tests configuration. Detailed below.
        """
        return pulumi.get(self, "image_tests_configuration")

    @image_tests_configuration.setter
    def image_tests_configuration(self, value: Optional[pulumi.Input['ImageImageTestsConfigurationArgs']]):
        pulumi.set(self, "image_tests_configuration", value)

    @property
    @pulumi.getter(name="infrastructureConfigurationArn")
    def infrastructure_configuration_arn(self) -> Optional[pulumi.Input[str]]:
        """
        Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration.
        """
        return pulumi.get(self, "infrastructure_configuration_arn")

    @infrastructure_configuration_arn.setter
    def infrastructure_configuration_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "infrastructure_configuration_arn", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the AMI.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="osVersion")
    def os_version(self) -> Optional[pulumi.Input[str]]:
        """
        Operating System version of the image.
        """
        return pulumi.get(self, "os_version")

    @os_version.setter
    def os_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "os_version", value)

    @property
    @pulumi.getter(name="outputResources")
    def output_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ImageOutputResourceArgs']]]]:
        """
        List of objects with resources created by the image.
        """
        return pulumi.get(self, "output_resources")

    @output_resources.setter
    def output_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ImageOutputResourceArgs']]]]):
        pulumi.set(self, "output_resources", value)

    @property
    @pulumi.getter
    def platform(self) -> Optional[pulumi.Input[str]]:
        """
        Platform of the image.
        """
        return pulumi.get(self, "platform")

    @platform.setter
    def platform(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "platform", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key-value map of resource tags for the Image Builder Image. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        Version of the image.
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
class Image(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 distribution_configuration_arn: Optional[pulumi.Input[str]] = None,
                 enhanced_image_metadata_enabled: Optional[pulumi.Input[bool]] = None,
                 image_recipe_arn: Optional[pulumi.Input[str]] = None,
                 image_tests_configuration: Optional[pulumi.Input[pulumi.InputType['ImageImageTestsConfigurationArgs']]] = None,
                 infrastructure_configuration_arn: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Manages an Image Builder Image.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.imagebuilder.Image("example",
            distribution_configuration_arn=aws_imagebuilder_distribution_configuration["example"]["arn"],
            image_recipe_arn=aws_imagebuilder_image_recipe["example"]["arn"],
            infrastructure_configuration_arn=aws_imagebuilder_infrastructure_configuration["example"]["arn"])
        ```

        ## Import

        `aws_imagebuilder_image` resources can be imported using the Amazon Resource Name (ARN), e.g.

        ```sh
         $ pulumi import aws:imagebuilder/image:Image example arn:aws:imagebuilder:us-east-1:123456789012:image/example/1.0.0/1
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] distribution_configuration_arn: Amazon Resource Name (ARN) of the Image Builder Distribution Configuration.
        :param pulumi.Input[bool] enhanced_image_metadata_enabled: Whether additional information about the image being created is collected. Defaults to `true`.
        :param pulumi.Input[str] image_recipe_arn: Amazon Resource Name (ARN) of the Image Builder Image Recipe.
        :param pulumi.Input[pulumi.InputType['ImageImageTestsConfigurationArgs']] image_tests_configuration: Configuration block with image tests configuration. Detailed below.
        :param pulumi.Input[str] infrastructure_configuration_arn: Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the Image Builder Image. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        # typing.overload stub: signature/docs only, no implementation.
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ImageArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages an Image Builder Image.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.imagebuilder.Image("example",
            distribution_configuration_arn=aws_imagebuilder_distribution_configuration["example"]["arn"],
            image_recipe_arn=aws_imagebuilder_image_recipe["example"]["arn"],
            infrastructure_configuration_arn=aws_imagebuilder_infrastructure_configuration["example"]["arn"])
        ```

        ## Import

        `aws_imagebuilder_image` resources can be imported using the Amazon Resource Name (ARN), e.g.

        ```sh
         $ pulumi import aws:imagebuilder/image:Image example arn:aws:imagebuilder:us-east-1:123456789012:image/example/1.0.0/1
        ```

        :param str resource_name: The name of the resource.
        :param ImageArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # typing.overload stub: signature/docs only, no implementation.
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch between the two typed overloads above.

    ``get_resource_args_opts`` returns a populated ``ImageArgs`` instance
    when the caller used the ``(resource_name, args, opts)`` form, and
    ``None`` when plain keyword arguments were supplied.
    """
    parsed_args, parsed_opts = _utilities.get_resource_args_opts(
        ImageArgs, pulumi.ResourceOptions, *args, **kwargs)
    if parsed_args is None:
        # Keyword-style invocation: forward everything untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object invocation: expand its fields into keyword arguments.
        __self__._internal_init(resource_name, parsed_opts, **parsed_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   distribution_configuration_arn: Optional[pulumi.Input[str]] = None,
                   enhanced_image_metadata_enabled: Optional[pulumi.Input[bool]] = None,
                   image_recipe_arn: Optional[pulumi.Input[str]] = None,
                   image_tests_configuration: Optional[pulumi.Input[pulumi.InputType['ImageImageTestsConfigurationArgs']]] = None,
                   infrastructure_configuration_arn: Optional[pulumi.Input[str]] = None,
                   tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   __props__=None):
    """Shared implementation behind both ``__init__`` overloads.

    Builds the input property bag, validates the required properties, and
    registers the resource with the Pulumi engine via the base-class
    ``__init__``.
    """
    # Normalize and validate resource options before registration.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource (as opposed to adopting an existing one
        # by id); build the property bag from the supplied inputs.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = ImageArgs.__new__(ImageArgs)
        __props__.__dict__["distribution_configuration_arn"] = distribution_configuration_arn
        __props__.__dict__["enhanced_image_metadata_enabled"] = enhanced_image_metadata_enabled
        # image_recipe_arn and infrastructure_configuration_arn are required
        # unless the resource is being rehydrated from an existing URN.
        if image_recipe_arn is None and not opts.urn:
            raise TypeError("Missing required property 'image_recipe_arn'")
        __props__.__dict__["image_recipe_arn"] = image_recipe_arn
        __props__.__dict__["image_tests_configuration"] = image_tests_configuration
        if infrastructure_configuration_arn is None and not opts.urn:
            raise TypeError("Missing required property 'infrastructure_configuration_arn'")
        __props__.__dict__["infrastructure_configuration_arn"] = infrastructure_configuration_arn
        __props__.__dict__["tags"] = tags
        # Output-only properties start as None and are filled in by the
        # provider once the image is created.
        __props__.__dict__["arn"] = None
        __props__.__dict__["date_created"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["os_version"] = None
        __props__.__dict__["output_resources"] = None
        __props__.__dict__["platform"] = None
        __props__.__dict__["tags_all"] = None
        __props__.__dict__["version"] = None
    # Register the resource under its provider token.
    super(Image, __self__).__init__(
        'aws:imagebuilder/image:Image',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        arn: Optional[pulumi.Input[str]] = None,
        date_created: Optional[pulumi.Input[str]] = None,
        distribution_configuration_arn: Optional[pulumi.Input[str]] = None,
        enhanced_image_metadata_enabled: Optional[pulumi.Input[bool]] = None,
        image_recipe_arn: Optional[pulumi.Input[str]] = None,
        image_tests_configuration: Optional[pulumi.Input[pulumi.InputType['ImageImageTestsConfigurationArgs']]] = None,
        infrastructure_configuration_arn: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        os_version: Optional[pulumi.Input[str]] = None,
        output_resources: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ImageOutputResourceArgs']]]]] = None,
        platform: Optional[pulumi.Input[str]] = None,
        tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        version: Optional[pulumi.Input[str]] = None) -> 'Image':
    """
    Get an existing Image resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.

    The remaining keyword arguments are optional state values used to
    qualify the lookup; each one corresponds to the property of the same
    name on this resource (ARNs, name, OS version, output resources,
    platform, tags, and image version).
    """
    # Force the lookup path by merging the provider id into the options.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = _ImageState.__new__(_ImageState)
    # Copy the supplied state values into the property bag in one pass.
    state = {
        "arn": arn,
        "date_created": date_created,
        "distribution_configuration_arn": distribution_configuration_arn,
        "enhanced_image_metadata_enabled": enhanced_image_metadata_enabled,
        "image_recipe_arn": image_recipe_arn,
        "image_tests_configuration": image_tests_configuration,
        "infrastructure_configuration_arn": infrastructure_configuration_arn,
        "name": name,
        "os_version": os_version,
        "output_resources": output_resources,
        "platform": platform,
        "tags": tags,
        "tags_all": tags_all,
        "version": version,
    }
    __props__.__dict__.update(state)
    return Image(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
    """
    Amazon Resource Name (ARN) of the image.

    Output-only: initialized to None at creation and populated by the
    provider (see ``_internal_init``).
    """
    return pulumi.get(self, "arn")
@property
@pulumi.getter(name="dateCreated")
def date_created(self) -> pulumi.Output[str]:
    """
    Date the image was created.

    Output-only: populated by the provider after creation.
    """
    return pulumi.get(self, "date_created")
@property
@pulumi.getter(name="distributionConfigurationArn")
def distribution_configuration_arn(self) -> pulumi.Output[Optional[str]]:
    """
    Amazon Resource Name (ARN) of the Image Builder Distribution Configuration.

    Optional input property; may be None when no distribution configuration
    was supplied.
    """
    return pulumi.get(self, "distribution_configuration_arn")
@property
@pulumi.getter(name="enhancedImageMetadataEnabled")
def enhanced_image_metadata_enabled(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether additional information about the image being created is collected.
    Defaults to `true`.
    """
    return pulumi.get(self, "enhanced_image_metadata_enabled")
@property
@pulumi.getter(name="imageRecipeArn")
def image_recipe_arn(self) -> pulumi.Output[str]:
    """
    Amazon Resource Name (ARN) of the Image Builder Image Recipe.

    Required input property (enforced in ``_internal_init``).
    """
    return pulumi.get(self, "image_recipe_arn")
@property
@pulumi.getter(name="imageTestsConfiguration")
def image_tests_configuration(self) -> pulumi.Output['outputs.ImageImageTestsConfiguration']:
    """
    Configuration block with image tests configuration. Detailed below.
    """
    return pulumi.get(self, "image_tests_configuration")
@property
@pulumi.getter(name="infrastructureConfigurationArn")
def infrastructure_configuration_arn(self) -> pulumi.Output[str]:
    """
    Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration.

    Required input property (enforced in ``_internal_init``).
    """
    return pulumi.get(self, "infrastructure_configuration_arn")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Name of the AMI.

    Output-only: populated by the provider after creation.
    """
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="osVersion")
def os_version(self) -> pulumi.Output[str]:
    """
    Operating System version of the image.

    Output-only: populated by the provider after creation.
    """
    return pulumi.get(self, "os_version")
@property
@pulumi.getter(name="outputResources")
def output_resources(self) -> pulumi.Output[Sequence['outputs.ImageOutputResource']]:
    """
    List of objects with resources created by the image.

    Output-only: populated by the provider after creation.
    """
    return pulumi.get(self, "output_resources")
@property
@pulumi.getter
def platform(self) -> pulumi.Output[str]:
    """
    Platform of the image.

    Output-only: populated by the provider after creation.
    """
    return pulumi.get(self, "platform")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    Key-value map of resource tags for the Image Builder Image. If configured
    with a provider `default_tags` configuration block present, tags with
    matching keys will overwrite those defined at the provider-level.
    """
    return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
    """
    A map of tags assigned to the resource, including those inherited from
    the provider's `default_tags` configuration.
    """
    return pulumi.get(self, "tags_all")
@property
@pulumi.getter
def version(self) -> pulumi.Output[str]:
    """
    Version of the image.

    Output-only: populated by the provider after creation.
    """
    return pulumi.get(self, "version")
| 48.262673
| 277
| 0.682453
|
4a034f4e0f49cc2b19615dc72e474b4c0625abce
| 1,704
|
py
|
Python
|
vb_suite/timeseries.py
|
takluyver/pandas
|
6c820b4b1a3b945d52cffbd9a4d40a582c077b5d
|
[
"BSD-3-Clause"
] | null | null | null |
vb_suite/timeseries.py
|
takluyver/pandas
|
6c820b4b1a3b945d52cffbd9a4d40a582c077b5d
|
[
"BSD-3-Clause"
] | null | null | null |
vb_suite/timeseries.py
|
takluyver/pandas
|
6c820b4b1a3b945d52cffbd9a4d40a582c077b5d
|
[
"BSD-3-Clause"
] | null | null | null |
"""vbench benchmarks for pandas time-series operations."""
from vbench.api import Benchmark
from datetime import datetime

# Common setup: a 100k-point minutely series.  The try/except keeps the
# benchmarks working across pandas versions (date_range vs. legacy DateRange).
common_setup = """from pandas_vb_common import *
N = 100000
try:
    rng = date_range('1/1/2000', periods=N, freq='min')
except NameError:
    rng = DateRange('1/1/2000', periods=N, offset=datetools.Minute())
ts = Series(np.random.randn(N), index=rng)
"""

#----------------------------------------------------------------------
# Test slice minutely series

timeseries_slice_minutely = Benchmark('ts[:10000]', common_setup)

#----------------------------------------------------------------------
# Test conversion (1min -> 5min resampling).
# NOTE: a dead `setup = common_setup + """..."""` assignment was removed
# here; these two benchmarks use common_setup directly.

timeseries_1min_5min_ohlc = Benchmark("ts[:10000].convert('5min', how='ohlc')",
                                      common_setup)

timeseries_1min_5min_mean = Benchmark("ts[:10000].convert('5min', how='mean')",
                                      common_setup)

#----------------------------------------------------------------------
# Irregular alignment: add two half-length random subsamples of ts.

setup = common_setup + """
lindex = np.random.permutation(N)[:N // 2]
rindex = np.random.permutation(N)[:N // 2]
left = Series(ts.values.take(lindex), index=ts.index.take(lindex))
right = Series(ts.values.take(rindex), index=ts.index.take(rindex))
"""

timeseries_add_irregular = Benchmark('left + right', setup)

#----------------------------------------------------------------------
# Sort large irregular time series (shuffled secondly index).

setup = common_setup + """
N = 100000
rng = date_range('1/1/2000', periods=N, freq='s')
rng = rng.take(np.random.permutation(N))
ts = Series(np.random.randn(N), index=rng)
"""

timeseries_sort_index = Benchmark('ts.sort_index()', setup,
                                  start_date=datetime(2011, 11, 1))
| 29.894737
| 79
| 0.546948
|
4a034f6063c609eb965fc3ecc7f2eee93c7213f9
| 3,965
|
py
|
Python
|
aries_cloudagency/utils/tests/test_classloader.py
|
osancus/aries-cloudagency-python
|
d0ce77a3a11927715d1cb3533313d17a27b1cf7a
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagency/utils/tests/test_classloader.py
|
osancus/aries-cloudagency-python
|
d0ce77a3a11927715d1cb3533313d17a27b1cf7a
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagency/utils/tests/test_classloader.py
|
osancus/aries-cloudagency-python
|
d0ce77a3a11927715d1cb3533313d17a27b1cf7a
|
[
"Apache-2.0"
] | 3
|
2020-07-03T21:35:34.000Z
|
2020-09-09T13:26:20.000Z
|
from unittest import TestCase, mock
from ...core.error import BaseError
from .. import classloader as test_module
from ..classloader import ClassLoader, ClassNotFoundError, ModuleLoadError
class TestClassLoader(TestCase):
    """Unit tests for the ``ClassLoader`` module/class loading helpers."""

    def test_import_loaded(self):
        # Modules already loaded (unittest) are returned directly.
        assert ClassLoader.load_module("unittest")

    def test_import_local(self):
        # With an emptied module cache, a fully-qualified import still works.
        with mock.patch.object(test_module.sys, "modules", {}):
            assert (
                ClassLoader.load_module("aries_cloudagency.transport").__name__
                == "aries_cloudagency.transport"
            )

    def test_import_relative(self):
        # Bare, single-dot and double-dot relative names all resolve against
        # the supplied package anchor.
        with mock.patch.object(test_module.sys, "modules", {}):
            assert (
                ClassLoader.load_module("transport", "aries_cloudagency").__name__
                == "aries_cloudagency.transport"
            )
        with mock.patch.object(test_module.sys, "modules", {}):
            assert (
                ClassLoader.load_module(".transport", "aries_cloudagency").__name__
                == "aries_cloudagency.transport"
            )
        with mock.patch.object(test_module.sys, "modules", {}):
            assert (
                ClassLoader.load_module(
                    "..transport", "aries_cloudagency.config"
                ).__name__
                == "aries_cloudagency.transport"
            )

    def test_import_missing(self):
        # Nonexistent modules yield None rather than raising.
        with mock.patch.object(test_module.sys, "modules", {}):
            assert ClassLoader.load_module("aries_cloudagency.not") is None
        with mock.patch.object(test_module.sys, "modules", {}):
            assert ClassLoader.load_module("aries_cloudagency.not.a-module") is None
        with mock.patch.object(test_module.sys, "modules", {}):
            assert ClassLoader.load_module("aries_cloudagency", "not.a-module") is None

    def test_import_error(self):
        # An underlying ModuleNotFoundError is wrapped in ModuleLoadError.
        with mock.patch.object(
            test_module, "import_module", autospec=True
        ) as import_module, mock.patch.object(test_module.sys, "modules", {}):
            import_module.side_effect = ModuleNotFoundError
            with self.assertRaises(ModuleLoadError):
                ClassLoader.load_module("aries_cloudagency.config")

    def test_load_class(self):
        # Classes resolve via (name, module) or via a single dotted path.
        assert ClassLoader.load_class("TestCase", "unittest") is TestCase
        assert ClassLoader.load_class("unittest.TestCase") is TestCase

    def test_load_class_missing(self):
        # Unknown names, bad modules, and non-class attributes all raise.
        with self.assertRaises(ClassNotFoundError):
            # with no default module
            assert ClassLoader.load_class("NotAClass")
        with self.assertRaises(ClassNotFoundError):
            assert ClassLoader.load_class("aries_cloudagency.NotAClass")
        with self.assertRaises(ClassNotFoundError):
            assert ClassLoader.load_class("not-a-module.NotAClass")
        with self.assertRaises(ClassNotFoundError):
            # should be a string, not a type
            assert ClassLoader.load_class("aries_cloudagency.version.__version__")

    def test_load_subclass(self):
        # Finds some subclass of BaseError within the target module.
        assert ClassLoader.load_subclass_of(BaseError, "aries_cloudagency.config.base")

    def test_load_subclass_missing(self):
        # No matching subclass, or an invalid module name, raises.
        with self.assertRaises(ClassNotFoundError):
            assert ClassLoader.load_subclass_of(
                TestCase, "aries_cloudagency.config.base"
            )
        with self.assertRaises(ClassNotFoundError):
            assert ClassLoader.load_subclass_of(
                TestCase, "aries_cloudagency.not-a-module"
            )

    def test_scan_packages(self):
        # Subpackage scan returns dotted names of direct subpackages.
        pkgs = ClassLoader.scan_subpackages("aries_cloudagency")
        assert "aries_cloudagency.transport" in pkgs
        pkgs = ClassLoader.scan_subpackages("aries_cloudagency.transport")
        assert "aries_cloudagency.transport.inbound" in pkgs

    def test_scan_packages_missing(self):
        with self.assertRaises(ModuleLoadError):
            ClassLoader.scan_subpackages("aries_cloudagency.not-a-module")
| 42.634409
| 87
| 0.659016
|
4a034fcc72c05199f1e6a371e631fc79f66aa4f7
| 636
|
py
|
Python
|
remove.py
|
anandpopat9/Twitter-Search
|
72cb54c26deb0be4c3b1f0bbd8347c418e2d2c17
|
[
"0BSD"
] | null | null | null |
remove.py
|
anandpopat9/Twitter-Search
|
72cb54c26deb0be4c3b1f0bbd8347c418e2d2c17
|
[
"0BSD"
] | null | null | null |
remove.py
|
anandpopat9/Twitter-Search
|
72cb54c26deb0be4c3b1f0bbd8347c418e2d2c17
|
[
"0BSD"
] | null | null | null |
"""Re-emit tweet records as compact ``{"id", "text"}`` JSON lines.

Reads one JSON object per line from the input file, strips newlines and
double quotes from the tweet text, and appends one reduced record per line
to the output file.

Fixes over the original script: the output JSON is now produced with
``json.dumps`` (the hand-concatenated string broke on backslashes and other
characters that need escaping), the duplicate/unused imports are gone, the
Python 2 ``reload(sys)``/``setdefaultencoding`` hack is replaced with an
explicit UTF-8 encoding on ``open``, and the files are closed via ``with``.
"""
import json

# Hard-coded input/output locations; change the topic/paths as needed.
IN_PATH = '/Users/anandpopat/desktop/demonetization/demonetization45_pfs.json'
OUT_PATH = '/Users/anandpopat/desktop/demonetization/demonetization_final3.json'


def clean_tweet(record_line):
    """Return a compact JSON string {"id": ..., "text": ...} for one line.

    Newlines and double quotes are removed from the tweet text (matching the
    original script's cleanup); json.dumps handles any remaining escaping.
    """
    record = json.loads(record_line)
    text = record["tweet_text"].replace('\n', '').replace('"', '')
    return json.dumps({"id": record["id"], "text": text}, ensure_ascii=False)


def main():
    """Stream the input file, appending one cleaned record per line."""
    with open(IN_PATH, 'r', encoding='utf-8') as src, \
            open(OUT_PATH, 'a', encoding='utf-8') as dst:
        for line in src:
            dst.write(clean_tweet(line) + '\n')


if __name__ == '__main__':
    main()
| 26.5
| 103
| 0.707547
|
4a0351196ef460fb831100a291611727e42cd380
| 1,282
|
py
|
Python
|
asclib/logging.py
|
AdaCore/style_checker
|
17108ebfc44375498063ecdad6c6e4430458e60a
|
[
"CNRI-Python"
] | 2
|
2017-10-22T18:04:26.000Z
|
2020-03-06T11:07:41.000Z
|
asclib/logging.py
|
AdaCore/style_checker
|
17108ebfc44375498063ecdad6c6e4430458e60a
|
[
"CNRI-Python"
] | null | null | null |
asclib/logging.py
|
AdaCore/style_checker
|
17108ebfc44375498063ecdad6c6e4430458e60a
|
[
"CNRI-Python"
] | 4
|
2018-05-22T12:08:54.000Z
|
2020-12-14T15:25:27.000Z
|
"""Simplistic level-based logging."""
import sys
# Default: logging disabled (mostly used for debugging).
logging_level = 0


def log_info(message, level=1):
    """Print an informational message when logging is enabled for `level`.

    The message is shown only when the module-wide ``logging_level`` is at
    least `level`; otherwise the call is a no-op.

    :param message: The message to print.
    :type message: str
    :param level: The importance level of the message.  The smaller
        the number, the more important the message.
    :type level: int
    """
    if logging_level >= level:
        print(message)
def log_error(error_message):
    """Write the given error message to standard error.

    :param error_message: The error message.  A list of strings is joined
        with newlines in between; a trailing newline is appended when the
        message does not already end with one.
    :type error_message: str | list[str]
    """
    text = (error_message if isinstance(error_message, str)
            else "\n".join(error_message))
    if not text.endswith("\n"):
        text += "\n"
    sys.stderr.write(text)
| 31.268293
| 74
| 0.677847
|
4a035142338ee5b166604498fc1a173373c20232
| 7,677
|
py
|
Python
|
official/vision/beta/dataloaders/segmentation_input.py
|
e10101/models
|
5c3e08b7697f0035b8731607277dc4e47e18317c
|
[
"Apache-2.0"
] | 2
|
2017-10-26T06:23:51.000Z
|
2020-09-11T21:09:41.000Z
|
official/vision/beta/dataloaders/segmentation_input.py
|
e10101/models
|
5c3e08b7697f0035b8731607277dc4e47e18317c
|
[
"Apache-2.0"
] | 2
|
2018-06-18T17:08:12.000Z
|
2021-04-12T05:39:04.000Z
|
official/vision/beta/dataloaders/segmentation_input.py
|
e10101/models
|
5c3e08b7697f0035b8731607277dc4e47e18317c
|
[
"Apache-2.0"
] | 2
|
2020-04-11T19:31:17.000Z
|
2021-04-07T12:53:28.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parser and processing for segmentation datasets."""
import tensorflow as tf
from official.vision.beta.dataloaders import decoder
from official.vision.beta.dataloaders import parser
from official.vision.beta.ops import preprocess_ops
class Decoder(decoder.Decoder):
  """Decodes serialized tf.Examples for the segmentation task."""

  def __init__(self):
    # Feature specs shared by the string- and int-valued keys below.
    string_feature = tf.io.FixedLenFeature((), tf.string, default_value='')
    int_feature = tf.io.FixedLenFeature((), tf.int64, default_value=0)
    self._keys_to_features = {
        'image/encoded': string_feature,
        'image/height': int_feature,
        'image/width': int_feature,
        'image/segmentation/class/encoded': string_feature,
    }

  def decode(self, serialized_example):
    """Parses one serialized tf.Example into a dictionary of tensors."""
    return tf.io.parse_single_example(serialized_example,
                                      self._keys_to_features)
class Parser(parser.Parser):
  """Parser to parse an image and its annotations into a dictionary of tensors.
  """

  def __init__(self,
               output_size,
               train_on_crops=False,
               resize_eval_groundtruth=True,
               groundtruth_padded_size=None,
               ignore_label=255,
               aug_rand_hflip=False,
               aug_scale_min=1.0,
               aug_scale_max=1.0,
               dtype='float32'):
    """Initializes parameters for parsing annotations in the dataset.

    Args:
      output_size: `Tensor` or `list` for [height, width] of output image. The
        output_size should be divided by the largest feature stride 2^max_level.
      train_on_crops: `bool`, if True, a training crop of size output_size
        is returned. This is useful for cropping original images during training
        while evaluating on original image sizes.
      resize_eval_groundtruth: `bool`, if True, eval groundtruth masks are
        resized to output_size.
      groundtruth_padded_size: `Tensor` or `list` for [height, width]. When
        resize_eval_groundtruth is set to False, the groundtruth masks are
        padded to this size.
      ignore_label: `int` the pixel with ignore label will not used for training
        and evaluation.
      aug_rand_hflip: `bool`, if True, augment training with random
        horizontal flip.
      aug_scale_min: `float`, the minimum scale applied to `output_size` for
        data augmentation during training.
      aug_scale_max: `float`, the maximum scale applied to `output_size` for
        data augmentation during training.
      dtype: `str`, data type. One of {`bfloat16`, `float32`, `float16`}.
    """
    self._output_size = output_size
    self._train_on_crops = train_on_crops
    self._resize_eval_groundtruth = resize_eval_groundtruth
    if (not resize_eval_groundtruth) and (groundtruth_padded_size is None):
      # BUG FIX: the two string parts previously concatenated without a
      # space ("needs to bespecified").
      raise ValueError('groundtruth_padded_size ([height, width]) needs to '
                       'be specified when resize_eval_groundtruth is False.')
    self._groundtruth_padded_size = groundtruth_padded_size
    self._ignore_label = ignore_label

    # Data augmentation.
    self._aug_rand_hflip = aug_rand_hflip
    self._aug_scale_min = aug_scale_min
    self._aug_scale_max = aug_scale_max

    # dtype.
    self._dtype = dtype

  def _prepare_image_and_label(self, data):
    """Prepare normalized image and label."""
    image = tf.io.decode_image(data['image/encoded'], channels=3)
    label = tf.io.decode_image(data['image/segmentation/class/encoded'],
                               channels=1)
    height = data['image/height']
    width = data['image/width']
    # Force static-rank HWC image and a leading singleton axis on the label
    # so downstream mask ops can treat it as a batch of one.
    image = tf.reshape(image, (height, width, 3))
    label = tf.reshape(label, (1, height, width))
    label = tf.cast(label, tf.float32)
    # Normalizes image with mean and std pixel values.
    image = preprocess_ops.normalize_image(image)
    return image, label

  def _parse_train_data(self, data):
    """Parses data for training."""
    image, label = self._prepare_image_and_label(data)

    if self._train_on_crops:
      # Concatenate image and mask so one random_crop keeps them aligned.
      label = tf.reshape(label, [data['image/height'], data['image/width'], 1])
      image_mask = tf.concat([image, label], axis=2)
      image_mask_crop = tf.image.random_crop(image_mask,
                                             self._output_size + [4])
      image = image_mask_crop[:, :, :-1]
      label = tf.reshape(image_mask_crop[:, :, -1], [1] + self._output_size)

    # Flips image randomly during training.
    if self._aug_rand_hflip:
      image, _, label = preprocess_ops.random_horizontal_flip(
          image, masks=label)

    # Resizes and crops image.
    image, image_info = preprocess_ops.resize_and_crop_image(
        image,
        self._output_size,
        self._output_size,
        aug_scale_min=self._aug_scale_min,
        aug_scale_max=self._aug_scale_max)

    # Resizes and crops boxes.
    image_scale = image_info[2, :]
    offset = image_info[3, :]

    # Pad label and make sure the padded region assigned to the ignore label.
    # The label is first offset by +1 and then padded with 0, so padding can
    # be distinguished from class 0 and mapped back to the ignore label.
    label += 1
    label = tf.expand_dims(label, axis=3)
    label = preprocess_ops.resize_and_crop_masks(
        label, image_scale, self._output_size, offset)
    label -= 1
    label = tf.where(tf.equal(label, -1),
                     self._ignore_label * tf.ones_like(label), label)
    label = tf.squeeze(label, axis=0)
    valid_mask = tf.not_equal(label, self._ignore_label)
    labels = {
        'masks': label,
        'valid_masks': valid_mask,
        'image_info': image_info,
    }

    # Cast image as self._dtype
    image = tf.cast(image, dtype=self._dtype)

    return image, labels

  def _parse_eval_data(self, data):
    """Parses data for evaluation."""
    image, label = self._prepare_image_and_label(data)
    # The label is first offset by +1 and then padded with 0 (see
    # _parse_train_data for the rationale).
    label += 1
    label = tf.expand_dims(label, axis=3)

    # Resizes and crops image.
    image, image_info = preprocess_ops.resize_and_crop_image(
        image, self._output_size, self._output_size)

    if self._resize_eval_groundtruth:
      # Resizes eval masks to match input image sizes. In that case, mean IoU
      # is computed on output_size not the original size of the images.
      image_scale = image_info[2, :]
      offset = image_info[3, :]
      label = preprocess_ops.resize_and_crop_masks(label, image_scale,
                                                   self._output_size, offset)
    else:
      label = tf.image.pad_to_bounding_box(
          label, 0, 0, self._groundtruth_padded_size[0],
          self._groundtruth_padded_size[1])

    label -= 1
    label = tf.where(tf.equal(label, -1),
                     self._ignore_label * tf.ones_like(label), label)
    label = tf.squeeze(label, axis=0)
    valid_mask = tf.not_equal(label, self._ignore_label)
    labels = {
        'masks': label,
        'valid_masks': valid_mask,
        'image_info': image_info
    }

    # Cast image as self._dtype
    image = tf.cast(image, dtype=self._dtype)

    return image, labels
| 38.577889
| 80
| 0.671486
|
4a0352e5cccf600636f6d38bb7c956ea52544623
| 20,812
|
py
|
Python
|
training/training_loop.py
|
ioalzx/stylegan2
|
90f11c9a86dca1cca0681f5cdd52dbe09d5f885c
|
[
"BSD-Source-Code"
] | null | null | null |
training/training_loop.py
|
ioalzx/stylegan2
|
90f11c9a86dca1cca0681f5cdd52dbe09d5f885c
|
[
"BSD-Source-Code"
] | null | null | null |
training/training_loop.py
|
ioalzx/stylegan2
|
90f11c9a86dca1cca0681f5cdd52dbe09d5f885c
|
[
"BSD-Source-Code"
] | null | null | null |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Main training script."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
from training import dataset
from training import misc
from metrics import metric_base
#----------------------------------------------------------------------------
# Just-in-time processing of training images before feeding them to the networks.
def process_reals(x, labels, lod, mirror_augment, mirror_augment_v, drange_data, drange_net):
    """Just-in-time processing of real images before feeding the networks.

    The flips along axes 3 and 2 suggest NCHW layout (axis 2 = height,
    axis 3 = width) -- TODO confirm against the dataset pipeline.
    Labels pass through unchanged.
    """
    with tf.name_scope('DynamicRange'):
        # Map the stored pixel range (drange_data) to the network range.
        x = tf.cast(x, tf.float32)
        x = misc.adjust_dynamic_range(x, drange_data, drange_net)
    if mirror_augment:
        with tf.name_scope('MirrorAugment'):
            # Flip each image horizontally with probability 0.5.
            x = tf.where(tf.random_uniform([tf.shape(x)[0]]) < 0.5, x, tf.reverse(x, [3]))
    if mirror_augment_v:
        with tf.name_scope('MirrorAugment_V'):
            # Flip each image vertically with probability 0.5.
            x = tf.where(tf.random_uniform([tf.shape(x)[0]]) < 0.5, x, tf.reverse(x, [2]))
    with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
        # 2x2 box-downscale, tile back up, then lerp with the original by
        # the fractional part of lod (0 = sharp, ->1 = fully blurred).
        s = tf.shape(x)
        y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
        y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
        y = tf.tile(y, [1, 1, 1, 2, 1, 2])
        y = tf.reshape(y, [-1, s[1], s[2], s[3]])
        x = tflib.lerp(x, y, lod - tf.floor(lod))
    with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
        # Nearest-neighbour upscale by 2**floor(lod) via tile-and-reshape.
        s = tf.shape(x)
        factor = tf.cast(2 ** tf.floor(lod), tf.int32)
        x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
        x = tf.tile(x, [1, 1, 1, factor, 1, factor])
        x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
    return x, labels
#----------------------------------------------------------------------------
# Evaluate time-varying training parameters.
def training_schedule(
    cur_nimg,
    training_set,
    lod_initial_resolution  = None,     # Image resolution used at the beginning.
    lod_training_kimg       = 600,      # Thousands of real images to show before doubling the resolution.
    lod_transition_kimg     = 600,      # Thousands of real images to show when fading in new layers.
    minibatch_size_base     = 32,       # Global minibatch size.
    minibatch_size_dict     = {},       # Resolution-specific overrides.
    minibatch_gpu_base      = 4,        # Number of samples processed at a time by one GPU.
    minibatch_gpu_dict      = {},       # Resolution-specific overrides.
    G_lrate_base            = 0.002,    # Learning rate for the generator.
    G_lrate_dict            = {},       # Resolution-specific overrides.
    D_lrate_base            = 0.002,    # Learning rate for the discriminator.
    D_lrate_dict            = {},       # Resolution-specific overrides.
    lrate_rampup_kimg       = 0,        # Duration of learning rate ramp-up.
    tick_kimg_base          = 4,        # Default interval of progress snapshots.
    tick_kimg_dict          = {8:28, 16:24, 32:20, 64:16, 128:12, 256:8, 512:6, 1024:4}): # Resolution-specific overrides.
    """Evaluate the time-varying training parameters for the current point
    in training.

    Returns an EasyDict with kimg, lod, minibatch_size, minibatch_gpu,
    G_lrate, D_lrate and tick_kimg.  NOTE: in this fork the
    resolution-specific override dicts are accepted for interface
    compatibility but ignored -- base values (and a fixed tick interval)
    are used instead.
    """
    sched = dnnlib.EasyDict()
    sched.kimg = cur_nimg / 1000.0

    # Which progressive-growing phase are we in, and how far into it?
    phase_dur = lod_training_kimg + lod_transition_kimg
    if phase_dur > 0:
        phase_idx = int(np.floor(sched.kimg / phase_dur))
    else:
        phase_idx = 0
    phase_kimg = sched.kimg - phase_idx * phase_dur

    # Level-of-detail (0.0 means full resolution).
    if lod_initial_resolution is None:
        sched.lod = 0.0
    else:
        sched.lod = training_set.resolution_log2
        sched.lod -= np.floor(np.log2(lod_initial_resolution))
        sched.lod -= phase_idx
        if lod_transition_kimg > 0:
            sched.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
        sched.lod = max(sched.lod, 0.0)

    # Minibatch size: resolution overrides disabled, base values used as-is.
    sched.minibatch_size = minibatch_size_base
    sched.minibatch_gpu = minibatch_gpu_base

    # Learning rates, with optional linear ramp-up from zero kimg.
    sched.G_lrate = G_lrate_base
    sched.D_lrate = D_lrate_base
    if lrate_rampup_kimg > 0:
        rampup = min(sched.kimg / lrate_rampup_kimg, 1.0)
        sched.G_lrate *= rampup
        sched.D_lrate *= rampup

    # Snapshot interval is fixed in this fork (tick_kimg_dict is unused).
    sched.tick_kimg = 6
    return sched
#----------------------------------------------------------------------------
# Main training script.
def training_loop(
    G_args = {},  # Options for generator network.
    D_args = {},  # Options for discriminator network.
    G_opt_args = {},  # Options for generator optimizer.
    D_opt_args = {},  # Options for discriminator optimizer.
    G_loss_args = {},  # Options for generator loss.
    D_loss_args = {},  # Options for discriminator loss.
    dataset_args = {},  # Options for dataset.load_dataset().
    sched_args = {},  # Options for train.TrainingSchedule.
    grid_args = {},  # Options for train.setup_snapshot_image_grid().
    metric_arg_list = [],  # Options for MetricGroup.
    tf_config = {},  # Options for tflib.init_tf().
    data_dir = None,  # Directory to load datasets from.
    G_smoothing_kimg = 10.0,  # Half-life of the running average of generator weights.
    minibatch_repeats = 4,  # Number of minibatches to run before adjusting training parameters.
    lazy_regularization = True,  # Perform regularization as a separate training step?
    G_reg_interval = 4,  # How often the perform regularization for G? Ignored if lazy_regularization=False.
    D_reg_interval = 16,  # How often the perform regularization for D? Ignored if lazy_regularization=False.
    reset_opt_for_new_lod = True,  # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced?
    total_kimg = 25000,  # Total length of the training, measured in thousands of real images.
    mirror_augment = False,  # Enable mirror augment?
    mirror_augment_v = False,  # Enable mirror augment vertically?
    drange_net = [-1,1],  # Dynamic range used when feeding image data to the networks.
    image_snapshot_ticks = 4,  # How often to save image snapshots? None = only save 'reals.png' and 'fakes-init.png'.
    network_snapshot_ticks = 4,  # How often to save network snapshots? None = only save 'networks-final.pkl'.
    save_tf_graph = False,  # Include full TensorFlow computation graph in the tfevents file?
    save_weight_histograms = False,  # Include weight histograms in the tfevents file?
    resume_pkl = 'latest',  # Network pickle to resume training from, None = train from scratch.
    resume_kimg = 0.0,  # Assumed training progress at the beginning. Affects reporting and training schedule.
    resume_time = 0.0,  # Assumed wallclock time at the beginning. Affects reporting.
    resume_with_new_nets = False):  # Construct new networks according to G_args and D_args before resuming training?
    """Main GAN training loop (TF1 graph mode).

    Builds G/D/Gs networks (or resumes them from a pickle), constructs the
    per-GPU training graph, then runs the minibatch loop until `total_kimg`
    thousand real images have been shown, periodically saving image grids,
    network snapshots and metric evaluations.

    NOTE(review): the mutable-dict parameter defaults are shared across
    calls; callers appear to always pass explicit dicts, but confirm before
    relying on the defaults.
    """
    # Initialize dnnlib and TensorFlow.
    tflib.init_tf(tf_config)
    num_gpus = dnnlib.submit_config.num_gpus

    # Load training set.
    training_set = dataset.load_dataset(data_dir=dnnlib.convert_path(data_dir), verbose=True, **dataset_args)
    grid_size, grid_reals, grid_labels = misc.setup_snapshot_image_grid(training_set, **grid_args)
    misc.save_image_grid(grid_reals, dnnlib.make_run_dir_path('reals.jpg'), drange=training_set.dynamic_range, grid_size=grid_size)

    # Construct or load networks.
    with tf.device('/gpu:0'):
        if resume_pkl == 'latest':
            # Resolve the special 'latest' marker to the newest pickle in the run dir.
            resume_pkl, resume_kimg = misc.locate_latest_pkl(dnnlib.submit_config.run_dir_root)
        if resume_pkl is None or resume_with_new_nets:
            print('Constructing networks...')
            G = tflib.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **G_args)
            D = tflib.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **D_args)
            Gs = G.clone('Gs')  # Gs = exponential moving average of G's weights, used for snapshots/metrics.
        if resume_pkl is not None:
            print('Loading networks from "%s"...' % resume_pkl)
            rG, rD, rGs = misc.load_pkl(resume_pkl)
            # Either copy weights into freshly constructed nets, or adopt the pickled nets wholesale.
            if resume_with_new_nets: G.copy_vars_from(rG); D.copy_vars_from(rD); Gs.copy_vars_from(rGs)
            else: G = rG; D = rD; Gs = rGs

    # Print layers and generate initial image snapshot.
    G.print_layers(); D.print_layers()
    sched = training_schedule(cur_nimg=total_kimg*1000, training_set=training_set, **sched_args)
    grid_latents = np.random.randn(np.prod(grid_size), *G.input_shape[1:])
    grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch_gpu)
    misc.save_image_grid(grid_fakes, dnnlib.make_run_dir_path('fakes_init.jpg'), drange=drange_net, grid_size=grid_size)

    # Setup training inputs.
    print('Building TensorFlow graph...')
    with tf.name_scope('Inputs'), tf.device('/cpu:0'):
        lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[])
        lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[])
        minibatch_size_in = tf.placeholder(tf.int32, name='minibatch_size_in', shape=[])
        minibatch_gpu_in = tf.placeholder(tf.int32, name='minibatch_gpu_in', shape=[])
        # Number of gradient-accumulation rounds per optimizer step.
        minibatch_multiplier = minibatch_size_in // (minibatch_gpu_in * num_gpus)
        # EMA decay chosen so Gs has a half-life of G_smoothing_kimg thousand images.
        Gs_beta = 0.5 ** tf.div(tf.cast(minibatch_size_in, tf.float32), G_smoothing_kimg * 1000.0) if G_smoothing_kimg > 0.0 else 0.0

    # Setup optimizers.
    G_opt_args = dict(G_opt_args)  # Copy so the caller's dicts are not mutated below.
    D_opt_args = dict(D_opt_args)
    for args, reg_interval in [(G_opt_args, G_reg_interval), (D_opt_args, D_reg_interval)]:
        args['minibatch_multiplier'] = minibatch_multiplier
        args['learning_rate'] = lrate_in
        if lazy_regularization:
            # Compensate lr/betas for the fact that the reg step runs only every reg_interval steps.
            mb_ratio = reg_interval / (reg_interval + 1)
            args['learning_rate'] *= mb_ratio
            if 'beta1' in args: args['beta1'] **= mb_ratio
            if 'beta2' in args: args['beta2'] **= mb_ratio
    G_opt = tflib.Optimizer(name='TrainG', **G_opt_args)
    D_opt = tflib.Optimizer(name='TrainD', **D_opt_args)
    # Regularization optimizers share state with the main ones.
    G_reg_opt = tflib.Optimizer(name='RegG', share=G_opt, **G_opt_args)
    D_reg_opt = tflib.Optimizer(name='RegD', share=D_opt, **D_opt_args)

    # Build training graph for each GPU.
    data_fetch_ops = []
    for gpu in range(num_gpus):
        with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu):

            # Create GPU-specific shadow copies of G and D.
            G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow')
            D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow')

            # Fetch training data via temporary variables.
            with tf.name_scope('DataFetch'):
                sched = training_schedule(cur_nimg=int(resume_kimg*1000), training_set=training_set, **sched_args)
                reals_var = tf.Variable(name='reals', trainable=False, initial_value=tf.zeros([sched.minibatch_gpu] + training_set.shape))
                labels_var = tf.Variable(name='labels', trainable=False, initial_value=tf.zeros([sched.minibatch_gpu, training_set.label_size]))
                reals_write, labels_write = training_set.get_minibatch_tf()
                reals_write, labels_write = process_reals(reals_write, labels_write, lod_in, mirror_augment, mirror_augment_v, training_set.dynamic_range, drange_net)
                # Keep the tail of the variable untouched when the active minibatch is smaller.
                reals_write = tf.concat([reals_write, reals_var[minibatch_gpu_in:]], axis=0)
                labels_write = tf.concat([labels_write, labels_var[minibatch_gpu_in:]], axis=0)
                data_fetch_ops += [tf.assign(reals_var, reals_write)]
                data_fetch_ops += [tf.assign(labels_var, labels_write)]
                reals_read = reals_var[:minibatch_gpu_in]
                labels_read = labels_var[:minibatch_gpu_in]

            # Evaluate loss functions.
            lod_assign_ops = []
            if 'lod' in G_gpu.vars: lod_assign_ops += [tf.assign(G_gpu.vars['lod'], lod_in)]
            if 'lod' in D_gpu.vars: lod_assign_ops += [tf.assign(D_gpu.vars['lod'], lod_in)]
            with tf.control_dependencies(lod_assign_ops):
                with tf.name_scope('G_loss'):
                    G_loss, G_reg = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_gpu_in, **G_loss_args)
                with tf.name_scope('D_loss'):
                    D_loss, D_reg = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, minibatch_size=minibatch_gpu_in, reals=reals_read, labels=labels_read, **D_loss_args)

            # Register gradients.
            if not lazy_regularization:
                # Fold regularizers directly into the main losses.
                if G_reg is not None: G_loss += G_reg
                if D_reg is not None: D_loss += D_reg
            else:
                # Scale by the interval so the lazily-applied reg has the same expected magnitude.
                if G_reg is not None: G_reg_opt.register_gradients(tf.reduce_mean(G_reg * G_reg_interval), G_gpu.trainables)
                if D_reg is not None: D_reg_opt.register_gradients(tf.reduce_mean(D_reg * D_reg_interval), D_gpu.trainables)
            G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables)
            D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables)

    # Setup training ops.
    data_fetch_op = tf.group(*data_fetch_ops)
    G_train_op = G_opt.apply_updates()
    D_train_op = D_opt.apply_updates()
    G_reg_op = G_reg_opt.apply_updates(allow_no_op=True)
    D_reg_op = D_reg_opt.apply_updates(allow_no_op=True)
    Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta)

    # Finalize graph.
    with tf.device('/gpu:0'):
        try:
            peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse()
        except tf.errors.NotFoundError:
            # Memory-stats op unavailable in this TF build; report 0 instead.
            peak_gpu_mem_op = tf.constant(0)
    tflib.init_uninitialized_vars()

    print('Initializing logs...')
    summary_log = tf.summary.FileWriter(dnnlib.make_run_dir_path())
    if save_tf_graph:
        summary_log.add_graph(tf.get_default_graph())
    if save_weight_histograms:
        G.setup_weight_histograms(); D.setup_weight_histograms()
    metrics = metric_base.MetricGroup(metric_arg_list)

    print('Training for %d kimg...\n' % total_kimg)
    dnnlib.RunContext.get().update('', cur_epoch=resume_kimg, max_epoch=total_kimg)
    maintenance_time = dnnlib.RunContext.get().get_last_update_interval()
    cur_nimg = int(resume_kimg * 1000)
    cur_tick = -1
    tick_start_nimg = cur_nimg
    prev_lod = -1.0
    running_mb_counter = 0
    while cur_nimg < total_kimg * 1000:
        if dnnlib.RunContext.get().should_stop(): break

        # Choose training parameters and configure training ops.
        sched = training_schedule(cur_nimg=cur_nimg, training_set=training_set, **sched_args)
        assert sched.minibatch_size % (sched.minibatch_gpu * num_gpus) == 0
        training_set.configure(sched.minibatch_gpu, sched.lod)
        if reset_opt_for_new_lod:
            # Integer lod boundary crossed => new layers became active; drop stale Adam moments.
            if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod):
                G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state()
        prev_lod = sched.lod

        # Run training ops.
        # Seperate to two feed_dict, G/D rate matters for G/D train and reg optimizers, not for data_fetch_op and Gs_update_op
        feed_dict_g = {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_size_in: sched.minibatch_size, minibatch_gpu_in: sched.minibatch_gpu}
        feed_dict_d = {lod_in: sched.lod, lrate_in: sched.D_lrate, minibatch_size_in: sched.minibatch_size, minibatch_gpu_in: sched.minibatch_gpu}
        for _repeat in range(minibatch_repeats):
            rounds = range(0, sched.minibatch_size, sched.minibatch_gpu * num_gpus)
            run_G_reg = (lazy_regularization and running_mb_counter % G_reg_interval == 0)
            run_D_reg = (lazy_regularization and running_mb_counter % D_reg_interval == 0)
            cur_nimg += sched.minibatch_size
            running_mb_counter += 1

            # Fast path without gradient accumulation.
            if len(rounds) == 1:
                tflib.run([G_train_op, data_fetch_op], feed_dict_g)
                if run_G_reg:
                    tflib.run(G_reg_op, feed_dict_g)
                tflib.run([D_train_op, Gs_update_op], feed_dict_d)
                if run_D_reg:
                    tflib.run(D_reg_op, feed_dict_d)

            # Slow path with gradient accumulation.
            else:
                for _round in rounds:
                    tflib.run(G_train_op, feed_dict_g)
                if run_G_reg:
                    for _round in rounds:
                        tflib.run(G_reg_op, feed_dict_g)
                tflib.run(Gs_update_op, feed_dict_g)
                for _round in rounds:
                    tflib.run(data_fetch_op, feed_dict_d)
                    tflib.run(D_train_op, feed_dict_d)
                if run_D_reg:
                    for _round in rounds:
                        tflib.run(D_reg_op, feed_dict_d)

        # Perform maintenance tasks once per tick.
        done = (cur_nimg >= total_kimg * 1000)
        if cur_tick < 0 or cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done:
            cur_tick += 1
            tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0
            tick_start_nimg = cur_nimg
            tick_time = dnnlib.RunContext.get().get_time_since_last_update()
            total_time = dnnlib.RunContext.get().get_time_since_start() + resume_time

            # Report progress.
            print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %-6.1f gpumem %.1f' % (
                autosummary('Progress/tick', cur_tick),
                autosummary('Progress/kimg', cur_nimg / 1000.0),
                autosummary('Progress/lod', sched.lod),
                autosummary('Progress/minibatch', sched.minibatch_size),
                dnnlib.util.format_time(autosummary('Timing/total_sec', total_time)),
                autosummary('Timing/sec_per_tick', tick_time),
                autosummary('Timing/sec_per_kimg', tick_time / tick_kimg),
                autosummary('Timing/maintenance_sec', maintenance_time),
                autosummary('Resources/peak_gpu_mem_gb', peak_gpu_mem_op.eval() / 2**30)))
            autosummary('Timing/total_hours', total_time / (60.0 * 60.0))
            autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))

            # Save snapshots.
            if image_snapshot_ticks is not None and (cur_tick % image_snapshot_ticks == 0 or done):
                grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch_gpu)
                misc.save_image_grid(grid_fakes, dnnlib.make_run_dir_path('fakes%06d.jpg' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size)
            if network_snapshot_ticks is not None and (cur_tick % network_snapshot_ticks == 0 or done):
                pkl = dnnlib.make_run_dir_path('network-snapshot-%06d.pkl' % (cur_nimg // 1000))
                misc.save_pkl((G, D, Gs), pkl)
                metrics.run(pkl, run_dir=dnnlib.make_run_dir_path(), data_dir=dnnlib.convert_path(data_dir), num_gpus=num_gpus, tf_config=tf_config)

            # Update summaries and RunContext.
            metrics.update_autosummaries()
            tflib.autosummary.save_summaries(summary_log, cur_nimg)
            dnnlib.RunContext.get().update('%.2f' % sched.lod, cur_epoch=cur_nimg // 1000, max_epoch=total_kimg)
            maintenance_time = dnnlib.RunContext.get().get_last_update_interval() - tick_time

    # Save final snapshot.
    misc.save_pkl((G, D, Gs), dnnlib.make_run_dir_path('network-final.pkl'))

    # All done.
    summary_log.close()
    training_set.close()
#----------------------------------------------------------------------------
| 56.248649
| 207
| 0.639439
|
4a035378df0fed5086e190d3acfd0fdb971794fa
| 357
|
py
|
Python
|
src/sortingandsearching/tests/test_stick_lengths.py
|
seahrh/cses-problem-set-python
|
53e8dba200893cc13a0f7b66fc18e55fa0af137a
|
[
"MIT"
] | null | null | null |
src/sortingandsearching/tests/test_stick_lengths.py
|
seahrh/cses-problem-set-python
|
53e8dba200893cc13a0f7b66fc18e55fa0af137a
|
[
"MIT"
] | null | null | null |
src/sortingandsearching/tests/test_stick_lengths.py
|
seahrh/cses-problem-set-python
|
53e8dba200893cc13a0f7b66fc18e55fa0af137a
|
[
"MIT"
] | null | null | null |
from sortingandsearching.stick_lengths import *
class TestStickLengths:
    """Tests for the CSES 'Stick Lengths' minimum total adjustment cost."""

    def test_array_length_of_2_or_less(self):
        # One stick costs nothing; two sticks cost exactly their difference.
        assert solve([9]) == 0
        assert solve([2, 1]) == 1

    def test_case_1(self):
        lengths = [2, 3, 1, 5, 2]
        assert solve(lengths) == 5

    def test_case_2(self):
        lengths = [1, 4, 7, 8, 10, 3, 2, 5, 6, 9]
        assert solve(lengths) == 25
| 25.5
| 60
| 0.571429
|
4a03543c1be2df962b41ed22bd42be60f8e4cbd2
| 5,948
|
py
|
Python
|
tune/noniterative/study.py
|
fugue-project/tune
|
bf2288ddcb29c8345d996a9b22c0910da9002da1
|
[
"Apache-2.0"
] | 14
|
2021-03-03T20:02:09.000Z
|
2021-11-10T20:32:22.000Z
|
tune/noniterative/study.py
|
fugue-project/tune
|
bf2288ddcb29c8345d996a9b22c0910da9002da1
|
[
"Apache-2.0"
] | 26
|
2021-04-30T19:56:06.000Z
|
2022-01-18T04:40:00.000Z
|
tune/noniterative/study.py
|
fugue-project/tune
|
bf2288ddcb29c8345d996a9b22c0910da9002da1
|
[
"Apache-2.0"
] | 2
|
2021-04-30T03:12:21.000Z
|
2022-02-05T12:13:37.000Z
|
from typing import Any, Callable, Dict, Iterable, Optional
from fugue import ArrayDataFrame, DataFrame, ExecutionEngine
from triad import assert_or_throw
from tune._utils import run_monitored_process
from tune.concepts.dataset import StudyResult, TuneDataset, _get_trials_from_row
from tune.concepts.flow import RemoteTrialJudge, TrialCallback, TrialJudge, TrialReport
from tune.concepts.flow.judge import Monitor, NoOpTrailJudge
from tune.constants import TUNE_REPORT_ADD_SCHEMA, TUNE_STOPPER_DEFAULT_CHECK_INTERVAL
from tune.exceptions import TuneCompileError, TuneInterrupted
from tune.noniterative.objective import (
NonIterativeObjectiveFunc,
NonIterativeObjectiveLocalOptimizer,
)
from tune.noniterative.stopper import NonIterativeStopper
def _make_judge(
monitor: Optional[Monitor] = None, stopper: Optional[NonIterativeStopper] = None
) -> Optional[TrialJudge]:
if monitor is None and stopper is None:
return None
if stopper is None and monitor is not None:
return NoOpTrailJudge(monitor)
if stopper is not None and monitor is None:
return stopper
if stopper is not None and monitor is not None:
stopper.reset_monitor(monitor)
return stopper
raise NotImplementedError # pragma: no cover
class NonIterativeStudy:
    """Runs a non-iterative hyperparameter study over a :class:`TuneDataset`.

    Couples an objective function with a local optimizer and executes one
    optimization per trial, either on the driver or distributed through
    Fugue transformers.
    """

    def __init__(
        self,
        objective: NonIterativeObjectiveFunc,
        optimizer: NonIterativeObjectiveLocalOptimizer,
    ):
        self._objective = objective
        self._optimizer = optimizer

    def optimize(  # noqa: C901
        self,
        dataset: TuneDataset,
        distributed: Optional[bool] = None,
        monitor: Optional[Monitor] = None,
        stopper: Optional[NonIterativeStopper] = None,
        stop_check_interval: Any = None,
    ) -> StudyResult:
        """Run the study and return a :class:`StudyResult`.

        :param dataset: search-space dataset; one trial set per row.
        :param distributed: force or forbid distributed execution,
            ``None`` defers to the optimizer's ``distributable`` flag.
        :param monitor: optional progress monitor.
        :param stopper: optional early stopper; enables interruptible runs.
        :param stop_check_interval: how often to poll the stopper
            (defaults to ``TUNE_STOPPER_DEFAULT_CHECK_INTERVAL`` when a
            stopper is set).
        """
        _dist = self._get_distributed(distributed)
        entrypoint: Any = None
        judge = _make_judge(monitor, stopper)
        if judge is not None:
            # The callback entrypoint lets remote workers talk to the driver-side judge.
            cb = TrialCallback(judge)
            entrypoint = cb.entrypoint
        if stopper is None:
            _interval: Any = None  # no stopper => no polling at all
        else:
            _interval = stop_check_interval or TUNE_STOPPER_DEFAULT_CHECK_INTERVAL

        def compute_processor(engine: ExecutionEngine, df: DataFrame) -> DataFrame:
            # Driver-side (non-distributed) execution path.
            out_schema = df.schema + TUNE_REPORT_ADD_SCHEMA

            def get_rows() -> Iterable[Any]:
                for row in self._compute_transformer(
                    df.as_local().as_dict_iterable(),
                    entrypoint=entrypoint,
                    stop_check_interval=_interval,
                ):
                    yield [row[k] for k in out_schema.names]

            # TODO: need to add back execution_engine for engine aware optimizers
            # t._execution_engine = engine  # type:ignore
            return ArrayDataFrame(get_rows(), out_schema)

        def preprocess(df: DataFrame) -> DataFrame:
            # Initialize monitoring once, before any trial runs.
            if judge is not None:
                judge.monitor.initialize()
            return df

        def postprocess(df: DataFrame) -> None:
            # Finalize monitoring after all results are materialized.
            if judge is not None:
                judge.monitor.finalize()

        if not _dist:
            res = dataset.data.process(preprocess).process(compute_processor)
        else:
            # Distributed path: each dataset row is transformed on a worker.
            res = (
                dataset.data.process(preprocess)
                .per_row()
                .transform(
                    self._compute_transformer,
                    schema=f"*,{TUNE_REPORT_ADD_SCHEMA}",
                    callback=entrypoint,
                    params=dict(stop_check_interval=_interval),
                )
            )

        res.persist().output(postprocess)
        return StudyResult(dataset=dataset, result=res)

    def _get_distributed(self, distributed: Optional[bool]) -> bool:
        # Resolve the tri-state flag; raise when distribution is requested
        # but the optimizer cannot be distributed.
        if distributed is None:
            return self._optimizer.distributable
        if distributed:
            assert_or_throw(
                self._optimizer.distributable,
                TuneCompileError(
                    f"can't distribute non-distributable optimizer {self._optimizer}"
                ),
            )
            return True
        return False

    def _compute_transformer(
        self,
        df: Iterable[Dict[str, Any]],
        entrypoint: Optional[Callable[[str, Dict[str, Any]], Any]] = None,
        stop_check_interval: Any = None,
    ) -> Iterable[Dict[str, Any]]:
        # Yields one result dict per trial. With a judge and a polling
        # interval, each trial runs in a monitored subprocess that can be
        # interrupted when the judge stops accepting the trial.
        j: Optional[RemoteTrialJudge] = (
            None if entrypoint is None else RemoteTrialJudge(entrypoint)
        )
        for row in df:
            for n, trial in enumerate(_get_trials_from_row(row, with_dfs=False)):
                if j is not None:
                    if stop_check_interval is None:
                        # monitor only
                        report = self._local_process_trial(row, n)
                        j.judge(report)
                        yield report.fill_dict(dict(row))
                    elif j.can_accept(trial):
                        try:
                            report = run_monitored_process(
                                self._local_process_trial,
                                [row, n],
                                {},
                                lambda: not j.can_accept(trial),  # type: ignore
                                stop_check_interval,
                            )
                        except TuneInterrupted:
                            # Trial was stopped mid-flight; skip it silently.
                            continue
                        except Exception:
                            # Re-raise anything else unchanged (explicit for clarity).
                            raise
                        j.judge(report)
                        yield report.fill_dict(dict(row))
                else:
                    report = self._local_process_trial(row, n)
                    yield report.fill_dict(dict(row))

    def _local_process_trial(self, row: Dict[str, Any], idx: int) -> TrialReport:
        # Re-derive the idx-th trial (with dataframes) and optimize it.
        trial = list(_get_trials_from_row(row))[idx]
        return self._optimizer.run(self._objective, trial)
| 38.374194
| 87
| 0.584398
|
4a0354530111c110a3a59dafcd3665afaf2af046
| 3,399
|
py
|
Python
|
tests/compas/test_iotools.py
|
funkchaser/compas
|
b58de8771484aa0c6068d43df78b1679503215de
|
[
"MIT"
] | 235
|
2017-11-07T07:33:22.000Z
|
2022-03-25T16:20:00.000Z
|
tests/compas/test_iotools.py
|
funkchaser/compas
|
b58de8771484aa0c6068d43df78b1679503215de
|
[
"MIT"
] | 770
|
2017-09-22T13:42:06.000Z
|
2022-03-31T21:26:45.000Z
|
tests/compas/test_iotools.py
|
funkchaser/compas
|
b58de8771484aa0c6068d43df78b1679503215de
|
[
"MIT"
] | 99
|
2017-11-06T23:15:28.000Z
|
2022-03-25T16:05:36.000Z
|
import io
import math
import os
import tempfile
import pytest
from compas import _iotools
# Folder containing this test module; fixture files live beneath it.
BASE_FOLDER = os.path.dirname(__file__)
# Expected byte sizes of the fixtures, used to verify complete reads.
IMAGE_FILE_SIZE = 252391
TEXT_FILE_SIZE = 747
REMOTE_IMAGE_FILE_SIZE = 2734
@pytest.fixture
def path_image():
    """Path of the PNG fixture used by the binary-mode tests."""
    fixtures = os.path.join(BASE_FOLDER, 'fixtures', 'iotools')
    return os.path.join(fixtures, 'image.png')
@pytest.fixture
def path_text():
    """Path of the plain-text fixture used by the text-mode tests."""
    fixtures = os.path.join(BASE_FOLDER, 'fixtures', 'iotools')
    return os.path.join(fixtures, 'text.txt')
@pytest.fixture
def url_text():
    """URL of a remote text document (the compas README)."""
    readme = 'https://raw.githubusercontent.com/compas-dev/compas/main/README.md'
    return readme
@pytest.fixture
def url_image():
    """URL of a small remote binary file (Wikipedia favicon)."""
    favicon = 'https://en.wikipedia.org/favicon.ico'
    return favicon
def test_open_file_path_binary(path_image):
    """Opening a path in 'rb' mode yields the complete binary payload."""
    with _iotools.open_file(path_image, mode='rb') as file:
        content = file.read()
    assert len(content) == IMAGE_FILE_SIZE
def test_open_file_path_text(path_text):
    """Opening a path in text mode yields the complete text payload."""
    with _iotools.open_file(path_text, mode='r') as file:
        content = file.read()
    assert len(content) == TEXT_FILE_SIZE
def test_open_file_object_binary(path_image):
    """An already-open binary file object is usable through open_file."""
    with open(path_image, mode='rb') as raw, _iotools.open_file(raw) as file:
        assert len(file.read()) == IMAGE_FILE_SIZE
def test_open_file_object_text(path_text):
    """An already-open text file object is usable through open_file."""
    with open(path_text, mode='r') as raw, _iotools.open_file(raw) as file:
        assert len(file.read()) == TEXT_FILE_SIZE
def test_open_file_memory_stream():
    """A BytesIO stream can be wrapped and read back verbatim."""
    text = b"All Gaul is divided into three parts, one of which the Belgae inhabit, the Aquitani another, those who in their own language are called Celts, in our Gauls, the third."
    stream = io.BytesIO(text)
    with _iotools.open_file(stream, mode='rb') as f:
        content = f.read()
    assert content == text
def test_open_file_url_image(url_image):
    """A binary URL is downloaded and fully readable."""
    with _iotools.open_file(url_image) as file:
        payload = file.read()
    assert len(payload) == REMOTE_IMAGE_FILE_SIZE
def test_open_file_url_text(url_text):
    """A text URL is downloaded; its bytes contain the expected marker."""
    with _iotools.open_file(url_text) as file:
        body = file.read()
    assert b'COMPAS framework' in body
def test_open_file_url_as_write_fails(url_text):
    """Opening a URL for writing is unsupported and must raise ValueError."""
    with pytest.raises(ValueError):
        with _iotools.open_file(url_text, mode='w'):
            pass
def test_iter_file_chunks_path_image(path_image):
    """iter_file yields ceil(file_size / chunk_size) chunks."""
    chunk_size = 30
    with _iotools.open_file(path_image, 'rb') as file:
        chunks = [data for data in _iotools.iter_file(file, size=chunk_size)]
    assert len(chunks) == math.ceil(IMAGE_FILE_SIZE / float(chunk_size))
def test_open_file_write_path():
    """open_file can create, then re-read, a file given a plain path."""
    path = os.path.join(tempfile.gettempdir(), 'test-file.txt')
    with _iotools.open_file(path, 'w') as file:
        file.write('Hello world')
    with _iotools.open_file(path, 'r') as file:
        content = file.read()
    assert content == 'Hello world'
def test_open_file_write_file_object():
    """open_file can wrap caller-opened file objects for write and read."""
    path = os.path.join(tempfile.gettempdir(), 'test-file.txt')
    with open(path, mode='w') as raw, _iotools.open_file(raw) as file:
        file.write('Hello world')
    with open(path, mode='r') as raw, _iotools.open_file(raw) as file:
        assert file.read() == 'Hello world'
def test_open_file_does_not_close_file_objects(path_image):
    """Caller-owned file objects stay open until the caller's context exits."""
    with open(path_image, mode='rb') as handle:
        with _iotools.open_file(handle):
            pass
        # open_file must not close a file object it did not create...
        assert not handle.closed
    # ...but the caller's own `with` still closes it.
    assert handle.closed
def test_open_file_closes_path_like(path_image):
    """Files that open_file itself opened from a path are closed on exit."""
    with _iotools.open_file(path_image, mode='rb') as handle:
        assert not handle.closed
    assert handle.closed
| 26.97619
| 181
| 0.695793
|
4a03554a2b6d6bcf56739c9c8bf900e84fe93c22
| 1,198
|
py
|
Python
|
setup.py
|
ofples/thinglang
|
b1391e8fb42d518fc0018400eecb608d18da915a
|
[
"MIT"
] | 5
|
2017-11-07T21:32:22.000Z
|
2019-06-30T18:35:17.000Z
|
setup.py
|
ofples/thinglang
|
b1391e8fb42d518fc0018400eecb608d18da915a
|
[
"MIT"
] | null | null | null |
setup.py
|
ofples/thinglang
|
b1391e8fb42d518fc0018400eecb608d18da915a
|
[
"MIT"
] | 1
|
2020-03-25T18:14:08.000Z
|
2020-03-25T18:14:08.000Z
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# To use a consistent encoding
from codecs import open
from os import path
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# Absolute directory of this setup script, so relative paths work from any CWD.
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='thinglang',
    version='0.0.0',
    description='Yet another general purpose programming language',
    long_description=long_description,
    url='https://github.com/ytanay/thinglang',
    author='Yotam Tanay',
    author_email='yotam@yotamtanay.com',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
    packages=find_packages(exclude=['docs', 'tests']),
    # Ship *.thingsymbols data files alongside the Python sources.
    package_data={'': ['*.thingsymbols']},
    include_package_data=True
)
| 24.958333
| 67
| 0.691152
|
4a0355d177a59d2cc7a4d1f13e21c91f9d0c6d94
| 12,915
|
py
|
Python
|
pyequion2/activity/pitzer.py
|
pyequion/pyequion
|
733cf1c59b5a63f7346d4cb4c21a9ffd2218a5fc
|
[
"BSD-3-Clause"
] | null | null | null |
pyequion2/activity/pitzer.py
|
pyequion/pyequion
|
733cf1c59b5a63f7346d4cb4c21a9ffd2218a5fc
|
[
"BSD-3-Clause"
] | null | null | null |
pyequion2/activity/pitzer.py
|
pyequion/pyequion
|
733cf1c59b5a63f7346d4cb4c21a9ffd2218a5fc
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*
import os
import pathlib
import re
import functools
import warnings
import numpy as np
try:
from .pitzer_sanity_assertions import make_sanity_assertions
make_sanity_assertions()
from .coo_tensor_ops import coo_tensor_ops
from . import py_coo_tensor_ops
except (ImportError, AssertionError): #Some import error. Use pythonized way
warnings.warn("Problem with Cython import. Using pure python operation.")
from . import py_coo_tensor_ops as coo_tensor_ops
from .. import utils
from .. import constants
from .. import datamods
def setup_pitzer(solutes, calculate_osmotic_coefficient=False):
    """Build a function mapping (molalities, TK) -> log10 activity coefficients.

    Pre-assembles the sparse Pitzer interaction-parameter tables for the
    given solute list and binds them into ``loggamma_and_osmotic``.
    """
    props = make_pitzer_dictionary()
    # Pairwise interaction tables, keyed by parameter name.
    pair_tables = {
        name: make_parameter_matrix(solutes, name, props)
        for name in ('B0', 'B1', 'B2', 'C0', 'THETA')
    }
    lamda, lamda_inds = make_parameter_matrix(solutes, 'LAMDA', props)
    psi, psi_inds = make_parameter_3_tensor(solutes, 'PSI', props)
    charges = np.array(
        [utils.charge_number(specie) for specie in solutes], dtype=np.double
    )
    lngamma = functools.partial(
        loggamma_and_osmotic,
        zarray=charges,
        calculate_osmotic_coefficient=calculate_osmotic_coefficient,
        B0_=pair_tables['B0'][0], B0_inds=pair_tables['B0'][1],
        B1_=pair_tables['B1'][0], B1_inds=pair_tables['B1'][1],
        B2_=pair_tables['B2'][0], B2_inds=pair_tables['B2'][1],
        C0_=pair_tables['C0'][0], C0_inds=pair_tables['C0'][1],
        THETA_=pair_tables['THETA'][0], THETA_inds=pair_tables['THETA'][1],
        PSI_=psi, PSI_inds=psi_inds,
        LAMBDA_=lamda, LAMBDA_inds=lamda_inds,
    )

    def log10gamma(xarray, TK):
        # Convert natural-log activity coefficients to base 10.
        return constants.LOG10E * lngamma(xarray, TK)

    return log10gamma
def loggamma_and_osmotic(carray, T, zarray,
                         calculate_osmotic_coefficient,
                         B0_, B0_inds,
                         B1_, B1_inds,
                         B2_, B2_inds,
                         C0_, C0_inds,
                         THETA_, THETA_inds,
                         PSI_, PSI_inds,
                         LAMBDA_, LAMBDA_inds):
    """Pitzer-model natural-log activity coefficients.

    ``carray`` and ``zarray`` are equal-length 1-D arrays of molalities and
    charge numbers; each ``X_``/``X_inds`` pair is a sparse (COO) table of
    temperature-fit coefficients built by ``setup_pitzer``.

    Returns a vector whose first entry is the osmotic coefficient (1.0 when
    ``calculate_osmotic_coefficient`` is False) followed by ln(gamma) for
    each solute.
    """
    # Collapse the 6-term temperature fits to scalar parameters at T.
    temp_vector = temperature_vector(T)
    B0 = np.sum(temp_vector*B0_, axis=-1)
    B1 = np.sum(temp_vector*B1_, axis=-1)
    B2 = np.sum(temp_vector*B2_, axis=-1)
    C0 = np.sum(temp_vector*C0_, axis=-1)
    THETA = np.sum(temp_vector*THETA_, axis=-1)
    PSI = np.sum(temp_vector*PSI_, axis=-1)
    LAMBDA = np.sum(temp_vector*LAMBDA_, axis=-1)
    # We are excluding ZETA here
    dim_matrices = np.array([carray.shape[0], carray.shape[0]], dtype=np.intc)
    dim_tensors = np.array(
        [carray.shape[0], carray.shape[0], carray.shape[0]], dtype=np.intc)
    # The C extension expects double input arrays.
    if carray.dtype != np.double:
        carray = carray.astype(np.double)
    if zarray.dtype != np.double:
        zarray = zarray.astype(np.double)
    # Ionic strength and its square root; Z is the total charge-weighted molality.
    I = 0.5*np.sum(carray*zarray**2)
    sqrtI = np.sqrt(I)
    Z = np.sum(carray*np.abs(zarray))
    A = A_debye(T)
    # Alpha coefficients chosen by the magnitude of the charge product:
    # (1,1),(2,1) < 4, (2,2) = 4, (3,2),(4,2) > 4
    valence_prod_1 = -1*zarray[B1_inds[:, 0]]*zarray[B1_inds[:, 1]]
    valence_prod_2 = -1*zarray[B2_inds[:, 0]]*zarray[B2_inds[:, 1]]
    alpha1 = 2.0*(valence_prod_1 != 4) + 1.4*(valence_prod_1 == 4)
    alpha2 = 12.0*(valence_prod_2 <= 4) + 50.0*(valence_prod_2 > 4)
    # Electrostatic (unsymmetrical) mixing terms via the J integral.
    x_mn = 6*A*zarray[THETA_inds[:, 0]]*zarray[THETA_inds[:, 1]]*sqrtI
    x_mm = 6*A*zarray[THETA_inds[:, 0]]*zarray[THETA_inds[:, 0]]*sqrtI
    x_nn = 6*A*zarray[THETA_inds[:, 1]]*zarray[THETA_inds[:, 1]]*sqrtI
    J = jtheta(x_mn) - 0.5*jtheta(x_mm) - 0.5*jtheta(x_nn)
    J_prime = (x_mn*jprime(x_mn) - 0.5*x_mm *
               jprime(x_mm) - 0.5*x_nn*jprime(x_nn))/(2*I)
    K1_theta = zarray[THETA_inds[:, 0]]*zarray[THETA_inds[:, 1]]/(4*I)
    K2_theta = K1_theta/I
    THETA_e = K1_theta*J
    PHI = THETA + THETA_e
    PHI_prime = K1_theta*J_prime - K2_theta*J  # THETA_e_prime
    C = C0/(2*np.sqrt(-1*zarray[C0_inds[:, 0]]*zarray[C0_inds[:, 1]]))
    # F term: Debye-Hueckel part plus ionic-strength derivatives of B and PHI.
    F_1 = A*f_debye(sqrtI)
    F_21 = 0.5*coo_tensor_ops.coo_matrix_vector_vector(
        B1*gprime(alpha1*sqrtI)/I, B1_inds, dim_matrices, carray, carray)
    F_22 = 0.5*coo_tensor_ops.coo_matrix_vector_vector(
        B2*gprime(alpha2*sqrtI)/I, B2_inds, dim_matrices, carray, carray)
    F_31 = 0.5*coo_tensor_ops.coo_matrix_vector_vector(
        PHI_prime, THETA_inds, dim_matrices, carray, carray)
    F = F_1 + F_21 + F_22 + F_31
    res1 = zarray**2*F
    # Binary (B, C) contributions to ln(gamma).
    sum_11 = 2*coo_tensor_ops.coo_matrix_vector(
        B0, B0_inds, dim_matrices, carray)
    sum_12 = 2*coo_tensor_ops.coo_matrix_vector(
        B1*gb(alpha1*sqrtI), B1_inds, dim_matrices, carray)
    sum_13 = 2*coo_tensor_ops.coo_matrix_vector(
        B2*gb(alpha2*sqrtI), B2_inds, dim_matrices, carray)
    sum_21 = Z*coo_tensor_ops.coo_matrix_vector(
        C, C0_inds, dim_matrices, carray)
    sum_22 = 0.5*np.abs(zarray) *\
        coo_tensor_ops.coo_matrix_vector_vector(
            C, C0_inds, dim_matrices, carray, carray)
    res2 = sum_11 + sum_12 + sum_13 + sum_21 + sum_22
    # Ternary mixing (PHI, PSI) contributions.
    sum_31 = 2*coo_tensor_ops.coo_matrix_vector(
        PHI, THETA_inds, dim_matrices, carray)
    sum_32 = 0.5*coo_tensor_ops.coo_tensor_vector_vector(
        PSI, PSI_inds, dim_tensors, carray, carray)
    res3 = sum_31 + sum_32
    # Neutral-species (LAMBDA) contribution.
    sum_41 = 2*coo_tensor_ops.coo_matrix_vector(
        LAMBDA, LAMBDA_inds, dim_matrices, carray)
    res4 = sum_41
    logg = res1 + res2 + res3 + res4  # res1 + res2 + res3 + res4

    # Water activity
    if not calculate_osmotic_coefficient:
        # Skip the extra sums; report the placeholder value 1.0.
        osmotic_coefficient = 1.0
    else:
        # Osmotic-coefficient sums mirror the ln(gamma) terms above but are
        # mole-fraction weighted; note B enters as B + I*B' here.
        res1w = -A*sqrtI**3/(1 + constants.B_DEBYE*sqrtI)
        sum_11w = 0.5*coo_tensor_ops.coo_matrix_vector_vector(
            B0, B0_inds, dim_matrices, carray, carray)
        sum_12w = 0.5*coo_tensor_ops.coo_matrix_vector_vector(
            B1*(gb(alpha1*sqrtI) + gprime(alpha1*sqrtI)), B0_inds, dim_matrices, carray, carray)
        sum_13w = 0.5*coo_tensor_ops.coo_matrix_vector_vector(
            B2*(gb(alpha2*sqrtI) + gprime(alpha2*sqrtI)), B0_inds, dim_matrices, carray, carray)
        sum_21w = 0.5*Z*coo_tensor_ops.coo_matrix_vector_vector(
            C, C0_inds, dim_matrices, carray, carray)
        res2w = sum_11w + sum_12w + sum_13w + sum_21w
        sum_31w = 0.5*coo_tensor_ops.coo_matrix_vector_vector(
            PHI + I*PHI_prime, THETA_inds, dim_matrices, carray, carray)
        sum_32w = 1/6*coo_tensor_ops.coo_tensor_vector_vector_vector(
            PSI, PSI_inds, dim_tensors, carray, carray, carray)
        res3w = sum_31w + sum_32w
        # NOTE(review): this rebinds sum_41 from the ln(gamma) section above;
        # harmless since res4 was already computed, but easy to misread.
        sum_41 = 0.5*coo_tensor_ops.coo_matrix_vector_vector(
            LAMBDA, LAMBDA_inds, dim_matrices, carray, carray)
        res4w = sum_41
        resw = 2/np.sum(carray)*(res1w + res2w + res3w + res4w)
        osmotic_coefficient = (resw + 1)
    # Prepend the osmotic coefficient to the ln(gamma) vector.
    logg = np.insert(logg, 0, osmotic_coefficient)
    return logg
def A_debye(T):
    """Debye-Hueckel A_phi coefficient (CGS units) at absolute temperature T.

    NOTE(review): the density and permittivity expressions look like
    empirical correlations for water -- confirm their validity range
    before reusing elsewhere.
    """
    avogadro = 6.0232e23      # molecules per mole
    elem_charge = 4.8029e-10  # esu
    boltzmann = 1.38045e-16   # erg/K
    density = -0.0004 * T + 1.1188
    permittivity = 305.7 * np.exp(-np.exp(-12.741 + 0.01875 * T) - T / 219.0)
    prefactor = (2.0 * np.pi * avogadro * density / 1000) ** 0.5
    charge_term = (elem_charge / (permittivity * boltzmann * T) ** 0.5) ** 3.0
    return 1.0 / 3.0 * prefactor * charge_term
def gprime(x):
    """Pitzer auxiliary g'(x) = -2*(1 - (1 + x + x^2/2) e^{-x}) / x^2."""
    decay = (1 + x + x**2/2) * np.exp(-x)
    return -2 * (1 - decay) / (x**2)
def jprime(x):
    """Derivative of the Pitzer J(x) approximation (same a,b,c,d fit as jtheta)."""
    a, b, c, d = 4.581, 0.7237, 0.0120, 0.528
    numerator = (4 + a/(x**b)*np.exp(c*x**d)) - (
        x**(1-2*b) * a * np.exp(c*x**d) * (c*d*x**(b+d-1) - b*x**(b-1)))
    denominator = (4 + a*np.exp(c*x**d)/(x**b)) ** 2
    return numerator / denominator
def gb(x):
    """Pitzer auxiliary g(x) = 2*(1 - (1 + x) e^{-x}) / x^2."""
    decay = (1 + x) * np.exp(-x)
    return 2 * (1 - decay) / (x**2)
def jtheta(x):
    """Pitzer J(x) integral approximation (Harvie-Weare style fit)."""
    denom = 4 + 4.581/(x**(0.7237)) * np.exp(0.0120*x**(0.528))
    return x / denom
def f_debye(sqrtI):
    """Long-range Debye-Hueckel term evaluated at sqrt(ionic strength)."""
    b = constants.B_DEBYE
    one_plus = 1 + b * sqrtI
    return -(sqrtI / one_plus + 2 / b * np.log(one_plus))
def make_pitzer_dictionary():
    """Parse the embedded Pitzer parameter table into a nested dictionary.

    Returns:
        dict: maps a parameter-section name (e.g. 'B0', 'PSI') to a dict
        whose keys are sorted tuples of species names and whose values are
        the six temperature-fit coefficients for that interaction.
    """
    # The table used to be read from data/pitzer.txt; it now ships embedded
    # in datamods.pitzer_data.
    # ownpath = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
    # filepath = ownpath.parents[0]/'data'/'pitzer.txt'
    # with open(filepath, 'r') as file:
    #     lines = file.read().split('\n')
    lines = datamods.pitzer_data.split('\n')
    # Excluding (OH) labeled elements (boron elements, essentialy) and PITZER line
    # lines = [line for line in lines[1:] if '(OH)' not in line]
    # Excluding PITZER line
    lines = lines[1:]
    lines_processed = [_process_line_pitzer(line) for line in lines]
    # Single-token lines are section headers; record their names (dropping
    # the leading marker character) and positions so the data lines between
    # consecutive headers can be sliced out below.
    property_names = []
    property_indexes = []
    for i, line in enumerate(lines_processed):
        if len(line) == 1:
            property_names.append(line[0][1:])
            property_indexes.append(i)
    property_dict = dict()
    i_low = 0
    i_max = len(property_names) - 1
    for j, name in enumerate(property_names):
        if j < i_max:
            # Data for section j spans from just after its header up to the
            # next section's header.
            i_high = property_indexes[j+1]
            lines_processed_i = lines_processed[i_low+1:i_high]
            i_low = i_high
            # property_dict[name] = lines_processed_i
        else:
            # Last section: take everything after its header.
            lines_processed_i = lines_processed[i_low+1:]
            # property_dict[name] = lines_processed_i
        property_dict_i = dict()
        for line in lines_processed_i:
            # The trailing six entries are fit coefficients; the leading
            # entries are the interacting species names.
            value = line[-6:]
            key = tuple(sorted(line[:-6]))
            property_dict_i[key] = value
        property_dict[name] = property_dict_i
    return property_dict
def temperature_vector(T):
    """Basis vector for the six-term temperature dependence of parameters.

    Dotting a parameter's six fit coefficients with this vector evaluates
    the parameter at temperature *T*; at the reference T0 = 298.15 K the
    vector is [1, 0, 0, 0, 0, 0].
    """
    ref = 298.15  # K (reference temperature)
    terms = [
        1,
        1 / T - 1 / ref,
        np.log(T / ref),
        T - ref,
        T**2 - ref**2,
        1 / T**2 - 1 / ref**2,
    ]
    return np.array(terms, dtype=np.double)
def make_parameter_matrix(species, parameter, property_dict):
    """Assemble COO-format data for a pairwise interaction parameter.

    Args:
        species: ordered list of species names.
        parameter: key into *property_dict* (e.g. 'B0').
        property_dict: maps parameter name -> {sorted species tuple: coeffs}.

    Returns:
        (M, M_inds): M is an (nnz, 6) float array of fit coefficients and
        M_inds the matching (nnz, 2) int array of (i, j) index pairs.
    """
    table = property_dict[parameter]
    coeff_rows = []
    index_pairs = []
    for i, name_i in enumerate(species):
        for j, name_j in enumerate(species):
            key = tuple(sorted([name_i, name_j]))
            if key not in table:
                continue
            coeffs = np.array(table[key])
            if i == j:
                # Like-species (diagonal) entries are doubled — presumably
                # to account for symmetric double counting elsewhere.
                coeffs = coeffs * 2
            coeff_rows.append(coeffs)
            index_pairs.append((i, j))
    M = np.array(coeff_rows, dtype=np.double)
    M_inds = np.array(index_pairs, dtype=np.intc)
    if M.shape[0] == 0:
        # Preserve the conventional empty shapes for downstream code.
        M = M.reshape(0, 6)
        M_inds = M_inds.reshape(0, 2)
    return M, M_inds
def make_parameter_3_tensor(species, parameter, property_dict):
    """Assemble COO-format data for a ternary interaction parameter.

    Args:
        species: ordered list of species names.
        parameter: key into *property_dict* (e.g. 'PSI').
        property_dict: maps parameter name -> {sorted species tuple: coeffs}.

    Returns:
        (M, M_inds): M is an (nnz, 6) float array of fit coefficients and
        M_inds the matching (nnz, 3) int array of (i, j, k) index triples.
    """
    table = property_dict[parameter]
    coeff_rows = []
    index_triples = []
    for i, sp_i in enumerate(species):
        for j, sp_j in enumerate(species):
            for k, sp_k in enumerate(species):
                key = tuple(sorted([sp_i, sp_j, sp_k]))
                if key not in table:
                    continue
                coeffs = np.array(table[key])
                # Multiplicity factors for repeated indices — matches the
                # doubling convention of make_parameter_matrix.
                if i == j == k:
                    coeffs = coeffs * 3
                elif i == j or i == k or j == k:
                    coeffs = coeffs * 2
                coeff_rows.append(coeffs)
                index_triples.append((i, j, k))
    M = np.array(coeff_rows, dtype=np.double)
    M_inds = np.array(index_triples, dtype=np.intc)
    if M.shape[0] == 0:
        # Preserve the conventional empty shapes for downstream code.
        M = M.reshape(0, 6)
        M_inds = M_inds.reshape(0, 3)
    return M, M_inds
def _find_and_replace_charge_signed(string, sign):
# Cation finding
pattern = r'.*(\%s\d).*'%sign
match = re.search(pattern, string)
if match:
number = int(match.group(1)[-1])
patternsub = r'\%s\d' % sign
new_string = re.sub(patternsub, sign*number, string)
else:
new_string = string
return new_string
def _find_and_replace_charge(string):
    """Expand both cation (+n) and anion (-n) charge digits in *string*."""
    for sign in ('+', '-'):
        string = _find_and_replace_charge_signed(string, sign)
    return string
def _remove_after_hash(linestrings):
for i, string in enumerate(linestrings):
if '#' in string:
return linestrings[:i]
return linestrings
def _process_line_pitzer(line):
    """Parse one line of the Pitzer data table.

    A line with a single token is a parameter-section header and is returned
    as-is (a one-element list). Otherwise trailing '#' comments are stripped,
    numeric tokens are converted to float, species tokens get their charge
    digits expanded (e.g. 'Ca+2' -> 'Ca++'), and the list is right-padded
    with 0.0 to a fixed length: 8 when the third token is numeric (binary
    interaction line), 9 otherwise (ternary interaction line).
    """
    tokens = line.split()
    if len(tokens) == 1:  # Parameter-section header (e.g. '-B0')
        return tokens
    tokens = _remove_after_hash(tokens)  # Remove comments
    for i, token in enumerate(tokens):
        try:  # Numeric coefficient
            tokens[i] = float(token)
        # Was a bare `except:`; narrowed to ValueError so real errors
        # (KeyboardInterrupt, typos raising NameError, ...) still surface.
        except ValueError:  # Non-numeric: a species/element symbol
            tokens[i] = _find_and_replace_charge(token)
    max_size = 8 if isinstance(tokens[2], float) else 9
    if len(tokens) < max_size:
        tokens = tokens + [0.0] * (max_size - len(tokens))
    return tokens
| 37.985294
| 96
| 0.606272
|
4a03564ee37d2ab45462d9be5e2694af85ed863e
| 1,136
|
py
|
Python
|
gerador_planilha_desktop/gui/layout.py
|
jjpaulo2/gerador-planilha
|
c2f6b6482bc72482dc71df7349197856c827487c
|
[
"MIT"
] | 1
|
2021-02-27T16:09:21.000Z
|
2021-02-27T16:09:21.000Z
|
gerador_planilha_desktop/gui/layout.py
|
jjpaulo2/gerador-planilha
|
c2f6b6482bc72482dc71df7349197856c827487c
|
[
"MIT"
] | null | null | null |
gerador_planilha_desktop/gui/layout.py
|
jjpaulo2/gerador-planilha
|
c2f6b6482bc72482dc71df7349197856c827487c
|
[
"MIT"
] | null | null | null |
from typing import Any
import PySimpleGUIQt
def get_layout(styles: Any, sg: PySimpleGUIQt) -> list:
    """
    Build the layout matrix used by the application window.

    Arguments:
        styles (Any): module/object providing the widget style kwargs
        sg (PySimpleGUIQt): the PySimpleGUIQt module

    Return:
        list: PySimpleGUI layout matrix (one inner list per window row)
    """
    rows = []
    rows.append([sg.Text(' Selecione o arquivo de planilha do Excel para análise', **styles.text_style)])
    rows.append([sg.InputText('C:\\', enable_events=True, **styles.file_name_input_style),
                 sg.FileBrowse(**styles.file_browse_style)])
    rows.append([sg.Text('')])
    rows.append([sg.Text(' Selecione o lugar onde o novo arquivo será salvo', **styles.text_style)])
    rows.append([sg.InputText('C:\\planilha-final.xlsx', enable_events=True, **styles.file_name_output_style),
                 sg.FileSaveAs(**styles.file_save_style)])
    rows.append([sg.Text(''), sg.Text(''), sg.Button('Gerar planilha', **styles.generate_button_style)])
    rows.append([sg.Text('')])
    rows.append([sg.Output()])
    rows.append([sg.Text('')])
    rows.append([sg.Text('gerador-planilha-desktop © 2021, desenvolvido por João Paulo Carvalho')])
    return rows
| 43.692308
| 144
| 0.651408
|
4a0356846980fbc15b4ee674723e773a4444b541
| 134,290
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/recoveryservices/v20190513/_inputs.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/recoveryservices/v20190513/_inputs.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/recoveryservices/v20190513/_inputs.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
# Public input types exported by this generated module.
__all__ = [
    'AzureFileshareProtectedItemArgs',
    'AzureFileshareProtectedItemExtendedInfoArgs',
    'AzureIaaSVMProtectedItemArgs',
    'AzureIaaSVMProtectedItemExtendedInfoArgs',
    'AzureSqlProtectedItemArgs',
    'AzureSqlProtectedItemExtendedInfoArgs',
    'AzureVmWorkloadProtectedItemArgs',
    'AzureVmWorkloadProtectedItemExtendedInfoArgs',
    'DPMProtectedItemArgs',
    'DPMProtectedItemExtendedInfoArgs',
    'DiskExclusionPropertiesArgs',
    'ExtendedPropertiesArgs',
    'GenericProtectedItemArgs',
    'MabFileFolderProtectedItemArgs',
    'MabFileFolderProtectedItemExtendedInfoArgs',
]
@pulumi.input_type
class AzureFileshareProtectedItemArgs:
    """Input properties for an Azure File Share protected (backup) item.

    NOTE: auto-generated by the Pulumi SDK Generator — do not edit by hand;
    regenerate instead. All fields are optional pass-through inputs.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[pulumi.Input[str]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['AzureFileshareProtectedItemExtendedInfoArgs']] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 health_status: Optional[pulumi.Input[str]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 last_backup_status: Optional[pulumi.Input[str]] = None,
                 last_backup_time: Optional[pulumi.Input[str]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protected_item_type: Optional[pulumi.Input[str]] = None,
                 protection_state: Optional[pulumi.Input[str]] = None,
                 protection_status: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[str]] = None):
        """
        Azure File Share workload-specific backup item.
        :param pulumi.Input[str] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[str] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input['AzureFileshareProtectedItemExtendedInfoArgs'] extended_info: Additional information with this backup item.
        :param pulumi.Input[str] friendly_name: Friendly name of the fileshare represented by this backup item.
        :param pulumi.Input[str] health_status: backups running status for this backup item.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[str] last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
        :param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] protected_item_type: backup item type.
        :param pulumi.Input[str] protection_state: Backup state of this backup item.
        :param pulumi.Input[str] protection_status: Backup status of this backup item.
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[str] workload_type: Type of workload this item represents.
        """
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if backup_set_name is not None:
            pulumi.set(__self__, "backup_set_name", backup_set_name)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if create_mode is not None:
            pulumi.set(__self__, "create_mode", create_mode)
        if deferred_delete_time_in_utc is not None:
            pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
        if deferred_delete_time_remaining is not None:
            pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
        if extended_info is not None:
            pulumi.set(__self__, "extended_info", extended_info)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if health_status is not None:
            pulumi.set(__self__, "health_status", health_status)
        if is_deferred_delete_schedule_upcoming is not None:
            pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
        if is_rehydrate is not None:
            pulumi.set(__self__, "is_rehydrate", is_rehydrate)
        if is_scheduled_for_deferred_delete is not None:
            pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
        if last_backup_status is not None:
            pulumi.set(__self__, "last_backup_status", last_backup_status)
        if last_backup_time is not None:
            pulumi.set(__self__, "last_backup_time", last_backup_time)
        if last_recovery_point is not None:
            pulumi.set(__self__, "last_recovery_point", last_recovery_point)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if protected_item_type is not None:
            # NOTE(review): the passed value is ignored here — the literal kind
            # tag 'AzureFileShareProtectedItem' is always stored (generated
            # discriminator behavior; confirm against the Pulumi generator).
            pulumi.set(__self__, "protected_item_type", 'AzureFileShareProtectedItem')
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if protection_status is not None:
            pulumi.set(__self__, "protection_status", protection_status)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if workload_type is not None:
            pulumi.set(__self__, "workload_type", workload_type)
    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")
    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_management_type", value)
    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the backup set the backup item belongs to
        """
        return pulumi.get(self, "backup_set_name")
    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of container
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")
    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "create_mode", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """
        Time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")
    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """
        Time remaining before the DS marked for deferred delete is permanently deleted
        """
        return pulumi.get(self, "deferred_delete_time_remaining")
    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)
    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['AzureFileshareProtectedItemExtendedInfoArgs']]:
        """
        Additional information with this backup item.
        """
        return pulumi.get(self, "extended_info")
    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['AzureFileshareProtectedItemExtendedInfoArgs']]):
        pulumi.set(self, "extended_info", value)
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of the fileshare represented by this backup item.
        """
        return pulumi.get(self, "friendly_name")
    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)
    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[pulumi.Input[str]]:
        """
        backups running status for this backup item.
        """
        return pulumi.get(self, "health_status")
    @health_status.setter
    def health_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "health_status", value)
    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")
    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)
    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")
    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)
    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """
        Last backup operation status. Possible values: Healthy, Unhealthy.
        """
        return pulumi.get(self, "last_backup_status")
    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_status", value)
    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp of the last backup operation on this backup item.
        """
        return pulumi.get(self, "last_backup_time")
    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_time", value)
    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")
    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> Optional[pulumi.Input[str]]:
        """
        backup item type.
        """
        return pulumi.get(self, "protected_item_type")
    @protected_item_type.setter
    def protected_item_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_type", value)
    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[str]]:
        """
        Backup state of this backup item.
        """
        return pulumi.get(self, "protection_state")
    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_state", value)
    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[pulumi.Input[str]]:
        """
        Backup status of this backup item.
        """
        return pulumi.get(self, "protection_status")
    @protection_status.setter
    def protection_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_status", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")
    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureFileshareProtectedItemExtendedInfoArgs:
    """Additional recovery-point metadata for an Azure File Share backup item.

    NOTE: auto-generated by the Pulumi SDK Generator — do not edit by hand;
    regenerate instead.
    """
    def __init__(__self__, *,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_state: Optional[pulumi.Input[str]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None):
        """
        Additional information about Azure File Share backup item.
        :param pulumi.Input[str] oldest_recovery_point: The oldest backup copy available for this item in the service.
        :param pulumi.Input[str] policy_state: Indicates consistency of policy object and policy applied to this backup item.
        :param pulumi.Input[int] recovery_point_count: Number of available backup copies associated with this backup item.
        """
        if oldest_recovery_point is not None:
            pulumi.set(__self__, "oldest_recovery_point", oldest_recovery_point)
        if policy_state is not None:
            pulumi.set(__self__, "policy_state", policy_state)
        if recovery_point_count is not None:
            pulumi.set(__self__, "recovery_point_count", recovery_point_count)
    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        The oldest backup copy available for this item in the service.
        """
        return pulumi.get(self, "oldest_recovery_point")
    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)
    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates consistency of policy object and policy applied to this backup item.
        """
        return pulumi.get(self, "policy_state")
    @policy_state.setter
    def policy_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_state", value)
    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """
        Number of available backup copies associated with this backup item.
        """
        return pulumi.get(self, "recovery_point_count")
    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)
@pulumi.input_type
class AzureIaaSVMProtectedItemArgs:
def __init__(__self__, *,
backup_management_type: Optional[pulumi.Input[str]] = None,
backup_set_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
extended_info: Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']] = None,
extended_properties: Optional[pulumi.Input['ExtendedPropertiesArgs']] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
health_status: Optional[pulumi.Input[str]] = None,
is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
is_rehydrate: Optional[pulumi.Input[bool]] = None,
is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
last_backup_status: Optional[pulumi.Input[str]] = None,
last_backup_time: Optional[pulumi.Input[str]] = None,
last_recovery_point: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
protected_item_data_id: Optional[pulumi.Input[str]] = None,
protected_item_type: Optional[pulumi.Input[str]] = None,
protection_state: Optional[pulumi.Input[str]] = None,
protection_status: Optional[pulumi.Input[str]] = None,
source_resource_id: Optional[pulumi.Input[str]] = None,
virtual_machine_id: Optional[pulumi.Input[str]] = None,
workload_type: Optional[pulumi.Input[str]] = None):
"""
IaaS VM workload-specific backup item.
:param pulumi.Input[str] backup_management_type: Type of backup management for the backed up item.
:param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
:param pulumi.Input[str] container_name: Unique name of container
:param pulumi.Input[str] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
:param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
:param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
:param pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs'] extended_info: Additional information for this backup item.
:param pulumi.Input['ExtendedPropertiesArgs'] extended_properties: Extended Properties for Azure IaasVM Backup.
:param pulumi.Input[str] friendly_name: Friendly name of the VM represented by this backup item.
:param pulumi.Input[str] health_status: Health status of protected item
:param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
:param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
:param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
:param pulumi.Input[str] last_backup_status: Last backup operation status.
:param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
:param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
:param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
:param pulumi.Input[str] protected_item_data_id: Data ID of the protected item.
:param pulumi.Input[str] protected_item_type: backup item type.
:param pulumi.Input[str] protection_state: Backup state of this backup item.
:param pulumi.Input[str] protection_status: Backup status of this backup item.
:param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
:param pulumi.Input[str] virtual_machine_id: Fully qualified ARM ID of the virtual machine represented by this item.
:param pulumi.Input[str] workload_type: Type of workload this item represents.
"""
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if backup_set_name is not None:
pulumi.set(__self__, "backup_set_name", backup_set_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if deferred_delete_time_in_utc is not None:
pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
if deferred_delete_time_remaining is not None:
pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
if extended_info is not None:
pulumi.set(__self__, "extended_info", extended_info)
if extended_properties is not None:
pulumi.set(__self__, "extended_properties", extended_properties)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if health_status is not None:
pulumi.set(__self__, "health_status", health_status)
if is_deferred_delete_schedule_upcoming is not None:
pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
if is_rehydrate is not None:
pulumi.set(__self__, "is_rehydrate", is_rehydrate)
if is_scheduled_for_deferred_delete is not None:
pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
if last_backup_status is not None:
pulumi.set(__self__, "last_backup_status", last_backup_status)
if last_backup_time is not None:
pulumi.set(__self__, "last_backup_time", last_backup_time)
if last_recovery_point is not None:
pulumi.set(__self__, "last_recovery_point", last_recovery_point)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if protected_item_data_id is not None:
pulumi.set(__self__, "protected_item_data_id", protected_item_data_id)
if protected_item_type is not None:
pulumi.set(__self__, "protected_item_type", 'AzureIaaSVMProtectedItem')
if protection_state is not None:
pulumi.set(__self__, "protection_state", protection_state)
if protection_status is not None:
pulumi.set(__self__, "protection_status", protection_status)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if virtual_machine_id is not None:
pulumi.set(__self__, "virtual_machine_id", virtual_machine_id)
if workload_type is not None:
pulumi.set(__self__, "workload_type", workload_type)
@property
@pulumi.getter(name="backupManagementType")
def backup_management_type(self) -> Optional[pulumi.Input[str]]:
"""
Type of backup management for the backed up item.
"""
return pulumi.get(self, "backup_management_type")
@backup_management_type.setter
def backup_management_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backup_management_type", value)
@property
@pulumi.getter(name="backupSetName")
def backup_set_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the backup set the backup item belongs to
"""
return pulumi.get(self, "backup_set_name")
@backup_set_name.setter
def backup_set_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backup_set_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name of container
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[pulumi.Input[str]]:
"""
Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
"""
return pulumi.get(self, "create_mode")
@create_mode.setter
def create_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_mode", value)
@property
@pulumi.getter(name="deferredDeleteTimeInUTC")
def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
"""
Time for deferred deletion in UTC
"""
return pulumi.get(self, "deferred_delete_time_in_utc")
@deferred_delete_time_in_utc.setter
def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deferred_delete_time_in_utc", value)
@property
@pulumi.getter(name="deferredDeleteTimeRemaining")
def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
"""
Time remaining before the DS marked for deferred delete is permanently deleted
"""
return pulumi.get(self, "deferred_delete_time_remaining")
@deferred_delete_time_remaining.setter
def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deferred_delete_time_remaining", value)
@property
@pulumi.getter(name="extendedInfo")
def extended_info(self) -> Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']]:
"""
Additional information for this backup item.
"""
return pulumi.get(self, "extended_info")
@extended_info.setter
def extended_info(self, value: Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']]):
pulumi.set(self, "extended_info", value)
@property
@pulumi.getter(name="extendedProperties")
def extended_properties(self) -> Optional[pulumi.Input['ExtendedPropertiesArgs']]:
"""
Extended Properties for Azure IaasVM Backup.
"""
return pulumi.get(self, "extended_properties")
@extended_properties.setter
def extended_properties(self, value: Optional[pulumi.Input['ExtendedPropertiesArgs']]):
pulumi.set(self, "extended_properties", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of the VM represented by this backup item.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[pulumi.Input[str]]:
        """
        Health status of protected item
        """
        return pulumi.get(self, "health_status")
    @health_status.setter
    def health_status(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``health_status``."""
        pulumi.set(self, "health_status", value)
    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Set ``is_deferred_delete_schedule_upcoming``."""
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")
    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Set ``is_rehydrate``."""
        pulumi.set(self, "is_rehydrate", value)
    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")
    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Set ``is_scheduled_for_deferred_delete``."""
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)
    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """
        Last backup operation status.
        """
        return pulumi.get(self, "last_backup_status")
    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``last_backup_status``."""
        pulumi.set(self, "last_backup_status", value)
    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp of the last backup operation on this backup item.
        """
        return pulumi.get(self, "last_backup_time")
    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``last_backup_time``."""
        pulumi.set(self, "last_backup_time", value)
    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")
    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``last_recovery_point``."""
        pulumi.set(self, "last_recovery_point", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``policy_id``."""
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="protectedItemDataId")
    def protected_item_data_id(self) -> Optional[pulumi.Input[str]]:
        """
        Data ID of the protected item.
        """
        return pulumi.get(self, "protected_item_data_id")
    @protected_item_data_id.setter
    def protected_item_data_id(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``protected_item_data_id``."""
        pulumi.set(self, "protected_item_data_id", value)
    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> Optional[pulumi.Input[str]]:
        """
        backup item type.
        """
        return pulumi.get(self, "protected_item_type")
    @protected_item_type.setter
    def protected_item_type(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``protected_item_type``."""
        pulumi.set(self, "protected_item_type", value)
    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[str]]:
        """
        Backup state of this backup item.
        """
        return pulumi.get(self, "protection_state")
    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``protection_state``."""
        pulumi.set(self, "protection_state", value)
    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[pulumi.Input[str]]:
        """
        Backup status of this backup item.
        """
        return pulumi.get(self, "protection_status")
    @protection_status.setter
    def protection_status(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``protection_status``."""
        pulumi.set(self, "protection_status", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``source_resource_id``."""
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="virtualMachineId")
    def virtual_machine_id(self) -> Optional[pulumi.Input[str]]:
        """
        Fully qualified ARM ID of the virtual machine represented by this item.
        """
        return pulumi.get(self, "virtual_machine_id")
    @virtual_machine_id.setter
    def virtual_machine_id(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``virtual_machine_id``."""
        pulumi.set(self, "virtual_machine_id", value)
    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")
    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``workload_type``."""
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureIaaSVMProtectedItemExtendedInfoArgs:
    def __init__(__self__, *,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_inconsistent: Optional[pulumi.Input[bool]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None):
        """
        Additional information on Azure IaaS VM specific backup item.
        :param pulumi.Input[str] oldest_recovery_point: The oldest backup copy available for this backup item.
        :param pulumi.Input[bool] policy_inconsistent: Specifies if backup policy associated with the backup item is inconsistent.
        :param pulumi.Input[int] recovery_point_count: Number of backup copies available for this backup item.
        """
        # Forward only explicitly supplied inputs; absent arguments are never
        # stored (not even as None), matching the pulumi input convention.
        for key, arg in (
                ("oldest_recovery_point", oldest_recovery_point),
                ("policy_inconsistent", policy_inconsistent),
                ("recovery_point_count", recovery_point_count)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """The oldest backup copy available for this backup item."""
        return pulumi.get(self, "oldest_recovery_point")

    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)

    @property
    @pulumi.getter(name="policyInconsistent")
    def policy_inconsistent(self) -> Optional[pulumi.Input[bool]]:
        """Specifies if backup policy associated with the backup item is inconsistent."""
        return pulumi.get(self, "policy_inconsistent")

    @policy_inconsistent.setter
    def policy_inconsistent(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "policy_inconsistent", value)

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """Number of backup copies available for this backup item."""
        return pulumi.get(self, "recovery_point_count")

    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)
@pulumi.input_type
class AzureSqlProtectedItemArgs:
    # Input shape for an Azure SQL workload-specific backup item; the concrete
    # protected-item discriminator is pinned in __init__ below.
    def __init__(__self__, *,
                 backup_management_type: Optional[pulumi.Input[str]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['AzureSqlProtectedItemExtendedInfoArgs']] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protected_item_data_id: Optional[pulumi.Input[str]] = None,
                 protected_item_type: Optional[pulumi.Input[str]] = None,
                 protection_state: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[str]] = None):
        """
        Azure SQL workload-specific backup item.
        :param pulumi.Input[str] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[str] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input['AzureSqlProtectedItemExtendedInfoArgs'] extended_info: Additional information for this backup item.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] protected_item_data_id: Internal ID of a backup item. Used by Azure SQL Backup engine to contact Recovery Services.
        :param pulumi.Input[str] protected_item_type: backup item type.
        :param pulumi.Input[str] protection_state: Backup state of the backed up item.
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[str] workload_type: Type of workload this item represents.
        """
        # Only inputs explicitly supplied by the caller are stored.
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if backup_set_name is not None:
            pulumi.set(__self__, "backup_set_name", backup_set_name)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if create_mode is not None:
            pulumi.set(__self__, "create_mode", create_mode)
        if deferred_delete_time_in_utc is not None:
            pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
        if deferred_delete_time_remaining is not None:
            pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
        if extended_info is not None:
            pulumi.set(__self__, "extended_info", extended_info)
        if is_deferred_delete_schedule_upcoming is not None:
            pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
        if is_rehydrate is not None:
            pulumi.set(__self__, "is_rehydrate", is_rehydrate)
        if is_scheduled_for_deferred_delete is not None:
            pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
        if last_recovery_point is not None:
            pulumi.set(__self__, "last_recovery_point", last_recovery_point)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if protected_item_data_id is not None:
            pulumi.set(__self__, "protected_item_data_id", protected_item_data_id)
        if protected_item_type is not None:
            # NOTE: discriminator — the generated code pins the stored value to the
            # concrete type tag and ignores the caller-supplied string.
            pulumi.set(__self__, "protected_item_type", 'Microsoft.Sql/servers/databases')
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if workload_type is not None:
            pulumi.set(__self__, "workload_type", workload_type)
    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")
    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``backup_management_type``."""
        pulumi.set(self, "backup_management_type", value)
    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the backup set the backup item belongs to
        """
        return pulumi.get(self, "backup_set_name")
    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``backup_set_name``."""
        pulumi.set(self, "backup_set_name", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of container
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``container_name``."""
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")
    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``create_mode``."""
        pulumi.set(self, "create_mode", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """
        Time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")
    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``deferred_delete_time_in_utc``."""
        pulumi.set(self, "deferred_delete_time_in_utc", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """
        Time remaining before the DS marked for deferred delete is permanently deleted
        """
        return pulumi.get(self, "deferred_delete_time_remaining")
    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``deferred_delete_time_remaining``."""
        pulumi.set(self, "deferred_delete_time_remaining", value)
    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['AzureSqlProtectedItemExtendedInfoArgs']]:
        """
        Additional information for this backup item.
        """
        return pulumi.get(self, "extended_info")
    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['AzureSqlProtectedItemExtendedInfoArgs']]) -> None:
        """Set ``extended_info``."""
        pulumi.set(self, "extended_info", value)
    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Set ``is_deferred_delete_schedule_upcoming``."""
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")
    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Set ``is_rehydrate``."""
        pulumi.set(self, "is_rehydrate", value)
    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")
    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]) -> None:
        """Set ``is_scheduled_for_deferred_delete``."""
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)
    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")
    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``last_recovery_point``."""
        pulumi.set(self, "last_recovery_point", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``policy_id``."""
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="protectedItemDataId")
    def protected_item_data_id(self) -> Optional[pulumi.Input[str]]:
        """
        Internal ID of a backup item. Used by Azure SQL Backup engine to contact Recovery Services.
        """
        return pulumi.get(self, "protected_item_data_id")
    @protected_item_data_id.setter
    def protected_item_data_id(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``protected_item_data_id``."""
        pulumi.set(self, "protected_item_data_id", value)
    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> Optional[pulumi.Input[str]]:
        """
        backup item type.
        """
        return pulumi.get(self, "protected_item_type")
    @protected_item_type.setter
    def protected_item_type(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``protected_item_type``."""
        pulumi.set(self, "protected_item_type", value)
    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[str]]:
        """
        Backup state of the backed up item.
        """
        return pulumi.get(self, "protection_state")
    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``protection_state``."""
        pulumi.set(self, "protection_state", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``source_resource_id``."""
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")
    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[str]]) -> None:
        """Set ``workload_type``."""
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureSqlProtectedItemExtendedInfoArgs:
    def __init__(__self__, *,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_state: Optional[pulumi.Input[str]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None):
        """
        Additional information on Azure Sql specific protected item.
        :param pulumi.Input[str] oldest_recovery_point: The oldest backup copy available for this item in the service.
        :param pulumi.Input[str] policy_state: State of the backup policy associated with this backup item.
        :param pulumi.Input[int] recovery_point_count: Number of available backup copies associated with this backup item.
        """
        # Forward only explicitly supplied inputs; absent arguments are never
        # stored (not even as None), matching the pulumi input convention.
        for key, arg in (
                ("oldest_recovery_point", oldest_recovery_point),
                ("policy_state", policy_state),
                ("recovery_point_count", recovery_point_count)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """The oldest backup copy available for this item in the service."""
        return pulumi.get(self, "oldest_recovery_point")

    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)

    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[pulumi.Input[str]]:
        """State of the backup policy associated with this backup item."""
        return pulumi.get(self, "policy_state")

    @policy_state.setter
    def policy_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_state", value)

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """Number of available backup copies associated with this backup item."""
        return pulumi.get(self, "recovery_point_count")

    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)
@pulumi.input_type
class AzureVmWorkloadProtectedItemArgs:
def __init__(__self__, *,
backup_management_type: Optional[pulumi.Input[str]] = None,
backup_set_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
extended_info: Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
is_rehydrate: Optional[pulumi.Input[bool]] = None,
is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
last_backup_status: Optional[pulumi.Input[str]] = None,
last_backup_time: Optional[pulumi.Input[str]] = None,
last_recovery_point: Optional[pulumi.Input[str]] = None,
parent_name: Optional[pulumi.Input[str]] = None,
parent_type: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
protected_item_data_source_id: Optional[pulumi.Input[str]] = None,
protected_item_health_status: Optional[pulumi.Input[str]] = None,
protected_item_type: Optional[pulumi.Input[str]] = None,
protection_state: Optional[pulumi.Input[str]] = None,
protection_status: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
source_resource_id: Optional[pulumi.Input[str]] = None,
workload_type: Optional[pulumi.Input[str]] = None):
"""
Azure VM workload-specific protected item.
:param pulumi.Input[str] backup_management_type: Type of backup management for the backed up item.
:param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
:param pulumi.Input[str] container_name: Unique name of container
:param pulumi.Input[str] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
:param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
:param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
:param pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs'] extended_info: Additional information for this backup item.
:param pulumi.Input[str] friendly_name: Friendly name of the DB represented by this backup item.
:param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
:param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
:param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
:param pulumi.Input[str] last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
:param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
:param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
:param pulumi.Input[str] parent_name: Parent name of the DB such as Instance or Availability Group.
:param pulumi.Input[str] parent_type: Parent type of protected item, example: for a DB, standalone server or distributed
:param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
:param pulumi.Input[str] protected_item_data_source_id: Data ID of the protected item.
:param pulumi.Input[str] protected_item_health_status: Health status of the backup item, evaluated based on last heartbeat received
:param pulumi.Input[str] protected_item_type: backup item type.
:param pulumi.Input[str] protection_state: Backup state of this backup item.
:param pulumi.Input[str] protection_status: Backup status of this backup item.
:param pulumi.Input[str] server_name: Host/Cluster Name for instance or AG
:param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
:param pulumi.Input[str] workload_type: Type of workload this item represents.
"""
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if backup_set_name is not None:
pulumi.set(__self__, "backup_set_name", backup_set_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if deferred_delete_time_in_utc is not None:
pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
if deferred_delete_time_remaining is not None:
pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
if extended_info is not None:
pulumi.set(__self__, "extended_info", extended_info)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if is_deferred_delete_schedule_upcoming is not None:
pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
if is_rehydrate is not None:
pulumi.set(__self__, "is_rehydrate", is_rehydrate)
if is_scheduled_for_deferred_delete is not None:
pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
if last_backup_status is not None:
pulumi.set(__self__, "last_backup_status", last_backup_status)
if last_backup_time is not None:
pulumi.set(__self__, "last_backup_time", last_backup_time)
if last_recovery_point is not None:
pulumi.set(__self__, "last_recovery_point", last_recovery_point)
if parent_name is not None:
pulumi.set(__self__, "parent_name", parent_name)
if parent_type is not None:
pulumi.set(__self__, "parent_type", parent_type)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if protected_item_data_source_id is not None:
pulumi.set(__self__, "protected_item_data_source_id", protected_item_data_source_id)
if protected_item_health_status is not None:
pulumi.set(__self__, "protected_item_health_status", protected_item_health_status)
if protected_item_type is not None:
pulumi.set(__self__, "protected_item_type", 'AzureVmWorkloadProtectedItem')
if protection_state is not None:
pulumi.set(__self__, "protection_state", protection_state)
if protection_status is not None:
pulumi.set(__self__, "protection_status", protection_status)
if server_name is not None:
pulumi.set(__self__, "server_name", server_name)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if workload_type is not None:
pulumi.set(__self__, "workload_type", workload_type)
@property
@pulumi.getter(name="backupManagementType")
def backup_management_type(self) -> Optional[pulumi.Input[str]]:
"""
Type of backup management for the backed up item.
"""
return pulumi.get(self, "backup_management_type")
@backup_management_type.setter
def backup_management_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backup_management_type", value)
@property
@pulumi.getter(name="backupSetName")
def backup_set_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the backup set the backup item belongs to
"""
return pulumi.get(self, "backup_set_name")
@backup_set_name.setter
def backup_set_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backup_set_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name of container
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[pulumi.Input[str]]:
"""
Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
"""
return pulumi.get(self, "create_mode")
@create_mode.setter
def create_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_mode", value)
@property
@pulumi.getter(name="deferredDeleteTimeInUTC")
def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
"""
Time for deferred deletion in UTC
"""
return pulumi.get(self, "deferred_delete_time_in_utc")
@deferred_delete_time_in_utc.setter
def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deferred_delete_time_in_utc", value)
@property
@pulumi.getter(name="deferredDeleteTimeRemaining")
def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
"""
Time remaining before the DS marked for deferred delete is permanently deleted
"""
return pulumi.get(self, "deferred_delete_time_remaining")
@deferred_delete_time_remaining.setter
def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deferred_delete_time_remaining", value)
@property
@pulumi.getter(name="extendedInfo")
def extended_info(self) -> Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']]:
"""
Additional information for this backup item.
"""
return pulumi.get(self, "extended_info")
@extended_info.setter
def extended_info(self, value: Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']]):
pulumi.set(self, "extended_info", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of the DB represented by this backup item.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to identify whether the deferred deleted DS is to be purged soon
"""
return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
@is_deferred_delete_schedule_upcoming.setter
def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
@property
@pulumi.getter(name="isRehydrate")
def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to identify that deferred deleted DS is to be moved into Pause state
"""
return pulumi.get(self, "is_rehydrate")
@is_rehydrate.setter
def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_rehydrate", value)
@property
@pulumi.getter(name="isScheduledForDeferredDelete")
def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to identify whether the DS is scheduled for deferred delete
"""
return pulumi.get(self, "is_scheduled_for_deferred_delete")
@is_scheduled_for_deferred_delete.setter
def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_scheduled_for_deferred_delete", value)
    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """
        Last backup operation status. Possible values: Healthy, Unhealthy.
        """
        return pulumi.get(self, "last_backup_status")
    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_status", value)
    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp of the last backup operation on this backup item.
        """
        # NOTE(review): timestamps are modeled as plain strings here — presumably
        # ISO-8601 as emitted by the service; confirm against the REST API.
        return pulumi.get(self, "last_backup_time")
    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_time", value)
    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")
    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)
    @property
    @pulumi.getter(name="parentName")
    def parent_name(self) -> Optional[pulumi.Input[str]]:
        """
        Parent name of the DB, such as an Instance or Availability Group.
        """
        return pulumi.get(self, "parent_name")
    @parent_name.setter
    def parent_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent_name", value)
    @property
    @pulumi.getter(name="parentType")
    def parent_type(self) -> Optional[pulumi.Input[str]]:
        """
        Parent type of the protected item; for example, for a DB: standalone server or distributed.
        """
        return pulumi.get(self, "parent_type")
    @parent_type.setter
    def parent_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent_type", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="protectedItemDataSourceId")
    def protected_item_data_source_id(self) -> Optional[pulumi.Input[str]]:
        """
        Data source ID of the protected item.
        """
        return pulumi.get(self, "protected_item_data_source_id")
    @protected_item_data_source_id.setter
    def protected_item_data_source_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_data_source_id", value)
    @property
    @pulumi.getter(name="protectedItemHealthStatus")
    def protected_item_health_status(self) -> Optional[pulumi.Input[str]]:
        """
        Health status of the backup item, evaluated based on the last heartbeat received.
        """
        return pulumi.get(self, "protected_item_health_status")
    @protected_item_health_status.setter
    def protected_item_health_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_health_status", value)
    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> Optional[pulumi.Input[str]]:
        """
        Backup item type (discriminator for the concrete protected-item kind).
        """
        return pulumi.get(self, "protected_item_type")
    @protected_item_type.setter
    def protected_item_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_type", value)
    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[str]]:
        """
        Backup (protection) state of this backup item.
        """
        return pulumi.get(self, "protection_state")
    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_state", value)
    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[pulumi.Input[str]]:
        """
        Backup (protection) status of this backup item.
        """
        return pulumi.get(self, "protection_status")
    @protection_status.setter
    def protection_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_status", value)
    @property
    @pulumi.getter(name="serverName")
    def server_name(self) -> Optional[pulumi.Input[str]]:
        """
        Host/Cluster name for the instance or Availability Group (AG).
        """
        return pulumi.get(self, "server_name")
    @server_name.setter
    def server_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "server_name", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")
    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureVmWorkloadProtectedItemExtendedInfoArgs:
    """Additional information on an Azure Workload for SQL specific backup item."""

    def __init__(__self__, *,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_state: Optional[pulumi.Input[str]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None):
        """
        Additional information on Azure Workload for SQL specific backup item.

        :param pulumi.Input[str] oldest_recovery_point: The oldest backup copy available for this backup item.
        :param pulumi.Input[str] policy_state: Indicates consistency of policy object and policy applied to this backup item.
        :param pulumi.Input[int] recovery_point_count: Number of backup copies available for this backup item.
        """
        # Only forward the arguments the caller actually supplied.
        for attr_name, attr_value in (
                ("oldest_recovery_point", oldest_recovery_point),
                ("policy_state", policy_state),
                ("recovery_point_count", recovery_point_count)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """The oldest backup copy available for this backup item."""
        return pulumi.get(self, "oldest_recovery_point")

    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)

    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[pulumi.Input[str]]:
        """Indicates consistency of policy object and policy applied to this backup item."""
        return pulumi.get(self, "policy_state")

    @policy_state.setter
    def policy_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_state", value)

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """Number of backup copies available for this backup item."""
        return pulumi.get(self, "recovery_point_count")

    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)
@pulumi.input_type
class DPMProtectedItemArgs:
    """Additional information on a Backup-engine (DPM) specific backup item."""
    def __init__(__self__, *,
                 backup_engine_name: Optional[pulumi.Input[str]] = None,
                 backup_management_type: Optional[pulumi.Input[str]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['DPMProtectedItemExtendedInfoArgs']] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protected_item_type: Optional[pulumi.Input[str]] = None,
                 protection_state: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[str]] = None):
        """
        Additional information on Backup engine specific backup item.
        :param pulumi.Input[str] backup_engine_name: Backup Management server protecting this backup item
        :param pulumi.Input[str] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[str] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input['DPMProtectedItemExtendedInfoArgs'] extended_info: Extended info of the backup item.
        :param pulumi.Input[str] friendly_name: Friendly name of the managed item
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] protected_item_type: backup item type. When supplied, the stored value is always the discriminator constant 'DPMProtectedItem'.
        :param pulumi.Input[str] protection_state: Protection state of the backup engine
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[str] workload_type: Type of workload this item represents.
        """
        if backup_engine_name is not None:
            pulumi.set(__self__, "backup_engine_name", backup_engine_name)
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if backup_set_name is not None:
            pulumi.set(__self__, "backup_set_name", backup_set_name)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if create_mode is not None:
            pulumi.set(__self__, "create_mode", create_mode)
        if deferred_delete_time_in_utc is not None:
            pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
        if deferred_delete_time_remaining is not None:
            pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
        if extended_info is not None:
            pulumi.set(__self__, "extended_info", extended_info)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if is_deferred_delete_schedule_upcoming is not None:
            pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
        if is_rehydrate is not None:
            pulumi.set(__self__, "is_rehydrate", is_rehydrate)
        if is_scheduled_for_deferred_delete is not None:
            pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
        if last_recovery_point is not None:
            pulumi.set(__self__, "last_recovery_point", last_recovery_point)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if protected_item_type is not None:
            # NOTE(review): the caller-supplied value is intentionally discarded and the
            # discriminator constant 'DPMProtectedItem' is stored instead — this matches
            # the generated-SDK discriminator pattern; confirm against the code generator.
            pulumi.set(__self__, "protected_item_type", 'DPMProtectedItem')
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if workload_type is not None:
            pulumi.set(__self__, "workload_type", workload_type)
    @property
    @pulumi.getter(name="backupEngineName")
    def backup_engine_name(self) -> Optional[pulumi.Input[str]]:
        """
        Backup Management server protecting this backup item.
        """
        return pulumi.get(self, "backup_engine_name")
    @backup_engine_name.setter
    def backup_engine_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_engine_name", value)
    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")
    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_management_type", value)
    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the backup set the backup item belongs to.
        """
        return pulumi.get(self, "backup_set_name")
    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of the container.
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")
    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "create_mode", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """
        Time for deferred deletion in UTC.
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")
    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """
        Time remaining before the data source marked for deferred delete is permanently deleted.
        """
        return pulumi.get(self, "deferred_delete_time_remaining")
    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)
    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['DPMProtectedItemExtendedInfoArgs']]:
        """
        Extended info of the backup item.
        """
        return pulumi.get(self, "extended_info")
    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['DPMProtectedItemExtendedInfoArgs']]):
        pulumi.set(self, "extended_info", value)
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of the managed item.
        """
        return pulumi.get(self, "friendly_name")
    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)
    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred-deleted data source is to be purged soon.
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that the deferred-deleted data source is to be moved into the Pause state.
        """
        return pulumi.get(self, "is_rehydrate")
    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)
    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the data source is scheduled for deferred delete.
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")
    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)
    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")
    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> Optional[pulumi.Input[str]]:
        """
        Backup item type; always stored as 'DPMProtectedItem' by __init__.
        """
        return pulumi.get(self, "protected_item_type")
    @protected_item_type.setter
    def protected_item_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_type", value)
    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[str]]:
        """
        Protection state of the backup engine.
        """
        return pulumi.get(self, "protection_state")
    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_state", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")
    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class DPMProtectedItemExtendedInfoArgs:
    """Additional information of a DPM protected item."""

    def __init__(__self__, *,
                 disk_storage_used_in_bytes: Optional[pulumi.Input[str]] = None,
                 is_collocated: Optional[pulumi.Input[bool]] = None,
                 is_present_on_cloud: Optional[pulumi.Input[bool]] = None,
                 last_backup_status: Optional[pulumi.Input[str]] = None,
                 last_refreshed_at: Optional[pulumi.Input[str]] = None,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 on_premise_latest_recovery_point: Optional[pulumi.Input[str]] = None,
                 on_premise_oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 on_premise_recovery_point_count: Optional[pulumi.Input[int]] = None,
                 protectable_object_load_path: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 protected: Optional[pulumi.Input[bool]] = None,
                 protection_group_name: Optional[pulumi.Input[str]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None,
                 total_disk_storage_size_in_bytes: Optional[pulumi.Input[str]] = None):
        """
        Additional information of DPM Protected item.

        :param pulumi.Input[str] disk_storage_used_in_bytes: Used Disk storage in bytes.
        :param pulumi.Input[bool] is_collocated: To check if backup item is collocated.
        :param pulumi.Input[bool] is_present_on_cloud: To check if backup item is cloud protected.
        :param pulumi.Input[str] last_backup_status: Last backup status information on backup item.
        :param pulumi.Input[str] last_refreshed_at: Last refresh time on backup item.
        :param pulumi.Input[str] oldest_recovery_point: Oldest cloud recovery point time.
        :param pulumi.Input[str] on_premise_latest_recovery_point: latest disk recovery point time.
        :param pulumi.Input[str] on_premise_oldest_recovery_point: Oldest disk recovery point time.
        :param pulumi.Input[int] on_premise_recovery_point_count: disk recovery point count.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] protectable_object_load_path: Attribute to provide information on various DBs.
        :param pulumi.Input[bool] protected: To check if backup item is disk protected.
        :param pulumi.Input[str] protection_group_name: Protection group name of the backup item.
        :param pulumi.Input[int] recovery_point_count: cloud recovery point count.
        :param pulumi.Input[str] total_disk_storage_size_in_bytes: total Disk storage in bytes.
        """
        # Table-driven forwarding: store only the arguments that were supplied.
        supplied = (
            ("disk_storage_used_in_bytes", disk_storage_used_in_bytes),
            ("is_collocated", is_collocated),
            ("is_present_on_cloud", is_present_on_cloud),
            ("last_backup_status", last_backup_status),
            ("last_refreshed_at", last_refreshed_at),
            ("oldest_recovery_point", oldest_recovery_point),
            ("on_premise_latest_recovery_point", on_premise_latest_recovery_point),
            ("on_premise_oldest_recovery_point", on_premise_oldest_recovery_point),
            ("on_premise_recovery_point_count", on_premise_recovery_point_count),
            ("protectable_object_load_path", protectable_object_load_path),
            ("protected", protected),
            ("protection_group_name", protection_group_name),
            ("recovery_point_count", recovery_point_count),
            ("total_disk_storage_size_in_bytes", total_disk_storage_size_in_bytes),
        )
        for attr_name, attr_value in supplied:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="diskStorageUsedInBytes")
    def disk_storage_used_in_bytes(self) -> Optional[pulumi.Input[str]]:
        """Used Disk storage in bytes."""
        return pulumi.get(self, "disk_storage_used_in_bytes")

    @disk_storage_used_in_bytes.setter
    def disk_storage_used_in_bytes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "disk_storage_used_in_bytes", value)

    @property
    @pulumi.getter(name="isCollocated")
    def is_collocated(self) -> Optional[pulumi.Input[bool]]:
        """To check if backup item is collocated."""
        return pulumi.get(self, "is_collocated")

    @is_collocated.setter
    def is_collocated(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_collocated", value)

    @property
    @pulumi.getter(name="isPresentOnCloud")
    def is_present_on_cloud(self) -> Optional[pulumi.Input[bool]]:
        """To check if backup item is cloud protected."""
        return pulumi.get(self, "is_present_on_cloud")

    @is_present_on_cloud.setter
    def is_present_on_cloud(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_present_on_cloud", value)

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """Last backup status information on backup item."""
        return pulumi.get(self, "last_backup_status")

    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_status", value)

    @property
    @pulumi.getter(name="lastRefreshedAt")
    def last_refreshed_at(self) -> Optional[pulumi.Input[str]]:
        """Last refresh time on backup item."""
        return pulumi.get(self, "last_refreshed_at")

    @last_refreshed_at.setter
    def last_refreshed_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_refreshed_at", value)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """Oldest cloud recovery point time."""
        return pulumi.get(self, "oldest_recovery_point")

    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)

    @property
    @pulumi.getter(name="onPremiseLatestRecoveryPoint")
    def on_premise_latest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """latest disk recovery point time."""
        return pulumi.get(self, "on_premise_latest_recovery_point")

    @on_premise_latest_recovery_point.setter
    def on_premise_latest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "on_premise_latest_recovery_point", value)

    @property
    @pulumi.getter(name="onPremiseOldestRecoveryPoint")
    def on_premise_oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """Oldest disk recovery point time."""
        return pulumi.get(self, "on_premise_oldest_recovery_point")

    @on_premise_oldest_recovery_point.setter
    def on_premise_oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "on_premise_oldest_recovery_point", value)

    @property
    @pulumi.getter(name="onPremiseRecoveryPointCount")
    def on_premise_recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """disk recovery point count."""
        return pulumi.get(self, "on_premise_recovery_point_count")

    @on_premise_recovery_point_count.setter
    def on_premise_recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "on_premise_recovery_point_count", value)

    @property
    @pulumi.getter(name="protectableObjectLoadPath")
    def protectable_object_load_path(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Attribute to provide information on various DBs."""
        return pulumi.get(self, "protectable_object_load_path")

    @protectable_object_load_path.setter
    def protectable_object_load_path(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "protectable_object_load_path", value)

    @property
    @pulumi.getter
    def protected(self) -> Optional[pulumi.Input[bool]]:
        """To check if backup item is disk protected."""
        return pulumi.get(self, "protected")

    @protected.setter
    def protected(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "protected", value)

    @property
    @pulumi.getter(name="protectionGroupName")
    def protection_group_name(self) -> Optional[pulumi.Input[str]]:
        """Protection group name of the backup item."""
        return pulumi.get(self, "protection_group_name")

    @protection_group_name.setter
    def protection_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_group_name", value)

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """cloud recovery point count."""
        return pulumi.get(self, "recovery_point_count")

    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)

    @property
    @pulumi.getter(name="totalDiskStorageSizeInBytes")
    def total_disk_storage_size_in_bytes(self) -> Optional[pulumi.Input[str]]:
        """total Disk storage in bytes."""
        return pulumi.get(self, "total_disk_storage_size_in_bytes")

    @total_disk_storage_size_in_bytes.setter
    def total_disk_storage_size_in_bytes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "total_disk_storage_size_in_bytes", value)
@pulumi.input_type
class DiskExclusionPropertiesArgs:
    """Disk exclusion settings for Azure IaaS VM protection."""

    def __init__(__self__, *,
                 disk_lun_list: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
                 is_inclusion_list: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input[int]]] disk_lun_list: List of Disks' Logical Unit Numbers (LUN) to be used for VM Protection.
        :param pulumi.Input[bool] is_inclusion_list: Flag to indicate whether DiskLunList is to be included/ excluded from backup.
        """
        # Persist only arguments that were explicitly supplied.
        for attr_name, attr_value in (("disk_lun_list", disk_lun_list),
                                      ("is_inclusion_list", is_inclusion_list)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="diskLunList")
    def disk_lun_list(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
        """List of Disks' Logical Unit Numbers (LUN) to be used for VM Protection."""
        return pulumi.get(self, "disk_lun_list")

    @disk_lun_list.setter
    def disk_lun_list(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
        pulumi.set(self, "disk_lun_list", value)

    @property
    @pulumi.getter(name="isInclusionList")
    def is_inclusion_list(self) -> Optional[pulumi.Input[bool]]:
        """Flag to indicate whether DiskLunList is to be included/ excluded from backup."""
        return pulumi.get(self, "is_inclusion_list")

    @is_inclusion_list.setter
    def is_inclusion_list(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_inclusion_list", value)
@pulumi.input_type
class ExtendedPropertiesArgs:
    """Extended Properties for Azure IaasVM Backup."""

    def __init__(__self__, *,
                 disk_exclusion_properties: Optional[pulumi.Input['DiskExclusionPropertiesArgs']] = None):
        """
        Extended Properties for Azure IaasVM Backup.

        :param pulumi.Input['DiskExclusionPropertiesArgs'] disk_exclusion_properties: Extended Properties for Disk Exclusion.
        """
        # Guard clause: nothing to record when no value was supplied.
        if disk_exclusion_properties is None:
            return
        pulumi.set(__self__, "disk_exclusion_properties", disk_exclusion_properties)

    @property
    @pulumi.getter(name="diskExclusionProperties")
    def disk_exclusion_properties(self) -> Optional[pulumi.Input['DiskExclusionPropertiesArgs']]:
        """Extended Properties for Disk Exclusion."""
        return pulumi.get(self, "disk_exclusion_properties")

    @disk_exclusion_properties.setter
    def disk_exclusion_properties(self, value: Optional[pulumi.Input['DiskExclusionPropertiesArgs']]):
        pulumi.set(self, "disk_exclusion_properties", value)
# NOTE: auto-generated Pulumi input type -- field values live in Pulumi's
# internal property store (pulumi.set / pulumi.get), not in instance attributes.
@pulumi.input_type
class GenericProtectedItemArgs:
    def __init__(__self__, *,
                 backup_management_type: Optional[pulumi.Input[str]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 fabric_name: Optional[pulumi.Input[str]] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 policy_state: Optional[pulumi.Input[str]] = None,
                 protected_item_id: Optional[pulumi.Input[int]] = None,
                 protected_item_type: Optional[pulumi.Input[str]] = None,
                 protection_state: Optional[pulumi.Input[str]] = None,
                 source_associations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[str]] = None):
        """
        Base class for backup items.
        :param pulumi.Input[str] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[str] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input[str] fabric_name: Name of this backup item's fabric.
        :param pulumi.Input[str] friendly_name: Friendly name of the container.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] policy_state: Indicates consistency of policy object and policy applied to this backup item.
        :param pulumi.Input[int] protected_item_id: Data Plane Service ID of the protected item.
        :param pulumi.Input[str] protected_item_type: backup item type.
        :param pulumi.Input[str] protection_state: Backup state of this backup item.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] source_associations: Loosely coupled (type, value) associations (example - parent of a protected item)
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[str] workload_type: Type of workload this item represents.
        """
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if backup_set_name is not None:
            pulumi.set(__self__, "backup_set_name", backup_set_name)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if create_mode is not None:
            pulumi.set(__self__, "create_mode", create_mode)
        if deferred_delete_time_in_utc is not None:
            pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
        if deferred_delete_time_remaining is not None:
            pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
        if fabric_name is not None:
            pulumi.set(__self__, "fabric_name", fabric_name)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if is_deferred_delete_schedule_upcoming is not None:
            pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
        if is_rehydrate is not None:
            pulumi.set(__self__, "is_rehydrate", is_rehydrate)
        if is_scheduled_for_deferred_delete is not None:
            pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
        if last_recovery_point is not None:
            pulumi.set(__self__, "last_recovery_point", last_recovery_point)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if policy_state is not None:
            pulumi.set(__self__, "policy_state", policy_state)
        if protected_item_id is not None:
            pulumi.set(__self__, "protected_item_id", protected_item_id)
        if protected_item_type is not None:
            # Discriminator: the code generator pins this to the concrete type
            # name; the caller-supplied value only toggles whether it is set.
            pulumi.set(__self__, "protected_item_type", 'GenericProtectedItem')
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if source_associations is not None:
            pulumi.set(__self__, "source_associations", source_associations)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if workload_type is not None:
            pulumi.set(__self__, "workload_type", workload_type)
    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")
    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_management_type", value)
    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the backup set the backup item belongs to
        """
        return pulumi.get(self, "backup_set_name")
    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of container
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")
    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "create_mode", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """
        Time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")
    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """
        Time remaining before the DS marked for deferred delete is permanently deleted
        """
        return pulumi.get(self, "deferred_delete_time_remaining")
    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)
    @property
    @pulumi.getter(name="fabricName")
    def fabric_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of this backup item's fabric.
        """
        return pulumi.get(self, "fabric_name")
    @fabric_name.setter
    def fabric_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fabric_name", value)
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of the container.
        """
        return pulumi.get(self, "friendly_name")
    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)
    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")
    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)
    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")
    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)
    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")
    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates consistency of policy object and policy applied to this backup item.
        """
        return pulumi.get(self, "policy_state")
    @policy_state.setter
    def policy_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_state", value)
    @property
    @pulumi.getter(name="protectedItemId")
    def protected_item_id(self) -> Optional[pulumi.Input[int]]:
        """
        Data Plane Service ID of the protected item.
        """
        return pulumi.get(self, "protected_item_id")
    @protected_item_id.setter
    def protected_item_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "protected_item_id", value)
    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> Optional[pulumi.Input[str]]:
        """
        backup item type.
        """
        return pulumi.get(self, "protected_item_type")
    @protected_item_type.setter
    def protected_item_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_type", value)
    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[str]]:
        """
        Backup state of this backup item.
        """
        return pulumi.get(self, "protection_state")
    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_state", value)
    @property
    @pulumi.getter(name="sourceAssociations")
    def source_associations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Loosely coupled (type, value) associations (example - parent of a protected item)
        """
        return pulumi.get(self, "source_associations")
    @source_associations.setter
    def source_associations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "source_associations", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")
    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload_type", value)
# NOTE: auto-generated Pulumi input type -- field values live in Pulumi's
# internal property store (pulumi.set / pulumi.get), not in instance attributes.
@pulumi.input_type
class MabFileFolderProtectedItemArgs:
    def __init__(__self__, *,
                 backup_management_type: Optional[pulumi.Input[str]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 computer_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[str]] = None,
                 deferred_delete_sync_time_in_utc: Optional[pulumi.Input[int]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['MabFileFolderProtectedItemExtendedInfoArgs']] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 last_backup_status: Optional[pulumi.Input[str]] = None,
                 last_backup_time: Optional[pulumi.Input[str]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protected_item_type: Optional[pulumi.Input[str]] = None,
                 protection_state: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[str]] = None):
        """
        MAB workload-specific backup item.
        :param pulumi.Input[str] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] computer_name: Name of the computer associated with this backup item.
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[str] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[int] deferred_delete_sync_time_in_utc: Sync time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input['MabFileFolderProtectedItemExtendedInfoArgs'] extended_info: Additional information with this backup item.
        :param pulumi.Input[str] friendly_name: Friendly name of this backup item.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[str] last_backup_status: Status of last backup operation.
        :param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] protected_item_type: backup item type.
        :param pulumi.Input[str] protection_state: Protected, ProtectionStopped, IRPending or ProtectionError
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[str] workload_type: Type of workload this item represents.
        """
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if backup_set_name is not None:
            pulumi.set(__self__, "backup_set_name", backup_set_name)
        if computer_name is not None:
            pulumi.set(__self__, "computer_name", computer_name)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if create_mode is not None:
            pulumi.set(__self__, "create_mode", create_mode)
        if deferred_delete_sync_time_in_utc is not None:
            pulumi.set(__self__, "deferred_delete_sync_time_in_utc", deferred_delete_sync_time_in_utc)
        if deferred_delete_time_in_utc is not None:
            pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
        if deferred_delete_time_remaining is not None:
            pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
        if extended_info is not None:
            pulumi.set(__self__, "extended_info", extended_info)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if is_deferred_delete_schedule_upcoming is not None:
            pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
        if is_rehydrate is not None:
            pulumi.set(__self__, "is_rehydrate", is_rehydrate)
        if is_scheduled_for_deferred_delete is not None:
            pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
        if last_backup_status is not None:
            pulumi.set(__self__, "last_backup_status", last_backup_status)
        if last_backup_time is not None:
            pulumi.set(__self__, "last_backup_time", last_backup_time)
        if last_recovery_point is not None:
            pulumi.set(__self__, "last_recovery_point", last_recovery_point)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if protected_item_type is not None:
            # Discriminator: the code generator pins this to the concrete type
            # name; the caller-supplied value only toggles whether it is set.
            pulumi.set(__self__, "protected_item_type", 'MabFileFolderProtectedItem')
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if workload_type is not None:
            pulumi.set(__self__, "workload_type", workload_type)
    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")
    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_management_type", value)
    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the backup set the backup item belongs to
        """
        return pulumi.get(self, "backup_set_name")
    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)
    @property
    @pulumi.getter(name="computerName")
    def computer_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the computer associated with this backup item.
        """
        return pulumi.get(self, "computer_name")
    @computer_name.setter
    def computer_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "computer_name", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of container
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")
    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "create_mode", value)
    @property
    @pulumi.getter(name="deferredDeleteSyncTimeInUTC")
    def deferred_delete_sync_time_in_utc(self) -> Optional[pulumi.Input[int]]:
        """
        Sync time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_sync_time_in_utc")
    @deferred_delete_sync_time_in_utc.setter
    def deferred_delete_sync_time_in_utc(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "deferred_delete_sync_time_in_utc", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """
        Time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")
    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """
        Time remaining before the DS marked for deferred delete is permanently deleted
        """
        return pulumi.get(self, "deferred_delete_time_remaining")
    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)
    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['MabFileFolderProtectedItemExtendedInfoArgs']]:
        """
        Additional information with this backup item.
        """
        return pulumi.get(self, "extended_info")
    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['MabFileFolderProtectedItemExtendedInfoArgs']]):
        pulumi.set(self, "extended_info", value)
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of this backup item.
        """
        return pulumi.get(self, "friendly_name")
    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)
    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")
    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)
    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")
    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)
    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """
        Status of last backup operation.
        """
        return pulumi.get(self, "last_backup_status")
    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_status", value)
    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp of the last backup operation on this backup item.
        """
        return pulumi.get(self, "last_backup_time")
    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_time", value)
    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")
    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> Optional[pulumi.Input[str]]:
        """
        backup item type.
        """
        return pulumi.get(self, "protected_item_type")
    @protected_item_type.setter
    def protected_item_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_type", value)
    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[str]]:
        """
        Protected, ProtectionStopped, IRPending or ProtectionError
        """
        return pulumi.get(self, "protection_state")
    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_state", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")
    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload_type", value)
# NOTE: auto-generated Pulumi input type -- field values live in Pulumi's
# internal property store (pulumi.set / pulumi.get), not in instance attributes.
@pulumi.input_type
class MabFileFolderProtectedItemExtendedInfoArgs:
    def __init__(__self__, *,
                 last_refreshed_at: Optional[pulumi.Input[str]] = None,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None):
        """
        Additional information on the backed up item.
        :param pulumi.Input[str] last_refreshed_at: Last time when the agent data synced to service.
        :param pulumi.Input[str] oldest_recovery_point: The oldest backup copy available.
        :param pulumi.Input[int] recovery_point_count: Number of backup copies associated with the backup item.
        """
        # Only set what the caller provided; absent fields stay unset.
        if last_refreshed_at is not None:
            pulumi.set(__self__, "last_refreshed_at", last_refreshed_at)
        if oldest_recovery_point is not None:
            pulumi.set(__self__, "oldest_recovery_point", oldest_recovery_point)
        if recovery_point_count is not None:
            pulumi.set(__self__, "recovery_point_count", recovery_point_count)
    @property
    @pulumi.getter(name="lastRefreshedAt")
    def last_refreshed_at(self) -> Optional[pulumi.Input[str]]:
        """
        Last time when the agent data synced to service.
        """
        return pulumi.get(self, "last_refreshed_at")
    @last_refreshed_at.setter
    def last_refreshed_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_refreshed_at", value)
    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        The oldest backup copy available.
        """
        return pulumi.get(self, "oldest_recovery_point")
    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)
    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """
        Number of backup copies associated with the backup item.
        """
        return pulumi.get(self, "recovery_point_count")
    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)
| 44.823097
| 163
| 0.684846
|
4a0356b3f10dd48dad887988753eded83dd2e953
| 1,115
|
py
|
Python
|
tests/test_orcid2vivo.py
|
gwu-libraries/orcid2vivo
|
14c4c8ebb828d862261324a13616aad1f2f0c721
|
[
"MIT"
] | 12
|
2015-04-23T19:09:03.000Z
|
2019-12-02T18:49:41.000Z
|
tests/test_orcid2vivo.py
|
gwu-libraries/orcid2vivo
|
14c4c8ebb828d862261324a13616aad1f2f0c721
|
[
"MIT"
] | 26
|
2015-05-01T13:51:54.000Z
|
2015-11-05T16:36:49.000Z
|
tests/test_orcid2vivo.py
|
gwu-libraries/orcid2vivo
|
14c4c8ebb828d862261324a13616aad1f2f0c721
|
[
"MIT"
] | 11
|
2015-05-01T21:53:24.000Z
|
2020-12-10T15:38:48.000Z
|
from unittest import TestCase
from rdflib import Graph, URIRef, RDF, OWL
import orcid2vivo_app.vivo_namespace as ns
from orcid2vivo import PersonCrosswalk
from orcid2vivo_app.vivo_namespace import VIVO
class TestPersonCrosswalk(TestCase):
    """Tests for PersonCrosswalk's ORCID iD triples."""

    def setUp(self):
        """Build a fresh graph and a sample person/ORCID pair for each test."""
        self.orcid_id = "0000-0003-1527-0030"
        self.orcid_id_uriref = URIRef("http://orcid.org/{}".format(self.orcid_id))
        self.person_uri = ns.D["test"]
        self.graph = Graph(namespace_manager=ns.ns_manager)

    def test_add_orcid_id(self):
        """An unconfirmed ORCID iD produces exactly the two expected triples."""
        PersonCrosswalk._add_orcid_id(self.person_uri, self.orcid_id, self.graph, False)
        self.assertEqual(2, len(self.graph))
        self.assertIn((self.person_uri, VIVO.orcidId, self.orcid_id_uriref), self.graph)
        self.assertIn((self.orcid_id_uriref, RDF.type, OWL.Thing), self.graph)

    def test_add_orcid_id_confirmed(self):
        """A confirmed ORCID iD additionally records the confirmation triple."""
        PersonCrosswalk._add_orcid_id(self.person_uri, self.orcid_id, self.graph, True)
        self.assertEqual(3, len(self.graph))
        self.assertIn((self.orcid_id_uriref, VIVO.confirmedOrcidId, self.person_uri), self.graph)
| 41.296296
| 101
| 0.735426
|
4a0357f7d9e7584b955ae51b10f258c6a485a734
| 3,538
|
py
|
Python
|
aequilibrae/paths/traffic_class.py
|
barisdemirdelen/aequilibrae
|
2599a8309c638a9151facaad775285222b12b1f2
|
[
"MIT"
] | null | null | null |
aequilibrae/paths/traffic_class.py
|
barisdemirdelen/aequilibrae
|
2599a8309c638a9151facaad775285222b12b1f2
|
[
"MIT"
] | null | null | null |
aequilibrae/paths/traffic_class.py
|
barisdemirdelen/aequilibrae
|
2599a8309c638a9151facaad775285222b12b1f2
|
[
"MIT"
] | null | null | null |
from typing import Union
import numpy as np
from aequilibrae.paths.graph import Graph
from aequilibrae.matrix import AequilibraeMatrix
from aequilibrae.paths.results import AssignmentResults
from aequilibrae.starts_logging import logger
class TrafficClass():
"""Traffic class for equilibrium traffic assignment
::
from aequilibrae.paths import TrafficClass
tc = TrafficClass(graph, demand_matrix)
tc.set_pce(1.3)
"""
def __init__(self, name: str, graph: Graph, matrix: AequilibraeMatrix) -> None:
"""
Instantiates the class
Args:
name (:obj:`str`): UNIQUE class name.
graph (:obj:`Graph`): Class/mode-specific graph
matrix (:obj:`AequilibraeMatrix`): Class/mode-specific matrix. Supports multiple user classes
"""
if not np.array_equal(matrix.index, graph.centroids):
raise ValueError("Matrix and graph do not have compatible sets of centroids.")
if matrix.matrix_view.dtype != graph.default_types('float'):
raise TypeError("Matrix's computational view need to be of type np.float64")
self.graph = graph
self.matrix = matrix
self.pce = 1.0
self.vot = 1.0
self.mode = graph.mode
self.class_flow: np.array
self.results = AssignmentResults()
self.results.prepare(self.graph, self.matrix)
self.fixed_cost = np.zeros(graph.graph.shape[0], graph.default_types('float'))
self.fixed_cost_field = ''
self.fc_multiplier = 1.0
self.results.reset()
self._aon_results = AssignmentResults()
self._aon_results.prepare(self.graph, self.matrix)
self.__id__ = name
def set_pce(self, pce: Union[float, int]) -> None:
"""Sets Passenger Car equivalent
Args:
pce (:obj:`Union[float, int]`): PCE. Defaults to 1 if not set
"""
if not isinstance(pce, (float, int)):
raise ValueError('PCE needs to be either integer or float ')
self.pce = pce
def set_fixed_cost(self, field_name: str, multiplier=1):
"""Sets value of time
Args:
field_name (:obj:`str`): Name of the graph field with fixed costs for this class
multiplier (:obj:`Union[float, int]`): Multiplier for the fixed cost. Defaults to 1 if not set
"""
self.fc_multiplier = float(multiplier)
if field_name not in self.graph.graph.columns:
raise ValueError('Field does not exist in the graph')
self.fixed_cost_field = field_name
if np.any(np.isnan(self.graph.graph[field_name].values)):
logger.warning(f'Cost field {field_name} has NaN values. Converted to zero')
if self.graph.graph[field_name].min() < 0:
msg = f'Cost field {field_name} has negative values. That is not allowed'
logger.error(msg)
raise ValueError(msg)
def set_vot(self, value_of_time: float) -> None:
    """Sets the value of time used for this traffic class.

    The value is coerced to ``float`` before being stored.

    Args:
        value_of_time (:obj:`Union[float, int]`): Value of time. Defaults to 1 if not set
    """
    self.vot = float(value_of_time)
def __setattr__(self, key, value):
    """Whitelist attribute assignment so that typos fail loudly.

    Raises:
        KeyError: If *key* is not one of the known attributes.
    """
    allowed = {'graph', 'matrix', 'pce', 'mode', 'class_flow', 'results',
               '_aon_results', '__id__', 'vot', 'fixed_cost', 'fc_multiplier',
               'fixed_cost_field'}
    if key not in allowed:
        raise KeyError('Traffic Class does not have that element')
    self.__dict__[key] = value
| 36.102041
| 107
| 0.627473
|
4a0358f780f1e1bc86ec667ce89cbaf11a3f7464
| 449
|
py
|
Python
|
quickpay_api_client/client.py
|
mstendorf/quickpay-python-client
|
22350742859ba534662283d0251100a51d726db0
|
[
"MIT"
] | 7
|
2015-06-27T12:17:13.000Z
|
2020-07-07T19:40:31.000Z
|
quickpay_api_client/client.py
|
mstendorf/quickpay-python-client
|
22350742859ba534662283d0251100a51d726db0
|
[
"MIT"
] | 13
|
2015-06-28T07:11:34.000Z
|
2022-02-15T12:18:06.000Z
|
quickpay_api_client/client.py
|
mstendorf/quickpay-python-client
|
22350742859ba534662283d0251100a51d726db0
|
[
"MIT"
] | 10
|
2015-06-27T11:46:06.000Z
|
2020-01-03T21:07:02.000Z
|
import base64
import json
from functools import partial
from .api import QPApi
class QPClient(object):
    """Thin QuickPay client exposing one callable per supported HTTP verb.

    Attribute access such as ``client.get`` returns the API layer's
    ``perform`` with the verb pre-bound, so ``client.get(path, ...)``
    issues a GET request.
    """

    METHODS = ['get', 'post', 'put', 'patch', 'delete']

    def __init__(self, *args):
        # Construction arguments are forwarded verbatim to the API layer.
        self.api = QPApi(*args)

    def __getattr__(self, method):
        # Only reached for names not found through normal lookup,
        # i.e. the HTTP verb shortcuts.
        if method not in self.METHODS:
            raise AttributeError('unsupported http method: %s' % method)
        return partial(self.api.perform, method)
| 23.631579
| 72
| 0.632517
|
4a03597e5747993af28ef1f42b2ef46323135dd6
| 1,083
|
py
|
Python
|
alano/train/loss.py
|
zren96/alano
|
afd412ea37d8d4844cf1c8c9d53ff419b206a878
|
[
"MIT"
] | null | null | null |
alano/train/loss.py
|
zren96/alano
|
afd412ea37d8d4844cf1c8c9d53ff419b206a878
|
[
"MIT"
] | null | null | null |
alano/train/loss.py
|
zren96/alano
|
afd412ea37d8d4844cf1c8c9d53ff419b206a878
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
def huber_loss(output, target, thres=1):
    """Mean Huber loss between `output` and `target` (1D/2D tensors).

    Quadratic for residuals below `thres`, linear beyond it.
    Reference: wikipedia
    """
    residual = torch.abs(output - target)
    quadratic = 0.5 * residual ** 2
    linear = thres * residual - 0.5 * thres ** 2
    per_element = torch.where(residual < thres, quadratic, linear)
    return torch.mean(per_element)
def focal_loss(output, target, alpha=1, gamma=1):
    """Binary focal loss on logits, reduced by mean.

    Down-weights already well-classified examples by (1 - p_t)**gamma.
    Reference: https://discuss.pytorch.org/t/is-this-a-correct-implementation-for-focal-loss-in-pytorch/43327/4
    """
    bce = F.binary_cross_entropy_with_logits(output, target, reduction='none')
    p_t = torch.exp(-bce)  # prevents nans when probability 0
    focal = alpha * (1 - p_t) ** gamma * bce
    return focal.mean()
| 27.075
| 108
| 0.540166
|
4a035a476d7dfe479c33c4e3498d4685a9ed5d8b
| 3,868
|
py
|
Python
|
tests/kafkatest/tests/streams/streams_broker_compatibility.py
|
YYTVicky/kafka
|
b0f3eb276fa034b215570cd4f837851d9fb9166a
|
[
"Apache-2.0"
] | 35
|
2016-09-22T22:53:14.000Z
|
2020-02-13T15:12:21.000Z
|
tests/kafkatest/tests/streams/streams_broker_compatibility.py
|
axbaretto/presto
|
f137d2709db42b5c3e4d43a631832a8f74853065
|
[
"Apache-2.0"
] | 15
|
2020-03-05T00:32:48.000Z
|
2022-02-16T00:55:24.000Z
|
tests/kafkatest/tests/streams/streams_broker_compatibility.py
|
axbaretto/presto
|
f137d2709db42b5c3e4d43a631832a8f74853065
|
[
"Apache-2.0"
] | 88
|
2016-11-27T02:16:11.000Z
|
2020-02-28T05:10:26.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.tests.test import Test
from kafkatest.services.kafka import KafkaService
from kafkatest.services.streams import StreamsBrokerCompatibilityService
from kafkatest.services.verifiable_consumer import VerifiableConsumer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.version import DEV_BRANCH, LATEST_0_10_1, LATEST_0_10_0, KafkaVersion
class StreamsBrokerCompatibility(Test):
    """
    These tests validate that Streams v0.10.2+ can connect to older brokers v0.10+
    and that Streams fails fast for pre-0.10 brokers
    """

    # Topics shared by the Streams app (source/sink) and the verifying consumer.
    input = "brokerCompatibilitySourceTopic"
    output = "brokerCompatibilitySinkTopic"

    def __init__(self, test_context):
        super(StreamsBrokerCompatibility, self).__init__(test_context=test_context)
        self.zk = ZookeeperService(test_context, num_nodes=1)
        # Single-node broker with one input and one output topic.
        self.kafka = KafkaService(test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics={
                                      self.input: {'partitions': 1, 'replication-factor': 1},
                                      self.output: {'partitions': 1, 'replication-factor': 1}
                                  })
        # Streams application under test; reads `input`, writes `output`.
        self.processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka)
        # Consumer used to verify the Streams app actually produced output.
        self.consumer = VerifiableConsumer(test_context,
                                           1,
                                           self.kafka,
                                           self.output,
                                           "stream-broker-compatibility-verify-consumer")

    def setUp(self):
        self.zk.start()

    @parametrize(broker_version=str(DEV_BRANCH))
    @parametrize(broker_version=str(LATEST_0_10_1))
    def test_compatible_brokers(self, broker_version):
        # Against compatible brokers, the Streams app should process exactly
        # one message end-to-end.
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()
        self.processor.start()
        self.consumer.start()
        self.processor.wait()
        num_consumed_mgs = self.consumer.total_consumed()
        self.consumer.stop()
        self.kafka.stop()
        assert num_consumed_mgs == 1, \
            "Did expect to read exactly one message but got %d" % num_consumed_mgs

    @parametrize(broker_version=str(LATEST_0_10_0))
    def test_fail_fast_on_incompatible_brokers(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()
        self.processor.start()
        # NOTE(review): the start command is issued again over ssh after
        # processor.start() -- presumably so its stderr can be monitored
        # below; confirm against StreamsBrokerCompatibilityService.
        self.processor.node.account.ssh(self.processor.start_cmd(self.processor.node))
        with self.processor.node.account.monitor_log(self.processor.STDERR_FILE) as monitor:
            monitor.wait_until('Exception in thread "main" org.apache.kafka.streams.errors.StreamsException: Kafka Streams requires broker version 0.10.1.x or higher.',
                               timeout_sec=60,
                               err_msg="Never saw 'incompatible broker' error message " + str(self.processor.node.account))
        self.kafka.stop()
| 42.505495
| 168
| 0.664168
|
4a035a7e3cca6ce4bd452ed19a82d026af20aa80
| 3,141
|
py
|
Python
|
scripts/ssc/pairings_visualization/utils_definitions.py
|
MrBellamonte/MT-VAEs-TDA
|
8881b5db607c673fb558f7b74ece27f244b16b77
|
[
"MIT"
] | null | null | null |
scripts/ssc/pairings_visualization/utils_definitions.py
|
MrBellamonte/MT-VAEs-TDA
|
8881b5db607c673fb558f7b74ece27f244b16b77
|
[
"MIT"
] | 1
|
2020-09-22T13:04:58.000Z
|
2020-09-22T13:05:23.000Z
|
scripts/ssc/pairings_visualization/utils_definitions.py
|
MrBellamonte/AEs-VAEs-TDA
|
8881b5db607c673fb558f7b74ece27f244b16b77
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
#PATH_ROOT_SWISSROLL = '/home/simonberg/PycharmProjects/MT-VAEs-TDA/output/SwissRoll_pairings/'
PATH_ROOT_SWISSROLL = '/Users/simons/PycharmProjects/MT-VAEs-TDA/output/SwissRoll_pairings/'
#PATH_ROOT_SWISSROLL = '/Users/simons/polybox/Studium/20FS/MT/plots_/test'
def make_plot(data, pairings, color, name='noname', path_root=PATH_ROOT_SWISSROLL, knn=False, dpi=200, show=False, angle=5, cmap=plt.cm.Spectral):
    """Render a 3D scatter of `data` with optional pairing edges and save PDFs.

    Args:
        data: (n, 3) array of point coordinates.
        pairings: iterable of (i, j) index pairs to connect with grey lines,
            or -- when ``knn`` is True -- per-point neighbor index lists
            (row i lists the neighbors of point i). None draws no edges.
        color: per-point scalar mapped through `cmap`.
        name: suffix used in the output file names.
        path_root: output directory prefix; None skips saving.
        knn: interpret `pairings` as neighbor lists instead of index pairs.
        dpi: resolution passed to savefig.
        show: whether to display the figure interactively.
        angle: elevation angle for the 3D view.
        cmap: matplotlib colormap for the scatter.

    Changes vs. the original: removed the unused `axis_range` local and
    collapsed the four copy-pasted fixed-bbox savefig blocks (and the three
    tick-line loops) into loops. Output files and figure state are identical.
    """
    ax = plt.gca(projection="3d")
    ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=color, s=100, cmap=cmap)
    if pairings is not None:
        # In knn mode, `i` tracks the source point of each neighbor list.
        for i, pairing in enumerate(pairings):
            if knn:
                for ind in pairing:
                    ax.plot([data[i, 0], data[ind, 0]],
                            [data[i, 1], data[ind, 1]],
                            [data[i, 2], data[ind, 2]], color='grey')
            else:
                ax.plot([data[pairing[0], 0], data[pairing[1], 0]],
                        [data[pairing[0], 1], data[pairing[1], 1]],
                        [data[pairing[0], 2], data[pairing[1], 2]], color='grey')
    ax.view_init(angle, 90)
    # Hide tick labels and tick marks on all three axes.
    for axis in (ax.xaxis, ax.yaxis, ax.zaxis):
        axis.set_ticklabels([])
        for line in axis.get_ticklines():
            line.set_visible(False)
    ax.margins(0, 0, 0)
    # Pad each axis range by 5% of its extent on both sides.
    axis_min = [min(data[:, i]) for i in (0, 1, 2)]
    axis_max = [max(data[:, i]) for i in (0, 1, 2)]
    margin = [(axis_max[i] - axis_min[i]) * 0.05 for i in (0, 1, 2)]
    ax.set_xlim(np.array([axis_min[0] - margin[0], axis_max[0] + margin[0]]))
    ax.set_ylim(np.array([axis_min[1] - margin[1], axis_max[1] + margin[1]]))
    ax.set_zlim(np.array([axis_min[2] - margin[2], axis_max[2] + margin[2]]))
    if path_root is not None:
        fig = ax.get_figure()
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
        # Tight crop plus fixed-size crops at 5, 4, 3 and 6 inches
        # (same order and file names as the original implementation).
        fig.savefig(path_root + 'btightplotsc_{}'.format(name) + '.pdf', dpi=dpi,
                    bbox_inches='tight', pad_inches=0)
        for size in (5, 4, 3, 6):
            bbox = fig.bbox_inches.from_bounds(1, 1, size, size)
            fig.savefig(path_root + 'b{}plotsc_{}'.format(size, name) + '.pdf',
                        dpi=dpi, bbox_inches=bbox, pad_inches=0)
    if show:
        plt.show()
    plt.close()
| 39.2625
| 158
| 0.584846
|
4a035b2f2b717b624a2d189d48993e5be4b97786
| 1,937
|
py
|
Python
|
home_application/migrations/0004_auto_20190109_1616.py
|
hanseryukiri/jenkins
|
808c70164bb26d90f28bab542d2d5dc9e2d5e1f3
|
[
"Apache-2.0"
] | null | null | null |
home_application/migrations/0004_auto_20190109_1616.py
|
hanseryukiri/jenkins
|
808c70164bb26d90f28bab542d2d5dc9e2d5e1f3
|
[
"Apache-2.0"
] | 3
|
2020-06-05T20:01:31.000Z
|
2021-06-10T21:13:30.000Z
|
home_application/migrations/0004_auto_20190109_1616.py
|
hanseryukiri/jenkins
|
808c70164bb26d90f28bab542d2d5dc9e2d5e1f3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds the BkTaskInfo and JobInfo tables and reworks BuildHistory.is_release."""

    dependencies = [
        ('home_application', '0003_auto_20180808_1451'),
    ]

    operations = [
        # BlueKing script/task bookkeeping table (db table: bk_task_info).
        migrations.CreateModel(
            name='BkTaskInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('app_id', models.IntegerField(max_length=10, verbose_name=b'app_id')),
                # NOTE(review): 'taks_id' looks like a typo for 'task_id', but the
                # column name is baked into the schema -- left unchanged on purpose.
                ('taks_id', models.IntegerField(max_length=10, verbose_name=b'task_id')),
            ],
            options={
                'db_table': 'bk_task_info',
                'verbose_name': '\u84dd\u9cb8\u811a\u672c\u4fe1\u606f',
            },
        ),
        # Jenkins build-job metadata table (db table: job_info).
        migrations.CreateModel(
            name='JobInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('tag_name', models.CharField(max_length=128, verbose_name=b'TAG\xe5\x90\x8d')),
                ('jenkins_name', models.CharField(max_length=128, verbose_name=b'jenkins\xe4\xbb\xbb\xe5\x8a\xa1\xe5\x90\x8d')),
            ],
            options={
                'db_table': 'job_info',
                'verbose_name': '\u6784\u5efa\u4efb\u52a1\u4fe1\u606f',
            },
        ),
        # Release status: 1 = finished, 0 = not finished.
        migrations.AlterField(
            model_name='buildhistory',
            name='is_release',
            field=models.IntegerField(default=0, verbose_name=b'\xe5\x8f\x91\xe5\xb8\x83\xe7\x8a\xb6\xe6\x80\x81', choices=[(1, b'\xe5\xb7\xb2\xe5\xae\x8c\xe6\x88\x90'), (0, b'\xe6\x9c\xaa\xe5\xae\x8c\xe6\x88\x90')]),
        ),
        # Link each task back to its job.
        migrations.AddField(
            model_name='bktaskinfo',
            name='tag_name',
            field=models.ForeignKey(related_name='tasks', to='home_application.JobInfo'),
        ),
    ]
| 39.530612
| 217
| 0.5746
|
4a035b6f8090773d5a46d582da52fd4c42b746e0
| 11,851
|
py
|
Python
|
carrera-sdk/producer/python/thrift/server/TNonblockingServer.py
|
MemorySpring/DDMQ
|
4f8f0a69109ae1d7fe30f36b15fbfe0a7cbf33fe
|
[
"Apache-2.0"
] | 1,200
|
2018-12-29T10:31:39.000Z
|
2022-03-31T08:26:33.000Z
|
thrift_0_9_2/lib/py/thrift/server/TNonblockingServer.py
|
ChimeraCoder/lightstep-tracer-go
|
3f04c904839c54177e85df579d910718b09c42a3
|
[
"MIT"
] | 24
|
2019-01-15T01:53:31.000Z
|
2021-09-29T09:55:20.000Z
|
thrift_0_9_2/lib/py/thrift/server/TNonblockingServer.py
|
ChimeraCoder/lightstep-tracer-go
|
3f04c904839c54177e85df579d910718b09c42a3
|
[
"MIT"
] | 323
|
2018-12-29T10:30:05.000Z
|
2022-03-31T06:51:37.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Implementation of non-blocking server.
The main idea of the server is to receive and send requests
only from the main thread.
The thread poool should be sized for concurrent tasks, not
maximum connections
"""
import threading
import socket
import Queue
import select
import struct
import logging
logger = logging.getLogger(__name__)
from thrift.transport import TTransport
from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory
__all__ = ['TNonblockingServer']
class Worker(threading.Thread):
    """Pulls (processor, iprot, oprot, otrans, callback) tuples off a queue
    and processes them; a tuple whose processor is None shuts the worker down."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        """Process queries from the task queue until the stop sentinel arrives."""
        while True:
            try:
                processor, in_prot, out_prot, out_trans, done_cb = self.queue.get()
                if processor is None:
                    break
                processor.process(in_prot, out_prot)
                done_cb(True, out_trans.getvalue())
            except Exception:
                logger.exception("Exception while processing request")
                done_cb(False, '')
# Connection state-machine values (see the Connection class below).
WAIT_LEN = 0      # reading the 4-byte frame-length prefix
WAIT_MESSAGE = 1  # reading the frame body
WAIT_PROCESS = 2  # frame fully read; waiting for a worker thread
SEND_ANSWER = 3   # writing the (length-prefixed) response back
CLOSED = 4        # socket closed; connection should be discarded
def locked(func):
    """Decorator which runs the wrapped method while holding self.lock."""
    def nested(self, *args, **kwargs):
        # `with` is equivalent to acquire()/try/finally release().
        with self.lock:
            return func(self, *args, **kwargs)
    return nested
def socket_exception(func):
    """Decorator: on socket.error, close the connection instead of raising."""
    def guarded(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except socket.error:
            self.close()
    return guarded
class Connection:
    """Basic class is represented connection.

    It can be in state:
        WAIT_LEN --- connection is reading request len.
        WAIT_MESSAGE --- connection is reading request.
        WAIT_PROCESS --- connection has just read whole request and
                         waits for call ready routine.
        SEND_ANSWER --- connection is sending answer string (including length
                        of answer).
        CLOSED --- socket was closed and connection should be deleted.
    """
    def __init__(self, new_socket, wake_up):
        # Non-blocking socket: all reads/writes happen from the select loop.
        self.socket = new_socket
        self.socket.setblocking(False)
        self.status = WAIT_LEN
        self.len = 0
        self.message = ''
        # Protects state transitions performed from worker threads (ready()).
        self.lock = threading.Lock()
        self.wake_up = wake_up

    def _read_len(self):
        """Reads length of request.

        It's a safer alternative to self.socket.recv(4)
        """
        # recv may deliver fewer than 4 bytes; accumulate until complete.
        read = self.socket.recv(4 - len(self.message))
        if len(read) == 0:
            # if we read 0 bytes and self.message is empty, then
            # the client closed the connection
            if len(self.message) != 0:
                logger.error("can't read frame size from socket")
            self.close()
            return
        self.message += read
        if len(self.message) == 4:
            # '!i' = big-endian signed 32-bit length (framed transport).
            self.len, = struct.unpack('!i', self.message)
            if self.len < 0:
                logger.error("negative frame size, it seems client "
                             "doesn't use FramedTransport")
                self.close()
            elif self.len == 0:
                logger.error("empty frame, it's really strange")
                self.close()
            else:
                self.message = ''
                self.status = WAIT_MESSAGE

    @socket_exception
    def read(self):
        """Reads data from stream and switch state."""
        assert self.status in (WAIT_LEN, WAIT_MESSAGE)
        if self.status == WAIT_LEN:
            self._read_len()
            # go back to the main loop here for simplicity instead of
            # falling through, even though there is a good chance that
            # the message is already available
        elif self.status == WAIT_MESSAGE:
            read = self.socket.recv(self.len - len(self.message))
            if len(read) == 0:
                logger.error("can't read frame from socket (get %d of "
                             "%d bytes)" % (len(self.message), self.len))
                self.close()
                return
            self.message += read
            if len(self.message) == self.len:
                self.status = WAIT_PROCESS

    @socket_exception
    def write(self):
        """Writes data from socket and switch state."""
        assert self.status == SEND_ANSWER
        # Partial sends are expected on a non-blocking socket; keep the rest.
        sent = self.socket.send(self.message)
        if sent == len(self.message):
            self.status = WAIT_LEN
            self.message = ''
            self.len = 0
        else:
            self.message = self.message[sent:]

    @locked
    def ready(self, all_ok, message):
        """Callback function for switching state and waking up main thread.

        This function is the only function which can be called asynchronous.

        The ready can switch Connection to three states:
            WAIT_LEN if request was oneway.
            SEND_ANSWER if request was processed in normal way.
            CLOSED if request throws unexpected exception.

        The one wakes up main thread.
        """
        assert self.status == WAIT_PROCESS
        if not all_ok:
            self.close()
            self.wake_up()
            return
        # NOTE(review): len is reset to '' (not 0) here; _read_len overwrites
        # it before the next use -- presumably harmless, confirm.
        self.len = ''
        if len(message) == 0:
            # it was a oneway request, do not write answer
            self.message = ''
            self.status = WAIT_LEN
        else:
            # Prepend the 4-byte big-endian length for the framed transport.
            self.message = struct.pack('!i', len(message)) + message
            self.status = SEND_ANSWER
        self.wake_up()

    @locked
    def is_writeable(self):
        """Return True if connection should be added to write list of select"""
        return self.status == SEND_ANSWER

    # it's not necessary, but...
    @locked
    def is_readable(self):
        """Return True if connection should be added to read list of select"""
        return self.status in (WAIT_LEN, WAIT_MESSAGE)

    @locked
    def is_closed(self):
        """Returns True if connection is closed."""
        return self.status == CLOSED

    def fileno(self):
        """Returns the file descriptor of the associated socket."""
        return self.socket.fileno()

    def close(self):
        """Closes connection"""
        self.status = CLOSED
        self.socket.close()
class TNonblockingServer:
    """Non-blocking server.

    All socket I/O happens on the main thread (inside handle()); fully
    received frames are handed to a pool of Worker threads via a queue.
    """

    def __init__(self,
                 processor,
                 lsocket,
                 inputProtocolFactory=None,
                 outputProtocolFactory=None,
                 threads=10):
        self.processor = processor
        self.socket = lsocket
        self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
        self.out_protocol = outputProtocolFactory or self.in_protocol
        self.threads = int(threads)
        self.clients = {}  # fileno -> Connection
        self.tasks = Queue.Queue()
        # Self-pipe trick: writing to _write wakes up the select() in handle().
        self._read, self._write = socket.socketpair()
        self.prepared = False
        self._stop = False

    def setNumThreads(self, num):
        """Set the number of worker threads that should be created."""
        # implement ThreadPool interface
        assert not self.prepared, "Can't change number of threads after start"
        self.threads = num

    def prepare(self):
        """Prepares server for serve requests."""
        if self.prepared:
            return
        self.socket.listen()
        # Daemon workers: they must not keep the process alive at shutdown.
        for _ in xrange(self.threads):
            thread = Worker(self.tasks)
            thread.setDaemon(True)
            thread.start()
        self.prepared = True

    def wake_up(self):
        """Wake up main thread.

        The server usualy waits in select call in we should terminate one.
        The simplest way is using socketpair.

        Select always wait to read from the first socket of socketpair.

        In this case, we can just write anything to the second socket from
        socketpair.
        """
        self._write.send('1')

    def stop(self):
        """Stop the server.

        This method causes the serve() method to return. stop() may be invoked
        from within your handler, or from another thread.

        After stop() is called, serve() will return but the server will still
        be listening on the socket. serve() may then be called again to resume
        processing requests. Alternatively, close() may be called after
        serve() returns to close the server socket and shutdown all worker
        threads.
        """
        self._stop = True
        self.wake_up()

    def _select(self):
        """Does select on open connections."""
        # Always watch the listening socket and the wake-up pipe for reads.
        readable = [self.socket.handle.fileno(), self._read.fileno()]
        writable = []
        for i, connection in self.clients.items():
            if connection.is_readable():
                readable.append(connection.fileno())
            if connection.is_writeable():
                writable.append(connection.fileno())
            if connection.is_closed():
                del self.clients[i]
        return select.select(readable, writable, readable)

    def handle(self):
        """Handle requests.

        WARNING! You must call prepare() BEFORE calling handle()
        """
        assert self.prepared, "You have to call prepare before handle"
        rset, wset, xset = self._select()
        for readable in rset:
            if readable == self._read.fileno():
                # don't care i just need to clean readable flag
                self._read.recv(1024)
            elif readable == self.socket.handle.fileno():
                # New inbound connection on the listening socket.
                client = self.socket.accept().handle
                self.clients[client.fileno()] = Connection(client,
                                                           self.wake_up)
            else:
                connection = self.clients[readable]
                connection.read()
                if connection.status == WAIT_PROCESS:
                    # A full frame has arrived: hand it to a worker thread.
                    itransport = TTransport.TMemoryBuffer(connection.message)
                    otransport = TTransport.TMemoryBuffer()
                    iprot = self.in_protocol.getProtocol(itransport)
                    oprot = self.out_protocol.getProtocol(otransport)
                    self.tasks.put([self.processor, iprot, oprot,
                                    otransport, connection.ready])
        for writeable in wset:
            self.clients[writeable].write()
        for oob in xset:
            # Exceptional condition on the socket: drop the connection.
            self.clients[oob].close()
            del self.clients[oob]

    def close(self):
        """Closes the server."""
        # One None-sentinel per worker shuts the pool down (see Worker.run).
        for _ in xrange(self.threads):
            self.tasks.put([None, None, None, None, None])
        self.socket.close()
        self.prepared = False

    def serve(self):
        """Serve requests.

        Serve requests forever, or until stop() is called.
        """
        self._stop = False
        self.prepare()
        while not self._stop:
            self.handle()
| 33.95702
| 79
| 0.590246
|
4a035be4fefccbe3d33047b64b2fd12d997c82a3
| 11,167
|
py
|
Python
|
rllib/agents/dqn/r2d2_torch_policy.py
|
naijoaix/ray
|
5a43b075bcc346fb95f1d30c85dbcb79a7fa5769
|
[
"Apache-2.0"
] | null | null | null |
rllib/agents/dqn/r2d2_torch_policy.py
|
naijoaix/ray
|
5a43b075bcc346fb95f1d30c85dbcb79a7fa5769
|
[
"Apache-2.0"
] | null | null | null |
rllib/agents/dqn/r2d2_torch_policy.py
|
naijoaix/ray
|
5a43b075bcc346fb95f1d30c85dbcb79a7fa5769
|
[
"Apache-2.0"
] | null | null | null |
"""PyTorch policy class used for R2D2."""
from typing import Dict, Tuple
import gym
import ray
from ray.rllib.agents.dqn.dqn_tf_policy import PRIO_WEIGHTS, postprocess_nstep_and_prio
from ray.rllib.agents.dqn.dqn_torch_policy import (
adam_optimizer,
build_q_model_and_distribution,
compute_q_values,
)
from ray.rllib.agents.dqn.r2d2_tf_policy import get_distribution_inputs_and_class
from ray.rllib.agents.dqn.simple_q_torch_policy import TargetNetworkMixin
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.policy_template import build_policy_class
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy import LearningRateSchedule
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_utils import (
apply_grad_clipping,
concat_multi_gpu_td_errors,
FLOAT_MIN,
huber_loss,
sequence_mask,
)
from ray.rllib.utils.typing import TensorType, TrainerConfigDict
torch, nn = try_import_torch()
F = None
if nn:
F = nn.functional
def build_r2d2_model_and_distribution(
    policy: Policy,
    obs_space: gym.spaces.Space,
    action_space: gym.spaces.Space,
    config: TrainerConfigDict,
) -> Tuple[ModelV2, TorchDistributionWrapper]:
    """Build q_model and target_model for R2D2.

    Args:
        policy (Policy): The policy, which will use the model for optimization.
        obs_space (gym.spaces.Space): The policy's observation space.
        action_space (gym.spaces.Space): The policy's action space.
        config (TrainerConfigDict):

    Returns:
        (q_model, TorchCategorical)

    Note: The target q model will not be returned, just assigned to
    `policy.target_model`.
    """
    # Delegate model/action-dist construction to the plain DQN builder.
    model, distribution_cls = build_q_model_and_distribution(
        policy, obs_space, action_space, config
    )

    # R2D2 needs a recurrent model: either it publishes an initial state via
    # Model.get_initial_state, or (attention nets) declares one through the
    # trajectory view API (view_requirements).
    is_recurrent = (
        model.get_initial_state() != []
        or model.view_requirements.get("state_in_0") is not None
    )
    assert is_recurrent, (
        "R2D2 requires its model to be a recurrent one! Try using "
        "`model.use_lstm` or `model.use_attention` in your config "
        "to auto-wrap your model with an LSTM- or attention net."
    )

    return model, distribution_cls
def r2d2_loss(policy: Policy, model, _, train_batch: SampleBatch) -> TensorType:
    """Constructs the loss for R2D2TorchPolicy.

    Args:
        policy (Policy): The Policy to calculate the loss for.
        model (ModelV2): The Model to calculate the loss for.
        train_batch (SampleBatch): The training data.

    Returns:
        TensorType: A single loss tensor.
    """
    target_model = policy.target_models[model]
    config = policy.config
    # Construct internal state inputs.
    i = 0
    state_batches = []
    while "state_in_{}".format(i) in train_batch:
        state_batches.append(train_batch["state_in_{}".format(i)])
        i += 1
    assert state_batches

    # Q-network evaluation (at t).
    q, _, _, _ = compute_q_values(
        policy,
        model,
        train_batch,
        state_batches=state_batches,
        seq_lens=train_batch.get(SampleBatch.SEQ_LENS),
        explore=False,
        is_training=True,
    )

    # Target Q-network evaluation (at t+1).
    q_target, _, _, _ = compute_q_values(
        policy,
        target_model,
        train_batch,
        state_batches=state_batches,
        seq_lens=train_batch.get(SampleBatch.SEQ_LENS),
        explore=False,
        is_training=True,
    )

    actions = train_batch[SampleBatch.ACTIONS].long()
    dones = train_batch[SampleBatch.DONES].float()
    rewards = train_batch[SampleBatch.REWARDS]
    weights = train_batch[PRIO_WEIGHTS]

    # B = number of sequences in the batch; T = (max) sequence length.
    B = state_batches[0].shape[0]
    T = q.shape[0] // B

    # Q scores for actions which we know were selected in the given state.
    one_hot_selection = F.one_hot(actions, policy.action_space.n)
    # Mask out invalid (FLOAT_MIN) action values before summing over actions.
    q_selected = torch.sum(
        torch.where(q > FLOAT_MIN, q, torch.tensor(0.0, device=q.device))
        * one_hot_selection,
        1,
    )

    if config["double_q"]:
        # Double-Q: select greedy actions with the online net,
        # evaluate them with the target net below.
        best_actions = torch.argmax(q, dim=1)
    else:
        best_actions = torch.argmax(q_target, dim=1)
    best_actions_one_hot = F.one_hot(best_actions, policy.action_space.n)
    q_target_best = torch.sum(
        torch.where(
            q_target > FLOAT_MIN, q_target, torch.tensor(0.0, device=q_target.device)
        )
        * best_actions_one_hot,
        dim=1,
    )

    if config["num_atoms"] > 1:
        raise ValueError("Distributional R2D2 not supported yet!")
    else:
        # Shift target Q one step (to t+1) and zero it out after terminals.
        q_target_best_masked_tp1 = (1.0 - dones) * torch.cat(
            [q_target_best[1:], torch.tensor([0.0], device=q_target_best.device)]
        )

        if config["use_h_function"]:
            # Invertible value rescaling (see h_function / h_inverse below).
            h_inv = h_inverse(q_target_best_masked_tp1, config["h_function_epsilon"])
            target = h_function(
                rewards + config["gamma"] ** config["n_step"] * h_inv,
                config["h_function_epsilon"],
            )
        else:
            target = (
                rewards + config["gamma"] ** config["n_step"] * q_target_best_masked_tp1
            )

        # Seq-mask all loss-related terms.
        seq_mask = sequence_mask(train_batch[SampleBatch.SEQ_LENS], T)[:, :-1]
        # Mask away also the burn-in sequence at the beginning.
        burn_in = policy.config["replay_buffer_config"]["replay_burn_in"]
        if burn_in > 0 and burn_in < T:
            seq_mask[:, :burn_in] = False

        num_valid = torch.sum(seq_mask)

        def reduce_mean_valid(t):
            # Mean over valid (unmasked, non-burn-in) time steps only.
            return torch.sum(t[seq_mask]) / num_valid

        # Make sure use the correct time indices:
        # Q(t) - [gamma * r + Q^(t+1)]
        q_selected = q_selected.reshape([B, T])[:, :-1]
        td_error = q_selected - target.reshape([B, T])[:, :-1].detach()
        td_error = td_error * seq_mask
        weights = weights.reshape([B, T])[:, :-1]
        total_loss = reduce_mean_valid(weights * huber_loss(td_error))

        # Store values for stats function in model (tower), such that for
        # multi-GPU, we do not override them during the parallel loss phase.
        model.tower_stats["total_loss"] = total_loss
        model.tower_stats["mean_q"] = reduce_mean_valid(q_selected)
        model.tower_stats["min_q"] = torch.min(q_selected)
        model.tower_stats["max_q"] = torch.max(q_selected)
        model.tower_stats["mean_td_error"] = reduce_mean_valid(td_error)
        # Store per time chunk (b/c we need only one mean
        # prioritized replay weight per stored sequence).
        model.tower_stats["td_error"] = torch.mean(td_error, dim=-1)

    return total_loss
def h_function(x, epsilon=1.0):
    """h-function to normalize target Qs, described in the paper [1].

    h(x) = sign(x) * [sqrt(abs(x) + 1) - 1] + epsilon * x

    Used in [1] in combination with h_inverse:
    targets = h(r + gamma * h_inverse(Q^))
    """
    compressed = torch.sqrt(torch.abs(x) + 1.0) - 1.0
    return torch.sign(x) * compressed + epsilon * x
def h_inverse(x, epsilon=1.0):
    """Inverse if the above h-function, described in the paper [1].

    If x > 0.0:
    h-1(x) = [2eps * x + (2eps + 1) - sqrt(4eps x + (2eps + 1)^2)] /
        (2 * eps^2)

    If x < 0.0:
    h-1(x) = [2eps * x + (2eps + 1) + sqrt(-4eps x + (2eps + 1)^2)] /
        (2 * eps^2)
    """
    two_eps = epsilon * 2
    offset = two_eps + 1.0
    denominator = 2.0 * epsilon ** 2
    pos_branch = (
        two_eps * x + offset - torch.sqrt(4.0 * epsilon * x + offset ** 2)
    ) / denominator
    neg_branch = (
        two_eps * x - offset + torch.sqrt(-4.0 * epsilon * x + offset ** 2)
    ) / denominator
    # x == 0 falls into the positive branch, which evaluates to 0 there.
    return torch.where(x < 0.0, neg_branch, pos_branch)
class ComputeTDErrorMixin:
    """Assign the `compute_td_error` method to the R2D2TorchPolicy

    This allows us to prioritize on the worker side.
    """

    def __init__(self):
        def compute_td_error(
            obs_t, act_t, rew_t, obs_tp1, done_mask, importance_weights
        ):
            # Pack the transition pieces into a SampleBatch-style input dict.
            input_dict = self._lazy_tensor_dict({SampleBatch.CUR_OBS: obs_t})
            input_dict[SampleBatch.ACTIONS] = act_t
            input_dict[SampleBatch.REWARDS] = rew_t
            input_dict[SampleBatch.NEXT_OBS] = obs_tp1
            input_dict[SampleBatch.DONES] = done_mask
            input_dict[PRIO_WEIGHTS] = importance_weights

            # Do forward pass on loss to update td error attribute
            r2d2_loss(self, self.model, None, input_dict)

            # Per-sequence TD error, as stored by r2d2_loss in tower_stats.
            return self.model.tower_stats["td_error"]

        self.compute_td_error = compute_td_error
def build_q_stats(policy: Policy, batch: SampleBatch) -> Dict[str, TensorType]:
    """Return learner stats, averaging each metric across all model towers."""

    def tower_mean(key):
        # One entry per (multi-GPU) tower; report their mean.
        return torch.mean(torch.stack(policy.get_tower_stats(key)))

    return {
        "cur_lr": policy.cur_lr,
        "total_loss": tower_mean("total_loss"),
        "mean_q": tower_mean("mean_q"),
        "min_q": tower_mean("min_q"),
        "max_q": tower_mean("max_q"),
        "mean_td_error": tower_mean("mean_td_error"),
    }
def setup_early_mixins(
    policy: Policy, obs_space, action_space, config: TrainerConfigDict
) -> None:
    """Initialize mixins that must be set up before the loss is constructed."""
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
def before_loss_init(
    policy: Policy,
    obs_space: gym.spaces.Space,
    action_space: gym.spaces.Space,
    config: TrainerConfigDict,
) -> None:
    """Attach TD-error computation and target-network handling to the policy."""
    ComputeTDErrorMixin.__init__(policy)
    TargetNetworkMixin.__init__(policy)
def grad_process_and_td_error_fn(
    policy: Policy, optimizer: "torch.optim.Optimizer", loss: TensorType
) -> Dict[str, TensorType]:
    """Post-process gradients after the backward pass, before the step."""
    # Clip grads if configured.
    return apply_grad_clipping(policy, optimizer, loss)
def extra_action_out_fn(
policy: Policy, input_dict, state_batches, model, action_dist
) -> Dict[str, TensorType]:
return {"q_values": policy.q_values}
# Assemble the torch R2D2 policy from the pieces defined above.
R2D2TorchPolicy = build_policy_class(
    name="R2D2TorchPolicy",
    framework="torch",
    loss_fn=r2d2_loss,
    get_default_config=lambda: ray.rllib.agents.dqn.r2d2.R2D2_DEFAULT_CONFIG,
    make_model_and_action_dist=build_r2d2_model_and_distribution,
    action_distribution_fn=get_distribution_inputs_and_class,
    stats_fn=build_q_stats,
    # n-step adjustment + priority computation on collected batches.
    postprocess_fn=postprocess_nstep_and_prio,
    optimizer_fn=adam_optimizer,
    extra_grad_process_fn=grad_process_and_td_error_fn,
    extra_learn_fetches_fn=concat_multi_gpu_td_errors,
    extra_action_out_fn=extra_action_out_fn,
    # Runs before the model is built.
    before_init=setup_early_mixins,
    # Runs after the model is built, before the loss is initialized.
    before_loss_init=before_loss_init,
    mixins=[
        TargetNetworkMixin,
        ComputeTDErrorMixin,
        LearningRateSchedule,
    ],
)
| 33.942249
| 88
| 0.663831
|
4a035d0261f2f38d229cacee9a81ac7ce19e5100
| 1,895
|
py
|
Python
|
CpuX64/disassembler_tool.py
|
robertmuth/Cwerg
|
fdf30b06c93b4620c0a45b448b6d92acb81c35f0
|
[
"Apache-2.0"
] | 171
|
2020-01-30T16:58:07.000Z
|
2022-03-27T22:12:17.000Z
|
CpuX64/disassembler_tool.py
|
robertmuth/Cwerg
|
fdf30b06c93b4620c0a45b448b6d92acb81c35f0
|
[
"Apache-2.0"
] | 14
|
2021-05-15T02:12:09.000Z
|
2022-03-16T04:16:18.000Z
|
CpuX64/disassembler_tool.py
|
robertmuth/Cwerg
|
fdf30b06c93b4620c0a45b448b6d92acb81c35f0
|
[
"Apache-2.0"
] | 5
|
2021-03-01T20:52:13.000Z
|
2022-03-07T06:35:03.000Z
|
#!/usr/bin/python3
import sys
from CpuX64 import symbolic
from CpuX64 import opcode_tab as x64
def disass(data):
    """Disassemble one instruction given as a list of byte values.

    Prints the instruction in objdump-compatible form, then in Cwerg's
    symbolic form with a per-operand breakdown, and finally verifies that
    re-assembling and re-parsing the symbolic form round-trips losslessly.
    """
    ins = x64.Disassemble(data)
    if ins is None:
        print(f"could not disassemble {x64.Hexify(data)}")
        return
    enum_name, ops_str = symbolic.InsSymbolizeObjdumpCompat(ins, False)
    print(f"{x64.Hexify(data)}", f"{ins.opcode.name}.{ins.opcode.variant} {' '.join(ops_str)}")
    enum_name, ops_str = symbolic.InsSymbolize(ins)
    print("    " + enum_name)
    # One line per operand: field kind, symbolic rendering, raw value.
    for f, o, o_str in zip(ins.opcode.fields, ins.operands, ops_str):
        if isinstance(f, x64.OK):
            f = f.name
        print(f"    {f:35s} {o_str:10} (0x{o:x})")
    print()
    # Round-trip check 1: re-assembling must reproduce the input bytes.
    data2 = x64.Assemble(ins)
    assert data == data2
    # Round-trip check 2: parsing the symbolic form must give the same operands.
    ins2 = symbolic.InsFromSymbolized(enum_name, ops_str)
    assert tuple(ins.operands) == tuple(
        ins2.operands), f"{ins.operands} vs {ins2.operands}"
def HexToData(s: str):
    """Parse a whitespace-separated string of hex byte values into a list of ints."""
    values = []
    for token in s.split():
        values.append(int(token, 16))
    return values
def batch():
    """Disassemble hex byte sequences read from stdin, one instruction per line.

    Lines may carry `#` comments; blank lines are skipped. Each decoded
    instruction is printed in symbolic form and round-tripped through the
    assembler and the symbolic parser as a self-check.
    """
    for line in sys.stdin:
        line = line.split("#")[0].strip()
        if not line.strip():
            continue
        data = HexToData(line.strip())
        ins = x64.Disassemble(data)
        # Bug fix: Disassemble may return None for undecodable input (disass()
        # above checks for this); testing only `ins.opcode` would raise
        # AttributeError instead of printing the diagnostic.
        if ins is None or ins.opcode is None:
            print(f"could not disassemble [{x64.Hexify(data)}]")
            continue
        enum_name, ops_str = symbolic.InsSymbolize(ins)
        print(f"{x64.Hexify(data)}", f"{ins.opcode.name}.{ins.opcode.variant} {' '.join(ops_str)}")
        # Round-trip checks: bytes and operands must survive re-encoding.
        data2 = x64.Assemble(ins)
        assert data == data2
        ins2 = symbolic.InsFromSymbolized(enum_name, ops_str)
        assert tuple(ins.operands) == tuple(
            ins2.operands), f"{ins.operands} vs {ins2.operands}"
if __name__ == "__main__":
    if len(sys.argv) > 1:
        if sys.argv[1] == "batch":
            # Read hex byte sequences from stdin.
            batch()
        else:
            # Treat the remaining args as comma-separated hex byte sequences,
            # e.g.: disassembler_tool.py "90, 48 89 e5"
            for seq in " ".join(sys.argv[1:]).split(","):
                disass(HexToData(seq))
| 30.564516
| 99
| 0.596834
|
4a035d8e012d130ec6aaab634805cb43d2658573
| 477
|
py
|
Python
|
api_client/python/grr_api_client/connectors/abstract.py
|
Dazbeni/grr
|
5b49a83eba2f84e346a2b50d154264c190a24f08
|
[
"Apache-2.0"
] | null | null | null |
api_client/python/grr_api_client/connectors/abstract.py
|
Dazbeni/grr
|
5b49a83eba2f84e346a2b50d154264c190a24f08
|
[
"Apache-2.0"
] | null | null | null |
api_client/python/grr_api_client/connectors/abstract.py
|
Dazbeni/grr
|
5b49a83eba2f84e346a2b50d154264c190a24f08
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""API connector base class definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
class Connector(object):
    """An abstract GRR connector class.

    Concrete connectors implement the actual transport used by the API
    client; this base class only declares the required interface.
    """

    @property
    def page_size(self):
        # Number of items fetched per page by paged API calls.
        raise NotImplementedError()

    def SendRequest(self, handler_name, args):
        # Sends a single request to the API handler `handler_name`.
        raise NotImplementedError()

    def SendStreamingRequest(self, handler_name, args):
        # Sends a request whose response is streamed back.
        raise NotImplementedError()
| 23.85
| 53
| 0.765199
|
4a035dff5b912edf0ee35f3c00b73593011d523f
| 268
|
py
|
Python
|
manage.py
|
default-007/news
|
91be10b77a95876e108110ef30dffe27080de242
|
[
"MIT"
] | null | null | null |
manage.py
|
default-007/news
|
91be10b77a95876e108110ef30dffe27080de242
|
[
"MIT"
] | 4
|
2021-06-08T21:25:01.000Z
|
2022-03-12T00:26:36.000Z
|
manage.py
|
default-007/news
|
91be10b77a95876e108110ef30dffe27080de242
|
[
"MIT"
] | null | null | null |
from app import create_app
from flask_script import Manager, Server

# Creating app instance in the 'development' configuration.
app = create_app('development')

manager = Manager(app)
# Register the development server command. (The original registered the
# same command twice; the redundant duplicate registration was removed.)
manager.add_command('server', Server)

if __name__ == '__main__':
    manager.run()
| 22.333333
| 39
| 0.768657
|
4a035e1a6d16c1479cc7a0e84028704500075900
| 243
|
py
|
Python
|
Python/variable_1.py
|
aaavinash85/Python
|
cd58606f6f01c9bdccffb58e0f73825958679125
|
[
"MIT"
] | null | null | null |
Python/variable_1.py
|
aaavinash85/Python
|
cd58606f6f01c9bdccffb58e0f73825958679125
|
[
"MIT"
] | null | null | null |
Python/variable_1.py
|
aaavinash85/Python
|
cd58606f6f01c9bdccffb58e0f73825958679125
|
[
"MIT"
] | null | null | null |
# Basic variable assignment and arithmetic demo.
a = 2  # assigning value 2 to variable a
print('a=', a)  # printing the value of a
b = 3
print('b=', b)
c = a + b
print('c=', c)
# type() is used to know the type of a value; e.g. here 'a' is of int type.
# (The original had a bare `type(a)` expression whose result was discarded —
# a no-op in a script — so only the printed form is kept.)
print(type(a))
| 30.375
| 56
| 0.572016
|
4a035f045a1d744da6b599dbfb2e775b61a63d5b
| 27,895
|
py
|
Python
|
leo/modes/pvwave.py
|
ATikhonov2/leo-editor
|
225aac990a9b2804aaa9dea29574d6e072e30474
|
[
"MIT"
] | 1,550
|
2015-01-14T16:30:37.000Z
|
2022-03-31T08:55:58.000Z
|
leo/modes/pvwave.py
|
ATikhonov2/leo-editor
|
225aac990a9b2804aaa9dea29574d6e072e30474
|
[
"MIT"
] | 2,009
|
2015-01-13T16:28:52.000Z
|
2022-03-31T18:21:48.000Z
|
leo/modes/pvwave.py
|
ATikhonov2/leo-editor
|
225aac990a9b2804aaa9dea29574d6e072e30474
|
[
"MIT"
] | 200
|
2015-01-05T15:07:41.000Z
|
2022-03-07T17:05:01.000Z
|
# Leo colorizer control file for pvwave mode.
# This file is in the public domain.
# Properties for pvwave mode.
properties = {
    # PV-WAVE uses ';' for end-of-line comments.
    "lineComment": ";",
}

# Attributes dict for pvwave_main ruleset.
pvwave_main_attributes_dict = {
    "default": "null",
    "digit_re": "",
    "escape": "",
    "highlight_digits": "true",
    # PV-WAVE keywords are case-insensitive.
    "ignore_case": "true",
    "no_word_sep": "",
}

# Dictionary of attributes dictionaries for pvwave mode.
attributesDictDict = {
    "pvwave_main": pvwave_main_attributes_dict,
}
# Keywords dict for pvwave_main ruleset.
pvwave_main_keywords_dict = {
"abs": "keyword1",
"acos": "keyword1",
"add_exec_on_select": "keyword1",
"addsysvar": "keyword1",
"addvar": "keyword1",
"affine": "keyword1",
"alog": "keyword1",
"alog10": "keyword1",
"and": "keyword3",
"asarr": "keyword1",
"asin": "keyword1",
"askeys": "keyword1",
"assoc": "keyword1",
"atan": "keyword1",
"avg": "keyword1",
"axis": "keyword1",
"bar": "keyword1",
"bar2d": "keyword1",
"bar3d": "keyword1",
"begin": "keyword2",
"beseli": "keyword1",
"beselj": "keyword1",
"besely": "keyword1",
"bilinear": "keyword1",
"bindgen": "keyword1",
"blob": "keyword1",
"blobcount": "keyword1",
"boundary": "keyword1",
"breakpoint": "keyword2",
"build_table": "keyword1",
"buildresourcefilename": "keyword1",
"bytarr": "keyword1",
"byte": "keyword1",
"byteorder": "keyword1",
"bytscl": "keyword1",
"c_edit": "keyword1",
"call_unix": "keyword1",
"case": "keyword2",
"cd": "keyword1",
"center_view": "keyword1",
"chebyshev": "keyword1",
"check_math": "keyword1",
"checkfile": "keyword1",
"cindgen": "keyword1",
"close": "keyword1",
"color_convert": "keyword1",
"color_edit": "keyword1",
"color_palette": "keyword1",
"common": "keyword2",
"compile": "keyword2",
"complex": "keyword1",
"complexarr": "keyword1",
"cone": "keyword1",
"congrid": "keyword1",
"conj": "keyword1",
"contour": "keyword1",
"contour2": "keyword1",
"contourfill": "keyword1",
"conv_from_rect": "keyword1",
"conv_to_rect": "keyword1",
"convert_coord": "keyword1",
"convol": "keyword1",
"correlate": "keyword1",
"cos": "keyword1",
"cosh": "keyword1",
"cosines": "keyword1",
"cprod": "keyword1",
"create_holidays": "keyword1",
"create_weekdends": "keyword1",
"crossp": "keyword1",
"cursor": "keyword1",
"curvatures": "keyword1",
"curvefit": "keyword1",
"cylinder": "keyword1",
"day_name": "keyword1",
"day_of_week": "keyword1",
"day_of_year": "keyword1",
"dblarr": "keyword1",
"dc_error_msg": "keyword1",
"dc_options": "keyword1",
"dc_read_24_bit": "keyword1",
"dc_read_8_bit": "keyword1",
"dc_read_container": "keyword1",
"dc_read_dib": "keyword1",
"dc_read_fixed": "keyword1",
"dc_read_free": "keyword1",
"dc_read_tiff": "keyword1",
"dc_scan_container": "keyword1",
"dc_write_24_bit": "keyword1",
"dc_write_8_bit": "keyword1",
"dc_write_dib": "keyword1",
"dc_write_fixed": "keyword1",
"dc_write_free": "keyword1",
"dc_write_tiff": "keyword1",
"dcindgen": "keyword1",
"dcomplex": "keyword1",
"dcomplexarr": "keyword1",
"declare": "keyword2",
"define_key": "keyword1",
"defroi": "keyword1",
"defsysv": "keyword1",
"del_file": "keyword1",
"delfunc": "keyword1",
"dellog": "keyword1",
"delproc": "keyword1",
"delstruct": "keyword1",
"delvar": "keyword1",
"demo": "keyword1",
"deriv": "keyword1",
"derivn": "keyword1",
"determ": "keyword1",
"device": "keyword1",
"diag": "keyword1",
"dicm_tag_info": "keyword1",
"digital_filter": "keyword1",
"dilate": "keyword1",
"dindgen": "keyword1",
"dist": "keyword1",
"dminit": "keyword1",
"do": "keyword2",
"doc_lib_unix": "keyword1",
"doc_library": "keyword1",
"double": "keyword1",
"drop_exec_on_select": "keyword1",
"dt_add": "keyword1",
"dt_addly": "keyword1",
"dt_compress": "keyword1",
"dt_duration": "keyword1",
"dt_print": "keyword1",
"dt_subly": "keyword1",
"dt_subtract": "keyword1",
"dt_to_sec": "keyword1",
"dt_to_str": "keyword1",
"dt_to_var": "keyword1",
"dtegn": "keyword1",
"else": "keyword2",
"empty": "keyword1",
"end": "keyword2",
"endcase": "keyword2",
"endelse": "keyword2",
"endfor": "keyword2",
"endif": "keyword2",
"endrepeat": "keyword2",
"endwhile": "keyword2",
"environment": "keyword1",
"eof": "keyword1",
"eq": "keyword3",
"erase": "keyword1",
"erode": "keyword1",
"errorf": "keyword1",
"errplot": "keyword1",
"euclidean": "keyword1",
"exec_on_select": "keyword1",
"execute": "keyword1",
"exit": "keyword2",
"exp": "keyword1",
"expand": "keyword1",
"expon": "keyword1",
"extrema": "keyword1",
"factor": "keyword1",
"fast_grid2": "keyword1",
"fast_grid3": "keyword1",
"fast_grid4": "keyword1",
"fft": "keyword1",
"filepath": "keyword1",
"findfile": "keyword1",
"findgen": "keyword1",
"finite": "keyword1",
"fix": "keyword1",
"float": "keyword1",
"fltarr": "keyword1",
"flush": "keyword1",
"for": "keyword2",
"free_lun": "keyword1",
"fstat": "keyword1",
"func": "keyword2",
"funct": "keyword1",
"function": "keyword2",
"gamma": "keyword1",
"gaussfit": "keyword1",
"gaussint": "keyword1",
"gcd": "keyword1",
"ge": "keyword3",
"get_kbrd": "keyword1",
"get_lun": "keyword1",
"get_named_color": "keyword1",
"getenv": "keyword1",
"getncerr": "keyword1",
"getncopts": "keyword1",
"getparam": "keyword1",
"goto": "keyword2",
"great_int": "keyword1",
"grid": "keyword1",
"grid_2d": "keyword1",
"grid_3d": "keyword1",
"grid_4d": "keyword1",
"grid_sphere": "keyword1",
"gridn": "keyword1",
"group_by": "keyword1",
"gt": "keyword3",
"hak": "keyword1",
"hanning": "keyword1",
"hdf_test": "keyword1",
"hdfgetsds": "keyword1",
"help": "keyword2",
"hilbert": "keyword1",
"hist_equal": "keyword1",
"hist_equal_ct": "keyword1",
"histn": "keyword1",
"histogram": "keyword1",
"hls": "keyword1",
"hsv": "keyword1",
"hsv_to_rgd": "keyword1",
"if": "keyword2",
"image_check": "keyword1",
"image_color_quant": "keyword1",
"image_cont": "keyword1",
"image_create": "keyword1",
"image_display": "keyword1",
"image_filetypes": "keyword1",
"image_query_file": "keyword1",
"image_read": "keyword1",
"image_write": "keyword1",
"imaginary": "keyword1",
"img_true8": "keyword1",
"index_and": "keyword1",
"index_conv": "keyword1",
"index_or": "keyword1",
"indgen": "keyword1",
"info": "keyword2",
"intarr": "keyword1",
"interpol": "keyword1",
"interpolate": "keyword1",
"intrp": "keyword1",
"invert": "keyword1",
"isaskey": "keyword1",
"ishft": "keyword1",
"jacobian": "keyword1",
"journal": "keyword2",
"jul_to_dt": "keyword1",
"keyword_set": "keyword1",
"lcm": "keyword1",
"le": "keyword3",
"leefilt": "keyword1",
"legend": "keyword1",
"lindgen": "keyword1",
"linknload": "keyword1",
"list": "keyword1",
"listarr": "keyword1",
"load_holidays": "keyword1",
"load_option": "keyword1",
"load_weekends": "keyword1",
"loadct": "keyword1",
"loadct_custom": "keyword1",
"loadresources": "keyword1",
"loadstrings": "keyword1",
"locals": "keyword2",
"lonarr": "keyword1",
"long": "keyword1",
"lt": "keyword3",
"lubksb": "keyword1",
"ludcmp": "keyword1",
"make_array": "keyword1",
"map": "keyword1",
"map_axes": "keyword1",
"map_contour": "keyword1",
"map_grid": "keyword1",
"map_plots": "keyword1",
"map_polyfill": "keyword1",
"map_proj": "keyword1",
"map_reverse": "keyword1",
"map_velovect": "keyword1",
"map_version": "keyword1",
"map_xyouts": "keyword1",
"max": "keyword1",
"median": "keyword1",
"mesh": "keyword1",
"message": "keyword1",
"min": "keyword1",
"mod": "keyword3",
"modifyct": "keyword1",
"molec": "keyword1",
"moment": "keyword1",
"month_name": "keyword1",
"movie": "keyword1",
"mprove": "keyword1",
"msword_cgm_setup": "keyword1",
"n_elements": "keyword1",
"n_params": "keyword1",
"n_tags": "keyword1",
"ne": "keyword3",
"nint": "keyword1",
"normals": "keyword1",
"not": "keyword3",
"null_processor": "keyword1",
"of": "keyword2",
"on_error": "keyword2",
"on_error_goto": "keyword2",
"on_ioerror": "keyword2",
"openr": "keyword1",
"openu": "keyword1",
"openw": "keyword1",
"oplot": "keyword1",
"oploterr": "keyword1",
"option_is_loaded": "keyword1",
"or": "keyword3",
"order_by": "keyword1",
"packimage": "keyword1",
"packtable": "keyword1",
"padit": "keyword1",
"palette": "keyword1",
"param_present": "keyword1",
"parsefilename": "keyword1",
"pie": "keyword1",
"pie_chart": "keyword1",
"plot": "keyword1",
"plot_field": "keyword1",
"plot_histogram": "keyword1",
"plot_io": "keyword1",
"plot_oi": "keyword1",
"plot_oo": "keyword1",
"plot_windrose": "keyword1",
"ploterr": "keyword1",
"plots": "keyword1",
"pm": "keyword1",
"pmf": "keyword1",
"point_lun": "keyword1",
"poly": "keyword1",
"poly_2d": "keyword1",
"poly_area": "keyword1",
"poly_c_conv": "keyword1",
"poly_count": "keyword1",
"poly_dev": "keyword1",
"poly_fit": "keyword1",
"poly_merge": "keyword1",
"poly_norm": "keyword1",
"poly_plot": "keyword1",
"poly_sphere": "keyword1",
"poly_surf": "keyword1",
"poly_trans": "keyword1",
"polyfill": "keyword1",
"polyfillv": "keyword1",
"polyfitw": "keyword1",
"polyshade": "keyword1",
"polywarp": "keyword1",
"popd": "keyword1",
"prime": "keyword1",
"print": "keyword1",
"printd": "keyword1",
"printf": "keyword1",
"pro": "keyword2",
"profile": "keyword1",
"profiles": "keyword1",
"prompt": "keyword1",
"pseudo": "keyword1",
"pushd": "keyword1",
"query_table": "keyword1",
"quit": "keyword2",
"randomn": "keyword1",
"randomu": "keyword1",
"rdpix": "keyword1",
"read": "keyword1",
"read_airs": "keyword1",
"read_xbm": "keyword1",
"readf": "keyword1",
"readu": "keyword1",
"rebin": "keyword1",
"reform": "keyword1",
"regress": "keyword1",
"rename": "keyword1",
"render": "keyword1",
"render24": "keyword1",
"repeat": "keyword2",
"replicate": "keyword1",
"replv": "keyword1",
"resamp": "keyword1",
"restore": "keyword2",
"retall": "keyword2",
"return": "keyword2",
"reverse": "keyword1",
"rgb_to_hsv": "keyword1",
"rm": "keyword1",
"rmf": "keyword1",
"roberts": "keyword1",
"rot": "keyword1",
"rot_int": "keyword1",
"rotate": "keyword1",
"same": "keyword1",
"save": "keyword2",
"scale3d": "keyword1",
"sec_to_dt": "keyword1",
"select_read_lun": "keyword1",
"set_plot": "keyword1",
"set_screen": "keyword1",
"set_shading": "keyword1",
"set_symbol": "keyword1",
"set_view3d": "keyword1",
"set_viewport": "keyword1",
"set_xy": "keyword1",
"setdemo": "keyword1",
"setenv": "keyword1",
"setimagesize": "keyword1",
"setlog": "keyword1",
"setncopts": "keyword1",
"setup_keys": "keyword1",
"sgn": "keyword1",
"shade_surf": "keyword1",
"shade_surf_irr": "keyword1",
"shade_volume": "keyword1",
"shif": "keyword1",
"shift": "keyword1",
"show3": "keyword1",
"show_options": "keyword1",
"sigma": "keyword1",
"sin": "keyword1",
"sindgen": "keyword1",
"sinh": "keyword1",
"size": "keyword1",
"skipf": "keyword1",
"slice": "keyword1",
"slice_vol": "keyword1",
"small_int": "keyword1",
"smooth": "keyword1",
"sobel": "keyword1",
"socket_accept": "keyword1",
"socket_close": "keyword1",
"socket_connect": "keyword1",
"socket_getport": "keyword1",
"socket_init": "keyword1",
"socket_read": "keyword1",
"socket_write": "keyword1",
"sort": "keyword1",
"sortn": "keyword1",
"spawn": "keyword1",
"sphere": "keyword1",
"spline": "keyword1",
"sqrt": "keyword1",
"stdev": "keyword1",
"stop": "keyword2",
"str_to_dt": "keyword1",
"strarr": "keyword1",
"strcompress": "keyword1",
"stretch": "keyword1",
"string": "keyword1",
"strjoin": "keyword1",
"strlen": "keyword1",
"strlookup": "keyword1",
"strlowcase": "keyword1",
"strmatch": "keyword1",
"strmessage": "keyword1",
"strmid": "keyword1",
"strpos": "keyword1",
"strput": "keyword1",
"strsplit": "keyword1",
"strsubst": "keyword1",
"strtrim": "keyword1",
"structref": "keyword1",
"strupcase": "keyword1",
"sum": "keyword1",
"surface": "keyword1",
"surface_fit": "keyword1",
"surfr": "keyword1",
"svbksb": "keyword1",
"svd": "keyword1",
"svdfit": "keyword1",
"systime": "keyword1",
"t3d": "keyword1",
"tag_names": "keyword1",
"tan": "keyword1",
"tanh": "keyword1",
"tek_color": "keyword1",
"tensor_add": "keyword1",
"tensor_div": "keyword1",
"tensor_eq": "keyword1",
"tensor_exp": "keyword1",
"tensor_ge": "keyword1",
"tensor_gt": "keyword1",
"tensor_le": "keyword1",
"tensor_lt": "keyword1",
"tensor_max": "keyword1",
"tensor_min": "keyword1",
"tensor_mod": "keyword1",
"tensor_mul": "keyword1",
"tensor_ne": "keyword1",
"tensor_sub": "keyword1",
"then": "keyword2",
"threed": "keyword1",
"today": "keyword1",
"total": "keyword1",
"tqli": "keyword1",
"transpose": "keyword1",
"tred2": "keyword1",
"tridag": "keyword1",
"tv": "keyword1",
"tvcrs": "keyword1",
"tvlct": "keyword1",
"tvrd": "keyword1",
"tvscl": "keyword1",
"tvsize": "keyword1",
"uniqn": "keyword1",
"unique": "keyword1",
"unix_listen": "keyword1",
"unix_reply": "keyword1",
"unload_option": "keyword1",
"upvar": "keyword1",
"usersym": "keyword1",
"usgs_names": "keyword1",
"value_length": "keyword1",
"var_match": "keyword1",
"var_to_dt": "keyword1",
"vector_field3": "keyword1",
"vel": "keyword1",
"velovect": "keyword1",
"viewer": "keyword1",
"vol_marker": "keyword1",
"vol_pad": "keyword1",
"vol_red": "keyword1",
"vol_trans": "keyword1",
"volume": "keyword1",
"vtkaddattribute": "keyword1",
"vtkaxes": "keyword1",
"vtkcamera": "keyword1",
"vtkclose": "keyword1",
"vtkcolorbar": "keyword1",
"vtkcolornames": "keyword1",
"vtkcommand": "keyword1",
"vtkerase": "keyword1",
"vtkformat": "keyword1",
"vtkgrid": "keyword1",
"vtkhedgehog": "keyword1",
"vtkinit": "keyword1",
"vtklight": "keyword1",
"vtkplots": "keyword1",
"vtkpolydata": "keyword1",
"vtkpolyformat": "keyword1",
"vtkpolyshade": "keyword1",
"vtkppmread": "keyword1",
"vtkppmwrite": "keyword1",
"vtkreadvtk": "keyword1",
"vtkrectilineargrid": "keyword1",
"vtkrenderwindow": "keyword1",
"vtkscatter": "keyword1",
"vtkslicevol": "keyword1",
"vtkstructuredgrid": "keyword1",
"vtkstructuredpoints": "keyword1",
"vtksurface": "keyword1",
"vtksurfgen": "keyword1",
"vtktext": "keyword1",
"vtktvrd": "keyword1",
"vtkunstructuredgrid": "keyword1",
"vtkwdelete": "keyword1",
"vtkwindow": "keyword1",
"vtkwritevrml": "keyword1",
"vtkwset": "keyword1",
"wait": "keyword1",
"wavedatamanager": "keyword1",
"waveserver": "keyword1",
"wcopy": "keyword1",
"wdelete": "keyword1",
"wganimatetool": "keyword3",
"wgcbartool": "keyword3",
"wgcttool": "keyword3",
"wgisosurftool": "keyword3",
"wgmovietool": "keyword3",
"wgsimagetool": "keyword3",
"wgslicetool": "keyword3",
"wgsurfacetool": "keyword3",
"wgtexttool": "keyword3",
"where": "keyword1",
"wherein": "keyword1",
"while": "keyword2",
"whow": "keyword1",
"window": "keyword1",
"wmenu": "keyword1",
"woaddbuttons": "keyword3",
"woaddmessage": "keyword3",
"woaddstatus": "keyword3",
"wobuttonbar": "keyword3",
"wocheckfile": "keyword3",
"wocolorbutton": "keyword3",
"wocolorconvert": "keyword3",
"wocolorgrid": "keyword3",
"wocolorwheel": "keyword3",
"woconfirmclose": "keyword3",
"wodialogstatus": "keyword3",
"wofontoptionmenu": "keyword3",
"wogenericdialog": "keyword3",
"wolabeledtext": "keyword3",
"womenubar": "keyword3",
"womessage": "keyword3",
"wosaveaspixmap": "keyword3",
"wosetcursor": "keyword3",
"wosetwindowtitle": "keyword3",
"wostatus": "keyword3",
"wovariableoptionmenu": "keyword3",
"wpaste": "keyword1",
"wprint": "keyword1",
"wread_dib": "keyword1",
"wread_meta": "keyword1",
"write_xbm": "keyword1",
"writeu": "keyword1",
"wset": "keyword1",
"wtaddcallback": "keyword3",
"wtaddhandler": "keyword3",
"wtcursor": "keyword3",
"wtget": "keyword3",
"wtpointer": "keyword3",
"wtset": "keyword3",
"wttimer": "keyword3",
"wwalert": "keyword3",
"wwalertpopdown": "keyword3",
"wwbuttonbox": "keyword3",
"wwcallback": "keyword3",
"wwcontrolsbox": "keyword3",
"wwdialog": "keyword3",
"wwdrawing": "keyword3",
"wwfileselection": "keyword3",
"wwgenericdialog": "keyword3",
"wwgetbutton": "keyword3",
"wwgetkey": "keyword3",
"wwgetposition": "keyword3",
"wwgetvalue": "keyword3",
"wwhandler": "keyword3",
"wwinit": "keyword3",
"wwlayout": "keyword3",
"wwlist": "keyword3",
"wwlistutils": "keyword3",
"wwloop": "keyword3",
"wwmainwindow": "keyword3",
"wwmenubar": "keyword3",
"wwmenuitem": "keyword3",
"wwmessage": "keyword3",
"wwmulticlickhandler": "keyword3",
"wwoptionmenu": "keyword3",
"wwpickfile": "keyword3",
"wwpopupmenu": "keyword3",
"wwpreview": "keyword3",
"wwpreviewutils": "keyword3",
"wwradiobox": "keyword3",
"wwresource": "keyword3",
"wwrite_dib": "keyword1",
"wwrite_meta": "keyword1",
"wwseparator": "keyword3",
"wwsetcursor": "keyword3",
"wwsetvalue": "keyword3",
"wwtable": "keyword3",
"wwtableutils": "keyword3",
"wwtext": "keyword3",
"wwtimer": "keyword3",
"wwtoolbox": "keyword3",
"wzanimate": "keyword3",
"wzcoloredit": "keyword3",
"wzcontour": "keyword3",
"wzexport": "keyword3",
"wzhistogram": "keyword3",
"wzimage": "keyword3",
"wzimport": "keyword3",
"wzmultiview": "keyword3",
"wzplot": "keyword3",
"wzpreview": "keyword3",
"wzsurface": "keyword3",
"wztable": "keyword3",
"wzvariable": "keyword3",
"xor": "keyword3",
"xyouts": "keyword1",
"zoom": "keyword1",
"zroots": "keyword1",
}
# Dictionary of keywords dictionaries for pvwave mode.
# Maps ruleset name to its {keyword: token-kind} table.
keywordsDictDict = {
    "pvwave_main": pvwave_main_keywords_dict,
}
# Rules for pvwave_main ruleset.

def pvwave_rule0(colorer, s, i):
    # Double-quoted string literal (single line, no escapes).
    return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

def pvwave_rule1(colorer, s, i):
    # Single-quoted string literal (single line, no escapes).
    return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

def pvwave_rule2(colorer, s, i):
    # ';' starts a comment that runs to end of line.
    return colorer.match_eol_span(s, i, kind="comment1", seq=";",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False)

# pvwave_rule3 .. pvwave_rule20: single-character operator tokens.

def pvwave_rule3(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="(",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule4(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=")",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule5(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="=",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule6(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="+",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule7(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="-",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule8(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="/",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule9(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="*",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule10(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="#",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule11(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=">",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule12(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="<",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule13(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="^",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule14(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="}",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule15(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="{",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule16(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=".",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule17(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=",",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule18(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="]",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule19(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="[",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule20(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=":",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

# pvwave_rule21 .. pvwave_rule24: special-prefix characters styled as labels.

def pvwave_rule21(colorer, s, i):
    return colorer.match_seq(s, i, kind="label", seq="$",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule22(colorer, s, i):
    return colorer.match_seq(s, i, kind="label", seq="&",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule23(colorer, s, i):
    return colorer.match_seq(s, i, kind="label", seq="@",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule24(colorer, s, i):
    return colorer.match_seq(s, i, kind="label", seq="!",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def pvwave_rule25(colorer, s, i):
    # Dispatch into the keyword table (pvwave_main_keywords_dict).
    return colorer.match_keywords(s, i)
# Rules dict for pvwave_main ruleset.
# Maps the character at the scan position to the list of rules to try there.
rulesDict1 = {
    "!": [pvwave_rule24],
    "\"": [pvwave_rule0],
    "#": [pvwave_rule10],
    "$": [pvwave_rule21],
    "&": [pvwave_rule22],
    "'": [pvwave_rule1],
    "(": [pvwave_rule3],
    ")": [pvwave_rule4],
    "*": [pvwave_rule9],
    "+": [pvwave_rule6],
    ",": [pvwave_rule17],
    "-": [pvwave_rule7],
    ".": [pvwave_rule16],
    "/": [pvwave_rule8],
    ":": [pvwave_rule20],
    ";": [pvwave_rule2],
    "<": [pvwave_rule12],
    "=": [pvwave_rule5],
    ">": [pvwave_rule11],
    # '@' is both a label prefix and may start a keyword candidate.
    "@": [pvwave_rule23, pvwave_rule25],
    "[": [pvwave_rule19],
    "]": [pvwave_rule18],
    "^": [pvwave_rule13],
    "{": [pvwave_rule15],
    "}": [pvwave_rule14],
}
# Every identifier character (digit, letter, underscore) dispatches to the
# keyword matcher; generate those 63 identical entries instead of listing them.
for _word_char in ("0123456789_"
                   "abcdefghijklmnopqrstuvwxyz"
                   "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    rulesDict1[_word_char] = [pvwave_rule25]
# x.rulesDictDict for pvwave mode.
# Maps ruleset name to its first-character rule dispatch table.
rulesDictDict = {
    "pvwave_main": rulesDict1,
}

# Import dict for pvwave mode (this mode imports no other rulesets).
importDict = {}
| 30.653846
| 88
| 0.576698
|
4a035f83b230f53c4c2bc7369b5417746f3c9f09
| 6,312
|
py
|
Python
|
model/SR/DistgSSR.py
|
ZhengyuLiang24/BasicLFSR
|
940be5424dd54ae91238a394fe889862d5bfe235
|
[
"MIT"
] | 43
|
2021-11-29T02:38:18.000Z
|
2022-03-16T07:29:54.000Z
|
model/SR/DistgSSR.py
|
ZhengyuLiang24/BasicLFSR
|
940be5424dd54ae91238a394fe889862d5bfe235
|
[
"MIT"
] | 3
|
2021-12-12T04:50:50.000Z
|
2022-01-21T11:04:18.000Z
|
model/SR/DistgSSR.py
|
ZhengyuLiang24/BasicLFSR
|
940be5424dd54ae91238a394fe889862d5bfe235
|
[
"MIT"
] | 10
|
2021-11-29T05:32:33.000Z
|
2022-02-28T01:33:19.000Z
|
'''
@Article{DistgLF,
author = {Wang, Yingqian and Wang, Longguang and Wu, Gaochang and Yang, Jungang and An, Wei and Yu, Jingyi and Guo, Yulan},
title = {Disentangling Light Fields for Super-Resolution and Disparity Estimation},
journal = {IEEE TPAMI},
year = {2022},
}
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class get_model(nn.Module):
    """DistgSSR network for light-field spatial super-resolution.

    Pipeline: bilinearly upscale the input as a global residual, convert the
    sub-aperture-image (SAI) array into macro-pixel (MacPI) layout, run the
    cascaded disentangling groups, convert back to SAI layout, and add the
    pixel-shuffle upsampled result onto the bilinear baseline.
    """

    def __init__(self, args):
        super(get_model, self).__init__()
        channels = 64
        n_group = 4
        n_block = 4
        self.angRes = args.angRes_in    # angular resolution (views per axis)
        self.factor = args.scale_factor  # spatial upscaling factor
        # Dilated by angRes so that, in MacPI layout, each tap of the 3x3
        # kernel samples the same view.
        self.init_conv = nn.Conv2d(1, channels, kernel_size=3, stride=1, dilation=self.angRes, padding=self.angRes, bias=False)
        self.disentg = CascadeDisentgGroup(n_group, n_block, self.angRes, channels)
        # 1x1 conv expands channels for PixelShuffle, then project to 1 channel.
        self.upsample = nn.Sequential(
            nn.Conv2d(channels, channels * self.factor ** 2, kernel_size=1, stride=1, padding=0),
            nn.PixelShuffle(self.factor),
            nn.Conv2d(channels, 1, kernel_size=1, stride=1, padding=0, bias=False))

    def forward(self, x, info=None):
        # Global residual branch: plain bilinear upscaling of the input.
        x_upscale = F.interpolate(x, scale_factor=self.factor, mode='bilinear', align_corners=False)
        x = SAI2MacPI(x, self.angRes)
        buffer = self.init_conv(x)
        buffer = self.disentg(buffer)
        buffer_SAI = MacPI2SAI(buffer, self.angRes)
        out = self.upsample(buffer_SAI) + x_upscale
        return out
class CascadeDisentgGroup(nn.Module):
    """A cascade of DisentgGroups plus a conv, wrapped in a residual skip."""

    def __init__(self, n_group, n_block, angRes, channels):
        super(CascadeDisentgGroup, self).__init__()
        self.n_group = n_group
        # Same attribute names as before so checkpoints remain loadable.
        self.Group = nn.Sequential(
            *[DisentgGroup(n_block, angRes, channels) for _ in range(n_group)])
        self.conv = nn.Conv2d(channels, channels, kernel_size=3, stride=1,
                              dilation=int(angRes), padding=int(angRes), bias=False)

    def forward(self, x):
        feat = x
        for group in self.Group:
            feat = group(feat)
        # Residual connection around the whole cascade.
        return self.conv(feat) + x
class DisentgGroup(nn.Module):
    """A sequence of DisentgBlocks plus a conv, wrapped in a residual skip."""

    def __init__(self, n_block, angRes, channels):
        super(DisentgGroup, self).__init__()
        self.n_block = n_block
        # Same attribute names as before so checkpoints remain loadable.
        self.Block = nn.Sequential(
            *[DisentgBlock(angRes, channels) for _ in range(n_block)])
        self.conv = nn.Conv2d(channels, channels, kernel_size=3, stride=1,
                              dilation=int(angRes), padding=int(angRes), bias=False)

    def forward(self, x):
        feat = x
        for block in self.Block:
            feat = block(feat)
        # Residual connection around the whole group.
        return self.conv(feat) + x
class DisentgBlock(nn.Module):
    """Disentangling block: extracts spatial, angular and EPI features
    from a macro-pixel image and fuses them back into `channels` maps.
    """

    def __init__(self, angRes, channels):
        super(DisentgBlock, self).__init__()
        # channel budget per branch (spatial keeps full width)
        SpaChannel, AngChannel, EpiChannel = channels, channels//4, channels//2
        # spatial branch: dilated convs act within each sub-aperture view
        self.SpaConv = nn.Sequential(
            nn.Conv2d(channels, SpaChannel, kernel_size=3, stride=1, dilation=int(angRes), padding=int(angRes), bias=False),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(SpaChannel, SpaChannel, kernel_size=3, stride=1, dilation=int(angRes), padding=int(angRes), bias=False),
            nn.LeakyReLU(0.1, inplace=True),
        )
        # angular branch: angRes x angRes conv collapses each macro-pixel,
        # then PixelShuffle restores the macro-pixel resolution
        self.AngConv = nn.Sequential(
            nn.Conv2d(channels, AngChannel, kernel_size=angRes, stride=angRes, padding=0, bias=False),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(AngChannel, angRes * angRes * AngChannel, kernel_size=1, stride=1, padding=0, bias=False),
            nn.LeakyReLU(0.1, inplace=True),
            nn.PixelShuffle(angRes),
        )
        # EPI branch: 1 x (angRes^2) kernel slides along one epipolar
        # direction; PixelShuffle1D restores the width afterwards
        self.EPIConv = nn.Sequential(
            nn.Conv2d(channels, EpiChannel, kernel_size=[1, angRes * angRes], stride=[1, angRes], padding=[0, angRes * (angRes - 1)//2], bias=False),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(EpiChannel, angRes * EpiChannel, kernel_size=1, stride=1, padding=0, bias=False),
            nn.LeakyReLU(0.1, inplace=True),
            PixelShuffle1D(angRes),
        )
        # 1x1 fusion of the concatenated branch outputs back to `channels`
        self.fuse = nn.Sequential(
            nn.Conv2d(SpaChannel + AngChannel + 2 * EpiChannel, channels, kernel_size=1, stride=1, padding=0, bias=False),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(channels, channels, kernel_size=3, stride=1, dilation=int(angRes), padding=int(angRes), bias=False),
        )

    def forward(self, x):
        feaSpa = self.SpaConv(x)
        feaAng = self.AngConv(x)
        feaEpiH = self.EPIConv(x)
        # vertical EPI features: transpose H/W, reuse the horizontal conv,
        # transpose back
        feaEpiV = self.EPIConv(x.permute(0, 1, 3, 2).contiguous()).permute(0, 1, 3, 2)
        buffer = torch.cat((feaSpa, feaAng, feaEpiH, feaEpiV), dim=1)
        buffer = self.fuse(buffer)
        # local residual connection
        return buffer + x
class PixelShuffle1D(nn.Module):
    """1D pixel shuffler.

    Upscales only the last dimension (W) of a tensor by folding part of
    the channel dimension into it.

    input:  x of size [b, factor*c, h, w]
    output: y of size [b, c, h, w*factor]
    """

    def __init__(self, factor):
        super(PixelShuffle1D, self).__init__()
        self.factor = factor

    def forward(self, x):
        batch, chan_f, height, width = x.shape
        chan = chan_f // self.factor
        # split channels into (factor, chan), move `factor` next to W ...
        folded = x.contiguous().view(batch, self.factor, chan, height, width)
        folded = folded.permute(0, 2, 3, 4, 1).contiguous()  # b, c, h, w, factor
        # ... and merge it into the width dimension
        return folded.view(batch, chan, height, width * self.factor)
def MacPI2SAI(x, angRes):
    """Rearrange a macro-pixel image (MacPI) into a sub-aperture image
    (SAI) array by gathering each angular view's strided samples.
    """
    view_rows = []
    for u in range(angRes):
        # all views of angular row u, concatenated along the width
        row = torch.cat([x[:, :, u::angRes, v::angRes] for v in range(angRes)], 3)
        view_rows.append(row)
    return torch.cat(view_rows, 2)
def SAI2MacPI(x, angRes):
    """Rearrange a sub-aperture image (SAI) array into a macro-pixel
    image (MacPI); inverse of MacPI2SAI.
    """
    b, c, hu, wv = x.shape
    h, w = hu // angRes, wv // angRes
    macro_rows = []
    for i in range(h):
        # samples at spatial position (i, j) across all views form one
        # macro-pixel row segment
        row = torch.cat([x[:, :, i::h, j::w] for j in range(w)], dim=3)
        macro_rows.append(row)
    return torch.cat(macro_rows, dim=2)
class get_loss(nn.Module):
    """L1 reconstruction loss wrapper used by the training framework."""

    def __init__(self, args):
        # `args` is accepted for framework API uniformity but unused here
        super(get_loss, self).__init__()
        self.criterion_Loss = torch.nn.L1Loss()

    def forward(self, SR, HR, criterion_data=[]):
        """Return the mean absolute error between SR and HR."""
        return self.criterion_Loss(SR, HR)
def weights_init(m):
    # Deliberate no-op: modules keep PyTorch's default initialization.
    # NOTE(review): presumably kept so a shared training loop can call
    # model.apply(weights_init) uniformly across models -- confirm.
    pass
| 36.485549
| 149
| 0.608999
|
4a035f8e64d316134d6ee52be37efb667f76d551
| 6,555
|
py
|
Python
|
scripts/train_bow_cnn_tf2.py
|
sbaete/pyapetnet
|
8bd972cf17c0e96a8e2112e887f7fd57720f6f98
|
[
"MIT"
] | 9
|
2020-08-24T15:57:00.000Z
|
2021-08-31T15:47:53.000Z
|
scripts/train_bow_cnn_tf2.py
|
sbaete/pyapetnet
|
8bd972cf17c0e96a8e2112e887f7fd57720f6f98
|
[
"MIT"
] | 4
|
2021-06-14T10:48:34.000Z
|
2022-01-14T21:09:06.000Z
|
scripts/train_bow_cnn_tf2.py
|
sbaete/pyapetnet
|
8bd972cf17c0e96a8e2112e887f7fd57720f6f98
|
[
"MIT"
] | 2
|
2020-10-03T19:43:47.000Z
|
2022-01-12T21:04:46.000Z
|
import sys
if not '..' in sys.path: sys.path.append('..')
import os
#------------------------------------------------------------------------------------------------
# parse the command line
from argparse import ArgumentParser
parser = ArgumentParser(description = 'Train APETNET')
parser.add_argument('--cfg_file', default = 'train_cfg.json', help = 'training config file')
args = parser.parse_args()
#------------------------------------------------------------------------------------------------
import pathlib
import numpy as np
import h5py
import json
import shutil
import random
import warnings
import tensorflow as tf
from glob import glob
from datetime import datetime
import tensorflow
from tensorflow.python.client import device_lib
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger, ReduceLROnPlateau
from tensorflow.keras.utils import model_to_dot
# NOTE(review): multi_gpu_model was removed in TF >= 2.4; this script
# presumably targets TF 2.0-2.3 -- confirm the pinned TF version.
from tensorflow.keras.utils import multi_gpu_model
from pyapetnet.generators import PatchSequence, petmr_brain_data_augmentation
from pyapetnet.models import apetnet, apetnet_vv5_onnx
from pyapetnet.losses import ssim_3d_loss, mix_ssim_3d_mae_loss
# fixed seed for reproducible patch sampling / shuffling (numpy side only)
np.random.seed(42)
# check if we have an X display
has_x_disp = os.getenv('DISPLAY') is not None
#------------------------------------------------------------------------------------------------
# read and process the config file
with open(args.cfg_file) as f:
  cfg = json.load(f)
# input parameters
n_epochs        = cfg['n_epochs']         # number of epochs to train (around 300 is reasonable)
steps_per_epoch = cfg['steps_per_epoch']  # training steps per epoch
batch_size      = cfg['batch_size']       # batch size in training
patch_size      = (cfg['patch_size'],)*3      # patch size for training batches
val_patch_size  = (cfg['val_patch_size'],)*3  # patch size for validation data
learning_rate   = cfg['learning_rate']    # learning rate
model_kwargs    = cfg['model_kwargs']
data_aug_kwargs = cfg['data_aug_kwargs']
output_suffix   = cfg['output_suffix']
masterlogdir    = cfg['masterlogdir']
internal_voxsize = cfg['internal_voxsize']*np.ones(3) # internal voxsize (mm)
loss = cfg['loss']
input_fnames  = []
target_fnames = []
val_input_fnames  = []
val_target_fnames = []
# get the training and validation names
# each entry lists the input volumes first and the target volume last
for train_files in cfg['training_files']:
  input_fnames.append(train_files[:-1])
  target_fnames.append(train_files[-1])
for vf in cfg['validation_files']:
  val_input_fnames.append(vf[:-1])
  val_target_fnames.append(vf[-1])
#shuffle the input list
rinds         = random.sample(range(len(input_fnames)),len(input_fnames))
input_fnames  = [input_fnames[x] for x in rinds]
target_fnames = [target_fnames[x] for x in rinds]
rvinds            = random.sample(range(len(val_input_fnames)),len(val_input_fnames))
val_input_fnames  = [val_input_fnames[x] for x in rvinds]
val_target_fnames = [val_target_fnames[x] for x in rvinds]
ps = PatchSequence(input_fnames, target_fnames = target_fnames, batch_size = batch_size,
                   patch_size = patch_size, data_aug_func = petmr_brain_data_augmentation,
                   data_aug_kwargs = data_aug_kwargs, random_flip = True,
                   internal_voxsize = internal_voxsize, preload_data = True)
val_ps = PatchSequence(val_input_fnames, target_fnames = val_target_fnames,
                       batch_size = batch_size, patch_size = val_patch_size,
                       internal_voxsize = internal_voxsize)
# for the validation we only use the first patch
validation_data = val_ps.get_input_vols_center_crop(val_patch_size + (1,), (0,0,0,0))
#-----------------------------------------------------------------------------------------------
# set up the log dir
pathlib.Path(masterlogdir).mkdir(exist_ok = True)
# timestamped subdirectory so repeated runs never overwrite each other
time_str   = str(datetime.now())[:-7].replace(' ','_').replace(':','_')
tmp_logdir = os.path.join(masterlogdir, time_str + '_' + output_suffix)
pathlib.Path(tmp_logdir).mkdir(exist_ok = True)
checkpoint_path   = os.path.join(tmp_logdir, 'cnn_bow_check')
output_model_file = os.path.join(tmp_logdir, 'trained_model')
# copy the input config file to the logdir
shutil.copyfile(args.cfg_file, os.path.join(tmp_logdir,'config.json'))
#-----------------------------------------------------------------------------------------------
# set up the model to train
n_gpus = len([x for x in device_lib.list_local_devices() if x.device_type == 'GPU'])
if n_gpus >= 2:
  # define not parallized model on CPU
  with tf.device('/cpu:0'):
    model = apetnet(**model_kwargs)
  parallel_model = multi_gpu_model(model, gpus = n_gpus, cpu_merge = False)
else:
  parallel_model = apetnet(**model_kwargs)
# map the loss name from the config file to the actual loss function
if loss == 'ssim':
  loss = ssim_3d_loss
elif loss == 'mix_ssim_mae':
  loss = mix_ssim_3d_mae_loss
metrics = [ssim_3d_loss, mix_ssim_3d_mae_loss, 'mse', 'mae']
parallel_model.compile(optimizer = Adam(lr = learning_rate), loss = loss, metrics = metrics)
#----------------------------------------------------------------------------------------------
# define the keras call backs
# keep only the best model (lowest validation loss) on disk
checkpoint = ModelCheckpoint(checkpoint_path,
                             monitor           = 'val_loss',
                             verbose           = 1,
                             save_best_only    = True,
                             save_weights_only = False,
                             mode              ='min')
csvlog = CSVLogger(os.path.join(tmp_logdir,'log.csv'))
# reduce learning rate by a factor of 2 if validation loss does not improve for 100 epochs
# (never below min_lr)
lr_reduce = ReduceLROnPlateau(monitor = 'val_loss', factor  = 0.5, patience = 100,
                              verbose = 1, min_lr = 1e-4)
#-----------------------------------------------------------------------------------------------
# train the model
history = parallel_model.fit(x                = ps,
                             epochs           = n_epochs,
                             steps_per_epoch  = steps_per_epoch,
                             verbose          = 1,
                             callbacks        = [checkpoint, csvlog, lr_reduce],
                             validation_data  = validation_data,
                             validation_steps = 1)
# keep both the best checkpoint (copied) and the final-epoch model
shutil.copytree(checkpoint_path, output_model_file)
parallel_model.save(output_model_file + '_last')
parallel_model.save(output_model_file + '_last.h5')
| 39.727273
| 98
| 0.606102
|
4a036176de22a478523c5910fc882d60f0ecd861
| 498
|
py
|
Python
|
blender/arm/logicnode/array_get.py
|
ValtoGameEngines/Armory
|
ad3d3c63e64e9225e62b414b7ec4dd9fb93fab32
|
[
"Zlib"
] | 1
|
2021-03-17T05:51:45.000Z
|
2021-03-17T05:51:45.000Z
|
blender/arm/logicnode/array_get.py
|
ValtoGameEngines/Armory
|
ad3d3c63e64e9225e62b414b7ec4dd9fb93fab32
|
[
"Zlib"
] | 1
|
2019-12-13T08:16:20.000Z
|
2019-12-13T08:16:20.000Z
|
blender/arm/logicnode/array_get.py
|
ValtoGameEngines/Armory
|
ad3d3c63e64e9225e62b414b7ec4dd9fb93fab32
|
[
"Zlib"
] | 1
|
2020-06-29T07:54:21.000Z
|
2020-06-29T07:54:21.000Z
|
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class ArrayGetNode(Node, ArmLogicTreeNode):
    '''Armory logic node that outputs the element of an array at a given index.'''
    bl_idname = 'LNArrayGetNode'
    bl_label = 'Array Get'
    bl_icon = 'QUESTION'

    def init(self, context):
        # Socket creation order defines the node's saved layout -- do not reorder.
        self.inputs.new('ArmNodeSocketArray', 'Array')
        self.inputs.new('NodeSocketInt', 'Index')
        self.outputs.new('NodeSocketShader', 'Value')

# register the node under the 'Array' category in the add-node menu
add_node(ArrayGetNode, category='Array')
| 27.666667
| 54
| 0.694779
|
4a03622b2261d9d680506e119ed06da7b9dce1f6
| 34,855
|
py
|
Python
|
stackgan/utils/datasets.py
|
gchochla/stackgan-pp
|
41ea73dd6ddf1aa85cb5901cbace17222f62719f
|
[
"MIT"
] | 3
|
2020-12-04T19:01:02.000Z
|
2021-11-11T02:28:15.000Z
|
stackgan/utils/datasets.py
|
gchochla/stackgan-pp
|
41ea73dd6ddf1aa85cb5901cbace17222f62719f
|
[
"MIT"
] | null | null | null |
stackgan/utils/datasets.py
|
gchochla/stackgan-pp
|
41ea73dd6ddf1aa85cb5901cbace17222f62719f
|
[
"MIT"
] | null | null | null |
"""Dataset."""
# pylint: disable=no-member
import pickle
import os
import glob
from PIL import Image
import pandas as pd
import torch
from torchvision.transforms import transforms
from stackgan.utils.conf import CACHE_DIR
class CUBDatasetEager(torch.utils.data.Dataset):
    """CUB dataset.

    Class for CUB Dataset with precomputed embeddings. Loads
    images at once during initialization. If your RAM cannot
    sustain such a load, use CUBDatasetLazy instead. If training
    dataset, mismatching image is also returned.

    Attributes:
        embeddings(torch.Tensor): embeddings of captions.
        images(list of PIL Images): images corresponding to each
            embedding (at the same index).
        class_ids(list): class of image and embeddings (at the
            same index).
        train(bool): whether this is the training dataset.
        synthetic_ids(dict): correspondence between
            real and synthetic IDs. Meant for use
            during testing.
        transform(torchvision Transform): transform applied to
            every PIL image (after boundind box).
    """

    def __init__(self, dataset_dir, image_dir, embedding_dir,
                 available_classes=None, train=None, use_bbox=True):
        """Init.

        Args:
            dataset_dir(str): root directory of dataset.
            image_dir(str): image directory w.r.t. dataset_dir.
            embedding_dir(str): embedding directory w.r.t
                dataset_dir.
            available_classes(str, optional): txt file to define
                restrict the classes used from the predefined
                train/test split, default=`None`.
            train(bool, optional): indicating whether training
                on this dataset. If not provided, it is determined
                by the embedding_dir name.
            use_bbox(bool): whether to crop using bboxes,
                default=`True`.
        """
        #########################################
        ########## parse pickle files ###########
        #########################################
        embeddings_fn = os.path.join(dataset_dir, embedding_dir,
                                     'char-CNN-RNN-embeddings.pickle')
        with open(embeddings_fn, 'rb') as emb_fp:
            # latin1 enc hack bcs pickle compatibility issue between python2 and 3
            embeddings = torch.tensor(pickle.load(emb_fp, encoding='latin1'))  # pylint: disable=not-callable
        class_ids_fn = os.path.join(dataset_dir, embedding_dir,
                                    'class_info.pickle')
        with open(class_ids_fn, 'rb') as cls_fp:
            # latin1 enc hack bcs pickle compatibility issue between python2 and 3
            class_ids = pickle.load(cls_fp, encoding='latin1')
        img_fns_fn = os.path.join(dataset_dir, embedding_dir,
                                  'filenames.pickle')
        with open(img_fns_fn, 'rb') as fns_fp:
            # latin1 enc hack bcs pickle compatibility issue between python2 and 3
            img_fns = pickle.load(fns_fp, encoding='latin1')
        ####################################################
        ####################################################
        if available_classes:  # if available_classes is set
            # keep only them
            # get class ids used in dataset
            with open(os.path.join(dataset_dir, available_classes), 'r') as avcls:
                available_class_ids = [int(line.strip().split('.')[0])
                                       for line in avcls.readlines()]
            idcs = [i for i, cls_id in enumerate(class_ids) if cls_id in available_class_ids]
            self.embeddings = embeddings[idcs]
            image_filenames = [img_fns[i] for i in idcs]
            self.class_ids = [cls_id for cls_id in class_ids if cls_id in available_class_ids]
        else:  # if available_classes is not set, keep them all
            self.embeddings = embeddings
            image_filenames = img_fns
            self.class_ids = class_ids
        # map (possibly sparse) real class ids to contiguous 0-based ids
        unique_ids = set(self.class_ids)
        self.synthetic_ids = dict(zip(unique_ids, range(len(unique_ids))))
        bboxes = _load_bboxes(dataset_dir)
        # crop to bbox, make 3 channels if grayscale
        if use_bbox:
            load_transform = transforms.Compose([
                transforms.Lambda(lambda x: _bbox_crop(*x)),
                transforms.Lambda(lambda x: transforms.Grayscale(3)(x) if _is_grayscale(x) else x),
                transforms.Resize(304)
            ])
        else:
            load_transform = transforms.Compose([
                # x[0] to get rid of bbox
                transforms.Lambda(lambda x: transforms.Grayscale(3)(x[0])
                                  if _is_grayscale(x[0]) else x[0]),
                transforms.Resize(304)
            ])
        # random augmentation applied per __getitem__ call
        self.transform = transforms.Compose([
            transforms.RandomRotation(2),
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(256),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
        ])
        # eagerly load every image into RAM
        self.images = []
        for img_fn in image_filenames:
            self.images.append(
                load_transform(
                    (Image.open(os.path.join(dataset_dir, image_dir, img_fn + '.jpg')),
                     bboxes[img_fn])
                )
            )
        if train is not None:
            self.train = train
        else:
            # if train is not set, `embedding_dir` should be embeddings_{train, test}
            self.train = (embedding_dir.split('_')[1] == 'train')

    def __len__(self):
        """Return len of dataset.

        Returns:
            Number of images in the dataset.
        """
        return len(self.images)

    def __getitem__(self, idx):
        """Returns an image, its embedding, and maybe
        a mismatching image.

        Retrieve an image, one of its embeddings and,
        if this is a training dataset, a mismatching image.
        Class ID is last returned value.

        Arguments:
            idx(int): index.

        Returns:
            An image as a torch.Tensor of size (3,256,256),
            if training a mismatching image and one of its
            embeddings, and its class id (synthetic if testing).
        """
        image = self.transform(self.images[idx])
        if not self.train:
            return image, self.synthetic_ids[self.class_ids[idx]]
        # each image has 10 captions; pick one at random
        rand_caption = torch.randint(10, (1,)).item()
        embedding = self.embeddings[idx, rand_caption]
        while True:
            # get an image from a different class (match-aware discr)
            # NOTE: loops forever if the dataset contains a single class
            mis_idx = torch.randint(len(self), (1,)).item()
            if self.class_ids[idx] != self.class_ids[mis_idx]:
                break
        mis_image = self.transform(self.images[mis_idx])
        return image, mis_image, embedding, self.synthetic_ids[self.class_ids[idx]]

    def embeddings_by_class(self):
        """Fetches the embeddings per class.

        Assumes `class_ids` is grouped, i.e. all instances of a class
        occupy contiguous indices (as produced by the pickle files).

        Bug fix: the previous implementation dropped the last element of
        the final class (it yielded `embeddings[prev:curr]` after the scan
        loop finished without a break) and could skip the final class
        entirely when the preceding class ended at index len-2.

        Yields:
            torch.Tensor with embeddings of size
            (#, 10, 1024) and the corresponding
            int synthetic ID.
        """
        prev = 0
        n_total = len(self.class_ids)
        while prev < n_total:
            curr_id = self.class_ids[prev]
            curr = prev + 1
            # advance to the first index of the next class (or the end)
            while curr < n_total and self.class_ids[curr] == curr_id:
                curr += 1
            # slicing keeps a leading batch dim even for 1-instance classes
            yield self.embeddings[prev:curr], self.synthetic_ids[curr_id]
            prev = curr
class CUBDatasetLazy(torch.utils.data.Dataset):
    """CUB dataset.

    Class for CUB Dataset with precomputed embeddings. Reads
    images constantly with PIL and doesn't load them at once.
    To load and keep them as an attribute, use CUBDatasetEager.
    If training dataset, mismatching image is also returned.

    Attributes:
        embeddings(torch.Tensor): embeddings of captions.
        image_filenames(list): filename of image corresponding
            to each embedding (at the same index).
        class_ids(list): class of image and embeddings (at the
            same index).
        dataset_dir(str): directory of data.
        image_dir(str): directory of actual images relative to
            dataset_dir.
        train(bool): whether this is the training dataset.
        synthetic_ids(dict): correspondence between
            real and synthetic IDs. Meant for use
            during testing.
        bboxes(dict): keys are the filenames
            of images and values the bounding box to
            retain a proper image to body ratio.
        transform(torchvision Transform): transform applied to every PIL image
            (as is read from image_dir).
    """

    def __init__(self, dataset_dir, image_dir, embedding_dir,
                 available_classes=None, train=None, use_bbox=True):
        """Init.

        Args:
            dataset_dir(str): root directory of dataset.
            image_dir(str): image directory w.r.t. dataset_dir.
            embedding_dir(str): embedding directory w.r.t
                dataset_dir.
            available_classes(str, optional): txt file to define
                restrict the classes used from the predefined
                train/test split, default=`None`.
            train(bool, optional): indicating whether training
                on this dataset. If not provided, it is determined
                by the embedding_dir name.
            use_bbox(bool): whether to crop using bboxes,
                default=`True`.
        """
        #########################################
        ########## parse pickle files ###########
        #########################################
        embeddings_fn = os.path.join(dataset_dir, embedding_dir,
                                     'char-CNN-RNN-embeddings.pickle')
        with open(embeddings_fn, 'rb') as emb_fp:
            # latin1 enc hack bcs pickle compatibility issue between python2 and 3
            embeddings = torch.tensor(pickle.load(emb_fp, encoding='latin1'))  # pylint: disable=not-callable
        class_ids_fn = os.path.join(dataset_dir, embedding_dir,
                                    'class_info.pickle')
        with open(class_ids_fn, 'rb') as cls_fp:
            # latin1 enc hack bcs pickle compatibility issue between python2 and 3
            class_ids = pickle.load(cls_fp, encoding='latin1')
        img_fns_fn = os.path.join(dataset_dir, embedding_dir,
                                  'filenames.pickle')
        with open(img_fns_fn, 'rb') as fns_fp:
            # latin1 enc hack bcs pickle compatibility issue between python2 and 3
            img_fns = pickle.load(fns_fp, encoding='latin1')
        ####################################################
        ####################################################
        if available_classes:  # if available_classes is set
            # keep only them
            # get class ids used in dataset
            with open(os.path.join(dataset_dir, available_classes), 'r') as avcls:
                available_class_ids = [int(line.strip().split('.')[0])
                                       for line in avcls.readlines()]
            idcs = [i for i, cls_id in enumerate(class_ids) if cls_id in available_class_ids]
            self.embeddings = embeddings[idcs]
            self.image_filenames = [img_fns[i] for i in idcs]
            self.class_ids = [cls_id for cls_id in class_ids if cls_id in available_class_ids]
        else:  # if available_classes is not set, keep them all
            self.embeddings = embeddings
            self.image_filenames = img_fns
            self.class_ids = class_ids
        # map (possibly sparse) real class ids to contiguous 0-based ids
        unique_ids = set(self.class_ids)
        self.synthetic_ids = dict(zip(unique_ids, range(len(unique_ids))))
        self.dataset_dir = dataset_dir
        self.image_dir = image_dir
        if train is not None:
            self.train = train
        else:
            # if train is not set, `embedding_dir` should be embeddings_{train, test}
            self.train = (embedding_dir.split('_')[1] == 'train')
        self.bboxes = _load_bboxes(dataset_dir)
        common_postfix = transforms.Compose([
            transforms.Resize(304),
            transforms.RandomRotation(2),
            transforms.RandomCrop(256),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
        ])
        if use_bbox:
            self.transform = transforms.Compose([
                transforms.Lambda(lambda x: _bbox_crop(*x)),
                transforms.Lambda(lambda x: transforms.Grayscale(3)(x) if _is_grayscale(x) else x),
                common_postfix,
            ])
        else:
            self.transform = transforms.Compose([
                # x[0] to get rid of bbox
                transforms.Lambda(lambda x: transforms.Grayscale(3)(x[0])
                                  if _is_grayscale(x[0]) else x[0]),
                common_postfix,
            ])

    def __len__(self):
        """Return len of dataset.

        Returns:
            Number of images in the dataset.
        """
        return len(self.image_filenames)

    def __getitem__(self, idx):
        """Returns an image, its embedding, and maybe
        a mismatching image.

        Retrieve an image, if this is a training dataset
        one of its embeddings and a mismatching image.
        Class ID is last returned value.

        Arguments:
            idx(int): index.

        Returns:
            An image as a torch.Tensor of size (3,256,256),
            if training a mismatching image and one of its
            embeddings, and its class id.
        """
        image_fn = self.image_filenames[idx]
        image = Image.open(os.path.join(self.dataset_dir, self.image_dir, image_fn + '.jpg'))
        if not self.train:
            return (self.transform((image, self.bboxes[image_fn])),
                    self.synthetic_ids[self.class_ids[idx]])
        # each image has 10 captions; pick one at random
        rand_caption = torch.randint(10, (1,)).item()
        embedding = self.embeddings[idx, rand_caption]
        while True:
            # get an image from a different class (match-aware discr)
            # NOTE: loops forever if the dataset contains a single class
            mis_idx = torch.randint(len(self), (1,)).item()
            if self.class_ids[idx] != self.class_ids[mis_idx]:
                break
        mis_image_fn = self.image_filenames[mis_idx]
        mis_image = Image.open(os.path.join(self.dataset_dir, self.image_dir,
                                            mis_image_fn + '.jpg'))
        return (self.transform((image, self.bboxes[image_fn])),
                self.transform((mis_image, self.bboxes[mis_image_fn])),
                embedding, self.synthetic_ids[self.class_ids[idx]])

    def embeddings_by_class(self):
        """Fetches the embeddings per class.

        Assumes `class_ids` is grouped, i.e. all instances of a class
        occupy contiguous indices (as produced by the pickle files).

        Bug fix: the previous implementation dropped the last element of
        the final class (it yielded `embeddings[prev:curr]` after the scan
        loop finished without a break) and could skip the final class
        entirely when the preceding class ended at index len-2.

        Yields:
            torch.Tensor with embeddings of size
            (#, 10, 1024) and the corresponding
            int synthetic ID.
        """
        prev = 0
        n_total = len(self.class_ids)
        while prev < n_total:
            curr_id = self.class_ids[prev]
            curr = prev + 1
            # advance to the first index of the next class (or the end)
            while curr < n_total and self.class_ids[curr] == curr_id:
                curr += 1
            # slicing keeps a leading batch dim even for 1-instance classes
            yield self.embeddings[prev:curr], self.synthetic_ids[curr_id]
            prev = curr
class CUBTextDatasetLazy(torch.utils.data.Dataset):
    """CUB dataset with actual text instead of
    representations.

    Class for CUB Dataset with captions. Reads images constantly
    with PIL and doesn't load them at once.

    Attributes:
        filenames(list): filename of images and texts alike.
        class_ids(list): class of image and embeddings (at the
            same index).
        dataset_dir(str): directory of data.
        image_dir(str): directory of actual images relative to
            dataset_dir.
        text_dir(str): directory of texts relative to
            dataset_dir.
        train(bool): whether this is the training dataset.
        synthetic_ids(dict): correspondence between
            real and synthetic IDs. Meant for use
            during testing.
        bboxes(dict): keys are the filenames
            of images and values the bounding box to
            retain a proper image to body ratio.
        transform(torchvision Transform): transform applied
            to every PIL image (as is read from image_dir).
    """

    def __init__(self, dataset_dir, image_dir, text_dir, available_classes, use_bbox=True):
        """Init.

        Args:
            dataset_dir(str): root directory of dataset.
            image_dir(str): image directory w.r.t. dataset_dir.
            text_dir(str): embedding directory w.r.t
                dataset_dir.
            available_classes(str): txt file to define
                restrict the classes used from the predefined
                train/test split.
            use_bbox(bool): whether to crop using bboxes,
                default=`True`.
        """
        self.dataset_dir = dataset_dir
        self.image_dir = image_dir
        self.text_dir = text_dir
        # get class ids used in dataset
        with open(os.path.join(dataset_dir, available_classes), 'r') as avcls:
            available_class_ids = [int(line.strip().split('.')[0])
                                   for line in avcls.readlines()]
        class_ids = []
        filenames = []
        # walk the class directories (named like "001.Some_Class") and keep
        # only those listed in available_classes
        for cls_dir in os.listdir(os.path.join(dataset_dir, image_dir)):
            if int(cls_dir.split('.')[0]) in available_class_ids:
                cls_imgs = os.listdir(os.path.join(dataset_dir, image_dir, cls_dir))
                filenames.extend(
                    [os.path.join(os.path.join(cls_dir, os.path.splitext(cls_img)[0]))
                     for cls_img in cls_imgs]
                )
                class_ids.extend([int(cls_dir.split('.')[0])] * len(cls_imgs))
        self.class_ids = class_ids
        self.filenames = filenames
        # map (possibly sparse) real class ids to contiguous 0-based ids
        unique_ids = set(self.class_ids)
        self.synthetic_ids = dict(zip(unique_ids, range(len(unique_ids))))
        # NOTE(review): assumes the split file is named like "train*.txt";
        # a path prefix would break this -- confirm against callers.
        self.train = available_classes.startswith('train')
        self.bboxes = _load_bboxes(dataset_dir)
        common_postfix = transforms.Compose([
            transforms.Resize(304),
            transforms.RandomRotation(2),
            transforms.RandomCrop(256),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
        ])
        if use_bbox:
            self.transform = transforms.Compose([
                transforms.Lambda(lambda x: _bbox_crop(*x)),
                transforms.Lambda(lambda x: transforms.Grayscale(3)(x) if _is_grayscale(x) else x),
                common_postfix,
            ])
        else:
            self.transform = transforms.Compose([
                # x[0] to get rid of bbox
                transforms.Lambda(lambda x: transforms.Grayscale(3)(x[0])
                                  if _is_grayscale(x[0]) else x[0]),
                common_postfix,
            ])

    def __len__(self):
        """Returns len of dataset.

        Returns:
            Number of images in the dataset.
        """
        return len(self.filenames)

    def __getitem__(self, idx):
        """Returns an image, its caption, and a mismatching image.

        Retrieves an image, one of its captions and,
        if this is a training dataset, a mismatching image.
        Class ID is last returned value.

        Arguments:
            idx(int): index.

        Returns:
            An image as a torch.Tensor of size (3,256,256),
            if training a mismatching image and one of its
            captions of size (70, 201), and its class id.
        """
        image_fn = self.filenames[idx]
        image = Image.open(os.path.join(self.dataset_dir, self.image_dir,
                                        image_fn + '.jpg'))
        if not self.train:
            return (self.transform((image, self.bboxes[image_fn])),
                    self.synthetic_ids[self.class_ids[idx]])
        # each image has 10 caption lines in its .txt file; pick one
        rand_caption = torch.randint(10, (1,)).item()
        with open(os.path.join(self.dataset_dir, self.text_dir, image_fn + '.txt'), 'r') as txt_fp:
            text = txt_fp.readlines()[rand_caption].strip().lower()
        text = process_text(text)
        while True:
            # get an image from a different class (match-aware discr)
            # NOTE: loops forever if the dataset contains a single class
            mis_idx = torch.randint(len(self), (1,)).item()
            if self.class_ids[idx] != self.class_ids[mis_idx]:
                break
        mis_image_fn = self.filenames[mis_idx]
        mis_image = Image.open(os.path.join(self.dataset_dir, self.image_dir,
                                            mis_image_fn + '.jpg'))
        return (self.transform((image, self.bboxes[image_fn])),
                self.transform((mis_image, self.bboxes[mis_image_fn])),
                text, self.synthetic_ids[self.class_ids[idx]])

    def captions_by_class(self):
        """Fetches the captions per class.

        Iteration order over classes follows set ordering and is therefore
        not deterministic across runs.

        Yields:
            torch.Tensor with captions of size
            (#, 10, 70, 201) and the corresponding
            int synthetic ID.
        """
        cls_dirs = {os.path.split(filename)[0] for filename in self.filenames}
        for cls_dir in cls_dirs:
            texts = []
            for filename in os.listdir(os.path.join(self.dataset_dir, self.text_dir, cls_dir)):
                if not filename.endswith('.txt'):
                    continue
                with open(os.path.join(self.dataset_dir, self.text_dir, cls_dir, filename), 'r') \
                        as txt_fp:
                    texts.append(torch.stack([process_text(text.strip().lower())
                                              for text in txt_fp.readlines()]))
            yield torch.stack(texts), self.synthetic_ids[int(cls_dir.split('.')[0])]
def process_text(text):
    """One-hot encode a caption string as a character sequence.

    Each of the first 201 characters of `text` is one-hot encoded over a
    70-symbol alphabet (lower-case letters, digits, punctuation, space);
    shorter captions are zero-padded on the right.

    The previous docstring claimed the input was "an np array of ascii
    codes"; the function actually takes a string (callers pass
    `line.strip().lower()`).

    Args:
        text(str): caption text; expected to be lower-cased already.

    Raises:
        ValueError: if `text` contains a character outside the alphabet
            (propagated from `str.index`).

    Returns:
        torch.Tensor of size (70, 201).
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:\'"/\\|_@#$%^&*~`+-=<>()[]{} '
    ohvec = torch.zeros(len(alphabet), 201)
    # row = alphabet index of the character, column = its position in text
    ohvec[[alphabet.index(tok) for tok in text][:201], range(min(len(text), 201))] = 1
    return ohvec
def _load_bboxes(dataset_dir):
"""Retrieve bounding boxes.
Builds a dictionary of {filename: bounding_box} pairs
to crop images to 75% body to image ratio.
Args:
dataset_dir: Dataset directory.
Returns:
A dictionary of
class_dir/image_filename (without extension): list of bbox coordinates
key-value pairs.
"""
# id 4xcoords
df_bboxes = pd.read_csv(os.path.join(dataset_dir, 'bounding_boxes.txt'),
delim_whitespace=True, header=None).astype(int)
# id fn
df_corr_fns = pd.read_csv(os.path.join(dataset_dir, 'images.txt'),
delim_whitespace=True, header=None)
bbox_dict = {
os.path.splitext(df_corr_fns.iloc[i][1])[0]: df_bboxes.iloc[i][1:].tolist()
for i in range(len(df_bboxes))
}
return bbox_dict
def _bbox_crop(image, bbox):
"""Crop PIL.Image according to bbox.
Args:
image(PIL.Image): image to crop
bbox(iterable): iterable with 4 elements.
Returns:
Cropped image.
"""
width, height = image.size
ratio = int(max(bbox[2], bbox[3]) * 0.75)
center_x = int((2 * bbox[0] + bbox[2]) / 2)
center_y = int((2 * bbox[1] + bbox[3]) / 2)
y_low = max(0, center_y - ratio)
y_high = min(height, center_y + ratio)
x_low = max(0, center_x - ratio)
x_high = min(width, center_x + ratio)
image = image.crop([x_low, y_low, x_high, y_high])
return image
def _is_grayscale(image):
"""Return if image is grayscale.
Assert if image only has 1 channel.
Args:
image(PIL.Image): image to check.
Returns:
bool indicating whether image is grayscale.
"""
try:
# channel==1 is 2nd channel
image.getchannel(1)
return False
except ValueError:
return True
class SyntheticDataset(torch.utils.data.Dataset):
    """Dataset for synthetic samples.

    Dataset to store and retrieve synthetic samples rather than
    holding them all in RAM. Only cares for the samples it
    was used to store. Stores with sequential filenames akin
    to indices for trivial indexing.

    Attributes:
        dataset_dir(str): dataset directory.
        n_samples(int): number of samples.
        sample_key(str): key of dictionary used to store
            and retrieve samples.
        label_key(str): key of dictionary used to store
            and retrieve corresponding labels.
        fn_template(str): formattable filename for each
            different sample.
    """

    def __init__(self, dataset_dir=None):
        """Init.

        Args:
            dataset_dir(str, optional): directory of dataset,
                default=directory 'dataset' under the
                invisible-to-git cache directory specified
                in configuration file.
        """
        if dataset_dir is None:
            dataset_dir = os.path.join(CACHE_DIR, 'dataset')
        if not os.path.exists(dataset_dir):
            os.makedirs(dataset_dir)
        self.dataset_dir = dataset_dir
        self.n_samples = 0
        self.sample_key = 'sample'
        self.label_key = 'label'
        self.fn_template = os.path.join(dataset_dir, 'sample_{}.pt')

    @classmethod
    def existing(cls, dataset_dir=None):
        """Init from existing directory.

        Args:
            dataset_dir(str, optional): directory of dataset,
                default=directory 'dataset' under the
                invisible-to-git cache directory specified
                in configuration file.
        """
        obj = cls(dataset_dir)
        # assumes the directory contains only sample files
        obj.n_samples = len(os.listdir(obj.dataset_dir))
        return obj

    def _getitem(self, idx):
        """__getitem__ but only for ints.

        Args:
            idx(int): index.

        Raises:
            IndexError: if idx is out of range.

        Returns:
            torch.Tensors sample and label.
        """
        if idx < - len(self) or idx >= len(self):
            raise IndexError('Index {} out of range'.format(idx))
        if idx < 0:
            idx += len(self)
        # saved as cpu tensors
        sample_dict = torch.load(self.fn_template.format(idx))
        return sample_dict[self.sample_key], sample_dict[self.label_key]

    def __getitem__(self, idx):
        """Loads and returns a sample and its label.

        Args:
            idx(int|slice|torch.Tensor|list): index/indices
                of sample(s).

        Raises:
            IndexError: on empty selections or unhandled index types.

        Returns:
            torch.Tensors sample(s) and label(s); batched (stacked along
            a new leading dim) for slice/list/1-D tensor indices.
        """
        if torch.is_tensor(idx):
            if idx.ndim == 0:
                idx = idx.item()
            else:
                idx = list(idx.numpy())
        if isinstance(idx, int):
            return self._getitem(idx)
        if isinstance(idx, slice):
            # normalize the slice to a list of indices so slice and list
            # share one gather loop (previously duplicated verbatim)
            idx = list(range(*idx.indices(len(self))))
        if isinstance(idx, list):
            samples, labels = [], []
            for i in idx:
                sample, label = self._getitem(i)
                samples.append(sample)
                labels.append(label)
            if not samples:
                raise IndexError('No elements corresponding to {}'.format(idx))
            return torch.stack(samples), torch.stack(labels)
        raise IndexError('Unhandled index type')

    def __len__(self):
        """Returns number of stored samples."""
        return self.n_samples

    def save_pairs(self, samples, label):
        """Saves sample-label pairs.

        Saves pairs of samples and their corresponding label
        (assumed to be the same for all samples, thus only an
        integer is expected) with a filename specified by the
        template and order of receival.

        Args:
            samples(torch.tensor): batch of samples.
            label(int): their corresponding label.
        """
        if not torch.is_tensor(label):
            label = torch.tensor(label, dtype=torch.long)  # pylint: disable=not-callable
        samples = samples.cpu()
        label = label.cpu()
        sample_dict = {self.label_key: label}
        for i in range(samples.size(0)):
            sample_dict[self.sample_key] = samples[i]
            torch.save(sample_dict, self.fn_template.format(self.n_samples))
            self.n_samples += 1
class SyntheticImageDataset(torch.utils.data.Dataset):
    """Dataset for synthetic images.

    Dataset to store and retrieve synthetic images with
    JPEG compression rather than holding them all in RAM.
    Only cares for the images it was used to store.
    Stores with sequential filenames akin to indices for
    trivial indexing.

    Attributes:
        dataset_dir(str): dataset directory.
        n_samples(int): number of samples.
        fn_template(tuple of strs): formattable filename for
            each different image. Tuple because 1st dim uniquely
            identifies an image, 2nd dim encodes label.
        save_transform(torchvision Transform): tensor transform
            to save as image.
        load_transform(torchvision Transform): image transform
            to load as tensor (& data augmentation).
    """
    def __init__(self, dataset_dir=None):
        """Init.

        Args:
            dataset_dir(str, optional): directory of dataset,
                default=directory 'dataset' under the
                invisible-to-git cache directory specified
                in configuration file.
        """
        if dataset_dir is None:
            dataset_dir = os.path.join(CACHE_DIR, 'dataset')
        if not os.path.exists(dataset_dir):
            os.makedirs(dataset_dir)
        self.dataset_dir = dataset_dir
        self.n_samples = 0
        # NOTE: this is a 2-tuple on purpose: ('<dir>/image_{}_', '{}.jpg').
        # The first part is formatted with the sample index, the second
        # with the label, so the label is recoverable from the filename.
        self.fn_template = os.path.join(dataset_dir, 'image_{}_'), '{}.jpg'
        # Normalize(mean=-1, std=2) maps a [-1, 1] tensor back to [0, 1]
        # before converting to a PIL image for saving.
        self.save_transform = transforms.Compose([
            transforms.Normalize(mean=(-1, -1, -1), std=(2, 2, 2)),
            transforms.ToPILImage(),
        ])
        # Loading applies data augmentation (rotation, crop, flip) and
        # maps pixel values back into [-1, 1].
        self.load_transform = transforms.Compose([
            transforms.Resize(304),
            transforms.RandomRotation(2),
            transforms.RandomCrop(256),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])
    @classmethod
    def existing(cls, dataset_dir=None):
        """Init from existing directory.

        NOTE(review): assumes every file in the directory is a stored
        image; foreign files would inflate n_samples.

        Args:
            dataset_dir(str, optional): directory of dataset,
                default=directory 'dataset' under the
                invisible-to-git cache directory specified
                in configuration file.
        """
        obj = cls(dataset_dir)
        obj.n_samples = len(os.listdir(obj.dataset_dir))
        return obj
    def _getitem(self, idx):
        """__getitem__ but only for ints.

        Args:
            idx(int): index, negative indices allowed.

        Returns:
            torch.Tensors image and label.

        Raises:
            IndexError: if idx is out of range.
        """
        if idx < - len(self) or idx >= len(self):
            raise IndexError('Index {} out of range'.format(idx))
        if idx < 0:
            idx += len(self)
        # saved as cpu tensors
        image_fn = glob.glob(self.fn_template[0].format(idx) + '*')[0] # certain there is only 1
        image = self.load_transform(Image.open(image_fn))
        # The label is encoded in the filename suffix: image_<idx>_<label>.jpg
        return image, int(image_fn.split('_')[-1].split('.')[0])
    def __getitem__(self, idx):
        """Loads and returns a sample and its label.

        Args:
            idx(int|slice|torch.Tensor|list): index/indices
                of sample(s).

        Returns:
            torch.Tensors image(s) and label(s).
        """
        if torch.is_tensor(idx):
            if idx.ndim == 0:
                idx = idx.item()
            else:
                idx = list(idx.numpy())
        if isinstance(idx, int):
            return self._getitem(idx)
        if isinstance(idx, slice):
            # slice (for kNN etc)
            samples, labels = [], []
            for i in range(*idx.indices(len(self))):
                sample, label = self._getitem(i)
                samples.append(sample)
                labels.append(label)
            if not samples:
                raise IndexError('No elements corresponding to {}'.format(idx))
            return torch.stack(samples), torch.stack(labels)
        if isinstance(idx, list):
            samples, labels = [], []
            for i in idx:
                sample, label = self._getitem(i)
                samples.append(sample)
                labels.append(label)
            if not samples:
                raise IndexError('No elements corresponding to {}'.format(idx))
            return torch.stack(samples), torch.stack(labels)
        raise IndexError('Unhandled index type')
    def __len__(self):
        """Returns number of stored images."""
        return self.n_samples
    def save_pairs(self, samples, label):
        """Saves sample-label pairs.

        Saves pairs of images and their corresponding label
        (assumed to be the same for all samples, thus only an
        integer is expected) with a filename specified by the
        template and order of receival.

        Args:
            samples(torch.tensor): batch of samples.
            label(int): their corresponding label.
        """
        if not torch.is_tensor(label):
            label = torch.tensor(label, dtype=torch.long)  # pylint: disable=not-callable
        samples = samples.cpu()
        label = label.cpu()
        # NOTE(review): relies on a 0-dim long tensor formatting as a bare
        # integer inside '{}.jpg' so _getitem can parse it back — confirm
        # against the installed torch version.
        image_fn_template = self.fn_template[0] + self.fn_template[1].format(label)
        for i in range(samples.size(0)):
            self.save_transform(samples[i]).save(image_fn_template.format(self.n_samples))
            self.n_samples += 1
| 35.530071
| 109
| 0.57306
|
4a036293f8d7690bdbf7853c34a27d555b209f8c
| 24
|
py
|
Python
|
stampman/services/sendgrid/__init__.py
|
thunderboltsid/stampman
|
a360672df9b0ccbaa9f5f3d25a61470a18fe5a7a
|
[
"MIT"
] | 1
|
2016-12-02T19:24:20.000Z
|
2016-12-02T19:24:20.000Z
|
stampman/services/sendgrid/__init__.py
|
thunderboltsid/stampman
|
a360672df9b0ccbaa9f5f3d25a61470a18fe5a7a
|
[
"MIT"
] | null | null | null |
stampman/services/sendgrid/__init__.py
|
thunderboltsid/stampman
|
a360672df9b0ccbaa9f5f3d25a61470a18fe5a7a
|
[
"MIT"
] | null | null | null |
from .sendgrid import *
| 12
| 23
| 0.75
|
4a0363051802eb0877686c2da6223dc2d759fb44
| 9,351
|
py
|
Python
|
src/models/Old/modelBase.py
|
sfu-natlang/HMM-Aligner
|
ca6bb678749b8d8af07b9e56762013aa43d2cabe
|
[
"MIT"
] | 11
|
2017-06-24T20:17:59.000Z
|
2022-01-05T14:08:09.000Z
|
src/models/Old/modelBase.py
|
sfu-natlang/HMM-Aligner
|
ca6bb678749b8d8af07b9e56762013aa43d2cabe
|
[
"MIT"
] | 2
|
2017-06-15T18:10:43.000Z
|
2017-07-05T18:44:54.000Z
|
src/models/Old/modelBase.py
|
sfu-natlang/HMM-Aligner
|
ca6bb678749b8d8af07b9e56762013aa43d2cabe
|
[
"MIT"
] | 4
|
2018-02-11T22:54:59.000Z
|
2020-05-05T13:43:56.000Z
|
# -*- coding: utf-8 -*-
#
# Base model of HMM Aligner
# Simon Fraser University
# NLP Lab
#
# This is the base model for all models. It is recommanded that one uses an
# AlignmentModelBase as the parent class of their own model. The base model
# here provides the function to export and load existing models.
#
import gzip
import os
import cPickle as pickle
from collections import defaultdict
from loggers import logging
__version__ = "0.4a"
# This is a private module for transmitting test results. Please ignore.
class DummyTask():
    """No-op stand-in used when the optional progress module is absent.

    Matches the interface of progress.Task but does nothing.
    """
    def __init__(self, taskName="Untitled", serial="XXXX"):
        """Accept and ignore the task name and serial."""
        pass

    def progress(self, msg):
        """Discard the progress message."""
        pass
try:
from progress import Task
except ImportError:
Task = DummyTask
class AlignmentModelBase():
    """Base class for HMM-aligner models.

    Provides model export/import (pickle, optionally gzipped) and shared
    initialisation helpers for word-alignment count tables. Subclasses
    declare which attributes to persist via self.modelComponents and
    implement decodeSentence().
    """
    def __init__(self):
        '''
        self.modelComponents contains all of the names of the variables
        that one wishes to save in the model. It is vital that one calls
        __init__ here at the end of their own __init__ as it would try to
        automatically load the model specified in self._savedModelFile.
        One should always include a logger in one's own model. Otherwise a
        blank one will be provided here.
        One should also always come up with a unique name for their model.
        It will be marked in the saved model files to prevent accidentally
        loading the wrong model. It should be saved in self.modelName.
        Optionally, when there is a self.supportedVersion list and
        self.version str, the loader will only load the files with
        supported versions.
        '''
        # Only fill in defaults the subclass has not already provided.
        if "logger" not in vars(self):
            self.logger = logging.getLogger('MODEL')
        if "modelComponents" not in vars(self):
            self.modelComponents = []
        if "_savedModelFile" not in vars(self):
            self._savedModelFile = ""
        return
    def loadModel(self, fileName=None, force=False):
        """Load persisted components from a pickle file into this instance.

        The stream layout is: model name, model version, then one pickle
        per entry of self.modelComponents, in order — it must match the
        layout written by saveModel.

        Args:
            fileName: path to the model file; '.pklz' files are read
                gzip-compressed. Defaults to self._savedModelFile.
            force: if True, mismatched model name/version only warns
                instead of raising.

        Raises:
            RuntimeError: on malformed files, name/version mismatch
                (unless force), or unknown component names.
        """
        if fileName is None:
            fileName = self._savedModelFile
        if fileName == "":
            self.logger.warning("Destination not specified, model will not" +
                                " be loaded")
            return
        self.logger.info("Loading model from " + fileName)
        fileName = os.path.expanduser(fileName)
        if fileName.endswith("pklz"):
            pklFile = gzip.open(fileName, 'rb')
        else:
            pklFile = open(fileName, 'rb')
        modelName = pickle.load(pklFile)
        modelVersion = pickle.load(pklFile)
        if not isinstance(modelName, str) or not isinstance(modelVersion, str):
            raise RuntimeError("Incorrect model file format")
        msg = modelName + " v" + modelVersion
        self.logger.info("model identified as: " + msg)
        entity = vars(self)
        # check model name and version
        if "modelName" in entity:
            if modelName != self.modelName:
                if force:
                    self.logger.warning("Current model requires file for " +
                                        self.modelName + " model, while the" +
                                        " model of the model file is " +
                                        modelName)
                    self.logger.warning("Under current setting, will force" +
                                        "load. Good luck.")
                else:
                    raise RuntimeError("Current model requires file for " +
                                       self.modelName + " model, while the" +
                                       " model of the model file is " +
                                       modelName)
        if "supportedVersion" in entity:
            if modelVersion not in self.supportedVersion:
                if force:
                    self.logger.warning(
                        "Unsupported version of model file")
                    self.logger.warning(
                        "Current setting will force load. Good luck.")
                else:
                    raise RuntimeError("Unsupported version of model file")
        # load components
        # Each pickle.load consumes the next object in the stream; order
        # therefore follows self.modelComponents exactly.
        for componentName in self.modelComponents:
            if componentName not in entity:
                raise RuntimeError("object " + componentName +
                                   " doesn't exist in this class")
            entity[componentName] = pickle.load(pklFile)
        pklFile.close()
        self.logger.info("Model loaded")
        return
    def saveModel(self, fileName=""):
        """Persist model name, version and all modelComponents to a file.

        '.pklz' destinations are gzip-compressed; a missing extension gets
        '.pkl' appended. defaultdict components are trimmed of zero-valued
        entries in place before dumping.

        Args:
            fileName: destination path; empty string only warns.
        """
        if fileName == "":
            self.logger.warning("Destination not specified, model will not" +
                                " be saved")
            return
        entity = vars(self)
        if fileName.endswith("pklz"):
            output = gzip.open(fileName, 'wb')
        elif fileName.endswith("pkl"):
            output = open(fileName, 'wb')
        else:
            fileName = fileName + ".pkl"
            output = open(fileName, 'wb')
        self.logger.info("Saving model to " + fileName)
        # dump model name and version
        if "modelName" in vars(self):
            pickle.dump(self.modelName, output)
        else:
            pickle.dump("Unspecified Model", output)
        if "version" in vars(self):
            pickle.dump(self.version, output)
        else:
            pickle.dump("???", output)
        # dump components
        for componentName in self.modelComponents:
            if componentName not in entity:
                raise RuntimeError("object in _savedModelFile doesn't exist")
            # Remove zero valued entires from defaultdict
            if isinstance(entity[componentName], defaultdict):
                self.logger.info("component: " + componentName +
                                 ", size before trim: " +
                                 str(len(entity[componentName])))
                emptyKeys =\
                    [key for key in entity[componentName]
                     if entity[componentName][key] == 0]
                for key in emptyKeys:
                    del entity[componentName][key]
                self.logger.info("component: " + componentName +
                                 ", size after trim: " +
                                 str(len(entity[componentName])))
            pickle.dump(entity[componentName], output)
        output.close()
        self.logger.info("Model saved")
        return
    def initialiseBiwordCount(self, dataset, index=0):
        """Build word/word-pair counts and uniform translation table t.

        Args:
            dataset: iterable of items whose first two elements are the
                source sentence f and target sentence e; each word is a
                sequence indexed by `index` to pick the surface form/tag.
            index: which element of each word tuple to count on.
        """
        # We don't use .clear() here for reusability of models.
        # Sometimes one would need one or more of the following parts for other
        # Purposes. We wouldn't want to accidentally clear them up.
        self.t = defaultdict(float)
        self.f_count = defaultdict(int)
        self.e_count = defaultdict(int)
        self.fe_count = defaultdict(int)
        for item in dataset:
            f, e = item[0:2]
            for f_i in f:
                self.f_count[f_i[index]] += 1
                for e_j in e:
                    self.fe_count[(f_i[index], e_j[index])] += 1
            for e_j in e:
                self.e_count[e_j[index]] += 1
        # Uniform initialisation of the translation probabilities.
        initialValue = 1.0 / len(self.f_count)
        for key in self.fe_count:
            self.t[key] = initialValue
        return
    def initialiseAlignTypeDist(self, dataset, loadTypeDist={}):
        """Estimate the empirical alignment-type distribution.

        Populates self.typeList (index -> type), self.typeIndex
        (type -> index) and self.typeDist (index -> probability).

        Args:
            dataset: iterable of (f, e, alignment) triples where alignment
                holds (f_i, e_i, type) entries.
            loadTypeDist: manual overrides applied after estimation.
                NOTE(review): mutable default argument — shared across
                calls if ever mutated; it is only read here.
        """
        typeDist = defaultdict(float)
        typeTotalCount = 0
        for (f, e, alignment) in dataset:
            # Initialise total_f_e_type count
            for (f_i, e_i, typ) in alignment:
                typeDist[typ] += 1
                typeTotalCount += 1
        # Calculate alignment type distribution
        for typ in typeDist:
            typeDist[typ] /= typeTotalCount
        # Manually override alignment type distribution
        for typ in loadTypeDist:
            typeDist[typ] = loadTypeDist[typ]
        # Create typeIndex and typeList
        self.typeList = []
        self.typeIndex = {}
        for typ in typeDist:
            self.typeList.append(typ)
            self.typeIndex[typ] = len(self.typeList) - 1
        self.typeDist = []
        for h in range(len(self.typeList)):
            self.typeDist.append(typeDist[self.typeList[h]])
        return
    def calculateS(self, dataset, fe_count, index=0):
        """Compute s(type | f, e): per word pair, the alignment-type ratios.

        Args:
            dataset: iterable of (f, e, alignment) triples; alignment
                entries carry 1-based word positions.
            fe_count: counts of (f, e) word pairs used as denominators.
            index: which element of each word tuple identifies the word.

        Returns:
            defaultdict mapping (f, e) -> list of per-type proportions.
        """
        total_f_e_type = defaultdict(float)
        for (f, e, alignment) in dataset:
            # Initialise total_f_e_type count
            for (f_i, e_i, typ) in alignment:
                # Alignment positions are 1-based; sentences are 0-based.
                fWord = f[f_i - 1]
                eWord = e[e_i - 1]
                total_f_e_type[(fWord[index],
                                eWord[index],
                                self.typeIndex[typ])] += 1
        s = defaultdict(list)
        for key in fe_count:
            s[key] = [0.0 for h in range(len(self.typeIndex))]
        for f, e, t in total_f_e_type:
            s[(f, e)][t] = total_f_e_type[(f, e, t)] / fe_count[(f, e)]
        return s
    def decode(self, dataset):
        """Decode every sentence in dataset via the subclass's decodeSentence.

        Args:
            dataset: iterable of sentences.

        Returns:
            list of per-sentence alignments.
        """
        self.logger.info("Start decoding")
        self.logger.info("Testing size: " + str(len(dataset)))
        result = []
        for sentence in dataset:
            # decodeSentence is provided by the concrete model subclass.
            sentenceAlignment = self.decodeSentence(sentence)
            result.append(sentenceAlignment)
        self.logger.info("Decoding Complete")
        return result
| 37.25498
| 79
| 0.55331
|
4a0363594a7d54203631566552d38c1d1c351947
| 9,621
|
py
|
Python
|
torch_utils/ops/bias_act.py
|
holman57/Lafite
|
9e5981a666cd2dcd3ff2a7f38229d6b8678ce6bb
|
[
"MIT"
] | 45
|
2022-03-10T23:49:44.000Z
|
2022-03-31T21:47:45.000Z
|
torch_utils/ops/bias_act.py
|
holman57/Lafite
|
9e5981a666cd2dcd3ff2a7f38229d6b8678ce6bb
|
[
"MIT"
] | 7
|
2022-03-13T15:13:18.000Z
|
2022-03-31T16:57:38.000Z
|
torch_utils/ops/bias_act.py
|
holman57/Lafite
|
9e5981a666cd2dcd3ff2a7f38229d6b8678ce6bb
|
[
"MIT"
] | 8
|
2022-03-10T23:49:29.000Z
|
2022-03-31T18:20:17.000Z
|
"""Custom PyTorch ops for efficient bias and activation."""
import os
import warnings
import numpy as np
import torch
import dnnlib
import traceback
from .. import custom_ops
from .. import misc
#----------------------------------------------------------------------------
# Registry of supported activations. Each entry records:
#   func:         reference (non-CUDA) implementation,
#   def_alpha:    default shape parameter for the activation,
#   def_gain:     default output scaling,
#   cuda_idx:     kernel index inside the CUDA plugin,
#   ref:          which tensor ('x' input / 'y' output) backward needs,
#   has_2nd_grad: whether a second-order gradient path exists.
activation_funcs = {
    'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
    'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
    'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
    'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
    'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
    'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
    'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
    'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
    'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
}
#----------------------------------------------------------------------------
# Lazy-initialisation state for the compiled CUDA plugin.
_inited = False
_plugin = None
# Shared sentinel passed to the kernel in place of absent tensors.
_null_tensor = torch.empty([0])
def _init():
    """Lazily compile/load the CUDA plugin for bias_act.

    The first call attempts to build the custom op; subsequent calls reuse
    the cached result. On build failure a warning is emitted and the module
    falls back to the slow reference implementation.

    Returns:
        bool: True if the CUDA plugin is available.
    """
    global _inited, _plugin
    if not _inited:
        _inited = True
        sources = ['bias_act.cpp', 'bias_act.cu']
        sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
        try:
            _plugin = custom_ops.get_plugin('bias_act_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; build failures remain non-fatal.
            warnings.warn('Failed to build CUDA kernels for bias_act. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
    return _plugin is not None
#----------------------------------------------------------------------------
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
    r"""Fused bias and activation function.
    Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
    and scales the result by `gain`. Each of the steps is optional. In most cases,
    the fused op is considerably more efficient than performing the same calculation
    using standard PyTorch ops. It supports first and second order gradients,
    but not third order gradients.
    Args:
        x: Input activation tensor. Can be of any shape.
        b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
            as `x`. The shape must be known, and it must match the dimension of `x`
            corresponding to `dim`.
        dim: The dimension in `x` corresponding to the elements of `b`.
            The value of `dim` is ignored if `b` is not specified.
        act: Name of the activation function to evaluate, or `"linear"` to disable.
            Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
            See `activation_funcs` for a full list. `None` is not allowed.
        alpha: Shape parameter for the activation function, or `None` to use the default.
        gain: Scaling factor for the output tensor, or `None` to use default.
            See `activation_funcs` for the default scaling of each activation function.
            If unsure, consider specifying 1.
        clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable
            the clamping (default).
        impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
    Returns:
        Tensor of the same shape and datatype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    # Use the fused CUDA kernel only when requested, the input lives on a
    # GPU, and the plugin built successfully; otherwise fall back to the
    # pure-PyTorch reference path.
    if impl == 'cuda' and x.device.type == 'cuda' and _init():
        return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
    return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
#----------------------------------------------------------------------------
@misc.profiled_function
def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Slow reference implementation of `bias_act()` using standard PyTorch ops.

    (Docstring previously said "TensorFlow ops" — this file uses PyTorch.)

    Args mirror bias_act(); see there for details.

    Returns:
        Tensor of the same shape and datatype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    # Normalize all scalar parameters to float once, up front. The original
    # re-converted alpha and gain with float() a second time further down —
    # dead redundancy removed.
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)

    # Add bias.
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.ndim == 1
        assert 0 <= dim < x.ndim
        assert b.shape[0] == x.shape[dim]
        # Broadcast the 1D bias along every axis except `dim`.
        x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)])

    # Evaluate activation function.
    x = spec.func(x, alpha=alpha)

    # Scale by gain.
    if gain != 1:
        x = x * gain

    # Clamp.
    if clamp >= 0:
        x = x.clamp(-clamp, clamp)  # pylint: disable=invalid-unary-operand-type
    return x
#----------------------------------------------------------------------------
# Cache of generated autograd Function classes, keyed by the op parameters,
# so each (dim, act, alpha, gain, clamp) combination is specialised once.
_bias_act_cuda_cache = dict()
def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Fast CUDA implementation of `bias_act()` using custom ops.

    Returns a torch.autograd.Function subclass specialised for the given
    parameters; call its .apply(x, b).
    """
    # Parse arguments.
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)
    # Lookup from cache.
    key = (dim, act, alpha, gain, clamp)
    if key in _bias_act_cuda_cache:
        return _bias_act_cuda_cache[key]
    # Forward op.
    class BiasActCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, b): # pylint: disable=arguments-differ
            # Preserve channels-last layout when the input already uses it.
            ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format
            x = x.contiguous(memory_format=ctx.memory_format)
            b = b.contiguous() if b is not None else _null_tensor
            y = x
            # Skip the kernel entirely when the whole op is a no-op.
            if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
                y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
            # Save only the tensors the backward pass actually needs,
            # per the activation's `ref` / `has_2nd_grad` flags.
            ctx.save_for_backward(
                x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                y if 'y' in spec.ref else _null_tensor)
            return y
        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            dy = dy.contiguous(memory_format=ctx.memory_format)
            x, b, y = ctx.saved_tensors
            dx = None
            db = None
            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                dx = dy
                if act != 'linear' or gain != 1 or clamp >= 0:
                    dx = BiasActCudaGrad.apply(dy, x, b, y)
            if ctx.needs_input_grad[1]:
                # Bias gradient: reduce over every axis except `dim`.
                db = dx.sum([i for i in range(dx.ndim) if i != dim])
            return dx, db
    # Backward op.
    class BiasActCudaGrad(torch.autograd.Function):
        @staticmethod
        def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
            ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format
            # Mode 1 of the plugin computes the first-order gradient.
            dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward(
                dy if spec.has_2nd_grad else _null_tensor,
                x, b, y)
            return dx
        @staticmethod
        def backward(ctx, d_dx): # pylint: disable=arguments-differ
            d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
            dy, x, b, y = ctx.saved_tensors
            d_dy = None
            d_x = None
            d_b = None
            d_y = None
            if ctx.needs_input_grad[0]:
                d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
            if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
                # Mode 2 of the plugin computes the second-order gradient.
                d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
            if spec.has_2nd_grad and ctx.needs_input_grad[2]:
                d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
            return d_dy, d_x, d_b, d_y
    # Add to cache.
    _bias_act_cuda_cache[key] = BiasActCuda
    return BiasActCuda
#----------------------------------------------------------------------------
| 46.703883
| 185
| 0.574057
|
4a03639b5466783e066b9c8667da62c70d690ea2
| 5,172
|
py
|
Python
|
Projects/PlantMaker/archive/20100818-nopydev/main.py
|
fredmorcos/attic
|
0da3b94aa525df59ddc977c32cb71c243ffd0dbd
|
[
"Unlicense"
] | 2
|
2021-01-24T09:00:51.000Z
|
2022-01-23T20:52:17.000Z
|
Projects/PlantMaker/archive/20100818-nopydev/main.py
|
fredmorcos/attic
|
0da3b94aa525df59ddc977c32cb71c243ffd0dbd
|
[
"Unlicense"
] | 6
|
2020-02-29T01:59:03.000Z
|
2022-02-15T10:25:40.000Z
|
Projects/PlantMaker/archive/20100818-nopydev/main.py
|
fredmorcos/attic
|
0da3b94aa525df59ddc977c32cb71c243ffd0dbd
|
[
"Unlicense"
] | 1
|
2019-03-22T14:41:21.000Z
|
2019-03-22T14:41:21.000Z
|
#!/usr/bin/python
"""
This module provides the main entry to the program. Provides helper functions
to create, edit and run Plant instances, Recipe instances, OrderList instances
as well as the scheduler.
commands provides a dict of tuples that map from an str representation to either
a help str or to a function pointer, it is used to generalize resolution of both
cases when calling the program with command line arguments.
"""
import sys
from extra import *
from order import *
from plant import *
from scheduler import *
from os import path
from xml.dom import minidom
from constraint import BacktrackingSolver,\
RecursiveBacktrackingSolver, MinConflictsSolver
def plantFileExists(plantName):
    """
    Return the Plant XML filename built from str plantName, raising an
    Exception when no such file exists under the 'plants' directory.
    """
    filename = path.join("plants", plantName + ".xml")
    if path.exists(filename):
        return filename
    raise Exception("Plant doesn't exist")
def plantFileNoExists(plantName):
    """
    Return the Plant XML filename built from str plantName, raising an
    Exception when a file by that name already exists.
    """
    filename = path.join("plants", plantName + ".xml")
    if path.exists(filename):
        raise Exception("Plant already exists")
    return filename
def orderListExists(orderListName):
    """
    Constructs the OrderList filename from str orderListName and returns it
    if the file exists; if it doesn't, an Exception is thrown.
    """
    orderListFilename = path.join("orders", orderListName + ".xml")
    if not path.exists(orderListFilename):
        # Bug fix: the message previously said "already exists" although
        # this branch means the order list is MISSING.
        raise Exception("Order list doesn't exist")
    return orderListFilename
def orderListNoExists(orderListName):
    """
    Return the OrderList XML filename built from str orderListName,
    raising an Exception when a file by that name already exists.
    """
    filename = path.join("orders", orderListName + ".xml")
    if path.exists(filename):
        raise Exception("Order list already exists")
    return filename
def createPlant(args):
    """
    Creates a new, empty Plant XML file named after args[0].
    """
    filename = plantFileNoExists(args[0])
    Plant().toXmlFile(filename)
def showPlant(args):
    """
    Print the XML description of a Plant from plantName.

    Args:
        args: list whose first element is the plant name.
    """
    # Python 2 print statement; pretty-prints the plant's XML DOM tree.
    print Plant.fromXmlFile(plantFileExists(args[0])).toXml().toprettyxml()
def addMachine(args):
    """
    Adds a Machine to a Plant with plantName.

    Args:
        args: [plant-name, machine-name, machine-quantity,
               machine-delay, machine-can-unhook]
    """
    plantName = args[0]
    machineName = args[1]
    machineQuantity = args[2]
    machineDelay = args[3]
    machineCanUnhook = args[4]
    # Bug fix: keep the resolved filename so the modified plant can be
    # written back. Previously the final save referenced an undefined
    # name `plantFilename`, raising NameError at runtime.
    plantFilename = plantFileExists(plantName)
    plant = Plant.fromXmlFile(plantFilename)
    plant.addMachine(Machine(name = machineName, quantity = int(machineQuantity),
        minDelay = int(machineDelay), canUnhook = strToBool(machineCanUnhook)))
    plant.toXmlFile(plantFilename)
def createOrderList(args):
    """
    Creates a new, empty OrderList file named after args[0].
    """
    filename = orderListNoExists(args[0])
    OrderList().toXmlFile(filename)
def addOrder(args):
    """
    Adds an Order along with its Recipe to an OrderList.

    Interactively prompts for the processing time of each machine of the
    plant to build the order's recipe.

    Args:
        args: [order-list-name, plant-name, order-id, order-deadline]
    """
    orderListName = args[0]
    plantName = args[1]
    orderId = args[2]
    orderDeadline = args[3]
    plant = Plant.fromXmlFile(plantFileExists(plantName))
    order = Order(id = int(orderId), deadline = int(orderDeadline))
    recipe = Recipe()
    for m in plant.machines:
        # Python 2 print statement with trailing comma: prompt on one line.
        print "Time for", m.name + ":",
        # NOTE(review): Python 2 input() evaluates the typed expression;
        # raw_input() would be safer for untrusted terminals.
        time = int(input())
        recipe.addRecipe(m.name, time)
    order.recipe = recipe
    orderListFilename = orderListExists(orderListName)
    orderList = OrderList.fromXmlFile(orderListFilename)
    orderList.addOrder(order)
    orderList.toXmlFile(orderListFilename)
def schedule(args):
    """
    Runs the Scheduler with the OrderList from orderListName on the Plant
    with plantName.

    Args:
        args: [plant-name, order-list-name, optional solver index
               (0 = BacktrackingSolver, 1 = RecursiveBacktrackingSolver,
               anything else/missing = MinConflictsSolver)]
    """
    plantName = args[0]
    orderListName = args[1]
    try:
        solverIndex = int(args[2])
    except (IndexError, ValueError):
        # Missing or non-numeric solver argument: fall back to MinConflicts.
        # (Previously a bare `except:` that also swallowed e.g. SystemExit.)
        solverIndex = -1
    plant = Plant.fromXmlFile(plantFileExists(plantName))
    orderList = OrderList.fromXmlFile(orderListExists(orderListName))
    # Flattened the original nested if/else chain into elif.
    if solverIndex == 0:
        solver = BacktrackingSolver()
    elif solverIndex == 1:
        solver = RecursiveBacktrackingSolver()
    else:
        solver = MinConflictsSolver()
    scheduler = Scheduler(plant, orderList, solver)
    scheduler.run()
# Maps each CLI command name to a tuple of
# (handler function, list of argument names for help output, help text).
# Used both for dispatch in __main__ and for rendering showHelp().
commands = {
    "create-plant": (createPlant, ["plant-name"], "Create a new plant"),
    "show-plant": (showPlant, ["plant-name"], "Show plant information"),
    "add-machine": (addMachine, ["plant-name", "machine-name", "machine-quantity",
        "machine-delay", "machine-can-unhook"], "Add a machine to a plant"),
    "create-order-list": (createOrderList, ["order-list-name"],
        "Create a new order list"),
    "add-order": (addOrder, ["order-list-name", "plant-name", "order-id", "order-deadline"],
        "Add an order to an order list"),
    "schedule": (schedule, ["plant-name", "order-list-name",
        "solver [0 = BT | 1 = RBT | 2 = MC]"],
        "Compute a schedule for an order list on a plant")
}
def showHelp():
    """
    Shows help data from the commands dict.

    Prints, for each command, its name followed by <argument> placeholders
    and the command's help text. (Python 2 print statements; trailing
    commas keep the command and its arguments on one line.)
    """
    print "Plant Maker\nUsage:\n"
    for c in commands:
        print c,
        for a in commands[c][1]:
            print "<" + a + ">",
        print "-- " + commands[c][2]
if __name__ == '__main__':
    # Every command takes at least one argument, so fewer than
    # 3 argv entries (program, command, arg...) means show usage.
    if len(sys.argv) < 3:
        showHelp()
    else:
        arguments = sys.argv[2:]
        # Dispatch via the commands table.
        # NOTE(review): an unknown command name raises KeyError here
        # rather than printing help — confirm whether that is intended.
        commands[sys.argv[1]][0](arguments)
| 28.893855
| 89
| 0.727185
|
4a03645cd9ba5cfe4495befdbeb320814f5a54cd
| 5,053
|
py
|
Python
|
extraction_functions.py
|
youssefEdrees/omr-image-processing
|
ae44c6d9d3d9750c9f127242bc4096bf1c6a9a7d
|
[
"MIT"
] | null | null | null |
extraction_functions.py
|
youssefEdrees/omr-image-processing
|
ae44c6d9d3d9750c9f127242bc4096bf1c6a9a7d
|
[
"MIT"
] | null | null | null |
extraction_functions.py
|
youssefEdrees/omr-image-processing
|
ae44c6d9d3d9750c9f127242bc4096bf1c6a9a7d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from commonfunctions import *
def find_4_points(img):
    """Locate the four extreme edge points of the page.

    Runs Canny edge detection and returns a float32 array of shape (4, 2)
    ordered as [top-left, top-right, bottom-right, bottom-left] in (x, y)
    coordinates.
    """
    edges = cv.Canny(np.copy(img), 50, 200, None, 3)
    rows, cols = np.nonzero(edges)
    pts = np.transpose((cols, rows))
    rect = np.zeros((4, 2), dtype="float32")
    # Top-left has the smallest x+y sum; bottom-right the largest.
    sums = pts.sum(axis = 1)
    rect[0] = pts[np.argmin(sums)]
    rect[2] = pts[np.argmax(sums)]
    # Top-right has the smallest y-x difference; bottom-left the largest.
    diffs = np.diff(pts, axis = 1)
    rect[1] = pts[np.argmin(diffs)]
    rect[3] = pts[np.argmax(diffs)]
    return rect
def fix_skew_and_rotation(original_img):
    """Deskew the sheet by a four-point perspective warp, then level it.

    Finds the page's four corners on a smoothed copy, pushes each corner 7%
    outward along its diagonal to keep a margin, warps the padded
    quadrilateral onto an axis-aligned rectangle of the binarized original,
    and finally rotates the result horizontal via make_horizontal().

    Changes vs. original: removed a dead first computation of diameter1/2
    (its results were overwritten before use) and two leftover debug
    print() calls of the output size.
    """
    img = smooth_image(original_img)
    points = find_4_points(img)
    # Pad each corner 7% outward along the quadrilateral's diagonals.
    direction1 = points[2]-points[0]
    direction2 = points[3]-points[1]
    points[0] = points[0] - 0.07 * direction1
    points[2] = points[2] + 0.07 * direction1
    points[1] = points[1] - 0.07 * direction2
    points[3] = points[3] + 0.07 * direction2
    # Diagonal lengths and directions of the padded quadrilateral.
    diameter1 = math.sqrt((points[0][0]-points[2][0])**2+(points[0][1]-points[2][1])**2)
    diameter2 = math.sqrt((points[1][0]-points[3][0])**2+(points[1][1]-points[3][1])**2)
    direction1 = points[2]-points[0]
    direction2 = points[3]-points[1]
    d = max(diameter1,diameter2)
    unit_direction1 = direction1 / np.linalg.norm(direction1)
    unit_direction2 = direction2 / np.linalg.norm(direction2)
    dot_product = np.dot(unit_direction1, unit_direction2)
    angle_between = np.arccos(dot_product)
    # Half the angle between the diagonals gives the rectangle's aspect angle.
    angle = (math.pi-angle_between)/2
    maxWidth = round( d * math.cos(angle))
    maxHeight = round(d * math.sin(angle))
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")
    # compute the perspective transform matrix and then apply it
    M = cv.getPerspectiveTransform(points, dst)
    warped = cv.warpPerspective(binarize_image(original_img), M, (maxWidth, maxHeight),borderValue=(255,255,255))
    # return the warped image
    return make_horizontal(warped)
# In[167]:
def make_horizontal(img):
    """Rotate `img` so its dominant text/staff lines become horizontal.

    Estimates the skew angle as the median angle of the first six Hough
    line segments detected on the Canny edge map, then rotates by that
    angle via rotate_bound().

    NOTE(review): assumes HoughLinesP returns at least 6 segments —
    fewer lines raise an IndexError/TypeError here; confirm inputs
    always contain enough straight edges.
    """
    dst = cv.Canny(img, 50, 200, None, 3)
    # Copy edges to the images that will display the results in BGR
    # NOTE(review): cdst/cdstP appear to be leftover debug buffers — unused.
    cdst = cv.cvtColor(dst, cv.COLOR_GRAY2RGB)
    cdstP = np.copy(cdst)
    linesP = cv.HoughLinesP(dst, 4, np.pi / 180, 50, None, 50, 10)
    lines = []
    for i in range(6):
        lines.append(linesP[i][0])
    angles = []
    for i in range(6):
        # Segment angle in degrees; each entry is (x1, y1, x2, y2).
        angles.append(math.degrees(math.atan2(lines[i][1]-lines[i][3],lines[i][0]-lines[i][2])))
        # Fold the angle into [0, 90] / (-90, 0) so the median is meaningful.
        if angles[i] < 0 :
            angles[i] = -1 * angles[i]
        if (angles[i] > 90):
            angles[i] = -1 * (180-angles[i])
    # Median is robust against the occasional non-horizontal segment.
    angle = np.median(np.array(angles))
    return rotate_bound(img,angle)
def rotate_bound(image, angle):
    """Rotate `image` by `angle` degrees without cropping.

    The output canvas is enlarged to the rotated bounding box and new
    areas are filled with white.
    """
    h, w = image.shape[:2]
    center_x, center_y = w / 2, h / 2
    M = cv.getRotationMatrix2D((center_x, center_y), -angle, 1.0)
    cos_a = np.abs(M[0, 0])
    sin_a = np.abs(M[0, 1])
    # Bounding box of the rotated image.
    new_w = int((h * sin_a) + (w * cos_a))
    new_h = int((h * cos_a) + (w * sin_a))
    # Shift the transform so the content stays centred in the new canvas.
    M[0, 2] += (new_w / 2) - center_x
    M[1, 2] += (new_h / 2) - center_y
    return cv.warpAffine(image, M, (new_w, new_h), borderValue=(255,255,255))
# In[7]:
def binarize_image(original_img):
    """Binarize a grayscale image with a Gaussian adaptive threshold, then
    despeckle it with a small median filter. The input is not modified."""
    img = np.copy(original_img)
    # 11x11 Gaussian-weighted neighbourhood, constant offset 5.
    # Bug fix: maxValue was 256, one past the uint8 range; OpenCV saturates
    # that to 255 anyway, so state the intended value explicitly.
    img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv.THRESH_BINARY, 11, 5)
    # Median filter removes salt-and-pepper noise left by thresholding.
    img = cv.medianBlur(img, 3)
    return img
def smooth_image(original_img):
    """Blur the image and apply a morphological opening (erode then dilate)
    to suppress fine detail and noise. The input image is left untouched."""
    smoothed = np.copy(original_img)
    smoothed = cv.GaussianBlur(smoothed, (5, 5), 2)
    kernel = np.ones((7, 7))
    smoothed = cv.medianBlur(smoothed, 7)
    # Opening with a 7x7 kernel: erosion followed by dilation.
    smoothed = cv.erode(smoothed, kernel)
    smoothed = cv.dilate(smoothed, kernel)
    return smoothed
def draw_points(img):
    """Return a copy of *img* with the 4 detected corner points drawn as
    filled black circles of radius 10.

    Bug fix: the original drew the circles on `img` itself — mutating the
    caller's image — and the copy `img2` was effectively unused (cv.circle
    draws in place and returns its first argument).
    """
    points = find_4_points(img)
    img2 = np.copy(img)
    for i in range(4):
        # int() cast: cv.circle requires integer pixel coordinates;
        # find_4_points may return floats — TODO confirm its return type.
        cv.circle(img2, (int(points[i][0]), int(points[i][1])),
                  radius=10, color=(0, 0, 0), thickness=-1)
    return img2
| 24.64878
| 113
| 0.5943
|
4a03647ad0bbb62b8a7c246cc6c46c4460554298
| 433
|
py
|
Python
|
backend/validators/appointment_val.py
|
NelsonM9/senaSoft
|
d72b5ed32b86a53aac962ec440d84ecce4555780
|
[
"Apache-2.0"
] | null | null | null |
backend/validators/appointment_val.py
|
NelsonM9/senaSoft
|
d72b5ed32b86a53aac962ec440d84ecce4555780
|
[
"Apache-2.0"
] | null | null | null |
backend/validators/appointment_val.py
|
NelsonM9/senaSoft
|
d72b5ed32b86a53aac962ec440d84ecce4555780
|
[
"Apache-2.0"
] | null | null | null |
from marshmallow import validate, fields, Schema
class AppointmentVal(Schema):
    """Marshmallow schema validating appointment payloads.

    Bug fix: the fields previously passed ``validator=`` — marshmallow's
    keyword is ``validate=``. Unknown keyword arguments are silently stored
    as field metadata, so none of these length checks were being enforced.
    """
    # Appointment id (max 10 chars); patient and doctor ids (8-20 chars).
    id_a = fields.Str(required=True, validate=validate.Length(max=10))
    id_p = fields.Str(required=True, validate=validate.Length(min=8, max=20))
    id_d = fields.Str(required=True, validate=validate.Length(min=8, max=20))
    date_a = fields.Date(required=True)
    reason = fields.Str(required=True, validate=validate.Length(min=2, max=250))
| 48.111111
| 81
| 0.741339
|
4a036579f7e76891f11d246b8c79605819e3ec81
| 849
|
py
|
Python
|
MicBinToRegs.py
|
branisha/rac
|
d2704d3fbc6f978f1ee46266c3f83c5ad17417fb
|
[
"MIT"
] | null | null | null |
MicBinToRegs.py
|
branisha/rac
|
d2704d3fbc6f978f1ee46266c3f83c5ad17417fb
|
[
"MIT"
] | null | null | null |
MicBinToRegs.py
|
branisha/rac
|
d2704d3fbc6f978f1ee46266c3f83c5ad17417fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Decode a 32-bit Mic-1 microinstruction bitstring into its named fields."""
import sys

# (name, start, end) bit slices of the 32-bit microinstruction word,
# printed in this order.
_FIELDS = (
    ("AMUX", 0, 1),
    ("COND", 1, 3),
    ("ALU", 3, 5),
    ("SH", 5, 7),
    ("MBR", 7, 8),
    ("MAR", 8, 9),
    ("RD", 9, 10),
    ("WR", 10, 11),
    ("ENC", 11, 12),
    ("C", 12, 16),
    ("B", 16, 20),
    ("A", 20, 24),
    ("ADDR", 24, 32),
)


def decode_fields(bitstring):
    """Return a list of (field_name, bits) slices of a 32-char bitstring."""
    return [(name, bitstring[lo:hi]) for name, lo, hi in _FIELDS]


if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("{} bitstring".format(sys.argv[0]))
        # Bug fix: the original printed the usage line but fell through and
        # crashed with IndexError on sys.argv[1]. Exit explicitly instead.
        sys.exit(-1)
    bitstring = sys.argv[1]
    if len(bitstring) != 32:
        print("Length mismatch")
        sys.exit(-1)
    for name, bits in decode_fields(bitstring):
        print("{}:\t{}".format(name, bits))
| 30.321429
| 49
| 0.573616
|
4a036659bd2441ac839da270f5ae87182d62987c
| 1,408
|
py
|
Python
|
api/methods/account.py
|
Vic121/forex-server
|
507f4e4d424093a416159430ad77005b613aff76
|
[
"MIT"
] | 3
|
2020-07-25T05:57:05.000Z
|
2020-11-14T06:23:24.000Z
|
api/methods/account.py
|
Vic121/forex-server
|
507f4e4d424093a416159430ad77005b613aff76
|
[
"MIT"
] | null | null | null |
api/methods/account.py
|
Vic121/forex-server
|
507f4e4d424093a416159430ad77005b613aff76
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2013 Victor Chinedu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# from django.conf import settings
from api.helpers import api_call
from api.models.account import Account
@api_call
def index(request, token):
    """Return the account list for this request.

    ``token`` is unused here — presumably consumed by the api_call
    decorator; TODO confirm.
    """
    account_model = Account(request)
    return account_model.get_list()
@api_call
def details(request, token, account_id):
    """Return the single account identified by *account_id*."""
    account_model = Account(request)
    return account_model.get_account(account_id)
| 39.111111
| 79
| 0.777699
|
4a0366c0d8fb1baf7d4e2428376bd3bf7b8b5ec2
| 4,231
|
py
|
Python
|
object detection/src/lib/datasets/dataset/uadetrac1on10.py
|
hu64/SpotNet
|
247d3eabd05afa097fab105b930dcc786c0a7c46
|
[
"MIT"
] | 48
|
2020-05-14T08:56:50.000Z
|
2022-02-14T08:32:48.000Z
|
object detection/src/lib/datasets/dataset/uadetrac1on10.py
|
hu64/SpotNet
|
247d3eabd05afa097fab105b930dcc786c0a7c46
|
[
"MIT"
] | 17
|
2020-07-31T11:07:20.000Z
|
2021-12-01T02:08:06.000Z
|
object detection/src/lib/datasets/dataset/uadetrac1on10.py
|
hu64/SpotNet
|
247d3eabd05afa097fab105b930dcc786c0a7c46
|
[
"MIT"
] | 16
|
2020-06-18T02:50:02.000Z
|
2022-02-12T00:22:47.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class UADETRAC1ON10(data.Dataset):
    """UA-DETRAC vehicle-detection dataset (1 frame in 10), COCO-format
    annotations, for use with the CenterNet-style training pipeline."""

    num_classes = 4
    # default_resolution = [960, 540]
    default_resolution = [512, 512]
    # Per-channel normalization statistics, broadcastable over HWC images.
    mean = np.array([0.40789654, 0.44719302, 0.47026115],
                    dtype=np.float32).reshape(1, 1, 3)
    std = np.array([0.28863828, 0.27408164, 0.27809835],
                   dtype=np.float32).reshape(1, 1, 3)

    def __init__(self, opt, split):
        super(UADETRAC1ON10, self).__init__()
        self.data_dir = os.path.join(opt.data_dir, 'coco')
        self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
        # Annotation files live on a fixed store path, chosen per split.
        if split == 'test':
            self.annot_path = '/store/datasets/UA-Detrac/COCO-format/test-1-on-30.json'
        elif split == 'val':
            self.annot_path = '/store/datasets/UA-Detrac/COCO-format/val.json'
        else:
            self.annot_path = '/store/datasets/UA-Detrac/COCO-format/train-1-on-10.json'
        self.max_objs = 128
        self.class_name = ['__background__', 'bus', 'car', 'others', 'van']
        self._valid_ids = [1, 2, 3, 4]
        # Map COCO category id -> contiguous class index.
        self.cat_ids = {cat: idx for idx, cat in enumerate(self._valid_ids)}
        self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                          for v in range(1, self.num_classes + 1)]
        # Fixed-seed RNG and eigen-decomposition used for color augmentation.
        self._data_rng = np.random.RandomState(123)
        self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                                 dtype=np.float32)
        self._eig_vec = np.array([
            [-0.58752847, -0.69563484, 0.41340352],
            [-0.5832747, 0.00994535, -0.81221408],
            [-0.56089297, 0.71832671, 0.41158938]
        ], dtype=np.float32)
        self.split = split
        self.opt = opt
        print('==> initializing UA-Detrac {} data.'.format(split))
        self.coco = coco.COCO(self.annot_path)
        self.images = self.coco.getImgIds()
        self.num_samples = len(self.images)
        print('Loaded {} {} samples'.format(split, self.num_samples))

    def _to_float(self, x):
        """Round x to two decimals and return it as a float."""
        return float("{:.2f}".format(x))

    def convert_eval_format(self, all_bboxes):
        """Flatten {image_id: {class_idx: [bbox, ...]}} results into a list
        of COCO-style detection dicts. Boxes are converted from
        [x1, y1, x2, y2] to [x, y, w, h] IN PLACE in the input lists."""
        detections = []
        for image_id, per_class in all_bboxes.items():
            for cls_ind, bboxes in per_class.items():
                category_id = self._valid_ids[cls_ind - 1]
                for bbox in bboxes:
                    bbox[2] -= bbox[0]
                    bbox[3] -= bbox[1]
                    score = bbox[4]
                    detection = {
                        "image_id": int(image_id),
                        "category_id": int(category_id),
                        "bbox": list(map(self._to_float, bbox[0:4])),
                        "score": float("{:.2f}".format(score))
                    }
                    if len(bbox) > 5:
                        # Extra entries carry 4 extreme points (8 coords).
                        detection["extreme_points"] = list(
                            map(self._to_float, bbox[5:13]))
                    detections.append(detection)
        return detections

    def __len__(self):
        return self.num_samples

    def save_results(self, results, save_dir):
        """Write detections as COCO-format JSON under save_dir."""
        with open('{}/results.json'.format(save_dir), 'w') as out_file:
            json.dump(self.convert_eval_format(results), out_file)

    def run_eval(self, results, save_dir):
        """Save results, then run and print the COCO bbox evaluation."""
        self.save_results(results, save_dir)
        coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
        evaluator = COCOeval(self.coco, coco_dets, "bbox")
        evaluator.evaluate()
        evaluator.accumulate()
        evaluator.summarize()
| 38.816514
| 88
| 0.565115
|
4a0367513b8bfe155f1dc97c46638abb5762e5a4
| 1,772
|
py
|
Python
|
test/multi/test_radio_program_switching.py
|
uniflex/module_gnuradio
|
411135d6623dfb948f41adbd2931adf88e9ec636
|
[
"MIT"
] | null | null | null |
test/multi/test_radio_program_switching.py
|
uniflex/module_gnuradio
|
411135d6623dfb948f41adbd2931adf88e9ec636
|
[
"MIT"
] | null | null | null |
test/multi/test_radio_program_switching.py
|
uniflex/module_gnuradio
|
411135d6623dfb948f41adbd2931adf88e9ec636
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import time
import xmlrpc.client
import subprocess

'''
Run-time control of meta radio program
which allows very fast switching from
one protocol to another:
- context switching
'''

__author__ = "A. Zubow"
__copyright__ = "Copyright (c) 2016, Technische Universität Berlin"
__version__ = "0.1.0"
__email__ = "{zubow}@tkn.tu-berlin.de"

START_GNURADIO = False

if __name__ == '__main__':
    gr_process = None
    if START_GNURADIO:
        # Launch the GNU Radio flowgraph, logging its output for debugging.
        gr_process_io = {'stdout': open('/tmp/gnuradio.log', 'w+'),
                         'stderr': open('/tmp/gnuradio-err.log', 'w+')}
        gr_process = subprocess.Popen(
            ["env", "python2", '../testdata/top_block.py'],
            stdout=gr_process_io['stdout'], stderr=gr_process_io['stderr'])
        time.sleep(1)

    # XML-RPC endpoint exposed by the running GNU Radio flowgraph.
    proxy = xmlrpc.client.ServerProxy("http://localhost:8080/")

    # Load metadata describing the per-protocol USRP source variables.
    # NOTE: eval() on local test fixtures only — never use on untrusted input.
    proto_usrp_src_dicts = eval(open('../testdata/proto_usrp_src_dicts.txt', 'r').read())
    usrp_source_fields = eval(open('../testdata/usrp_source_fields.txt', 'r').read())

    try:
        # Main control loop: keep toggling between the two protocols.
        while True:
            res = getattr(proxy, "get_session_var")()
            print('Current proto: %s' % str(res))
            last_proto = res[0]
            last_proto = (last_proto + 1) % 2

            # Read the variables of the protocol we are switching to.
            init_session_value = [last_proto]
            for field in usrp_source_fields:
                res = getattr(proxy, "get_%s" % proto_usrp_src_dicts[last_proto][field])()
                init_session_value.append(float(res))

            print('Switch to protocol %d with cfg %s'
                  % (last_proto, str(init_session_value)))
            getattr(proxy, "set_session_var")(init_session_value)
    finally:
        # Bug fix: this cleanup was unreachable after the infinite loop.
        # Run it on any exit (e.g. Ctrl-C) so the spawned flowgraph does
        # not outlive the controller.
        if START_GNURADIO:
            gr_process.kill()
| 31.087719
| 114
| 0.641648
|
4a036871f13a1339b2240bdd9794adec31c75d46
| 2,518
|
py
|
Python
|
themes/prompt-toolkit/base16/base16-tomorrow-night.py
|
base16-fork/base16-fork
|
79856b7e6195dde0874a9e6d191101ac6c5c74f5
|
[
"MIT"
] | null | null | null |
themes/prompt-toolkit/base16/base16-tomorrow-night.py
|
base16-fork/base16-fork
|
79856b7e6195dde0874a9e6d191101ac6c5c74f5
|
[
"MIT"
] | null | null | null |
themes/prompt-toolkit/base16/base16-tomorrow-night.py
|
base16-fork/base16-fork
|
79856b7e6195dde0874a9e6d191101ac6c5c74f5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# base16-prompt-toolkit (https://github.com/memeplex/base16-prompt-toolkit)
# Base16 Prompt Toolkit template by Carlos Pita (carlosjosepita@gmail.com
# Tomorrow Night scheme by Chris Kempson (http://chriskempson.com)
# prompt_toolkit moved its 256-color lookup table between major versions;
# try the pre-v2 location first, then the v2 one.
try:
    # older than v2
    from prompt_toolkit.output.vt100 import _256_colors
except ModuleNotFoundError:
    # version 2
    from prompt_toolkit.formatted_text.ansi import _256_colors

from pygments.style import Style
from pygments.token import (Keyword, Name, Comment, String, Error, Text,
                            Number, Operator, Literal, Token)

# See http://chriskempson.com/projects/base16/ for a description of the role
# of the different colors in the base16 palette.
base00 = '#1d1f21'
base01 = '#282a2e'
base02 = '#373b41'
base03 = '#969896'
base04 = '#b4b7b4'
base05 = '#c5c8c6'
base06 = '#e0e0e0'
base07 = '#ffffff'
base08 = '#cc6666'
base09 = '#de935f'
base0A = '#f0c674'
base0B = '#b5bd68'
base0C = '#8abeb7'
base0D = '#81a2be'
base0E = '#b294bb'
base0F = '#a3685a'

# See https://github.com/jonathanslenders/python-prompt-toolkit/issues/355
# Register each palette color's exact RGB triple in prompt_toolkit's
# 256-color table so these hex colors resolve to low terminal color indexes.
# The order string interleaves the base colors into slot order.
colors = (globals()['base0' + d] for d in '08BADEC5379F1246')
for i, color in enumerate(colors):
    r, g, b = int(color[1:3], 16), int(color[3:5], 16), int(color[5:], 16)
    # NOTE(review): for i in 9..15 this assigns indexes 15..21, beyond the
    # 16 classic ANSI slots — presumably intentional per the issue linked
    # above; confirm against prompt_toolkit's color table semantics.
    _256_colors[r, g, b] = i + 6 if i > 8 else i
# See http://pygments.org/docs/tokens/ for a description of the different
# pygments tokens.
class Base16Style(Style):
    """Pygments style wiring the base16 palette into syntax-token colors."""

    background_color = base00
    highlight_color = base02
    default_style = base05

    # Syntax-highlighting colors per pygments token class.
    styles = {
        Text: base05,
        Error: f'{base08} bold',
        Comment: base03,
        Keyword: base0E,
        Keyword.Constant: base09,
        Keyword.Namespace: base0D,
        Name.Builtin: base0D,
        Name.Function: base0D,
        Name.Class: base0D,
        Name.Decorator: base0E,
        Name.Exception: base08,
        Number: base09,
        Operator: base0E,
        Literal: base0B,
        String: base0B,
    }

    # See https://github.com/jonathanslenders/python-prompt-toolkit/blob/master/prompt_toolkit/styles/defaults.py
    # for a description of prompt_toolkit related pseudo-tokens.
    overrides = {
        Token.Prompt: base0B,
        Token.PromptNum: f'{base0B} bold',
        Token.OutPrompt: base08,
        Token.OutPromptNum: f'{base08} bold',
        Token.Menu.Completions.Completion: f'bg:{base01} {base04}',
        Token.Menu.Completions.Completion.Current: f'bg:{base04} {base01}',
        Token.MatchingBracket.Other: f'bg:{base03} {base00}',
    }
| 30.337349
| 109
| 0.673153
|
4a0368b98f5ef68254ea8df520097b172b150d17
| 1,322
|
py
|
Python
|
xlsxwriter/test/worksheet/test_write_col_breaks.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/worksheet/test_write_col_breaks.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/worksheet/test_write_col_breaks.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ...worksheet import Worksheet
class TestWriteColBreaks(unittest.TestCase):
    """Tests for the Worksheet _write_col_breaks() XML writer."""

    def setUp(self):
        # Route the worksheet's XML output into an in-memory buffer.
        self.fh = StringIO()
        self.worksheet = Worksheet()
        self.worksheet._set_filehandle(self.fh)

    def test_write_col_breaks_1(self):
        """A single vertical break produces count/manualBreakCount of 1."""
        expected = ('<colBreaks count="1" manualBreakCount="1">'
                    '<brk id="1" max="1048575" man="1"/></colBreaks>')
        self.worksheet.vbreaks = [1]
        self.worksheet._write_col_breaks()
        self.assertEqual(self.fh.getvalue(), expected)

    def test_write_col_breaks_2(self):
        """Breaks are emitted sorted and the zero entry is dropped."""
        expected = ('<colBreaks count="3" manualBreakCount="3">'
                    '<brk id="1" max="1048575" man="1"/>'
                    '<brk id="3" max="1048575" man="1"/>'
                    '<brk id="8" max="1048575" man="1"/></colBreaks>')
        self.worksheet.vbreaks = [8, 3, 1, 0]
        self.worksheet._write_col_breaks()
        self.assertEqual(self.fh.getvalue(), expected)
| 27.541667
| 179
| 0.594554
|
4a0369d1a2aee34815a1ba4eacdab127a250516e
| 3,405
|
py
|
Python
|
tests/tests_outlier_detection_peak.py
|
trancept/batch7_satellite_ges
|
31126b398eba4312a245b97bfae35c55fbd5be37
|
[
"Apache-2.0"
] | 14
|
2020-03-04T18:27:56.000Z
|
2021-09-13T17:49:20.000Z
|
tests/tests_outlier_detection_peak.py
|
chrischris69/batch7_satellite_ges
|
8cd1cffc9e148b2aa7e128d75d4b42d954e502c6
|
[
"MIT"
] | 3
|
2020-04-28T06:31:52.000Z
|
2021-09-28T02:56:20.000Z
|
tests/tests_outlier_detection_peak.py
|
chrischris69/batch7_satellite_ges
|
8cd1cffc9e148b2aa7e128d75d4b42d954e502c6
|
[
"MIT"
] | 12
|
2020-03-05T13:26:01.000Z
|
2020-09-23T14:35:18.000Z
|
# Exploratory script: compare four outlier-detection settings (LOF / DBSCAN,
# each with all features vs. gaussian-fit parameters only) on the CO2 peaks
# detected for one month (2018-08), and save the combined result.
import pipeline.outlier_detection_peak as od
import pipeline.find_peak as fp
import pandas as pd
import numpy as np

# Base URL of the OVH object store holding the OCO-2 sounding datasets.
STORE = "https://storage.gra.cloud.ovh.net/v1/AUTH_2aaacef8e88a4ca897bb93b984bd04dd/oco2//datasets/oco-2/soudings/oco2_"
# Test read from OVH storage TODO replace future storage with these paths
# df = pd.read_csv(STORE + "1409.csv.xz", compression="xz")

# Detect outliers on all peaks detected one month
peaks_detected = pd.read_csv("http://courty.fr/OCO2/result_for_oco2_1808-no_delta")
df = pd.read_csv("dataset/oco2_1808.csv", sep=";")  # to be replaced with path OVH
# Keep only the soundings belonging to orbits where a peak was detected.
df = df.loc[(df['orbit'].isin(list(peaks_detected.orbit.unique()))), :]
df_dis = fp.compute_distance(df)
peaks_trans = od.add_features(peaks_detected, df_dis)
peaks_out = peaks_trans.copy()

# Method 1: Local Outlier Factor over all features (class label + score).
peaks_out["y_class_lof"], peaks_out["outlier_score_lof"] = od.detect_outliers_lof(
    peaks_trans, neighbors=10,
    features=["latitude", "longitude",
              "slope", "intercept",
              "amplitude", "sigma",
              "delta", "R", "surf_pres"])
peaks_out.y_class_lof.value_counts()
# 1    2690
# -1     49
np.random.seed(18)
od.compare_peaks(df_dis, peaks_out, "y_class_lof")

# Method 2: LOF restricted to the gaussian-fit parameters.
peaks_out["y_class_lof_only_gaussian_param"], _ = od.detect_outliers_lof(
    peaks_trans, neighbors=10,
    features=["slope", "intercept",
              "amplitude", "sigma",
              "delta", "R"])
peaks_out.y_class_lof_only_gaussian_param.value_counts()
# 1    2688
# -1     51
np.random.seed(123)
od.compare_peaks(df_dis, peaks_out, "y_class_lof_only_gaussian_param")

# Method 3: DBSCAN over all features.
# Issue DBSCAN :very dependent on eps and nmin
peaks_out["y_class_dbscan"] = od.detect_outliers_dbscan(
    peaks_trans,
    features=["latitude", "longitude", "slope", "intercept",
              "amplitude", "sigma", "delta", "R", "surf_pres"])
peaks_out.y_class_dbscan.value_counts()
# Out[12]:
# 1    1964
# -1    775
np.random.seed(18)
od.compare_peaks(df_dis, peaks_out, "y_class_dbscan")

# Method 4: DBSCAN restricted to the gaussian-fit parameters.
peaks_out["y_class_dbscan_only_gaussian_param"] = od.detect_outliers_dbscan(
    peaks_trans,
    features=["slope", "intercept",
              "amplitude", "sigma", "delta",
              "R"])
peaks_out.y_class_dbscan_only_gaussian_param.value_counts()
# Out[12]:
# 1    2603
# -1    136
np.random.seed(18)
od.compare_peaks(df_dis, peaks_out, "y_class_dbscan_only_gaussian_param")

# 4 methods: LOF using all features, LOF using only gaussian param features, DBSCAN using all features,
# DBSCAN using only param features
peaks_out.drop(columns="Unnamed: 0").to_csv("dataset/output/" + "peaks_out_1808.csv", index=False)
| 52.384615
| 120
| 0.539501
|
4a036aa1fdd7839e6769656626ce495fb1f6cc6e
| 3,162
|
py
|
Python
|
tests/test_unique_fields_mixin.py
|
radicalbiscuit/drf-writable-nested
|
b4da841b1eb64e476a5ce0158be176364ece6577
|
[
"BSD-2-Clause"
] | 754
|
2017-05-22T21:36:40.000Z
|
2022-03-30T07:04:12.000Z
|
tests/test_unique_fields_mixin.py
|
radicalbiscuit/drf-writable-nested
|
b4da841b1eb64e476a5ce0158be176364ece6577
|
[
"BSD-2-Clause"
] | 149
|
2017-05-22T04:10:05.000Z
|
2022-03-31T19:04:41.000Z
|
tests/test_unique_fields_mixin.py
|
radicalbiscuit/drf-writable-nested
|
b4da841b1eb64e476a5ce0158be176364ece6577
|
[
"BSD-2-Clause"
] | 121
|
2017-06-23T20:40:10.000Z
|
2022-02-17T14:56:25.000Z
|
from django.test import TestCase
from rest_framework.exceptions import ValidationError, ErrorDetail
from . import (
models,
serializers,
)
class UniqueFieldsMixinTestCase(TestCase):
    """Tests for UniqueFieldsMixin behavior on nested serializers with
    unique fields: validation is deferred from is_valid() to save()."""

    def test_create_update_success(self):
        """Create a parent with a nested child, then update it with the same
        child value and matching pks — both operations must succeed."""
        serializer = serializers.UFMParentSerializer(
            data={'child': {'field': 'value'}})
        self.assertTrue(serializer.is_valid())
        parent = serializer.save()

        serializer = serializers.UFMParentSerializer(
            instance=parent,
            data={
                'pk': parent.pk,
                'child': {
                    'pk': parent.child.pk,
                    'field': 'value',
                }
            }
        )
        self.assertTrue(serializer.is_valid())
        serializer.save()

    def test_create_update_failed(self):
        # In this case everything is valid on the validation stage, because
        # UniqueValidator is skipped
        # But `save` should raise an exception on create/update
        child = models.UFMChild.objects.create(field='value')
        parent = models.UFMParent.objects.create(child=child)
        # Expected error details: the model's default unique message, and the
        # custom message configured on the ...ForValidatorMessage serializer.
        default_error_detail = ErrorDetail(
            string='ufm child with this field already exists.',
            code='unique')
        unique_message_error_detail = ErrorDetail(
            string=serializers.UNIQUE_ERROR_MESSAGE,
            code='unique'
        )
        # Case 1: create with a duplicate nested value — save() must raise.
        serializer = serializers.UFMParentSerializer(
            data={
                'child': {
                    'field': child.field,
                }
            }
        )
        self.assertTrue(serializer.is_valid())
        with self.assertRaises(ValidationError) as ctx:
            serializer.save()
        self.assertEqual(
            ctx.exception.detail,
            {'child': {'field': [default_error_detail]}}
        )
        # Case 2: update without the child's pk — still treated as a
        # duplicate, save() must raise the same error.
        serializer = serializers.UFMParentSerializer(
            instance=parent,
            data={
                'pk': parent.pk,
                'child': {
                    'field': child.field,
                }
            }
        )
        self.assertTrue(serializer.is_valid())
        with self.assertRaises(ValidationError) as ctx:
            serializer.save()
        self.assertEqual(
            ctx.exception.detail,
            {'child': {'field': [default_error_detail]}}
        )
        # Case 3: serializer configured with a custom validator message.
        serializer = serializers.UFMParentSerializerForValidatorMessage(
            data={
                'child': {
                    'field': child.field,
                }
            }
        )
        self.assertTrue(serializer.is_valid())
        with self.assertRaises(ValidationError) as ctx:
            serializer.save()
        self.assertEqual(
            ctx.exception.detail,
            {'child': {'field': [unique_message_error_detail]}}
        )

    def test_unique_field_not_required_for_partial_updates(self):
        """A partial update that omits the unique field must validate and
        save without tripping the uniqueness check on the instance itself."""
        child = models.UFMChild.objects.create(field='value')
        serializer = serializers.UFMChildSerializer(
            instance=child,
            data={},
            partial=True
        )
        self.assertTrue(serializer.is_valid())
        serializer.save()
| 30.403846
| 75
| 0.552498
|
4a036ac131241028db99ab8ad0aeb29471f1eb74
| 1,470
|
py
|
Python
|
dpm/distributions/gumbel_mixture_model.py
|
nextBillyonair/DPM
|
840ffaafe15c208b200b74094ffa8fe493b4c975
|
[
"MIT"
] | 1
|
2021-07-20T14:02:55.000Z
|
2021-07-20T14:02:55.000Z
|
dpm/distributions/gumbel_mixture_model.py
|
nextBillyonair/DPM
|
840ffaafe15c208b200b74094ffa8fe493b4c975
|
[
"MIT"
] | null | null | null |
dpm/distributions/gumbel_mixture_model.py
|
nextBillyonair/DPM
|
840ffaafe15c208b200b74094ffa8fe493b4c975
|
[
"MIT"
] | null | null | null |
import torch
from torch.nn import ModuleList
from .distribution import Distribution
from .gumbel_softmax import GumbelSoftmax
# Differentiable, Learnable Mixture Weights
class GumbelMixtureModel(Distribution):
    """Mixture distribution whose component selection is drawn via
    Gumbel-Softmax, keeping the mixture weights learnable/differentiable."""

    def __init__(self, models, probs, temperature=1.0, hard=True):
        # models: list of component Distributions — assumed to share the
        #   same n_dims (only models[0].n_dims is read); TODO confirm.
        # probs: mixture weights handed to the GumbelSoftmax categorical.
        # temperature/hard: Gumbel-Softmax relaxation parameters.
        super().__init__()
        self.n_dims = models[0].n_dims
        self.categorical = GumbelSoftmax(probs, temperature, hard)
        self.models = ModuleList(models)

    def log_prob(self, value):
        """Mixture log-density: logsumexp over per-component log-probs
        shifted by the log categorical weights."""
        log_probs = torch.stack([sub_model.log_prob(value)
                                 for sub_model in self.models], dim=-1)
        cat_log_probs = self.categorical.probs.log()
        return torch.logsumexp(log_probs + cat_log_probs, dim=-1)

    def sample(self, batch_size):
        """Draw batch_size samples: sample all components, then keep each
        draw's selected component via a one-hot Gumbel-Softmax mask."""
        one_hot = self.categorical.sample(batch_size)
        samples = torch.stack([sub_model.sample(batch_size)
                               for sub_model in self.models], dim=1)
        # Mask out the non-selected components and sum them away, then
        # reshape back to (batch_size, event_dim).
        return (samples * one_hot.unsqueeze(-1)).squeeze(-1).sum(-1).view(batch_size, samples.size(-1))

    def cdf(self, value):
        """Mixture CDF: categorical-probability-weighted sum of the
        component CDFs."""
        cdfs = torch.stack([sub_model.cdf(value)
                            for sub_model in self.models], dim=1)
        cat_cdfs = self.categorical.probs
        return torch.sum(cdfs * cat_cdfs, dim=-1)

    def get_parameters(self):
        """Return mixture weights (as a numpy array) and each component's
        own parameter dict."""
        return {'probs': self.categorical.probs.detach().numpy(),
                'models': [model.get_parameters() for model in self.models]}
| 40.833333
| 103
| 0.644898
|
4a036b43566730c5f2225a79c25c30abc3887f56
| 6,247
|
py
|
Python
|
research/tcn/estimators/mvtcn_estimator.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 3,326
|
2018-01-26T22:42:25.000Z
|
2022-02-16T13:16:39.000Z
|
research/tcn/estimators/mvtcn_estimator.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 150
|
2017-08-28T14:59:36.000Z
|
2022-03-11T23:21:35.000Z
|
research/tcn/estimators/mvtcn_estimator.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 1,474
|
2018-02-01T04:33:18.000Z
|
2022-03-08T07:02:20.000Z
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MVTCN trainer implementations with various metric learning losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import data_providers
import model as model_module
from estimators import base_estimator
import tensorflow as tf
class MVTCNEstimator(base_estimator.BaseEstimator):
  """Multi-view TCN base class."""

  def __init__(self, config, logdir):
    super(MVTCNEstimator, self).__init__(config, logdir)

  def _pairs_provider(self, records, is_training):
    """Returns a partial of the multiview pairs data provider, bound to the
    dataset/windowing options from the config; callers supply batch_size."""
    config = self._config
    num_views = config.data.num_views
    window = config.mvtcn.window
    num_parallel_calls = config.data.num_parallel_calls
    sequence_prefetch_size = config.data.sequence_prefetch_size
    batch_prefetch_size = config.data.batch_prefetch_size
    examples_per_seq = config.data.examples_per_sequence
    return functools.partial(
        data_providers.multiview_pairs_provider,
        file_list=records,
        preprocess_fn=self.preprocess_data,
        num_views=num_views,
        window=window,
        is_training=is_training,
        examples_per_seq=examples_per_seq,
        num_parallel_calls=num_parallel_calls,
        sequence_prefetch_size=sequence_prefetch_size,
        batch_prefetch_size=batch_prefetch_size)

  def forward(self, images_concat, is_training, reuse=False):
    """See base class."""
    # Which embedder architecture to build, and whether the chosen loss
    # expects L2-normalized embeddings.
    embedder_strategy = self._config.embedder_strategy
    loss_strategy = self._config.loss_strategy
    l2_normalize_embedding = self._config[loss_strategy].embedding_l2
    embedder = model_module.get_embedder(
        embedder_strategy,
        self._config,
        images_concat,
        is_training=is_training,
        l2_normalize_embedding=l2_normalize_embedding, reuse=reuse)
    embeddings_concat = embedder.construct_embedding()
    # Expose the embedder's trainable variables and checkpoint-restore fn
    # to the surrounding estimator machinery.
    variables_to_train = embedder.get_trainable_variables()
    self.variables_to_train = variables_to_train
    self.pretrained_init_fn = embedder.init_fn
    return embeddings_concat

  def _collect_image_summaries(self, anchor_images, positive_images,
                               images_concat):
    """Adds TensorBoard image summaries of training pairs (CPU/GPU only;
    image summaries are unsupported on TPU)."""
    image_summaries = self._config.logging.summary.image_summaries
    if image_summaries and not self._config.use_tpu:
      # Show anchor and positive side by side (concat along width).
      batch_pairs_summary = tf.concat(
          [anchor_images, positive_images], axis=2)
      tf.summary.image('training/mvtcn_pairs', batch_pairs_summary)
      tf.summary.image('training/images_preprocessed_concat', images_concat)
class MVTCNTripletEstimator(MVTCNEstimator):
  """Multi-View TCN with semihard triplet loss."""

  def __init__(self, config, logdir):
    super(MVTCNTripletEstimator, self).__init__(config, logdir)

  def construct_input_fn(self, records, is_training):
    """See base class."""
    def input_fn(params):
      """Provides input to MVTCN models."""
      # On TPU the per-shard batch size is injected via params.
      if is_training and self._config.use_tpu:
        batch_size = params['batch_size']
      else:
        batch_size = self._batch_size
      (images_concat,
       anchor_labels,
       positive_labels,
       anchor_images,
       positive_images) = self._pairs_provider(
           records, is_training)(batch_size=batch_size)
      if is_training:
        self._collect_image_summaries(anchor_images, positive_images,
                                      images_concat)
      # Triplet loss consumes one label per embedding: anchors then
      # positives, matching the concatenated image batch.
      labels = tf.concat([anchor_labels, positive_labels], axis=0)
      features = {'batch_preprocessed': images_concat}
      return (features, labels)
    return input_fn

  def define_loss(self, embeddings, labels, is_training):
    """See base class."""
    margin = self._config.triplet_semihard.margin
    loss = tf.contrib.losses.metric_learning.triplet_semihard_loss(
        labels=labels, embeddings=embeddings, margin=margin)
    # Stashed for define_eval_metric_ops.
    self._loss = loss
    if is_training and not self._config.use_tpu:
      tf.summary.scalar('training/triplet_semihard', loss)
    return loss

  def define_eval_metric_ops(self):
    """See base class."""
    return {'validation/triplet_semihard': tf.metrics.mean(self._loss)}
class MVTCNNpairsEstimator(MVTCNEstimator):
  """Multi-View TCN with npairs loss."""

  def __init__(self, config, logdir):
    super(MVTCNNpairsEstimator, self).__init__(config, logdir)

  def construct_input_fn(self, records, is_training):
    """See base class."""
    def input_fn(params):
      """Provides input to MVTCN models."""
      # On TPU the per-shard batch size is injected via params.
      if is_training and self._config.use_tpu:
        batch_size = params['batch_size']
      else:
        batch_size = self._batch_size
      # Npairs only needs one label per anchor/positive pair; the second
      # label tensor from the provider is discarded.
      (images_concat,
       npairs_labels,
       _,
       anchor_images,
       positive_images) = self._pairs_provider(
           records, is_training)(batch_size=batch_size)
      if is_training:
        self._collect_image_summaries(anchor_images, positive_images,
                                      images_concat)
      features = {'batch_preprocessed': images_concat}
      return (features, npairs_labels)
    return input_fn

  def define_loss(self, embeddings, labels, is_training):
    """See base class."""
    # The concatenated batch is [anchors; positives]; split it back into
    # the two embedding sets the npairs loss expects.
    embeddings_anchor, embeddings_positive = tf.split(embeddings, 2, axis=0)
    loss = tf.contrib.losses.metric_learning.npairs_loss(
        labels=labels, embeddings_anchor=embeddings_anchor,
        embeddings_positive=embeddings_positive)
    # Stashed for define_eval_metric_ops.
    self._loss = loss
    if is_training and not self._config.use_tpu:
      tf.summary.scalar('training/npairs', loss)
    return loss

  def define_eval_metric_ops(self):
    """See base class."""
    return {'validation/npairs': tf.metrics.mean(self._loss)}
| 37.63253
| 80
| 0.712822
|
4a036bd4968f9452bd3fa8442b7b1396ce560f7c
| 681
|
py
|
Python
|
pyUtils.py
|
tgadf/utils
|
e0176d4f8a5bed4ecc3b63cb4bf2ee1265840900
|
[
"MIT"
] | null | null | null |
pyUtils.py
|
tgadf/utils
|
e0176d4f8a5bed4ecc3b63cb4bf2ee1265840900
|
[
"MIT"
] | null | null | null |
pyUtils.py
|
tgadf/utils
|
e0176d4f8a5bed4ecc3b63cb4bf2ee1265840900
|
[
"MIT"
] | null | null | null |
# coding: utf-8
def getKeys(dct, merge=False):
    """
    Return the first key and the remaining keys of a dict (or the first
    element and remaining elements of a list).

    Input:
      > dct: a dict (or a list, whose elements are treated like keys)
      > merge: if True and dct is a dict, return ALL keys as a single list
    Output:
      > (first_key, [remaining_keys]); a flat key list when merge is True;
        None for a non-dict/non-list or empty argument
    """
    if not isinstance(dct, (dict, list)):
        print("Argument is not a dictionary")
        return None
    if isinstance(dct, list):
        keys = list(dct)
    else:
        keys = list(dct.keys())
        # merge applies only to dicts, matching the original behavior.
        if merge is True:
            return keys
    if not keys:
        # Bug fix: an empty dict/list previously raised IndexError here.
        print("Argument is empty")
        return None
    # The old len>1 / else branches were redundant: keys[1:] is [] anyway.
    return keys[0], keys[1:]
| 18.916667
| 45
| 0.48605
|
4a036d74921849a74fa40842ef8b3d63c59070ca
| 3,216
|
py
|
Python
|
spinoffs/oryx/oryx/core/kwargs_util_test.py
|
bourov/probability
|
1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2
|
[
"Apache-2.0"
] | 2
|
2020-12-17T20:43:24.000Z
|
2021-06-11T22:09:16.000Z
|
spinoffs/oryx/oryx/core/kwargs_util_test.py
|
bourov/probability
|
1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2
|
[
"Apache-2.0"
] | null | null | null |
spinoffs/oryx/oryx/core/kwargs_util_test.py
|
bourov/probability
|
1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2
|
[
"Apache-2.0"
] | 1
|
2020-10-22T21:09:22.000Z
|
2020-10-22T21:09:22.000Z
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Tests for tensorflow_probability.spinoffs.oryx.core.kwargs_util."""
from absl.testing import absltest
from oryx.core import kwargs_util
class KwargsUtilTest(absltest.TestCase):
  """Exercises kwargs_util.filter_kwargs / check_in_kwargs against callables
  with progressively richer keyword signatures."""

  def test_filter_kwargs(self):
    # filter_kwargs should keep only the entries the callee can accept.
    kwargs = dict(
        rng=1,
        training=True
    )
    def foo1(x, y):
      return x + y
    # No keyword parameters -> nothing survives the filter.
    self.assertDictEqual(
        kwargs_util.filter_kwargs(foo1, kwargs),
        {})
    def foo2(x, y, rng=None):
      del rng
      return x + y
    # Only the accepted 'rng' is kept.
    self.assertDictEqual(
        kwargs_util.filter_kwargs(foo2, kwargs),
        {'rng': 1})
    def foo3(x, y, rng=None, training=False):
      del rng, training
      return x + y
    # Both keywords accepted -> both kept.
    self.assertDictEqual(
        kwargs_util.filter_kwargs(foo3, kwargs),
        {'rng': 1, 'training': True})

  def test_filter_kwargs_accepts_all(self):
    # A **kwargs catch-all should let every entry through.
    kwargs = dict(
        rng=1,
        training=True
    )
    def foo1(x, y, **kwargs):
      del kwargs
      return x + y
    self.assertDictEqual(
        kwargs_util.filter_kwargs(foo1, kwargs),
        {'rng': 1, 'training': True})
    def foo2(x, y, training=False, **kwargs):
      del training, kwargs
      return x + y
    # Mixing a named keyword with **kwargs still passes everything.
    self.assertDictEqual(
        kwargs_util.filter_kwargs(foo2, kwargs),
        {'rng': 1, 'training': True})

  def test_filter_incomplete_kwargs(self):
    # Filtering with fewer kwargs than the callee accepts is fine:
    # only what is present gets forwarded.
    kwargs = dict(
        rng=1,
    )
    def foo1(x, y):
      return x + y
    self.assertDictEqual(
        kwargs_util.filter_kwargs(foo1, kwargs),
        {})
    def foo2(x, y, rng=None):
      del rng
      return x + y
    self.assertDictEqual(
        kwargs_util.filter_kwargs(foo2, kwargs),
        {'rng': 1})
    def foo3(x, y, rng=None, training=False):
      del rng, training
      return x + y
    # 'training' is accepted but absent from kwargs -> not invented.
    self.assertDictEqual(
        kwargs_util.filter_kwargs(foo3, kwargs),
        {'rng': 1})

  def test_check_kwargs(self):
    # check_in_kwargs reports whether a function accepts a given keyword.
    def foo1(x, y):
      return x + y
    self.assertFalse(
        kwargs_util.check_in_kwargs(foo1, 'rng'))
    self.assertFalse(
        kwargs_util.check_in_kwargs(foo1, 'training'))
    def foo2(x, y, rng=None):
      del rng
      return x + y
    self.assertTrue(
        kwargs_util.check_in_kwargs(foo2, 'rng'))
    self.assertFalse(
        kwargs_util.check_in_kwargs(foo2, 'training'))
    def foo3(x, y, rng=None, training=False):
      del rng, training
      return x + y
    self.assertTrue(
        kwargs_util.check_in_kwargs(foo3, 'rng'))
    self.assertTrue(
        kwargs_util.check_in_kwargs(foo3, 'training'))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  absltest.main()
| 29.236364
| 78
| 0.623134
|
4a036e1e289c160aa19be370aa52d9a47f04536b
| 1,222
|
py
|
Python
|
test.py
|
ankit-rajvanshi/online-banking-system
|
05fa5103512f91cf1903b577c2526ce592b489cf
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
ankit-rajvanshi/online-banking-system
|
05fa5103512f91cf1903b577c2526ce592b489cf
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
ankit-rajvanshi/online-banking-system
|
05fa5103512f91cf1903b577c2526ce592b489cf
|
[
"Apache-2.0"
] | null | null | null |
# Ad-hoc script that iterates the `withdrawal` and `deposit` collections of
# the `onlinebank` database on mlab.
# SECURITY NOTE(review): the MongoDB URI embeds a username and password in
# plain text; these credentials should be moved to configuration/environment.
from pymongo import MongoClient
# name='a'
#client = MongoClient("mongodb://aviformat:sweswe@ds155634.mlab.com:55634/onlinebank")
#db = client.onlinebank
#now = datetime.datetime.now()
#x = ntplib.NTPClient()
#ctime(fromtimestamp(x.request('europe.pool.ntp.org').tx_time))
#now=datetime.datetime.utcfromtimestamp(x.request('europe.pool.ntp.org').tx_time)
#now.ctime()
#print now.hour
# c = ntplib.NTPClient()
# response = c.request('europe.pool.ntp.org', version=3)
# ctime(response.tx_time)
client = MongoClient("mongodb://aviformat:sweswe@ds155634.mlab.com:55634/onlinebank")
db = client.onlinebank
cursor = db.withdrawal.find({})
# NOTE(review): each expression below reads a field and discards the value;
# it raises KeyError if the field is missing but otherwise has no effect.
# Presumably a placeholder for real processing — confirm intent.
for document in cursor:
    document['Transaction_id']
    document['Account_number']
    document['Description']
    document['Debit']
    document['Time'] #format Fri Nov 3 06:39:33 2017
    document['Employee_id']
# NOTE(review): a second client to the same database is created here; the
# existing `client`/`db` could be reused.
client = MongoClient("mongodb://aviformat:sweswe@ds155634.mlab.com:55634/onlinebank")
db = client.onlinebank
cursor2 = db.deposit.find({})
for document in cursor2:
    document['Transaction_id']
    document['Account_number']
    document['Description']
    document['Debit']
    document['Time'] #format Fri Nov 3 06:39:33 2017
    document['Employee_id']
| 28.418605
| 86
| 0.723404
|
4a036f402e51c6de97b572c976fbbdc222a79f07
| 10,344
|
py
|
Python
|
corehq/apps/couch_sql_migration/casedifftool.py
|
akyogi/commcare-hq
|
44c34634e1b54f566ca200f828ea2aa112f33aa4
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/couch_sql_migration/casedifftool.py
|
akyogi/commcare-hq
|
44c34634e1b54f566ca200f828ea2aa112f33aa4
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/couch_sql_migration/casedifftool.py
|
akyogi/commcare-hq
|
44c34634e1b54f566ca200f828ea2aa112f33aa4
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import os
import pdb
import signal
from contextlib import contextmanager, suppress
from django.db import close_old_connections
from django.db.utils import DatabaseError, InterfaceError
from dimagi.utils.chunked import chunked
from dimagi.utils.retry import retry_on
from corehq.form_processor.utils.general import set_local_domain_sql_backend_override
from corehq.util.log import with_progress_bar
from .casediff import (
diff_cases,
get_couch_cases,
global_diff_state,
make_result_saver,
should_diff,
)
from .casediffqueue import ProcessNotAllowed, get_casediff_state_path
from .couchsqlmigration import (
CouchSqlDomainMigrator,
get_main_forms_iteration_stop_date,
)
from .parallel import Pool
from .progress import MigrationStatus, get_couch_sql_migration_status
from .util import get_ids_from_string_or_file
# Module-level logger for case-diff progress and diagnostics.
log = logging.getLogger(__name__)
# Returned by `do_case_diffs` when undiffed ("pending") cases remain.
PENDING_WARNING = "Diffs pending. Run again with --cases=pending"
def get_migrator(domain, state_dir):
    """Return a `CouchSqlDomainMigrator` for ``domain`` with case diffing off.

    The migrator runs live if the recorded migration status is DRY_RUN.
    """
    migration_status = get_couch_sql_migration_status(domain)
    is_dry_run = migration_status == MigrationStatus.DRY_RUN
    # Set backend for CouchSqlDomainMigrator._check_for_migration_restrictions
    set_local_domain_sql_backend_override(domain)
    return CouchSqlDomainMigrator(
        domain,
        state_dir,
        live_migrate=is_dry_run,
        case_diff="none",
    )
def do_case_diffs(migrator, cases, stop, batch_size):
    """Diff cases and persist the results.

    :param migrator: CouchSqlDomainMigrator instance (see `get_migrator`).
    :param cases: subset of cases to diff; all cases if `None`. Accepted
        values:
        - 'pending': clear out in-process diffs
        - 'with-diffs': re-diff cases that previously had diffs
        - a space-delimited list of case ids
        - a path to a file containing one case id per line; the path must
          begin with / or ./
    :param stop: if false, diff in parallel worker processes; if true,
        diff in the main process and drop into a pdb session when the
        first batch of cases with diffs is encountered.
    :returns: `PENDING_WARNING` when all cases were requested and some
        remain pending, otherwise `None`.
    """
    tool = CaseDiffTool(migrator, stop, batch_size)
    with migrator.counter, migrator.stopper:
        tool.diff_cases(cases)
    all_cases_requested = cases is None
    if all_cases_requested and tool.should_diff_pending():
        return PENDING_WARNING
class CaseDiffTool:
    """A multi-process case diff tool

    This tool performs case diffs but does not save the results.
    See also `do_case_diffs`.
    """
    def __init__(self, migrator, stop=False, batch_size=100):
        # migrator: CouchSqlDomainMigrator providing domain/statedb/stopper.
        # stop: when true, diff in-process and break into pdb on diffs.
        # batch_size: number of case ids diffed per batch.
        self.migrator = migrator
        self.domain = migrator.domain
        self.statedb = migrator.statedb
        self.stop = stop
        self.batch_size = batch_size
        # Determine the live-migration cutoff date, caching it on the
        # stopper so subsequent instances reuse the same value.
        if not migrator.live_migrate:
            cutoff_date = None
        elif hasattr(migrator.stopper, "stop_date"):
            cutoff_date = migrator.stopper.stop_date
        else:
            cutoff_date = get_main_forms_iteration_stop_date(self.statedb)
            migrator.stopper.stop_date = cutoff_date
        self.cutoff_date = cutoff_date
        self.lock_out_casediff_process()
    def lock_out_casediff_process(self):
        # Adopt state from a previous casediff process (if any) and mark
        # the statedb so that process cannot run concurrently with us.
        if not self.statedb.get(ProcessNotAllowed.__name__):
            state_path = get_casediff_state_path(self.statedb.db_filepath)
            if os.path.exists(state_path):
                self.statedb.clone_casediff_data_from(state_path)
            self.statedb.set(ProcessNotAllowed.__name__, True)
    def diff_cases(self, cases=None):
        # Diff the requested cases and save each batch's results.
        log.info("case diff cutoff date = %s", self.cutoff_date)
        with self.migrator.counter('diff_cases', 'CommCareCase.id') as add_cases:
            save_result = make_result_saver(self.statedb, add_cases)
            for data in self.iter_case_diff_results(cases):
                save_result(data)
    def iter_case_diff_results(self, cases):
        # Dispatch on the `cases` selector; see `do_case_diffs` for values.
        if cases is None:
            return self.resumable_iter_diff_cases()
        if cases == "pending":
            return self.iter_diff_cases(self.get_pending_cases())
        if cases == "with-diffs":
            return self.iter_diff_cases_with_diffs()
        case_ids = get_ids_from_string_or_file(cases)
        return self.iter_diff_cases(case_ids, log_cases=True)
    def resumable_iter_diff_cases(self):
        # Iterate all case ids with resume support, registering each batch
        # as "pending" in the statedb before it is diffed.
        def diff_batch(case_ids):
            case_ids = list(case_ids)
            statedb.add_cases_to_diff(case_ids)  # add pending cases
            return case_ids
        statedb = self.statedb
        case_ids = self.migrator._get_resumable_iterator(
            ['CommCareCase.id'],
            progress_name="Diff",
            offset_key='CommCareCase.id',
        )
        return self.iter_diff_cases(case_ids, diff_batch)
    def iter_diff_cases_with_diffs(self):
        # Re-diff only the cases that previously produced diffs.
        count = self.statedb.count_case_ids_with_diffs()
        cases = self.statedb.iter_case_ids_with_diffs()
        cases = with_progress_bar(cases, count, prefix="Cases with diffs", oneline=False)
        return self.iter_diff_cases(cases)
    def iter_diff_cases(self, case_ids, batcher=None, log_cases=False):
        # Yield diff result data for `case_ids` in batches. Parallel
        # worker processes are used unless `self.stop` is set, in which
        # case diffs run in-process and pdb is entered on the first batch
        # containing diffs.
        def list_or_stop(items):
            if self.is_stopped():
                # NOTE(review): raising StopIteration from the batcher is
                # presumably meant to end `chunked` early; under PEP 479 a
                # StopIteration raised inside a generator becomes
                # RuntimeError — confirm dimagi.utils.chunked handles this.
                raise StopIteration
            return list(items)
        batches = chunked(case_ids, self.batch_size, batcher or list_or_stop)
        if not self.stop:
            yield from self.pool.imap_unordered(load_and_diff_cases, batches)
            return
        stop = [1]
        with global_diff_state(*self.initargs), suppress(pdb.bdb.BdbQuit):
            for batch in batches:
                data = load_and_diff_cases(batch, log_cases=log_cases)
                yield data
                diffs = [(kind, case_id, diffs) for kind, case_id, diffs in data.diffs if diffs]
                if diffs:
                    log.info("found cases with diffs:\n%s", format_diffs(diffs))
                    if stop:
                        pdb.set_trace()
    def is_stopped(self):
        # True once a clean break (Ctrl+C) has been requested.
        return self.migrator.stopper.clean_break
    def get_pending_cases(self):
        # Case ids that were registered for diffing but never diffed.
        count = self.statedb.count_undiffed_cases()
        if not count:
            return []
        pending = self.statedb.iter_undiffed_case_ids()
        return with_progress_bar(
            pending, count, prefix="Pending case diffs", oneline=False)
    # Alias: a truthy result means pending diffs remain (see do_case_diffs).
    should_diff_pending = get_pending_cases
    @property
    def pool(self):
        # Fresh worker pool; each worker runs `init_worker` with `initargs`.
        return Pool(
            processes=os.cpu_count() * 2,
            initializer=init_worker,
            initargs=self.initargs,
        )
    @property
    def initargs(self):
        # Arguments for `init_worker` / `global_diff_state`.
        return self.domain, self.statedb.get_no_action_case_forms(), self.cutoff_date
def load_and_diff_cases(case_ids, log_cases=False):
    """Load the couch cases eligible for diffing and diff them (with retry)."""
    eligible = {}
    for case in get_couch_cases(case_ids):
        if should_diff(case):
            eligible[case.case_id] = case.to_json()
    if log_cases:
        skipped = [case_id for case_id in case_ids if case_id not in eligible]
        if skipped:
            log.info("skipping cases modified since cutoff date: %s", skipped)
    return diff_cases_with_retry(eligible, log_cases=log_cases)
def _close_connections(err):
    """Log the database error, close stale connections, and signal a retry."""
    err_name = type(err).__name__
    log.warning("retry diff cases on %s: %s", err_name, err)
    close_old_connections()
    # Always retry after refreshing the connections.
    return True
@retry_on(DatabaseError, InterfaceError, should_retry=_close_connections)
def diff_cases_with_retry(*args, **kw):
    """Same as `diff_cases`, retried after Django db connection errors."""
    return diff_cases(*args, **kw)
def iter_sql_cases_with_sorted_transactions(domain):
    """Yield ids of SQL cases rebuilt by the sort-transactions rebuild.

    Queries each partitioned database for cases having a transaction whose
    details mention `SortTransactionsRebuild._REASON`. Ids are de-duplicated
    per database (not across databases).
    """
    from corehq.form_processor.models import CommCareCaseSQL, CaseTransaction
    from corehq.sql_db.util import get_db_aliases_for_partitioned_query
    from .rebuildcase import SortTransactionsRebuild
    # Only the table names (from Django model meta) are interpolated into
    # the SQL; domain and reason are passed as query parameters.
    sql = f"""
        SELECT cx.case_id
        FROM {CommCareCaseSQL._meta.db_table} cx
        INNER JOIN {CaseTransaction._meta.db_table} tx ON cx.case_id = tx.case_id
        WHERE cx.domain = %s AND tx.details LIKE %s
    """
    reason = f'%{SortTransactionsRebuild._REASON}%'
    for dbname in get_db_aliases_for_partitioned_query():
        with CommCareCaseSQL.get_cursor_for_partition_db(dbname) as cursor:
            cursor.execute(sql, [domain, reason])
            yield from iter(set(case_id for case_id, in cursor.fetchall()))
def format_diffs(json_diffs, changes=False):
    """Render (kind, doc_id, diffs) triples as a human-readable report.

    Documents are sorted by doc id and each diff by (diff_type, path).
    Long values are shown on separate -/+ lines; short ones inline.
    When `changes` is true, the first diff's reason is shown in the header.
    """
    def render_doc(kind, doc_id, diffs):
        header_suffix = diffs[0].reason if changes else ''
        yield f"{kind} {doc_id} {header_suffix}"
        for diff in sorted(diffs, key=lambda d: (d.diff_type, d.path)):
            is_wide = len(repr(diff.old_value) + repr(diff.new_value)) > 60
            if is_wide:
                yield f"  {diff.diff_type} {list(diff.path)}"
                yield f"    - {diff.old_value!r}"
                yield f"    + {diff.new_value!r}"
            else:
                yield (
                    f"  {diff.diff_type} {list(diff.path)}"
                    f" {diff.old_value!r} -> {diff.new_value!r}"
                )
    out = []
    for kind, doc_id, diffs in sorted(json_diffs, key=lambda item: item[1]):
        out.extend(render_doc(kind, doc_id, diffs))
    return "\n".join(out)
def init_worker(domain, *args):
    """Initialize a forked diff worker process.

    Resets database connections inherited from the parent, installs a
    SIGINT handler that requests a clean break on the first Ctrl+C (and
    aborts on the second), and enters the global diff state.
    """
    def on_break(signum, frame):
        nonlocal clean_break
        # Second Ctrl+C aborts immediately; first requests a clean break.
        if clean_break:
            raise KeyboardInterrupt
        print("clean break... (Ctrl+C to abort)")
        clean_break = True
    clean_break = False
    reset_django_db_connections()
    reset_couchdb_connections()
    signal.signal(signal.SIGINT, on_break)
    set_local_domain_sql_backend_override(domain)
    return global_diff_state(domain, *args)
def reset_django_db_connections():
    """Discard db connection objects inherited by a forked worker.

    `db.connections.close_all()` cannot be used here because it results in
    "InterfaceError: connection already closed". Per psycopg2's
    connection_type.c, a connection is closed only by the process that
    created it — with multiprocessing we must not close a connection
    belonging to another process — so the handles are deleted instead.
    See https://github.com/psycopg/psycopg2/blob/master/psycopg/connection_type.c
    """
    from django import db
    for db_alias in list(db.connections):
        try:
            del db.connections[db_alias]
        except AttributeError:
            # No connection object exists for this alias; nothing to drop.
            pass
def reset_couchdb_connections():
    """Close couchdb HTTP sessions inherited from the parent process."""
    from couchdbkit.ext.django.loading import CouchdbkitHandler
    databases = CouchdbkitHandler.__shared_state__["_databases"]
    for entry in databases.values():
        if isinstance(entry, tuple):
            server = entry[0]
        else:
            server = entry.server
        server.cloudant_client.r_session.close()
| 37.208633
| 96
| 0.678751
|
4a036f65fea9ddc4ddfa31904163cfb0504ead29
| 2,376
|
py
|
Python
|
wrappers/python/examples/opencv_viewer_example.py
|
NobuoTsukamoto/librealsense
|
bc0910f8ba3c33307ff247a29dd2b9e9ef1b269d
|
[
"Apache-2.0"
] | 5
|
2021-09-23T13:31:32.000Z
|
2021-09-28T05:41:09.000Z
|
wrappers/python/examples/opencv_viewer_example.py
|
NobuoTsukamoto/librealsense
|
bc0910f8ba3c33307ff247a29dd2b9e9ef1b269d
|
[
"Apache-2.0"
] | null | null | null |
wrappers/python/examples/opencv_viewer_example.py
|
NobuoTsukamoto/librealsense
|
bc0910f8ba3c33307ff247a29dd2b9e9ef1b269d
|
[
"Apache-2.0"
] | 2
|
2021-01-25T01:18:47.000Z
|
2021-03-18T06:44:07.000Z
|
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
###############################################
## Open CV and Numpy integration ##
###############################################
import pyrealsense2 as rs
import numpy as np
import cv2
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# L500 product line streams color at 960x540 here; other lines use 640x480.
if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
try:
    # Runs until interrupted (e.g. Ctrl+C); cleanup happens in `finally`.
    while True:
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        depth_colormap_dim = depth_colormap.shape
        color_colormap_dim = color_image.shape
        # If depth and color resolutions are different, resize color image to match depth image for display
        if depth_colormap_dim != color_colormap_dim:
            resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
            images = np.hstack((resized_color_image, depth_colormap))
        else:
            images = np.hstack((color_image, depth_colormap))
        # Show images; waitKey(1) services the HighGUI event loop so the
        # window actually repaints each frame.
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('RealSense', images)
        cv2.waitKey(1)
finally:
    # Stop streaming
    pipeline.stop()
| 35.462687
| 141
| 0.683081
|
4a036f7daf8fa04e85b07b93c323326f13735458
| 1,401
|
py
|
Python
|
servers/python/tournament_of_lulz/modules/tournament/tests/test_model_tournament.py
|
xaroth8088/tournament-of-lulz
|
241bc15c0697f0e3ffabbd6ff48203915f79fddb
|
[
"MIT"
] | null | null | null |
servers/python/tournament_of_lulz/modules/tournament/tests/test_model_tournament.py
|
xaroth8088/tournament-of-lulz
|
241bc15c0697f0e3ffabbd6ff48203915f79fddb
|
[
"MIT"
] | null | null | null |
servers/python/tournament_of_lulz/modules/tournament/tests/test_model_tournament.py
|
xaroth8088/tournament-of-lulz
|
241bc15c0697f0e3ffabbd6ff48203915f79fddb
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import patch
from tournament_of_lulz.modules.tournament.model_tournament import ModelTournament
class ModelTournamentTest(unittest.TestCase):
    """Unit tests for ModelTournament."""

    def setUp(self):
        self.model = ModelTournament(None)

    @patch('tournament_of_lulz.modules.tournament.model_tournament.insert')
    @patch('tournament_of_lulz.modules.tournament.model_tournament.fetchall')
    def test_load_normal(self, mock_fetchall, mock_insert):
        """create_new_tournament loads images from the db and inserts once."""
        db_rows = [
            (
                '1', 'f4176acb7d90e0bfef404857c2a5d2a9', 'http://i.imgur.com/IHSU4Bh.png',
                'http://imgur.com/gallery/IHSU4Bh', 'http://i.imgur.com/IHSU4Bhm.png',
                'Communication is key to any successful relationship.', 1500, 350, 0.06
            ),
            (
                '2', '499a2c346bc66480982b464d3433a983', 'http://i.imgur.com/0DXJ8BS.png',
                'http://imgur.com/gallery/0DXJ8BS', 'http://i.imgur.com/0DXJ8BSm.png', 'An actual unpopular opinion.',
                1500, 350, 0.06
            )
        ]
        mock_fetchall.return_value = db_rows
        # Images are unset until a tournament is created.
        self.assertIsNone(self.model.images)
        self.model.create_new_tournament(2, None)
        # One insert records the tournament; both db rows become images.
        self.assertEqual(1, mock_insert.call_count)
        self.assertEqual(2, len(self.model.images))
| 35.923077
| 118
| 0.635261
|
4a036f853e6915b86e1fba923c38e1b1592a7dc0
| 9,882
|
py
|
Python
|
mbf_read/mbf_read.py
|
DLS-Controls-Private-org/pyMBF_read
|
04ed2630b2e7edb2c42c084a7a904b222c2b2176
|
[
"MIT"
] | null | null | null |
mbf_read/mbf_read.py
|
DLS-Controls-Private-org/pyMBF_read
|
04ed2630b2e7edb2c42c084a7a904b222c2b2176
|
[
"MIT"
] | null | null | null |
mbf_read/mbf_read.py
|
DLS-Controls-Private-org/pyMBF_read
|
04ed2630b2e7edb2c42c084a7a904b222c2b2176
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import argparse
import socket
import struct
class MBF_mem():
    """Client for the MBF (multi-bunch feedback) memory/detector socket.

    Device metadata (hostname, port, bunch count) is obtained through either
    the Tango or the EPICS control-system layer; bulk data is then read over
    a raw TCP socket using the MBF text command protocol.
    """
    # Mapping for decoding memory readback sample types (header format field).
    sample_types = {0: np.int16, 1: np.float32, 2: np.complex64}
    def __init__(self, device_name, layer='epics'):
        """Connect to an MBF system.

        Parameters
        ----------
        device_name : str
            The Tango or EPICS name of the MBF system.
        layer : str
            Which layer is used to get information on the system
            (hostname, port, bunches). Either 'epics' or 'tango'.
        """
        layer = layer.lower()
        self.layer = layer
        if layer == 'tango':
            import PyTango
        elif layer == 'epics':
            from cothread import catools
        if layer == 'tango':
            dev_tango = PyTango.DeviceProxy(device_name)
            self.dev_tango = dev_tango
            self.bunch_nb = dev_tango.BUNCHES
            # HOSTNAME is exposed as a NUL-padded character array.
            hostname_l = dev_tango.HOSTNAME
            hostname_tmp = "".join(map(chr, hostname_l))
            hostname = hostname_tmp.rstrip("\0")
            port = dev_tango.SOCKET
        elif layer == 'epics':
            self.bunch_nb = catools.caget(device_name + ":INFO:BUNCHES")
            hostname = catools.caget(device_name + ":INFO:HOSTNAME",
                datatype = catools.DBR_CHAR_STR)
            port = catools.caget(device_name + ":INFO:SOCKET")
        self.device_name = device_name
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((hostname, port))
        # Buffered file object wrapping the socket (read/write, binary).
        self.s = s.makefile('rwb')
    def get_turn_min_max(self):
        """Return (min_turn, max_turn) offsets readable around the trigger,
        derived from the memory runout setting."""
        if self.layer == 'tango':
            runout_ = self.dev_tango.MEM_RUNOUT_S
        elif self.layer == 'epics':
            from cothread import catools
            runout_ = catools.caget(self.device_name + ":MEM:RUNOUT_S")
        # Runout enum index -> fraction of the 2**29-sample memory after trigger.
        runout = [0.125, 0.25, 0.5, 0.75, 255./256][runout_]
        min_turn = np.ceil(((runout-1)*2.**29)/self.bunch_nb)
        max_turn = np.floor((runout*2.**29)/self.bunch_nb)
        return min_turn, max_turn
    def get_turns_max(self, decimate):
        """Return the maximum number of turns readable at `decimate`."""
        return ((2**29)//self.bunch_nb)//decimate
    def get_max_decimate(self):
        """Return the largest usable decimation factor for this bunch count,
        limited by the server-side read buffer size."""
        READ_BUFFER_BYTES = 2**20-64
        sizeof_uint32 = 4
        buffer_size = READ_BUFFER_BYTES / sizeof_uint32 - self.bunch_nb
        return int(np.ceil((1.*buffer_size) / self.bunch_nb) - 1)
    def read_mem_avg(self, turns, offset=0, channel=None, decimate=None,
                     tune=None, lock=None, verbose=False):
        """Like `read_mem`, but average the samples turn-by-turn, returning
        one averaged buffer (both channels interleaved when channel is None)."""
        d = self.read_mem(turns, offset=offset, channel=channel,
                          decimate=decimate, tune=tune, lock=lock, verbose=verbose)
        n = np.size(d)
        out_buffer_size = self.bunch_nb
        if channel is None:
            # Two channels are returned, so each "row" is twice as long.
            out_buffer_size *= 2
        N = n//out_buffer_size
        d.shape = (N, out_buffer_size)
        return d.mean(0)
    # Sends the given command and checks the response for success. If an error
    # code is returned an exception is raised.
    def __send_command(self, command, verbose):
        if verbose:
            print("cmd_str:", command)
        out_bytes = (command + '\n').encode()
        self.s.write(out_bytes)
        self.s.flush()
        # First response byte is a status code; zero means success.
        status = bytearray(self.s.read(1))
        if status[0] != 0:
            error = self.s.readline()
            raise NameError((status + error[:-1]).decode())
    def read_mem(self, turns, offset=0, channel=None, bunch=None,
                 decimate=None, tune=None, lock=None, verbose=False):
        """\
        Reads out captured memory data from the MBF system.

        Parameters
        ----------
        turns : int
            Number of samples to read (not the number of turns if decimate is used).
        offset : int
            Offset in turns from the trigger point from which to start returning data.
            This can be positive or negative.
        channel : int or None
            Channel number (0 or 1).
            If None, the returned array has two columns (first dimension is 2),
            one for each captured channel.
        bunch : int or None
            Readout of a specific bunch instead of a complete turns.
        decimate : int or None
            Bunch-by-bunch binned averaging of data. Cannot be combined with 'bunch'.
        tune : float or None
            Data will be frequency shifted by the given tune (in units of rotations
            per machine revolution). Cannot be combined with 'bunch'.
        lock : float or None
            Lock the readout channel to ensure that memory capture is not armed and is
            not retriggered during readout.
            The value is a timeout in seconds, if the memory cannot be locked within
            this time the readout will fail.
            If None, doesn't try to lock the channel.
        verbose : bool
            Activates verbose mode
        Returns
        -------
        d : 2d array of int16, float32 or complex64 with shape (channel_nb, samples)
            Type and shape depends on input arguments.
        Raises
        ------
        NameError
            if MBF returns an error.
        """
        # Build the "M" memory-readout command with its option letters.
        cmd_str = "M{}FO{}".format(int(turns), int(offset))
        if channel is not None:
            cmd_str += "C{}".format(channel)
        if bunch is not None:
            cmd_str += "B{}".format(int(bunch))
        if decimate is not None:
            cmd_str += "D{}".format(int(decimate))
        if tune is not None:
            cmd_str += "T{}".format(float(tune))
        if lock is not None:
            cmd_str += "L"
            if lock > 0:
                # Lock wait timeout is sent in milliseconds.
                cmd_str += "W{:.0f}".format(lock*1000)
        self.__send_command(cmd_str, verbose)
        # First read and decode the header
        header = struct.unpack('<IHH', self.s.read(8))
        samples = header[0]
        channels = header[1]
        sample_format = header[2]
        data_type = self.sample_types[sample_format]
        length = samples * channels * data_type().itemsize
        if verbose:
            print("samples:", samples)
            print("ch_per_sample", channels)
            print("format", sample_format)
            print("expected_msg_len", length)
        data = self.s.read(length)
        return np.frombuffer(data, dtype = data_type).reshape(-1, channels).T
    def read_det(self, channel=0, lock=None, verbose=False):
        """\
        Reads out the currently captured detectors for the given axis. If no axis is
        specified, the default is 0.

        Parameters
        ----------
        channel : int
            Channel number (0 or 1).
        lock : float or None
            Locks the detector readout channel and throws an error after
            lock seconds if the channel cannot be locked.
            If None, doesn't try to lock the channel.
        verbose : bool
            Activates verbose mode
        Returns
        -------
        d : ndarray of complex128 with shape (nb_detec, N_samples)
            detector(s) data
        s : array
            Frequency scale in units of cycles per turn.
        t : array
            Timebase scale in units of turns.
        Raises
        ------
        NameError
            if MBF returns an error.
        """
        cmd_str = "D{}FSLT".format(int(channel))
        if lock is not None:
            cmd_str += "L"
            if lock > 0:
                # Lock wait timeout is sent in milliseconds.
                cmd_str += "W%d" % (lock * 1000)
        self.__send_command(cmd_str, verbose)
        # First read the header
        header = struct.unpack('<BBHII', self.s.read(12))
        # Get header data
        det_count = header[0]
        det_mask = header[1]
        compensation_delay = header[2]
        sample_count = header[3]
        bunch_count = header[4]
        if verbose:
            print("N: ", sample_count)
            print("Nb of detectors: ", det_count)
            print("bunches:", bunch_count)
            print("Compensation delay:", compensation_delay)
        # First read the detector data
        data = self.s.read(sample_count * det_count * 8)
        d = np.frombuffer(data, dtype=np.int32)
        d.shape = (sample_count, det_count, 2)
        # Compute conjugate of detector data to compensate for detector
        d_cmpl = d[:, :, 0] - 1j*d[:, :, 1]
        # Scale raw 32-bit fixed point values to [-1, 1).
        d_cmpl *= 2**-31
        # Next the frequency scale
        data = self.s.read(sample_count * 8)
        s = np.frombuffer(data, dtype=np.uint64)
        s = bunch_count * s.astype(np.float64) * 2**-48
        # Finally the timebase
        data = self.s.read(sample_count * 4)
        t = np.frombuffer(data, dtype=np.uint32)
        # Compute corrected data
        group_delay = 2.0 * np.pi * compensation_delay / bunch_count
        correction = np.exp(1j * group_delay * s)
        d_cmpl *= correction[:, np.newaxis]
        return (d_cmpl.T, s, t)
def read_mem(device_name, turns, offset=0, channel=None, layer='epics',
             **kargs):
    """Convenience wrapper: connect to `device_name` and read memory data.

    See `MBF_mem.read_mem` for the parameter descriptions.
    """
    return MBF_mem(device_name, layer).read_mem(turns, offset, channel, **kargs)
def read_det(device_name, channel=0, layer='epics', **kargs):
    """Convenience wrapper: connect to `device_name` and read detector data.

    See `MBF_mem.read_det` for the parameter descriptions.
    """
    return MBF_mem(device_name, layer).read_det(channel, **kargs)
# Public API of this module.
__all__ = ['MBF_mem', 'read_mem', 'read_det']
# Command-line entry point: read the longest available decimated memory
# stretch from the given device and print it.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Read memory buffer.")
    parser.add_argument("-c", default=None, type=int,
                        help="Channel number", dest="channel")
    parser.add_argument("-d", type=str, required=True,
                        help="TMBF device name (EPICS or Tango)", dest="device_name")
    parser.add_argument("-l", default="epics", type=str,
                        help="Layer: 'tango' or 'epics'", dest="layer")
    parser.add_argument("-t", default=None, type=float,
                        help="Frequency for homodyne detection (in SR turns units)",
                        dest="tune")
    args = parser.parse_args()
    device_name = args.device_name
    layer = args.layer
    tune = args.tune
    channel = args.channel
    mbf = MBF_mem(device_name, layer=layer)
    bunch = None
    # Use the coarsest decimation, the most turns readable at that
    # decimation, and start from the earliest turn before the trigger.
    decimate = mbf.get_max_decimate()
    turns = mbf.get_turns_max(decimate)
    min_turn, _ = mbf.get_turn_min_max()
    offset = min_turn
    data = mbf.read_mem(turns, offset, channel, bunch, decimate, tune)
    print(data)
| 30.785047
| 80
| 0.617486
|
4a036fcc7580adc8169e67d3bc7d84632d8ceb59
| 1,505
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractDjurasicoBlogspotCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractDjurasicoBlogspotCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractDjurasicoBlogspotCom.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractDjurasicoBlogspotCom(item):
'''
Parser for 'djurasico.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Common Sense of a Dukes daughter', 'Common Sense of a Duke\'s Daughter', 'translated'),
('Common Sense of a Duke\'s Daughter', 'Common Sense of a Duke\'s Daughter', 'translated'),
('Koushaku Reijou no Tashinami', 'Common Sense of a Duke\'s Daughter', 'translated'),
('Koushaku Reijou no Tashinami novel', 'Common Sense of a Duke\'s Daughter', 'translated'),
('The adventurer who received undesired immortality', 'Unwilling Undead Adventurer', 'translated'),
('Garudeina Oukoku Koukoku Ki', 'Garudeina Oukoku Koukoku Ki', 'translated'),
('Maidens grand summoning', 'Maidens grand summoning', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| 55.740741
| 135
| 0.531561
|
4a037025d46a78349f695beae42b1f53b6ec24a0
| 15,516
|
py
|
Python
|
mlir/lib/Bindings/Python/mlir/dialects/linalg/opdsl/lang/emitter.py
|
keryell/llvm-2
|
4dc23a26d1bd6ced23969c0525dedbddf8c6fddc
|
[
"Apache-2.0"
] | null | null | null |
mlir/lib/Bindings/Python/mlir/dialects/linalg/opdsl/lang/emitter.py
|
keryell/llvm-2
|
4dc23a26d1bd6ced23969c0525dedbddf8c6fddc
|
[
"Apache-2.0"
] | null | null | null |
mlir/lib/Bindings/Python/mlir/dialects/linalg/opdsl/lang/emitter.py
|
keryell/llvm-2
|
4dc23a26d1bd6ced23969c0525dedbddf8c6fddc
|
[
"Apache-2.0"
] | null | null | null |
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from typing import Any, Dict, Sequence
from mlir.ir import *
from mlir.dialects import linalg
from mlir.dialects import std
# TODO: resolve name collision for Linalg functionality that is injected inside
# the _mlir.dialects.linalg directly via pybind.
from _mlir.dialects.linalg import fill_builtin_region
from .scalar_expr import *
from .config import *
__all__ = [
"emit_generic_structured_op",
"emit_named_structured_op",
]
def isa(cls : Type, ty : Type):
  """Return True if `ty` converts to `cls`, i.e. `cls(ty)` succeeds."""
  # The mlir.ir type-casting constructors raise ValueError on a mismatch.
  result = True
  try:
    cls(ty)
  except ValueError:
    result = False
  return result
def prepare_common_structured_op(op_config: LinalgStructuredOpConfig,
                                 *ins: Value,
                                 outs: Sequence[Value],
                                 captures: Sequence[Value]):
  """Validate emitter arguments and build the shared op attributes.

  Common front-end for `emit_generic_structured_op` and
  `emit_named_structured_op`: checks arity of ins/outs/captures against
  `op_config`, infers missing outputs, and computes the type mapping,
  indexing maps and iterator types attributes.

  Raises ValueError on arity/type mismatches.
  """
  all_arg_defs = op_config.ordered_tensor_args
  in_arg_defs = [arg for arg in all_arg_defs if arg.usage == "input"]
  out_arg_defs = [arg for arg in all_arg_defs if arg.usage == "output"]
  capture_arg_defs = op_config.ordered_capture_args
  # Verify outs and captures are sequences.
  if not isinstance(outs, Sequence):
    raise ValueError(f"Expected named argument outs to have type Sequence "
                     f"but got {type(outs)}")
  if not isinstance(captures, Sequence):
    # BUGFIX: previously reported type(outs) instead of type(captures).
    raise ValueError(f"Expected named argument captures to have type Sequence "
                     f"but got {type(captures)}")
  # Arity validation.
  if len(ins) != len(in_arg_defs):
    raise ValueError(f"Expected {len(in_arg_defs)} inputs but got "
                     f"{len(ins)} for {op_config}")
  if outs and len(outs) != len(out_arg_defs):
    raise ValueError(f"Expected {len(out_arg_defs)} outputs but got "
                     f"{len(outs)} for {op_config}")
  if captures and len(captures) != len(capture_arg_defs):
    raise ValueError(f"Expected {len(capture_arg_defs)} captures but got "
                     f"{len(captures)} for {op_config}")
  outs, out_types = _infer_structured_outs(op_config, in_arg_defs, ins,
                                           out_arg_defs, outs)
  # Only ranked tensor outputs become op results (memref outs do not).
  result_types = [t for t in out_types if isa(RankedTensorType, t)]
  # Initialize the type dictionary with the predefined types.
  type_mapping = dict()  # type: Dict[str, Type]
  type_mapping["F32"] = F32Type.get()
  type_mapping["F64"] = F64Type.get()
  type_mapping["I32"] = IntegerType.get_signless(32)
  type_mapping["I64"] = IntegerType.get_signless(64)
  # Extract type vars for input/output based types.
  for arg_def, arg_element_type in zip(
      in_arg_defs + out_arg_defs,
      _get_shaped_element_types_from_values(*ins, *outs)):
    _add_type_mapping(arg_def.tensor_def.type_var.name, arg_element_type,
                      type_mapping)
  # Extract type vars for captures and compute capture argument mapping.
  capture_arg_mapping = dict()  # type: Dict[str, Value]
  for arg_def, capture_value in zip(capture_arg_defs, captures):
    _add_type_mapping(arg_def.capture_def.type_var.name, capture_value.type,
                      type_mapping)
    capture_arg_mapping[arg_def.capture_def.capture_name] = capture_value
  # Emit the generic op.
  # TODO: Support emission of pure memref form.
  indexing_maps_attr = ArrayAttr.get(
      [AffineMapAttr.get(am)
       # TODO: linalg verification does not currently allow symbols.
       # Compress them for now.
       for am in AffineMap.compress_unused_symbols(op_config.indexing_maps, Context.current)])
  iterator_types_attr = ArrayAttr.get(
      [StringAttr.get(s) for s in op_config.iterator_types])
  # TODO: Add support for sparse operands once there is a stable interface.
  sparse_attr = None
  return (all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types,
          type_mapping, capture_arg_mapping, indexing_maps_attr,
          iterator_types_attr, sparse_attr)
def emit_generic_structured_op(op_config: LinalgStructuredOpConfig,
                               *ins: Value,
                               outs: Sequence[Value] = (),
                               captures: Sequence[Value] = ()):
  """Emits a `linalg.generic` op implementing `op_config`.

  Builds the op, populates its body region from the config's scalar
  assignments, and returns the single result Value (or the full result
  list when there is more than one result).
  """
  (all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types, type_mapping,
   capture_arg_mapping, indexing_maps_attr, iterator_types_attr,
   sparse_attr) = prepare_common_structured_op(op_config, *ins, outs=outs,
                                               captures=captures)

  generic_op = linalg.GenericOp(
      result_tensors=result_types,
      inputs=ins,
      outputs=outs,
      indexing_maps=indexing_maps_attr,
      iterator_types=iterator_types_attr,
      doc=None,  # TODO: Make optional.
      library_call=None,  # TODO: Make optional.
      sparse=sparse_attr)  # TODO: Make optional.

  # Populate the body: one block argument per input/output element type.
  arg_names = _get_tensor_def_names(*in_arg_defs, *out_arg_defs)
  arg_types = _get_shaped_element_types_from_values(*ins, *outs)
  body_block = generic_op.regions[0].blocks.append(*arg_types)
  with InsertionPoint(body_block):
    builder = _BodyBuilder(type_mapping,
                           dict(zip(arg_names, body_block.arguments)),
                           capture_arg_mapping)
    for assignment in op_config.assignments:
      builder.assign(assignment)
    builder.yield_outputs(*_get_tensor_def_names(*out_arg_defs))

  return generic_op.result if len(result_types) == 1 else generic_op.results
def emit_named_structured_op(op_config: LinalgStructuredOpConfig,
                             op_name: str,
                             op_class_name: str,
                             *ins: Value,
                             outs: Sequence[Value] = (),
                             captures: Sequence[Value] = ()):
  """Emits the registered named linalg op `op_name` via builtin `op_class_name`.

  Returns the single result Value, or the full result list for multi-result
  ops. Raises NotImplementedError when the named form is unavailable.
  """
  (all_arg_defs, in_arg_defs, out_arg_defs, outs, result_types, type_mapping,
   capture_arg_mapping, indexing_maps_attr, iterator_types_attr,
   sparse_attr) = prepare_common_structured_op(op_config, *ins, outs=outs,
                                               captures=captures)

  # The named form requires both a registered operation and a generated
  # Python builder class.
  ctx = Context.current
  fully_qualified_name = 'linalg.' + op_name
  if (not ctx.is_registered_operation(fully_qualified_name) or
      op_class_name not in linalg.__dict__.keys()):
    raise NotImplementedError(
        f"Unknown named op_name / op_class_name: {op_name} / {op_class_name}")

  named_op = getattr(linalg, op_class_name)(ins, outs, result_types)
  linalgDialect = ctx.get_dialect_descriptor("linalg")
  fill_builtin_region(linalgDialect, named_op.operation)
  # Note: mlir-linalg-ods-yaml-gen.cpp uses a special linalg.memoized_indexing_maps
  # attribute that the non-yaml path does not. The non-yaml path hardcodes the
  # indexing_maps in C++ directly.
  named_op.operation.attributes[
      "linalg.memoized_indexing_maps"] = indexing_maps_attr
  # iterator_types are hardcoded in C++ both in the yaml and non-yaml path.

  return named_op.result if len(result_types) == 1 else named_op.results
class _BodyBuilder:
  """Constructs a structured op body by evaluating assignments.

  Each `assign` call emits IR for one ScalarAssign at the current insertion
  point and records the resulting Value; `yield_outputs` then terminates the
  block with a `linalg.yield` of the recorded values.
  """

  def __init__(self,
               type_mapping: Dict[str, Type],
               block_arg_mapping: Dict[str, Value],
               capture_arg_mapping: Dict[str, Value]):
    # Mapping of type-variable name -> concrete MLIR type.
    self.type_mapping = type_mapping
    # Mapping of tensor arg name -> body block argument.
    self.block_arg_mapping = block_arg_mapping
    # Mapping of capture name -> captured Value.
    self.capture_arg_mapping = capture_arg_mapping
    # Output arg name -> Value to be yielded; filled by `assign`.
    self.yield_mapping = dict()  # type: Dict[str, Value]

  def assign(self, assignment: ScalarAssign):
    """Evaluates `assignment.value` and records it for the output arg."""
    if assignment.arg in self.yield_mapping:
      raise ValueError(
          f"Multiple assignments to the same argument are forbidden: "
          f"{assignment}")
    self.yield_mapping[assignment.arg] = self.expression(assignment.value)

  def expression(self, expr: ScalarExpression) -> Value:
    """Recursively emits IR for `expr` and returns the resulting Value."""
    if expr.scalar_arg:
      try:
        return self.block_arg_mapping[expr.scalar_arg.arg]
      except KeyError:
        raise ValueError(f"Argument {expr.scalar_arg.arg} is not bound for "
                         f"this structured op.")
    elif expr.scalar_capture:
      try:
        return self.capture_arg_mapping[expr.scalar_capture.capture]
      except KeyError:
        raise ValueError(f"Capture {expr.scalar_capture.capture} is not bound for "
                         f"this structured op.")
    elif expr.scalar_const:
      return self.constant(expr.scalar_const.type_var.name,
                           expr.scalar_const.value)
    elif expr.scalar_index:
      return self.index(expr.scalar_index.dim)
    elif expr.scalar_apply:
      try:
        fn = getattr(self, f"_eval_{expr.scalar_apply.fn_name}")
      except AttributeError:
        raise ValueError(
            f"Function '{expr.scalar_apply.fn_name}' is not a known "
            "scalar body function")
      operand_values = [
          self.expression(operand) for operand in expr.scalar_apply.operands
      ]
      return fn(*operand_values)
    elif expr.symbolic_cast:
      operand_value = self.expression(expr.symbolic_cast.operand)
      return self.cast(expr.symbolic_cast.to_type.name, operand_value)
    raise NotImplementedError(f"Unimplemented scalar body expression: {expr}")

  def constant(self, type_var_name: str, value: Any) -> Value:
    """Materializes `value` as a std.constant of the type bound to the var."""
    try:
      # BUGFIX: was `self.type_mappings` (no such attribute), which raised
      # AttributeError instead of the intended ValueError.
      t = self.type_mapping[type_var_name]
    except KeyError:
      raise ValueError(f"Unbound type variable '{type_var_name}' ("
                       f"expected one of {self.type_mapping.keys()}")
    try:
      if _is_floating_point_type(t):
        return std.ConstantOp(t, FloatAttr.get(t, float(value))).result
      elif _is_integer_type(t):
        return std.ConstantOp(t, IntegerAttr.get(t, int(value))).result
    except ValueError:
      raise ValueError(f"Unable to cast value {value} to type {t}")
    raise NotImplementedError(f"Unimplemented constant type {t}")

  def index(self, dim: int) -> Value:
    """Emits `linalg.index` for iteration dimension `dim`."""
    dim_attr = IntegerAttr.get(IntegerType.get_signless(64), dim)
    return linalg.IndexOp(IndexType.get(), dim_attr).result

  def cast(self, type_var_name: str, operand: Value) -> Value:
    """Casts `operand` to the type bound to `type_var_name` (no-op if equal)."""
    try:
      # BUGFIX: was `self.type_mappings` (no such attribute).
      to_type = self.type_mapping[type_var_name]
    except KeyError:
      raise ValueError(f"Unbound type variable '{type_var_name}' ("
                       f"expected one of {self.type_mapping.keys()}")
    if operand.type == to_type:
      return operand
    if _is_integer_type(to_type):
      return self._cast_to_integer(to_type, operand)
    elif _is_floating_point_type(to_type):
      return self._cast_to_floating_point(to_type, operand)
    # BUGFIX: previously fell off the end and returned None for unsupported
    # target types; raise the same diagnostic the helpers use instead.
    raise ValueError(f"Unable to cast body expression from {operand.type} to "
                     f"{to_type}")

  def _cast_to_integer(self, to_type: Type, operand: Value) -> Value:
    """Casts float/index/integer `operand` to integer `to_type`."""
    to_width = IntegerType(to_type).width
    operand_type = operand.type
    if _is_floating_point_type(operand_type):
      return std.FPToSIOp(to_type, operand).result
    if _is_index_type(operand_type):
      return std.IndexCastOp(to_type, operand).result
    # Assume integer.
    from_width = IntegerType(operand_type).width
    if to_width > from_width:
      return std.SignExtendIOp(to_type, operand).result
    elif to_width < from_width:
      return std.TruncateIOp(to_type, operand).result
    raise ValueError(f"Unable to cast body expression from {operand_type} to "
                     f"{to_type}")

  def _cast_to_floating_point(self, to_type: Type, operand: Value) -> Value:
    """Casts integer/float `operand` to floating point `to_type`."""
    operand_type = operand.type
    if _is_integer_type(operand_type):
      return std.SIToFPOp(to_type, operand).result
    # Assume FloatType.
    to_width = _get_floating_point_width(to_type)
    from_width = _get_floating_point_width(operand_type)
    if to_width > from_width:
      return std.FPExtOp(to_type, operand).result
    elif to_width < from_width:
      return std.FPTruncOp(to_type, operand).result
    raise ValueError(f"Unable to cast body expression from {operand_type} to "
                     f"{to_type}")

  def yield_outputs(self, *output_names: str):
    """Terminates the block, yielding the assigned value for each output."""
    output_values = []
    for n in output_names:
      try:
        output_values.append(self.yield_mapping[n])
      except KeyError:
        raise ValueError(f"Body assignments do not assign all outputs: "
                         f"missing '{n}'")
    linalg.YieldOp(output_values)

  def _eval_add(self, lhs: Value, rhs: Value) -> Value:
    if _is_floating_point_type(lhs.type):
      return std.AddFOp(lhs.type, lhs, rhs).result
    if _is_integer_type(lhs.type) or _is_index_type(lhs.type):
      return std.AddIOp(lhs.type, lhs, rhs).result
    # BUGFIX: message was missing the f-prefix and printed "{lhs}" literally.
    raise NotImplementedError(f"Unsupported 'add' operand: {lhs}")

  def _eval_sub(self, lhs: Value, rhs: Value) -> Value:
    if _is_floating_point_type(lhs.type):
      return std.SubFOp(lhs.type, lhs, rhs).result
    if _is_integer_type(lhs.type) or _is_index_type(lhs.type):
      return std.SubIOp(lhs.type, lhs, rhs).result
    # BUGFIX: missing f-prefix.
    raise NotImplementedError(f"Unsupported 'sub' operand: {lhs}")

  def _eval_mul(self, lhs: Value, rhs: Value) -> Value:
    if _is_floating_point_type(lhs.type):
      return std.MulFOp(lhs.type, lhs, rhs).result
    if _is_integer_type(lhs.type) or _is_index_type(lhs.type):
      return std.MulIOp(lhs.type, lhs, rhs).result
    # BUGFIX: missing f-prefix.
    raise NotImplementedError(f"Unsupported 'mul' operand: {lhs}")
def _infer_structured_outs(op_config: LinalgStructuredOpConfig,
                           in_arg_defs: Sequence[TensorDefConfig],
                           ins: Sequence[Value],
                           out_arg_defs: Sequence[TensorDefConfig],
                           outs: Sequence[Value]):
  """Infers implicit outs and output types.

  Respects existing contents of outs if not empty.

  Returns:
    normalized outs, output types
  """
  # Inference of omitted outputs is not implemented; explicitly provided outs
  # are accepted verbatim, and only their types are derived here.
  if not outs:
    raise NotImplementedError(f"Output tensor inference not yet supported for "
                              "structured ops")
  out_types = [out.type for out in outs]
  return outs, out_types
def _get_shaped_element_types_from_values(*values: Value) -> Sequence[Type]:
  """Returns the element type of each ShapedType-typed value, in order."""
  element_types = []
  for value in values:
    try:
      shaped = ShapedType(value.type)
    except Exception as e:
      raise ValueError(f"Expected ShapedType but got {value}") from e
    element_types.append(shaped.element_type)
  return element_types
def _get_tensor_def_names(
    *tensor_def_configs: TensorDefConfig) -> Sequence[str]:
  """Returns the tensor name declared by each TensorDefConfig, in order."""
  return [config.tensor_def.tensor_name for config in tensor_def_configs]
def _add_type_mapping(name: str, type: Type, type_mapping: Dict[str, Type]):
if name in type_mapping:
if type_mapping[name] != type:
raise ValueError(f"Cannot overwrite type mapping {name} = "
f"{type_mapping[name]} by type {type}")
type_mapping[name] = type
def _is_floating_point_type(t: Type) -> bool:
  """Returns True when `t` is one of the handled float types (f64/f32/f16/bf16)."""
  # TODO: Create a FloatType in the Python API and implement the switch
  # there.
  return any(
      float_type.isinstance(t)
      for float_type in (F64Type, F32Type, F16Type, BF16Type))
def _is_integer_type(t: Type) -> bool:
  """Returns True when `t` is a builtin IntegerType."""
  return IntegerType.isinstance(t)
def _is_index_type(t: Type) -> bool:
  """Returns True when `t` is the builtin IndexType."""
  return IndexType.isinstance(t)
def _get_floating_point_width(t: Type) -> int:
  """Returns the bit width of floating point type `t` (bf16 reports 16)."""
  # TODO: Create a FloatType in the Python API and implement the switch
  # there.
  for float_type, width in ((F64Type, 64), (F32Type, 32), (F16Type, 16),
                            (BF16Type, 16)):
    if float_type.isinstance(t):
      return width
  raise NotImplementedError(f"Unhandled floating point type switch {t}")
| 40.617801
| 94
| 0.685808
|
4a03714c2456218846233702e7a635a019b3330f
| 43
|
py
|
Python
|
pyetherbalance/__init__.py
|
araa47/pyetherbalance-
|
4ac9470d9a94d02f81449c1c5c77b17773b90256
|
[
"MIT"
] | 9
|
2019-02-18T13:58:21.000Z
|
2021-12-13T19:04:41.000Z
|
pyetherbalance/__init__.py
|
araa47/pyetherbalance-
|
4ac9470d9a94d02f81449c1c5c77b17773b90256
|
[
"MIT"
] | 1
|
2022-01-18T06:59:20.000Z
|
2022-01-18T06:59:20.000Z
|
pyetherbalance/__init__.py
|
araa47/pyetherbalance-
|
4ac9470d9a94d02f81449c1c5c77b17773b90256
|
[
"MIT"
] | 3
|
2020-06-30T17:11:09.000Z
|
2021-12-13T19:09:03.000Z
|
from .pyetherbalance import PyEtherBalance
| 21.5
| 42
| 0.883721
|
4a0372c96b95025aac83ed5a39ab872e74cce63c
| 5,884
|
py
|
Python
|
python/GafferSceneUI/SetUI.py
|
PaulDoessel/gaffer-play
|
8b72dabb388e12424c230acfb0bd209049b01bd6
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferSceneUI/SetUI.py
|
PaulDoessel/gaffer-play
|
8b72dabb388e12424c230acfb0bd209049b01bd6
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferSceneUI/SetUI.py
|
PaulDoessel/gaffer-play
|
8b72dabb388e12424c230acfb0bd209049b01bd6
|
[
"BSD-3-Clause"
] | 1
|
2020-02-15T16:15:54.000Z
|
2020-02-15T16:15:54.000Z
|
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
Gaffer.Metadata.registerNode(
GafferScene.Set,
"description",
"""
Creates and edits sets of objects. Each set contains a list of paths
to locations within the scene. After creation, sets can be used
by the SetFilter to limit scene operations to only the members of
a particular set.
""",
plugs = {
"mode" : [
"description",
"""
Create mode creates a new set containing only the
specified paths. If a set with the same name already
exists, it is replaced.
Add mode adds the specified paths to an existing set,
keeping the paths already in the set. If the set does
not exist yet, this is the same as create mode.
Remove mode removes the specified paths from an
existing set. If the set does not exist yet, nothing
is done.
""",
"preset:Create", GafferScene.Set.Mode.Create,
"preset:Add", GafferScene.Set.Mode.Add,
"preset:Remove", GafferScene.Set.Mode.Remove,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"name" : [
"description",
"""
The name of the set that will be created or edited.
You can create multiple set names at once by separating them with spaces.
""",
"ui:scene:acceptsSetName", True,
],
"paths" : [
"description",
"""
The paths to be added to or removed from the set.
""",
"ui:scene:acceptsPaths", True,
"vectorDataPlugValueWidget:dragPointer", "objects",
],
"filter" : [
"description",
"""
A filter to define additional paths to be added to
or removed from the set.
""",
],
}
)
##########################################################################
# Right click menu for sets
# This is driven by metadata so it can be used for plugs on other
# nodes too.
##########################################################################
def __setValue( plug, value, *unused ) :

	# Sets `plug` to `value` inside an undo block so the edit is undoable.
	script = plug.ancestor( Gaffer.ScriptNode )
	with Gaffer.UndoContext( script ) :
		plug.setValue( value )
def __setsPopupMenu( menuDefinition, plugValueWidget ) :

	# Adds a "/Sets" section to the popup menu for plugs whose metadata
	# declares that they accept a set name (or a space separated list of
	# them), letting the user toggle each set name found upstream.

	plug = plugValueWidget.getPlug()
	if plug is None :
		return

	acceptsSetName = Gaffer.Metadata.value( plug, "ui:scene:acceptsSetName" )
	acceptsSetNames = Gaffer.Metadata.value( plug, "ui:scene:acceptsSetNames" )
	if not ( acceptsSetName or acceptsSetNames ) :
		return

	# A filter feeds potentially many nodes, so gather every node it drives.
	node = plug.node()
	if isinstance( node, GafferScene.Filter ) :
		nodes = [ o.node() for o in node["out"].outputs() ]
	else :
		nodes = [ node ]

	# Collect the set names visible on every incoming ScenePlug.
	setNames = set()
	with plugValueWidget.getContext() :
		for n in nodes :
			for scenePlug in n.children( GafferScene.ScenePlug ) :
				if scenePlug.direction() == scenePlug.Direction.In :
					setNames.update( str( s ) for s in scenePlug["setNames"].getValue() )

	if not setNames :
		return

	menuDefinition.prepend( "/SetsDivider", { "divider" : True } )

	with plugValueWidget.getContext() :
		if acceptsSetNames :
			currentNames = set( plug.getValue().split() )
		else :
			currentNames = { plug.getValue() }

	active = plug.settable() and not plugValueWidget.getReadOnly() and not Gaffer.readOnly( plug )
	# Prepend in reverse so the final menu is alphabetically ordered.
	for setName in reversed( sorted( setNames ) ) :

		isMember = setName in currentNames
		newNames = set( currentNames ) if acceptsSetNames else set()
		if isMember :
			newNames.discard( setName )
		else :
			newNames.add( setName )

		menuDefinition.prepend(
			"/Sets/%s" % setName,
			{
				"command" : functools.partial( __setValue, plug, " ".join( sorted( newNames ) ) ),
				"checkBox" : isMember,
				"active" : active,
			}
		)
# Keep a module-level reference to the connection so the popup menu hook
# stays alive for the lifetime of the module.
__setsPopupMenuConnection = GafferUI.PlugValueWidget.popupMenuSignal().connect( __setsPopupMenu )
##########################################################################
# Gadgets
##########################################################################
def __nodeGadget( node ) :

	# Standard gadget, extended so objects can be dragged and dropped onto it.
	gadget = GafferUI.StandardNodeGadget( node )
	GafferSceneUI.PathFilterUI.addObjectDropTarget( gadget )

	return gadget
# Use the drop-target-enabled gadget defined above for Set nodes.
GafferUI.NodeGadget.registerNodeGadget( GafferScene.Set, __nodeGadget )
| 28.985222
| 101
| 0.656016
|
4a037502866204805abf29363428750ab704f265
| 27,909
|
py
|
Python
|
sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_07_01/aio/operations/_custom_domains_operations.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_07_01/aio/operations/_custom_domains_operations.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_07_01/aio/operations/_custom_domains_operations.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._custom_domains_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_request, build_update_request_initial
# Generic type of the deserialized result handed to a `cls` callback.
T = TypeVar('T')
# Optional per-operation callback, invoked as cls(pipeline_response, deserialized, {})
# to customise what the operation returns.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CustomDomainsOperations:
    """CustomDomainsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.appplatform.v2020_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Convenience alias so callers can reach request/response models via the
    # operation group.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Wires in the shared pipeline client, client configuration and the
        # model (de)serializers; instances are created by the service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        domain_name: str,
        **kwargs: Any
    ) -> "_models.CustomDomainResource":
        """Get the custom domain of one lifecycle application.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param domain_name: The name of the custom domain resource.
        :type domain_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CustomDomainResource, or the result of cls(response)
        :rtype: ~azure.mgmt.appplatform.v2020_07_01.models.CustomDomainResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CustomDomainResource"]
        # Map 401/404/409 to specific azure-core exceptions; callers may extend
        # this via an `error_map` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        api_version = kwargs.pop('api_version', "2020-07-01")  # type: str

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            app_name=app_name,
            domain_name=domain_name,
            api_version=api_version,
            template_url=self.get.metadata['url'],
        )
        # Convert to the pipeline's request type and expand URL placeholders.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CustomDomainResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}"}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        domain_name: str,
        domain_resource: "_models.CustomDomainResource",
        **kwargs: Any
    ) -> "_models.CustomDomainResource":
        """Initial request of the create-or-update long-running operation.

        Issues the request once and deserializes a 200/201/202 response;
        polling to completion is handled by ``begin_create_or_update``.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CustomDomainResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        api_version = kwargs.pop('api_version', "2020-07-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the model into the JSON request body.
        _json = self._serialize.body(domain_resource, 'CustomDomainResource')

        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            app_name=app_name,
            domain_name=domain_name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Every accepted status code carries the same resource payload.
        if response.status_code == 200:
            deserialized = self._deserialize('CustomDomainResource', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('CustomDomainResource', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('CustomDomainResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}"}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        domain_name: str,
        domain_resource: "_models.CustomDomainResource",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.CustomDomainResource"]:
        """Create or update custom domain of one lifecycle application.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param domain_name: The name of the custom domain resource.
        :type domain_name: str
        :param domain_resource: Parameters for the create or update operation.
        :type domain_resource: ~azure.mgmt.appplatform.v2020_07_01.models.CustomDomainResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either CustomDomainResource or the result
         of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2020_07_01.models.CustomDomainResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2020-07-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CustomDomainResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                app_name=app_name,
                domain_name=domain_name,
                domain_resource=domain_resource,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserializes the final response once polling completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('CustomDomainResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> ARM polling via the azure-async-operation header;
        # polling=False -> no polling; anything else is used as the method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}"}  # type: ignore
    async def _delete_initial(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        domain_name: str,
        **kwargs: Any
    ) -> None:
        """Initial request of the delete long-running operation.

        Polling to completion is handled by ``begin_delete``.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        api_version = kwargs.pop('api_version', "2020-07-01")  # type: str

        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            app_name=app_name,
            domain_name=domain_name,
            api_version=api_version,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Delete has no response body; only invoke the callback if supplied.
        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}"}  # type: ignore
    @distributed_trace_async
    async def begin_delete(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        domain_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Delete the custom domain of one lifecycle application.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param domain_name: The name of the custom domain resource.
        :type domain_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2020-07-01")  # type: str
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                app_name=app_name,
                domain_name=domain_name,
                api_version=api_version,
                cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # No body to deserialize for delete; just honour a cls callback.
            if cls:
                return cls(pipeline_response, None, {})

        # polling=True -> ARM polling via the azure-async-operation header;
        # polling=False -> no polling; anything else is used as the method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}"}  # type: ignore
async def _update_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
domain_name: str,
domain_resource: "_models.CustomDomainResource",
**kwargs: Any
) -> "_models.CustomDomainResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomDomainResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-07-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain_resource, 'CustomDomainResource')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
domain_name=domain_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('CustomDomainResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('CustomDomainResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}"} # type: ignore
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        domain_name: str,
        domain_resource: "_models.CustomDomainResource",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.CustomDomainResource"]:
        """Update custom domain of one lifecycle application.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :param domain_name: The name of the custom domain resource.
        :type domain_name: str
        :param domain_resource: Parameters for the create or update operation.
        :type domain_resource: ~azure.mgmt.appplatform.v2020_07_01.models.CustomDomainResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either CustomDomainResource or the result
         of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2020_07_01.models.CustomDomainResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2020-07-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CustomDomainResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # A continuation token means we are re-attaching to an operation that is
        # already in flight, so the initial update request is skipped entirely.
        if cont_token is None:
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                app_name=app_name,
                domain_name=domain_name,
                domain_resource=domain_resource,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Final deserialization step run by the poller once the
            # long-running operation reaches a terminal state.
            response = pipeline_response.http_response
            deserialized = self._deserialize('CustomDomainResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Select the polling strategy: default ARM polling, no polling at all,
        # or a caller-supplied AsyncPollingMethod instance.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}"}  # type: ignore
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        service_name: str,
        app_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.CustomDomainResourceCollection"]:
        """List the custom domains of one lifecycle application.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param app_name: The name of the App resource.
        :type app_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CustomDomainResourceCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2020_07_01.models.CustomDomainResourceCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2020-07-01")  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CustomDomainResourceCollection"]
        # Map well-known HTTP failures onto azure-core exception types;
        # callers may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # Build the GET request for one page: the first page uses the
            # operation's templated URL, subsequent pages reuse the
            # service-provided next_link verbatim.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    app_name=app_name,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    app_name=app_name,
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand (next_link, items) back to the pager.
            deserialized = self._deserialize("CustomDomainResourceCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, mapping HTTP errors onto azure-core exceptions.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )

    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains"}  # type: ignore
| 45.978583
| 223
| 0.663836
|
4a037537b6c0a6a2803b99f6e395c838e174a03d
| 882
|
py
|
Python
|
dns/rdtypes/ANY/PTR.py
|
balabit-deps/balabit-os-6-dnspython
|
c7d01e597f052d6214ce3de99a2b15e87ad62f1b
|
[
"0BSD"
] | 12
|
2017-04-04T00:39:04.000Z
|
2020-04-03T10:52:37.000Z
|
dns/rdtypes/ANY/PTR.py
|
balabit-deps/balabit-os-6-dnspython
|
c7d01e597f052d6214ce3de99a2b15e87ad62f1b
|
[
"0BSD"
] | 5
|
2017-01-15T20:55:46.000Z
|
2019-09-05T04:52:14.000Z
|
dns/rdtypes/ANY/PTR.py
|
balabit-deps/balabit-os-6-dnspython
|
c7d01e597f052d6214ce3de99a2b15e87ad62f1b
|
[
"0BSD"
] | 7
|
2017-03-08T13:21:17.000Z
|
2020-06-12T15:36:02.000Z
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.nsbase
class PTR(dns.rdtypes.nsbase.NSBase):
    """A DNS PTR (domain name pointer) record; all wire-format and
    text-format behaviour is inherited from the NSBase helper class."""
| 42
| 72
| 0.773243
|
4a0375cd00d2452e9676ba8c5e12453861c53d1a
| 17,612
|
py
|
Python
|
tests/test_views.py
|
Koomook/django-rest-framework-jwt
|
7f40b137115fc30621c97649c91a05d48a93e000
|
[
"MIT"
] | 1
|
2019-03-06T07:55:49.000Z
|
2019-03-06T07:55:49.000Z
|
tests/test_views.py
|
Koomook/django-rest-framework-jwt
|
7f40b137115fc30621c97649c91a05d48a93e000
|
[
"MIT"
] | null | null | null |
tests/test_views.py
|
Koomook/django-rest-framework-jwt
|
7f40b137115fc30621c97649c91a05d48a93e000
|
[
"MIT"
] | null | null | null |
import unittest
from calendar import timegm
from datetime import datetime, timedelta
import time
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from django import get_version
from django.test import TestCase
from django.test.utils import override_settings
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework_jwt import utils, views
from rest_framework_jwt.compat import get_user_model
from rest_framework_jwt.settings import api_settings, DEFAULTS
from . import utils as test_utils
# Resolve the active user model once for the whole test module.
User = get_user_model()

# Skip message for custom-user tests on Django versions without a
# configurable user model.
NO_CUSTOM_USER_MODEL = 'Custom User Model only supported after Django 1.5'

# Keep a reference to the real datetime class so tests that monkeypatch
# `datetime` can still reach the original.
orig_datetime = datetime
class BaseTestCase(TestCase):
    """Common fixture: create one known user plus the matching
    username/password payload used by the login endpoints."""

    def setUp(self):
        self.email = 'jpueblo@example.com'
        self.username = 'jpueblo'
        self.password = 'password'
        self.user = User.objects.create_user(
            self.username, self.email, self.password)

        self.data = {
            'username': self.username,
            'password': self.password,
        }
class TestCustomResponsePayload(BaseTestCase):
    """Verify the login view honours a swapped-in response payload handler."""

    def setUp(self):
        # Swap in the test handler; tearDown puts the original back.
        self.original_handler = views.jwt_response_payload_handler
        views.jwt_response_payload_handler = (
            test_utils.jwt_response_payload_handler)
        return super(TestCustomResponsePayload, self).setUp()

    def tearDown(self):
        views.jwt_response_payload_handler = self.original_handler

    def test_jwt_login_custom_response_json(self):
        """A JSON login returns the extra 'user' field added by the handler."""
        rest_client = APIClient(enforce_csrf_checks=True)
        resp = rest_client.post('/auth-token/', self.data, format='json')
        claims = utils.jwt_decode_handler(resp.data['token'])
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(claims['username'], self.username)
        self.assertEqual(resp.data['user'], self.username)
class ObtainJSONWebTokenTests(BaseTestCase):
    """Exercise the token-obtain endpoint with valid and invalid credentials."""

    def test_jwt_login_json(self):
        """A JSON POST with valid credentials returns a decodable token."""
        rest_client = APIClient(enforce_csrf_checks=True)
        resp = rest_client.post('/auth-token/', self.data, format='json')
        claims = utils.jwt_decode_handler(resp.data['token'])
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(claims['username'], self.username)

    def test_jwt_login_json_bad_creds(self):
        """A JSON POST with a wrong password is rejected with HTTP 400."""
        rest_client = APIClient(enforce_csrf_checks=True)
        self.data['password'] = 'wrong'
        resp = rest_client.post('/auth-token/', self.data, format='json')
        self.assertEqual(resp.status_code, 400)

    def test_jwt_login_json_missing_fields(self):
        """A JSON POST without a password is rejected with HTTP 400."""
        rest_client = APIClient(enforce_csrf_checks=True)
        resp = rest_client.post('/auth-token/',
                                {'username': self.username}, format='json')
        self.assertEqual(resp.status_code, 400)

    def test_jwt_login_form(self):
        """A form-encoded POST with valid credentials also works."""
        rest_client = APIClient(enforce_csrf_checks=True)
        resp = rest_client.post('/auth-token/', self.data)
        claims = utils.jwt_decode_handler(resp.data['token'])
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(claims['username'], self.username)

    def test_jwt_login_with_expired_token(self):
        """Login succeeds even when an expired token is sent as authorization."""
        stale_payload = utils.jwt_payload_handler(self.user)
        stale_payload['exp'] = 1
        stale_token = utils.jwt_encode_handler(stale_payload)

        auth_header = 'JWT {0}'.format(stale_token)
        rest_client = APIClient(enforce_csrf_checks=True)
        resp = rest_client.post(
            '/auth-token/', self.data,
            HTTP_AUTHORIZATION=auth_header, format='json')

        claims = utils.jwt_decode_handler(resp.data['token'])
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(claims['username'], self.username)

    def test_jwt_login_using_zero(self):
        """Regression test for issue #33: credentials that are the string '0'."""
        rest_client = APIClient(enforce_csrf_checks=True)
        zero_creds = {
            'username': '0',
            'password': '0',
        }
        resp = rest_client.post('/auth-token/', zero_creds, format='json')
        self.assertEqual(resp.status_code, 400)
@unittest.skipIf(get_version() < '1.5.0', 'No Configurable User model feature')
@override_settings(AUTH_USER_MODEL='tests.CustomUser')
class CustomUserObtainJSONWebTokenTests(TestCase):
    """Token-obtain tests against an email-keyed custom user model."""

    def setUp(self):
        from .models import CustomUser

        self.email = 'jpueblo@example.com'
        self.password = 'password'
        custom_user = CustomUser.objects.create(email=self.email)
        custom_user.set_password(self.password)
        custom_user.save()
        self.user = custom_user

        self.data = {
            'email': self.email,
            'password': self.password,
        }

    def test_jwt_login_json(self):
        """Valid email/password via JSON POST yields a decodable token."""
        rest_client = APIClient(enforce_csrf_checks=True)
        resp = rest_client.post('/auth-token/', self.data, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        claims = utils.jwt_decode_handler(resp.data['token'])
        self.assertEqual(claims['email'], self.email)

    def test_jwt_login_json_bad_creds(self):
        """A wrong password is rejected with HTTP 400."""
        rest_client = APIClient(enforce_csrf_checks=True)
        self.data['password'] = 'wrong'
        resp = rest_client.post('/auth-token/', self.data, format='json')
        self.assertEqual(resp.status_code, 400)
@override_settings(AUTH_USER_MODEL='tests.CustomUserUUID')
class CustomUserUUIDObtainJSONWebTokenTests(TestCase):
    """Token-obtain tests against a custom user model with a UUID primary key."""

    def setUp(self):
        from .models import CustomUserUUID

        self.email = 'jpueblo@example.com'
        self.password = 'password'
        uuid_user = CustomUserUUID.objects.create(email=self.email)
        uuid_user.set_password(self.password)
        uuid_user.save()
        self.user = uuid_user

        self.data = {
            'email': self.email,
            'password': self.password,
        }

    def test_jwt_login_json(self):
        """The token payload carries the UUID primary key as a string."""
        rest_client = APIClient(enforce_csrf_checks=True)
        resp = rest_client.post('/auth-token/', self.data, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        claims = utils.jwt_decode_handler(resp.data['token'])
        self.assertEqual(claims['user_id'], str(self.user.id))

    def test_jwt_login_json_bad_creds(self):
        """A wrong password is rejected with HTTP 400."""
        rest_client = APIClient(enforce_csrf_checks=True)
        self.data['password'] = 'wrong'
        resp = rest_client.post('/auth-token/', self.data, format='json')
        self.assertEqual(resp.status_code, 400)
class TokenTestCase(BaseTestCase):
    """
    Helpers for fetching tokens from the API or crafting arbitrary ones.
    """

    def setUp(self):
        super(TokenTestCase, self).setUp()

    def get_token(self):
        """POST the fixture credentials and return the issued token string."""
        rest_client = APIClient(enforce_csrf_checks=True)
        resp = rest_client.post('/auth-token/', self.data, format='json')
        return resp.data['token']

    def create_token(self, user, exp=None, orig_iat=None):
        """Encode a token for *user*, optionally forcing exp / orig_iat claims."""
        claims = utils.jwt_payload_handler(user)
        if exp:
            claims['exp'] = exp
        if orig_iat:
            claims['orig_iat'] = timegm(orig_iat.utctimetuple())
        return utils.jwt_encode_handler(claims)
class VerifyJSONWebTokenTestsSymmetric(TokenTestCase):
    """Verification endpoint tests using the default symmetric signing key.

    Fix: ``assertRegexpMatches`` is a deprecated alias that was removed in
    Python 3.12; the supported spelling ``assertRegex`` (available since
    Python 3.2) is used instead.
    """

    def test_verify_jwt(self):
        """
        Test that a valid, non-expired token will return a 200 response
        and itself when passed to the validation endpoint.
        """
        client = APIClient(enforce_csrf_checks=True)

        orig_token = self.get_token()

        # Now try to get a refreshed token
        response = client.post('/auth-token-verify/', {'token': orig_token},
                               format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['token'], orig_token)

    def test_verify_jwt_fails_with_expired_token(self):
        """
        Test that an expired token will fail with the correct error.
        """
        client = APIClient(enforce_csrf_checks=True)

        # Make an expired token..
        token = self.create_token(
            self.user,
            exp=datetime.utcnow() - timedelta(seconds=5),
            orig_iat=datetime.utcnow() - timedelta(hours=1)
        )

        response = client.post('/auth-token-verify/', {'token': token},
                               format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertRegex(response.data['non_field_errors'][0],
                         'Signature has expired')

    def test_verify_jwt_fails_with_bad_token(self):
        """
        Test that an invalid token will fail with the correct error.
        """
        client = APIClient(enforce_csrf_checks=True)
        token = "i am not a correctly formed token"
        response = client.post('/auth-token-verify/', {'token': token},
                               format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertRegex(response.data['non_field_errors'][0],
                         'Error decoding signature')

    def test_verify_jwt_fails_with_missing_user(self):
        """
        Test that an invalid token will fail with a user that does not exist.
        """
        client = APIClient(enforce_csrf_checks=True)
        user = User.objects.create_user(
            email='jsmith@example.com', username='jsmith', password='password')

        token = self.create_token(user)
        # Delete the user used to make the token
        user.delete()

        response = client.post('/auth-token-verify/', {'token': token},
                               format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertRegex(response.data['non_field_errors'][0],
                         "User doesn't exist")
class VerifyJSONWebTokenTestsAsymmetric(TokenTestCase):
    """Verification endpoint tests using an RS512 asymmetric key pair.

    Fix: ``assertRegexpMatches`` is a deprecated alias that was removed in
    Python 3.12; replaced with the supported ``assertRegex``.
    """

    def setUp(self):
        super(VerifyJSONWebTokenTestsAsymmetric, self).setUp()
        # Generate a throwaway RSA key pair and switch the JWT settings over
        # to asymmetric RS512 signing for the duration of each test.
        private_key = rsa.generate_private_key(public_exponent=65537,
                                               key_size=2048,
                                               backend=default_backend())
        public_key = private_key.public_key()
        api_settings.JWT_PRIVATE_KEY = private_key
        api_settings.JWT_PUBLIC_KEY = public_key
        api_settings.JWT_ALGORITHM = 'RS512'

    def test_verify_jwt_with_pub_pvt_key(self):
        """
        Test that a token can be signed with asymmetrics keys
        """
        client = APIClient(enforce_csrf_checks=True)
        orig_token = self.get_token()

        # Now try to get a refreshed token
        response = client.post('/auth-token-verify/', {'token': orig_token},
                               format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['token'], orig_token)

    def test_verify_jwt_fails_with_expired_token(self):
        """
        Test that an expired token will fail with the correct error.
        """
        client = APIClient(enforce_csrf_checks=True)

        # Make an expired token..
        token = self.create_token(
            self.user,
            exp=datetime.utcnow() - timedelta(seconds=5),
            orig_iat=datetime.utcnow() - timedelta(hours=1)
        )

        response = client.post('/auth-token-verify/', {'token': token},
                               format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertRegex(response.data['non_field_errors'][0],
                         'Signature has expired')

    def test_verify_jwt_fails_with_bad_token(self):
        """
        Test that an invalid token will fail with the correct error.
        """
        client = APIClient(enforce_csrf_checks=True)
        token = "i am not a correctly formed token"
        response = client.post('/auth-token-verify/', {'token': token},
                               format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertRegex(response.data['non_field_errors'][0],
                         'Error decoding signature')

    def test_verify_jwt_fails_with_bad_pvt_key(self):
        """
        Test that an mismatched private key token will fail with
        the correct error.
        """
        # Generate a new private key, but deliberately leave the public key
        # from setUp in place so verification must fail.
        private_key = rsa.generate_private_key(public_exponent=65537,
                                               key_size=2048,
                                               backend=default_backend())

        # Don't set the private key
        api_settings.JWT_PRIVATE_KEY = private_key

        client = APIClient(enforce_csrf_checks=True)
        orig_token = self.get_token()

        # Now try to get a refreshed token
        response = client.post('/auth-token-verify/', {'token': orig_token},
                               format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertRegex(response.data['non_field_errors'][0],
                         'Error decoding signature')

    def tearDown(self):
        # Restore original settings
        api_settings.JWT_ALGORITHM = DEFAULTS['JWT_ALGORITHM']
        api_settings.JWT_PRIVATE_KEY = DEFAULTS['JWT_PRIVATE_KEY']
        api_settings.JWT_PUBLIC_KEY = DEFAULTS['JWT_PUBLIC_KEY']
class RefreshJSONWebTokenTests(TokenTestCase):
    """Tests for the token refresh endpoint (JWT_ALLOW_REFRESH enabled).

    Fix: ``assertEquals`` is a deprecated alias that was removed in
    Python 3.12; replaced with the supported ``assertEqual``.
    """

    def setUp(self):
        super(RefreshJSONWebTokenTests, self).setUp()
        api_settings.JWT_ALLOW_REFRESH = True

    def test_refresh_jwt(self):
        """
        Test getting a refreshed token from original token works
        No date/time modifications are neccessary because it is assumed
        that this operation will take less than 300 seconds.
        """
        client = APIClient(enforce_csrf_checks=True)
        orig_token = self.get_token()
        orig_token_decoded = utils.jwt_decode_handler(orig_token)

        expected_orig_iat = timegm(datetime.utcnow().utctimetuple())

        # Make sure 'orig_iat' exists and is the current time (give some slack)
        orig_iat = orig_token_decoded['orig_iat']
        self.assertLessEqual(orig_iat - expected_orig_iat, 1)

        time.sleep(1)

        # Now try to get a refreshed token
        response = client.post('/auth-token-refresh/', {'token': orig_token},
                               format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        new_token = response.data['token']
        new_token_decoded = utils.jwt_decode_handler(new_token)

        # Make sure 'orig_iat' on the new token is same as original
        self.assertEqual(new_token_decoded['orig_iat'], orig_iat)
        self.assertGreater(new_token_decoded['exp'], orig_token_decoded['exp'])

    def test_refresh_after_token_expiration_and_before_refresh_expiration(self):
        """An expired access token can still be refreshed while within the
        refresh-expiration window."""
        client = APIClient(enforce_csrf_checks=True)
        token = self.create_token(
            self.user,
            exp=datetime.utcnow() + timedelta(seconds=0.1),
            orig_iat=datetime.utcnow(),
        )
        time.sleep(0.5)
        # now the access token is expired but still can be refreshed
        response = client.post(
            '/auth-token-refresh/',
            {'token': token},
            format='json'
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_refresh_jwt_after_refresh_expiration(self):
        """
        Test that token can't be refreshed after token refresh limit
        """
        client = APIClient(enforce_csrf_checks=True)

        orig_iat = (datetime.utcnow() - api_settings.JWT_REFRESH_EXPIRATION_DELTA -
                    timedelta(seconds=5))
        token = self.create_token(
            self.user,
            exp=datetime.utcnow() + timedelta(hours=1),
            orig_iat=orig_iat
        )

        response = client.post('/auth-token-refresh/', {'token': token},
                               format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data['non_field_errors'][0],
                         'Refresh has expired.')

    def tearDown(self):
        # Restore original settings
        api_settings.JWT_ALLOW_REFRESH = DEFAULTS['JWT_ALLOW_REFRESH']
| 34.264591
| 83
| 0.634567
|
4a03767c81b6975c517bf8e8aa973c492ab2a808
| 989
|
py
|
Python
|
sentiment-analysis/data_collection/TweetsByTopic.py
|
suraj-jayakumar/sentiment-analysis
|
84f843beba171260b10a075eaf6085eed81e1ad0
|
[
"MIT"
] | 2
|
2016-09-13T19:47:27.000Z
|
2018-12-30T03:39:22.000Z
|
sentiment-analysis/data_collection/TweetsByTopic.py
|
sjayakum/sentiment-analysis
|
84f843beba171260b10a075eaf6085eed81e1ad0
|
[
"MIT"
] | 1
|
2016-02-21T09:11:11.000Z
|
2016-02-21T09:11:11.000Z
|
sentiment-analysis/data_collection/TweetsByTopic.py
|
sjayakum/sentiment-analysis
|
84f843beba171260b10a075eaf6085eed81e1ad0
|
[
"MIT"
] | null | null | null |
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
# Twitter API credentials
# NOTE(review): consumer_key/consumer_secret are empty, so OAuth will fail
# until real application credentials are supplied.
consumer_key = ""
consumer_secret = ""
# After the step above, you will be redirected to your app's page.
# Create an access token under the the "Your access token" section
# SECURITY NOTE(review): real-looking access tokens are committed here in
# plain text. They should be revoked and loaded from environment variables
# or a secrets store instead of being hard-coded in the repository.
access_token="74381237-NoD1lqLycundzFCcsoxEhoVGZR3K1fw9c8BmpaIug"
access_token_secret= "tGV3lJG3BLAaJ4qInfgkdo9zpArMcrXdmcH1HeHRIuaHB"
class StdOutListener(StreamListener):
    """Minimal stream listener: echo everything the stream delivers to stdout."""

    def on_data(self, data):
        """Print the raw tweet payload and keep the stream connection open."""
        print(data)
        return True

    def on_error(self, status):
        """Print the error status reported by the streaming API."""
        print(status)
if __name__ == '__main__':
    # Stream tweets matching the keyword 'flipkart' and dump the raw JSON
    # payloads to stdout via StdOutListener.
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    # Blocks forever, printing each matching tweet as it arrives.
    stream.filter(track=['flipkart'])
| 29.969697
| 72
| 0.749242
|
4a0376aa33471bfbe7cb6a6dd162c64f44b5df93
| 4,352
|
py
|
Python
|
utils/argparser.py
|
HaiDangDang/2020-flatland
|
abbf2f7f62fabf6da0937f80c2181f1c457ce24a
|
[
"MIT"
] | null | null | null |
utils/argparser.py
|
HaiDangDang/2020-flatland
|
abbf2f7f62fabf6da0937f80c2181f1c457ce24a
|
[
"MIT"
] | null | null | null |
utils/argparser.py
|
HaiDangDang/2020-flatland
|
abbf2f7f62fabf6da0937f80c2181f1c457ce24a
|
[
"MIT"
] | null | null | null |
import argparse
from ray.tune.config_parser import make_parser
from ray.tune.result import DEFAULT_RESULTS_DIR
EXAMPLE_USAGE = """
Training example:
python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats
Training with Config:
python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml
Note that -f overrides all other trial-specific command-line options.
"""
def create_parser(parser_creator=None):
    """Build the command-line parser for the training entry point.

    :param parser_creator: optional factory passed through to
        ``ray.tune.config_parser.make_parser`` for constructing the
        underlying ``ArgumentParser``.
    :return: an ``argparse.ArgumentParser`` pre-populated with Ray/Tune
        cluster options plus flatland-specific flags.

    Fix: ``--log-flatland-stats`` was declared as ``store_true`` with
    ``default=True``, which made the flag a no-op (the value could never be
    turned off), while the module's EXAMPLE_USAGE advertised a
    ``--no-log-flatland-stats`` option that did not exist. A
    ``store_false`` counterpart sharing the same ``dest`` is added;
    existing invocations keep working unchanged.
    """
    parser = make_parser(
        parser_creator=parser_creator,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Train a reinforcement learning agent.",
        epilog=EXAMPLE_USAGE)

    # See also the base parser definition in ray/tune/config_parser.py
    parser.add_argument(
        "--ray-address",
        default=None,
        type=str,
        help="Connect to an existing Ray cluster at this address instead "
        "of starting a new one.")
    parser.add_argument(
        "--ray-num-cpus",
        default=None,
        type=int,
        help="--num-cpus to use if starting a new cluster.")
    parser.add_argument(
        "--ray-num-gpus",
        default=None,
        type=int,
        help="--num-gpus to use if starting a new cluster.")
    parser.add_argument(
        "--ray-num-nodes",
        default=None,
        type=int,
        help="Emulate multiple cluster nodes for debugging.")
    parser.add_argument(
        "--ray-redis-max-memory",
        default=None,
        type=int,
        help="--redis-max-memory to use if starting a new cluster.")
    parser.add_argument(
        "--ray-memory",
        default=None,
        type=int,
        help="--memory to use if starting a new cluster.")
    parser.add_argument(
        "--ray-object-store-memory",
        default=None,
        type=int,
        help="--object-store-memory to use if starting a new cluster.")
    parser.add_argument(
        "--experiment-name",
        default="default",
        type=str,
        help="Name of the subdirectory under `local_dir` to put results in.")
    parser.add_argument(
        "--local-dir",
        default=DEFAULT_RESULTS_DIR,
        type=str,
        help="Local dir to save training results to. Defaults to '{}'.".format(
            DEFAULT_RESULTS_DIR))
    parser.add_argument(
        "--upload-dir",
        default="",
        type=str,
        help="Optional URI to sync training results to (e.g. s3://bucket).")
    parser.add_argument(
        "-v", action="store_true", help="Whether to use INFO level logging.")
    parser.add_argument(
        "-vv", action="store_true", help="Whether to use DEBUG level logging.")
    parser.add_argument(
        "--resume",
        action="store_true",
        help="Whether to attempt to resume previous Tune experiments.")
    parser.add_argument(
        "--torch",
        action="store_true",
        help="Whether to use PyTorch (instead of tf) as the DL framework.")
    parser.add_argument(
        "--eager",
        action="store_true",
        help="Whether to attempt to enable TF eager execution.")
    parser.add_argument(
        "--trace",
        action="store_true",
        help="Whether to attempt to enable tracing for eager mode.")
    # Stats logging is on by default; --no-log-flatland-stats turns it off
    # (both flags write to the shared dest 'log_flatland_stats').
    parser.add_argument(
        "--log-flatland-stats",
        action="store_true",
        default=True,
        dest="log_flatland_stats",
        help="Whether to log additional flatland specfic metrics such as percentage complete or normalized score.")
    parser.add_argument(
        "--no-log-flatland-stats",
        action="store_false",
        dest="log_flatland_stats",
        help="Disable logging of the additional flatland specific metrics.")
    parser.add_argument(
        "--bind-all",
        action="store_true",
        default=False,
        help="Whether to expose on network (binding on all network interfaces).")
    parser.add_argument(
        "--env", default=None, type=str, help="The gym environment to use.")
    parser.add_argument(
        "--queue-trials",
        action="store_true",
        help=(
            "Whether to queue trials when the cluster does not currently have "
            "enough resources to launch one. This should be set to True when "
            "running on an autoscaling cluster to enable automatic scale-up."))
    parser.add_argument(
        "-f",
        "--config-file",
        default=None,
        type=str,
        help="If specified, use config options from this file. Note that this "
        "overrides any trial-specific options set via flags above.")
    return parser
| 34.816
| 115
| 0.62523
|
4a03776107ff7aacbbb6731c594d83297de7cc00
| 1,685
|
py
|
Python
|
LSPI_FROM_GIT/plot.py
|
orenpeer12/randomized_value_functions
|
a10fe99a9a3a92fea02b38740753da7de7db8f1a
|
[
"MIT"
] | null | null | null |
LSPI_FROM_GIT/plot.py
|
orenpeer12/randomized_value_functions
|
a10fe99a9a3a92fea02b38740753da7de7db8f1a
|
[
"MIT"
] | null | null | null |
LSPI_FROM_GIT/plot.py
|
orenpeer12/randomized_value_functions
|
a10fe99a9a3a92fea02b38740753da7de7db8f1a
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
class Plot:
    """Small collection of matplotlib plotting helpers used for LSPI
    experiments (reward curves, noisy demo plots, RBF model fits)."""

    def plot_rewad(self,x, y1,y2):
        # NOTE(review): method name has a typo ("rewad" -> "reward"); kept
        # as-is because external callers may rely on it.
        """Plot two reward curves over *x* and show the figure.

        y1 is labelled as model-based LSTDQ, y2 as importance sampling.
        """
        plt.ylim(-220, 100)
        plt.plot(x, y1,'bo-',linewidth=2.5, linestyle="-", label="LSPI-model-based LSTDQ")
        plt.plot(x, y2,'ro-',linewidth=2.5, linestyle="-", label="LSPI-IS")
        plt.legend(loc='upper left')
        plt.show()

    def plot(self):
        """Demo plot: a noisy sinusoid with a shaded random error band.

        Uses random data only; also prints the random arrays to stdout.
        """
        x = np.linspace(0, 30, 30)
        y = np.cos(x / 6 * np.pi) + np.sin(x / 3 * np.pi)
        error = np.random.rand(len(y)) * 2
        y += np.random.normal(0, 0.1, size=y.shape)
        print(np.random.normal(0, 0.1, size=y.shape))
        print("\n", np.random.rand(len(y)) * 2)
        plt.plot(x, y, 'k', color='#CC4F1B')  # color='#3F7F4C')color="#4682b4"
        plt.fill_between(x, y - error, y + error,
                         edgecolor='#3F7F4C', facecolor='#7EFF99', linewidth=1,
                         )
        plt.show()

    def plot_function(self,x,y,z,rbf):
        """Plot target data *y* and model output *z* against *x*, overlaying
        each RBF basis function of *rbf* as a thin gray curve.

        NOTE(review): relies on rbf exposing `centers` and the private
        `_basisfunc(x, center)` — confirm against the RBF implementation.
        """
        # plot original data
        plt.figure(figsize=(12, 8))
        plt.plot(x, y, 'k-')
        # plot learned model
        plt.plot(x, z, 'r-', linewidth=2)
        # plot rbfs
        #plt.plot(rbf.centers, np.zeros(rbf.numCenters), 'gs')
        for c in rbf.centers:
            # RF prediction lines
            cx = np.arange(c - 0.7, c + 0.7, 0.01)
            cy = [rbf._basisfunc(np.array([cx_]), np.array([c])) for cx_ in cx]
            # print "-----",cx.shape,len(cy)," "
            #plt.plot(cx, cy, '-', color='gray', linewidth=0.2)
            # print "\n",cx, cy
            plt.plot(cx, cy, '-', color='gray', linewidth=0.2)
        plt.xlim(-1.2, 1.2)
        # print "plottt"
        plt.show()
| 28.559322
| 90
| 0.506231
|
4a0377be86d8dd7cee24b5dd186ccef030f0cfd9
| 2,973
|
py
|
Python
|
tests/crawler/media/test_nownews.py
|
allenyummy/GoodInfo
|
94ab7421d1377450ac4cfdfd6e4667fa52b20d0c
|
[
"MIT"
] | 1
|
2022-01-17T14:06:27.000Z
|
2022-01-17T14:06:27.000Z
|
tests/crawler/media/test_nownews.py
|
allenyummy/GoodInfo
|
94ab7421d1377450ac4cfdfd6e4667fa52b20d0c
|
[
"MIT"
] | 9
|
2021-08-12T07:39:01.000Z
|
2021-08-20T08:38:29.000Z
|
tests/crawler/media/test_nownews.py
|
allenyummy/GoodInfo
|
94ab7421d1377450ac4cfdfd6e4667fa52b20d0c
|
[
"MIT"
] | 1
|
2022-02-21T15:45:13.000Z
|
2022-02-21T15:45:13.000Z
|
# encoding=utf-8
# Author: Yu-Lun Chiang
# Description: Test NewsCrawler
import logging
import pytest
from collections import namedtuple
from src.crawler.media import nownews
from src.utils.struct import NewsStruct
logger = logging.getLogger(__name__)
# Lightweight record describing one crawler test case:
# a human-readable name, the article URL, and the expected parsed NewsStruct.
TEST_DATA = namedtuple(
    typename="TEST_DATA",
    field_names=[
        "name",
        "link",
        "expected_output",
    ],
)
TEST_DATA_1 = TEST_DATA(
name="今日新聞_1",
link="https://www.nownews.com/news/5357780",
expected_output=NewsStruct(
title="聯亞疫苗EUA沒過 將重評估三期臨床效益",
content="\n\r\n 中央流行疫情指揮中心今(16)日宣布聯亞生技的新冠疫苗未通過衛福部食藥署的新冠疫苗專案製造緊急使用授權(EUA)。對此,興櫃上市公司聯亞藥代母公司發表公告,表示目前國內臨床二期試驗持續進行,母公司將重新執行第三期臨床試驗的效益評估。\n\n我是廣告 請繼續往下閱讀\n\n\n\n\n\n\n\n\r\n\t\t\t\t \t聯亞藥指出,母公司聯亞生技開發股份有限公司於6月30日向台灣衛福部食藥署申請COVID-19疫苗UB-612專案製造EUA,經食藥署8月15日召開專家會議審查,UB-612未符合「新冠疫苗專案製造或輸入技術性資料審查基準」。聯亞藥表示,該公司與母公司簽訂UB-612疫苗委託製造合約,已收取50%不可退還訂金,應可支應已投入生產疫苗成本。另自有產品銷售情形良好,並有穩定國內外客戶藥品委託製造及藥品開發訂單,上述事件對財務業務影響有限。聯亞藥表示,聯亞生技的UB-612新冠疫苗國內臨床二期試驗持續進行,至於下一階段,母公司將重新執行第三期臨床試驗效益評估。6月底聯亞生技的二期臨床試驗期中分析報告記者會中曾表示,該公司的新冠疫苗對變種病毒具保護力,除加速前往印度進行1.1萬人三期臨床試驗,也將在美國進行「混合補強施打」的第二期臨床試驗。",
keywords=["聯亞藥", "聯亞生技", "疫苗", "EUA", "臨床試驗"],
category="財經",
media="今日新聞",
datetime="2021-08-16T19:54:23+08:00",
link="https://www.nownews.com/news/5357780",
),
)
TEST_DATA_2 = TEST_DATA(
name="今日新聞_2",
link="https://www.nownews.com/news/5357268",
expected_output=NewsStruct(
title="仁寶也出手了 捐贈1億元助慈濟購買BNT疫苗",
content="\n\r\n 繼台泥之後,仁寶電腦今(16)日也發布重訊表示,為實踐企業社會責任並支持社會公益,將參與捐贈慈濟慈善事業基金會新台幣1億元,支持慈濟基金會捐助防疫疫苗予台灣,祝愿台灣社會早日戰勝疫情。\n\n我是廣告 請繼續往下閱讀\n\n\n\n\n\n\n\n\r\n\t\t\t\t \t慈濟基金會在今年7月21日成功簽訂500萬劑BNT疫苗,所購得的疫苗將全數捐給政府主管機關,以作為新冠肺炎(COVID-19)防疫使用。台泥在12日率先宣布,董事長張安平、董事會認同慈濟基金會疫苗捐贈用途,並善盡台泥企業社會責任,因此董事會決議參與捐贈資助慈濟慈善基金會1億元用於購買疫苗。仁寶也在今日發布重訊表示,為實踐企業社會責任並支持社會公益,將參與捐贈慈濟基金會新台幣1億元,支持慈濟基金會捐助防疫疫苗予台灣,祝愿台灣社會早日戰勝疫情。由於此捐贈因屬重大天然災害所為急難救助的公益性質捐贈,將會提公司董事會追認。※【NOWnews 今日新聞】提醒您:因應新冠肺炎疫情,疾管署持續加強疫情監測與邊境管制措施,國外入境後如有發燒、咳嗽等不適症狀,請撥打「1922」專線,或「0800-001922」,並依指示配戴口罩儘速就醫,同時主動告知醫師旅遊史及接觸史,以利及時診斷及通報。",
keywords=["新冠肺炎疫苗", "仁寶", "台泥", "慈濟", "BNT疫苗"],
category="生活",
media="今日新聞",
datetime="2021-08-16T18:53:37+08:00",
link="https://www.nownews.com/news/5357268",
),
)
TEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]
@pytest.fixture(scope="module")
def newsCrawler():
    """Module-scoped fixture: construct the NowNews crawler once for all tests."""
    logger.warning("Init News Crawler ...")
    return nownews.NowNewsNewsCrawler()
@pytest.mark.parametrize(
    argnames="name, link, expected_output",
    argvalues=[tuple(t) for t in TEST_DATA_LIST],
    # Test IDs show the case name plus the URL, truncated to 50 chars.
    ids=[
        f"{t.name}, {t.link[:50]+'...' if len(t.link) > 50 else t.link}"
        for t in TEST_DATA_LIST
    ],
)
def test_get_info(
    newsCrawler,
    name,
    link,
    expected_output,
):
    """Crawl *link* live and compare the parsed result with the expected NewsStruct.

    NOTE(review): this hits the real site over the network, so it can break
    if the article changes or is removed.
    """
    output = newsCrawler.getInfo(link=link)
    assert NewsStruct.__2dict__(output) == NewsStruct.__2dict__(expected_output)
| 39.118421
| 601
| 0.704675
|
4a0377fd522ba507701e8b3e7ed38b1a9d7352cb
| 1,902
|
py
|
Python
|
menu.py
|
yijiehuang0/connect4AI
|
5a134ded8009fd51210a96ba2169920bf1b19aa8
|
[
"MIT"
] | null | null | null |
menu.py
|
yijiehuang0/connect4AI
|
5a134ded8009fd51210a96ba2169920bf1b19aa8
|
[
"MIT"
] | null | null | null |
menu.py
|
yijiehuang0/connect4AI
|
5a134ded8009fd51210a96ba2169920bf1b19aa8
|
[
"MIT"
] | null | null | null |
import pygame
import connect4
import sys
def main():
    """Show the Connect Four start menu with a Play and a Quit button.

    Blocks in an event loop until the user either quits (window close or
    QUIT button -> sys.exit) or clicks Play (-> connect4.main()).
    """

    def text_objects(text, font):
        # Render *text* in black and return the surface plus its bounding rect.
        textSurface = font.render(text, True, BLACK)
        return textSurface, textSurface.get_rect()

    WHITE = (255, 255, 255)
    BLACK = (0, 0, 0)
    GREEN = (0, 255, 0)
    RED = (255, 0, 0)

    (width, height) = (800, 700)
    # Create the window once (the original called set_mode twice with the
    # same arguments).
    screen = pygame.display.set_mode((width, height))
    screen.fill(WHITE)
    pygame.display.flip()
    pygame.init()

    font = pygame.font.SysFont("monospace", 90)
    pygame.display.set_caption('Connect Four Game')

    # Title text centered in the upper third of the window.
    TextSurf, TextRect = text_objects("Play Connect Four", font)
    TextRect.center = ((width/2), (height/3))
    screen.blit(TextSurf, TextRect)

    # Green "Play!" button on the left.
    pygame.draw.rect(screen, GREEN, (150, 450, 100, 50))
    smallText = pygame.font.Font("freesansbold.ttf", 20)
    textSurf, textRect = text_objects("Play!", smallText)
    textRect.center = ((150 + (100/2)), (450 + (50/2)))
    screen.blit(textSurf, textRect)
    pygame.display.update()
    # Hit-box for the Play button only; quitButton gets its own rect below
    # (the original's chained `startButton = quitButton = ...` was misleading).
    startButton = pygame.Rect(150, 450, 100, 50)

    # Red "QUIT!" button on the right.
    pygame.draw.rect(screen, RED, (550, 450, 100, 50))
    smallText = pygame.font.Font("freesansbold.ttf", 20)
    textSurf, textRect = text_objects("QUIT!", smallText)
    textRect.center = ((550 + (100/2)), (450 + (50/2)))
    screen.blit(textSurf, textRect)
    pygame.display.update()
    quitButton = pygame.Rect(550, 450, 100, 50)

    # Event loop: wait for a click on one of the buttons.
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == pygame.MOUSEBUTTONDOWN:
                mouse_pos = event.pos
                if quitButton.collidepoint(mouse_pos):
                    sys.exit()
                if startButton.collidepoint(mouse_pos):
                    connect4.main()
main()
| 22.915663
| 61
| 0.663512
|
4a03782ad307e8fbe1f79154e71df2460c6e07c3
| 1,417
|
py
|
Python
|
src/waldur_mastermind/marketplace_script/extension.py
|
waldur/waldur-mastermind
|
70ce819d9632c9362841ca8889f3519e1e542ffd
|
[
"MIT"
] | 4
|
2021-11-22T23:05:14.000Z
|
2022-03-02T10:11:40.000Z
|
src/waldur_mastermind/marketplace_script/extension.py
|
waldur/waldur-mastermind
|
70ce819d9632c9362841ca8889f3519e1e542ffd
|
[
"MIT"
] | 10
|
2021-10-30T00:45:03.000Z
|
2022-03-30T05:10:43.000Z
|
src/waldur_mastermind/marketplace_script/extension.py
|
waldur/waldur-mastermind
|
70ce819d9632c9362841ca8889f3519e1e542ffd
|
[
"MIT"
] | 5
|
2021-06-17T11:45:02.000Z
|
2022-02-07T19:25:13.000Z
|
from waldur_core.core import WaldurExtension
class MarketplaceScriptExtension(WaldurExtension):
    """Waldur extension wiring up the marketplace-script Django app.

    Declares the Docker execution settings, the Django app path, and the
    periodic Celery task schedule for this plugin.
    """
    class Settings:
        # Default settings consumed by the marketplace_script app.
        WALDUR_MARKETPLACE_SCRIPT = {
            # See also: https://docker-py.readthedocs.io/en/stable/client.html#docker.client.DockerClient
            'DOCKER_CLIENT': {'base_url': 'unix://var/run/docker.sock',},
            # See also: https://docker-py.readthedocs.io/en/stable/containers.html#docker.models.containers.ContainerCollection.run
            'DOCKER_RUN_OPTIONS': {'mem_limit': '64m',},
            # Path to folder on executor machine where to create temporary submission scripts. If None uses OS-dependent location
            # OS X users, see https://github.com/docker/for-mac/issues/1532
            'DOCKER_SCRIPT_DIR': None,
            # Key is command to execute script, value is image name.
            'DOCKER_IMAGES': {'python': 'python:3.8-alpine', 'shell': 'alpine:3',},
        }
    @staticmethod
    def django_app():
        # Dotted path of the Django app this extension provides.
        return 'waldur_mastermind.marketplace_script'
    @staticmethod
    def is_assembly():
        return True
    @staticmethod
    def celery_tasks():
        # Pull resource state from script backends once per hour.
        from datetime import timedelta
        return {
            'waldur-marketplace-script-pull-resources': {
                'task': 'waldur_marketplace_script.pull_resources',
                'schedule': timedelta(hours=1),
                'args': (),
            },
        }
| 38.297297
| 131
| 0.628088
|
4a0379bb31312b03c51e5c6c06dda7cd57c0e29a
| 251
|
py
|
Python
|
Python/7/MaximumMultiple/maximum_multiple.py
|
hwakabh/codewars
|
7afce5a7424d35abc55c350301ac134f2d3edd3d
|
[
"MIT"
] | null | null | null |
Python/7/MaximumMultiple/maximum_multiple.py
|
hwakabh/codewars
|
7afce5a7424d35abc55c350301ac134f2d3edd3d
|
[
"MIT"
] | 6
|
2020-02-21T17:01:59.000Z
|
2021-05-04T07:04:41.000Z
|
Python/7/MaximumMultiple/maximum_multiple.py
|
hwakabh/codewars
|
7afce5a7424d35abc55c350301ac134f2d3edd3d
|
[
"MIT"
] | null | null | null |
import sys
def max_multiple(divisor, bound):
    """Return the largest multiple of *divisor* that does not exceed *bound*."""
    # bound % divisor is the excess past the last multiple; removing it
    # lands exactly on that multiple (Python's % matches floor division,
    # so this is identical to (bound // divisor) * divisor).
    return bound - bound % divisor
if __name__ == "__main__":
    # Expect exactly two CLI arguments: divisor and bound.
    if len(sys.argv) == 3:
        print(max_multiple(divisor=int(sys.argv[1]), bound=int(sys.argv[2])))
    else:
        # Wrong usage: exit with a non-zero status code.
        sys.exit(1)
| 19.307692
| 77
| 0.61753
|
4a037a5ea75c9d714d5383a5c1aeee3c70c8e677
| 10,942
|
py
|
Python
|
accelbyte_py_sdk/api/group/operations/configuration/update_group_configurat_3473ca.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/group/operations/configuration/update_group_configurat_3473ca.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
accelbyte_py_sdk/api/group/operations/configuration/update_group_configurat_3473ca.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-group-service (2.11.5)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ModelsUpdateGroupConfigurationGlobalRulesRequestV1
from ...models import ModelsUpdateGroupConfigurationResponseV1
from ...models import ResponseErrorResponse
class UpdateGroupConfigurationGlobalRuleAdminV1(Operation):
"""update existing configuration global rule (updateGroupConfigurationGlobalRuleAdminV1)
Required permission 'ADMIN:NAMESPACE:{namespace}:GROUP:CONFIGURATION [UPDATE]'
This endpoint is used to update existing global rule configuration based on the allowed action. It will replace the permission with the request
Action Code: 73106
Required Permission(s):
- ADMIN:NAMESPACE:{namespace}:GROUP:CONFIGURATION [UPDATE]
Properties:
url: /group/v1/admin/namespaces/{namespace}/configuration/{configurationCode}/rules/{allowedAction}
method: PUT
tags: ["Configuration"]
consumes: ["application/json"]
produces: ["application/json"]
securities: [BEARER_AUTH]
body: (body) REQUIRED ModelsUpdateGroupConfigurationGlobalRulesRequestV1 in body
allowed_action: (allowedAction) REQUIRED str in path
configuration_code: (configurationCode) REQUIRED str in path
namespace: (namespace) REQUIRED str in path
Responses:
200: OK - ModelsUpdateGroupConfigurationResponseV1 (OK)
400: Bad Request - ResponseErrorResponse (20019: unable to parse request body | 20002: validation error)
401: Unauthorized - ResponseErrorResponse (20001: unauthorized access)
403: Forbidden - ResponseErrorResponse (20013: insufficient permissions | 20022: token is not user token)
404: Not Found - ResponseErrorResponse (73131: global configuration not found)
500: Internal Server Error - ResponseErrorResponse (Internal Server Error)
"""
# region fields
_url: str = "/group/v1/admin/namespaces/{namespace}/configuration/{configurationCode}/rules/{allowedAction}"
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
body: ModelsUpdateGroupConfigurationGlobalRulesRequestV1 # REQUIRED in [body]
allowed_action: str # REQUIRED in [path]
configuration_code: str # REQUIRED in [path]
namespace: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
if not hasattr(self, "body") or self.body is None:
return None
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "allowed_action"):
result["allowedAction"] = self.allowed_action
if hasattr(self, "configuration_code"):
result["configurationCode"] = self.configuration_code
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_body(self, value: ModelsUpdateGroupConfigurationGlobalRulesRequestV1) -> UpdateGroupConfigurationGlobalRuleAdminV1:
self.body = value
return self
def with_allowed_action(self, value: str) -> UpdateGroupConfigurationGlobalRuleAdminV1:
self.allowed_action = value
return self
def with_configuration_code(self, value: str) -> UpdateGroupConfigurationGlobalRuleAdminV1:
self.configuration_code = value
return self
def with_namespace(self, value: str) -> UpdateGroupConfigurationGlobalRuleAdminV1:
self.namespace = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ModelsUpdateGroupConfigurationGlobalRulesRequestV1()
if hasattr(self, "allowed_action") and self.allowed_action:
result["allowedAction"] = str(self.allowed_action)
elif include_empty:
result["allowedAction"] = ""
if hasattr(self, "configuration_code") and self.configuration_code:
result["configurationCode"] = str(self.configuration_code)
elif include_empty:
result["configurationCode"] = ""
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, ModelsUpdateGroupConfigurationResponseV1], Union[None, HttpResponse, ResponseErrorResponse]]:
"""Parse the given response.
200: OK - ModelsUpdateGroupConfigurationResponseV1 (OK)
400: Bad Request - ResponseErrorResponse (20019: unable to parse request body | 20002: validation error)
401: Unauthorized - ResponseErrorResponse (20001: unauthorized access)
403: Forbidden - ResponseErrorResponse (20013: insufficient permissions | 20022: token is not user token)
404: Not Found - ResponseErrorResponse (73131: global configuration not found)
500: Internal Server Error - ResponseErrorResponse (Internal Server Error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return ModelsUpdateGroupConfigurationResponseV1.create_from_dict(content), None
if code == 400:
return None, ResponseErrorResponse.create_from_dict(content)
if code == 401:
return None, ResponseErrorResponse.create_from_dict(content)
if code == 403:
return None, ResponseErrorResponse.create_from_dict(content)
if code == 404:
return None, ResponseErrorResponse.create_from_dict(content)
if code == 500:
return None, ResponseErrorResponse.create_from_dict(content)
return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
body: ModelsUpdateGroupConfigurationGlobalRulesRequestV1,
allowed_action: str,
configuration_code: str,
namespace: str,
) -> UpdateGroupConfigurationGlobalRuleAdminV1:
instance = cls()
instance.body = body
instance.allowed_action = allowed_action
instance.configuration_code = configuration_code
instance.namespace = namespace
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> UpdateGroupConfigurationGlobalRuleAdminV1:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ModelsUpdateGroupConfigurationGlobalRulesRequestV1.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = ModelsUpdateGroupConfigurationGlobalRulesRequestV1()
if "allowedAction" in dict_ and dict_["allowedAction"] is not None:
instance.allowed_action = str(dict_["allowedAction"])
elif include_empty:
instance.allowed_action = ""
if "configurationCode" in dict_ and dict_["configurationCode"] is not None:
instance.configuration_code = str(dict_["configurationCode"])
elif include_empty:
instance.configuration_code = ""
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"allowedAction": "allowed_action",
"configurationCode": "configuration_code",
"namespace": "namespace",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"body": True,
"allowedAction": True,
"configurationCode": True,
"namespace": True,
}
# endregion static methods
| 34.847134
| 187
| 0.665418
|
4a037be08a7a86cb81a0824eac703c6abf5cccf8
| 6,144
|
py
|
Python
|
LH.py
|
eicky/LightHouse_Automatic_Shutdown
|
317ee4aa196006e6357268439dbeec7a4d789703
|
[
"MIT"
] | null | null | null |
LH.py
|
eicky/LightHouse_Automatic_Shutdown
|
317ee4aa196006e6357268439dbeec7a4d789703
|
[
"MIT"
] | null | null | null |
LH.py
|
eicky/LightHouse_Automatic_Shutdown
|
317ee4aa196006e6357268439dbeec7a4d789703
|
[
"MIT"
] | null | null | null |
import json
import time
import requests
import os
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.lighthouse.v20200324 import lighthouse_client, models
gaojinData="流量告警"
gaojinResult="流量结果"
gaojinSatus="告警状态"
SecretId = os.environ["SecretId"]
SecretKey = os.environ["SecretKey"]
regions = ["ap-beijing", "ap-chengdu", "ap-guangzhou", "ap-hongkong", "ap-nanjing", "ap-shanghai", "ap-singapore", "ap-tokyo", "eu-moscow", "na-siliconvalley"]
percent = 0.90 # 流量限额,1表示使用到100%关机,默认设置为95%
tgToken = os.environ["tgToken"]
# DingTalk robot alert notification.
def sendmessage(message):
    """Post *message* to a DingTalk group via the robot webhook and print the reply."""
    # Replace with your own DingTalk webhook URL.
    url = "https://oapi.dingtalk.com/robot/send?access_token=******************************************"
    HEADERS = {
        "Content-Type": "application/json ;charset=utf-8"
    }
    String_textMsg = {
        "msgtype": "text",
        "text": {"content": message},
        "at": {
            "atMobiles": [
                "15*********" # To @-mention a specific person, put their phone number here.
            ],
            "isAtAll": 1 # Set to 1 to @-mention everyone in the group.
        }
    }
    String_textMsg = json.dumps(String_textMsg)
    res = requests.post(url, data=String_textMsg, headers=HEADERS)
    print(res.text)
# Run the traffic check for every configured credential pair and region.
def doCheck():
    """Split the SecretId/SecretKey env lists and call dofetch per region."""
    try:
        id_list = SecretId.split(",")
        key_list = SecretKey.split(",")
        for idx, cred_id in enumerate(id_list):
            for region in regions:
                dofetch(cred_id, key_list[idx], region)
    except TencentCloudSDKException as err:
        print(err)
def dofetch(id, key, region):
# 以下不用管
global gaojinSatus
global gaojinResult
cred = credential.Credential(id, key)
httpProfile = HttpProfile()
httpProfile.endpoint = "lighthouse.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = lighthouse_client.LighthouseClient(cred, region, clientProfile)
#获取实例列表
req_instances = models.DescribeInstancesRequest()
params = {}
req_instances.from_json_string(json.dumps(params))
resp_instances = client.DescribeInstances(req_instances)
s1=json.loads(resp_instances.to_json_string())['InstanceSet']
for j in range (len(s1)):
params.setdefault("InstanceIds",[]).append(s1[j]['InstanceId'])#获取实例ID
#获取实例流量
req = models.DescribeInstancesTrafficPackagesRequest()
req.from_json_string(json.dumps(params))
resp = client.DescribeInstancesTrafficPackages(req)
s2=json.loads(resp.to_json_string())["InstanceTrafficPackageSet"]
GB=1024*1024*1024
for i in range (len(s2)):
InstanceId= s2[i]['InstanceId']
s3= s2[i]['TrafficPackageSet'][0]
InstanceState =s1[i]["InstanceState"]
TrafficPackageTotal = round(s3['TrafficPackageTotal']/GB,2)
TrafficUsed = round(s3['TrafficUsed']/GB,2)
TrafficPackageRemaining=str(round(s3['TrafficPackageRemaining']/GB,2))
#告警数据
global gaojinData
gaojinData="流量告警数据:\n"+"已使用:"+str(TrafficUsed)+"GB"+"\n"+"总流量:"+str(TrafficPackageTotal)+"GB"+"\n"+"剩余量:"+str(TrafficPackageRemaining)+"GB"
#获取实例状态
print (i+1,":",InstanceId,":","已使用:",TrafficUsed,"总流量:",TrafficPackageTotal,"剩余:",TrafficPackageRemaining)
if (InstanceState == "RUNNING"):
gaojinSatus="流量告警状态:运行中!"
print("运行中")
#实例流量超出限制自动关闭
if (TrafficUsed/TrafficPackageTotal<percent):
#告警结果:
print("剩余流量充足")
gaojinResult="流量告警结果:剩余流量充足!"
else:
print(InstanceId,":","流量超出限制,自动关闭")
req_Stop = models.StopInstancesRequest()
params_Stop = {
}
params_Stop.setdefault("InstanceIds",[]).append(InstanceId)
req_Stop.from_json_string(json.dumps(params_Stop))
resp_Stop = client.StopInstances(req_Stop)
print(resp_Stop.to_json_string())
#添加TG酱通知
msgContent= InstanceId+ " :流量超出限制,即将自动关机。" + "剩余流量:" + TrafficPackageRemaining+ "GB"
msgUrl="https://tgbot-red.vercel.app/api?token="+ tgToken +"&message="+ msgContent
#告警结果:
gaojinResult="流量告警结果:流量超出限制,即将自动关机。\n"+"剩余流量:" + str(TrafficPackageRemaining)+ "GB"
response= requests.get(url=msgUrl).text
print (response)
else:
if (TrafficUsed/TrafficPackageTotal<percent):
#告警结果:
print("剩余流量充足,将自动开机")
req_Start = models.StartInstancesRequest()
params_Start = {
"InstanceIds": [InstanceId]
}
req_Start.from_json_string(json.dumps(params_Start))
resp_Start = client.StartInstances(req_Start)
print(resp_Start.to_json_string())
#添加TG酱通知
msgContent= InstanceId+ " :流量有剩余,即将自动开机。" + "剩余流量:" + TrafficPackageRemaining+ "GB"
msgUrl="https://tgbot-red.vercel.app/api?token="+ tgToken +"&message="+ msgContent
#告警结果:
gaojinResult="流量告警结果:流量有剩余,即将自动开机。\n"+"剩余流量:" + str(TrafficPackageRemaining)+ "GB"
response= requests.get(url=msgUrl).text
print (response)
else:
gaojinSatus="流量告警状态:已关机!"
print("流量告警状态:已关机!")
#添加时间戳
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
print ("--------------------")
#except TencentCloudSDKException as err:
# print(err)
if __name__ == '__main__':
doCheck()
gaojinTime="流量告警时间:"+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+"\n"+"\n"
gaojin=gaojinData+"\n"+"\n"+gaojinSatus+"\n"+"\n"+gaojinResult+"\n"+"\n"+gaojinTime
#sendmessage(gaojin)
# ck_kafka()
pass
| 39.133758
| 159
| 0.592936
|
4a037d0bea22f8f56b4590068416a578fb4ce032
| 321
|
py
|
Python
|
applications/easypylab/models/menu.py
|
wasuaje/web2py5
|
02f310b9526f92c4ec62ab5b0271069a1c101e9f
|
[
"BSD-3-Clause"
] | null | null | null |
applications/easypylab/models/menu.py
|
wasuaje/web2py5
|
02f310b9526f92c4ec62ab5b0271069a1c101e9f
|
[
"BSD-3-Clause"
] | null | null | null |
applications/easypylab/models/menu.py
|
wasuaje/web2py5
|
02f310b9526f92c4ec62ab5b0271069a1c101e9f
|
[
"BSD-3-Clause"
] | null | null | null |
# Populate the standard web2py response metadata from the app settings.
response.title = settings.title
response.subtitle = settings.subtitle
# Produces an "Author Name <email>" string from the settings mapping.
response.meta.author = '%(author)s <%(author_email)s>' % settings
response.meta.keywords = settings.keywords
response.meta.description = settings.description
#response.menu = [
#(T('Index'),URL('default','index')==URL(),URL('default','index'),[]),
#]
| 35.666667
| 70
| 0.725857
|
4a037ea25d3786ffba359e9733069bbc5d50d7e0
| 7,888
|
py
|
Python
|
New paper writing/memory_generator/predictor.py
|
leiloong/PaperRobot
|
070972dc1548571c28d89d2c54fb379e87d172c7
|
[
"MIT"
] | 453
|
2019-05-21T04:25:03.000Z
|
2022-03-31T16:37:01.000Z
|
New paper writing/memory_generator/predictor.py
|
leiloong/PaperRobot
|
070972dc1548571c28d89d2c54fb379e87d172c7
|
[
"MIT"
] | 20
|
2019-05-30T15:36:44.000Z
|
2022-02-13T23:53:33.000Z
|
New paper writing/memory_generator/predictor.py
|
leiloong/PaperRobot
|
070972dc1548571c28d89d2c54fb379e87d172c7
|
[
"MIT"
] | 145
|
2019-05-25T04:49:03.000Z
|
2022-03-16T12:19:21.000Z
|
import gc
from itertools import groupby
import torch
import statistics
def filter_duplicate(sents):
    """Deduplicate clauses and adjacent repeated words in generated text.

    Splits *sents* on '.' into sentences and each sentence on ',' into
    clauses; drops clauses seen before (exact match or after collapsing
    adjacent duplicate words), capitalizes each rebuilt sentence, and
    re-joins everything with ' . ' separators.
    """
    sents = sents.split('.')
    used = []    # raw clauses already emitted
    used_s = []  # word-collapsed clauses already emitted
    tmp = ""     # accumulated output text
    for ss in sents:
        tttmp = ''  # rebuilt version of the current sentence
        for s in ss.split(','):
            if s not in used:
                # Skip degenerate fragments (empty or single char).
                if len(s) < 2:
                    continue
                used.append(s)
                # groupby collapses runs of identical adjacent words ("a a b" -> "a b").
                no_dupes = ([k for k, v in groupby(s.split())])
                ns = ' '.join(no_dupes)
                if ns not in used_s:
                    used_s.append(ns)
                    # NOTE(review): s comes from ss.split(','), so it can
                    # never end with ',' — this branch looks unreachable.
                    if s[-1] == ',':
                        tttmp += ns + ' '
                    else:
                        tttmp += ns + ' , '
        if len(tttmp) == 0:
            continue
        # Capitalize the first character of the rebuilt sentence.
        tttmp = "%s%s" % (tttmp[0].upper(), tttmp[1:])
        # NOTE(review): tttmp always ends with a space here, so the '.'
        # branch appears unreachable; the ', ' strip below is the live path.
        if tttmp[-1] == '.':
            tmp += tttmp + ' '
        else:
            # Strip a trailing ', ' before terminating the sentence with ' . '.
            if tttmp[-2:] == ', ':
                tmp += tttmp[:-2]
            else:
                tmp += tttmp
            tmp += ' . '
    return tmp
class Predictor(object):
def __init__(self, model, id2word, vocab_size):
self.model = model
self.model.eval()
self.id2word = id2word
self.vocab_size = vocab_size
def predict(self, batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs):
torch.set_grad_enabled(False)
decoded_outputs, lengths = self.model(batch_s, batch_o_s, source_len, max_source_oov, batch_term,
batch_o_term)
length = lengths[0]
output = []
# print(decoded_outputs)
for i in range(length):
symbol = decoded_outputs[0][i].item()
if symbol < self.vocab_size:
output.append(self.id2word[symbol])
else:
output.append(list_oovs[0][symbol-self.vocab_size])
return self.prepare_for_bleu(output, True)[0]
def predict_beam(self, batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs,
stopwords, sflag=False):
torch.set_grad_enabled(False)
decoded_outputs = self.model(batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term,
beam=True, stopwords=stopwords, sflag=sflag)
outputs = []
for symbol in decoded_outputs:
if symbol < self.vocab_size:
outputs.append(self.id2word[symbol])
else:
outputs.append(list_oovs[0][symbol - self.vocab_size])
outputs = self.prepare_for_bleu(outputs, True)[0]
print(outputs)
return outputs
def preeval_batch(self, dataset, pmid=False):
torch.set_grad_enabled(False)
refs = {}
cands = {}
titles = {}
new_terms = {}
new_pmids = {}
avg_len_ref = []
avg_len_out = []
i = 0
for batch_idx in range(len(dataset.corpus)):
if pmid:
batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs, targets, \
sources, terms, pmids = dataset.get_batch(batch_idx, False)
else:
batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs, targets, \
sources, terms = dataset.get_batch(batch_idx, False)
decoded_outputs, lengths = self.model(batch_s, batch_o_s, source_len, max_source_oov, batch_term,
batch_o_term)
for j in range(len(lengths)):
i += 1
ref, lref = self.prepare_for_bleu(targets[j])
if pmid:
refs[i] = ref.split()
titles[i] = sources[j]
new_terms[i] = terms[j]
else:
avg_len_ref.append(lref)
refs[i] = [ref]
titles[i] = " ".join(sources[j])
new_terms[i] = " ".join(terms[j])
out_seq = []
for k in range(lengths[j]):
symbol = decoded_outputs[j][k].item()
if symbol < self.vocab_size:
out_seq.append(self.id2word[symbol])
else:
out_seq.append(list_oovs[j][symbol-self.vocab_size])
out, lout = self.prepare_for_bleu(out_seq, True)
if pmid:
new_pmids[i] = pmids[j]
cands[i] = out.split()
else:
avg_len_out.append(lout)
cands[i] = out
if i % 500 == 0:
print("Percentages: %.4f" % (i/float(dataset.len)))
# del batch_s, batch_o_s, source_len, batch_term, batch_o_term
# gc.collect()
# torch.cuda.empty_cache()
if pmid:
return cands, refs, titles, new_terms, new_pmids
else:
print("Reference length ", statistics.mean(avg_len_ref))
print("Output length ", statistics.mean(avg_len_out))
return cands, refs, titles, new_terms
def preeval_batch_beam(self, dataset, pmid=False, stopwords=None, sflag=True):
torch.set_grad_enabled(False)
refs = {}
cands = {}
titles = {}
new_terms = {}
new_pmids = {}
avg_len_ref = []
avg_len_out = []
i = 0
for batch_idx in range(len(dataset.corpus)): #
if pmid:
batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs, targets, \
sources, terms, pmids = dataset.get_batch(batch_idx, False)
else:
batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs, targets, \
sources, terms = dataset.get_batch(batch_idx, False)
decoded_outputs = self.model(batch_s, batch_o_s, source_len, max_source_oov, batch_term,
batch_o_term, beam=True, stopwords=stopwords, sflag=sflag)
i += 1
ref, lref = self.prepare_for_bleu(targets[0])
if pmid:
refs[i] = ref.split()
titles[i] = sources[0]
new_terms[i] = terms[0]
else:
avg_len_ref.append(lref)
refs[i] = [ref]
titles[i] = " ".join(sources[0])
new_terms[i] = " ".join(terms[0])
out_seq = []
for symbol in decoded_outputs:
if symbol < self.vocab_size:
out_seq.append(self.id2word[symbol])
else:
out_seq.append(list_oovs[0][symbol-self.vocab_size])
out, lout = self.prepare_for_bleu(out_seq, True)
if pmid:
new_pmids[i] = pmids[0]
cands[i] = out.split()
else:
avg_len_out.append(lout)
cands[i] = out
if i % 10 == 0:
print("Percentages: %.4f" % (i/float(dataset.len)))
# del batch_s, batch_o_s, source_len, batch_term, batch_o_term
# gc.collect()
# torch.cuda.empty_cache()
if pmid:
return cands, refs, titles, new_terms, new_pmids
else:
print("Reference length ", statistics.mean(avg_len_ref))
print("Output length ", statistics.mean(avg_len_out))
return cands, refs, titles, new_terms
def prepare_for_bleu(self, sentence, train=False):
sent = [x for x in sentence if x != '<pad>' and x != '<eos>' and x != '<sos>']
l = len(sent)
sent = ' '.join(sent)
if train:
sent = filter_duplicate(sent)
return sent, l
| 38.857143
| 111
| 0.506339
|
4a037f290ce423a48733ed9cf933c698eed2d58d
| 469
|
py
|
Python
|
examples/data_analysis/categorical_example.py
|
bhumikapahariapuresoftware/visions
|
8838d89b4f02e401112378b4662a779227ead9f8
|
[
"BSD-4-Clause"
] | 142
|
2020-01-07T21:17:10.000Z
|
2022-03-30T13:10:14.000Z
|
examples/data_analysis/categorical_example.py
|
bhumikapahariapuresoftware/visions
|
8838d89b4f02e401112378b4662a779227ead9f8
|
[
"BSD-4-Clause"
] | 121
|
2020-01-07T02:26:38.000Z
|
2022-03-29T17:18:19.000Z
|
examples/data_analysis/categorical_example.py
|
bhumikapahariapuresoftware/visions
|
8838d89b4f02e401112378b4662a779227ead9f8
|
[
"BSD-4-Clause"
] | 18
|
2020-02-17T03:17:37.000Z
|
2022-02-20T14:01:11.000Z
|
import pandas as pd
from examples.data_analysis.categorical import Category
from visions.functional import detect_type
from visions.types import Boolean, Categorical
from visions.typesets import StandardSet
# Build a custom typeset: start from the standard set, remove the stock
# Boolean and Categorical types, then register the custom Category type.
ts = StandardSet()
ts -= Boolean
ts -= Categorical
ts += Category
# A pandas categorical series: check membership and overall detection.
s1 = pd.Series(["A", "B", "C"] * 1000, dtype="category")
print(s1 in Category)
print(detect_type(s1, ts))
# A plain boolean series, for comparison against the categorical case.
s2 = pd.Series([True, False] * 1000)
print(s2 in Category)
print(detect_type(s2, ts))
| 23.45
| 56
| 0.750533
|
4a037f747e0aee01ee4c25dbb4d79bf2f6212a62
| 7,326
|
py
|
Python
|
anchore_engine/common/images.py
|
dspalmer99/anchore-engine
|
8c61318be6fec5d767426fa4ccd98472cc85b5cd
|
[
"Apache-2.0"
] | 1
|
2020-06-22T07:27:41.000Z
|
2020-06-22T07:27:41.000Z
|
anchore_engine/common/images.py
|
dspalmer99/anchore-engine
|
8c61318be6fec5d767426fa4ccd98472cc85b5cd
|
[
"Apache-2.0"
] | 4
|
2020-11-07T00:16:02.000Z
|
2020-11-08T20:52:06.000Z
|
anchore_engine/common/images.py
|
dspalmer99/anchore-engine
|
8c61318be6fec5d767426fa4ccd98472cc85b5cd
|
[
"Apache-2.0"
] | null | null | null |
import json
import re
import anchore_engine.services
import anchore_engine.utils
from anchore_engine import db
from anchore_engine.clients import docker_registry
from anchore_engine.subsys import logger
def lookup_registry_image(userId, image_info, registry_creds):
    """Fetch an image's digest and manifest directly from its registry.

    The registry client also reports a parent digest/manifest pair, which is
    discarded here; only (digest, manifest) is returned. Registry failures are
    wrapped into an anchore exception with HTTP 400.
    """
    # TODO: push this upstream in the call chain or wrap with an authz checker
    #if not registry_access(userId, image_info['registry']):
    #    raise Exception("access denied for user ("+str(userId)+") registry ("+str(image_info['registry'])+")")
    #else:
    # try clause from below is in the else-clause
    try:
        manifest, digest, _parentdigest, _parentmanifest = docker_registry.get_image_manifest(userId, image_info, registry_creds)
    except Exception as err:
        raise anchore_engine.common.helpers.make_anchore_exception(err, input_message="cannot fetch image digest/manifest from registry", input_httpcode=400)
    return digest, manifest
def get_image_info(userId, image_type, input_string, registry_lookup=False, registry_creds=None):
    """Parse a docker image reference and optionally resolve it at its registry.

    Args:
        userId: account id, passed through to the registry client.
        image_type: only 'docker' is supported; anything else raises.
        input_string: image reference string (registry/repo:tag or ...@digest).
        registry_lookup: when True (and the registry is not 'localbuild'),
            also fetch digest/manifest data from the registry.
        registry_creds: optional list of registry credential records.

    Returns:
        dict of parsed image fields; after a successful registry lookup it also
        contains digest, fulldigest, manifest, parentdigest and parentmanifest.

    Raises:
        Exception (via make_anchore_exception) on parse or registry failure,
        or a plain Exception for unsupported image types.
    """
    # None sentinel instead of a shared mutable default; identical behavior.
    if registry_creds is None:
        registry_creds = []
    ret = {}
    if image_type == 'docker':
        try:
            image_info = anchore_engine.utils.parse_dockerimage_string(input_string)
        except Exception as err:
            raise anchore_engine.common.helpers.make_anchore_exception(err, input_message="cannot handle image input string", input_httpcode=400)
        ret.update(image_info)
        if registry_lookup and image_info['registry'] != 'localbuild':
            try:
                manifest, digest, parentdigest, parentmanifest = docker_registry.get_image_manifest(userId, image_info, registry_creds)
            except Exception as err:
                raise anchore_engine.common.helpers.make_anchore_exception(err, input_message="cannot fetch image digest/manifest from registry", input_httpcode=400)
            image_info['digest'] = digest
            image_info['fulldigest'] = image_info['registry']+"/"+image_info['repo']+"@"+digest
            image_info['manifest'] = manifest
            image_info['parentmanifest'] = parentmanifest
            image_info['parentdigest'] = parentdigest
            # if we got a manifest, and the image_info does not yet contain an
            # imageId, try to get it from the manifest
            if manifest and not image_info['imageId']:
                try:
                    imageId = re.sub("^sha256:", "", manifest['config']['digest'])
                    image_info['imageId'] = imageId
                except Exception as err:
                    logger.debug("could not extract imageId from fetched manifest - exception: " + str(err))
                    logger.debug("using digest hash as imageId due to incomplete manifest ("+str(image_info['fulldigest'])+")")
                    htype, image_info['imageId'] = image_info['digest'].split(":", 1)
            ret.update(image_info)
        else:
            # NOTE(review): these keys are set on image_info only after ret was
            # populated, so they never reach the returned dict on this path --
            # confirm whether that is intended before changing it.
            image_info['manifest'] = {}
            image_info['parentmanifest'] = {}
    else:
        raise Exception("image type ("+str(image_type)+") not supported")
    return ret
def clean_docker_image_details_for_update(image_details):
    """Strip None-valued fields from each image-detail record.

    Falsy-but-set values (0, '', [], {}) are kept; only literal None entries
    are dropped, matching what the DB update layer expects.

    Args:
        image_details: list of dicts describing docker image details.

    Returns:
        A new list of new dicts with all None values removed.
    """
    # 'is not None' (identity) replaces the old '!= None' comparison.
    return [
        {k: v for k, v in image_detail.items() if v is not None}
        for image_detail in image_details
    ]
def make_image_record(userId, image_type, input_string, image_metadata=None, registry_lookup=True, registry_creds=None):
    """Build a catalog image record from an image reference plus optional metadata.

    Only image_type == 'docker' is supported. image_metadata may carry
    dockerfile, dockerfile_mode, tag, imageId, digest, annotations,
    parentdigest and created_at overrides; missing keys default to None
    (annotations to {}).

    Raises:
        Exception: for any image_type other than 'docker'.
    """
    # None sentinels instead of shared mutable defaults; behavior unchanged.
    if image_metadata is None:
        image_metadata = {}
    if registry_creds is None:
        registry_creds = []
    # Guard clause replaces the old if/else + unreachable 'return None'.
    if image_type != 'docker':
        raise Exception("image type ("+str(image_type)+") not supported")
    # dict.get never raises on a dict, so the former per-key try/except
    # wrappers were dead code and are dropped.
    dockerfile = image_metadata.get('dockerfile', None)
    dockerfile_mode = image_metadata.get('dockerfile_mode', None)
    tag = image_metadata.get('tag', None)
    imageId = image_metadata.get('imageId', None)
    digest = image_metadata.get('digest', None)
    annotations = image_metadata.get('annotations', {})
    parentdigest = image_metadata.get('parentdigest', None)
    created_at = image_metadata.get('created_at', None)
    return make_docker_image(userId, input_string=input_string, tag=tag, digest=digest, imageId=imageId, parentdigest=parentdigest, created_at=created_at, dockerfile=dockerfile, dockerfile_mode=dockerfile_mode, registry_lookup=registry_lookup, registry_creds=registry_creds, annotations=annotations)
def make_docker_image(userId, input_string=None, tag=None, digest=None, imageId=None, parentdigest=None, created_at=None, dockerfile=None, dockerfile_mode=None, registry_lookup=True, registry_creds=None, annotations=None):
    """Construct a catalog image record (plain dict) for a docker image.

    One of input_string, digest or tag must be supplied so that image_info can
    be resolved; otherwise a NameError surfaces (pre-existing behavior, kept).

    Returns:
        dict built from a db.CatalogImage row, with an 'image_detail' list
        holding one db.CatalogImageDocker-derived dict when a tag is known.

    Raises:
        Exception: when no digest can be determined for the image.
    """
    # None sentinels instead of shared mutable defaults; behavior unchanged.
    if registry_creds is None:
        registry_creds = []
    if annotations is None:
        annotations = {}
    ret = {}
    if input_string:
        image_info = get_image_info(userId, "docker", input_string, registry_lookup=registry_lookup, registry_creds=registry_creds)
    else:
        if digest:
            image_info = get_image_info(userId, "docker", digest, registry_lookup=registry_lookup, registry_creds=registry_creds)
            digest = image_info['digest']
        if tag:
            image_info = get_image_info(userId, "docker", tag, registry_lookup=registry_lookup, registry_creds=registry_creds)
            if digest and not image_info['digest']:
                image_info['digest'] = digest
    if 'digest' in image_info:
        imageDigest = str(image_info['digest'])
    else:
        raise Exception("input image_info needs to have a digest")
    if imageId:
        image_info['imageId'] = imageId
    new_input = db.CatalogImage().make()
    new_input['imageDigest'] = imageDigest
    new_input['userId'] = userId
    new_input['image_type'] = 'docker'
    new_input['dockerfile_mode'] = dockerfile_mode
    # an image without an explicit parent records itself as its own parent
    if not parentdigest:
        parentdigest = imageDigest
    new_input['parentDigest'] = parentdigest
    if created_at:
        new_input['created_at'] = created_at
    # drop annotations whose value is the literal string 'null'
    final_annotation_data = {k: v for k, v in annotations.items() if v != 'null'}
    new_input['annotations'] = json.dumps(final_annotation_data)
    new_image_obj = db.CatalogImage(**new_input)
    # strip ORM-internal (underscore) attributes from the record
    new_image = {key: value for key, value in vars(new_image_obj).items() if not key.startswith('_')}
    new_image['image_detail'] = []
    if image_info['tag']:
        new_input = db.CatalogImageDocker().make()
        new_input['imageDigest'] = imageDigest
        new_input['userId'] = userId
        new_input['dockerfile'] = dockerfile
        for t in ['registry', 'repo', 'tag', 'digest', 'imageId']:
            if t in image_info:
                new_input[t] = image_info[t]
        new_docker_image_obj = db.CatalogImageDocker(**new_input)
        new_docker_image = {key: value for key, value in vars(new_docker_image_obj).items() if not key.startswith('_')}
        new_image['image_detail'] = [new_docker_image]
    ret = new_image
    return ret
| 38.761905
| 303
| 0.655747
|
4a03806772680dbe2bf198cbedbb493162565a6b
| 120
|
py
|
Python
|
17.Python for Automation/01.Automate File Folder Terminal/02.terminal_command.py
|
ptyadana/python-dojo
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
[
"MIT"
] | 3
|
2020-06-01T04:17:18.000Z
|
2020-12-18T03:05:55.000Z
|
17.Python for Automation/01.Automate File Folder Terminal/02.terminal_command.py
|
ptyadana/python-dojo
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
[
"MIT"
] | 1
|
2020-04-25T08:01:59.000Z
|
2020-04-25T08:01:59.000Z
|
17.Python for Automation/01.Automate File Folder Terminal/02.terminal_command.py
|
ptyadana/python-dojo
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
[
"MIT"
] | 7
|
2020-04-26T10:02:36.000Z
|
2021-06-08T05:12:46.000Z
|
import subprocess
import sys

# Run the helper script five times. Use sys.executable rather than a bare
# 'python' so the same interpreter (and environment) that runs this script
# is used -- bare 'python' may resolve to a different/absent interpreter.
for _ in range(5):
    # call another script
    subprocess.check_call([sys.executable, 'hello_world.py'])
| 20
| 55
| 0.7
|
4a0380b871f240f96ee665347d02a6597d0e848b
| 6,816
|
py
|
Python
|
lib/rpn/proposal_layer.py
|
maheriya/py-faster-rcnn
|
e75137db4c60a142f3f8c0e91eaa34037a662655
|
[
"BSD-2-Clause"
] | null | null | null |
lib/rpn/proposal_layer.py
|
maheriya/py-faster-rcnn
|
e75137db4c60a142f3f8c0e91eaa34037a662655
|
[
"BSD-2-Clause"
] | null | null | null |
lib/rpn/proposal_layer.py
|
maheriya/py-faster-rcnn
|
e75137db4c60a142f3f8c0e91eaa34037a662655
|
[
"BSD-2-Clause"
] | null | null | null |
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import caffe
import numpy as np
import yaml
from fast_rcnn.config import cfg
from generate_anchors import generate_anchors
from fast_rcnn.bbox_transform import bbox_transform_inv, clip_boxes
from fast_rcnn.nms_wrapper import nms
DEBUG = False
class ProposalLayer(caffe.Layer):
    """
    Outputs object detection proposals by applying estimated bounding-box
    transformations to a set of regular boxes (called "anchors").
    """
    def setup(self, bottom, top):
        """Parse layer params, build base anchors, and shape the top blobs."""
        # parse the layer parameter string, which must be valid YAML
        # NOTE(review): yaml.load() without an explicit Loader is deprecated and
        # unsafe on untrusted input; tolerable only because the prototxt is
        # authored with the model.
        layer_params = yaml.load(self.param_str_)
        self._feat_stride = layer_params['feat_stride']
        # default scales here differ from upstream Faster R-CNN's (8, 16, 32)
        anchor_scales = layer_params.get('scales', (4, 8, 16)) #(8, 16, 32))
        self._anchors = generate_anchors(scales=np.array(anchor_scales))
        self._num_anchors = self._anchors.shape[0]
        if DEBUG:
            print 'feat_stride: {}'.format(self._feat_stride)
            print 'anchors:'
            print self._anchors
        # rois blob: holds R regions of interest, each is a 5-tuple
        # (n, x1, y1, x2, y2) specifying an image batch index n and a
        # rectangle (x1, y1, x2, y2)
        top[0].reshape(1, 5)
        # scores blob: holds scores for R regions of interest
        if len(top) > 1:
            top[1].reshape(1, 1, 1, 1)
    def forward(self, bottom, top):
        """Turn RPN outputs into scored, NMS-filtered object proposals.

        bottom[0]: class scores, bottom[1]: bbox deltas, bottom[2]: im_info
        (height, width, scale). Writes proposals to top[0] and, optionally,
        their scores to top[1].
        """
        # Algorithm:
        #
        # for each (H, W) location i
        #   generate A anchor boxes centered on cell i
        #   apply predicted bbox deltas at cell i to each of the A anchors
        # clip predicted boxes to image
        # remove predicted boxes with either height or width < threshold
        # sort all (proposal, score) pairs by score from highest to lowest
        # take top pre_nms_topN proposals before NMS
        # apply NMS with threshold 0.7 to remaining proposals
        # take after_nms_topN proposals after NMS
        # return the top proposals (-> RoIs top, scores top)
        assert bottom[0].data.shape[0] == 1, \
            'Only single item batches are supported'
        cfg_key = str(self.phase) # either 'TRAIN' or 'TEST'
        pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
        post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
        nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
        min_size = cfg[cfg_key].RPN_MIN_SIZE
        # the first set of _num_anchors channels are bg probs
        # the second set are the fg probs, which we want
        scores = bottom[0].data[:, self._num_anchors:, :, :]
        bbox_deltas = bottom[1].data
        im_info = bottom[2].data[0, :]
        if DEBUG:
            print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
            print 'scale: {}'.format(im_info[2])
        # 1. Generate proposals from bbox deltas and shifted anchors
        height, width = scores.shape[-2:]
        if DEBUG:
            print 'score map size: {}'.format(scores.shape)
        # Enumerate all shifts (one (x, y, x, y) offset per feature-map cell)
        shift_x = np.arange(0, width) * self._feat_stride
        shift_y = np.arange(0, height) * self._feat_stride
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                            shift_x.ravel(), shift_y.ravel())).transpose()
        # Enumerate all shifted anchors:
        #
        # add A anchors (1, A, 4) to
        # cell K shifts (K, 1, 4) to get
        # shift anchors (K, A, 4)
        # reshape to (K*A, 4) shifted anchors
        A = self._num_anchors
        K = shifts.shape[0]
        anchors = self._anchors.reshape((1, A, 4)) + \
                  shifts.reshape((1, K, 4)).transpose((1, 0, 2))
        anchors = anchors.reshape((K * A, 4))
        # Transpose and reshape predicted bbox transformations to get them
        # into the same order as the anchors:
        #
        # bbox deltas will be (1, 4 * A, H, W) format
        # transpose to (1, H, W, 4 * A)
        # reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
        # in slowest to fastest order
        bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))
        # Same story for the scores:
        #
        # scores are (1, A, H, W) format
        # transpose to (1, H, W, A)
        # reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
        scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
        # Convert anchors into proposals via bbox transformations
        proposals = bbox_transform_inv(anchors, bbox_deltas)
        # 2. clip predicted boxes to image
        proposals = clip_boxes(proposals, im_info[:2])
        # 3. remove predicted boxes with either height or width < threshold
        # (NOTE: convert min_size to input image scale stored in im_info[2])
        keep = _filter_boxes(proposals, min_size * im_info[2])
        proposals = proposals[keep, :]
        scores = scores[keep]
        # 4. sort all (proposal, score) pairs by score from highest to lowest
        # 5. take top pre_nms_topN (e.g. 6000)
        order = scores.ravel().argsort()[::-1]
        if pre_nms_topN > 0:
            order = order[:pre_nms_topN]
        proposals = proposals[order, :]
        scores = scores[order]
        # 6. apply nms (e.g. threshold = 0.7)
        # 7. take after_nms_topN (e.g. 300)
        # 8. return the top proposals (-> RoIs top)
        keep = nms(np.hstack((proposals, scores)), nms_thresh)
        if post_nms_topN > 0:
            keep = keep[:post_nms_topN]
        proposals = proposals[keep, :]
        scores = scores[keep]
        # Output rois blob
        # Our RPN implementation only supports a single input image, so all
        # batch inds are 0
        batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
        blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
        top[0].reshape(*(blob.shape))
        top[0].data[...] = blob
        # [Optional] output scores blob
        if len(top) > 1:
            top[1].reshape(*(scores.shape))
            top[1].data[...] = scores
    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
| 38.508475
| 80
| 0.590229
|
4a038171b8c7d93017fdac963237be1d1b4955f2
| 5,894
|
py
|
Python
|
internal-import-file/import-document/src/reportimporter/models.py
|
stevie-codes/connectors
|
67adc4cfcaa37559a02cc711ad881aa06ae0915d
|
[
"Apache-2.0"
] | 132
|
2019-06-28T23:23:18.000Z
|
2022-03-30T07:47:55.000Z
|
internal-import-file/import-document/src/reportimporter/models.py
|
stevie-codes/connectors
|
67adc4cfcaa37559a02cc711ad881aa06ae0915d
|
[
"Apache-2.0"
] | 472
|
2019-06-26T12:14:54.000Z
|
2022-03-31T13:49:53.000Z
|
internal-import-file/import-document/src/reportimporter/models.py
|
stevie-codes/connectors
|
67adc4cfcaa37559a02cc711ad881aa06ae0915d
|
[
"Apache-2.0"
] | 185
|
2019-07-01T09:32:14.000Z
|
2022-03-28T05:29:12.000Z
|
import json
import os
import re
from json import JSONDecodeError
from typing import List, Optional, Dict, Pattern, Any
from pycti import OpenCTIConnectorHelper
from pydantic import BaseModel, validator
from reportimporter.constants import (
COMMENT_INDICATOR,
CONFIG_PATH,
OBSERVABLE_DETECTION_CUSTOM_REGEX,
OBSERVABLE_DETECTION_OPTIONS,
)
class Observable(BaseModel):
    """Config-driven definition of one observable type to extract from reports.

    When detection_option selects custom-regex mode, regex_patterns are
    compiled into regex at construction time; whitelist (filter) files are
    always loaded into filter_regex.
    """
    name: str
    detection_option: str
    # Custom Regex approach
    regex_patterns: List[str] = []
    regex: List[Pattern] = []
    # Further processing
    stix_target: str
    # Whitelisting options
    filter_config: List[str] = []
    filter_regex: List[Pattern] = []

    def __init__(self, **data: Any) -> None:
        super().__init__(**data)
        # Compile custom regexes only in custom-regex detection mode; filter
        # (whitelist) patterns are loaded unconditionally.
        if self.detection_option == OBSERVABLE_DETECTION_CUSTOM_REGEX:
            self.regex = self._load_regex_pattern(self.regex_patterns)
        self.filter_regex = self._load_filter_values(self.filter_config)

    @validator("detection_option")
    def validate_detection_value(cls, value: str) -> str:
        """Ensure detection_option is one of the supported modes."""
        if value not in OBSERVABLE_DETECTION_OPTIONS:
            # Bugfix: the '{}' placeholder was never interpolated before.
            raise ValueError(
                "{} is not a valid detection_option value".format(value)
            )
        return value

    @validator("filter_config")
    def validate_files_exist(cls, filter_config: List[str]) -> List[str]:
        """Resolve filter file names against the config dir; require each to exist."""
        if len(filter_config) == 0:
            return filter_config
        filter_paths = []
        for filter_file in filter_config:
            base_path = os.path.dirname(os.path.abspath(__file__))
            file_path = os.path.join(base_path, CONFIG_PATH, filter_file)
            if not os.path.isfile(file_path):
                raise ValueError(
                    "{} is not a valid filter config file".format(file_path)
                )
            filter_paths.append(file_path)
        return filter_paths

    @validator("regex_patterns", "filter_config", pre=True)
    def pre_validate_transform_str_to_list(cls, field: str) -> Any:
        """Split a newline-separated config string into non-empty stripped lines."""
        return list(filter(None, (x.strip() for x in field.splitlines())))

    def _load_regex_pattern(self, regex_values: List[str]) -> List[Pattern]:
        """Compile each pattern case-insensitively; ValueError on a bad regex."""
        regexes = []
        if len(regex_values) == 0:
            return []
        for regex_value in regex_values:
            try:
                compiled_re = re.compile(regex_value, re.IGNORECASE)
                regexes.append(compiled_re)
            except re.error as e:
                raise ValueError(
                    f"Observable {self.name}: Unable to create regex from value '{regex_value}' ({e})"
                )
        return regexes

    def _load_filter_values(self, filter_config_paths: List[str]) -> List[Pattern]:
        """Read whitelist files (skipping blank/comment lines) and compile each
        entry as a word-bounded pattern."""
        if len(filter_config_paths) == 0:
            return []
        filter_patterns = []
        for filter_file in filter_config_paths:
            with open(filter_file, "r") as f:
                for line in f:
                    line = line.strip()
                    if len(line) == 0 or line.startswith(COMMENT_INDICATOR):
                        continue
                    filter_patterns.append("\\b{}\\b".format(line))
        filter_patterns = self._load_regex_pattern(filter_patterns)
        return filter_patterns
class Entity(BaseModel):
    """A resolved OpenCTI entity plus precompiled regexes for finding its
    mentions in report text (built by EntityConfig.convert_to_entity)."""
    # entity type name (shared with the EntityConfig that produced it)
    name: str
    # STIX class of the entity
    stix_class: str
    # STIX standard id of the concrete entity instance
    stix_id: str
    # raw values collected from the configured fields
    values: List[str]
    # one compiled, word-bounded, case-insensitive pattern per value
    regex: List[Pattern] = []
    # names whose matches should suppress this entity's matches -- presumably
    # consumed by the report parser; confirm against the matching logic
    omit_match_in: List[str] = []
class EntityConfig(BaseModel):
    """Configuration describing how one OpenCTI entity type is queried and
    turned into matchable Entity objects."""
    name: str
    stix_class: str
    filter: Optional[Dict]
    fields: List[str]
    exclude_values: List[str] = []
    regex: List[Pattern] = []
    omit_match_in: List[str] = []

    @validator("fields", "exclude_values", "omit_match_in", pre=True)
    def pre_validate_transform_str_to_list(cls, field: str) -> List[str]:
        """Split a newline-separated config string into non-empty stripped lines."""
        return [line.strip() for line in field.splitlines() if line.strip()]

    @validator("filter", pre=True)
    def pre_validate_transform_str_to_json(cls, filter_string: str) -> Any:
        """Parse the filter config string as JSON, surfacing a clear error."""
        try:
            return json.loads(filter_string)
        except JSONDecodeError as e:
            raise ValueError(f"filter received an invalid json string: {e}")

    def convert_to_entity(
        self, opencti_response: List[Dict], helper: OpenCTIConnectorHelper
    ) -> List[Entity]:
        """Turn raw OpenCTI query results into Entity objects with compiled,
        word-bounded, case-insensitive patterns for each collected value."""
        entities = []
        for sdo in opencti_response:
            standard_id = sdo.get("standard_id")
            # Gather every usable value from the configured fields
            collected = set()
            for field_name in self.fields:
                field_value = sdo.get(field_name, None)
                if not field_value:
                    continue
                if type(field_value) == list:
                    collected.update(field_value)
                elif type(field_value) == str:
                    collected.add(field_value)
            patterns = []
            for value in collected:
                # Skip SDO names explicitly excluded in the entity config
                if value.lower() in self.exclude_values:
                    helper.log_debug(
                        f"Entity: Discarding value '{value}' due to explicit exclusion as defined in {self.exclude_values}"
                    )
                    continue
                value = re.escape(value)
                value = f"\\b{value}\\b"
                try:
                    patterns.append(re.compile(value, re.IGNORECASE))
                except re.error as e:
                    helper.log_error(
                        f"Entity {self.name}: Unable to create regex from value '{value}' ({e})"
                    )
            if not patterns:
                continue
            entities.append(
                Entity(
                    name=self.name,
                    stix_class=self.stix_class,
                    stix_id=standard_id,
                    values=collected,
                    regex=patterns,
                    omit_match_in=self.omit_match_in,
                )
            )
        return entities
| 33.11236
| 123
| 0.584493
|
4a0382031feda78576c0bbcfb815091ee3861212
| 46,340
|
py
|
Python
|
SipMask-VIS/mmdet/models/anchor_heads/sipmask_head.py
|
Traderain/SipMask
|
267cf3921cbec24b1e667a1a71c137bd61038d36
|
[
"MIT"
] | null | null | null |
SipMask-VIS/mmdet/models/anchor_heads/sipmask_head.py
|
Traderain/SipMask
|
267cf3921cbec24b1e667a1a71c137bd61038d36
|
[
"MIT"
] | null | null | null |
SipMask-VIS/mmdet/models/anchor_heads/sipmask_head.py
|
Traderain/SipMask
|
267cf3921cbec24b1e667a1a71c137bd61038d36
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx
from ..builder import build_loss
from ..registry import HEADS
from ..utils import bias_init_with_prob, ConvModule, Scale
from mmdet.ops import DeformConv, CropSplit, CropSplitGt
from ..losses import cross_entropy, accuracy
import torch.nn.functional as F
import pycocotools.mask as mask_util
import numpy as np
INF = 1e8
def center_size(boxes):
    """ Convert corner-form boxes (xmin, ymin, xmax, ymax) to center-size
    form (cx, cy, w, h) for comparison to center-size ground truth data.
    Args:
        boxes: (tensor) point_form boxes, shape (n, 4).
    Return:
        boxes: (tensor) boxes converted to (cx, cy, w, h) form, shape (n, 4).
        (The previous docstring wrongly claimed corner form was returned.)
    """
    return torch.cat(((boxes[:, 2:] + boxes[:, :2]) / 2,  # cx, cy
                      boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
class FeatureAlign(nn.Module):
    """Aligns classification features via deformable convolution, with the
    sampling offsets predicted from the (detached) 4-channel bbox prediction."""
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 deformable_groups=4):
        super(FeatureAlign, self).__init__()
        # two offsets (x, y) per kernel position, per deformable group
        offset_channels = kernel_size * kernel_size * 2
        # 1x1 conv mapping the 4-channel bbox pred to deformable-conv offsets
        self.conv_offset = nn.Conv2d(4,
                                     deformable_groups * offset_channels,
                                     1,
                                     bias=False)
        self.conv_adaption = DeformConv(in_channels,
                                        out_channels,
                                        kernel_size=kernel_size,
                                        padding=(kernel_size - 1) // 2,
                                        deformable_groups=deformable_groups)
        self.relu = nn.ReLU(inplace=True)
        self.norm = nn.GroupNorm(32, in_channels)
    def init_weights(self, bias_value=0):
        """Initialize conv weights with small normal noise.

        NOTE(review): bias_value is accepted but never used -- confirm
        whether a bias initialization was intended here.
        """
        torch.nn.init.normal_(self.conv_offset.weight, std=1e-3)
        torch.nn.init.normal_(self.conv_adaption.weight, std=0.01)
    def forward(self, x, shape):
        """Predict offsets from `shape` (detached so no gradient flows back
        through the bbox branch) and apply deformable conv + GroupNorm + ReLU."""
        offset = self.conv_offset(shape.detach())
        x = self.relu(self.norm(self.conv_adaption(x, offset)))
        return x
def crop_split(masks00, masks01, masks10, masks11, boxes, masksG=None):
    """
    "Crop" predicted masks by zeroing out everything not in the predicted bbox.
    Vectorized by Chong (thanks Chong).
    Args:
        - masks should be a size [h, w, n] tensor of masks
        - boxes should be a size [n, 4] tensor of bbox coords in relative point form
    Each of the four mask tensors is cropped to its own quadrant of the box
    (split at the box center) and the quadrants are summed; when masksG is
    given it is additionally cropped to the whole box and returned as well.
    """
    h, w, n = masks00.size()
    xs = torch.arange(w, device=masks00.device, dtype=boxes.dtype).view(1, -1, 1).expand(h, w, n)
    ys = torch.arange(h, device=masks00.device, dtype=boxes.dtype).view(-1, 1, 1).expand(h, w, n)
    left, right = boxes[:, 0], boxes[:, 2]
    top, bottom = boxes[:, 1], boxes[:, 3]
    cx = (left + right) / 2
    cy = (top + bottom) / 2
    left = torch.clamp(left, min=0, max=w - 1)
    top = torch.clamp(top, min=0, max=h - 1)
    right = torch.clamp(right, min=0, max=w - 1)
    bottom = torch.clamp(bottom, min=0, max=h - 1)
    cx = torch.clamp(cx, min=0, max=w - 1)
    cy = torch.clamp(cy, min=0, max=h - 1)

    def region(x0, x1, y0, y1):
        # Detached binary mask selecting pixels with x in [x0, x1), y in [y0, y1)
        m = (xs >= x0.view(1, 1, -1)) & (xs < x1.view(1, 1, -1)) & \
            (ys >= y0.view(1, 1, -1)) & (ys < y1.view(1, 1, -1))
        return m.float().detach()

    # top-left, top-right, bottom-left, bottom-right quadrants, summed in the
    # same order as the original implementation
    masks = masks00 * region(left, cx, top, cy)
    masks = masks + masks01 * region(cx, right, top, cy)
    masks = masks + masks10 * region(left, cx, cy, bottom)
    masks = masks + masks11 * region(cx, right, cy, bottom)

    if masksG is not None:
        # whole-box crop; intentionally NOT detached (matches original)
        whole = (xs >= left.view(1, 1, -1)) & (xs < right.view(1, 1, -1)) & \
                (ys >= top.view(1, 1, -1)) & (ys < bottom.view(1, 1, -1))
        masksG = masksG * whole.float()
        return masks, masksG
    return masks
@HEADS.register_module
class SipMaskHead(nn.Module):
    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 stacked_convs=4,
                 strides=(4, 8, 16, 32, 64),
                 regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
                                 (512, INF)),
                 center_sampling=False,
                 center_sample_radius=1.5,
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),
                 loss_centerness=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 conv_cfg=None,
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
        """FCOS-style head with SipMask mask-coefficient and tracking branches.

        Args:
            num_classes: number of classes including background; the
                classification branch outputs num_classes - 1 channels.
            in_channels / feat_channels: input and tower channel widths.
            stacked_convs: depth of the conv towers (cls tower uses one less;
                see _init_layers).
            strides / regress_ranges: per-FPN-level strides and the distance
                ranges each level regresses.
            loss_cls / loss_bbox / loss_centerness: loss configs passed to
                build_loss.
        """
        super(SipMaskHead, self).__init__()
        self.num_classes = num_classes
        # background is excluded from the classification output
        self.cls_out_channels = num_classes - 1
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        self.strides = strides
        self.regress_ranges = regress_ranges
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.loss_centerness = build_loss(loss_centerness)
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        self.center_sampling = center_sampling
        self.center_sample_radius = center_sample_radius
        self.fpn_strides = [8, 16, 32, 64, 128]
        # presumably weights for combining cls/IoU/label terms in the tracking
        # match score -- confirm against the matching code
        self.match_coeff = [1.0, 2.0, 10]
        self.loss_track = build_loss(dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
        # state carried between frames for tracking at inference time
        self.prev_roi_feats = None
        self.prev_bboxes = None
        self.prev_det_labels = None
        self._init_layers()
    def _init_layers(self):
        """Build the cls/reg/track conv towers and all prediction layers."""
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        # cls tower has one conv fewer than stacked_convs: the final cls conv
        # is applied after feature alignment in forward()
        for i in range(self.stacked_convs - 1):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
        # per-location predictions: class scores, 4 bbox distances, centerness
        self.fcos_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        # learnable per-level scale applied to the bbox regression
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
        # number of mask coefficients; sip_cof predicts nc per box quadrant
        self.nc = 32
        ###########instance##############
        self.feat_align = FeatureAlign(self.feat_channels, self.feat_channels, 3)
        self.sip_cof = nn.Conv2d(self.feat_channels, self.nc * 4, 3, padding=1)
        # 768 = feat_channels * 3: forward() concatenates three FPN levels
        self.sip_mask_lat = nn.Conv2d(512, self.nc, 3, padding=1)
        self.sip_mask_lat0 = nn.Conv2d(768, 512, 1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.crop_cuda = CropSplit(2)
        self.crop_gt_cuda = CropSplitGt(2)
        self.track_convs = nn.ModuleList()
        for i in range(self.stacked_convs - 1):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.track_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
        self.sipmask_track = nn.Conv2d(self.feat_channels * 3, 512, 1, padding=0)
    def init_weights(self):
        """Initialize all conv weights; the final cls conv bias uses the
        focal-loss prior so training starts with low foreground scores."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
        normal_init(self.fcos_reg, std=0.01)
        normal_init(self.fcos_centerness, std=0.01)
        normal_init(self.sip_cof, std=0.01)
        normal_init(self.sip_mask_lat, std=0.01)
        normal_init(self.sip_mask_lat0, std=0.01)
        self.feat_align.init_weights()
        for m in self.track_convs:
            normal_init(m.conv, std=0.01)
    def forward(self, feats, feats_x, flag_train=True):
        """Run the head over the FPN levels of the current frame (feats) and
        a reference frame (feats_x, used for tracking during training).

        Returns a 7-tuple: per-level cls scores, bbox preds, centernesses,
        mask coefficients, the fused mask feature map, and track features for
        the current and reference frames (at inference, flag_train=False, the
        current-frame track features are returned twice).
        """
        # return multi_apply(self.forward_single, feats, self.scales)
        cls_scores = []
        bbox_preds = []
        centernesses = []
        cof_preds = []
        feat_masks = []
        track_feats = []
        track_feats_ref = []
        count = 0
        for x, x_f, scale, stride in zip(feats, feats_x, self.scales, self.strides):
            cls_feat = x
            reg_feat = x
            track_feat = x
            track_feat_f = x_f
            for cls_layer in self.cls_convs:
                cls_feat = cls_layer(cls_feat)
            for reg_layer in self.reg_convs:
                reg_feat = reg_layer(reg_feat)
            # only the three highest-resolution levels feed the track branch;
            # each level is upsampled to a common resolution before concat
            if count < 3:
                for track_layer in self.track_convs:
                    track_feat = track_layer(track_feat)
                track_feat = F.interpolate(track_feat, scale_factor=(2 ** count), mode='bilinear', align_corners=False)
                track_feats.append(track_feat)
                if flag_train:
                    for track_layer in self.track_convs:
                        track_feat_f = track_layer(track_feat_f)
                    track_feat_f = F.interpolate(track_feat_f, scale_factor=(2 ** count), mode='bilinear',
                                                 align_corners=False)
                    track_feats_ref.append(track_feat_f)
            # scale the bbox_pred of different level
            # float to avoid overflow when enabling FP16
            bbox_pred = scale(self.fcos_reg(reg_feat))
            # align cls features using the bbox prediction before scoring
            cls_feat = self.feat_align(cls_feat, bbox_pred)
            cls_score = self.fcos_cls(cls_feat)
            centerness = self.fcos_centerness(reg_feat)
            centernesses.append(centerness)
            cls_scores.append(cls_score)
            bbox_preds.append(bbox_pred.float() * stride)
            ########COFFECIENTS###############
            cof_pred = self.sip_cof(cls_feat)
            cof_preds.append(cof_pred)
            ############contextual#######################
            if count < 3:
                feat_up = F.interpolate(reg_feat, scale_factor=(2 ** count), mode='bilinear', align_corners=False)
                feat_masks.append(feat_up)
            count = count + 1
        # ################contextual enhanced##################
        # fuse the three upsampled levels into one mask feature map at 4x
        feat_masks = torch.cat(feat_masks, dim=1)
        feat_masks = self.relu(self.sip_mask_lat(self.relu(self.sip_mask_lat0(feat_masks))))
        feat_masks = F.interpolate(feat_masks, scale_factor=4, mode='bilinear', align_corners=False)
        track_feats = torch.cat(track_feats, dim=1)
        track_feats = self.sipmask_track(track_feats)
        if flag_train:
            track_feats_ref = torch.cat(track_feats_ref, dim=1)
            track_feats_ref = self.sipmask_track(track_feats_ref)
            return cls_scores, bbox_preds, centernesses, cof_preds, feat_masks, track_feats, track_feats_ref
        else:
            return cls_scores, bbox_preds, centernesses, cof_preds, feat_masks, track_feats, track_feats
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def loss(self,
cls_scores,
bbox_preds,
centernesses,
cof_preds,
feat_masks,
track_feats,
track_feats_ref,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None,
gt_masks_list=None,
ref_bboxes_list=None,
gt_pids_list=None):
assert len(cls_scores) == len(bbox_preds) == len(centernesses)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets, label_list, bbox_targets_list, gt_inds = self.fcos_target(all_level_points,
gt_bboxes, gt_labels)
# decode detection and groundtruth
det_bboxes = []
det_targets = []
num_levels = len(bbox_preds)
for img_id in range(len(img_metas)):
bbox_pred_list = [
bbox_preds[i][img_id].permute(1, 2, 0).reshape(-1, 4).detach() for i in range(num_levels)
]
bbox_target_list = bbox_targets_list[img_id]
bboxes = []
targets = []
for i in range(len(bbox_pred_list)):
bbox_pred = bbox_pred_list[i]
bbox_target = bbox_target_list[i]
points = all_level_points[i]
bboxes.append(distance2bbox(points, bbox_pred))
targets.append(distance2bbox(points, bbox_target))
bboxes = torch.cat(bboxes, dim=0)
targets = torch.cat(targets, dim=0)
det_bboxes.append(bboxes)
det_targets.append(targets)
gt_masks = []
for i in range(len(gt_labels)):
gt_label = gt_labels[i]
gt_masks.append(
torch.from_numpy(np.array(gt_masks_list[i][:gt_label.shape[0]], dtype=np.float32)).to(gt_label.device))
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_centerness = [
centerness.permute(0, 2, 3, 1).reshape(-1)
for centerness in centernesses
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_centerness = torch.cat(flatten_centerness)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
avg_factor=num_pos + num_imgs) # avoid num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_centerness_targets = self.centerness_target(pos_bbox_targets)
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
# centerness weighted iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds,
weight=pos_centerness_targets,
avg_factor=pos_centerness_targets.sum())
loss_centerness = self.loss_centerness(pos_centerness,
pos_centerness_targets)
else:
loss_bbox = pos_bbox_preds.sum()
loss_centerness = pos_centerness.sum()
##########mask loss#################
flatten_cls_scores1 = [
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_cls_scores1 = torch.cat(flatten_cls_scores1, dim=1)
flatten_cof_preds = [
cof_pred.permute(0, 2, 3, 1).reshape(cof_pred.shape[0], -1, 32 * 4)
for cof_pred in cof_preds
]
loss_mask = 0
loss_match = 0
match_acc = 0
n_total = 0
flatten_cof_preds = torch.cat(flatten_cof_preds, dim=1)
for i in range(num_imgs):
labels = torch.cat([labels_level.flatten() for labels_level in label_list[i]])
bbox_dt = det_bboxes[i] / 2
bbox_dt = bbox_dt.detach()
pos_inds = (labels > 0).nonzero().view(-1)
cof_pred = flatten_cof_preds[i][pos_inds]
img_mask = feat_masks[i]
mask_h = img_mask.shape[1]
mask_w = img_mask.shape[2]
idx_gt = gt_inds[i]
bbox_dt = bbox_dt[pos_inds, :4]
area = (bbox_dt[:, 2] - bbox_dt[:, 0]) * (bbox_dt[:, 3] - bbox_dt[:, 1])
bbox_dt = bbox_dt[area > 1.0, :]
idx_gt = idx_gt[area > 1.0]
cof_pred = cof_pred[area > 1.0]
if bbox_dt.shape[0] == 0:
loss_mask += area.sum()*0
continue
bbox_gt = gt_bboxes[i]
cls_score = flatten_cls_scores1[i, pos_inds, labels[pos_inds] - 1].sigmoid().detach()
cls_score = cls_score[area > 1.0]
ious = bbox_overlaps(bbox_gt[idx_gt] / 2, bbox_dt, is_aligned=True)
weighting = cls_score * ious
weighting = weighting / (torch.sum(weighting) + 0.0001) * len(weighting)
###################track####################
bboxes = ref_bboxes_list[i]
amplitude = 0.05
random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(
-amplitude, amplitude)
# before jittering
cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
# after jittering
new_cxcy = cxcy + wh * random_offsets[:, :2]
new_wh = wh * (1 + random_offsets[:, 2:])
# xywh to xyxy
new_x1y1 = (new_cxcy - new_wh / 2)
new_x2y2 = (new_cxcy + new_wh / 2)
new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)
# clip bboxes
# print(bbox_dt.shape)
track_feat_i = self.extract_box_feature_center_single(track_feats[i], bbox_dt * 2)
track_box_ref = self.extract_box_feature_center_single(track_feats_ref[i], new_bboxes)
gt_pids = gt_pids_list[i]
cur_ids = gt_pids[idx_gt]
prod = torch.mm(track_feat_i, torch.transpose(track_box_ref, 0, 1))
m = prod.size(0)
dummy = torch.zeros(m, 1, device=torch.cuda.current_device())
prod_ext = torch.cat([dummy, prod], dim=1)
loss_match += cross_entropy(prod_ext, cur_ids)
n_total += len(idx_gt)
match_acc += accuracy(prod_ext, cur_ids) * len(idx_gt)
gt_mask = F.interpolate(gt_masks[i].unsqueeze(0), scale_factor=0.5, mode='bilinear',
align_corners=False).squeeze(0)
shape = np.minimum(feat_masks[i].shape, gt_mask.shape)
gt_mask_new = gt_mask.new_zeros(gt_mask.shape[0], mask_h, mask_w)
gt_mask_new[:gt_mask.shape[0], :shape[1], :shape[2]] = gt_mask[:gt_mask.shape[0], :shape[1], :shape[2]]
gt_mask_new = gt_mask_new.gt(0.5).float()
gt_mask_new = torch.index_select(gt_mask_new, 0, idx_gt).permute(1, 2, 0).contiguous()
#######spp###########################
img_mask1 = img_mask.permute(1, 2, 0)
pos_masks00 = torch.sigmoid(img_mask1 @ cof_pred[:, 0:32].t())
pos_masks01 = torch.sigmoid(img_mask1 @ cof_pred[:, 32:64].t())
pos_masks10 = torch.sigmoid(img_mask1 @ cof_pred[:, 64:96].t())
pos_masks11 = torch.sigmoid(img_mask1 @ cof_pred[:, 96:128].t())
pred_masks = torch.stack([pos_masks00, pos_masks01, pos_masks10, pos_masks11], dim=0)
pred_masks = self.crop_cuda(pred_masks, bbox_dt)
gt_mask_crop = self.crop_gt_cuda(gt_mask_new, bbox_dt)
# pred_masks, gt_mask_crop = crop_split(pos_masks00, pos_masks01, pos_masks10, pos_masks11, bbox_dt,
# gt_mask_new)
pre_loss = F.binary_cross_entropy(pred_masks, gt_mask_crop, reduction='none')
pos_get_csize = center_size(bbox_dt)
gt_box_width = pos_get_csize[:, 2]
gt_box_height = pos_get_csize[:, 3]
pre_loss = pre_loss.sum(dim=(0, 1)) / gt_box_width / gt_box_height / pos_get_csize.shape[0]
loss_mask += torch.sum(pre_loss * weighting.detach())
loss_mask = loss_mask / num_imgs
loss_match = loss_match / num_imgs
match_acc = match_acc / n_total
if loss_mask == 0:
loss_mask = bbox_dt[:, 0].sum()*0
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_centerness=loss_centerness,
loss_mask=loss_mask,
loss_match=loss_match,
match_acc=match_acc)
def compute_comp_scores(self, match_ll, bbox_scores, bbox_ious, label_delta, add_bbox_dummy=False):
    """Combine matching log-likelihood, detection confidence, box IoU and
    label agreement into one comprehensive matching score.

    Args:
        match_ll: (N, M) matching log-likelihoods (column 0 may be the
            "new object" dummy column).
        bbox_scores: (N, 1) detection confidences.
        bbox_ious: (N, M-1) IoUs against previously tracked boxes.
        label_delta: same shape as bbox_ious; 1 where labels agree, else 0.
        add_bbox_dummy: prepend a dummy column (iou = 0, label_delta = 1)
            so the auxiliary terms align with the extra "new object"
            column of match_ll.

    Returns:
        (N, M) comprehensive score tensor; match_ll unchanged when
        self.match_coeff is None.
    """
    # compute comprehensive matching score based on matchig likelihood,
    # bbox confidence, and ious
    if add_bbox_dummy:
        # BUG FIX: allocate the dummy columns on the input tensors' own
        # device instead of torch.cuda.current_device(), which crashed on
        # CPU and picked the wrong device in multi-GPU setups.
        bbox_iou_dummy = torch.zeros(bbox_ious.size(0), 1,
                                     device=bbox_ious.device)
        bbox_ious = torch.cat((bbox_iou_dummy, bbox_ious), dim=1)
        label_dummy = torch.ones(label_delta.size(0), 1,
                                 device=label_delta.device)
        label_delta = torch.cat((label_dummy, label_delta), dim=1)
    if self.match_coeff is None:
        return match_ll
    else:
        # match coeff needs to be length of 3
        assert (len(self.match_coeff) == 3)
        return match_ll + self.match_coeff[0] * \
            torch.log(bbox_scores) + self.match_coeff[1] * bbox_ious \
            + self.match_coeff[2] * label_delta
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def get_bboxes(self,
               cls_scores,
               bbox_preds,
               centernesses,
               cof_preds,
               feat_masks,
               track_feats,
               track_feats_ref,
               img_metas,
               cfg,
               rescale=None):
    """Decode head outputs into per-image detections, instance masks and
    persistent track ids.

    For every image: run single-image decoding (get_bboxes_single),
    sample a tracking embedding at each detection center, then match
    against the tracking state kept on ``self`` (prev_bboxes,
    prev_roi_feats, prev_det_labels), which is mutated in place.

    Returns:
        list: one entry per image of [det_bboxes (N, 5), det_labels (N,),
        segms (dict: obj_id -> COCO RLE), det_obj_ids (array of ids,
        -1 for suppressed duplicates)].
    """
    assert len(cls_scores) == len(bbox_preds)
    num_levels = len(cls_scores)
    featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
    # Point grid for every FPN level, shared by all images in the batch.
    mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
                                  bbox_preds[0].device)
    result_list = []
    for img_id in range(len(img_metas)):
        # Per-image, per-level slices of the raw head outputs.
        cls_score_list = [
            cls_scores[i][img_id].detach() for i in range(num_levels)
        ]
        bbox_pred_list = [
            bbox_preds[i][img_id].detach() for i in range(num_levels)
        ]
        centerness_pred_list = [
            centernesses[i][img_id].detach() for i in range(num_levels)
        ]
        cof_pred_list = [
            cof_preds[i][img_id].detach() for i in range(num_levels)
        ]
        feat_mask_list = feat_masks[img_id]
        track_feat_list = track_feats[img_id]
        # 'is_first' marks the first frame of a video; it resets tracking.
        is_first = True
        if 'is_first' in img_metas[img_id]:
            is_first = img_metas[img_id]['is_first']
        img_shape = img_metas[img_id]['img_shape']
        ori_shape = img_metas[img_id]['ori_shape']
        scale_factor = img_metas[img_id]['scale_factor']
        det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
                                            centerness_pred_list, cof_pred_list, feat_mask_list,
                                            mlvl_points, img_shape, ori_shape,
                                            scale_factor, cfg, rescale)
        if det_bboxes[0].shape[0] == 0:
            # No detections in this frame: emit empty per-class segms.
            # NOTE(review): this returns immediately instead of
            # `continue`, so later images in a multi-image batch would be
            # dropped — apparently relying on the one-image-at-a-time
            # assumption stated below; confirm before batching.
            cls_segms = [[] for _ in range(self.num_classes - 1)]
            result_list.append([det_bboxes[0], det_bboxes[1], cls_segms, []])
            return result_list
        # `+ 0.0` makes a copy so rescaling below does not alter
        # det_bboxes[0] in place.
        res_det_bboxes = det_bboxes[0] + 0.0
        if rescale:
            res_det_bboxes[:, :4] *= scale_factor
        det_roi_feats = self.extract_box_feature_center_single(track_feat_list, res_det_bboxes[:, :4])
        # recompute bbox match feature
        det_labels = det_bboxes[1]
        if is_first or (not is_first and self.prev_bboxes is None):
            # First frame (or no history yet): every detection starts a
            # new track.
            det_obj_ids = np.arange(res_det_bboxes.size(0))
            # save bbox and features for later matching
            self.prev_bboxes = det_bboxes[0]
            self.prev_roi_feats = det_roi_feats
            self.prev_det_labels = det_labels
        else:
            assert self.prev_roi_feats is not None
            # only support one image at a time
            # Embedding similarity between current and previous detections.
            prod = torch.mm(det_roi_feats, torch.transpose(self.prev_roi_feats, 0, 1))
            m = prod.size(0)
            # Column 0 is the "new object" hypothesis.
            dummy = torch.zeros(m, 1, device=torch.cuda.current_device())
            match_score = torch.cat([dummy, prod], dim=1)
            match_logprob = torch.nn.functional.log_softmax(match_score, dim=1)
            label_delta = (self.prev_det_labels == det_labels.view(-1, 1)).float()
            bbox_ious = bbox_overlaps(det_bboxes[0][:, :4], self.prev_bboxes[:, :4])
            # compute comprehensive score
            comp_scores = self.compute_comp_scores(match_logprob,
                                                   det_bboxes[0][:, 4].view(-1, 1),
                                                   bbox_ious,
                                                   label_delta,
                                                   add_bbox_dummy=True)
            match_likelihood, match_ids = torch.max(comp_scores, dim=1)
            # translate match_ids to det_obj_ids, assign new id to new objects
            # update tracking features/bboxes of exisiting object,
            # add tracking features/bboxes of new object
            match_ids = match_ids.cpu().numpy().astype(np.int32)
            det_obj_ids = np.ones((match_ids.shape[0]), dtype=np.int32) * (-1)
            best_match_scores = np.ones((self.prev_bboxes.size(0))) * (-100)
            for idx, match_id in enumerate(match_ids):
                if match_id == 0:
                    # add new object
                    det_obj_ids[idx] = self.prev_roi_feats.size(0)
                    self.prev_roi_feats = torch.cat((self.prev_roi_feats, det_roi_feats[idx][None]), dim=0)
                    self.prev_bboxes = torch.cat((self.prev_bboxes, det_bboxes[0][idx][None]), dim=0)
                    self.prev_det_labels = torch.cat((self.prev_det_labels, det_labels[idx][None]), dim=0)
                else:
                    # multiple candidate might match with previous object, here we choose the one with
                    # largest comprehensive score
                    obj_id = match_id - 1
                    match_score = comp_scores[idx, match_id]
                    if match_score > best_match_scores[obj_id]:
                        det_obj_ids[idx] = obj_id
                        best_match_scores[obj_id] = match_score
                        # udpate feature
                        self.prev_roi_feats[obj_id] = det_roi_feats[idx]
                        self.prev_bboxes[obj_id] = det_bboxes[0][idx]
        # Encode each mask as COCO RLE, keyed by its assigned object id.
        obj_segms = {}
        masks = det_bboxes[2]
        for i in range(det_bboxes[0].shape[0]):
            label = det_labels[i]
            mask = masks[i].cpu().numpy()
            im_mask = np.zeros((ori_shape[0], ori_shape[1]), dtype=np.uint8)
            # Guard against off-by-a-pixel size mismatch after resizing.
            shape = np.minimum(mask.shape, ori_shape[0:2])
            im_mask[:shape[0], :shape[1]] = mask[:shape[0], :shape[1]]
            rle = mask_util.encode(
                np.array(im_mask[:, :, np.newaxis], order='F'))[0]
            if det_obj_ids[i] >= 0:
                obj_segms[det_obj_ids[i]] = rle
        result_list.append([det_bboxes[0], det_bboxes[1], obj_segms, det_obj_ids])
    return result_list
def get_bboxes_single(self,
                      cls_scores,
                      bbox_preds,
                      centernesses,
                      cof_preds,
                      feat_mask,
                      mlvl_points,
                      img_shape,
                      ori_shape,
                      scale_factor,
                      cfg,
                      rescale=False):
    """Decode one image: per-level score/box/coefficient decoding,
    fast NMS, then mask assembly from the coefficient-prototype product.

    Returns:
        (det_bboxes (N, 5), det_labels (N,), masks — a float tensor
        binarized in place at 0.5, or [] when nothing is kept).
    """
    assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
    mlvl_bboxes = []
    mlvl_scores = []
    mlvl_centerness = []
    mlvl_cofs = []
    for cls_score, bbox_pred, cof_pred, centerness, points in zip(
            cls_scores, bbox_preds, cof_preds, centernesses, mlvl_points):
        assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
        # (C, H, W) -> (H*W, C): one row of predictions per location.
        scores = cls_score.permute(1, 2, 0).reshape(
            -1, self.cls_out_channels).sigmoid()
        centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
        bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
        # 32 mask coefficients for each of the 4 spatial sub-regions.
        cof_pred = cof_pred.permute(1, 2, 0).reshape(-1, 32 * 4)
        nms_pre = cfg.get('nms_pre', -1)
        if nms_pre > 0 and scores.shape[0] > nms_pre:
            # Keep only the top-nms_pre locations by
            # centerness-weighted class score.
            max_scores, _ = (scores * centerness[:, None]).max(dim=1)
            _, topk_inds = max_scores.topk(nms_pre)
            points = points[topk_inds, :]
            bbox_pred = bbox_pred[topk_inds, :]
            cof_pred = cof_pred[topk_inds, :]
            scores = scores[topk_inds, :]
            centerness = centerness[topk_inds]
        bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
        mlvl_cofs.append(cof_pred)
        mlvl_bboxes.append(bboxes)
        mlvl_scores.append(scores)
        mlvl_centerness.append(centerness)
    mlvl_bboxes = torch.cat(mlvl_bboxes)
    mlvl_cofs = torch.cat(mlvl_cofs)
    if rescale:
        mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
    mlvl_scores = torch.cat(mlvl_scores)
    # Prepend a background column so class indices line up with labels.
    padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
    mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
    mlvl_centerness = torch.cat(mlvl_centerness)
    mlvl_scores = mlvl_scores * mlvl_centerness.view(-1, 1)
    det_bboxes, det_labels, det_cofs = self.fast_nms(mlvl_bboxes, mlvl_scores[:, 1:].transpose(1, 0).contiguous(),
                                                     mlvl_cofs, cfg, iou_threshold=0.5)
    masks = []
    if det_bboxes.shape[0] > 0:
        # Mask prototypes are at 1/2 the network input resolution.
        scale = 2
        #####spp########################
        img_mask1 = feat_mask.permute(1, 2, 0)
        # One sigmoid(prototypes @ coefficients) map per quadrant.
        pos_masks00 = torch.sigmoid(img_mask1 @ det_cofs[:, 0:32].t())
        pos_masks01 = torch.sigmoid(img_mask1 @ det_cofs[:, 32:64].t())
        pos_masks10 = torch.sigmoid(img_mask1 @ det_cofs[:, 64:96].t())
        pos_masks11 = torch.sigmoid(img_mask1 @ det_cofs[:, 96:128].t())
        if rescale:
            pos_masks = torch.stack([pos_masks00, pos_masks01, pos_masks10, pos_masks11], dim=0)
            # Crop each mask to its detection box in prototype space.
            pos_masks = self.crop_cuda(pos_masks, det_bboxes[:, :4] * det_bboxes.new_tensor(scale_factor) / scale)
            # pos_masks = crop_split(pos_masks00, pos_masks01, pos_masks10, pos_masks11, det_bboxes * det_bboxes.new_tensor(scale_factor) / scale)
        else:
            pos_masks = torch.stack([pos_masks00, pos_masks01, pos_masks10, pos_masks11], dim=0)
            pos_masks = self.crop_cuda(pos_masks, det_bboxes[:, :4] / scale)
            # pos_masks = crop_split(pos_masks00, pos_masks01, pos_masks10, pos_masks11, det_bboxes / scale)
        pos_masks = pos_masks.permute(2, 0, 1)
        # Upsample the cropped masks back to image resolution.
        if rescale:
            masks = F.interpolate(pos_masks.unsqueeze(0), scale_factor=scale / scale_factor, mode='bilinear',
                                  align_corners=False).squeeze(0)
        else:
            masks = F.interpolate(pos_masks.unsqueeze(0), scale_factor=scale, mode='bilinear',
                                  align_corners=False).squeeze(0)
        # Binarize in place at 0.5.
        masks.gt_(0.5)
    return det_bboxes, det_labels, masks
def extract_box_feature_center_single(self, track_feats, gt_bboxs):
    """Sample the tracking embedding at the center of each box.

    Args:
        track_feats: (C, H, W) feature map; boxes are given in image
            coordinates at stride 8 relative to this map.
        gt_bboxs: (N, 4) boxes as (x1, y1, x2, y2).

    Returns:
        (N, C) tensor holding the feature vector at each box center cell.
    """
    #####extract feature box############
    # Feature-map stride between box coordinates and track_feats cells.
    ref_feat_stride = 8
    # Map each box center onto integer feature-map cell indices.
    gt_center_xs = torch.floor((gt_bboxs[:, 2] + gt_bboxs[:, 0]) / 2.0 / ref_feat_stride).long()
    gt_center_ys = torch.floor((gt_bboxs[:, 3] + gt_bboxs[:, 1]) / 2.0 / ref_feat_stride).long()
    # (C, H, W) -> (H, W, C) so one advanced index gathers per-box vectors.
    feats_hwc = track_feats.permute(1, 2, 0)
    # GENERALIZED: infer the channel count from the feature map instead of
    # hard-coding 512, so other embedding widths work unchanged.
    track_box_feats = track_feats.new_zeros(gt_bboxs.size(0), track_feats.size(0))
    track_box_feats += feats_hwc[gt_center_ys, gt_center_xs, :]
    return track_box_feats
def get_points(self, featmap_sizes, dtype, device):
    """Get points according to feature map sizes.

    Args:
        featmap_sizes (list[tuple]): Multi-level feature map sizes.
        dtype (torch.dtype): Type of points.
        device (torch.device): Device of points.

    Returns:
        list: per-level point tensors, one per feature map size.
    """
    # Build one point grid per pyramid level using that level's stride.
    return [
        self.get_points_single(featmap_sizes[lvl], self.strides[lvl],
                               dtype, device)
        for lvl in range(len(featmap_sizes))
    ]
def get_points_single(self, featmap_size, stride, dtype, device):
    """Return the image-space center of every cell of an (h, w) feature
    map: (col * stride + stride // 2, row * stride + stride // 2),
    enumerated row-major (x varies fastest)."""
    h, w = featmap_size
    col_coords = torch.arange(
        0, w * stride, stride, dtype=dtype, device=device)
    row_coords = torch.arange(
        0, h * stride, stride, dtype=dtype, device=device)
    # Flattened row-major grid without meshgrid: repeat columns per row,
    # stretch each row coordinate across its w columns.
    grid_x = col_coords.repeat(h)
    grid_y = row_coords.repeat_interleave(w)
    return torch.stack((grid_x, grid_y), dim=-1) + stride // 2
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
    """Compute FCOS classification/regression targets for a whole batch.

    Args:
        points: list (per level) of (num_points_lvl, 2) point tensors.
        gt_bboxes_list: per-image ground-truth boxes.
        gt_labels_list: per-image ground-truth labels.

    Returns:
        concat_lvl_labels, concat_lvl_bbox_targets: per-level targets
        concatenated over the batch; plus the per-image splits
        (labels_list, bbox_targets_list) and per-image matched gt indices
        (gt_inds) as produced by fcos_target_single.
    """
    assert len(points) == len(self.regress_ranges)
    num_levels = len(points)
    # expand regress ranges to align with points
    expanded_regress_ranges = [
        points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
            points[i]) for i in range(num_levels)
    ]
    # concat all levels points and regress ranges
    concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
    concat_points = torch.cat(points, dim=0)
    # the number of points per img, per lvl
    num_points = [center.size(0) for center in points]
    # get labels and bbox_targets of each image
    # (multi_apply maps fcos_target_single over the batch and transposes
    # the per-image result tuples into per-field lists)
    labels_list, bbox_targets_list, gt_inds = multi_apply(
        self.fcos_target_single,
        gt_bboxes_list,
        gt_labels_list,
        points=concat_points,
        regress_ranges=concat_regress_ranges,
        num_points_per_lvl=num_points)
    # split to per img, per level
    labels_list = [labels.split(num_points, 0) for labels in labels_list]
    bbox_targets_list = [
        bbox_targets.split(num_points, 0)
        for bbox_targets in bbox_targets_list
    ]
    # concat per level image
    concat_lvl_labels = []
    concat_lvl_bbox_targets = []
    for i in range(num_levels):
        concat_lvl_labels.append(
            torch.cat([labels[i] for labels in labels_list]))
        concat_lvl_bbox_targets.append(
            torch.cat(
                [bbox_targets[i] for bbox_targets in bbox_targets_list]))
    return concat_lvl_labels, concat_lvl_bbox_targets, labels_list, bbox_targets_list, gt_inds
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,
                       num_points_per_lvl):
    """Assign FCOS targets to every point of a single image.

    Args:
        gt_bboxes: (num_gts, 4) ground-truth boxes (x1, y1, x2, y2).
        gt_labels: (num_gts,) ground-truth class labels (positive ints).
        points: (num_points, 2) concatenated all-level point locations.
        regress_ranges: (num_points, 2) per-point regression range.
        num_points_per_lvl: point count of each pyramid level.

    Returns:
        labels: (num_points,) assigned label per point (0 = background).
        bbox_targets: (num_points, 4) (l, t, r, b) distances.
        gt_ind: matched gt indices for the positive points only.
    """
    num_points = points.size(0)
    num_gts = gt_labels.size(0)
    if num_gts == 0:
        # BUG FIX: callers unpack three values (labels, bbox_targets,
        # gt_ind); the original returned only two here, raising a
        # ValueError for images without annotations.
        return gt_labels.new_zeros(num_points), \
               gt_bboxes.new_zeros((num_points, 4)), \
               gt_labels.new_zeros(0)
    areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
        gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
    # TODO: figure out why these two are different
    # areas = areas[None].expand(num_points, num_gts)
    areas = areas[None].repeat(num_points, 1)
    regress_ranges = regress_ranges[:, None, :].expand(
        num_points, num_gts, 2)
    gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
    xs, ys = points[:, 0], points[:, 1]
    xs = xs[:, None].expand(num_points, num_gts)
    ys = ys[:, None].expand(num_points, num_gts)
    # (l, t, r, b) distances from every point to every gt box edge.
    left = xs - gt_bboxes[..., 0]
    right = gt_bboxes[..., 2] - xs
    top = ys - gt_bboxes[..., 1]
    bottom = gt_bboxes[..., 3] - ys
    bbox_targets = torch.stack((left, top, right, bottom), -1)
    bbox_targets = bbox_targets
    if self.center_sampling:
        # condition1: inside a `center bbox`
        radius = self.center_sample_radius
        center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2
        center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2
        center_gts = torch.zeros_like(gt_bboxes)
        stride = center_xs.new_zeros(center_xs.shape)
        # project the points on current lvl back to the `original` sizes
        lvl_begin = 0
        for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):
            lvl_end = lvl_begin + num_points_lvl
            stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius
            lvl_begin = lvl_end
        x_mins = center_xs - stride
        y_mins = center_ys - stride
        x_maxs = center_xs + stride
        y_maxs = center_ys + stride
        # Clip the center-sampling region to the gt box itself.
        center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],
                                         x_mins, gt_bboxes[..., 0])
        center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],
                                         y_mins, gt_bboxes[..., 1])
        center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],
                                         gt_bboxes[..., 2], x_maxs)
        center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],
                                         gt_bboxes[..., 3], y_maxs)
        cb_dist_left = xs - center_gts[..., 0]
        cb_dist_right = center_gts[..., 2] - xs
        cb_dist_top = ys - center_gts[..., 1]
        cb_dist_bottom = center_gts[..., 3] - ys
        center_bbox = torch.stack(
            (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)
        inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
    else:
        # condition1: inside a gt bbox
        inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
    # condition2: limit the regression range for each location
    max_regress_distance = bbox_targets.max(-1)[0]
    inside_regress_range = (
        max_regress_distance >= regress_ranges[..., 0]) & (
            max_regress_distance <= regress_ranges[..., 1])
    # if there are still more than one objects for a location,
    # we choose the one with minimal area
    areas[inside_gt_bbox_mask == 0] = INF
    areas[inside_regress_range == 0] = INF
    min_area, min_area_inds = areas.min(dim=1)
    labels = gt_labels[min_area_inds]
    labels[min_area == INF] = 0  # unmatched points become background
    bbox_targets = bbox_targets[range(num_points), min_area_inds]
    # gt index of each positive (foreground) point.
    gt_ind = min_area_inds[labels > 0]
    return labels, bbox_targets, gt_ind
def centerness_target(self, pos_bbox_targets):
    """Compute FCOS centerness from positive (l, t, r, b) targets:
    sqrt((min_lr / max_lr) * (min_tb / max_tb))."""
    # Only positive targets are used, otherwise min/max may yield NaN.
    horizontal = pos_bbox_targets[:, [0, 2]]
    vertical = pos_bbox_targets[:, [1, 3]]
    ratio_h = horizontal.min(dim=-1)[0] / horizontal.max(dim=-1)[0]
    ratio_v = vertical.min(dim=-1)[0] / vertical.max(dim=-1)[0]
    return torch.sqrt(ratio_h * ratio_v)
def fast_nms(self, boxes, scores, masks, cfg, iou_threshold=0.5, top_k=200):
    """Parallel "fast NMS": one IoU matrix per class, no sequential loop.

    Args:
        boxes: (num_dets, 4) candidate boxes.
        scores: (num_classes, num_dets) per-class scores.
        masks: (num_dets, num_coeffs) mask coefficients aligned with boxes.
        cfg: test config providing score_thr and max_per_img.
        iou_threshold: suppress a det whose IoU with any higher-scoring
            same-class det exceeds this.
        top_k: per-class candidate cap before suppression.

    Returns:
        (boxes (N, 5) with score appended, classes (N,), masks (N, C)).
    """
    # Sort per class, keep at most top_k candidates per class.
    scores, idx = scores.sort(1, descending=True)
    idx = idx[:, :top_k].contiguous()
    scores = scores[:, :top_k]
    num_classes, num_dets = idx.size()
    boxes = boxes[idx.view(-1), :].view(num_classes, num_dets, 4)
    masks = masks[idx.view(-1), :].view(num_classes, num_dets, -1)
    iou = self.jaccard(boxes, boxes)
    # Upper triangle only: each det is compared against strictly
    # higher-scoring dets of the same class.
    iou.triu_(diagonal=1)
    iou_max, _ = iou.max(dim=1)
    # Now just filter out the ones higher than the threshold
    keep = (iou_max <= iou_threshold)
    # We should also only keep detections over the confidence threshold, but at the cost of
    # maxing out your detection count for every image, you can just not do that. Because we
    # have such a minimal amount of computation per detection (matrix mulitplication only),
    # this increase doesn't affect us much (+0.2 mAP for 34 -> 33 fps), so we leave it out.
    # However, when you implement this in your method, you should do this second threshold.
    keep *= (scores > cfg.score_thr)
    # Assign each kept detection to its corresponding class
    classes = torch.arange(num_classes, device=boxes.device)[:, None].expand_as(keep)
    classes = classes[keep]
    boxes = boxes[keep]
    masks = masks[keep]
    scores = scores[keep]
    # Only keep the top cfg.max_num_detections highest scores across all classes
    scores, idx = scores.sort(0, descending=True)
    idx = idx[:cfg.max_per_img]
    scores = scores[:cfg.max_per_img]
    classes = classes[idx]
    boxes = boxes[idx]
    masks = masks[idx]
    boxes = torch.cat([boxes, scores[:, None]], dim=1)
    return boxes, classes, masks
def jaccard(self, box_a, box_b, iscrowd: bool = False):
    """Compute the jaccard overlap (IoU) of two sets of boxes:
    A ∩ B / (area(A) + area(B) - A ∩ B). With iscrowd=True the overlap
    is normalized by area(A) only (put the crowd in box_b).

    Args:
        box_a: (tensor) boxes, shape [num_a, 4] or [n, num_a, 4].
        box_b: (tensor) boxes, shape [num_b, 4] or [n, num_b, 4].

    Return:
        (tensor) overlap, shape [num_a, num_b] (or batched [n, A, B]).
    """
    # Promote unbatched input to a single-element batch.
    squeeze_out = box_a.dim() == 2
    if squeeze_out:
        box_a = box_a[None, ...]
        box_b = box_b[None, ...]
    inter = self.intersect(box_a, box_b)
    area_a = (box_a[:, :, 2] - box_a[:, :, 0]) * (box_a[:, :, 3] - box_a[:, :, 1])
    area_b = (box_b[:, :, 2] - box_b[:, :, 0]) * (box_b[:, :, 3] - box_b[:, :, 1])
    area_a = area_a.unsqueeze(2).expand_as(inter)  # [n, A, B]
    area_b = area_b.unsqueeze(1).expand_as(inter)  # [n, A, B]
    if iscrowd:
        overlap = inter / area_a
    else:
        overlap = inter / (area_a + area_b - inter)
    return overlap.squeeze(0) if squeeze_out else overlap
def intersect(self, box_a, box_b):
    """Compute the pairwise intersection area between two box sets.

    Relies on broadcasting instead of explicit expand:
    [n, A, 1, 2] against [n, 1, B, 2] -> [n, A, B, 2].

    Args:
        box_a: (tensor) bounding boxes, Shape: [n,A,4].
        box_b: (tensor) bounding boxes, Shape: [n,B,4].
    Return:
        (tensor) intersection area, Shape: [n,A,B].
    """
    # Intersection rectangle: min of the max-corners, max of the min-corners.
    upper = torch.min(box_a[:, :, None, 2:], box_b[:, None, :, 2:])
    lower = torch.max(box_a[:, :, None, :2], box_b[:, None, :, :2])
    # Clamp so disjoint boxes contribute zero area.
    wh = (upper - lower).clamp(min=0)
    return wh[..., 0] * wh[..., 1]
| 44.386973
| 150
| 0.55533
|
4a03820fa4a0e7d506db7015cc439ad6dde14e62
| 2,317
|
py
|
Python
|
app/api/alt_envelope_api.py
|
nappernick/envelope
|
af4f574c04c51293b90ee2e09d0f95d12ca36d2c
|
[
"MIT"
] | 2
|
2021-01-13T22:52:16.000Z
|
2021-01-29T18:37:51.000Z
|
app/api/alt_envelope_api.py
|
nappernick/envelope
|
af4f574c04c51293b90ee2e09d0f95d12ca36d2c
|
[
"MIT"
] | 32
|
2021-01-08T19:05:33.000Z
|
2021-04-07T22:01:54.000Z
|
app/api/alt_envelope_api.py
|
nappernick/envelope
|
af4f574c04c51293b90ee2e09d0f95d12ca36d2c
|
[
"MIT"
] | null | null | null |
import pandas as pd
import csv
import tempfile
import pysurveycto

# Pull the raw CSV export of the form from the SurveyCTO API.
# NOTE(review): credentials are hard-coded here; move them to environment
# variables or a secrets store before this ships.
scto = pysurveycto.SurveyCTOObject('envelope', 'nickfmatthews@gmail.com', 'Envelope-VisX')
raw = scto.get_form_data('NORC-IGA-Endline-Menage')

# Parse the raw CSV text entirely in memory: a list of rows, where each
# row is a list of column values (row 0 holds the headers).
file_it = raw.split("\n")
reader = csv.reader(file_it, delimiter=",")
print([row for row in reader])

# Local snapshot of the same export (the API is slow while iterating).
# BUG FIX: the original looped `for index, row in reader`, which tries to
# unpack each CSV row (a list with many columns) into two names and raises
# ValueError on the first row; enumerate() is what was intended.
with open('/Users/nicholasmatthews/Library/Mobile Documents/com~apple~CloudDocs/app_academy/capstone/envelope/app/api/csv.txt') as f:
    reader = csv.reader(f, delimiter=",")
    ls = []
    for index, row in enumerate(reader):
        # Collects row indices only — presumably scratch exploration;
        # TODO confirm whether the rows themselves were wanted instead.
        ls.append(index)
    print(ls)
| 32.633803
| 168
| 0.686232
|
4a0382376a4712edf58cf9061052dd42ff3e9bf7
| 176
|
py
|
Python
|
med3d/__init__.py
|
Borda/MedicalNet
|
d5688589ed28ebc95b27eb3e08e5cbfb0b4fe966
|
[
"MIT"
] | 1
|
2021-09-16T20:56:24.000Z
|
2021-09-16T20:56:24.000Z
|
med3d/__init__.py
|
Borda/MedicalNet
|
d5688589ed28ebc95b27eb3e08e5cbfb0b4fe966
|
[
"MIT"
] | null | null | null |
med3d/__init__.py
|
Borda/MedicalNet
|
d5688589ed28ebc95b27eb3e08e5cbfb0b4fe966
|
[
"MIT"
] | null | null | null |
# Package metadata for the MedicalNet 3D distribution; read by packaging
# tooling and available at runtime for introspection.
__version__ = "0.1.0"
__docs__ = "MedicalNet 3D"
__author__ = "Sihong Chen"
__author_email__ = "TBD"  # NOTE(review): placeholder — fill in before release
__homepage__ = "https://github.com/Tencent/MedicalNet"
__license__ = "MIT"
| 25.142857
| 54
| 0.738636
|
4a03830ef40bf15709601ddc86663092ee2695a5
| 21,098
|
py
|
Python
|
examples/main_simulations.py
|
energyinpython/pre-pyrepo
|
92e44594e12d1110247f011e51734e5ce1fe0b8e
|
[
"MIT"
] | null | null | null |
examples/main_simulations.py
|
energyinpython/pre-pyrepo
|
92e44594e12d1110247f011e51734e5ce1fe0b8e
|
[
"MIT"
] | null | null | null |
examples/main_simulations.py
|
energyinpython/pre-pyrepo
|
92e44594e12d1110247f011e51734e5ce1fe0b8e
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import copy
from tabulate import tabulate
import itertools
from visualizations import *
from pyrepo import distance_metrics as dists
from pyrepo import correlations as corrs
from pyrepo import normalizations as norms
from pyrepo.additions import rank_preferences
from pyrepo.mcda_methods import CODAS
from pyrepo.mcda_methods import TOPSIS
from pyrepo.mcda_methods import WASPAS
from pyrepo.mcda_methods import VIKOR
from pyrepo.mcda_methods import SPOTIS
from pyrepo.mcda_methods import EDAS
from pyrepo.mcda_methods import MABAC
from pyrepo.mcda_methods import MULTIMOORA
from pyrepo.sensitivity_analysis import Sensitivity_analysis_weights
def main():
# load name of a file with input data in csv file
data = pd.read_csv('data.csv', index_col = 'Ai')
df_data = data.iloc[:len(data) - 2, :]
weights = data.iloc[len(data) - 2, :].to_numpy()
types = data.iloc[len(data) - 1, :].to_numpy()
list_alt_names = [r'$A_{' + str(i) + '}$' for i in range(1, df_data.shape[0] + 1)]
matrix = df_data.to_numpy()
distance_metrics = [
dists.euclidean,
dists.manhattan,
#dists.hausdorff,
#dists.correlation,
dists.chebyshev,
#dists.std_euclidean,
#dists.cosine,
#dists.squared_euclidean,
dists.bray_curtis,
#dists.canberra,
dists.lorentzian,
dists.jaccard,
dists.dice,
#dists.bhattacharyya,
#dists.hellinger,
dists.matusita,
#dists.squared_chord,
#dists.pearson_chi_square,
#dists.square_chi_square
]
distance_metrics_names = []
for el in distance_metrics:
name = el.__name__.capitalize()
name = name.replace('_', ' ')
distance_metrics_names.append(name)
# collecting results for weights given by the decision-maker
# dataframe for TOPSIS rankings using different distance metrics
rank_results_topsis = pd.DataFrame()
rank_results_topsis['Ai'] = list_alt_names
# dataframe for TOPSIS preferences
pref_results_topsis = copy.deepcopy(rank_results_topsis)
# dataframe for CODAS rankings
rank_results_codas = copy.deepcopy(rank_results_topsis)
# dataframe for CODAS preferences
pref_results_codas = copy.deepcopy(rank_results_topsis)
# dataframe for rank results provided by all methods without weights changing
rank_results = copy.deepcopy(rank_results_topsis)
# results provided by TOPSIS and CODAS without weights changing using different distance metrics
for metric in distance_metrics:
topsis = TOPSIS(normalization_method = norms.minmax_normalization, distance_metric = metric)
pref = topsis(matrix, weights, types)
rank = rank_preferences(pref, reverse = True)
pref_results_topsis[metric.__name__] = pref
rank_results_topsis[metric.__name__] = rank
codas = CODAS(normalization_method = norms.linear_normalization, distance_metric = metric)
pref = codas(matrix, weights, types)
rank = rank_preferences(pref, reverse = True)
pref_results_codas[metric.__name__] = pref
rank_results_codas[metric.__name__] = rank
# results provided by all methods without weights changing
#TOPSIS
topsis = TOPSIS(normalization_method = norms.minmax_normalization, distance_metric = dists.euclidean)
pref = topsis(matrix, weights, types)
rank = rank_preferences(pref, reverse = True)
rank_results['TOPSIS'] = rank
#CODAS
codas = CODAS(normalization_method = norms.linear_normalization, distance_metric = dists.euclidean)
pref = codas(matrix, weights, types)
rank = rank_preferences(pref, reverse = True)
rank_results['CODAS'] = rank
#VIKOR
vikor = VIKOR(normalization_method = norms.minmax_normalization)
pref = vikor(matrix, weights, types)
rank = rank_preferences(pref, reverse = False)
rank_results['VIKOR'] = rank
#SPOTIS
bounds_min = np.amin(matrix, axis = 0)
bounds_max = np.amax(matrix, axis = 0)
bounds = np.vstack((bounds_min, bounds_max))
spotis = SPOTIS()
pref = spotis(matrix, weights, types, bounds)
rank = rank_preferences(pref, reverse = False)
rank_results['SPOTIS'] = rank
#EDAS
edas = EDAS()
pref = edas(matrix, weights, types)
rank = rank_preferences(pref, reverse = True)
rank_results['EDAS'] = rank
#MABAC
mabac = MABAC(normalization_method = norms.minmax_normalization)
pref = mabac(matrix, weights, types)
rank = rank_preferences(pref, reverse = True)
rank_results['MABAC'] = rank
#MULTIMOORA
multimoora = MULTIMOORA()
rank = multimoora(matrix, weights, types)
rank_results['MMOORA'] = rank
#WASPAS
waspas = WASPAS(normalization_method = norms.linear_normalization, lambda_param = 0.5)
pref = waspas(matrix, weights, types)
rank = rank_preferences(pref, reverse = True)
rank_results['WASPAS'] = rank
rank_results = rank_results.set_index('Ai')
header = [rank_results.index.name]
header = header + list(rank_results.columns)
print('Results of all methods without weights modification:')
print(tabulate(rank_results, headers = header, tablefmt='github'))
# Simulations
# dictionaries for collecting data obtained in simulations
# dictionary for collecting changes in TOPSIS rankings using different distance metrics
results_dict_topsis_rank = {
'Distance metric' : [],
'Changes' : [],
'Weight change' : []
}
# dictionary for collecting variability in TOPSIS preferences after weights change using different distance metrics
results_dict_topsis_pref = {
'Distance metric' : [],
'Distance' : [],
'Weight change' : []
}
# dictionary for collecting changes in all methods rankings
results_dict_rank = {
'Method' : [],
'Changes' : [],
'Weight change' : []
}
results_dict_codas_rank = copy.deepcopy(results_dict_topsis_rank)
results_dict_codas_pref = copy.deepcopy(results_dict_topsis_pref)
# dictionary for collecting the distances of the alternatives from the ideal solution (TOPSIS)
results_dict_topsis_dist_to_ideal = {
'r$A_{i}$' : [],
'Distance to ideal' : [],
'Weight change' : []
}
# dictionary for collecting the distances of the alternatives from the anti-ideal solution (CODAS)
results_dict_codas_dist_to_nonideal = {
'r$A_{i}$' : [],
'Distance to anti-ideal' : [],
'Weight change' : []
}
# Function for Simulation 1: collecting of changes in rankings for all MCDA methods
# Simulation for all MCDA methods - variability in rankings
# changes in rankings according to changes in weights
# Function for Simulation 1: collecting of changes in rankings for all MCDA methods
def complete_dataframe_methods(matrix, weights_copy, types, change_val):
    """Record, for every MCDA method, how many ranking positions changed
    relative to the reference rankings (``rank_results``) after the weight
    modification.

    Appends one row per method into the global ``results_dict_rank``.

    Parameters
    ----------
    matrix : ndarray - decision matrix (alternatives x criteria)
    weights_copy : ndarray - modified criteria weights
    types : ndarray - criteria types (profit/cost)
    change_val : float - magnitude of the weight modification (fraction)
    """
    # Label like '5%' shared by every row appended in this call (loop-invariant).
    weight_label = '{:.0f}'.format(abs(change_val * 100)) + '%'

    def _record(method_name, rank):
        # Append one row: method name, number of position changes vs. the
        # reference ranking, and the weight-modification label.
        results_dict_rank['Method'].append(method_name)
        num_of_changes = np.sum(abs(rank - rank_results[method_name].to_numpy()))
        results_dict_rank['Changes'].append(num_of_changes)
        results_dict_rank['Weight change'].append(weight_label)

    # TOPSIS (higher preference is better -> descending ranking)
    topsis = TOPSIS(normalization_method = norms.minmax_normalization, distance_metric = dists.euclidean)
    pref = topsis(matrix, weights_copy, types)
    _record('TOPSIS', rank_preferences(pref, reverse = True))
    # CODAS
    codas = CODAS(normalization_method = norms.linear_normalization, distance_metric = dists.euclidean)
    pref = codas(matrix, weights_copy, types)
    _record('CODAS', rank_preferences(pref, reverse = True))
    # VIKOR (lower preference value is better -> ascending ranking)
    vikor = VIKOR(normalization_method = norms.minmax_normalization)
    pref = vikor(matrix, weights_copy, types)
    _record('VIKOR', rank_preferences(pref, reverse = False))
    # SPOTIS requires explicit per-criterion bounds taken from the data.
    bounds_min = np.amin(matrix, axis = 0)
    bounds_max = np.amax(matrix, axis = 0)
    bounds = np.vstack((bounds_min, bounds_max))
    spotis = SPOTIS()
    pref = spotis(matrix, weights_copy, types, bounds)
    _record('SPOTIS', rank_preferences(pref, reverse = False))
    # EDAS
    edas = EDAS()
    pref = edas(matrix, weights_copy, types)
    _record('EDAS', rank_preferences(pref, reverse = True))
    # MABAC
    mabac = MABAC(normalization_method = norms.minmax_normalization)
    pref = mabac(matrix, weights_copy, types)
    _record('MABAC', rank_preferences(pref, reverse = True))
    # MULTIMOORA returns a ranking directly (no separate preference vector).
    multimoora = MULTIMOORA()
    _record('MMOORA', multimoora(matrix, weights_copy, types))
    # WASPAS
    waspas = WASPAS(normalization_method = norms.linear_normalization, lambda_param = 0.5)
    pref = waspas(matrix, weights_copy, types)
    _record('WASPAS', rank_preferences(pref, reverse = True))
# Function for Simulation 2: collecting of distances to reference points
# (the ideal solution for TOPSIS and the anti-ideal solution for CODAS).
def complete_dataframe_distance(matrix, weights_copy, types, change_val):
    """Record, per alternative, its distance to the reference solution:
    the ideal point for TOPSIS (``topsis.Dp``) and the anti-ideal point for
    CODAS (``codas.E``), appended into the global collector dictionaries.

    Improvements over the original: iterate the argsort order directly
    (the previous ``rank_preferences`` call was dead code used only for its
    length) and hoist the loop-invariant weight-change label.
    """
    weight_label = '{:.0f}'.format(abs(change_val * 100)) + '%'
    # TOPSIS: distance of every alternative to the ideal solution.
    topsis = TOPSIS(normalization_method = norms.minmax_normalization, distance_metric = dists.euclidean)
    pref = topsis(matrix, weights_copy, types)
    order = np.argsort(-pref)
    dp = topsis.Dp
    for idx in order:
        results_dict_topsis_dist_to_ideal['r$A_{i}$'].append(list_alt_names[idx])
        results_dict_topsis_dist_to_ideal['Distance to ideal'].append(dp[idx])
        results_dict_topsis_dist_to_ideal['Weight change'].append(weight_label)
    # CODAS: distance of every alternative to the anti-ideal solution.
    # NOTE(review): minmax_normalization is used here while CODAS elsewhere in
    # this file uses linear_normalization — confirm this is intentional.
    codas = CODAS(normalization_method = norms.minmax_normalization, distance_metric = dists.euclidean)
    pref = codas(matrix, weights_copy, types)
    order = np.argsort(-pref)
    e = codas.E
    for idx in order:
        results_dict_codas_dist_to_nonideal['r$A_{i}$'].append(list_alt_names[idx])
        results_dict_codas_dist_to_nonideal['Distance to anti-ideal'].append(e[idx])
        results_dict_codas_dist_to_nonideal['Weight change'].append(weight_label)
# Function for Simulations 3 and 4: collecting of changes in rankings and
# preferences for TOPSIS and CODAS under different distance metrics.
def complete_dataframe(matrix, distance_metrics, weights_copy, types, change_val, distance_measurement_method = dists.manhattan):
    """For every distance metric, run TOPSIS and CODAS with the modified
    weights and record (a) the number of ranking position changes versus the
    reference rankings and (b) the distance between the new and reference
    preference vectors, measured with ``distance_measurement_method``.

    The weight-change label is computed once (it was previously rebuilt four
    times per metric) and the metric key/display name are derived once.
    """
    weight_label = '{:.0f}'.format(abs(change_val * 100)) + '%'
    for metric in distance_metrics:
        metric_key = metric.__name__
        # Display name, e.g. 'manhattan_distance' -> 'Manhattan distance'.
        metric_name = metric_key.capitalize().replace('_', ' ')
        # TOPSIS: changes in rankings.
        topsis = TOPSIS(normalization_method = norms.minmax_normalization, distance_metric = metric)
        pref = topsis(matrix, weights_copy, types)
        rank = rank_preferences(pref, reverse = True)
        results_dict_topsis_rank['Distance metric'].append(metric_name)
        results_dict_topsis_rank['Changes'].append(np.sum(abs(rank - rank_results_topsis[metric_key].to_numpy())))
        results_dict_topsis_rank['Weight change'].append(weight_label)
        # TOPSIS: changes in preferences (measured by distance_measurement_method).
        results_dict_topsis_pref['Distance metric'].append(metric_name)
        results_dict_topsis_pref['Distance'].append(distance_measurement_method(pref, pref_results_topsis[metric_key].to_numpy()))
        results_dict_topsis_pref['Weight change'].append(weight_label)
        # CODAS: changes in rankings.
        codas = CODAS(normalization_method = norms.linear_normalization, distance_metric = metric)
        pref = codas(matrix, weights_copy, types)
        rank = rank_preferences(pref, reverse = True)
        results_dict_codas_rank['Distance metric'].append(metric_name)
        results_dict_codas_rank['Changes'].append(np.sum(abs(rank - rank_results_codas[metric_key].to_numpy())))
        results_dict_codas_rank['Weight change'].append(weight_label)
        # CODAS: changes in preferences.
        results_dict_codas_pref['Distance metric'].append(metric_name)
        results_dict_codas_pref['Distance'].append(distance_measurement_method(pref, pref_results_codas[metric_key].to_numpy()))
        results_dict_codas_pref['Weight change'].append(weight_label)
# Simulations
# Input percentages of weights modification for simulation
percentages = [0.05, 0.2, 0.35, 0.5]
list_of_percentage = ['{:.0f}'.format(p * 100) + '%' for p in percentages]
# Each weight is perturbed both upward (+1) and downward (-1).
changing_weights_direction = [1, -1]
sensitivity = Sensitivity_analysis_weights()
# Simulation with weights modification: every criterion x every percentage
# x both directions. (Renamed loop variable: `dir` shadowed the builtin.)
for j, change_val, direction in itertools.product(range(df_data.shape[1]), percentages, changing_weights_direction):
    weights_copy = sensitivity._change_weights(j, weights, change_val * direction)
    complete_dataframe(matrix, distance_metrics, weights_copy, types, change_val)
    complete_dataframe_distance(matrix, weights_copy, types, change_val)
    complete_dataframe_methods(matrix, weights_copy, types, change_val)
# Results of simulations: turn the collector dictionaries into DataFrames,
# plot the distributions, and print averaged change tables.
#TOPSIS ranking
results_pd_topsis_rank = pd.DataFrame(results_dict_topsis_rank)
#CODAS ranking
results_pd_codas_rank = pd.DataFrame(results_dict_codas_rank)
#TOPSIS preferences
results_pd_topsis_pref = pd.DataFrame(results_dict_topsis_pref)
plot_boxplot_simulation(results_pd_topsis_pref, x = 'Distance metric', y = 'Distance',
    xtitle = 'Distance metrics for TOPSIS', ytitle = 'Variability in preferences',
    title = 'TOPSIS', filename = 'topsis_pref_varia_boxplot')
#CODAS preferences
results_pd_codas_pref = pd.DataFrame(results_dict_codas_pref)
plot_boxplot_simulation(results_pd_codas_pref, x = 'Distance metric', y = 'Distance',
    xtitle = 'Distance metrics for CODAS', ytitle = 'Variability in preferences',
    title = 'CODAS', filename = 'codas_pref_varia_boxplot')
# distance of alternatives' from reference solution
#TOPSIS: distance to the ideal solution
results_pd_topsis_dist_to_ideal = pd.DataFrame(results_dict_topsis_dist_to_ideal)
plot_boxplot_simulation(results_pd_topsis_dist_to_ideal, x = 'r$A_{i}$', y = 'Distance to ideal',
    xtitle = 'Alternatives in TOPSIS rankings', ytitle = 'Variability in distance to ideal solution',
    title = 'TOPSIS', filename = 'topsis_distance_boxplot', flag_rotation = False)
#CODAS: distance to the anti-ideal solution
results_pd_codas_dist_to_nonideal = pd.DataFrame(results_dict_codas_dist_to_nonideal)
plot_boxplot_simulation(results_pd_codas_dist_to_nonideal, x = 'r$A_{i}$', y = 'Distance to anti-ideal',
    xtitle = 'Alternatives in CODAS rankings', ytitle = 'Variability in distance to anti-ideal solution',
    title = 'CODAS', filename = 'codas_distance_boxplot', flag_rotation = False)
# Average number of changes in TOPSIS ranking per distance metric and percentage.
df_rank_topsis_mean = pd.DataFrame()
df_rank_topsis_mean['Distance metric'] = distance_metrics_names
for perc in list_of_percentage:
    list_of_averages = []
    for el in distance_metrics_names:
        # Mean over all criteria and both perturbation directions.
        df_tmp = results_pd_topsis_rank[(results_pd_topsis_rank['Distance metric'] == el) & (results_pd_topsis_rank['Weight change'] == perc)]
        av = df_tmp['Changes'].mean()
        list_of_averages.append(av)
    df_rank_topsis_mean[perc] = list_of_averages
df_rank_topsis_mean = df_rank_topsis_mean.set_index('Distance metric')
header = [df_rank_topsis_mean.index.name]
header = header + list(df_rank_topsis_mean.columns)
print('The average number of changes in TOPSIS rankings with weights modification:')
print(tabulate(df_rank_topsis_mean, headers = header, tablefmt='github'))
# Average number of changes in CODAS ranking per distance metric and percentage.
df_rank_codas_mean = pd.DataFrame()
df_rank_codas_mean['Distance metric'] = distance_metrics_names
for perc in list_of_percentage:
    list_of_averages = []
    for el in distance_metrics_names:
        df_tmp = results_pd_codas_rank[(results_pd_codas_rank['Distance metric'] == el) & (results_pd_codas_rank['Weight change'] == perc)]
        av = df_tmp['Changes'].mean()
        list_of_averages.append(av)
    df_rank_codas_mean[perc] = list_of_averages
df_rank_codas_mean = df_rank_codas_mean.set_index('Distance metric')
header = [df_rank_codas_mean.index.name]
header = header + list(df_rank_codas_mean.columns)
print('The average number of changes in CODAS rankings with weights modification:')
print(tabulate(df_rank_codas_mean, headers = header, tablefmt='github'))
plot_barplot_simulations(df_rank_topsis_mean, xtitle = 'Distance metrics for TOPSIS',
    ytitle = 'Mean number of changes in rankings', title = 'TOPSIS',
    filename = 'topsis_mean_rank_changes', wider = True)
plot_barplot_simulations(df_rank_codas_mean, xtitle = 'Distance metrics for CODAS',
    ytitle = 'Mean number of changes in rankings', title = 'CODAS',
    filename = 'codas_mean_rank_changes', wider = True)
# Average variability in rankings for all MCDA methods.
results_df_rank = pd.DataFrame(results_dict_rank)
df_rank_mean = pd.DataFrame()
df_rank_mean['Method'] = list(rank_results.columns)
for perc in list_of_percentage:
    list_of_averages = []
    for el in list(rank_results.columns):
        df_tmp = results_df_rank[(results_df_rank['Method'] == el) & (results_df_rank['Weight change'] == perc)]
        av = df_tmp['Changes'].mean()
        list_of_averages.append(av)
    df_rank_mean[perc] = list_of_averages
df_rank_mean = df_rank_mean.set_index('Method')
header = [df_rank_mean.index.name]
header = header + list(df_rank_mean.columns)
print('The average number of changes in all rankings with weights modification:')
print(tabulate(df_rank_mean, headers = header, tablefmt='github'))
plot_barplot_simulations(df_rank_mean, xtitle = 'MCDA methods',
    ytitle = 'Mean number of changes in rankings',
    title = 'MCDA methods', filename = 'mcda_rank_varia', wider = True)
if __name__ == "__main__":
    main()
| 44.138075
| 146
| 0.681629
|
4a0384ce036caf3fd66c6d2061712c26eadf4a3c
| 2,573
|
py
|
Python
|
common/util.py
|
fipu-lab/p2p_bn
|
f2c67766f030de28fed82b11188f391d338bbe12
|
[
"MIT"
] | null | null | null |
common/util.py
|
fipu-lab/p2p_bn
|
f2c67766f030de28fed82b11188f391d338bbe12
|
[
"MIT"
] | null | null | null |
common/util.py
|
fipu-lab/p2p_bn
|
f2c67766f030de28fed82b11188f391d338bbe12
|
[
"MIT"
] | null | null | null |
import os
import json
import psutil
import GPUtil
import random
import tensorflow as tf
import numpy as np
import time
def set_seed(seed):
    """Seed the Python, TensorFlow and NumPy RNGs for reproducibility.

    ``seed`` may be any integer (0 included) or None to skip seeding.
    The original used ``if seed:``, which silently skipped seeding when the
    caller passed the perfectly valid seed 0 — test against None instead.
    """
    if seed is not None:
        random.seed(seed)
        tf.random.set_seed(seed)
        np.random.seed(seed)
        print("Seed:", seed)
def available_device_memory(device_name):
    """Return the free memory of *device_name* ('GPU:0', 'CPU', ...) in MB.

    Returns None for names containing neither 'GPU' nor 'CPU'.
    """
    upper_name = device_name.upper()
    if 'GPU' in upper_name:
        used, total = gpu_memory(device_name, 'MB')
        return total - used
    if 'CPU' in upper_name:
        # psutil.virtual_memory()[1] is the 'available' field, in bytes.
        return psutil.virtual_memory()[1] / 1024 ** 2
def total_device_memory(device_name):
    """Return the total memory of *device_name* ('GPU:0', 'CPU', ...) in MB.

    Returns None for names containing neither 'GPU' nor 'CPU'.
    """
    upper_name = device_name.upper()
    if 'GPU' in upper_name:
        _, total = gpu_memory(device_name, 'MB')
        return total
    if 'CPU' in upper_name:
        # psutil.virtual_memory()[0] is the 'total' field, in bytes.
        return psutil.virtual_memory()[0] / 1024 ** 2
def gpu_memory(gpu_name, units='MB'):
    """Return ``(current_usage, total)`` memory of *gpu_name* in *units*.

    ``units`` is one of 'B', 'KB', 'MB', 'GB'.
    Fixes: the original unit list ['B', 'MB', 'GB'] with a +1 offset mapped
    'B' to a division by 1024 (i.e. KB); including 'KB' keeps 'MB'/'GB'
    behavior identical while making 'B' return actual bytes. Also splits
    CUDA_VISIBLE_DEVICES on ',' (with strip) since the variable is normally
    comma-separated without spaces.
    """
    # Exponent of 1024 used to convert bytes into the requested unit.
    power_f = ['B', 'KB', 'MB', 'GB'].index(units)
    viz_devs = [d.strip() for d in os.environ["CUDA_VISIBLE_DEVICES"].split(',')]
    # 'GPU:0' -> '0': index into the visible-device list.
    gpu_ind = gpu_name.replace('GPU', '').replace(':', '').strip()
    usage = tf.config.experimental.get_memory_info(gpu_name)
    current = round(usage['current'] / (1024 ** power_f), 2)
    # GPUtil reports memoryTotal in MB; convert to bytes before scaling.
    total = round(GPUtil.getGPUs()[int(viz_devs[int(gpu_ind)])].memoryTotal * 1024 ** 2 / (1024 ** power_f), 2)
    return current, total
def memory_info():
    """Return a dict mapping each visible GPU (and 'RAM') to a
    'used/total GB' string."""
    info = {}
    for gpu in tf.config.list_physical_devices('GPU'):
        name = gpu.name.replace('/physical_device:', '')
        used, total = gpu_memory(name, 'GB')
        info[name] = "{}/{} GB".format(used, total)
    vm = psutil.virtual_memory()
    # vm[0] is total bytes, vm[1] is available bytes -> used = total - available.
    info['RAM'] = "{}/{} GB".format(
        round((vm[0] - vm[1]) / 1024 ** 3, 2), round(vm[0] / 1024 ** 3, 2))
    return info
def time_elapsed_info(start_time):
    """Return the wall-clock time elapsed since *start_time* (an epoch
    timestamp) formatted as 'HH:MMh', rounded to the nearest minute."""
    elapsed_minutes = round((time.time() - start_time) / 60)
    whole_hours = int(elapsed_minutes / 60)
    remainder_minutes = elapsed_minutes % 60
    return f"{whole_hours:02d}:{remainder_minutes:02d}h"
def save_json(filename, json_dict):
    """Serialize *json_dict* to *filename* as JSON, creating parent
    directories as needed and converting NumPy scalars/arrays to native
    Python types.

    Fix: ``os.path.dirname`` returns '' for a bare filename, and
    ``os.makedirs('')`` raises FileNotFoundError — only create directories
    when there is a directory component.
    """
    class NumpyValuesEncoder(json.JSONEncoder):
        # json cannot serialize NumPy types; convert them to Python natives.
        def default(self, obj):
            if isinstance(obj, np.floating):
                return float(obj)
            if isinstance(obj, np.integer):
                return int(obj)
            elif isinstance(obj, np.ndarray):
                return obj.tolist()
            return json.JSONEncoder.default(self, obj)
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(filename, "w") as outfile:
        json.dump(json_dict, outfile, indent=None, cls=NumpyValuesEncoder)
    print("Saved to", filename)
| 31
| 116
| 0.63972
|
4a03852ba20bfab6ce45932899c79c3c1677162d
| 6,639
|
py
|
Python
|
transH.py
|
Wjerry5/TensorFlow-TransX
|
41302d7a68d4880c1634b81c19639c72d4e3c65e
|
[
"MIT"
] | 1
|
2020-02-28T14:14:07.000Z
|
2020-02-28T14:14:07.000Z
|
transH.py
|
davidie/TensorFlow-TransX
|
41302d7a68d4880c1634b81c19639c72d4e3c65e
|
[
"MIT"
] | null | null | null |
transH.py
|
davidie/TensorFlow-TransX
|
41302d7a68d4880c1634b81c19639c72d4e3c65e
|
[
"MIT"
] | 1
|
2020-03-01T11:41:04.000Z
|
2020-03-01T11:41:04.000Z
|
#coding:utf-8
import numpy as np
import tensorflow as tf
import os
import time
import datetime
import ctypes
# Load the compiled C helpers: init.so provides training-batch sampling,
# test.so provides link-prediction evaluation over the test split.
ll = ctypes.cdll.LoadLibrary
lib = ll("./init.so")
test_lib = ll("./test.so")
class Config(object):
	"""Hyper-parameters and runtime flags for TransH training/evaluation."""
	def __init__(self):
		# Point both the sampling and evaluation libraries at the dataset.
		lib.setInPath("./data/FB15K/")
		test_lib.setInPath("./data/FB15K/")
		self.testFlag = True        # True: run evaluation instead of training
		self.loadFromData = True    # True: restore parameters from 'model.vec'
		self.L1_flag = True         # True: L1 dissimilarity; False: squared L2
		self.hidden_size = 50       # embedding dimension
		self.nbatches = 100         # mini-batches per training epoch
		self.entity = 0             # entity count; filled from the C library
		self.relation = 0           # relation count; filled from the C library
		self.trainTimes = 500       # number of training epochs
		self.margin = 1.0           # margin of the pairwise ranking loss
class TransHModel(object):
	"""TransH knowledge-graph embedding model (TF1 graph definition).

	Builds the margin-based ranking loss over positive and corrupted
	(negative) triples, projecting entities onto relation-specific
	hyperplanes before applying the translation h + r ~ t.
	"""
	def calc(self, e, n):
		# Project entity embeddings e onto the hyperplane whose normal is n:
		# e_perp = e - (e . w) w, where w = n / ||n||.
		norm = tf.nn.l2_normalize(n, 1)
		return e - tf.reduce_sum(e * norm, 1, keep_dims = True) * norm
	def __init__(self, config):
		entity_total = config.entity
		relation_total = config.relation
		batch_size = config.batch_size
		size = config.hidden_size
		margin = config.margin
		# Placeholders for positive (pos_*) and corrupted (neg_*) triple ids.
		self.pos_h = tf.placeholder(tf.int32, [None])
		self.pos_t = tf.placeholder(tf.int32, [None])
		self.pos_r = tf.placeholder(tf.int32, [None])
		self.neg_h = tf.placeholder(tf.int32, [None])
		self.neg_t = tf.placeholder(tf.int32, [None])
		self.neg_r = tf.placeholder(tf.int32, [None])
		with tf.name_scope("embedding"):
			# Entity/relation embeddings plus one normal vector per relation
			# defining that relation's projection hyperplane.
			self.ent_embeddings = tf.get_variable(name = "ent_embedding", shape = [entity_total, size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
			self.rel_embeddings = tf.get_variable(name = "rel_embedding", shape = [relation_total, size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
			self.normal_vector = tf.get_variable(name = "normal_vector", shape = [relation_total, size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
			pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_h)
			pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_t)
			pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.pos_r)
			neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_h)
			neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_t)
			neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.neg_r)
			pos_norm = tf.nn.embedding_lookup(self.normal_vector, self.pos_r)
			neg_norm = tf.nn.embedding_lookup(self.normal_vector, self.neg_r)
			# Project heads/tails onto their relation's hyperplane (TransH).
			pos_h_e = self.calc(pos_h_e, pos_norm)
			pos_t_e = self.calc(pos_t_e, pos_norm)
			neg_h_e = self.calc(neg_h_e, neg_norm)
			neg_t_e = self.calc(neg_t_e, neg_norm)
		if config.L1_flag:
			# Per-triple dissimilarity ||h + r - t|| (L1 variant).
			pos = tf.reduce_sum(abs(pos_h_e + pos_r_e - pos_t_e), 1, keep_dims = True)
			neg = tf.reduce_sum(abs(neg_h_e + neg_r_e - neg_t_e), 1, keep_dims = True)
			self.predict = pos
		else:
			# Squared-L2 variant of the dissimilarity.
			pos = tf.reduce_sum((pos_h_e + pos_r_e - pos_t_e) ** 2, 1, keep_dims = True)
			neg = tf.reduce_sum((neg_h_e + neg_r_e - neg_t_e) ** 2, 1, keep_dims = True)
			self.predict = pos
		with tf.name_scope("output"):
			# Margin-based ranking loss: positives should score lower than
			# negatives by at least `margin`.
			self.loss = tf.reduce_sum(tf.maximum(pos - neg + margin, 0))
def main(_):
	"""Build the TransH graph and either train on FB15K or run evaluation,
	depending on ``config.testFlag``. (Python 2 / TensorFlow 1 code.)"""
	config = Config()
	if (config.testFlag):
		# Evaluation mode: one "batch" holds one candidate per entity.
		test_lib.init()
		config.relation = test_lib.getRelationTotal()
		config.entity = test_lib.getEntityTotal()
		config.batch = test_lib.getEntityTotal()
		config.batch_size = config.batch
	else:
		# Training mode: batch size derived from the triple count.
		lib.init()
		config.relation = lib.getRelationTotal()
		config.entity = lib.getEntityTotal()
		config.batch_size = lib.getTripleTotal() // config.nbatches
	with tf.Graph().as_default():
		sess = tf.Session()
		with sess.as_default():
			initializer = tf.contrib.layers.xavier_initializer(uniform = False)
			with tf.variable_scope("model", reuse=None, initializer = initializer):
				trainModel = TransHModel(config = config)
			global_step = tf.Variable(0, name="global_step", trainable=False)
			optimizer = tf.train.GradientDescentOptimizer(0.001)
			grads_and_vars = optimizer.compute_gradients(trainModel.loss)
			train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
			saver = tf.train.Saver()
			sess.run(tf.initialize_all_variables())
			if (config.loadFromData):
				saver.restore(sess, 'model.vec')
			def train_step(pos_h_batch, pos_t_batch, pos_r_batch, neg_h_batch, neg_t_batch, neg_r_batch):
				# One SGD step over a batch of positive/negative triples.
				feed_dict = {
					trainModel.pos_h: pos_h_batch,
					trainModel.pos_t: pos_t_batch,
					trainModel.pos_r: pos_r_batch,
					trainModel.neg_h: neg_h_batch,
					trainModel.neg_t: neg_t_batch,
					trainModel.neg_r: neg_r_batch
				}
				_, step, loss = sess.run(
					[train_op, global_step, trainModel.loss], feed_dict)
				return loss
			def test_step(pos_h_batch, pos_t_batch, pos_r_batch):
				# Score candidate triples (lower = better) for evaluation.
				feed_dict = {
					trainModel.pos_h: pos_h_batch,
					trainModel.pos_t: pos_t_batch,
					trainModel.pos_r: pos_r_batch,
				}
				step, predict = sess.run(
					[global_step, trainModel.predict], feed_dict)
				return predict
			# Pre-allocated batch buffers shared with the C libraries via
			# their raw data pointers.
			ph = np.zeros(config.batch_size, dtype = np.int32)
			pt = np.zeros(config.batch_size, dtype = np.int32)
			pr = np.zeros(config.batch_size, dtype = np.int32)
			nh = np.zeros(config.batch_size, dtype = np.int32)
			nt = np.zeros(config.batch_size, dtype = np.int32)
			nr = np.zeros(config.batch_size, dtype = np.int32)
			ph_addr = ph.__array_interface__['data'][0]
			pt_addr = pt.__array_interface__['data'][0]
			pr_addr = pr.__array_interface__['data'][0]
			nh_addr = nh.__array_interface__['data'][0]
			nt_addr = nt.__array_interface__['data'][0]
			nr_addr = nr.__array_interface__['data'][0]
			lib.getBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
			test_lib.getHeadBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
			test_lib.getTailBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
			test_lib.testHead.argtypes = [ctypes.c_void_p]
			test_lib.testTail.argtypes = [ctypes.c_void_p]
			# NOTE(review): stray debug print left over from development.
			print "hx"
			if not config.testFlag:
				# Training loop: nbatches C-sampled batches per epoch.
				for times in range(config.trainTimes):
					res = 0.0
					for batch in range(config.nbatches):
						lib.getBatch(ph_addr, pt_addr, pr_addr, nh_addr, nt_addr, nr_addr, config.batch_size)
						res += train_step(ph, pt, pr, nh, nt, nr)
						current_step = tf.train.global_step(sess, global_step)
					print times
					print res
				saver.save(sess, 'model.vec')
			else:
				# Link-prediction evaluation: rank every entity as head and
				# as tail for each test triple; the C library accumulates
				# the ranking statistics.
				total = test_lib.getTestTotal()
				for times in range(total):
					test_lib.getHeadBatch(ph_addr, pt_addr, pr_addr)
					res = test_step(ph, pt, pr)
					test_lib.testHead(res.__array_interface__['data'][0])
					test_lib.getTailBatch(ph_addr, pt_addr, pr_addr)
					res = test_step(ph, pt, pr)
					test_lib.testTail(res.__array_interface__['data'][0])
					print times
					if (times % 50 == 0):
						test_lib.test()
				test_lib.test()
if __name__ == "__main__":
	tf.app.run()
| 35.693548
| 165
| 0.712155
|
4a03855831c7b7e676ed8989ad9aa650c2ecd7a8
| 1,809
|
py
|
Python
|
tests/test_stream_xep_0092.py
|
elrond79/SleekXMPP
|
62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f
|
[
"BSD-3-Clause"
] | 1
|
2016-10-24T05:30:25.000Z
|
2016-10-24T05:30:25.000Z
|
tests/test_stream_xep_0092.py
|
elrond79/SleekXMPP
|
62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f
|
[
"BSD-3-Clause"
] | 1
|
2017-11-07T13:03:48.000Z
|
2017-11-07T13:03:48.000Z
|
tests/test_stream_xep_0092.py
|
elrond79/SleekXMPP
|
62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f
|
[
"BSD-3-Clause"
] | null | null | null |
import threading
from sleekxmpp.test import *
class TestStreamSet(SleekTest):
    """Stream-level tests for XEP-0092 (Software Version)."""

    def tearDown(self):
        """Stop everything that was started."""
        self.stream_close()

    def testHandleSoftwareVersionRequest(self):
        """An incoming jabber:iq:version get is answered with our version."""
        self.stream_start(mode='client', plugins=['xep_0030', 'xep_0092'])
        self.xmpp['xep_0092'].name = 'SleekXMPP'
        self.xmpp['xep_0092'].version = 'dev'
        self.xmpp['xep_0092'].os = 'Linux'
        # Simulate a version query arriving on the stream...
        self.recv("""
          <iq type="get" id="1">
            <query xmlns="jabber:iq:version" />
          </iq>
        """)
        # ...and verify the plugin answers with the configured fields.
        self.send("""
          <iq type="result" id="1">
            <query xmlns="jabber:iq:version">
              <name>SleekXMPP</name>
              <version>dev</version>
              <os>Linux</os>
            </query>
          </iq>
        """)

    def testMakeSoftwareVersionRequest(self):
        """get_version() sends a query and parses the peer's reply."""
        results = []

        def query():
            # get_version blocks, so it runs in a worker thread while the
            # test thread scripts the stream traffic.
            r = self.xmpp['xep_0092'].get_version('foo@bar')
            results.append(r)

        self.stream_start(mode='client', plugins=['xep_0030', 'xep_0092'])

        t = threading.Thread(target=query)
        t.start()

        self.send("""
          <iq type="get" id="1" to="foo@bar">
            <query xmlns="jabber:iq:version" />
          </iq>
        """)
        self.recv("""
          <iq type="result" id="1" from="foo@bar" to="tester@localhost">
            <query xmlns="jabber:iq:version">
              <name>Foo</name>
              <version>1.0</version>
              <os>Linux</os>
            </query>
          </iq>
        """)

        t.join()

        expected = [{'name': 'Foo', 'version': '1.0', 'os':'Linux'}]
        self.assertEqual(results, expected,
                         "Did not receive expected results: %s" % results)
suite = unittest.TestLoader().loadTestsFromTestCase(TestStreamSet)
| 25.842857
| 74
| 0.509121
|
4a038669f669c8bea3d3c83fd73a068dfc3ca66c
| 1,202
|
py
|
Python
|
configs/_base_/models/tpn_tsm_r50.py
|
rlleshi/mmaction2
|
6993693f178b1a59e5eb07f1a3db484d5e5de61a
|
[
"Apache-2.0"
] | 1,870
|
2020-07-11T09:33:46.000Z
|
2022-03-31T13:21:36.000Z
|
configs/_base_/models/tpn_tsm_r50.py
|
rlleshi/mmaction2
|
6993693f178b1a59e5eb07f1a3db484d5e5de61a
|
[
"Apache-2.0"
] | 1,285
|
2020-07-11T11:18:57.000Z
|
2022-03-31T08:41:17.000Z
|
configs/_base_/models/tpn_tsm_r50.py
|
rlleshi/mmaction2
|
6993693f178b1a59e5eb07f1a3db484d5e5de61a
|
[
"Apache-2.0"
] | 557
|
2020-07-11T09:51:57.000Z
|
2022-03-31T13:21:35.000Z
|
# model settings for TPN (Temporal Pyramid Network) on a TSM-ResNet50 backbone
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        # Feed the last two ResNet stages into the TPN neck.
        out_indices=(2, 3),
        norm_eval=False,
        shift_div=8),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(
            in_channels=(1024, 2048), out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024),
            mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        # Auxiliary classification head used only during training.
        aux_head_cfg=dict(out_channels=174, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead',
        # 174 classes (Something-Something style dataset).
        num_classes=174,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.01),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob', fcn_test=True))
| 32.486486
| 63
| 0.59401
|
4a03867557ec7df0df63ec2ccd9a4dff93455a19
| 3,937
|
py
|
Python
|
setup.py
|
hazcod/mitmproxy
|
45eac1607ee8c9c00a41c3d399a331397bdeb99a
|
[
"MIT"
] | 1
|
2019-02-23T14:37:39.000Z
|
2019-02-23T14:37:39.000Z
|
setup.py
|
aresayu/mitmproxy
|
5a03098d23b7a2ad2ecfbbb33f6ac0bbd4692790
|
[
"MIT"
] | null | null | null |
setup.py
|
aresayu/mitmproxy
|
5a03098d23b7a2ad2ecfbbb33f6ac0bbd4692790
|
[
"MIT"
] | null | null | null |
import os
import re
from codecs import open  # NOTE(review): shadows the builtin open

from setuptools import find_packages, setup

# Based on https://github.com/pypa/sampleproject/blob/main/setup.py
# and https://python-packaging-user-guide.readthedocs.org/

here = os.path.abspath(os.path.dirname(__file__))

# Long description for PyPI, taken verbatim from the README.
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
long_description_content_type = "text/markdown"

# Single-source the version: parse VERSION out of mitmproxy/version.py.
with open(os.path.join(here, "mitmproxy", "version.py")) as f:
    match = re.search(r'VERSION = "(.+?)"', f.read())
    assert match
    VERSION = match.group(1)
setup(
    name="mitmproxy",
    version=VERSION,
    description="An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.",
    long_description=long_description,
    long_description_content_type=long_description_content_type,
    url="http://mitmproxy.org",
    author="Aldo Cortesi",
    author_email="aldo@corte.si",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console :: Curses",
        "Operating System :: MacOS",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Security",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: Proxy Servers",
        "Topic :: System :: Networking :: Monitoring",
        "Topic :: Software Development :: Testing",
        "Typing :: Typed",
    ],
    project_urls={
        'Documentation': 'https://docs.mitmproxy.org/stable/',
        'Source': 'https://github.com/mitmproxy/mitmproxy/',
        'Tracker': 'https://github.com/mitmproxy/mitmproxy/issues',
    },
    packages=find_packages(include=[
        "mitmproxy", "mitmproxy.*",
    ]),
    include_package_data=True,
    # Command-line entry points for the three mitmproxy front-ends.
    entry_points={
        'console_scripts': [
            "mitmproxy = mitmproxy.tools.main:mitmproxy",
            "mitmdump = mitmproxy.tools.main:mitmdump",
            "mitmweb = mitmproxy.tools.main:mitmweb",
        ]
    },
    python_requires='>=3.8',
    # https://packaging.python.org/en/latest/requirements/#install-requires
    # It is not considered best practice to use install_requires to pin dependencies to specific versions.
    install_requires=[
        "asgiref>=3.2.10,<3.4",
        "blinker>=1.4, <1.5",
        "Brotli>=1.0,<1.1",
        "certifi>=2019.9.11",  # no semver here - this should always be on the last release!
        "click>=7.0,<8.1",
        "cryptography>=3.3,<3.5",
        "flask>=1.1.1,<2.1",
        "h11>=0.11,<0.13",
        "h2>=4.0,<5",
        "hyperframe>=6.0,<7",
        "kaitaistruct>=0.7,<0.10",
        "ldap3>=2.8,<2.10",
        "msgpack>=1.0.0, <1.1.0",
        "passlib>=1.6.5, <1.8",
        "protobuf>=3.14,<3.18",
        "pyOpenSSL>=20.0,<20.1",
        "pyparsing>=2.4.2,<2.5",
        "pyperclip>=1.6.0,<1.9",
        "ruamel.yaml>=0.16,<0.18",
        "sortedcontainers>=2.3,<2.4",
        "tornado>=4.3,<7",
        "urwid>=2.1.1,<2.2",
        "wsproto>=1.0,<1.1",
        "publicsuffix2>=2.20190812,<3",
        "zstandard>=0.11,<0.16",
    ],
    extras_require={
        # Windows-only transparent-proxy support.
        ':sys_platform == "win32"': [
            "pydivert>=2.0.3,<2.2",
        ],
        # Development/test toolchain.
        'dev': [
            "hypothesis>=5.8,<6.13",
            "parver>=0.1,<2.0",
            "pdoc>=4.0.0",
            "pyinstaller==4.3",
            "pytest-asyncio>=0.10.0,<0.14,!=0.14",
            "pytest-cov>=2.7.1,<3",
            "pytest-timeout>=1.3.3,<2",
            "pytest-xdist>=2.1.0,<3",
            "pytest>=6.1.0,<7",
            "requests>=2.9.1,<3",
            "tox>=3.5,<4",
            "wheel>=0.36.2,<0.37"
        ],
    }
)
| 34.234783
| 106
| 0.557785
|
4a03871091031c298344cabebd26d3244c337ae0
| 1,011
|
py
|
Python
|
chatto_transform/datastores/mongodb_datastore.py
|
chatto-hub-test2/Spaceboy2
|
7b6b91baf06290e6b047ae75e7ea61cee4846b3a
|
[
"Unlicense",
"MIT"
] | null | null | null |
chatto_transform/datastores/mongodb_datastore.py
|
chatto-hub-test2/Spaceboy2
|
7b6b91baf06290e6b047ae75e7ea61cee4846b3a
|
[
"Unlicense",
"MIT"
] | null | null | null |
chatto_transform/datastores/mongodb_datastore.py
|
chatto-hub-test2/Spaceboy2
|
7b6b91baf06290e6b047ae75e7ea61cee4846b3a
|
[
"Unlicense",
"MIT"
] | null | null | null |
from .datastore_base import DataStore
from ..schema.schema_base import *
import pandas
from pymongo import MongoClient
from bson.code import Code
def collection_attrs(db, collection_name):
    """Return the set of attribute names appearing in any document of the
    named collection, discovered with a server-side map-reduce pass."""
    # Map phase: emit every key of every document.
    emit_keys = Code("""
    function () {
        for (var attr in this) {
            emit(attr, null);
        }
    }
    """)
    # Reduce phase: the values are irrelevant, only the keys matter.
    discard_values = Code("""
    function (key, stuff) {
        return null
    }
    """)
    outcome = db[collection_name].map_reduce(emit_keys, discard_values, {'inline': True})
    return {row['_id'] for row in outcome['results']}
def collection_to_schema(db, collection_name):
    """Build a Schema for *collection_name* with one obj() column per
    attribute discovered in the collection."""
    return Schema(collection_name,
                  [obj(name) for name in collection_attrs(db, collection_name)])
class MongoDBDataStore(DataStore):
    """DataStore backed by a MongoDB database.

    Loads the collection named by the schema into a pandas DataFrame.
    """

    def __init__(self, schema, db):
        super().__init__(schema)
        self.db = db  # pymongo database handle

    def _load(self):
        # Materialize every document of the schema's collection.
        documents = self.db[self.schema.name].find()
        return pandas.DataFrame.from_records(documents)
| 26.605263
| 75
| 0.623145
|
4a03877b2fa05b6957dc42eb63dd46171e20f414
| 14,398
|
py
|
Python
|
tests/components/switch/test_template.py
|
shire210/home-assistant
|
63cd8bbee6f1b74ae9c6c249ac820119a8a573d8
|
[
"Apache-2.0"
] | 2
|
2017-02-25T00:27:06.000Z
|
2017-02-25T03:09:30.000Z
|
tests/components/switch/test_template.py
|
shire210/home-assistant
|
63cd8bbee6f1b74ae9c6c249ac820119a8a573d8
|
[
"Apache-2.0"
] | null | null | null |
tests/components/switch/test_template.py
|
shire210/home-assistant
|
63cd8bbee6f1b74ae9c6c249ac820119a8a573d8
|
[
"Apache-2.0"
] | 1
|
2020-08-26T20:54:14.000Z
|
2020-08-26T20:54:14.000Z
|
"""The tests for the Template switch platform."""
import asyncio
from homeassistant.core import callback, State, CoreState
from homeassistant import setup
import homeassistant.components as core
from homeassistant.const import STATE_ON, STATE_OFF
from homeassistant.helpers.restore_state import DATA_RESTORE_CACHE
from tests.common import (
get_test_home_assistant, assert_setup_component, mock_component)
class TestTemplateSwitch:
"""Test the Template switch."""
hass = None
calls = None
# pylint: disable=invalid-name
def setup_method(self, method):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.calls = []
@callback
def record_call(service):
"""Track function calls.."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_template_state_text(self):
""""Test the state text of a template."""
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_ON
state = self.hass.states.set('switch.test_state', STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_OFF
def test_template_state_boolean_on(self):
"""Test the setting of the state with boolean on."""
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ 1 == 1 }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_ON
def test_template_state_boolean_off(self):
"""Test the setting of the state with off."""
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ 1 == 2 }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_OFF
def test_template_syntax_error(self):
"""Test templating syntax error."""
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{% if rubbish %}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_invalid_name_does_not_create(self):
"""Test invalid name."""
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test INVALID switch': {
'value_template':
"{{ rubbish }",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_invalid_switch_does_not_create(self):
"""Test invalid switch."""
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': 'Invalid'
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_no_switches_does_not_create(self):
"""Test if there are no switches no creation."""
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template'
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_template_does_not_create(self):
"""Test missing template."""
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'not_value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_on_does_not_create(self):
"""Test missing on."""
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'not_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_off_does_not_create(self):
"""Test missing off."""
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'not_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_on_action(self):
"""Test on action."""
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'test.automation'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
self.hass.states.set('switch.test_state', STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_OFF
core.switch.turn_on(self.hass, 'switch.test_template_switch')
self.hass.block_till_done()
assert len(self.calls) == 1
def test_off_action(self):
"""Test off action."""
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'test.automation'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_ON
core.switch.turn_off(self.hass, 'switch.test_template_switch')
self.hass.block_till_done()
assert len(self.calls) == 1
@asyncio.coroutine
def test_restore_state(hass):
"""Ensure states are restored on startup."""
hass.data[DATA_RESTORE_CACHE] = {
'switch.test_template_switch':
State('switch.test_template_switch', 'on'),
}
hass.state = CoreState.starting
mock_component(hass, 'recorder')
yield from setup.async_setup_component(hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
state = hass.states.get('switch.test_template_switch')
assert state.state == 'on'
yield from hass.async_start()
yield from hass.async_block_till_done()
state = hass.states.get('switch.test_template_switch')
assert state.state == 'unavailable'
| 34.693976
| 71
| 0.429782
|
4a03886b82deb6713e99cbc285eee5bfc11b754f
| 555
|
py
|
Python
|
level_1.py
|
ceafdc/PythonChallenge
|
fe4f64aabf1d6e672a06785538391dd16a03ad04
|
[
"MIT"
] | 1
|
2018-01-20T17:58:10.000Z
|
2018-01-20T17:58:10.000Z
|
level_1.py
|
ceafdc/PythonChallenge
|
fe4f64aabf1d6e672a06785538391dd16a03ad04
|
[
"MIT"
] | null | null | null |
level_1.py
|
ceafdc/PythonChallenge
|
fe4f64aabf1d6e672a06785538391dd16a03ad04
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# url: http://www.pythonchallenge.com/pc/def/map.html
import string
text = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."
k = 2
src = string.ascii_lowercase
dst = string.ascii_lowercase[k:] + string.ascii_lowercase[:k]
table = text.maketrans(src, dst)
print(text.translate(table))
print('map'.translate(table))
# next: http://www.pythonchallenge.com/pc/def/ocr.html
| 29.210526
| 212
| 0.744144
|
4a038932c6dac2e1b6fb6c7d5cca401b697dfefd
| 38,576
|
py
|
Python
|
toolchain/riscv/MSYS/python/Lib/test/test_bz2.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | 207
|
2018-10-01T08:53:01.000Z
|
2022-03-14T12:15:54.000Z
|
toolchain/riscv/MSYS/python/Lib/test/test_bz2.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | 8
|
2019-06-29T14:18:51.000Z
|
2022-02-19T07:30:27.000Z
|
toolchain/riscv/MSYS/python/Lib/test/test_bz2.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | 76
|
2020-03-16T01:47:46.000Z
|
2022-03-21T16:37:07.000Z
|
from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import pathlib
import random
import shutil
import subprocess
import threading
from test.support import unlink
import _compression
import sys
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = None
def ext_decompress(data):
global has_cmdline_bunzip2
if has_cmdline_bunzip2 is None:
has_cmdline_bunzip2 = bool(shutil.which('bunzip2'))
if has_cmdline_bunzip2:
return subprocess.check_output(['bunzip2'], input=data)
else:
return bz2.decompress(data)
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT_LINES = [
b'root:x:0:0:root:/root:/bin/bash\n',
b'bin:x:1:1:bin:/bin:\n',
b'daemon:x:2:2:daemon:/sbin:\n',
b'adm:x:3:4:adm:/var/adm:\n',
b'lp:x:4:7:lp:/var/spool/lpd:\n',
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
b'mail:x:8:12:mail:/var/spool/mail:\n',
b'news:x:9:13:news:/var/spool/news:\n',
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
b'operator:x:11:0:operator:/root:\n',
b'games:x:12:100:games:/usr/games:\n',
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
b'nobody:x:65534:65534:Nobody:/home:\n',
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
b'www:x:103:104::/var/www:/bin/false\n',
]
TEXT = b''.join(TEXT_LINES)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
BAD_DATA = b'this is not a valid bzip2 file'
# Some tests need more than one block of uncompressed data. Since one block
# is at least 100,000 bytes, we gather some data dynamically and compress it.
# Note that this assumes that compression works correctly, so we cannot
# simply use the bigger test data for all tests.
test_size = 0
BIG_TEXT = bytearray(128*1024)
for fname in glob.glob(os.path.join(os.path.dirname(__file__), '*.py')):
with open(fname, 'rb') as fh:
test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
if test_size > 128*1024:
break
BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
def setUp(self):
self.filename = support.TESTFN
def tearDown(self):
if os.path.isfile(self.filename):
os.unlink(self.filename)
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, os.devnull, "z")
self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = _compression.BUFFER_SIZE
_compression.BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
_compression.BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
with support.start_threads(threads):
pass
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testOpenPathLikeFilename(self):
filename = pathlib.Path(self.filename)
with BZ2File(filename, "wb") as f:
f.write(self.DATA)
with BZ2File(filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testDecompressLimited(self):
"""Decompressed data buffering should be limited"""
bomb = bz2.compress(b'\0' * int(2e6), compresslevel=9)
self.assertLess(len(bomb), _compression.BUFFER_SIZE)
decomp = BZ2File(BytesIO(bomb))
self.assertEqual(decomp.read(1), b'\0')
max_decomp = 1 + DEFAULT_BUFFER_SIZE
self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
"Excessive amount of data was decompressed")
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
self.assertEqual(ext_decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
# Drop the eos_magic field (6 bytes) and CRC (4 bytes).
truncated = self.DATA[:-10]
with BZ2File(BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with BZ2File(BytesIO(truncated)) as f:
self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 4-byte file header, and block header of at least 146 bits.
for i in range(22):
with BZ2File(BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
class BZ2CompressorTest(BaseTest):
def testCompress(self):
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
bz2c = BZ2Compressor()
data = bz2c.compress(b'')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
bz2c = BZ2Compressor()
n = 0
data = b''
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
data += bz2c.compress(str)
n += 1
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
@bigmemtest(size=_4G + 100, memuse=2)
def testCompress4G(self, size):
# "Test BZ2Compressor.compress()/flush() with >4GiB input"
bz2c = BZ2Compressor()
data = b"x" * size
try:
compressed = bz2c.compress(data)
compressed += bz2c.flush()
finally:
data = None # Release memory
data = bz2.decompress(compressed)
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b"x")), 0)
finally:
data = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
bz2d = BZ2Decompressor()
text = b''
n = 0
while True:
str = self.DATA[n*10:(n+1)*10]
if not str:
break
text += bz2d.decompress(str)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
bz2d = BZ2Decompressor()
unused_data = b"this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, b"anything")
self.assertRaises(EOFError, bz2d.decompress, b"")
@bigmemtest(size=_4G + 100, memuse=3.3)
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
bz2d = BZ2Decompressor()
decompressed = bz2d.decompress(compressed)
self.assertTrue(decompressed == data)
finally:
data = None
compressed = None
decompressed = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Decompressor(), proto)
def testDecompressorChunksMaxsize(self):
bzd = BZ2Decompressor()
max_length = 100
out = []
# Feed some input
len_ = len(self.BIG_DATA) - 64
out.append(bzd.decompress(self.BIG_DATA[:len_],
max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data without providing more input
out.append(bzd.decompress(b'', max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data while providing more input
out.append(bzd.decompress(self.BIG_DATA[len_:],
max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
# Retrieve remaining uncompressed data
while not bzd.eof:
out.append(bzd.decompress(b'', max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
out = b"".join(out)
self.assertEqual(out, self.BIG_TEXT)
self.assertEqual(bzd.unused_data, b"")
def test_decompressor_inputbuf_1(self):
# Test reusing input buffer after moving existing
# contents to beginning
bzd = BZ2Decompressor()
out = []
# Create input buffer and fill it
self.assertEqual(bzd.decompress(self.DATA[:100],
max_length=0), b'')
# Retrieve some results, freeing capacity at beginning
# of input buffer
out.append(bzd.decompress(b'', 2))
# Add more data that fits into input buffer after
# moving existing data to beginning
out.append(bzd.decompress(self.DATA[100:105], 15))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[105:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_2(self):
# Test reusing input buffer by appending data at the
# end right away
bzd = BZ2Decompressor()
out = []
# Create input buffer and empty it
self.assertEqual(bzd.decompress(self.DATA[:200],
max_length=0), b'')
out.append(bzd.decompress(b''))
# Fill buffer with new data
out.append(bzd.decompress(self.DATA[200:280], 2))
# Append some more data, not enough to require resize
out.append(bzd.decompress(self.DATA[280:300], 2))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_3(self):
# Test reusing input buffer after extending it
bzd = BZ2Decompressor()
out = []
# Create almost full input buffer
out.append(bzd.decompress(self.DATA[:200], 5))
# Add even more data to it, requiring resize
out.append(bzd.decompress(self.DATA[200:300], 5))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_failure(self):
bzd = BZ2Decompressor()
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
# Previously, a second call could crash due to internal inconsistency
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
    @support.refcount_test
    def test_refleaks_in___init__(self):
        """Re-running __init__ on a live decompressor must not leak refs."""
        gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
        bzd = BZ2Decompressor()
        refs_before = gettotalrefcount()
        for i in range(100):
            bzd.__init__()
        # Allow a little jitter: the interpreter may create a few transient
        # references between the two measurements.
        self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)
class CompressDecompressTest(BaseTest):
    """One-shot bz2.compress() / bz2.decompress() round trips."""

    def testCompress(self):
        # Output must be readable by the external reference decompressor.
        data = bz2.compress(self.TEXT)
        self.assertEqual(ext_decompress(data), self.TEXT)

    def testCompressEmptyString(self):
        text = bz2.compress(b'')
        self.assertEqual(text, self.EMPTY_DATA)

    def testDecompress(self):
        text = bz2.decompress(self.DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressEmpty(self):
        text = bz2.decompress(b"")
        self.assertEqual(text, b"")

    def testDecompressToEmptyString(self):
        text = bz2.decompress(self.EMPTY_DATA)
        self.assertEqual(text, b'')

    def testDecompressIncomplete(self):
        # Truncated streams must raise, not return partial data.
        self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])

    def testDecompressBadData(self):
        self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)

    def testDecompressMultiStream(self):
        # Concatenated streams are decompressed back-to-back.
        text = bz2.decompress(self.DATA * 5)
        self.assertEqual(text, self.TEXT * 5)

    def testDecompressTrailingJunk(self):
        # Garbage after a valid stream is ignored.
        text = bz2.decompress(self.DATA + self.BAD_DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressMultiStreamTrailingJunk(self):
        text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
        self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
    "Test the open function."

    def open(self, *args, **kwargs):
        # Indirection point so subclasses can exercise a different open().
        return bz2.open(*args, **kwargs)

    def test_binary_modes(self):
        """'wb'/'xb' write, 'rb' reads back, 'ab' appends a second copy."""
        for mode in ("wb", "xb"):
            if mode == "xb":
                # 'x' refuses to overwrite, so remove the previous file.
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT)
            with self.open(self.filename, "rb") as f:
                self.assertEqual(f.read(), self.TEXT)
            with self.open(self.filename, "ab") as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT * 2)

    def test_implicit_binary_modes(self):
        # Test implicit binary modes (no "b" or "t" in mode string).
        for mode in ("w", "x"):
            if mode == "x":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT)
            with self.open(self.filename, "r") as f:
                self.assertEqual(f.read(), self.TEXT)
            with self.open(self.filename, "a") as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read())
                self.assertEqual(file_data, self.TEXT * 2)

    def test_text_modes(self):
        """Text modes translate '\\n' to/from the platform line separator."""
        text = self.TEXT.decode("ascii")
        text_native_eol = text.replace("\n", os.linesep)
        for mode in ("wt", "xt"):
            if mode == "xt":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(text)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read()).decode("ascii")
                self.assertEqual(file_data, text_native_eol)
            with self.open(self.filename, "rt") as f:
                self.assertEqual(f.read(), text)
            with self.open(self.filename, "at") as f:
                f.write(text)
            with open(self.filename, "rb") as f:
                file_data = ext_decompress(f.read()).decode("ascii")
                self.assertEqual(file_data, text_native_eol * 2)

    def test_x_mode(self):
        """Exclusive-creation modes fail when the file already exists."""
        for mode in ("x", "xb", "xt"):
            unlink(self.filename)
            with self.open(self.filename, mode) as f:
                pass
            with self.assertRaises(FileExistsError):
                with self.open(self.filename, mode) as f:
                    pass

    def test_fileobj(self):
        """open() accepts a file object instead of a filename."""
        with self.open(BytesIO(self.DATA), "r") as f:
            self.assertEqual(f.read(), self.TEXT)
        with self.open(BytesIO(self.DATA), "rb") as f:
            self.assertEqual(f.read(), self.TEXT)
        text = self.TEXT.decode("ascii")
        with self.open(BytesIO(self.DATA), "rt") as f:
            self.assertEqual(f.read(), text)

    def test_bad_params(self):
        # Test invalid parameter combinations: mixed binary/text mode
        # flags, and text-only arguments passed with a binary mode.
        self.assertRaises(ValueError,
                          self.open, self.filename, "wbt")
        self.assertRaises(ValueError,
                          self.open, self.filename, "xbt")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", encoding="utf-8")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", errors="ignore")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", newline="\n")

    def test_encoding(self):
        # Test non-default encoding round trip through text mode.
        text = self.TEXT.decode("ascii")
        text_native_eol = text.replace("\n", os.linesep)
        with self.open(self.filename, "wt", encoding="utf-16-le") as f:
            f.write(text)
        with open(self.filename, "rb") as f:
            file_data = ext_decompress(f.read()).decode("utf-16-le")
            self.assertEqual(file_data, text_native_eol)
        with self.open(self.filename, "rt", encoding="utf-16-le") as f:
            self.assertEqual(f.read(), text)

    def test_encoding_error_handler(self):
        # Test with non-default encoding error handler: errors="ignore"
        # silently drops the undecodable byte.
        with self.open(self.filename, "wb") as f:
            f.write(b"foo\xffbar")
        with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
                as f:
            self.assertEqual(f.read(), "foobar")

    def test_newline(self):
        # Test with explicit newline (universal newline mode disabled).
        text = self.TEXT.decode("ascii")
        with self.open(self.filename, "wt", newline="\n") as f:
            f.write(text)
        with self.open(self.filename, "rt", newline="\r") as f:
            # The data contains no "\r", so it reads back as one line.
            self.assertEqual(f.readlines(), [text])
def test_main():
    """Run every test case of this module via the regrtest helper."""
    support.run_unittest(
        BZ2FileTest,
        BZ2CompressorTest,
        BZ2DecompressorTest,
        CompressDecompressTest,
        OpenTest,
    )
    # Some tests spawn an external decompressor; make sure no child
    # process outlives the test run.
    support.reap_children()
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
| 38.499002
| 1,182
| 0.583005
|
4a038957ad4015bd05a5603d3caa21b8348c72c8
| 17,714
|
py
|
Python
|
QDE/offline/offline_test_environment_creation.py
|
oxquantum-repo/drl_for_quantum_measurement
|
a02a8f3a7c5b40458f440a63355932409c66921c
|
[
"MIT"
] | 5
|
2021-05-18T01:07:04.000Z
|
2022-01-29T13:31:18.000Z
|
QDE/offline/offline_test_environment_creation.py
|
oxquantum-repo/drl_for_quantum_measurement
|
a02a8f3a7c5b40458f440a63355932409c66921c
|
[
"MIT"
] | null | null | null |
QDE/offline/offline_test_environment_creation.py
|
oxquantum-repo/drl_for_quantum_measurement
|
a02a8f3a7c5b40458f440a63355932409c66921c
|
[
"MIT"
] | 1
|
2021-05-18T01:07:20.000Z
|
2021-05-18T01:07:20.000Z
|
import sys
import numpy as np
import tensorflow as tf
from tensorflow.keras import models
import math
import random
import matplotlib.pyplot as plt
from tqdm import tqdm
from mock_pygor import mock_pygor
class double_dot_2d:
    def __init__(self, block_size, file_name, offset=0):
        """Offline double-quantum-dot RL environment backed by a saved scan.

        :param block_size: pixels per side of one measurement block
        :param file_name: file the mock pygor loads the full scan from
        :param offset: offset passed through to mock_pygor
        """
        # D is the dimension of each patch (9 statistics sub-blocks);
        # self.dim is the grid of blocks tiling the image.
        self.block_size = block_size
        self.pygor = mock_pygor(file_name, block_size, offset)
        self.image = self.pygor.scan
        self.allowed_n_blocks = self.pygor.allowed_n_blocks
        self.D = 9
        self.K = 6  # number of movement actions (see step())
        self.dim = [self.allowed_n_blocks, self.allowed_n_blocks]
        # Per-block bookkeeping maps, all (n_blocks x n_blocks).
        self.visit_map = np.zeros((self.allowed_n_blocks, self.allowed_n_blocks))
        self.isquantum = np.zeros((self.allowed_n_blocks, self.allowed_n_blocks))
        self.cnn_prediction = np.zeros((self.allowed_n_blocks, self.allowed_n_blocks))
        self.pre_classification_prediction = np.zeros((self.allowed_n_blocks, self.allowed_n_blocks))
        # Pixel-level record of everything measured so far.
        self.total_measurement = np.zeros((self.allowed_n_blocks*self.block_size, self.allowed_n_blocks*self.block_size))
        self.starting_pos = self.pygor.get_location(self.pygor.start_params)
        self.current_pos = self.pygor.get_location(self.pygor.get_current_params())
        # Lazily-filled per-block caches of raw windows and statistics
        # (0 marks "not yet measured").
        self.small_window_measurements = [0] * self.allowed_n_blocks
        self.small_window_statistics = [0] * self.allowed_n_blocks
        for ii in range(self.allowed_n_blocks):
            self.small_window_measurements[ii] = [0] * self.allowed_n_blocks
            self.small_window_statistics[ii] = [0] * self.allowed_n_blocks
        self.pre_classify()
        self.model_binary_classifier = self.load_cnn()
        self.pixels_measured = 0
        # Measure and classify the starting block immediately.
        measurement, statistics = self.random_point_measurement([self.starting_pos[0], self.starting_pos[1]])
        self.small_window_measurements[self.starting_pos[0]][self.starting_pos[1]], self.small_window_statistics[self.starting_pos[0]][self.starting_pos[1]] = measurement, statistics
        self.visit_map[self.starting_pos[0], self.starting_pos[1]] += 1
        classification = self.check_for_bias_triangle(self.starting_pos[0], self.starting_pos[1])
        if classification == 1:
            self.isquantum[self.starting_pos[0], self.starting_pos[1]] = 1
    def pre_classify(self):
        """Take two 1-D calibration traces and derive normalisation data.

        Sets the environment current range, the standard deviation used
        for normalisation, and the upper/lower mean-current thresholds
        used by check_for_bias_triangle.  Restores the starting gate
        voltages afterwards.
        """
        self.trace_0 = self.pygor.do1d(0, 0, 0, self.pygor.env_size)
        self.trace_1 = self.pygor.do1d(0, 1, 0, self.pygor.env_size)
        self.environment_max_current = max([max(self.trace_0), max(self.trace_1)])
        self.environment_min_current = min([min(self.trace_0), min(self.trace_1)])
        self.standard_deviation_trace_0 = np.std(self.trace_0)
        self.standard_deviation_trace_1 = np.std(self.trace_1)
        # Use the smaller per-trace std unless the two traces disagree by
        # more than that, in which case use the disagreement itself.
        self.standard_deviation_for_normalisation = max([min([self.standard_deviation_trace_0, self.standard_deviation_trace_1]), abs(self.standard_deviation_trace_0-self.standard_deviation_trace_1)])
        self.trace_range_max = self.normalise_mean_function(abs(self.environment_max_current - self.environment_min_current))
        # Blocks whose mean falls within (0.5%, 40%) of the normalised
        # range are candidate bias triangles.
        self.threshold_upper = self.trace_range_max * 0.4
        self.threshold_lower = self.trace_range_max * 0.005
        self.pygor.setvals(self.pygor.start_params)
def get_neightborMapIndividual(self, location):
id1, id2 = location
norm_factor = 5.0
output = []
# return a 6 dimensional vector
if id1 == 0: # decrease d1
output.append(0)
else:
output.append(self.visit_map[id1 - 1, id2] / norm_factor)
if id1 == self.dim[0] - 1: # increase d1
output.append(0)
else:
output.append(self.visit_map[id1 + 1, id2] / norm_factor)
if id2 == 0: # decrease d2
output.append(0)
else:
output.append(self.visit_map[id1, id2 - 1] / norm_factor)
if id2 == self.dim[1] - 1: # increase d2
output.append(0)
else:
output.append(self.visit_map[id1, id2 + 1] / norm_factor)
if id1 < self.dim[0] - 1 and id2 < self.dim[1] - 1: # decrease d1 and decrease d2
output.append(self.visit_map[id1 + 1, id2 + 1] / norm_factor)
else:
output.append(0)
if id1 > 0 and id2 > 0: # increase d1 and increase d2
output.append(self.visit_map[id1 - 1, id2 - 1] / norm_factor)
else:
output.append(0)
# replace zero by -1
output2 = [-1 / norm_factor if o == 0 else o * 1 for o in output]
return output2
def get_neighborMap(self, locations):
locations = np.asarray(locations)
if len(locations.shape) == 1: # 1 data point
output = self.get_neightborMapIndividual(locations)
else:
output = np.apply_along_axis(self.get_neightborMapIndividual, 1, locations)
return output
    def get_statistics(self, location):
        """Return the cached 18-feature statistics for grid cell *location*."""
        stats = self.small_window_statistics[location[0]][location[1]]
        return stats
def get_location(self):
location = np.copy(self.current_pos)
return location
    def take_measurement(self, location):
        """Fully scan the block at grid cell *location*.

        Returns (normalised window, 18-feature statistics) and records the
        raw window in self.total_measurement.
        """
        # Block-grid indices -> absolute gate-parameter boundaries.
        # NOTE(review): get_params takes single-element lists; the mapping
        # lives in mock_pygor — confirm axis order against do2d.
        initial_x, final_x, intial_y, final_y = self.pygor.get_params([location[0]]), self.pygor.get_params([location[0]+1]), self.pygor.get_params([location[1]]), self.pygor.get_params([location[1]+1])
        scan = self.pygor.do2d(initial_x, final_x, intial_y, final_y)
        small_window_measurements = scan
        small_window_statistics = self.block_splitting(small_window_measurements)
        self.total_measurement[initial_x:final_x, intial_y:final_y] = small_window_measurements
        # Normalise row by row (map over the first axis of the window).
        small_window_measurements = list(map(self.normalise_mean_function, small_window_measurements))
        return small_window_measurements, small_window_statistics
    def random_point_measurement(self, location):
        """Sparsely sample random pixels of a block until statistics settle.

        Returns (partially-filled window, 18-feature statistics).  Phase 1
        samples until every block statistic is non-zero; phase 2 continues
        until the total relative change between successive statistic
        vectors drops below 10%.  Adds the pixel count to
        self.pixels_measured.
        """
        small_window_measurements = np.zeros((self.block_size, self.block_size))
        log_stats_mean = []
        log_stats_std = []
        stats = np.zeros(18)
        i = 0
        # Phase 1: every statistic must be defined (non-zero) first.
        while any(stats == 0):
            small_window_measurements = self.sample_random_pixels(small_window_measurements, location)
            stats = self.block_splitting_statistics(small_window_measurements)
            log_stats_mean.append(stats[0])
            log_stats_std.append(stats[8])
            i += 1
        small_window_measurements = self.sample_random_pixels(small_window_measurements, location)
        new_stats = self.block_splitting_statistics(small_window_measurements)
        # Phase 2: iterate until the statistics converge.
        while sum(abs((stats - new_stats)/stats)) > 0.1:
            i += 1
            small_window_measurements = self.sample_random_pixels(small_window_measurements, location)
            stats = new_stats
            new_stats = self.block_splitting_statistics(small_window_measurements)
            log_stats_mean.append(new_stats[0])
            log_stats_std.append(new_stats[8])
        self.pixels_measured += i
        small_window_statistics = self.block_splitting_statistics(small_window_measurements)
        return small_window_measurements, small_window_statistics
    def sample_random_pixels(self, small_window_measurements, location):
        """Measure one uniformly random pixel of the block at *location*.

        The value is written into *small_window_measurements* in place;
        the (mutated) window is also returned for convenience.
        """
        x, y = random.randint(0, self.block_size - 1), random.randint(0, self.block_size - 1)
        # Block-grid coordinates -> absolute gate parameters.
        loc_x, loc_y = self.pygor.get_params([location[0]])+x, self.pygor.get_params([location[1]])+y
        point_measurement = self.pygor.do0d(loc_x, loc_y)
        small_window_measurements[x, y] = point_measurement
        return small_window_measurements
    def block_splitting_statistics(self, measurement):
        """18-feature statistics of a *partially* sampled window.

        Like block_splitting, but zero-valued (unsampled) pixels are
        excluded from each sub-block's mean/std.
        """
        measurement_size = np.shape(measurement)[0]
        n_over_2 = math.floor(measurement_size / 2.0)
        n_over_4 = math.floor(measurement_size / 4.0)
        n_3_over_4 = math.floor(3 * measurement_size / 4.0)
        # Nine overlapping sub-blocks: four quadrants, the centre block,
        # and four edge-centred half blocks.
        block_1 = measurement[0:n_over_2, 0:n_over_2]
        block_2 = measurement[0:n_over_2, n_over_2:measurement_size]
        block_3 = measurement[n_over_2:measurement_size, 0:n_over_2]
        block_4 = measurement[n_over_2:measurement_size, n_over_2:measurement_size]
        block_5 = measurement[n_over_4:n_3_over_4, n_over_4:n_3_over_4]
        block_6 = measurement[n_over_4:n_3_over_4, 0:n_over_2]
        block_7 = measurement[n_over_4:n_3_over_4, n_over_2:measurement_size]
        block_8 = measurement[0:n_over_2, n_over_4:n_3_over_4]
        block_9 = measurement[n_over_2:measurement_size, n_over_4:n_3_over_4]
        blocks = [block_1, block_2, block_3, block_4, block_5, block_6, block_7, block_8, block_9]
        mean_current = []
        stds_current = []
        for block in blocks:
            data_set = []
            for row in block:
                for element in row:
                    if element != 0.0:  # skip unsampled pixels
                        data_set.append(element)
            if data_set == []:
                # No pixel of this block sampled yet: np.mean(0)/np.std(0)
                # yield raw mean 0 and std 0 for the block.
                data_set = 0
            mean_current.append(np.mean(data_set))
            stds_current.append(np.std(data_set))
        normalised_mean = list(map(self.normalise_mean_function, mean_current))
        normalised_stds = list(map(self.normalise_std_function, stds_current))
        # Concatenate into a single 18-feature array: 9 means then 9 stds.
        current_statistics = np.concatenate((normalised_mean, normalised_stds))
        return current_statistics
def normalise_mean_function(self, mean):
normalised_mean = (mean - self.environment_min_current)/(self.environment_max_current - self.environment_min_current)
return normalised_mean
def normalise_std_function(self, std):
normalised_std = (std)/(self.standard_deviation_for_normalisation)
return normalised_std
def block_splitting(self,measurement):
measurement_size = np.shape(measurement)[0]
n_over_2 = math.floor(measurement_size / 2.0)
n_over_4 = math.floor(measurement_size / 4.0)
n_3_over_4 = math.floor(3 * measurement_size / 4.0)
# Split into blocks based:
block_1 = measurement[0:n_over_2, 0:n_over_2]
block_2 = measurement[0:n_over_2, n_over_2:measurement_size]
block_3 = measurement[n_over_2:measurement_size, 0:n_over_2]
block_4 = measurement[n_over_2:measurement_size, n_over_2:measurement_size]
block_5 = measurement[n_over_4:n_3_over_4, n_over_4:n_3_over_4]
block_6 = measurement[n_over_4:n_3_over_4, 0:n_over_2]
block_7 = measurement[n_over_4:n_3_over_4, n_over_2:measurement_size]
block_8 = measurement[0:n_over_2, n_over_4:n_3_over_4]
block_9 = measurement[n_over_2:measurement_size, n_over_4:n_3_over_4]
# Concatenate data into single 18-feature array:
mean_current = np.array(
[np.mean(block_1), np.mean(block_2), np.mean(block_3), np.mean(block_4), np.mean(block_5), np.mean(block_6),
np.mean(block_7), np.mean(block_8), np.mean(block_9)])
stds_current = np.array(
[np.std(block_1), np.std(block_2), np.std(block_3), np.std(block_4), np.std(block_5), np.std(block_6),
np.std(block_7), np.std(block_8), np.std(block_9)])
normalised_mean = list(map(self.normalise_mean_function,mean_current))
normalised_stds = list(map(self.normalise_std_function,stds_current))
current_statistics = np.concatenate((normalised_mean, normalised_stds))
return current_statistics
def possible_actions_from_location(self, location=None):
if location is None:
location = self.current_pos
irow, icol = location
possible_actions = []
if irow > 0: # decrease d1
possible_actions.append(0)
if irow < self.dim[0] - 1: # increase d1
possible_actions.append(1)
if icol > 0: # decrease d2
possible_actions.append(2)
if icol < self.dim[1] - 1: # increase d2
possible_actions.append(3)
if irow < (self.dim[0] - 1) and icol < (self.dim[1] - 1): # decrease d1 and d2
possible_actions.append(4)
if (irow > 0) and (icol > 0): # increase d1 and d2
possible_actions.append(5)
# possible_actions=[0,1,2,3,4,5]
return possible_actions
def step(self, action):
# perform an action to move to the next state
# 0: Decrease dim 1
# 1: Increase dim 1
# 2: Decrease dim 2
# 3: Increase dim 2
# 4: Decrease both
# 5: Increase both
flagoutside = 0
# flagRepeat=0
if action == 0:
if self.current_pos[0] == 0:
flagoutside = 1
print("cannot decrease d1")
else:
self.current_pos[0] = self.current_pos[0] - 1
elif action == 1:
if self.current_pos[0] == self.dim[0] - 1:
flagoutside = 1
print("cannot increase d1")
else:
self.current_pos[0] = self.current_pos[0] + 1
elif action == 2:
if self.current_pos[1] == 0:
flagoutside = 1
print("cannot decrease d2")
else:
self.current_pos[1] = self.current_pos[1] - 1
elif action == 3:
if self.current_pos[1] == self.dim[1] - 1:
flagoutside = 1
print("cannot decrease d2")
else:
self.current_pos[1] = self.current_pos[1] + 1
elif action == 4:
if self.current_pos[0] < self.dim[0] - 1 and self.current_pos[1] < self.dim[1] - 1:
self.current_pos[1] = self.current_pos[1] + 1
self.current_pos[0] = self.current_pos[0] + 1
else:
flagoutside = 1
print("cannot increase both d1 and d2")
elif action == 5:
if self.current_pos[0] > 0 and self.current_pos[1] > 0:
self.current_pos[1] = self.current_pos[1] - 1
self.current_pos[0] = self.current_pos[0] - 1
else:
flagoutside = 1
print("cannot decrease both d1 and d2")
else:
print("action is 0-6")
id1, id2 = self.current_pos
if self.visit_map[id1, id2] == 1:
reward = 0
statistics =self.small_window_statistics[id1][id2]
done = False
location = np.copy(self.current_pos)
revisited = True
return statistics, reward, done, location, revisited
measurement, statistics = self.random_point_measurement([id2, id1])
self.small_window_measurements[id1][id2], self.small_window_statistics[id1][id2] = measurement, statistics
self.visit_map[id1, id2] += 1
reward = -1
done = False
revisited = False
classification = self.check_for_bias_triangle(id1,id2)
if classification == 1:
reward += 100
done = True
location = np.copy(self.current_pos)
return statistics, reward, done, location, revisited
def normalise(self,x):
x_max = np.amax(x)
x_min = np.amin(x)
y = (x - x_min) / (x_max - x_min)
return y
def load_cnn(self):
#model_binary_classifier = models.load_model('../../classifier/bias_triangle_binary_classifier.h5')
model_binary_classifier = models.load_model('../../classifier/bias_triangle_binary_classifier.h5')
return model_binary_classifier
    def predict_cnn(self, ii, jj):
        """Run the CNN on the measured block (ii, jj) and store the score
        in self.cnn_prediction[ii, jj]."""
        large_patch = self.small_window_measurements[ii][jj]
        x, y = np.shape(large_patch)
        # The classifier expects a normalised 32x32 single-channel image.
        test_image = tf.image.resize(self.normalise(np.array(large_patch)).reshape(-1, x, y, 1), (32, 32))
        self.cnn_prediction[ii, jj] = self.model_binary_classifier.predict(test_image, steps=1)
        return
    def check_for_bias_triangle(self, ii, jj):
        """Classify block (ii, jj); returns 1 for a bias triangle, else 0.

        Two stages: a cheap threshold pre-classifier on the 9 block means,
        then (only if that passes) a second full scan plus CNN prediction.
        Updates pre_classification_prediction, cnn_prediction, isquantum
        and pixels_measured along the way.
        """
        statistics = self.small_window_statistics[ii][jj]
        # NOTE(review): the assignment above is immediately overwritten by
        # the measurement below; it looks like a dead store.
        self.small_window_measurements[ii][jj], statistics = self.take_measurement([ii, jj])
        self.pre_classification_prediction[ii, jj] = 0
        means = statistics[:9]
        # Count block means inside the plausible current window set up by
        # pre_classify().
        for mean in means:
            if (abs(mean) > self.threshold_lower) and (abs(mean) < self.threshold_upper):
                self.pre_classification_prediction[ii, jj] += 1
        if self.pre_classification_prediction[ii, jj] == 0:
            return 0
        # Candidate block: take the full measurement and ask the CNN.
        measurement, statistics = self.take_measurement([ii, jj])
        self.pixels_measured += self.block_size*self.block_size
        self.small_window_measurements[ii][jj], self.small_window_statistics[ii][jj] = measurement, statistics
        self.predict_cnn(ii, jj)
        if self.cnn_prediction[ii, jj] > 0.5:
            self.isquantum[ii, jj] = 1
            return 1
        else:
            self.isquantum[ii, jj] = 0
            return 0
    def where_is_quantum(self):
        """Exhaustively classify every block of the grid.

        Resets self.isquantum (plus the threshold/prediction maps) and
        returns the (dim x dim) 0/1 map of bias-triangle locations.
        """
        self.model_binary_classifier = self.load_cnn()
        ndim1, ndim2 = self.dim
        self.isquantum = np.zeros(self.dim)
        self.threshold_test = np.zeros(self.dim)
        self.prediction = np.zeros(self.dim)
        for ii in tqdm(range(ndim1)):
            for jj in range(ndim2):
                self.isquantum[ii, jj] = self.check_for_bias_triangle(ii, jj)
        return self.isquantum
| 38.094624
| 200
| 0.639664
|
4a038b5a1b491320e78f7d70682e4b1d99216042
| 23,266
|
py
|
Python
|
ankisync/anki.py
|
patarapolw/ankisync
|
fabffef15f57f457a7b2ba4d44d57aead0488ba7
|
[
"MIT"
] | 65
|
2018-10-31T09:59:55.000Z
|
2022-02-28T15:44:59.000Z
|
ankisync/anki.py
|
patarapolw/ankisync
|
fabffef15f57f457a7b2ba4d44d57aead0488ba7
|
[
"MIT"
] | 10
|
2018-11-04T13:49:37.000Z
|
2021-05-18T08:22:27.000Z
|
ankisync/anki.py
|
patarapolw/ankisync
|
fabffef15f57f457a7b2ba4d44d57aead0488ba7
|
[
"MIT"
] | 4
|
2020-02-11T12:50:13.000Z
|
2021-05-28T14:39:50.000Z
|
from typing import Union
import warnings
import psutil
from time import time
import tinydb as tdb
from tinydb.storages import MemoryStorage
from . import anki_db
from .dir import get_collection_path
from .builder.models import ModelBuilder, FieldBuilder
from .builder.decks import DeckBuilder, DConfBuilder
from .builder.notes import NoteBuilder, CardBuilder
class Anki:
    def __init__(self, anki2_path=None, disallow_unsafe: Union[bool, None]=False, **kwargs):
        """Open an Anki collection database via peewee.

        :param anki2_path: path to collection.anki2; resolved from the
            default profile location (optionally ``account_name``) if None.
        :param disallow_unsafe: True -> raise on name-based lookups,
            False -> warn, None -> allow silently (see _warning()).
        """
        if anki2_path is None:
            anki2_path = get_collection_path(account_name=kwargs.setdefault('account_name', None))
        try:
            # NOTE(review): `assert` is stripped under `python -O`; this
            # guard against a concurrently running Anki would then vanish.
            assert 'Anki' not in (p.name() for p in psutil.process_iter()), \
                "Please close Anki first before accessing Application Data collection.anki2 directly."
        except psutil.ZombieProcess as e:
            warnings.warn(e)
        # Consumed above; must not be forwarded to peewee's init().
        kwargs.pop('account_name')
        anki_db.database.init(anki2_path, pragmas={
            'foreign_keys': 0
        }, **kwargs)
        self.disallow_unsafe = disallow_unsafe
        # In-memory TinyDB mirror of the notes, filled lazily.
        self.tdb = tdb.TinyDB(storage=MemoryStorage)
    def __enter__(self):
        # Context-manager support; no extra setup beyond __init__.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to release explicitly; peewee manages the connection.
        pass
    def __iter__(self):
        # Iterating an Anki yields note dicts (see iter_notes).
        yield from self.iter_notes()
    def iter_notes(self):
        """Yield one dict per note: field values keyed by field name, plus
        '_nid', '_mid' and '_tags' metadata keys."""
        for db_note in anki_db.Notes.select():
            record = db_note.flds
            record = [db_note.id, db_note.mid] + record + [db_note.tags]
            header = self.model_field_names_by_id(db_note.mid)
            header = ['_nid', '_mid'] + header + ['_tags']
            yield dict(zip(header, record))
@classmethod
def iter_cards(cls):
for db_card in anki_db.Cards.select():
db_note = anki_db.Notes.get(id=db_card.nid)
record = db_note.flds
record += db_note.tags
db_col = anki_db.Col.get()
db_deck = db_col.decks[str(db_card.did)]
record += db_deck['name']
db_model = db_col.models[str(db_note.mid)]
model_name = db_model['name']
template_names = [f['name'] for f in db_model['tmpls']]
try:
template = template_names[db_card.ord]
except IndexError:
template = template_names[0]
record += [model_name, template, db_card.ord]
header = [f['name'] for f in db_model['flds']]
header += ['tags', 'deck', 'model', 'template', 'order']
yield dict(zip(header, record))
    @classmethod
    def iter_excel(cls):
        """Yield card dicts with the tag list flattened to one
        space-separated string, suitable for tabular export."""
        for d in cls.iter_cards():
            d['tags'] = ' '.join(d['tags'])
            yield d
def _warning(self):
msg = 'Please use _id() methods instead.'
if self.disallow_unsafe is True:
raise ValueError(msg)
elif self.disallow_unsafe is False:
warnings.warn(msg)
else:
pass
    @classmethod
    def init(cls,
             first_model: Union[ModelBuilder, dict],
             first_deck: Union[DeckBuilder, str]='Default',
             first_dconf: Union[DConfBuilder, dict]=None,
             first_note_data: Union[bool, dict]=True):
        """Create the collection schema and seed it with one model, deck,
        deck-config and (optionally) a first note with its cards.

        Dict arguments are upgraded to the corresponding *Builder; rows
        that already exist are left untouched (get_or_none guards).
        """
        anki_db.database.create_tables([anki_db.Col, anki_db.Notes, anki_db.Cards, anki_db.Revlog, anki_db.Graves])
        if not isinstance(first_model, ModelBuilder):
            first_model = ModelBuilder(
                name=first_model.pop('name'),
                fields=first_model.pop('fields'),
                templates=first_model.pop('templates'),
                type_=first_model.pop('type_', 0),
                **first_model
            )
        db_models = dict()
        db_models[str(first_model.id)] = first_model
        if first_dconf is None:
            first_dconf = DConfBuilder('Default')
        elif not isinstance(first_dconf, DConfBuilder):
            first_dconf = DConfBuilder(
                name=first_dconf.pop('name'),
                **first_dconf
            )
        db_dconf = dict()
        db_dconf[str(first_dconf.id)] = first_dconf
        if isinstance(first_deck, str):
            first_deck = DeckBuilder(first_deck, dconf=first_dconf.id)
        db_decks = dict()
        db_decks[str(first_deck.id)] = first_deck
        # Only create the Col singleton if none exists yet.
        if not anki_db.Col.get_or_none():
            anki_db.Col.create(
                models=db_models,
                decks=db_decks,
                dconf=db_dconf
            )
        if first_note_data:
            if not anki_db.Notes.get_or_none():
                if first_note_data is True:
                    first_note_data = dict()
                first_note = NoteBuilder(model_id=first_model.id,
                                         model_field_names=first_model.field_names,
                                         data=first_note_data)
                db_notes = anki_db.Notes.create(**first_note)
                first_note.id = db_notes.id
                # One card per template of the model.
                for template_name in first_model.template_names:
                    first_card = CardBuilder(first_note, first_deck.id, model=first_model, template=template_name)
                    anki_db.Cards.create(**first_card)
    @classmethod
    def add_model(cls, name, fields, templates, **kwargs):
        """Register a new note model in the collection; returns its id."""
        db_col = anki_db.Col.get()
        db_models = db_col.models
        new_model = ModelBuilder(name, fields, templates, **kwargs)
        db_models[str(new_model.id)] = new_model
        # Re-assign so peewee serialises the modified JSON column.
        db_col.models = db_models
        db_col.save()
        return new_model.id
    @classmethod
    def iter_model(cls, model_id):
        """Yield each note of *model_id* as {'id': nid, field: value, ...}."""
        header = cls.model_field_names_by_id(model_id)
        for db_note in anki_db.Notes.select().where(anki_db.Notes.mid == model_id):
            yield dict(
                id=db_note.id,
                **dict(zip(header, db_note.flds))
            )
    def get_tinydb_table(self):
        """Return the in-memory TinyDB mirror of all notes, populating it
        from SQLite on first use."""
        if len(self.tdb) == 0:
            for note_data in self:
                self.tdb.insert(note_data)
        return self.tdb
    @classmethod
    def change_deck_by_id(cls, card_ids, deck_id)->None:
        """Move the given cards to another deck in one bulk UPDATE."""
        anki_db.Cards.update(did=deck_id).where(anki_db.Cards.id.in_(card_ids)).execute()
    @classmethod
    def delete_decks_by_id(cls, deck_ids, cards_too=False)->None:
        """Remove decks from the collection JSON; optionally delete their
        cards too."""
        db_col = anki_db.Col.get()
        db_decks = db_col.decks
        for deck_id in deck_ids:
            db_decks.pop(str(deck_id))
            if cards_too:
                for db_card in anki_db.Cards.select().where(anki_db.Cards.did == int(deck_id)):
                    db_card.delete_instance()
        # Re-assign so peewee serialises the modified JSON column.
        db_col.decks = db_decks
        db_col.save()
    @classmethod
    def model_by_id(cls, model_id) -> dict:
        """Return the model JSON dict stored under *model_id*."""
        return anki_db.Col.get().models[str(model_id)]
    @classmethod
    def model_field_names_by_id(cls, model_id):
        """Return the ordered list of field names of a model."""
        model = cls.model_by_id(model_id)
        return [f['name'] for f in model['flds']]
    @classmethod
    def model_template_names_by_id(cls, model_id):
        """Return the ordered list of template names of a model."""
        model = cls.model_by_id(model_id)
        return [t['name'] for t in model['tmpls']]
    @classmethod
    def note_to_cards(cls, note_id):
        """Map template name -> card id for every card of a note."""
        def _get_dict():
            db_note = anki_db.Notes.get(id=note_id)
            template_names = cls.model_template_names_by_id(db_note.mid)
            # A card's ord indexes into the model's template list.
            for c in anki_db.Cards\
                    .select(anki_db.Cards.id, anki_db.Cards.ord, anki_db.Cards.nid)\
                    .where(anki_db.Cards.nid == note_id):
                yield template_names[c.ord], c.id
        return dict(_get_dict())
    @classmethod
    def card_set_next_review(cls, card_id, type_, queue, due):
        """Set one card's scheduling state directly.

        :param card_id:
        :param type_:
            -- 0=new, 1=learning, 2=due, 3=filtered
        :param queue:
            -- -3=sched buried, -2=user buried, -1=suspended,
            -- 0=new, 1=learning, 2=due (as for type)
            -- 3=in learning, next rev in at least a day after the previous review
        :param due:
            -- Due is used differently for different card types:
            --   new: note id or random int
            --   due: integer day, relative to the collection's creation time
            --   learning: integer timestamp
        :return:
        """
        db_card = anki_db.Cards.get(id=card_id)
        db_card.type = type_
        db_card.queue = queue
        db_card.due = due
        db_card.save()
    @classmethod
    def card_set_stat(cls, card_id, reps, lapses, **revlog):
        """Update a card's review counters and append a revlog entry
        atomically.

        :param card_id:
        :param reps:
            -- number of reviews
        :param lapses:
            -- the number of times the card went from a "was answered correctly"
            -- to "was answered incorrectly" state
        :param revlog: columns of the new revlog row:
            usn integer not null,
                -- update sequence number: for finding diffs when syncing.
                -- See the description in the cards table for more info
            ease integer not null,
                -- which button you pushed to score your recall.
                -- review: 1(wrong), 2(hard), 3(ok), 4(easy)
                -- learn/relearn: 1(wrong), 2(ok), 3(easy)
            ivl integer not null,
                -- interval
            lastIvl integer not null,
                -- last interval
            factor integer not null,
                -- factor
            time integer not null,
                -- how many milliseconds your review took, up to 60000 (60s)
            type integer not null
                -- 0=learn, 1=review, 2=relearn, 3=cram
        :return:
        """
        # Counter update and log insert must succeed or fail together.
        with anki_db.database.atomic():
            db_card = anki_db.Cards.get(id=card_id)
            db_card.reps = reps
            db_card.lapses = lapses
            db_card.save()
            anki_db.Revlog.create(
                cid=db_card.id,
                **revlog
            )
    @classmethod
    def get_deck_config_by_deck_name(cls, deck_name):
        """Return the deck-config (dconf) JSON used by the named deck."""
        deck_id = cls.deck_names_and_ids()[deck_name]
        conf_id = anki_db.Col.get().decks[str(deck_id)]['conf']
        db_dconf = anki_db.Col.get().dconf
        return db_dconf[str(conf_id)]
    @classmethod
    def deck_config_names_and_ids(cls):
        """Map deck-config name -> integer dconf id."""
        def _gen_dict():
            for dconf_id, d in anki_db.Col.get().dconf.items():
                yield d['name'], int(dconf_id)
        return dict(_gen_dict())
    @classmethod
    def note_info(cls, note_id):
        """Return AnkiConnect-style info for one note: ids, tags and a
        mapping of its fields."""
        db_note = anki_db.Notes.get(id=note_id)
        header, row = cls._raw_note_info(db_note)
        return {
            'noteId': db_note.id,
            'modelId': db_note.mid,
            'tags': db_note.tags,
            'fields': dict(zip(header, row))
        }
@classmethod
def _raw_note_info(cls, db_note):
db_model = cls.model_by_id(db_note.mid)
return db_model['flds'], db_note.flds
    def _extract_ac_note(self, ac_note):
        """Pull (fields dict, model id) out of an AnkiConnect-style note.

        Falls back to a model-name lookup — an "unsafe" operation gated by
        _warning() — when no 'modelId' is supplied.
        """
        data = ac_note['fields']
        model_id = ac_note.get('modelId', None)
        if model_id is None:
            self._warning()
            model_name = ac_note['modelName']
            model_id = self.model_names_and_ids()[model_name]
        return data, model_id
    def upsert_note(self, ac_note, defaults_key='defaults', _lock=True):
        """Update notes matching ac_note's fields, or insert a new one.

        ac_note['fields'] minus the *defaults_key* entry is the match
        condition; the defaults are merged into the stored payload —
        mirroring peewee's get_or_create:
        http://docs.peewee-orm.com/en/latest/peewee/api.html?highlight=get_or_create#Model.get_or_create

        :param defaults_key: key inside ac_note['fields'] holding defaults
        :param _lock: wrap SQLite writes in a transaction (upsert_notes
            passes False because it manages its own transaction)
        :return: list of affected note ids
        """
        def _update_fields():
            for note_id in matching_ids:
                self.update_note_fields(note_id=note_id, fields=data)
            self.add_tags(note_ids=matching_ids, tags=ac_note.get('tags', []))
        data, model_id = self._extract_ac_note(ac_note)
        tdb_table = self.get_tinydb_table()
        original_data = data.copy()
        # Merge the defaults into the payload, but match only on the
        # original (non-default) fields.
        data.update(data.pop(defaults_key))
        matching_t_doc_ids = tdb_table.update(data,
                                              self._build_tdb_query(original_data,
                                                                    model_id=model_id,
                                                                    _skip=defaults_key))
        if matching_t_doc_ids:
            matching_ids = [self.tdb.get(doc_id=doc_id)['_nid'] for doc_id in matching_t_doc_ids]
            if _lock:
                with anki_db.database.atomic():
                    _update_fields()
            else:
                _update_fields()
            return matching_ids
        else:
            return [self._add_note(data, model_id, ac_note)]
    def upsert_notes(self, ac_notes, defaults_key='defaults'):
        """Upsert many notes inside one transaction; returns all note ids."""
        note_ids_2d = []
        with anki_db.database.atomic():
            for ac_note in ac_notes:
                note_ids_2d.append(self.upsert_note(ac_note, defaults_key=defaults_key, _lock=False))
        # Flatten the per-note id lists.
        return sum(note_ids_2d, [])
    def search_notes(self, conditions):
        """Return TinyDB docs of notes whose fields equal *conditions*."""
        return self.get_tinydb_table().search(self._build_tdb_query(conditions))
    @staticmethod
    def _build_tdb_query(data, model_id=None, _skip=None):
        """AND together equality tests for every key in *data* except
        *_skip*, optionally constrained to one model id.

        Returns None when *data* contains no usable keys.
        """
        query = None
        for k, v in data.items():
            if k != _skip:
                if query is None:
                    query = (tdb.Query()[k] == v)
                else:
                    query &= (tdb.Query()[k] == v)
        if model_id:
            query &= (tdb.Query()['_mid'] == model_id)
        return query
def _add_note(self, data, model_id, ac_note):
    """Create a new note (plus one card per model template) and mirror it
    into the TinyDB side-table; return the new note id.
    """
    deck_id = ac_note.get('deckId', None)
    if deck_id is None:
        self._warning()
        deck_name = ac_note['deckName']
        deck_id = self.deck_names_and_ids().get(deck_name, None)
        if deck_id is None:
            # NOTE(review): passes conf= while change_deck passes dconf= --
            # confirm which keyword DeckBuilder actually expects.
            deck_id = self.create_deck(deck_name, conf=ac_note.get('dconf', 1))
    tags = ac_note.get('tags', [])
    model_field_names = self.model_field_names_by_id(model_id)
    first_note = NoteBuilder(model_id=model_id,
                             model_field_names=model_field_names,
                             data=data,
                             tags=tags)
    db_note = anki_db.Notes.create(**first_note)
    first_note.id = db_note.id
    # One card per template defined on the model.
    for i, _ in enumerate(self.model_template_names_by_id(model_id)):
        first_card = CardBuilder(first_note, deck_id, template=i)
        anki_db.Cards.create(**first_card)
    tdb_table = self.get_tinydb_table()
    tdb_table.insert({
        '_nid': db_note.id,
        '_mid': model_id,
        '_tags': tags,
        **data
    })
    return db_note.id
@classmethod
def update_note(cls, note_id, data, tags):
    """Convenience wrapper: overwrite a note's fields, then attach *tags*."""
    cls.update_note_fields(note_id, data)
    cls.add_tags([note_id], tags)
################################
# Original AnkiConnect Methods #
################################
@classmethod
def deck_names(cls):
    """List the name of every deck in the collection."""
    decks = anki_db.Col.get().decks
    return [deck['name'] for deck in decks.values()]
@classmethod
def deck_names_and_ids(cls):
    """Map deck name -> integer deck id."""
    return {deck['name']: int(did)
            for did, deck in anki_db.Col.get().decks.items()}
@classmethod
def get_decks(cls, card_ids):
    """Map deck name -> the subset of *card_ids* in that deck.

    Decks containing none of the cards are omitted.
    """
    result = {}
    for did, deck in anki_db.Col.get().decks.items():
        db_cards = anki_db.Cards.select(anki_db.Cards.id, anki_db.Cards.did) \
            .where((anki_db.Cards.did == int(did)) & (anki_db.Cards.id.in_(card_ids)))
        ids_in_deck = [c.id for c in db_cards]
        if ids_in_deck:
            result[deck['name']] = ids_in_deck
    return result
@classmethod
def create_deck(cls, deck_name, desc='', dconf=1, **kwargs):
    """Create *deck_name*, including any missing '::'-separated parents,
    and return its id. Extra kwargs are forwarded to DeckBuilder.
    """
    db_col = anki_db.Col.get()
    db_decks = db_col.decks
    existing_decks = cls.deck_names()
    deck_name_parts = deck_name.split('::')
    sub_deck_parts = []
    # Walk parent -> child so every ancestor deck exists before its child.
    for i, part in enumerate(deck_name_parts):
        sub_deck_parts.append(part)
        sub_deck = '::'.join(sub_deck_parts)
        if sub_deck not in existing_decks:
            # Millisecond timestamp + i keeps ids unique across the
            # sub-decks created within this single call.
            new_deck = DeckBuilder(name=sub_deck, desc=desc, dconf=dconf, id_=int(time() * 1000) + i, **kwargs)
            db_decks[str(new_deck.id)] = new_deck
    db_col.decks = db_decks
    db_col.save()
    return cls.deck_names_and_ids()[deck_name]
def change_deck(self, card_ids, deck_name, dconf=1):
    """Move *card_ids* into *deck_name*, creating the deck if needed."""
    self._warning()
    deck_id = self.deck_names_and_ids().get(deck_name)
    if deck_id is None:
        deck_id = self.create_deck(deck_name, dconf=dconf)
    self.change_deck_by_id(card_ids, deck_id)
def delete_decks(self, deck_names, cards_too=False):
    """Delete the named decks; optionally delete their cards as well."""
    self._warning()
    name_to_id = self.deck_names_and_ids()
    ids = [name_to_id[name] for name in deck_names]
    self.delete_decks_by_id(ids, cards_too)
def get_deck_config(self, deck_name):
    """Return the deck-options (dconf) group used by *deck_name*."""
    self._warning()
    return self.get_deck_config_by_deck_name(deck_name)
@classmethod
def save_deck_config(cls, config: dict):
    """Insert/replace a deck-options group built from *config*; return its id.

    NOTE: mutates *config* (pops the 'name' key).
    """
    db_col = anki_db.Col.get()
    all_dconf = db_col.dconf
    new_dconf = DConfBuilder(config.pop('name'), **config)
    all_dconf[str(new_dconf.id)] = new_dconf
    db_col.dconf = all_dconf
    db_col.save()
    return new_dconf.id
@classmethod
def set_deck_config_id(cls, deck_names, config_id):
    """Point every deck in *deck_names* at options group *config_id*.

    :return: True when at least one deck was updated.
    """
    db_col = anki_db.Col.get()
    db_decks = db_col.decks
    touched = False
    for name, did in cls.deck_names_and_ids().items():
        if name not in deck_names:
            continue
        db_decks[str(did)]['conf'] = config_id
        touched = True
    if touched:
        db_col.decks = db_decks
        db_col.save()
    return touched
@classmethod
def clone_deck_config_id(cls, dconf_name, clone_from: int):
    """Create a deck-options group *dconf_name* copying the settings of
    group *clone_from*; return the new group's id.
    """
    db_col = anki_db.Col.get()
    db_dconf = db_col.dconf
    new_dconf = DConfBuilder(dconf_name)
    # NOTE(review): this copies *every* key from the source group, which
    # looks like it would clobber the new group's own id/name entries --
    # confirm DConfBuilder semantics.
    new_dconf.update(db_dconf[str(clone_from)])
    # Consistency fix: every sibling method keys db_col.dconf by str(id);
    # the original stored this entry under an int key.
    db_dconf[str(new_dconf.id)] = new_dconf
    db_col.dconf = db_dconf
    db_col.save()
    return new_dconf.id
@classmethod
def remove_deck_config_id(cls, config_id):
    """Delete the deck-options group *config_id* from the collection."""
    db_col = anki_db.Col.get()
    db_dconf = db_col.dconf
    # Consistency fix: dconf is keyed by str(id) elsewhere (see
    # save_deck_config); popping the raw int would KeyError.
    db_dconf.pop(str(config_id))
    db_col.dconf = db_dconf
    db_col.save()
    return True
@classmethod
def model_names(cls):
    """List the name of every note model in the collection."""
    models = anki_db.Col.get().models
    return [model['name'] for model in models.values()]
@classmethod
def model_names_and_ids(cls):
    """Map model name -> integer model id."""
    return {model['name']: int(mid)
            for mid, model in anki_db.Col.get().models.items()}
@classmethod
def model_field_names(cls, model_name):
    """Field names of the model called *model_name*."""
    return cls.model_field_names_by_id(cls.model_names_and_ids()[model_name])
@classmethod
def model_template_names(cls, model_name):
    """Template names of the model called *model_name*."""
    return cls.model_template_names_by_id(cls.model_names_and_ids()[model_name])
# @classmethod
# def model_fields_on_templates(cls, model_name):
# raise NotImplementedError
def add_note(self, ac_note):
    """Insert a brand-new note described by AnkiConnect-style *ac_note*."""
    fields, model_id = self._extract_ac_note(ac_note)
    return self._add_note(fields, model_id, ac_note)
def add_notes(self, ac_notes):
    """Insert many notes; return the list of new note ids."""
    return list(map(self.add_note, ac_notes))
# @classmethod
# def can_add_notes(cls, ac_notes):
# raise NotImplementedError
@classmethod
def update_note_fields(cls, note_id, fields: dict):
    """Overwrite a note's field values by field name.

    Field names not yet on the note's model are appended to the model as
    new fields. NOTE(review): *fields* is consumed via pop() -- the caller's
    dict is emptied of every matched key.
    """
    db_note = anki_db.Notes.get(id=note_id)
    field_names = cls.model_field_names_by_id(db_note.mid)
    prev_note_fields = db_note.flds
    note_fields = []
    for i, name in enumerate(field_names):
        note_field = fields.pop(name, None)
        if note_field is not None:
            note_fields.append(note_field)
        else:
            # Keep the previous value for fields not being updated.
            note_fields.append(prev_note_fields[i])
    if fields:
        # Leftover keys are new fields: extend the model definition and
        # append their values to the note, then persist the model change.
        model = cls.model_by_id(db_note.mid)
        for k, v in fields.items():
            model['flds'].append(
                FieldBuilder(name=k, order=len(model['flds']))
            )
            note_fields.append(v)
        col = anki_db.Col.get()
        m = col.models
        m[str(model['id'])] = model
        col.models = m
        col.save()
    anki_db.Notes.update(
        flds=note_fields
    ).where(anki_db.Notes.id == note_id).execute()
@classmethod
def add_tags(cls, note_ids, tags: Union[str, list]):
    """Add *tags* (a string or an iterable of strings) to every listed note.

    NOTE(review): anki_db.Notes.tags inside the update expression is a
    column reference, not a per-row value; building the union with
    set/list at the Python level here looks suspect -- confirm the
    Notes.tags field type before relying on this.
    """
    if isinstance(tags, str):
        tags = [tags]
    else:
        tags = list(tags)
    anki_db.Notes.update(
        tags=sorted(set(list(anki_db.Notes.tags)) | set(tags))
    ).where(anki_db.Notes.id.in_(note_ids)).execute()
@classmethod
def remove_tags(cls, note_ids, tags: Union[str, list]):
    """Remove *tags* (a string or an iterable of strings) from the notes.

    NOTE(review): as in add_tags, anki_db.Notes.tags here is a column
    reference, not a per-row value; the set arithmetic at Python level
    looks suspect -- confirm against the Notes model's field type.
    """
    if isinstance(tags, str):
        tags = [tags]
    anki_db.Notes.update(
        tags=sorted(set(anki_db.Notes.tags) - set(tags))
    ).where(anki_db.Notes.id.in_(note_ids)).execute()  # BUG FIX: .execute() was missing -- the query was built but never run
@classmethod
def get_tags(cls):
    """Return the sorted union of tags across every note.

    NOTE(review): set.update(db_note.tags) assumes tags is an iterable of
    tag strings; if it were a plain string this would collect individual
    characters -- confirm the field's Python-side type.
    """
    all_tags = set()
    for db_note in anki_db.Notes.select(anki_db.Notes.tags):
        all_tags.update(db_note.tags)
    return sorted(all_tags)
# @classmethod
# def find_notes(cls, query: str):
# raise NotImplementedError
@classmethod
def notes_info(cls, note_ids):
    """Collect note_info() for each id in *note_ids*."""
    return list(map(cls.note_info, note_ids))
@classmethod
def suspend(cls, card_ids):
    """Suspend the cards (queue = -1); True when any row changed."""
    changed = anki_db.Cards.update(queue=-1) \
        .where(anki_db.Cards.id.in_(card_ids)).execute()
    return changed > 0
@classmethod
def unsuspend(cls, card_ids):
    """Restore each card's queue from its type; True when any row changed."""
    changed = anki_db.Cards.update(queue=anki_db.Cards.type) \
        .where(anki_db.Cards.id.in_(card_ids)).execute()
    return changed > 0
@classmethod
def are_suspended(cls, card_ids):
    """For each card id, whether that card sits in the suspended queue."""
    return [anki_db.Cards.get(id=card_id).queue == -1
            for card_id in card_ids]
@classmethod
def are_due(cls, card_ids):
    """For each card id, whether that card is of review type (type == 2)."""
    return [anki_db.Cards.get(id=card_id).type == 2
            for card_id in card_ids]
# @classmethod
# def get_intervals(cls, card_ids, complete=False):
# raise NotImplementedError
#
# @classmethod
# def find_cards(cls, query):
# raise NotImplementedError
@classmethod
def cards_to_notes(cls, card_ids):
    """Map card ids to the sorted, de-duplicated ids of their parent notes."""
    note_ids = set()
    query = anki_db.Cards.select(anki_db.Cards.id, anki_db.Cards.nid) \
        .where(anki_db.Cards.id.in_(card_ids))
    for db_card in query:
        # BUG FIX: nid is a single note id (cards_info wraps it as
        # [db_card.nid]); set.update() would try to iterate it and raise
        # TypeError -- use add() for a scalar.
        note_ids.add(db_card.nid)
    return sorted(note_ids)
@classmethod
def cards_info(cls, card_ids):
    """Concatenate notes_info() for the parent note of every card."""
    info = []
    for card_id in card_ids:
        db_card = anki_db.Cards.get(id=card_id)
        info.extend(cls.notes_info([db_card.nid]))
    return info
| 32.046832
| 119
| 0.579687
|
4a038baa200ba16d78a803da8abcf7a6dbea5de3
| 6,114
|
py
|
Python
|
octavia/controller/worker/flows/listener_flows.py
|
Sirius21c/octavia
|
e7057a566689967f23ede519235d6075a01a6d61
|
[
"Apache-2.0"
] | null | null | null |
octavia/controller/worker/flows/listener_flows.py
|
Sirius21c/octavia
|
e7057a566689967f23ede519235d6075a01a6d61
|
[
"Apache-2.0"
] | null | null | null |
octavia/controller/worker/flows/listener_flows.py
|
Sirius21c/octavia
|
e7057a566689967f23ede519235d6075a01a6d61
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow
from octavia.common import constants
from octavia.controller.worker.tasks import amphora_driver_tasks
from octavia.controller.worker.tasks import database_tasks
from octavia.controller.worker.tasks import lifecycle_tasks
from octavia.controller.worker.tasks import model_tasks
from octavia.controller.worker.tasks import network_tasks
class ListenerFlows(object):
    """Builders for the taskflow flows that create/update/delete listeners."""

    def get_create_listener_flow(self):
        """Create a flow to create a listener.

        :returns: The flow for creating a listener
        """
        create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW)
        # On revert, the listeners are marked ERROR in the DB.
        create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        # Push the new configuration to the amphorae first, then the network.
        create_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        create_listener_flow.add(network_tasks.UpdateVIP(
            requires=constants.LOADBALANCER))
        create_listener_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        return create_listener_flow

    def get_create_all_listeners_flow(self):
        """Create a flow to create all listeners.

        :returns: The flow for creating all listeners
        """
        create_all_listeners_flow = linear_flow.Flow(
            constants.CREATE_LISTENERS_FLOW)
        # Fetch the listeners, then a fresh copy of the load balancer,
        # before pushing everything to the amphorae and the VIP.
        create_all_listeners_flow.add(
            database_tasks.GetListenersFromLoadbalancer(
                requires=constants.LOADBALANCER,
                provides=constants.LISTENERS))
        create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer(
            requires=constants.LOADBALANCER_ID,
            provides=constants.LOADBALANCER))
        create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        create_all_listeners_flow.add(network_tasks.UpdateVIP(
            requires=constants.LOADBALANCER))
        return create_all_listeners_flow

    def get_delete_listener_flow(self):
        """Create a flow to delete a listener.

        :returns: The flow for deleting a listener
        """
        delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
        delete_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask(
            requires=constants.LISTENER))
        # Remove from the amphorae and the VIP before touching the DB.
        delete_listener_flow.add(amphora_driver_tasks.ListenerDelete(
            requires=[constants.LOADBALANCER, constants.LISTENER]))
        delete_listener_flow.add(network_tasks.UpdateVIPForDelete(
            requires=constants.LOADBALANCER))
        delete_listener_flow.add(database_tasks.DeleteListenerInDB(
            requires=constants.LISTENER))
        delete_listener_flow.add(database_tasks.DecrementListenerQuota(
            requires=constants.LISTENER))
        delete_listener_flow.add(database_tasks.MarkLBActiveInDB(
            requires=constants.LOADBALANCER))
        return delete_listener_flow

    def get_delete_listener_internal_flow(self, listener_name):
        """Create a flow to delete a listener and l7policies internally
        (will skip deletion on the amp and marking LB active).

        :param listener_name: store name used to rebind the listener and to
            make the task names unique inside a larger flow
        :returns: The flow for deleting a listener
        """
        delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
        # Should cascade delete all L7 policies
        delete_listener_flow.add(network_tasks.UpdateVIPForDelete(
            name='delete_update_vip_' + listener_name,
            requires=constants.LOADBALANCER))
        delete_listener_flow.add(database_tasks.DeleteListenerInDB(
            name='delete_listener_in_db_' + listener_name,
            requires=constants.LISTENER,
            rebind={constants.LISTENER: listener_name}))
        delete_listener_flow.add(database_tasks.DecrementListenerQuota(
            name='decrement_listener_quota_' + listener_name,
            requires=constants.LISTENER,
            rebind={constants.LISTENER: listener_name}))
        return delete_listener_flow

    def get_update_listener_flow(self):
        """Create a flow to update a listener.

        :returns: The flow for updating a listener
        """
        update_listener_flow = linear_flow.Flow(constants.UPDATE_LISTENER_FLOW)
        update_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        # Apply the update to the in-memory model before pushing it out.
        update_listener_flow.add(model_tasks.UpdateAttributes(
            rebind={constants.OBJECT: constants.LISTENER},
            requires=[constants.UPDATE_DICT]))
        update_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        update_listener_flow.add(database_tasks.UpdateListenerInDB(
            requires=[constants.LISTENER, constants.UPDATE_DICT]))
        update_listener_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        return update_listener_flow
| 45.969925
| 79
| 0.689728
|
4a038c554c36b447c22f943e9f221be85527433d
| 8,752
|
py
|
Python
|
scripts/import_individual_variants.py
|
phenopolis/phenopolis_browser
|
b2e55c899c115963ce062ca0fbc42d4cab7c4dc7
|
[
"MIT"
] | 24
|
2021-05-20T06:56:38.000Z
|
2021-09-20T08:41:06.000Z
|
scripts/import_individual_variants.py
|
phenopolis/phenopolis_browser
|
b2e55c899c115963ce062ca0fbc42d4cab7c4dc7
|
[
"MIT"
] | 68
|
2021-05-18T20:35:29.000Z
|
2021-07-15T06:16:43.000Z
|
scripts/import_individual_variants.py
|
phenopolis/phenopolis_browser
|
b2e55c899c115963ce062ca0fbc42d4cab7c4dc7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Import an individual's VAR.tsv file"""
import os
import re
import sys
import atexit
import logging
import tempfile
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from urllib.parse import urlparse
import boto3
import psycopg2 # type: ignore
from psycopg2 import sql
from botocore.exceptions import ClientError
logger = logging.getLogger()  # root logger; level refined in main() from --quiet/--verbose
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s")

# Name of the staging table the TSV is COPYed into (TEMP unless --keep-temp).
IMPORT_TABLE = sql.Identifier("iv_import")
class ScriptError(Exception):
    """Controlled exception raised by the script; reported without a traceback."""
def main():
    """Entry point: fetch the TSV, stage it, and import one individual's variants."""
    opt = parse_cmdline()
    logger.setLevel(opt.loglevel)
    if opt.resource.startswith("s3://"):
        # Downloads into a temp file; sets opt.file (and maybe opt.individual).
        download_from_aws(opt)
    else:
        opt.file = opt.resource
    with psycopg2.connect(opt.dsn) as conn:
        create_temp_table(opt, conn)
        import_temp_table(opt, conn)
        # upsert_individual(opt, conn)  # if we will need it
        # Order matters: variants first, then the tables referencing them.
        import_variant(opt, conn)
        import_variant_gene(opt, conn)
        import_individual_variant(opt, conn)
def download_from_aws(opt):
    """
    Download opt.resource from aws into a temp file.

    After download store the file name in `opt.file`. Also fills
    opt.individual from the URL when it was not given on the command line.
    Raises ScriptError when the URL has no PHxxxxx segment or the download
    fails.
    """
    check_aws_config(opt)
    # s3://phenopolis-individuals/PH00009704/VAR.tsv
    # ^ ^
    # bucket filename
    parts = urlparse(opt.resource)
    path = parts.path.lstrip("/")
    indid = path.split("/", 1)[0]
    if not indid.startswith("PH"):
        raise ScriptError(f"cannot see an individual id in the {opt.resource} url")
    if not opt.individual:
        opt.individual = indid
    # Download the s3 file into the temporary file
    s3 = boto3.resource("s3", endpoint_url="https://s3.eu-central-1.wasabisys.com")
    bucket = s3.Bucket(parts.netloc)
    with tempfile.NamedTemporaryFile(delete=False) as f:
        opt.file = f.name
        # Ensure the temp file is removed at interpreter exit.
        atexit.register(drop_temp_file, f.name)
        logger.info("downloading %s into temp file %s", opt.resource, f.name)
        try:
            bucket.download_fileobj(path, f)
        except ClientError as exc:
            raise ScriptError(f"error downloading file: {exc}")
def drop_temp_file(filename):
    """atexit hook: remove the temporary download file if it still exists."""
    if os.path.exists(filename):
        logger.info("dropping temp file %s", filename)
        os.remove(filename)
    else:
        # FIX: logger.warn() is a deprecated alias of warning().
        logger.warning("file name %s not found", filename)
def check_aws_config(opt):
    """Bail out if there's something obviously broken in aws."""
    required = ("AWS_SECRET_ACCESS_KEY", "AWS_ACCESS_KEY_ID")
    missing = [name for name in required if not os.environ.get(name)]
    if missing:
        varname = missing[0]
        raise ScriptError(f"env var {varname} not set: this is not gonna work")
def create_temp_table(opt, conn):
    """Create the staging table with one column per TSV header title.

    Columns default to text; known titles get more specific types. The
    table is TEMP unless --keep-temp was given (then it survives for
    debugging and must be dropped before the next run).
    """
    temp = sql.SQL("temp " if not opt.keep_temp else "")
    logger.info("creating %stable %s", temp.as_string(conn), IMPORT_TABLE.as_string(conn))
    titles = get_tsv_titles(opt)
    parts = []
    parts.append(sql.SQL("create {}table {} (").format(temp, IMPORT_TABLE))
    # Non-text column types, keyed by (lower-cased) TSV title.
    types = {
        "pos": "bigint",
        "dann": "float4",
        "cadd_phred": "float4",
        "revel": "float4",
        "fathmm_score": "text",
        "canonical": "int",
        "dp": "int",
        "fs": "float4",
        "mq": "float4",
        "qd": "float4",
        "het": "bool",
        "hom": "bool",
        "strand": "smallint",
    }
    for title in titles:
        parts.append(sql.Identifier(title))
        parts.append(sql.SQL(types.get(title, "text")))
        parts.append(sql.SQL(","))
    # Replace the trailing comma with the closing paren.
    parts[-1] = sql.SQL(")")
    cur = conn.cursor()
    try:
        cur.execute(sql.SQL(" ").join(parts))
    except psycopg2.errors.DuplicateTable:
        raise ScriptError(
            f"table {IMPORT_TABLE.strings[0]} already exists: if you used '--keep-temp' you should remove it"
        )
    if opt.keep_temp:
        conn.commit()
def import_temp_table(opt, conn):
    """COPY the TSV file into the staging table and ANALYZE it."""
    logger.info("importing %s into %s", opt.file, IMPORT_TABLE.as_string(conn))
    cur = conn.cursor()
    with open(opt.file) as f:
        stmt = sql.SQL("copy {} from stdin (format csv, header true, delimiter '\t')").format(IMPORT_TABLE)
        cur.copy_expert(stmt, f)
    # Fresh stats so the joins in the import queries plan well.
    cur.execute(sql.SQL("analyze {}").format(IMPORT_TABLE))
    if opt.keep_temp:
        conn.commit()
# def upsert_individual(opt, conn):
# indid = get_individual_id(opt)
# cur = conn.cursor()
# cur.execute(
# "select id from phenopolis.individual where phenopolis_id = %s", (indid,),
# )
# rec = cur.fetchone()
# if not rec:
# # TODO: insert new?
# raise ScriptError(f"individual not found: {indid}")
# return rec[0]
def import_variant(opt, conn):
    """Insert new rows into phenopolis.variant from the staging table.

    fathmm_score arrives as a comma-separated string and is converted to
    a float4 array; already-known variants (variant_key conflict) are
    skipped.
    """
    cur = conn.cursor()
    cur.execute(
        sql.SQL(
            """
            insert into phenopolis.variant (
                chrom, pos, ref, alt, dbsnp, variant_class, dann, cadd_phred, revel, fathmm_score)
            select
                iv.chrom, iv.pos, iv.ref, iv.alt, iv.dbsnp, iv.variant_class,
                iv.dann, iv.cadd_phred, iv.revel,
                string_to_array(iv.fathmm_score, ',', '.')::float4[]
            from {} iv
            on conflict on constraint variant_key do nothing
            """
        ).format(IMPORT_TABLE)
    )
    logger.info("variant records imported: %s", cur.rowcount)
def import_individual_variant(opt, conn):
    """Link the individual to each imported variant with its call metrics.

    Zygosity is derived from the het/hom flags; existing (individual,
    variant) pairs are skipped.
    """
    cur = conn.cursor()
    indid = get_individual_id(opt)
    cur.execute(
        sql.SQL(
            """
            insert into phenopolis.individual_variant (
                individual_id, variant_id, chrom, pos, ref, alt,
                dp, fs, mq, qd, filter, zygosity
            )
            select
                %s, v.id, iv.chrom, iv.pos, iv.ref, iv.alt,
                iv.dp, iv.fs, iv.mq, iv.qd, iv.filter,
                case when iv.het then 'HET' when iv.hom then 'HOM' end
            from {} iv
            join phenopolis.variant v
                on (v.chrom, v.pos, v.ref, v.alt) = (iv.chrom, iv.pos, iv.ref, iv.alt)
            on conflict on constraint individual_variant_pkey do nothing
            """
        ).format(IMPORT_TABLE),
        (indid,),
    )
    logger.info("individual/variant records imported: %s", cur.rowcount)
def import_variant_gene(opt, conn):
    """Insert the per-gene/transcript annotations for each imported variant.

    Existing (variant, gene) rows are skipped via the primary-key conflict.
    """
    cur = conn.cursor()
    cur.execute(
        sql.SQL(
            """
            insert into phenopolis.variant_gene (
                variant_id, gene_id, transcript_id, strand, exon, most_severe_consequence,
                impact, hgvs_c, hgvs_p, canonical)
            select
                v.id, iv.gene_id, iv.transcript_id, iv.strand, iv.exon, iv.most_severe_consequence,
                lower(iv.impact), iv.hgvsc, iv.hgvsp, iv.canonical != 0
            from {} iv
            join phenopolis.variant v
                on (v.chrom, v.pos, v.ref, v.alt) = (iv.chrom, iv.pos, iv.ref, iv.alt)
            on conflict on constraint variant_gene_pkey do nothing
            """
        ).format(IMPORT_TABLE)
    )
    logger.info("variant/gene records imported: %s", cur.rowcount)
def get_tsv_titles(opt, __cache=[]):
    """Return the lower-cased header titles of the TSV file.

    The mutable default *__cache* is deliberate: it memoises the result
    across calls within a single run.
    """
    if __cache:
        return __cache[0]
    with open(opt.file) as f:
        line = f.readline()
        titles = line.lower().split()
    __cache.append(titles)
    return titles
def get_individual_id(opt, __cache=[]):
    """Return the numeric individual id, from --individual or the file name.

    The result is memoised in the mutable default *__cache* (deliberate,
    same pattern as get_tsv_titles). Raises ScriptError when no id can be
    determined.
    """
    if __cache:
        return __cache[0]
    # BUG FIX: rv was unbound when neither source matched, so the final
    # `if rv:` raised UnboundLocalError instead of the intended ScriptError.
    rv = None
    if opt.individual:
        rv = int(opt.individual.replace("PH", ""))
    else:
        m = re.search(r"PH(\d+)", opt.file)
        if m:
            rv = int(m.group(1))
    if rv:
        __cache.append(rv)
        logger.info("importing data for individual %s", rv)
        return rv
    else:
        raise ScriptError("no individual found in the resource or --individual")
def parse_cmdline():
    """Parse command-line arguments; return the options namespace."""
    parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument("resource", metavar="RES", help="the resource to import (file, s3:// url)")
    parser.add_argument("--dsn", default="", help="connection string to import into [default: %(default)r]")
    parser.add_argument("--keep-temp", action="store_true", help="keep the temp table after import (for debugging)")
    parser.add_argument("--individual", help="individual id to import (otherwise try from the filename)")
    # --quiet and --verbose both set opt.loglevel; they are mutually exclusive.
    g = parser.add_mutually_exclusive_group()
    g.add_argument(
        "-q",
        "--quiet",
        help="talk less",
        dest="loglevel",
        action="store_const",
        const=logging.WARN,
        default=logging.INFO,
    )
    g.add_argument(
        "-v",
        "--verbose",
        help="talk more",
        dest="loglevel",
        action="store_const",
        const=logging.DEBUG,
        default=logging.INFO,
    )
    opt = parser.parse_args()
    return opt
if __name__ == "__main__":
    try:
        sys.exit(main())
    except ScriptError as e:
        # Expected, user-facing failure: message only, no traceback.
        logger.error("%s", e)
        sys.exit(1)
    except Exception:
        logger.exception("unexpected error")
        sys.exit(1)
    # KeyboardInterrupt derives from BaseException, so the Exception
    # handler above does not swallow it even though it is listed later.
    except KeyboardInterrupt:
        logger.info("user interrupt")
        sys.exit(1)
| 28.051282
| 116
| 0.627514
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.