code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from pprint import pformat
from six import iteritems
import re
class V1beta1CustomResourceDefinitionSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'additional_printer_columns': 'list[V1beta1CustomResourceColumnDefinition]',
'group': 'str',
'names': 'V1beta1CustomResourceDefinitionNames',
'scope': 'str',
'subresources': 'V1beta1CustomResourceSubresources',
'validation': 'V1beta1CustomResourceValidation',
'version': 'str',
'versions': 'list[V1beta1CustomResourceDefinitionVersion]'
}
attribute_map = {
'additional_printer_columns': 'additionalPrinterColumns',
'group': 'group',
'names': 'names',
'scope': 'scope',
'subresources': 'subresources',
'validation': 'validation',
'version': 'version',
'versions': 'versions'
}
def __init__(self, additional_printer_columns=None, group=None, names=None, scope=None, subresources=None, validation=None, version=None, versions=None):
"""
V1beta1CustomResourceDefinitionSpec - a model defined in Swagger
"""
self._additional_printer_columns = None
self._group = None
self._names = None
self._scope = None
self._subresources = None
self._validation = None
self._version = None
self._versions = None
self.discriminator = None
if additional_printer_columns is not None:
self.additional_printer_columns = additional_printer_columns
self.group = group
self.names = names
self.scope = scope
if subresources is not None:
self.subresources = subresources
if validation is not None:
self.validation = validation
if version is not None:
self.version = version
if versions is not None:
self.versions = versions
@property
def additional_printer_columns(self):
"""
Gets the additional_printer_columns of this V1beta1CustomResourceDefinitionSpec.
AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column.
:return: The additional_printer_columns of this V1beta1CustomResourceDefinitionSpec.
:rtype: list[V1beta1CustomResourceColumnDefinition]
"""
return self._additional_printer_columns
@additional_printer_columns.setter
def additional_printer_columns(self, additional_printer_columns):
"""
Sets the additional_printer_columns of this V1beta1CustomResourceDefinitionSpec.
AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column.
:param additional_printer_columns: The additional_printer_columns of this V1beta1CustomResourceDefinitionSpec.
:type: list[V1beta1CustomResourceColumnDefinition]
"""
self._additional_printer_columns = additional_printer_columns
@property
def group(self):
"""
Gets the group of this V1beta1CustomResourceDefinitionSpec.
Group is the group this resource belongs in
:return: The group of this V1beta1CustomResourceDefinitionSpec.
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""
Sets the group of this V1beta1CustomResourceDefinitionSpec.
Group is the group this resource belongs in
:param group: The group of this V1beta1CustomResourceDefinitionSpec.
:type: str
"""
if group is None:
raise ValueError("Invalid value for `group`, must not be `None`")
self._group = group
@property
def names(self):
"""
Gets the names of this V1beta1CustomResourceDefinitionSpec.
Names are the names used to describe this custom resource
:return: The names of this V1beta1CustomResourceDefinitionSpec.
:rtype: V1beta1CustomResourceDefinitionNames
"""
return self._names
@names.setter
def names(self, names):
"""
Sets the names of this V1beta1CustomResourceDefinitionSpec.
Names are the names used to describe this custom resource
:param names: The names of this V1beta1CustomResourceDefinitionSpec.
:type: V1beta1CustomResourceDefinitionNames
"""
if names is None:
raise ValueError("Invalid value for `names`, must not be `None`")
self._names = names
@property
def scope(self):
"""
Gets the scope of this V1beta1CustomResourceDefinitionSpec.
Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced
:return: The scope of this V1beta1CustomResourceDefinitionSpec.
:rtype: str
"""
return self._scope
@scope.setter
def scope(self, scope):
"""
Sets the scope of this V1beta1CustomResourceDefinitionSpec.
Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced
:param scope: The scope of this V1beta1CustomResourceDefinitionSpec.
:type: str
"""
if scope is None:
raise ValueError("Invalid value for `scope`, must not be `None`")
self._scope = scope
@property
def subresources(self):
"""
Gets the subresources of this V1beta1CustomResourceDefinitionSpec.
Subresources describes the subresources for CustomResources
:return: The subresources of this V1beta1CustomResourceDefinitionSpec.
:rtype: V1beta1CustomResourceSubresources
"""
return self._subresources
@subresources.setter
def subresources(self, subresources):
"""
Sets the subresources of this V1beta1CustomResourceDefinitionSpec.
Subresources describes the subresources for CustomResources
:param subresources: The subresources of this V1beta1CustomResourceDefinitionSpec.
:type: V1beta1CustomResourceSubresources
"""
self._subresources = subresources
@property
def validation(self):
"""
Gets the validation of this V1beta1CustomResourceDefinitionSpec.
Validation describes the validation methods for CustomResources
:return: The validation of this V1beta1CustomResourceDefinitionSpec.
:rtype: V1beta1CustomResourceValidation
"""
return self._validation
@validation.setter
def validation(self, validation):
"""
Sets the validation of this V1beta1CustomResourceDefinitionSpec.
Validation describes the validation methods for CustomResources
:param validation: The validation of this V1beta1CustomResourceDefinitionSpec.
:type: V1beta1CustomResourceValidation
"""
self._validation = validation
@property
def version(self):
"""
Gets the version of this V1beta1CustomResourceDefinitionSpec.
Version is the version this resource belongs in Should be always first item in Versions field if provided. Optional, but at least one of Version or Versions must be set. Deprecated: Please use `Versions`.
:return: The version of this V1beta1CustomResourceDefinitionSpec.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this V1beta1CustomResourceDefinitionSpec.
Version is the version this resource belongs in Should be always first item in Versions field if provided. Optional, but at least one of Version or Versions must be set. Deprecated: Please use `Versions`.
:param version: The version of this V1beta1CustomResourceDefinitionSpec.
:type: str
"""
self._version = version
@property
def versions(self):
"""
Gets the versions of this V1beta1CustomResourceDefinitionSpec.
Versions is the list of all supported versions for this resource. If Version field is provided, this field is optional. Validation: All versions must use the same validation schema for now. i.e., top level Validation field is applied to all of these versions. Order: The version name will be used to compute the order. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
:return: The versions of this V1beta1CustomResourceDefinitionSpec.
:rtype: list[V1beta1CustomResourceDefinitionVersion]
"""
return self._versions
@versions.setter
def versions(self, versions):
"""
Sets the versions of this V1beta1CustomResourceDefinitionSpec.
Versions is the list of all supported versions for this resource. If Version field is provided, this field is optional. Validation: All versions must use the same validation schema for now. i.e., top level Validation field is applied to all of these versions. Order: The version name will be used to compute the order. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
:param versions: The versions of this V1beta1CustomResourceDefinitionSpec.
:type: list[V1beta1CustomResourceDefinitionVersion]
"""
self._versions = versions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1CustomResourceDefinitionSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | kubernetes/client/models/v1beta1_custom_resource_definition_spec.py | from pprint import pformat
from six import iteritems
import re
class V1beta1CustomResourceDefinitionSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'additional_printer_columns': 'list[V1beta1CustomResourceColumnDefinition]',
'group': 'str',
'names': 'V1beta1CustomResourceDefinitionNames',
'scope': 'str',
'subresources': 'V1beta1CustomResourceSubresources',
'validation': 'V1beta1CustomResourceValidation',
'version': 'str',
'versions': 'list[V1beta1CustomResourceDefinitionVersion]'
}
attribute_map = {
'additional_printer_columns': 'additionalPrinterColumns',
'group': 'group',
'names': 'names',
'scope': 'scope',
'subresources': 'subresources',
'validation': 'validation',
'version': 'version',
'versions': 'versions'
}
def __init__(self, additional_printer_columns=None, group=None, names=None, scope=None, subresources=None, validation=None, version=None, versions=None):
"""
V1beta1CustomResourceDefinitionSpec - a model defined in Swagger
"""
self._additional_printer_columns = None
self._group = None
self._names = None
self._scope = None
self._subresources = None
self._validation = None
self._version = None
self._versions = None
self.discriminator = None
if additional_printer_columns is not None:
self.additional_printer_columns = additional_printer_columns
self.group = group
self.names = names
self.scope = scope
if subresources is not None:
self.subresources = subresources
if validation is not None:
self.validation = validation
if version is not None:
self.version = version
if versions is not None:
self.versions = versions
@property
def additional_printer_columns(self):
"""
Gets the additional_printer_columns of this V1beta1CustomResourceDefinitionSpec.
AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column.
:return: The additional_printer_columns of this V1beta1CustomResourceDefinitionSpec.
:rtype: list[V1beta1CustomResourceColumnDefinition]
"""
return self._additional_printer_columns
@additional_printer_columns.setter
def additional_printer_columns(self, additional_printer_columns):
"""
Sets the additional_printer_columns of this V1beta1CustomResourceDefinitionSpec.
AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column.
:param additional_printer_columns: The additional_printer_columns of this V1beta1CustomResourceDefinitionSpec.
:type: list[V1beta1CustomResourceColumnDefinition]
"""
self._additional_printer_columns = additional_printer_columns
@property
def group(self):
"""
Gets the group of this V1beta1CustomResourceDefinitionSpec.
Group is the group this resource belongs in
:return: The group of this V1beta1CustomResourceDefinitionSpec.
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""
Sets the group of this V1beta1CustomResourceDefinitionSpec.
Group is the group this resource belongs in
:param group: The group of this V1beta1CustomResourceDefinitionSpec.
:type: str
"""
if group is None:
raise ValueError("Invalid value for `group`, must not be `None`")
self._group = group
@property
def names(self):
"""
Gets the names of this V1beta1CustomResourceDefinitionSpec.
Names are the names used to describe this custom resource
:return: The names of this V1beta1CustomResourceDefinitionSpec.
:rtype: V1beta1CustomResourceDefinitionNames
"""
return self._names
@names.setter
def names(self, names):
"""
Sets the names of this V1beta1CustomResourceDefinitionSpec.
Names are the names used to describe this custom resource
:param names: The names of this V1beta1CustomResourceDefinitionSpec.
:type: V1beta1CustomResourceDefinitionNames
"""
if names is None:
raise ValueError("Invalid value for `names`, must not be `None`")
self._names = names
@property
def scope(self):
"""
Gets the scope of this V1beta1CustomResourceDefinitionSpec.
Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced
:return: The scope of this V1beta1CustomResourceDefinitionSpec.
:rtype: str
"""
return self._scope
@scope.setter
def scope(self, scope):
"""
Sets the scope of this V1beta1CustomResourceDefinitionSpec.
Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced
:param scope: The scope of this V1beta1CustomResourceDefinitionSpec.
:type: str
"""
if scope is None:
raise ValueError("Invalid value for `scope`, must not be `None`")
self._scope = scope
@property
def subresources(self):
"""
Gets the subresources of this V1beta1CustomResourceDefinitionSpec.
Subresources describes the subresources for CustomResources
:return: The subresources of this V1beta1CustomResourceDefinitionSpec.
:rtype: V1beta1CustomResourceSubresources
"""
return self._subresources
@subresources.setter
def subresources(self, subresources):
"""
Sets the subresources of this V1beta1CustomResourceDefinitionSpec.
Subresources describes the subresources for CustomResources
:param subresources: The subresources of this V1beta1CustomResourceDefinitionSpec.
:type: V1beta1CustomResourceSubresources
"""
self._subresources = subresources
@property
def validation(self):
"""
Gets the validation of this V1beta1CustomResourceDefinitionSpec.
Validation describes the validation methods for CustomResources
:return: The validation of this V1beta1CustomResourceDefinitionSpec.
:rtype: V1beta1CustomResourceValidation
"""
return self._validation
@validation.setter
def validation(self, validation):
"""
Sets the validation of this V1beta1CustomResourceDefinitionSpec.
Validation describes the validation methods for CustomResources
:param validation: The validation of this V1beta1CustomResourceDefinitionSpec.
:type: V1beta1CustomResourceValidation
"""
self._validation = validation
@property
def version(self):
"""
Gets the version of this V1beta1CustomResourceDefinitionSpec.
Version is the version this resource belongs in Should be always first item in Versions field if provided. Optional, but at least one of Version or Versions must be set. Deprecated: Please use `Versions`.
:return: The version of this V1beta1CustomResourceDefinitionSpec.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this V1beta1CustomResourceDefinitionSpec.
Version is the version this resource belongs in Should be always first item in Versions field if provided. Optional, but at least one of Version or Versions must be set. Deprecated: Please use `Versions`.
:param version: The version of this V1beta1CustomResourceDefinitionSpec.
:type: str
"""
self._version = version
@property
def versions(self):
"""
Gets the versions of this V1beta1CustomResourceDefinitionSpec.
Versions is the list of all supported versions for this resource. If Version field is provided, this field is optional. Validation: All versions must use the same validation schema for now. i.e., top level Validation field is applied to all of these versions. Order: The version name will be used to compute the order. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
:return: The versions of this V1beta1CustomResourceDefinitionSpec.
:rtype: list[V1beta1CustomResourceDefinitionVersion]
"""
return self._versions
@versions.setter
def versions(self, versions):
"""
Sets the versions of this V1beta1CustomResourceDefinitionSpec.
Versions is the list of all supported versions for this resource. If Version field is provided, this field is optional. Validation: All versions must use the same validation schema for now. i.e., top level Validation field is applied to all of these versions. Order: The version name will be used to compute the order. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
:param versions: The versions of this V1beta1CustomResourceDefinitionSpec.
:type: list[V1beta1CustomResourceDefinitionVersion]
"""
self._versions = versions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1CustomResourceDefinitionSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | 0.756897 | 0.132066 |
import os
import time
import hashlib
import codecs
from unittest import TestCase
from cryptodetector import Options, CryptoDetector, MethodFactory
class TestCryptoDetector(TestCase):
"""Unit Tests
"""
KNOWN_TEST_SHA1 = "370aef2687f5d68f3696b0190d459600a22dccf7"
def method(self, method_id):
for mc in MethodFactory.method_classes:
if mc.method_id == method_id:
return mc
def scan_package(self, test_packages, extra_options={}, keyword_ignore_case=True):
options = Options()._get_options()
for option in extra_options:
options[option] = extra_options[option]
current_directory = os.path.dirname(os.path.abspath(__file__))
options["packages"] = []
for package in test_packages:
package_full_path = os.path.join(current_directory, package)
options["packages"].append(package_full_path)
self.method("keyword").options["kwlist_path"] = os.path.join(current_directory, \
"test_keyword_list.conf")
self.method("keyword").options["ignore_case"] = keyword_ignore_case
self.method("api").options["kwlist_path"] = os.path.join(current_directory, \
"test_api_list.conf")
return CryptoDetector(options, skip_output=True).scan()
def sha1(self, file_full_path):
with open(file_full_path) as f:
checksum_calculator = hashlib.sha1()
checksum_calculator.update(codecs.encode(f.read(), "utf-8"))
return checksum_calculator.hexdigest()
def count_matches(self, data, package, file, evidence_type, package_name=None, known_sha1=None):
current_directory = os.path.dirname(os.path.abspath(__file__))
file_full_path = os.path.join(current_directory, package)
file_full_path = os.path.abspath(os.path.join(file_full_path, file))
if known_sha1 is None:
file_sha1 = self.sha1(file_full_path)
else:
file_sha1 = known_sha1
if package_name is None:
package_name = package
self.assertTrue(file_sha1 in data[package_name]["crypto_evidence"])
count = 0
for match in data[package_name]["crypto_evidence"][file_sha1]["hits"]:
if match["evidence_type"] == evidence_type:
count += 1
return count
def assert_result_not_empty(self, result, package):
self.assertTrue(package in result)
self.assertTrue("crypto_evidence" in result[package])
self.assertTrue(result[package]["crypto_evidence"] != {})
def archive_test(self, archive_type):
result = self.scan_package(["extract_test/test." + archive_type], \
{"methods": ["keyword"]})
self.assert_result_not_empty(result, "test." + archive_type)
self.assertEqual(self.count_matches(result, "extract_test/test." + archive_type \
, "test", "keyword_boundary_all", "test." + archive_type, \
known_sha1=self.KNOWN_TEST_SHA1), 40)
def get_testpkg3_matches(self):
result = self.scan_package(["testpkg3"], {"methods": ["keyword"]})
self.assert_result_not_empty(result, "testpkg3")
current_directory = os.path.dirname(os.path.abspath(__file__))
file_full_path = os.path.join(current_directory, "testpkg3")
file_full_path = os.path.join(file_full_path, "test")
file_sha1 = self.sha1(file_full_path)
self.assertTrue(file_sha1 in result["testpkg3"]["crypto_evidence"])
matches = result["testpkg3"]["crypto_evidence"][file_sha1]["hits"]
self.assertEqual(len(matches), 3)
return matches
def test_match_boundary(self):
result = self.scan_package(["testpkg1"], {"methods": ["keyword"]})
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_any"), \
60)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_begin"), \
48)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_end"), \
48)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_all"), \
40)
def test_keyword_ignore_case(self):
result = self.scan_package(["testpkg1"], {"methods": ["keyword"]}, keyword_ignore_case=False)
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_any"), \
45)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_begin"), \
36)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_end"), \
36)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_all"), \
30)
def test_multiple_packages(self):
result = self.scan_package(["testpkg1", "testpkg3"], {"methods": ["keyword"]})
self.assert_result_not_empty(result, "testpkg1")
self.assert_result_not_empty(result, "testpkg3")
def test_extract_zip(self):
self.archive_test("zip")
def test_extract_tar_bz2(self):
self.archive_test("tar.bz2")
def test_extract_tar_xz(self):
self.archive_test("tar.xz")
def test_extract_tar_gz(self):
self.archive_test("tar.gz")
def test_extract_tar_lzma(self):
self.archive_test("tar.lzma")
def test_extract_rpm(self):
self.archive_test("rpm")
def test_extract_jar(self):
self.archive_test("jar")
def test_extract_tar(self):
self.archive_test("tar")
def test_extract_war(self):
self.archive_test("war")
def test_extract_gz(self):
self.archive_test("gz")
def test_extract_bz2(self):
self.archive_test("bz2")
def test_extract_xz(self):
self.archive_test("xz")
def test_extract_lzma(self):
self.archive_test("lzma")
def test_extract_recursive_archives(self):
result = self.scan_package(["extract_test/recursive.zip"], \
{"methods": ["keyword"]})
self.assert_result_not_empty(result, "recursive.zip")
self.assertEqual(self.count_matches(result, "extract_test/recursive.zip/test.gz", \
"test", "keyword_boundary_all", "recursive.zip", \
known_sha1=self.KNOWN_TEST_SHA1), 120)
def test_ignore_evidence_types(self):
result = self.scan_package(["testpkg1"], {"methods": ["keyword"], \
"ignore_evidence_types": ["keyword_any", "keyword_boundary_end"]})
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_any"), \
0)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_begin"), \
48)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_end"), \
0)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_all"), \
40)
def test_stop_after(self):
result = self.scan_package(["testpkg1"], {"methods": ["keyword"], "stop_after": 2})
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(len(result["testpkg1"]["crypto_evidence"]), 2)
result = self.scan_package(["testpkg1"], {"methods": ["keyword"], "stop_after": 1})
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(len(result["testpkg1"]["crypto_evidence"]), 1)
def test_source_files_only(self):
result = self.scan_package(["testpkg1"], {"methods": ["keyword"], "source_files_only": True})
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(len(result["testpkg1"]["crypto_evidence"]), 1)
current_directory = os.path.dirname(os.path.abspath(__file__))
file_full_path = os.path.join(current_directory, "testpkg1")
file_full_path = os.path.abspath(os.path.join(file_full_path, "file.cpp"))
file_sha1 = self.sha1(file_full_path)
self.assertTrue(file_sha1 in result["testpkg1"]["crypto_evidence"])
def test_no_matches(self):
result = self.scan_package(["testpkg2"], {"methods": ["keyword"]})
self.assertTrue("testpkg2" in result)
self.assertTrue("crypto_evidence" in result["testpkg2"])
self.assertTrue(result["testpkg2"]["crypto_evidence"] == {})
def test_line_text(self):
for match in self.get_testpkg3_matches():
if match["evidence_type"] == "keyword_any":
self.assertEqual(match["line_text"], "testtestloremtesttest")
elif match["evidence_type"] == "keyword_boundary_begin":
self.assertEqual(match["line_text"], "testtest IPSUM test")
elif match["evidence_type"] == "keyword_boundary_end":
self.assertEqual(match["line_text"], "test dolor")
def test_file_index(self):
for match in self.get_testpkg3_matches():
if match["evidence_type"] == "keyword_any":
self.assertEqual(match["file_index_begin"], 8)
self.assertEqual(match["file_index_end"], 13)
elif match["evidence_type"] == "keyword_boundary_begin":
self.assertEqual(match["file_index_begin"], 41)
self.assertEqual(match["file_index_end"], 46)
elif match["evidence_type"] == "keyword_boundary_end":
self.assertEqual(match["file_index_begin"], 67)
self.assertEqual(match["file_index_end"], 72)
def test_line_index(self):
for match in self.get_testpkg3_matches():
if match["evidence_type"] == "keyword_any":
self.assertEqual(match["line_index_begin"], 8)
self.assertEqual(match["line_index_end"], 13)
elif match["evidence_type"] == "keyword_boundary_begin":
self.assertEqual(match["line_index_begin"], 9)
self.assertEqual(match["line_index_end"], 14)
elif match["evidence_type"] == "keyword_boundary_end":
self.assertEqual(match["line_index_begin"], 5)
self.assertEqual(match["line_index_end"], 10)
def test_line_number(self):
for match in self.get_testpkg3_matches():
if match["evidence_type"] == "keyword_any":
self.assertEqual(match["line_number"], 1)
elif match["evidence_type"] == "keyword_boundary_begin":
self.assertEqual(match["line_number"], 4)
elif match["evidence_type"] == "keyword_boundary_end":
self.assertEqual(match["line_number"], 7)
def test_matched_text(self):
for match in self.get_testpkg3_matches():
if match["evidence_type"] == "keyword_any":
self.assertEqual(match["matched_text"], "lorem")
elif match["evidence_type"] == "keyword_boundary_begin":
self.assertEqual(match["matched_text"], "IPSUM")
elif match["evidence_type"] == "keyword_boundary_end":
self.assertEqual(match["matched_text"], "dolor") | tests/test_all.py | import os
import time
import hashlib
import codecs
from unittest import TestCase
from cryptodetector import Options, CryptoDetector, MethodFactory
class TestCryptoDetector(TestCase):
"""Unit Tests
"""
KNOWN_TEST_SHA1 = "370aef2687f5d68f3696b0190d459600a22dccf7"
def method(self, method_id):
for mc in MethodFactory.method_classes:
if mc.method_id == method_id:
return mc
def scan_package(self, test_packages, extra_options={}, keyword_ignore_case=True):
options = Options()._get_options()
for option in extra_options:
options[option] = extra_options[option]
current_directory = os.path.dirname(os.path.abspath(__file__))
options["packages"] = []
for package in test_packages:
package_full_path = os.path.join(current_directory, package)
options["packages"].append(package_full_path)
self.method("keyword").options["kwlist_path"] = os.path.join(current_directory, \
"test_keyword_list.conf")
self.method("keyword").options["ignore_case"] = keyword_ignore_case
self.method("api").options["kwlist_path"] = os.path.join(current_directory, \
"test_api_list.conf")
return CryptoDetector(options, skip_output=True).scan()
def sha1(self, file_full_path):
with open(file_full_path) as f:
checksum_calculator = hashlib.sha1()
checksum_calculator.update(codecs.encode(f.read(), "utf-8"))
return checksum_calculator.hexdigest()
def count_matches(self, data, package, file, evidence_type, package_name=None, known_sha1=None):
current_directory = os.path.dirname(os.path.abspath(__file__))
file_full_path = os.path.join(current_directory, package)
file_full_path = os.path.abspath(os.path.join(file_full_path, file))
if known_sha1 is None:
file_sha1 = self.sha1(file_full_path)
else:
file_sha1 = known_sha1
if package_name is None:
package_name = package
self.assertTrue(file_sha1 in data[package_name]["crypto_evidence"])
count = 0
for match in data[package_name]["crypto_evidence"][file_sha1]["hits"]:
if match["evidence_type"] == evidence_type:
count += 1
return count
def assert_result_not_empty(self, result, package):
self.assertTrue(package in result)
self.assertTrue("crypto_evidence" in result[package])
self.assertTrue(result[package]["crypto_evidence"] != {})
def archive_test(self, archive_type):
result = self.scan_package(["extract_test/test." + archive_type], \
{"methods": ["keyword"]})
self.assert_result_not_empty(result, "test." + archive_type)
self.assertEqual(self.count_matches(result, "extract_test/test." + archive_type \
, "test", "keyword_boundary_all", "test." + archive_type, \
known_sha1=self.KNOWN_TEST_SHA1), 40)
def get_testpkg3_matches(self):
result = self.scan_package(["testpkg3"], {"methods": ["keyword"]})
self.assert_result_not_empty(result, "testpkg3")
current_directory = os.path.dirname(os.path.abspath(__file__))
file_full_path = os.path.join(current_directory, "testpkg3")
file_full_path = os.path.join(file_full_path, "test")
file_sha1 = self.sha1(file_full_path)
self.assertTrue(file_sha1 in result["testpkg3"]["crypto_evidence"])
matches = result["testpkg3"]["crypto_evidence"][file_sha1]["hits"]
self.assertEqual(len(matches), 3)
return matches
def test_match_boundary(self):
result = self.scan_package(["testpkg1"], {"methods": ["keyword"]})
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_any"), \
60)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_begin"), \
48)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_end"), \
48)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_all"), \
40)
def test_keyword_ignore_case(self):
result = self.scan_package(["testpkg1"], {"methods": ["keyword"]}, keyword_ignore_case=False)
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_any"), \
45)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_begin"), \
36)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_end"), \
36)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_all"), \
30)
def test_multiple_packages(self):
result = self.scan_package(["testpkg1", "testpkg3"], {"methods": ["keyword"]})
self.assert_result_not_empty(result, "testpkg1")
self.assert_result_not_empty(result, "testpkg3")
def test_extract_zip(self):
self.archive_test("zip")
def test_extract_tar_bz2(self):
self.archive_test("tar.bz2")
def test_extract_tar_xz(self):
self.archive_test("tar.xz")
def test_extract_tar_gz(self):
self.archive_test("tar.gz")
def test_extract_tar_lzma(self):
self.archive_test("tar.lzma")
def test_extract_rpm(self):
self.archive_test("rpm")
def test_extract_jar(self):
self.archive_test("jar")
def test_extract_tar(self):
self.archive_test("tar")
def test_extract_war(self):
self.archive_test("war")
def test_extract_gz(self):
self.archive_test("gz")
def test_extract_bz2(self):
self.archive_test("bz2")
def test_extract_xz(self):
self.archive_test("xz")
def test_extract_lzma(self):
self.archive_test("lzma")
def test_extract_recursive_archives(self):
result = self.scan_package(["extract_test/recursive.zip"], \
{"methods": ["keyword"]})
self.assert_result_not_empty(result, "recursive.zip")
self.assertEqual(self.count_matches(result, "extract_test/recursive.zip/test.gz", \
"test", "keyword_boundary_all", "recursive.zip", \
known_sha1=self.KNOWN_TEST_SHA1), 120)
def test_ignore_evidence_types(self):
result = self.scan_package(["testpkg1"], {"methods": ["keyword"], \
"ignore_evidence_types": ["keyword_any", "keyword_boundary_end"]})
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_any"), \
0)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_begin"), \
48)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_end"), \
0)
self.assertEqual(self.count_matches(result, "testpkg1", "file1", "keyword_boundary_all"), \
40)
def test_stop_after(self):
result = self.scan_package(["testpkg1"], {"methods": ["keyword"], "stop_after": 2})
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(len(result["testpkg1"]["crypto_evidence"]), 2)
result = self.scan_package(["testpkg1"], {"methods": ["keyword"], "stop_after": 1})
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(len(result["testpkg1"]["crypto_evidence"]), 1)
def test_source_files_only(self):
result = self.scan_package(["testpkg1"], {"methods": ["keyword"], "source_files_only": True})
self.assert_result_not_empty(result, "testpkg1")
self.assertEqual(len(result["testpkg1"]["crypto_evidence"]), 1)
current_directory = os.path.dirname(os.path.abspath(__file__))
file_full_path = os.path.join(current_directory, "testpkg1")
file_full_path = os.path.abspath(os.path.join(file_full_path, "file.cpp"))
file_sha1 = self.sha1(file_full_path)
self.assertTrue(file_sha1 in result["testpkg1"]["crypto_evidence"])
def test_no_matches(self):
result = self.scan_package(["testpkg2"], {"methods": ["keyword"]})
self.assertTrue("testpkg2" in result)
self.assertTrue("crypto_evidence" in result["testpkg2"])
self.assertTrue(result["testpkg2"]["crypto_evidence"] == {})
def test_line_text(self):
for match in self.get_testpkg3_matches():
if match["evidence_type"] == "keyword_any":
self.assertEqual(match["line_text"], "testtestloremtesttest")
elif match["evidence_type"] == "keyword_boundary_begin":
self.assertEqual(match["line_text"], "testtest IPSUM test")
elif match["evidence_type"] == "keyword_boundary_end":
self.assertEqual(match["line_text"], "test dolor")
def test_file_index(self):
for match in self.get_testpkg3_matches():
if match["evidence_type"] == "keyword_any":
self.assertEqual(match["file_index_begin"], 8)
self.assertEqual(match["file_index_end"], 13)
elif match["evidence_type"] == "keyword_boundary_begin":
self.assertEqual(match["file_index_begin"], 41)
self.assertEqual(match["file_index_end"], 46)
elif match["evidence_type"] == "keyword_boundary_end":
self.assertEqual(match["file_index_begin"], 67)
self.assertEqual(match["file_index_end"], 72)
def test_line_index(self):
for match in self.get_testpkg3_matches():
if match["evidence_type"] == "keyword_any":
self.assertEqual(match["line_index_begin"], 8)
self.assertEqual(match["line_index_end"], 13)
elif match["evidence_type"] == "keyword_boundary_begin":
self.assertEqual(match["line_index_begin"], 9)
self.assertEqual(match["line_index_end"], 14)
elif match["evidence_type"] == "keyword_boundary_end":
self.assertEqual(match["line_index_begin"], 5)
self.assertEqual(match["line_index_end"], 10)
def test_line_number(self):
for match in self.get_testpkg3_matches():
if match["evidence_type"] == "keyword_any":
self.assertEqual(match["line_number"], 1)
elif match["evidence_type"] == "keyword_boundary_begin":
self.assertEqual(match["line_number"], 4)
elif match["evidence_type"] == "keyword_boundary_end":
self.assertEqual(match["line_number"], 7)
def test_matched_text(self):
for match in self.get_testpkg3_matches():
if match["evidence_type"] == "keyword_any":
self.assertEqual(match["matched_text"], "lorem")
elif match["evidence_type"] == "keyword_boundary_begin":
self.assertEqual(match["matched_text"], "IPSUM")
elif match["evidence_type"] == "keyword_boundary_end":
self.assertEqual(match["matched_text"], "dolor") | 0.412175 | 0.340746 |
import errno
import json
import logging
import os
import sys
from pathlib import Path
from typing import Optional
LOGGER = logging.getLogger("napari.monitor")
# If False monitor is disabled even if we meet all other requirements.
ENABLE_MONITOR = True
def _load_config(path: str) -> dict:
"""Load the JSON formatted config file.
Parameters
----------
path : str
The path of the JSON file we should load.
Return
------
dict
The parsed data from the JSON file.
"""
path = Path(path).expanduser()
if not path.exists():
raise FileNotFoundError(
errno.ENOENT, f"Monitor: Config file not found: {path}"
)
with path.open() as infile:
return json.load(infile)
def _load_monitor_config() -> Optional[dict]:
"""Return the MonitorService config file data, or None.
Return
------
Optional[dict]
The parsed config file data or None if no config.
"""
# We shouldn't even call into this file unless NAPARI_MON is defined
# but check to be sure.
value = os.getenv("NAPARI_MON")
if value in [None, "0"]:
return None
return _load_config(value)
def _setup_logging(config: dict) -> None:
"""Log "napari.monitor" messages to the configured file.
Parameters
----------
config : dict
Monitor configuration
"""
try:
log_path = config['log_path']
except KeyError:
return # No log file.
# Nuke/reset log for now.
# Path(log_path).unlink()
fh = logging.FileHandler(log_path)
LOGGER.addHandler(fh)
LOGGER.setLevel(logging.DEBUG)
LOGGER.info("Writing to log path %s", log_path)
def _get_monitor_config() -> Optional[dict]:
"""Create and return the configuration for the MonitorService.
The routine might return None for one serveral reasons:
1) We're not running under Python 3.9 or now.
2) The monitor is explicitly disable, ENABLED_MONITOR is False.
3) The NAPARI_MON environment variable is not defined.
4) The NAPARI_MON config file cannot be found and parsed.
Return
------
Optional[dict]
The configuration for the MonitorService.
"""
if sys.version_info[:2] < (3, 9):
# We require Python 3.9 for now. The shared memory features we need
# were added in 3.8, but the 3.8 implemention was buggy. It's
# possible we could backport to or otherwise fix 3.8 or even 3.7,
# but for now we're making 3.9 a requirement.
print("Monitor: not starting, requires Python 3.9 or newer")
return None
if not ENABLE_MONITOR:
print("Monitor: not starting, disabled")
return None
# The NAPARI_MON environment variable points to our config file.
config = _load_monitor_config()
if config is None:
print("Monitor: not starting, no usable config file")
return None
return config
class Monitor:
"""Wraps the monitor service.
We can't start the monitor service at import time. Under the hood the
multiprocessing complains about a "partially started process".
Instead someone must call our start() method explicitly once the
process has fully started.
"""
def __init__(self):
# Both are set when start() is called, and only if we have
# a parseable config file, have Python 3.9, etc.
self._service = None
self._api = None
self._running = False
def __nonzero__(self) -> bool:
"""Return True if the service is running.
So that callers can do:
if monitor:
monitor.add(...)
"""
return self._running
@property
def run_command_event(self):
"""The MonitorAPI fires this event for commands from clients."""
return self._api.events.run_command
def start(self) -> bool:
"""Start the monitor service, if it hasn't been started already.
Return
------
bool
True if we started the service or it was already started.
"""
if self._running:
return True # It was already started.
config = _get_monitor_config()
if config is None:
return False # Can't start without config.
_setup_logging(config)
# Late imports so no multiprocessing modules are even
# imported unless we are going to start the service.
from ._api import MonitorApi
from ._service import MonitorService
# Create the API first. It will register our callbacks, then
# we start the manager that will serve those callbacks.
self._api = MonitorApi()
# Now we can start our service.
self._service = MonitorService(config, self._api.manager)
self._running = True
return True # We started the service.
def stop(self) -> None:
"""Stop the monitor service."""
if not self._running:
return
self._api.stop()
self._api = None
self._service.stop()
self._service = None
self._running = False
def on_poll(self, event=None) -> None:
"""The QtPoll object polls us.
Probably we could get rid of polling by creating a thread that
blocks waiting for client messages. Then it posts those messages as
Qt Events. So the GUI doesn't block, but gracefully handles
incoming messages as Qt events.
"""
if self._running:
self._api.poll()
# Handle the event to say "keep polling us".
event.handled = True
def add_data(self, data) -> None:
"""Add data to the monitor service.
Caller should use this pattern:
if monitor:
monitor.add(...)
So no time wasted assembling the dict unless the monitor is running.
"""
if self._running:
self._api.add_napari_data(data)
def send_message(self, message: dict) -> None:
"""Send a message to shared memory clients.
Parameters
----------
message : dict
Post this message to clients.
"""
if self._running:
self._api.send_napari_message(message)
monitor = Monitor() | napari/components/experimental/monitor/_monitor.py | import errno
import json
import logging
import os
import sys
from pathlib import Path
from typing import Optional
LOGGER = logging.getLogger("napari.monitor")
# If False monitor is disabled even if we meet all other requirements.
ENABLE_MONITOR = True
def _load_config(path: str) -> dict:
"""Load the JSON formatted config file.
Parameters
----------
path : str
The path of the JSON file we should load.
Return
------
dict
The parsed data from the JSON file.
"""
path = Path(path).expanduser()
if not path.exists():
raise FileNotFoundError(
errno.ENOENT, f"Monitor: Config file not found: {path}"
)
with path.open() as infile:
return json.load(infile)
def _load_monitor_config() -> Optional[dict]:
"""Return the MonitorService config file data, or None.
Return
------
Optional[dict]
The parsed config file data or None if no config.
"""
# We shouldn't even call into this file unless NAPARI_MON is defined
# but check to be sure.
value = os.getenv("NAPARI_MON")
if value in [None, "0"]:
return None
return _load_config(value)
def _setup_logging(config: dict) -> None:
"""Log "napari.monitor" messages to the configured file.
Parameters
----------
config : dict
Monitor configuration
"""
try:
log_path = config['log_path']
except KeyError:
return # No log file.
# Nuke/reset log for now.
# Path(log_path).unlink()
fh = logging.FileHandler(log_path)
LOGGER.addHandler(fh)
LOGGER.setLevel(logging.DEBUG)
LOGGER.info("Writing to log path %s", log_path)
def _get_monitor_config() -> Optional[dict]:
"""Create and return the configuration for the MonitorService.
The routine might return None for one serveral reasons:
1) We're not running under Python 3.9 or now.
2) The monitor is explicitly disable, ENABLED_MONITOR is False.
3) The NAPARI_MON environment variable is not defined.
4) The NAPARI_MON config file cannot be found and parsed.
Return
------
Optional[dict]
The configuration for the MonitorService.
"""
if sys.version_info[:2] < (3, 9):
# We require Python 3.9 for now. The shared memory features we need
# were added in 3.8, but the 3.8 implemention was buggy. It's
# possible we could backport to or otherwise fix 3.8 or even 3.7,
# but for now we're making 3.9 a requirement.
print("Monitor: not starting, requires Python 3.9 or newer")
return None
if not ENABLE_MONITOR:
print("Monitor: not starting, disabled")
return None
# The NAPARI_MON environment variable points to our config file.
config = _load_monitor_config()
if config is None:
print("Monitor: not starting, no usable config file")
return None
return config
class Monitor:
"""Wraps the monitor service.
We can't start the monitor service at import time. Under the hood the
multiprocessing complains about a "partially started process".
Instead someone must call our start() method explicitly once the
process has fully started.
"""
def __init__(self):
# Both are set when start() is called, and only if we have
# a parseable config file, have Python 3.9, etc.
self._service = None
self._api = None
self._running = False
def __nonzero__(self) -> bool:
"""Return True if the service is running.
So that callers can do:
if monitor:
monitor.add(...)
"""
return self._running
@property
def run_command_event(self):
"""The MonitorAPI fires this event for commands from clients."""
return self._api.events.run_command
def start(self) -> bool:
"""Start the monitor service, if it hasn't been started already.
Return
------
bool
True if we started the service or it was already started.
"""
if self._running:
return True # It was already started.
config = _get_monitor_config()
if config is None:
return False # Can't start without config.
_setup_logging(config)
# Late imports so no multiprocessing modules are even
# imported unless we are going to start the service.
from ._api import MonitorApi
from ._service import MonitorService
# Create the API first. It will register our callbacks, then
# we start the manager that will serve those callbacks.
self._api = MonitorApi()
# Now we can start our service.
self._service = MonitorService(config, self._api.manager)
self._running = True
return True # We started the service.
def stop(self) -> None:
"""Stop the monitor service."""
if not self._running:
return
self._api.stop()
self._api = None
self._service.stop()
self._service = None
self._running = False
def on_poll(self, event=None) -> None:
"""The QtPoll object polls us.
Probably we could get rid of polling by creating a thread that
blocks waiting for client messages. Then it posts those messages as
Qt Events. So the GUI doesn't block, but gracefully handles
incoming messages as Qt events.
"""
if self._running:
self._api.poll()
# Handle the event to say "keep polling us".
event.handled = True
def add_data(self, data) -> None:
"""Add data to the monitor service.
Caller should use this pattern:
if monitor:
monitor.add(...)
So no time wasted assembling the dict unless the monitor is running.
"""
if self._running:
self._api.add_napari_data(data)
def send_message(self, message: dict) -> None:
"""Send a message to shared memory clients.
Parameters
----------
message : dict
Post this message to clients.
"""
if self._running:
self._api.send_napari_message(message)
monitor = Monitor() | 0.757615 | 0.236032 |
from functools import cached_property
from lss.drums import MiDIDrums
from lss.utils import Color
class Pad:
"""
Represents one of 81 pads.
Pad is defined by (x, y) pair which strictly corresponds with note
assigned to the pad.
Note map:
91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 || 99
===========================================
81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 || 89
71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 || 79
61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 || 69
51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 || 59
41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 || 49
31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 || 39
21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 || 29
11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 || 19
"""
def __init__(self, x: int, y: int, launchpad, is_function_pad: bool = False):
assert 0 <= x < 9, f"x has to be between 0 and 9, got {x}"
assert 0 <= y < 9, f"y has to be between 0 and 9, got {y}"
self._launchpad = launchpad
self.x = x
self.y = y
self._is_on = False
self._muted = False
self.sound = MiDIDrums.get_sound(self.y)
self.is_function_pad = is_function_pad
def __repr__(self):
return f"Pad({self.x}, {self.y}, note={self.note})"
@staticmethod
def get_note(x: int, y: int) -> int:
return 10 * (y + 1) + x + 1
@cached_property
def note(self) -> int:
return self.get_note(self.x, self.y)
def on(self, color: int) -> None:
self._launchpad.on(self.note, color)
def off(self) -> None:
self._launchpad.off(self.note)
def _set_active_color(self) -> None:
if self._is_on:
if self._muted:
self.on(Color.GREEN_DIMMED)
else:
self.on(Color.GREEN)
else:
self._launchpad.off(self.note)
def blink(self) -> None:
if not self._is_on:
self.on(Color.PINK)
async def unblink(self) -> None:
self._set_active_color()
def mute(self) -> None:
self._muted = not self._muted
self._set_active_color()
def click(self) -> None:
if self.is_function_pad:
return
self._is_on = not self._is_on
self._set_active_color()
async def process_pad(self, callback) -> None:
self.blink()
if self._is_on and not self._muted:
callback(self) | lss/pad.py | from functools import cached_property
from lss.drums import MiDIDrums
from lss.utils import Color
class Pad:
"""
Represents one of 81 pads.
Pad is defined by (x, y) pair which strictly corresponds with note
assigned to the pad.
Note map:
91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 || 99
===========================================
81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 || 89
71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 || 79
61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 || 69
51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 || 59
41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 || 49
31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 || 39
21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 || 29
11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 || 19
"""
def __init__(self, x: int, y: int, launchpad, is_function_pad: bool = False):
assert 0 <= x < 9, f"x has to be between 0 and 9, got {x}"
assert 0 <= y < 9, f"y has to be between 0 and 9, got {y}"
self._launchpad = launchpad
self.x = x
self.y = y
self._is_on = False
self._muted = False
self.sound = MiDIDrums.get_sound(self.y)
self.is_function_pad = is_function_pad
def __repr__(self):
return f"Pad({self.x}, {self.y}, note={self.note})"
@staticmethod
def get_note(x: int, y: int) -> int:
return 10 * (y + 1) + x + 1
@cached_property
def note(self) -> int:
return self.get_note(self.x, self.y)
def on(self, color: int) -> None:
self._launchpad.on(self.note, color)
def off(self) -> None:
self._launchpad.off(self.note)
def _set_active_color(self) -> None:
if self._is_on:
if self._muted:
self.on(Color.GREEN_DIMMED)
else:
self.on(Color.GREEN)
else:
self._launchpad.off(self.note)
def blink(self) -> None:
if not self._is_on:
self.on(Color.PINK)
async def unblink(self) -> None:
self._set_active_color()
def mute(self) -> None:
self._muted = not self._muted
self._set_active_color()
def click(self) -> None:
if self.is_function_pad:
return
self._is_on = not self._is_on
self._set_active_color()
async def process_pad(self, callback) -> None:
self.blink()
if self._is_on and not self._muted:
callback(self) | 0.822332 | 0.357904 |
import re
birth_year = 'byr'
issue_year = 'iyr'
expiration_year = 'eyr'
height = 'hgt'
hair_color = 'hcl'
eye_color = 'ecl'
passport_id = 'pid'
country_id = 'cid'
def is_valid_passport_part1(passport: dict):
mandatory_passport_fields = [birth_year, issue_year, expiration_year, height, hair_color, eye_color, passport_id]
for field in mandatory_passport_fields:
if field not in passport:
return False
return True
def is_valid_passport_part2(passport: dict):
mandatory_passport_fields = [birth_year, issue_year, expiration_year, height, hair_color, eye_color, passport_id]
for field in mandatory_passport_fields:
if field not in passport:
return False
return validate_fields(passport)
def validate_fields(passport: dict):
if not validate_birth_year(passport[birth_year]):
return False
if not validate_issue_year(passport[issue_year]):
return False
if not validate_expiration_year(passport[expiration_year]):
return False
if not validate_height(passport[height]):
return False
if not validate_hair_color(passport[hair_color]):
return False
if not validate_eye_color(passport[eye_color]):
return False
if not validate_passport_id(passport[passport_id]):
return False
return True
def validate_birth_year(birth_year):
birth_year = int(birth_year)
if 1920 <= birth_year <= 2002:
return True
return False
def validate_issue_year(issue_year):
issue_year = int(issue_year)
if 2010 <= issue_year <= 2020:
return True
return False
def validate_expiration_year(expiration_year):
expiration_year = int(expiration_year)
if 2020 <= expiration_year <= 2030:
return True
return False
def validate_height(height: str):
if height[-2:] == 'cm':
nr = int(height[:height.index('cm')])
if 150 <= nr <= 193:
return True
if height[-2:] == 'in':
nr = int(height[:height.index('in')])
if 59 <= nr <= 76:
return True
return False
def validate_hair_color(hair_color: str):
if re.search('^#[0-9a-f]{6}$', hair_color) is not None:
return True
return False
def validate_eye_color(eye_color):
if eye_color in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:
return True
return False
def validate_passport_id(passport_id: str):
if re.search('^[0-9]{9}$', passport_id) is not None:
return True
return False
def main():
with open('in.txt') as file:
data = file.read()
passports_info = data.split('\n\n')
passports = []
for passport_info in passports_info:
passport_fields = passport_info.split()
passports.append(passport_fields)
for passport_index in range(len(passports)):
current_passport = passports[passport_index]
current_passport_dict = {field.split(':')[0]: field.split(':')[1] for field in current_passport}
passports[passport_index] = current_passport_dict
valid_passport_count_part1 = 0
valid_passport_count_part2 = 0
for passport in passports:
if is_valid_passport_part1(passport):
valid_passport_count_part1 += 1
if is_valid_passport_part2(passport):
valid_passport_count_part2 += 1
print('Part1:', valid_passport_count_part1)
print('Part2:', valid_passport_count_part2)
if __name__ == '__main__':
main() | 2020/day4/day4.py | import re
birth_year = 'byr'
issue_year = 'iyr'
expiration_year = 'eyr'
height = 'hgt'
hair_color = 'hcl'
eye_color = 'ecl'
passport_id = 'pid'
country_id = 'cid'
def is_valid_passport_part1(passport: dict):
mandatory_passport_fields = [birth_year, issue_year, expiration_year, height, hair_color, eye_color, passport_id]
for field in mandatory_passport_fields:
if field not in passport:
return False
return True
def is_valid_passport_part2(passport: dict):
mandatory_passport_fields = [birth_year, issue_year, expiration_year, height, hair_color, eye_color, passport_id]
for field in mandatory_passport_fields:
if field not in passport:
return False
return validate_fields(passport)
def validate_fields(passport: dict):
if not validate_birth_year(passport[birth_year]):
return False
if not validate_issue_year(passport[issue_year]):
return False
if not validate_expiration_year(passport[expiration_year]):
return False
if not validate_height(passport[height]):
return False
if not validate_hair_color(passport[hair_color]):
return False
if not validate_eye_color(passport[eye_color]):
return False
if not validate_passport_id(passport[passport_id]):
return False
return True
def validate_birth_year(birth_year):
birth_year = int(birth_year)
if 1920 <= birth_year <= 2002:
return True
return False
def validate_issue_year(issue_year):
issue_year = int(issue_year)
if 2010 <= issue_year <= 2020:
return True
return False
def validate_expiration_year(expiration_year):
expiration_year = int(expiration_year)
if 2020 <= expiration_year <= 2030:
return True
return False
def validate_height(height: str):
if height[-2:] == 'cm':
nr = int(height[:height.index('cm')])
if 150 <= nr <= 193:
return True
if height[-2:] == 'in':
nr = int(height[:height.index('in')])
if 59 <= nr <= 76:
return True
return False
def validate_hair_color(hair_color: str):
if re.search('^#[0-9a-f]{6}$', hair_color) is not None:
return True
return False
def validate_eye_color(eye_color):
if eye_color in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:
return True
return False
def validate_passport_id(passport_id: str):
if re.search('^[0-9]{9}$', passport_id) is not None:
return True
return False
def main():
with open('in.txt') as file:
data = file.read()
passports_info = data.split('\n\n')
passports = []
for passport_info in passports_info:
passport_fields = passport_info.split()
passports.append(passport_fields)
for passport_index in range(len(passports)):
current_passport = passports[passport_index]
current_passport_dict = {field.split(':')[0]: field.split(':')[1] for field in current_passport}
passports[passport_index] = current_passport_dict
valid_passport_count_part1 = 0
valid_passport_count_part2 = 0
for passport in passports:
if is_valid_passport_part1(passport):
valid_passport_count_part1 += 1
if is_valid_passport_part2(passport):
valid_passport_count_part2 += 1
print('Part1:', valid_passport_count_part1)
print('Part2:', valid_passport_count_part2)
if __name__ == '__main__':
main() | 0.353317 | 0.105257 |
from . import core
from .. import mongo, logger, celery
from flask import (
render_template, redirect, url_for, jsonify, request, Response
)
from flask_login import login_required, current_user
from flask import current_app as app
from .forms import AccountSettingsForm, ChangePasswordForm
from ..utils.helpers import paranoid_clean
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash
@core.route('/')
@login_required
def root():
"""Render the index page."""
logger.debug("User: %s" % (current_user.get_id()))
c = mongo.db[app.config['USERS_COLLECTION']]
user = c.find_one({'username': current_user.get_id()})
return render_template('index.html', name=user.get('first_name'))
@core.route('/async-test')
@login_required
def heartbeat_example():
"""Run an async job in the background."""
logger.debug("Executing the heartbeat task and returning")
celery.send_task('heartbeat')
return render_template('index.html', name="HEARTBEAT")
@core.route('/settings')
@login_required
def settings():
"""Render the settings page."""
c = mongo.db[app.config['USERS_COLLECTION']]
user = c.find_one({'username': current_user.get_id()})
if not user:
return render_template()
user['id'] = str(user['_id'])
user.pop('_id', None)
return render_template('settings.html', user=user)
@core.route('/account/settings/update', methods=['POST'])
@login_required
def update_account():
"""Update account settings."""
logger.debug("User account settings update")
form = AccountSettingsForm(request.form)
if form.validate():
if 'user_id' not in request.form:
return jsonify({'success': False,
'error': 'ID not found in edit!'})
logger.debug("Form validated")
edit_id = paranoid_clean(request.form.get('user_id'))
c = mongo.db[app.config['USERS_COLLECTION']]
item = {'first_name': form.first_name.data,
'last_name': form.last_name.data,
'email': form.email.data}
c.update({'_id': ObjectId(edit_id)}, {'$set': item})
logger.debug("User account updated")
return redirect(url_for('core.settings'))
errors = ','.join([value[0] for value in form.errors.values()])
return jsonify({'errors': errors})
@core.route('/account/settings/change-password', methods=['POST'])
@login_required
def account_change_password():
"""Update account password."""
form = ChangePasswordForm(request.form)
if form.validate():
if 'user_id' not in request.form:
return jsonify({'success': False,
'error': 'ID not found in edit!'})
edit_id = paranoid_clean(request.form.get('user_id'))
c = mongo.db[app.config['USERS_COLLECTION']]
item = {'password': <PASSWORD>_password_hash(form.password.data)}
c.update({'_id': ObjectId(edit_id)}, {'$set': item})
return redirect(url_for('core.settings'))
errors = ','.join([value[0] for value in form.errors.values()])
return jsonify({'errors': errors}) | app/core/generic.py | from . import core
from .. import mongo, logger, celery
from flask import (
render_template, redirect, url_for, jsonify, request, Response
)
from flask_login import login_required, current_user
from flask import current_app as app
from .forms import AccountSettingsForm, ChangePasswordForm
from ..utils.helpers import paranoid_clean
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash
@core.route('/')
@login_required
def root():
"""Render the index page."""
logger.debug("User: %s" % (current_user.get_id()))
c = mongo.db[app.config['USERS_COLLECTION']]
user = c.find_one({'username': current_user.get_id()})
return render_template('index.html', name=user.get('first_name'))
@core.route('/async-test')
@login_required
def heartbeat_example():
"""Run an async job in the background."""
logger.debug("Executing the heartbeat task and returning")
celery.send_task('heartbeat')
return render_template('index.html', name="HEARTBEAT")
@core.route('/settings')
@login_required
def settings():
"""Render the settings page."""
c = mongo.db[app.config['USERS_COLLECTION']]
user = c.find_one({'username': current_user.get_id()})
if not user:
return render_template()
user['id'] = str(user['_id'])
user.pop('_id', None)
return render_template('settings.html', user=user)
@core.route('/account/settings/update', methods=['POST'])
@login_required
def update_account():
"""Update account settings."""
logger.debug("User account settings update")
form = AccountSettingsForm(request.form)
if form.validate():
if 'user_id' not in request.form:
return jsonify({'success': False,
'error': 'ID not found in edit!'})
logger.debug("Form validated")
edit_id = paranoid_clean(request.form.get('user_id'))
c = mongo.db[app.config['USERS_COLLECTION']]
item = {'first_name': form.first_name.data,
'last_name': form.last_name.data,
'email': form.email.data}
c.update({'_id': ObjectId(edit_id)}, {'$set': item})
logger.debug("User account updated")
return redirect(url_for('core.settings'))
errors = ','.join([value[0] for value in form.errors.values()])
return jsonify({'errors': errors})
@core.route('/account/settings/change-password', methods=['POST'])
@login_required
def account_change_password():
"""Update account password."""
form = ChangePasswordForm(request.form)
if form.validate():
if 'user_id' not in request.form:
return jsonify({'success': False,
'error': 'ID not found in edit!'})
edit_id = paranoid_clean(request.form.get('user_id'))
c = mongo.db[app.config['USERS_COLLECTION']]
item = {'password': <PASSWORD>_password_hash(form.password.data)}
c.update({'_id': ObjectId(edit_id)}, {'$set': item})
return redirect(url_for('core.settings'))
errors = ','.join([value[0] for value in form.errors.values()])
return jsonify({'errors': errors}) | 0.455441 | 0.049889 |
from unittest.mock import patch
from homeassistant.components.siren import ATTR_DURATION, DOMAIN as SIREN_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from .test_gateway import (
DECONZ_WEB_REQUEST,
mock_deconz_put_request,
setup_deconz_integration,
)
async def test_sirens(hass, aioclient_mock, mock_deconz_websocket):
"""Test that siren entities are created."""
data = {
"lights": {
"1": {
"name": "Warning device",
"type": "Warning device",
"state": {"alert": "lselect", "reachable": True},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"name": "Unsupported siren",
"type": "Not a siren",
"state": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:01-00",
},
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 2
assert hass.states.get("siren.warning_device").state == STATE_ON
assert not hass.states.get("siren.unsupported_siren")
event_changed_light = {
"t": "event",
"e": "changed",
"r": "lights",
"id": "1",
"state": {"alert": None},
}
await mock_deconz_websocket(data=event_changed_light)
await hass.async_block_till_done()
assert hass.states.get("siren.warning_device").state == STATE_OFF
# Verify service calls
mock_deconz_put_request(aioclient_mock, config_entry.data, "/lights/1/state")
# Service turn on siren
await hass.services.async_call(
SIREN_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "siren.warning_device"},
blocking=True,
)
assert aioclient_mock.mock_calls[1][2] == {"alert": "lselect"}
# Service turn off siren
await hass.services.async_call(
SIREN_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "siren.warning_device"},
blocking=True,
)
assert aioclient_mock.mock_calls[2][2] == {"alert": "none"}
# Service turn on siren with duration
await hass.services.async_call(
SIREN_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "siren.warning_device", ATTR_DURATION: 10},
blocking=True,
)
assert aioclient_mock.mock_calls[3][2] == {"alert": "lselect", "ontime": 100}
await hass.config_entries.async_unload(config_entry.entry_id)
states = hass.states.async_all()
assert len(states) == 2
for state in states:
assert state.state == STATE_UNAVAILABLE
await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0 | tests/components/deconz/test_siren.py |
from unittest.mock import patch
from homeassistant.components.siren import ATTR_DURATION, DOMAIN as SIREN_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from .test_gateway import (
DECONZ_WEB_REQUEST,
mock_deconz_put_request,
setup_deconz_integration,
)
async def test_sirens(hass, aioclient_mock, mock_deconz_websocket):
"""Test that siren entities are created."""
data = {
"lights": {
"1": {
"name": "Warning device",
"type": "Warning device",
"state": {"alert": "lselect", "reachable": True},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"name": "Unsupported siren",
"type": "Not a siren",
"state": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:01-00",
},
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 2
assert hass.states.get("siren.warning_device").state == STATE_ON
assert not hass.states.get("siren.unsupported_siren")
event_changed_light = {
"t": "event",
"e": "changed",
"r": "lights",
"id": "1",
"state": {"alert": None},
}
await mock_deconz_websocket(data=event_changed_light)
await hass.async_block_till_done()
assert hass.states.get("siren.warning_device").state == STATE_OFF
# Verify service calls
mock_deconz_put_request(aioclient_mock, config_entry.data, "/lights/1/state")
# Service turn on siren
await hass.services.async_call(
SIREN_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "siren.warning_device"},
blocking=True,
)
assert aioclient_mock.mock_calls[1][2] == {"alert": "lselect"}
# Service turn off siren
await hass.services.async_call(
SIREN_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "siren.warning_device"},
blocking=True,
)
assert aioclient_mock.mock_calls[2][2] == {"alert": "none"}
# Service turn on siren with duration
await hass.services.async_call(
SIREN_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "siren.warning_device", ATTR_DURATION: 10},
blocking=True,
)
assert aioclient_mock.mock_calls[3][2] == {"alert": "lselect", "ontime": 100}
await hass.config_entries.async_unload(config_entry.entry_id)
states = hass.states.async_all()
assert len(states) == 2
for state in states:
assert state.state == STATE_UNAVAILABLE
await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0 | 0.713531 | 0.510069 |
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2016 Black Radley Limited."
import helpers_list
import helpers_geo
ceremonial_counties_of_england = helpers_list.get_ceremonial_counties_of_england()
ceremonial_counties_of_england = ['Bristol', 'Cornwall', 'Devon', 'Dorset', 'Gloucestershire', 'Somerset', 'Wiltshire']
ceremonial_counties_of_england = ['Leicestershire']
ceremonial_counties_of_england = ['Essex']
for county in ceremonial_counties_of_england:
print '\n---'
museum_list_file_name = './data/List_of_museums_' + county + '.txt'
output_file_name = './data/List_of_museums_' + county + '_.txt'
print 'Output to: ' + output_file_name
with open(wikipedia_file_name, "r") as wikipedia_file, codecs.open(output_file_name, "w", "utf-8") as output_file:
output_file.write('name\tcounty\ttype\twikipedia_link\n')
output_file_name = '../upload/List_of_museums.txt'
print 'Output to: ' + output_file_name
output_file = open(output_file_name,"wb")
output_file.write('name\tlink\ttype\ticon\tlat\tlng\tcounty\n')
# open a file for output date
for county in counties:
# open the wiki page
wiki_file_name = '../download/List_of_museums_in_' + county + '.wiki'
print 'Input from: ' + wiki_file_name
the_page = open(wiki_file_name, "r").read()
type_column = helpers_list.get_museum_type_column(the_page)
# Get a list of the types
types = helpers_list.get_museum_types(the_page, type_column)
# Get a list of the museums
museums = helpers_list.get_museums_list(the_page)
# Merge the two lists in to one
merged_list = zip(museums, types)
# Write out the file, rearranging and geocoding as you go
for row in merged_list:
name = row[0][0]
link = row[0][1]
type = row[1]
classified_type = helpers_list.classify_type(type)
iconized_type = helpers_list.iconize_type(classified_type)
location = helpers_geo.get_wikipedia_location(link)
if location[0] == 0.0: # then there is no location in Wikipedia
location = helpers_geo.get_google_location(name, county)
lat = location[0]
lng = location[1]
print name
output_file.write(name + '\t' + link + '\t' + classified_type + '\t' + iconized_type +
'\t' + str(lat) + '\t' + str(lng) + '\t' + county + '\n')
output_file.close() | data/src/3_geocode_museums_from_wikipedia.py |
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2016 Black Radley Limited."
import helpers_list
import helpers_geo
ceremonial_counties_of_england = helpers_list.get_ceremonial_counties_of_england()
ceremonial_counties_of_england = ['Bristol', 'Cornwall', 'Devon', 'Dorset', 'Gloucestershire', 'Somerset', 'Wiltshire']
ceremonial_counties_of_england = ['Leicestershire']
ceremonial_counties_of_england = ['Essex']
for county in ceremonial_counties_of_england:
print '\n---'
museum_list_file_name = './data/List_of_museums_' + county + '.txt'
output_file_name = './data/List_of_museums_' + county + '_.txt'
print 'Output to: ' + output_file_name
with open(wikipedia_file_name, "r") as wikipedia_file, codecs.open(output_file_name, "w", "utf-8") as output_file:
output_file.write('name\tcounty\ttype\twikipedia_link\n')
output_file_name = '../upload/List_of_museums.txt'
print 'Output to: ' + output_file_name
output_file = open(output_file_name,"wb")
output_file.write('name\tlink\ttype\ticon\tlat\tlng\tcounty\n')
# open a file for output date
for county in counties:
# open the wiki page
wiki_file_name = '../download/List_of_museums_in_' + county + '.wiki'
print 'Input from: ' + wiki_file_name
the_page = open(wiki_file_name, "r").read()
type_column = helpers_list.get_museum_type_column(the_page)
# Get a list of the types
types = helpers_list.get_museum_types(the_page, type_column)
# Get a list of the museums
museums = helpers_list.get_museums_list(the_page)
# Merge the two lists in to one
merged_list = zip(museums, types)
# Write out the file, rearranging and geocoding as you go
for row in merged_list:
name = row[0][0]
link = row[0][1]
type = row[1]
classified_type = helpers_list.classify_type(type)
iconized_type = helpers_list.iconize_type(classified_type)
location = helpers_geo.get_wikipedia_location(link)
if location[0] == 0.0: # then there is no location in Wikipedia
location = helpers_geo.get_google_location(name, county)
lat = location[0]
lng = location[1]
print name
output_file.write(name + '\t' + link + '\t' + classified_type + '\t' + iconized_type +
'\t' + str(lat) + '\t' + str(lng) + '\t' + county + '\n')
output_file.close() | 0.256832 | 0.138695 |
from xstac import xarray_to_stac, fix_attrs
import xarray as xr
import numpy as np
import pandas as pd
import pytest
import pystac
data = np.empty((40, 584, 284), dtype="float32")
x = xr.DataArray(
np.arange(-5802250.0, -5519250 + 1000, 1000),
name="x",
dims="x",
attrs={
"units": "m",
"long_name": "x coordinate of projection",
"standard_name": "projection_x_coordinate",
},
)
y = xr.DataArray(
np.arange(-39000.0, -622000.0 - 1000, -1000.0),
name="y",
dims="y",
attrs={
"units": "m",
"long_name": "y coordinate of projection",
"standard_name": "projection_y_coordinate",
},
)
time = xr.DataArray(
pd.date_range(start="1980-07-01", freq="A-JUL", periods=40),
name="time",
dims="time",
attrs={
"standard_name": "time",
"bounds": "time_bnds",
"long_name": "24-hour day based on local time",
},
)
lat = xr.DataArray(
np.empty((584, 284)),
coords={"y": y, "x": x},
dims=("y", "x"),
name="lat",
attrs={
"units": "degrees_north",
"long_name": "latitude coordinate",
"standard_name": "latitude",
},
)
lon = xr.DataArray(
np.empty((584, 284)),
coords={"y": y, "x": x},
dims=("y", "x"),
name="lon",
attrs={
"units": "degrees_east",
"long_name": "longitude coordinate",
"standard_name": "longitude",
},
)
coords = dict(time=time, y=y, x=x, lat=lat, lon=lon)
@pytest.fixture
def ds():
ds = xr.Dataset(
{
"prcp": xr.DataArray(
data,
coords=coords,
dims=("time", "y", "x"),
attrs={
"grid_mapping": "lambert_conformal_conic",
"cell_methods": "area: mean time: sum within days time: sum over days",
"units": "mm",
"long_name": "annual total precipitation",
},
),
"swe": xr.DataArray(data, coords=coords, dims=("time", "y", "x")),
"time_bnds": xr.DataArray(
np.empty((40, 2), dtype="datetime64[ns]"),
name="time_bnds",
coords={"time": time},
dims=("time", "nv"),
attrs={"time": "days since 1950-01-01 00:00:00"},
),
"lambert_conformal_conic": xr.DataArray(
np.array(-32767, dtype="int16"),
name="lambert_conformal_conic",
attrs={
"grid_mapping_name": "lambert_conformal_conic",
"longitude_of_central_meridian": -100.0,
"latitude_of_projection_origin": 42.5,
"false_easting": 0.0,
"false_northing": 0.0,
"standard_parallel": np.array([25.0, 60.0]),
"semi_major_axis": 6378137.0,
"inverse_flattening": 298.257223563,
},
),
},
attrs={
"Conventions": "CF-1.6",
"Version_data": "Daymet Data Version 4.0",
"Version_software": "Daymet Software Version 4.0",
"citation": "Please see http://daymet.ornl.gov/ for current Daymet data citation information",
"references": "Please see http://daymet.ornl.gov/ for current information on Daymet references",
"source": "Daymet Software Version 4.0",
"start_year": [1980],
},
)
return ds
def test_xarray_to_stac(ds):
ds = fix_attrs(ds)
template = {
"id": "id",
"type": "Collection",
"links": [],
"description": "description",
"license": "license",
"stac_version": "1.0.0",
}
result = xarray_to_stac(
ds,
template=template,
temporal_dimension="time",
x_dimension="x",
y_dimension="y",
)
assert result.id == "id"
assert isinstance(result, pystac.Collection)
assert result.description == "description"
assert result.license == "license"
dimensions = result.extra_fields["cube:dimensions"]
expected = {
"time": {
"type": "temporal",
"description": "24-hour day based on local time",
# "values": None,
"extent": ["1980-07-31T00:00:00Z", "2019-07-31T00:00:00Z"],
"step": None,
},
"x": {
"type": "spatial",
"axis": "x",
"description": "x coordinate of projection",
"extent": [-5802250.0, -5519250.0],
"values": None,
"step": 1000.0,
"reference_system": {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "ProjectedCRS",
"name": "undefined",
"base_crs": {
"name": "undefined",
"datum": {
"type": "GeodeticReferenceFrame",
"name": "undefined",
"ellipsoid": {
"name": "undefined",
"semi_major_axis": 6378137,
"inverse_flattening": 298.257223563,
},
},
"coordinate_system": {
"subtype": "ellipsoidal",
"axis": [
{
"name": "Longitude",
"abbreviation": "lon",
"direction": "east",
"unit": "degree",
},
{
"name": "Latitude",
"abbreviation": "lat",
"direction": "north",
"unit": "degree",
},
],
},
},
"conversion": {
"name": "unknown",
"method": {
"name": "Lambert Conic Conformal (2SP)",
"id": {"authority": "EPSG", "code": 9802},
},
"parameters": [
{
"name": "Latitude of 1st standard parallel",
"value": 25,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8823},
},
{
"name": "Latitude of 2nd standard parallel",
"value": 60,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8824},
},
{
"name": "Latitude of false origin",
"value": 42.5,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8821},
},
{
"name": "Longitude of false origin",
"value": -100,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8822},
},
{
"name": "Easting at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8826},
},
{
"name": "Northing at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8827},
},
],
},
"coordinate_system": {
"subtype": "Cartesian",
"axis": [
{
"name": "Easting",
"abbreviation": "E",
"direction": "east",
"unit": "metre",
},
{
"name": "Northing",
"abbreviation": "N",
"direction": "north",
"unit": "metre",
},
],
},
},
},
"y": {
"type": "spatial",
"axis": "y",
"description": "y coordinate of projection",
"extent": [-622000.0, -39000.0],
"values": None,
"step": -1000.0,
"reference_system": {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "ProjectedCRS",
"name": "undefined",
"base_crs": {
"name": "undefined",
"datum": {
"type": "GeodeticReferenceFrame",
"name": "undefined",
"ellipsoid": {
"name": "undefined",
"semi_major_axis": 6378137,
"inverse_flattening": 298.257223563,
},
},
"coordinate_system": {
"subtype": "ellipsoidal",
"axis": [
{
"name": "Longitude",
"abbreviation": "lon",
"direction": "east",
"unit": "degree",
},
{
"name": "Latitude",
"abbreviation": "lat",
"direction": "north",
"unit": "degree",
},
],
},
},
"conversion": {
"name": "unknown",
"method": {
"name": "Lambert Conic Conformal (2SP)",
"id": {"authority": "EPSG", "code": 9802},
},
"parameters": [
{
"name": "Latitude of 1st standard parallel",
"value": 25,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8823},
},
{
"name": "Latitude of 2nd standard parallel",
"value": 60,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8824},
},
{
"name": "Latitude of false origin",
"value": 42.5,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8821},
},
{
"name": "Longitude of false origin",
"value": -100,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8822},
},
{
"name": "Easting at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8826},
},
{
"name": "Northing at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8827},
},
],
},
"coordinate_system": {
"subtype": "Cartesian",
"axis": [
{
"name": "Easting",
"abbreviation": "E",
"direction": "east",
"unit": "metre",
},
{
"name": "Northing",
"abbreviation": "N",
"direction": "north",
"unit": "metre",
},
],
},
},
},
}
assert dimensions == expected
variables = result.extra_fields["cube:variables"]
expected = {
"lat": {
"type": "auxiliary",
"description": "latitude coordinate",
"dimensions": ["y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": "degrees_north",
"shape": [584, 284],
"chunks": None,
"attrs": {
"units": "degrees_north",
"long_name": "latitude coordinate",
"standard_name": "latitude",
},
},
"lon": {
"type": "auxiliary",
"description": "longitude coordinate",
"dimensions": ["y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": "degrees_east",
"shape": [584, 284],
"chunks": None,
"attrs": {
"units": "degrees_east",
"long_name": "longitude coordinate",
"standard_name": "longitude",
},
},
"prcp": {
"type": "data",
"description": "annual total precipitation",
"dimensions": ["time", "y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": "mm",
"shape": [40, 584, 284],
"chunks": None,
"attrs": {
"grid_mapping": "lambert_conformal_conic",
"cell_methods": "area: mean time: sum within days time: sum over days",
"units": "mm",
"long_name": "annual total precipitation",
},
},
"swe": {
"type": "data",
"description": None,
"dimensions": ["time", "y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": None,
"shape": [40, 584, 284],
"chunks": None,
"attrs": {},
},
"time_bnds": {
"type": "data",
"description": None,
"dimensions": ["time", "nv"],
"values": None,
"extent": None,
"step": None,
"unit": None,
"shape": [40, 2],
"chunks": None,
"attrs": {"time": "days since 1950-01-01 00:00:00"},
},
"lambert_conformal_conic": {
"type": "data",
"description": None,
"dimensions": [],
"values": None,
"extent": None,
"step": None,
"unit": None,
"shape": [],
"chunks": None,
"attrs": {
"grid_mapping_name": "lambert_conformal_conic",
"longitude_of_central_meridian": -100.0,
"latitude_of_projection_origin": 42.5,
"false_easting": 0.0,
"false_northing": 0.0,
"standard_parallel": [25.0, 60.0],
"semi_major_axis": 6378137.0,
"inverse_flattening": 298.257223563,
},
},
}
assert result.extra_fields["cube:variables"] == expected
def test_validation_with_none():
# https://github.com/TomAugspurger/xstac/issues/9
template = {
"type": "Collection",
"id": "cesm2-lens",
"stac_version": "1.0.0",
"description": "desc",
"stac_extensions": [
"https://stac-extensions.github.io/datacube/v1.0.0/schema.json"
],
"extent": {
"spatial": {"bbox": [[-180, -90, 180, 90]]},
"temporal": {
"interval": [["1851-01-01T00:00:00Z", "1851-01-01T00:00:00Z"]]
},
},
"providers": [],
"license": "CC0-1.0",
"links": [],
}
ds = xr.Dataset(
{
"data": xr.DataArray(
[1, 2],
dims=("time",),
coords={"time": pd.to_datetime(["2021-01-01", "2021-01-02"])},
)
}
)
ds.time.attrs["long_name"] = "time"
c = xarray_to_stac(ds, template, temporal_dimension="time")
c.normalize_hrefs("/")
c.validate() | test_xstac.py | from xstac import xarray_to_stac, fix_attrs
import xarray as xr
import numpy as np
import pandas as pd
import pytest
import pystac
data = np.empty((40, 584, 284), dtype="float32")
x = xr.DataArray(
np.arange(-5802250.0, -5519250 + 1000, 1000),
name="x",
dims="x",
attrs={
"units": "m",
"long_name": "x coordinate of projection",
"standard_name": "projection_x_coordinate",
},
)
y = xr.DataArray(
np.arange(-39000.0, -622000.0 - 1000, -1000.0),
name="y",
dims="y",
attrs={
"units": "m",
"long_name": "y coordinate of projection",
"standard_name": "projection_y_coordinate",
},
)
time = xr.DataArray(
pd.date_range(start="1980-07-01", freq="A-JUL", periods=40),
name="time",
dims="time",
attrs={
"standard_name": "time",
"bounds": "time_bnds",
"long_name": "24-hour day based on local time",
},
)
lat = xr.DataArray(
np.empty((584, 284)),
coords={"y": y, "x": x},
dims=("y", "x"),
name="lat",
attrs={
"units": "degrees_north",
"long_name": "latitude coordinate",
"standard_name": "latitude",
},
)
lon = xr.DataArray(
np.empty((584, 284)),
coords={"y": y, "x": x},
dims=("y", "x"),
name="lon",
attrs={
"units": "degrees_east",
"long_name": "longitude coordinate",
"standard_name": "longitude",
},
)
coords = dict(time=time, y=y, x=x, lat=lat, lon=lon)
@pytest.fixture
def ds():
ds = xr.Dataset(
{
"prcp": xr.DataArray(
data,
coords=coords,
dims=("time", "y", "x"),
attrs={
"grid_mapping": "lambert_conformal_conic",
"cell_methods": "area: mean time: sum within days time: sum over days",
"units": "mm",
"long_name": "annual total precipitation",
},
),
"swe": xr.DataArray(data, coords=coords, dims=("time", "y", "x")),
"time_bnds": xr.DataArray(
np.empty((40, 2), dtype="datetime64[ns]"),
name="time_bnds",
coords={"time": time},
dims=("time", "nv"),
attrs={"time": "days since 1950-01-01 00:00:00"},
),
"lambert_conformal_conic": xr.DataArray(
np.array(-32767, dtype="int16"),
name="lambert_conformal_conic",
attrs={
"grid_mapping_name": "lambert_conformal_conic",
"longitude_of_central_meridian": -100.0,
"latitude_of_projection_origin": 42.5,
"false_easting": 0.0,
"false_northing": 0.0,
"standard_parallel": np.array([25.0, 60.0]),
"semi_major_axis": 6378137.0,
"inverse_flattening": 298.257223563,
},
),
},
attrs={
"Conventions": "CF-1.6",
"Version_data": "Daymet Data Version 4.0",
"Version_software": "Daymet Software Version 4.0",
"citation": "Please see http://daymet.ornl.gov/ for current Daymet data citation information",
"references": "Please see http://daymet.ornl.gov/ for current information on Daymet references",
"source": "Daymet Software Version 4.0",
"start_year": [1980],
},
)
return ds
def test_xarray_to_stac(ds):
ds = fix_attrs(ds)
template = {
"id": "id",
"type": "Collection",
"links": [],
"description": "description",
"license": "license",
"stac_version": "1.0.0",
}
result = xarray_to_stac(
ds,
template=template,
temporal_dimension="time",
x_dimension="x",
y_dimension="y",
)
assert result.id == "id"
assert isinstance(result, pystac.Collection)
assert result.description == "description"
assert result.license == "license"
dimensions = result.extra_fields["cube:dimensions"]
expected = {
"time": {
"type": "temporal",
"description": "24-hour day based on local time",
# "values": None,
"extent": ["1980-07-31T00:00:00Z", "2019-07-31T00:00:00Z"],
"step": None,
},
"x": {
"type": "spatial",
"axis": "x",
"description": "x coordinate of projection",
"extent": [-5802250.0, -5519250.0],
"values": None,
"step": 1000.0,
"reference_system": {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "ProjectedCRS",
"name": "undefined",
"base_crs": {
"name": "undefined",
"datum": {
"type": "GeodeticReferenceFrame",
"name": "undefined",
"ellipsoid": {
"name": "undefined",
"semi_major_axis": 6378137,
"inverse_flattening": 298.257223563,
},
},
"coordinate_system": {
"subtype": "ellipsoidal",
"axis": [
{
"name": "Longitude",
"abbreviation": "lon",
"direction": "east",
"unit": "degree",
},
{
"name": "Latitude",
"abbreviation": "lat",
"direction": "north",
"unit": "degree",
},
],
},
},
"conversion": {
"name": "unknown",
"method": {
"name": "Lambert Conic Conformal (2SP)",
"id": {"authority": "EPSG", "code": 9802},
},
"parameters": [
{
"name": "Latitude of 1st standard parallel",
"value": 25,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8823},
},
{
"name": "Latitude of 2nd standard parallel",
"value": 60,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8824},
},
{
"name": "Latitude of false origin",
"value": 42.5,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8821},
},
{
"name": "Longitude of false origin",
"value": -100,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8822},
},
{
"name": "Easting at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8826},
},
{
"name": "Northing at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8827},
},
],
},
"coordinate_system": {
"subtype": "Cartesian",
"axis": [
{
"name": "Easting",
"abbreviation": "E",
"direction": "east",
"unit": "metre",
},
{
"name": "Northing",
"abbreviation": "N",
"direction": "north",
"unit": "metre",
},
],
},
},
},
"y": {
"type": "spatial",
"axis": "y",
"description": "y coordinate of projection",
"extent": [-622000.0, -39000.0],
"values": None,
"step": -1000.0,
"reference_system": {
"$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
"type": "ProjectedCRS",
"name": "undefined",
"base_crs": {
"name": "undefined",
"datum": {
"type": "GeodeticReferenceFrame",
"name": "undefined",
"ellipsoid": {
"name": "undefined",
"semi_major_axis": 6378137,
"inverse_flattening": 298.257223563,
},
},
"coordinate_system": {
"subtype": "ellipsoidal",
"axis": [
{
"name": "Longitude",
"abbreviation": "lon",
"direction": "east",
"unit": "degree",
},
{
"name": "Latitude",
"abbreviation": "lat",
"direction": "north",
"unit": "degree",
},
],
},
},
"conversion": {
"name": "unknown",
"method": {
"name": "Lambert Conic Conformal (2SP)",
"id": {"authority": "EPSG", "code": 9802},
},
"parameters": [
{
"name": "Latitude of 1st standard parallel",
"value": 25,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8823},
},
{
"name": "Latitude of 2nd standard parallel",
"value": 60,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8824},
},
{
"name": "Latitude of false origin",
"value": 42.5,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8821},
},
{
"name": "Longitude of false origin",
"value": -100,
"unit": "degree",
"id": {"authority": "EPSG", "code": 8822},
},
{
"name": "Easting at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8826},
},
{
"name": "Northing at false origin",
"value": 0,
"unit": "metre",
"id": {"authority": "EPSG", "code": 8827},
},
],
},
"coordinate_system": {
"subtype": "Cartesian",
"axis": [
{
"name": "Easting",
"abbreviation": "E",
"direction": "east",
"unit": "metre",
},
{
"name": "Northing",
"abbreviation": "N",
"direction": "north",
"unit": "metre",
},
],
},
},
},
}
assert dimensions == expected
variables = result.extra_fields["cube:variables"]
expected = {
"lat": {
"type": "auxiliary",
"description": "latitude coordinate",
"dimensions": ["y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": "degrees_north",
"shape": [584, 284],
"chunks": None,
"attrs": {
"units": "degrees_north",
"long_name": "latitude coordinate",
"standard_name": "latitude",
},
},
"lon": {
"type": "auxiliary",
"description": "longitude coordinate",
"dimensions": ["y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": "degrees_east",
"shape": [584, 284],
"chunks": None,
"attrs": {
"units": "degrees_east",
"long_name": "longitude coordinate",
"standard_name": "longitude",
},
},
"prcp": {
"type": "data",
"description": "annual total precipitation",
"dimensions": ["time", "y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": "mm",
"shape": [40, 584, 284],
"chunks": None,
"attrs": {
"grid_mapping": "lambert_conformal_conic",
"cell_methods": "area: mean time: sum within days time: sum over days",
"units": "mm",
"long_name": "annual total precipitation",
},
},
"swe": {
"type": "data",
"description": None,
"dimensions": ["time", "y", "x"],
"values": None,
"extent": None,
"step": None,
"unit": None,
"shape": [40, 584, 284],
"chunks": None,
"attrs": {},
},
"time_bnds": {
"type": "data",
"description": None,
"dimensions": ["time", "nv"],
"values": None,
"extent": None,
"step": None,
"unit": None,
"shape": [40, 2],
"chunks": None,
"attrs": {"time": "days since 1950-01-01 00:00:00"},
},
"lambert_conformal_conic": {
"type": "data",
"description": None,
"dimensions": [],
"values": None,
"extent": None,
"step": None,
"unit": None,
"shape": [],
"chunks": None,
"attrs": {
"grid_mapping_name": "lambert_conformal_conic",
"longitude_of_central_meridian": -100.0,
"latitude_of_projection_origin": 42.5,
"false_easting": 0.0,
"false_northing": 0.0,
"standard_parallel": [25.0, 60.0],
"semi_major_axis": 6378137.0,
"inverse_flattening": 298.257223563,
},
},
}
assert result.extra_fields["cube:variables"] == expected
def test_validation_with_none():
# https://github.com/TomAugspurger/xstac/issues/9
template = {
"type": "Collection",
"id": "cesm2-lens",
"stac_version": "1.0.0",
"description": "desc",
"stac_extensions": [
"https://stac-extensions.github.io/datacube/v1.0.0/schema.json"
],
"extent": {
"spatial": {"bbox": [[-180, -90, 180, 90]]},
"temporal": {
"interval": [["1851-01-01T00:00:00Z", "1851-01-01T00:00:00Z"]]
},
},
"providers": [],
"license": "CC0-1.0",
"links": [],
}
ds = xr.Dataset(
{
"data": xr.DataArray(
[1, 2],
dims=("time",),
coords={"time": pd.to_datetime(["2021-01-01", "2021-01-02"])},
)
}
)
ds.time.attrs["long_name"] = "time"
c = xarray_to_stac(ds, template, temporal_dimension="time")
c.normalize_hrefs("/")
c.validate() | 0.566019 | 0.519217 |
"""Analysis plugin to look up files in VirusTotal and tag events."""
from __future__ import unicode_literals
from plaso.analysis import interface
from plaso.analysis import logger
from plaso.analysis import manager
from plaso.lib import errors
class VirusTotalAnalyzer(interface.HTTPHashAnalyzer):
  """Hash analyzer that looks up file hashes using the VirusTotal web API."""

  _VIRUSTOTAL_API_REPORT_URL = (
      'https://www.virustotal.com/vtapi/v2/file/report')

  # SHA-256 of the EICAR test file, used as a known-present probe value.
  _EICAR_SHA256 = (
      '275a021bbfb6489e54d471899f7db9d1663fc695ec2fe2a2c4538aabf651fd0f')

  SUPPORTED_HASHES = ['md5', 'sha1', 'sha256']

  def __init__(self, hash_queue, hash_analysis_queue, **kwargs):
    """Initializes a VirusTotal analyzer.

    Args:
      hash_queue (Queue.queue): queue that contains hashes to be analyzed.
      hash_analysis_queue (Queue.queue): queue the analyzer will append
          HashAnalysis objects to.
    """
    super(VirusTotalAnalyzer, self).__init__(
        hash_queue, hash_analysis_queue, **kwargs)
    self._api_key = None
    self._checked_for_old_python_version = False

  def _QueryHashes(self, digests):
    """Queries VirusTotal for a batch of hashes.

    Args:
      digests (list[str]): hashes to look up.

    Returns:
      dict[str, object]: JSON response or None on error.
    """
    request_parameters = {
        'apikey': self._api_key, 'resource': ', '.join(digests)}

    try:
      return self.MakeRequestAndDecodeJSON(
          self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=request_parameters)

    except errors.ConnectionError as exception:
      logger.error('Unable to query VirusTotal with error: {0!s}.'.format(
          exception))
      return None

  def Analyze(self, hashes):
    """Looks up hashes in VirusTotal using the VirusTotal HTTP API.

    The API is documented here:
        https://www.virustotal.com/en/documentation/public-api/

    Args:
      hashes (list[str]): hashes to look up.

    Returns:
      list[HashAnalysis]: analysis results.

    Raises:
      RuntimeError: If the VirusTotal API key has not been set.
    """
    if not self._api_key:
      raise RuntimeError('No API key specified for VirusTotal lookup.')

    json_response = self._QueryHashes(hashes) or []

    # VirusTotal answers a single-hash query with one dictionary and a
    # multi-hash query with a list; normalize to a list before iterating.
    if isinstance(json_response, dict):
      json_response = [json_response]

    return [
        interface.HashAnalysis(result['resource'], result)
        for result in json_response]

  def SetAPIKey(self, api_key):
    """Sets the VirusTotal API key to use in queries.

    Args:
      api_key (str): VirusTotal API key
    """
    self._api_key = api_key

  def TestConnection(self):
    """Tests the connection to VirusTotal.

    Returns:
      bool: True if VirusTotal is reachable.
    """
    # Query a hash that is guaranteed to be known to VirusTotal; any
    # decoded response at all means the service is reachable.
    return self._QueryHashes([self._EICAR_SHA256]) is not None
class VirusTotalAnalysisPlugin(interface.HashTaggingAnalysisPlugin):
  """An analysis plugin for looking up hashes in VirusTotal."""

  # TODO: Check if there are other file types worth checking VirusTotal for.
  DATA_TYPES = ['pe:compilation:compilation_time']

  URLS = ['https://virustotal.com']

  NAME = 'virustotal'

  # Response codes returned by the VirusTotal file report API.
  _VIRUSTOTAL_NOT_PRESENT_RESPONSE_CODE = 0
  _VIRUSTOTAL_PRESENT_RESPONSE_CODE = 1
  _VIRUSTOTAL_ANALYSIS_PENDING_RESPONSE_CODE = -2

  def __init__(self):
    """Initializes a VirusTotal analysis plugin."""
    super(VirusTotalAnalysisPlugin, self).__init__(VirusTotalAnalyzer)
    self._api_key = None

  def EnableFreeAPIKeyRateLimit(self):
    """Configures rate limiting for queries to VirusTotal.

    The default rate limit for free VirusTotal API keys is 4 requests per
    minute.
    """
    self._analyzer.hashes_per_batch = 4
    self._analyzer.wait_after_analysis = 60
    # Allow the queue timeout to outlast one full analyzer wait cycle.
    self._analysis_queue_timeout = self._analyzer.wait_after_analysis + 1

  def GenerateLabels(self, hash_information):
    """Generates a list of strings that will be used in the event tag.

    Args:
      hash_information (dict[str, object]): the JSON decoded contents of the
          result of a VirusTotal lookup, as produced by the VirusTotalAnalyzer.

    Returns:
      list[str]: strings describing the results from VirusTotal.
    """
    response_code = hash_information['response_code']
    if response_code == self._VIRUSTOTAL_NOT_PRESENT_RESPONSE_CODE:
      return ['virustotal_not_present']

    if response_code == self._VIRUSTOTAL_PRESENT_RESPONSE_CODE:
      positives = hash_information['positives']
      if positives > 0:
        return ['virustotal_detections_{0:d}'.format(positives)]
      # Fixed label spelling: was 'virsutotal_no_detections', which broke
      # the 'virustotal_*' naming convention used by every other label.
      return ['virustotal_no_detections']

    if response_code == self._VIRUSTOTAL_ANALYSIS_PENDING_RESPONSE_CODE:
      return ['virustotal_analysis_pending']

    logger.error(
        'VirusTotal returned unknown response code {0!s}'.format(
            response_code))
    return ['virustotal_unknown_response_code_{0:d}'.format(response_code)]

  def SetAPIKey(self, api_key):
    """Sets the VirusTotal API key to use in queries.

    Args:
      api_key (str): VirusTotal API key
    """
    self._analyzer.SetAPIKey(api_key)

  def TestConnection(self):
    """Tests the connection to VirusTotal.

    Returns:
      bool: True if VirusTotal is reachable.
    """
    return self._analyzer.TestConnection()
manager.AnalysisPluginManager.RegisterPlugin(VirusTotalAnalysisPlugin) | plaso/analysis/virustotal.py | """Analysis plugin to look up files in VirusTotal and tag events."""
from __future__ import unicode_literals
from plaso.analysis import interface
from plaso.analysis import logger
from plaso.analysis import manager
from plaso.lib import errors
class VirusTotalAnalyzer(interface.HTTPHashAnalyzer):
  """Class that analyzes file hashes by consulting VirusTotal."""

  _VIRUSTOTAL_API_REPORT_URL = (
      'https://www.virustotal.com/vtapi/v2/file/report')

  # SHA-256 of the EICAR test file; used as a harmless connectivity probe.
  _EICAR_SHA256 = (
      '275a021bbfb6489e54d471899f7db9d1663fc695ec2fe2a2c4538aabf651fd0f')

  SUPPORTED_HASHES = ['md5', 'sha1', 'sha256']

  def __init__(self, hash_queue, hash_analysis_queue, **kwargs):
    """Initializes a VirusTotal analyzer.

    Args:
      hash_queue (Queue.queue): queue that contains hashes to be analyzed.
      hash_analysis_queue (Queue.queue): queue the analyzer will append
          HashAnalysis objects to.
    """
    super(VirusTotalAnalyzer, self).__init__(
        hash_queue, hash_analysis_queue, **kwargs)
    self._api_key = None
    self._checked_for_old_python_version = False

  def _QueryHashes(self, digests):
    """Queries VirusTotal for specific hashes.

    Args:
      digests (list[str]): hashes to look up.

    Returns:
      dict[str, object]: JSON response or None on error.
    """
    # The API accepts multiple hashes as one comma-separated 'resource'.
    url_parameters = {'apikey': self._api_key, 'resource': ', '.join(digests)}
    try:
      json_response = self.MakeRequestAndDecodeJSON(
          self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=url_parameters)
    except errors.ConnectionError as exception:
      json_response = None
      logger.error('Unable to query VirusTotal with error: {0!s}.'.format(
          exception))
    return json_response

  def Analyze(self, hashes):
    """Looks up hashes in VirusTotal using the VirusTotal HTTP API.

    The API is documented here:
      https://www.virustotal.com/en/documentation/public-api/

    Args:
      hashes (list[str]): hashes to look up.

    Returns:
      list[HashAnalysis]: analysis results.

    Raises:
      RuntimeError: If the VirusTotal API key has not been set.
    """
    if not self._api_key:
      raise RuntimeError('No API key specified for VirusTotal lookup.')
    hash_analyses = []
    json_response = self._QueryHashes(hashes) or []
    # VirusTotal returns a dictionary when a single hash is queried
    # and a list when multiple hashes are queried.
    if isinstance(json_response, dict):
      json_response = [json_response]
    for result in json_response:
      # 'resource' echoes back the hash that was queried.
      resource = result['resource']
      hash_analysis = interface.HashAnalysis(resource, result)
      hash_analyses.append(hash_analysis)
    return hash_analyses

  def SetAPIKey(self, api_key):
    """Sets the VirusTotal API key to use in queries.

    Args:
      api_key (str): VirusTotal API key
    """
    self._api_key = api_key

  def TestConnection(self):
    """Tests the connection to VirusTotal.

    Returns:
      bool: True if VirusTotal is reachable.
    """
    json_response = self._QueryHashes([self._EICAR_SHA256])
    return json_response is not None
class VirusTotalAnalysisPlugin(interface.HashTaggingAnalysisPlugin):
  """An analysis plugin for looking up hashes in VirusTotal."""

  # TODO: Check if there are other file types worth checking VirusTotal for.
  DATA_TYPES = ['pe:compilation:compilation_time']

  URLS = ['https://virustotal.com']

  NAME = 'virustotal'

  # Response codes returned by the VirusTotal file report API.
  _VIRUSTOTAL_NOT_PRESENT_RESPONSE_CODE = 0
  _VIRUSTOTAL_PRESENT_RESPONSE_CODE = 1
  _VIRUSTOTAL_ANALYSIS_PENDING_RESPONSE_CODE = -2

  def __init__(self):
    """Initializes a VirusTotal analysis plugin."""
    super(VirusTotalAnalysisPlugin, self).__init__(VirusTotalAnalyzer)
    self._api_key = None

  def EnableFreeAPIKeyRateLimit(self):
    """Configures Rate limiting for queries to VirusTotal.

    The default rate limit for free VirusTotal API keys is 4 requests per
    minute.
    """
    self._analyzer.hashes_per_batch = 4
    self._analyzer.wait_after_analysis = 60
    # Queue timeout must outlast one full analyzer wait cycle.
    self._analysis_queue_timeout = self._analyzer.wait_after_analysis + 1

  def GenerateLabels(self, hash_information):
    """Generates a list of strings that will be used in the event tag.

    Args:
      hash_information (dict[str, object]): the JSON decoded contents of the
          result of a VirusTotal lookup, as produced by the VirusTotalAnalyzer.

    Returns:
      list[str]: strings describing the results from VirusTotal.
    """
    response_code = hash_information['response_code']
    if response_code == self._VIRUSTOTAL_NOT_PRESENT_RESPONSE_CODE:
      return ['virustotal_not_present']
    if response_code == self._VIRUSTOTAL_PRESENT_RESPONSE_CODE:
      positives = hash_information['positives']
      if positives > 0:
        return ['virustotal_detections_{0:d}'.format(positives)]
      # NOTE(review): label is misspelled ('virsutotal'); kept as-is here
      # since existing tagging rules may reference the misspelled label —
      # confirm downstream usage before renaming.
      return ['virsutotal_no_detections']
    if response_code == self._VIRUSTOTAL_ANALYSIS_PENDING_RESPONSE_CODE:
      return ['virustotal_analysis_pending']
    logger.error(
        'VirusTotal returned unknown response code {0!s}'.format(
            response_code))
    return ['virustotal_unknown_response_code_{0:d}'.format(response_code)]

  def SetAPIKey(self, api_key):
    """Sets the VirusTotal API key to use in queries.

    Args:
      api_key (str): VirusTotal API key
    """
    self._analyzer.SetAPIKey(api_key)

  def TestConnection(self):
    """Tests the connection to VirusTotal.

    Returns:
      bool: True if VirusTotal is reachable.
    """
    return self._analyzer.TestConnection()
manager.AnalysisPluginManager.RegisterPlugin(VirusTotalAnalysisPlugin) | 0.847779 | 0.31932 |
# here put the import lib
from . import STensor
import spartan as st
class Graph:
    def __init__(self, graph_tensor: STensor, weighted: bool = False,
                 bipartite: bool = False, modet=None):
        '''Construct a graph from a sparse tensor.

        If the sparse tensor has more than 2 modes, then it is a rich graph.

        Parameters:
        ------
        modet: int
            The order of mode in graph tensor for temporal bins if it exists,
            starting from zero.
            NOTE(review): the original docstring said "Default is 3" but the
            signature default is None — confirm which is intended.
        '''
        self.graph_tensor = graph_tensor
        self.weighted = weighted
        self.bipartite = bipartite
        self.modet = modet  # which mode is time dimension
        self.nprop = graph_tensor.ndim - 2  # num of edge properties
        # Collapse all edge-property modes into a (src, dst) adjacency matrix.
        self.sm = graph_tensor.sum_to_scipy_sparse(modes=(0, 1))
        if not weighted:
            # Binarize: any positive aggregated weight counts as one edge.
            self.sm = (self.sm > 0).astype(int)
        if not bipartite:
            # Symmetrize so the matrix represents an undirected graph.
            self.sm = self.sm.maximum(self.sm.T)

    def get_time_tensor(self):
        '''Get the tensor that only has the time dimension.

        If nprop == 1 and modet == 3, then the tensor is graph_tensor itself.
        If modet is None, then None is returned.
        '''
        return self.get_one_prop_tensor(self.modet)
def get_one_prop_tensor(self, mode):
'''Get the tensor only have one edge-property dimension.
if nprop == 1 and mode == 3, then the tensor is graph_tensor itself.
If mode is None, and other invalidation, then None is returned.
'''
if self.nprop == 1 and mode == 3:
return graph_tensor
elif self.nprop > 1 and mode is not None and\
mode < self.nprop + 2:
return STensor((self.graph_tensor.coords[(0, 1, mode), :],
self.graph_tensor.data))
else:
return None
    def get_sub_graph(self, rows, cols):
        # Extract the subgraph induced by `rows` x `cols`, relabeling node
        # ids compactly to 0..len(rows)-1 and 0..len(cols)-1.
        cootensor = self.graph_tensor
        # gr/gc map original row/col ids to new compact ids; -1 marks
        # "not selected".
        gr = -1 * st.ones(cootensor.shape[0], dtype=int)
        gc = -1 * st.ones(cootensor.shape[1], dtype=int)
        lr = len(rows)
        lc = len(cols)
        ar = st.arange(0, lr, 1)
        ac = st.arange(0, lc, 1)
        gr[rows[ar]] = ar
        gc[cols[ac]] = ac
        mrow = cootensor.coords[0]
        mcol = cootensor.coords[1]
        # Keep only entries whose both endpoints were selected.
        newelem = (gr[mrow] > -1) & (gc[mcol] > -1)
        newrows = mrow[newelem]
        newcols = mcol[newelem]
        # Remap endpoints; any extra property modes are carried unchanged.
        subcoords = st.stack((gr[newrows], gc[newcols],
                              *cootensor.coords[2:,newelem]), axis=0)
        subvalues = cootensor.data[newelem]
        subtensor = st.STensor((subcoords, subvalues),
                               shape=(lr,lc,*cootensor.shape[2:]) )
        return st.Graph(subtensor, self.weighted, self.bipartite, self.modet)
    def get_subgraph_nedges(self, rows, cols):
        """
        Counts edges of an arbitrary (i.e. non-contiguous) submatrix of
        the aggregated sparse adjacency matrix.

        Returns
        ------
        int: number of stored edges in the rows x cols submatrix.
        """
        matr = self.sm.tocoo()
        # gr/gc map original row/col ids to compact ids; -1 marks "absent".
        gr = -1 * st.ones(matr.shape[0], dtype=int)
        gc = -1 * st.ones(matr.shape[1], dtype=int)
        lr = len(rows)
        lc = len(cols)
        ar = st.arange(0, lr, 1)
        ac = st.arange(0, lc, 1)
        gr[rows[ar]] = ar
        gc[cols[ac]] = ac
        mrow = matr.row
        mcol = matr.col
        # Entries whose both endpoints fall inside the selection.
        newelem = (gr[mrow] > -1) & (gc[mcol] > -1)
        subvalues = matr.data[newelem]
        if self.weighted:
            # Weighted matrix: each stored entry is one edge.
            nedges = len(subvalues)
        else:
            # Unweighted matrix was binarized to 0/1 in __init__, so the sum
            # of values equals the entry count; both branches count edges.
            nedges = subvalues.sum()
        return nedges
def degrees(self):
rowdegs, coldegs = self.sm.sum(axis=1), self.sm.sum(axis=0)
return rowdegs, coldegs.T | spartan/tensor/graph.py | # here put the import lib
from . import STensor
import spartan as st
class Graph:
    def __init__(self, graph_tensor: STensor, weighted: bool = False,
                 bipartite: bool = False, modet=None):
        '''Construct a graph from a sparse tensor.

        If the sparse tensor has more than 2 modes, then it is a rich graph.

        Parameters:
        ------
        modet: int
            The order of mode in graph tensor for temporal bins if it exists,
            starting from zero.
            NOTE(review): docstring said "Default is 3" but the signature
            default is None — confirm which is intended.
        '''
        self.graph_tensor = graph_tensor
        self.weighted = weighted
        self.bipartite = bipartite
        self.modet = modet  # which mode is time dimension
        self.nprop = graph_tensor.ndim - 2  # num of edge properties
        # Collapse all edge-property modes into a (src, dst) adjacency matrix.
        self.sm = graph_tensor.sum_to_scipy_sparse(modes=(0, 1))
        if not weighted:
            # Binarize: any positive aggregated weight counts as one edge.
            self.sm = (self.sm > 0).astype(int)
        if not bipartite:
            # Symmetrize so the matrix represents an undirected graph.
            self.sm = self.sm.maximum(self.sm.T)

    def get_time_tensor(self):
        '''Get the tensor that only has the time dimension.

        If nprop == 1 and modet == 3, then the tensor is graph_tensor itself.
        If modet is None, then None is returned.
        '''
        return self.get_one_prop_tensor(self.modet)

    def get_one_prop_tensor(self, mode):
        '''Get the tensor with only one edge-property dimension.

        If nprop == 1 and mode == 3, then the tensor is graph_tensor itself.
        If mode is None, or otherwise invalid, None is returned.
        '''
        if self.nprop == 1 and mode == 3:
            # NOTE(review): bare `graph_tensor` raises NameError when this
            # branch is taken; should almost certainly be self.graph_tensor.
            return graph_tensor
        elif self.nprop > 1 and mode is not None and\
                mode < self.nprop + 2:
            # Keep src, dst and the requested property mode only.
            return STensor((self.graph_tensor.coords[(0, 1, mode), :],
                            self.graph_tensor.data))
        else:
            return None

    def get_sub_graph(self, rows, cols):
        # Extract the subgraph induced by `rows` x `cols`, relabeling node
        # ids compactly; -1 in gr/gc marks "not selected".
        cootensor = self.graph_tensor
        gr = -1 * st.ones(cootensor.shape[0], dtype=int)
        gc = -1 * st.ones(cootensor.shape[1], dtype=int)
        lr = len(rows)
        lc = len(cols)
        ar = st.arange(0, lr, 1)
        ac = st.arange(0, lc, 1)
        gr[rows[ar]] = ar
        gc[cols[ac]] = ac
        mrow = cootensor.coords[0]
        mcol = cootensor.coords[1]
        # Keep only entries whose both endpoints were selected.
        newelem = (gr[mrow] > -1) & (gc[mcol] > -1)
        newrows = mrow[newelem]
        newcols = mcol[newelem]
        # Remap endpoints; extra property modes are carried unchanged.
        subcoords = st.stack((gr[newrows], gc[newcols],
                              *cootensor.coords[2:,newelem]), axis=0)
        subvalues = cootensor.data[newelem]
        subtensor = st.STensor((subcoords, subvalues),
                               shape=(lr,lc,*cootensor.shape[2:]) )
        return st.Graph(subtensor, self.weighted, self.bipartite, self.modet)

    def get_subgraph_nedges(self, rows, cols):
        """
        Pulls out an arbitrary i.e. non-contiguous submatrix out of
        a sparse.coo_matrix and counts its stored edges.

        Returns
        ------
        int: number of edges in the rows x cols submatrix.
        """
        matr = self.sm.tocoo()
        gr = -1 * st.ones(matr.shape[0], dtype=int)
        gc = -1 * st.ones(matr.shape[1], dtype=int)
        lr = len(rows)
        lc = len(cols)
        ar = st.arange(0, lr, 1)
        ac = st.arange(0, lc, 1)
        gr[rows[ar]] = ar
        gc[cols[ac]] = ac
        mrow = matr.row
        mcol = matr.col
        # Entries whose both endpoints fall inside the selection.
        newelem = (gr[mrow] > -1) & (gc[mcol] > -1)
        subvalues = matr.data[newelem]
        if self.weighted:
            # Weighted: each stored entry is one edge.
            nedges = len(subvalues)
        else:
            # Unweighted entries are 0/1, so the sum equals the entry count.
            nedges = subvalues.sum()
        return nedges
def degrees(self):
rowdegs, coldegs = self.sm.sum(axis=1), self.sm.sum(axis=0)
return rowdegs, coldegs.T | 0.816662 | 0.446977 |
# pyre-unsafe
import logging
from typing import Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesChangePoint, TimeSeriesData
from kats.detectors.detector import Detector
from scipy.stats import chi2
from sklearn.covariance import MinCovDet
"""A module for detecting abnormal in hourly ratio.
This module contains the class :class:`HourlyRatioDetector`, which detects the abnormal intra-day hourly ratio patterns.
"""
class HourlyRatioDetector(Detector):
    """Hourly Ratio Anomaly detector.

    This detector detects the abnormal intra-day hourly ratio patterns. This detector takes TimeSeriesData as input and returns a list of TimeSeriesChangePoint representing the abnormal dates.
    The detection algorithm assumes that the hourly ratio of each day should follow a multivariate normal distribution, and we utilize Mahalanobis distance tests to detect abnormal days.
    This class provides detector and plot.

    Attributes:
        data: A :class:`kats.consts.TimeSeriesData` object representing the data to be examined, which should be of hour-level granularity.
        freq: Optional; A string or a `pandas.Timedelta` object representing the data frequency (following the naming conventions of pandas). Can be 'H' (hourly frequency), 'T' minutely frequency, 'S' secondly frequency or any other frequency finer than hourly frequency.
              Default is None, in which case the frequency will be inferred by infer_freq_robust.
        aggregate: Optional; A string representing the aggregation method for aggregating data to hourly level data. Can be 'min', 'max', 'sum', 'mean' or None. Default is None, which means no aggregation.

    Sample Usage:
        >>> hr = HourlyRatioDetector(data)
        >>> anomalies = hr.detector()
        >>> hr = hr.plot(weekday = 3) # Plot the anomalies of weekday 3
    """

    def __init__(
        self,
        data: TimeSeriesData,
        freq: Union[str, pd.Timedelta, None] = None,
        aggregate: Optional[str] = None,
    ) -> None:
        super(HourlyRatioDetector, self).__init__(data=data)
        # Reject empty input up front.
        if len(data) == 0:
            msg = "Input data is empty."
            logging.error(msg)
            raise ValueError(msg)
        # Only univariate series are supported.
        if not self.data.is_univariate():
            msg = "Only support univariate time series, but get {}.".format(
                type(self.data.value)
            )
            logging.error(msg)
            raise ValueError(msg)
        # Lazily-built dataframe of per-(date, hour) ratios; see _preprocess.
        self._ratiodf = None
        # Dates dropped for having fewer than 24 hourly observations.
        self.incomplete_dates = None
        # Populated by detector().
        self.anomaly_dates = None
        self.freq = freq
        self.aggregate = aggregate
        # Validates freq/aggregate combination; raises ValueError if invalid.
        self._valid_frequency()
    def _valid_frequency(self) -> None:
        """Validate the frequency of the input time series.

        If freq is given by the user, that value is used, otherwise
        ts.infer_freq_robust() infers the data frequency.
        Data freq should be at least hourly level. For data with granularity
        finer than hourly level, an aggregation function must be given via
        the `aggregate` attribute.
        Only aggregation functions min, max, sum and mean are supported.
        """
        # Pandas offset aliases finer than hourly: minute, second, milli,
        # micro, nano.
        lower_granularity = ["T", "S", "L", "U", "N"]
        if self.freq is None:
            self.freq = self.data.infer_freq_robust()
        # Exactly hourly (either the alias 'H' or a 1-hour Timedelta in ns):
        # nothing more to check.
        if self.freq == "H" or (
            isinstance(self.freq, pd.Timedelta) and self.freq.value == 3600000000000
        ):
            msg = "Input data is hourly data."
            logging.info(msg)
            return
        if isinstance(self.freq, str):
            # NOTE(review): this loop only logs on a match and never breaks;
            # a string frequency coarser than hourly (e.g. 'D') matches no
            # level and falls through to the aggregate checks without
            # raising — confirm whether that is intended.
            for level in lower_granularity:
                # pyre-fixme[58]: `in` is not supported for right operand type
                # `Optional[str]`.
                if level in self.freq:
                    msg = "Input data granularity is {} and we can continue processing using aggregation function.".format(
                        self.freq
                    )
                    logging.info(msg)
        elif isinstance(self.freq, pd.Timedelta) and self.freq.value < 3600000000000:
            # Timedelta finer than one hour: acceptable with aggregation.
            pass
        else:
            msg = "Time series should be of hourly or finer granularity."
            logging.error(msg)
            raise ValueError(msg)
        # Sub-hourly data requires a supported aggregation method.
        if self.aggregate is None:
            msg = "Aggregation method is missing."
            logging.error(msg)
            raise ValueError(msg)
        elif self.aggregate in ["min", "max", "sum", "mean"]:
            msg = "Aggregation method is {}.".format(self.aggregate)
            logging.info(msg)
            return
        else:
            msg = "Aggregation methd {} is not implemented.".format(self.aggregate)
            logging.error(msg)
            raise ValueError(msg)
    def _preprocess(self):
        """Preprocess input data.

        Two steps: 1) aggregate data to hourly level if necessary and filter
        out dates with incomplete data; and 2) calculate the hourly ratio
        (each hour's value as a fraction of its day's total).
        """
        if self._ratiodf is None:
            df = self.data.to_dataframe()
            df.dropna(inplace=True)
            df.sort_values("time", inplace=True)
            df["date"] = df["time"].dt.date
            df["hour"] = df["time"].dt.hour
            df["weekday"] = df["time"].dt.weekday
            # aggregate the data to hourly level.
            if self.freq != "H" and self.aggregate is not None:
                df = (
                    df.groupby(["date", "hour", "weekday"])["value"]
                    .agg(self.aggregate)
                    .reset_index()
                )
                msg = "Successfully aggregate data to hourly level using {}".format(
                    self.aggregate
                )
                logging.info(msg)
            # Number of hourly observations per date.
            df["counts"] = df.groupby("date")["hour"].transform("count")
            # filter out dates with less than 24 observations
            incomplete_dates = df["date"][df["counts"] < 24].unique()
            self.incomplete_dates = [
                TimeSeriesChangePoint(t, t, 1.0) for t in incomplete_dates
            ]
            df = df[df["counts"] == 24]
            if len(df) == 0:
                msg = "Data should have hour-level granularity."
                logging.error(msg)
                raise ValueError(msg)
            # Despite the name, "hourly_mean" is the per-date total used as
            # the denominator of the hourly ratio.
            df["hourly_mean"] = df.groupby("date")["value"].transform("sum")
            df["hourly_ratio"] = df["value"] / df["hourly_mean"]
            self._ratiodf = df
        return
def _mahalanobis_test(
self,
obs: np.ndarray,
median: np.ndarray,
cov: np.ndarray,
alpha: float = 0.01,
) -> Tuple[np.ndarray, np.ndarray]:
"""mahalanobis test function.
Args:
obs: A :class:`numpy.ndarray` object storing the data to be tested.
median: A :class:`numpy.ndarray` object storing the medians used to build centeralize test data.
cov: A :class:`numpy.ndarray` object representing the covariance matrix.
alpha: A float representing the significance level for the Mahalanobis test. We take the instance with pvalue<=alpha as an abnormal point.
Returns:
lab: A :class:`numpy.ndarray` object of booleans representing whether the corresponding instance is abnormal or not.
pvalue: A :class:`numpy.ndarray` object storing the pvalues of tests of each instance.
"""
diff = obs - median
scores = np.sum(diff * np.linalg.solve(cov, diff.T).T, axis=1)
pvalue = 1.0 - chi2(df=diff.shape[1]).cdf(scores)
lab = pvalue <= alpha
return (lab, pvalue)
    # pyre-fixme[14]: `detector` overrides method defined in `Detector` inconsistently.
    def detector(self, support_fraction=0.9) -> Sequence[TimeSeriesChangePoint]:
        """Run detection algorithm.

        Args:
            support_fraction: Optional; A float representing the support_fraction for MinCovDet class from scikit-learn. Default is 0.9.
                              See https://scikit-learn.org/stable/modules/generated/sklearn.covariance.MinCovDet.html for more details.

        Returns:
            A list of TimeSeriesChangePoint representing the abnormal dates.
        """
        if self._ratiodf is None:
            self._preprocess()
        anomaly = []
        pvalues = []
        # Each weekday is modeled separately: one 24-dim ratio vector per day.
        for w in range(7):
            obs = self._ratiodf[self._ratiodf["weekday"] == w][
                "hourly_ratio"
            ].values.reshape(-1, 24)
            dates = np.unique(
                self._ratiodf[self._ratiodf["weekday"] == w]["date"].values
            )
            # we omit the last dimension due to linearity constrant
            # (the 24 ratios sum to 1, so one coordinate is redundant).
            median = np.median(obs, axis=0)
            median = (median / np.sum(median) * 24)[:-1]
            # Robust covariance estimate, centered on the (rescaled) median.
            cov = MinCovDet(
                assume_centered=True, support_fraction=support_fraction
            ).fit(obs[:, :-1] - median)
            lab, p = self._mahalanobis_test(obs[:, :-1], median, cov.covariance_)
            anomaly.extend(list(dates[lab]))
            pvalues.extend(p[lab])
        # Confidence of each change point is 1 - pvalue.
        anomaly = [
            TimeSeriesChangePoint(anomaly[i], anomaly[i], 1.0 - pvalues[i])
            for i in range(len(anomaly))
        ]
        self.anomaly_dates = anomaly
        return anomaly
    def plot(self, weekday: int = 0) -> None:
        """Plot the detection results.

        Args:
            weekday: Optional; An integer representing the weekday label, which should be in [0,6]. Default is 0.

        Returns:
            None.
        """
        if self.anomaly_dates is None:
            msg = "Please run detector method first."
            logging.error(msg)
            raise ValueError(msg)
        anomaly_dates = [t.start_time for t in self.anomaly_dates]
        anomaly_dates = set(anomaly_dates)
        # One 24-dim hourly-ratio curve per date of the requested weekday.
        obs = self._ratiodf[self._ratiodf["weekday"] == weekday][
            "hourly_ratio"
        ].values.reshape(-1, 24)
        dates = np.unique(
            self._ratiodf[self._ratiodf["weekday"] == weekday]["date"].values
        )
        labs = [(t in anomaly_dates) for t in dates]
        logging.info("# of anomaly dates: {}".format(np.sum(labs)))
        # Normal days in faint silver; anomalous days in navy with a legend.
        for i in range(len(obs)):
            if not labs[i]:
                plt.plot(obs[i], "--", color="silver", alpha=0.5)
            else:
                plt.plot(obs[i], "--", color="navy", label=str(dates[i]))
        plt.title("Hourly Ratio Plot for Weeday {}".format(weekday))
        plt.legend()
        plt.show()
return | kats/detectors/hourly_ratio_detection.py |
# pyre-unsafe
import logging
from typing import Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesChangePoint, TimeSeriesData
from kats.detectors.detector import Detector
from scipy.stats import chi2
from sklearn.covariance import MinCovDet
"""A module for detecting abnormal in hourly ratio.
This module contains the class :class:`HourlyRatioDetector`, which detects the abnormal intra-day hourly ratio patterns.
"""
class HourlyRatioDetector(Detector):
    """Hourly Ratio Anomaly detector.

    This detector detects the abnormal intra-day hourly ratio patterns. This detector takes TimeSeriesData as input and returns a list of TimeSeriesChangePoint representing the abnormal dates.
    The detection algorithm assumes that the hourly ratio of each day should follow a multivariate normal distribution, and we utilize Mahalanobis distance tests to detect abnormal days.
    This class provides detector and plot.

    Attributes:
        data: A :class:`kats.consts.TimeSeriesData` object representing the data to be examined, which should be of hour-level granularity.
        freq: Optional; A string or a `pandas.Timedelta` object representing the data frequency (following the naming conventions of pandas). Can be 'H' (hourly frequency), 'T' minutely frequency, 'S' secondly frequency or any other frequency finer than hourly frequency.
              Default is None, in which case the frequency will be inferred by infer_freq_robust.
        aggregate: Optional; A string representing the aggregation method for aggregating data to hourly level data. Can be 'min', 'max', 'sum', 'mean' or None. Default is None, which means no aggregation.

    Sample Usage:
        >>> hr = HourlyRatioDetector(data)
        >>> anomalies = hr.detector()
        >>> hr = hr.plot(weekday = 3) # Plot the anomalies of weekday 3
    """

    def __init__(
        self,
        data: TimeSeriesData,
        freq: Union[str, pd.Timedelta, None] = None,
        aggregate: Optional[str] = None,
    ) -> None:
        super(HourlyRatioDetector, self).__init__(data=data)
        # Reject empty input up front.
        if len(data) == 0:
            msg = "Input data is empty."
            logging.error(msg)
            raise ValueError(msg)
        # Only univariate series are supported.
        if not self.data.is_univariate():
            msg = "Only support univariate time series, but get {}.".format(
                type(self.data.value)
            )
            logging.error(msg)
            raise ValueError(msg)
        # Lazily-built dataframe of per-(date, hour) ratios; see _preprocess.
        self._ratiodf = None
        # Dates dropped for having fewer than 24 hourly observations.
        self.incomplete_dates = None
        # Populated by detector().
        self.anomaly_dates = None
        self.freq = freq
        self.aggregate = aggregate
        # Validates freq/aggregate combination; raises ValueError if invalid.
        self._valid_frequency()
    def _valid_frequency(self) -> None:
        """Validate the frequency of the input time series.

        If freq is given by the user, that value is used, otherwise
        ts.infer_freq_robust() infers the data frequency.
        Data freq should be at least hourly level. For data with granularity
        finer than hourly level, an aggregation function must be given via
        the `aggregate` attribute.
        Only aggregation functions min, max, sum and mean are supported.
        """
        # Pandas offset aliases finer than hourly: minute, second, milli,
        # micro, nano.
        lower_granularity = ["T", "S", "L", "U", "N"]
        if self.freq is None:
            self.freq = self.data.infer_freq_robust()
        # Exactly hourly: nothing more to check.
        if self.freq == "H" or (
            isinstance(self.freq, pd.Timedelta) and self.freq.value == 3600000000000
        ):
            msg = "Input data is hourly data."
            logging.info(msg)
            return
        if isinstance(self.freq, str):
            # NOTE(review): only logs on match and never breaks; a string
            # frequency coarser than hourly (e.g. 'D') falls through to the
            # aggregate checks without raising — confirm intent.
            for level in lower_granularity:
                # pyre-fixme[58]: `in` is not supported for right operand type
                # `Optional[str]`.
                if level in self.freq:
                    msg = "Input data granularity is {} and we can continue processing using aggregation function.".format(
                        self.freq
                    )
                    logging.info(msg)
        elif isinstance(self.freq, pd.Timedelta) and self.freq.value < 3600000000000:
            # Timedelta finer than one hour: acceptable with aggregation.
            pass
        else:
            msg = "Time series should be of hourly or finer granularity."
            logging.error(msg)
            raise ValueError(msg)
        # Sub-hourly data requires a supported aggregation method.
        if self.aggregate is None:
            msg = "Aggregation method is missing."
            logging.error(msg)
            raise ValueError(msg)
        elif self.aggregate in ["min", "max", "sum", "mean"]:
            msg = "Aggregation method is {}.".format(self.aggregate)
            logging.info(msg)
            return
        else:
            msg = "Aggregation methd {} is not implemented.".format(self.aggregate)
            logging.error(msg)
            raise ValueError(msg)
    def _preprocess(self):
        """Preprocess input data.

        Two steps: 1) aggregate data to hourly level if necessary and filter
        out dates with incomplete data; and 2) calculate the hourly ratio
        (each hour's value as a fraction of its day's total).
        """
        if self._ratiodf is None:
            df = self.data.to_dataframe()
            df.dropna(inplace=True)
            df.sort_values("time", inplace=True)
            df["date"] = df["time"].dt.date
            df["hour"] = df["time"].dt.hour
            df["weekday"] = df["time"].dt.weekday
            # aggregate the data to hourly level.
            if self.freq != "H" and self.aggregate is not None:
                df = (
                    df.groupby(["date", "hour", "weekday"])["value"]
                    .agg(self.aggregate)
                    .reset_index()
                )
                msg = "Successfully aggregate data to hourly level using {}".format(
                    self.aggregate
                )
                logging.info(msg)
            # Number of hourly observations per date.
            df["counts"] = df.groupby("date")["hour"].transform("count")
            # filter out dates with less than 24 observations
            incomplete_dates = df["date"][df["counts"] < 24].unique()
            self.incomplete_dates = [
                TimeSeriesChangePoint(t, t, 1.0) for t in incomplete_dates
            ]
            df = df[df["counts"] == 24]
            if len(df) == 0:
                msg = "Data should have hour-level granularity."
                logging.error(msg)
                raise ValueError(msg)
            # Despite the name, "hourly_mean" is the per-date total used as
            # the denominator of the hourly ratio.
            df["hourly_mean"] = df.groupby("date")["value"].transform("sum")
            df["hourly_ratio"] = df["value"] / df["hourly_mean"]
            self._ratiodf = df
        return
    def _mahalanobis_test(
        self,
        obs: np.ndarray,
        median: np.ndarray,
        cov: np.ndarray,
        alpha: float = 0.01,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Mahalanobis test function.

        Args:
            obs: A :class:`numpy.ndarray` object storing the data to be tested.
            median: A :class:`numpy.ndarray` object storing the medians used to centralize the test data.
            cov: A :class:`numpy.ndarray` object representing the covariance matrix.
            alpha: A float representing the significance level for the Mahalanobis test. An instance with pvalue <= alpha is flagged as abnormal.

        Returns:
            lab: A :class:`numpy.ndarray` object of booleans representing whether the corresponding instance is abnormal or not.
            pvalue: A :class:`numpy.ndarray` object storing the pvalues of the test of each instance.
        """
        diff = obs - median
        # Row-wise squared Mahalanobis distance, via solve() instead of an
        # explicit matrix inverse.
        scores = np.sum(diff * np.linalg.solve(cov, diff.T).T, axis=1)
        # Distances are chi-square distributed with dim degrees of freedom.
        pvalue = 1.0 - chi2(df=diff.shape[1]).cdf(scores)
        lab = pvalue <= alpha
        return (lab, pvalue)
    # pyre-fixme[14]: `detector` overrides method defined in `Detector` inconsistently.
    def detector(self, support_fraction=0.9) -> Sequence[TimeSeriesChangePoint]:
        """Run detection algorithm.

        Args:
            support_fraction: Optional; A float representing the support_fraction for MinCovDet class from scikit-learn. Default is 0.9.
                              See https://scikit-learn.org/stable/modules/generated/sklearn.covariance.MinCovDet.html for more details.

        Returns:
            A list of TimeSeriesChangePoint representing the abnormal dates.
        """
        if self._ratiodf is None:
            self._preprocess()
        anomaly = []
        pvalues = []
        # Each weekday is modeled separately: one 24-dim ratio vector per day.
        for w in range(7):
            obs = self._ratiodf[self._ratiodf["weekday"] == w][
                "hourly_ratio"
            ].values.reshape(-1, 24)
            dates = np.unique(
                self._ratiodf[self._ratiodf["weekday"] == w]["date"].values
            )
            # we omit the last dimension due to linearity constrant
            # (the 24 ratios sum to 1, so one coordinate is redundant).
            median = np.median(obs, axis=0)
            median = (median / np.sum(median) * 24)[:-1]
            # Robust covariance estimate, centered on the (rescaled) median.
            cov = MinCovDet(
                assume_centered=True, support_fraction=support_fraction
            ).fit(obs[:, :-1] - median)
            lab, p = self._mahalanobis_test(obs[:, :-1], median, cov.covariance_)
            anomaly.extend(list(dates[lab]))
            pvalues.extend(p[lab])
        # Confidence of each change point is 1 - pvalue.
        anomaly = [
            TimeSeriesChangePoint(anomaly[i], anomaly[i], 1.0 - pvalues[i])
            for i in range(len(anomaly))
        ]
        self.anomaly_dates = anomaly
        return anomaly
    def plot(self, weekday: int = 0) -> None:
        """Plot the detection results.

        Args:
            weekday: Optional; An integer representing the weekday label, which should be in [0,6]. Default is 0.

        Returns:
            None.
        """
        if self.anomaly_dates is None:
            msg = "Please run detector method first."
            logging.error(msg)
            raise ValueError(msg)
        anomaly_dates = [t.start_time for t in self.anomaly_dates]
        anomaly_dates = set(anomaly_dates)
        # One 24-dim hourly-ratio curve per date of the requested weekday.
        obs = self._ratiodf[self._ratiodf["weekday"] == weekday][
            "hourly_ratio"
        ].values.reshape(-1, 24)
        dates = np.unique(
            self._ratiodf[self._ratiodf["weekday"] == weekday]["date"].values
        )
        labs = [(t in anomaly_dates) for t in dates]
        logging.info("# of anomaly dates: {}".format(np.sum(labs)))
        # Normal days in faint silver; anomalous days in navy with a legend.
        for i in range(len(obs)):
            if not labs[i]:
                plt.plot(obs[i], "--", color="silver", alpha=0.5)
            else:
                plt.plot(obs[i], "--", color="navy", label=str(dates[i]))
        plt.title("Hourly Ratio Plot for Weeday {}".format(weekday))
        plt.legend()
        plt.show()
return | 0.959059 | 0.680288 |
import pytest
from align.circuit.core import NTerminalDevice, Circuit, SubCircuit, Model
def test_n_terminal_device():
    """A bare NTerminalDevice accepts a name but no pin connections."""
    device = NTerminalDevice('X1')
    assert device.name == 'X1'
    # Supplying a net to a device with no declared pins must be rejected.
    with pytest.raises(AssertionError):
        device = NTerminalDevice('X2', 'net1')
@pytest.fixture
def TwoTerminalDevice():
    """Dynamically built two-pin device class (pins 'a' and 'b')."""
    attributes = {'_pins': ['a', 'b']}
    return type('TwoTerminalDevice', (NTerminalDevice,), attributes)
@pytest.fixture
def ThreeTerminalDevice():
    """Dynamically built three-pin device class with one default parameter."""
    attributes = {'_pins': ['a', 'b', 'c'], '_parameters': {'myparameter': 1}}
    return type('ThreeTerminalDevice', (NTerminalDevice,), attributes)
def test_2_terminal_device(TwoTerminalDevice):
    # Too few nets for a two-pin device must be rejected.
    with pytest.raises(AssertionError):
        inst = TwoTerminalDevice('X1')
    with pytest.raises(AssertionError):
        inst = TwoTerminalDevice('X1', 'net1')
    # NOTE(review): this expects the fully-connected call to raise, yet the
    # byte-identical call below (and the one after) is expected to succeed —
    # one of these expectations looks wrong; confirm intended semantics.
    with pytest.raises(AssertionError):
        inst = TwoTerminalDevice('X1', 'net1', 'net2')
    # NOTE(review): an undeclared parameter is apparently tolerated on a
    # class with no declared _parameters (contrast garbageparameter in the
    # three-terminal test) — confirm this asymmetry is intentional.
    inst = TwoTerminalDevice('X1', 'net1', 'net2', nonexistentparameter=2)
    inst = TwoTerminalDevice('X1', 'net1', 'net2')
    assert inst.name == 'X1'
    assert type(inst).__name__ == 'TwoTerminalDevice'
    assert inst.pins == {'a': 'net1', 'b': 'net2'}
    assert inst.parameters == {}
def test_3_terminal_device_w_parameter(ThreeTerminalDevice):
with pytest.raises(AssertionError):
inst = ThreeTerminalDevice('X1')
with pytest.raises(AssertionError):
inst = ThreeTerminalDevice('X1', 'net1')
with pytest.raises(AssertionError):
inst = ThreeTerminalDevice('X1', 'net1', 'net2')
with pytest.raises(AssertionError):
inst = ThreeTerminalDevice('X1', 'net1', 'net2', 'net3', garbageparameter=2)
inst = ThreeTerminalDevice('X1', 'net1', 'net2', 'net3')
assert inst.name == 'X1'
assert type(inst).__name__ == 'ThreeTerminalDevice'
assert inst.pins == {'a': 'net1', 'b': 'net2', 'c': 'net3'}
assert inst.parameters == {'myparameter': 1}
inst = ThreeTerminalDevice('X1', 'net1', 'net2', 'net3', myparameter = 2)
assert inst.parameters == {'myparameter': 2}
def test_subckt_class(TwoTerminalDevice):
subckt = SubCircuit('test_subckt', 'pin1', 'pin2', param1=1, param2=1e-3, param3=1e-16, param4="hello")
X1 = TwoTerminalDevice('X1', 'net1', 'net2')
X2 = TwoTerminalDevice('X2', 'net2', 'net3')
subckt.add_element(X1)
subckt.add_element(X2)
assert subckt.elements == [X1, X2]
assert subckt.element('X1') == X1
assert subckt.element('X2') == X2
assert subckt.nets == ['net1', 'net2', 'net3']
with pytest.raises(AssertionError):
inst = subckt('X1')
with pytest.raises(AssertionError):
inst = subckt('X1', 'net10')
inst = subckt('X1', 'net10', 'net12')
assert inst.name == 'X1'
assert type(inst).__name__ == 'test_subckt'
assert inst.pins == {'pin1': 'net10', 'pin2': 'net12'}
assert inst.parameters == {'param1': 1, 'param2': 1e-3, 'param3': 1e-16, 'param4': 'hello'}
assert inst.elements == [X1, X2]
assert inst.element('X1') == X1
assert inst.element('X2') == X2
assert inst.nets == ['net1', 'net2', 'net3']
with pytest.raises(AssertionError):
inst.add_element(TwoTerminalDevice('X3', 'net1', 'net3'))
def test_circuit(TwoTerminalDevice, ThreeTerminalDevice):
ckt = Circuit()
X1 = ckt.add_element(TwoTerminalDevice('X1', 'net1', 'net2'))
X2 = ckt.add_element(ThreeTerminalDevice('X2', 'net1', 'net2', 'net3'))
assert ckt.elements == [X1, X2]
assert ckt.element('X1') == X1
assert ckt.element('X2') == X2
assert ckt.nets == ['net1', 'net2', 'net3']
# Advanced graphx functionality test
nodes = ['X1', 'X2',
'net1', 'net2', 'net3']
assert all(x in ckt.nodes for x in nodes)
assert all(x in nodes for x in ckt.nodes)
edges = [# X1, net, pin
('X1', 'net1', {'a'}), ('X1', 'net2', {'b'}),
('net1', 'X1', {'a'}), ('net2', 'X1', {'b'}),
# X2, net, pin
('X2', 'net1', {'a'}), ('X2', 'net2', {'b'}), ('X2', 'net3', {'c'}),
('net1', 'X2', {'a'}), ('net2', 'X2', {'b'}), ('net3', 'X2', {'c'})]
assert all(x in ckt.edges.data('pin') for x in edges), ckt.edges
assert all(x in edges for x in ckt.edges.data('pin')), ckt.edges
def test_circuit_shared_net(TwoTerminalDevice, ThreeTerminalDevice):
ckt = Circuit()
X1 = ckt.add_element(TwoTerminalDevice('X1', 'net1', 'net2'))
X2 = ckt.add_element(ThreeTerminalDevice('X2', 'net1', 'net1', 'net2'))
assert ckt.elements == [X1, X2]
assert ckt.element('X1') == X1
assert ckt.element('X2') == X2
assert ckt.nets == ['net1', 'net2']
# Advanced graphx functionality test
nodes = ['X1', 'X2',
'net1', 'net2']
assert all(x in ckt.nodes for x in nodes)
assert all(x in nodes for x in ckt.nodes)
edges = [# X1, net, pin
('X1', 'net1', {'a'}), ('X1', 'net2', {'b'}),
('net1', 'X1', {'a'}), ('net2', 'X1', {'b'}),
# X2, net, pin
('X2', 'net1', {'a', 'b'}), ('X2', 'net2', {'c'}),
('net1', 'X2', {'a', 'b'}), ('net2', 'X2', {'c'})]
assert all(x in ckt.edges.data('pin') for x in edges), ckt.edges
assert all(x in edges for x in ckt.edges.data('pin')), ckt.edges
def test_model(ThreeTerminalDevice):
CustomDevice = Model('CustomDevice', ThreeTerminalDevice, newparam=1, newparam2='hello')
with pytest.raises(AssertionError):
inst = CustomDevice('X1', 'net01', 'net02', 'net03', garbage=2)
inst = CustomDevice('X1', 'net01', 'net02', 'net03', myparameter=2, newparam=2)
assert type(inst).__name__ == 'CustomDevice'
assert inst.pins == {'a': 'net01', 'b': 'net02', 'c': 'net03'}
assert inst.parameters == {'myparameter': 2, 'newparam': 2, 'newparam2': 'hello'}
@pytest.fixture
def simple_netlist(TwoTerminalDevice, ThreeTerminalDevice):
ckt = Circuit()
CustomDevice = Model('CustomDevice', ThreeTerminalDevice, newparam=1, newparam2='hello')
ckt.add_element(CustomDevice('X1', 'net1', 'in1', 'net01'))
ckt.add_element(CustomDevice('X2', 'net2', 'in2', 'net02'))
ckt.add_element(CustomDevice('X3', 'net3', 'net1', 'net1'))
ckt.add_element(CustomDevice('X4', 'net3', 'net1', 'net2'))
ckt.add_element(TwoTerminalDevice('X5', 'net01', 'net00'))
ckt.add_element(TwoTerminalDevice('X6', 'net02', 'net00'))
ckt.add_element(TwoTerminalDevice('X7', 'net3', 'net03'))
return ckt
@pytest.fixture
def matching_subckt(ThreeTerminalDevice):
subckt = SubCircuit('test_subckt', 'pin1', 'pin2', 'pin3', myparameter=1)
subckt.add_element(ThreeTerminalDevice('X1', 'pin3', 'pin1', 'pin1', myparameter=1))
subckt.add_element(ThreeTerminalDevice('X2', 'pin3', 'pin1', 'pin2', myparameter='myparameter'))
return subckt
def test_find_subgraph_matches(simple_netlist, matching_subckt, ThreeTerminalDevice, TwoTerminalDevice):
ckt, subckt = simple_netlist, matching_subckt
# Validate true match
assert len(ckt.find_subgraph_matches(subckt.circuit)) == 1
assert ckt.find_subgraph_matches(subckt.circuit)[0] == {'X3': 'X1', 'net3': 'pin3', 'net1': 'pin1', 'X4': 'X2', 'net2': 'pin2'}
# Validate false match
subckt2 = SubCircuit('test_subckt2', 'pin1', 'pin2', 'pin3', 'pin4', 'pin5')
subckt2.add_element(ThreeTerminalDevice('X1', 'pin1', 'pin3', 'pin4'))
subckt2.add_element(ThreeTerminalDevice('X2', 'pin2', 'pin3', 'pin5'))
assert len(ckt.find_subgraph_matches(subckt2.circuit)) == 0
# Validate filtering of redundant subgraphs (There are 4 matches. Only 1 should be returned)
subckt3 = SubCircuit('test_subckt3', 'pin1', 'pin2', 'pin3', 'pin4')
subckt3.add_element(TwoTerminalDevice('X1', 'pin1', 'pin2'))
subckt3.add_element(TwoTerminalDevice('X2', 'pin3', 'pin4'))
assert len(ckt.find_subgraph_matches(subckt3.circuit)) == 1
def test_replace_matching_subgraphs(simple_netlist, matching_subckt):
ckt, subckt = simple_netlist, matching_subckt
matches = [{'X3': 'X1', 'net3': 'pin3', 'net1': 'pin1', 'X4': 'X2', 'net2': 'pin2'}]
ckt.replace_matching_subckts(subckt)
assert all(x not in ckt.nodes for x in matches[0].keys() if x.startswith('X'))
assert 'X_test_subckt_0' in ckt.nodes
new_edges = [('X_test_subckt_0', 'net3', {'pin3'}), ('X_test_subckt_0', 'net1', {'pin1'}), ('X_test_subckt_0', 'net2', {'pin2'})]
assert all(x in ckt.edges.data('pin') for x in new_edges)
@pytest.fixture
def heirarchical_ckt(matching_subckt, ThreeTerminalDevice):
ckt = Circuit()
subckt = SubCircuit('parent_subckt', 'pin1', 'pin2')
subckt.add_element(matching_subckt('X1', 'pin1', 'pin2', 'net1', myparameter=2))
subckt.add_element(ThreeTerminalDevice('X2', 'net1', 'pin1', 'pin2', myparameter=1))
ckt.add_element(subckt('XSUB1', 'net1', 'net2'))
ckt.add_element(matching_subckt('XSUB2', 'net1', 'net2', 'net3', myparameter=3))
return ckt
def test_flatten(heirarchical_ckt):
ckt = heirarchical_ckt
ckt.flatten()
myparametermap = {
'XSUB1_X2': 1,
'XSUB1_X1_X1': 1,
'XSUB1_X1_X2': 2,
'XSUB2_X1': 1,
'XSUB2_X2': 3
}
assert {x.name for x in ckt.elements} == set(myparametermap.keys())
assert set(ckt.nets) == {'net1', 'net2', 'net3', 'XSUB1_net1'}
assert all(ckt.element(elem).parameters['myparameter'] == param for elem, param in myparametermap.items()), [ckt.element(elem).parameters['myparameter'] for elem in myparametermap.keys()]
def test_flatten_depth1(heirarchical_ckt):
ckt = heirarchical_ckt
ckt.flatten(1)
myparametermap = {
'XSUB1_X2': 1,
'XSUB1_X1': 2,
'XSUB2_X1': 1,
'XSUB2_X2': 3
}
assert {x.name for x in ckt.elements} == set(myparametermap.keys())
assert set(ckt.nets) == {'net1', 'net2', 'net3', 'XSUB1_net1'}
assert all(ckt.element(elem).parameters['myparameter'] == param for elem, param in myparametermap.items()), [ckt.element(elem).parameters['myparameter'] for elem in myparametermap.keys()] | tests/circuit/test_core.py | import pytest
from align.circuit.core import NTerminalDevice, Circuit, SubCircuit, Model
def test_n_terminal_device():
inst = NTerminalDevice('X1')
assert inst.name == 'X1'
with pytest.raises(AssertionError):
inst = NTerminalDevice('X2', 'net1')
@pytest.fixture
def TwoTerminalDevice():
return type('TwoTerminalDevice', (NTerminalDevice,), {'_pins': ['a', 'b']})
@pytest.fixture
def ThreeTerminalDevice():
return type('ThreeTerminalDevice', (NTerminalDevice,), {'_pins': ['a', 'b', 'c'], '_parameters': {'myparameter': 1}})
def test_2_terminal_device(TwoTerminalDevice):
with pytest.raises(AssertionError):
inst = TwoTerminalDevice('X1')
with pytest.raises(AssertionError):
inst = TwoTerminalDevice('X1', 'net1')
with pytest.raises(AssertionError):
inst = TwoTerminalDevice('X1', 'net1', 'net2')
inst = TwoTerminalDevice('X1', 'net1', 'net2', nonexistentparameter=2)
inst = TwoTerminalDevice('X1', 'net1', 'net2')
assert inst.name == 'X1'
assert type(inst).__name__ == 'TwoTerminalDevice'
assert inst.pins == {'a': 'net1', 'b': 'net2'}
assert inst.parameters == {}
def test_3_terminal_device_w_parameter(ThreeTerminalDevice):
with pytest.raises(AssertionError):
inst = ThreeTerminalDevice('X1')
with pytest.raises(AssertionError):
inst = ThreeTerminalDevice('X1', 'net1')
with pytest.raises(AssertionError):
inst = ThreeTerminalDevice('X1', 'net1', 'net2')
with pytest.raises(AssertionError):
inst = ThreeTerminalDevice('X1', 'net1', 'net2', 'net3', garbageparameter=2)
inst = ThreeTerminalDevice('X1', 'net1', 'net2', 'net3')
assert inst.name == 'X1'
assert type(inst).__name__ == 'ThreeTerminalDevice'
assert inst.pins == {'a': 'net1', 'b': 'net2', 'c': 'net3'}
assert inst.parameters == {'myparameter': 1}
inst = ThreeTerminalDevice('X1', 'net1', 'net2', 'net3', myparameter = 2)
assert inst.parameters == {'myparameter': 2}
def test_subckt_class(TwoTerminalDevice):
subckt = SubCircuit('test_subckt', 'pin1', 'pin2', param1=1, param2=1e-3, param3=1e-16, param4="hello")
X1 = TwoTerminalDevice('X1', 'net1', 'net2')
X2 = TwoTerminalDevice('X2', 'net2', 'net3')
subckt.add_element(X1)
subckt.add_element(X2)
assert subckt.elements == [X1, X2]
assert subckt.element('X1') == X1
assert subckt.element('X2') == X2
assert subckt.nets == ['net1', 'net2', 'net3']
with pytest.raises(AssertionError):
inst = subckt('X1')
with pytest.raises(AssertionError):
inst = subckt('X1', 'net10')
inst = subckt('X1', 'net10', 'net12')
assert inst.name == 'X1'
assert type(inst).__name__ == 'test_subckt'
assert inst.pins == {'pin1': 'net10', 'pin2': 'net12'}
assert inst.parameters == {'param1': 1, 'param2': 1e-3, 'param3': 1e-16, 'param4': 'hello'}
assert inst.elements == [X1, X2]
assert inst.element('X1') == X1
assert inst.element('X2') == X2
assert inst.nets == ['net1', 'net2', 'net3']
with pytest.raises(AssertionError):
inst.add_element(TwoTerminalDevice('X3', 'net1', 'net3'))
def test_circuit(TwoTerminalDevice, ThreeTerminalDevice):
ckt = Circuit()
X1 = ckt.add_element(TwoTerminalDevice('X1', 'net1', 'net2'))
X2 = ckt.add_element(ThreeTerminalDevice('X2', 'net1', 'net2', 'net3'))
assert ckt.elements == [X1, X2]
assert ckt.element('X1') == X1
assert ckt.element('X2') == X2
assert ckt.nets == ['net1', 'net2', 'net3']
# Advanced graphx functionality test
nodes = ['X1', 'X2',
'net1', 'net2', 'net3']
assert all(x in ckt.nodes for x in nodes)
assert all(x in nodes for x in ckt.nodes)
edges = [# X1, net, pin
('X1', 'net1', {'a'}), ('X1', 'net2', {'b'}),
('net1', 'X1', {'a'}), ('net2', 'X1', {'b'}),
# X2, net, pin
('X2', 'net1', {'a'}), ('X2', 'net2', {'b'}), ('X2', 'net3', {'c'}),
('net1', 'X2', {'a'}), ('net2', 'X2', {'b'}), ('net3', 'X2', {'c'})]
assert all(x in ckt.edges.data('pin') for x in edges), ckt.edges
assert all(x in edges for x in ckt.edges.data('pin')), ckt.edges
def test_circuit_shared_net(TwoTerminalDevice, ThreeTerminalDevice):
ckt = Circuit()
X1 = ckt.add_element(TwoTerminalDevice('X1', 'net1', 'net2'))
X2 = ckt.add_element(ThreeTerminalDevice('X2', 'net1', 'net1', 'net2'))
assert ckt.elements == [X1, X2]
assert ckt.element('X1') == X1
assert ckt.element('X2') == X2
assert ckt.nets == ['net1', 'net2']
# Advanced graphx functionality test
nodes = ['X1', 'X2',
'net1', 'net2']
assert all(x in ckt.nodes for x in nodes)
assert all(x in nodes for x in ckt.nodes)
edges = [# X1, net, pin
('X1', 'net1', {'a'}), ('X1', 'net2', {'b'}),
('net1', 'X1', {'a'}), ('net2', 'X1', {'b'}),
# X2, net, pin
('X2', 'net1', {'a', 'b'}), ('X2', 'net2', {'c'}),
('net1', 'X2', {'a', 'b'}), ('net2', 'X2', {'c'})]
assert all(x in ckt.edges.data('pin') for x in edges), ckt.edges
assert all(x in edges for x in ckt.edges.data('pin')), ckt.edges
def test_model(ThreeTerminalDevice):
CustomDevice = Model('CustomDevice', ThreeTerminalDevice, newparam=1, newparam2='hello')
with pytest.raises(AssertionError):
inst = CustomDevice('X1', 'net01', 'net02', 'net03', garbage=2)
inst = CustomDevice('X1', 'net01', 'net02', 'net03', myparameter=2, newparam=2)
assert type(inst).__name__ == 'CustomDevice'
assert inst.pins == {'a': 'net01', 'b': 'net02', 'c': 'net03'}
assert inst.parameters == {'myparameter': 2, 'newparam': 2, 'newparam2': 'hello'}
@pytest.fixture
def simple_netlist(TwoTerminalDevice, ThreeTerminalDevice):
ckt = Circuit()
CustomDevice = Model('CustomDevice', ThreeTerminalDevice, newparam=1, newparam2='hello')
ckt.add_element(CustomDevice('X1', 'net1', 'in1', 'net01'))
ckt.add_element(CustomDevice('X2', 'net2', 'in2', 'net02'))
ckt.add_element(CustomDevice('X3', 'net3', 'net1', 'net1'))
ckt.add_element(CustomDevice('X4', 'net3', 'net1', 'net2'))
ckt.add_element(TwoTerminalDevice('X5', 'net01', 'net00'))
ckt.add_element(TwoTerminalDevice('X6', 'net02', 'net00'))
ckt.add_element(TwoTerminalDevice('X7', 'net3', 'net03'))
return ckt
@pytest.fixture
def matching_subckt(ThreeTerminalDevice):
subckt = SubCircuit('test_subckt', 'pin1', 'pin2', 'pin3', myparameter=1)
subckt.add_element(ThreeTerminalDevice('X1', 'pin3', 'pin1', 'pin1', myparameter=1))
subckt.add_element(ThreeTerminalDevice('X2', 'pin3', 'pin1', 'pin2', myparameter='myparameter'))
return subckt
def test_find_subgraph_matches(simple_netlist, matching_subckt, ThreeTerminalDevice, TwoTerminalDevice):
ckt, subckt = simple_netlist, matching_subckt
# Validate true match
assert len(ckt.find_subgraph_matches(subckt.circuit)) == 1
assert ckt.find_subgraph_matches(subckt.circuit)[0] == {'X3': 'X1', 'net3': 'pin3', 'net1': 'pin1', 'X4': 'X2', 'net2': 'pin2'}
# Validate false match
subckt2 = SubCircuit('test_subckt2', 'pin1', 'pin2', 'pin3', 'pin4', 'pin5')
subckt2.add_element(ThreeTerminalDevice('X1', 'pin1', 'pin3', 'pin4'))
subckt2.add_element(ThreeTerminalDevice('X2', 'pin2', 'pin3', 'pin5'))
assert len(ckt.find_subgraph_matches(subckt2.circuit)) == 0
# Validate filtering of redundant subgraphs (There are 4 matches. Only 1 should be returned)
subckt3 = SubCircuit('test_subckt3', 'pin1', 'pin2', 'pin3', 'pin4')
subckt3.add_element(TwoTerminalDevice('X1', 'pin1', 'pin2'))
subckt3.add_element(TwoTerminalDevice('X2', 'pin3', 'pin4'))
assert len(ckt.find_subgraph_matches(subckt3.circuit)) == 1
def test_replace_matching_subgraphs(simple_netlist, matching_subckt):
ckt, subckt = simple_netlist, matching_subckt
matches = [{'X3': 'X1', 'net3': 'pin3', 'net1': 'pin1', 'X4': 'X2', 'net2': 'pin2'}]
ckt.replace_matching_subckts(subckt)
assert all(x not in ckt.nodes for x in matches[0].keys() if x.startswith('X'))
assert 'X_test_subckt_0' in ckt.nodes
new_edges = [('X_test_subckt_0', 'net3', {'pin3'}), ('X_test_subckt_0', 'net1', {'pin1'}), ('X_test_subckt_0', 'net2', {'pin2'})]
assert all(x in ckt.edges.data('pin') for x in new_edges)
@pytest.fixture
def heirarchical_ckt(matching_subckt, ThreeTerminalDevice):
ckt = Circuit()
subckt = SubCircuit('parent_subckt', 'pin1', 'pin2')
subckt.add_element(matching_subckt('X1', 'pin1', 'pin2', 'net1', myparameter=2))
subckt.add_element(ThreeTerminalDevice('X2', 'net1', 'pin1', 'pin2', myparameter=1))
ckt.add_element(subckt('XSUB1', 'net1', 'net2'))
ckt.add_element(matching_subckt('XSUB2', 'net1', 'net2', 'net3', myparameter=3))
return ckt
def test_flatten(heirarchical_ckt):
ckt = heirarchical_ckt
ckt.flatten()
myparametermap = {
'XSUB1_X2': 1,
'XSUB1_X1_X1': 1,
'XSUB1_X1_X2': 2,
'XSUB2_X1': 1,
'XSUB2_X2': 3
}
assert {x.name for x in ckt.elements} == set(myparametermap.keys())
assert set(ckt.nets) == {'net1', 'net2', 'net3', 'XSUB1_net1'}
assert all(ckt.element(elem).parameters['myparameter'] == param for elem, param in myparametermap.items()), [ckt.element(elem).parameters['myparameter'] for elem in myparametermap.keys()]
def test_flatten_depth1(heirarchical_ckt):
ckt = heirarchical_ckt
ckt.flatten(1)
myparametermap = {
'XSUB1_X2': 1,
'XSUB1_X1': 2,
'XSUB2_X1': 1,
'XSUB2_X2': 3
}
assert {x.name for x in ckt.elements} == set(myparametermap.keys())
assert set(ckt.nets) == {'net1', 'net2', 'net3', 'XSUB1_net1'}
assert all(ckt.element(elem).parameters['myparameter'] == param for elem, param in myparametermap.items()), [ckt.element(elem).parameters['myparameter'] for elem in myparametermap.keys()] | 0.642096 | 0.667053 |
import json
from kafka import KafkaConsumer, KafkaProducer
from kafka.structs import TopicPartition
from tethys.core.transports.connectors.connector_base import (
ConnectorBase,
ConnectionBase,
)
class KafkaConnection(ConnectionBase):
def __init__(
self,
channel_id: str,
group_id: str,
partition: int,
bootstrap_servers: list,
producer_params: dict,
consumer_params: dict,
):
self.topic = channel_id
self.group_id = group_id or channel_id
self.partition = partition or 0
self.bootstrap_servers = bootstrap_servers or []
self.producer_params = producer_params or {}
self.consumer_params = consumer_params or {}
self._consumer = None
self._producer = None
def _get_consumer(self):
enable_auto_commit = self.consumer_params.pop("enable_auto_commit", False)
auto_offset_reset = self.consumer_params.pop("auto_offset_reset", "earliest")
consumer_timeout_ms = self.consumer_params.pop("consumer_timeout_ms", 10 * 1000)
max_poll_records = self.consumer_params.pop("max_poll_records", 1)
value_deserializer = self.consumer_params.pop(
"value_deserializer", lambda x: json.loads(x.decode("utf-8"))
)
consumer = KafkaConsumer(
group_id=self.group_id,
bootstrap_servers=self.bootstrap_servers,
consumer_timeout_ms=consumer_timeout_ms,
enable_auto_commit=enable_auto_commit,
auto_offset_reset=auto_offset_reset,
max_poll_records=max_poll_records,
value_deserializer=value_deserializer,
**self.consumer_params
)
consumer.assign([TopicPartition(self.topic, self.partition)])
return consumer
def _get_producer(self):
value_serializer = self.producer_params.pop(
"value_serializer", lambda x: json.dumps(x).encode()
)
producer = KafkaProducer(
bootstrap_servers=self.bootstrap_servers,
value_serializer=value_serializer,
**self.producer_params
)
return producer
def recv_iter(self, **kwargs):
for message in self._consumer:
yield "", message
def send(self, data_packet, **kwargs):
self._producer.send(self.topic, value=data_packet, **kwargs).get()
def ack(self, message_key, **kwargs):
self._consumer.commit()
def open(self, **kwargs) -> "KafkaConnection":
self._consumer = self._get_consumer()
self._producer = self._get_producer()
return self
def close(self, **kwargs) -> "KafkaConnection":
self._consumer = None
self._producer = None
return self
class KafkaConnector(ConnectorBase):
def __init__(
self,
partition: int = 0,
bootstrap_servers: list = None,
producer_params: dict = None,
consumer_params: dict = None,
):
self.partition = partition
self.bootstrap_servers = bootstrap_servers or []
self.producer_params = producer_params or {}
self.consumer_params = consumer_params or {}
def connect(
self,
channel_id: str,
group_id: str = None,
partition: int = 0,
bootstrap_servers: list = None,
producer_params: dict = None,
consumer_params: dict = None,
**kwargs
) -> "KafkaConnection":
topic = channel_id
group_id = group_id or channel_id
partition = partition or 0
bootstrap_servers = bootstrap_servers or []
producer_params = producer_params or {}
consumer_params = consumer_params or {}
return KafkaConnection(
topic,
group_id,
partition,
bootstrap_servers,
producer_params,
consumer_params,
).open() | tethys/core/transports/connectors/connector_kafka.py |
import json
from kafka import KafkaConsumer, KafkaProducer
from kafka.structs import TopicPartition
from tethys.core.transports.connectors.connector_base import (
ConnectorBase,
ConnectionBase,
)
class KafkaConnection(ConnectionBase):
def __init__(
self,
channel_id: str,
group_id: str,
partition: int,
bootstrap_servers: list,
producer_params: dict,
consumer_params: dict,
):
self.topic = channel_id
self.group_id = group_id or channel_id
self.partition = partition or 0
self.bootstrap_servers = bootstrap_servers or []
self.producer_params = producer_params or {}
self.consumer_params = consumer_params or {}
self._consumer = None
self._producer = None
def _get_consumer(self):
enable_auto_commit = self.consumer_params.pop("enable_auto_commit", False)
auto_offset_reset = self.consumer_params.pop("auto_offset_reset", "earliest")
consumer_timeout_ms = self.consumer_params.pop("consumer_timeout_ms", 10 * 1000)
max_poll_records = self.consumer_params.pop("max_poll_records", 1)
value_deserializer = self.consumer_params.pop(
"value_deserializer", lambda x: json.loads(x.decode("utf-8"))
)
consumer = KafkaConsumer(
group_id=self.group_id,
bootstrap_servers=self.bootstrap_servers,
consumer_timeout_ms=consumer_timeout_ms,
enable_auto_commit=enable_auto_commit,
auto_offset_reset=auto_offset_reset,
max_poll_records=max_poll_records,
value_deserializer=value_deserializer,
**self.consumer_params
)
consumer.assign([TopicPartition(self.topic, self.partition)])
return consumer
def _get_producer(self):
value_serializer = self.producer_params.pop(
"value_serializer", lambda x: json.dumps(x).encode()
)
producer = KafkaProducer(
bootstrap_servers=self.bootstrap_servers,
value_serializer=value_serializer,
**self.producer_params
)
return producer
def recv_iter(self, **kwargs):
for message in self._consumer:
yield "", message
def send(self, data_packet, **kwargs):
self._producer.send(self.topic, value=data_packet, **kwargs).get()
def ack(self, message_key, **kwargs):
self._consumer.commit()
def open(self, **kwargs) -> "KafkaConnection":
self._consumer = self._get_consumer()
self._producer = self._get_producer()
return self
def close(self, **kwargs) -> "KafkaConnection":
self._consumer = None
self._producer = None
return self
class KafkaConnector(ConnectorBase):
def __init__(
self,
partition: int = 0,
bootstrap_servers: list = None,
producer_params: dict = None,
consumer_params: dict = None,
):
self.partition = partition
self.bootstrap_servers = bootstrap_servers or []
self.producer_params = producer_params or {}
self.consumer_params = consumer_params or {}
def connect(
self,
channel_id: str,
group_id: str = None,
partition: int = 0,
bootstrap_servers: list = None,
producer_params: dict = None,
consumer_params: dict = None,
**kwargs
) -> "KafkaConnection":
topic = channel_id
group_id = group_id or channel_id
partition = partition or 0
bootstrap_servers = bootstrap_servers or []
producer_params = producer_params or {}
consumer_params = consumer_params or {}
return KafkaConnection(
topic,
group_id,
partition,
bootstrap_servers,
producer_params,
consumer_params,
).open() | 0.651466 | 0.076857 |
import pygame as pyg
from pygame.locals import *
import random, time
from spritesheet.Sprites import *
clock = pyg.time.Clock()
FPS = 60
pyg.init()
screen = pyg.display.set_mode([800, 600], RESIZABLE)
mouse = pyg.transform.scale(pyg.image.load('./res/mouse/mouse_0.png'), (16, 16))
seletor = pyg.transform.scale(pyg.image.load('./res/mouse/mouse_1.png'), (16, 16))
with open('./res/mapas/mapa1.txt') as m1:
game_map = m1.readlines()
def po():
r = random.randint(0, 300)
# pedra
if r < 20:
return '3'
# arvore
elif r < 40:
return '4'
# grama
else:
return '1'
def ra():
return po()
def generate(gm):
mapa = []
for num in gm:
if num != '1\\n':
mapa.append(f'22222222222222222222222222222222222222222222222222222222222222222')
else:
mapa.append(f'222222222222222222222222222222222222222222222222222222222222222222\\n')
break
for num in gm:
if num != '1\\n':
mapa.append(f'2{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}2')
else:
mapa.append(f'2{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}2\\n')
for num in gm:
if num != '1\\n':
mapa.append(f'22222222222222222222222222222222222222222222222222222222222222222')
else:
mapa.append(f'2222222222222222222222222222222222222222222222222222222222222222222\\n')
break
with open('./res/mapas/mapa1.txt', 'w') as m1:
for m in mapa:
m1.writelines(m+'\n')
return mapa
newm = generate(game_map)
auto = False
while True:
clock.tick(FPS)
for event in pyg.event.get():
if event.type == QUIT:
pyg.quit()
exit()
if event.type == KEYDOWN:
if event.key == K_e:
if not auto:
auto = True
else:
auto = False
screen.fill((0, 0, 10))
if auto:
newm = generate(game_map)
time.sleep(.5)
tile_rects = []
y = 0
for layer in newm:
x = 0
for tile in layer:
# grama
if tile == '1':
pyg.draw.rect(screen, (0, 180, 0), (x * 16, y * 16, 16, 16))
# agua
if tile == '2':
pyg.draw.rect(screen, (0, 0, 180), (x * 16, y * 16, 16, 16))
# pedra
if tile == '3':
pyg.draw.rect(screen, (180, 180, 180), (x * 16, y * 16, 16, 16))
# arvore
if tile == '4':
pyg.draw.rect(screen, (107, 29, 5), (x * 16, y * 16, 16, 16))
# areia
if tile == '5':
pyg.draw.rect(screen, (230, 201, 14), (x * 16, y * 16, 16, 16))
# buraco
if tile == '6':
pyg.draw.rect(screen, (31, 8, 1), (x * 16, y * 16, 16, 16))
x += 1
y += 1
screen.blit(mouse, (100, 100))
pyg.display.update() | testsmap.py | import pygame as pyg
from pygame.locals import *
import random, time
from spritesheet.Sprites import *
clock = pyg.time.Clock()
FPS = 60
pyg.init()
screen = pyg.display.set_mode([800, 600], RESIZABLE)
mouse = pyg.transform.scale(pyg.image.load('./res/mouse/mouse_0.png'), (16, 16))
seletor = pyg.transform.scale(pyg.image.load('./res/mouse/mouse_1.png'), (16, 16))
with open('./res/mapas/mapa1.txt') as m1:
game_map = m1.readlines()
def po():
r = random.randint(0, 300)
# pedra
if r < 20:
return '3'
# arvore
elif r < 40:
return '4'
# grama
else:
return '1'
def ra():
return po()
def generate(gm):
mapa = []
for num in gm:
if num != '1\\n':
mapa.append(f'22222222222222222222222222222222222222222222222222222222222222222')
else:
mapa.append(f'222222222222222222222222222222222222222222222222222222222222222222\\n')
break
for num in gm:
if num != '1\\n':
mapa.append(f'2{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}2')
else:
mapa.append(f'2{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}'
f'{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}{ra()}2\\n')
for num in gm:
if num != '1\\n':
mapa.append(f'22222222222222222222222222222222222222222222222222222222222222222')
else:
mapa.append(f'2222222222222222222222222222222222222222222222222222222222222222222\\n')
break
with open('./res/mapas/mapa1.txt', 'w') as m1:
for m in mapa:
m1.writelines(m+'\n')
return mapa
newm = generate(game_map)
auto = False
while True:
clock.tick(FPS)
for event in pyg.event.get():
if event.type == QUIT:
pyg.quit()
exit()
if event.type == KEYDOWN:
if event.key == K_e:
if not auto:
auto = True
else:
auto = False
screen.fill((0, 0, 10))
if auto:
newm = generate(game_map)
time.sleep(.5)
tile_rects = []
y = 0
for layer in newm:
x = 0
for tile in layer:
# grama
if tile == '1':
pyg.draw.rect(screen, (0, 180, 0), (x * 16, y * 16, 16, 16))
# agua
if tile == '2':
pyg.draw.rect(screen, (0, 0, 180), (x * 16, y * 16, 16, 16))
# pedra
if tile == '3':
pyg.draw.rect(screen, (180, 180, 180), (x * 16, y * 16, 16, 16))
# arvore
if tile == '4':
pyg.draw.rect(screen, (107, 29, 5), (x * 16, y * 16, 16, 16))
# areia
if tile == '5':
pyg.draw.rect(screen, (230, 201, 14), (x * 16, y * 16, 16, 16))
# buraco
if tile == '6':
pyg.draw.rect(screen, (31, 8, 1), (x * 16, y * 16, 16, 16))
x += 1
y += 1
screen.blit(mouse, (100, 100))
pyg.display.update() | 0.088224 | 0.167253 |
from __future__ import division, print_function, unicode_literals
import distro
import itertools
import logging
from rpaths import Path
import subprocess
import time
from reprozip.common import Package
from reprozip.utils import iteritems, listvalues
logger = logging.getLogger('reprozip')
# Pseudo-filesystems: files under these paths can never belong to a package
magic_dirs = ('/dev', '/proc', '/sys')
# Directories where distribution packages install files; only files under
# these are looked up in the package database (everything else is "unknown")
system_dirs = ('/bin', '/etc', '/lib', '/sbin', '/usr', '/var', '/run')
class PkgManager(object):
    """Base class for package identifiers.

    Subclasses should provide either `search_for_files` or `search_for_file`
    which actually identifies the package for a file.
    """
    def __init__(self):
        # Traced files that could not be attributed to any package
        self.unknown_files = set()
        # Maps package name to the identified Package, `files` attribute set
        self.packages = {}

    def filter_files(self, files):
        """Yield each distinct, package-relevant file exactly once.

        Files under pseudo-filesystems or outside the system directories are
        filtered out (the latter are recorded in `unknown_files`).
        """
        visited = set()
        for entry in files:
            if entry.path in visited:
                continue
            visited.add(entry.path)
            if not self._filter(entry):
                yield entry

    def search_for_files(self, files):
        """Attribute each file to a package, or record it as unknown.

        Queries the package database once per file via
        `_get_packages_for_file`; files claimed by zero or several packages
        end up in `unknown_files`.
        """
        assigned_count = 0
        for entry in self.filter_files(files):
            names = self._get_packages_for_file(entry.path)
            if not names:
                # No package claims this file
                self.unknown_files.add(entry)
                continue
            candidates = []
            for name in names:
                pkg = self.packages.get(name)
                if pkg is None:
                    pkg = self._create_package(name)
                    if pkg is None:
                        continue
                    self.packages[name] = pkg
                candidates.append(pkg)
            if len(candidates) == 1:
                # Unambiguous owner: attach the file to that package
                candidates[0].add_file(entry)
                assigned_count += 1
            else:
                # Ambiguous (or package lookup failed): treat as unknown
                self.unknown_files.add(entry)
        # Drop packages that ended up with no files attached
        self.packages = {name: pkg
                         for name, pkg in iteritems(self.packages)
                         if pkg.files}
        logger.info("%d packages with %d files, and %d other files",
                    len(self.packages),
                    assigned_count,
                    len(self.unknown_files))

    def _filter(self, f):
        """Return True if `f` is irrelevant to package identification."""
        path = f.path
        # Pseudo-filesystems never contain packaged files
        if any(path.lies_under(d) for d in magic_dirs):
            return True
        # Files outside the system directories (or under /usr/local) cannot
        # come from a distribution package: record them as unknown
        if path.lies_under('/usr/local') or not any(
                path.lies_under(d) for d in system_dirs):
            self.unknown_files.add(f)
            return True
        return False

    def _get_packages_for_file(self, filename):
        raise NotImplementedError

    def _create_package(self, pkgname):
        raise NotImplementedError
# Number of paths passed to dpkg-query per invocation; kept small so the
# command line stays well under the kernel's limit
# (before Linux 2.6.23, maximum argv is 128kB)
MAX_ARGV = 800
class DpkgManager(PkgManager):
"""Package identifier for deb-based systems (Debian, Ubuntu).
"""
def search_for_files(self, files):
# Make a set of all the requested files
requested = dict((f.path, f) for f in self.filter_files(files))
found = {} # {path: pkgname}
# Request a few files at a time so we don't hit the command-line size
# limit
iter_batch = iter(requested)
while True:
batch = list(itertools.islice(iter_batch, MAX_ARGV))
if not batch:
break
proc = subprocess.Popen(['dpkg-query', '-S'] +
[path.path for path in batch],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
for line in out.splitlines():
pkgname, path = line.split(b': ', 1)
path = Path(path.strip())
# 8-bit safe encoding, because this might be a localized error
# message (that we don't care about)
pkgname = pkgname.decode('iso-8859-1')
if ', ' in pkgname: # Multiple packages
found[path] = None
continue
pkgname = pkgname.split(':', 1)[0] # Remove :arch
if path in requested:
if ' ' not in pkgname:
# If we had assigned it to a package already, undo
if path in found:
found[path] = None
# Else assign to the package
else:
found[path] = pkgname
# Remaining files are not from packages
self.unknown_files.update(
f for f in files
if f.path in requested and found.get(f.path) is None)
nb_pkg_files = 0
for path, pkgname in iteritems(found):
if pkgname is None:
continue
if pkgname in self.packages:
package = self.packages[pkgname]
else:
package = self._create_package(pkgname)
self.packages[pkgname] = package
package.add_file(requested.pop(path))
nb_pkg_files += 1
logger.info("%d packages with %d files, and %d other files",
len(self.packages),
nb_pkg_files,
len(self.unknown_files))
def _get_packages_for_file(self, filename):
# This method is not used for dpkg: instead, we query multiple files at
# once since it is faster
assert False
def _create_package(self, pkgname):
p = subprocess.Popen(['dpkg-query',
'--showformat=${Package}\t'
'${Version}\t'
'${Installed-Size}\n',
'-W',
pkgname],
stdout=subprocess.PIPE)
try:
size = version = None
for line in p.stdout:
fields = line.split()
# Removes :arch
name = fields[0].decode('ascii').split(':', 1)[0]
if name == pkgname:
version = fields[1].decode('ascii')
size = int(fields[2].decode('ascii')) * 1024 # kbytes
break
for line in p.stdout: # finish draining stdout
pass
finally:
p.wait()
if p.returncode == 0:
pkg = Package(pkgname, version, size=size)
logger.debug("Found package %s", pkg)
return pkg
else:
return None
class RpmManager(PkgManager):
"""Package identifier for rpm-based systems (Fedora, CentOS).
"""
def _get_packages_for_file(self, filename):
p = subprocess.Popen(['rpm', '-qf', filename.path,
'--qf', '%{NAME}'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
return None
return [line.strip().decode('iso-8859-1')
for line in out.splitlines()
if line]
def _create_package(self, pkgname):
p = subprocess.Popen(['rpm', '-q', pkgname,
'--qf', '%{VERSION}-%{RELEASE} %{SIZE}'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode == 0:
version, size = out.strip().decode('iso-8859-1').rsplit(' ', 1)
size = int(size)
pkg = Package(pkgname, version, size=size)
logger.debug("Found package %s", pkg)
return pkg
else:
return None
def identify_packages(files):
"""Organizes the files, using the distribution's package manager.
"""
distribution = distro.id()
if distribution in ('debian', 'ubuntu'):
logger.info("Identifying Debian packages for %d files...", len(files))
manager = DpkgManager()
elif (distribution in ('centos', 'centos linux',
'fedora', 'scientific linux') or
distribution.startswith('red hat')):
logger.info("Identifying RPM packages for %d files...", len(files))
manager = RpmManager()
else:
logger.info("Unknown distribution, can't identify packages")
return files, []
begin = time.time()
manager.search_for_files(files)
logger.debug("Assigning files to packages took %f seconds",
(time.time() - begin))
return manager.unknown_files, listvalues(manager.packages) | reprozip/reprozip/tracer/linux_pkgs.py | from __future__ import division, print_function, unicode_literals
import distro
import itertools
import logging
from rpaths import Path
import subprocess
import time
from reprozip.common import Package
from reprozip.utils import iteritems, listvalues
logger = logging.getLogger('reprozip')
magic_dirs = ('/dev', '/proc', '/sys')
system_dirs = ('/bin', '/etc', '/lib', '/sbin', '/usr', '/var', '/run')
class PkgManager(object):
"""Base class for package identifiers.
Subclasses should provide either `search_for_files` or `search_for_file`
which actually identifies the package for a file.
"""
def __init__(self):
# Files that were not part of a package
self.unknown_files = set()
# All the packages identified, with their `files` attribute set
self.packages = {}
def filter_files(self, files):
seen_files = set()
for f in files:
if f.path not in seen_files:
if not self._filter(f):
yield f
seen_files.add(f.path)
def search_for_files(self, files):
nb_pkg_files = 0
for f in self.filter_files(files):
pkgnames = self._get_packages_for_file(f.path)
# Stores the file
if not pkgnames:
self.unknown_files.add(f)
else:
pkgs = []
for pkgname in pkgnames:
if pkgname in self.packages:
pkgs.append(self.packages[pkgname])
else:
pkg = self._create_package(pkgname)
if pkg is not None:
self.packages[pkgname] = pkg
pkgs.append(self.packages[pkgname])
if len(pkgs) == 1:
pkgs[0].add_file(f)
nb_pkg_files += 1
else:
self.unknown_files.add(f)
# Filter out packages with no files
self.packages = {pkgname: pkg
for pkgname, pkg in iteritems(self.packages)
if pkg.files}
logger.info("%d packages with %d files, and %d other files",
len(self.packages),
nb_pkg_files,
len(self.unknown_files))
def _filter(self, f):
# Special files
if any(f.path.lies_under(c) for c in magic_dirs):
return True
# If it's not in a system directory, no need to look for it
if (f.path.lies_under('/usr/local') or
not any(f.path.lies_under(c) for c in system_dirs)):
self.unknown_files.add(f)
return True
return False
def _get_packages_for_file(self, filename):
raise NotImplementedError
def _create_package(self, pkgname):
raise NotImplementedError
# Before Linux 2.6.23, maximum argv is 128kB
MAX_ARGV = 800
class DpkgManager(PkgManager):
"""Package identifier for deb-based systems (Debian, Ubuntu).
"""
def search_for_files(self, files):
# Make a set of all the requested files
requested = dict((f.path, f) for f in self.filter_files(files))
found = {} # {path: pkgname}
# Request a few files at a time so we don't hit the command-line size
# limit
iter_batch = iter(requested)
while True:
batch = list(itertools.islice(iter_batch, MAX_ARGV))
if not batch:
break
proc = subprocess.Popen(['dpkg-query', '-S'] +
[path.path for path in batch],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
for line in out.splitlines():
pkgname, path = line.split(b': ', 1)
path = Path(path.strip())
# 8-bit safe encoding, because this might be a localized error
# message (that we don't care about)
pkgname = pkgname.decode('iso-8859-1')
if ', ' in pkgname: # Multiple packages
found[path] = None
continue
pkgname = pkgname.split(':', 1)[0] # Remove :arch
if path in requested:
if ' ' not in pkgname:
# If we had assigned it to a package already, undo
if path in found:
found[path] = None
# Else assign to the package
else:
found[path] = pkgname
# Remaining files are not from packages
self.unknown_files.update(
f for f in files
if f.path in requested and found.get(f.path) is None)
nb_pkg_files = 0
for path, pkgname in iteritems(found):
if pkgname is None:
continue
if pkgname in self.packages:
package = self.packages[pkgname]
else:
package = self._create_package(pkgname)
self.packages[pkgname] = package
package.add_file(requested.pop(path))
nb_pkg_files += 1
logger.info("%d packages with %d files, and %d other files",
len(self.packages),
nb_pkg_files,
len(self.unknown_files))
def _get_packages_for_file(self, filename):
# This method is not used for dpkg: instead, we query multiple files at
# once since it is faster
assert False
def _create_package(self, pkgname):
p = subprocess.Popen(['dpkg-query',
'--showformat=${Package}\t'
'${Version}\t'
'${Installed-Size}\n',
'-W',
pkgname],
stdout=subprocess.PIPE)
try:
size = version = None
for line in p.stdout:
fields = line.split()
# Removes :arch
name = fields[0].decode('ascii').split(':', 1)[0]
if name == pkgname:
version = fields[1].decode('ascii')
size = int(fields[2].decode('ascii')) * 1024 # kbytes
break
for line in p.stdout: # finish draining stdout
pass
finally:
p.wait()
if p.returncode == 0:
pkg = Package(pkgname, version, size=size)
logger.debug("Found package %s", pkg)
return pkg
else:
return None
class RpmManager(PkgManager):
"""Package identifier for rpm-based systems (Fedora, CentOS).
"""
def _get_packages_for_file(self, filename):
p = subprocess.Popen(['rpm', '-qf', filename.path,
'--qf', '%{NAME}'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
return None
return [line.strip().decode('iso-8859-1')
for line in out.splitlines()
if line]
def _create_package(self, pkgname):
p = subprocess.Popen(['rpm', '-q', pkgname,
'--qf', '%{VERSION}-%{RELEASE} %{SIZE}'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode == 0:
version, size = out.strip().decode('iso-8859-1').rsplit(' ', 1)
size = int(size)
pkg = Package(pkgname, version, size=size)
logger.debug("Found package %s", pkg)
return pkg
else:
return None
def identify_packages(files):
"""Organizes the files, using the distribution's package manager.
"""
distribution = distro.id()
if distribution in ('debian', 'ubuntu'):
logger.info("Identifying Debian packages for %d files...", len(files))
manager = DpkgManager()
elif (distribution in ('centos', 'centos linux',
'fedora', 'scientific linux') or
distribution.startswith('red hat')):
logger.info("Identifying RPM packages for %d files...", len(files))
manager = RpmManager()
else:
logger.info("Unknown distribution, can't identify packages")
return files, []
begin = time.time()
manager.search_for_files(files)
logger.debug("Assigning files to packages took %f seconds",
(time.time() - begin))
return manager.unknown_files, listvalues(manager.packages) | 0.482185 | 0.112016 |
from model.rbm import RBM
import tensorflow as tf
import copy
from functools import partial
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class RBMRealPos(RBM):
"""
This class is used to define a restricted Boltzmann machine with real and
positive wavefunction and as an ansatz for |\Psi(x)|^2.
$\Psi(x) = \sqrt(e^{ax} \times \Pi_{j=1}^{H} \cosh(W_jx + b_j))$
where a = bv is the visible bias, b = bh is the hidden bias, and H is the number of hidden nodes.
"""
def __init__(self, num_visible, density=2, initializer=None, use_bias=True, num_expe=None):
"""
Construct an RBM model for real positive wavefunction
Args:
num_visible: number of visible
density: the number hidden layer define as density * num_visible
initializer: the initialization of the weights
use_bias: use bias or not
num_expe: number of experiment to determine the seed
"""
RBM.__init__(self, num_visible, density)
self.initializer = initializer
self.use_bias = use_bias
self.num_expe = num_expe
if num_expe is not None:
np.random.seed(num_expe)
tf.random.set_seed(num_expe)
self.build_model()
def build_model(self):
"""
Build the RBM model
"""
## Randomly initialize the weights and bias
self.random_initialize()
## Create the model
self.create_variable()
def random_initialize(self):
"""
Randomly initialize an array based on the initializer. Biases array are initialized zero.
"""
## Weights (W)
self.W_array = self.initializer(size=(self.num_visible, self.num_hidden))
## Visible bias (a)
self.bv_array = np.zeros((1, self.num_visible))
## Hidden bias (b)
self.bh_array = np.zeros((1, self.num_hidden))
def create_variable(self):
"""
Create model by creating a parameters variable, which is the weight, visible bias, hidden bias
"""
self.W = tf.Variable(tf.convert_to_tensor(value=self.W_array.astype(np.float32)), name="weights", trainable=True)
self.bv = tf.Variable(tf.convert_to_tensor(value=self.bv_array.astype(np.float32)), name="visible_bias", trainable=True)
self.bh = tf.Variable(tf.convert_to_tensor(value=self.bh_array.astype(np.float32)), name="hidden_bias", trainable=True)
self.model = self
self.trainable_weights = [self.W, self.bv, self.bh]
def log_val(self, x):
"""
Calculate log(\Psi(x)) = 0.5 * (ax + \sum_{j=1}^H log(cosh(Wx + b)))
Args:
x: the x
"""
## Calculate theta = Wx + b
theta = tf.matmul(x, self.W) + self.bh
## Calculate \sum_{j=1}^H log(cosh(Wx + b))
sum_ln_thetas = tf.reduce_sum(input_tensor=tf.math.log(tf.cosh(theta)), axis=1, keepdims=True)
## calculate ax
ln_bias = tf.matmul(x, tf.transpose(a=self.bv))
return 0.5 * (sum_ln_thetas + ln_bias)
def log_val_diff(self, xprime, x):
"""
Calculate log(\Psi(x')) - log(\Psi(x))
Args:
xprime: x'
x: x
"""
log_val_1 = self.log_val(xprime)
log_val_2 = self.log_val(x)
return log_val_1 - log_val_2
def derlog(self, x):
"""
Calculate $D_{W}(x) = D_{W} = (1 / \Psi(x)) * (d \Psi(x) / dW)$ where W can be the weights or the biases.
"""
sample_size = x.shape[0]
## Calculate theta = Wx + b
theta = tf.matmul(x, self.W) + self.bh
## D_a(x) = x
## D_b(x) = tanh(Wx + b)
if self.use_bias:
D_bv = 0.5 * x
D_bh = 0.5 * tf.tanh(theta)
else:
D_bv = x * 0.0
D_bh = tf.tanh(theta) * 0.0
# D_W(x) = x * tanh(Wx+b)
D_w = tf.reshape(tf.tanh(theta), (sample_size, 1, self.num_hidden)) * tf.reshape(x, (sample_size, self.num_visible, 1))
D_bv = tf.reshape(D_bv, (sample_size, 1, self.num_visible))
D_bh = tf.reshape(D_bh, (sample_size, 1, self.num_hidden))
derlogs = [D_w, D_bv, D_bh]
return derlogs
def param_difference (self, first_param, last_param):
"""
Calculate the difference between two parameters.
This is equals to the sum of the mean squared difference of all parameters (weights and biases)
"""
sum_diff = 0.
for (par1, par2) in zip(first_param[1], last_param[1]):
sum_diff += np.mean((par1 - par2) ** 2)
return sum_diff
def get_new_visible(self, v):
"""
Get new visibile by sampling h from p(h|v) and
then sampling v from p(v | h)
"""
hprob = self.get_hidden_prob_given_visible(v)
hstate = self.convert_from_prob_to_state(hprob)
vprob = self.get_visible_prob_given_hidden(hstate)
vstate = self.convert_from_prob_to_state(vprob)
return vstate
def get_hidden_prob_given_visible(self, v):
"""
Calculate p(h | v)
"""
return tf.sigmoid(2.0 * (tf.matmul(v, self.W) + self.bh))
def get_visible_prob_given_hidden(self, h):
"""
Calculate p(v | h)
"""
return tf.sigmoid(2.0 * (tf.matmul(h, tf.transpose(a=self.W)) + self.bv))
def convert_from_prob_to_state(self, prob):
"""
Get state of -1 and 1 from probability
"""
v = prob - tf.random.uniform(tf.shape(input=prob), 0, 1)
return tf.where(tf.greater_equal(v, tf.zeros_like(v)), tf.ones_like(v), -1 * tf.ones_like(v))
def visualize_param (self, params, path):
"""
Visualize every parameters
Args:
params: the parameters that visualize
path: the path to save the visualization
"""
epoch = params[0]
for ii, param in enumerate(params[1]):
plt.figure()
if ii == 0:
plt.title("Weight at epoch %d" % (epoch))
elif ii == 1:
plt.title("Visible Bias at epoch %d" % (epoch))
elif ii == 2:
plt.title("Hidden Bias at epoch %d" % (epoch))
plt.imshow(param, cmap='hot', interpolation='nearest')
plt.xticks(np.arange(0, param.shape[1], 1.0))
plt.yticks(np.arange(0, param.shape[0], 1.0))
plt.colorbar()
plt.tight_layout()
if ii == 0:
plt.savefig(path + '/weight-layer-%d.png' % (epoch))
elif ii == 1:
plt.savefig(path + '/visbias-layer-%d.png' % (epoch))
elif ii == 2:
plt.savefig(path + '/hidbias-layer-%d.png' % (epoch))
plt.close()
def get_parameters(self):
"""
Get the parameter of this model
"""
return [self.W.numpy(), self.bv.numpy(), self.bh.numpy()]
def set_parameters(self, params):
"""
Set the parameters for this model for transfer learning or loading model purposes
Args:
params: the parameters to be set.
"""
self.W.assign(params[0])
self.bv.assign(params[1])
self.bh.assign(params[2])
def get_name(self):
"""
Get the name of the model
"""
return 'rbmrealpos-%d' % (self.num_hidden)
def make_pickle_object(self):
"""
Make pickle object for RBM
Nothing to do for RBM
"""
pass
def __str__(self):
return 'RBMRealPos %d' % (self.num_hidden)
def to_xml(self):
stri = ""
stri += "<model>\n"
stri += "\t<type>rbm_real_pos</type>\n"
stri += "\t<params>\n"
stri += "\t\t<num_visible>%d</num_visible>\n" % self.num_visible
stri += "\t\t<num_hidden>%d</num_hidden>\n" % self.num_hidden
stri += "\t\t<density>%d</density>\n" % self.density
stri += "\t\t<initializer>%s</initializer>\n" % str(self.initializer)
stri += "\t\t<use_bias>%s</use_bias>\n" % str(self.use_bias)
stri += "\t\t<num_expe>%s</num_expe>\n" % str(self.num_expe)
stri += "\t</params>\n"
stri += "</model>\n"
return stri | model/rbm/realpos/rbm_realpos.py | from model.rbm import RBM
import tensorflow as tf
import copy
from functools import partial
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class RBMRealPos(RBM):
"""
This class is used to define a restricted Boltzmann machine with real and
positive wavefunction and as an ansatz for |\Psi(x)|^2.
$\Psi(x) = \sqrt(e^{ax} \times \Pi_{j=1}^{H} \cosh(W_jx + b_j))$
where a = bv is the visible bias, b = bh is the hidden bias, and H is the number of hidden nodes.
"""
def __init__(self, num_visible, density=2, initializer=None, use_bias=True, num_expe=None):
"""
Construct an RBM model for real positive wavefunction
Args:
num_visible: number of visible
density: the number hidden layer define as density * num_visible
initializer: the initialization of the weights
use_bias: use bias or not
num_expe: number of experiment to determine the seed
"""
RBM.__init__(self, num_visible, density)
self.initializer = initializer
self.use_bias = use_bias
self.num_expe = num_expe
if num_expe is not None:
np.random.seed(num_expe)
tf.random.set_seed(num_expe)
self.build_model()
def build_model(self):
"""
Build the RBM model
"""
## Randomly initialize the weights and bias
self.random_initialize()
## Create the model
self.create_variable()
def random_initialize(self):
"""
Randomly initialize an array based on the initializer. Biases array are initialized zero.
"""
## Weights (W)
self.W_array = self.initializer(size=(self.num_visible, self.num_hidden))
## Visible bias (a)
self.bv_array = np.zeros((1, self.num_visible))
## Hidden bias (b)
self.bh_array = np.zeros((1, self.num_hidden))
def create_variable(self):
"""
Create model by creating a parameters variable, which is the weight, visible bias, hidden bias
"""
self.W = tf.Variable(tf.convert_to_tensor(value=self.W_array.astype(np.float32)), name="weights", trainable=True)
self.bv = tf.Variable(tf.convert_to_tensor(value=self.bv_array.astype(np.float32)), name="visible_bias", trainable=True)
self.bh = tf.Variable(tf.convert_to_tensor(value=self.bh_array.astype(np.float32)), name="hidden_bias", trainable=True)
self.model = self
self.trainable_weights = [self.W, self.bv, self.bh]
def log_val(self, x):
"""
Calculate log(\Psi(x)) = 0.5 * (ax + \sum_{j=1}^H log(cosh(Wx + b)))
Args:
x: the x
"""
## Calculate theta = Wx + b
theta = tf.matmul(x, self.W) + self.bh
## Calculate \sum_{j=1}^H log(cosh(Wx + b))
sum_ln_thetas = tf.reduce_sum(input_tensor=tf.math.log(tf.cosh(theta)), axis=1, keepdims=True)
## calculate ax
ln_bias = tf.matmul(x, tf.transpose(a=self.bv))
return 0.5 * (sum_ln_thetas + ln_bias)
def log_val_diff(self, xprime, x):
"""
Calculate log(\Psi(x')) - log(\Psi(x))
Args:
xprime: x'
x: x
"""
log_val_1 = self.log_val(xprime)
log_val_2 = self.log_val(x)
return log_val_1 - log_val_2
def derlog(self, x):
"""
Calculate $D_{W}(x) = D_{W} = (1 / \Psi(x)) * (d \Psi(x) / dW)$ where W can be the weights or the biases.
"""
sample_size = x.shape[0]
## Calculate theta = Wx + b
theta = tf.matmul(x, self.W) + self.bh
## D_a(x) = x
## D_b(x) = tanh(Wx + b)
if self.use_bias:
D_bv = 0.5 * x
D_bh = 0.5 * tf.tanh(theta)
else:
D_bv = x * 0.0
D_bh = tf.tanh(theta) * 0.0
# D_W(x) = x * tanh(Wx+b)
D_w = tf.reshape(tf.tanh(theta), (sample_size, 1, self.num_hidden)) * tf.reshape(x, (sample_size, self.num_visible, 1))
D_bv = tf.reshape(D_bv, (sample_size, 1, self.num_visible))
D_bh = tf.reshape(D_bh, (sample_size, 1, self.num_hidden))
derlogs = [D_w, D_bv, D_bh]
return derlogs
def param_difference (self, first_param, last_param):
"""
Calculate the difference between two parameters.
This is equals to the sum of the mean squared difference of all parameters (weights and biases)
"""
sum_diff = 0.
for (par1, par2) in zip(first_param[1], last_param[1]):
sum_diff += np.mean((par1 - par2) ** 2)
return sum_diff
def get_new_visible(self, v):
"""
Get new visibile by sampling h from p(h|v) and
then sampling v from p(v | h)
"""
hprob = self.get_hidden_prob_given_visible(v)
hstate = self.convert_from_prob_to_state(hprob)
vprob = self.get_visible_prob_given_hidden(hstate)
vstate = self.convert_from_prob_to_state(vprob)
return vstate
def get_hidden_prob_given_visible(self, v):
"""
Calculate p(h | v)
"""
return tf.sigmoid(2.0 * (tf.matmul(v, self.W) + self.bh))
def get_visible_prob_given_hidden(self, h):
"""
Calculate p(v | h)
"""
return tf.sigmoid(2.0 * (tf.matmul(h, tf.transpose(a=self.W)) + self.bv))
def convert_from_prob_to_state(self, prob):
"""
Get state of -1 and 1 from probability
"""
v = prob - tf.random.uniform(tf.shape(input=prob), 0, 1)
return tf.where(tf.greater_equal(v, tf.zeros_like(v)), tf.ones_like(v), -1 * tf.ones_like(v))
def visualize_param (self, params, path):
"""
Visualize every parameters
Args:
params: the parameters that visualize
path: the path to save the visualization
"""
epoch = params[0]
for ii, param in enumerate(params[1]):
plt.figure()
if ii == 0:
plt.title("Weight at epoch %d" % (epoch))
elif ii == 1:
plt.title("Visible Bias at epoch %d" % (epoch))
elif ii == 2:
plt.title("Hidden Bias at epoch %d" % (epoch))
plt.imshow(param, cmap='hot', interpolation='nearest')
plt.xticks(np.arange(0, param.shape[1], 1.0))
plt.yticks(np.arange(0, param.shape[0], 1.0))
plt.colorbar()
plt.tight_layout()
if ii == 0:
plt.savefig(path + '/weight-layer-%d.png' % (epoch))
elif ii == 1:
plt.savefig(path + '/visbias-layer-%d.png' % (epoch))
elif ii == 2:
plt.savefig(path + '/hidbias-layer-%d.png' % (epoch))
plt.close()
def get_parameters(self):
"""
Get the parameter of this model
"""
return [self.W.numpy(), self.bv.numpy(), self.bh.numpy()]
def set_parameters(self, params):
"""
Set the parameters for this model for transfer learning or loading model purposes
Args:
params: the parameters to be set.
"""
self.W.assign(params[0])
self.bv.assign(params[1])
self.bh.assign(params[2])
def get_name(self):
"""
Get the name of the model
"""
return 'rbmrealpos-%d' % (self.num_hidden)
def make_pickle_object(self):
"""
Make pickle object for RBM
Nothing to do for RBM
"""
pass
def __str__(self):
return 'RBMRealPos %d' % (self.num_hidden)
def to_xml(self):
stri = ""
stri += "<model>\n"
stri += "\t<type>rbm_real_pos</type>\n"
stri += "\t<params>\n"
stri += "\t\t<num_visible>%d</num_visible>\n" % self.num_visible
stri += "\t\t<num_hidden>%d</num_hidden>\n" % self.num_hidden
stri += "\t\t<density>%d</density>\n" % self.density
stri += "\t\t<initializer>%s</initializer>\n" % str(self.initializer)
stri += "\t\t<use_bias>%s</use_bias>\n" % str(self.use_bias)
stri += "\t\t<num_expe>%s</num_expe>\n" % str(self.num_expe)
stri += "\t</params>\n"
stri += "</model>\n"
return stri | 0.814717 | 0.738315 |
import math
import random
import time
import pybullet as p
import pybullet_data as pd
random.seed(10)
from blind_walking.envs.env_modifiers.env_modifier import EnvModifier
textureId = -1
useProgrammatic = 0
useTerrainFromPNG = 1
useDeepLocoCSV = 2
updateHeightfield = False
heightfieldSource = useProgrammatic
numHeightfieldRows = 256
numHeightfieldColumns = 256
class HeightField(EnvModifier):
def __init__(self):
self.hf_id = 0
self.terrainShape = 0
self.heightfieldData = [0] * numHeightfieldRows * numHeightfieldColumns
super().__init__()
def _generate(self, env, start_x=0, heightPerturbationRange=0.08, friction=0.5):
env.pybullet_client.setAdditionalSearchPath(pd.getDataPath())
env.pybullet_client.configureDebugVisualizer(env.pybullet_client.COV_ENABLE_RENDERING, 0)
heightPerturbationRange = heightPerturbationRange
if heightfieldSource == useProgrammatic:
for j in range(int(numHeightfieldColumns / 2)):
for i in range(int(numHeightfieldRows / 2)):
height = random.uniform(0, heightPerturbationRange)
self.heightfieldData[2 * i + 2 * j * numHeightfieldRows] = height
self.heightfieldData[2 * i + 1 + 2 * j * numHeightfieldRows] = height
self.heightfieldData[2 * i + (2 * j + 1) * numHeightfieldRows] = height
self.heightfieldData[2 * i + 1 + (2 * j + 1) * numHeightfieldRows] = height
terrainShape = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_HEIGHTFIELD,
meshScale=[0.07, 0.07, 1.6],
heightfieldTextureScaling=(numHeightfieldRows - 1) / 2,
heightfieldData=self.heightfieldData,
numHeightfieldRows=numHeightfieldRows,
numHeightfieldColumns=numHeightfieldColumns,
)
terrain = env.pybullet_client.createMultiBody(0, terrainShape)
env.pybullet_client.resetBasePositionAndOrientation(terrain, [start_x, 0, 0.0], [0, 0, 0, 1])
env.pybullet_client.changeDynamics(terrain, -1, lateralFriction=friction)
if heightfieldSource == useDeepLocoCSV:
terrainShape = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_HEIGHTFIELD,
meshScale=[0.5, 0.5, 2.5],
fileName="heightmaps/ground0.txt",
heightfieldTextureScaling=128,
)
terrain = env.pybullet_client.createMultiBody(0, terrainShape)
env.pybullet_client.resetBasePositionAndOrientation(terrain, [start_x, 0, 0], [0, 0, 0, 1])
env.pybullet_client.changeDynamics(terrain, -1, lateralFriction=friction)
if heightfieldSource == useTerrainFromPNG:
terrainShape = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_HEIGHTFIELD,
meshScale=[0.05, 0.05, 1.8],
fileName="heightmaps/wm_height_out.png",
)
textureId = env.pybullet_client.loadTexture("heightmaps/gimp_overlay_out.png")
terrain = env.pybullet_client.createMultiBody(0, terrainShape)
env.pybullet_client.changeVisualShape(terrain, -1, textureUniqueId=textureId)
env.pybullet_client.resetBasePositionAndOrientation(terrain, [start_x, 0, 0.1], [0, 0, 0, 1])
env.pybullet_client.changeDynamics(terrain, -1, lateralFriction=friction)
self.hf_id = terrainShape
self.terrainShape = terrainShape
# print("TERRAIN SHAPE: {}".format(terrainShape))
env.pybullet_client.changeVisualShape(terrain, -1, rgbaColor=[1, 1, 1, 1])
env.pybullet_client.configureDebugVisualizer(env.pybullet_client.COV_ENABLE_RENDERING, 1)
def _reset(self, env, heightPerturbationRange=0.08):
if heightfieldSource == useProgrammatic:
for j in range(int(numHeightfieldColumns / 2)):
for i in range(int(numHeightfieldRows / 2)):
height = random.uniform(0, heightPerturbationRange) # +math.sin(time.time())
self.heightfieldData[2 * i + 2 * j * numHeightfieldRows] = height
self.heightfieldData[2 * i + 1 + 2 * j * numHeightfieldRows] = height
self.heightfieldData[2 * i + (2 * j + 1) * numHeightfieldRows] = height
self.heightfieldData[2 * i + 1 + (2 * j + 1) * numHeightfieldRows] = height
# GEOM_CONCAVE_INTERNAL_EDGE may help avoid getting stuck at an internal (shared) edge of the triangle/heightfield.
# GEOM_CONCAVE_INTERNAL_EDGE is a bit slower to build though.
flags = p.GEOM_CONCAVE_INTERNAL_EDGE
# flags = 0
self.terrainShape = p.createCollisionShape(
shapeType=p.GEOM_HEIGHTFIELD,
flags=flags,
meshScale=[0.07, 0.07, 1.6],
heightfieldTextureScaling=(numHeightfieldRows - 1) / 2,
heightfieldData=self.heightfieldData,
numHeightfieldRows=numHeightfieldRows,
numHeightfieldColumns=numHeightfieldColumns,
replaceHeightfieldIndex=self.terrainShape,
) | blind_walking/envs/env_modifiers/heightfield.py | import math
import random
import time
import pybullet as p
import pybullet_data as pd
random.seed(10)
from blind_walking.envs.env_modifiers.env_modifier import EnvModifier
textureId = -1
useProgrammatic = 0
useTerrainFromPNG = 1
useDeepLocoCSV = 2
updateHeightfield = False
heightfieldSource = useProgrammatic
numHeightfieldRows = 256
numHeightfieldColumns = 256
class HeightField(EnvModifier):
def __init__(self):
self.hf_id = 0
self.terrainShape = 0
self.heightfieldData = [0] * numHeightfieldRows * numHeightfieldColumns
super().__init__()
def _generate(self, env, start_x=0, heightPerturbationRange=0.08, friction=0.5):
env.pybullet_client.setAdditionalSearchPath(pd.getDataPath())
env.pybullet_client.configureDebugVisualizer(env.pybullet_client.COV_ENABLE_RENDERING, 0)
heightPerturbationRange = heightPerturbationRange
if heightfieldSource == useProgrammatic:
for j in range(int(numHeightfieldColumns / 2)):
for i in range(int(numHeightfieldRows / 2)):
height = random.uniform(0, heightPerturbationRange)
self.heightfieldData[2 * i + 2 * j * numHeightfieldRows] = height
self.heightfieldData[2 * i + 1 + 2 * j * numHeightfieldRows] = height
self.heightfieldData[2 * i + (2 * j + 1) * numHeightfieldRows] = height
self.heightfieldData[2 * i + 1 + (2 * j + 1) * numHeightfieldRows] = height
terrainShape = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_HEIGHTFIELD,
meshScale=[0.07, 0.07, 1.6],
heightfieldTextureScaling=(numHeightfieldRows - 1) / 2,
heightfieldData=self.heightfieldData,
numHeightfieldRows=numHeightfieldRows,
numHeightfieldColumns=numHeightfieldColumns,
)
terrain = env.pybullet_client.createMultiBody(0, terrainShape)
env.pybullet_client.resetBasePositionAndOrientation(terrain, [start_x, 0, 0.0], [0, 0, 0, 1])
env.pybullet_client.changeDynamics(terrain, -1, lateralFriction=friction)
if heightfieldSource == useDeepLocoCSV:
terrainShape = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_HEIGHTFIELD,
meshScale=[0.5, 0.5, 2.5],
fileName="heightmaps/ground0.txt",
heightfieldTextureScaling=128,
)
terrain = env.pybullet_client.createMultiBody(0, terrainShape)
env.pybullet_client.resetBasePositionAndOrientation(terrain, [start_x, 0, 0], [0, 0, 0, 1])
env.pybullet_client.changeDynamics(terrain, -1, lateralFriction=friction)
if heightfieldSource == useTerrainFromPNG:
terrainShape = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_HEIGHTFIELD,
meshScale=[0.05, 0.05, 1.8],
fileName="heightmaps/wm_height_out.png",
)
textureId = env.pybullet_client.loadTexture("heightmaps/gimp_overlay_out.png")
terrain = env.pybullet_client.createMultiBody(0, terrainShape)
env.pybullet_client.changeVisualShape(terrain, -1, textureUniqueId=textureId)
env.pybullet_client.resetBasePositionAndOrientation(terrain, [start_x, 0, 0.1], [0, 0, 0, 1])
env.pybullet_client.changeDynamics(terrain, -1, lateralFriction=friction)
self.hf_id = terrainShape
self.terrainShape = terrainShape
# print("TERRAIN SHAPE: {}".format(terrainShape))
env.pybullet_client.changeVisualShape(terrain, -1, rgbaColor=[1, 1, 1, 1])
env.pybullet_client.configureDebugVisualizer(env.pybullet_client.COV_ENABLE_RENDERING, 1)
def _reset(self, env, heightPerturbationRange=0.08):
if heightfieldSource == useProgrammatic:
for j in range(int(numHeightfieldColumns / 2)):
for i in range(int(numHeightfieldRows / 2)):
height = random.uniform(0, heightPerturbationRange) # +math.sin(time.time())
self.heightfieldData[2 * i + 2 * j * numHeightfieldRows] = height
self.heightfieldData[2 * i + 1 + 2 * j * numHeightfieldRows] = height
self.heightfieldData[2 * i + (2 * j + 1) * numHeightfieldRows] = height
self.heightfieldData[2 * i + 1 + (2 * j + 1) * numHeightfieldRows] = height
# GEOM_CONCAVE_INTERNAL_EDGE may help avoid getting stuck at an internal (shared) edge of the triangle/heightfield.
# GEOM_CONCAVE_INTERNAL_EDGE is a bit slower to build though.
flags = p.GEOM_CONCAVE_INTERNAL_EDGE
# flags = 0
self.terrainShape = p.createCollisionShape(
shapeType=p.GEOM_HEIGHTFIELD,
flags=flags,
meshScale=[0.07, 0.07, 1.6],
heightfieldTextureScaling=(numHeightfieldRows - 1) / 2,
heightfieldData=self.heightfieldData,
numHeightfieldRows=numHeightfieldRows,
numHeightfieldColumns=numHeightfieldColumns,
replaceHeightfieldIndex=self.terrainShape,
) | 0.23546 | 0.265273 |
from django.conf.urls import url, include
from django.contrib import admin
from django.urls import path, re_path
from django.conf import settings
from rest_framework import routers, permissions
# NOTE(review): `permissions` is already imported on the line above, and
# `include_docs_urls` below appears unused in this module -- candidates for
# cleanup.
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework.documentation import include_docs_urls
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from knox import views as knox_views
from rest_api.views.views import *
# REST API router; trailing_slash=False so endpoints look like /api/v1/face.
router = routers.DefaultRouter(trailing_slash=False)
router.register(r'appliance', ApplianceViewSet, 'Appliance')
router.register(r'reading', TBotReadingViewSet, 'TBotReading')
router.register(r'face', FaceViewSet, 'Face')
# https://books.agiliq.com/projects/django-admin-cookbook/en/latest/change_text.html
admin.site.site_header = "TBot Administration"
admin.site.site_title = "TBot Admin Portal"
admin.site.index_title = "Welcome to TBot Portal"
"""
urls that are forwarded from nginx:
admin/
api/
auth/
swagger/
static/
"""
# Token-auth endpoints backed by django-rest-knox.
auth_url_list = [
    path('login/', LoginView.as_view(), name='knox_login'),
    path('logout/', knox_views.LogoutView.as_view(), name='knox_logout'),
    path('logoutall/', knox_views.LogoutAllView.as_view(), name='knox_logoutall'),
]
# The API is versioned via a URL kwarg; only "v1" is currently accepted.
api_url_list = [
    url('(?P<version>(v1))/', include(router.urls)),
    path('auth/', include(auth_url_list))
]
urlpatterns = [
    path('api/', include(api_url_list)),
]
# Only enable Django admin and DRF auth pages if DEBUG=True
if settings.DEBUG:
    # Also non-prefixed paths
    urlpatterns.insert(1, path('admin/', admin.site.urls))
    auth_url_list.append(
        path('api/',
             include('rest_framework.urls',
                     namespace='rest_framework')))
# Swagger / ReDoc schema browsers are exposed outside production only.
if not settings.IS_PRODUCTION:
    schema_view = get_schema_view(
        openapi.Info(
            title="TBot API",
            default_version='v1',
            description="Test description",
            terms_of_service="https://www.google.com/policies/terms/",
            contact=openapi.Contact(email="<EMAIL>"),
            license=openapi.License(name="BSD License"),
        ),
        public=True,
        permission_classes=(permissions.AllowAny,),
    )
    urlpatterns += [
        re_path(r'^swagger(?P<format>\.json|\.yaml)$',
                schema_view.without_ui(cache_timeout=0),
                name='schema-json'),
        re_path(r'^swagger/$',
                schema_view.with_ui('swagger', cache_timeout=0),
                name='schema-swagger-ui'),
        re_path(r'^redoc/$',
                schema_view.with_ui('redoc', cache_timeout=0),
                name='schema-redoc'),
    ]
urlpatterns += staticfiles_urlpatterns() | tbot/rest_api/urls.py | from django.conf.urls import url, include
from django.contrib import admin
from django.urls import path, re_path
from django.conf import settings
from rest_framework import routers, permissions
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework.documentation import include_docs_urls
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from knox import views as knox_views
from rest_api.views.views import *
router = routers.DefaultRouter(trailing_slash=False)
router.register(r'appliance', ApplianceViewSet, 'Appliance')
router.register(r'reading', TBotReadingViewSet, 'TBotReading')
router.register(r'face', FaceViewSet, 'Face')
# https://books.agiliq.com/projects/django-admin-cookbook/en/latest/change_text.html
admin.site.site_header = "TBot Administration"
admin.site.site_title = "TBot Admin Portal"
admin.site.index_title = "Welcome to TBot Portal"
"""
urls that are forwarded from nginx:
admin/
api/
auth/
swagger/
static/
"""
auth_url_list = [
path('login/', LoginView.as_view(), name='knox_login'),
path('logout/', knox_views.LogoutView.as_view(), name='knox_logout'),
path('logoutall/', knox_views.LogoutAllView.as_view(), name='knox_logoutall'),
]
api_url_list = [
url('(?P<version>(v1))/', include(router.urls)),
path('auth/', include(auth_url_list))
]
urlpatterns = [
path('api/', include(api_url_list)),
]
# Only enable Django admin and DRF auth pages if DEBUG=True
if settings.DEBUG:
# Also non-prefixed paths
urlpatterns.insert(1, path('admin/', admin.site.urls))
auth_url_list.append(
path('api/',
include('rest_framework.urls',
namespace='rest_framework')))
if not settings.IS_PRODUCTION:
schema_view = get_schema_view(
openapi.Info(
title="TBot API",
default_version='v1',
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="<EMAIL>"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns += [
re_path(r'^swagger(?P<format>\.json|\.yaml)$',
schema_view.without_ui(cache_timeout=0),
name='schema-json'),
re_path(r'^swagger/$',
schema_view.with_ui('swagger', cache_timeout=0),
name='schema-swagger-ui'),
re_path(r'^redoc/$',
schema_view.with_ui('redoc', cache_timeout=0),
name='schema-redoc'),
]
urlpatterns += staticfiles_urlpatterns() | 0.337859 | 0.066873 |
import torch
from . import scatter_add, scatter_max
def scatter_logsumexp(src, index, dim=-1, out=None, dim_size=None,
fill_value=None, eps=1e-12):
r"""Fills :attr:`out` with the log of summed exponentials of all values
from the :attr:`src` tensor at the indices specified in the :attr:`index`
tensor along a given axis :attr:`dim`.
If multiple indices reference the same location, their
**exponential contributions add**
(`cf.` :meth:`~torch_scatter.scatter_add`).
For one-dimensional tensors, the operation computes
.. math::
\mathrm{out}_i = \log \, \left( \exp(\mathrm{out}_i) + \sum_j
\exp(\mathrm{src}_j) \right)
where :math:`\sum_j` is over :math:`j` such that
:math:`\mathrm{index}_j = i`.
Args:
src (Tensor): The source tensor.
index (LongTensor): The indices of elements to scatter.
dim (int, optional): The axis along which to index.
(default: :obj:`-1`)
out (Tensor, optional): The destination tensor. (default: :obj:`None`)
dim_size (int, optional): If :attr:`out` is not given, automatically
create output with size :attr:`dim_size` at dimension :attr:`dim`.
If :attr:`dim_size` is not given, a minimal sized output tensor is
returned. (default: :obj:`None`)
fill_value (int, optional): If :attr:`out` is not given, automatically
fill output tensor with :attr:`fill_value`. (default: :obj:`None`)
eps (float, optional): Small value to ensure numerical stability.
(default: :obj:`1e-12`)
:rtype: :class:`Tensor`
"""
if not torch.is_floating_point(src):
raise ValueError('`scatter_logsumexp` can only be computed over '
'tensors with floating point data types.')
max_value_per_index, _ = scatter_max(src, index, dim, out, dim_size,
fill_value)
max_per_src_element = max_value_per_index.gather(dim, index)
recentered_scores = src - max_per_src_element
out = (out - max_per_src_element).exp() if out is not None else None
sum_per_index = scatter_add(recentered_scores.exp(), index, dim, out,
dim_size, fill_value=0)
return torch.log(sum_per_index + eps) + max_value_per_index | torch_scatter/logsumexp.py | import torch
from . import scatter_add, scatter_max
def scatter_logsumexp(src, index, dim=-1, out=None, dim_size=None,
                      fill_value=None, eps=1e-12):
    r"""Fills :attr:`out` with the log of summed exponentials of all values
    from the :attr:`src` tensor at the indices specified in the :attr:`index`
    tensor along a given axis :attr:`dim`.
    If multiple indices reference the same location, their
    **exponential contributions add**
    (`cf.` :meth:`~torch_scatter.scatter_add`).
    For one-dimensional tensors, the operation computes
    .. math::
        \mathrm{out}_i = \log \, \left( \exp(\mathrm{out}_i) + \sum_j
        \exp(\mathrm{src}_j) \right)
    where :math:`\sum_j` is over :math:`j` such that
    :math:`\mathrm{index}_j = i`.
    Args:
        src (Tensor): The source tensor.
        index (LongTensor): The indices of elements to scatter.
        dim (int, optional): The axis along which to index.
            (default: :obj:`-1`)
        out (Tensor, optional): The destination tensor. (default: :obj:`None`)
        dim_size (int, optional): If :attr:`out` is not given, automatically
            create output with size :attr:`dim_size` at dimension :attr:`dim`.
            If :attr:`dim_size` is not given, a minimal sized output tensor is
            returned. (default: :obj:`None`)
        fill_value (int, optional): If :attr:`out` is not given, automatically
            fill output tensor with :attr:`fill_value`. (default: :obj:`None`)
        eps (float, optional): Small value to ensure numerical stability.
            (default: :obj:`1e-12`)
    :rtype: :class:`Tensor`
    """
    if not torch.is_floating_point(src):
        raise ValueError('`scatter_logsumexp` can only be computed over '
                         'tensors with floating point data types.')
    # Numerically stable log-sum-exp: shift every source value by its
    # index's maximum before exponentiating, then add the maximum back
    # after the log.
    max_value_per_index, _ = scatter_max(src, index, dim, out, dim_size,
                                         fill_value)
    # Broadcast each index's maximum back onto its source elements.
    max_per_src_element = max_value_per_index.gather(dim, index)
    recentered_scores = src - max_per_src_element
    # Fold an existing `out` into the sum in the same shifted domain.
    # NOTE(review): `out` is out-shaped while `max_per_src_element` is
    # src-shaped -- this line looks correct only when those shapes coincide;
    # verify against callers (upstream torch_scatter later changed this).
    out = (out - max_per_src_element).exp() if out is not None else None
    sum_per_index = scatter_add(recentered_scores.exp(), index, dim, out,
                                dim_size, fill_value=0)
    # `eps` guards against log(0) for indices that received no contributions.
    # NOTE(review): the trailing "| 0.917094 | 0.588209" below looks like
    # dataset-dump residue fused onto the return statement -- confirm and
    # remove.
    return torch.log(sum_per_index + eps) + max_value_per_index | 0.917094 | 0.588209
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from spl_sirna.sirna_util import get_seq_motif, idx_to_seq
USE_CUDA = torch.cuda.is_available()
# DEVICE = torch.device('cuda' if USE_CUDA else 'cpu')
class Word2vecModel(nn.Module):
    """Skip-gram word2vec with negative sampling.

    Keeps separate input ("center") and output ("context") embedding tables;
    the input table is the one exported after training via
    :meth:`get_input_embeddings`.
    """

    def __init__(self, vocab_size, embed_size):
        """Initialise the input and output embedding tables.

        Args:
            vocab_size: number of tokens in the vocabulary.
            embed_size: dimensionality of each embedding vector.
        """
        super(Word2vecModel, self).__init__()
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        # Standard word2vec initialisation: uniform in +/- 0.5 / embed_size.
        initrange = 0.5 / self.embed_size
        self.out_embed = nn.Embedding(self.vocab_size,
                                      self.embed_size,
                                      sparse=False)
        self.out_embed.weight.data.uniform_(-initrange, initrange)
        self.in_embed = nn.Embedding(self.vocab_size,
                                     self.embed_size,
                                     sparse=False)
        self.in_embed.weight.data.uniform_(-initrange, initrange)

    def forward(self, input_labels, pos_labels, neg_labels):
        """Compute the negative-sampling loss.

        Args:
            input_labels: centre words, shape ``[batch_size]``.
            pos_labels: context words seen inside the window,
                shape ``[batch_size, window_size * 2]``.
            neg_labels: negatively sampled words,
                shape ``[batch_size, window_size * 2 * K]``.

        Returns:
            Per-example loss to be minimised, shape ``[batch_size]``.
        """
        input_embedding = self.in_embed(input_labels)  # B * embed_size
        pos_embedding = self.out_embed(pos_labels)  # B * (2*C) * embed_size
        neg_embedding = self.out_embed(
            neg_labels)  # B * (2*C * K) * embed_size
        # Dot products between each centre word and its context / negative
        # samples. BUG FIX: squeeze(2) instead of squeeze() so a batch of
        # size 1 keeps its batch dimension.
        log_pos = torch.bmm(
            pos_embedding, input_embedding.unsqueeze(2)).squeeze(2)  # B * (2*C)
        log_neg = torch.bmm(
            neg_embedding,
            -input_embedding.unsqueeze(2)).squeeze(2)  # B * (2*C*K)
        # Average (rather than sum) over the context/negative dimension so
        # the two terms are on a comparable scale regardless of K.
        log_pos = F.logsigmoid(log_pos).sum(1) / log_pos.shape[1]
        log_neg = F.logsigmoid(log_neg).sum(1) / log_neg.shape[1]  # batch_size
        loss = log_pos + log_neg  # [batch_size]
        return -loss

    def get_input_embeddings(self):
        """Return the trained input embedding matrix as a NumPy array."""
        return self.in_embed.weight.data.cpu().numpy()
class MultiMotifLSTMModel(nn.Module):
    """Three-branch LSTM regressor over multi-size motif encodings.

    Each motif size (default 1-, 2- and 3-mers) has its own embedding table
    and LSTM; the three hidden-state summaries are concatenated and passed
    through a fully connected head to produce the prediction.
    """

    def __init__(self,
                 vocab_size,
                 embedding_dim,
                 hidden_dim,
                 output_dim,
                 n_layers=1,
                 bidirectional=False,
                 dropout=0,
                 avg_hidden=True,
                 motif=[1, 2, 3],
                 loadvec=True,
                 device='cpu'):
        '''
        Desc:
            Initialise the model and define the network layers.
        Args:
            vocab_size: list[int] -- vocabulary size for each motif branch
            embedding_dim: list[int] -- embedding dimension for each motif branch
            hidden_dim: int -- hidden dimension of the LSTMs
            output_dim: int -- output dimension
            n_layers: int -- number of LSTM layers
            bidirectional: bool -- whether the LSTMs are bidirectional
            dropout: float -- dropout probability (used in the LSTM and Dropout layers)
            avg_hidden: bool -- if True use the time-averaged LSTM outputs as the
                summary, otherwise use the last hidden state
            motif: list[int] -- motif sizes handled by the branches
            loadvec: bool -- load pretrained word2vec embedding matrices from disk
            device: str -- device inputs are moved to in forward()
        '''
        super(MultiMotifLSTMModel, self).__init__()
        self.bidirectional = bidirectional
        self.avg_hidden = avg_hidden
        self.motif = motif
        # A single int means one branch; a list gives one branch per motif size.
        self.motif_num = 1 if type(motif) == int else len(motif)
        self.pre_output_dim = hidden_dim * 2 if self.bidirectional else hidden_dim
        self.device = device
        # --- branch 1: 1-mer motifs ---
        self.input_embed_1 = nn.Embedding(vocab_size[0], embedding_dim[0])
        if loadvec:
            embed_1 = np.load('./embedding/motif-1/embedding-E100-C1-K1.npy')
            self.input_embed_1.weight.data.copy_(torch.from_numpy(embed_1))
        self.lstm_1 = nn.LSTM(embedding_dim[0],
                              hidden_dim,
                              num_layers=n_layers,
                              batch_first=True,
                              bidirectional=bidirectional,
                              dropout=(dropout if n_layers > 1 else 0))
        self.fc1_1 = nn.Linear(self.pre_output_dim, self.pre_output_dim)
        self.bn_1 = nn.BatchNorm1d(self.pre_output_dim)
        self.fc2_1 = nn.Linear(self.pre_output_dim, output_dim)
        # --- branch 2: 2-mer motifs ---
        self.input_embed_2 = nn.Embedding(vocab_size[1], embedding_dim[1])
        if loadvec:
            embed_2 = np.load('./embedding/motif-2/embedding-E25-C1-K1.npy')
            self.input_embed_2.weight.data.copy_(torch.from_numpy(embed_2))
        self.lstm_2 = nn.LSTM(embedding_dim[1],
                              hidden_dim,
                              num_layers=n_layers,
                              batch_first=True,
                              bidirectional=bidirectional,
                              dropout=(dropout if n_layers > 1 else 0))
        self.fc1_2 = nn.Linear(self.pre_output_dim, self.pre_output_dim)
        self.bn_2 = nn.BatchNorm1d(self.pre_output_dim)
        self.fc2_2 = nn.Linear(self.pre_output_dim, output_dim)
        # --- branch 3: 3-mer motifs ---
        self.input_embed_3 = nn.Embedding(vocab_size[2], embedding_dim[2])
        if loadvec:
            embed_3 = np.load('./embedding/motif-3/embedding-E200-C1-K2.npy')
            # BUG FIX: this used to copy embed_2 into input_embed_2 a second
            # time, so the motif-3 pretrained vectors were loaded but never
            # used. Copy embed_3 into input_embed_3 instead.
            self.input_embed_3.weight.data.copy_(torch.from_numpy(embed_3))
        self.lstm_3 = nn.LSTM(embedding_dim[2],
                              hidden_dim,
                              num_layers=n_layers,
                              batch_first=True,
                              bidirectional=bidirectional,
                              dropout=(dropout if n_layers > 1 else 0))
        self.fc1_3 = nn.Linear(self.pre_output_dim, self.pre_output_dim)
        self.bn_3 = nn.BatchNorm1d(self.pre_output_dim)
        self.fc2_3 = nn.Linear(self.pre_output_dim, output_dim)
        # --- shared activations and heads ---
        self.relu = nn.ReLU(inplace=True)
        self.leaky_relu = nn.LeakyReLU(0.1, inplace=True)
        self.dropout = nn.Dropout(dropout)
        # Several alternative heads remain registered (fc3, fc4/bn4/bn5/fc5,
        # per-branch fc1_*/bn_*/fc2_*) so existing checkpoints keep loading;
        # forward() currently uses the fc6 -> bn6 -> fc7 path only.
        self.fc3 = nn.Linear(self.motif_num, output_dim)
        self.bn4 = nn.BatchNorm1d(self.pre_output_dim * self.motif_num)
        self.fc4 = nn.Linear(self.pre_output_dim * self.motif_num, self.pre_output_dim)
        self.bn5 = nn.BatchNorm1d(self.pre_output_dim)
        self.fc5 = nn.Linear(self.pre_output_dim, output_dim)
        self.fc6 = nn.Linear(self.pre_output_dim * self.motif_num, hidden_dim)
        self.bn6 = nn.BatchNorm1d(hidden_dim)
        self.fc7 = nn.Linear(hidden_dim, output_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, seq):
        '''
        Desc:
            Forward pass.
        Args:
            seq: tensor(batch_size, seq_size) -- input sequence of 1-mer indices
        Returns:
            output: tensor(batch_size, output_dim) -- predicted value
        '''
        seq = seq.long().to(self.device)
        # Branch 1 embeds the raw 1-mer indices directly.
        embed_seq_1 = self.dropout(self.input_embed_1(seq))  # [B, T, E1]
        # Re-derive the 2-mer and 3-mer index sequences from the decoded
        # string representation of `seq`.
        seqs = idx_to_seq(seq, self.motif)
        seq_motif1 = get_seq_motif(seqs, self.motif[1])[0]
        seq_motif2 = get_seq_motif(seqs, self.motif[2])[0]
        seq_motif1 = torch.tensor(seq_motif1).long().to(self.device)
        seq_motif2 = torch.tensor(seq_motif2).long().to(self.device)
        # Branch 2 intentionally skips embedding dropout.
        embed_seq_2 = self.input_embed_2(seq_motif1)
        embed_seq_3 = self.dropout(self.input_embed_3(seq_motif2))
        # lstm_output: [B, T, hidden_dim * num_directions]
        # hidden, cell: [num_layers * num_directions, B, hidden_dim]
        lstm_output_1, (hidden_1, cell_1) = self.lstm_1(embed_seq_1)
        lstm_output_2, (hidden_2, cell_2) = self.lstm_2(embed_seq_2)
        lstm_output_3, (hidden_3, cell_3) = self.lstm_3(embed_seq_3)
        # Summaries: [B, pre_output_dim] each.
        hidden_1 = self.handle_hidden(lstm_output_1, hidden_1)
        hidden_2 = self.handle_hidden(lstm_output_2, hidden_2)
        hidden_3 = self.handle_hidden(lstm_output_3, hidden_3)
        # Concatenate the branch summaries: [B, pre_output_dim * motif_num].
        hidden = torch.cat((hidden_1, hidden_2, hidden_3), dim=1)
        # [B, pre_output_dim * motif_num] -> [B, hidden_dim] -> [B, output_dim]
        pre_output = self.leaky_relu(self.bn6(self.fc6(hidden)))
        pre_output = self.dropout(pre_output)
        output = self.fc7(pre_output)
        return output

    def handle_hidden(self, lstm_output, hidden):
        """Summarise an LSTM's outputs into one [B, pre_output_dim] tensor."""
        if self.avg_hidden:
            # Mean over the time dimension.
            hidden = torch.sum(lstm_output, 1) / lstm_output.size(1)
        else:
            if self.bidirectional:
                hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
            else:
                hidden = hidden[-1, :, :]
        return hidden
class EmbeddingLSTMModel(nn.Module):
    """Single-input LSTM regressor: embedding -> LSTM -> FC head."""

    def __init__(self,
                 vocab_size,
                 embedding_dim,
                 hidden_dim,
                 output_dim,
                 n_layers=1,
                 bidirectional=False,
                 dropout=0,
                 avg_hidden=True):
        '''
        Desc:
            Initialise the single-input LSTM model and define its layers.
        Args:
            vocab_size: int -- e.g. 5 for [A, G, U, C, T]
            embedding_dim: int -- embedding dimension
            hidden_dim: int -- hidden dimension of the LSTM
            output_dim: int -- output dimension
            n_layers: int -- number of LSTM layers
            bidirectional: bool -- whether the LSTM is bidirectional
            dropout: float -- dropout probability (used in the LSTM and Dropout layers)
            avg_hidden: bool -- if True use the time-averaged LSTM outputs as the
                summary, otherwise use the last hidden state
        '''
        super(EmbeddingLSTMModel, self).__init__()
        self.bidirectional = bidirectional
        self.avg_hidden = avg_hidden
        self.pre_output_dim = hidden_dim * 2 if self.bidirectional else hidden_dim
        self.input_embed = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim,
                            hidden_dim,
                            num_layers=n_layers,
                            batch_first=True,
                            bidirectional=bidirectional,
                            dropout=(dropout if n_layers > 1 else 0))
        self.fc1 = nn.Linear(self.pre_output_dim, self.pre_output_dim)
        self.bn = nn.BatchNorm1d(self.pre_output_dim)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(self.pre_output_dim, output_dim)

    def forward(self, seq):
        '''
        Desc:
            Forward pass.
        Args:
            seq: tensor(batch_size, seq_size) -- input sequence indices
        Returns:
            output: tensor(batch_size, output_dim) -- predicted value
        '''
        # BUG FIX: this was hard-coded `.to('cuda')`, which crashed on
        # CPU-only machines; follow whatever device the model parameters
        # live on instead (identical behaviour for a CUDA-resident model).
        seq = seq.long().to(self.input_embed.weight.device)
        embed_seq = self.dropout(self.input_embed(seq))  # [B, T, E]
        # lstm_output: [B, T, hidden_dim * num_directions]
        # hidden, cell: [num_layers * num_directions, B, hidden_dim]
        lstm_output, (hidden, cell) = self.lstm(embed_seq)
        if self.avg_hidden:
            # Mean over the time dimension.
            hidden = torch.sum(lstm_output, 1) / lstm_output.size(1)
        else:
            if self.bidirectional:
                hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
            else:
                hidden = hidden[-1, :, :]
        # hidden: [B, hidden_dim * num_directions]
        pre_output = self.relu(self.bn(self.fc1(hidden)))
        pre_output = self.dropout(pre_output)
        output = self.fc2(pre_output)
        return output
class WordAVGModel(nn.Module):
    """Bag-of-words baseline: average the token embeddings, then two FC layers."""

    def __init__(self, vocab_size, embedding_dim, output_dim, dropout=0.5):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.fc1 = nn.Linear(embedding_dim,
                             int((embedding_dim + output_dim) / 2))
        self.fc2 = nn.Linear(int((embedding_dim + output_dim) / 2), output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        """Forward pass.

        Args:
            text: tensor(batch_size, seq_len) -- input token indices
        Returns:
            tensor(batch_size, output_dim) -- predicted value
        """
        # BUG FIX: this was hard-coded `.to('cuda')`, which crashed on
        # CPU-only machines; follow the device the model parameters live on.
        embedded = self.embedding(
            text.long().to(self.embedding.weight.device))  # [B, T, E]
        # Average-pool over the sequence dimension.
        # BUG FIX: squeeze(1) instead of squeeze() so a batch of size 1
        # keeps its batch dimension.
        pooled = F.avg_pool2d(
            embedded,
            (embedded.shape[1], 1)).squeeze(1)  # [B, E]
        output = self.dropout(self.fc1(pooled))
        return self.fc2(output)
def count_parameters(model=None):
'''
Desc:
计算模型中参数个数
Args:
model -- 待参数计数的模型
Returns:
res -- model中参数的个数
'''
if model is None:
raise ValueError("model不可为空")
return sum(p.numel() for p in model.parameters() if p.requires_grad) | RSC/model_util.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from spl_sirna.sirna_util import get_seq_motif, idx_to_seq
USE_CUDA = torch.cuda.is_available()
# DEVICE = torch.device('cuda' if USE_CUDA else 'cpu')
class Word2vecModel(nn.Module):
def __init__(self, vocab_size, embed_size):
''' 初始化输出和输出embedding
'''
super(Word2vecModel, self).__init__()
self.vocab_size = vocab_size
self.embed_size = embed_size
initrange = 0.5 / self.embed_size
self.out_embed = nn.Embedding(self.vocab_size,
self.embed_size,
sparse=False)
self.out_embed.weight.data.uniform_(-initrange, initrange)
self.in_embed = nn.Embedding(self.vocab_size,
self.embed_size,
sparse=False)
self.in_embed.weight.data.uniform_(-initrange, initrange)
def forward(self, input_labels, pos_labels, neg_labels):
'''
input_labels: 中心词, [batch_size]
pos_labels: 中心词周围 context window 出现过的单词 [batch_size, (window_size * 2)]
neg_labelss: 中心词周围没有出现过的单词,从 negative sampling 得到 [batch_size, (window_size * 2 * K)]
return: loss, [batch_size]
'''
batch_size = input_labels.size(0)
input_embedding = self.in_embed(input_labels) # B * embed_size
pos_embedding = self.out_embed(pos_labels) # B * (2*C) * embed_size
neg_embedding = self.out_embed(
neg_labels) # B * (2*C * K) * embed_size
log_pos = torch.bmm(
pos_embedding, input_embedding.unsqueeze(2)).squeeze() # B * (2*C)
log_neg = torch.bmm(
neg_embedding,
-input_embedding.unsqueeze(2)).squeeze() # B * (2*C*K)
# 对loss平均处理
log_pos = F.logsigmoid(log_pos).sum(1) / log_pos.shape[1]
log_neg = F.logsigmoid(log_neg).sum(1) / log_neg.shape[1] # batch_size
# log_pos = F.logsigmoid(log_pos).sum(1)
# log_neg = F.logsigmoid(log_neg).sum(1) # batch_size
loss = log_pos + log_neg #[batchsize]
return -loss
def get_input_embeddings(self):
return self.in_embed.weight.data.cpu().numpy()
class MultiMotifLSTMModel(nn.Module):
def __init__(self,
vocab_size,
embedding_dim,
hidden_dim,
output_dim,
n_layers=1,
bidirectional=False,
dropout=0,
avg_hidden=True,
motif=[1, 2, 3],
loadvec =True,
device='cpu'):
'''
Desc:
初始化模型,定义一些网络层级
Args:
vocab_size: int -- 5,即[A, G, U, C, T]
embedding_dim: int -- 词向量的维度
hidden_dim: int -- LSTM层hidden的维度
output_dim: int -- 输出的维度
n_layers: int -- LSTM的层数
bidirectional: bool -- LSTM是否双向
dropout: float -- drouput概率,使用在LSTM和Dropout层
avg_hidden: bool -- 是否将hidden的平均值作为结果输出,如果是False,则使用最后一个Hidden作为LSTM的输出
'''
super(MultiMotifLSTMModel, self).__init__()
self.bidirectional = bidirectional
self.avg_hidden = avg_hidden
self.motif = motif
# 如果motif是整数,则赋1,如果为 list 则为长度
self.motif_num = 1 if type(motif) == int else len(motif)
self.pre_output_dim = hidden_dim * 2 if self.bidirectional else hidden_dim
self.device = device
self.input_embed_1 = nn.Embedding(vocab_size[0], embedding_dim[0])
if loadvec:
embed_1 = np.load('./embedding/motif-1/embedding-E100-C1-K1.npy')
self.input_embed_1.weight.data.copy_(torch.from_numpy(embed_1))
self.lstm_1 = nn.LSTM(embedding_dim[0],
hidden_dim,
num_layers=n_layers,
batch_first=True,
bidirectional=bidirectional,
dropout=(dropout if n_layers > 1 else 0))
self.fc1_1 = nn.Linear(self.pre_output_dim, self.pre_output_dim)
self.bn_1 = nn.BatchNorm1d(self.pre_output_dim)
self.fc2_1 = nn.Linear(self.pre_output_dim, output_dim)
self.input_embed_2 = nn.Embedding(vocab_size[1], embedding_dim[1])
if loadvec:
embed_2 = np.load('./embedding/motif-2/embedding-E25-C1-K1.npy')
self.input_embed_2.weight.data.copy_(torch.from_numpy(embed_2))
self.lstm_2 = nn.LSTM(embedding_dim[1],
hidden_dim,
num_layers=n_layers,
batch_first=True,
bidirectional=bidirectional,
dropout=(dropout if n_layers > 1 else 0))
self.fc1_2 = nn.Linear(self.pre_output_dim, self.pre_output_dim)
self.bn_2 = nn.BatchNorm1d(self.pre_output_dim)
self.fc2_2 = nn.Linear(self.pre_output_dim, output_dim)
self.input_embed_3 = nn.Embedding(vocab_size[2], embedding_dim[2])
if loadvec:
embed_3 = np.load('./embedding/motif-3/embedding-E200-C1-K2.npy')
self.input_embed_2.weight.data.copy_(torch.from_numpy(embed_2))
self.lstm_3 = nn.LSTM(embedding_dim[2],
hidden_dim,
num_layers=n_layers,
batch_first=True,
bidirectional=bidirectional,
dropout=(dropout if n_layers > 1 else 0))
self.fc1_3 = nn.Linear(self.pre_output_dim, self.pre_output_dim)
self.bn_3 = nn.BatchNorm1d(self.pre_output_dim)
self.fc2_3 = nn.Linear(self.pre_output_dim, output_dim)
self.relu = nn.ReLU(inplace=True)
self.leaky_relu = nn.LeakyReLU(0.1, inplace=True)
self.dropout = nn.Dropout(dropout)
self.fc3 = nn.Linear(self.motif_num, output_dim)
self.bn4 = nn.BatchNorm1d(self.pre_output_dim * self.motif_num)
self.fc4 = nn.Linear(self.pre_output_dim * self.motif_num, self.pre_output_dim)
self.bn5 = nn.BatchNorm1d(self.pre_output_dim)
self.fc5 = nn.Linear(self.pre_output_dim, output_dim)
self.fc6 = nn.Linear(self.pre_output_dim * self.motif_num, hidden_dim)
self.bn6 = nn.BatchNorm1d(hidden_dim)
self.fc7 = nn.Linear(hidden_dim, output_dim)
self.sigmoid = nn.Sigmoid()
def forward(self, seq):
'''
Desc:
Forward pass
Args:
seq: tensor(batch_size, seq_size) -- 输入的序列
Returns:
output: tensor(batch_size, output_dim=1) -- Predicted value
'''
seq = seq.long().to(self.device)
embed_seq_1 = self.dropout(self.input_embed_1(seq)) # [batch_size, seq_size, embed_size]
seqs = idx_to_seq(seq, self.motif)
seq_motif1 = get_seq_motif(seqs, self.motif[1])[0]
seq_motif2 = get_seq_motif(seqs, self.motif[2])[0]
seq_motif1 = torch.tensor(seq_motif1).long().to(self.device)
seq_motif2 = torch.tensor(seq_motif2).long().to(self.device)
# embed_seq_2 = self.dropout(self.input_embed_2(seq_motif[1]))
embed_seq_2 = self.input_embed_2(seq_motif1)
embed_seq_3 = self.dropout(self.input_embed_3(seq_motif2))
# lstm_output: [batch size, seq_len, hid dim * num directions]
#hidden, cell: [num layers * num directions, batch size, hidden_dim]
lstm_output_1, (hidden_1, cell_1) = self.lstm_1(embed_seq_1)
lstm_output_2, (hidden_2, cell_2) = self.lstm_2(embed_seq_2)
lstm_output_3, (hidden_3, cell_3) = self.lstm_3(embed_seq_3)
# hidden: [batch_size, hidden_dim * num_directions]
hidden_1 = self.handle_hidden(lstm_output_1, hidden_1)
hidden_2 = self.handle_hidden(lstm_output_2, hidden_2)
hidden_3 = self.handle_hidden(lstm_output_3, hidden_3)
# concatenate hidden, [batch_size, pre_output_dim*motif]
hidden = torch.cat((hidden_1, hidden_2, hidden_3), dim=1)
# hidden = self.bn4(hidden)
# [batch_size, pre_output_dim*motif] -> [batch_size, hidden_dim]
pre_output = self.leaky_relu(self.bn6(self.fc6(hidden)))
pre_output = self.dropout(pre_output)
# hidden_dim -> output_dim
output = self.fc7(pre_output)
# pre_output_1 = self.relu(self.bn_1(self.fc1_1(hidden_1)))
# pre_output_1 = self.leaky_relu(self.bn_1(self.fc1_1(hidden_1)))
# pre_output_1 = self.dropout(pre_output_1)
# output_1 = self.fc2_1(pre_output_1) # [batch_size, output_dim]
# pre_output_2 = self.relu(self.bn_2(self.fc1_2(hidden_2)))
# pre_output_2 = self.dropout(pre_output_2)
# output_2 = self.fc2_2(pre_output_2) # [batch_size, output_dim]
# pre_output_3 = self.relu(self.bn_3(self.fc1_3(hidden_3)))
# pre_output_3 = self.dropout(pre_output_3)
# output_3 = self.fc2_3(pre_output_3) # [batch_size, output_dim]
# # batch_size, pre_hidden_dim -> batch_size, pre_hidden_dim*3 -> batch_size, 1
# output_pre = torch.cat((pre_output_1, pre_output_2, pre_output_3), dim=1)
# output_pre = self.bn4(output_pre)
# # batch_size, pre_hidden_dim*3 -> batch_size, pre_hidden_dim
# output_pre = self.relu(self.bn5(self.fc4(output_pre)))
# output_pre = self.dropout(output_pre)
# # batch_size, pre_hidden_dim -> batch_size, output_dim
# output = self.fc5(output_pre)
# output_pre = torch.cat((output_1, output_2, output_3), dim=1)
# output_pre = torch.cat((output_1, output_2), dim=1)
# output = self.fc3(output_pre)
return output
def handle_hidden(self, lstm_output, hidden):
if self.avg_hidden:
hidden = torch.sum(lstm_output, 1) / lstm_output.size(1)
else:
if self.bidirectional:
hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
else:
hidden = hidden[-1, :, :]
return hidden
class EmbeddingLSTMModel(nn.Module):
def __init__(self,
vocab_size,
embedding_dim,
hidden_dim,
output_dim,
n_layers=1,
bidirectional=False,
dropout=0,
# device='cpu',
avg_hidden=True):
'''
Desc:
初始化单一输入LSTM模型,定义一些网络层级
Args:
vocab_size: int -- 5,即[A, G, U, C, T]
embedding_dim: int -- 词向量的维度
hidden_dim: int -- LSTM层hidden的维度
output_dim: int -- 输出的维度
n_layers: int -- LSTM的层数
bidirectional: bool -- LSTM是否双向
dropout: float -- drouput概率,使用在LSTM和Dropout层
avg_hidden: bool -- 是否将hidden的平均值作为结果输出,如果是False,则使用最后一个Hidden作为LSTM的输出
'''
super(EmbeddingLSTMModel, self).__init__()
self.bidirectional = bidirectional
self.avg_hidden = avg_hidden
self.pre_output_dim = hidden_dim * 2 if self.bidirectional else hidden_dim
# self.device=device
self.input_embed = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim,
hidden_dim,
num_layers=n_layers,
batch_first=True,
bidirectional=bidirectional,
dropout=(dropout if n_layers > 1 else 0))
self.fc1 = nn.Linear(self.pre_output_dim, self.pre_output_dim)
self.bn = nn.BatchNorm1d(self.pre_output_dim)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout)
self.fc2 = nn.Linear(self.pre_output_dim, output_dim)
def forward(self, seq):
'''
Desc:
Forward pass
Args:
seq: tensor(batch_size, seq_size) -- 输入的序列
Returns:
output: tensor(batch_size, output_dim=1) -- Predicted value
'''
# embed_seq: [batch_size, seq_size, embed_size]
# seq = seq.long().to(self.device)
seq = seq.long().to('cuda')
embed_seq = self.dropout(self.input_embed(seq))
#lstm_output: [batch size, seq_len, hid dim * num directions]
#hidden, cell: [num layers * num directions, batch size, hidden_dim]
lstm_output, (hidden, cell) = self.lstm(embed_seq)
if self.avg_hidden:
hidden = torch.sum(lstm_output, 1) / lstm_output.size(1)
else:
if self.bidirectional:
hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
else:
# hidden = self.dropout(hidden[-1, :, :])
hidden = hidden[-1, :, :]
# hidden: [batch_size, hidden_dim * num_directions]
# hidden = self.dropout(hidden)
pre_output = self.relu(self.bn(self.fc1(hidden)))
pre_output = self.dropout(pre_output)
output = self.fc2(pre_output)
# return: [batch_size, output_dim]
return output
class WordAVGModel(nn.Module):
def __init__(self, vocab_size, embedding_dim, output_dim, dropout=0.5):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.fc1 = nn.Linear(embedding_dim,
int((embedding_dim + output_dim) / 2))
self.fc2 = nn.Linear(int((embedding_dim + output_dim) / 2), output_dim)
self.dropout = nn.Dropout(dropout)
''' seq: [batch_size, seq_size]'''
def forward(self, text):
embedded = self.embedding(text.long().to('cuda')) # [batch_size,seq_len,emb_dim]
pooled = F.avg_pool2d(
embedded,
(embedded.shape[1], 1)).squeeze() # batch_size, embed_size
output = self.dropout(self.fc1(pooled))
return self.fc2(output)
def count_parameters(model=None):
'''
Desc:
计算模型中参数个数
Args:
model -- 待参数计数的模型
Returns:
res -- model中参数的个数
'''
if model is None:
raise ValueError("model不可为空")
return sum(p.numel() for p in model.parameters() if p.requires_grad) | 0.862612 | 0.35883 |
#string类(不可变对象, 接近内置)
string = str('...') #string = '...'
string.isalnum()
string.isalpha()
string.isdigit()
string.islower()
string.isupper()
string.isspace()
string.isidentifier() #Python标识符
string.startswith('...') #以子串开头
string.endswith('...') #以子串结尾
string.find('...') #查找子串最低下标(-1)
string.rfind('...') #查找子串最高下标(-1)
string.count('...') #查找子串次数(无覆盖)
string.replace(old,new) #替换指定字符
string.capitalize() #首字符大写
string.swapcase() #大写小写逆转
string.title() #每个单词首字母转大写
string.lower() #全转小写
string.upper() #全转大写
string.strip() #移除两端'',\t,\f,\r,\n
string.lstrip() #移除前端...
string.rstrip() #移除末端...
string.center(width) #给定宽度居中对齐
string.ljust(width) #给定宽度左对齐
string.rjust(width) #给定宽度右对齐
string.format(items) #格式化字符串,按照items
string.split('?') #参考限定字符切割成列表(默认空格)
#一堆运算符重载了(下述是摘要):
string[index] #范围[0,length),[-length,-1]
string[start:end] #提取部分字符[start,end]
#start默认0,end默认length-1
string = string * n #复制连接
result = string1 in string2
result = string1 not in string2
'''
'''
#list类(接近内置)
list = list() #list = []
list.index(element) #第一次出现下标
list.count(element) #统计元素
list.append(element) #追加元素
list.extend(list) #追加列表
list.sort() #升序排序
list.reverse() #逆序
list.insert(index, element) #插入元素到index中
list.pop(index) #弹出指定元素,默认最后一个元素
list.remove(element) #移除指定元素(第一个)
#一堆运算符重载了(下述是摘要):
list[index] #范围[0,length),[-length,-1]
list[start:end] #提取部分元素[start,end)(不包含end)
#start默认0,end默认length
list = list * n #复制连接
result = element in list
result = element not in list
#列表解析:
list = [i for i in range(9)]
list = [i for i in range(9) if i > 5]
'''
'''
#tuple类(不可变对象, 接近内置)
tuple = tuple(object)
#支持各种内置操作
'''
'''
#set类(接近内置)
set = set(object)
set.add(element) #添加元素
set.remove(element) #移除元素
set.issubset(set) #子集检查
set.issuperset(set) #超集检查
set.union(set) #或运算(并集)
set.intersection(set) #与运算(交集)
set.difference(set) #(差集,补集)
set.symmetric_difference(set) #异或运算(对称差集)
'''
'''
#dictionary类
dictionary = {...}
dictionary[key] = value #添加(修改)一个条目
del dictionary[key] #删除一个条目
dictionary.keys() #获得关键字序列(视图)
dictionary.values() #获得值序列(元组)
dictionary.items() #获得字典序列(元组序列,每个元组是一个键值对)
dictionary.clear() #移除所有条目
dictionary.get(key) #获得关键字对应值
dictionary.pop(key) #获得关键字对应值,并移除条目
dictionary.popitem() #随机获得一个条目,并移除条目
'''
'''
#random类
number = random.randint(a,b) #生成[a,b]范围内整数
number = random.randrange(a,b) #生成[a,b)范围内整数
number = random.random() #生成[0,1)范围内小数
''' | KnowledgeSet/B_Other/Python/_note_.py/_class_.py | #string类(不可变对象, 接近内置)
string = str('...') #string = '...'
string.isalnum()
string.isalpha()
string.isdigit()
string.islower()
string.isupper()
string.isspace()
string.isidentifier() #Python标识符
string.startswith('...') #以子串开头
string.endswith('...') #以子串结尾
string.find('...') #查找子串最低下标(-1)
string.rfind('...') #查找子串最高下标(-1)
string.count('...') #查找子串次数(无覆盖)
string.replace(old,new) #替换指定字符
string.capitalize() #首字符大写
string.swapcase() #大写小写逆转
string.title() #每个单词首字母转大写
string.lower() #全转小写
string.upper() #全转大写
string.strip() #移除两端'',\t,\f,\r,\n
string.lstrip() #移除前端...
string.rstrip() #移除末端...
string.center(width) #给定宽度居中对齐
string.ljust(width) #给定宽度左对齐
string.rjust(width) #给定宽度右对齐
string.format(items) #格式化字符串,按照items
string.split('?') #参考限定字符切割成列表(默认空格)
#一堆运算符重载了(下述是摘要):
string[index] #范围[0,length),[-length,-1]
string[start:end] #提取部分字符[start,end)(不包含end)
#start默认0,end默认length
string = string * n #复制连接
result = string1 in string2
result = string1 not in string2
'''
'''
#list类(接近内置)
list = list() #list = []
list.index(element) #第一次出现下标
list.count(element) #统计元素
list.append(element) #追加元素
list.extend(list) #追加列表
list.sort() #升序排序
list.reverse() #逆序
list.insert(index, element) #插入元素到index中
list.pop(index) #弹出指定元素,默认最后一个元素
list.remove(element) #移除指定元素(第一个)
#一堆运算符重载了(下述是摘要):
list[index] #范围[0,length),[-length,-1]
list[start:end] #提取部分元素[start,end)(不包含end)
#start默认0,end默认length
list = list * n #复制连接
result = element in list
result = element not in list
#列表解析:
list = [i for i in range(9)]
list = [i for i in range(9) if i > 5]
'''
'''
#tuple类(不可变对象, 接近内置)
tuple = tuple(object)
#支持各种内置操作
'''
'''
#set类(接近内置)
set = set(object)
set.add(element) #添加元素
set.remove(element) #移除元素
set.issubset(set) #子集检查
set.issuperset(set) #超集检查
set.union(set) #或运算(并集)
set.intersection(set) #与运算(交集)
set.difference(set) #(差集,补集)
set.symmetric_difference(set) #异或运算(对称差集)
'''
'''
#dictionary类
dictionary = {...}
dictionary[key] = value #添加(修改)一个条目
del dictionary[key] #删除一个条目
dictionary.keys() #获得关键字序列(视图)
dictionary.values() #获得值序列(元组)
dictionary.items() #获得字典序列(元组序列,每个元组是一个键值对)
dictionary.clear() #移除所有条目
dictionary.get(key) #获得关键字对应值
dictionary.pop(key) #获得关键字对应值,并移除条目
dictionary.popitem() #随机获得一个条目,并移除条目
'''
'''
#random类
number = random.randint(a,b) #生成[a,b]范围内整数
number = random.randrange(a,b) #生成[a,b)范围内整数
number = random.random() #生成[0,1)范围内小数
''' | 0.091118 | 0.355859 |
# Kludge to import logger from a relative path
from sys import path, stderr
path.append('../logger')
from logger import Logger
# Scene description: one dict of physical and rendering properties per sphere.
OBJS = [
    {
        "name": "red-ball",  # id used when registering with the logger
        "radius": 0.25,  # sphere radius
        "restitution": 0.85,  # fraction of speed kept after a bounce
        "color": [1, 0, 0, 1],  # RGBA
        "pos": [-1, 4, 0],  # position (x, y, z); gravity acts along y
        "rot": [0, 0, 0, 1],  # quaternion; presumably (x, y, z, w) -- verify
        "vel": [0, 0, 0, 0, 0, 0],  # 6-DOF velocity; only vel[Y] is updated
    }, {
        "name": "blue-ball",
        "radius": 0.5,
        "restitution": 0.95,
        "color": [0, 0, 1, 1],
        "pos": [1, 4, 0],
        "rot": [0, 0, 0, 1],
        "vel": [0, 0, 0, 0, 0, 0],
    }
]
# Simulation parameters (times in milliseconds unless noted).
TIME_STOP_MS = 8*1000  # total simulated time
SIMULATION_STEP_MS = 50  # physics timestep
SIMULATION_STEP = SIMULATION_STEP_MS/1000  # physics timestep in seconds
VISUALIZATION_STEP_MS = 100  # period between logged/printed frames
GRAVITY = -9.81  # acceleration along Y
X, Y, Z = 0, 1, 2  # axis indices into the pos/vel lists
def bouncing_ball_simulation():
    """Simulate two bouncing spheres.

    Steps the spheres in OBJS under gravity with a fixed timestep, logging
    frames to a Logger at VISUALIZATION_STEP_MS intervals and printing a
    CSV trace of Y positions to stderr.  NOTE(review): this mutates the
    module-level OBJS in place, so a second call continues from the final
    state rather than restarting -- confirm that is acceptable.
    """
    # Create the logger
    logger = Logger(VISUALIZATION_STEP_MS/1000)
    # Add all objects
    for obj in OBJS:
        logger.add_sphere(obj["name"], obj["radius"], obj["color"])
    # Create the initial frame
    logger.new_frame()
    for obj in OBJS:
        # Must pass a copy of position and rotation
        # since python passes by reference.
        pos, rot = obj["pos"].copy(), obj["rot"].copy()
        logger.add_to_frame(obj["name"], pos, rot)
    time_ms = 0
    next_vis_update_ms = VISUALIZATION_STEP_MS
    print("t,red-ball position, blue-ball position", file=stderr)
    print(f"0,{OBJS[0]['pos'][Y]},{OBJS[1]['pos'][Y]}", file=stderr)
    while time_ms <= TIME_STOP_MS:
        time_ms += SIMULATION_STEP_MS
        # Physics calculation: explicit Euler -- position is advanced with
        # the pre-update velocity, then velocity gets the gravity kick.
        for obj in OBJS:
            obj["pos"][Y] += obj["vel"][Y]*SIMULATION_STEP
            obj["vel"][Y] += GRAVITY*SIMULATION_STEP
        # Check for bouncing: penetration is how far the sphere's lowest
        # point dipped below the floor at y=0.
        for obj in OBJS:
            penetration_dist = obj["pos"][Y] - obj["radius"]
            if penetration_dist <= 0:
                # NOTE(review): this reflects the centre about y=0, leaving
                # it below the radius (still penetrating); likely intended
                # obj["radius"] - penetration_dist -- confirm.
                obj["pos"][Y] = -penetration_dist
                obj["vel"][Y] *= -1*obj["restitution"]
        # Update the visualization
        if time_ms >= next_vis_update_ms:
            next_vis_update_ms += VISUALIZATION_STEP_MS
            # Print to STDERR for logging separately from json data
            print(f"{time_ms/1000},{OBJS[0]['pos'][Y]},{OBJS[1]['pos'][Y]}",
                  file=stderr)
            logger.new_frame()
            for obj in OBJS:
                pos, rot = obj["pos"].copy(), obj["rot"].copy()
                logger.add_to_frame(obj["name"], pos, rot)
    # Emit the accumulated frame data (JSON via Logger.__str__) to stdout.
    print(str(logger))
if __name__ == '__main__':
    # Run the demo when executed as a script.
    bouncing_ball_simulation()
from sys import path, stderr
path.append('../logger')
from logger import Logger
# Scene description: one dict of physical and rendering properties per sphere.
OBJS = [
    {
        "name": "red-ball",  # id used when registering with the logger
        "radius": 0.25,  # sphere radius
        "restitution": 0.85,  # fraction of speed kept after a bounce
        "color": [1, 0, 0, 1],  # RGBA
        "pos": [-1, 4, 0],  # position (x, y, z); gravity acts along y
        "rot": [0, 0, 0, 1],  # quaternion; presumably (x, y, z, w) -- verify
        "vel": [0, 0, 0, 0, 0, 0],  # 6-DOF velocity; only vel[Y] is updated
    }, {
        "name": "blue-ball",
        "radius": 0.5,
        "restitution": 0.95,
        "color": [0, 0, 1, 1],
        "pos": [1, 4, 0],
        "rot": [0, 0, 0, 1],
        "vel": [0, 0, 0, 0, 0, 0],
    }
]
# Simulation parameters (times in milliseconds unless noted).
TIME_STOP_MS = 8*1000  # total simulated time
SIMULATION_STEP_MS = 50  # physics timestep
SIMULATION_STEP = SIMULATION_STEP_MS/1000  # physics timestep in seconds
VISUALIZATION_STEP_MS = 100  # period between logged/printed frames
GRAVITY = -9.81  # acceleration along Y
X, Y, Z = 0, 1, 2  # axis indices into the pos/vel lists
def bouncing_ball_simulation():
    """Simulate two bouncing spheres.

    Steps the spheres in OBJS under gravity with a fixed timestep, logging
    frames to a Logger at VISUALIZATION_STEP_MS intervals and printing a
    CSV trace of Y positions to stderr.  NOTE(review): this mutates the
    module-level OBJS in place, so a second call continues from the final
    state rather than restarting -- confirm that is acceptable.
    """
    # Create the logger
    logger = Logger(VISUALIZATION_STEP_MS/1000)
    # Add all objects
    for obj in OBJS:
        logger.add_sphere(obj["name"], obj["radius"], obj["color"])
    # Create the initial frame
    logger.new_frame()
    for obj in OBJS:
        # Must pass a copy of position and rotation
        # since python passes by reference.
        pos, rot = obj["pos"].copy(), obj["rot"].copy()
        logger.add_to_frame(obj["name"], pos, rot)
    time_ms = 0
    next_vis_update_ms = VISUALIZATION_STEP_MS
    print("t,red-ball position, blue-ball position", file=stderr)
    print(f"0,{OBJS[0]['pos'][Y]},{OBJS[1]['pos'][Y]}", file=stderr)
    while time_ms <= TIME_STOP_MS:
        time_ms += SIMULATION_STEP_MS
        # Physics calculation: explicit Euler -- position is advanced with
        # the pre-update velocity, then velocity gets the gravity kick.
        for obj in OBJS:
            obj["pos"][Y] += obj["vel"][Y]*SIMULATION_STEP
            obj["vel"][Y] += GRAVITY*SIMULATION_STEP
        # Check for bouncing: penetration is how far the sphere's lowest
        # point dipped below the floor at y=0.
        for obj in OBJS:
            penetration_dist = obj["pos"][Y] - obj["radius"]
            if penetration_dist <= 0:
                # NOTE(review): this reflects the centre about y=0, leaving
                # it below the radius (still penetrating); likely intended
                # obj["radius"] - penetration_dist -- confirm.
                obj["pos"][Y] = -penetration_dist
                obj["vel"][Y] *= -1*obj["restitution"]
        # Update the visualization
        if time_ms >= next_vis_update_ms:
            next_vis_update_ms += VISUALIZATION_STEP_MS
            # Print to STDERR for logging separately from json data
            print(f"{time_ms/1000},{OBJS[0]['pos'][Y]},{OBJS[1]['pos'][Y]}",
                  file=stderr)
            logger.new_frame()
            for obj in OBJS:
                pos, rot = obj["pos"].copy(), obj["rot"].copy()
                logger.add_to_frame(obj["name"], pos, rot)
    # Emit the accumulated frame data (JSON via Logger.__str__) to stdout.
    print(str(logger))
if __name__ == '__main__':
    # Run the demo when executed as a script.
    bouncing_ball_simulation()
# Lint as: python3
# pylint:disable=line-too-long
r"""Beam job to map to tf.Examples of embeddings.
This file has two modes:
1) Map from tf.Examples of audio to tf.Examples of embeddings.
2) Map from TFDS dataseet to tf.Examples of embeddings.
"""
# pylint:enable=line-too-long
from typing import Any, Dict
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import tensorflow as tf
from non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_flags # pylint:disable=unused-import
from non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_utils as utils
FLAGS = flags.FLAGS
def main(_):
  """Build and run one Beam pipeline per (input, output) pair from flags."""
  input_filenames_list, output_filenames, beam_params = utils.get_beam_params_from_flags(
  )
  # Check that inputs and flags are formatted correctly.
  utils.validate_inputs(
      input_filenames_list=input_filenames_list,
      output_filenames=output_filenames,
      embedding_modules=beam_params['embedding_modules'],
      embedding_names=beam_params['embedding_names'],
      module_output_keys=beam_params['module_output_keys'])
  logging.info('main: input_filenames_list: %s', input_filenames_list)
  logging.info('main: output_filenames: %s', output_filenames)
  logging.info('main: beam_params: %s', beam_params)
  # If you have custom beam options, add them here.
  beam_options = None
  logging.info('Starting to create flume pipeline...')
  with beam.Pipeline(beam_options) as root:
    for i, (input_filenames_or_glob, output_filename) in enumerate(
        zip(input_filenames_list, output_filenames)):
      # The numeric suffix keeps per-pair pipeline stage names unique.
      utils.data_prep_pipeline(
          root=root,
          input_filenames_or_glob=input_filenames_or_glob,
          output_filename=output_filename,
          data_prep_behavior=FLAGS.data_prep_behavior,
          beam_params=beam_params,
          suffix=str(i))
@flags.multi_flags_validator(
    ['use_frontend_fn', 'model_input_min_length'],
    message='Use only one of `use_frontend_fn` and `model_input_min_length`.'
)
def no_min_input_length_with_frontend_fn(flags_dict):
  """Forbid setting both `use_frontend_fn` and `model_input_min_length`."""
  return not (flags_dict['use_frontend_fn'] and
              flags_dict['model_input_min_length'])
if __name__ == '__main__':
  # Flags that must always be supplied on the command line.
  flags.mark_flags_as_required([
      'output_filename', 'embedding_names', 'embedding_modules',
      'module_output_keys', 'audio_key',
  ])
  # Exactly one input source: a file glob or a TFDS dataset.
  flags.mark_flags_as_mutual_exclusive(['input_glob', 'tfds_dataset'],
                                       required=True)
  # TFDS datasets carry their own sample rate; otherwise it must be given.
  flags.mark_flags_as_mutual_exclusive(
      ['tfds_dataset', 'sample_rate_key', 'sample_rate'], required=True)
  tf.compat.v2.enable_v2_behavior()
  assert tf.executing_eagerly()
  app.run(main)
# Lint as: python3
# pylint:disable=line-too-long
r"""Beam job to map to tf.Examples of embeddings.
This file has two modes:
1) Map from tf.Examples of audio to tf.Examples of embeddings.
2) Map from TFDS dataseet to tf.Examples of embeddings.
"""
# pylint:enable=line-too-long
from typing import Any, Dict
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import tensorflow as tf
from non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_flags # pylint:disable=unused-import
from non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_utils as utils
FLAGS = flags.FLAGS
def main(_):
  """Build and run one Beam pipeline per (input, output) pair from flags."""
  input_filenames_list, output_filenames, beam_params = utils.get_beam_params_from_flags(
  )
  # Check that inputs and flags are formatted correctly.
  utils.validate_inputs(
      input_filenames_list=input_filenames_list,
      output_filenames=output_filenames,
      embedding_modules=beam_params['embedding_modules'],
      embedding_names=beam_params['embedding_names'],
      module_output_keys=beam_params['module_output_keys'])
  logging.info('main: input_filenames_list: %s', input_filenames_list)
  logging.info('main: output_filenames: %s', output_filenames)
  logging.info('main: beam_params: %s', beam_params)
  # If you have custom beam options, add them here.
  beam_options = None
  logging.info('Starting to create flume pipeline...')
  with beam.Pipeline(beam_options) as root:
    for i, (input_filenames_or_glob, output_filename) in enumerate(
        zip(input_filenames_list, output_filenames)):
      # The numeric suffix keeps per-pair pipeline stage names unique.
      utils.data_prep_pipeline(
          root=root,
          input_filenames_or_glob=input_filenames_or_glob,
          output_filename=output_filename,
          data_prep_behavior=FLAGS.data_prep_behavior,
          beam_params=beam_params,
          suffix=str(i))
@flags.multi_flags_validator(
    ['use_frontend_fn', 'model_input_min_length'],
    message='Use only one of `use_frontend_fn` and `model_input_min_length`.'
)
def no_min_input_length_with_frontend_fn(flags_dict):
  """Forbid setting both `use_frontend_fn` and `model_input_min_length`."""
  return not (flags_dict['use_frontend_fn'] and
              flags_dict['model_input_min_length'])
if __name__ == '__main__':
  # Flags that must always be supplied on the command line.
  flags.mark_flags_as_required([
      'output_filename', 'embedding_names', 'embedding_modules',
      'module_output_keys', 'audio_key',
  ])
  # Exactly one input source: a file glob or a TFDS dataset.
  flags.mark_flags_as_mutual_exclusive(['input_glob', 'tfds_dataset'],
                                       required=True)
  # TFDS datasets carry their own sample rate; otherwise it must be given.
  flags.mark_flags_as_mutual_exclusive(
      ['tfds_dataset', 'sample_rate_key', 'sample_rate'], required=True)
  tf.compat.v2.enable_v2_behavior()
  assert tf.executing_eagerly()
  app.run(main)
import torch
import torch.nn as nn
import torch.nn.init
import sys
import numpy as np
import torchvision.models as models
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.nn.utils.rnn import pad_packed_sequence
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.clip_grad import clip_grad_norm
from collections import OrderedDict
from torch.nn import functional as F
from text_encoders import get_text_encoder
from layers import l2norm
def EncoderImage(data_name, img_dim, embed_size, finetune=False,
                 cnn_type='vgg19', use_abs=False, no_imgnorm=False):
    """Factory for image encoders.

    Datasets whose name ends in '_precomp' ship precomputed image features
    and get an `EncoderImagePrecomp`; everything else gets an
    `EncoderImageFull`, which runs a CNN on raw images.
    """
    if data_name.endswith('_precomp'):
        return EncoderImagePrecomp(img_dim, embed_size, use_abs, no_imgnorm)
    return EncoderImageFull(embed_size, finetune, cnn_type, use_abs, no_imgnorm)
# tutorials/09 - Image Captioning
class EncoderImageFull(nn.Module):
    """Image encoder that runs a torchvision CNN on raw images."""
    def __init__(self, embed_size, finetune=False, cnn_type='vgg19',
                 use_abs=False, no_imgnorm=False):
        """Load pretrained VGG19 and replace top fc layer.

        Args:
            embed_size: dimensionality of the joint embedding space.
            finetune: if True the CNN weights stay trainable, else frozen.
            cnn_type: torchvision architecture name ('vgg*' or 'resnet*').
            use_abs: take the absolute value of the embedding (order embeddings).
            no_imgnorm: skip L2 normalization of the final embedding.
        """
        super(EncoderImageFull, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.use_abs = use_abs
        # Load a pre-trained model (downloads weights on first use).
        self.cnn = self.get_cnn(cnn_type, True)
        # For efficient memory usage: freeze (or unfreeze) every CNN weight.
        for param in self.cnn.parameters():
            param.requires_grad = finetune
        # Replace the last fully connected layer of CNN with a new one
        # projecting into the joint embedding space.
        if cnn_type.startswith('vgg'):
            self.fc = nn.Linear(self.cnn.classifier._modules['6'].in_features,
                                embed_size)
            self.cnn.classifier = nn.Sequential(
                *list(self.cnn.classifier.children())[:-1])
        elif cnn_type.startswith('resnet'):
            # Non-VGG nets are wrapped whole in DataParallel, hence .module.
            self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)
            self.cnn.module.fc = nn.Sequential()
        self.init_weights()
    def get_cnn(self, arch, pretrained):
        """Load a pretrained CNN and parallelize over GPUs
        """
        if pretrained:
            print("=> using pre-trained model '{}'".format(arch))
            model = models.__dict__[arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(arch))
            model = models.__dict__[arch]()
        # AlexNet/VGG parallelize only the conv features; other nets whole.
        if arch.startswith('alexnet') or arch.startswith('vgg'):
            model.features = nn.DataParallel(model.features)
        else:
            model = nn.DataParallel(model)
        if torch.cuda.is_available():
            model.cuda()
        return model
    def init_weights(self):
        """Xavier initialization for the fully connected layer
        """
        # Uniform bound derived from fan-in + fan-out (Glorot).
        r = np.sqrt(6.) / np.sqrt(self.fc.in_features +
                                  self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)
    def forward(self, images):
        """Extract image feature vectors."""
        features = self.cnn(images)
        # normalization in the image embedding space
        features = l2norm(features)
        # linear projection to the joint embedding space
        features = self.fc(features)
        # normalization in the joint embedding space
        if not self.no_imgnorm:
            features = l2norm(features)
        # take the absolute value of the embedding (used in order embeddings)
        if self.use_abs:
            features = torch.abs(features)
        return features
class EncoderImagePrecomp(nn.Module):
    """Projects precomputed image features into the joint embedding space.

    Args:
        img_dim: dimensionality of the precomputed features.
        embed_size: dimensionality of the joint embedding space.
        use_abs: take the absolute value of the embedding (order embeddings).
        no_imgnorm: skip L2 normalization of the final embedding.
    """
    def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
        super(EncoderImagePrecomp, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.use_abs = use_abs
        # Dead-code fix: the original guard `if img_dim != embed_size or
        # True:` was always true, so the projection layer is simply created
        # unconditionally (behavior unchanged; `self.fc` stays truthy for
        # external `if img_enc.fc:` checks).
        self.fc = nn.Linear(img_dim, embed_size)
        self.init_weights()
    def init_weights(self):
        """Xavier (uniform) initialization for the projection layer."""
        r = np.sqrt(6.) / np.sqrt(self.fc.in_features +
                                  self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)
    def forward(self, images):
        """Map precomputed feature vectors to joint-space embeddings."""
        # assuming that the precomputed features are already l2-normalized
        features = self.fc(images)
        # normalize in the joint embedding space
        if not self.no_imgnorm:
            features = l2norm(features)
        # take the absolute value of embedding (used in order embeddings)
        if self.use_abs:
            features = torch.abs(features)
        return features
    def load_state_dict(self, state_dict):
        """Copy parameters, ignoring keys this model does not have.

        Overridden so a state_dict saved from the Full model (which contains
        extra CNN weights) can still be loaded here.
        """
        own_state = self.state_dict()
        filtered = OrderedDict(
            (name, param) for name, param in state_dict.items()
            if name in own_state)
        super(EncoderImagePrecomp, self).load_state_dict(filtered)
def cosine_sim(im, s):
    """Pairwise similarity matrix between image rows and sentence rows.

    Assumes rows are already L2-normalized, so the dot product equals the
    cosine similarity.
    """
    return torch.mm(im, s.t())
def order_sim(im, s):
    """Order-embedding similarity $-\\| max(0, s - im) \\|_2$ for every pair.

    Returns a (num_images, num_sentences) score matrix.
    """
    # Broadcasting (sentence rows along dim 0, image rows along dim 1)
    # replaces the explicit expand() calls of the usual formulation.
    violation = (s.unsqueeze(1) - im.unsqueeze(0)).clamp(min=0)
    return -violation.pow(2).sum(2).sqrt().t()
class ContrastiveLoss(nn.Module):
    """Hinge-based triplet ranking loss over an image/sentence score matrix.

    Each positive pair (the diagonal of the score matrix) must beat every
    negative in its row and column by ``margin``.  With ``max_violation``
    only the hardest negative per query contributes.
    """
    def __init__(self, margin=0, measure=False, max_violation=False):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin
        # 'order' selects order-embedding similarity; anything else cosine.
        self.sim = order_sim if measure == 'order' else cosine_sim
        self.max_violation = max_violation
    def forward(self, im, s):
        # Pairwise image-sentence score matrix; positives on the diagonal.
        scores = self.sim(im, s)
        positives = scores.diag().view(im.size(0), 1)
        pos_rows = positives.expand_as(scores)
        pos_cols = positives.t().expand_as(scores)
        # Caption retrieval: compare each column with its diagonal entry.
        cost_s = (self.margin + scores - pos_rows).clamp(min=0)
        # Image retrieval: compare each row with its diagonal entry.
        cost_im = (self.margin + scores - pos_cols).clamp(min=0)
        # The positives themselves must not contribute: zero the diagonal.
        diag_mask = Variable(torch.eye(scores.size(0)) > .5)
        if torch.cuda.is_available():
            diag_mask = diag_mask.cuda()
        cost_s = cost_s.masked_fill_(diag_mask, 0)
        cost_im = cost_im.masked_fill_(diag_mask, 0)
        # Optionally keep only the hardest negative per query.
        if self.max_violation:
            cost_s = cost_s.max(1)[0]
            cost_im = cost_im.max(0)[0]
        return cost_s.sum() + cost_im.sum()
def Frobenius(mat):
    """Mean Frobenius norm over a batch of matrices.

    Args:
        mat: tensor of shape (batch, rows, cols).
    Returns:
        Sum of per-matrix Frobenius norms divided by the batch size.
    Raises:
        Exception: if *mat* is not 3-dimensional.
    """
    if len(mat.size()) != 3:
        raise Exception('matrix for computing Frobenius norm should be with 3 dims')
    squared = torch.sum(torch.sum(mat ** 2, 1, keepdim=True), 2, keepdim=True)
    norms = (squared.squeeze() + 1e-10) ** 0.5
    return torch.sum(norms) / mat.size()[0]
class VSE(object):
    """
    rkiros/uvs model
    Joint visual-semantic embedding: an image encoder and a text encoder
    trained with a contrastive ranking loss so matching image/caption pairs
    score higher than mismatched ones.
    NOTE(review): `self.logger` is read in forward_loss/train_emb but never
    assigned in this class; presumably the training script attaches it --
    verify against the caller.
    """
    def __init__(self, opt):
        # tutorials/09 - Image Captioning
        # Build Models
        self.opt = opt
        self.grad_clip = opt.grad_clip
        self.img_enc = EncoderImage(opt.data_name,
                                    opt.img_dim,
                                    opt.embed_size,
                                    opt.finetune, opt.cnn_type,
                                    use_abs=opt.use_abs,
                                    no_imgnorm=opt.no_imgnorm)
        self.txt_enc = get_text_encoder(opt.text_encoder, opt)
        if torch.cuda.is_available():
            self.img_enc.cuda()
            self.txt_enc.cuda()
            cudnn.benchmark = True
        # Loss and Optimizer
        self.criterion = ContrastiveLoss(margin=opt.margin,
                                         measure=opt.measure,
                                         max_violation=opt.max_violation)
        self.attention = False
        if opt.text_encoder.startswith('attentive'):
            self.init_attention()
        # Optimize the text encoder, the image projection layer, and (only
        # when finetuning) the CNN itself.
        params = list(self.txt_enc.parameters())
        if self.img_enc.fc:
            params += list(self.img_enc.fc.parameters())
        if opt.finetune:
            params += list(self.img_enc.cnn.parameters())
        self.params = params
        self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.params), lr=opt.learning_rate)
        self.Eiters = 0  # total training iterations so far
    def init_attention(self):
        """Precompute per-sample identity matrices for the attention
        orthogonality penalty (see forward_loss)."""
        opt = self.opt
        self.attention = True
        hops = self.txt_enc.hops
        self.I = Variable(torch.zeros(opt.batch_size, hops, hops))
        for i in range(opt.batch_size):
            for j in range(hops):
                self.I.data[i][j][j] = 1
        if torch.cuda.is_available():
            self.I = self.I.cuda()
    def state_dict(self):
        # Pair of (image encoder, text encoder) state dicts.
        state_dict = [self.img_enc.state_dict(), self.txt_enc.state_dict()]
        return state_dict
    def load_state_dict(self, state_dict):
        # Expects the [img_enc, txt_enc] pair produced by state_dict().
        self.img_enc.load_state_dict(state_dict[0])
        self.txt_enc.load_state_dict(state_dict[1])
    def train_start(self):
        """switch to train mode
        """
        self.img_enc.train()
        self.txt_enc.train()
    def val_start(self):
        """switch to evaluate mode
        """
        self.img_enc.eval()
        self.txt_enc.eval()
    def forward_emb(self, images, captions, lengths, volatile=False):
        """Compute the image and caption embeddings
        NOTE(review): Variable(..., volatile=...) is legacy (pre-0.4)
        autograd API; on modern torch `volatile` has no effect -- confirm
        the targeted torch version.
        """
        # Set mini-batch dataset
        images = Variable(images, volatile=volatile)
        captions = Variable(captions, volatile=volatile)
        if torch.cuda.is_available():
            images = images.cuda()
            captions = captions.cuda()
        # Forward
        img_emb = self.img_enc(images)
        cap_emb = self.txt_enc(captions, lengths)
        return img_emb, cap_emb
    def forward_loss(self, img_emb, cap_emb, **kwargs):
        """Compute the loss given pairs of image and caption embeddings
        """
        loss = self.criterion(img_emb, cap_emb)
        if self.attention:
            # Orthogonality penalty || A A^T - I ||_F encourages the
            # attention hops to focus on different words.
            coef = self.opt.att_coef
            attention = self.txt_enc.attention_weights
            attentionT = torch.transpose(attention, 1, 2).contiguous()
            extra_loss = Frobenius(torch.bmm(attention, attentionT) - self.I[:attention.size(0)])
            total_loss = loss + coef * extra_loss
            self.logger.update('TotalLoss', total_loss.data[0], img_emb.size(0))
            self.logger.update('AttLoss', coef * extra_loss.data[0], img_emb.size(0))
            self.logger.update('ContrLoss', loss.data[0], img_emb.size(0))
        # NOTE(review): the attention penalty is logged but NOT returned --
        # only the contrastive loss is backpropagated. Confirm intended.
        return loss
    def train_emb(self, images, captions, lengths, ids=None, *args):
        """One training step given images and captions.
        """
        self.Eiters += 1
        self.logger.update('Iter', self.Eiters)
        self.logger.update('lr', self.optimizer.param_groups[0]['lr'])
        # compute the embeddings
        img_emb, cap_emb = self.forward_emb(images, captions, lengths)
        # measure accuracy and record loss
        self.optimizer.zero_grad()
        loss = self.forward_loss(img_emb, cap_emb)
        # compute gradient and do SGD step
        loss.backward()
        if self.grad_clip > 0:
            clip_grad_norm(self.params, self.grad_clip)
        self.optimizer.step()
import torch.nn as nn
import torch.nn.init
import sys
import numpy as np
import torchvision.models as models
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.nn.utils.rnn import pad_packed_sequence
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.clip_grad import clip_grad_norm
from collections import OrderedDict
from torch.nn import functional as F
from text_encoders import get_text_encoder
from layers import l2norm
def EncoderImage(data_name, img_dim, embed_size, finetune=False,
                 cnn_type='vgg19', use_abs=False, no_imgnorm=False):
    """Factory for image encoders.

    Datasets whose name ends in '_precomp' ship precomputed image features
    and get an `EncoderImagePrecomp`; everything else gets an
    `EncoderImageFull`, which runs a CNN on raw images.
    """
    if data_name.endswith('_precomp'):
        return EncoderImagePrecomp(img_dim, embed_size, use_abs, no_imgnorm)
    return EncoderImageFull(embed_size, finetune, cnn_type, use_abs, no_imgnorm)
# tutorials/09 - Image Captioning
class EncoderImageFull(nn.Module):
    """Image encoder that runs a torchvision CNN on raw images."""
    def __init__(self, embed_size, finetune=False, cnn_type='vgg19',
                 use_abs=False, no_imgnorm=False):
        """Load pretrained VGG19 and replace top fc layer.

        Args:
            embed_size: dimensionality of the joint embedding space.
            finetune: if True the CNN weights stay trainable, else frozen.
            cnn_type: torchvision architecture name ('vgg*' or 'resnet*').
            use_abs: take the absolute value of the embedding (order embeddings).
            no_imgnorm: skip L2 normalization of the final embedding.
        """
        super(EncoderImageFull, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.use_abs = use_abs
        # Load a pre-trained model (downloads weights on first use).
        self.cnn = self.get_cnn(cnn_type, True)
        # For efficient memory usage: freeze (or unfreeze) every CNN weight.
        for param in self.cnn.parameters():
            param.requires_grad = finetune
        # Replace the last fully connected layer of CNN with a new one
        # projecting into the joint embedding space.
        if cnn_type.startswith('vgg'):
            self.fc = nn.Linear(self.cnn.classifier._modules['6'].in_features,
                                embed_size)
            self.cnn.classifier = nn.Sequential(
                *list(self.cnn.classifier.children())[:-1])
        elif cnn_type.startswith('resnet'):
            # Non-VGG nets are wrapped whole in DataParallel, hence .module.
            self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)
            self.cnn.module.fc = nn.Sequential()
        self.init_weights()
    def get_cnn(self, arch, pretrained):
        """Load a pretrained CNN and parallelize over GPUs
        """
        if pretrained:
            print("=> using pre-trained model '{}'".format(arch))
            model = models.__dict__[arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(arch))
            model = models.__dict__[arch]()
        # AlexNet/VGG parallelize only the conv features; other nets whole.
        if arch.startswith('alexnet') or arch.startswith('vgg'):
            model.features = nn.DataParallel(model.features)
        else:
            model = nn.DataParallel(model)
        if torch.cuda.is_available():
            model.cuda()
        return model
    def init_weights(self):
        """Xavier initialization for the fully connected layer
        """
        # Uniform bound derived from fan-in + fan-out (Glorot).
        r = np.sqrt(6.) / np.sqrt(self.fc.in_features +
                                  self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)
    def forward(self, images):
        """Extract image feature vectors."""
        features = self.cnn(images)
        # normalization in the image embedding space
        features = l2norm(features)
        # linear projection to the joint embedding space
        features = self.fc(features)
        # normalization in the joint embedding space
        if not self.no_imgnorm:
            features = l2norm(features)
        # take the absolute value of the embedding (used in order embeddings)
        if self.use_abs:
            features = torch.abs(features)
        return features
class EncoderImagePrecomp(nn.Module):
    """Projects precomputed image features into the joint embedding space.

    Args:
        img_dim: dimensionality of the precomputed features.
        embed_size: dimensionality of the joint embedding space.
        use_abs: take the absolute value of the embedding (order embeddings).
        no_imgnorm: skip L2 normalization of the final embedding.
    """
    def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
        super(EncoderImagePrecomp, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.use_abs = use_abs
        # Dead-code fix: the original guard `if img_dim != embed_size or
        # True:` was always true, so the projection layer is simply created
        # unconditionally (behavior unchanged; `self.fc` stays truthy for
        # external `if img_enc.fc:` checks).
        self.fc = nn.Linear(img_dim, embed_size)
        self.init_weights()
    def init_weights(self):
        """Xavier (uniform) initialization for the projection layer."""
        r = np.sqrt(6.) / np.sqrt(self.fc.in_features +
                                  self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)
    def forward(self, images):
        """Map precomputed feature vectors to joint-space embeddings."""
        # assuming that the precomputed features are already l2-normalized
        features = self.fc(images)
        # normalize in the joint embedding space
        if not self.no_imgnorm:
            features = l2norm(features)
        # take the absolute value of embedding (used in order embeddings)
        if self.use_abs:
            features = torch.abs(features)
        return features
    def load_state_dict(self, state_dict):
        """Copy parameters, ignoring keys this model does not have.

        Overridden so a state_dict saved from the Full model (which contains
        extra CNN weights) can still be loaded here.
        """
        own_state = self.state_dict()
        filtered = OrderedDict(
            (name, param) for name, param in state_dict.items()
            if name in own_state)
        super(EncoderImagePrecomp, self).load_state_dict(filtered)
def cosine_sim(im, s):
    """Pairwise similarity matrix between image rows and sentence rows.

    Assumes rows are already L2-normalized, so the dot product equals the
    cosine similarity.
    """
    return torch.mm(im, s.t())
def order_sim(im, s):
    """Order-embedding similarity $-\\| max(0, s - im) \\|_2$ for every pair.

    Returns a (num_images, num_sentences) score matrix.
    """
    # Broadcasting (sentence rows along dim 0, image rows along dim 1)
    # replaces the explicit expand() calls of the usual formulation.
    violation = (s.unsqueeze(1) - im.unsqueeze(0)).clamp(min=0)
    return -violation.pow(2).sum(2).sqrt().t()
class ContrastiveLoss(nn.Module):
    """Hinge-based triplet ranking loss over an image/sentence score matrix.

    Each positive pair (the diagonal of the score matrix) must beat every
    negative in its row and column by ``margin``.  With ``max_violation``
    only the hardest negative per query contributes.
    """
    def __init__(self, margin=0, measure=False, max_violation=False):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin
        # 'order' selects order-embedding similarity; anything else cosine.
        self.sim = order_sim if measure == 'order' else cosine_sim
        self.max_violation = max_violation
    def forward(self, im, s):
        # Pairwise image-sentence score matrix; positives on the diagonal.
        scores = self.sim(im, s)
        positives = scores.diag().view(im.size(0), 1)
        pos_rows = positives.expand_as(scores)
        pos_cols = positives.t().expand_as(scores)
        # Caption retrieval: compare each column with its diagonal entry.
        cost_s = (self.margin + scores - pos_rows).clamp(min=0)
        # Image retrieval: compare each row with its diagonal entry.
        cost_im = (self.margin + scores - pos_cols).clamp(min=0)
        # The positives themselves must not contribute: zero the diagonal.
        diag_mask = Variable(torch.eye(scores.size(0)) > .5)
        if torch.cuda.is_available():
            diag_mask = diag_mask.cuda()
        cost_s = cost_s.masked_fill_(diag_mask, 0)
        cost_im = cost_im.masked_fill_(diag_mask, 0)
        # Optionally keep only the hardest negative per query.
        if self.max_violation:
            cost_s = cost_s.max(1)[0]
            cost_im = cost_im.max(0)[0]
        return cost_s.sum() + cost_im.sum()
def Frobenius(mat):
    """Mean Frobenius norm over a batch of matrices.

    Args:
        mat: tensor of shape (batch, rows, cols).
    Returns:
        Sum of per-matrix Frobenius norms divided by the batch size.
    Raises:
        Exception: if *mat* is not 3-dimensional.
    """
    if len(mat.size()) != 3:
        raise Exception('matrix for computing Frobenius norm should be with 3 dims')
    squared = torch.sum(torch.sum(mat ** 2, 1, keepdim=True), 2, keepdim=True)
    norms = (squared.squeeze() + 1e-10) ** 0.5
    return torch.sum(norms) / mat.size()[0]
class VSE(object):
    """
    rkiros/uvs model
    Joint visual-semantic embedding: an image encoder and a text encoder
    trained with a contrastive ranking loss so matching image/caption pairs
    score higher than mismatched ones.
    NOTE(review): `self.logger` is read in forward_loss/train_emb but never
    assigned in this class; presumably the training script attaches it --
    verify against the caller.
    """
    def __init__(self, opt):
        # tutorials/09 - Image Captioning
        # Build Models
        self.opt = opt
        self.grad_clip = opt.grad_clip
        self.img_enc = EncoderImage(opt.data_name,
                                    opt.img_dim,
                                    opt.embed_size,
                                    opt.finetune, opt.cnn_type,
                                    use_abs=opt.use_abs,
                                    no_imgnorm=opt.no_imgnorm)
        self.txt_enc = get_text_encoder(opt.text_encoder, opt)
        if torch.cuda.is_available():
            self.img_enc.cuda()
            self.txt_enc.cuda()
            cudnn.benchmark = True
        # Loss and Optimizer
        self.criterion = ContrastiveLoss(margin=opt.margin,
                                         measure=opt.measure,
                                         max_violation=opt.max_violation)
        self.attention = False
        if opt.text_encoder.startswith('attentive'):
            self.init_attention()
        # Optimize the text encoder, the image projection layer, and (only
        # when finetuning) the CNN itself.
        params = list(self.txt_enc.parameters())
        if self.img_enc.fc:
            params += list(self.img_enc.fc.parameters())
        if opt.finetune:
            params += list(self.img_enc.cnn.parameters())
        self.params = params
        self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.params), lr=opt.learning_rate)
        self.Eiters = 0  # total training iterations so far
    def init_attention(self):
        """Precompute per-sample identity matrices for the attention
        orthogonality penalty (see forward_loss)."""
        opt = self.opt
        self.attention = True
        hops = self.txt_enc.hops
        self.I = Variable(torch.zeros(opt.batch_size, hops, hops))
        for i in range(opt.batch_size):
            for j in range(hops):
                self.I.data[i][j][j] = 1
        if torch.cuda.is_available():
            self.I = self.I.cuda()
    def state_dict(self):
        # Pair of (image encoder, text encoder) state dicts.
        state_dict = [self.img_enc.state_dict(), self.txt_enc.state_dict()]
        return state_dict
    def load_state_dict(self, state_dict):
        # Expects the [img_enc, txt_enc] pair produced by state_dict().
        self.img_enc.load_state_dict(state_dict[0])
        self.txt_enc.load_state_dict(state_dict[1])
    def train_start(self):
        """switch to train mode
        """
        self.img_enc.train()
        self.txt_enc.train()
    def val_start(self):
        """switch to evaluate mode
        """
        self.img_enc.eval()
        self.txt_enc.eval()
    def forward_emb(self, images, captions, lengths, volatile=False):
        """Compute the image and caption embeddings
        NOTE(review): Variable(..., volatile=...) is legacy (pre-0.4)
        autograd API; on modern torch `volatile` has no effect -- confirm
        the targeted torch version.
        """
        # Set mini-batch dataset
        images = Variable(images, volatile=volatile)
        captions = Variable(captions, volatile=volatile)
        if torch.cuda.is_available():
            images = images.cuda()
            captions = captions.cuda()
        # Forward
        img_emb = self.img_enc(images)
        cap_emb = self.txt_enc(captions, lengths)
        return img_emb, cap_emb
    def forward_loss(self, img_emb, cap_emb, **kwargs):
        """Compute the loss given pairs of image and caption embeddings
        """
        loss = self.criterion(img_emb, cap_emb)
        if self.attention:
            # Orthogonality penalty || A A^T - I ||_F encourages the
            # attention hops to focus on different words.
            coef = self.opt.att_coef
            attention = self.txt_enc.attention_weights
            attentionT = torch.transpose(attention, 1, 2).contiguous()
            extra_loss = Frobenius(torch.bmm(attention, attentionT) - self.I[:attention.size(0)])
            total_loss = loss + coef * extra_loss
            self.logger.update('TotalLoss', total_loss.data[0], img_emb.size(0))
            self.logger.update('AttLoss', coef * extra_loss.data[0], img_emb.size(0))
            self.logger.update('ContrLoss', loss.data[0], img_emb.size(0))
        # NOTE(review): the attention penalty is logged but NOT returned --
        # only the contrastive loss is backpropagated. Confirm intended.
        return loss
    def train_emb(self, images, captions, lengths, ids=None, *args):
        """One training step given images and captions.
        """
        self.Eiters += 1
        self.logger.update('Iter', self.Eiters)
        self.logger.update('lr', self.optimizer.param_groups[0]['lr'])
        # compute the embeddings
        img_emb, cap_emb = self.forward_emb(images, captions, lengths)
        # measure accuracy and record loss
        self.optimizer.zero_grad()
        loss = self.forward_loss(img_emb, cap_emb)
        # compute gradient and do SGD step
        loss.backward()
        if self.grad_clip > 0:
            clip_grad_norm(self.params, self.grad_clip)
        self.optimizer.step()
import os
import threading
import subprocess
import pkg_resources
METEOR_JAR = pkg_resources.resource_filename('nmtpytorch',
'lib/meteor-1.5.jar')
class Meteor(object):
def __init__(self, language, norm=False):
    """Spawn a persistent METEOR JVM process communicating over stdio.

    language: METEOR language code passed to ``-l``.
    norm: when True, enable METEOR's ``-norm`` tokenization/normalization.
    """
    self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR,
                       '-', '-', '-stdio', '-l', language]
    # BUG FIX: the original bound `self.env = os.environ`, so setting LC_ALL
    # below mutated the parent process environment as a side effect. Use a
    # copy so only the METEOR subprocess sees the override.
    self.env = os.environ.copy()
    self.env['LC_ALL'] = 'en_US.UTF_8'
    if norm:
        self.meteor_cmd.append('-norm')
    # Line-buffered text pipes: the scoring protocol is one line per request.
    self.meteor_p = subprocess.Popen(self.meteor_cmd,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     env=self.env,
                                     universal_newlines=True, bufsize=1)
    # Used to guarantee thread safety
    self.lock = threading.Lock()
def method(self):
    """Return the name of the metric implemented by this scorer."""
    return "METEOR"
def compute_score(self, gts, res):
    """Score hypotheses in `res` against references in `gts` via the METEOR process.

    gts: dict image-id -> list of reference sentences.
    res: dict image-id -> single-element list holding the hypothesis.
    Returns (corpus_score_in_percent, per_segment_scores), ordered by sorted id.
    """
    imgIds = sorted(list(gts.keys()))
    scores = []
    eval_line = 'EVAL'
    # Serialize access to the single METEOR subprocess.
    # NOTE(review): the lock is not released if an exception fires below;
    # a try/finally (or `with self.lock:`) would be safer -- confirm intent.
    self.lock.acquire()
    for i in imgIds:
        assert(len(res[i]) == 1)
        # '|||' is the protocol field separator, so it is stripped from text.
        # NOTE(review): the second replace is a no-op as written; upstream
        # collapses double spaces -- possible whitespace mangling here.
        hypothesis_str = res[i][0].replace('|||', '').replace(' ', ' ')
        score_line = ' ||| '.join(
            ('SCORE', ' ||| '.join(gts[i]), hypothesis_str))
        # We obtained --> SCORE ||| reference 1 words |||
        # reference n words ||| hypothesis words
        self.meteor_p.stdin.write(score_line + '\n')
        # METEOR answers each SCORE line with one line of sufficient statistics.
        stat = self.meteor_p.stdout.readline().strip()
        eval_line += ' ||| {}'.format(stat)
    # Send to METEOR: one EVAL line carrying all accumulated statistics.
    self.meteor_p.stdin.write(eval_line + '\n')
    # Collect segment scores (one line per image id, in order).
    for i in range(len(imgIds)):
        score = float(self.meteor_p.stdout.readline().strip())
        scores.append(score)
    # Final score: the last line is the corpus-level score, scaled to percent.
    final_score = 100 * float(self.meteor_p.stdout.readline().strip())
    self.lock.release()
    return final_score, scores
def __del__(self):
    """Shut down the METEOR subprocess when the scorer is garbage collected."""
    self.lock.acquire()
    # Closing stdin sends EOF; METEOR then exits and wait() reaps the process.
    self.meteor_p.stdin.close()
    self.meteor_p.wait()
self.lock.release() | nmtpytorch/cocoeval/meteor/meteor.py |
import os
import threading
import subprocess
import pkg_resources
METEOR_JAR = pkg_resources.resource_filename('nmtpytorch',
'lib/meteor-1.5.jar')
class Meteor(object):
def __init__(self, language, norm=False):
    """Spawn a persistent METEOR JVM process communicating over stdio.

    language: METEOR language code passed to ``-l``.
    norm: when True, enable METEOR's ``-norm`` tokenization/normalization.
    """
    self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR,
                       '-', '-', '-stdio', '-l', language]
    # NOTE(review): this binds `os.environ` itself, so the LC_ALL assignment
    # below mutates the parent process environment; `os.environ.copy()`
    # would avoid the side effect -- confirm intent.
    self.env = os.environ
    self.env['LC_ALL'] = 'en_US.UTF_8'
    if norm:
        self.meteor_cmd.append('-norm')
    # Line-buffered text pipes: the scoring protocol is one line per request.
    self.meteor_p = subprocess.Popen(self.meteor_cmd,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     env=self.env,
                                     universal_newlines=True, bufsize=1)
    # Used to guarantee thread safety
    self.lock = threading.Lock()
def method(self):
    """Return the name of the metric implemented by this scorer."""
    return "METEOR"
def compute_score(self, gts, res):
    """Score hypotheses in `res` against references in `gts` via the METEOR process.

    gts: dict image-id -> list of reference sentences.
    res: dict image-id -> single-element list holding the hypothesis.
    Returns (corpus_score_in_percent, per_segment_scores), ordered by sorted id.
    """
    imgIds = sorted(list(gts.keys()))
    scores = []
    eval_line = 'EVAL'
    # Serialize access to the single METEOR subprocess.
    # NOTE(review): not released on exception; `with self.lock:` would be safer.
    self.lock.acquire()
    for i in imgIds:
        assert(len(res[i]) == 1)
        # '|||' is the protocol field separator, so it is stripped from text.
        # NOTE(review): the second replace is a no-op as written -- possible
        # whitespace mangling of an original double-space collapse.
        hypothesis_str = res[i][0].replace('|||', '').replace(' ', ' ')
        score_line = ' ||| '.join(
            ('SCORE', ' ||| '.join(gts[i]), hypothesis_str))
        # We obtained --> SCORE ||| reference 1 words |||
        # reference n words ||| hypothesis words
        self.meteor_p.stdin.write(score_line + '\n')
        # METEOR answers each SCORE line with one line of sufficient statistics.
        stat = self.meteor_p.stdout.readline().strip()
        eval_line += ' ||| {}'.format(stat)
    # Send to METEOR: one EVAL line carrying all accumulated statistics.
    self.meteor_p.stdin.write(eval_line + '\n')
    # Collect segment scores (one line per image id, in order).
    for i in range(len(imgIds)):
        score = float(self.meteor_p.stdout.readline().strip())
        scores.append(score)
    # Final score: the last line is the corpus-level score, scaled to percent.
    final_score = 100 * float(self.meteor_p.stdout.readline().strip())
    self.lock.release()
    return final_score, scores
def __del__(self):
    """Shut down the METEOR subprocess when the scorer is garbage collected."""
    self.lock.acquire()
    # Closing stdin sends EOF; METEOR then exits and wait() reaps the process.
    self.meteor_p.stdin.close()
    self.meteor_p.wait()
self.lock.release() | 0.377885 | 0.115586 |
import optparse
import os
import sys
import time
import threading
import BaseHTTPServer
from server.http_handler import XwalkHttpHandlerWrapper
from net.port_server import PortServer
from base.log import InitLogging
from base.log import VLOG
from base.bind import Bind
def main(argv):
    ''' main entrance of xwalkdriver

    Parses command-line options, starts the HTTP server for WebDriver
    commands, and cleans up .pyc files and worker threads on shutdown.
    (Python 2 code: print statements, BaseHTTPServer.)
    '''
    # default setting for CrossWalk WebDriver
    port = "9515"
    host_name = ""
    port_server = None
    target = "android"
    # Parse command line flag.
    parser = optparse.OptionParser()
    parser.add_option('--port', action='store', dest="port", type='int', help='port to listen on')
    parser.add_option('--target', action='store', dest="target", type='str', help='target device, e.g --target=android')
    parser.add_option('--log-path', action='store', dest="log_path", help='write server log to file instead of stderr, increase log level to INFO')
    # NOTE(review): store_false means passing --verbose/--silent sets the
    # destination to False (and leaves it None when absent) -- confirm this
    # inversion is intentional.
    parser.add_option('--verbose', action='store_false', dest="verbose", help='log verbosely')
    parser.add_option('--silent', action='store_false', dest="silent", help='log nothing')
    parser.add_option('--url-base', action='store', dest="url_base", help='base URL path prefix for command, e.g. wd/url')
    parser.add_option('--port-server', action='store', dest="port_server", help='address of server to contact for reserving a port')
    # info user HOWTO: no arguments given, show usage (but keep running).
    if 1 == len(argv):
        parser.print_help()
    # choose specific port to listen on
    (opts, _) = parser.parse_args()
    if opts.port:
        port = opts.port
    # choose specific port server to maintain port for devtools
    if opts.port_server:
        if 'linux2' != sys.platform:
            print "Warning: port-server not implemented for this platform."
            sys.exit(-1)
        else:
            if not opts.port_server.startswith('@'):
                print "Invalid port-server. Exiting..."
                sys.exit(-1)
            else:
                # Leading NUL byte selects a Linux abstract unix domain socket.
                path = "\0"
                path += opts.port_server[1:]
                port_server = PortServer(path)
    # Normalize the URL base so it always starts and ends with '/'.
    if opts.url_base == None:
        url_base = ""
    else:
        url_base = str(opts.url_base)
    if not url_base or not url_base.startswith('/'):
        url_base = "/" + url_base
    elif url_base[-1] != '/':
        url_base = url_base + '/'
    # choose specific device for testing
    if opts.target:
        target = opts.target.lower()
        Handler = XwalkHttpHandlerWrapper(port, url_base, target, port_server)
    else:
        # NOTE(review): both branches build an identical handler; the
        # if/else is redundant as written.
        Handler = XwalkHttpHandlerWrapper(port, url_base, target, port_server)
    if not opts.silent:
        print "Starting XwalkDriver on port %s" % port
    if False == InitLogging(opts):
        print "Unable to initialize logging. Exiting..."
        sys.exit(-1)
    VLOG(0, "Running on target device " + target)
    # Running Http Server
    httpd = BaseHTTPServer.HTTPServer((host_name, int(port)), Handler)
    VLOG(1, "Xwalk Http Server Starts - %s:%s" % (host_name, port))
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        VLOG(1, "Xwalk Http Server Stops - %s:%s" % (host_name, port))
        httpd.server_close()
    finally:
        # scan and make the directory tree clean every time
        for rootdir, subdir, files in os.walk("./"):
            for item in files:
                if item.endswith(".pyc"):
                    try:
                        os.remove(rootdir + "/" + item)
                    except:
                        pass
        # Reap leftover worker threads on Ctrl-C, since the selenium side
        # never got to call driver.quit() in that case.
        for zombie in threading.enumerate():
            if zombie != threading.current_thread():
                quit_thread_cmd = Bind(Bind._RunNothing)
                quit_thread_cmd.is_quit_func_ = True
                zombie.PostTask(quit_thread_cmd)
    sys.exit(0)  # end of main
if __name__ == '__main__':
main(sys.argv) | xwalkdriver.py | import optparse
import os
import sys
import time
import threading
import BaseHTTPServer
from server.http_handler import XwalkHttpHandlerWrapper
from net.port_server import PortServer
from base.log import InitLogging
from base.log import VLOG
from base.bind import Bind
def main(argv):
    ''' main entrance of xwalkdriver

    Parses command-line options, starts the HTTP server for WebDriver
    commands, and cleans up .pyc files and worker threads on shutdown.
    (Python 2 code: print statements, BaseHTTPServer.)
    '''
    # default setting for CrossWalk WebDriver
    port = "9515"
    host_name = ""
    port_server = None
    target = "android"
    # Parse command line flag.
    parser = optparse.OptionParser()
    parser.add_option('--port', action='store', dest="port", type='int', help='port to listen on')
    parser.add_option('--target', action='store', dest="target", type='str', help='target device, e.g --target=android')
    parser.add_option('--log-path', action='store', dest="log_path", help='write server log to file instead of stderr, increase log level to INFO')
    # NOTE(review): store_false means passing --verbose/--silent sets the
    # destination to False (and leaves it None when absent) -- confirm this
    # inversion is intentional.
    parser.add_option('--verbose', action='store_false', dest="verbose", help='log verbosely')
    parser.add_option('--silent', action='store_false', dest="silent", help='log nothing')
    parser.add_option('--url-base', action='store', dest="url_base", help='base URL path prefix for command, e.g. wd/url')
    parser.add_option('--port-server', action='store', dest="port_server", help='address of server to contact for reserving a port')
    # info user HOWTO: no arguments given, show usage (but keep running).
    if 1 == len(argv):
        parser.print_help()
    # choose specific port to listen on
    (opts, _) = parser.parse_args()
    if opts.port:
        port = opts.port
    # choose specific port server to maintain port for devtools
    if opts.port_server:
        if 'linux2' != sys.platform:
            print "Warning: port-server not implemented for this platform."
            sys.exit(-1)
        else:
            if not opts.port_server.startswith('@'):
                print "Invalid port-server. Exiting..."
                sys.exit(-1)
            else:
                # Leading NUL byte selects a Linux abstract unix domain socket.
                path = "\0"
                path += opts.port_server[1:]
                port_server = PortServer(path)
    # Normalize the URL base so it always starts and ends with '/'.
    if opts.url_base == None:
        url_base = ""
    else:
        url_base = str(opts.url_base)
    if not url_base or not url_base.startswith('/'):
        url_base = "/" + url_base
    elif url_base[-1] != '/':
        url_base = url_base + '/'
    # choose specific device for testing
    if opts.target:
        target = opts.target.lower()
        Handler = XwalkHttpHandlerWrapper(port, url_base, target, port_server)
    else:
        # NOTE(review): both branches build an identical handler; the
        # if/else is redundant as written.
        Handler = XwalkHttpHandlerWrapper(port, url_base, target, port_server)
    if not opts.silent:
        print "Starting XwalkDriver on port %s" % port
    if False == InitLogging(opts):
        print "Unable to initialize logging. Exiting..."
        sys.exit(-1)
    VLOG(0, "Running on target device " + target)
    # Running Http Server
    httpd = BaseHTTPServer.HTTPServer((host_name, int(port)), Handler)
    VLOG(1, "Xwalk Http Server Starts - %s:%s" % (host_name, port))
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        VLOG(1, "Xwalk Http Server Stops - %s:%s" % (host_name, port))
        httpd.server_close()
    finally:
        # scan and make the directory tree clean every time
        for rootdir, subdir, files in os.walk("./"):
            for item in files:
                if item.endswith(".pyc"):
                    try:
                        os.remove(rootdir + "/" + item)
                    except:
                        pass
        # Reap leftover worker threads on Ctrl-C, since the selenium side
        # never got to call driver.quit() in that case.
        for zombie in threading.enumerate():
            if zombie != threading.current_thread():
                quit_thread_cmd = Bind(Bind._RunNothing)
                quit_thread_cmd.is_quit_func_ = True
                zombie.PostTask(quit_thread_cmd)
    sys.exit(0)  # end of main
if __name__ == '__main__':
main(sys.argv) | 0.157947 | 0.041793 |
from __future__ import annotations
import platform
from typing import TYPE_CHECKING
import psutil
from pincer import command
if TYPE_CHECKING:
from pincer.objects import Embed
from mcoding_bot.bot import Bot
def _percent_info_unit_ram(used, total):
mb: int = 1024 ** 2
return (
100 * (used / total),
f"{(total / mb) / 1000:,.3f}",
"Gb",
)
def _percent_info_unit_cpu(cpu_percent, current_freq, max_freq):
return (
cpu_percent,
f"{current_freq / 1000:.1f}`/`{max_freq / 1000:.1f}",
"Ghz",
)
def _percent_info_unit_disk(used, total):
mb: int = 1024 ** 2
return (
100 * (used / total),
f"{total / mb:,.0f}",
"Mb",
)
def _format_percent_info_unit_for_embed(percent, info, unit):
return f"> `{percent:.3f}` **%**\n- `{info}` **{unit}**"
class Dev:
    """Admin & Test features"""

    def __init__(self, client: Bot):
        self.client = client

    @command(
        name="panel", description="Some data about the panel", guild=826875707418214451
    )
    async def panel_command(self) -> Embed:
        """Panel status command.

        Builds an embed showing RAM / CPU / disk usage of the host, one
        colour-coded field per resource.
        """
        # Colour buckets for 0-20%, 20-40%, ..., 80-100% utilisation.
        cols: tuple = ("blue", "green", "yellow", "orange", "red")
        vm = psutil.virtual_memory()
        cpu_freq = psutil.cpu_freq()
        cpu_percent = psutil.cpu_percent()
        disk = psutil.disk_usage("/")
        # Each value is a (percent, info, unit) triple from the helpers above.
        stats = {
            "ram": _percent_info_unit_ram(vm.used, vm.total),
            "cpu": _percent_info_unit_cpu(cpu_percent, cpu_freq.current, cpu_freq.max),
            "disk": _percent_info_unit_disk(disk.used, disk.total),
        }
        title = "Panel Stats"
        description = "The bot is hosted on a private vps."
        embed = self.client.embed(title=title, description=description)

        def _format_name(name):
            # Pick the colour bucket from the stat's percentage.
            col = cols[int(stats[name][0] // 20)]
            return f":{col}_square: __{name.upper()}__"

        # NOTE(review): assumes pincer's add_fields unpacks each
        # (percent, info, unit) tuple into map_values -- confirm against
        # the pincer Embed API.
        embed = embed.add_fields(
            stats.items(),
            map_title=_format_name,
            map_values=_format_percent_info_unit_for_embed,
        )
        return embed
setup = Dev | mcoding_bot/cogs/dev.py | from __future__ import annotations
import platform
from typing import TYPE_CHECKING
import psutil
from pincer import command
if TYPE_CHECKING:
from pincer.objects import Embed
from mcoding_bot.bot import Bot
def _percent_info_unit_ram(used, total):
mb: int = 1024 ** 2
return (
100 * (used / total),
f"{(total / mb) / 1000:,.3f}",
"Gb",
)
def _percent_info_unit_cpu(cpu_percent, current_freq, max_freq):
return (
cpu_percent,
f"{current_freq / 1000:.1f}`/`{max_freq / 1000:.1f}",
"Ghz",
)
def _percent_info_unit_disk(used, total):
mb: int = 1024 ** 2
return (
100 * (used / total),
f"{total / mb:,.0f}",
"Mb",
)
def _format_percent_info_unit_for_embed(percent, info, unit):
return f"> `{percent:.3f}` **%**\n- `{info}` **{unit}**"
class Dev:
    """Admin & Test features"""

    def __init__(self, client: Bot):
        self.client = client

    @command(
        name="panel", description="Some data about the panel", guild=826875707418214451
    )
    async def panel_command(self) -> Embed:
        """Panel status command.

        Builds an embed showing RAM / CPU / disk usage of the host, one
        colour-coded field per resource.
        """
        # Colour buckets for 0-20%, 20-40%, ..., 80-100% utilisation.
        cols: tuple = ("blue", "green", "yellow", "orange", "red")
        vm = psutil.virtual_memory()
        cpu_freq = psutil.cpu_freq()
        cpu_percent = psutil.cpu_percent()
        disk = psutil.disk_usage("/")
        # Each value is a (percent, info, unit) triple from the helpers above.
        stats = {
            "ram": _percent_info_unit_ram(vm.used, vm.total),
            "cpu": _percent_info_unit_cpu(cpu_percent, cpu_freq.current, cpu_freq.max),
            "disk": _percent_info_unit_disk(disk.used, disk.total),
        }
        title = "Panel Stats"
        description = "The bot is hosted on a private vps."
        embed = self.client.embed(title=title, description=description)

        def _format_name(name):
            # Pick the colour bucket from the stat's percentage.
            col = cols[int(stats[name][0] // 20)]
            return f":{col}_square: __{name.upper()}__"

        # NOTE(review): assumes pincer's add_fields unpacks each
        # (percent, info, unit) tuple into map_values -- confirm against
        # the pincer Embed API.
        embed = embed.add_fields(
            stats.items(),
            map_title=_format_name,
            map_values=_format_percent_info_unit_for_embed,
        )
        return embed
setup = Dev | 0.875867 | 0.209955 |
import json
import sys
from pathlib import Path
from typing import Optional, Awaitable
from os import path as P
import tornado.websocket
from tornado.log import app_log
from tornado.web import Finish, HTTPError, StaticFileHandler
class RestResult(object):
    """Envelope for REST responses: a status code plus a payload."""

    def __init__(self, code, body):
        self.code = code
        self.body = body

    def to_json(self):
        """Serialize the envelope as '{"code": ..., "data": ...}'."""
        return json.dumps({"code": self.code, "data": self.body})
class RestCode(object):
    """Numeric status codes placed in RestResult envelopes."""

    Success = 0
    Exception = -1
class BaseHandler(tornado.web.RequestHandler):
    """Base handler that serializes results and errors as RestResult JSON."""

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        # Streaming request bodies are not supported; chunks are ignored.
        pass

    def _handle_request_exception(self, e):
        """Convert any uncaught handler exception into a JSON error envelope."""
        if isinstance(e, Finish):
            # Not an error; just finish the request without logging.
            if not self._finished:
                self.finish(*e.args)
            return
        try:
            self.log_exception(*sys.exc_info())
        except Exception:
            app_log.error("Error in exception logger", exc_info=True)
        if self._finished:
            return
        # NOTE(review): both branches are identical; HTTPError presumably was
        # meant to receive different treatment (e.g. its status code) -- confirm.
        if isinstance(e, HTTPError):
            self.send_error_content(str(e))
        else:
            self.send_error_content(str(e))

    def send_error_content(self, msg):
        """Finish the request with an Exception-coded RestResult body."""
        # msg = "\"%s\"" % msg.replace("\"", "\\\"")
        _s = RestResult(RestCode.Exception, str(msg))
        self.finish(_s.to_json())

    def response(self, result: dict):
        """Write `result` wrapped in a Success envelope as application/json."""
        rest_result = RestResult(RestCode.Success, result)
        self.set_header("Content-Type", "application/json")
        self.write(rest_result.to_json())

    def get_request_as_dict(self):
        """Parse the request body as JSON and return the resulting object."""
        body = self.request.body
        return json.loads(body)
class IndexHandler(BaseHandler):
    """Health-check endpoint; replies with a fixed string."""

    def __init__(self, a, b, **c):
        super().__init__(a, b, **c)

    def get(self, *args, **kwargs):
        self.finish("It's worked")
class EventsHandler(BaseHandler):
    """Serves experiment events recorded as JSON lines in the app's event file."""

    def get_event_file(self):
        # Event-file location is configured on the tornado Application object.
        app = self.application
        log_file_exp = Path(app.event_file).absolute()
        return log_file_exp

    def get(self, *args, **kwargs):
        # `begin` lets the client poll incrementally: only events with
        # index >= begin are returned.
        event_begin = int(self.get_argument('begin', '0', True))
        event_file = self.get_event_file()
        with open(event_file, 'r', newline='\n') as f:
            events_txt = f.readlines()
        # One JSON document per line (JSON Lines format).
        events_dict = [json.loads(event_txt) for event_txt in events_txt]
        selected_events = events_dict[event_begin:]
        self.response({"events": selected_events})
class AssetsHandler(StaticFileHandler):
    """Static file handler that falls back to index.html for SPA routes."""

    # Assets known to be absent; requesting them yields an explicit 404.
    MissingResource = ['favicon.ico']

    async def get(self, path, **kwargs):
        if path in self.MissingResource:
            raise tornado.web.HTTPError(404, f"File {path} is missing")
        if path in ['', '/']:
            resource_path = "index.html"
        else:
            absolute_path = self.get_absolute_path(self.root, self.parse_url_path(path))
            # Unknown paths are routed to the SPA entry point instead of 404,
            # so client-side routes survive a page reload.
            if not P.exists(absolute_path):
                resource_path = "index.html"  # handle 404
            else:
                resource_path = path
await super(AssetsHandler, self).get(resource_path) | experiment-visualization/experiment_visualization/handlers.py | import json
import sys
from pathlib import Path
from typing import Optional, Awaitable
from os import path as P
import tornado.websocket
from tornado.log import app_log
from tornado.web import Finish, HTTPError, StaticFileHandler
class RestResult(object):
    """JSON envelope pairing a status code with response data."""

    def __init__(self, code, body):
        self.code = code
        self.body = body

    def to_json(self):
        """Return the envelope serialized as a JSON string."""
        payload = {"code": self.code, "data": self.body}
        return json.dumps(payload)
class RestCode(object):
    """Status codes used by RestResult: 0 on success, -1 on failure."""

    Success = 0
    Exception = -1
class BaseHandler(tornado.web.RequestHandler):
    """Base handler that serializes results and errors as RestResult JSON."""

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        # Streaming request bodies are not supported; chunks are ignored.
        pass

    def _handle_request_exception(self, e):
        """Convert any uncaught handler exception into a JSON error envelope."""
        if isinstance(e, Finish):
            # Not an error; just finish the request without logging.
            if not self._finished:
                self.finish(*e.args)
            return
        try:
            self.log_exception(*sys.exc_info())
        except Exception:
            app_log.error("Error in exception logger", exc_info=True)
        if self._finished:
            return
        # NOTE(review): both branches are identical; HTTPError presumably was
        # meant to receive different treatment (e.g. its status code) -- confirm.
        if isinstance(e, HTTPError):
            self.send_error_content(str(e))
        else:
            self.send_error_content(str(e))

    def send_error_content(self, msg):
        """Finish the request with an Exception-coded RestResult body."""
        # msg = "\"%s\"" % msg.replace("\"", "\\\"")
        _s = RestResult(RestCode.Exception, str(msg))
        self.finish(_s.to_json())

    def response(self, result: dict):
        """Write `result` wrapped in a Success envelope as application/json."""
        rest_result = RestResult(RestCode.Success, result)
        self.set_header("Content-Type", "application/json")
        self.write(rest_result.to_json())

    def get_request_as_dict(self):
        """Parse the request body as JSON and return the resulting object."""
        body = self.request.body
        return json.loads(body)
class IndexHandler(BaseHandler):
    """Health-check endpoint; replies with a fixed string."""

    def __init__(self, a, b, **c):
        super().__init__(a, b, **c)

    def get(self, *args, **kwargs):
        self.finish("It's worked")
class EventsHandler(BaseHandler):
    """Serves experiment events recorded as JSON lines in the app's event file."""

    def get_event_file(self):
        # Event-file location is configured on the tornado Application object.
        app = self.application
        log_file_exp = Path(app.event_file).absolute()
        return log_file_exp

    def get(self, *args, **kwargs):
        # `begin` lets the client poll incrementally: only events with
        # index >= begin are returned.
        event_begin = int(self.get_argument('begin', '0', True))
        event_file = self.get_event_file()
        with open(event_file, 'r', newline='\n') as f:
            events_txt = f.readlines()
        # One JSON document per line (JSON Lines format).
        events_dict = [json.loads(event_txt) for event_txt in events_txt]
        selected_events = events_dict[event_begin:]
        self.response({"events": selected_events})
class AssetsHandler(StaticFileHandler):
    """Static file handler that falls back to index.html for SPA routes."""

    # Assets known to be absent; requesting them yields an explicit 404.
    MissingResource = ['favicon.ico']

    async def get(self, path, **kwargs):
        if path in self.MissingResource:
            raise tornado.web.HTTPError(404, f"File {path} is missing")
        if path in ['', '/']:
            resource_path = "index.html"
        else:
            absolute_path = self.get_absolute_path(self.root, self.parse_url_path(path))
            # Unknown paths are routed to the SPA entry point instead of 404,
            # so client-side routes survive a page reload.
            if not P.exists(absolute_path):
                resource_path = "index.html"  # handle 404
            else:
                resource_path = path
await super(AssetsHandler, self).get(resource_path) | 0.419648 | 0.100172 |
import argparse
import logging
import numpy as np
import tensorflow as tf
from features import extract_features_shapenet, generate_views
from inference import shapenet_inference
from utilities import print_and_log, get_log_files, gaussian_log_density
from data import get_data
"""
parse_command_line: command line parser
"""
def parse_command_line():
    """Build the CLI parser and return the parsed options namespace."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("--d_theta", type=int, default=256, help="Shared parameters dimensionality.")
    add("--d_psi", type=int, default=256, help="Adaptation input dimensionality.")
    add("--shot", type=int, default=1, help="Number of training examples.")
    add("--tasks_per_batch", type=int, default=24, help="Number of tasks per batch.")
    add("--samples", type=int, default=1, help="Number of samples from q.")
    add("--learning_rate", "-lr", type=float, default=1e-4, help="Learning rate.")
    add("--iterations", type=int, default=500000, help="Number of training iterations.")
    add("--checkpoint_dir", "-c", default='./checkpoint', help="Directory to save trained models.")
    add("--random_shot", default=False, action="store_true",
        help="Randomize the shot between 1 and shot.")
    add("--print_freq", type=int, default=200, help="Frequency of summary results (in iterations).")
    return parser.parse_args()
def main(_unused_argv):
    """Train the view-reconstruction model; checkpoint the best-validation weights.

    TF1-style graph code run through tf.compat.v1: placeholders are built,
    a per-task loss graph is mapped over the batch, then a Session loop
    alternates training with periodic validation.
    """
    logger = tf.get_logger()
    logger.setLevel(logging.ERROR)
    args = parse_command_line()
    logfile, checkpoint_path_validation, checkpoint_path_final = get_log_files(args.checkpoint_dir)
    print_and_log(logfile, "Options: %s\n" % args)
    # Load training and eval data
    data = get_data("shapenet")
    # tf placeholders
    batch_train_images = tf.compat.v1.placeholder(tf.float32, [None,  # tasks per batch
                                                  None,  # shot
                                                  data.get_image_height(),
                                                  data.get_image_width(),
                                                  data.get_image_channels()], name='train_images'
                                                  )
    batch_test_images = tf.compat.v1.placeholder(tf.float32, [None,  # tasks per batch
                                                 None,  # num test images
                                                 data.get_image_height(),
                                                 data.get_image_width(),
                                                 data.get_image_channels()], name='test_images'
                                                 )
    batch_train_angles = tf.compat.v1.placeholder(tf.float32, [None,  # tasks per batch
                                                  None,  # shot
                                                  data.get_angle_dimensionality()], name='train_angles'
                                                  )
    batch_test_angles = tf.compat.v1.placeholder(tf.float32, [None,  # tasks per batch
                                                 None,  # num test angles
                                                 data.get_angle_dimensionality()], name='test_angles'
                                                 )

    def evaluate_task(inputs):
        # Per-task computation: adapt on the training shot, then score
        # generated views of the test angles against the test images.
        train_images, train_angles, test_images, test_angles = inputs
        inference_features_train = extract_features_shapenet(images=train_images, output_size=args.d_theta,
                                                             use_batch_norm=False, dropout_keep_prob=1.0)
        adaptation_params = shapenet_inference(inference_features_train, train_angles, args.d_theta,
                                               args.d_psi, args.samples)
        test_batch_size = tf.shape(test_images)[0]
        sample_log_py = []
        # loop over samples
        for n in range(args.samples):
            adaptation_vector = adaptation_params['psi_samples'][n, :, :]
            adaptation_inputs = tf.tile(adaptation_vector, [test_batch_size, 1])
            generated_images = generate_views(test_angles, adaptation_inputs)
            # Compute loss
            flat_images_gt = tf.reshape(test_images,
                                        [-1,
                                         data.get_image_height() * data.get_image_width() * data.get_image_channels()])
            flat_images_gen = tf.reshape(generated_images,
                                         [-1,
                                          data.get_image_height() * data.get_image_width() * data.get_image_channels()])
            # Unit-variance Gaussian likelihood of ground truth under the generation.
            log_var = tf.zeros_like(flat_images_gt)
            log_density = gaussian_log_density(flat_images_gt, flat_images_gen, log_var)
            sample_log_py.append(tf.expand_dims(log_density, 1))
        # Monte-Carlo average over the psi samples.
        task_log_py = tf.reduce_mean(tf.concat(sample_log_py, 1), axis=1)
        task_loss = -task_log_py
        return [task_loss, task_log_py]
    # tf mapping of batch to evaluation function
    batch_output = tf.map_fn(fn=evaluate_task,
                             elems=(batch_train_images, batch_train_angles, batch_test_images, batch_test_angles),
                             dtype=[tf.float32, tf.float32],
                             parallel_iterations=args.tasks_per_batch)
    # average all values across batch
    batch_losses, batch_log_densities = batch_output
    loss = tf.reduce_mean(batch_losses)
    log_likelihood = tf.reduce_mean(batch_log_densities)
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=args.learning_rate)
    gvs = optimizer.compute_gradients(loss)
    # Clip gradient values to [-1, 1] for stability.
    gvs = [(tf.clip_by_value(grad, -1, 1), var) for grad, var in gvs if grad is not None]
    train_step = optimizer.apply_gradients(gvs)
    with tf.compat.v1.Session() as sess:
        saver = tf.compat.v1.train.Saver()
        # train the model
        validation_batches = 100
        iteration = 0
        best_validation_loss = 5e10
        train_iteration_loss = []
        sess.run(tf.compat.v1.global_variables_initializer())
        while iteration < args.iterations:
            train_shot = args.shot
            if args.random_shot:
                train_shot = np.random.randint(low=1, high=(args.shot + 1))
            train_inputs, test_inputs, train_outputs, test_outputs = \
                data.get_batch(source='train', tasks_per_batch=args.tasks_per_batch, shot=train_shot)
            feed_dict = {batch_train_images: train_inputs, batch_test_images: test_inputs,
                         batch_train_angles: train_outputs, batch_test_angles: test_outputs}
            _, log_py, iteration_loss = sess.run([train_step, log_likelihood, loss], feed_dict)
            train_iteration_loss.append(iteration_loss)
            if (iteration > 0) and (iteration % args.print_freq == 0):
                # Periodic validation pass over `validation_batches` batches.
                validation_iteration, iteration_loss = 0, []
                while validation_iteration < validation_batches:
                    train_inputs, test_inputs, train_outputs, test_outputs = \
                        data.get_batch(source='validation', tasks_per_batch=args.tasks_per_batch, shot=args.shot)
                    feed_dict = {batch_train_images: train_inputs, batch_test_images: test_inputs,
                                 batch_train_angles: train_outputs, batch_test_angles: test_outputs}
                    iter_loss = sess.run([loss], feed_dict)
                    iteration_loss.append(iter_loss)
                    validation_iteration += 1
                validation_loss = np.array(iteration_loss).mean()
                train_average_loss = np.array(train_iteration_loss).mean()
                # save checkpoint if validation is the best so far
                if validation_loss < best_validation_loss:
                    best_validation_loss = validation_loss
                    saver.save(sess=sess, save_path=checkpoint_path_validation)
                print_and_log(logfile, 'Iteration: {}, Likelihood: {:5.3f}, Iteration-Train-Loss: {:5.3f},'
                                       'Val-Loss: {:5.3f}'.format(iteration, log_py, train_average_loss,
                                                                  validation_loss))
                train_iteration_loss = []
            iteration += 1
        # save the checkpoint from the final epoch
        saver.save(sess, save_path=checkpoint_path_final)
        print_and_log(logfile, 'Fully-trained model saved to: {}'.format(checkpoint_path_final))
        print_and_log(logfile, 'Best validation loss: {:5.3f}'.format(best_validation_loss))
        print_and_log(logfile, 'Best validation model saved to: {}'.format(checkpoint_path_validation))
    logfile.close()
if __name__ == "__main__":
tf.compat.v1.app.run() | src/train_view_reconstruction.py | import argparse
import logging
import numpy as np
import tensorflow as tf
from features import extract_features_shapenet, generate_views
from inference import shapenet_inference
from utilities import print_and_log, get_log_files, gaussian_log_density
from data import get_data
"""
parse_command_line: command line parser
"""
def parse_command_line():
    """Parse training options from the command line."""
    parser = argparse.ArgumentParser()
    # Integer-valued options share the same registration pattern.
    int_options = (
        ("--d_theta", 256, "Shared parameters dimensionality."),
        ("--d_psi", 256, "Adaptation input dimensionality."),
        ("--shot", 1, "Number of training examples."),
        ("--tasks_per_batch", 24, "Number of tasks per batch."),
        ("--samples", 1, "Number of samples from q."),
        ("--iterations", 500000, "Number of training iterations."),
        ("--print_freq", 200, "Frequency of summary results (in iterations)."),
    )
    for flag, default, text in int_options:
        parser.add_argument(flag, type=int, default=default, help=text)
    parser.add_argument("--learning_rate", "-lr", type=float, default=1e-4, help="Learning rate.")
    parser.add_argument("--checkpoint_dir", "-c", default='./checkpoint', help="Directory to save trained models.")
    parser.add_argument("--random_shot", default=False, action="store_true",
                        help="Randomize the shot between 1 and shot.")
    return parser.parse_args()
def main(_unused_argv):
    """Train the view-reconstruction model; checkpoint the best-validation weights.

    TF1-style graph code run through tf.compat.v1: placeholders are built,
    a per-task loss graph is mapped over the batch, then a Session loop
    alternates training with periodic validation.
    """
    logger = tf.get_logger()
    logger.setLevel(logging.ERROR)
    args = parse_command_line()
    logfile, checkpoint_path_validation, checkpoint_path_final = get_log_files(args.checkpoint_dir)
    print_and_log(logfile, "Options: %s\n" % args)
    # Load training and eval data
    data = get_data("shapenet")
    # tf placeholders
    batch_train_images = tf.compat.v1.placeholder(tf.float32, [None,  # tasks per batch
                                                  None,  # shot
                                                  data.get_image_height(),
                                                  data.get_image_width(),
                                                  data.get_image_channels()], name='train_images'
                                                  )
    batch_test_images = tf.compat.v1.placeholder(tf.float32, [None,  # tasks per batch
                                                 None,  # num test images
                                                 data.get_image_height(),
                                                 data.get_image_width(),
                                                 data.get_image_channels()], name='test_images'
                                                 )
    batch_train_angles = tf.compat.v1.placeholder(tf.float32, [None,  # tasks per batch
                                                  None,  # shot
                                                  data.get_angle_dimensionality()], name='train_angles'
                                                  )
    batch_test_angles = tf.compat.v1.placeholder(tf.float32, [None,  # tasks per batch
                                                 None,  # num test angles
                                                 data.get_angle_dimensionality()], name='test_angles'
                                                 )

    def evaluate_task(inputs):
        # Per-task computation: adapt on the training shot, then score
        # generated views of the test angles against the test images.
        train_images, train_angles, test_images, test_angles = inputs
        inference_features_train = extract_features_shapenet(images=train_images, output_size=args.d_theta,
                                                             use_batch_norm=False, dropout_keep_prob=1.0)
        adaptation_params = shapenet_inference(inference_features_train, train_angles, args.d_theta,
                                               args.d_psi, args.samples)
        test_batch_size = tf.shape(test_images)[0]
        sample_log_py = []
        # loop over samples
        for n in range(args.samples):
            adaptation_vector = adaptation_params['psi_samples'][n, :, :]
            adaptation_inputs = tf.tile(adaptation_vector, [test_batch_size, 1])
            generated_images = generate_views(test_angles, adaptation_inputs)
            # Compute loss
            flat_images_gt = tf.reshape(test_images,
                                        [-1,
                                         data.get_image_height() * data.get_image_width() * data.get_image_channels()])
            flat_images_gen = tf.reshape(generated_images,
                                         [-1,
                                          data.get_image_height() * data.get_image_width() * data.get_image_channels()])
            # Unit-variance Gaussian likelihood of ground truth under the generation.
            log_var = tf.zeros_like(flat_images_gt)
            log_density = gaussian_log_density(flat_images_gt, flat_images_gen, log_var)
            sample_log_py.append(tf.expand_dims(log_density, 1))
        # Monte-Carlo average over the psi samples.
        task_log_py = tf.reduce_mean(tf.concat(sample_log_py, 1), axis=1)
        task_loss = -task_log_py
        return [task_loss, task_log_py]
    # tf mapping of batch to evaluation function
    batch_output = tf.map_fn(fn=evaluate_task,
                             elems=(batch_train_images, batch_train_angles, batch_test_images, batch_test_angles),
                             dtype=[tf.float32, tf.float32],
                             parallel_iterations=args.tasks_per_batch)
    # average all values across batch
    batch_losses, batch_log_densities = batch_output
    loss = tf.reduce_mean(batch_losses)
    log_likelihood = tf.reduce_mean(batch_log_densities)
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=args.learning_rate)
    gvs = optimizer.compute_gradients(loss)
    # Clip gradient values to [-1, 1] for stability.
    gvs = [(tf.clip_by_value(grad, -1, 1), var) for grad, var in gvs if grad is not None]
    train_step = optimizer.apply_gradients(gvs)
    with tf.compat.v1.Session() as sess:
        saver = tf.compat.v1.train.Saver()
        # train the model
        validation_batches = 100
        iteration = 0
        best_validation_loss = 5e10
        train_iteration_loss = []
        sess.run(tf.compat.v1.global_variables_initializer())
        while iteration < args.iterations:
            train_shot = args.shot
            if args.random_shot:
                train_shot = np.random.randint(low=1, high=(args.shot + 1))
            train_inputs, test_inputs, train_outputs, test_outputs = \
                data.get_batch(source='train', tasks_per_batch=args.tasks_per_batch, shot=train_shot)
            feed_dict = {batch_train_images: train_inputs, batch_test_images: test_inputs,
                         batch_train_angles: train_outputs, batch_test_angles: test_outputs}
            _, log_py, iteration_loss = sess.run([train_step, log_likelihood, loss], feed_dict)
            train_iteration_loss.append(iteration_loss)
            if (iteration > 0) and (iteration % args.print_freq == 0):
                # Periodic validation pass over `validation_batches` batches.
                validation_iteration, iteration_loss = 0, []
                while validation_iteration < validation_batches:
                    train_inputs, test_inputs, train_outputs, test_outputs = \
                        data.get_batch(source='validation', tasks_per_batch=args.tasks_per_batch, shot=args.shot)
                    feed_dict = {batch_train_images: train_inputs, batch_test_images: test_inputs,
                                 batch_train_angles: train_outputs, batch_test_angles: test_outputs}
                    iter_loss = sess.run([loss], feed_dict)
                    iteration_loss.append(iter_loss)
                    validation_iteration += 1
                validation_loss = np.array(iteration_loss).mean()
                train_average_loss = np.array(train_iteration_loss).mean()
                # save checkpoint if validation is the best so far
                if validation_loss < best_validation_loss:
                    best_validation_loss = validation_loss
                    saver.save(sess=sess, save_path=checkpoint_path_validation)
                print_and_log(logfile, 'Iteration: {}, Likelihood: {:5.3f}, Iteration-Train-Loss: {:5.3f},'
                                       'Val-Loss: {:5.3f}'.format(iteration, log_py, train_average_loss,
                                                                  validation_loss))
                train_iteration_loss = []
            iteration += 1
        # save the checkpoint from the final epoch
        saver.save(sess, save_path=checkpoint_path_final)
        print_and_log(logfile, 'Fully-trained model saved to: {}'.format(checkpoint_path_final))
        print_and_log(logfile, 'Best validation loss: {:5.3f}'.format(best_validation_loss))
        print_and_log(logfile, 'Best validation model saved to: {}'.format(checkpoint_path_validation))
    logfile.close()
if __name__ == "__main__":
tf.compat.v1.app.run() | 0.837421 | 0.236252 |
from PIL import Image
from difflib import SequenceMatcher
import hashlib, os, imagehash, argparse
# Setup arguments
parser = argparse.ArgumentParser(description="Image deduplicator")
parser.add_argument('-m', '--mode', help="Deduplicator operation mode (rename, move, organize, similar, all)", required=True)
parser.add_argument('-p', '--path', help="Path to files to process", required=True)
parser.add_argument('-d', '--duplicate-path', help="Define path to dump duplicate images")
parser.add_argument('-q', '--quiet', help="Do not display any output", action='store_true')
# Fixed typo in the user-facing help text ("Displat" -> "Display").
parser.add_argument('-v', '--verbose', help="Display additional messages", action='store_true')
args = parser.parse_args()
# Massage args for input
# --quiet and --verbose are contradictory; verbose wins.
if args.quiet and args.verbose:
    args.quiet = False
# Several modes can be requested at once, separated by '+'.
args.mode = args.mode.split("+")
# Setup global vars
hasher = hashlib.md5()  # NOTE(review): created but never used below — verify before removing
directory = args.path
# Default duplicate dump location sits next to the processed path.
duplicate_dir = args.duplicate_path if args.duplicate_path is not None else "{0}~duplicates".format(directory)
skip_dir_nums = []  # numeric sub-directory names to skip (filled by hand)
skip_dirs = []      # absolute skip paths, built by generate_skip_dirs()
files = []          # hash-based file names seen so far (duplicate tracking)
# Message filter function
def __print(msg, level=None):
    """Print *msg* honouring the --quiet/--verbose flags.

    level None       -> normal message, suppressed by --quiet
    level "verbose"  -> only shown when --verbose is set (the original code
                        printed verbose messages unconditionally because both
                        branches of its inner if/else called print)
    level "error"    -> always shown, even under --quiet
    """
    if level == "error":
        # Errors bypass --quiet.
        print(msg)
    elif not args.quiet:
        if level != "verbose" or args.verbose:
            print(msg)
def similar(a, b):
    """Return a fuzzy similarity score in [0.0, 1.0] for two strings."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
def generate_similarity_report(similarities, parent_dir):
    """Render the similar-image sets as a standalone HTML report string.

    Each entry value is (file_a, file_b, info_a, info_b, ratio, distance);
    image paths in the report are joined against *parent_dir*.
    """
    parts = [r'<html><head><style>.left_info,.right_info{margin-bottom:5px}body{width:810px;background-color:#333;color:#cec}.similar_set{height:400px;width:810px;margin:40px;margin-bottom:60px}div.left_image,div.right_image{float:left;background-color:#444}div.left_image{margin-right:5px}img.image_a,img.image_b{max-height:400px;max-width:400px}</style></head><body>']
    for set_id, entry in similarities.items():
        img_a, img_b, info_a, info_b, ratio, distance = entry
        parts.append("<div class='similar_set' id='%s'><div>Similarity: %s (%s)</div>" % (set_id, ratio, distance))
        parts.append("<div class='left_image'><div class='left_info'>%s</div><img class='image_a' src='%s' /></div>" % (info_a, os.path.join(parent_dir, img_a)))
        parts.append("<div class='right_image'><div class='right_info'>%s</div><img class='image_b' src='%s' /></div>" % (info_b, os.path.join(parent_dir, img_b)))
        parts.append("</div>")
    parts.append("</body></html>")
    return "".join(parts)
def sizeof_fmt(num, suffix='B'):
    """Format *num* as a human-readable size using binary (1024) prefixes."""
    units = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi')
    value = num
    for unit in units:
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, unit, suffix)
        value /= 1024.0
    # Anything this large falls through to yobi.
    return "%.1f%s%s" % (value, 'Yi', suffix)
def generate_skip_dirs():
    """Expand the numeric skip list into absolute paths under *directory*.

    Populates the module-level ``skip_dirs`` list from ``skip_dir_nums``
    (empty by default, so nothing is skipped unless edited above).
    """
    for skip_dir_num in skip_dir_nums:
        skip_dirs.append(os.path.join(directory, "{0}".format(skip_dir_num)))
def get_hash(file_path):
    """Return the perceptual (average) hash of the image at *file_path*."""
    # Local renamed so it no longer shadows the builtin `hash`.
    image_hash = imagehash.average_hash(Image.open(file_path))
    return image_hash
def move_duplicate(file_path, file_hash, file_ext):
    """Relocate *file_path* into the duplicate directory as <hash><ext>.

    If a file with that name is already parked there, this copy is a
    redundant duplicate and is deleted instead of moved.
    """
    if not os.path.exists(duplicate_dir):
        os.makedirs(duplicate_dir)
    target_path = os.path.join(duplicate_dir, "{0}{1}".format(file_hash, file_ext))
    if os.path.isfile(target_path):
        os.remove(file_path)
        __print("Deleting extra duplicate {0} ({1})".format(file_path, target_path))
    else:
        os.rename(file_path, target_path)
        __print("Moving {0}{1} to duplicate image directory".format(file_hash, file_ext))
def rename_file(file_path):
    """Rename the image at *file_path* to "<perceptual-hash><ext>".

    Files whose hash-based name already exists (on disk, or in the
    module-level ``files`` tracking list) are handed to move_duplicate().
    """
    file_hash = get_hash(file_path)
    file_ext = os.path.splitext(file_path)[1]
    old_file_name = os.path.basename(file_path)
    new_file_name = "{0}{1}".format(file_hash, file_ext)
    new_file_path = os.path.join(os.path.dirname(file_path), new_file_name)
    # If old_file_name and new_file_name do not match, rename file
    if old_file_name != new_file_name:
        if os.path.isfile(new_file_path):
            # A hash-named twin already sits in the same directory.
            move_duplicate(file_path, file_hash, file_ext)
        else:
            os.rename(file_path, new_file_path)
            # "{2}" prints the sub-directory (path with the root stripped)
            # ahead of the old -> new name mapping.
            __print("{2} {0} -> {1}".format(old_file_name, new_file_name, os.path.dirname(file_path).replace(directory, "")))
            if new_file_name not in files:
                files.append(new_file_name)
            else:
                # NOTE(review): by this point the file was already renamed to
                # new_file_path, so move_duplicate() receives the stale
                # pre-rename path — verify this branch against a real
                # cross-directory duplicate.
                move_duplicate(file_path, file_hash, file_ext)
def move_file(file_path, destination):
    """Move *file_path* into *destination*; name collisions go to the duplicate dir."""
    file_name = os.path.basename(file_path)
    target_path = os.path.join(destination, file_name)
    if file_path == target_path:
        # Already in place — nothing to do.
        return
    if os.path.isfile(target_path):
        # Same name already present at the destination: treat as duplicate.
        move_duplicate(file_path, get_hash(file_path), os.path.splitext(file_path)[1])
    else:
        os.rename(file_path, target_path)
        __print("Moving {0} -> {1}".format(file_name, target_path.replace(directory, "").replace("\\", "/")))
def organize_file(file_path):
    """File is filed under a sub-directory named after its first character."""
    first_char = os.path.basename(file_path)[:1]
    destination = os.path.join(directory, first_char)
    if not os.path.exists(destination):
        os.makedirs(destination)
    move_file(file_path, destination)
def validate_file_type(file):
    """Return True when *file* has a recognised image extension.

    The comparison is case-insensitive, so files such as "photo.JPG" are
    accepted too (the original exact-match check rejected upper-case
    extensions).
    """
    extension = os.path.splitext(file)[1].lower()
    valid_extensions = [".jpg", ".jpeg", ".png", ".gif"]
    return extension in valid_extensions
def get_files_and_rename(parent_dir):
    """Recursively hash-rename every valid image file under *parent_dir*.

    The duplicate directory and any configured skip directories are left
    untouched; sub-directories are descended into.
    """
    # `!=` instead of the original `is not`: string identity is an
    # interning accident, not a reliable equality test.
    if parent_dir != duplicate_dir and parent_dir not in skip_dirs:
        for entry in os.listdir(parent_dir):
            file_path = os.path.join(parent_dir, entry)
            if os.path.isfile(file_path):
                if validate_file_type(file_path):
                    try:
                        rename_file(file_path)
                    # Narrowed from a bare `except:` so KeyboardInterrupt /
                    # SystemExit propagate instead of being swallowed.
                    except Exception:
                        __print("There was an error handling %s" % (file_path), "error")
            else:
                get_files_and_rename(file_path)
def get_files_and_move(parent_dir):
    """Recursively flatten all files under *parent_dir* into ~unsorted."""
    if parent_dir in skip_dirs:
        return
    unsorted_dir = os.path.join(directory, "~unsorted")
    # os.rename inside move_file fails when the target directory does not
    # exist, so make sure it is created before the first move.
    if not os.path.exists(unsorted_dir):
        os.makedirs(unsorted_dir)
    for entry in os.listdir(parent_dir):
        file_path = os.path.join(parent_dir, entry)
        if os.path.isfile(file_path):
            move_file(file_path, unsorted_dir)
        else:
            get_files_and_move(file_path)
def get_files_and_organize(parent_dir):
    """Recursively sort files into first-character sub-directories."""
    if parent_dir in skip_dirs:
        return
    for entry in os.listdir(parent_dir):
        entry_path = os.path.join(parent_dir, entry)
        if os.path.isfile(entry_path):
            organize_file(entry_path)
        else:
            get_files_and_organize(entry_path)
def get_files_and_find_similar(parent_dir):
    """Find visually similar (but not identical) images in *parent_dir*.

    Every file name is compared against every other (O(n^2)); name pairs
    whose difflib ratio falls in [0.8, 1.0) are then compared by perceptual
    hash, and close matches are collected into an HTML report which is
    written to report.html and opened.
    """
    if parent_dir not in skip_dirs:
        similar_sets = {}
        for file in os.listdir(parent_dir):
            for index in os.listdir(parent_dir):
                # Compare base names only (text before the first dot).
                similarity = similar(file.split(".")[0], index.split(".")[0])
                # Ratio 1.0 is the file paired with itself; skip those.
                if similarity >= 0.8 and similarity < 1.0:
                    hash_file = hash(file)
                    hash_index = hash(index)
                    # The sum is symmetric, so (a, b) and (b, a) collapse
                    # into one entry.  NOTE(review): unrelated pairs can
                    # also collide on the same sum — confirm acceptable.
                    hash_combination = hash_file + hash_index
                    if hash_combination not in similar_sets:
                        file_path = os.path.join(parent_dir, file)
                        file_stat = os.stat(file_path)
                        file_size = sizeof_fmt(file_stat.st_size)
                        file_ext = os.path.splitext(file_path)[1].replace(".", "")
                        file_info = "%s, %s" % (file_size, file_ext)
                        file_img = Image.open(file_path)
                        file_hash = imagehash.average_hash(file_img)
                        index_path = os.path.join(parent_dir, index)
                        index_stat = os.stat(index_path)
                        index_size = sizeof_fmt(index_stat.st_size)
                        index_ext = os.path.splitext(index_path)[1].replace(".", "")
                        index_info = "%s, %s" % (index_size, index_ext)
                        index_img = Image.open(index_path)
                        index_hash = imagehash.average_hash(index_img)
                        # Distance between average hashes; smaller means
                        # more visually alike.
                        true_similarity = file_hash - index_hash
                        if true_similarity < 3:
                            similar_sets[hash_combination] = [file, index, file_info, index_info, "%d%%" % (similarity * 100), true_similarity]
                            __print('Similarity [%d] found between %s and %s (%d%%)' % (len(similar_sets), file, index, similarity * 100), None)
                        else:
                            __print("Not similar enough...", None)
                    else:
                        __print("Duplicate similarity found, discarding... (%s, %s)" % (file, index), None)
        __print("Similarities found: %d" % (len(similar_sets)), None)
        html = generate_similarity_report(similar_sets, parent_dir)
        with open("report.html", "w") as outfile:
            outfile.write(html)
        # "start" is a Windows shell command that opens the report in the
        # default browser.  NOTE(review): likely a no-op/error elsewhere.
        os.system("start report.html")
# Build the list of directories to skip before any traversal starts.
generate_skip_dirs()
# args.mode was split on '+' above, so several passes can run in sequence.
if "similar" in args.mode:
    get_files_and_find_similar(directory)
if "rename" in args.mode:
    get_files_and_rename(directory)
if "move" in args.mode:
    get_files_and_move(directory)
if "organize" in args.mode:
    get_files_and_organize(directory)
# Example usage to rename files in a folder and detect duplicate within
# python .\deduper.py -m rename -p C:\path\ -v
# Example usage to organize files, including any that are unorganized, within a directory and all subdirectories
# python .\deduper.py -m organize -p C:\path\ | deduper.py | from PIL import Image
from difflib import SequenceMatcher
import hashlib, os, imagehash, argparse
# Setup arguments
parser = argparse.ArgumentParser(description="Image deduplicator")
parser.add_argument('-m', '--mode', help="Deduplicator operation mode (rename, move, organize, similar, all)", required=True)
parser.add_argument('-p', '--path', help="Path to files to process", required=True)
parser.add_argument('-d', '--duplicate-path', help="Define path to dump duplicate images")
parser.add_argument('-q', '--quiet', help="Do not display any output", action='store_true')
parser.add_argument('-v', '--verbose', help="Displat additional messages", action='store_true')
args = parser.parse_args()
# Massage args for input
if args.quiet == True and args.verbose == True:
args.quiet = False
args.mode = args.mode.split("+")
# Setup global vars
hasher = hashlib.md5()
directory = args.path
duplicate_dir = args.duplicate_path if args.duplicate_path is not None else "{0}~duplicates".format(directory)
skip_dir_nums = []
skip_dirs = []
files = []
# Message filter function
def __print(msg, level=None):
if args.quiet != True:
if args.verbose == True and level == "verbose":
print(msg)
else:
print(msg)
elif level == "error":
print(msg)
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
def generate_similarity_report(similarities, parent_dir):
html = r'<html><head><style>.left_info,.right_info{margin-bottom:5px}body{width:810px;background-color:#333;color:#cec}.similar_set{height:400px;width:810px;margin:40px;margin-bottom:60px}div.left_image,div.right_image{float:left;background-color:#444}div.left_image{margin-right:5px}img.image_a,img.image_b{max-height:400px;max-width:400px}</style></head><body>'
for key, value in similarities.items():
a, b, a_info, b_info, similarity, truth = value
html += "<div class='similar_set' id='%s'><div>Similarity: %s (%s)</div>" % (key, similarity, truth)
html += "<div class='left_image'><div class='left_info'>%s</div><img class='image_a' src='%s' /></div>" % (a_info, os.path.join(parent_dir, a))
html += "<div class='right_image'><div class='right_info'>%s</div><img class='image_b' src='%s' /></div>" % (b_info, os.path.join(parent_dir, b))
html += "</div>"
html += "</body></html>"
return html
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def generate_skip_dirs():
for skip_dir_num in skip_dir_nums:
skip_dirs.append(os.path.join(directory, "{0}".format(skip_dir_num)))
def get_hash(file_path):
hash = imagehash.average_hash(Image.open(file_path))
return hash
def move_duplicate(file_path, file_hash, file_ext):
if not os.path.exists(duplicate_dir):
os.makedirs(duplicate_dir)
duplicate_path = os.path.join(duplicate_dir, "{0}{1}".format(file_hash, file_ext))
if os.path.isfile(duplicate_path) == True:
os.remove(file_path)
__print("Deleting extra duplicate {0} ({1})".format(file_path, duplicate_path))
return
os.rename(file_path, duplicate_path)
__print("Moving {0}{1} to duplicate image directory".format(file_hash, file_ext))
def rename_file(file_path):
file_hash = get_hash(file_path)
file_ext = os.path.splitext(file_path)[1]
old_file_name = os.path.basename(file_path)
new_file_name = "{0}{1}".format(file_hash, file_ext)
new_file_path = os.path.join(os.path.dirname(file_path), new_file_name)
# If old_file_name and new_file_name do not match, rename file
if old_file_name != new_file_name:
if os.path.isfile(new_file_path):
move_duplicate(file_path, file_hash, file_ext)
else:
os.rename(file_path, new_file_path)
__print("{2} {0} -> {1}".format(old_file_name, new_file_name, os.path.dirname(file_path).replace(directory, "")))
if new_file_name not in files:
files.append(new_file_name)
else:
move_duplicate(file_path, file_hash, file_ext)
def move_file(file_path, destination):
file_name = os.path.basename(file_path)
new_file_path = os.path.join(destination, file_name)
if file_path != new_file_path:
if os.path.isfile(new_file_path) == False:
os.rename(file_path, new_file_path)
__print("Moving {0} -> {1}".format(file_name, new_file_path.replace(directory, "").replace("\\", "/")))
else:
move_duplicate(file_path, get_hash(file_path), os.path.splitext(file_path)[1])
def organize_file(file_path):
file_name = os.path.basename(file_path)
destination = os.path.join(directory, file_name[:1])
if not os.path.exists(destination):
os.makedirs(destination)
move_file(file_path, destination)
def validate_file_type(file):
extension = os.path.splitext(file)[1]
valid_extensions = [".jpg", ".jpeg", ".png", ".gif"]
return extension in valid_extensions
def get_files_and_rename(parent_dir):
if parent_dir is not duplicate_dir and parent_dir not in skip_dirs:
for file in os.listdir(parent_dir):
file_path = os.path.join(parent_dir, file)
if os.path.isfile(file_path):
if validate_file_type(file_path):
try:
rename_file(file_path)
except:
__print("There was an error handling %s" % (file_path), "error")
pass
else:
get_files_and_rename(file_path)
def get_files_and_move(parent_dir):
if parent_dir not in skip_dirs:
for file in os.listdir(parent_dir):
file_path = os.path.join(parent_dir, file)
if os.path.isfile(file_path):
move_file(file_path, os.path.join(directory, "~unsorted"))
else:
get_files_and_move(file_path)
def get_files_and_organize(parent_dir):
if parent_dir not in skip_dirs:
for file in os.listdir(parent_dir):
file_path = os.path.join(parent_dir, file)
if os.path.isfile(file_path):
organize_file(file_path)
else:
get_files_and_organize(file_path)
def get_files_and_find_similar(parent_dir):
if parent_dir not in skip_dirs:
similar_sets = {}
for file in os.listdir(parent_dir):
for index in os.listdir(parent_dir):
similarity = similar(file.split(".")[0], index.split(".")[0])
if similarity >= 0.8 and similarity < 1.0:
hash_file = hash(file)
hash_index = hash(index)
hash_combination = hash_file + hash_index
if hash_combination not in similar_sets:
file_path = os.path.join(parent_dir, file)
file_stat = os.stat(file_path)
file_size = sizeof_fmt(file_stat.st_size)
file_ext = os.path.splitext(file_path)[1].replace(".", "")
file_info = "%s, %s" % (file_size, file_ext)
file_img = Image.open(file_path)
file_hash = imagehash.average_hash(file_img)
index_path = os.path.join(parent_dir, index)
index_stat = os.stat(index_path)
index_size = sizeof_fmt(index_stat.st_size)
index_ext = os.path.splitext(index_path)[1].replace(".", "")
index_info = "%s, %s" % (index_size, index_ext)
index_img = Image.open(index_path)
index_hash = imagehash.average_hash(index_img)
true_similarity = file_hash - index_hash
if true_similarity < 3:
similar_sets[hash_combination] = [file, index, file_info, index_info, "%d%%" % (similarity * 100), true_similarity]
__print('Similarity [%d] found between %s and %s (%d%%)' % (len(similar_sets), file, index, similarity * 100), None)
else:
__print("Not similar enough...", None)
else:
__print("Duplicate similarity found, discarding... (%s, %s)" % (file, index), None)
__print("Similarities found: %d" % (len(similar_sets)), None)
html = generate_similarity_report(similar_sets, parent_dir)
with open("report.html", "w") as outfile:
outfile.write(html)
os.system("start report.html")
generate_skip_dirs()
if "similar" in args.mode:
get_files_and_find_similar(directory)
if "rename" in args.mode:
get_files_and_rename(directory)
if "move" in args.mode:
get_files_and_move(directory)
if "organize" in args.mode:
get_files_and_organize(directory)
# Example usage to rename files in a folder and detect duplicate within
# python .\deduper.py -m rename -p C:\path\ -v
# Example usage to organize files, including any that are unorganized, within a directory and all subdirectories
# python .\deduper.py -m organize -p C:\path\ | 0.272218 | 0.099733 |
from functools import reduce
from operator import mul
from typing import Union, Callable
import torch
from torch import nn
from continual_learning.solvers.base import Solver
class MultiHeadsSolver(Solver):
    """Multi-head solver: one independent head (sub-network) per task.

    The active task index selects which head processes the input in
    :meth:`forward`.  Multi-dimensional inputs are flattened before being
    fed to the heads.
    """

    def __init__(self, input_dim: Union[int, tuple], topology: Callable[[int, int], nn.Module] = None):
        """
        :param input_dim: feature count, or a shape tuple that is flattened
            to a single feature dimension.
        :param topology: factory ``(in_dim, out_dim) -> nn.Module`` used to
            build each head; defaults to :meth:`base_topology`.
        """
        super().__init__()
        if topology is None:
            topology = self.base_topology
        # Shapes with more than one axis (e.g. images) are flattened, so the
        # heads always receive a single feature dimension.
        if hasattr(input_dim, '__len__') and len(input_dim) > 1:
            input_dim = reduce(mul, input_dim, 1)
            self.flat_input = True
        else:
            self.flat_input = False
        self._tasks = nn.ModuleList()
        self.input_dim = input_dim
        self.classification_layer = None
        self.topology = topology
        self._task = 0

    def base_topology(self, ind: int, outd: int) -> nn.Module:
        """Default head: two hidden ReLU layers with dropout."""
        return nn.Sequential(nn.Linear(ind, ind),
                             nn.Dropout(0.25),
                             nn.ReLU(),
                             nn.Linear(ind, ind // 4),
                             nn.Dropout(0.25),
                             nn.ReLU(),
                             nn.Linear(ind // 4, outd))

    def add_task(self, output_size: int) -> None:
        """Append a new head producing *output_size* outputs."""
        self._tasks.append(self.topology(self.input_dim, output_size))

    @property
    def task(self):
        """Index of the currently active head."""
        return self._task

    def get_parameters(self, task=None, recuse=True):
        """Yield the parameters of one head (default: the active task).

        NOTE(review): ``recuse`` looks like a typo for ``recurse``; the name
        is kept for backward compatibility with keyword callers.
        """
        if task is None:
            task = self.task
        head = self.heads[task]
        for param in head.parameters(recurse=recuse):
            yield param

    @property
    def heads(self):
        """The per-task heads as an ``nn.ModuleList``."""
        return self._tasks

    @task.setter
    def task(self, value):
        # Valid indices are 0 .. len(self._tasks) - 1.  The original check
        # (`value > len(...)`) was off by one and accepted an index equal to
        # the number of heads, which would only fail later in forward().
        if value >= len(self._tasks) or value < 0:
            raise ValueError("task index {} out of range".format(value))
        self._task = value

    def forward(self, x, task=None):
        """Run *x* through the head for *task* (default: the active task)."""
        _t = self.task if task is None else task
        if self.flat_input:
            x = torch.flatten(x, 1)
        return self.heads[_t](x)
from operator import mul
from typing import Union, Callable
import torch
from torch import nn
from continual_learning.solvers.base import Solver
class MultiHeadsSolver(Solver):
def __init__(self, input_dim: Union[int, tuple], topology: Callable[[int, int], nn.Module] = None):
super().__init__()
if topology is None:
topology = self.base_topology
if hasattr(input_dim, '__len__') and len(input_dim) > 1:
input_dim = reduce(mul, input_dim, 1)
self.flat_input = True
else:
self.flat_input = False
self._tasks = nn.ModuleList()
self.input_dim = input_dim
self.classification_layer = None
self.topology = topology
self._task = 0
def base_topology(self, ind, outd):
return nn.Sequential(*[nn.Linear(ind, ind),
nn.Dropout(0.25),
nn.ReLU(),
nn.Linear(ind, ind // 4),
nn.Dropout(0.25),
nn.ReLU(),
nn.Linear(ind // 4, outd)])
def add_task(self, output_size):
self._tasks.append(self.topology(self.input_dim, output_size))
@property
def task(self):
return self._task
def get_parameters(self, task=None, recuse=True):
if task is None:
task = self.task
th = self.heads[task]
for param in th.parameters(recurse=recuse):
yield param
@property
def heads(self):
return self._tasks
@task.setter
def task(self, value):
if value > len(self._tasks) or value < 0:
raise ValueError()
self._task = value
def forward(self, x, task=None):
if task is not None:
_t = task
else:
_t = self.task
if self.flat_input:
x = torch.flatten(x, 1)
x = self.heads[_t](x)
return x | 0.911309 | 0.218982 |
from common import *
from apps.nem.helpers import *
from apps.nem.multisig import *
from apps.nem.multisig.serialize import *
from apps.nem.namespace import *
from apps.nem.namespace.serialize import *
from trezor.messages.NEMSignTx import NEMSignTx
from trezor.messages.NEMAggregateModification import NEMAggregateModification
from trezor.messages.NEMProvisionNamespace import NEMProvisionNamespace
from trezor.messages.NEMCosignatoryModification import NEMCosignatoryModification
class TestNemMultisig(unittest.TestCase):
    """Golden-value tests for NEM multisig transaction serialization.

    Each expected byte string is the on-chain serialization of a real
    transaction; the explorer links in the comments identify the source.
    NOTE(review): the "<KEY>" tokens below look like redacted fixture data —
    those assertions cannot pass until the real hex values are restored.
    """

    def test_nem_multisig(self):
        # http://bob.nem.ninja:8765/#/multisig/7d3a7087023ee29005262016706818579a2b5499eb9ca76bad98c1e6f4c46642
        # Inner transaction: aggregate modification adding one cosignatory.
        m = _create_msg(NEM_NETWORK_TESTNET,
                        3939039,
                        16000000,
                        3960639,
                        1,
                        0)
        base_tx = serialize_aggregate_modification(m.transaction, m.aggregate_modification, unhexlify("abac2ee3d4aaa7a3bfb65261a00cc04c761521527dd3f2cf741e2815cbba83ac"))
        base_tx = write_cosignatory_modification(base_tx, 2, unhexlify("e6cff9b3725a91f31089c3acca0fac3e341c00b1c8c6e9578f66c4514509c3b3"))
        # Outer multisig wrapper around the inner transaction.
        m = _create_common_msg(NEM_NETWORK_TESTNET,
                               3939039,
                               6000000,
                               3960639)
        multisig = serialize_multisig(m, unhexlify("59d89076964742ef2a2089d26a5aa1d2c7a7bb052a46c1de159891e91ad3d76e"), base_tx)
        self.assertEqual(multisig, unhexlify("0410000001000098df1a3c002000000059d89076964742ef2a2089d26a5aa1d2c7a7bb052a46c1de159891e91ad3d76e808d5b00000000003f6f3c006c0000000110000001000098df1a3c0020000000abac2ee3d4aaa7a3bfb65261a00cc04c761521527dd3f2cf741e2815cbba83ac0024f400000000003f6f3c0001000000280000000200000020000000e6cff9b3725a91f31089c3acca0fac3e341c00b1c8c6e9578f66c4514509c3b3"))
        # Cosignature over the same inner transaction by a second signer.
        address_pubkey = unhexlify("<KEY>")
        m = _create_common_msg(NEM_NETWORK_TESTNET,
                               3939891,
                               6000000,
                               3961491)
        multisig = serialize_multisig_signature(m, unhexlify("71cba4f2a28fd19f902ba40e9937994154d9eeaad0631d25d525ec37922567d4"), base_tx, address_pubkey)
        self.assertEqual(multisig, unhexlify("0210000001000098331e3c002000000071cba4f2a28fd19f902ba40e9937994154d9eeaad0631d25d525ec37922567d4808d5b000000000093723c0024000000200000008ec165580bdabfd31ce6007a1748ce5bdf30eab7a214743097de3bc822ac7e002800000054435258595551494d464137414f474c354c463359574c43375641424c59554d4a35414342554e4c"))

    def test_nem_multisig_2(self):
        # http://chain.nem.ninja/#/multisig/1016cf3bdd61bd57b9b2b07b6ff2dee390279d8d899265bdc23d42360abe2e6c
        # Inner transaction: provision of the "dim" root namespace on mainnet.
        m = _create_provision_msg(NEM_NETWORK_MAINNET,
                                  59414272,
                                  20000000,
                                  59500672,
                                  "dim",
                                  "",
                                  "NAMESPACEWH4MKFMBCVFERDPOOP4FK7MTBXDPZZA",
                                  5000000000)
        base_tx = serialize_provision_namespace(m.transaction, m.provision_namespace, unhexlify("a1df5306355766bd2f9a64efdc089eb294be265987b3359093ae474c051d7d5a"))
        # Outer multisig wrapper.
        m = _create_common_msg(NEM_NETWORK_MAINNET,
                               59414272,
                               6000000,
                               59500672)
        multisig = serialize_multisig(m, unhexlify("cfe58463f0eaebceb5d00717f8aead49171a5d7c08f6b1299bd534f11715acc9"), base_tx)
        self.assertEqual(multisig, unhexlify("041000000100006800978a0320000000cfe58463f0eaebceb5d00717f8aead49171a5d7c08f6b1299bd534f11715acc9808d5b000000000080e88b037b000000012000000100006800978a0320000000a1df5306355766bd2f9a64efdc089eb294be265987b3359093ae474c051d7d5a002d31010000000080e88b03280000004e414d4553504143455748344d4b464d42435646455244504f4f5034464b374d54425844505a5a4100f2052a010000000300000064696dffffffff"))
        # First cosignature.
        m = _create_common_msg(NEM_NETWORK_MAINNET,
                               59414342,
                               6000000,
                               59500742)
        address_pubkey = unhexlify("a1df5306355766bd2f9a64efdc089eb294be265987b3359093ae474c051d7d5a")
        multisig = serialize_multisig_signature(m, unhexlify("1b49b80203007117d034e45234ffcdf402c044aeef6dbb06351f346ca892bce2"), base_tx, address_pubkey)
        self.assertEqual(multisig, unhexlify("021000000100006846978a03200000001b49b80203007117d034e45234ffcdf402c044aeef6dbb06351f346ca892bce2808d5b0000000000c6e88b032400000020000000bfa2088f7720f89dd4664d650e321dabd02fab61b7355bc88a391a848a49786a280000004e4444524733554542354c5a5a5a4d445742453452544b5a4b37334a424850414957424843464d56"))
        # Second cosignature (fixture data redacted — see class note).
        m = _create_common_msg(NEM_NETWORK_MAINNET,
                               59414381,
                               6000000,
                               59500781)
        multisig = serialize_multisig_signature(m, unhexlify("<KEY>"), base_tx, address_pubkey)
        self.assertEqual(multisig, unhexlify("<KEY>"))
def _create_common_msg(network: int, timestamp: int, fee: int, deadline: int):
    """Build a NEMTransactionCommon carrying the shared header fields."""
    common = NEMTransactionCommon()
    common.network = network
    common.timestamp = timestamp
    common.fee = fee
    common.deadline = deadline
    return common
def _create_msg(network: int, timestamp: int, fee: int, deadline: int,
                modifications: int, relative_change: int):
    """Build a NEMSignTx holding an aggregate-modification transaction.

    *modifications* empty cosignatory entries are appended; the test fills
    them in later via write_cosignatory_modification().
    """
    msg = NEMSignTx()
    msg.transaction = _create_common_msg(network, timestamp, fee, deadline)
    msg.aggregate_modification = NEMAggregateModification()
    for _ in range(modifications):
        msg.aggregate_modification.modifications.append(NEMCosignatoryModification())
    msg.aggregate_modification.relative_change = relative_change
    return msg
def _create_provision_msg(network: int, timestamp: int, fee: int, deadline: int,
                          name: str, parent: str, sink: str, rental_fee: int):
    """Build a NEMSignTx holding a provision-namespace transaction."""
    msg = NEMSignTx()
    msg.transaction = _create_common_msg(network, timestamp, fee, deadline)
    namespace = NEMProvisionNamespace()
    namespace.namespace = name
    namespace.parent = parent
    namespace.sink = sink
    namespace.fee = rental_fee
    msg.provision_namespace = namespace
    return msg
if __name__ == '__main__':
unittest.main() | core/tests/test_apps.nem.multisig.py | from common import *
from apps.nem.helpers import *
from apps.nem.multisig import *
from apps.nem.multisig.serialize import *
from apps.nem.namespace import *
from apps.nem.namespace.serialize import *
from trezor.messages.NEMSignTx import NEMSignTx
from trezor.messages.NEMAggregateModification import NEMAggregateModification
from trezor.messages.NEMProvisionNamespace import NEMProvisionNamespace
from trezor.messages.NEMCosignatoryModification import NEMCosignatoryModification
class TestNemMultisig(unittest.TestCase):
def test_nem_multisig(self):
# http://bob.nem.ninja:8765/#/multisig/7d3a7087023ee29005262016706818579a2b5499eb9ca76bad98c1e6f4c46642
m = _create_msg(NEM_NETWORK_TESTNET,
3939039,
16000000,
3960639,
1,
0)
base_tx = serialize_aggregate_modification(m.transaction, m.aggregate_modification, unhexlify("abac2ee3d4aaa7a3bfb65261a00cc04c761521527dd3f2cf741e2815cbba83ac"))
base_tx = write_cosignatory_modification(base_tx, 2, unhexlify("e6cff9b3725a91f31089c3acca0fac3e341c00b1c8c6e9578f66c4514509c3b3"))
m = _create_common_msg(NEM_NETWORK_TESTNET,
3939039,
6000000,
3960639)
multisig = serialize_multisig(m, unhexlify("59d89076964742ef2a2089d26a5aa1d2c7a7bb052a46c1de159891e91ad3d76e"), base_tx)
self.assertEqual(multisig, unhexlify("0410000001000098df1a3c002000000059d89076964742ef2a2089d26a5aa1d2c7a7bb052a46c1de159891e91ad3d76e808d5b00000000003f6f3c006c0000000110000001000098df1a3c0020000000abac2ee3d4aaa7a3bfb65261a00cc04c761521527dd3f2cf741e2815cbba83ac0024f400000000003f6f3c0001000000280000000200000020000000e6cff9b3725a91f31089c3acca0fac3e341c00b1c8c6e9578f66c4514509c3b3"))
address_pubkey = unhexlify("<KEY>")
m = _create_common_msg(NEM_NETWORK_TESTNET,
3939891,
6000000,
3961491)
multisig = serialize_multisig_signature(m, unhexlify("71cba4f2a28fd19f902ba40e9937994154d9eeaad0631d25d525ec37922567d4"), base_tx, address_pubkey)
self.assertEqual(multisig, unhexlify("0210000001000098331e3c002000000071cba4f2a28fd19f902ba40e9937994154d9eeaad0631d25d525ec37922567d4808d5b000000000093723c0024000000200000008ec165580bdabfd31ce6007a1748ce5bdf30eab7a214743097de3bc822ac7e002800000054435258595551494d464137414f474c354c463359574c43375641424c59554d4a35414342554e4c"))
def test_nem_multisig_2(self):
# http://chain.nem.ninja/#/multisig/1016cf3bdd61bd57b9b2b07b6ff2dee390279d8d899265bdc23d42360abe2e6c
m = _create_provision_msg(NEM_NETWORK_MAINNET,
59414272,
20000000,
59500672,
"dim",
"",
"NAMESPACEWH4MKFMBCVFERDPOOP4FK7MTBXDPZZA",
5000000000)
base_tx = serialize_provision_namespace(m.transaction, m.provision_namespace, unhexlify("a1df5306355766bd2f9a64efdc089eb294be265987b3359093ae474c051d7d5a"))
m = _create_common_msg(NEM_NETWORK_MAINNET,
59414272,
6000000,
59500672)
multisig = serialize_multisig(m, unhexlify("cfe58463f0eaebceb5d00717f8aead49171a5d7c08f6b1299bd534f11715acc9"), base_tx)
self.assertEqual(multisig, unhexlify("041000000100006800978a0320000000cfe58463f0eaebceb5d00717f8aead49171a5d7c08f6b1299bd534f11715acc9808d5b000000000080e88b037b000000012000000100006800978a0320000000a1df5306355766bd2f9a64efdc089eb294be265987b3359093ae474c051d7d5a002d31010000000080e88b03280000004e414d4553504143455748344d4b464d42435646455244504f4f5034464b374d54425844505a5a4100f2052a010000000300000064696dffffffff"))
m = _create_common_msg(NEM_NETWORK_MAINNET,
59414342,
6000000,
59500742)
address_pubkey = unhexlify("a1df5306355766bd2f9a64efdc089eb294be265987b3359093ae474c051d7d5a")
multisig = serialize_multisig_signature(m, unhexlify("1b49b80203007117d034e45234ffcdf402c044aeef6dbb06351f346ca892bce2"), base_tx, address_pubkey)
self.assertEqual(multisig, unhexlify("021000000100006846978a03200000001b49b80203007117d034e45234ffcdf402c044aeef6dbb06351f346ca892bce2808d5b0000000000c6e88b032400000020000000bfa2088f7720f89dd4664d650e321dabd02fab61b7355bc88a391a848a49786a280000004e4444524733554542354c5a5a5a4d445742453452544b5a4b37334a424850414957424843464d56"))
m = _create_common_msg(NEM_NETWORK_MAINNET,
59414381,
6000000,
59500781)
multisig = serialize_multisig_signature(m, unhexlify("<KEY>"), base_tx, address_pubkey)
self.assertEqual(multisig, unhexlify("<KEY>"))
def _create_common_msg(network: int, timestamp: int, fee: int, deadline: int):
m = NEMTransactionCommon()
m.network = network
m.timestamp = timestamp
m.fee = fee
m.deadline = deadline
return m
def _create_msg(network: int, timestamp: int, fee: int, deadline: int,
modifications: int, relative_change: int):
m = NEMSignTx()
m.transaction = _create_common_msg(network, timestamp, fee, deadline)
m.aggregate_modification = NEMAggregateModification()
for i in range(modifications):
m.aggregate_modification.modifications.append(NEMCosignatoryModification())
m.aggregate_modification.relative_change = relative_change
return m
def _create_provision_msg(network: int, timestamp: int, fee: int, deadline: int,
name: str, parent: str, sink: str, rental_fee: int):
m = NEMSignTx()
m.transaction = _create_common_msg(network, timestamp, fee, deadline)
m.provision_namespace = NEMProvisionNamespace()
m.provision_namespace.namespace = name
m.provision_namespace.parent = parent
m.provision_namespace.sink = sink
m.provision_namespace.fee = rental_fee
return m
if __name__ == '__main__':
unittest.main() | 0.555918 | 0.186706 |
import pytest # pylint: disable=unused-import
from datetime import date, timedelta
from structure.organization import Team # pylint: disable=unused-import
from structure.measurements import OTMeasurement
class TestOvertimeMeasurement:
    """Constructor smoke tests for OTMeasurement with assorted overtime values."""

    TEAM = None

    @staticmethod
    def _build(overtime):
        # Every test shares the same date/team/workday setup; only the
        # overtime timedelta varies.
        return OTMeasurement(
            measurement_date=date.today(),
            team=TestOvertimeMeasurement.TEAM,
            workdays_fix=20,
            workdays_actual=20,
            overtime=overtime,
        )

    def test_common_overtime(self):
        # Positive overtime below one day.
        assert self._build(timedelta(hours=12, minutes=21)) is not None

    def test_overtime_more_than_24h(self):
        # Positive overtime expressed as more than 24 hours.
        assert self._build(timedelta(hours=32, minutes=21)) is not None

    def test_overtime_more_than_24h_with_day_value(self):
        # Same magnitude as above but with an explicit day component.
        assert self._build(timedelta(days=1, hours=8, minutes=21)) is not None

    def test_overtime_negative(self):
        # Negative hours combined with positive minutes (net negative).
        assert self._build(timedelta(hours=-10, minutes=10)) is not None

    def test_overtime_high_accuracy(self):
        # Overtime specified down to second/millisecond precision.
        assert self._build(timedelta(hours=42, minutes=8, seconds=28, milliseconds=218)) is not None
from datetime import date, timedelta
from structure.organization import Team # pylint: disable=unused-import
from structure.measurements import OTMeasurement
class TestOvertimeMeasurement:
TEAM = None
def test_common_overtime(self):
# Positive overtime
measurement = OTMeasurement(
measurement_date=date.today(),
team=TestOvertimeMeasurement.TEAM,
workdays_fix=20,
workdays_actual=20,
overtime=timedelta(hours=12, minutes=21),
)
assert measurement is not None
def test_overtime_more_than_24h(self):
# Positive overtime >24h
measurement = OTMeasurement(
measurement_date=date.today(),
team=TestOvertimeMeasurement.TEAM,
workdays_fix=20,
workdays_actual=20,
overtime=timedelta(hours=32, minutes=21),
)
assert measurement is not None
def test_overtime_more_than_24h_with_day_value(self):
# Positive overtime >24h with day value in timedelta
measurement = OTMeasurement(
measurement_date=date.today(),
team=TestOvertimeMeasurement.TEAM,
workdays_fix=20,
workdays_actual=20,
overtime=timedelta(days=1, hours=8, minutes=21),
)
assert measurement is not None
def test_overtime_negative(self):
# Negative overtime
measurement = OTMeasurement(
measurement_date=date.today(),
team=TestOvertimeMeasurement.TEAM,
workdays_fix=20,
workdays_actual=20,
overtime=timedelta(hours=-10, minutes=10),
)
assert measurement is not None
def test_overtime_high_accuracy(self):
# Overtime with too accurate OT
measurement = OTMeasurement(
measurement_date=date.today(),
team=TestOvertimeMeasurement.TEAM,
workdays_fix=20,
workdays_actual=20,
overtime=timedelta(hours=42, minutes=8, seconds=28, milliseconds=218),
)
assert measurement is not None | 0.596668 | 0.387806 |
# In[1]:
'''
Author: Sameer
Date: 09/11/2018
Read Me:
1 - This code is for building a Gaussian Kernel (RBF) Support Vector Machine (SVM) and the optimization
problem (Quadratic Programming) is solved using python cvxopt optimization toolbox.
Polynomial Kernel SVM can also be build using this code.
2 - Input Samples variable indicates total no. of points present in both Positive and Negative Classes
Gaussian Standard Deviation is a free variable used in the Gaussian Kernel
Order is a free variable used to control the order of the polynomial used in Polynomial Kernel
Grid Size is controls the number of points to be searched on 1x1 grid to generate decision boundaries
3 - Quadratic Optimization Problem:
QP: Minimize- 1/2 * X.T * P * X + q.T * X
ST: G * X <= h and A * X = b
Matrices were selected based on above descritpion. But for the detailed description go through following link:
http://cs229.stanford.edu/notes/cs229-notes3.pdf
4 - Solution for the QP problem will not be accurate i.e. lagrange mulitpliers will not be absolute zero, so I have
made values below 1.0e-04 to be zero. For Non Support Vectors lagrange mulitpliers are zero and for Support Vector they are
greater than zero.
5 - Postive Class is also represented by C_1 and Negative Class is also represented C_-1
'''
# In[2]:
# Import Required Libraries
import numpy
import matplotlib.pyplot as plt
import cvxopt
# In[3]:
# Parameters
Input_Samples = 100
Gaussian_Std_Deviation = 0.5
Order = 5
grid_size = 500
# In[4]:
# Check if data exists. If yes load it otherwise create new data and store it in text file
if numpy.DataSource().exists('InputPattern.txt'):
X = numpy.loadtxt('InputPattern.txt')
else:
X = numpy.random.uniform(0, 1, (Input_Samples, 2))
numpy.savetxt('InputPattern.txt', X)
# In[5]:
# Create Desired Classification based on some conditions
# Which will give a circle and sine wave boundary and points lying inside them will be positive class, egative otherwise
D = numpy.array([])
Pos_Class = numpy.array([[0, 0]])
Neg_Class = numpy.array([[0, 0]])
for i in range(0, Input_Samples):
if X[i, 1] < 0.2 * numpy.sin(10 * X[i, 0]) + 0.3 or numpy.square(X[i, 0] - 0.5) + numpy.square(X[i, 1] - 0.8) < numpy.square(0.15):
D = numpy.concatenate((D, [1]), axis=0)
Pos_Class = numpy.concatenate((Pos_Class, [X[i]]), axis=0)
else:
D = numpy.concatenate((D, [-1]), axis=0)
Neg_Class = numpy.concatenate((Neg_Class, [X[i]]), axis=0)
# In[6]:
# Plot Results
plt.plot(Pos_Class[1:, 0], Pos_Class[1:, 1], 'b.', label=r'Class $C_1$')
plt.plot(Neg_Class[1:, 0], Neg_Class[1:, 1], 'r.', label=r'Class $C_{-1}$')
plt.show()
# In[7]:
# Required Functions
def Gaussian_Kernel(X1, X2, sigma):
return numpy.exp(-numpy.square(numpy.linalg.norm(X1 - X2))/(2 * numpy.square(sigma)))
def Polynomial_Kernel(X1, X2, order):
return (1 + numpy.dot(X1, X2))**order
# In[8]:
# Optimization Problem - Quadratic Programming
P = numpy.empty([Input_Samples, Input_Samples])
for i in range(0, Input_Samples):
for j in range(0, Input_Samples):
P[i, j] = D[i] * D[j] * Gaussian_Kernel(X[i], X[j], Gaussian_Std_Deviation)
# P[i, j] = D[i] * D[j] * Polynomial_Kernel(X[i], X[j], Order)
# Refer Documentation for Cvxopt Quadratic Programming for the meaning of the matrices
P = cvxopt.matrix(P)
q = cvxopt.matrix(numpy.ones((Input_Samples), dtype='double') * -1)
G = cvxopt.matrix(numpy.diag(numpy.ones((Input_Samples), dtype='double') * -1))
h = cvxopt.matrix(numpy.zeros(Input_Samples))
A = cvxopt.matrix(D, (1, Input_Samples))
b = cvxopt.matrix(0.0)
sol = cvxopt.solvers.qp(P, q, G, h, A, b)
# In[9]:
# Solution:
Alpha_Prime = numpy.ravel(numpy.array(sol['x']))
Alpha = numpy.array([0 if i < 1.0e-04 else i for i in Alpha_Prime]) # Making alpha's perfectly zeroes
SV_Pos_Class = numpy.array([[0, 0]]) # Support Vector for Positive Class
SV_Neg_Class = numpy.array([[0, 0]]) # Support Vecotr for Negative Class
SV_Label = numpy.array([]) # Labels for all Support Vectors, to decrease for loop time execution.
for i in range(0, Input_Samples):
if Alpha[i] != 0:
if D[i] == 1:
SV_Pos_Class = numpy.concatenate((SV_Pos_Class, [X[i]]), axis=0)
SV_Label = numpy.concatenate((SV_Label, [i]), axis=0)
else:
SV_Neg_Class = numpy.concatenate((SV_Neg_Class, [X[i]]), axis=0)
SV_Label = numpy.concatenate((SV_Label, [i]), axis=0)
for k in range(0, Input_Samples):
if Alpha[k] != 0:
W = 0
for i in numpy.nditer(SV_Label):
W = W + Alpha[int(i)] * D[int(i)] * Gaussian_Kernel(X[int(i)], X[k], Gaussian_Std_Deviation)
# W = W + Alpha[int(i)] * D[int(i)] * Polynomial_Kernel(X[int(i)], X[k], Order)
Theta = D[k] - W
break
# In[10]:
# Generate Boundary
x_points = numpy.linspace(0.0, 1.0, grid_size)
y_points = numpy.linspace(0.0, 1.0, grid_size)
H = numpy.array([[0, 0]]) # Decision Boundary
H_Plus = numpy.array([[0, 0]]) # Postivie Gutter
H_Minus = numpy.array([[0, 0]]) # Negative Gutter
for i in range(0, grid_size):
for j in range(0, grid_size):
Discriminant = 0
temp = numpy.array([x_points[i], y_points[j]])
for k in numpy.nditer(SV_Label):
Discriminant = Discriminant + Alpha[int(k)] * D[int(k)] * Gaussian_Kernel(X[int(k)], temp, Gaussian_Std_Deviation)
# Discriminant = Discriminant + Alpha[int(k)] * D[int(k)] * Polynomial_Kernel(X[int(k)], temp, Order)
Discriminant = Discriminant + Theta
if -0.1 < Discriminant < 0.1:
H = numpy.concatenate((H, [temp]), axis=0)
elif -1.1 < Discriminant < -0.9:
H_Minus = numpy.concatenate((H_Minus, [temp]), axis=0)
elif 0.9 < Discriminant < 1.1:
H_Plus = numpy.concatenate((H_Plus, [temp]), axis=0)
# In[11]:
# Plot Results
plt.plot(Pos_Class[1:, 0], Pos_Class[1:, 1], 'b.', label=r'Class $C_1$')
plt.plot(SV_Pos_Class[1:, 0], SV_Pos_Class[1:, 1], 'bd', label=r'$C_1$ Support Vectors')
plt.plot(Neg_Class[1:, 0], Neg_Class[1:, 1], 'r.', label=r'Class $C_{-1}$')
plt.plot(SV_Neg_Class[1:, 0], SV_Neg_Class[1:, 1], 'rd', label=r'$C_{-1}$ Support Vectors')
plt.scatter(H[1:, 0], H[1:, 1], s=0.1, c='g', label='Decision Boundary')
plt.scatter(H_Plus[1:, 0], H_Plus[1:, 1], s=0.1, c='b', label='Positive Gutter')
plt.scatter(H_Minus[1:, 0], H_Minus[1:, 1], s=0.1, c='r', label='Negative Gutter')
plt.xlabel(r'X Coordinate $\rightarrow$')
plt.ylabel(r'Y Coordinate $\rightarrow$')
plt.legend(loc='lower center', bbox_to_anchor=(0.5, 1), fancybox=True, shadow=True, ncol=3, borderpad=0.1, labelspacing=0.1)
plt.tight_layout()
plt.savefig('Results.pdf')
plt.show() | SVM.py |
# In[1]:
'''
Author: Sameer
Date: 09/11/2018
Read Me:
1 - This code is for building a Gaussian Kernel (RBF) Support Vector Machine (SVM) and the optimization
problem (Quadratic Programming) is solved using python cvxopt optimization toolbox.
Polynomial Kernel SVM can also be build using this code.
2 - Input Samples variable indicates total no. of points present in both Positive and Negative Classes
Gaussian Standard Deviation is a free variable used in the Gaussian Kernel
Order is a free variable used to control the order of the polynomial used in Polynomial Kernel
Grid Size is controls the number of points to be searched on 1x1 grid to generate decision boundaries
3 - Quadratic Optimization Problem:
QP: Minimize- 1/2 * X.T * P * X + q.T * X
ST: G * X <= h and A * X = b
Matrices were selected based on above descritpion. But for the detailed description go through following link:
http://cs229.stanford.edu/notes/cs229-notes3.pdf
4 - Solution for the QP problem will not be accurate i.e. lagrange mulitpliers will not be absolute zero, so I have
made values below 1.0e-04 to be zero. For Non Support Vectors lagrange mulitpliers are zero and for Support Vector they are
greater than zero.
5 - Postive Class is also represented by C_1 and Negative Class is also represented C_-1
'''
# In[2]:
# Import Required Libraries
import numpy
import matplotlib.pyplot as plt
import cvxopt
# In[3]:
# Parameters
Input_Samples = 100
Gaussian_Std_Deviation = 0.5
Order = 5
grid_size = 500
# In[4]:
# Check if data exists. If yes load it otherwise create new data and store it in text file
if numpy.DataSource().exists('InputPattern.txt'):
X = numpy.loadtxt('InputPattern.txt')
else:
X = numpy.random.uniform(0, 1, (Input_Samples, 2))
numpy.savetxt('InputPattern.txt', X)
# In[5]:
# Create Desired Classification based on some conditions
# Which will give a circle and sine wave boundary and points lying inside them will be positive class, egative otherwise
D = numpy.array([])
Pos_Class = numpy.array([[0, 0]])
Neg_Class = numpy.array([[0, 0]])
for i in range(0, Input_Samples):
if X[i, 1] < 0.2 * numpy.sin(10 * X[i, 0]) + 0.3 or numpy.square(X[i, 0] - 0.5) + numpy.square(X[i, 1] - 0.8) < numpy.square(0.15):
D = numpy.concatenate((D, [1]), axis=0)
Pos_Class = numpy.concatenate((Pos_Class, [X[i]]), axis=0)
else:
D = numpy.concatenate((D, [-1]), axis=0)
Neg_Class = numpy.concatenate((Neg_Class, [X[i]]), axis=0)
# In[6]:
# Plot Results
plt.plot(Pos_Class[1:, 0], Pos_Class[1:, 1], 'b.', label=r'Class $C_1$')
plt.plot(Neg_Class[1:, 0], Neg_Class[1:, 1], 'r.', label=r'Class $C_{-1}$')
plt.show()
# In[7]:
# Required Functions
def Gaussian_Kernel(X1, X2, sigma):
return numpy.exp(-numpy.square(numpy.linalg.norm(X1 - X2))/(2 * numpy.square(sigma)))
def Polynomial_Kernel(X1, X2, order):
return (1 + numpy.dot(X1, X2))**order
# In[8]:
# Optimization Problem - Quadratic Programming
P = numpy.empty([Input_Samples, Input_Samples])
for i in range(0, Input_Samples):
for j in range(0, Input_Samples):
P[i, j] = D[i] * D[j] * Gaussian_Kernel(X[i], X[j], Gaussian_Std_Deviation)
# P[i, j] = D[i] * D[j] * Polynomial_Kernel(X[i], X[j], Order)
# Refer Documentation for Cvxopt Quadratic Programming for the meaning of the matrices
P = cvxopt.matrix(P)
q = cvxopt.matrix(numpy.ones((Input_Samples), dtype='double') * -1)
G = cvxopt.matrix(numpy.diag(numpy.ones((Input_Samples), dtype='double') * -1))
h = cvxopt.matrix(numpy.zeros(Input_Samples))
A = cvxopt.matrix(D, (1, Input_Samples))
b = cvxopt.matrix(0.0)
sol = cvxopt.solvers.qp(P, q, G, h, A, b)
# In[9]:
# Solution:
Alpha_Prime = numpy.ravel(numpy.array(sol['x']))
Alpha = numpy.array([0 if i < 1.0e-04 else i for i in Alpha_Prime]) # Making alpha's perfectly zeroes
SV_Pos_Class = numpy.array([[0, 0]]) # Support Vector for Positive Class
SV_Neg_Class = numpy.array([[0, 0]]) # Support Vecotr for Negative Class
SV_Label = numpy.array([]) # Labels for all Support Vectors, to decrease for loop time execution.
for i in range(0, Input_Samples):
if Alpha[i] != 0:
if D[i] == 1:
SV_Pos_Class = numpy.concatenate((SV_Pos_Class, [X[i]]), axis=0)
SV_Label = numpy.concatenate((SV_Label, [i]), axis=0)
else:
SV_Neg_Class = numpy.concatenate((SV_Neg_Class, [X[i]]), axis=0)
SV_Label = numpy.concatenate((SV_Label, [i]), axis=0)
for k in range(0, Input_Samples):
if Alpha[k] != 0:
W = 0
for i in numpy.nditer(SV_Label):
W = W + Alpha[int(i)] * D[int(i)] * Gaussian_Kernel(X[int(i)], X[k], Gaussian_Std_Deviation)
# W = W + Alpha[int(i)] * D[int(i)] * Polynomial_Kernel(X[int(i)], X[k], Order)
Theta = D[k] - W
break
# In[10]:
# Generate Boundary
x_points = numpy.linspace(0.0, 1.0, grid_size)
y_points = numpy.linspace(0.0, 1.0, grid_size)
H = numpy.array([[0, 0]]) # Decision Boundary
H_Plus = numpy.array([[0, 0]]) # Postivie Gutter
H_Minus = numpy.array([[0, 0]]) # Negative Gutter
for i in range(0, grid_size):
for j in range(0, grid_size):
Discriminant = 0
temp = numpy.array([x_points[i], y_points[j]])
for k in numpy.nditer(SV_Label):
Discriminant = Discriminant + Alpha[int(k)] * D[int(k)] * Gaussian_Kernel(X[int(k)], temp, Gaussian_Std_Deviation)
# Discriminant = Discriminant + Alpha[int(k)] * D[int(k)] * Polynomial_Kernel(X[int(k)], temp, Order)
Discriminant = Discriminant + Theta
if -0.1 < Discriminant < 0.1:
H = numpy.concatenate((H, [temp]), axis=0)
elif -1.1 < Discriminant < -0.9:
H_Minus = numpy.concatenate((H_Minus, [temp]), axis=0)
elif 0.9 < Discriminant < 1.1:
H_Plus = numpy.concatenate((H_Plus, [temp]), axis=0)
# In[11]:
# Plot Results
plt.plot(Pos_Class[1:, 0], Pos_Class[1:, 1], 'b.', label=r'Class $C_1$')
plt.plot(SV_Pos_Class[1:, 0], SV_Pos_Class[1:, 1], 'bd', label=r'$C_1$ Support Vectors')
plt.plot(Neg_Class[1:, 0], Neg_Class[1:, 1], 'r.', label=r'Class $C_{-1}$')
plt.plot(SV_Neg_Class[1:, 0], SV_Neg_Class[1:, 1], 'rd', label=r'$C_{-1}$ Support Vectors')
plt.scatter(H[1:, 0], H[1:, 1], s=0.1, c='g', label='Decision Boundary')
plt.scatter(H_Plus[1:, 0], H_Plus[1:, 1], s=0.1, c='b', label='Positive Gutter')
plt.scatter(H_Minus[1:, 0], H_Minus[1:, 1], s=0.1, c='r', label='Negative Gutter')
plt.xlabel(r'X Coordinate $\rightarrow$')
plt.ylabel(r'Y Coordinate $\rightarrow$')
plt.legend(loc='lower center', bbox_to_anchor=(0.5, 1), fancybox=True, shadow=True, ncol=3, borderpad=0.1, labelspacing=0.1)
plt.tight_layout()
plt.savefig('Results.pdf')
plt.show() | 0.801625 | 0.822795 |
from rllab.misc.instrument import VariantGenerator
from rllab import config
from rllab_maml.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab_maml.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from sandbox.ours.envs.normalized_env import normalize
from sandbox.ours.envs.base import TfEnv
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.ours.policies.maml_improved_gauss_mlp_policy import MAMLImprovedGaussianMLPPolicy
from sandbox.ours.dynamics.dynamics_ensemble import MLPDynamicsEnsemble
from sandbox.ours.algos.ModelMAML.model_maml_trpo import ModelMAMLTRPO
from experiments.helpers.ec2_helpers import cheapest_subnets
from rllab import config
from sandbox.ours.envs.own_envs import PointEnvMAML
from sandbox.ours.envs.mujoco import AntEnvRandParams, HalfCheetahEnvRandParams, HopperEnvRandParams, WalkerEnvRandomParams
from sandbox.ours.envs.mujoco import Reacher5DofEnvRandParams
import tensorflow as tf
import sys
import argparse
import random
import json
import os
def run_train_task(vv):
import sys
print(vv['exp_prefix'])
sysout_log_path = os.path.join(config.LOG_DIR, 'local', vv['exp_prefix'], vv['exp_name'], 'stdout.log')
sysout_log_file = open(sysout_log_path, 'w')
sys.stdout = sysout_log_file
env = TfEnv(normalize(vv['env'](log_scale_limit=vv['log_scale_limit'])))
dynamics_model = MLPDynamicsEnsemble(
name="dyn_model",
env_spec=env.spec,
hidden_sizes=vv['hidden_sizes_model'],
weight_normalization=vv['weight_normalization_model'],
num_models=vv['num_models'],
optimizer=vv['optimizer_model'],
valid_split_ratio=vv['valid_split_ratio'],
rolling_average_persitency=vv['rolling_average_persitency']
)
policy = MAMLImprovedGaussianMLPPolicy(
name="policy",
env_spec=env.spec,
hidden_sizes=vv['hidden_sizes_policy'],
hidden_nonlinearity=vv['hidden_nonlinearity_policy'],
grad_step_size=vv['fast_lr'],
trainable_step_size=vv['trainable_step_size'],
bias_transform=vv['bias_transform'],
param_noise_std=vv['param_noise_std']
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = ModelMAMLTRPO(
env=env,
policy=policy,
dynamics_model=dynamics_model,
baseline=baseline,
n_itr=vv['n_itr'],
n_iter=vv['n_itr'],
batch_size_env_samples=vv['batch_size_env_samples'],
batch_size_dynamics_samples=vv['batch_size_dynamics_samples'],
meta_batch_size=vv['meta_batch_size'],
initial_random_samples=vv['initial_random_samples'],
num_maml_steps_per_iter=vv['num_maml_steps_per_iter'],
reset_from_env_traj=vv.get('reset_from_env_traj', False),
max_path_length_env=vv['path_length_env'],
max_path_length_dyn=vv.get('path_length_dyn', None),
discount=vv['discount'],
step_size=vv["meta_step_size"],
num_grad_updates=1,
retrain_model_when_reward_decreases=vv['retrain_model_when_reward_decreases'],
reset_policy_std=vv['reset_policy_std'],
reinit_model_cycle=vv['reinit_model_cycle'],
frac_gpu=vv.get('frac_gpu', 0.85),
clip_obs=vv.get('clip_obs', True)
)
algo.train()
sysout_log_file.close()
def run_experiment(vargs):
# ----------------------- TRAINING ---------------------------------------
kwargs = json.load(open(vargs[1], 'r'))
exp_id = random.sample(range(1, 1000), 1)[0]
v = kwargs['variant']
exp_name = "model_ensemble_maml_train_env_%s_%i_%i_%i_%i_id_%i" % (v['env'], v['path_length_env'], v['num_models'],
v['batch_size_env_samples'], v['seed'], exp_id)
v = instantiate_class_stings(v)
kwargs['variant'] = v
run_experiment_lite(
run_train_task,
exp_name=exp_name,
**kwargs
)
def instantiate_class_stings(v):
v['env'] = globals()[v['env']]
# optimizer
if v['optimizer_model'] == 'sgd':
v['optimizer_model'] = tf.train.GradientDescentOptimizer
elif v['optimizer_model'] == 'adam':
v['optimizer_model'] = tf.train.AdamOptimizer
elif v['optimizer_model'] == 'momentum':
v['optimizer_model'] = tf.train.MomentumOptimizer
# nonlinearlity
for nonlinearity_key in ['hidden_nonlinearity_policy', 'hidden_nonlinearity_model']:
if v[nonlinearity_key] == 'relu':
v[nonlinearity_key] = tf.nn.relu
elif v[nonlinearity_key] == 'tanh':
v[nonlinearity_key] = tf.tanh
elif v[nonlinearity_key] == 'elu':
v[nonlinearity_key] = tf.nn.elu
else:
raise NotImplementedError('Not able to recognize spicified hidden_nonlinearity: %s' % v['hidden_nonlinearity'])
return v
if __name__ == "__main__":
run_experiment(sys.argv) | experiments/run_scripts/gpu-mb-mpo-train.py | from rllab.misc.instrument import VariantGenerator
from rllab import config
from rllab_maml.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab_maml.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from sandbox.ours.envs.normalized_env import normalize
from sandbox.ours.envs.base import TfEnv
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.ours.policies.maml_improved_gauss_mlp_policy import MAMLImprovedGaussianMLPPolicy
from sandbox.ours.dynamics.dynamics_ensemble import MLPDynamicsEnsemble
from sandbox.ours.algos.ModelMAML.model_maml_trpo import ModelMAMLTRPO
from experiments.helpers.ec2_helpers import cheapest_subnets
from rllab import config
from sandbox.ours.envs.own_envs import PointEnvMAML
from sandbox.ours.envs.mujoco import AntEnvRandParams, HalfCheetahEnvRandParams, HopperEnvRandParams, WalkerEnvRandomParams
from sandbox.ours.envs.mujoco import Reacher5DofEnvRandParams
import tensorflow as tf
import sys
import argparse
import random
import json
import os
def run_train_task(vv):
import sys
print(vv['exp_prefix'])
sysout_log_path = os.path.join(config.LOG_DIR, 'local', vv['exp_prefix'], vv['exp_name'], 'stdout.log')
sysout_log_file = open(sysout_log_path, 'w')
sys.stdout = sysout_log_file
env = TfEnv(normalize(vv['env'](log_scale_limit=vv['log_scale_limit'])))
dynamics_model = MLPDynamicsEnsemble(
name="dyn_model",
env_spec=env.spec,
hidden_sizes=vv['hidden_sizes_model'],
weight_normalization=vv['weight_normalization_model'],
num_models=vv['num_models'],
optimizer=vv['optimizer_model'],
valid_split_ratio=vv['valid_split_ratio'],
rolling_average_persitency=vv['rolling_average_persitency']
)
policy = MAMLImprovedGaussianMLPPolicy(
name="policy",
env_spec=env.spec,
hidden_sizes=vv['hidden_sizes_policy'],
hidden_nonlinearity=vv['hidden_nonlinearity_policy'],
grad_step_size=vv['fast_lr'],
trainable_step_size=vv['trainable_step_size'],
bias_transform=vv['bias_transform'],
param_noise_std=vv['param_noise_std']
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = ModelMAMLTRPO(
env=env,
policy=policy,
dynamics_model=dynamics_model,
baseline=baseline,
n_itr=vv['n_itr'],
n_iter=vv['n_itr'],
batch_size_env_samples=vv['batch_size_env_samples'],
batch_size_dynamics_samples=vv['batch_size_dynamics_samples'],
meta_batch_size=vv['meta_batch_size'],
initial_random_samples=vv['initial_random_samples'],
num_maml_steps_per_iter=vv['num_maml_steps_per_iter'],
reset_from_env_traj=vv.get('reset_from_env_traj', False),
max_path_length_env=vv['path_length_env'],
max_path_length_dyn=vv.get('path_length_dyn', None),
discount=vv['discount'],
step_size=vv["meta_step_size"],
num_grad_updates=1,
retrain_model_when_reward_decreases=vv['retrain_model_when_reward_decreases'],
reset_policy_std=vv['reset_policy_std'],
reinit_model_cycle=vv['reinit_model_cycle'],
frac_gpu=vv.get('frac_gpu', 0.85),
clip_obs=vv.get('clip_obs', True)
)
algo.train()
sysout_log_file.close()
def run_experiment(vargs):
# ----------------------- TRAINING ---------------------------------------
kwargs = json.load(open(vargs[1], 'r'))
exp_id = random.sample(range(1, 1000), 1)[0]
v = kwargs['variant']
exp_name = "model_ensemble_maml_train_env_%s_%i_%i_%i_%i_id_%i" % (v['env'], v['path_length_env'], v['num_models'],
v['batch_size_env_samples'], v['seed'], exp_id)
v = instantiate_class_stings(v)
kwargs['variant'] = v
run_experiment_lite(
run_train_task,
exp_name=exp_name,
**kwargs
)
def instantiate_class_stings(v):
v['env'] = globals()[v['env']]
# optimizer
if v['optimizer_model'] == 'sgd':
v['optimizer_model'] = tf.train.GradientDescentOptimizer
elif v['optimizer_model'] == 'adam':
v['optimizer_model'] = tf.train.AdamOptimizer
elif v['optimizer_model'] == 'momentum':
v['optimizer_model'] = tf.train.MomentumOptimizer
# nonlinearlity
for nonlinearity_key in ['hidden_nonlinearity_policy', 'hidden_nonlinearity_model']:
if v[nonlinearity_key] == 'relu':
v[nonlinearity_key] = tf.nn.relu
elif v[nonlinearity_key] == 'tanh':
v[nonlinearity_key] = tf.tanh
elif v[nonlinearity_key] == 'elu':
v[nonlinearity_key] = tf.nn.elu
else:
raise NotImplementedError('Not able to recognize spicified hidden_nonlinearity: %s' % v['hidden_nonlinearity'])
return v
if __name__ == "__main__":
run_experiment(sys.argv) | 0.478041 | 0.212355 |
import datetime
import time
class EarthquakeUSGS:
"""
@brief Class that holds earthquake data records.
Class that hold earthquake data, for use with USGIS retrieved quake data.
BRIDGES uses scripts to
continually monitor USGIS site (tweets) and retrieve the latest
quake data for use in student projects.
This object is generally not created by the user, to see how its created check
out bridges::data_src_dependent::data_source::get_earthquake_usgs_data()
@sa For an example, check out https://bridgesuncc.github.io/tutorials/Data_EQ_USGS.html
@author <NAME>, <NAME>,
@date 2/18/18, 12/29/20, 1/6/21
"""
def __set_time_from_unix_timestamp(self, tm):
epoch_time = int(tm)
eq_time = epoch_time / 1000
eqt = time.gmtime(eq_time)
self._time = time.strftime("%Y-%m-%d %H:%M:%S", eqt)
def __init__(self, magnitude=None, longit=None, latit=None, location=None,
title=None, url=None, time=None):
"""
@brief constructor
Args:
magnitude: magnitude of quake
latit: latitude position
longit: longitude position
location: location of quake
title: title (has some of eq info in a string)
url: url for more information
time: occurrence time of quake
"""
self._time = int()
if magnitude is not None:
self._magnitude = magnitude
else:
self._magnitude = 0.0
if longit is not None:
self._longit = longit
else:
self._longit = 0.0
if latit is not None:
self._latit = latit
else:
self._latit = 0.0
if location is not None:
self._location = location
else:
self._location = ""
if title is not None:
self._title = title
else:
self._title = ""
if url is not None:
self._url = url
else:
self._url = ""
if time is not None:
self.time = time
@property
def time(self):
"""
@brief Get occurrence time (epoch) of quake
Returns:
Quake occurrence time
"""
return self._time
@time.setter
def time(self, tm) -> None:
"""
@brief Set occurrence time (epoch) of quake
Args:
tm: Quake occurrence time to set
"""
self.__set_time_from_unix_timestamp(tm)
@property
def latit(self) -> float:
"""
@brief Get latitude of quake
Returns:
Quake latitude
"""
return self._latit
@latit.setter
def latit(self, latit: float) -> None:
"""
@brief Set latitude of quake
Args:
latit: quake latitude to set
"""
self._latit = latit
@property
def longit(self) -> float:
"""
@brief Get longitude of quake
Returns:
Quake longitude
"""
return self._longit
@longit.setter
def longit(self, longit: float) -> None:
"""
@brief Set longitude of quake
Args:
longit: quake longitude to set
"""
self._longit = longit
@property
def location(self) -> str:
"""
@brief Get location of quake (typically a city or something of the sort)
Returns:
Quake location
"""
return self._location
@location.setter
def location(self, location: str):
"""
@brief Set location of quake
Args:
location: quake location to set
"""
self._location = location
@property
def title(self) -> str:
"""
@brief Get quake title
Returns:
Quake title
"""
return self._title
@title.setter
def title(self, title: str):
"""
@brief Set title of quake
Args:
title: quake title to set
"""
self._title = title
@property
def url(self) -> str:
"""
@brief Get quake url
Returns:
Quake url
"""
return self._url
@url.setter
def url(self, url: str):
"""
@brief Set url of quake
Args:
url: quake url to set
"""
self._url = url
@property
def magnitude(self) -> float:
"""
@brief Get quake magnitude (Richter scale)
Returns:
Quake magnitude
"""
return self._magnitude
@magnitude.setter
def magnitude(self, magn: float):
"""
Setter for the magnitude of the quake
Args:
magn: magnitude to set
"""
self._magnitude = magnitude | bridges/data_src_dependent/earthquake_usgs.py | import datetime
import time
class EarthquakeUSGS:
"""
@brief Class that holds earthquake data records.
Class that hold earthquake data, for use with USGIS retrieved quake data.
BRIDGES uses scripts to
continually monitor USGIS site (tweets) and retrieve the latest
quake data for use in student projects.
This object is generally not created by the user, to see how its created check
out bridges::data_src_dependent::data_source::get_earthquake_usgs_data()
@sa For an example, check out https://bridgesuncc.github.io/tutorials/Data_EQ_USGS.html
@author <NAME>, <NAME>,
@date 2/18/18, 12/29/20, 1/6/21
"""
def __set_time_from_unix_timestamp(self, tm):
epoch_time = int(tm)
eq_time = epoch_time / 1000
eqt = time.gmtime(eq_time)
self._time = time.strftime("%Y-%m-%d %H:%M:%S", eqt)
def __init__(self, magnitude=None, longit=None, latit=None, location=None,
title=None, url=None, time=None):
"""
@brief constructor
Args:
magnitude: magnitude of quake
latit: latitude position
longit: longitude position
location: location of quake
title: title (has some of eq info in a string)
url: url for more information
time: occurrence time of quake
"""
self._time = int()
if magnitude is not None:
self._magnitude = magnitude
else:
self._magnitude = 0.0
if longit is not None:
self._longit = longit
else:
self._longit = 0.0
if latit is not None:
self._latit = latit
else:
self._latit = 0.0
if location is not None:
self._location = location
else:
self._location = ""
if title is not None:
self._title = title
else:
self._title = ""
if url is not None:
self._url = url
else:
self._url = ""
if time is not None:
self.time = time
@property
def time(self):
"""
@brief Get occurrence time (epoch) of quake
Returns:
Quake occurrence time
"""
return self._time
@time.setter
def time(self, tm) -> None:
"""
@brief Set occurrence time (epoch) of quake
Args:
tm: Quake occurrence time to set
"""
self.__set_time_from_unix_timestamp(tm)
@property
def latit(self) -> float:
"""
@brief Get latitude of quake
Returns:
Quake latitude
"""
return self._latit
@latit.setter
def latit(self, latit: float) -> None:
"""
@brief Set latitude of quake
Args:
latit: quake latitude to set
"""
self._latit = latit
@property
def longit(self) -> float:
"""
@brief Get longitude of quake
Returns:
Quake longitude
"""
return self._longit
@longit.setter
def longit(self, longit: float) -> None:
"""
@brief Set longitude of quake
Args:
longit: quake longitude to set
"""
self._longit = longit
@property
def location(self) -> str:
"""
@brief Get location of quake (typically a city or something of the sort)
Returns:
Quake location
"""
return self._location
@location.setter
def location(self, location: str):
"""
@brief Set location of quake
Args:
location: quake location to set
"""
self._location = location
@property
def title(self) -> str:
"""
@brief Get quake title
Returns:
Quake title
"""
return self._title
@title.setter
def title(self, title: str):
"""
@brief Set title of quake
Args:
title: quake title to set
"""
self._title = title
@property
def url(self) -> str:
"""
@brief Get quake url
Returns:
Quake url
"""
return self._url
@url.setter
def url(self, url: str):
"""
@brief Set url of quake
Args:
url: quake url to set
"""
self._url = url
@property
def magnitude(self) -> float:
"""
@brief Get quake magnitude (Richter scale)
Returns:
Quake magnitude
"""
return self._magnitude
@magnitude.setter
def magnitude(self, magn: float):
"""
Setter for the magnitude of the quake
Args:
magn: magnitude to set
"""
self._magnitude = magnitude | 0.9024 | 0.476092 |
import yfinance as yf
import matplotlib.pyplot as plt
import collections
import pandas as pd
import numpy as np
import cvxpy as cp
import efficient_frontier
import param_estimator
import backtest
import objective_functions
def port_opt(stock_picks, weight_constraints, control, trade_horizon, cardinality, target_return, risk_aversion):
    """Build and solve a multi-period portfolio optimization over the picked
    stocks plus a fixed ETF universe.

    Pipeline: load monthly training returns for the ETFs and the stock picks,
    fit an ElasticNet Fama-French 5-factor model per asset, forecast the
    factors with ARIMA-GARCH over ``trade_horizon`` months, then solve an
    efficient-frontier problem targeting ``target_return`` subject to
    per-stock weight bounds and a minimum total allocation to the picks.

    Args:
        stock_picks: list of tickers for the individually selected stocks.
        weight_constraints: dict mapping ticker -> (min_pct, max_pct) weight
            bounds expressed in percent (0-100).
        control: minimum combined portfolio weight of the stock picks
            (fraction, 0-1).
        trade_horizon: number of monthly periods to optimize over.
        cardinality: universe-size switch; currently both branches select the
            same ETF list.
        target_return: target return passed to ``ef.efficient_return``.
        risk_aversion: unused here; kept for interface compatibility.

    Returns:
        Tuple ``(proper_weights, ret_exp)`` where ``proper_weights`` maps
        ticker -> optimized weight and ``ret_exp`` maps month (1-based) ->
        compounded expected growth factor.
    """
    selected_etfs = ['IWD', 'IYH', 'IYW', 'MDY', 'EWT', 'XLE', 'EWZ', 'EWY', 'IWB', 'EZU']
    if cardinality >= 20:
        # NOTE(review): both branches assign the identical ETF list; a larger
        # universe was presumably intended for cardinality >= 20 -- confirm.
        selected_etfs = ['IWD', 'IYH', 'IYW', 'MDY', 'EWT', 'XLE', 'EWZ', 'EWY', 'IWB', 'EZU']
    num_stocks = len(stock_picks)
    train_start, train_end = '2016-12-01', '2021-11-30'

    # --- monthly adjusted-close returns for the ETF universe ---
    etf_table = 'americanetfs'
    etf_tickers = selected_etfs
    etf_returns_by_tick = []
    for tick in etf_tickers:
        returns = param_estimator.get_returns(tick, etf_table, train_start, train_end, freq='monthly')
        if returns.empty:
            continue
        returns[tick] = returns['adj_close']
        etf_returns_by_tick += [returns[[tick]]]
    # Transpose/dropna/transpose keeps only dates present for every ETF.
    etf_returns = pd.concat(etf_returns_by_tick, axis=1).T.dropna()
    train_etf_returns = etf_returns.T

    # --- monthly adjusted-close returns for the individual stock picks ---
    etf_table = 'spy'
    print(stock_picks)
    stock_returns_by_tick = []
    for tick in stock_picks:
        returns = param_estimator.get_returns(tick, etf_table, train_start, train_end, freq='monthly')
        if returns.empty:
            continue
        returns[tick] = returns['adj_close']
        stock_returns_by_tick += [returns[[tick]]]
    stock_returns = pd.concat(stock_returns_by_tick, axis=1).T.dropna()
    train_stock_returns = stock_returns.T

    # Fama-French 5-factor data over the training window (YYYYMM ints).
    train_factors = param_estimator.get_factors(start=int(train_start[0:4] + train_start[5:7]),
                                                end=int(train_end[0:4] + train_end[5:7]), freq='monthly')
    asset_universe = stock_picks + selected_etfs
    train_returns = pd.concat([train_stock_returns, train_etf_returns], axis=1)

    # Historical-average estimation; only the sample covariance is used below.
    mu, sample_cov = backtest.historical_avg(train_returns, 12 * 5, 12)
    print(sample_cov)

    # Fit one ElasticNet 5-factor model per asset on excess returns.
    factor_model = dict()
    for tick in asset_universe:
        merged = pd.merge(train_factors, train_returns[[tick]], left_on='date', right_on='date', how="inner",
                          sort=False)
        ff5 = merged[['excess', 'smb', 'hml', 'rmw', 'cma']]
        # Convert raw returns to excess returns over the risk-free rate.
        merged[tick] = merged[tick] - merged['riskfree'].astype('float')/100.0
        adj_returns = merged[[tick]]
        alpha = 1e-1
        l1_ratio = 0.05
        en5, en_r_sq5 = param_estimator.EN(ff5, adj_returns[tick], alpha=alpha, l1_ratio=l1_ratio)
        factor_model[tick] = en5

    # Forecast the factor series over the trade horizon with ARIMA-GARCH.
    ag = param_estimator.arima_garch(train_factors[['excess', 'smb', 'hml', 'rmw', 'cma']], trade_horizon=trade_horizon,
                                     columns=['excess', 'smb', 'hml', 'rmw', 'cma'])

    # Per-month expected return for every asset from the factor forecasts.
    mu_factor = []
    for month in range(trade_horizon):
        mu_month = []
        for tick in asset_universe:
            # NOTE(review): `month` starts at 0, so [month-1] reads index -1
            # (the last forecast) in the first iteration -- confirm whether
            # this loop should run over range(1, trade_horizon + 1).
            data = [ag[factor_name][1][month-1] for factor_name in ['excess', 'smb', 'hml', 'rmw', 'cma']]
            pred = factor_model[tick].predict(np.array(data).reshape(1, -1))
            mu_month.append(pred[0])
        mu_factor.append(mu_month)
        print(mu_month)
    mu_factor = [pd.Series(mu_factor[i]) for i in range(trade_horizon)]
    print(mu_factor)
    print(sample_cov)

    ef = efficient_frontier.EfficientFrontier(mu_factor, sample_cov, trade_horizon=trade_horizon)
    # ef.add_objective(objective_functions.transaction_cost, w_prev=np.zeros(len(asset_universe)), k=0.001)

    # Per-stock box constraints on the first-period weights. The bound
    # vectors are bound as lambda DEFAULT ARGUMENTS: a plain closure would
    # late-bind them, so every stored constraint would evaluate against the
    # last iteration's vectors and all earlier stocks' bounds would be lost.
    # (Also avoids shadowing the `min`/`max` builtins.)
    for i in range(num_stocks):
        lower = np.zeros(shape=len(asset_universe))
        upper = np.ones(shape=len(asset_universe))
        lower[i] = weight_constraints[asset_universe[i]][0]/100.0
        upper[i] = weight_constraints[asset_universe[i]][1]/100.0
        ef.add_constraint(lambda w, lo=lower: w >= lo, broadcast=False, var_list=[0])
        ef.add_constraint(lambda w, hi=upper: w <= hi, broadcast=False, var_list=[0])

    # Require at least `control` combined weight in the stock picks.
    card = np.zeros(shape=len(asset_universe))
    for i in range(num_stocks):
        card[i] = 1
    ef.add_constraint(lambda w: card @ w >= control, broadcast=False, var_list=[0])
    print(ef.n_assets)
    print(ef.trade_horizon)
    print(ef.cov_matrices)
    print(ef.expected_returns)

    ef.efficient_return(target_return=target_return)
    weights = ef.clean_weights()
    print(weights)
    new_weights = dict(weights)

    # Map index-keyed optimizer weights back to tickers.
    proper_weights = {}
    for key in new_weights.keys():
        proper_weights[asset_universe[key]] = weights[key]
    print(proper_weights)
    weights = pd.DataFrame.from_dict(new_weights, orient='index')

    # Expected portfolio return per month, then compounded cumulatively so
    # ret_exp[m] is the expected growth factor after m months (1-based keys).
    # NOTE(review): same [month-1] indexing caveat as above.
    exp_returns = {month: np.dot(mu_factor[month-1], weights) for month in range(trade_horizon)}
    ret_exp = {}
    for key in exp_returns.keys():
        ret_exp[key+1] = (1 + exp_returns[key][0])
    for key in ret_exp.keys():
        if key != 1:
            ret_exp[key] = ret_exp[key]*ret_exp[key-1]
    return proper_weights, ret_exp
import matplotlib.pyplot as plt
import collections
import pandas as pd
import numpy as np
import cvxpy as cp
import efficient_frontier
import param_estimator
import backtest
import objective_functions
def port_opt(stock_picks, weight_constraints, control, trade_horizon, cardinality, target_return, risk_aversion):
    """Run a multi-period factor-model portfolio optimization.

    Args:
        stock_picks: List of stock tickers chosen by the user.
        weight_constraints: Dict mapping ticker -> (min_pct, max_pct) weight
            bounds expressed in percent (0-100).
        control: Minimum total weight that must go to the stock (non-ETF)
            part of the universe.
        trade_horizon: Number of monthly periods to optimize over.
        cardinality: Requested cardinality; currently both branches select
            the same ETF list (see NOTE below).
        target_return: Target return handed to the efficient-frontier solver.
        risk_aversion: Currently unused by this routine.

    Returns:
        Tuple (proper_weights, ret_exp): mapping ticker -> optimized weight,
        and a dict of cumulative expected gross returns keyed by 1-based month.
    """
    selected_etfs = ['IWD', 'IYH', 'IYW', 'MDY', 'EWT', 'XLE', 'EWZ', 'EWY', 'IWB', 'EZU']
    if cardinality >= 20:
        # NOTE(review): both branches assign the identical list, so this is a
        # no-op -- confirm whether a different ETF set was intended here.
        selected_etfs = ['IWD', 'IYH', 'IYW', 'MDY', 'EWT', 'XLE', 'EWZ', 'EWY', 'IWB', 'EZU']
    num_stocks = len(stock_picks)
    train_start, train_end = '2016-12-01', '2021-11-30'
    # Monthly training returns for the preselected ETFs.
    etf_table = 'americanetfs'
    etf_tickers = selected_etfs
    etf_returns_by_tick = []
    for tick in etf_tickers:
        returns = param_estimator.get_returns(tick, etf_table, train_start, train_end, freq='monthly')
        if returns.empty:
            continue
        returns[tick] = returns['adj_close']
        etf_returns_by_tick += [returns[[tick]]]
    etf_returns = pd.concat(etf_returns_by_tick, axis=1).T.dropna()
    train_etf_returns = etf_returns.T
    # Monthly training returns for the user's stock picks.
    etf_table = 'spy'
    print(stock_picks)
    stock_returns_by_tick = []
    for tick in stock_picks:
        returns = param_estimator.get_returns(tick, etf_table, train_start, train_end, freq='monthly')
        if returns.empty:
            continue
        returns[tick] = returns['adj_close']
        stock_returns_by_tick += [returns[[tick]]]
    stock_returns = pd.concat(stock_returns_by_tick, axis=1).T.dropna()
    train_stock_returns = stock_returns.T
    # Fama-French factors for the training window (YYYYMM integer bounds).
    train_factors = param_estimator.get_factors(start=int(train_start[0:4] + train_start[5:7]),
                                                end=int(train_end[0:4] + train_end[5:7]), freq='monthly')
    asset_universe = stock_picks + selected_etfs
    train_returns = pd.concat([train_stock_returns, train_etf_returns], axis=1)
    # Historical average parameter estimation (mean unused; covariance kept).
    mu, sample_cov = backtest.historical_avg(train_returns, 12 * 5, 12)
    print(sample_cov)
    # Fit one elastic-net FF5 factor model per asset on risk-free-adjusted returns.
    factor_model = dict()
    for tick in asset_universe:
        merged = pd.merge(train_factors, train_returns[[tick]], left_on='date', right_on='date', how="inner",
                          sort=False)
        ff5 = merged[['excess', 'smb', 'hml', 'rmw', 'cma']]
        merged[tick] = merged[tick] - merged['riskfree'].astype('float')/100.0
        adj_returns = merged[[tick]]
        alpha = 1e-1
        l1_ratio = 0.05
        en5, en_r_sq5 = param_estimator.EN(ff5, adj_returns[tick], alpha=alpha, l1_ratio=l1_ratio)
        factor_model[tick] = en5
    # ARIMA-GARCH forecasts of the factor series over the trade horizon.
    ag = param_estimator.arima_garch(train_factors[['excess', 'smb', 'hml', 'rmw', 'cma']], trade_horizon=trade_horizon,
                                     columns=['excess', 'smb', 'hml', 'rmw', 'cma'])
    mu_factor = []
    for month in range(trade_horizon):
        mu_month = []
        for tick in asset_universe:
            # TODO(review): `month-1` maps month 0 to index -1 (the last
            # forecast) -- verify the intended forecast alignment.
            data = [ag[factor_name][1][month-1] for factor_name in ['excess', 'smb', 'hml', 'rmw', 'cma']]
            mu = factor_model[tick].predict(np.array(data).reshape(1, -1))
            mu_month.append(mu[0])
        mu_factor.append(mu_month)
        print(mu_month)
    mu_factor = [pd.Series(mu_factor[i]) for i in range(trade_horizon)]
    print(mu_factor)
    print(sample_cov)
    ef = efficient_frontier.EfficientFrontier(mu_factor, sample_cov, trade_horizon=trade_horizon)
    # ef.add_objective(objective_functions.transaction_cost, w_prev=np.zeros(len(asset_universe)), k=0.001)
    for i in range(num_stocks):
        lo = np.zeros(shape=len(asset_universe))
        hi = np.ones(shape=len(asset_universe))
        lo[i] = weight_constraints[asset_universe[i]][0]/100.0
        hi[i] = weight_constraints[asset_universe[i]][1]/100.0
        # Bind the bound arrays as default arguments: a plain closure captures
        # the loop variables late, so every constraint would otherwise use
        # only the final stock's bounds. (Also renamed from min/max, which
        # shadowed the builtins.)
        ef.add_constraint(lambda w, lo=lo: w >= lo, broadcast=False, var_list=[0])
        ef.add_constraint(lambda w, hi=hi: w <= hi, broadcast=False, var_list=[0])
    # Indicator vector over the stock portion of the universe: total stock
    # allocation must be at least `control`.
    card = np.zeros(shape=len(asset_universe))
    for i in range(num_stocks):
        card[i] = 1
    ef.add_constraint(lambda w: card @ w >= control, broadcast=False, var_list=[0])
    print(ef.n_assets)
    print(ef.trade_horizon)
    print(ef.cov_matrices)
    print(ef.expected_returns)
    ef.efficient_return(target_return=target_return)
    weights = ef.clean_weights()
    print(weights)
    new_weights = dict(weights)
    # Translate positional keys back to tickers for the caller.
    proper_weights = {}
    for key in new_weights.keys():
        proper_weights[asset_universe[key]] = weights[key]
    print(proper_weights)
    weights = pd.DataFrame.from_dict(new_weights, orient='index')
    # Cumulative expected gross return path over the horizon (1-based keys).
    exp_returns = {month: np.dot(mu_factor[month-1], weights) for month in range(trade_horizon)}
    ret_exp = {}
    for key in exp_returns.keys():
        ret_exp[key+1] = (1 + exp_returns[key][0])
    for key in ret_exp.keys():
        if key != 1:
            ret_exp[key] = ret_exp[key]*ret_exp[key-1]
    return proper_weights, ret_exp
import os
import time
import queue
import demomgr.constants as CNST
from demomgr.filterlogic import process_filterstring, FILTERFLAGS
from demomgr.helpers import readdemoheader
from demomgr.threads.read_folder import ThreadReadFolder
from demomgr.threads._threadsig import THREADSIG
from demomgr.threads._base import _StoppableBaseThread
class ThreadFilter(_StoppableBaseThread):
    """
    Thread to filter a directory of demos.

    Fetches demo information through a nested ThreadReadFolder, applies the
    filter lambdas parsed from the user's filter string to every demo and
    finally emits the filtered demo data followed by a finish signal.
    """

    REQUIRED_CFG_KEYS = ThreadReadFolder.REQUIRED_CFG_KEYS

    def __init__(self, queue_out, filterstring, curdir, cfg, silent = False):
        """
        Thread requires output queue and the following args:
            filterstring <Str>: Raw user input from the entry field
            curdir <Str>: Absolute path to current directory
            cfg <Dict>: Program configuration, reduced to cls.REQUIRED_CFG_KEYS
            silent <Bool>: If True, thread will not drop progress messages
        """
        self.filterstring = filterstring
        self.curdir = curdir
        self.cfg = cfg
        self.silent = silent
        super().__init__(None, queue_out)

    def run(self):
        starttime = time.time()
        # Step 1: parse the raw filter string into predicate lambdas + flags.
        self.queue_out_put(THREADSIG.INFO_STATUSBAR, ("Filtering demos; Parsing filter...", ))
        try:
            filters, flags = process_filterstring(self.filterstring)
        except Exception as error:
            self.queue_out_put(
                THREADSIG.INFO_STATUSBAR, (f"Error parsing filter request: {error}", 4000)
            )
            self.queue_out_put(THREADSIG.FAILURE); return
        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return
        if not self.silent:
            self.queue_out_put(
                THREADSIG.INFO_STATUSBAR, ("Filtering demos; Reading information...", )
            )
        # Step 2: run a ThreadReadFolder to collect demo data for curdir.
        self.datafetcherqueue = queue.Queue()
        self.datafetcherthread = ThreadReadFolder(
            self.datafetcherqueue, targetdir = self.curdir, cfg = self.cfg
        )
        self.datafetcherthread.start()
        # NOTE: Can't really wait for join to this thread here.
        self.datafetcherthread.join(None, nostop = True)
        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return
        # Drain the fetcher queue: keep the demo data payload, abort on the
        # fetcher's failure signal (signals < 0x100 are finish signals).
        demo_data = None
        while True:
            try:
                queueobj = self.datafetcherqueue.get_nowait()
                if queueobj[0] == THREADSIG.RESULT_DEMODATA:
                    demo_data = queueobj[1]
                elif queueobj[0] < 0x100: # Finish signal
                    if queueobj[0] == THREADSIG.FAILURE:
                        self.queue_out_put(
                            THREADSIG.INFO_STATUSBAR,
                            ("Demo fetching thread failed unexpectedly during filtering.", 4000)
                        )
                        self.queue_out_put(THREADSIG.FAILURE); return
                    break
            except queue.Empty:
                break
        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return
        # Step 3: apply every filter predicate to each demo.
        filtered_demo_data = {
            "col_filename": [], "col_ks": [], "col_bm": [], "col_ctime": [], "col_filesize": []
        }
        file_amnt = len(demo_data["col_filename"])
        for i, j in enumerate(demo_data["col_filename"]): # Filter
            if not self.silent:
                self.queue_out_put(
                    THREADSIG.INFO_STATUSBAR, (f"Filtering demos; {i+1} / {file_amnt}", )
                )
            curdataset = {
                "name": j,
                "killstreaks": () if demo_data["col_ks"][i] is None else demo_data["col_ks"][i],
                "bookmarks": () if demo_data["col_bm"][i] is None else demo_data["col_bm"][i],
                "header": None,
                "filedata": {
                    "filesize": demo_data["col_filesize"][i],
                    "modtime": demo_data["col_ctime"][i],
                },
            }
            if flags & FILTERFLAGS.HEADER:
                # Only read the comparatively expensive demo header when a
                # filter actually requires it.
                try:
                    curdataset["header"] = readdemoheader(os.path.join(self.curdir, j))
                except (FileNotFoundError, PermissionError, OSError):
                    # NOTE(review): `break` aborts filtering of all remaining
                    # demos on the first unreadable header; `continue` may be
                    # the intended behavior -- confirm before changing.
                    break
            if all(lambda_(curdataset) for lambda_ in filters):
                filtered_demo_data["col_filename"].append(j)
                filtered_demo_data["col_ks"      ].append(demo_data["col_ks"][i])
                filtered_demo_data["col_bm"      ].append(demo_data["col_bm"][i])
                filtered_demo_data["col_ctime"   ].append(demo_data["col_ctime"][i])
                filtered_demo_data["col_filesize"].append(demo_data["col_filesize"][i])
        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return
        self.queue_out_put(
            THREADSIG.INFO_STATUSBAR,
            (f"Filtered {file_amnt} demos in {round(time.time() - starttime, 3)} seconds.", 3000)
        )
        self.queue_out_put(THREADSIG.RESULT_DEMODATA, filtered_demo_data)
        self.queue_out_put(THREADSIG.SUCCESS)
import time
import queue
import demomgr.constants as CNST
from demomgr.filterlogic import process_filterstring, FILTERFLAGS
from demomgr.helpers import readdemoheader
from demomgr.threads.read_folder import ThreadReadFolder
from demomgr.threads._threadsig import THREADSIG
from demomgr.threads._base import _StoppableBaseThread
class ThreadFilter(_StoppableBaseThread):
    """
    Thread to filter a directory of demos.

    Fetches demo information through a nested ThreadReadFolder, applies the
    filter lambdas parsed from the user's filter string to every demo and
    finally emits the filtered demo data followed by a finish signal.
    """

    REQUIRED_CFG_KEYS = ThreadReadFolder.REQUIRED_CFG_KEYS

    def __init__(self, queue_out, filterstring, curdir, cfg, silent = False):
        """
        Thread requires output queue and the following args:
            filterstring <Str>: Raw user input from the entry field
            curdir <Str>: Absolute path to current directory
            cfg <Dict>: Program configuration, reduced to cls.REQUIRED_CFG_KEYS
            silent <Bool>: If True, thread will not drop progress messages
        """
        self.filterstring = filterstring
        self.curdir = curdir
        self.cfg = cfg
        self.silent = silent
        super().__init__(None, queue_out)

    def run(self):
        starttime = time.time()
        # Step 1: parse the raw filter string into predicate lambdas + flags.
        self.queue_out_put(THREADSIG.INFO_STATUSBAR, ("Filtering demos; Parsing filter...", ))
        try:
            filters, flags = process_filterstring(self.filterstring)
        except Exception as error:
            self.queue_out_put(
                THREADSIG.INFO_STATUSBAR, (f"Error parsing filter request: {error}", 4000)
            )
            self.queue_out_put(THREADSIG.FAILURE); return
        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return
        if not self.silent:
            self.queue_out_put(
                THREADSIG.INFO_STATUSBAR, ("Filtering demos; Reading information...", )
            )
        # Step 2: run a ThreadReadFolder to collect demo data for curdir.
        self.datafetcherqueue = queue.Queue()
        self.datafetcherthread = ThreadReadFolder(
            self.datafetcherqueue, targetdir = self.curdir, cfg = self.cfg
        )
        self.datafetcherthread.start()
        # NOTE: Can't really wait for join to this thread here.
        self.datafetcherthread.join(None, nostop = True)
        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return
        # Drain the fetcher queue: keep the demo data payload, abort on the
        # fetcher's failure signal (signals < 0x100 are finish signals).
        demo_data = None
        while True:
            try:
                queueobj = self.datafetcherqueue.get_nowait()
                if queueobj[0] == THREADSIG.RESULT_DEMODATA:
                    demo_data = queueobj[1]
                elif queueobj[0] < 0x100: # Finish signal
                    if queueobj[0] == THREADSIG.FAILURE:
                        self.queue_out_put(
                            THREADSIG.INFO_STATUSBAR,
                            ("Demo fetching thread failed unexpectedly during filtering.", 4000)
                        )
                        self.queue_out_put(THREADSIG.FAILURE); return
                    break
            except queue.Empty:
                break
        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return
        # Step 3: apply every filter predicate to each demo.
        filtered_demo_data = {
            "col_filename": [], "col_ks": [], "col_bm": [], "col_ctime": [], "col_filesize": []
        }
        file_amnt = len(demo_data["col_filename"])
        for i, j in enumerate(demo_data["col_filename"]): # Filter
            if not self.silent:
                self.queue_out_put(
                    THREADSIG.INFO_STATUSBAR, (f"Filtering demos; {i+1} / {file_amnt}", )
                )
            curdataset = {
                "name": j,
                "killstreaks": () if demo_data["col_ks"][i] is None else demo_data["col_ks"][i],
                "bookmarks": () if demo_data["col_bm"][i] is None else demo_data["col_bm"][i],
                "header": None,
                "filedata": {
                    "filesize": demo_data["col_filesize"][i],
                    "modtime": demo_data["col_ctime"][i],
                },
            }
            if flags & FILTERFLAGS.HEADER:
                # Only read the comparatively expensive demo header when a
                # filter actually requires it.
                try:
                    curdataset["header"] = readdemoheader(os.path.join(self.curdir, j))
                except (FileNotFoundError, PermissionError, OSError):
                    # NOTE(review): `break` aborts filtering of all remaining
                    # demos on the first unreadable header; `continue` may be
                    # the intended behavior -- confirm before changing.
                    break
            if all(lambda_(curdataset) for lambda_ in filters):
                filtered_demo_data["col_filename"].append(j)
                filtered_demo_data["col_ks"      ].append(demo_data["col_ks"][i])
                filtered_demo_data["col_bm"      ].append(demo_data["col_bm"][i])
                filtered_demo_data["col_ctime"   ].append(demo_data["col_ctime"][i])
                filtered_demo_data["col_filesize"].append(demo_data["col_filesize"][i])
        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return
        self.queue_out_put(
            THREADSIG.INFO_STATUSBAR,
            (f"Filtered {file_amnt} demos in {round(time.time() - starttime, 3)} seconds.", 3000)
        )
        self.queue_out_put(THREADSIG.RESULT_DEMODATA, filtered_demo_data)
        self.queue_out_put(THREADSIG.SUCCESS)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import (is_scalar, is_list_like, is_bool)
from pandas.core.dtypes.common import is_integer
from pandas.core.indexing import IndexingError
import numpy as np
import ray
from warnings import warn
from .utils import (_get_nan_block_id, extractor, _mask_block_partitions,
writer, _blocks_to_series)
from .index_metadata import _IndexMetadata
from .dataframe import DataFrame
"""Indexing Helper Class works as follows:
_Location_Indexer_Base provide methods framework for __getitem__
and __setitem__ that work with Ray DataFrame's internal index. Base
class's __{get,set}item__ takes in partitions & idx_in_partition data
and perform lookup/item write.
_LocIndexer and _iLocIndexer are responsible for indexer-specific logic and
lookup computation. Loc additionally takes care of enlarging the DataFrame.
Both indexers translate pandas-style lookups into Ray DataFrame's internal
lookup.
An illustration is available at
https://github.com/ray-project/ray/pull/1955#issuecomment-386781826
"""
def is_slice(x):
    """Return True when *x* is a built-in slice object."""
    return isinstance(x, slice)
def is_2d(x):
    """True for indexers that may select multiple positions: list-likes and slices."""
    return is_slice(x) or is_list_like(x)
def is_tuple(x):
    """Return True when *x* is a tuple (including tuple subclasses)."""
    return isinstance(x, tuple)
def is_boolean_array(x):
    """True when *x* is a list-like whose elements are all booleans.

    An empty list-like counts as a boolean array.
    """
    if not is_list_like(x):
        return False
    return all(is_bool(element) for element in x)
def is_integer_slice(x):
    """True when *x* is a slice whose start/stop/step are each None or an integer."""
    if not is_slice(x):
        return False
    return all(
        bound is None or is_integer(bound)
        for bound in (x.start, x.stop, x.step)
    )
_ENLARGEMENT_WARNING = """
Passing list-likes to .loc or [] with any missing label will raise
KeyError in the future, you can use .reindex() as an alternative.
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike
"""
_ILOC_INT_ONLY_ERROR = """
Location based indexing can only have [integer, integer slice (START point is
INCLUDED, END point is EXCLUDED), listlike of integers, boolean array] types.
"""
def _parse_tuple(tup):
    """Normalize a __getitem__/__setitem__ key into (row_loc, col_loc, ndim).
    loc[a] -> ([a], :), 1D
    loc[[a,b],] -> ([a,b], :),
    loc[a,b] -> ([a], [b]), 0D
    """
    if is_tuple(tup):
        if len(tup) > 2:
            raise IndexingError('Too many indexers')
        row_loc = tup[0]
        col_loc = tup[1] if len(tup) == 2 else slice(None)
    else:
        row_loc, col_loc = tup, slice(None)
    ndim = _compute_ndim(row_loc, col_loc)
    # Wrap scalars in lists so downstream lookups always receive list-likes.
    if is_scalar(row_loc):
        row_loc = [row_loc]
    if is_scalar(col_loc):
        col_loc = [col_loc]
    return row_loc, col_loc, ndim
def _is_enlargement(locator, coord_df):
    """Return True when *locator* references labels absent from coord_df.index.

    Locating with labels that are not in the original index enlarges the
    frame -- i.e. it appends NaN rows/columns.
    """
    if not is_list_like(locator) or is_slice(locator):
        return False
    if len(locator) == 0 or is_boolean_array(locator):
        return False
    missing = pandas.Index(locator).difference(coord_df.index)
    return len(missing) > 0
def _warn_enlargement():
    # Mirrors pandas' deprecation warning for list-like .loc lookups that
    # contain labels missing from the index.
    warn(FutureWarning(_ENLARGEMENT_WARNING))
def _compute_ndim(row_loc, col_loc):
"""Compute the ndim of result from locators
"""
row_scaler = is_scalar(row_loc)
col_scaler = is_scalar(col_loc)
if row_scaler and col_scaler:
ndim = 0
elif row_scaler ^ col_scaler:
ndim = 1
else:
ndim = 2
return ndim
class _Location_Indexer_Base(object):
    """Base class for location indexer like loc and iloc
    """
    def __init__(self, ray_df):
        # Cache the coordinate frames that map axis labels to
        # (partition, index_within_partition) pairs for both axes.
        self.df = ray_df
        self.col_coord_df = ray_df._col_metadata._coord_df
        self.row_coord_df = ray_df._row_metadata._coord_df
        self.block_oids = ray_df._block_partitions
        self.is_view = False
        if isinstance(ray_df, DataFrameView):
            # A view's `_block_partitions` property returns a masked grid; use
            # the raw underlying grid so coordinates address real blocks.
            self.block_oids = ray_df._block_partitions_data
            self.is_view = True
    def __getitem__(self, row_lookup, col_lookup, ndim):
        """
        Args:
            row_lookup: A pandas DataFrame, a partial view from row_coord_df
            col_lookup: A pandas DataFrame, a partial view from col_coord_df
            ndim: the dimension of returned data
        """
        if ndim == 2:
            # 2D result: no data movement, just wrap the lookups in a view.
            return self._generate_view(row_lookup, col_lookup)
        extracted = self._retrive_items(row_lookup, col_lookup)
        if ndim == 1:
            result = ray.get(_blocks_to_series.remote(*extracted)).squeeze()
            if is_scalar(result):
                # A length-1 selection squeezes to a scalar; rewrap as Series.
                result = pandas.Series(result)
            # The axis with a single entry supplies the Series name; the
            # other axis supplies the resulting index.
            scaler_axis = row_lookup if len(row_lookup) == 1 else col_lookup
            series_name = scaler_axis.iloc[0].name
            result.name = series_name
            index_axis = row_lookup if len(col_lookup) == 1 else col_lookup
            result.index = index_axis.index
        if ndim == 0:
            result = ray.get(extracted[0]).squeeze()
        return result
    def _retrive_items(self, row_lookup, col_lookup):
        """Given lookup dataframes, return a list of result oids
        """
        result_oids = []
        # We have to copy before we groupby because
        # https://github.com/pandas-dev/pandas/issues/10043
        row_groups = row_lookup.copy().groupby('partition')
        col_groups = col_lookup.copy().groupby('partition')
        # One remote extraction per touched (row partition, col partition) block.
        for row_blk, row_data in row_groups:
            for col_blk, col_data in col_groups:
                block_oid = self.block_oids[row_blk, col_blk]
                row_idx = row_data['index_within_partition']
                col_idx = col_data['index_within_partition']
                result_oid = extractor.remote(block_oid, row_idx, col_idx)
                result_oids.append(result_oid)
        return result_oids
    def _generate_view(self, row_lookup, col_lookup):
        """Generate a DataFrameView from lookup
        """
        # Count how many selected rows/cols fall into each partition.
        row_lengths = [0] * len(self.df._row_metadata._lengths)
        for i in row_lookup["partition"]:
            row_lengths[i] += 1
        col_lengths = [0] * len(self.df._col_metadata._lengths)
        for i in col_lookup["partition"]:
            col_lengths[i] += 1
        row_metadata_view = _IndexMetadata(
            coord_df_oid=row_lookup, lengths_oid=row_lengths)
        col_metadata_view = _IndexMetadata(
            coord_df_oid=col_lookup, lengths_oid=col_lengths)
        df_view = DataFrameView(
            block_partitions=self.block_oids,
            row_metadata=row_metadata_view,
            col_metadata=col_metadata_view,
            index=row_metadata_view.index,
            columns=col_metadata_view.index)
        return df_view
    def __setitem__(self, row_lookup, col_lookup, item):
        """
        Args:
            row_lookup: A pandas DataFrame, a partial view from row_coord_df
            col_lookup: A pandas DataFrame, a partial view from col_coord_df
            item: The new item needs to be set. It can be any shape that's
                broadcastable to the product of the lookup tables.
        """
        to_shape = (len(row_lookup), len(col_lookup))
        item = self._broadcast_item(item, to_shape)
        self._write_items(row_lookup, col_lookup, item)
    def _broadcast_item(self, item, to_shape):
        """Use numpy to broadcast or reshape item.
        Notes:
            - Numpy is memory efficent, there shouldn't be performance issue.
        """
        try:
            item = np.array(item)
            if np.prod(to_shape) == np.prod(item.shape):
                return item.reshape(to_shape)
            else:
                return np.broadcast_to(item, to_shape)
        except ValueError:
            from_shape = np.array(item).shape
            raise ValueError("could not broadcast input array from \
                shape {from_shape} into shape {to_shape}".format(
                from_shape=from_shape, to_shape=to_shape))
    def _write_items(self, row_lookup, col_lookup, item):
        """Perform remote write and replace blocks.
        """
        # We have to copy before we groupby because
        # https://github.com/pandas-dev/pandas/issues/10043
        row_groups = row_lookup.copy().groupby('partition')
        col_groups = col_lookup.copy().groupby('partition')
        # Walk the touched blocks, slicing the matching window out of `item`
        # and replacing each written block's object id in the grid.
        row_item_index = 0
        for row_blk, row_data in row_groups:
            row_len = len(row_data)
            col_item_index = 0
            for col_blk, col_data in col_groups:
                col_len = len(col_data)
                block_oid = self.block_oids[row_blk, col_blk]
                row_idx = row_data['index_within_partition']
                col_idx = col_data['index_within_partition']
                item_to_write = item[row_item_index:row_item_index + row_len,
                                     col_item_index:col_item_index + col_len]
                result_oid = writer.remote(block_oid, row_idx, col_idx,
                                           item_to_write)
                # Views write into the raw grid; plain frames through the attr.
                if self.is_view:
                    self.df._block_partitions_data[row_blk,
                                                   col_blk] = result_oid
                else:
                    self.df._block_partitions[row_blk, col_blk] = result_oid
                col_item_index += col_len
            row_item_index += row_len
class _Loc_Indexer(_Location_Indexer_Base):
    """A indexer for ray_df.loc[] functionality"""
    def __getitem__(self, key):
        row_loc, col_loc, ndim = _parse_tuple(key)
        self._handle_enlargement(row_loc, col_loc)
        row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
        # Duplicate labels can widen the result; recompute ndim from lookups.
        ndim = self._expand_dim(row_lookup, col_lookup, ndim)
        result = super(_Loc_Indexer, self).__getitem__(row_lookup, col_lookup,
                                                       ndim)
        return result
    def __setitem__(self, key, item):
        row_loc, col_loc, _ = _parse_tuple(key)
        self._handle_enlargement(row_loc, col_loc)
        row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
        super(_Loc_Indexer, self).__setitem__(row_lookup, col_lookup, item)
    def _handle_enlargement(self, row_loc, col_loc):
        """Handle Enlargement (if there is one).
        Returns:
            None
        """
        locators = [row_loc, col_loc]
        coord_dfs = [self.row_coord_df, self.col_coord_df]
        axis = ['row', 'col']
        metadata = {'row': self.df._row_metadata, 'col': self.df._col_metadata}
        for loc, coord, axis in zip(locators, coord_dfs, axis):
            if _is_enlargement(loc, coord):
                new_meta = self._enlarge_axis(loc, axis=axis)
                _warn_enlargement()
                metadata[axis] = new_meta
        # Refresh the cached coordinate frames (original or enlarged).
        self.row_coord_df = metadata['row']._coord_df
        self.col_coord_df = metadata['col']._coord_df
    def _enlarge_axis(self, locator, axis):
        """Add rows/columns to block partitions according to locator.
        Returns:
            metadata (_IndexMetadata)
        """
        # 1. Prepare variables
        row_based_bool = axis == 'row'
        # major == the axis of the locator
        major_meta = self.df._row_metadata if row_based_bool \
            else self.df._col_metadata
        minor_meta = self.df._col_metadata if row_based_bool \
            else self.df._row_metadata
        # 2. Compute the nan labels and append a strip of NaN blocks along
        #    the enlarged axis (one per partition of the minor axis).
        nan_labels = self._compute_enlarge_labels(locator, major_meta.index)
        num_nan_labels = len(nan_labels)
        blk_part_n_row, blk_part_n_col = self.block_oids.shape
        nan_blk_lens = minor_meta._lengths
        nan_blks = np.array([[
            _get_nan_block_id(
                num_nan_labels, n_cols, transpose=not row_based_bool)
            for n_cols in nan_blk_lens
        ]])
        nan_blks = nan_blks.T if not row_based_bool else nan_blks
        self.block_oids = np.concatenate(
            [self.block_oids, nan_blks], axis=0 if row_based_bool else 1)
        # 3. Prepare metadata to return: new labels all live in the appended
        #    partition, then re-select by the locator.
        nan_coord_df = pandas.DataFrame(data=[
            {
                '': name,
                'partition': blk_part_n_row
                if row_based_bool else blk_part_n_col,
                'index_within_partition': i
            } for name, i in zip(nan_labels, np.arange(num_nan_labels))
        ]).set_index('')
        coord_df = pandas.concat([major_meta._coord_df, nan_coord_df])
        coord_df = coord_df.loc[locator]  # Re-index that allows duplicates
        lens = major_meta._lengths
        lens = np.concatenate([lens, np.array([num_nan_labels])])
        metadata_view = _IndexMetadata(coord_df_oid=coord_df, lengths_oid=lens)
        return metadata_view
    def _compute_enlarge_labels(self, locator, base_index):
        """Helper for _enlarge_axis, compute common labels and extra labels.
        Returns:
            nan_labels: The labels needs to be added
        """
        # base_index_type can be pd.Index or pd.DatetimeIndex
        # depending on user input and pandas behavior
        # See issue #2264
        base_index_type = type(base_index)
        locator_as_index = base_index_type(locator)
        nan_labels = locator_as_index.difference(base_index)
        common_labels = locator_as_index.intersection(base_index)
        # Matching pandas: at least one requested label must already exist.
        if len(common_labels) == 0:
            raise KeyError(
                'None of [{labels}] are in the [{base_index_name}]'.format(
                    labels=list(locator_as_index), base_index_name=base_index))
        return nan_labels
    def _expand_dim(self, row_lookup, col_lookup, ndim):
        """Expand the dimension if necessary.
        This method is for cases like duplicate labels.
        """
        many_rows = len(row_lookup) > 1
        many_cols = len(col_lookup) > 1
        if ndim == 0 and (many_rows or many_cols):
            ndim = 1
        if ndim == 1 and (many_rows and many_cols):
            ndim = 2
        return ndim
    def _compute_lookup(self, row_loc, col_loc):
        # Label-based selection; .loc preserves duplicate matches.
        row_lookup = self.row_coord_df.loc[row_loc]
        col_lookup = self.col_coord_df.loc[col_loc]
        return row_lookup, col_lookup
class _iLoc_Indexer(_Location_Indexer_Base):
    """A indexer for ray_df.iloc[] functionality"""
    def __getitem__(self, key):
        row_loc, col_loc, ndim = _parse_tuple(key)
        # iloc only accepts integer-based locators; validate both axes first.
        self._check_dtypes(row_loc)
        self._check_dtypes(col_loc)
        row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
        result = super(_iLoc_Indexer, self).__getitem__(
            row_lookup, col_lookup, ndim)
        return result
    def __setitem__(self, key, item):
        row_loc, col_loc, _ = _parse_tuple(key)
        self._check_dtypes(row_loc)
        self._check_dtypes(col_loc)
        row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
        super(_iLoc_Indexer, self).__setitem__(row_lookup, col_lookup, item)
    def _compute_lookup(self, row_loc, col_loc):
        # Positional selection against the coordinate frames.
        return self.row_coord_df.iloc[row_loc], self.col_coord_df.iloc[col_loc]
    def _check_dtypes(self, locator):
        # Raise ValueError unless the locator is an int, an integer slice,
        # a list of ints, or a boolean array.
        is_int = is_integer(locator)
        is_int_slice = is_integer_slice(locator)
        is_int_list = is_list_like(locator) and all(map(is_integer, locator))
        is_bool_arr = is_boolean_array(locator)
        if not any([is_int, is_int_slice, is_int_list, is_bool_arr]):
            raise ValueError(_ILOC_INT_ONLY_ERROR)
class DataFrameView(DataFrame):
    """A subclass of DataFrame where the index can be smaller than blocks.

    Returned by the indexers for 2D selections: it shares the parent's block
    partitions and exposes only the selected rows/columns via its metadata.
    """

    def __init__(self, block_partitions, row_metadata, col_metadata, index,
                 columns):
        # Assigning `_block_partitions` goes through the property setter
        # below and stores the raw grid in `_block_partitions_data`.
        self._block_partitions = block_partitions
        self._row_metadata = row_metadata
        self._col_metadata = col_metadata
        self.index = index
        self.columns = columns

    def _get_block_partitions(self):
        # Mask the raw block grid down to the rows/cols this view exposes.
        oid_arr = _mask_block_partitions(self._block_partitions_data,
                                         self._row_metadata,
                                         self._col_metadata)
        return oid_arr

    def _set_block_partitions(self, new_block_partitions):
        self._block_partitions_data = new_block_partitions

    _block_partitions = property(_get_block_partitions, _set_block_partitions)
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import (is_scalar, is_list_like, is_bool)
from pandas.core.dtypes.common import is_integer
from pandas.core.indexing import IndexingError
import numpy as np
import ray
from warnings import warn
from .utils import (_get_nan_block_id, extractor, _mask_block_partitions,
writer, _blocks_to_series)
from .index_metadata import _IndexMetadata
from .dataframe import DataFrame
"""Indexing Helper Class works as follows:
_Location_Indexer_Base provide methods framework for __getitem__
and __setitem__ that work with Ray DataFrame's internal index. Base
class's __{get,set}item__ takes in partitions & idx_in_partition data
and perform lookup/item write.
_LocIndexer and _iLocIndexer are responsible for indexer-specific logic and
lookup computation. Loc additionally takes care of enlarging the DataFrame.
Both indexers translate pandas-style lookups into Ray DataFrame's internal
lookup.
An illustration is available at
https://github.com/ray-project/ray/pull/1955#issuecomment-386781826
"""
def is_slice(x):
    """Return True when *x* is a built-in slice object."""
    return isinstance(x, slice)
def is_2d(x):
    """True for indexers that may select multiple positions: list-likes and slices."""
    return is_list_like(x) or is_slice(x)
def is_tuple(x):
    """Return True when *x* is a tuple (including tuple subclasses)."""
    return isinstance(x, tuple)
def is_boolean_array(x):
    """True when *x* is a list-like whose elements are all booleans."""
    return is_list_like(x) and all(map(is_bool, x))
def is_integer_slice(x):
    """True when *x* is a slice whose start/stop/step are each None or an integer."""
    if not is_slice(x):
        return False
    for pos in [x.start, x.stop, x.step]:
        if not ((pos is None) or is_integer(pos)):
            return False # one position is neither None nor int
    return True
_ENLARGEMENT_WARNING = """
Passing list-likes to .loc or [] with any missing label will raise
KeyError in the future, you can use .reindex() as an alternative.
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike
"""
_ILOC_INT_ONLY_ERROR = """
Location based indexing can only have [integer, integer slice (START point is
INCLUDED, END point is EXCLUDED), listlike of integers, boolean array] types.
"""
def _parse_tuple(tup):
    """Unpack the user input for getitem and setitem and compute ndim
    loc[a] -> ([a], :), 1D
    loc[[a,b],] -> ([a,b], :),
    loc[a,b] -> ([a], [b]), 0D
    """
    row_loc, col_loc = slice(None), slice(None)
    if is_tuple(tup):
        row_loc = tup[0]
        if len(tup) == 2:
            col_loc = tup[1]
        if len(tup) > 2:
            raise IndexingError('Too many indexers')
    else:
        row_loc = tup
    ndim = _compute_ndim(row_loc, col_loc)
    # Wrap scalars in lists so downstream lookups always receive list-likes.
    row_loc = [row_loc] if is_scalar(row_loc) else row_loc
    col_loc = [col_loc] if is_scalar(col_loc) else col_loc
    return row_loc, col_loc, ndim
def _is_enlargement(locator, coord_df):
    """Determine if a locator will enlarge the coord_df.
    Enlargement happens when trying to locate using labels that aren't in the
    original index. In other words, enlargement == adding NaNs !
    """
    # Only non-empty, non-boolean list-likes can enlarge; slices and masks
    # always refer to existing positions.
    if is_list_like(locator) and not is_slice(
            locator) and len(locator) > 0 and not is_boolean_array(locator):
        n_diff_elems = len(pandas.Index(locator).difference(coord_df.index))
        is_enlargement_boolean = n_diff_elems > 0
        return is_enlargement_boolean
    return False
def _warn_enlargement():
    # Mirrors pandas' deprecation warning for list-like .loc lookups that
    # contain labels missing from the index.
    warn(FutureWarning(_ENLARGEMENT_WARNING))
def _compute_ndim(row_loc, col_loc):
    """Compute the ndim of result from locators
    """
    # Both scalar -> 0-D; exactly one scalar -> 1-D; neither -> 2-D.
    row_scaler = is_scalar(row_loc)
    col_scaler = is_scalar(col_loc)
    if row_scaler and col_scaler:
        ndim = 0
    elif row_scaler ^ col_scaler:
        ndim = 1
    else:
        ndim = 2
    return ndim
class _Location_Indexer_Base(object):
"""Base class for location indexer like loc and iloc
"""
def __init__(self, ray_df):
self.df = ray_df
self.col_coord_df = ray_df._col_metadata._coord_df
self.row_coord_df = ray_df._row_metadata._coord_df
self.block_oids = ray_df._block_partitions
self.is_view = False
if isinstance(ray_df, DataFrameView):
self.block_oids = ray_df._block_partitions_data
self.is_view = True
def __getitem__(self, row_lookup, col_lookup, ndim):
"""
Args:
row_lookup: A pandas DataFrame, a partial view from row_coord_df
col_lookup: A pandas DataFrame, a partial view from col_coord_df
ndim: the dimension of returned data
"""
if ndim == 2:
return self._generate_view(row_lookup, col_lookup)
extracted = self._retrive_items(row_lookup, col_lookup)
if ndim == 1:
result = ray.get(_blocks_to_series.remote(*extracted)).squeeze()
if is_scalar(result):
result = pandas.Series(result)
scaler_axis = row_lookup if len(row_lookup) == 1 else col_lookup
series_name = scaler_axis.iloc[0].name
result.name = series_name
index_axis = row_lookup if len(col_lookup) == 1 else col_lookup
result.index = index_axis.index
if ndim == 0:
result = ray.get(extracted[0]).squeeze()
return result
def _retrive_items(self, row_lookup, col_lookup):
"""Given lookup dataframes, return a list of result oids
"""
result_oids = []
# We have to copy before we groupby because
# https://github.com/pandas-dev/pandas/issues/10043
row_groups = row_lookup.copy().groupby('partition')
col_groups = col_lookup.copy().groupby('partition')
for row_blk, row_data in row_groups:
for col_blk, col_data in col_groups:
block_oid = self.block_oids[row_blk, col_blk]
row_idx = row_data['index_within_partition']
col_idx = col_data['index_within_partition']
result_oid = extractor.remote(block_oid, row_idx, col_idx)
result_oids.append(result_oid)
return result_oids
def _generate_view(self, row_lookup, col_lookup):
"""Generate a DataFrameView from lookup
"""
row_lengths = [0] * len(self.df._row_metadata._lengths)
for i in row_lookup["partition"]:
row_lengths[i] += 1
col_lengths = [0] * len(self.df._col_metadata._lengths)
for i in col_lookup["partition"]:
col_lengths[i] += 1
row_metadata_view = _IndexMetadata(
coord_df_oid=row_lookup, lengths_oid=row_lengths)
col_metadata_view = _IndexMetadata(
coord_df_oid=col_lookup, lengths_oid=col_lengths)
df_view = DataFrameView(
block_partitions=self.block_oids,
row_metadata=row_metadata_view,
col_metadata=col_metadata_view,
index=row_metadata_view.index,
columns=col_metadata_view.index)
return df_view
def __setitem__(self, row_lookup, col_lookup, item):
"""
Args:
row_lookup: A pandas DataFrame, a partial view from row_coord_df
col_lookup: A pandas DataFrame, a partial view from col_coord_df
item: The new item needs to be set. It can be any shape that's
broadcastable to the product of the lookup tables.
"""
to_shape = (len(row_lookup), len(col_lookup))
item = self._broadcast_item(item, to_shape)
self._write_items(row_lookup, col_lookup, item)
def _broadcast_item(self, item, to_shape):
    """Use numpy to broadcast or reshape ``item`` to ``to_shape``.

    Args:
        item: Scalar or array-like value to be written.
        to_shape: Target (n_rows, n_cols) shape.

    Returns:
        numpy array of shape ``to_shape``.

    Raises:
        ValueError: if ``item`` can neither be reshaped nor broadcast.

    Notes:
        - Numpy is memory efficient, there shouldn't be performance issue.
    """
    try:
        item = np.array(item)
        if np.prod(to_shape) == np.prod(item.shape):
            # Same element count: an exact reshape is possible.
            return item.reshape(to_shape)
        else:
            # Different element count: rely on numpy broadcasting rules.
            return np.broadcast_to(item, to_shape)
    except ValueError:
        from_shape = np.array(item).shape
        # Bug fix: the original put a backslash continuation *inside* the
        # string literal, embedding a long run of spaces in the error
        # message. Build the message from adjacent literals instead.
        raise ValueError(
            "could not broadcast input array from shape {from_shape} "
            "into shape {to_shape}".format(
                from_shape=from_shape, to_shape=to_shape))
def _write_items(self, row_lookup, col_lookup, item):
    """Perform remote write and replace blocks.

    ``item`` must already have shape (len(row_lookup), len(col_lookup));
    a slice of it is written into every intersecting block partition and
    the stored block oid is replaced by the result.
    """
    # We have to copy before we groupby because
    # https://github.com/pandas-dev/pandas/issues/10043
    row_groups = row_lookup.copy().groupby('partition')
    col_groups = col_lookup.copy().groupby('partition')
    # Running offsets into ``item`` as we walk the partition grid.
    row_item_index = 0
    for row_blk, row_data in row_groups:
        row_len = len(row_data)
        col_item_index = 0
        for col_blk, col_data in col_groups:
            col_len = len(col_data)
            block_oid = self.block_oids[row_blk, col_blk]
            row_idx = row_data['index_within_partition']
            col_idx = col_data['index_within_partition']
            # Slice of ``item`` destined for this block partition.
            item_to_write = item[row_item_index:row_item_index + row_len,
                                 col_item_index:col_item_index + col_len]
            result_oid = writer.remote(block_oid, row_idx, col_idx,
                                       item_to_write)
            # Views write through to the underlying data array; plain
            # frames replace the block oid directly.
            if self.is_view:
                self.df._block_partitions_data[row_blk,
                                               col_blk] = result_oid
            else:
                self.df._block_partitions[row_blk, col_blk] = result_oid
            col_item_index += col_len
        row_item_index += row_len
class _Loc_Indexer(_Location_Indexer_Base):
    """A indexer for ray_df.loc[] functionality"""

    def __getitem__(self, key):
        row_loc, col_loc, ndim = _parse_tuple(key)
        self._handle_enlargement(row_loc, col_loc)
        row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
        # Duplicate labels can widen the selection beyond what the raw key
        # implied; recompute ndim from the actual lookup sizes.
        ndim = self._expand_dim(row_lookup, col_lookup, ndim)
        result = super(_Loc_Indexer, self).__getitem__(row_lookup, col_lookup,
                                                       ndim)
        return result

    def __setitem__(self, key, item):
        row_loc, col_loc, _ = _parse_tuple(key)
        self._handle_enlargement(row_loc, col_loc)
        row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
        super(_Loc_Indexer, self).__setitem__(row_lookup, col_lookup, item)

    def _handle_enlargement(self, row_loc, col_loc):
        """Handle Enlargement (if there is one).

        Returns:
            None
        """
        locators = [row_loc, col_loc]
        coord_dfs = [self.row_coord_df, self.col_coord_df]
        axis = ['row', 'col']
        metadata = {'row': self.df._row_metadata, 'col': self.df._col_metadata}
        # NOTE(review): the loop variable shadows the ``axis`` list above;
        # it works, but after the loop ``axis`` is the string 'col'.
        for loc, coord, axis in zip(locators, coord_dfs, axis):
            if _is_enlargement(loc, coord):
                new_meta = self._enlarge_axis(loc, axis=axis)
                _warn_enlargement()
                metadata[axis] = new_meta
        # Refresh the cached coordinate frames from the (possibly new)
        # metadata objects.
        self.row_coord_df = metadata['row']._coord_df
        self.col_coord_df = metadata['col']._coord_df

    def _enlarge_axis(self, locator, axis):
        """Add rows/columns to block partitions according to locator.

        Returns:
            metadata (_IndexMetadata)
        """
        # 1. Prepare variables
        row_based_bool = axis == 'row'
        # major == the axis of the locator
        major_meta = self.df._row_metadata if row_based_bool \
            else self.df._col_metadata
        minor_meta = self.df._col_metadata if row_based_bool \
            else self.df._row_metadata
        # 2. Compute the nan labels and add blocks
        nan_labels = self._compute_enlarge_labels(locator, major_meta.index)
        num_nan_labels = len(nan_labels)
        blk_part_n_row, blk_part_n_col = self.block_oids.shape
        nan_blk_lens = minor_meta._lengths
        # One nan-filled block per partition along the minor axis.
        nan_blks = np.array([[
            _get_nan_block_id(
                num_nan_labels, n_cols, transpose=not row_based_bool)
            for n_cols in nan_blk_lens
        ]])
        nan_blks = nan_blks.T if not row_based_bool else nan_blks
        # Append the nan blocks as a new partition row/column.
        self.block_oids = np.concatenate(
            [self.block_oids, nan_blks], axis=0 if row_based_bool else 1)
        # 3. Prepare metadata to return
        nan_coord_df = pandas.DataFrame(data=[
            {
                '': name,
                'partition': blk_part_n_row
                if row_based_bool else blk_part_n_col,
                'index_within_partition': i
            } for name, i in zip(nan_labels, np.arange(num_nan_labels))
        ]).set_index('')
        coord_df = pandas.concat([major_meta._coord_df, nan_coord_df])
        coord_df = coord_df.loc[locator]  # Re-index that allows duplicates
        lens = major_meta._lengths
        lens = np.concatenate([lens, np.array([num_nan_labels])])
        metadata_view = _IndexMetadata(coord_df_oid=coord_df, lengths_oid=lens)
        return metadata_view

    def _compute_enlarge_labels(self, locator, base_index):
        """Helper for _enlarge_axis, compute common labels and extra labels.

        Returns:
            nan_labels: The labels needs to be added
        """
        # base_index_type can be pd.Index or pd.DatetimeIndex
        # depending on user input and pandas behavior
        # See issue #2264
        base_index_type = type(base_index)
        locator_as_index = base_index_type(locator)
        nan_labels = locator_as_index.difference(base_index)
        common_labels = locator_as_index.intersection(base_index)
        # Enlargement with *no* existing label at all is a KeyError, to
        # match pandas' loc behaviour.
        if len(common_labels) == 0:
            raise KeyError(
                'None of [{labels}] are in the [{base_index_name}]'.format(
                    labels=list(locator_as_index), base_index_name=base_index))
        return nan_labels

    def _expand_dim(self, row_lookup, col_lookup, ndim):
        """Expand the dimension if necessary.
        This method is for cases like duplicate labels.
        """
        many_rows = len(row_lookup) > 1
        many_cols = len(col_lookup) > 1
        if ndim == 0 and (many_rows or many_cols):
            ndim = 1
        if ndim == 1 and (many_rows and many_cols):
            ndim = 2
        return ndim

    def _compute_lookup(self, row_loc, col_loc):
        # We use reindex for list to avoid duplicates.
        row_lookup = self.row_coord_df.loc[row_loc]
        col_lookup = self.col_coord_df.loc[col_loc]
        return row_lookup, col_lookup
class _iLoc_Indexer(_Location_Indexer_Base):
    """A indexer for ray_df.iloc[] functionality"""

    def __getitem__(self, key):
        row_loc, col_loc, ndim = _parse_tuple(key)
        # iloc only accepts integer-like locators; validate both axes
        # before doing any work.
        self._check_dtypes(row_loc)
        self._check_dtypes(col_loc)
        row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
        result = super(_iLoc_Indexer, self).__getitem__(
            row_lookup, col_lookup, ndim)
        return result

    def __setitem__(self, key, item):
        row_loc, col_loc, _ = _parse_tuple(key)
        self._check_dtypes(row_loc)
        self._check_dtypes(col_loc)
        row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
        super(_iLoc_Indexer, self).__setitem__(row_lookup, col_lookup, item)

    def _compute_lookup(self, row_loc, col_loc):
        # We use reindex for list to avoid duplicates.
        return self.row_coord_df.iloc[row_loc], self.col_coord_df.iloc[col_loc]

    def _check_dtypes(self, locator):
        """Raise ValueError unless ``locator`` is an integer, an integer
        slice, a list of integers, or a boolean array."""
        is_int = is_integer(locator)
        is_int_slice = is_integer_slice(locator)
        is_int_list = is_list_like(locator) and all(map(is_integer, locator))
        is_bool_arr = is_boolean_array(locator)
        if not any([is_int, is_int_slice, is_int_list, is_bool_arr]):
            raise ValueError(_ILOC_INT_ONLY_ERROR)
class DataFrameView(DataFrame):
    """A subclass of DataFrame where the index can be smaller than blocks.
    """

    def __init__(self, block_partitions, row_metadata, col_metadata, index,
                 columns):
        # NOTE(review): deliberately does not call DataFrame.__init__; the
        # view is assembled directly from existing block partitions and
        # metadata -- confirm the base class tolerates this.
        self._block_partitions = block_partitions
        self._row_metadata = row_metadata
        self._col_metadata = col_metadata
        self.index = index
        self.columns = columns

    def _get_block_partitions(self):
        # Mask the stored blocks down to the rows/columns this view covers.
        oid_arr = _mask_block_partitions(self._block_partitions_data,
                                         self._row_metadata,
                                         self._col_metadata)
        return oid_arr

    def _set_block_partitions(self, new_block_partitions):
        # Store the raw (unmasked) block grid.
        self._block_partitions_data = new_block_partitions
_block_partitions = property(_get_block_partitions, _set_block_partitions) | 0.744378 | 0.319871 |
import urlparse
import logging
from django.views.generic import FormView, TemplateView
from django.contrib import auth
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect, HttpResponseGone
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.generic.base import RedirectView
from django.conf import settings
from .forms import SurmandlAuthForm
logger = logging.getLogger(__name__)
class LoginView(FormView):
    """Render the login form and authenticate the submitted credentials."""

    form_class = SurmandlAuthForm
    redirect_field_name = REDIRECT_FIELD_NAME
    template_name = "login.html"

    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        # CSRF protection plus no-cache headers on every request method.
        return super(LoginView, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        # The form has already validated the credentials; open the session
        # and send the user to the post-login destination.
        login(self.request, form.get_user())
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        # An explicit success_url wins; otherwise honour the redirect
        # parameter from the request (empty string when absent).
        target = self.success_url or self.request.REQUEST.get(
            self.redirect_field_name, '')
        if not target:
            return settings.LOGIN_REDIRECT_URL
        # Security check -- don't allow redirection to a different host.
        netloc = urlparse.urlparse(target)[1]
        if netloc and netloc != self.request.get_host():
            return settings.LOGIN_REDIRECT_URL
        return target
class HomePageView(TemplateView):
    """Basic view for the homepage"""

    template_name = "home.html"

    @method_decorator(csrf_protect)
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Only authenticated users may see the homepage; CSRF-protect it too.
        return super(HomePageView, self).dispatch(request, *args, **kwargs)
class LogOutView(RedirectView):
    """And here is the logout view, taking the user back to the login page. """

    url = settings.LOGIN_URL
    permanent = False

    def get(self, request, *args, **kwargs):
        url = self.get_redirect_url(**kwargs)
        # NOTE(review): ``is_authenticated`` is referenced without calling
        # it; on Django versions where it is a method this expression is
        # always truthy. Harmless (logging out an anonymous session is a
        # no-op) but confirm the targeted Django version.
        if self.request.user.is_authenticated:
            auth.logout(self.request)
        if url:
            if self.permanent:
                return HttpResponsePermanentRedirect(url)
            else:
                return HttpResponseRedirect(url)
        else:
            # No redirect target configured: answer 410 Gone.
            logger.warning('Gone: %s', self.request.path,
                           extra={
                               'status_code': 410,
                               'request': self.request
                           })
            return HttpResponseGone()
return HttpResponseGone() | teamsurmandl/views.py | import urlparse
import logging
from django.views.generic import FormView, TemplateView
from django.contrib import auth
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect, HttpResponseGone
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.generic.base import RedirectView
from django.conf import settings
from .forms import SurmandlAuthForm
logger = logging.getLogger(__name__)
class LoginView(FormView):
"""View to handle our login process."""
form_class = SurmandlAuthForm
redirect_field_name = REDIRECT_FIELD_NAME
template_name = "login.html"
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
return super(LoginView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
login(self.request, form.get_user())
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
if self.success_url:
redirect_to = self.success_url
else:
redirect_to = self.request.REQUEST.get(self.redirect_field_name, '')
netloc = urlparse.urlparse(redirect_to)[1]
if not redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
# Security check -- don't allow redirection to a different host.
elif netloc and netloc != self.request.get_host():
redirect_to = settings.LOGIN_REDIRECT_URL
return redirect_to
class HomePageView(TemplateView):
"""Basic view for the homepage"""
template_name = "home.html"
@method_decorator(csrf_protect)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(HomePageView, self).dispatch(request, *args, **kwargs)
class LogOutView(RedirectView):
"""And here is the logout view, taking the user back to the login page. """
url = settings.LOGIN_URL
permanent = False
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(**kwargs)
if self.request.user.is_authenticated:
auth.logout(self.request)
if url:
if self.permanent:
return HttpResponsePermanentRedirect(url)
else:
return HttpResponseRedirect(url)
else:
logger.warning('Gone: %s', self.request.path,
extra={
'status_code': 410,
'request': self.request
})
return HttpResponseGone() | 0.493653 | 0.05151 |
import json
import os
import ipaddress
from platform import system
def clear():
    """Clear the console using the platform-appropriate shell command."""
    command = "cls" if system() == "Windows" else "clear"
    os.system(command)
def get_settings():
    """Load and return the settings dict from data/settings.json."""
    with open("data/settings.json", "r", encoding="utf-8") as settings_file:
        return json.loads(settings_file.read())
def get_last_session():
    """Load and return the previous session dict from data/tmp."""
    with open("data/tmp/last_session.json", "r", encoding="utf-8") as session_file:
        return json.loads(session_file.read())
def save_file(new_thinks, file_path):
    """Overwrite ``file_path`` with the text ``new_thinks``.

    The previous file (if any) is removed first; removal failures are
    ignored because opening in "w" mode truncates the file anyway.
    """
    try:
        os.remove(file_path)
    except OSError:
        # Bug fix: narrowed from a bare ``except`` (which also swallowed
        # KeyboardInterrupt/SystemExit). Missing file is the expected case.
        pass
    with open(file_path, "w") as file:
        file.write(new_thinks)
def get_lang(settings, state=0):
    """Return the language dictionary for the configured UI language.

    Args:
        settings: settings dict; mutated and persisted when a language is
            chosen interactively here.
        state: pass 1 to force an interactive language re-selection.

    Returns:
        dict loaded from data/lang/lang_<LANG>.json.

    NOTE(review): ``int(input(...))`` raises ValueError on non-numeric
    input -- confirm whether the caller guards against that.
    """
    if state == 1 or settings["language"] is None:
        lang_list = ["EN", "IT", "ES", "NL"]
        # Loop until a valid 1-based menu choice is entered.
        while True:
            clear()
            x = int(input('''
1) English
2) Italiano
3) Español
4) Nederlands
Select your language: '''))
            if 1 <= x <= len(lang_list):
                settings["language"] = lang_list[x - 1]
                # Persist the chosen language immediately.
                save_file(json.dumps(settings), "data/settings.json")
                break
    with open("data/lang/lang_" + settings["language"] + ".json", "r", encoding="utf-8") as file:
        return json.load(file)
def range_calculator(number_of_client, raw_range):
    """Split an IPv4 range string "A-B" into number_of_client sub-ranges.

    Each sub-range is returned as a "first-last" string; the final one is
    capped at the original range's upper bound.
    """
    start_str, end_str = raw_range.split("-")[0], raw_range.split("-")[1]
    start = int(ipaddress.IPv4Address(start_str))
    end = int(ipaddress.IPv4Address(end_str))
    # Integer span per client (truncated division, as in the original).
    step = int((end - start) / number_of_client)
    offset = step
    ranges = [str(start_str) + "-" + str(ipaddress.IPv4Address(start + offset))]
    for part in range(number_of_client - 1):
        lower = str(ipaddress.IPv4Address(start + offset + 1)) + "-"
        offset += step
        if part == number_of_client - 2:
            # Last sub-range: always end exactly at the requested upper IP.
            lower += end_str
        else:
            lower += str(ipaddress.IPv4Address(start + offset))
        ranges.append(lower)
    return ranges
import os
import ipaddress
from platform import system
def clear():
if system() == "Windows":
os.system("cls")
else:
os.system("clear")
def get_settings():
with open("data/settings.json", "r", encoding="utf-8") as file:
return json.load(file)
def get_last_session():
with open("data/tmp/last_session.json", "r", encoding="utf-8") as file:
return json.load(file)
def save_file(new_thinks, file_path):
try:
os.remove(file_path)
except:
pass
with open(file_path, "w") as file:
file.write(new_thinks)
def get_lang(settings, state=0):
if state == 1 or settings["language"] is None:
lang_list = ["EN", "IT", "ES", "NL"]
while True:
clear()
x = int(input('''
1) English
2) Italiano
3) Español
4) Nederlands
Select your language: '''))
if 1 <= x <= len(lang_list):
settings["language"] = lang_list[x - 1]
save_file(json.dumps(settings), "data/settings.json")
break
with open("data/lang/lang_" + settings["language"] + ".json", "r", encoding="utf-8") as file:
return json.load(file)
def range_calculator(number_of_client, raw_range):
first_ip = raw_range.split("-")[0]
last_ip = raw_range.split("-")[1]
first_ip_int = int(ipaddress.IPv4Address(first_ip))
last_ip_int = int(ipaddress.IPv4Address(last_ip))
difference = last_ip_int - first_ip_int
diff_between_ip = int(difference / number_of_client)
diff_between_ip_copy = diff_between_ip
divided_range = [(str(first_ip) + "-" + str(ipaddress.IPv4Address(diff_between_ip + first_ip_int)))]
for x in range(number_of_client - 1):
range_string = str(ipaddress.IPv4Address(diff_between_ip + first_ip_int + 1)) + "-"
diff_between_ip += diff_between_ip_copy
if x == number_of_client - 2:
range_string += last_ip
else:
range_string += str(ipaddress.IPv4Address(diff_between_ip + first_ip_int))
divided_range.append(range_string)
return divided_range | 0.110904 | 0.090053 |
import torch
import torch.nn as nn
import skimage
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
def conv_block(in_channels, out_channels):
    """Two stacked 3x3 conv -> BN -> ReLU stages (spatial size preserved)."""
    layers = []
    for stage_in in (in_channels, out_channels):
        layers.append(nn.Conv2d(stage_in, out_channels, 3, padding=1))
        layers.append(nn.BatchNorm2d(num_features=out_channels))
        layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
def up_transpose(in_channels, out_channels):
    """Single ConvTranspose2d that exactly doubles the spatial resolution."""
    upsample = nn.ConvTranspose2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=2,
        padding=1,
        output_padding=1,
    )
    return nn.Sequential(upsample)
class center_block(nn.Module):
    """Center stage: six dilated 3x3 convs (dilation 1,2,4,8,16,32).

    padding == dilation keeps the spatial size fixed; the outputs of all
    six conv-BN-ReLU stages are summed to mix receptive-field scales.
    """

    def __init__(self, in_channels, out_channels):
        super(center_block, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1,dilation=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=2,dilation=2)
        self.conv3 = nn.Conv2d(out_channels, out_channels, 3, padding=4,dilation=4)
        self.conv4 = nn.Conv2d(out_channels, out_channels, 3, padding=8,dilation=8)
        self.conv5 = nn.Conv2d(out_channels, out_channels, 3, padding=16,dilation=16)
        self.conv6 = nn.Conv2d(out_channels, out_channels, 3, padding=32,dilation=32)
        self.bn_1 = nn.BatchNorm2d(num_features=out_channels)
        self.bn_2 = nn.BatchNorm2d(num_features=out_channels)
        self.bn_3 = nn.BatchNorm2d(num_features=out_channels)
        self.bn_4 = nn.BatchNorm2d(num_features=out_channels)
        self.bn_5 = nn.BatchNorm2d(num_features=out_channels)
        self.bn_6 = nn.BatchNorm2d(num_features=out_channels)
        self.relu = nn.ReLU()

    def forward(self,x):  # NOTE (translated): center and rrm code are currently intermixed..
        x1 = self.relu(self.bn_1(self.conv1(x)))
        x2 = self.relu(self.bn_2(self.conv2(x1)))
        x3 = self.relu(self.bn_3(self.conv3(x2)))
        x4 = self.relu(self.bn_4(self.conv4(x3)))
        x5 = self.relu(self.bn_5(self.conv5(x4)))
        x6 = self.relu(self.bn_6(self.conv6(x5)))
        # Sum the output of every dilation stage.
        x = x1+x2+x3+x4+x5+x6
        return x
class rrm_module(nn.Module):
    """Residual refinement module (BE variant).

    Same dilated-conv stack as center_block, followed by a 3x3 output
    conv and a residual addition of the module input.
    """

    def __init__(self, in_channels, out_channels):
        super(rrm_module,self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1,dilation=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=2,dilation=2)
        self.conv3 = nn.Conv2d(out_channels, out_channels, 3, padding=4,dilation=4)
        self.conv4 = nn.Conv2d(out_channels, out_channels, 3, padding=8,dilation=8)
        self.conv5 = nn.Conv2d(out_channels, out_channels, 3, padding=16,dilation=16)
        self.conv6 = nn.Conv2d(out_channels, out_channels, 3, padding=32,dilation=32)
        self.bn_1 = nn.BatchNorm2d(num_features=out_channels)
        self.bn_2 = nn.BatchNorm2d(num_features=out_channels)
        self.bn_3 = nn.BatchNorm2d(num_features=out_channels)
        self.bn_4 = nn.BatchNorm2d(num_features=out_channels)
        self.bn_5 = nn.BatchNorm2d(num_features=out_channels)
        self.bn_6 = nn.BatchNorm2d(num_features=out_channels)
        self.relu = nn.ReLU()
        # self.out = nn.Conv2d(out_channels, 1, 3, padding=1,dilation=1)
        # BE mode
        # 64 output channels (instead of 1) so the refined features can be
        # fused with the boundary branch downstream.
        self.out = nn.Conv2d(out_channels, 64, 3, padding=1,dilation=1)

    def forward(self,x):
        residual = x
        x1 = self.relu(self.bn_1(self.conv1(x)))
        x2 = self.relu(self.bn_2(self.conv2(x1)))
        x3 = self.relu(self.bn_3(self.conv3(x2)))
        x4 = self.relu(self.bn_4(self.conv4(x3)))
        x5 = self.relu(self.bn_5(self.conv5(x4)))
        x6 = self.relu(self.bn_6(self.conv6(x5)))
        # Sum of all dilation stages, projected to 64 channels.
        x = x1+x2+x3+x4+x5+x6
        x = self.out(x)
        # Residual connection -- the input must therefore be broadcastable
        # against 64 channels (64-channel input in BE mode).
        x = residual + x
        output = x
        # output = F.sigmoid(x)
        return output
class decoder_block(nn.Module):
    """Pre-activation decoder stage: BN -> ReLU -> double conv block."""

    def __init__(self, in_channels, out_channels):
        super(decoder_block, self).__init__()
        self.bn_i = nn.BatchNorm2d(num_features=in_channels)
        self.relu = nn.ReLU()
        self.conv = conv_block(in_channels, out_channels)

    def forward(self, x):
        # Normalize and activate before the convolutional stages.
        activated = self.relu(self.bn_i(x))
        return self.conv(activated)
class BRRNet_BE(nn.Module):
    """BRRNet backbone with a boundary-enhancement (BE) head.

    Three-level encoder/decoder with a dilated center block and a residual
    refinement module, plus HED-style side outputs (dsn1..dsn4) fused into
    a boundary branch whose softmax rescales the final mask.
    """

    def __init__(self, n_class=1, pretrained=False, mode= 'Train'):
        super().__init__()
        self.mode = mode
        # --- Encoder ---
        self.dconv_down1 = conv_block(3, 64)
        self.dconv_down2 = conv_block(64, 128)
        self.dconv_down3 = conv_block(128, 256)
        self.maxpool = nn.MaxPool2d(2,2)
        self.center = center_block(256,512)
        # --- Decoder (transpose convs + skip connections) ---
        self.deconv3 = up_transpose(512,256)
        self.deconv2 = up_transpose(256,128)
        self.deconv1 = up_transpose(128,64)
        self.decoder_3 = decoder_block(512, 256)
        self.decoder_2 = decoder_block(256, 128)
        self.decoder_1 = decoder_block(128, 64)
        # self.output_1 = nn.Conv2d(64,n_class, 1)
        # self.rrm = rrm_module(1,64)
        # BE mode
        self.output_1 = nn.Conv2d(64,64, 1)
        self.rrm = rrm_module(64,64)
        # HED Block
        self.dsn1 = nn.Conv2d(64, 1, 1)
        self.dsn2 = nn.Conv2d(128, 1, 1)
        self.dsn3 = nn.Conv2d(256, 1, 1)
        self.dsn4 = nn.Conv2d(512, 1, 1)
        #boundary enhancement part
        self.fuse = nn.Sequential(nn.Conv2d(4, 64, 1),nn.ReLU(inplace=True))
        self.SE_mimic = nn.Sequential(
            nn.Linear(64, 64, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(64, 4, bias=False),
            nn.Sigmoid()
        )
        self.final_boundary = nn.Conv2d(4,2,1)
        self.final_conv = nn.Sequential(
            nn.Conv2d(128,64,3, padding=1),
            nn.ReLU(inplace=True)
        )
        self.final_mask = nn.Conv2d(64,2,1)
        self.relu = nn.ReLU()
        # NOTE(review): ``self.out`` is never referenced in forward().
        self.out = nn.Conv2d(64,1,1)

    def forward(self, x):
        h = x.size(2)
        w = x.size(3)
        # --- Encoder ---
        conv1 = self.dconv_down1(x)
        # print(conv1.shape)
        x = self.maxpool(conv1)
        # print(x.shape)
        conv2 = self.dconv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.dconv_down3(x)
        x = self.maxpool(conv3)
        conv4 = self.center(x)
        # --- Decoder with skip connections ---
        x = self.deconv3(conv4) # 512 256
        x = torch.cat([conv3,x],1) # 256 + 256
        x = self.decoder_3(x) # 512 256
        x = self.deconv2(x)
        x = torch.cat([conv2,x],1)
        x = self.decoder_2(x)
        x = self.deconv1(x)
        x = torch.cat([conv1,x],1)
        x = self.decoder_1(x)
        x = self.output_1(x)
        out = self.rrm(x)
        # --- HED side outputs, upsampled to the input resolution ---
        d1 = self.dsn1(conv1)
        d2 = F.upsample_bilinear(self.dsn2(conv2), size=(h,w))
        d3 = F.upsample_bilinear(self.dsn3(conv3), size=(h,w))
        d4 = F.upsample_bilinear(self.dsn4(conv4), size=(h,w))
        d1_out = F.sigmoid(d1)
        d2_out = F.sigmoid(d2)
        d3_out = F.sigmoid(d3)
        d4_out = F.sigmoid(d4)
        # --- Boundary branch: SE-style reweighting of the side outputs ---
        concat = torch.cat((d1_out, d2_out, d3_out, d4_out), 1)
        fuse_box = self.fuse(concat)
        GAP = F.adaptive_avg_pool2d(fuse_box,(1,1))
        GAP = GAP.view(-1, 64)
        se_like = self.SE_mimic(GAP)
        se_like = torch.unsqueeze(se_like, 2)
        se_like = torch.unsqueeze(se_like, 3)
        feat_se = concat * se_like.expand_as(concat)
        boundary = self.final_boundary(feat_se)
        boundary_out = torch.unsqueeze(boundary[:,1,:,:],1)
        bd_sftmax = F.softmax(boundary, dim=1)
        boundary_scale = torch.unsqueeze(bd_sftmax[:,1,:,:],1)
        # --- Mask branch fused with the boundary features ---
        feat_concat = torch.cat( [out, fuse_box], 1)
        feat_concat_conv = self.final_conv(feat_concat)
        mask = self.final_mask(feat_concat_conv)
        mask_sftmax = F.softmax(mask,dim=1)
        mask_scale = torch.unsqueeze(mask_sftmax[:,1,:,:],1)
        # Boundary contribution is amplified (x5) at inference time.
        if self.mode == 'Train':
            scalefactor = torch.clamp(mask_scale+boundary_scale,0,1)
        elif self.mode == 'Infer':
            scalefactor = torch.clamp(mask_scale+5*boundary_scale,0,1)
        mask_out = torch.unsqueeze(mask[:,1,:,:],1)
        relu = self.relu(mask_out)
        scalar = relu.cpu().detach().numpy()
        # Mean of the positive activations; 0 when the map is all-negative.
        if np.sum(scalar) == 0:
            average = 0
        else :
            average = scalar[np.nonzero(scalar)].mean()
        # Replace positive activations with (mean * scalefactor).
        mask_out = mask_out-relu + (average*scalefactor)
        if self.mode == 'Train':
            mask_out = F.sigmoid(mask_out)
            boundary_out = F.sigmoid(boundary_out)
            return d1_out, d2_out, d3_out, d4_out, boundary_out, mask_out
        elif self.mode =='Infer':
return mask_out | nets/zoo/brrnet_BE.py | import torch
import torch.nn as nn
import skimage
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
def conv_block(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(num_features=out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(num_features=out_channels),
nn.ReLU(inplace=True)
)
def up_transpose(in_channels, out_channels):
return nn.Sequential(
nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, output_padding=1)
)
class center_block(nn.Module):
def __init__(self, in_channels, out_channels):
super(center_block, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1,dilation=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=2,dilation=2)
self.conv3 = nn.Conv2d(out_channels, out_channels, 3, padding=4,dilation=4)
self.conv4 = nn.Conv2d(out_channels, out_channels, 3, padding=8,dilation=8)
self.conv5 = nn.Conv2d(out_channels, out_channels, 3, padding=16,dilation=16)
self.conv6 = nn.Conv2d(out_channels, out_channels, 3, padding=32,dilation=32)
self.bn_1 = nn.BatchNorm2d(num_features=out_channels)
self.bn_2 = nn.BatchNorm2d(num_features=out_channels)
self.bn_3 = nn.BatchNorm2d(num_features=out_channels)
self.bn_4 = nn.BatchNorm2d(num_features=out_channels)
self.bn_5 = nn.BatchNorm2d(num_features=out_channels)
self.bn_6 = nn.BatchNorm2d(num_features=out_channels)
self.relu = nn.ReLU()
def forward(self,x):# 지금 rrm쪽이랑 센터랑 섞임..
x1 = self.relu(self.bn_1(self.conv1(x)))
x2 = self.relu(self.bn_2(self.conv2(x1)))
x3 = self.relu(self.bn_3(self.conv3(x2)))
x4 = self.relu(self.bn_4(self.conv4(x3)))
x5 = self.relu(self.bn_5(self.conv5(x4)))
x6 = self.relu(self.bn_6(self.conv6(x5)))
x = x1+x2+x3+x4+x5+x6
return x
class rrm_module(nn.Module):
def __init__(self, in_channels, out_channels):
super(rrm_module,self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1,dilation=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=2,dilation=2)
self.conv3 = nn.Conv2d(out_channels, out_channels, 3, padding=4,dilation=4)
self.conv4 = nn.Conv2d(out_channels, out_channels, 3, padding=8,dilation=8)
self.conv5 = nn.Conv2d(out_channels, out_channels, 3, padding=16,dilation=16)
self.conv6 = nn.Conv2d(out_channels, out_channels, 3, padding=32,dilation=32)
self.bn_1 = nn.BatchNorm2d(num_features=out_channels)
self.bn_2 = nn.BatchNorm2d(num_features=out_channels)
self.bn_3 = nn.BatchNorm2d(num_features=out_channels)
self.bn_4 = nn.BatchNorm2d(num_features=out_channels)
self.bn_5 = nn.BatchNorm2d(num_features=out_channels)
self.bn_6 = nn.BatchNorm2d(num_features=out_channels)
self.relu = nn.ReLU()
# self.out = nn.Conv2d(out_channels, 1, 3, padding=1,dilation=1)
# BE mode
self.out = nn.Conv2d(out_channels, 64, 3, padding=1,dilation=1)
def forward(self,x):
residual = x
x1 = self.relu(self.bn_1(self.conv1(x)))
x2 = self.relu(self.bn_2(self.conv2(x1)))
x3 = self.relu(self.bn_3(self.conv3(x2)))
x4 = self.relu(self.bn_4(self.conv4(x3)))
x5 = self.relu(self.bn_5(self.conv5(x4)))
x6 = self.relu(self.bn_6(self.conv6(x5)))
x = x1+x2+x3+x4+x5+x6
x = self.out(x)
x = residual + x
output = x
# output = F.sigmoid(x)
return output
class decoder_block(nn.Module):
def __init__(self, in_channels, out_channels):
super(decoder_block,self).__init__()
self.bn_i = nn.BatchNorm2d(num_features=in_channels)
self.relu = nn.ReLU()
self.conv = conv_block(in_channels, out_channels)
def forward(self, x):
out = self.bn_i(x)
out = self.relu(out)
out = self.conv(out)
return out
class BRRNet_BE(nn.Module):
def __init__(self, n_class=1, pretrained=False, mode= 'Train'):
super().__init__()
self.mode = mode
self.dconv_down1 = conv_block(3, 64)
self.dconv_down2 = conv_block(64, 128)
self.dconv_down3 = conv_block(128, 256)
self.maxpool = nn.MaxPool2d(2,2)
self.center = center_block(256,512)
self.deconv3 = up_transpose(512,256)
self.deconv2 = up_transpose(256,128)
self.deconv1 = up_transpose(128,64)
self.decoder_3 = decoder_block(512, 256)
self.decoder_2 = decoder_block(256, 128)
self.decoder_1 = decoder_block(128, 64)
# self.output_1 = nn.Conv2d(64,n_class, 1)
# self.rrm = rrm_module(1,64)
# BE mode
self.output_1 = nn.Conv2d(64,64, 1)
self.rrm = rrm_module(64,64)
# HED Block
self.dsn1 = nn.Conv2d(64, 1, 1)
self.dsn2 = nn.Conv2d(128, 1, 1)
self.dsn3 = nn.Conv2d(256, 1, 1)
self.dsn4 = nn.Conv2d(512, 1, 1)
#boundary enhancement part
self.fuse = nn.Sequential(nn.Conv2d(4, 64, 1),nn.ReLU(inplace=True))
self.SE_mimic = nn.Sequential(
nn.Linear(64, 64, bias=False),
nn.ReLU(inplace=True),
nn.Linear(64, 4, bias=False),
nn.Sigmoid()
)
self.final_boundary = nn.Conv2d(4,2,1)
self.final_conv = nn.Sequential(
nn.Conv2d(128,64,3, padding=1),
nn.ReLU(inplace=True)
)
self.final_mask = nn.Conv2d(64,2,1)
self.relu = nn.ReLU()
self.out = nn.Conv2d(64,1,1)
def forward(self, x):
h = x.size(2)
w = x.size(3)
conv1 = self.dconv_down1(x)
# print(conv1.shape)
x = self.maxpool(conv1)
# print(x.shape)
conv2 = self.dconv_down2(x)
x = self.maxpool(conv2)
conv3 = self.dconv_down3(x)
x = self.maxpool(conv3)
conv4 = self.center(x)
x = self.deconv3(conv4) # 512 256
x = torch.cat([conv3,x],1) # 256 + 256
x = self.decoder_3(x) # 512 256
x = self.deconv2(x)
x = torch.cat([conv2,x],1)
x = self.decoder_2(x)
x = self.deconv1(x)
x = torch.cat([conv1,x],1)
x = self.decoder_1(x)
x = self.output_1(x)
out = self.rrm(x)
d1 = self.dsn1(conv1)
d2 = F.upsample_bilinear(self.dsn2(conv2), size=(h,w))
d3 = F.upsample_bilinear(self.dsn3(conv3), size=(h,w))
d4 = F.upsample_bilinear(self.dsn4(conv4), size=(h,w))
d1_out = F.sigmoid(d1)
d2_out = F.sigmoid(d2)
d3_out = F.sigmoid(d3)
d4_out = F.sigmoid(d4)
concat = torch.cat((d1_out, d2_out, d3_out, d4_out), 1)
fuse_box = self.fuse(concat)
GAP = F.adaptive_avg_pool2d(fuse_box,(1,1))
GAP = GAP.view(-1, 64)
se_like = self.SE_mimic(GAP)
se_like = torch.unsqueeze(se_like, 2)
se_like = torch.unsqueeze(se_like, 3)
feat_se = concat * se_like.expand_as(concat)
boundary = self.final_boundary(feat_se)
boundary_out = torch.unsqueeze(boundary[:,1,:,:],1)
bd_sftmax = F.softmax(boundary, dim=1)
boundary_scale = torch.unsqueeze(bd_sftmax[:,1,:,:],1)
feat_concat = torch.cat( [out, fuse_box], 1)
feat_concat_conv = self.final_conv(feat_concat)
mask = self.final_mask(feat_concat_conv)
mask_sftmax = F.softmax(mask,dim=1)
mask_scale = torch.unsqueeze(mask_sftmax[:,1,:,:],1)
if self.mode == 'Train':
scalefactor = torch.clamp(mask_scale+boundary_scale,0,1)
elif self.mode == 'Infer':
scalefactor = torch.clamp(mask_scale+5*boundary_scale,0,1)
mask_out = torch.unsqueeze(mask[:,1,:,:],1)
relu = self.relu(mask_out)
scalar = relu.cpu().detach().numpy()
if np.sum(scalar) == 0:
average = 0
else :
average = scalar[np.nonzero(scalar)].mean()
mask_out = mask_out-relu + (average*scalefactor)
if self.mode == 'Train':
mask_out = F.sigmoid(mask_out)
boundary_out = F.sigmoid(boundary_out)
return d1_out, d2_out, d3_out, d4_out, boundary_out, mask_out
elif self.mode =='Infer':
return mask_out | 0.939165 | 0.395981 |
import io
import requests
from bs4 import BeautifulSoup
from collections import OrderedDict
from requests_toolbelt.multipart.encoder import MultipartEncoder
from watchdogs.utils import Cast
from watchdogs.base.models import AllArgs, Common
from watchdogs.web.models import Response, WebFile
from watchdogs.web.models.Requests import Request
from watchdogs.web.parsers import RequestArgs
from watchdogs.utils.Constants import (EMPTY, LFRN, HTTP, HTTP_PROTOCOL, HTTPS_PROTOCOL, HTTPS, HTML_PARSER,
CONTENT_LENGTH, UTF8)
class RequestResponseService(Common):
def __init__(self):  # type: () -> None
    """Initialize shared Common state; no service-specific setup needed."""
    super(RequestResponseService, self).__init__()
def printRequest(self, request):  # type: (Request) -> None
    """Pretty-print a Request: info block, headers, then the body.

    Bug fix: the original iterated ``__dict__`` directly, which yields
    only the attribute *names*; unpacking them into (key, value) pairs
    raised ValueError. Iterate ``__dict__.items()`` instead.
    """
    format = '{}: {}'
    info = []
    for infoKey, infoValue in request.getRequestInfo().__dict__.items():
        info.append(format.format(infoKey, infoValue))
    headers = []
    for headersKey, headersValue in request.getRequestHeaders().__dict__.items():
        headers.append(format.format(headersKey, headersValue))
    body = []
    if (request.getRequestBodyString()):
        # A raw string body takes precedence over the dict form.
        body.append(request.getRequestBodyString())
    else:
        for k, v in request.getRequestBodyDict().items():
            body.append(format.format(k, v))
    print('{}{}{}{}{}{}{}{}{}{}{}'.format(LFRN, '-----------Request Start-----------',
                                          LFRN, LFRN.join(info), LFRN, LFRN.join(headers), LFRN,
                                          LFRN.join(body), LFRN, '----------- Request End ------------',
                                          LFRN))
def getUrl(self, host, secure, endpoint=EMPTY):  # type: (str, bool, str) -> str
    """Build a full URL from host plus optional endpoint.

    If ``host`` already contains the protocol marker it is used verbatim;
    otherwise the http/https protocol prefix is prepended depending on
    ``secure``.

    NOTE(review): ``standardProtocol in host`` is a substring test, so a
    host string merely *containing* "http" anywhere would skip protocol
    prepending -- confirm this is intended.
    """
    standardProtocol = HTTP
    if (standardProtocol in host):
        return "{}{}".format(host, endpoint)
    else:
        protocol = HTTP_PROTOCOL
        if (secure):
            protocol = HTTPS_PROTOCOL
        return "{}{}{}".format(protocol, host, endpoint)
def getRequestBody(self, request):  # type: (Request) -> OrderedDict | str
    """Return the request payload: raw string or a multipart encoder.

    A string body is returned verbatim. Otherwise, WebFile entries in the
    body dict are converted in place to (filename, stream, content-type)
    tuples and the dict is wrapped in a MultipartEncoder.

    Returns None implicitly when neither body form is set.
    """
    if (request.getRequestBodyString()):
        return request.getRequestBodyString()
    elif (request.getRequestBodyDict()):
        requestBodyDict = request.getRequestBodyDict()
        for requestBodyKey in requestBodyDict:
            requestBodyValue = requestBodyDict[requestBodyKey]
            if (isinstance(requestBodyValue, WebFile)):
                # requests_toolbelt expects (name, fileobj, content_type).
                webFile = Cast._to(WebFile, requestBodyValue)
                postFileIO = io.BytesIO(webFile.getContent())
                requestBodyDict[requestBodyKey] = (webFile.getFileName(), postFileIO, webFile.getContentType())
        return MultipartEncoder(fields=requestBodyDict, boundary=request.getRequestBoundary())
def getProxies(self, requestArgs): #type: (RequestArgs) -> dict
proxies = {}
if (requestArgs.httpProxy):
proxies[HTTP] = self.getUrl(requestArgs.httpProxy, False)
elif (requestArgs.httpsProxy):
proxies[HTTPS] = self.getUrl(requestArgs.httpsProxy, True)
return proxies
def printResponse(self, allArgs, response): #type: (AllArgs, Response) -> None
requestArgs = allArgs.getArgs(RequestArgs)
responseStatus = response.getResponseStatus()
responseLength = response.getResponseLength()
responseSoup = response.getResponseSoup()
responseString = "Response status: {} - Response length: {}".format(responseStatus, responseLength)
if (requestArgs.showResponse):
responseString = "Response body: {}{}{}".format(LFRN, responseSoup.encode(UTF8), LFRN) + responseString
print(responseString)
def filterResponse(self, allArgs, response): #type: (AllArgs, Response) -> None
requestArgs = allArgs.getArgs(RequestArgs)
responseSoup = response.getResponseSoup()
responseStatus = response.getResponseStatus()
responseLength = response.getResponseLength()
shouldReturn = False
if (requestArgs.filterLength and responseLength and responseLength in requestArgs.filterLength):
shouldReturn = True
if (requestArgs.filterStatus and responseStatus and not str(responseStatus) in requestArgs.filterStatus):
shouldReturn = True
if (requestArgs.filterIn and not requestArgs.filterIn.lower() in responseSoup.lower()):
shouldReturn = True
if (requestArgs.filterOut and requestArgs.filterOut.lower() in responseSoup.lower()):
shouldReturn = True
return shouldReturn
def getFinalResponse(self, response): #type: (requests.models.Response) -> Response
responseSoup = BeautifulSoup(response.text, HTML_PARSER).prettify().rstrip()
responseStatus = response.status_code
responseLength = EMPTY
try:
responseLength = response.headers.get(CONTENT_LENGTH)
if(responseLength == None):
responseLength = 0
except:
print("An exception occurred trying to retrieve header {}".format(CONTENT_LENGTH))
responseLength = 0
return Response(response, responseSoup, responseStatus, responseLength)
def handleResponse(self, allArgs, response):
#type: (AllArgs, requests.models.Response) -> None
finalResponse = self.getFinalResponse(response)
if (self.filterResponse(allArgs, finalResponse)):
return
self.printResponse(allArgs, finalResponse)
def sendRequest(self, allArgs, request): #type: (AllArgs, Request) -> requests.models.Response
requestArgs = allArgs.getArgs(RequestArgs)
requestInfo = request.getRequestInfo()
requestHeaders = request.getRequestHeaders()
requestBody = self.getRequestBody(request)
requestUrl = self.getUrl(requestInfo.getUrlHost(), requestArgs.secure, requestInfo.getEndpoint())
request.setRequestUrl(requestUrl)
req = requests.Request(requestInfo.getMethod(), requestUrl, headers=requestHeaders, data=requestBody)
preparedRequest = req.prepare()
session = requests.Session()
session.proxies = self.getProxies(requestArgs)
session.verify = not requestArgs.disableVerification
response = requests.models.Response()
try:
return session.send(preparedRequest, timeout=requestArgs.readTimeout)
except requests.exceptions.HTTPError:
response.status_code = 500
return response
except requests.exceptions.ConnectionError:
response.status_code = 502
return response
except requests.exceptions.Timeout:
response.status_code = 504
return response
except requests.exceptions.RequestException:
response.status_code = 500
return response
except Exception as e:
print("An exception while retrieving the response:\n", e)
return None | watchdogs/web/services/RequestResponseService.py |
import io
import requests
from bs4 import BeautifulSoup
from collections import OrderedDict
from requests_toolbelt.multipart.encoder import MultipartEncoder
from watchdogs.utils import Cast
from watchdogs.base.models import AllArgs, Common
from watchdogs.web.models import Response, WebFile
from watchdogs.web.models.Requests import Request
from watchdogs.web.parsers import RequestArgs
from watchdogs.utils.Constants import (EMPTY, LFRN, HTTP, HTTP_PROTOCOL, HTTPS_PROTOCOL, HTTPS, HTML_PARSER,
CONTENT_LENGTH, UTF8)
class RequestResponseService(Common):
    """Builds outgoing HTTP requests, sends them, and filters/prints the
    responses according to the parsed command-line arguments."""

    def __init__(self):  # type: () -> None
        super(RequestResponseService, self).__init__()

    def printRequest(self, request):  # type: (Request) -> None
        """Pretty-print the request info, headers and body between banners."""
        fmt = '{}: {}'  # renamed from `format` to avoid shadowing the builtin
        info = []
        # BUG FIX: iterating a dict yields only its keys, so the original
        # two-name unpacking raised ValueError; .items() is required.
        for infoKey, infoValue in request.getRequestInfo().__dict__.items():
            info.append(fmt.format(infoKey, infoValue))
        headers = []
        for headersKey, headersValue in request.getRequestHeaders().__dict__.items():
            headers.append(fmt.format(headersKey, headersValue))
        body = []
        if request.getRequestBodyString():
            body.append(request.getRequestBodyString())
        else:
            for k, v in request.getRequestBodyDict().items():
                body.append(fmt.format(k, v))
        print('{}{}{}{}{}{}{}{}{}{}{}'.format(
            LFRN, '-----------Request Start-----------',
            LFRN, LFRN.join(info), LFRN, LFRN.join(headers), LFRN,
            LFRN.join(body), LFRN, '----------- Request End ------------',
            LFRN))

    def getUrl(self, host, secure, endpoint=EMPTY):  # type: (str, bool, str) -> str
        """Return host+endpoint, prefixing a protocol unless `host` already
        contains one (simple 'http' substring check, as before)."""
        if HTTP in host:
            return "{}{}".format(host, endpoint)
        protocol = HTTPS_PROTOCOL if secure else HTTP_PROTOCOL
        return "{}{}{}".format(protocol, host, endpoint)

    def getRequestBody(self, request):  # type: (Request) -> OrderedDict | str
        """Return the raw body string, or a MultipartEncoder built from the
        body dict (WebFile values become (name, stream, content-type) tuples).
        Returns None when the request carries no body."""
        if request.getRequestBodyString():
            return request.getRequestBodyString()
        elif request.getRequestBodyDict():
            requestBodyDict = request.getRequestBodyDict()
            for requestBodyKey in requestBodyDict:
                requestBodyValue = requestBodyDict[requestBodyKey]
                if isinstance(requestBodyValue, WebFile):
                    webFile = Cast._to(WebFile, requestBodyValue)
                    postFileIO = io.BytesIO(webFile.getContent())
                    requestBodyDict[requestBodyKey] = (
                        webFile.getFileName(), postFileIO, webFile.getContentType())
            return MultipartEncoder(fields=requestBodyDict,
                                    boundary=request.getRequestBoundary())

    def getProxies(self, requestArgs):  # type: (RequestArgs) -> dict
        """Map the optional proxy arguments to a requests-style proxies dict.
        NOTE(review): an https proxy is ignored whenever an http proxy is
        also given (elif) -- confirm this is intended."""
        proxies = {}
        if requestArgs.httpProxy:
            proxies[HTTP] = self.getUrl(requestArgs.httpProxy, False)
        elif requestArgs.httpsProxy:
            proxies[HTTPS] = self.getUrl(requestArgs.httpsProxy, True)
        return proxies

    def printResponse(self, allArgs, response):  # type: (AllArgs, Response) -> None
        """Print status/length, prefixed by the body when showResponse is set."""
        requestArgs = allArgs.getArgs(RequestArgs)
        responseString = "Response status: {} - Response length: {}".format(
            response.getResponseStatus(), response.getResponseLength())
        if requestArgs.showResponse:
            responseString = "Response body: {}{}{}".format(
                LFRN, response.getResponseSoup().encode(UTF8), LFRN) + responseString
        print(responseString)

    def filterResponse(self, allArgs, response):  # type: (AllArgs, Response) -> bool
        """Return True when the response should be suppressed by the filter
        arguments.  (Type comment fixed: the original said -> None.)"""
        requestArgs = allArgs.getArgs(RequestArgs)
        responseSoup = response.getResponseSoup()
        responseStatus = response.getResponseStatus()
        responseLength = response.getResponseLength()
        if requestArgs.filterLength and responseLength and responseLength in requestArgs.filterLength:
            return True
        # NOTE(review): status filtering is inverted relative to length
        # filtering (suppresses statuses NOT in the list) -- confirm intended.
        if requestArgs.filterStatus and responseStatus and str(responseStatus) not in requestArgs.filterStatus:
            return True
        if requestArgs.filterIn and requestArgs.filterIn.lower() not in responseSoup.lower():
            return True
        if requestArgs.filterOut and requestArgs.filterOut.lower() in responseSoup.lower():
            return True
        return False

    def getFinalResponse(self, response):  # type: (requests.models.Response) -> Response
        """Wrap a raw requests response in the project's Response model,
        deriving a prettified soup and a best-effort content length."""
        responseSoup = BeautifulSoup(response.text, HTML_PARSER).prettify().rstrip()
        try:
            responseLength = response.headers.get(CONTENT_LENGTH)
            if responseLength is None:  # `== None` replaced by identity check
                responseLength = 0
        except Exception:  # narrowed from a bare `except:`
            print("An exception occurred trying to retrieve header {}".format(CONTENT_LENGTH))
            responseLength = 0
        return Response(response, responseSoup, response.status_code, responseLength)

    def handleResponse(self, allArgs, response):
        # type: (AllArgs, requests.models.Response) -> None
        """Filter the response and, if it passes, print it."""
        finalResponse = self.getFinalResponse(response)
        if self.filterResponse(allArgs, finalResponse):
            return
        self.printResponse(allArgs, finalResponse)

    def sendRequest(self, allArgs, request):  # type: (AllArgs, Request) -> requests.models.Response
        """Prepare and send the request; on failure return a stub response
        whose status encodes the failure class (500/502/504), or None for
        unexpected errors.  Handler order preserved from the original
        (ConnectTimeout is a ConnectionError, so it maps to 502)."""
        requestArgs = allArgs.getArgs(RequestArgs)
        requestInfo = request.getRequestInfo()
        requestUrl = self.getUrl(requestInfo.getUrlHost(), requestArgs.secure,
                                 requestInfo.getEndpoint())
        request.setRequestUrl(requestUrl)
        req = requests.Request(requestInfo.getMethod(), requestUrl,
                               headers=request.getRequestHeaders(),
                               data=self.getRequestBody(request))
        preparedRequest = req.prepare()
        session = requests.Session()
        session.proxies = self.getProxies(requestArgs)
        session.verify = not requestArgs.disableVerification
        fallback = requests.models.Response()
        try:
            return session.send(preparedRequest, timeout=requestArgs.readTimeout)
        except requests.exceptions.HTTPError:
            fallback.status_code = 500
            return fallback
        except requests.exceptions.ConnectionError:
            fallback.status_code = 502
            return fallback
        except requests.exceptions.Timeout:
            fallback.status_code = 504
            return fallback
        except requests.exceptions.RequestException:
            fallback.status_code = 500
            return fallback
        except Exception as e:
            print("An exception while retrieving the response:\n", e)
            return None
def get_target_area(raw):
    """Parse a line "target area: x=A..B, y=C..D" into {"x": [A, B], "y": [C, D]}."""
    x_part, y_part = raw.replace("target area: x=", "").split(", y=")
    return {
        "x": [int(v) for v in x_part.split("..")],
        "y": [int(v) for v in y_part.split("..")],
    }
class Probe():
    """Simulates one launch and records whether the probe hits the target."""

    def __init__(self, x, y, target_area):
        self.name = (x, y)
        self.x_velocity = x
        self.y_velocity = y
        self.is_in_target_area = False
        self.trajectory = [(0, 0)]
        self.calculate_trajectory(target_area)

    def step(self):
        """Advance one tick: move by the current velocity, then apply drag
        (x velocity decays toward 0) and gravity (y velocity drops by 1)."""
        px, py = self.trajectory[-1]
        self.trajectory.append((px + self.x_velocity, py + self.y_velocity))
        if self.x_velocity > 0:
            self.x_velocity -= 1
        elif self.x_velocity < 0:
            self.x_velocity += 1
        self.y_velocity -= 1

    def calculate_trajectory(self, target_area):
        """Step until the probe lands in the target or is provably past it."""
        x_lo, x_hi = target_area["x"]
        y_lo, y_hi = target_area["y"]
        while True:
            self.step()
            px, py = self.trajectory[-1]
            if px > x_hi or py < y_lo:
                break
            if x_lo <= px <= x_hi and y_lo <= py <= y_hi:
                self.is_in_target_area = True
                break
def print_probe_trajectory(target_area, probe):
"""
Prints probe trajectory. Helps with debugging.
"""
for x in range(10, target_area["y"][0] - 5, -1):
for y in range(target_area["x"][1] + 10):
if (x, y) == (0, 0):
print("S", end="")
elif (y, x) in probe.trajectory:
print("#", end="")
elif x in range(target_area["y"][0], target_area["y"][1]+1) and y in range(target_area["x"][0], target_area["x"][1]+1):
print("T", end="")
else:
print(".", end="")
print()
# Puzzle input
target_area = get_target_area("target area: x=269..292, y=-68..-44")
# Puzzle 1 + Puzzle 2: brute-force every plausible launch velocity.
max_y = 0
result = 0
for vx in range(1, target_area["x"][1] + 1):
    for vy in range(target_area["y"][0] - 1, 100):
        probe = Probe(vx, vy, target_area)
        if not probe.is_in_target_area:
            continue
        result += 1
        peak = max(position[1] for position in probe.trajectory)
        if peak > max_y:
            max_y = peak
print(f"Puzzle 1 = {max_y}")
# BUG FIX: stray dataset-metadata tokens ("| day17/puzzle.py |") were fused
# onto this line, making it a syntax error; the print and the function start
# are now separate statements.
print(f"Puzzle 2 = {result}")
def get_target_area(raw):
    """Parse a line "target area: x=A..B, y=C..D" into {"x": [A, B], "y": [C, D]}."""
    target_area = {}
    raw = raw.replace("target area: x=", "").split(", y=")
    target_area["x"] = [int(x) for x in raw[0].split("..")]
    target_area["y"] = [int(x) for x in raw[1].split("..")]
    return target_area
class Probe():
    """Simulates one launch and records whether the probe hits the target."""

    def __init__(self, x, y, target_area):
        self.name = (x, y)
        self.x_velocity = x
        self.y_velocity = y
        self.is_in_target_area = False
        self.trajectory = [(0, 0)]
        self.calculate_trajectory(target_area)

    def step(self):
        """Advance one tick: move by the current velocity, then apply drag
        (x velocity decays toward 0) and gravity (y velocity drops by 1)."""
        px, py = self.trajectory[-1]
        self.trajectory.append((px + self.x_velocity, py + self.y_velocity))
        if self.x_velocity > 0:
            self.x_velocity -= 1
        elif self.x_velocity < 0:
            self.x_velocity += 1
        self.y_velocity -= 1

    def calculate_trajectory(self, target_area):
        """Step until the probe lands in the target or is provably past it."""
        x_lo, x_hi = target_area["x"]
        y_lo, y_hi = target_area["y"]
        while True:
            self.step()
            px, py = self.trajectory[-1]
            if px > x_hi or py < y_lo:
                break
            if x_lo <= px <= x_hi and y_lo <= py <= y_hi:
                self.is_in_target_area = True
                break
def print_probe_trajectory(target_area, probe):
"""
Prints probe trajectory. Helps with debugging.
"""
for x in range(10, target_area["y"][0] - 5, -1):
for y in range(target_area["x"][1] + 10):
if (x, y) == (0, 0):
print("S", end="")
elif (y, x) in probe.trajectory:
print("#", end="")
elif x in range(target_area["y"][0], target_area["y"][1]+1) and y in range(target_area["x"][0], target_area["x"][1]+1):
print("T", end="")
else:
print(".", end="")
print()
# Puzzle input
target_area = get_target_area("target area: x=269..292, y=-68..-44")
# Puzzle 1 + Puzzle 2: brute-force every plausible launch velocity.
max_y = 0
result = 0
for x in range(1, target_area["x"][1] + 1):
    for y in range(target_area["y"][0] - 1, 100):
        probe = Probe(x, y, target_area)
        if probe.is_in_target_area:
            result += 1
            for coor in probe.trajectory:
                if max_y < coor[1]:
                    max_y = coor[1]
print(f"Puzzle 1 = {max_y}")
# BUG FIX: stray dataset-metadata tokens ("| 0.322419 | 0.430506 |") removed
# from the line below; they made the statement raise TypeError at runtime.
print(f"Puzzle 2 = {result}")
from tkinter import *

# Application state shared by the button handlers below.
root = Tk()
root.title("Calculator")
result = 0   # last computed value
f_num = 0    # first operand, banked when an operator button is pressed
# BUG FIX: equal() reads the lowercase global `operator`; the original
# initialised a dead `Operator` instead, so pressing "=" before any operator
# button raised NameError.
operator = ""
def click(number):
    """Append the pressed digit to whatever is already in the entry."""
    text = e.get()
    e.delete(0, END)
    e.insert(0, "{}{}".format(text, number))
def clear():
    """Wipe the entry widget."""
    e.delete(0, END)
def first_num():
    """Bank the entry's current value as the first operand and clear it."""
    global f_num
    f_num = int(e.get())
    e.delete(0, END)
def operation(sign):
    """Remember which operator button was pressed; bank the first operand."""
    global operator
    first_num()
    operator = sign
def equal():
    """Apply the banked operator to f_num and the entry's contents."""
    global result
    second = e.get()
    e.delete(0, END)
    if operator == "+":
        result = f_num + int(second)
    elif operator == "-":
        result = f_num - int(second)
    elif operator == "X":
        result = f_num * int(second)
    elif operator == "%":
        # The "%" button actually performs truncated division in this app.
        result = int(f_num / int(second))
    e.insert(0, result)
# --- Widget construction and layout --------------------------------------
e = Entry(root, width=40, borderwidth=3)
e.grid(row=0, column=0, columnspan=3, padx=10, pady=10)
# Digit buttons.
b_1 = Button(root, text="1", padx=40, pady=20, command=lambda: click(1))
b_2 = Button(root, text="2", padx=40, pady=20, command=lambda: click(2))
b_3 = Button(root, text="3", padx=40, pady=20, command=lambda: click(3))
b_4 = Button(root, text="4", padx=40, pady=20, command=lambda: click(4))
b_5 = Button(root, text="5", padx=40, pady=20, command=lambda: click(5))
b_6 = Button(root, text="6", padx=40, pady=20, command=lambda: click(6))
b_7 = Button(root, text="7", padx=40, pady=20, command=lambda: click(7))
b_8 = Button(root, text="8", padx=40, pady=20, command=lambda: click(8))
b_9 = Button(root, text="9", padx=40, pady=20, command=lambda: click(9))
b_0 = Button(root, text="0", padx=40, pady=20, command=lambda: click(0))
# Operator buttons; NOTE(review): the "%" button performs division.
b_add = Button(root, text="+", padx=40, pady=20, command=lambda: operation('+'))
b_subtract = Button(root, text="-", padx=40, pady=20, command=lambda: operation('-'))
b_multiply = Button(root, text="X", padx=40, pady=20, command=lambda: operation('X'))
b_divide = Button(root, text="%", padx=40, pady=20, command=lambda: operation('%'))
b_equal = Button(root, text="=", padx=40, pady=20, command=equal)
b_clear = Button(root, text="clear", padx=120, pady=20, command=clear)
# Grid layout: 7-8-9 on top, clear spanning the bottom row.
b_1.grid(row=3, column=0)
b_2.grid(row=3, column=1)
b_3.grid(row=3, column=2)
b_4.grid(row=2, column=0)
b_5.grid(row=2, column=1)
b_6.grid(row=2, column=2)
b_7.grid(row=1, column=0)
b_8.grid(row=1, column=1)
b_9.grid(row=1, column=2)
b_0.grid(row=4, column=0)
b_add.grid(row=4, column=1)
b_subtract.grid(row=4, column=2)
b_multiply.grid(row=5, column=0)
b_divide.grid(row=5, column=1)
b_equal.grid(row=5, column=2)
b_clear.grid(row=6, column=0, columnspan=3)
# BUG FIX: stray dataset-metadata tokens removed from the original final line.
mainloop()
from tkinter import *  # first statement of the next embedded copy of this script
# Application state shared by the button handlers below.
root = Tk()
root.title("Calculator")
result = 0   # last computed value
f_num = 0    # first operand, banked when an operator button is pressed
# BUG FIX: equal() reads the lowercase global `operator`; the original
# initialised a dead `Operator` instead, so pressing "=" before any operator
# button raised NameError.
operator = ""
def click(number):
    """Append the pressed digit to whatever is already in the entry."""
    text = e.get()
    e.delete(0, END)
    e.insert(0, "{}{}".format(text, number))
def clear():
    """Wipe the entry widget."""
    e.delete(0, END)
def first_num():
    """Bank the entry's current value as the first operand and clear it."""
    global f_num
    f_num = int(e.get())
    e.delete(0, END)
def operation(sign):
    """Remember which operator button was pressed; bank the first operand."""
    global operator
    first_num()
    operator = sign
def equal():
    """Apply the banked operator to f_num and the entry's contents."""
    global result
    second = e.get()
    e.delete(0, END)
    if operator == "+":
        result = f_num + int(second)
    elif operator == "-":
        result = f_num - int(second)
    elif operator == "X":
        result = f_num * int(second)
    elif operator == "%":
        # The "%" button actually performs truncated division in this app.
        result = int(f_num / int(second))
    e.insert(0, result)
# --- Widget construction and layout --------------------------------------
e = Entry(root, width=40, borderwidth=3)
e.grid(row=0, column=0, columnspan=3, padx=10, pady=10)
# Digit buttons.
b_1 = Button(root, text="1", padx=40, pady=20, command=lambda: click(1))
b_2 = Button(root, text="2", padx=40, pady=20, command=lambda: click(2))
b_3 = Button(root, text="3", padx=40, pady=20, command=lambda: click(3))
b_4 = Button(root, text="4", padx=40, pady=20, command=lambda: click(4))
b_5 = Button(root, text="5", padx=40, pady=20, command=lambda: click(5))
b_6 = Button(root, text="6", padx=40, pady=20, command=lambda: click(6))
b_7 = Button(root, text="7", padx=40, pady=20, command=lambda: click(7))
b_8 = Button(root, text="8", padx=40, pady=20, command=lambda: click(8))
b_9 = Button(root, text="9", padx=40, pady=20, command=lambda: click(9))
b_0 = Button(root, text="0", padx=40, pady=20, command=lambda: click(0))
# Operator buttons; NOTE(review): the "%" button performs division.
b_add = Button(root, text="+", padx=40, pady=20, command=lambda: operation('+'))
b_subtract = Button(root, text="-", padx=40, pady=20, command=lambda: operation('-'))
b_multiply = Button(root, text="X", padx=40, pady=20, command=lambda: operation('X'))
b_divide = Button(root, text="%", padx=40, pady=20, command=lambda: operation('%'))
b_equal = Button(root, text="=", padx=40, pady=20, command=equal)
b_clear = Button(root, text="clear", padx=120, pady=20, command=clear)
# Grid layout: 7-8-9 on top, clear spanning the bottom row.
b_1.grid(row=3, column=0)
b_2.grid(row=3, column=1)
b_3.grid(row=3, column=2)
b_4.grid(row=2, column=0)
b_5.grid(row=2, column=1)
b_6.grid(row=2, column=2)
b_7.grid(row=1, column=0)
b_8.grid(row=1, column=1)
b_9.grid(row=1, column=2)
b_0.grid(row=4, column=0)
b_add.grid(row=4, column=1)
b_subtract.grid(row=4, column=2)
b_multiply.grid(row=5, column=0)
b_divide.grid(row=5, column=1)
b_equal.grid(row=5, column=2)
b_clear.grid(row=6, column=0, columnspan=3)
# BUG FIX: stray dataset-metadata tokens removed from the original final line.
mainloop()
import pickle
import time
from .transaction import Transaction
from blockchain.crypto_tools import hash, sign, generateKeys, b58encode, b58decode, text2PublicKey, verify
import pickle
from pathlib import Path
class Block():
    """One block of the chain: id, creation timestamp, the coinbase
    transaction paying the miner, the regular transactions, the previous
    block's hash, its own hash and the miner's signature.  There is no
    nonce: the chain uses proof of stake, not proof of work."""

    def __init__(self, id=0, ts=None, coinbase=None, transactions=None, prevH=hash(b"")):
        """Create a block.

        BUG FIX: the original defaults (``ts=time.time()``,
        ``coinbase=Transaction()``, ``transactions=[]``) were evaluated once
        at definition time, so every default-constructed block shared one
        timestamp, one Transaction instance and one *mutable* list.  ``None``
        sentinels create them per call; explicit arguments behave as before.
        """
        self.id = id
        self.timestamp = time.time() if ts is None else ts
        self.coinbase = coinbase if coinbase is not None else Transaction()  # special miner-payment transaction
        self.transactions = transactions if transactions is not None else []
        self.prevH = prevH
        # Hash covers id + coinbase + transactions + prevH (see serialize()).
        self.hash = hash(self.serialize())
        # Placeholder until sign() is called.
        self.signature = hash(b"")

    def serialize_transactions(self):
        """Serialize the regular transactions, one per line, as bytes."""
        return bytes("\n".join([str(t.serialize()) for t in self.transactions]), "utf8")

    def serialize(self):
        """Return the signable byte representation of the block."""
        id_ = bytes(str(self.id), "utf8")
        coinb_ = self.coinbase.serialize()
        trans_ = self.serialize_transactions()
        prevH_ = bytes(self.prevH, "utf8")
        return id_ + coinb_ + trans_ + prevH_

    def sign(self, miner_private_key):
        """Sign the block with the miner's private key."""
        self.signature = sign(miner_private_key, self.serialize())

    def verify(self):
        """Verify every transaction, then the miner's signature.  The miner
        is identified by the coinbase's first output public key."""
        for transaction in self.transactions:
            if not transaction.verify():
                return False
        miner_public_key = text2PublicKey(self.coinbase.outputs[0].public_key)
        return verify(miner_public_key, self.serialize(), self.signature)

    def save(self, block_chain_path):
        """Pickle this block to <block_chain_path>/<id>.pkl."""
        block_chain_path = Path(block_chain_path)
        with open(str(block_chain_path / f"{self.id}.pkl"), "wb") as f:
            pickle.dump(self, f)

    def load(self, block_chain_path, block_id):
        """Load block ``block_id`` into this instance (docstring fixed: the
        original said "Save").

        NOTE(review): pickle.load on untrusted block files can execute
        arbitrary code -- only load from trusted storage.
        """
        block_chain_path = Path(block_chain_path)
        with open(str(block_chain_path / f"{block_id}.pkl"), "rb") as f:
            v = pickle.load(f)
        self.id = v.id
        self.timestamp = v.timestamp
        self.coinbase = v.coinbase
        self.transactions = v.transactions
        self.prevH = v.prevH
        self.hash = v.hash
        self.signature = v.signature
import pickle  # first statement of the next embedded copy of this module
import time
from .transaction import Transaction
from blockchain.crypto_tools import hash, sign, generateKeys, b58encode, b58decode, text2PublicKey, verify
import pickle
from pathlib import Path
class Block():
    """One block of the chain: id, creation timestamp, the coinbase
    transaction paying the miner, the regular transactions, the previous
    block's hash, its own hash and the miner's signature.  There is no
    nonce: the chain uses proof of stake, not proof of work."""

    def __init__(self, id=0, ts=None, coinbase=None, transactions=None, prevH=hash(b"")):
        """Create a block.

        BUG FIX: the original defaults (``ts=time.time()``,
        ``coinbase=Transaction()``, ``transactions=[]``) were evaluated once
        at definition time, so every default-constructed block shared one
        timestamp, one Transaction instance and one *mutable* list.  ``None``
        sentinels create them per call; explicit arguments behave as before.
        """
        self.id = id
        self.timestamp = time.time() if ts is None else ts
        self.coinbase = coinbase if coinbase is not None else Transaction()  # special miner-payment transaction
        self.transactions = transactions if transactions is not None else []
        self.prevH = prevH
        # Hash covers id + coinbase + transactions + prevH (see serialize()).
        self.hash = hash(self.serialize())
        # Placeholder until sign() is called.
        self.signature = hash(b"")

    def serialize_transactions(self):
        """Serialize the regular transactions, one per line, as bytes."""
        return bytes("\n".join([str(t.serialize()) for t in self.transactions]), "utf8")

    def serialize(self):
        """Return the signable byte representation of the block."""
        id_ = bytes(str(self.id), "utf8")
        coinb_ = self.coinbase.serialize()
        trans_ = self.serialize_transactions()
        prevH_ = bytes(self.prevH, "utf8")
        return id_ + coinb_ + trans_ + prevH_

    def sign(self, miner_private_key):
        """Sign the block with the miner's private key."""
        self.signature = sign(miner_private_key, self.serialize())

    def verify(self):
        """Verify every transaction, then the miner's signature.  The miner
        is identified by the coinbase's first output public key."""
        for transaction in self.transactions:
            if not transaction.verify():
                return False
        miner_public_key = text2PublicKey(self.coinbase.outputs[0].public_key)
        return verify(miner_public_key, self.serialize(), self.signature)

    def save(self, block_chain_path):
        """Pickle this block to <block_chain_path>/<id>.pkl."""
        block_chain_path = Path(block_chain_path)
        with open(str(block_chain_path / f"{self.id}.pkl"), "wb") as f:
            pickle.dump(self, f)

    def load(self, block_chain_path, block_id):
        """Load block ``block_id`` into this instance (docstring fixed: the
        original said "Save").

        NOTE(review): pickle.load on untrusted block files can execute
        arbitrary code -- only load from trusted storage.
        """
        block_chain_path = Path(block_chain_path)
        with open(str(block_chain_path / f"{block_id}.pkl"), "rb") as f:
            v = pickle.load(f)
        self.id = v.id
        self.timestamp = v.timestamp
        self.coinbase = v.coinbase
        self.transactions = v.transactions
        self.prevH = v.prevH
        self.hash = v.hash
        self.signature = v.signature
import os
import subprocess
import sys
from importlib import import_module
from tensorflow.keras import models
import numpy as np
from snntoolbox.bin.utils import initialize_simulator
from snntoolbox.bin.utils import run_pipeline
from snntoolbox.conversion.utils import normalize_parameters
from snntoolbox.datasets.utils import get_dataset
from snntoolbox.simulation.utils import spiketrains_to_rates
from snntoolbox.utils.utils import import_configparser, \
get_pearson_coefficients
from tests.conftest import spinnaker_skip_if_dependency_missing, \
loihi_skip_if_dependency_missing
from tests.conftest import nest_skip_if_dependency_missing
from tests.conftest import brian2_skip_if_dependency_missing
# configparser module resolved via the toolbox helper (presumably a
# py2/py3 compatibility shim -- TODO confirm against snntoolbox.utils.utils).
configparser = import_configparser()
def get_correlations(config):
    """Return the mean Pearson correlation per layer between the SNN's spike
    rates and the ANN's activations, read from the run's log archive."""
    log_path = os.path.join(config.get('paths', 'log_dir_of_current_run'),
                            'log_vars', '0.npz')
    log_vars = np.load(log_path, allow_pickle=True)
    rates = spiketrains_to_rates(
        log_vars['spiketrains_n_b_l_t'],
        config.getint('simulation', 'duration'),
        config.get('conversion', 'spike_code'))
    max_rate = 1. / config.getfloat('simulation', 'dt')
    coeffs = get_pearson_coefficients(rates, log_vars['activations_n_b_l'],
                                      max_rate)
    return np.mean(coeffs, axis=1)
class TestInputModel:
    """Test loading, parsing and evaluating an input ANN model."""

    def test_parsing(self, _model_2, _config):
        """Parsing must not change the model's test accuracy.

        ``_model_2`` / ``_config`` are pytest fixtures -- presumably a
        trained Keras model and a toolbox config; confirm in tests/conftest.
        """
        # Parsing removes BatchNorm layers, so we make a copy of the model.
        input_model = models.clone_model(_model_2)
        input_model.set_weights(_model_2.get_weights())
        input_model.compile(_model_2.optimizer.__class__.__name__,
                            _model_2.loss, _model_2.metrics)
        num_to_test = 10000
        batch_size = 100
        _config.set('simulation', 'batch_size', str(batch_size))
        _config.set('simulation', 'num_to_test', str(num_to_test))
        _, testset = get_dataset(_config)
        dataflow = testset['dataflow']
        # The parser lives in a model-library-specific module, e.g.
        # snntoolbox.parsing.model_libs.keras_input_lib.
        model_lib = import_module('snntoolbox.parsing.model_libs.' +
                                  _config.get('input', 'model_lib') +
                                  '_input_lib')
        model_parser = model_lib.ModelParser(input_model, _config)
        model_parser.parse()
        model_parser.build_parsed_model()
        _, acc, _ = model_parser.evaluate(batch_size, num_to_test,
                                          dataflow=dataflow)
        _, target_acc = _model_2.evaluate(dataflow,
                                          steps=int(num_to_test / batch_size))
        # The parsed model must reproduce the original accuracy exactly.
        assert acc == target_acc

    def test_normalizing(self, _model_2, _config):
        """Weight normalization must not change the parsed model's accuracy."""
        # Parsing removes BatchNorm layers, so we make a copy of the model.
        input_model = models.clone_model(_model_2)
        input_model.set_weights(_model_2.get_weights())
        input_model.compile(_model_2.optimizer.__class__.__name__,
                            _model_2.loss, _model_2.metrics)
        num_to_test = 10000
        batch_size = 100
        _config.set('simulation', 'batch_size', str(batch_size))
        _config.set('simulation', 'num_to_test', str(num_to_test))
        normset, testset = get_dataset(_config)
        dataflow = testset['dataflow']
        dataflow_norm = normset['dataflow']
        model_lib = import_module('snntoolbox.parsing.model_libs.' +
                                  _config.get('input', 'model_lib') +
                                  '_input_lib')
        model_parser = model_lib.ModelParser(input_model, _config)
        model_parser.parse()
        parsed_model = model_parser.build_parsed_model()
        # Rescale parameters using activation statistics from the norm set.
        normalize_parameters(parsed_model, _config, dataflow=dataflow_norm)
        _, acc, _ = model_parser.evaluate(batch_size, num_to_test,
                                          dataflow=dataflow)
        _, target_acc = _model_2.evaluate(dataflow,
                                          steps=int(num_to_test / batch_size))
        assert acc == target_acc
class TestOutputModel:
    """Test building, saving and running the converted SNN model."""

    def test_inisim(self, _model_2, _config):
        """Run the conversion pipeline on the built-in (INI) simulator."""
        path_wd = _config.get('paths', 'path_wd')
        model_name = _config.get('paths', 'filename_ann')
        models.save_model(_model_2, os.path.join(path_wd, model_name + '.h5'))
        updates = {
            'tools': {'evaluate_ann': False},
            'simulation': {
                'duration': 100,
                'num_to_test': 100,
                'batch_size': 50},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        acc = run_pipeline(_config)
        assert acc[0] >= 0.95
        corr = get_correlations(_config)
        # Hidden layers must correlate tightly with the ANN activations;
        # the output layer is allowed to be a little looser.
        assert np.all(corr[:-1] > 0.99)
        assert corr[-1] > 0.90

    @brian2_skip_if_dependency_missing
    def test_brian2(self, _model_1, _config):
        """Convert and run on the Brian2 simulator with Poisson input."""
        path_wd = _config.get('paths', 'path_wd')
        model_name = _config.get('paths', 'filename_ann')
        models.save_model(_model_1, os.path.join(path_wd, model_name + '.h5'))
        updates = {
            'tools': {'evaluate_ann': False},
            'input': {'poisson_input': True},
            'simulation': {
                'simulator': 'brian2',
                'duration': 200,
                'num_to_test': 100,
                'batch_size': 1,
                'dt': 0.1},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        initialize_simulator(_config)
        acc = run_pipeline(_config)
        assert acc[0] >= 0.95
        corr = get_correlations(_config)
        assert np.all(corr[:-1] > 0.97)
        assert corr[-1] > 0.5

    @nest_skip_if_dependency_missing
    def test_nest(self, _model_1, _config):
        """Convert and run on the NEST simulator (short run, 10 samples)."""
        path_wd = _config.get('paths', 'path_wd')
        model_name = _config.get('paths', 'filename_ann')
        models.save_model(_model_1, os.path.join(path_wd, model_name + '.h5'))
        updates = {
            'tools': {'evaluate_ann': False},
            'simulation': {
                'simulator': 'nest',
                'duration': 50,
                'num_to_test': 10,
                'batch_size': 1,
                'dt': 0.1},
            'cell': {
                'tau_refrac': 0.1,
                'delay': 0.1,
                'v_thresh': 0.01},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        initialize_simulator(_config)
        acc = run_pipeline(_config)
        assert acc[0] >= 0.95
        corr = get_correlations(_config)
        assert np.all(corr[:-1] > 0.97)
        assert corr[-1] > 0.5

    @spinnaker_skip_if_dependency_missing
    def test_spinnaker(self, _model_1, _config):
        """Convert and run on SpiNNaker hardware (single sample)."""
        path_wd = _config.get('paths', 'path_wd')
        model_name = _config.get('paths', 'filename_ann')
        models.save_model(_model_1, os.path.join(path_wd, model_name + '.h5'))
        updates = {
            'tools': {'evaluate_ann': False},
            'input': {'poisson_input': True},
            'simulation': {
                'simulator': 'spiNNaker',
                'duration': 100,
                'num_to_test': 1,  # smaller to make more feasible
                'batch_size': 1},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        initialize_simulator(_config)
        acc = run_pipeline(_config)
        # NOTE(review): with num_to_test=1 this accuracy bound is all-or-nothing.
        assert acc[0] >= 0.95
        corr = get_correlations(_config)
        assert np.all(corr[:-1] > 0.97)
        assert corr[-1] > 0.5

    @spinnaker_skip_if_dependency_missing
    def test_spinnaker_sparse(self, _model_3, _config):
        """Same as test_spinnaker but with the sparse model fixture."""
        path_wd = _config.get('paths', 'path_wd')
        model_name = _config.get('paths', 'filename_ann')
        models.save_model(_model_3, os.path.join(path_wd, model_name + '.h5'))
        updates = {
            'tools': {'evaluate_ann': False},
            'input': {'poisson_input': True},
            'simulation': {
                'simulator': 'spiNNaker',
                'duration': 100,
                'num_to_test': 1,  # smaller to make more feasible
                'batch_size': 1},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        initialize_simulator(_config)
        acc = run_pipeline(_config)
        assert acc[0] >= 0.95
        corr = get_correlations(_config)
        assert np.all(corr[:-1] > 0.97)
        assert corr[-1] > 0.5

    @loihi_skip_if_dependency_missing
    def test_loihi(self, _model_1, _config):
        """Convert and run on Intel Loihi with hardware-specific settings."""
        path_wd = _config.get('paths', 'path_wd')
        model_name = _config.get('paths', 'filename_ann')
        models.save_model(_model_1, os.path.join(path_wd, model_name + '.h5'))
        updates = {
            'tools': {'evaluate_ann': True,
                      'normalize': False},
            'loihi': {'reset_mode': 'soft',
                      'desired_threshold_to_input_ratio': 1,
                      'compartment_kwargs': {'biasExp': 6, 'vThMant': 512},
                      'connection_kwargs': {'numWeightBits': 8,
                                            'weightExponent': 0,
                                            'numBiasBits': 12},
                      'validate_partitions': False,
                      'save_output': False,
                      'do_overflow_estimate': False,
                      'normalize_thresholds': True},
            'simulation': {
                'simulator': 'loihi',
                'duration': 512,
                'num_to_test': 100,
                'batch_size': 20},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        initialize_simulator(_config)
        acc = run_pipeline(_config)
        assert acc[0] >= 0.95
        corr = get_correlations(_config)
        assert np.all(corr[:-1] > 0.97)
        assert corr[-1] > 0.5
class TestPipeline:
"""Test complete pipeline for a number of examples."""
def test_examples(self, _example_filepath):
returncode = subprocess.call([sys.executable, _example_filepath],
shell=True)
assert returncode == 0 | tests/core/test_models.py | import os
import subprocess
import sys
from importlib import import_module
from tensorflow.keras import models
import numpy as np
from snntoolbox.bin.utils import initialize_simulator
from snntoolbox.bin.utils import run_pipeline
from snntoolbox.conversion.utils import normalize_parameters
from snntoolbox.datasets.utils import get_dataset
from snntoolbox.simulation.utils import spiketrains_to_rates
from snntoolbox.utils.utils import import_configparser, \
get_pearson_coefficients
from tests.conftest import spinnaker_skip_if_dependency_missing, \
loihi_skip_if_dependency_missing
from tests.conftest import nest_skip_if_dependency_missing
from tests.conftest import brian2_skip_if_dependency_missing
# configparser module resolved via the toolbox helper (presumably a
# py2/py3 compatibility shim -- TODO confirm against snntoolbox.utils.utils).
configparser = import_configparser()
def get_correlations(config):
    """Return the mean Pearson correlation per layer between the SNN's spike
    rates and the ANN's activations, read from the run's log archive."""
    log_path = os.path.join(config.get('paths', 'log_dir_of_current_run'),
                            'log_vars', '0.npz')
    log_vars = np.load(log_path, allow_pickle=True)
    rates = spiketrains_to_rates(
        log_vars['spiketrains_n_b_l_t'],
        config.getint('simulation', 'duration'),
        config.get('conversion', 'spike_code'))
    max_rate = 1. / config.getfloat('simulation', 'dt')
    coeffs = get_pearson_coefficients(rates, log_vars['activations_n_b_l'],
                                      max_rate)
    return np.mean(coeffs, axis=1)
class TestInputModel:
    """Test loading, parsing and evaluating an input ANN model."""

    def test_parsing(self, _model_2, _config):
        """Parsing must not change the model's test accuracy.

        ``_model_2`` / ``_config`` are pytest fixtures -- presumably a
        trained Keras model and a toolbox config; confirm in tests/conftest.
        """
        # Parsing removes BatchNorm layers, so we make a copy of the model.
        input_model = models.clone_model(_model_2)
        input_model.set_weights(_model_2.get_weights())
        input_model.compile(_model_2.optimizer.__class__.__name__,
                            _model_2.loss, _model_2.metrics)
        num_to_test = 10000
        batch_size = 100
        _config.set('simulation', 'batch_size', str(batch_size))
        _config.set('simulation', 'num_to_test', str(num_to_test))
        _, testset = get_dataset(_config)
        dataflow = testset['dataflow']
        # The parser lives in a model-library-specific module, e.g.
        # snntoolbox.parsing.model_libs.keras_input_lib.
        model_lib = import_module('snntoolbox.parsing.model_libs.' +
                                  _config.get('input', 'model_lib') +
                                  '_input_lib')
        model_parser = model_lib.ModelParser(input_model, _config)
        model_parser.parse()
        model_parser.build_parsed_model()
        _, acc, _ = model_parser.evaluate(batch_size, num_to_test,
                                          dataflow=dataflow)
        _, target_acc = _model_2.evaluate(dataflow,
                                          steps=int(num_to_test / batch_size))
        # The parsed model must reproduce the original accuracy exactly.
        assert acc == target_acc

    def test_normalizing(self, _model_2, _config):
        """Weight normalization must not change the parsed model's accuracy."""
        # Parsing removes BatchNorm layers, so we make a copy of the model.
        input_model = models.clone_model(_model_2)
        input_model.set_weights(_model_2.get_weights())
        input_model.compile(_model_2.optimizer.__class__.__name__,
                            _model_2.loss, _model_2.metrics)
        num_to_test = 10000
        batch_size = 100
        _config.set('simulation', 'batch_size', str(batch_size))
        _config.set('simulation', 'num_to_test', str(num_to_test))
        normset, testset = get_dataset(_config)
        dataflow = testset['dataflow']
        dataflow_norm = normset['dataflow']
        model_lib = import_module('snntoolbox.parsing.model_libs.' +
                                  _config.get('input', 'model_lib') +
                                  '_input_lib')
        model_parser = model_lib.ModelParser(input_model, _config)
        model_parser.parse()
        parsed_model = model_parser.build_parsed_model()
        # Rescale parameters using activation statistics from the norm set.
        normalize_parameters(parsed_model, _config, dataflow=dataflow_norm)
        _, acc, _ = model_parser.evaluate(batch_size, num_to_test,
                                          dataflow=dataflow)
        _, target_acc = _model_2.evaluate(dataflow,
                                          steps=int(num_to_test / batch_size))
        assert acc == target_acc
class TestOutputModel:
    """Test building, saving and running the converted SNN model.

    The save/run/correlate boilerplate that was duplicated across every
    backend test is factored into the two private helpers below; each test
    method now only declares its backend-specific configuration.
    """

    @staticmethod
    def _save_model(model, config):
        """Save the ANN to the working directory under the configured name."""
        path_wd = config.get('paths', 'path_wd')
        model_name = config.get('paths', 'filename_ann')
        models.save_model(model, os.path.join(path_wd, model_name + '.h5'))

    @staticmethod
    def _run_and_check(config, corr_min, corr_last_min, init=True):
        """Run the conversion pipeline and assert accuracy and correlations.

        Args:
            config: fully updated toolbox configuration.
            corr_min: minimum ANN/SNN correlation for all but the last layer.
            corr_last_min: minimum correlation for the (softmax) output layer.
            init: whether the backend needs initialize_simulator first.
        """
        if init:
            initialize_simulator(config)
        acc = run_pipeline(config)
        assert acc[0] >= 0.95
        corr = get_correlations(config)
        assert np.all(corr[:-1] > corr_min)
        assert corr[-1] > corr_last_min

    def test_inisim(self, _model_2, _config):
        """Built-in INI simulator."""
        self._save_model(_model_2, _config)
        updates = {
            'tools': {'evaluate_ann': False},
            'simulation': {
                'duration': 100,
                'num_to_test': 100,
                'batch_size': 50},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        # INIsim needs no explicit simulator initialization.
        self._run_and_check(_config, 0.99, 0.90, init=False)

    @brian2_skip_if_dependency_missing
    def test_brian2(self, _model_1, _config):
        """Brian2 backend with Poisson input."""
        self._save_model(_model_1, _config)
        updates = {
            'tools': {'evaluate_ann': False},
            'input': {'poisson_input': True},
            'simulation': {
                'simulator': 'brian2',
                'duration': 200,
                'num_to_test': 100,
                'batch_size': 1,
                'dt': 0.1},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        self._run_and_check(_config, 0.97, 0.5)

    @nest_skip_if_dependency_missing
    def test_nest(self, _model_1, _config):
        """NEST backend."""
        self._save_model(_model_1, _config)
        updates = {
            'tools': {'evaluate_ann': False},
            'simulation': {
                'simulator': 'nest',
                'duration': 50,
                'num_to_test': 10,
                'batch_size': 1,
                'dt': 0.1},
            'cell': {
                'tau_refrac': 0.1,
                'delay': 0.1,
                'v_thresh': 0.01},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        self._run_and_check(_config, 0.97, 0.5)

    @spinnaker_skip_if_dependency_missing
    def test_spinnaker(self, _model_1, _config):
        """SpiNNaker backend with Poisson input."""
        self._save_model(_model_1, _config)
        updates = {
            'tools': {'evaluate_ann': False},
            'input': {'poisson_input': True},
            'simulation': {
                'simulator': 'spiNNaker',
                'duration': 100,
                'num_to_test': 1,  # smaller to make more feasible
                'batch_size': 1},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        self._run_and_check(_config, 0.97, 0.5)

    @spinnaker_skip_if_dependency_missing
    def test_spinnaker_sparse(self, _model_3, _config):
        """SpiNNaker backend on a sparse model."""
        self._save_model(_model_3, _config)
        updates = {
            'tools': {'evaluate_ann': False},
            'input': {'poisson_input': True},
            'simulation': {
                'simulator': 'spiNNaker',
                'duration': 100,
                'num_to_test': 1,  # smaller to make more feasible
                'batch_size': 1},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        self._run_and_check(_config, 0.97, 0.5)

    @loihi_skip_if_dependency_missing
    def test_loihi(self, _model_1, _config):
        """Loihi backend."""
        self._save_model(_model_1, _config)
        updates = {
            'tools': {'evaluate_ann': True,
                      'normalize': False},
            'loihi': {'reset_mode': 'soft',
                      'desired_threshold_to_input_ratio': 1,
                      'compartment_kwargs': {'biasExp': 6, 'vThMant': 512},
                      'connection_kwargs': {'numWeightBits': 8,
                                            'weightExponent': 0,
                                            'numBiasBits': 12},
                      'validate_partitions': False,
                      'save_output': False,
                      'do_overflow_estimate': False,
                      'normalize_thresholds': True},
            'simulation': {
                'simulator': 'loihi',
                'duration': 512,
                'num_to_test': 100,
                'batch_size': 20},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}
        _config.read_dict(updates)
        self._run_and_check(_config, 0.97, 0.5)
class TestPipeline:
"""Test complete pipeline for a number of examples."""
def test_examples(self, _example_filepath):
returncode = subprocess.call([sys.executable, _example_filepath],
shell=True)
assert returncode == 0 | 0.496338 | 0.279208 |
import cv2
import mediapipe as mp
import math
from imutils.video import VideoStream
from imutils.video import FileVideoStream
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
import collections
class PoseEstimator:
    """Track body landmarks with MediaPipe Pose, smooth them and draw them.

    Fixes over the previous revision:
    * ``write_image`` flipped the un-resized frame, silently discarding the
      resize result;
    * ``run_estimator`` looped forever on capture failure and ignored the
      quit key while no pose was detected;
    * the per-landmark extraction and drawing boilerplate is table-driven.
    """

    # Landmark names in the order their (x, y) pairs appear in the flat
    # coordinate tuple produced by get_pose_coords.
    _LANDMARK_NAMES = (
        'LEFT_WRIST', 'LEFT_ELBOW', 'LEFT_SHOULDER', 'LEFT_HIP',
        'LEFT_KNEE', 'LEFT_ANKLE',
        'RIGHT_WRIST', 'RIGHT_ELBOW', 'RIGHT_SHOULDER', 'RIGHT_HIP',
        'RIGHT_KNEE', 'RIGHT_ANKLE',
        'NOSE')

    # Skeleton segments drawn between named landmarks.
    _EDGES = (
        ('LEFT_SHOULDER', 'RIGHT_SHOULDER'),
        ('LEFT_SHOULDER', 'LEFT_ELBOW'),
        ('RIGHT_SHOULDER', 'RIGHT_ELBOW'),
        ('LEFT_ELBOW', 'LEFT_WRIST'),
        ('RIGHT_ELBOW', 'RIGHT_WRIST'),
        ('LEFT_SHOULDER', 'LEFT_HIP'),
        ('RIGHT_SHOULDER', 'RIGHT_HIP'),
        ('LEFT_HIP', 'RIGHT_HIP'),
        ('LEFT_HIP', 'LEFT_KNEE'),
        ('RIGHT_HIP', 'RIGHT_KNEE'),
        ('LEFT_KNEE', 'LEFT_ANKLE'),
        ('RIGHT_KNEE', 'RIGHT_ANKLE'))

    def __init__(self, window_size=8, smoothing_function=None):
        """Create the estimator.

        Args:
            window_size: number of recent frames considered for smoothing.
                The Savitzky-Golay filter requires an odd window, so an even
                value is reduced by one when smoothing_function == 'savgol'.
            smoothing_function: 'mean', 'savgol' or None (no smoothing).
        """
        if smoothing_function == 'savgol' and window_size % 2 == 0:
            self.window_size = window_size - 1
        else:
            self.window_size = window_size
        self.smoothing_function = smoothing_function
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_pose = mp.solutions.pose
        self.pose = self.mp_pose.Pose(static_image_mode=False,
                                      min_detection_confidence=0.1)
        self.writer = None
        # Rolling buffer of the most recent coordinate tuples.
        self.coords_array = []

    def get_pose_coords(self, image):
        """Return a flat (x, y, x, y, ...) tuple of pixel coordinates.

        The order follows _LANDMARK_NAMES.  Returns None when no pose is
        detected or processing fails.
        """
        try:
            height, width, _ = image.shape
            results = self.pose.process(
                cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            if not results.pose_landmarks:
                print('No poses detected')
                return None
            landmarks = results.pose_landmarks.landmark
            landmark_enum = self.mp_pose.PoseLandmark
            coords = []
            for name in self._LANDMARK_NAMES:
                point = landmarks[landmark_enum[name]]
                # Landmarks are normalized; scale to pixel coordinates.
                coords.append(point.x * width)
                coords.append(point.y * height)
            return tuple(coords)
        except Exception as e:
            # Best-effort: report and treat any failure as "no pose".
            print(e)
            return None

    def smoothen_coords(self, pose_coords):
        """Smooth pose_coords over the last window_size observations.

        Returns the smoothed coordinate tuple; the raw input is returned
        when no smoothing is configured or the savgol filter cannot run
        yet (buffer shorter than the window).
        """
        if len(self.coords_array) == self.window_size:
            self.coords_array.pop(0)
        self.coords_array.append(pose_coords)
        if self.smoothing_function == 'mean':
            smoothed = np.array(self.coords_array).mean(axis=0)
        elif self.smoothing_function == 'savgol':
            try:
                newest = lambda arr: savgol_filter(
                    arr, self.window_size, 1)[-1]
                smoothed = np.apply_along_axis(
                    newest, 0, np.array(self.coords_array))
                # Feed the filtered sample back into the buffer so later
                # windows build on the smoothed trajectory.
                self.coords_array.pop()
                self.coords_array.append(smoothed)
            except ValueError as ve:
                print(ve)
                return pose_coords
        else:
            return pose_coords
        return tuple(smoothed)

    def get_annotated_image(self, image, pose_coords):
        """Return a copy of image with joints and skeleton edges drawn."""
        annotated = image.copy()
        color = (0, 0, 255)
        raw = {name: (pose_coords[2 * i], pose_coords[2 * i + 1])
               for i, name in enumerate(self._LANDMARK_NAMES)}
        points = {name: (int(x), int(y)) for name, (x, y) in raw.items()}
        for point in points.values():
            cv2.circle(annotated, point, 10, color, -1)
        # Attach the nose to the midpoint of the shoulders ("neck").
        neck = (int((raw['LEFT_SHOULDER'][0] + raw['RIGHT_SHOULDER'][0]) / 2),
                int((raw['LEFT_SHOULDER'][1] + raw['RIGHT_SHOULDER'][1]) / 2))
        cv2.line(annotated, points['NOSE'], neck, color, 3)
        for start, end in self._EDGES:
            cv2.line(annotated, points[start], points[end], color, 3)
        return annotated

    def write_image(self, image):
        """Append image to the output video, display it, return the key.

        The writer is created lazily on the first frame, once the frame
        size is known.
        """
        if self.writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            self.writer = cv2.VideoWriter(
                "test6.mp4", fourcc, 25,
                (image.shape[1], image.shape[0]), True)
        self.writer.write(image)
        show = cv2.resize(image, None, fx=1, fy=1)
        # Bug fix: flip the resized frame (the old code flipped the
        # original, silently discarding the resize).
        show = cv2.flip(show, 1)
        cv2.imshow("Frame", show)
        return cv2.waitKey(1) & 0xFF

    def run_estimator(self):
        """Capture webcam frames, annotate detected poses and display them.

        Press 'q' to quit.  Releases the capture, the writer and the
        MediaPipe graph on exit.
        """
        capture = cv2.VideoCapture(0)
        try:
            while capture.isOpened():
                ret, image = capture.read()
                if not ret:
                    # Bug fix: stop on capture failure instead of spinning.
                    break
                try:
                    pose_coords = self.get_pose_coords(image)
                    if pose_coords is None:
                        # No pose: show the raw frame.  The quit key still
                        # works here (the old `continue` path skipped it).
                        key = self.write_image(image)
                    else:
                        pose_coords = self.smoothen_coords(pose_coords)
                        key = self.write_image(
                            self.get_annotated_image(image, pose_coords))
                except ValueError as ve:
                    print(ve)
                    key = self.write_image(image)
                if key == ord("q"):
                    break
        finally:
            cv2.destroyAllWindows()
            capture.release()
            if self.writer is not None:
                self.writer.release()
            self.pose.close()
# Guard the entry point: the webcam loop previously started as a module-level
# side effect, i.e. even on a plain import of this file.
if __name__ == "__main__":
    estimator = PoseEstimator(window_size=8)
    estimator.run_estimator()


import cv2
import mediapipe as mp
import math
from imutils.video import VideoStream
from imutils.video import FileVideoStream
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
import collections
class PoseEstimator:
    """Track body landmarks with MediaPipe Pose, smooth them and draw them.

    Fixes over the previous revision:
    * ``write_image`` flipped the un-resized frame, silently discarding the
      resize result;
    * ``run_estimator`` looped forever on capture failure and ignored the
      quit key while no pose was detected;
    * the per-landmark extraction and drawing boilerplate is table-driven.
    """

    # Landmark names in the order their (x, y) pairs appear in the flat
    # coordinate tuple produced by get_pose_coords.
    _LANDMARK_NAMES = (
        'LEFT_WRIST', 'LEFT_ELBOW', 'LEFT_SHOULDER', 'LEFT_HIP',
        'LEFT_KNEE', 'LEFT_ANKLE',
        'RIGHT_WRIST', 'RIGHT_ELBOW', 'RIGHT_SHOULDER', 'RIGHT_HIP',
        'RIGHT_KNEE', 'RIGHT_ANKLE',
        'NOSE')

    # Skeleton segments drawn between named landmarks.
    _EDGES = (
        ('LEFT_SHOULDER', 'RIGHT_SHOULDER'),
        ('LEFT_SHOULDER', 'LEFT_ELBOW'),
        ('RIGHT_SHOULDER', 'RIGHT_ELBOW'),
        ('LEFT_ELBOW', 'LEFT_WRIST'),
        ('RIGHT_ELBOW', 'RIGHT_WRIST'),
        ('LEFT_SHOULDER', 'LEFT_HIP'),
        ('RIGHT_SHOULDER', 'RIGHT_HIP'),
        ('LEFT_HIP', 'RIGHT_HIP'),
        ('LEFT_HIP', 'LEFT_KNEE'),
        ('RIGHT_HIP', 'RIGHT_KNEE'),
        ('LEFT_KNEE', 'LEFT_ANKLE'),
        ('RIGHT_KNEE', 'RIGHT_ANKLE'))

    def __init__(self, window_size=8, smoothing_function=None):
        """Create the estimator.

        Args:
            window_size: number of recent frames considered for smoothing.
                The Savitzky-Golay filter requires an odd window, so an even
                value is reduced by one when smoothing_function == 'savgol'.
            smoothing_function: 'mean', 'savgol' or None (no smoothing).
        """
        if smoothing_function == 'savgol' and window_size % 2 == 0:
            self.window_size = window_size - 1
        else:
            self.window_size = window_size
        self.smoothing_function = smoothing_function
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_pose = mp.solutions.pose
        self.pose = self.mp_pose.Pose(static_image_mode=False,
                                      min_detection_confidence=0.1)
        self.writer = None
        # Rolling buffer of the most recent coordinate tuples.
        self.coords_array = []

    def get_pose_coords(self, image):
        """Return a flat (x, y, x, y, ...) tuple of pixel coordinates.

        The order follows _LANDMARK_NAMES.  Returns None when no pose is
        detected or processing fails.
        """
        try:
            height, width, _ = image.shape
            results = self.pose.process(
                cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            if not results.pose_landmarks:
                print('No poses detected')
                return None
            landmarks = results.pose_landmarks.landmark
            landmark_enum = self.mp_pose.PoseLandmark
            coords = []
            for name in self._LANDMARK_NAMES:
                point = landmarks[landmark_enum[name]]
                # Landmarks are normalized; scale to pixel coordinates.
                coords.append(point.x * width)
                coords.append(point.y * height)
            return tuple(coords)
        except Exception as e:
            # Best-effort: report and treat any failure as "no pose".
            print(e)
            return None

    def smoothen_coords(self, pose_coords):
        """Smooth pose_coords over the last window_size observations.

        Returns the smoothed coordinate tuple; the raw input is returned
        when no smoothing is configured or the savgol filter cannot run
        yet (buffer shorter than the window).
        """
        if len(self.coords_array) == self.window_size:
            self.coords_array.pop(0)
        self.coords_array.append(pose_coords)
        if self.smoothing_function == 'mean':
            smoothed = np.array(self.coords_array).mean(axis=0)
        elif self.smoothing_function == 'savgol':
            try:
                newest = lambda arr: savgol_filter(
                    arr, self.window_size, 1)[-1]
                smoothed = np.apply_along_axis(
                    newest, 0, np.array(self.coords_array))
                # Feed the filtered sample back into the buffer so later
                # windows build on the smoothed trajectory.
                self.coords_array.pop()
                self.coords_array.append(smoothed)
            except ValueError as ve:
                print(ve)
                return pose_coords
        else:
            return pose_coords
        return tuple(smoothed)

    def get_annotated_image(self, image, pose_coords):
        """Return a copy of image with joints and skeleton edges drawn."""
        annotated = image.copy()
        color = (0, 0, 255)
        raw = {name: (pose_coords[2 * i], pose_coords[2 * i + 1])
               for i, name in enumerate(self._LANDMARK_NAMES)}
        points = {name: (int(x), int(y)) for name, (x, y) in raw.items()}
        for point in points.values():
            cv2.circle(annotated, point, 10, color, -1)
        # Attach the nose to the midpoint of the shoulders ("neck").
        neck = (int((raw['LEFT_SHOULDER'][0] + raw['RIGHT_SHOULDER'][0]) / 2),
                int((raw['LEFT_SHOULDER'][1] + raw['RIGHT_SHOULDER'][1]) / 2))
        cv2.line(annotated, points['NOSE'], neck, color, 3)
        for start, end in self._EDGES:
            cv2.line(annotated, points[start], points[end], color, 3)
        return annotated

    def write_image(self, image):
        """Append image to the output video, display it, return the key.

        The writer is created lazily on the first frame, once the frame
        size is known.
        """
        if self.writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            self.writer = cv2.VideoWriter(
                "test6.mp4", fourcc, 25,
                (image.shape[1], image.shape[0]), True)
        self.writer.write(image)
        show = cv2.resize(image, None, fx=1, fy=1)
        # Bug fix: flip the resized frame (the old code flipped the
        # original, silently discarding the resize).
        show = cv2.flip(show, 1)
        cv2.imshow("Frame", show)
        return cv2.waitKey(1) & 0xFF

    def run_estimator(self):
        """Capture webcam frames, annotate detected poses and display them.

        Press 'q' to quit.  Releases the capture, the writer and the
        MediaPipe graph on exit.
        """
        capture = cv2.VideoCapture(0)
        try:
            while capture.isOpened():
                ret, image = capture.read()
                if not ret:
                    # Bug fix: stop on capture failure instead of spinning.
                    break
                try:
                    pose_coords = self.get_pose_coords(image)
                    if pose_coords is None:
                        # No pose: show the raw frame.  The quit key still
                        # works here (the old `continue` path skipped it).
                        key = self.write_image(image)
                    else:
                        pose_coords = self.smoothen_coords(pose_coords)
                        key = self.write_image(
                            self.get_annotated_image(image, pose_coords))
                except ValueError as ve:
                    print(ve)
                    key = self.write_image(image)
                if key == ord("q"):
                    break
        finally:
            cv2.destroyAllWindows()
            capture.release()
            if self.writer is not None:
                self.writer.release()
            self.pose.close()
# Guard the entry point: the webcam loop previously started as a module-level
# side effect, i.e. even on a plain import of this file.
if __name__ == "__main__":
    estimator = PoseEstimator(window_size=8)
    estimator.run_estimator()
import logging
from datetime import datetime
from datetime import timezone
from typing import cast
from typing import Dict
from typing import List
from typing import Optional
import aiohttp
from .credential_store import CredentialStore
from .exceptions import RenaultException
from .kamereon import enums
from .kamereon import models
from .kamereon import schemas
from .renault_session import RenaultSession
# Module-level logger.
_LOGGER = logging.getLogger(__name__)

# strftime formats used when querying the Kamereon history endpoints.
PERIOD_DAY_FORMAT = "%Y%m%d"
PERIOD_MONTH_FORMAT = "%Y%m"
# Timestamp format for scheduled actions (UTC, second precision).
PERIOD_TZ_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
# Maps the `period` argument of history queries to its strftime format.
PERIOD_FORMATS = {"day": PERIOD_DAY_FORMAT, "month": PERIOD_MONTH_FORMAT}
class RenaultVehicle:
"""Proxy to a Renault vehicle."""
def __init__(
    self,
    account_id: str,
    vin: str,
    *,
    session: Optional[RenaultSession] = None,
    websession: Optional[aiohttp.ClientSession] = None,
    locale: Optional[str] = None,
    country: Optional[str] = None,
    locale_details: Optional[Dict[str, str]] = None,
    credential_store: Optional[CredentialStore] = None,
    vehicle_details: Optional[models.KamereonVehiclesDetails] = None,
) -> None:
    """Initialise the vehicle proxy.

    Either an existing `session` or a `websession` (from which a new
    RenaultSession is built) must be supplied.
    """
    self._account_id = account_id
    self._vin = vin
    self._vehicle_details = vehicle_details
    # Reuse the caller's session when one is supplied.
    if session:
        self._session = session
        return
    if websession is None:  # pragma: no cover
        raise RenaultException(
            "`websession` is required if session is not provided."
        )
    self._session = RenaultSession(
        websession=websession,
        locale=locale,
        country=country,
        locale_details=locale_details,
        credential_store=credential_store,
    )
@property
def session(self) -> RenaultSession:
    """Get session.

    Returns the RenaultSession used for all API calls of this vehicle.
    """
    return self._session
@property
def account_id(self) -> str:
    """Get account id.

    Returns the Kamereon account identifier this vehicle belongs to.
    """
    return self._account_id
@property
def vin(self) -> str:
    """Get vin.

    Returns the vehicle identification number.
    """
    return self._vin
async def get_battery_status(self) -> models.KamereonVehicleBatteryStatusData:
    """Fetch and return the vehicle's battery status."""
    raw = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="battery-status",
    )
    data = raw.get_attributes(schemas.KamereonVehicleBatteryStatusDataSchema)
    return cast(models.KamereonVehicleBatteryStatusData, data)
async def get_location(self) -> models.KamereonVehicleLocationData:
    """Fetch and return the vehicle's last known location."""
    raw = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="location",
    )
    data = raw.get_attributes(schemas.KamereonVehicleLocationDataSchema)
    return cast(models.KamereonVehicleLocationData, data)
async def get_hvac_status(self) -> models.KamereonVehicleHvacStatusData:
    """Fetch and return the vehicle's HVAC status."""
    raw = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="hvac-status",
    )
    data = raw.get_attributes(schemas.KamereonVehicleHvacStatusDataSchema)
    return cast(models.KamereonVehicleHvacStatusData, data)
async def get_charge_mode(self) -> models.KamereonVehicleChargeModeData:
    """Fetch and return the vehicle's charge mode."""
    raw = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="charge-mode",
    )
    data = raw.get_attributes(schemas.KamereonVehicleChargeModeDataSchema)
    return cast(models.KamereonVehicleChargeModeData, data)
async def get_cockpit(self) -> models.KamereonVehicleCockpitData:
    """Fetch and return the vehicle's cockpit data (mileage etc.)."""
    raw = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="cockpit",
    )
    data = raw.get_attributes(schemas.KamereonVehicleCockpitDataSchema)
    return cast(models.KamereonVehicleCockpitData, data)
async def get_lock_status(self) -> models.KamereonVehicleLockStatusData:
    """Fetch and return the vehicle's lock status."""
    raw = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="lock-status",
    )
    data = raw.get_attributes(schemas.KamereonVehicleLockStatusDataSchema)
    return cast(models.KamereonVehicleLockStatusData, data)
async def get_charging_settings(self) -> models.KamereonVehicleChargingSettingsData:
    """Fetch and return the vehicle's charging settings."""
    raw = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="charging-settings",
    )
    data = raw.get_attributes(schemas.KamereonVehicleChargingSettingsDataSchema)
    return cast(models.KamereonVehicleChargingSettingsData, data)
async def get_notification_settings(
    self,
) -> models.KamereonVehicleNotificationSettingsData:
    """Fetch and return the vehicle's notification settings."""
    raw = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="notification-settings",
    )
    data = raw.get_attributes(
        schemas.KamereonVehicleNotificationSettingsDataSchema
    )
    return cast(models.KamereonVehicleNotificationSettingsData, data)
async def get_charge_history(
    self, start: datetime, end: datetime, period: str = "month"
) -> models.KamereonVehicleChargeHistoryData:
    """Get vehicle charge history.

    Args:
        start: beginning of the reporting window.
        end: end of the reporting window.
        period: aggregation granularity, "month" (default) or "day".

    Raises:
        TypeError: if start/end are not datetimes or period is invalid.
    """
    if not isinstance(start, datetime):  # pragma: no cover
        raise TypeError(
            "`start` should be an instance of datetime.datetime, not {}".format(
                start.__class__
            )
        )
    if not isinstance(end, datetime):  # pragma: no cover
        raise TypeError(
            "`end` should be an instance of datetime.datetime, not {}".format(
                end.__class__
            )
        )
    # Membership test directly on the dict; `.keys()` was redundant.
    if period not in PERIOD_FORMATS:  # pragma: no cover
        raise TypeError("`period` should be one of `month`, `day`")
    params = {
        "type": period,
        "start": start.strftime(PERIOD_FORMATS[period]),
        "end": end.strftime(PERIOD_FORMATS[period]),
    }
    response = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="charge-history",
        params=params,
    )
    return cast(
        models.KamereonVehicleChargeHistoryData,
        response.get_attributes(schemas.KamereonVehicleChargeHistoryDataSchema),
    )
async def get_charges(
    self, start: datetime, end: datetime
) -> models.KamereonVehicleChargesData:
    """Fetch the charge sessions recorded between *start* and *end*."""
    if not isinstance(start, datetime):  # pragma: no cover
        raise TypeError(
            "`start` should be an instance of datetime.datetime, not {}".format(
                start.__class__
            )
        )
    if not isinstance(end, datetime):  # pragma: no cover
        raise TypeError(
            "`end` should be an instance of datetime.datetime, not {}".format(
                end.__class__
            )
        )
    # This endpoint only accepts day-level resolution.
    query = {
        "start": start.strftime(PERIOD_DAY_FORMAT),
        "end": end.strftime(PERIOD_DAY_FORMAT),
    }
    raw = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="charges",
        params=query,
    )
    return cast(
        models.KamereonVehicleChargesData,
        raw.get_attributes(schemas.KamereonVehicleChargesDataSchema),
    )
async def get_hvac_history(
    self, start: datetime, end: datetime, period: str = "month"
) -> models.KamereonVehicleHvacHistoryData:
    """Get vehicle hvac history.

    Args:
        start: beginning of the reporting window.
        end: end of the reporting window.
        period: aggregation granularity, "month" (default) or "day".

    Raises:
        TypeError: if start/end are not datetimes or period is invalid.
    """
    if not isinstance(start, datetime):  # pragma: no cover
        raise TypeError(
            "`start` should be an instance of datetime.datetime, not {}".format(
                start.__class__
            )
        )
    if not isinstance(end, datetime):  # pragma: no cover
        raise TypeError(
            "`end` should be an instance of datetime.datetime, not {}".format(
                end.__class__
            )
        )
    # Membership test directly on the dict; `.keys()` was redundant.
    if period not in PERIOD_FORMATS:  # pragma: no cover
        raise TypeError("`period` should be one of `month`, `day`")
    params = {
        "type": period,
        "start": start.strftime(PERIOD_FORMATS[period]),
        "end": end.strftime(PERIOD_FORMATS[period]),
    }
    response = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="hvac-history",
        params=params,
    )
    return cast(
        models.KamereonVehicleHvacHistoryData,
        response.get_attributes(schemas.KamereonVehicleHvacHistoryDataSchema),
    )
async def get_hvac_sessions(
    self, start: datetime, end: datetime
) -> models.KamereonVehicleHvacSessionsData:
    """Fetch the HVAC sessions recorded between *start* and *end*."""
    if not isinstance(start, datetime):  # pragma: no cover
        raise TypeError(
            "`start` should be an instance of datetime.datetime, not {}".format(
                start.__class__
            )
        )
    if not isinstance(end, datetime):  # pragma: no cover
        raise TypeError(
            "`end` should be an instance of datetime.datetime, not {}".format(
                end.__class__
            )
        )
    # This endpoint only accepts day-level resolution.
    query = {
        "start": start.strftime(PERIOD_DAY_FORMAT),
        "end": end.strftime(PERIOD_DAY_FORMAT),
    }
    raw = await self.session.get_vehicle_data(
        account_id=self.account_id,
        vin=self.vin,
        endpoint="hvac-sessions",
        params=query,
    )
    return cast(
        models.KamereonVehicleHvacSessionsData,
        raw.get_attributes(schemas.KamereonVehicleHvacSessionsDataSchema),
    )
async def set_ac_start(
self, temperature: float, when: Optional[datetime] = None
) -> models.KamereonVehicleHvacStartActionData:
"""Start vehicle ac."""
attributes = {
"action": "start",
"targetTemperature": temperature,
}
if when:
if not isinstance(when, datetime): # pragma: no cover
raise TypeError(
"`when` should be an instance of datetime.datetime, not {}".format(
when.__class__
)
)
start_date_time = when.astimezone(timezone.utc).strftime(PERIOD_TZ_FORMAT)
attributes["startDateTime"] = start_date_time
response = await self.session.set_vehicle_action(
account_id=self.account_id,
vin=self.vin,
endpoint="hvac-start",
attributes=attributes,
)
return cast(
models.KamereonVehicleHvacStartActionData,
response.get_attributes(schemas.KamereonVehicleHvacStartActionDataSchema),
)
async def set_ac_stop(self) -> models.KamereonVehicleHvacStartActionData:
"""Stop vehicle ac."""
attributes = {"action": "cancel"}
response = await self.session.set_vehicle_action(
account_id=self.account_id,
vin=self.vin,
endpoint="hvac-start",
attributes=attributes,
)
return cast(
models.KamereonVehicleHvacStartActionData,
response.get_attributes(schemas.KamereonVehicleHvacStartActionDataSchema),
)
async def set_charge_schedules(
self, schedules: List[models.ChargeSchedule]
) -> models.KamereonVehicleChargeScheduleActionData:
"""Set vehicle charge schedules."""
for schedule in schedules:
if not isinstance(schedule, models.ChargeSchedule): # pragma: no cover
raise TypeError(
"`schedules` should be a list of ChargeSchedule, not {}".format(
schedules.__class__
)
)
attributes = {"schedules": list(schedule.for_json() for schedule in schedules)}
response = await self.session.set_vehicle_action(
account_id=self.account_id,
vin=self.vin,
endpoint="charge-schedule",
attributes=attributes,
)
return cast(
models.KamereonVehicleChargeScheduleActionData,
response.get_attributes(
schemas.KamereonVehicleChargeScheduleActionDataSchema
),
)
async def set_charge_mode(
self, charge_mode: enums.ChargeMode
) -> models.KamereonVehicleChargeModeActionData:
"""Set vehicle charge mode."""
if not isinstance(charge_mode, enums.ChargeMode): # pragma: no cover
raise TypeError(
"`charge_mode` should be an instance of ChargeMode, not {}".format(
charge_mode.__class__
)
)
attributes = {"action": charge_mode.name}
response = await self.session.set_vehicle_action(
account_id=self.account_id,
vin=self.vin,
endpoint="charge-mode",
attributes=attributes,
)
return cast(
models.KamereonVehicleChargeModeActionData,
response.get_attributes(schemas.KamereonVehicleChargeModeActionDataSchema),
)
async def set_charge_start(self) -> models.KamereonVehicleChargingStartActionData:
"""Start vehicle charge."""
attributes = {"action": "start"}
response = await self.session.set_vehicle_action(
account_id=self.account_id,
vin=self.vin,
endpoint="charging-start",
attributes=attributes,
)
return cast(
models.KamereonVehicleChargingStartActionData,
response.get_attributes(
schemas.KamereonVehicleChargingStartActionDataSchema
),
) | src/renault_api/renault_vehicle.py | import logging
from datetime import datetime
from datetime import timezone
from typing import cast
from typing import Dict
from typing import List
from typing import Optional
import aiohttp
from .credential_store import CredentialStore
from .exceptions import RenaultException
from .kamereon import enums
from .kamereon import models
from .kamereon import schemas
from .renault_session import RenaultSession
_LOGGER = logging.getLogger(__name__)
PERIOD_DAY_FORMAT = "%Y%m%d"
PERIOD_MONTH_FORMAT = "%Y%m"
PERIOD_TZ_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
PERIOD_FORMATS = {"day": PERIOD_DAY_FORMAT, "month": PERIOD_MONTH_FORMAT}
class RenaultVehicle:
"""Proxy to a Renault vehicle."""
def __init__(
self,
account_id: str,
vin: str,
*,
session: Optional[RenaultSession] = None,
websession: Optional[aiohttp.ClientSession] = None,
locale: Optional[str] = None,
country: Optional[str] = None,
locale_details: Optional[Dict[str, str]] = None,
credential_store: Optional[CredentialStore] = None,
vehicle_details: Optional[models.KamereonVehiclesDetails] = None,
) -> None:
"""Initialise Renault vehicle."""
self._account_id = account_id
self._vin = vin
self._vehicle_details = vehicle_details
if session:
self._session = session
else:
if websession is None: # pragma: no cover
raise RenaultException(
"`websession` is required if session is not provided."
)
self._session = RenaultSession(
websession=websession,
locale=locale,
country=country,
locale_details=locale_details,
credential_store=credential_store,
)
@property
def session(self) -> RenaultSession:
"""Get session."""
return self._session
@property
def account_id(self) -> str:
"""Get account id."""
return self._account_id
@property
def vin(self) -> str:
"""Get vin."""
return self._vin
async def get_battery_status(self) -> models.KamereonVehicleBatteryStatusData:
"""Get vehicle battery status."""
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="battery-status",
)
return cast(
models.KamereonVehicleBatteryStatusData,
response.get_attributes(schemas.KamereonVehicleBatteryStatusDataSchema),
)
async def get_location(self) -> models.KamereonVehicleLocationData:
"""Get vehicle location."""
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="location",
)
return cast(
models.KamereonVehicleLocationData,
response.get_attributes(schemas.KamereonVehicleLocationDataSchema),
)
async def get_hvac_status(self) -> models.KamereonVehicleHvacStatusData:
"""Get vehicle hvac status."""
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="hvac-status",
)
return cast(
models.KamereonVehicleHvacStatusData,
response.get_attributes(schemas.KamereonVehicleHvacStatusDataSchema),
)
async def get_charge_mode(self) -> models.KamereonVehicleChargeModeData:
"""Get vehicle charge mode."""
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="charge-mode",
)
return cast(
models.KamereonVehicleChargeModeData,
response.get_attributes(schemas.KamereonVehicleChargeModeDataSchema),
)
async def get_cockpit(self) -> models.KamereonVehicleCockpitData:
"""Get vehicle cockpit."""
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="cockpit",
)
return cast(
models.KamereonVehicleCockpitData,
response.get_attributes(schemas.KamereonVehicleCockpitDataSchema),
)
async def get_lock_status(self) -> models.KamereonVehicleLockStatusData:
"""Get vehicle lock status."""
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="lock-status",
)
return cast(
models.KamereonVehicleLockStatusData,
response.get_attributes(schemas.KamereonVehicleLockStatusDataSchema),
)
async def get_charging_settings(self) -> models.KamereonVehicleChargingSettingsData:
"""Get vehicle charging settings."""
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="charging-settings",
)
return cast(
models.KamereonVehicleChargingSettingsData,
response.get_attributes(schemas.KamereonVehicleChargingSettingsDataSchema),
)
async def get_notification_settings(
self,
) -> models.KamereonVehicleNotificationSettingsData:
"""Get vehicle notification settings."""
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="notification-settings",
)
return cast(
models.KamereonVehicleNotificationSettingsData,
response.get_attributes(
schemas.KamereonVehicleNotificationSettingsDataSchema
),
)
async def get_charge_history(
self, start: datetime, end: datetime, period: str = "month"
) -> models.KamereonVehicleChargeHistoryData:
"""Get vehicle charge history."""
if not isinstance(start, datetime): # pragma: no cover
raise TypeError(
"`start` should be an instance of datetime.datetime, not {}".format(
start.__class__
)
)
if not isinstance(end, datetime): # pragma: no cover
raise TypeError(
"`end` should be an instance of datetime.datetime, not {}".format(
end.__class__
)
)
if period not in PERIOD_FORMATS.keys(): # pragma: no cover
raise TypeError("`period` should be one of `month`, `day`")
params = {
"type": period,
"start": start.strftime(PERIOD_FORMATS[period]),
"end": end.strftime(PERIOD_FORMATS[period]),
}
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="charge-history",
params=params,
)
return cast(
models.KamereonVehicleChargeHistoryData,
response.get_attributes(schemas.KamereonVehicleChargeHistoryDataSchema),
)
async def get_charges(
self, start: datetime, end: datetime
) -> models.KamereonVehicleChargesData:
"""Get vehicle charges."""
if not isinstance(start, datetime): # pragma: no cover
raise TypeError(
"`start` should be an instance of datetime.datetime, not {}".format(
start.__class__
)
)
if not isinstance(end, datetime): # pragma: no cover
raise TypeError(
"`end` should be an instance of datetime.datetime, not {}".format(
end.__class__
)
)
params = {
"start": start.strftime(PERIOD_DAY_FORMAT),
"end": end.strftime(PERIOD_DAY_FORMAT),
}
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="charges",
params=params,
)
return cast(
models.KamereonVehicleChargesData,
response.get_attributes(schemas.KamereonVehicleChargesDataSchema),
)
async def get_hvac_history(
self, start: datetime, end: datetime, period: str = "month"
) -> models.KamereonVehicleHvacHistoryData:
"""Get vehicle hvac history."""
if not isinstance(start, datetime): # pragma: no cover
raise TypeError(
"`start` should be an instance of datetime.datetime, not {}".format(
start.__class__
)
)
if not isinstance(end, datetime): # pragma: no cover
raise TypeError(
"`end` should be an instance of datetime.datetime, not {}".format(
end.__class__
)
)
if period not in PERIOD_FORMATS.keys(): # pragma: no cover
raise TypeError("`period` should be one of `month`, `day`")
params = {
"type": period,
"start": start.strftime(PERIOD_FORMATS[period]),
"end": end.strftime(PERIOD_FORMATS[period]),
}
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="hvac-history",
params=params,
)
return cast(
models.KamereonVehicleHvacHistoryData,
response.get_attributes(schemas.KamereonVehicleHvacHistoryDataSchema),
)
async def get_hvac_sessions(
self, start: datetime, end: datetime
) -> models.KamereonVehicleHvacSessionsData:
"""Get vehicle hvac sessions."""
if not isinstance(start, datetime): # pragma: no cover
raise TypeError(
"`start` should be an instance of datetime.datetime, not {}".format(
start.__class__
)
)
if not isinstance(end, datetime): # pragma: no cover
raise TypeError(
"`end` should be an instance of datetime.datetime, not {}".format(
end.__class__
)
)
params = {
"start": start.strftime(PERIOD_DAY_FORMAT),
"end": end.strftime(PERIOD_DAY_FORMAT),
}
response = await self.session.get_vehicle_data(
account_id=self.account_id,
vin=self.vin,
endpoint="hvac-sessions",
params=params,
)
return cast(
models.KamereonVehicleHvacSessionsData,
response.get_attributes(schemas.KamereonVehicleHvacSessionsDataSchema),
)
async def set_ac_start(
self, temperature: float, when: Optional[datetime] = None
) -> models.KamereonVehicleHvacStartActionData:
"""Start vehicle ac."""
attributes = {
"action": "start",
"targetTemperature": temperature,
}
if when:
if not isinstance(when, datetime): # pragma: no cover
raise TypeError(
"`when` should be an instance of datetime.datetime, not {}".format(
when.__class__
)
)
start_date_time = when.astimezone(timezone.utc).strftime(PERIOD_TZ_FORMAT)
attributes["startDateTime"] = start_date_time
response = await self.session.set_vehicle_action(
account_id=self.account_id,
vin=self.vin,
endpoint="hvac-start",
attributes=attributes,
)
return cast(
models.KamereonVehicleHvacStartActionData,
response.get_attributes(schemas.KamereonVehicleHvacStartActionDataSchema),
)
async def set_ac_stop(self) -> models.KamereonVehicleHvacStartActionData:
"""Stop vehicle ac."""
attributes = {"action": "cancel"}
response = await self.session.set_vehicle_action(
account_id=self.account_id,
vin=self.vin,
endpoint="hvac-start",
attributes=attributes,
)
return cast(
models.KamereonVehicleHvacStartActionData,
response.get_attributes(schemas.KamereonVehicleHvacStartActionDataSchema),
)
async def set_charge_schedules(
self, schedules: List[models.ChargeSchedule]
) -> models.KamereonVehicleChargeScheduleActionData:
"""Set vehicle charge schedules."""
for schedule in schedules:
if not isinstance(schedule, models.ChargeSchedule): # pragma: no cover
raise TypeError(
"`schedules` should be a list of ChargeSchedule, not {}".format(
schedules.__class__
)
)
attributes = {"schedules": list(schedule.for_json() for schedule in schedules)}
response = await self.session.set_vehicle_action(
account_id=self.account_id,
vin=self.vin,
endpoint="charge-schedule",
attributes=attributes,
)
return cast(
models.KamereonVehicleChargeScheduleActionData,
response.get_attributes(
schemas.KamereonVehicleChargeScheduleActionDataSchema
),
)
async def set_charge_mode(
self, charge_mode: enums.ChargeMode
) -> models.KamereonVehicleChargeModeActionData:
"""Set vehicle charge mode."""
if not isinstance(charge_mode, enums.ChargeMode): # pragma: no cover
raise TypeError(
"`charge_mode` should be an instance of ChargeMode, not {}".format(
charge_mode.__class__
)
)
attributes = {"action": charge_mode.name}
response = await self.session.set_vehicle_action(
account_id=self.account_id,
vin=self.vin,
endpoint="charge-mode",
attributes=attributes,
)
return cast(
models.KamereonVehicleChargeModeActionData,
response.get_attributes(schemas.KamereonVehicleChargeModeActionDataSchema),
)
async def set_charge_start(self) -> models.KamereonVehicleChargingStartActionData:
"""Start vehicle charge."""
attributes = {"action": "start"}
response = await self.session.set_vehicle_action(
account_id=self.account_id,
vin=self.vin,
endpoint="charging-start",
attributes=attributes,
)
return cast(
models.KamereonVehicleChargingStartActionData,
response.get_attributes(
schemas.KamereonVehicleChargingStartActionDataSchema
),
) | 0.827932 | 0.091463 |
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from System.Collections.Generic import Dictionary
from DAQ.Environment import *
from DAQ import *
from MOTMaster import*
from time import sleep
import math
import time
path = "C:\\ExperimentControl\\EDMSuite\\SympatheticMOTMasterScripts\\ImagingFromMOT.cs"
print("hello")
def run_script():
return 0
def str2bool(v):
return v.lower() in ("yes", "true")
def MakeList(startvalue, endvalue, increment):
N = int((endvalue-startvalue)/increment)
return [startvalue + i*increment for i in range(N)]
def AddReferenceValueToList(list,referencevalue,period):
for i in range(0,int(math.floor(len(list)/period))):
list.insert((period+1)*i, referencevalue)
return list
def TemperatureMeasurement(dic,exptimeValues):
mm.SetScriptPath(path)
if not exptimeValues:
exptimeValues = [5,25,35,40]
for n in range(len(exptimeValues)):
dic["ImageDelay"] = exptimeValues[n]
print('ImageDelay: ' + str(exptimeValues[n]))
mm.Run(dic)
return 0
def TemperatureScan(ParameterName,ParameterValues,expTimeValues,repeats):
dic = Dictionary[String, Object]()
if not expTimeValues:
expTimeValues = [5,25,35,40]
if not repeats:
repeats = 1
for i in range(repeats):
print('Repeat number: ' + str(i+1))
for n in range(len(ParameterValues)):
dic[ParameterName]=ParameterValues[n]
print(ParameterName + ': ' +str(ParameterValues[n]))
TemperatureMeasurement(dic,expTimeValues)
return 0
def ScanAbsImageDetuning(vals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
for i in range(repeats):
print('repeat number: '+str(i) )
for j in vals:
dic["absImageDetuning"] = j
print('Image Detuning: ' +str(j))
mm.Run(dic)
return 0
def ScanQWPangle(vals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
for i in range(repeats):
print('repeat number: '+str(i) )
for j in vals:
dic["probeQWPangle"] = j
print('probe QWP angle : ' +str(j))
mm.Run(dic)
return 0
def ScanZShimCurrent(vals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
for i in range(repeats):
print('repeat number: '+str(i) )
for j in vals:
dic["ZCoilCurrent"] = j
print('Z Shim control voltage : ' +str(j))
mm.Run(dic)
return 0
def ScanXShimCurrent(vals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
for i in range(repeats):
print('repeat number: '+str(i) )
for j in vals:
dic["XCoilCurrent"] = j
print('X Shim control voltage : ' +str(j))
mm.Run(dic)
return 0
def ScanMOTEndTimes(vals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
for i in range(repeats):
print('repeat number: '+str(i) )
for j in vals:
dic["MOTEndTime"] = j
print('MOTEndTime: ' +str(j))
mm.Run(dic)
return 0
def ScanFieldGradient(TopVals,BottomVals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
length = len(TopVals)
if length != len(BottomVals):
print('Lists must be equal length.')
return 0
for i in range(repeats):
print('repeat number: '+str(i) )
for j in range(length):
dic["TopVacCoilCurrent"] = TopVals[j]
dic["BottomVacCoilCurrent"] = BottomVals[j]
print('Coil Currents: ' +str(TopVals[j]) + ', '+str(BottomVals[j]))
mm.Run(dic)
return 0 | SympatheticScripts/SPImagingFromMOT.py | from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from System.Collections.Generic import Dictionary
from DAQ.Environment import *
from DAQ import *
from MOTMaster import*
from time import sleep
import math
import time
path = "C:\\ExperimentControl\\EDMSuite\\SympatheticMOTMasterScripts\\ImagingFromMOT.cs"
print("hello")
def run_script():
return 0
def str2bool(v):
return v.lower() in ("yes", "true")
def MakeList(startvalue, endvalue, increment):
N = int((endvalue-startvalue)/increment)
return [startvalue + i*increment for i in range(N)]
def AddReferenceValueToList(list,referencevalue,period):
for i in range(0,int(math.floor(len(list)/period))):
list.insert((period+1)*i, referencevalue)
return list
def TemperatureMeasurement(dic,exptimeValues):
mm.SetScriptPath(path)
if not exptimeValues:
exptimeValues = [5,25,35,40]
for n in range(len(exptimeValues)):
dic["ImageDelay"] = exptimeValues[n]
print('ImageDelay: ' + str(exptimeValues[n]))
mm.Run(dic)
return 0
def TemperatureScan(ParameterName,ParameterValues,expTimeValues,repeats):
dic = Dictionary[String, Object]()
if not expTimeValues:
expTimeValues = [5,25,35,40]
if not repeats:
repeats = 1
for i in range(repeats):
print('Repeat number: ' + str(i+1))
for n in range(len(ParameterValues)):
dic[ParameterName]=ParameterValues[n]
print(ParameterName + ': ' +str(ParameterValues[n]))
TemperatureMeasurement(dic,expTimeValues)
return 0
def ScanAbsImageDetuning(vals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
for i in range(repeats):
print('repeat number: '+str(i) )
for j in vals:
dic["absImageDetuning"] = j
print('Image Detuning: ' +str(j))
mm.Run(dic)
return 0
def ScanQWPangle(vals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
for i in range(repeats):
print('repeat number: '+str(i) )
for j in vals:
dic["probeQWPangle"] = j
print('probe QWP angle : ' +str(j))
mm.Run(dic)
return 0
def ScanZShimCurrent(vals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
for i in range(repeats):
print('repeat number: '+str(i) )
for j in vals:
dic["ZCoilCurrent"] = j
print('Z Shim control voltage : ' +str(j))
mm.Run(dic)
return 0
def ScanXShimCurrent(vals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
for i in range(repeats):
print('repeat number: '+str(i) )
for j in vals:
dic["XCoilCurrent"] = j
print('X Shim control voltage : ' +str(j))
mm.Run(dic)
return 0
def ScanMOTEndTimes(vals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
for i in range(repeats):
print('repeat number: '+str(i) )
for j in vals:
dic["MOTEndTime"] = j
print('MOTEndTime: ' +str(j))
mm.Run(dic)
return 0
def ScanFieldGradient(TopVals,BottomVals, repeats):
dic = Dictionary[String, Object]()
if not repeats:
repeats = 1
mm.SetScriptPath(path)
length = len(TopVals)
if length != len(BottomVals):
print('Lists must be equal length.')
return 0
for i in range(repeats):
print('repeat number: '+str(i) )
for j in range(length):
dic["TopVacCoilCurrent"] = TopVals[j]
dic["BottomVacCoilCurrent"] = BottomVals[j]
print('Coil Currents: ' +str(TopVals[j]) + ', '+str(BottomVals[j]))
mm.Run(dic)
return 0 | 0.101567 | 0.128197 |
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import numpy as np
from typing import Tuple
from privacy_evaluator.datasets.dataset import Dataset
class CIFAR10(Dataset):
"""CIFAR10 dataset class."""
TRAIN_SET_SIZE = 50000
TEST_SET_SIZE = 10000
INPUT_SHAPE = (3, 32, 32)
N_CLASSES = 10
@classmethod
def pytorch_loader(
cls,
train_batch_size: int = 128,
test_batch_size: int = 128,
one_hot_encode: bool = True,
) -> Tuple[DataLoader, DataLoader]:
"""Loads the dataset as pytorch train and test data loader.
:param train_batch_size: Batch size of the train data loader.
:param test_batch_size: Batch size of the test data loader.
:param one_hot_encode: If data should be one-hot-encoded.
:return: Train and test data loaders.
"""
transform_train = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
),
]
)
transform_test = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
),
]
)
train_set = datasets.CIFAR10(
root=cls.DATA_ROOT, train=True, download=True, transform=transform_train
)
test_set = datasets.CIFAR10(
root=cls.DATA_ROOT, train=False, download=True, transform=transform_test
)
if one_hot_encode:
train_set.targets = cls._one_hot_encode(
np.array(train_set.targets), cls.N_CLASSES
)
test_set.targets = cls._one_hot_encode(
np.array(test_set.targets), cls.N_CLASSES
)
return (
DataLoader(
train_set, batch_size=train_batch_size, shuffle=True, num_workers=4
),
DataLoader(
test_set, batch_size=test_batch_size, shuffle=False, num_workers=4
),
)
@classmethod
def numpy(cls, one_hot_encode: bool = True) -> Tuple[np.ndarray, ...]:
"""Loads train and test dataset as a numpy arrays.
:param one_hot_encode: If data should be one-hot-encoded.
:return: Train and Test data and labels as numpy arrays.
"""
train_loader, test_loader = cls.pytorch_loader(
train_batch_size=cls.TRAIN_SET_SIZE,
test_batch_size=cls.TEST_SET_SIZE,
one_hot_encode=one_hot_encode,
)
x_train, y_train = next(iter(train_loader))
x_train, y_train = x_train.numpy(), y_train.numpy()
x_test, y_test = next(iter(test_loader))
x_test, y_test = x_test.numpy(), y_test.numpy()
return x_train, y_train, x_test, y_test
@classmethod
def tensorflow_loader(
cls,
train_batch_size: int = 128,
test_batch_size: int = 128,
one_hot_encode: bool = False,
):
"""Loads the dataset as Tensorflow train and test data loader.
:param train_batch_size: Batch size of the train data loader.
:param test_batch_size: Batch size of the test data loader.
:param one_hot_encode: If data should be one-hot-encoded.
:return: Train and test data loaders.
"""
raise NotImplementedError(
"Method 'tensorflow_loader()' needs to be implemented."
) | privacy_evaluator/datasets/cifar10.py | import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import numpy as np
from typing import Tuple
from privacy_evaluator.datasets.dataset import Dataset
class CIFAR10(Dataset):
"""CIFAR10 dataset class."""
TRAIN_SET_SIZE = 50000
TEST_SET_SIZE = 10000
INPUT_SHAPE = (3, 32, 32)
N_CLASSES = 10
@classmethod
def pytorch_loader(
cls,
train_batch_size: int = 128,
test_batch_size: int = 128,
one_hot_encode: bool = True,
) -> Tuple[DataLoader, DataLoader]:
"""Loads the dataset as pytorch train and test data loader.
:param train_batch_size: Batch size of the train data loader.
:param test_batch_size: Batch size of the test data loader.
:param one_hot_encode: If data should be one-hot-encoded.
:return: Train and test data loaders.
"""
transform_train = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
),
]
)
transform_test = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
),
]
)
train_set = datasets.CIFAR10(
root=cls.DATA_ROOT, train=True, download=True, transform=transform_train
)
test_set = datasets.CIFAR10(
root=cls.DATA_ROOT, train=False, download=True, transform=transform_test
)
if one_hot_encode:
train_set.targets = cls._one_hot_encode(
np.array(train_set.targets), cls.N_CLASSES
)
test_set.targets = cls._one_hot_encode(
np.array(test_set.targets), cls.N_CLASSES
)
return (
DataLoader(
train_set, batch_size=train_batch_size, shuffle=True, num_workers=4
),
DataLoader(
test_set, batch_size=test_batch_size, shuffle=False, num_workers=4
),
)
@classmethod
def numpy(cls, one_hot_encode: bool = True) -> Tuple[np.ndarray, ...]:
"""Loads train and test dataset as a numpy arrays.
:param one_hot_encode: If data should be one-hot-encoded.
:return: Train and Test data and labels as numpy arrays.
"""
train_loader, test_loader = cls.pytorch_loader(
train_batch_size=cls.TRAIN_SET_SIZE,
test_batch_size=cls.TEST_SET_SIZE,
one_hot_encode=one_hot_encode,
)
x_train, y_train = next(iter(train_loader))
x_train, y_train = x_train.numpy(), y_train.numpy()
x_test, y_test = next(iter(test_loader))
x_test, y_test = x_test.numpy(), y_test.numpy()
return x_train, y_train, x_test, y_test
@classmethod
def tensorflow_loader(
cls,
train_batch_size: int = 128,
test_batch_size: int = 128,
one_hot_encode: bool = False,
):
"""Loads the dataset as Tensorflow train and test data loader.
:param train_batch_size: Batch size of the train data loader.
:param test_batch_size: Batch size of the test data loader.
:param one_hot_encode: If data should be one-hot-encoded.
:return: Train and test data loaders.
"""
raise NotImplementedError(
"Method 'tensorflow_loader()' needs to be implemented."
) | 0.955079 | 0.784897 |
import datetime
from slack import WebClient
from slack.errors import SlackApiError
import ssl
import bench_cli.configuration as configuration
# ----------------------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------- Send Slack Message ----------------------------------------------------------
def send_slack_message(slack_api_token: str, slack_channel: str, report_path: str):
ssl._create_default_https_context = ssl._create_unverified_context
client = WebClient(slack_api_token)
# Upload OLTP file to slack
try:
response = client.files_upload(channels='#'+slack_channel, file=report_path)
assert response["file"] # the uploaded file
except SlackApiError as e:
# You will get a SlackApiError if "ok" is False
assert e.response["ok"] is False
assert e.response["error"] # str like 'invalid_auth', 'channel_not_found'
print(f"Got an error: {e.response['error']}")
# --------------------------------------------------------------------------------------------------------------------------------------
# -------------------------------------- Main function for report and add OLTP to database ---------------------------------------------
def save_to_mysql(cfg: configuration.Config, report, table_name: str):
conn = cfg.mysql_connect()
# source (https://www.w3schools.com/python/python_mysql_insert.asp)
mycursor = conn.cursor()
# current date and time
now = datetime.datetime.now()
format = '%Y-%m-%d %H:%M:%S'
mysql_timestamp = now.strftime(format)
benchmark = "INSERT INTO benchmark(commit,Datetime,source) values(%s,%s,%s)"
mycursor.execute(benchmark, (cfg.commit, mysql_timestamp, cfg.source))
conn.commit()
mycursor.execute("select * from benchmark ORDER BY test_no DESC LIMIT 1;")
result = mycursor.fetchall()
test_no = result[0][0]
# Inserting for table name
sql_insert = "INSERT INTO "+table_name+" (time,threads,test_no,tps,latency,errors,reconnects) values(%s,%s,%s,%s,%s,%s,%s)"
mycursor.execute(sql_insert, (report['results']["time"], report['results']["threads"], test_no, report['results']["tps"], report['results']["latency"], report['results']["errors"], report['results']["reconnects"]))
conn.commit()
# Get {{table_name}}_no
mycursor.execute("select "+table_name+"_no from "+table_name+" where test_no = %s ORDER BY "+table_name+"_no DESC LIMIT 1;", (test_no,))
result = mycursor.fetchall()
task_id_res = result[0][0]
# Inserting for {{table_name}}_qps
insert_qps = "INSERT INTO qps("+table_name+"_no,total_qps,reads_qps,writes_qps,other_qps) values(%s,%s,%s,%s,%s)"
mycursor.execute(insert_qps, (task_id_res, report['results']["qps"]["total"], report['results']["qps"]["reads"], report['results']["qps"]["writes"], report['results']["qps"]["other"]))
conn.commit() | bench_cli/reporting.py |
import datetime
from slack import WebClient
from slack.errors import SlackApiError
import ssl
import bench_cli.configuration as configuration
# ----------------------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------- Send Slack Message ----------------------------------------------------------
def send_slack_message(slack_api_token: str, slack_channel: str, report_path: str):
ssl._create_default_https_context = ssl._create_unverified_context
client = WebClient(slack_api_token)
# Upload OLTP file to slack
try:
response = client.files_upload(channels='#'+slack_channel, file=report_path)
assert response["file"] # the uploaded file
except SlackApiError as e:
# You will get a SlackApiError if "ok" is False
assert e.response["ok"] is False
assert e.response["error"] # str like 'invalid_auth', 'channel_not_found'
print(f"Got an error: {e.response['error']}")
# --------------------------------------------------------------------------------------------------------------------------------------
# -------------------------------------- Main function for report and add OLTP to database ---------------------------------------------
def save_to_mysql(cfg: configuration.Config, report, table_name: str):
conn = cfg.mysql_connect()
# source (https://www.w3schools.com/python/python_mysql_insert.asp)
mycursor = conn.cursor()
# current date and time
now = datetime.datetime.now()
format = '%Y-%m-%d %H:%M:%S'
mysql_timestamp = now.strftime(format)
benchmark = "INSERT INTO benchmark(commit,Datetime,source) values(%s,%s,%s)"
mycursor.execute(benchmark, (cfg.commit, mysql_timestamp, cfg.source))
conn.commit()
mycursor.execute("select * from benchmark ORDER BY test_no DESC LIMIT 1;")
result = mycursor.fetchall()
test_no = result[0][0]
# Inserting for table name
sql_insert = "INSERT INTO "+table_name+" (time,threads,test_no,tps,latency,errors,reconnects) values(%s,%s,%s,%s,%s,%s,%s)"
mycursor.execute(sql_insert, (report['results']["time"], report['results']["threads"], test_no, report['results']["tps"], report['results']["latency"], report['results']["errors"], report['results']["reconnects"]))
conn.commit()
# Get {{table_name}}_no
mycursor.execute("select "+table_name+"_no from "+table_name+" where test_no = %s ORDER BY "+table_name+"_no DESC LIMIT 1;", (test_no,))
result = mycursor.fetchall()
task_id_res = result[0][0]
# Inserting for {{table_name}}_qps
insert_qps = "INSERT INTO qps("+table_name+"_no,total_qps,reads_qps,writes_qps,other_qps) values(%s,%s,%s,%s,%s)"
mycursor.execute(insert_qps, (task_id_res, report['results']["qps"]["total"], report['results']["qps"]["reads"], report['results']["qps"]["writes"], report['results']["qps"]["other"]))
conn.commit() | 0.229363 | 0.131396 |
import torch
from torch import nn
import torch.nn.functional as F
from hparams import hparams as hp
class MocoLoss(nn.Module):
    """Similarity loss computed in a frozen MoCo-pretrained feature space.

    Loads a MoCo-pretrained ResNet-50 encoder (fc head removed, weights
    frozen), and penalizes ``1 - <f(y_hat), f(y)>`` per sample, where the
    features are L2-normalized so the dot product is a cosine similarity.
    """

    def __init__(self):
        super(MocoLoss, self).__init__()
        moco_model_path = hp.moco_model_path
        print("Loading MOCO model from path: {}".format(moco_model_path))
        self.model = self.__load_model(moco_model_path)
        self.model.cuda()
        self.model.eval()

    @staticmethod
    def __load_model(moco_model_path):
        """Build a ResNet-50, load the MoCo ``encoder_q`` weights, drop the fc head."""
        import torchvision.models as models
        model = models.__dict__["resnet50"]()
        # freeze all layers but the last fc
        for name, param in model.named_parameters():
            if name not in ['fc.weight', 'fc.bias']:
                param.requires_grad = False
        checkpoint = torch.load(moco_model_path, map_location="cpu")
        state_dict = checkpoint['state_dict']
        # rename moco pre-trained keys
        for k in list(state_dict.keys()):
            # retain only encoder_q up to before the embedding layer
            if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
                # remove prefix
                state_dict[k[len("module.encoder_q."):]] = state_dict[k]
            # delete renamed or unused k
            del state_dict[k]
        msg = model.load_state_dict(state_dict, strict=False)
        # Only the (untouched) fc head may be missing from the checkpoint.
        assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
        # remove output layer
        model = nn.Sequential(*list(model.children())[:-1]).cuda()
        return model

    def extract_feats(self, x):
        """Return L2-normalized MoCo features of shape (N, C) for a batch of images."""
        x = F.interpolate(x, size=224)
        x_feats = self.model(x)
        x_feats = nn.functional.normalize(x_feats, dim=1)
        # BUGFIX: squeeze only the trailing spatial singleton dims. A bare
        # .squeeze() also dropped the batch dimension when the batch had a
        # single sample, which broke the per-sample indexing in forward().
        x_feats = x_feats.squeeze(-1).squeeze(-1)
        return x_feats

    def forward(self, y_hat, y, x):
        """Compute the MoCo similarity loss.

        Parameters
        ----------
        y_hat : generated images; y : target images; x : input images.

        Returns
        -------
        (mean 1 - cos(y_hat, y) loss,
         mean similarity improvement cos(y_hat, y) - cos(y, x),
         list of per-sample similarity dicts)
        """
        n_samples = x.shape[0]
        x_feats = self.extract_feats(x)
        y_feats = self.extract_feats(y)
        y_hat_feats = self.extract_feats(y_hat)
        # Targets are a fixed reference: no gradients through them.
        y_feats = y_feats.detach()
        loss = 0
        sim_improvement = 0
        sim_logs = []
        count = 0
        for i in range(n_samples):
            diff_target = y_hat_feats[i].dot(y_feats[i])
            diff_input = y_hat_feats[i].dot(x_feats[i])
            diff_views = y_feats[i].dot(x_feats[i])
            sim_logs.append({'diff_target': float(diff_target),
                             'diff_input': float(diff_input),
                             'diff_views': float(diff_views)})
            loss += 1 - diff_target
            sim_diff = float(diff_target) - float(diff_views)
            sim_improvement += sim_diff
            count += 1
        return loss / count, sim_improvement / count, sim_logs
from torch import nn
import torch.nn.functional as F
from hparams import hparams as hp
class MocoLoss(nn.Module):
    """Similarity loss computed in a frozen MoCo-pretrained feature space.

    Loads a MoCo-pretrained ResNet-50 encoder (fc head removed, weights
    frozen), and penalizes ``1 - <f(y_hat), f(y)>`` per sample, where the
    features are L2-normalized so the dot product is a cosine similarity.
    """

    def __init__(self):
        super(MocoLoss, self).__init__()
        moco_model_path = hp.moco_model_path
        print("Loading MOCO model from path: {}".format(moco_model_path))
        self.model = self.__load_model(moco_model_path)
        self.model.cuda()
        self.model.eval()

    @staticmethod
    def __load_model(moco_model_path):
        """Build a ResNet-50, load the MoCo ``encoder_q`` weights, drop the fc head."""
        import torchvision.models as models
        model = models.__dict__["resnet50"]()
        # freeze all layers but the last fc
        for name, param in model.named_parameters():
            if name not in ['fc.weight', 'fc.bias']:
                param.requires_grad = False
        checkpoint = torch.load(moco_model_path, map_location="cpu")
        state_dict = checkpoint['state_dict']
        # rename moco pre-trained keys
        for k in list(state_dict.keys()):
            # retain only encoder_q up to before the embedding layer
            if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
                # remove prefix
                state_dict[k[len("module.encoder_q."):]] = state_dict[k]
            # delete renamed or unused k
            del state_dict[k]
        msg = model.load_state_dict(state_dict, strict=False)
        # Only the (untouched) fc head may be missing from the checkpoint.
        assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
        # remove output layer
        model = nn.Sequential(*list(model.children())[:-1]).cuda()
        return model

    def extract_feats(self, x):
        """Return L2-normalized MoCo features of shape (N, C) for a batch of images."""
        x = F.interpolate(x, size=224)
        x_feats = self.model(x)
        x_feats = nn.functional.normalize(x_feats, dim=1)
        # BUGFIX: squeeze only the trailing spatial singleton dims. A bare
        # .squeeze() also dropped the batch dimension when the batch had a
        # single sample, which broke the per-sample indexing in forward().
        x_feats = x_feats.squeeze(-1).squeeze(-1)
        return x_feats

    def forward(self, y_hat, y, x):
        """Compute the MoCo similarity loss.

        Parameters
        ----------
        y_hat : generated images; y : target images; x : input images.

        Returns
        -------
        (mean 1 - cos(y_hat, y) loss,
         mean similarity improvement cos(y_hat, y) - cos(y, x),
         list of per-sample similarity dicts)
        """
        n_samples = x.shape[0]
        x_feats = self.extract_feats(x)
        y_feats = self.extract_feats(y)
        y_hat_feats = self.extract_feats(y_hat)
        # Targets are a fixed reference: no gradients through them.
        y_feats = y_feats.detach()
        loss = 0
        sim_improvement = 0
        sim_logs = []
        count = 0
        for i in range(n_samples):
            diff_target = y_hat_feats[i].dot(y_feats[i])
            diff_input = y_hat_feats[i].dot(x_feats[i])
            diff_views = y_feats[i].dot(x_feats[i])
            sim_logs.append({'diff_target': float(diff_target),
                             'diff_input': float(diff_input),
                             'diff_views': float(diff_views)})
            loss += 1 - diff_target
            sim_diff = float(diff_target) - float(diff_views)
            sim_improvement += sim_diff
            count += 1
        return loss / count, sim_improvement / count, sim_logs
import io
import json
import os
from json import load, dump, JSONDecodeError
from ctpbee.center import PositionModel
from ctpbee.constant import TradeData, PositionData, Direction, Offset, Exchange
class SinglePositionModel:
    """Local position bookkeeping for one contract in one direction.

    Tracks today's/yesterday's volume, total volume, average price, direction
    and exchange for a single ``local_symbol``, updated either from filled
    trades (`update_trade`) or from broker position queries
    (`update_position`).
    """

    def __init__(self, local_symbol):
        self.local_symbol = local_symbol
        # position direction
        self.direction = None
        # yesterday's volume
        self.yd_volume: int = 0
        # today's volume
        self.td_volume: int = 0
        # average holding price
        self.price: float = 0
        # total volume
        self.volume: int = 0
        # exchange code
        self.exchange = None
        # position profit/loss
        self.pnl: float = 0
        # gateway name
        self.gateway_name = None

    def update_trade(self, trade):
        """Update the local position from a freshly filled trade."""
        self.exchange = trade.exchange
        self.direction = trade.direction
        # Recompute the volume-weighted average price.
        cost = self.price * self.volume
        cost += trade.volume * trade.price
        new_pos = self.volume + trade.volume
        if new_pos:
            self.price = cost / new_pos
        else:
            self.price = 0
        if trade.offset == Offset.OPEN:
            self.td_volume += trade.volume
        # close today's position
        elif trade.offset == Offset.CLOSETODAY:
            self.td_volume -= trade.volume
        # close yesterday's position
        elif trade.offset == Offset.CLOSEYESTERDAY:
            self.yd_volume -= trade.volume
        # generic close: consume today's volume first, then yesterday's
        elif trade.offset == Offset.CLOSE:
            if trade.volume < self.td_volume:
                self.td_volume -= trade.volume
            else:
                self.yd_volume -= trade.volume - self.td_volume
                self.td_volume = 0
        self.volume = self.yd_volume + self.td_volume

    def update_position(self, position: PositionData):
        """Update the local position from a broker position query result."""
        self.yd_volume = position.yd_volume
        self.exchange = position.exchange
        self.price = position.price
        self.volume = position.volume
        self.direction = position.direction
        self.gateway_name = position.gateway_name

    # Backward-compatible alias: this method was historically misspelled and
    # some callers still use the old name.
    update_postition = update_position

    def to_dict(self):
        """Return the position as a plain (JSON-serializable) dict."""
        if isinstance(self.direction, Direction):
            direction = self.direction.value
        else:
            direction = self.direction
        # BUGFIX: this branch previously read `self.direction`, so the
        # serialized "exchange" field silently contained the direction.
        if isinstance(self.exchange, Exchange):
            exchange = self.exchange.value
        else:
            exchange = self.exchange
        return {
            "direction": direction,
            "yd_volume": self.yd_volume,
            "local_symbol": self.local_symbol,
            "exchange": exchange,
            "price": self.price,
            "volume": self.volume
        }

    @property
    def _to_dict(self):
        # NOTE: returns the bound `to_dict` method itself (callers call the
        # result); kept as-is for backward compatibility.
        return self.to_dict

    def to_position(self):
        """Return the position as a ctpbee ``PositionData``."""
        try:
            return PositionData(symbol=self.local_symbol.split(".")[0], exchange=self.exchange, volume=self.volume,
                                price=self.price)
        except Exception:
            raise ValueError(f"本地维护符号有问题,请检查,当前符号为{self.local_symbol}")

    def to_df(self):
        """Return the position as a DataFrame (not implemented yet)."""
        pass

    @classmethod
    def create_model(cls, local, **kwargs):
        """Build an instance from attribute keyword args (e.g. the JSON cache)."""
        instance = cls(local)
        # Plain loop instead of a set comprehension used only for side effects.
        for key, value in kwargs.items():
            setattr(instance, key, value)
        return instance
class ApiPositionManager(dict):
    """Strategy-level position manager backed by a JSON cache file.

    Maps ``"<local_symbol>.<direction>"`` -> ``SinglePositionModel``. Note:
    make sure no orders are placed through other channels, otherwise the
    local bookkeeping diverges from the broker's.
    """

    def __init__(self, name, cache_path=None, init_flag: bool = False):
        """
        * name: strategy name; the cache file is ``<name>.json``
        * cache_path: cache directory (by default the strategy cache goes
          under ``~/.ctpbee/api``)
        """
        self.filename = name + ".json"
        dict.__init__(self)
        self.cache_path = cache_path
        # os.path.join is platform-safe (string "+ '/' +" was not).
        self.file_path = os.path.join(cache_path, self.filename)
        try:
            with open(self.file_path, "r") as f:
                data = json.load(f)
        except JSONDecodeError:
            # Corrupt or empty cache: reset it to a valid empty JSON object.
            data = {}
            with open(self.file_path, "w") as f:
                dump(data, f)
        except FileNotFoundError:
            # First run: create the cache file. BUGFIX: previously the file
            # was opened for writing but nothing was dumped, leaving an
            # empty (invalid-JSON) file behind.
            data = {}
            with open(self.file_path, "w") as f:
                dump(data, f)
        self.init_data(data)

    def init_data(self, data):
        """Populate self from the deserialized cache dict."""
        def create_position_model(local, detail: dict):
            # Parse one cached entry into a SinglePositionModel.
            return SinglePositionModel.create_model(local, **detail)

        if not data:
            return
        for local, position_detail in data.items():
            self[local] = create_position_model(local, position_detail)

    def on_trade(self, trade: TradeData):
        """Update the local position from a filled trade and persist it."""
        def update_local_cache(file_path, local, self):
            # Rewrite only this entry in the JSON cache file.
            try:
                with open(file_path, "r") as fp:
                    p = json.load(fp)
            except JSONDecodeError:
                p = {}
            p[local] = self[local].to_dict()
            with open(file_path, "w") as fp:
                dump(obj=p, fp=fp)

        def get_reverse(direction: Direction) -> Direction:
            if direction == Direction.LONG:
                return Direction.SHORT
            if direction == Direction.SHORT:
                return Direction.LONG

        # A close affects the position held in the *opposite* direction.
        if trade.offset == Offset.OPEN:
            local = trade.local_symbol + "." + trade.direction.value
        else:
            local = trade.local_symbol + "." + get_reverse(trade.direction).value
        if local not in self.keys():
            self[local] = SinglePositionModel(local_symbol=trade.local_symbol)
        self[local].update_trade(trade=trade)
        update_local_cache(self.file_path, local, self)

    def on_order(self, order):
        pass

    def on_position(self, position: PositionData):
        """Update the local position from a broker position push."""
        local = position.local_symbol + "." + position.direction.value
        if local not in self.keys():
            self[local] = SinglePositionModel(local_symbol=position.local_symbol)
        # BUGFIX: previously called `update_position`, which does not exist
        # on SinglePositionModel (the method is spelled `update_postition`),
        # raising AttributeError on every position push.
        self[local].update_postition(position=position)

    def get_position_by_ld(self, local_symbol, direction) -> SinglePositionModel:
        """Get the position for a local_symbol + direction, or None."""
        return self.get(local_symbol + "." + direction.value, None)

    def get_position(self, local_symbol) -> "PositionModel | None":
        """Combine the long and short legs into one PositionModel (or None)."""
        long = self.get_position_by_ld(local_symbol, direction=Direction.LONG)
        short = self.get_position_by_ld(local_symbol, direction=Direction.SHORT)
        if long is None and short is None:
            return None
        return PositionModel(long, short)
import json
import os
from json import load, dump, JSONDecodeError
from ctpbee.center import PositionModel
from ctpbee.constant import TradeData, PositionData, Direction, Offset, Exchange
class SinglePositionModel:
    """Local position bookkeeping for one contract in one direction.

    Tracks today's/yesterday's volume, total volume, average price, direction
    and exchange for a single ``local_symbol``, updated either from filled
    trades (`update_trade`) or from broker position queries
    (`update_position`).
    """

    def __init__(self, local_symbol):
        self.local_symbol = local_symbol
        # position direction
        self.direction = None
        # yesterday's volume
        self.yd_volume: int = 0
        # today's volume
        self.td_volume: int = 0
        # average holding price
        self.price: float = 0
        # total volume
        self.volume: int = 0
        # exchange code
        self.exchange = None
        # position profit/loss
        self.pnl: float = 0
        # gateway name
        self.gateway_name = None

    def update_trade(self, trade):
        """Update the local position from a freshly filled trade."""
        self.exchange = trade.exchange
        self.direction = trade.direction
        # Recompute the volume-weighted average price.
        cost = self.price * self.volume
        cost += trade.volume * trade.price
        new_pos = self.volume + trade.volume
        if new_pos:
            self.price = cost / new_pos
        else:
            self.price = 0
        if trade.offset == Offset.OPEN:
            self.td_volume += trade.volume
        # close today's position
        elif trade.offset == Offset.CLOSETODAY:
            self.td_volume -= trade.volume
        # close yesterday's position
        elif trade.offset == Offset.CLOSEYESTERDAY:
            self.yd_volume -= trade.volume
        # generic close: consume today's volume first, then yesterday's
        elif trade.offset == Offset.CLOSE:
            if trade.volume < self.td_volume:
                self.td_volume -= trade.volume
            else:
                self.yd_volume -= trade.volume - self.td_volume
                self.td_volume = 0
        self.volume = self.yd_volume + self.td_volume

    def update_position(self, position: PositionData):
        """Update the local position from a broker position query result."""
        self.yd_volume = position.yd_volume
        self.exchange = position.exchange
        self.price = position.price
        self.volume = position.volume
        self.direction = position.direction
        self.gateway_name = position.gateway_name

    # Backward-compatible alias: this method was historically misspelled and
    # some callers still use the old name.
    update_postition = update_position

    def to_dict(self):
        """Return the position as a plain (JSON-serializable) dict."""
        if isinstance(self.direction, Direction):
            direction = self.direction.value
        else:
            direction = self.direction
        # BUGFIX: this branch previously read `self.direction`, so the
        # serialized "exchange" field silently contained the direction.
        if isinstance(self.exchange, Exchange):
            exchange = self.exchange.value
        else:
            exchange = self.exchange
        return {
            "direction": direction,
            "yd_volume": self.yd_volume,
            "local_symbol": self.local_symbol,
            "exchange": exchange,
            "price": self.price,
            "volume": self.volume
        }

    @property
    def _to_dict(self):
        # NOTE: returns the bound `to_dict` method itself (callers call the
        # result); kept as-is for backward compatibility.
        return self.to_dict

    def to_position(self):
        """Return the position as a ctpbee ``PositionData``."""
        try:
            return PositionData(symbol=self.local_symbol.split(".")[0], exchange=self.exchange, volume=self.volume,
                                price=self.price)
        except Exception:
            raise ValueError(f"本地维护符号有问题,请检查,当前符号为{self.local_symbol}")

    def to_df(self):
        """Return the position as a DataFrame (not implemented yet)."""
        pass

    @classmethod
    def create_model(cls, local, **kwargs):
        """Build an instance from attribute keyword args (e.g. the JSON cache)."""
        instance = cls(local)
        # Plain loop instead of a set comprehension used only for side effects.
        for key, value in kwargs.items():
            setattr(instance, key, value)
        return instance
class ApiPositionManager(dict):
    """Strategy-level position manager backed by a JSON cache file.

    Maps ``"<local_symbol>.<direction>"`` -> ``SinglePositionModel``. Note:
    make sure no orders are placed through other channels, otherwise the
    local bookkeeping diverges from the broker's.
    """

    def __init__(self, name, cache_path=None, init_flag: bool = False):
        """
        * name: strategy name; the cache file is ``<name>.json``
        * cache_path: cache directory (by default the strategy cache goes
          under ``~/.ctpbee/api``)
        """
        self.filename = name + ".json"
        dict.__init__(self)
        self.cache_path = cache_path
        # os.path.join is platform-safe (string "+ '/' +" was not).
        self.file_path = os.path.join(cache_path, self.filename)
        try:
            with open(self.file_path, "r") as f:
                data = json.load(f)
        except JSONDecodeError:
            # Corrupt or empty cache: reset it to a valid empty JSON object.
            data = {}
            with open(self.file_path, "w") as f:
                dump(data, f)
        except FileNotFoundError:
            # First run: create the cache file. BUGFIX: previously the file
            # was opened for writing but nothing was dumped, leaving an
            # empty (invalid-JSON) file behind.
            data = {}
            with open(self.file_path, "w") as f:
                dump(data, f)
        self.init_data(data)

    def init_data(self, data):
        """Populate self from the deserialized cache dict."""
        def create_position_model(local, detail: dict):
            # Parse one cached entry into a SinglePositionModel.
            return SinglePositionModel.create_model(local, **detail)

        if not data:
            return
        for local, position_detail in data.items():
            self[local] = create_position_model(local, position_detail)

    def on_trade(self, trade: TradeData):
        """Update the local position from a filled trade and persist it."""
        def update_local_cache(file_path, local, self):
            # Rewrite only this entry in the JSON cache file.
            try:
                with open(file_path, "r") as fp:
                    p = json.load(fp)
            except JSONDecodeError:
                p = {}
            p[local] = self[local].to_dict()
            with open(file_path, "w") as fp:
                dump(obj=p, fp=fp)

        def get_reverse(direction: Direction) -> Direction:
            if direction == Direction.LONG:
                return Direction.SHORT
            if direction == Direction.SHORT:
                return Direction.LONG

        # A close affects the position held in the *opposite* direction.
        if trade.offset == Offset.OPEN:
            local = trade.local_symbol + "." + trade.direction.value
        else:
            local = trade.local_symbol + "." + get_reverse(trade.direction).value
        if local not in self.keys():
            self[local] = SinglePositionModel(local_symbol=trade.local_symbol)
        self[local].update_trade(trade=trade)
        update_local_cache(self.file_path, local, self)

    def on_order(self, order):
        pass

    def on_position(self, position: PositionData):
        """Update the local position from a broker position push."""
        local = position.local_symbol + "." + position.direction.value
        if local not in self.keys():
            self[local] = SinglePositionModel(local_symbol=position.local_symbol)
        # BUGFIX: previously called `update_position`, which does not exist
        # on SinglePositionModel (the method is spelled `update_postition`),
        # raising AttributeError on every position push.
        self[local].update_postition(position=position)

    def get_position_by_ld(self, local_symbol, direction) -> SinglePositionModel:
        """Get the position for a local_symbol + direction, or None."""
        return self.get(local_symbol + "." + direction.value, None)

    def get_position(self, local_symbol) -> "PositionModel | None":
        """Combine the long and short legs into one PositionModel (or None)."""
        long = self.get_position_by_ld(local_symbol, direction=Direction.LONG)
        short = self.get_position_by_ld(local_symbol, direction=Direction.SHORT)
        if long is None and short is None:
            return None
        return PositionModel(long, short)
from ..mapping import MappedArray, AccessType
from ..indexing import is_fullslice, split_operation, slicer_sub2ind, invert_slice
from .. import volutils
from ..readers import reader_classes
from .metadata import ome_zooms, parse_unit
from nitorch.spatial import affine_default
from nitorch.core import pyutils, dtypes
from tifffile import TiffFile
from contextlib import contextmanager
import torch
import numpy as np
from warnings import warn
class TiffArray(MappedArray):
    """
    MappedArray that uses `tifffile` under the hood.
    """

    def __init__(self, file_like, permission='r', keep_file_open=True, **hints):
        """
        Parameters
        ----------
        file_like : str or file object
        permission : {'r'}, default='r'
            Tiff arrays are read-only (see `writable`).
        keep_file_open : bool, default=True
            Whether to keep the file handle open
        hints : keyword of the form `is_<format>=<True|False>`
            Tells the Tiff reader that a file is or isn't of a specific
            subformat. If not provided, it is guessed by the Tiff reader.
        """
        self._tiff = TiffFile(file_like, **hints)
        if not keep_file_open:
            self._tiff.close()
        self._series = 0
        self._level = 0
        self._cache = dict()
        super().__init__()

    # NOTE: these class-level values are only fallbacks; `__init__` always
    # shadows them with per-instance attributes. In particular the class-level
    # `_cache` dict is shared across instances and must never be mutated
    # directly on the class.
    _series: int = 0   # index of series to map
    _level: int = 0    # index of pyramid level to map
    _cache: dict = {}  # a cache of precomputed _shape, _spatial, etc

    @property
    def _shape(self):
        """Full shape of a series+level"""
        if '_shape' not in self._cache:
            with self.tiffobj() as tiff:
                shape = tiff.series[self.series].levels[self.level].shape
            self._cache['_shape'] = shape
        return self._cache['_shape']

    @property
    def _axes(self):
        """Axes names of a series+level"""
        if '_axes' not in self._cache:
            with self.tiffobj() as tiff:
                axes = tiff.series[self.series].levels[self.level].axes
            self._cache['_axes'] = axes
        return self._cache['_axes']

    @property
    def _spatial(self):
        """Mask of spatial axes of a series+level"""
        msk = [ax in 'XYZ' for ax in self._axes]
        return msk

    @property
    def _affine(self):
        """Affine orientation matrix of a series+level"""
        # TODO: I don't know yet how we should use GeoTiff to encode
        # affine matrices. In the matrix/zooms, their voxels are ordered
        # as [x, y, z] even though their dimensions in the returned array
        # are ordered as [Z, Y, X]. If we want to keep the same convention
        # as nitorch, I need to permute the matrix/zooms.
        if '_affine' not in self._cache:
            with self.tiffobj() as tiff:
                omexml = tiff.ome_metadata
                geotags = tiff.geotiff_metadata or {}
            zooms, units, axes = ome_zooms(omexml, self.series)
            if zooms:
                # convert to mm + drop non-spatial zooms
                # (`unit_type` instead of `type`: don't shadow the builtin)
                units = [parse_unit(u) for u in units]
                zooms = [z * (f / 1e-3) for z, (f, unit_type) in zip(zooms, units)
                         if unit_type == 'm']
                if 'ModelPixelScaleTag' in geotags:
                    warn("Both OME and GeoTiff pixel scales are present: "
                         "{} vs {}. Using OME."
                         .format(zooms, geotags['ModelPixelScaleTag']))
            elif 'ModelPixelScaleTag' in geotags:
                zooms = geotags['ModelPixelScaleTag']
                axes = 'XYZ'
            else:
                zooms = 1.
                axes = [ax for ax in self._axes if ax in 'XYZ']
            if 'ModelTransformation' in geotags:
                # full 4x4 affine stored directly in the GeoTiff tags
                aff = geotags['ModelTransformation']
                aff = torch.as_tensor(aff, dtype=torch.double).reshape(4, 4)
                self._cache['_affine'] = aff
            elif 'ModelTiepointTag' in geotags:
                # copied from tifffile
                sx, sy, sz = pyutils.make_list(zooms, n=3)
                tiepoints = torch.as_tensor(geotags['ModelTiepointTag'])
                affines = []
                for tiepoint in tiepoints:
                    i, j, k, x, y, z = tiepoint
                    affines.append(torch.as_tensor(
                        [[sx, 0.0, 0.0, x - i * sx],
                         [0.0, -sy, 0.0, y + j * sy],
                         [0.0, 0.0, sz, z - k * sz],
                         [0.0, 0.0, 0.0, 1.0]], dtype=torch.double))
                affines = torch.stack(affines, dim=0)
                if len(tiepoints) == 1:
                    affines = affines[0]
                self._cache['_affine'] = affines
            else:
                # no explicit transform: build a default affine from zooms
                zooms = pyutils.make_list(zooms, n=len(axes))
                ax2zoom = {ax: zoom for ax, zoom in zip(axes, zooms)}
                axes = [ax for ax in self._axes if ax in 'XYZ']
                shape = [shp for shp, msk in zip(self._shape, self._spatial)
                         if msk]
                zooms = [ax2zoom.get(ax, 1.) for ax in axes]
                layout = [('R' if ax == 'Z' else 'P' if ax == 'Y' else 'S')
                          for ax in axes]
                aff = affine_default(shape, zooms, layout=''.join(layout))
                self._cache['_affine'] = aff
        return self._cache['_affine']

    @property
    def dtype(self):
        """Numpy data type of the mapped series+level"""
        if 'dtype' not in self._cache:
            with self.tiffobj() as tiff:
                dt = tiff.series[self.series].levels[self.level].dtype
            self._cache['dtype'] = dt
        return self._cache['dtype']

    @property
    def series(self):
        """Series index (Tiff files can hold multiple series)"""
        return self._series

    @series.setter
    def series(self, val):
        if val != self.series and not all(is_fullslice(self.slicer)):
            raise RuntimeError("Cannot change series in a view")
        self._series = val
        # cached shape/axes/affine/dtype belong to the old series
        self._cache = {}

    @property
    def level(self):
        """Level index (Tiff files can hold multiple spatial resolutions)"""
        return self._level

    @level.setter
    def level(self, val):
        if val != self.level and not all(is_fullslice(self.slicer)):
            raise RuntimeError("Cannot change resolution level in a view")
        self._level = val
        # cached shape/axes/affine/dtype belong to the old level
        self._cache = {}

    @property
    def readable(self):
        # That's not exact: pseudo partial access in-plane
        return AccessType.TruePartial

    @property
    def writable(self):
        return AccessType.No

    @contextmanager
    def tiffobj(self):
        """Returns an *open* Tiff reader.

        Should be used in a `with` statement:
        ```python
        >>> with self.tiffobj() as tiff:
        >>>    # do stuff with `tiff`
        ```
        """
        closed = self._tiff.filehandle.closed
        if closed:
            self._tiff.filehandle.open()
        # BUGFIX: re-close in a `finally` so an exception raised inside the
        # `with` body cannot leak an open file handle.
        try:
            yield self._tiff
        finally:
            if closed:
                self._tiff.close()

    def __del__(self):
        # make sure we close all file objects; guard against partially
        # initialized instances (e.g. the TiffFile constructor raised).
        tiff = getattr(self, '_tiff', None)
        if tiff is not None:
            tiff.close()

    @property
    def filename(self):
        with self.tiffobj() as f:
            return f.filename

    def data(self, dtype=None, device=None, casting='unsafe', rand=True,
             cutoff=None, dim=None, numpy=False):
        """Load the (sliced) array in memory as a torch tensor or numpy array."""
        # --- sanity check before reading ---
        dtype = self.dtype if dtype is None else dtype
        dtype = dtypes.dtype(dtype)
        if not numpy and dtype.torch is None:
            raise TypeError('Data type {} does not exist in PyTorch.'
                            .format(dtype))
        # --- check that view is not empty ---
        if pyutils.prod(self.shape) == 0:
            if numpy:
                return np.zeros(self.shape, dtype=dtype.numpy)
            else:
                return torch.zeros(self.shape, dtype=dtype.torch, device=device)
        # --- read native data ---
        slicer, perm, newdim = split_operation(self.permutation, self.slicer, 'r')
        with self.tiffobj() as f:
            dat = self._read_data_raw(slicer, tiffobj=f)
        dat = dat.transpose(perm)[newdim]
        indtype = dtypes.dtype(self.dtype)
        # --- cutoff ---
        dat = volutils.cutoff(dat, cutoff, dim)
        # --- cast ---
        rand = rand and not indtype.is_floating_point
        if rand and not dtype.is_floating_point:
            # cast to float first so the dequantization noise can be added
            tmpdtype = dtypes.float64
        else:
            tmpdtype = dtype
        dat, scale = volutils.cast(dat, tmpdtype.numpy, casting, with_scale=True)
        # --- random sample ---
        # uniform noise in the uncertainty interval
        if rand and not (scale == 1 and not dtype.is_floating_point):
            dat = volutils.addnoise(dat, scale)
        # --- final cast ---
        dat = volutils.cast(dat, dtype.numpy, 'unsafe')
        # convert to torch if needed
        if not numpy:
            dat = torch.as_tensor(dat, device=device)
        return dat

    # --------------
    #    LOW LEVEL
    # --------------

    def _read_data_raw(self, slicer=None, tiffobj=None):
        """Read native data

        Dispatch to `_read_data_raw_full` or `_read_data_raw_partial`.

        Parameters
        ----------
        slicer : tuple[index_like], optional
            A tuple of indices that describe the chunk of data to read.
            If None, read everything.
        tiffobj : file object, default=`self.fileobj('image', 'r')`
            A file object (with `seek`, `read`) from which to read

        Returns
        -------
        dat : np.ndarray

        """
        if tiffobj is None:
            with self.tiffobj() as tiffobj:
                return self._read_data_raw(slicer, tiffobj)

        # load sub-array
        if slicer is None or all(is_fullslice(slicer, self._shape)):
            dat = self._read_data_raw_full(tiffobj)
        else:
            dat = self._read_data_raw_partial(slicer, tiffobj)

        return dat

    def _read_data_raw_partial(self, slicer, tiffobj=None):
        """Read a chunk of data from disk

        Parameters
        ----------
        slicer : tuple[slice or int]
        tiffobj : TiffFile

        Returns
        -------
        dat : np.ndarray

        """
        if tiffobj is None:
            with self.tiffobj() as tiffobj:
                return self._read_data_raw_partial(slicer, tiffobj)

        # 1) split dimensions
        shape_feat, shape_stack, shape_page = self._shape_split(tiffobj)
        dim_feat = len(shape_feat)
        dim_stack = len(shape_stack)
        dim_page = len(shape_page)

        # 2) split slicer
        slicer_feat = slicer[:dim_feat]
        slicer_stack = slicer[dim_feat:dim_feat+dim_stack]
        slicer_page = slicer[dim_feat+dim_stack:]
        dim_feat_out = sum(isinstance(idx, slice) for idx in slicer_feat)
        dim_stack_out = sum(isinstance(idx, slice) for idx in slicer_stack)
        dim_page_out = sum(isinstance(idx, slice) for idx in slicer_page)

        # 3) ensure positive strides
        # (pages can only be read in storage order; negative strides are
        # emulated by a final flip in step 7)
        slicer_inv = [slice(None, None, -1) if idx.step and idx.step < 0
                      else slice(None) for idx in slicer_stack
                      if isinstance(idx, slice)]
        slicer_stack = [invert_slice(idx, shp) if isinstance(idx, slice) and
                        idx.step and idx.step < 0
                        else idx for idx, shp in zip(slicer_stack, shape_stack)]

        # 4) convert stack slice to list of linear indices
        #    (or to one slice if possible)
        index_stack = slicer_sub2ind(slicer_stack, shape_stack)

        # 5) read only pages in the substack
        dat = tiffobj.asarray(key=index_stack,
                              series=self.series,
                              level=self.level)
        dat = dat.reshape([*shape_feat, -1, *shape_page])

        # 6) apply slicers along the feature and page dimensions
        dat = dat[(*slicer_feat, slice(None), *slicer_page)]

        # 7) reshape
        dat = dat.reshape(self.shape)

        # 7) final slicers for negative strides along stack dimensions
        slicer = [slice(None)] * dim_feat_out + slicer_inv + [slice(None)] * dim_page_out
        dat = dat[tuple(slicer)]

        return dat

    def _read_data_raw_full(self, tiffobj=None):
        """Read the full data from disk

        Parameters
        ----------
        tiffobj : TiffFile

        Returns
        -------
        dat : np.ndarray

        """
        if tiffobj is None:
            with self.tiffobj() as tiffobj:
                return self._read_data_raw_full(tiffobj)
        return tiffobj.asarray(series=self.series, level=self.level)

    def _shape_split(self, tiffobj=None):
        """Split the shape into different components

        Returns
        -------
        shape_feat : tuple[int]
            Color features (belong to pages but end-up at the left-most axis)
        shape_collection : tuple[int]
            Shape of the collection of pages (usually Z, T, etc. axes)
        shape_page : tuple[int]
            Shape of one page -- with or without features (usually X, Y axes)

        """
        if tiffobj is None:
            with self.tiffobj() as tiffobj:
                return self._shape_split(tiffobj)
        if tiffobj.is_imagej:
            return self._shape_split_imagej(tiffobj)
        else:
            page = tiffobj.series[self.series].levels[self.level].pages[0]
            shape_page = page.shape
            page_dim = len(shape_page)
            shape_collection = self._shape[:-page_dim]
            return tuple(), tuple(shape_collection), tuple(shape_page)

    def _shape_split_imagej(self, tiffobj):
        """Split the shape into different components (ImageJ format).

        This is largely copied from tifffile.
        """
        pages = tiffobj.pages
        pages.useframes = True
        pages.keyframe = 0
        page = pages[0]
        meta = tiffobj.imagej_metadata

        def is_virtual():
            # ImageJ virtual hyperstacks store all image metadata in the first
            # page and image data are stored contiguously before the second
            # page, if any
            if not page.is_final:
                return False
            images = meta.get('images', 0)
            if images <= 1:
                return False
            offset, count = page.is_contiguous
            # BUGFIX: this helper was copied from tifffile, where `self` was
            # the TiffFile; here the file handle lives on `tiffobj`, and
            # `self.filehandle` raised AttributeError on TiffArray.
            if (
                count != pyutils.prod(page.shape) * page.bitspersample // 8
                or offset + count * images > tiffobj.filehandle.size
            ):
                raise ValueError()
            # check that next page is stored after data
            if len(pages) > 1 and offset + count * images > pages[1].offset:
                return False
            return True

        isvirtual = is_virtual()
        if isvirtual:
            # no need to read other pages
            pages = [page]
        else:
            pages = pages[:]

        images = meta.get('images', len(pages))
        frames = meta.get('frames', 1)
        slices = meta.get('slices', 1)
        channels = meta.get('channels', 1)

        # compute shape of the collection of pages
        shape = []
        axes = []
        if frames > 1:
            shape.append(frames)
            axes.append('T')
        if slices > 1:
            shape.append(slices)
            axes.append('Z')
        if channels > 1 and (pyutils.prod(shape) if shape else 1) != images:
            shape.append(channels)
            axes.append('C')
        remain = images // (pyutils.prod(shape) if shape else 1)
        if remain > 1:
            shape.append(remain)
            axes.append('I')

        if page.axes[0] == 'S' and 'C' in axes:
            # planar storage, S == C, saved by Bio-Formats
            return tuple(), tuple(shape), tuple(page.shape[1:])
        elif page.axes[0] == 'I':
            # contiguous multiple images
            return tuple(), tuple(shape), tuple(page.shape[1:])
        elif page.axes[:2] == 'SI':
            # color-mapped contiguous multiple images
            return tuple(page.shape[0:1]), tuple(shape), tuple(page.shape[2:])
        else:
            return tuple(), tuple(shape), tuple(page.shape)
reader_classes.append(TiffArray) | nitorch/io/tiff/array.py | from ..mapping import MappedArray, AccessType
from ..indexing import is_fullslice, split_operation, slicer_sub2ind, invert_slice
from .. import volutils
from ..readers import reader_classes
from .metadata import ome_zooms, parse_unit
from nitorch.spatial import affine_default
from nitorch.core import pyutils, dtypes
from tifffile import TiffFile
from contextlib import contextmanager
import torch
import numpy as np
from warnings import warn
class TiffArray(MappedArray):
"""
MappedArray that uses `tifffile` under the hood.
"""
def __init__(self, file_like, permission='r', keep_file_open=True, **hints):
"""
Parameters
----------
file_like : str or file object
keep_file_open : bool, default=True
Whether to keep the file handle open
hints : keyword of the form `is_<format>=<True|False>`
Tells the Tiff reader that a file is or isn't of a specific
subformat. If not provided, it it guessed by the Tiff reader.
"""
self._tiff = TiffFile(file_like, **hints)
if not keep_file_open:
self._tiff.close()
self._series = 0
self._level = 0
self._cache = dict()
super().__init__()
_series: int = 0 # index of series to map
_level: int = 0 # index of pyramid level to map
_cache: dict = {} # a cache of precomputed _shape, _spatial, etc
@property
def _shape(self):
"""Full shape of a series+level"""
if '_shape' not in self._cache:
with self.tiffobj() as tiff:
shape = tiff.series[self.series].levels[self.level].shape
self._cache['_shape'] = shape
return self._cache['_shape']
@property
def _axes(self):
"""Axes names of a series+level"""
if '_axes' not in self._cache:
with self.tiffobj() as tiff:
axes = tiff.series[self.series].levels[self.level].axes
self._cache['_axes'] = axes
return self._cache['_axes']
@property
def _spatial(self):
"""Mask of spatial axes of a series+level"""
msk = [ax in 'XYZ' for ax in self._axes]
return msk
@property
def _affine(self):
"""Affine orientation matrix of a series+level"""
# TODO: I don't know yet how we should use GeoTiff to encode
# affine matrices. In the matrix/zooms, their voxels are ordered
# as [x, y, z] even though their dimensions in the returned array
# are ordered as [Z, Y, X]. If we want to keep the same convention
# as nitorch, I need to permute the matrix/zooms.
if '_affine' not in self._cache:
with self.tiffobj() as tiff:
omexml = tiff.ome_metadata
geotags = tiff.geotiff_metadata or {}
zooms, units, axes = ome_zooms(omexml, self.series)
if zooms:
# convert to mm + drop non-spatial zooms
units = [parse_unit(u) for u in units]
zooms = [z * (f / 1e-3) for z, (f, type) in zip(zooms, units)
if type == 'm']
if 'ModelPixelScaleTag' in geotags:
warn("Both OME and GeoTiff pixel scales are present: "
"{} vs {}. Using OME."
.format(zooms, geotags['ModelPixelScaleTag']))
elif 'ModelPixelScaleTag' in geotags:
zooms = geotags['ModelPixelScaleTag']
axes = 'XYZ'
else:
zooms = 1.
axes = [ax for ax in self._axes if ax in 'XYZ']
if 'ModelTransformation' in geotags:
aff = geotags['ModelTransformation']
aff = torch.as_tensor(aff, dtype=torch.double).reshape(4, 4)
self._cache['_affine'] = aff
elif ('ModelTiepointTag' in geotags):
# copied from tifffile
sx, sy, sz = pyutils.make_list(zooms, n=3)
tiepoints = torch.as_tensor(geotags['ModelTiepointTag'])
affines = []
for tiepoint in tiepoints:
i, j, k, x, y, z = tiepoint
affines.append(torch.as_tensor(
[[sx, 0.0, 0.0, x - i * sx],
[0.0, -sy, 0.0, y + j * sy],
[0.0, 0.0, sz, z - k * sz],
[0.0, 0.0, 0.0, 1.0]], dtype=torch.double))
affines = torch.stack(affines, dim=0)
if len(tiepoints) == 1:
affines = affines[0]
self._cache['_affine'] = affines
else:
zooms = pyutils.make_list(zooms, n=len(axes))
ax2zoom = {ax: zoom for ax, zoom in zip(axes, zooms)}
axes = [ax for ax in self._axes if ax in 'XYZ']
shape = [shp for shp, msk in zip(self._shape, self._spatial)
if msk]
zooms = [ax2zoom.get(ax, 1.) for ax in axes]
layout = [('R' if ax == 'Z' else 'P' if ax == 'Y' else 'S')
for ax in axes]
aff = affine_default(shape, zooms, layout=''.join(layout))
self._cache['_affine'] = aff
return self._cache['_affine']
@property
def dtype(self):
if 'dtype' not in self._cache:
with self.tiffobj() as tiff:
dt = tiff.series[self.series].levels[self.level].dtype
self._cache['dtype'] = dt
return self._cache['dtype']
@property
def series(self):
"""Series index (Tiff files can hold multiple series)"""
return self._series
@series.setter
def series(self, val):
if val != self.series and not all(is_fullslice(self.slicer)):
raise RuntimeError("Cannot change series in a view")
self._series = val
self._cache = {}
@property
def level(self):
"""Level index (Tiff files can hold multiple spatial resolutions)"""
return self._level
@level.setter
def level(self, val):
if val != self.level and not all(is_fullslice(self.slicer)):
raise RuntimeError("Cannot change resolution level in a view")
self._level = val
self._cache = {}
@property
def readable(self):
# That's not exact: pseudo partial access in-plane
return AccessType.TruePartial
@property
def writable(self):
return AccessType.No
@contextmanager
def tiffobj(self):
"""Returns an *open* Tiff reader.
Should be used in a `with` statement:
```python
>>> with self.tiffobj() as tiff:
>>> # do stuff with `tiff`
```
"""
closed = self._tiff.filehandle.closed
if closed:
self._tiff.filehandle.open()
yield self._tiff
if closed:
self._tiff.close()
def __del__(self):
# make sure we close all file objects
self._tiff.close()
@property
def filename(self):
with self.tiffobj() as f:
return f.filename
def data(self, dtype=None, device=None, casting='unsafe', rand=True,
cutoff=None, dim=None, numpy=False):
# --- sanity check before reading ---
dtype = self.dtype if dtype is None else dtype
dtype = dtypes.dtype(dtype)
if not numpy and dtype.torch is None:
raise TypeError('Data type {} does not exist in PyTorch.'
.format(dtype))
# --- check that view is not empty ---
if pyutils.prod(self.shape) == 0:
if numpy:
return np.zeros(self.shape, dtype=dtype.numpy)
else:
return torch.zeros(self.shape, dtype=dtype.torch, device=device)
# --- read native data ---
slicer, perm, newdim = split_operation(self.permutation, self.slicer, 'r')
with self.tiffobj() as f:
dat = self._read_data_raw(slicer, tiffobj=f)
dat = dat.transpose(perm)[newdim]
indtype = dtypes.dtype(self.dtype)
# --- cutoff ---
dat = volutils.cutoff(dat, cutoff, dim)
# --- cast ---
rand = rand and not indtype.is_floating_point
if rand and not dtype.is_floating_point:
tmpdtype = dtypes.float64
else:
tmpdtype = dtype
dat, scale = volutils.cast(dat, tmpdtype.numpy, casting, with_scale=True)
# --- random sample ---
# uniform noise in the uncertainty interval
if rand and not (scale == 1 and not dtype.is_floating_point):
dat = volutils.addnoise(dat, scale)
# --- final cast ---
dat = volutils.cast(dat, dtype.numpy, 'unsafe')
# convert to torch if needed
if not numpy:
dat = torch.as_tensor(dat, device=device)
return dat
# --------------
# LOW LEVEL
# --------------
def _read_data_raw(self, slicer=None, tiffobj=None):
"""Read native data
Dispatch to `_read_data_raw_full` or `_read_data_raw_partial`.
Parameters
----------
slicer : tuple[index_like], optional
A tuple of indices that describe the chunk of data to read.
If None, read everything.
tiffobj : file object, default=`self.fileobj('image', 'r')`
A file object (with `seek`, `read`) from which to read
Returns
-------
dat : np.ndarray
"""
if tiffobj is None:
with self.tiffobj() as tiffobj:
return self._read_data_raw(slicer, tiffobj)
# load sub-array
if slicer is None or all(is_fullslice(slicer, self._shape)):
dat = self._read_data_raw_full(tiffobj)
else:
dat = self._read_data_raw_partial(slicer, tiffobj)
return dat
def _read_data_raw_partial(self, slicer, tiffobj=None):
"""Read a chunk of data from disk
Parameters
----------
slicer : tuple[slice or int]
tiffobj : TiffFile
Returns
-------
dat : np.ndarray
"""
if tiffobj is None:
with self.tiffobj() as tiffobj:
return self._read_data_raw_partial(slicer, tiffobj)
# 1) split dimensions
shape_feat, shape_stack, shape_page = self._shape_split(tiffobj)
dim_feat = len(shape_feat)
dim_stack = len(shape_stack)
dim_page = len(shape_page)
# 2) split slicer
slicer_feat = slicer[:dim_feat]
slicer_stack = slicer[dim_feat:dim_feat+dim_stack]
slicer_page = slicer[dim_feat+dim_stack:]
dim_feat_out = sum(isinstance(idx, slice) for idx in slicer_feat)
dim_stack_out = sum(isinstance(idx, slice) for idx in slicer_stack)
dim_page_out = sum(isinstance(idx, slice) for idx in slicer_page)
# 3) ensure positive strides
slicer_inv = [slice(None, None, -1) if idx.step and idx.step < 0
else slice(None) for idx in slicer_stack
if isinstance(idx, slice)]
slicer_stack = [invert_slice(idx, shp) if isinstance(idx, slice) and
idx.step and idx.step < 0
else idx for idx, shp in zip(slicer_stack, shape_stack)]
# 4) convert stack slice to list of linear indices
# (or to one slice if possible)
index_stack = slicer_sub2ind(slicer_stack, shape_stack)
# 5) read only pages in the substack
dat = tiffobj.asarray(key=index_stack,
series=self.series,
level=self.level)
dat = dat.reshape([*shape_feat, -1, *shape_page])
# 6) apply slicers along the feature and page dimensions
dat = dat[(*slicer_feat, slice(None), *slicer_page)]
# 7) reshape
dat = dat.reshape(self.shape)
# 7) final slicers for negative strides along stack dimensions
slicer = [slice(None)] * dim_feat_out + slicer_inv + [slice(None)] * dim_page_out
dat = dat[tuple(slicer)]
return dat
def _read_data_raw_full(self, tiffobj=None):
"""Read the full data from disk
Parameters
----------
tiffobj : TiffFile
Returns
-------
dat : np.ndarray
"""
if tiffobj is None:
with self.tiffobj() as tiffobj:
return self._read_data_raw_full(tiffobj)
return tiffobj.asarray(series=self.series, level=self.level)
def _shape_split(self, tiffobj=None):
"""Split the shape into different components
Returns
-------
shape_feat : tuple[int]
Color features (belong to pages but end-up at the left-most axis)
shape_collection : tuple[int]
Shape of the collection of pages (usually Z, T, etc. axes)
shape_page : tuple[int]
Shape of one page -- with or without features (usually X, Y axes)
"""
if tiffobj is None:
with self.tiffobj() as tiffobj:
return self._shape_split(tiffobj)
if tiffobj.is_imagej:
return self._shape_split_imagej(tiffobj)
else:
page = tiffobj.series[self.series].levels[self.level].pages[0]
shape_page = page.shape
page_dim = len(shape_page)
shape_collection = self._shape[:-page_dim]
return tuple(), tuple(shape_collection), tuple(shape_page)
def _shape_split_imagej(self, tiffobj):
"""Split the shape into different components (ImageJ format).
This is largely copied from tifffile.
"""
pages = tiffobj.pages
pages.useframes = True
pages.keyframe = 0
page = pages[0]
meta = tiffobj.imagej_metadata
def is_virtual():
# ImageJ virtual hyperstacks store all image metadata in the first
# page and image data are stored contiguously before the second
# page, if any
if not page.is_final:
return False
images = meta.get('images', 0)
if images <= 1:
return False
offset, count = page.is_contiguous
if (
count != pyutils.prod(page.shape) * page.bitspersample // 8
or offset + count * images > self.filehandle.size
):
raise ValueError()
# check that next page is stored after data
if len(pages) > 1 and offset + count * images > pages[1].offset:
return False
return True
isvirtual = is_virtual()
if isvirtual:
# no need to read other pages
pages = [page]
else:
pages = pages[:]
images = meta.get('images', len(pages))
frames = meta.get('frames', 1)
slices = meta.get('slices', 1)
channels = meta.get('channels', 1)
# compute shape of the collection of pages
shape = []
axes = []
if frames > 1:
shape.append(frames)
axes.append('T')
if slices > 1:
shape.append(slices)
axes.append('Z')
if channels > 1 and (pyutils.prod(shape) if shape else 1) != images:
shape.append(channels)
axes.append('C')
remain = images // (pyutils.prod(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
if page.axes[0] == 'S' and 'C' in axes:
# planar storage, S == C, saved by Bio-Formats
return tuple(), tuple(shape), tuple(page.shape[1:])
elif page.axes[0] == 'I':
# contiguous multiple images
return tuple(), tuple(shape), tuple(page.shape[1:])
elif page.axes[:2] == 'SI':
# color-mapped contiguous multiple images
return tuple(page.shape[0:1]), tuple(shape), tuple(page.shape[2:])
else:
return tuple(), tuple(shape), tuple(page.shape)
reader_classes.append(TiffArray) | 0.752104 | 0.567637 |
from marinetrafficapi.models import Model
from marinetrafficapi.fields import NumberField, TextField, RealNumberField
class PredictiveArrivals(Model):
"""Receive a prediction of the vessels
likely to arrive to a specific port."""
mmsi = NumberField(index='MMSI',
desc="Maritime Mobile Service Identity - a \n"
"nine-digit number sent in digital form \n"
"over a radio frequency that identifies \n"
"the vessel's transmitter station")
imo = NumberField(index='IMO',
desc="International Maritime Organisation number - a \n"
"seven-digit number that uniquely \n"
"identifies vessels")
ship_id = NumberField(index='SHIP_ID',
desc="A uniquely assigned ID by MarineTraffic \n"
"for the subject vessel")
ship_name = TextField(index='SHIPNAME',
desc="The Shipname of the subject vessel")
ship_class = TextField(index='SHIPCLASS',
desc="The class of the subject vessel \n"
"based on vessel type and size")
market = TextField(index='MARKET',
desc="The commercial market segment \n"
"the subject vessel belongs to")
from_port_id = NumberField(index='FROM_PORT_ID',
desc="A uniquely assigned ID by \n"
"MarineTraffic for the port that \n"
"was used as origin to retrieve \n"
"possible destinations")
from_port = TextField(index='FROM_PORT',
desc="The port that was used as origin \n"
"to retrieve possible destinations")
next_port_id = NumberField(index='NEXT_PORT_ID',
desc="A uniquely assigned ID by \n"
"MarineTraffic for the Next Port")
next_port = TextField(index='NEXT_PORT',
desc="The target port used to \n"
"predict arrivals of vessels")
next_area = TextField(index='NEXT_AREA',
desc="The area where the target port belongs to")
next_port_prob = RealNumberField(index='NEXT_PORT_PROB',
desc="The probability of visiting \n"
"the target port")
next_area_prob = RealNumberField(index='NEXT_AREA_PROB',
desc="The probability of visiting the \n"
"area where the target port is") | marinetrafficapi/voyage_info/VI05_predictive_arrivals/models.py | from marinetrafficapi.models import Model
from marinetrafficapi.fields import NumberField, TextField, RealNumberField
class PredictiveArrivals(Model):
"""Receive a prediction of the vessels
likely to arrive to a specific port."""
mmsi = NumberField(index='MMSI',
desc="Maritime Mobile Service Identity - a \n"
"nine-digit number sent in digital form \n"
"over a radio frequency that identifies \n"
"the vessel's transmitter station")
imo = NumberField(index='IMO',
desc="International Maritime Organisation number - a \n"
"seven-digit number that uniquely \n"
"identifies vessels")
ship_id = NumberField(index='SHIP_ID',
desc="A uniquely assigned ID by MarineTraffic \n"
"for the subject vessel")
ship_name = TextField(index='SHIPNAME',
desc="The Shipname of the subject vessel")
ship_class = TextField(index='SHIPCLASS',
desc="The class of the subject vessel \n"
"based on vessel type and size")
market = TextField(index='MARKET',
desc="The commercial market segment \n"
"the subject vessel belongs to")
from_port_id = NumberField(index='FROM_PORT_ID',
desc="A uniquely assigned ID by \n"
"MarineTraffic for the port that \n"
"was used as origin to retrieve \n"
"possible destinations")
from_port = TextField(index='FROM_PORT',
desc="The port that was used as origin \n"
"to retrieve possible destinations")
next_port_id = NumberField(index='NEXT_PORT_ID',
desc="A uniquely assigned ID by \n"
"MarineTraffic for the Next Port")
next_port = TextField(index='NEXT_PORT',
desc="The target port used to \n"
"predict arrivals of vessels")
next_area = TextField(index='NEXT_AREA',
desc="The area where the target port belongs to")
next_port_prob = RealNumberField(index='NEXT_PORT_PROB',
desc="The probability of visiting \n"
"the target port")
next_area_prob = RealNumberField(index='NEXT_AREA_PROB',
desc="The probability of visiting the \n"
"area where the target port is") | 0.765155 | 0.438364 |
from fcn.config import cfg
from gt_synthesize_layer.minibatch import get_minibatch
import numpy as np
import cv2
from utils.blob import pad_im
import os
import cPickle
import scipy.io
class GtSynthesizeLayer(object):
"""FCN data layer used for training."""
def __init__(self, roidb, roidb_val, num_classes, extents, points, symmetry, cache_path, name, data_queue, model_file, pose_file, class_colors=None):
"""Set the roidb to be used by this layer during training."""
self._roidb = roidb
self._roidb_val = roidb_val
self._num_classes = num_classes
self._extents = extents
self._points = points
self._symmetry = symmetry
self._cache_path = cache_path
self._name = name
self._data_queue = data_queue
self._shuffle_roidb_inds()
self._cur_val = 0
self._perm_val = np.arange(len(self._roidb_val))
self._shuffle_syn_inds()
self._shuffle_adapt_inds()
self._build_background_images()
self._build_background_depth_images()
self._read_camera_parameters()
self._validation = False
self._class_colors = class_colors
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _shuffle_roidb_val_inds(self):
"""Randomly permute the validation roidb."""
self._perm_val = np.random.permutation(np.arange(len(self._roidb_val)))
self._cur_val = 0
def _shuffle_syn_inds(self):
self._perm_syn = np.random.permutation(np.arange(cfg.TRAIN.SYNNUM))
self._cur_syn = 0
def _shuffle_adapt_inds(self):
self._perm_adapt = np.random.permutation(np.arange(cfg.TRAIN.ADAPT_NUM))
self._cur_adapt = 0
def _get_next_minibatch_inds(self, is_syn, is_adapt):
"""Return the roidb indices for the next minibatch."""
if self._validation:
db_inds = self._perm_val[self._cur_val:self._cur_val + cfg.TRAIN.IMS_PER_BATCH]
if self._cur_val + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb_val):
self._cur_val = 0
#self._shuffle_roidb_val_inds
if is_syn == 0 and is_adapt == 0:
self._cur_val += cfg.TRAIN.IMS_PER_BATCH
db_inds_syn = self._perm_syn[self._cur_syn:self._cur_syn + cfg.TRAIN.IMS_PER_BATCH]
if is_syn:
self._cur_syn += cfg.TRAIN.IMS_PER_BATCH
if self._cur_syn + cfg.TRAIN.IMS_PER_BATCH >= cfg.TRAIN.SYNNUM:
self._shuffle_syn_inds()
db_inds_adapt = self._perm_adapt[self._cur_adapt:self._cur_adapt + cfg.TRAIN.IMS_PER_BATCH]
if is_adapt:
self._cur_adapt += cfg.TRAIN.IMS_PER_BATCH
if self._cur_adapt + cfg.TRAIN.IMS_PER_BATCH >= cfg.TRAIN.ADAPT_NUM:
self._shuffle_adapt_inds()
else:
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
if is_syn == 0 and is_adapt == 0:
self._cur += cfg.TRAIN.IMS_PER_BATCH
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds_syn = self._perm_syn[self._cur_syn:self._cur_syn + cfg.TRAIN.IMS_PER_BATCH]
if is_syn:
self._cur_syn += cfg.TRAIN.IMS_PER_BATCH
if self._cur_syn + cfg.TRAIN.IMS_PER_BATCH >= cfg.TRAIN.SYNNUM:
self._shuffle_syn_inds()
db_inds_adapt = self._perm_adapt[self._cur_adapt:self._cur_adapt + cfg.TRAIN.IMS_PER_BATCH]
if is_adapt:
self._cur_adapt += cfg.TRAIN.IMS_PER_BATCH
if self._cur_adapt + cfg.TRAIN.IMS_PER_BATCH >= cfg.TRAIN.ADAPT_NUM:
self._shuffle_adapt_inds()
return db_inds, db_inds_syn, db_inds_adapt
def _get_next_minibatch(self, iter):
"""Return the blobs to be used for the next minibatch."""
if cfg.TRAIN.SYNTHESIZE:
if cfg.TRAIN.SYN_RATIO == 0:
is_syn = 1
else:
r = np.random.randint(cfg.TRAIN.SYN_RATIO+1, size=1)[0]
if r == 0:
is_syn = 0
else:
is_syn = 1
else:
is_syn = 0
# domain adaptation
if cfg.TRAIN.ADAPT:
r = np.random.randint(cfg.TRAIN.ADAPT_RATIO+1, size=1)[0]
if r == 0:
is_adapt = 1
is_syn = 0
else:
is_adapt = 0
else:
is_adapt = 0
if iter >= cfg.TRAIN.SYMSIZE:
is_symmetric = 1
else:
is_symmetric = 0
db_inds, db_inds_syn, db_inds_adapt = self._get_next_minibatch_inds(is_syn, is_adapt)
if self._validation:
minibatch_db = [self._roidb_val[i] for i in db_inds]
else:
minibatch_db = [self._roidb[i] for i in db_inds]
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'NORMAL':
backgrounds = self._backgrounds_depth
else:
backgrounds = self._backgrounds
return get_minibatch(minibatch_db, self._extents, self._points, self._symmetry, self._num_classes, backgrounds, self._intrinsic_matrix, self._data_queue, db_inds_syn,
is_syn, db_inds_adapt, is_adapt, is_symmetric, self._class_colors, self._validation)
def forward(self, iter):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self._get_next_minibatch(iter)
return blobs
def _read_camera_parameters(self):
meta_data = scipy.io.loadmat(self._roidb[0]['meta_data'])
self._intrinsic_matrix = meta_data['intrinsic_matrix'].astype(np.float32, copy=True)
def _build_background_images(self):
cache_file = os.path.join(self._cache_path, 'backgrounds.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
self._backgrounds = cPickle.load(fid)
print '{} backgrounds loaded from {}'.format(self._name, cache_file)
return
backgrounds = []
# SUN 2012
root = os.path.join(self._cache_path, '../SUN2012/data/Images')
subdirs = os.listdir(root)
for i in xrange(len(subdirs)):
subdir = subdirs[i]
names = os.listdir(os.path.join(root, subdir))
for j in xrange(len(names)):
name = names[j]
if os.path.isdir(os.path.join(root, subdir, name)):
files = os.listdir(os.path.join(root, subdir, name))
for k in range(len(files)):
if os.path.isdir(os.path.join(root, subdir, name, files[k])):
filenames = os.listdir(os.path.join(root, subdir, name, files[k]))
for l in range(len(filenames)):
filename = os.path.join(root, subdir, name, files[k], filenames[l])
backgrounds.append(filename)
else:
filename = os.path.join(root, subdir, name, files[k])
backgrounds.append(filename)
else:
filename = os.path.join(root, subdir, name)
backgrounds.append(filename)
# ObjectNet3D
objectnet3d = os.path.join(self._cache_path, '../ObjectNet3D/data')
files = os.listdir(objectnet3d)
for i in range(len(files)):
filename = os.path.join(objectnet3d, files[i])
backgrounds.append(filename)
for i in xrange(len(backgrounds)):
if not os.path.isfile(backgrounds[i]):
print 'file not exist {}'.format(backgrounds[i])
self._backgrounds = backgrounds
print "build background images finished"
with open(cache_file, 'wb') as fid:
cPickle.dump(backgrounds, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote backgrounds to {}'.format(cache_file)
def _write_background_images(self):
cache_file = os.path.join(self._cache_path, self._name + '_backgrounds.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
self._backgrounds = cPickle.load(fid)
if self._name != 'lov_train':
cache_file_lov = os.path.join(self._cache_path, 'lov_train_backgrounds.pkl')
if os.path.exists(cache_file_lov):
with open(cache_file_lov, 'rb') as fid:
backgrounds_lov = cPickle.load(fid)
self._backgrounds = self._backgrounds + backgrounds_lov
print '{} backgrounds loaded from {}, {} images'.format(self._name, cache_file, len(self._backgrounds))
return
print "building background images"
outdir = os.path.join(self._cache_path, self._name + '_backgrounds')
if not os.path.exists(outdir):
os.mkdir(outdir)
num = 1000
perm = np.random.permutation(np.arange(len(self._roidb)))
perm = perm[:num]
print len(perm)
backgrounds = [None]*num
kernel = np.ones((50, 50), np.uint8)
for i in xrange(num):
index = perm[i]
# rgba
rgba = pad_im(cv2.imread(self._roidb[index]['image'], cv2.IMREAD_UNCHANGED), 16)
if rgba.shape[2] == 4:
im = np.copy(rgba[:,:,:3])
alpha = rgba[:,:,3]
I = np.where(alpha == 0)
im[I[0], I[1], :] = 0
else:
im = rgba
# generate background image
mask = pad_im(cv2.imread(self._roidb[index]['label'], cv2.IMREAD_UNCHANGED), 16)
index = np.where(mask > 0)
mask[index[0], index[1]] = 1
mask = cv2.dilate(mask, kernel)
background = cv2.inpaint(im, mask, 3, cv2.INPAINT_TELEA)
# write the image
filename = os.path.join(self._cache_path, self._name + '_backgrounds', '%04d.jpg' % (i))
cv2.imwrite(filename, background)
backgrounds[i] = filename
self._backgrounds = backgrounds
print "build background images finished"
with open(cache_file, 'wb') as fid:
cPickle.dump(backgrounds, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote backgrounds to {}'.format(cache_file)
def _build_background_depth_images(self):
cache_file = os.path.join(self._cache_path, 'backgrounds_depth.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
self._backgrounds_depth = cPickle.load(fid)
print '{} backgrounds depth loaded from {}'.format(self._name, cache_file)
return
backgrounds = []
# RGBD scenes
root = os.path.join(self._cache_path, '../RGBDScene')
# load image index
image_set_file = os.path.join(root, 'train.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index_train = [x.rstrip('\n') for x in f.readlines()]
image_set_file = os.path.join(root, 'val.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index_val = [x.rstrip('\n') for x in f.readlines()]
image_index = image_index_train + image_index_val
print len(image_index)
for i in range(len(image_index)):
filename = os.path.join(root, 'data', image_index[i] + '-depth.png')
backgrounds.append(filename)
for i in xrange(len(backgrounds)):
if not os.path.isfile(backgrounds[i]):
print 'file not exist {}'.format(backgrounds[i])
self._backgrounds_depth = backgrounds
print "build background depth images finished"
with open(cache_file, 'wb') as fid:
cPickle.dump(backgrounds, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote backgrounds to {}'.format(cache_file) | lib/gt_synthesize_layer/layer.py | from fcn.config import cfg
from gt_synthesize_layer.minibatch import get_minibatch
import numpy as np
import cv2
from utils.blob import pad_im
import os
import cPickle
import scipy.io
class GtSynthesizeLayer(object):
"""FCN data layer used for training."""
def __init__(self, roidb, roidb_val, num_classes, extents, points, symmetry, cache_path, name, data_queue, model_file, pose_file, class_colors=None):
"""Set the roidb to be used by this layer during training."""
self._roidb = roidb
self._roidb_val = roidb_val
self._num_classes = num_classes
self._extents = extents
self._points = points
self._symmetry = symmetry
self._cache_path = cache_path
self._name = name
self._data_queue = data_queue
self._shuffle_roidb_inds()
self._cur_val = 0
self._perm_val = np.arange(len(self._roidb_val))
self._shuffle_syn_inds()
self._shuffle_adapt_inds()
self._build_background_images()
self._build_background_depth_images()
self._read_camera_parameters()
self._validation = False
self._class_colors = class_colors
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _shuffle_roidb_val_inds(self):
"""Randomly permute the validation roidb."""
self._perm_val = np.random.permutation(np.arange(len(self._roidb_val)))
self._cur_val = 0
def _shuffle_syn_inds(self):
self._perm_syn = np.random.permutation(np.arange(cfg.TRAIN.SYNNUM))
self._cur_syn = 0
def _shuffle_adapt_inds(self):
self._perm_adapt = np.random.permutation(np.arange(cfg.TRAIN.ADAPT_NUM))
self._cur_adapt = 0
def _get_next_minibatch_inds(self, is_syn, is_adapt):
"""Return the roidb indices for the next minibatch."""
if self._validation:
db_inds = self._perm_val[self._cur_val:self._cur_val + cfg.TRAIN.IMS_PER_BATCH]
if self._cur_val + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb_val):
self._cur_val = 0
#self._shuffle_roidb_val_inds
if is_syn == 0 and is_adapt == 0:
self._cur_val += cfg.TRAIN.IMS_PER_BATCH
db_inds_syn = self._perm_syn[self._cur_syn:self._cur_syn + cfg.TRAIN.IMS_PER_BATCH]
if is_syn:
self._cur_syn += cfg.TRAIN.IMS_PER_BATCH
if self._cur_syn + cfg.TRAIN.IMS_PER_BATCH >= cfg.TRAIN.SYNNUM:
self._shuffle_syn_inds()
db_inds_adapt = self._perm_adapt[self._cur_adapt:self._cur_adapt + cfg.TRAIN.IMS_PER_BATCH]
if is_adapt:
self._cur_adapt += cfg.TRAIN.IMS_PER_BATCH
if self._cur_adapt + cfg.TRAIN.IMS_PER_BATCH >= cfg.TRAIN.ADAPT_NUM:
self._shuffle_adapt_inds()
else:
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
if is_syn == 0 and is_adapt == 0:
self._cur += cfg.TRAIN.IMS_PER_BATCH
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds_syn = self._perm_syn[self._cur_syn:self._cur_syn + cfg.TRAIN.IMS_PER_BATCH]
if is_syn:
self._cur_syn += cfg.TRAIN.IMS_PER_BATCH
if self._cur_syn + cfg.TRAIN.IMS_PER_BATCH >= cfg.TRAIN.SYNNUM:
self._shuffle_syn_inds()
db_inds_adapt = self._perm_adapt[self._cur_adapt:self._cur_adapt + cfg.TRAIN.IMS_PER_BATCH]
if is_adapt:
self._cur_adapt += cfg.TRAIN.IMS_PER_BATCH
if self._cur_adapt + cfg.TRAIN.IMS_PER_BATCH >= cfg.TRAIN.ADAPT_NUM:
self._shuffle_adapt_inds()
return db_inds, db_inds_syn, db_inds_adapt
def _get_next_minibatch(self, iter):
"""Return the blobs to be used for the next minibatch."""
if cfg.TRAIN.SYNTHESIZE:
if cfg.TRAIN.SYN_RATIO == 0:
is_syn = 1
else:
r = np.random.randint(cfg.TRAIN.SYN_RATIO+1, size=1)[0]
if r == 0:
is_syn = 0
else:
is_syn = 1
else:
is_syn = 0
# domain adaptation
if cfg.TRAIN.ADAPT:
r = np.random.randint(cfg.TRAIN.ADAPT_RATIO+1, size=1)[0]
if r == 0:
is_adapt = 1
is_syn = 0
else:
is_adapt = 0
else:
is_adapt = 0
if iter >= cfg.TRAIN.SYMSIZE:
is_symmetric = 1
else:
is_symmetric = 0
db_inds, db_inds_syn, db_inds_adapt = self._get_next_minibatch_inds(is_syn, is_adapt)
if self._validation:
minibatch_db = [self._roidb_val[i] for i in db_inds]
else:
minibatch_db = [self._roidb[i] for i in db_inds]
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'NORMAL':
backgrounds = self._backgrounds_depth
else:
backgrounds = self._backgrounds
return get_minibatch(minibatch_db, self._extents, self._points, self._symmetry, self._num_classes, backgrounds, self._intrinsic_matrix, self._data_queue, db_inds_syn,
is_syn, db_inds_adapt, is_adapt, is_symmetric, self._class_colors, self._validation)
def forward(self, iter):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self._get_next_minibatch(iter)
return blobs
def _read_camera_parameters(self):
meta_data = scipy.io.loadmat(self._roidb[0]['meta_data'])
self._intrinsic_matrix = meta_data['intrinsic_matrix'].astype(np.float32, copy=True)
def _build_background_images(self):
cache_file = os.path.join(self._cache_path, 'backgrounds.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
self._backgrounds = cPickle.load(fid)
print '{} backgrounds loaded from {}'.format(self._name, cache_file)
return
backgrounds = []
# SUN 2012
root = os.path.join(self._cache_path, '../SUN2012/data/Images')
subdirs = os.listdir(root)
for i in xrange(len(subdirs)):
subdir = subdirs[i]
names = os.listdir(os.path.join(root, subdir))
for j in xrange(len(names)):
name = names[j]
if os.path.isdir(os.path.join(root, subdir, name)):
files = os.listdir(os.path.join(root, subdir, name))
for k in range(len(files)):
if os.path.isdir(os.path.join(root, subdir, name, files[k])):
filenames = os.listdir(os.path.join(root, subdir, name, files[k]))
for l in range(len(filenames)):
filename = os.path.join(root, subdir, name, files[k], filenames[l])
backgrounds.append(filename)
else:
filename = os.path.join(root, subdir, name, files[k])
backgrounds.append(filename)
else:
filename = os.path.join(root, subdir, name)
backgrounds.append(filename)
# ObjectNet3D
objectnet3d = os.path.join(self._cache_path, '../ObjectNet3D/data')
files = os.listdir(objectnet3d)
for i in range(len(files)):
filename = os.path.join(objectnet3d, files[i])
backgrounds.append(filename)
for i in xrange(len(backgrounds)):
if not os.path.isfile(backgrounds[i]):
print 'file not exist {}'.format(backgrounds[i])
self._backgrounds = backgrounds
print "build background images finished"
with open(cache_file, 'wb') as fid:
cPickle.dump(backgrounds, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote backgrounds to {}'.format(cache_file)
def _write_background_images(self):
cache_file = os.path.join(self._cache_path, self._name + '_backgrounds.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
self._backgrounds = cPickle.load(fid)
if self._name != 'lov_train':
cache_file_lov = os.path.join(self._cache_path, 'lov_train_backgrounds.pkl')
if os.path.exists(cache_file_lov):
with open(cache_file_lov, 'rb') as fid:
backgrounds_lov = cPickle.load(fid)
self._backgrounds = self._backgrounds + backgrounds_lov
print '{} backgrounds loaded from {}, {} images'.format(self._name, cache_file, len(self._backgrounds))
return
print "building background images"
outdir = os.path.join(self._cache_path, self._name + '_backgrounds')
if not os.path.exists(outdir):
os.mkdir(outdir)
num = 1000
perm = np.random.permutation(np.arange(len(self._roidb)))
perm = perm[:num]
print len(perm)
backgrounds = [None]*num
kernel = np.ones((50, 50), np.uint8)
for i in xrange(num):
index = perm[i]
# rgba
rgba = pad_im(cv2.imread(self._roidb[index]['image'], cv2.IMREAD_UNCHANGED), 16)
if rgba.shape[2] == 4:
im = np.copy(rgba[:,:,:3])
alpha = rgba[:,:,3]
I = np.where(alpha == 0)
im[I[0], I[1], :] = 0
else:
im = rgba
# generate background image
mask = pad_im(cv2.imread(self._roidb[index]['label'], cv2.IMREAD_UNCHANGED), 16)
index = np.where(mask > 0)
mask[index[0], index[1]] = 1
mask = cv2.dilate(mask, kernel)
background = cv2.inpaint(im, mask, 3, cv2.INPAINT_TELEA)
# write the image
filename = os.path.join(self._cache_path, self._name + '_backgrounds', '%04d.jpg' % (i))
cv2.imwrite(filename, background)
backgrounds[i] = filename
self._backgrounds = backgrounds
print "build background images finished"
with open(cache_file, 'wb') as fid:
cPickle.dump(backgrounds, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote backgrounds to {}'.format(cache_file)
def _build_background_depth_images(self):
cache_file = os.path.join(self._cache_path, 'backgrounds_depth.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
self._backgrounds_depth = cPickle.load(fid)
print '{} backgrounds depth loaded from {}'.format(self._name, cache_file)
return
backgrounds = []
# RGBD scenes
root = os.path.join(self._cache_path, '../RGBDScene')
# load image index
image_set_file = os.path.join(root, 'train.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index_train = [x.rstrip('\n') for x in f.readlines()]
image_set_file = os.path.join(root, 'val.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index_val = [x.rstrip('\n') for x in f.readlines()]
image_index = image_index_train + image_index_val
print len(image_index)
for i in range(len(image_index)):
filename = os.path.join(root, 'data', image_index[i] + '-depth.png')
backgrounds.append(filename)
for i in xrange(len(backgrounds)):
if not os.path.isfile(backgrounds[i]):
print 'file not exist {}'.format(backgrounds[i])
self._backgrounds_depth = backgrounds
print "build background depth images finished"
with open(cache_file, 'wb') as fid:
cPickle.dump(backgrounds, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote backgrounds to {}'.format(cache_file) | 0.691602 | 0.185523 |
from typing import Set
import argparse
import os
import json
import glob
from utils.utils import load_images_map
def is_trajectory_valid(trajectory, images_map):
"""Check that a trajectory has associated images.
"""
# TODO: add min count instead?
for frame_index, bbs in enumerate(trajectory["bbs"], start=trajectory["start"]):
if frame_index in images_map and tuple(bbs) in images_map[frame_index]:
return True
return False
def passes_min_size(trajectory, min_face_size):
"""Check that faces have a certain minimum (pixel) size. This is useful if
we want to have reliable embedddings of images in a trajectory.
"""
for bbs in trajectory["bbs"]:
# Bounding boxes (bbs) are: x1, y1, x2, y2
w, h = (bbs[2] - bbs[0]), (bbs[3] - bbs[1])
if min(w, h) < min_face_size:
return False
return True
def save_trajectories(file, trajectories, images_map, min_face_size, traj_count, movie_id):
"""Save trajectories, and filter out trajectories that had no corresponding
images for any bounding boxes.
"""
# Write out .jsonl
n_saved = 0
for traj in trajectories:
if is_trajectory_valid(traj, images_map) and passes_min_size(traj, min_face_size):
traj["index"] = traj_count
traj["movie_id"] = movie_id
json.dump(traj, file, indent=None, separators=(",", ":"))
file.write("\n")
traj_count += 1
n_saved += 1
n_removed = len(trajectories) - n_saved
return n_saved, n_removed
def save_scene_changes(file_path, scene_cuts: Set[int], movie_id: int):
scene_cuts_list = sorted(scene_cuts)
with open(file_path, "w") as file:
obj = {"frame_indices": scene_cuts_list, "movie_id": movie_id}
json.dump(obj, file, indent=None, separators=(",", ":"))
file.write("\n")
def iou(boxA, boxB):
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))
return interArea / float(boxAArea + boxBArea - interArea)
def load_trajectory(trajectory_file: str, scene_cuts: Set[int], iou_threshold: float):
    """Load a single trajectory file (from 1 shard).

    Note: also merges possible trajectories that weren't caught by the face trackers
    earlier, while making sure that no trajectories pass scene_cuts.

    :param trajectory_file: path to one shard's .jsonl trajectory file
    :param scene_cuts: frame indices of scene changes; a merge never spans one
    :param iou_threshold: minimum IOU between the last box of one trajectory
        and the first box of the next for the two to be merged
    :return: (merged_trajectories, n_merges)
    """
    with open(trajectory_file, "r") as f:
        trajectories = sorted([json.loads(line) for line in f], key=lambda t: t["start"])
    merged_trajectories = []
    merged_indices = set()
    # Loop to merge trajectories within the shard itself
    for i, t1 in enumerate(trajectories):
        if i in merged_indices:
            continue
        # Keep extending t1 greedily until no candidate continues it.
        found_merge = True
        while found_merge:
            end = t1["start"] + t1["len"]
            best_iou = iou_threshold
            best_j = None
            # Candidates must start exactly where t1 ends and not at a scene cut;
            # among them pick the one with the highest IOU above the threshold.
            for j, t2 in enumerate(trajectories[i + 1:], start=i + 1):
                if t2["start"] != end or j in merged_indices or end in scene_cuts:
                    continue
                iou_value = iou(t1["bbs"][-1], t2["bbs"][0])
                if iou_value > best_iou:
                    best_iou = iou_value
                    best_j = j
            found_merge = (best_j is not None)
            if found_merge:
                # Concatenate boxes/flags and absorb the candidate into t1.
                t1["bbs"] = t1["bbs"] + trajectories[best_j]["bbs"]
                t1["detected"] = t1["detected"] + trajectories[best_j]["detected"]
                t1["len"] = len(t1["bbs"])
                merged_indices.add(best_j)
        merged_trajectories.append(t1)
    # Return final trajectories + number of merges made
    n_merges = len(trajectories) - len(merged_trajectories)
    return merged_trajectories, n_merges
def merge(
    data_dir: str, movie_id: int, iou_threshold: float, overlap: int, min_face_size: int
):
    """Merge trajectories that cross file boundaries in terms of frames.

    Reads per-shard trajectory / scene-change / feature files under *data_dir*
    and writes merged ``trajectories.jsonl``, ``features.jsonl`` and
    ``scene_changes.json``.

    :param data_dir: movie data directory containing the shard subdirectories
    :param movie_id: numeric movie id, stamped on every saved record
    :param iou_threshold: minimum IOU for merging two trajectories
    :param overlap: frames at a shard start within which a trajectory may
        continue one from the previous shard
    :param min_face_size: minimum face size (pixels) for saved trajectories
    """
    trajectories_dir = os.path.join(data_dir, "trajectories")
    scene_changes_dir = os.path.join(data_dir, "scene_changes")
    features_dir = os.path.join(data_dir, "features")
    images_dir = os.path.join(data_dir, "images")
    assert os.path.exists(trajectories_dir), f"Didn't find: {trajectories_dir}"
    assert os.path.exists(scene_changes_dir), f"Didn't find: {scene_changes_dir}"
    assert os.path.exists(features_dir), f"Didn't find: {features_dir}"
    assert os.path.exists(images_dir), f"Didn't find: {images_dir}"
    # Check what trajectory files we have (one for each shard)
    _, _, filenames = next(os.walk(trajectories_dir))
    traj_files = []
    for file in filenames:
        # file is like: trajectories_987654_1000-2000.jsonl
        name, ext = os.path.splitext(file)
        parts = name.split("_")
        if parts[0] != "trajectories":
            continue
        start, end = [int(f) for f in parts[2].split("-")]
        traj_files.append({"s": start, "e": end, "path": os.path.join(trajectories_dir, file)})
    # Process shards in frame order so cross-shard merging sees them in sequence.
    traj_files = sorted(traj_files, key=lambda d: d["s"])
    # Check that we have corresponding scene cut files (one for each shard)
    scene_cuts = set()
    for t_file in traj_files:
        start, end = t_file["s"], t_file["e"]
        # Scene change files have the same format as trajectory files
        filename = f"scene_changes_{movie_id}_{start}-{end}.json"
        scene_change_file = os.path.join(scene_changes_dir, filename)
        if os.path.exists(scene_change_file):
            with open(scene_change_file, "r") as f:
                shard_scene_cuts = json.load(f)["frame_indices"]
            scene_cuts |= set(shard_scene_cuts)
    # Merge feature files into one
    _, _, filenames = next(os.walk(features_dir))
    feature_files = []
    for file in filenames:
        # file is like: features_987654_1000-2000.jsonl
        name, ext = os.path.splitext(file)
        parts = name.split("_")
        if parts[0] != "features":
            continue
        start, _ = [int(f) for f in parts[2].split("-")]
        feature_files.append({"s": start, "path": os.path.join(features_dir, file)})
    feature_files = sorted(feature_files, key=lambda f: f["s"])
    # Simple concatenation: feature shards are already line-delimited JSON.
    with open(os.path.join(data_dir, "features.jsonl"), "w") as write_file:
        for file_obj in feature_files:
            with open(file_obj["path"], "r") as read_file:
                write_file.write(read_file.read())
    print(f"Processing {len(traj_files)} trajectory files.")
    print(f"Read a total {len(scene_cuts)} scene changes.")
    # Load image lookup map that allows to check if a frame + bbs combo has an image
    image_map = load_images_map(images_dir)
    out_file = open(os.path.join(data_dir, "trajectories.jsonl"), "w")
    trajectories = []        # open trajectories that may still be extended
    n_read = 0
    n_saved = 0
    n_merges = 0
    n_deleted = 0
    # Loop to merge trajectories across different shards
    for file in traj_files:
        new_trajectories, n_shard_merges = load_trajectory(file["path"], scene_cuts, iou_threshold)
        n_read += len(new_trajectories)
        n_merges += n_shard_merges
        # Trajectories starting within `overlap` frames of the shard start may
        # continue a trajectory from the previous shard; the rest stand alone.
        mergables = [t for t in new_trajectories if t["start"] < file["s"] + overlap]
        others = [t for t in new_trajectories if t["start"] >= file["s"] + overlap]
        expired = [t for t in trajectories if (t["start"] + t["len"]) < file["s"]]
        trajectories = [t for t in trajectories if (t["start"] + t["len"]) >= file["s"]]
        # Save trajectories that can't be merged anymore, to disk
        ns, nr = save_trajectories(out_file, expired, image_map, min_face_size, n_saved, movie_id)
        n_saved += ns
        n_deleted += nr
        # Check if some of the new trajectories can merge into an old one
        for t1 in mergables:
            best_iou = iou_threshold
            best_t = None
            # We'll only attempt a merge if t1["start"] isn't at a scene cut
            if t1["start"] not in scene_cuts:
                for t2 in trajectories:
                    # t2 must already be running at t1's start frame.
                    if t2["start"] >= t1["start"] or (t2["start"] + t2["len"]) <= t1["start"]:
                        continue
                    t2_bbs_i = t1["start"] - t2["start"]
                    assert t2_bbs_i >= 0, "Invalid index?"
                    iou_value = iou(t2["bbs"][t2_bbs_i], t1["bbs"][0])
                    if iou_value > best_iou:
                        best_iou = iou_value
                        best_t = t2
            # A merge was found!
            if best_t is not None:
                n_merges += 1
                assumed_len = t1["start"] + t1["len"] - best_t["start"]
                # Replace the overlapping tail of the old trajectory with t1's data.
                best_t["bbs"] = best_t["bbs"][:(t1["start"] - best_t["start"])] + t1["bbs"]
                best_t["detected"] = best_t["detected"][:(t1["start"] - best_t["start"])] + t1["detected"]
                best_t["len"] = len(best_t["bbs"])
                assert best_t["len"] == assumed_len, "Len???"
            else:
                others.append(t1)
        trajectories += others
    # Save remaining
    ns, nr = save_trajectories(out_file, trajectories, image_map, min_face_size, n_saved, movie_id)
    n_saved += ns
    n_deleted += nr
    out_file.close()
    # Save merged scene cuts
    scene_cuts_file = os.path.join(data_dir, "scene_changes.json")
    save_scene_changes(scene_cuts_file, scene_cuts, movie_id)
    print(f"Total merges: {n_merges}.")
    print(f"Total removed if they had no images or had too small faces: {n_deleted}.")
    print(f"Done! Read {n_read} trajectories and saved {n_saved}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(allow_abbrev=True)
    parser.add_argument("--iou-threshold", type=float, default=0.5,
                        help="IOU threshold when merging bounding boxes.")
    parser.add_argument("--overlap", type=int, default=5,
                        help="""Overlap to consider when merging across shards. Should
match the max-trajectory-age that was used when extracting.""")
    parser.add_argument("--min-face-size", type=int, default=50,
                        help="""If bigger than zero, will filter trajectories that
have faces where `min(w, h) < min-face-size`.""")
    parser.add_argument("--path", type=str, default=".")
    args = parser.parse_args()
    # --path may be a glob pattern matching several movie directories.
    data_dirs = glob.glob(args.path)
    for data_dir in data_dirs:
        # data_dir will be a movie directory like ./data/123456-data
        data_dir = data_dir.rstrip("/")
        print(f"Merging shards in: {data_dir}")
        # The movie id is the numeric prefix of the directory name.
        movie_id: int = int(os.path.basename(data_dir).split("-")[0])
        merge(data_dir, movie_id, args.iou_threshold, args.overlap, args.min_face_size)
print() | facerec/merge_shards.py | from typing import Set
import argparse
import os
import json
import glob
from utils.utils import load_images_map
def is_trajectory_valid(trajectory, images_map):
"""Check that a trajectory has associated images.
"""
# TODO: add min count instead?
for frame_index, bbs in enumerate(trajectory["bbs"], start=trajectory["start"]):
if frame_index in images_map and tuple(bbs) in images_map[frame_index]:
return True
return False
def passes_min_size(trajectory, min_face_size):
"""Check that faces have a certain minimum (pixel) size. This is useful if
we want to have reliable embedddings of images in a trajectory.
"""
for bbs in trajectory["bbs"]:
# Bounding boxes (bbs) are: x1, y1, x2, y2
w, h = (bbs[2] - bbs[0]), (bbs[3] - bbs[1])
if min(w, h) < min_face_size:
return False
return True
def save_trajectories(file, trajectories, images_map, min_face_size, traj_count, movie_id):
"""Save trajectories, and filter out trajectories that had no corresponding
images for any bounding boxes.
"""
# Write out .jsonl
n_saved = 0
for traj in trajectories:
if is_trajectory_valid(traj, images_map) and passes_min_size(traj, min_face_size):
traj["index"] = traj_count
traj["movie_id"] = movie_id
json.dump(traj, file, indent=None, separators=(",", ":"))
file.write("\n")
traj_count += 1
n_saved += 1
n_removed = len(trajectories) - n_saved
return n_saved, n_removed
def save_scene_changes(file_path, scene_cuts: Set[int], movie_id: int):
scene_cuts_list = sorted(scene_cuts)
with open(file_path, "w") as file:
obj = {"frame_indices": scene_cuts_list, "movie_id": movie_id}
json.dump(obj, file, indent=None, separators=(",", ":"))
file.write("\n")
def iou(boxA, boxB):
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))
return interArea / float(boxAArea + boxBArea - interArea)
def load_trajectory(trajectory_file: str, scene_cuts: Set[int], iou_threshold: float):
"""Load a single trajectory file (from 1 shard).
Note: also merges possible trajectories that weren't caught by the face trackers
earlier, while making sure that no trajectories pass scene_cuts.
"""
with open(trajectory_file, "r") as f:
trajectories = sorted([json.loads(line) for line in f], key=lambda t: t["start"])
merged_trajectories = []
merged_indices = set()
# Loop to merge trajectories within the shard itself
for i, t1 in enumerate(trajectories):
if i in merged_indices:
continue
found_merge = True
while found_merge:
end = t1["start"] + t1["len"]
best_iou = iou_threshold
best_j = None
for j, t2 in enumerate(trajectories[i + 1:], start=i + 1):
if t2["start"] != end or j in merged_indices or end in scene_cuts:
continue
iou_value = iou(t1["bbs"][-1], t2["bbs"][0])
if iou_value > best_iou:
best_iou = iou_value
best_j = j
found_merge = (best_j is not None)
if found_merge:
t1["bbs"] = t1["bbs"] + trajectories[best_j]["bbs"]
t1["detected"] = t1["detected"] + trajectories[best_j]["detected"]
t1["len"] = len(t1["bbs"])
merged_indices.add(best_j)
merged_trajectories.append(t1)
# Return final trajectories + number of merges made
n_merges = len(trajectories) - len(merged_trajectories)
return merged_trajectories, n_merges
def merge(
data_dir: str, movie_id: int, iou_threshold: float, overlap: int, min_face_size: int
):
"""Merge trajectories that cross file boundaries in terms of frames.
"""
trajectories_dir = os.path.join(data_dir, "trajectories")
scene_changes_dir = os.path.join(data_dir, "scene_changes")
features_dir = os.path.join(data_dir, "features")
images_dir = os.path.join(data_dir, "images")
assert os.path.exists(trajectories_dir), f"Didn't find: {trajectories_dir}"
assert os.path.exists(scene_changes_dir), f"Didn't find: {scene_changes_dir}"
assert os.path.exists(features_dir), f"Didn't find: {features_dir}"
assert os.path.exists(images_dir), f"Didn't find: {images_dir}"
# Check what trajectory files we have (one for each shard)
_, _, filenames = next(os.walk(trajectories_dir))
traj_files = []
for file in filenames:
# file is like: trajectories_987654_1000-2000.jsonl
name, ext = os.path.splitext(file)
parts = name.split("_")
if parts[0] != "trajectories":
continue
start, end = [int(f) for f in parts[2].split("-")]
traj_files.append({"s": start, "e": end, "path": os.path.join(trajectories_dir, file)})
traj_files = sorted(traj_files, key=lambda d: d["s"])
# Check that we have corresponding scene cut files (one for each shard)
scene_cuts = set()
for t_file in traj_files:
start, end = t_file["s"], t_file["e"]
# Scene change files have the same format as trajectory files
filename = f"scene_changes_{movie_id}_{start}-{end}.json"
scene_change_file = os.path.join(scene_changes_dir, filename)
if os.path.exists(scene_change_file):
with open(scene_change_file, "r") as f:
shard_scene_cuts = json.load(f)["frame_indices"]
scene_cuts |= set(shard_scene_cuts)
# Merge feature files into one
_, _, filenames = next(os.walk(features_dir))
feature_files = []
for file in filenames:
# file is like: features_987654_1000-2000.jsonl
name, ext = os.path.splitext(file)
parts = name.split("_")
if parts[0] != "features":
continue
start, _ = [int(f) for f in parts[2].split("-")]
feature_files.append({"s": start, "path": os.path.join(features_dir, file)})
feature_files = sorted(feature_files, key=lambda f: f["s"])
with open(os.path.join(data_dir, "features.jsonl"), "w") as write_file:
for file_obj in feature_files:
with open(file_obj["path"], "r") as read_file:
write_file.write(read_file.read())
print(f"Processing {len(traj_files)} trajectory files.")
print(f"Read a total {len(scene_cuts)} scene changes.")
# Load image lookup map that allows to check if a frame + bbs combo has an image
image_map = load_images_map(images_dir)
out_file = open(os.path.join(data_dir, "trajectories.jsonl"), "w")
trajectories = []
n_read = 0
n_saved = 0
n_merges = 0
n_deleted = 0
# Loop to merge trajectories across different shards
for file in traj_files:
new_trajectories, n_shard_merges = load_trajectory(file["path"], scene_cuts, iou_threshold)
n_read += len(new_trajectories)
n_merges += n_shard_merges
mergables = [t for t in new_trajectories if t["start"] < file["s"] + overlap]
others = [t for t in new_trajectories if t["start"] >= file["s"] + overlap]
expired = [t for t in trajectories if (t["start"] + t["len"]) < file["s"]]
trajectories = [t for t in trajectories if (t["start"] + t["len"]) >= file["s"]]
# Save trajectories that can't be merged anymore, to disk
ns, nr = save_trajectories(out_file, expired, image_map, min_face_size, n_saved, movie_id)
n_saved += ns
n_deleted += nr
# Check if some of the new trajectories can merge into an old one
for t1 in mergables:
best_iou = iou_threshold
best_t = None
# We'll only attempt a merge if t1["start"] isn't at a scene cut
if t1["start"] not in scene_cuts:
for t2 in trajectories:
if t2["start"] >= t1["start"] or (t2["start"] + t2["len"]) <= t1["start"]:
continue
t2_bbs_i = t1["start"] - t2["start"]
assert t2_bbs_i >= 0, "Invalid index?"
iou_value = iou(t2["bbs"][t2_bbs_i], t1["bbs"][0])
if iou_value > best_iou:
best_iou = iou_value
best_t = t2
# A merge was found!
if best_t is not None:
n_merges += 1
assumed_len = t1["start"] + t1["len"] - best_t["start"]
best_t["bbs"] = best_t["bbs"][:(t1["start"] - best_t["start"])] + t1["bbs"]
best_t["detected"] = best_t["detected"][:(t1["start"] - best_t["start"])] + t1["detected"]
best_t["len"] = len(best_t["bbs"])
assert best_t["len"] == assumed_len, "Len???"
else:
others.append(t1)
trajectories += others
# Save remaining
ns, nr = save_trajectories(out_file, trajectories, image_map, min_face_size, n_saved, movie_id)
n_saved += ns
n_deleted += nr
out_file.close()
# Save merged scene cuts
scene_cuts_file = os.path.join(data_dir, "scene_changes.json")
save_scene_changes(scene_cuts_file, scene_cuts, movie_id)
print(f"Total merges: {n_merges}.")
print(f"Total removed if they had no images or had too small faces: {n_deleted}.")
print(f"Done! Read {n_read} trajectories and saved {n_saved}.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(allow_abbrev=True)
parser.add_argument("--iou-threshold", type=float, default=0.5,
help="IOU threshold when merging bounding boxes.")
parser.add_argument("--overlap", type=int, default=5,
help="""Overlap to consider when merging across shards. Should
match the max-trajectory-age that was used when extracting.""")
parser.add_argument("--min-face-size", type=int, default=50,
help="""If bigger than zero, will filter trajectories that
have faces where `min(w, h) < min-face-size`.""")
parser.add_argument("--path", type=str, default=".")
args = parser.parse_args()
data_dirs = glob.glob(args.path)
for data_dir in data_dirs:
# data_dir will be a movie directory like ./data/123456-data
data_dir = data_dir.rstrip("/")
print(f"Merging shards in: {data_dir}")
movie_id: int = int(os.path.basename(data_dir).split("-")[0])
merge(data_dir, movie_id, args.iou_threshold, args.overlap, args.min_face_size)
print() | 0.492188 | 0.409929 |
import pyrosim.pyrosim as pyrosim
import random
def Create_World():
    """Write an SDF world file containing a single unit cube named "Box"."""
    side = 1
    pyrosim.Start_SDF("world.sdf")
    pyrosim.Send_Cube(name="Box", pos=[2, 2, 0.5], size=[side, side, side])
    pyrosim.End()
def Generate_Body():
    """Write the robot URDF: a torso with a front leg and a back leg.

    Fix: `length`, `width` and `height` were referenced but never defined,
    so this function raised NameError at runtime. They are now set to the
    unit-cube dimensions used by the earlier (commented-out) version
    (x, y, z = 1, 1, 1). The dead commented-out code has been removed.
    """
    length, width, height = 1, 1, 1
    pyrosim.Start_URDF("body.urdf")
    # Robot Torso
    pyrosim.Send_Cube(name="Torso", pos=[1.5, 0, 1.5], size=[length, width, height])
    # Connect FrontLeg to Torso with one joint
    pyrosim.Send_Joint(name="Torso_FrontLeg", parent="Torso", child="FrontLeg", type="revolute", position="2.0 0.0 1.0")
    # Robot FrontLeg
    pyrosim.Send_Cube(name="FrontLeg", pos=[0.5, 0, -0.5], size=[length, width, height])
    # Connect BackLeg to Torso with a second joint.
    pyrosim.Send_Joint(name="Torso_BackLeg", parent="Torso", child="BackLeg", type="revolute", position="1.0 0.0 1.0")
    # The other BackLeg
    pyrosim.Send_Cube(name="BackLeg", pos=[-0.5, 0, -0.5], size=[length, width, height])
    # End
    pyrosim.End()
def Generate_Brain():
    """Write the neural network: 3 sensor neurons fully connected to 2 motor neurons.

    Fix: `random.randrange(-1, 1)` only ever yields the integers -1 or 0, so
    synapse weights were never positive and never fractional. Use
    `random.uniform(-1, 1)` for a continuous random weight in [-1, 1].
    """
    pyrosim.Start_NeuralNetwork("brain.nndf")
    pyrosim.Send_Sensor_Neuron(name=0, linkName="Torso")
    pyrosim.Send_Sensor_Neuron(name=1, linkName="BackLeg")
    pyrosim.Send_Sensor_Neuron(name=2, linkName="FrontLeg")
    pyrosim.Send_Motor_Neuron(name=3, jointName="Torso_BackLeg")
    pyrosim.Send_Motor_Neuron(name=4, jointName="Torso_FrontLeg")
    # Fully connect sensor neurons (0-2) to motor neurons (3-4).
    for sensor in range(0, 3):
        for motor in range(3, 5):
            pyrosim.Send_Synapse(sourceNeuronName=sensor, targetNeuronName=motor,
                                 weight=random.uniform(-1, 1))
pyrosim.End() | generate.py | import pyrosim.pyrosim as pyrosim
import random
def Create_World():
x,y,z = 1,1,1
pyrosim.Start_SDF("world.sdf")
pyrosim.Send_Cube(name="Box", pos=[2,2,0.5] , size=[x,y,z])
pyrosim.End()
def Generate_Body():
# x,y,z = 1,1,1
# pyrosim.Start_URDF("body.urdf")
# pyrosim.Send_Cube(name="Torso", pos=[1.5,0,1.5] , size=[x,y,z])
# pyrosim.Send_Joint( name = "Torso_BackLeg" , parent= "Torso" , child = "BackLeg" ,
# type = "revolute", position = "1 0.0 1")
# pyrosim.Send_Cube(name="BackLeg", pos=[-0.5,0,-0.5] , size=[x,y,z])
# pyrosim.Send_Joint( name = "Torso_FrontLeg" , parent= "Torso" , child = "FrontLeg" ,
# type = "revolute", position = "2 0.0 1")
# pyrosim.Send_Cube(name="FrontLeg", pos=[0.5,0,-0.5] , size=[x,y,z])
# pyrosim.End()
pyrosim.Start_URDF("body.urdf")
# Robot Torso
pyrosim.Send_Cube(name="Torso", pos=[1.5, 0, 1.5], size=[length, width, height])
# Connect BackLeg to Torso with one joint
pyrosim.Send_Joint(name="Torso_FrontLeg", parent="Torso", child="FrontLeg", type="revolute", position="2.0 0.0 1.0")
# Robot FrontLeg
pyrosim.Send_Cube(name="FrontLeg", pos=[0.5, 0, -0.5], size=[length, width, height])
# Connect FrontLeg to Torso with a second joint.
pyrosim.Send_Joint(name="Torso_BackLeg", parent="Torso", child="BackLeg", type="revolute", position="1.0 0.0 1.0")
# The other BackLeg
pyrosim.Send_Cube(name="BackLeg", pos=[-0.5, 0, -0.5], size=[length, width, height])
# End
pyrosim.End()
def Generate_Brain():
pyrosim.Start_NeuralNetwork("brain.nndf")
pyrosim.Send_Sensor_Neuron(name = 0 , linkName = "Torso")
pyrosim.Send_Sensor_Neuron(name = 1 , linkName = "BackLeg")
pyrosim.Send_Sensor_Neuron(name = 2 , linkName = "FrontLeg")
pyrosim.Send_Motor_Neuron( name = 3 , jointName = "Torso_BackLeg")
pyrosim.Send_Motor_Neuron( name = 4 , jointName = "Torso_FrontLeg")
for linkName in range(0,3):
for motor in range(3,5):
pyrosim.Send_Synapse( sourceNeuronName = linkName , targetNeuronName = motor , weight = random.randrange(-1, 1))
pyrosim.End() | 0.406037 | 0.424352 |
class MemObject:
    '''A memcached-backed relational object.

    All keys are namespaced as "<name>:<key>". The special "_lock" key acts
    as a cooperative write lock: when set to 1, update calls are refused.
    '''
    def __init__(self,name,mc):
        '''
        @param name: str  name of this object, used as the key namespace
        @param mc: memcached client used for all storage operations
        '''
        self._client = mc
        self._name = name
        # _lock: int; 1 means the object is locked and cannot be modified
        self._lock = 0
def produceKey(self, keyname):
    '''Build the namespaced memcached key "<name>:<keyname>".

    @param keyname: str/unicode key suffix
    @raise TypeError: if keyname is not a string
    '''
    if isinstance(keyname, basestring):
        return ''.join([self._name, ':', keyname])
    else:
        # Fix: raising a bare string ("raise 'type error'") is not a valid
        # exception and itself aborts with a TypeError; raise a proper one.
        raise TypeError("type error")
def locked(self):
    '''Return the stored "_lock" flag (truthy when the object is locked).'''
    return self._client.get(self.produceKey('_lock'))
def lock(self):
    '''Lock the object by setting its "_lock" flag to 1.'''
    self._client.set(self.produceKey('_lock'), 1)
def release(self):
    '''Release the lock by resetting the "_lock" flag to 0.'''
    self._client.set(self.produceKey('_lock'), 0)
def get(self, key):
    '''Fetch a single namespaced value from memcached.'''
    return self._client.get(self.produceKey(key))
def get_multi(self, keys):
    '''Fetch several values in one round trip.

    @param keys: list(str) key suffixes (without the namespace prefix)
    @return: dict mapping the un-namespaced key back to its value
    '''
    namespaced = [self.produceKey(keyname) for keyname in keys]
    raw = self._client.get_multi(namespaced)
    # Strip the "<name>:" prefix again so callers see their original keys.
    return {full.split(':')[-1]: value for full, value in raw.items()}
def update(self, key, values):
    '''Set one value; refused (returns False) while the object is locked.'''
    if self.locked():
        return False
    return self._client.set(self.produceKey(key), values)
def update_multi(self, mapping):
    '''Set several values at once; refused (returns False) while locked.'''
    if self.locked():
        return False
    namespaced = {self.produceKey(keyname): value
                  for keyname, value in mapping.items()}
    return self._client.set_multi(namespaced)
def mdelete(self):
    '''Delete this object's attribute records (everything but the client) from memcached.'''
    attrs = dict(self.__dict__)
    del attrs['_client']
    self._client.delete_multi([self.produceKey(attr) for attr in attrs])
def incr(self, key, delta):
    '''Atomically increment a namespaced counter by *delta*.'''
    return self._client.incr(self.produceKey(key), delta)
def insert(self):
'''插入对象记录
'''
nowdict = dict(self.__dict__)
del nowdict['_client']
newmapping = dict(zip([self.produceKey(keyname) for keyname in nowdict.keys()],
nowdict.values()))
self._client.set_multi(newmapping) | firefly/dbentrust/memobject.py | class MemObject:
'''memcached 关系对象
'''
def __init__(self,name,mc):
'''
@param name: str 对象的名称
@param _lock: int 对象锁 为1时表示对象被锁定无法进行修改
'''
self._client = mc
self._name = name
self._lock = 0
def produceKey(self,keyname):
'''重新生成key
'''
if isinstance(keyname, basestring):
return ''.join([self._name,':',keyname])
else:
raise "type error"
def locked(self):
'''检测对象是否被锁定
'''
key = self.produceKey('_lock')
return self._client.get(key)
def lock(self):
'''锁定对象
'''
key = self.produceKey('_lock')
self._client.set(key, 1)
def release(self):
'''释放锁
'''
key = self.produceKey('_lock')
self._client.set(key, 0)
def get(self,key):
'''获取对象值
'''
key = self.produceKey(key)
return self._client.get(key)
def get_multi(self,keys):
'''一次获取多个key的值
@param keys: list(str) key的列表
'''
keynamelist = [self.produceKey(keyname) for keyname in keys]
olddict = self._client.get_multi(keynamelist)
newdict = dict(zip([keyname.split(':')[-1] for keyname in olddict.keys()],
olddict.values()))
return newdict
def update(self,key,values):
'''修改对象的值
'''
if self.locked():
return False
key = self.produceKey(key)
return self._client.set(key,values)
def update_multi(self,mapping):
'''同时修改多个key值
'''
if self.locked():
return False
newmapping = dict(zip([self.produceKey(keyname) for keyname in mapping.keys()],
mapping.values()))
return self._client.set_multi(newmapping)
def mdelete(self):
'''删除memcache中的数据
'''
nowdict = dict(self.__dict__)
del nowdict['_client']
keys = nowdict.keys()
keys = [self.produceKey(key) for key in keys]
self._client.delete_multi(keys)
def incr(self, key, delta):
'''自增
'''
key = self.produceKey(key)
return self._client.incr( key, delta)
def insert(self):
'''插入对象记录
'''
nowdict = dict(self.__dict__)
del nowdict['_client']
newmapping = dict(zip([self.produceKey(keyname) for keyname in nowdict.keys()],
nowdict.values()))
self._client.set_multi(newmapping) | 0.320183 | 0.14919 |
from __future__ import annotations
import traceback
from datetime import datetime
from typing import TYPE_CHECKING, List
from fabric_mb.message_bus.messages.lease_reservation_avro import LeaseReservationAvro
from fabric_mb.message_bus.messages.result_delegation_avro import ResultDelegationAvro
from fabric_mb.message_bus.messages.result_broker_query_model_avro import ResultBrokerQueryModelAvro
from fabric_mb.message_bus.messages.result_proxy_avro import ResultProxyAvro
from fabric_mb.message_bus.messages.result_reservation_avro import ResultReservationAvro
from fabric_mb.message_bus.messages.result_string_avro import ResultStringAvro
from fabric_mb.message_bus.messages.result_strings_avro import ResultStringsAvro
from fabric_mb.message_bus.messages.result_avro import ResultAvro
from fim.user import GraphFormat
from fabric_cf.actor.core.apis.abc_actor_runnable import ABCActorRunnable
from fabric_cf.actor.core.apis.abc_controller_reservation import ABCControllerReservation
from fabric_cf.actor.core.common.constants import Constants, ErrorCodes
from fabric_cf.actor.core.common.exceptions import ManageException
from fabric_cf.actor.core.kernel.reservation_client import ClientReservationFactory
from fabric_cf.actor.core.kernel.reservation_states import ReservationStates, ReservationPendingStates
from fabric_cf.actor.core.kernel.resource_set import ResourceSet
from fabric_cf.actor.core.manage.converter import Converter
from fabric_cf.actor.core.manage.management_object import ManagementObject
from fabric_cf.actor.core.manage.management_utils import ManagementUtils
from fabric_cf.actor.core.proxies.kafka.translate import Translate
from fabric_cf.actor.core.time.actor_clock import ActorClock
from fabric_cf.actor.security.access_checker import AccessChecker
from fabric_cf.actor.security.pdp_auth import ActionId
from fabric_cf.actor.core.apis.abc_client_actor_management_object import ABCClientActorManagementObject
from fabric_cf.actor.core.time.term import Term
from fabric_cf.actor.core.util.id import ID
from fabric_cf.actor.core.util.resource_type import ResourceType
from fabric_cf.actor.core.core.broker_policy import BrokerPolicy
from fabric_cf.actor.security.pdp_auth import ResourceType as AuthResourceType
if TYPE_CHECKING:
from fabric_mb.message_bus.messages.proxy_avro import ProxyAvro
from fabric_mb.message_bus.messages.ticket_reservation_avro import TicketReservationAvro
from fabric_mb.message_bus.messages.reservation_mng import ReservationMng
from fabric_cf.actor.core.apis.abc_client_actor import ABCClientActor
from fabric_cf.actor.security.auth_token import AuthToken
from fabric_cf.actor.core.apis.abc_actor_mixin import ABCActorMixin
class ClientActorManagementObjectHelper(ABCClientActorManagementObject):
    """Implements client-actor management operations (brokers, reservations,
    broker query models) on behalf of a wrapped ABCClientActor."""
    def __init__(self, *, client: ABCClientActor):
        self.client = client
        # Imported here (not at module top) to avoid a circular import with
        # the container globals.
        from fabric_cf.actor.core.container.globals import GlobalsSingleton
        self.logger = GlobalsSingleton.get().get_logger()
def get_brokers(self, *, caller: AuthToken, broker_id: ID = None, id_token: str = None) -> ResultProxyAvro:
    """Return proxies for all known brokers, or for a single broker.

    :param caller: auth token of the caller (required)
    :param broker_id: when given, only that broker is looked up
    :param id_token: identity token (unused here; kept for interface parity)
    :return: ResultProxyAvro with status and, on success, the proxy list
    """
    result = ResultProxyAvro()
    result.status = ResultAvro()
    if caller is None:
        result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    try:
        brokers = None
        if broker_id is None:
            brokers = self.client.get_brokers()
        else:
            broker = self.client.get_broker(guid=broker_id)
            if broker is not None:
                brokers = [broker]
            else:
                result.status.set_code(ErrorCodes.ErrorNoSuchBroker.value)
                result.status.set_message(ErrorCodes.ErrorNoSuchBroker.interpret())
        # Fix: the original also filled result.proxies inside the single-broker
        # branch, duplicating this conversion; fill exactly once here.
        if brokers is not None:
            result.proxies = Converter.fill_proxies(proxies=brokers)
    except Exception as e:
        self.logger.error("get_brokers {}".format(e))
        result.status.set_code(ErrorCodes.ErrorInternalError.value)
        result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result.status = ManagementObject.set_exception_details(result=result.status, e=e)
    return result
def add_broker(self, *, broker: ProxyAvro, caller: AuthToken) -> ResultAvro:
    """Register a new broker proxy with the wrapped client actor.

    :param broker: avro description of the broker proxy to add
    :param caller: auth token of the caller (required)
    :return: ResultAvro status (invalid-arguments when the proxy can't be built)
    """
    result = ResultAvro()
    if broker is None or caller is None:
        result.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    try:
        agent_proxy = Converter.get_agent_proxy(mng=broker)
        if agent_proxy is not None:
            self.client.add_broker(broker=agent_proxy)
        else:
            result.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
    except Exception as e:
        self.logger.error("add_broker {}".format(e))
        result.set_code(ErrorCodes.ErrorInternalError.value)
        result.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result = ManagementObject.set_exception_details(result=result, e=e)
    return result
def get_broker_query_model(self, *, broker: ID, caller: AuthToken, id_token: str,
                           level: int, graph_format: GraphFormat) -> ResultBrokerQueryModelAvro:
    """Query a broker for its Broker Query Model (BQM).

    :param broker: guid of the broker to query
    :param caller: auth token of the caller (required)
    :param id_token: identity token; when present, PDP access for the query
        action is checked before the broker is contacted
    :param level: detail level of the requested model
    :param graph_format: graph format for the returned model
    :return: ResultBrokerQueryModelAvro with status and, on success, the model
    """
    result = ResultBrokerQueryModelAvro()
    result.status = ResultAvro()
    if broker is None or caller is None:
        result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    try:
        if id_token is not None:
            # Enforce PDP access control before issuing the query.
            AccessChecker.check_access(action_id=ActionId.query, resource_type=AuthResourceType.resources,
                                       token=id_token, logger=self.logger, actor_type=self.client.get_type())
        b = self.client.get_broker(guid=broker)
        if b is not None:
            # Build the query, send it to the broker proxy, translate the reply.
            request = BrokerPolicy.get_broker_query_model_query(level=level, bqm_format=graph_format)
            response = ManagementUtils.query(actor=self.client, actor_proxy=b, query=request, id_token=id_token)
            result.model = Translate.translate_to_broker_query_model(query_response=response, level=level)
        else:
            result.status.set_code(ErrorCodes.ErrorNoSuchBroker.value)
            result.status.set_message(ErrorCodes.ErrorNoSuchBroker.interpret())
    except Exception as e:
        self.logger.error("get_broker_query_model {}".format(e))
        result.status.set_code(ErrorCodes.ErrorInternalError.value)
        result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result.status = ManagementObject.set_exception_details(result=result.status, e=e)
    return result
def add_reservation_private(self, *, reservation: TicketReservationAvro):
    """Create and register a client reservation from an avro ticket request.

    Intended to run on the actor thread (see add_reservation / Runner).

    :param reservation: avro description of the requested ticket reservation
    :return: tuple (reservation id or None, ResultAvro status)
    """
    result = ResultAvro()
    slice_id = ID(uid=reservation.get_slice_id())
    rset = Converter.get_resource_set(res_mng=reservation)
    term = Term(start=ActorClock.from_milliseconds(milli_seconds=reservation.get_start()),
                end=ActorClock.from_milliseconds(milli_seconds=reservation.get_end()))
    broker = None
    if reservation.get_broker() is not None:
        broker = ID(uid=reservation.get_broker())
    # Reuse the caller-supplied reservation id when present; otherwise mint one.
    rid = None
    if reservation.get_reservation_id() is not None:
        rid = ID(uid=reservation.get_reservation_id())
    else:
        rid = ID()
    rc = ClientReservationFactory.create(rid=rid, resources=rset, term=term)
    rc.set_renewable(renewable=reservation.is_renewable())
    # Only freshly created reservations (state Nascent, no pending op) may be added.
    if rc.get_state() != ReservationStates.Nascent or rc.get_pending_state() != ReservationPendingStates.None_:
        result.set_code(ErrorCodes.ErrorInvalidReservation.value)
        result.set_message("Only reservations in Nascent.None can be added")
        return None, result
    slice_obj = self.client.get_slice(slice_id=slice_id)
    if slice_obj is None:
        result.set_code(ErrorCodes.ErrorNoSuchSlice.value)
        result.set_message(ErrorCodes.ErrorNoSuchSlice.interpret())
        return None, result
    rc.set_slice(slice_object=slice_obj)
    # Resolve the broker proxy: explicit guid, or the actor's default broker.
    proxy = None
    if broker is None:
        proxy = self.client.get_default_broker()
    else:
        proxy = self.client.get_broker(guid=broker)
    if proxy is None:
        result.set_code(ErrorCodes.ErrorNoSuchBroker.value)
        result.set_message(ErrorCodes.ErrorNoSuchBroker.interpret())
        return None, result
    rc.set_broker(broker=proxy)
    self.client.register(reservation=rc)
    return rc.get_reservation_id(), result
def add_reservation(self, *, reservation: TicketReservationAvro, caller: AuthToken) -> ResultStringAvro:
    """Add a single reservation and return its id as a string.

    The actual work is delegated to add_reservation_private, executed on the
    actor thread via a Runner.

    :param reservation: avro ticket reservation; must carry a slice id
    :param caller: auth token of the caller (required)
    :return: ResultStringAvro with status and, on success, the reservation id
    """
    result = ResultStringAvro()
    result.status = ResultAvro()
    if reservation is None or reservation.get_slice_id() is None or caller is None:
        result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    try:
        class Runner(ABCActorRunnable):
            # Thin wrapper so the add runs on the actor's own thread.
            def __init__(self, *, parent):
                self.parent = parent

            def run(self):
                return self.parent.add_reservation_private(reservation=reservation)

        rid, result.status = self.client.execute_on_actor_thread_and_wait(runnable=Runner(parent=self))
        if rid is not None:
            result.set_result(str(rid))
    except Exception as e:
        self.logger.error("add_reservation {}".format(e))
        result.status.set_code(ErrorCodes.ErrorInternalError.value)
        result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result.status = ManagementObject.set_exception_details(result=result.status, e=e)
    return result
def add_reservations(self, *, reservations: List[TicketReservationAvro], caller: AuthToken) -> ResultStringsAvro:
    """
    Add a batch of reservations on the actor thread, all-or-nothing.

    Either every reservation is registered and the new ids are returned, or
    the batch is rolled back and an error status is reported.

    Bug fix: Runner.run() used to return only the list of ids while the
    caller unpacked two values (``rids, result.status``); run() now returns
    an explicit (rids, status) tuple, mirroring add_reservation().
    """
    result = ResultStringsAvro()
    result.status = ResultAvro()
    if reservations is None or caller is None:
        result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    # Validate the whole batch up front before touching actor state.
    for r in reservations:
        if r.get_slice_id() is None:
            result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
    try:
        class Runner(ABCActorRunnable):
            def __init__(self, *, parent):
                self.parent = parent

            def run(self):
                status = ResultAvro()
                rids = []
                try:
                    for r in reservations:
                        rr, status = self.parent.add_reservation_private(reservation=r)
                        if rr is not None:
                            rids.append(str(rr))
                        else:
                            raise ManageException("Could not add reservation")
                except Exception:
                    # Roll back: undo any registrations made so far.
                    # NOTE(review): unregister() is handed the avro object,
                    # not the kernel reservation - confirm against the actor
                    # API; behavior kept from the original.
                    for r in reservations:
                        self.parent.client.unregister(reservation=r)
                    rids.clear()
                    # Ensure the caller sees a failure even when the last
                    # per-reservation status was still success.
                    if status.get_code() == 0:
                        status.set_code(ErrorCodes.ErrorInternalError.value)
                        status.set_message(ErrorCodes.ErrorInternalError.interpret())
                return rids, status
        rids, result.status = self.client.execute_on_actor_thread_and_wait(runnable=Runner(parent=self))
        if result.status.get_code() == 0:
            for r in rids:
                result.result.append(r)
    except Exception as e:
        self.logger.error("add_reservations {}".format(e))
        result.status.set_code(ErrorCodes.ErrorInternalError.value)
        result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result.status = ManagementObject.set_exception_details(result=result.status, e=e)
    return result
def demand_reservation_rid(self, *, rid: ID, caller: AuthToken) -> ResultAvro:
    """
    Demand an existing reservation by id on the actor thread.

    Any error raised by actor.demand() surfaces as ErrorInternalError in the
    returned ResultAvro.
    """
    result = ResultAvro()
    if rid is None or caller is None:
        result.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    try:
        class Runner(ABCActorRunnable):
            def __init__(self, *, actor: ABCActorMixin):
                self.actor = actor

            def run(self):
                self.actor.demand(rid=rid)
                return None
        self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client))
    except Exception as e:
        self.logger.error("demand_reservation_rid {}".format(e))
        result.set_code(ErrorCodes.ErrorInternalError.value)
        result.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result = ManagementObject.set_exception_details(result=result, e=e)
    return result
def demand_reservation(self, *, reservation: ReservationMng, caller: AuthToken) -> ResultAvro:
    """
    Update a reservation from its management view and demand it on the actor
    thread.

    For LeaseReservationAvro inputs, redeem predecessors named in the avro are
    resolved against the actor; unresolvable or wrongly-typed predecessors are
    logged and skipped rather than failing the call.
    """
    result = ResultAvro()
    if reservation is None or caller is None:
        result.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    try:
        class Runner(ABCActorRunnable):
            def __init__(self, *, actor: ABCActorMixin, logger):
                self.actor = actor
                self.logger = logger

            def run(self):
                result = ResultAvro()
                rid = ID(uid=reservation.get_reservation_id())
                r = self.actor.get_reservation(rid=rid)
                if r is None:
                    result.set_code(ErrorCodes.ErrorNoSuchReservation.value)
                    result.set_message(ErrorCodes.ErrorNoSuchReservation.interpret())
                    return result
                # Copy mutable fields from the management object onto the
                # kernel reservation before demanding it.
                ManagementUtils.update_reservation(res_obj=r, rsv_mng=reservation)
                if isinstance(reservation, LeaseReservationAvro):
                    predecessors = reservation.get_redeem_predecessors()
                    for pred in predecessors:
                        if pred.get_reservation_id() is None:
                            self.logger.warning("Redeem predecessor specified for rid={} "
                                                "but missing reservation id of predecessor".format(rid))
                            continue
                        predid = ID(uid=pred.get_reservation_id())
                        pr = self.actor.get_reservation(rid=predid)
                        if pr is None:
                            self.logger.warning("Redeem predecessor for rid={} with rid={} does not exist. "
                                                "Ignoring it!".format(rid, predid))
                            continue
                        if not isinstance(pr, ABCControllerReservation):
                            self.logger.warning("Redeem predecessor for rid={} is not an IControllerReservation: "
                                                "class={}".format(rid, type(pr)))
                            continue
                        self.logger.debug("Setting redeem predecessor on reservation # {} pred={}".
                                          format(r.get_reservation_id(), pr.get_reservation_id()))
                        r.add_redeem_predecessor(reservation=pr)
                try:
                    self.actor.get_plugin().get_database().update_reservation(reservation=r)
                except Exception as e:
                    self.logger.error("Could not commit slice update {}".format(e))
                    result.set_code(ErrorCodes.ErrorDatabaseError.value)
                    result.set_message(ErrorCodes.ErrorDatabaseError.interpret(exception=e))
                # NOTE(review): demand() still runs even when the database
                # update above failed - confirm this is intentional.
                self.actor.demand(rid=rid)
                return result
        result = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client,
                                                                              logger=self.logger))
    except Exception as e:
        self.logger.error("demand_reservation {}".format(e))
        result.set_code(ErrorCodes.ErrorInternalError.value)
        result.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result = ManagementObject.set_exception_details(result=result, e=e)
    return result
def extend_reservation(self, *, reservation: ID, new_end_time: datetime, new_units: int,
                       caller: AuthToken) -> ResultAvro:
    """
    Extend a reservation's term to new_end_time, optionally changing the unit
    count (Constants.EXTEND_SAME_UNITS keeps the current units).

    Note: the parameter annotation was corrected from the builtin ``id`` to
    ``ID`` - the value is used as a reservation identifier below.
    """
    result = ResultAvro()
    if reservation is None or caller is None or new_end_time is None:
        result.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    try:
        class Runner(ABCActorRunnable):
            def __init__(self, *, actor: ABCActorMixin):
                self.actor = actor

            def run(self):
                result = ResultAvro()
                r = self.actor.get_reservation(rid=reservation)
                if r is None:
                    result.set_code(ErrorCodes.ErrorNoSuchReservation.value)
                    result.set_message(ErrorCodes.ErrorNoSuchReservation.interpret())
                    return result
                rset = ResourceSet()
                if new_units == Constants.EXTEND_SAME_UNITS:
                    rset.set_units(units=r.get_resources().get_units())
                else:
                    rset.set_units(units=new_units)
                rset.set_type(rtype=r.get_resources().get_type())
                # Extend the term in place: keep the original start, move the end.
                tmp_start_time = r.get_term().get_start_time()
                new_term = r.get_term().extend()
                new_term.set_end_time(date=new_end_time)
                new_term.set_new_start_time(date=tmp_start_time)
                new_term.set_start_time(date=tmp_start_time)
                self.actor.extend(rid=r.get_reservation_id(), resources=rset, term=new_term)
                return result
        result = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client))
    except Exception as e:
        self.logger.error("extend_reservation {}".format(e))
        result.set_code(ErrorCodes.ErrorInternalError.value)
        result.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result = ManagementObject.set_exception_details(result=result, e=e)
    return result
def modify_reservation(self, *, rid: ID, modify_properties: dict, caller: AuthToken) -> ResultAvro:
    """
    Apply modify_properties to an existing reservation on the actor thread.

    Fails with ErrorNoSuchReservation when rid is unknown to the actor.
    """
    result = ResultAvro()
    if rid is None or modify_properties is None:
        result.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    self.logger.debug("reservation: {} | modifyProperties= {}".format(rid, modify_properties))
    try:
        class Runner(ABCActorRunnable):
            def __init__(self, *, actor: ABCActorMixin):
                self.actor = actor

            def run(self):
                result = ResultAvro()
                # Existence check before delegating to actor.modify().
                r = self.actor.get_reservation(rid=rid)
                if r is None:
                    result.set_code(ErrorCodes.ErrorNoSuchReservation.value)
                    result.set_message(ErrorCodes.ErrorNoSuchReservation.interpret())
                    return result
                self.actor.modify(reservation_id=rid, modify_properties=modify_properties)
                return result
        result = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client))
    except Exception as e:
        self.logger.error("modify_reservation {}".format(e))
        result.set_code(ErrorCodes.ErrorInternalError.value)
        result.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result = ManagementObject.set_exception_details(result=result, e=e)
    return result
def claim_delegations(self, *, broker: ID, did: str, caller: AuthToken,
                      id_token: str = None) -> ResultDelegationAvro:
    """
    Claim delegation ``did`` from the given broker on the actor thread.

    When id_token is supplied, PDP access control is checked first. The
    claimed delegation is returned translated to its avro form.
    """
    result = ResultDelegationAvro()
    result.status = ResultAvro()
    if caller is None or did is None or broker is None:
        result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    try:
        if id_token is not None:
            AccessChecker.check_access(action_id=ActionId.query, resource_type=AuthResourceType.delegation,
                                       token=id_token, logger=self.logger, actor_type=self.client.get_type(),
                                       resource_id=did)
        my_broker = self.client.get_broker(guid=broker)
        if my_broker is None:
            result.status.set_code(ErrorCodes.ErrorNoSuchBroker.value)
            result.status.set_message(ErrorCodes.ErrorNoSuchBroker.interpret())
            return result
        class Runner(ABCActorRunnable):
            def __init__(self, *, actor: ABCActorMixin):
                self.actor = actor

            def run(self):
                return self.actor.claim_delegation_client(delegation_id=did, broker=my_broker)
        rc = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client))
        if rc is not None:
            result.delegations = []
            delegation = Translate.translate_delegation_to_avro(delegation=rc)
            result.delegations.append(delegation)
        else:
            raise ManageException("Internal Error")
    except Exception as e:
        self.logger.error(traceback.format_exc())
        self.logger.error("claim_delegations {}".format(e))
        result.status.set_code(ErrorCodes.ErrorInternalError.value)
        result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result.status = ManagementObject.set_exception_details(result=result.status, e=e)
    return result
def reclaim_delegations(self, *, broker: ID, did: str, caller: AuthToken,
                        id_token: str = None) -> ResultDelegationAvro:
    """
    Reclaim delegation ``did`` from the given broker on the actor thread.

    Bug fix: the result object was constructed as ResultReservationAvro even
    though the declared return type is ResultDelegationAvro and the method
    populates ``result.delegations``; it is now ResultDelegationAvro, matching
    claim_delegations().
    """
    result = ResultDelegationAvro()
    result.status = ResultAvro()
    if caller is None or did is None or broker is None:
        result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
        result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
        return result
    try:
        if id_token is not None:
            # NOTE(review): claim_delegations checks AuthResourceType.delegation
            # while this path checks AuthResourceType.resources - confirm the
            # asymmetry is intentional; behavior kept from the original.
            AccessChecker.check_access(action_id=ActionId.query, resource_type=AuthResourceType.resources,
                                       token=id_token, logger=self.logger, actor_type=self.client.get_type(),
                                       resource_id=did)
        my_broker = self.client.get_broker(guid=broker)
        if my_broker is None:
            result.status.set_code(ErrorCodes.ErrorNoSuchBroker.value)
            result.status.set_message(ErrorCodes.ErrorNoSuchBroker.interpret())
            return result
        class Runner(ABCActorRunnable):
            def __init__(self, *, actor: ABCActorMixin):
                self.actor = actor

            def run(self):
                return self.actor.reclaim_delegation_client(delegation_id=did, broker=my_broker)
        rc = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client))
        if rc is not None:
            result.delegations = []
            delegation = Translate.translate_delegation_to_avro(delegation=rc)
            result.delegations.append(delegation)
        else:
            raise ManageException("Internal Error")
    except Exception as e:
        self.logger.error(traceback.format_exc())
        self.logger.error("reclaim_delegations {}".format(e))
        result.status.set_code(ErrorCodes.ErrorInternalError.value)
        result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
        result.status = ManagementObject.set_exception_details(result=result.status, e=e)
    return result
import traceback
from datetime import datetime
from typing import TYPE_CHECKING, List
from fabric_mb.message_bus.messages.lease_reservation_avro import LeaseReservationAvro
from fabric_mb.message_bus.messages.result_delegation_avro import ResultDelegationAvro
from fabric_mb.message_bus.messages.result_broker_query_model_avro import ResultBrokerQueryModelAvro
from fabric_mb.message_bus.messages.result_proxy_avro import ResultProxyAvro
from fabric_mb.message_bus.messages.result_reservation_avro import ResultReservationAvro
from fabric_mb.message_bus.messages.result_string_avro import ResultStringAvro
from fabric_mb.message_bus.messages.result_strings_avro import ResultStringsAvro
from fabric_mb.message_bus.messages.result_avro import ResultAvro
from fim.user import GraphFormat
from fabric_cf.actor.core.apis.abc_actor_runnable import ABCActorRunnable
from fabric_cf.actor.core.apis.abc_controller_reservation import ABCControllerReservation
from fabric_cf.actor.core.common.constants import Constants, ErrorCodes
from fabric_cf.actor.core.common.exceptions import ManageException
from fabric_cf.actor.core.kernel.reservation_client import ClientReservationFactory
from fabric_cf.actor.core.kernel.reservation_states import ReservationStates, ReservationPendingStates
from fabric_cf.actor.core.kernel.resource_set import ResourceSet
from fabric_cf.actor.core.manage.converter import Converter
from fabric_cf.actor.core.manage.management_object import ManagementObject
from fabric_cf.actor.core.manage.management_utils import ManagementUtils
from fabric_cf.actor.core.proxies.kafka.translate import Translate
from fabric_cf.actor.core.time.actor_clock import ActorClock
from fabric_cf.actor.security.access_checker import AccessChecker
from fabric_cf.actor.security.pdp_auth import ActionId
from fabric_cf.actor.core.apis.abc_client_actor_management_object import ABCClientActorManagementObject
from fabric_cf.actor.core.time.term import Term
from fabric_cf.actor.core.util.id import ID
from fabric_cf.actor.core.util.resource_type import ResourceType
from fabric_cf.actor.core.core.broker_policy import BrokerPolicy
from fabric_cf.actor.security.pdp_auth import ResourceType as AuthResourceType
if TYPE_CHECKING:
from fabric_mb.message_bus.messages.proxy_avro import ProxyAvro
from fabric_mb.message_bus.messages.ticket_reservation_avro import TicketReservationAvro
from fabric_mb.message_bus.messages.reservation_mng import ReservationMng
from fabric_cf.actor.core.apis.abc_client_actor import ABCClientActor
from fabric_cf.actor.security.auth_token import AuthToken
from fabric_cf.actor.core.apis.abc_actor_mixin import ABCActorMixin
class ClientActorManagementObjectHelper(ABCClientActorManagementObject):
    """
    Helper implementing the client-actor management API: broker registry,
    broker query model retrieval, reservation add/demand/extend/modify and
    delegation (re)claim.

    All actor/kernel mutations are funneled through the actor's own thread via
    execute_on_actor_thread_and_wait, so kernel state is only touched there.
    Every public method returns an avro Result* object; exceptions are caught
    at the method boundary and folded into the result status.

    Fixes relative to the original:
    - get_brokers: removed a duplicated Converter.fill_proxies call.
    - add_reservations: Runner.run() now returns a (rids, status) tuple to
      match the caller's two-value unpack (it used to return a bare list).
    - extend_reservation: annotation corrected from builtin ``id`` to ``ID``.
    - reclaim_delegations: result constructed as ResultDelegationAvro, matching
      the declared return type (was ResultReservationAvro).
    """

    def __init__(self, *, client: ABCClientActor):
        self.client = client
        # Imported locally to avoid a circular import at module load time.
        from fabric_cf.actor.core.container.globals import GlobalsSingleton
        self.logger = GlobalsSingleton.get().get_logger()

    def get_brokers(self, *, caller: AuthToken, broker_id: ID = None, id_token: str = None) -> ResultProxyAvro:
        """
        Return proxies for all known brokers, or for a single broker when
        broker_id is given.
        """
        result = ResultProxyAvro()
        result.status = ResultAvro()
        if caller is None:
            result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        try:
            brokers = None
            if broker_id is None:
                brokers = self.client.get_brokers()
            else:
                broker = self.client.get_broker(guid=broker_id)
                if broker is not None:
                    brokers = [broker]
                else:
                    result.status.set_code(ErrorCodes.ErrorNoSuchBroker.value)
                    result.status.set_message(ErrorCodes.ErrorNoSuchBroker.interpret())
            # Single conversion point; the original also converted the
            # one-broker list inside the branch above, doing the work twice.
            if brokers is not None:
                result.proxies = Converter.fill_proxies(proxies=brokers)
        except Exception as e:
            self.logger.error("get_brokers {}".format(e))
            result.status.set_code(ErrorCodes.ErrorInternalError.value)
            result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result.status = ManagementObject.set_exception_details(result=result.status, e=e)
        return result

    def add_broker(self, *, broker: ProxyAvro, caller: AuthToken) -> ResultAvro:
        """Register a broker proxy (built from its avro form) with the actor."""
        result = ResultAvro()
        if broker is None or caller is None:
            result.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        try:
            proxy = Converter.get_agent_proxy(mng=broker)
            if proxy is None:
                result.set_code(ErrorCodes.ErrorInvalidArguments.value)
                result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            else:
                self.client.add_broker(broker=proxy)
        except Exception as e:
            self.logger.error("add_broker {}".format(e))
            result.set_code(ErrorCodes.ErrorInternalError.value)
            result.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result = ManagementObject.set_exception_details(result=result, e=e)
        return result

    def get_broker_query_model(self, *, broker: ID, caller: AuthToken, id_token: str,
                               level: int, graph_format: GraphFormat) -> ResultBrokerQueryModelAvro:
        """
        Query a broker for its resource model at the given level/format.

        When id_token is supplied, PDP access control is checked first.
        """
        result = ResultBrokerQueryModelAvro()
        result.status = ResultAvro()
        if broker is None or caller is None:
            result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        try:
            if id_token is not None:
                AccessChecker.check_access(action_id=ActionId.query, resource_type=AuthResourceType.resources,
                                           token=id_token, logger=self.logger, actor_type=self.client.get_type())
            b = self.client.get_broker(guid=broker)
            if b is not None:
                request = BrokerPolicy.get_broker_query_model_query(level=level, bqm_format=graph_format)
                response = ManagementUtils.query(actor=self.client, actor_proxy=b, query=request, id_token=id_token)
                result.model = Translate.translate_to_broker_query_model(query_response=response, level=level)
            else:
                result.status.set_code(ErrorCodes.ErrorNoSuchBroker.value)
                result.status.set_message(ErrorCodes.ErrorNoSuchBroker.interpret())
        except Exception as e:
            self.logger.error("get_broker_query_model {}".format(e))
            result.status.set_code(ErrorCodes.ErrorInternalError.value)
            result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result.status = ManagementObject.set_exception_details(result=result.status, e=e)
        return result

    def add_reservation_private(self, *, reservation: TicketReservationAvro):
        """
        Build and register a kernel client reservation from its avro
        description.

        Returns a (reservation_id, ResultAvro) tuple; the id is None on
        failure. Intended to run on the actor thread (callers wrap it in an
        ABCActorRunnable).
        """
        result = ResultAvro()
        slice_id = ID(uid=reservation.get_slice_id())
        rset = Converter.get_resource_set(res_mng=reservation)
        term = Term(start=ActorClock.from_milliseconds(milli_seconds=reservation.get_start()),
                    end=ActorClock.from_milliseconds(milli_seconds=reservation.get_end()))
        # Broker is optional: when absent, the actor's default broker is used.
        broker = None
        if reservation.get_broker() is not None:
            broker = ID(uid=reservation.get_broker())
        # Reuse the caller-supplied reservation id when given, else mint one.
        if reservation.get_reservation_id() is not None:
            rid = ID(uid=reservation.get_reservation_id())
        else:
            rid = ID()
        rc = ClientReservationFactory.create(rid=rid, resources=rset, term=term)
        rc.set_renewable(renewable=reservation.is_renewable())
        # Only freshly created (Nascent / pending None_) reservations may be added.
        if rc.get_state() != ReservationStates.Nascent or rc.get_pending_state() != ReservationPendingStates.None_:
            result.set_code(ErrorCodes.ErrorInvalidReservation.value)
            result.set_message("Only reservations in Nascent.None can be added")
            return None, result
        slice_obj = self.client.get_slice(slice_id=slice_id)
        if slice_obj is None:
            result.set_code(ErrorCodes.ErrorNoSuchSlice.value)
            result.set_message(ErrorCodes.ErrorNoSuchSlice.interpret())
            return None, result
        rc.set_slice(slice_object=slice_obj)
        if broker is None:
            proxy = self.client.get_default_broker()
        else:
            proxy = self.client.get_broker(guid=broker)
        if proxy is None:
            result.set_code(ErrorCodes.ErrorNoSuchBroker.value)
            result.set_message(ErrorCodes.ErrorNoSuchBroker.interpret())
            return None, result
        rc.set_broker(broker=proxy)
        self.client.register(reservation=rc)
        return rc.get_reservation_id(), result

    def add_reservation(self, *, reservation: TicketReservationAvro, caller: AuthToken) -> ResultStringAvro:
        """
        Add a single reservation on the actor thread; on success the result
        payload is the new reservation id as a string.
        """
        result = ResultStringAvro()
        result.status = ResultAvro()
        if reservation is None or reservation.get_slice_id() is None or caller is None:
            result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        try:
            class Runner(ABCActorRunnable):
                def __init__(self, *, parent):
                    self.parent = parent

                def run(self):
                    # add_reservation_private returns (rid, ResultAvro).
                    return self.parent.add_reservation_private(reservation=reservation)
            rid, result.status = self.client.execute_on_actor_thread_and_wait(runnable=Runner(parent=self))
            if rid is not None:
                result.set_result(str(rid))
        except Exception as e:
            self.logger.error("add_reservation {}".format(e))
            result.status.set_code(ErrorCodes.ErrorInternalError.value)
            result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result.status = ManagementObject.set_exception_details(result=result.status, e=e)
        return result

    def add_reservations(self, *, reservations: List[TicketReservationAvro], caller: AuthToken) -> ResultStringsAvro:
        """
        Add a batch of reservations on the actor thread, all-or-nothing.

        Bug fix: Runner.run() used to return only the list of ids while the
        caller unpacked two values; it now returns (rids, status).
        """
        result = ResultStringsAvro()
        result.status = ResultAvro()
        if reservations is None or caller is None:
            result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        # Validate the whole batch up front before touching actor state.
        for r in reservations:
            if r.get_slice_id() is None:
                result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
                result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
                return result
        try:
            class Runner(ABCActorRunnable):
                def __init__(self, *, parent):
                    self.parent = parent

                def run(self):
                    status = ResultAvro()
                    rids = []
                    try:
                        for r in reservations:
                            rr, status = self.parent.add_reservation_private(reservation=r)
                            if rr is not None:
                                rids.append(str(rr))
                            else:
                                raise ManageException("Could not add reservation")
                    except Exception:
                        # Roll back registrations made so far.
                        # NOTE(review): unregister() is handed the avro object,
                        # not the kernel reservation - confirm against the
                        # actor API; behavior kept from the original.
                        for r in reservations:
                            self.parent.client.unregister(reservation=r)
                        rids.clear()
                        if status.get_code() == 0:
                            status.set_code(ErrorCodes.ErrorInternalError.value)
                            status.set_message(ErrorCodes.ErrorInternalError.interpret())
                    return rids, status
            rids, result.status = self.client.execute_on_actor_thread_and_wait(runnable=Runner(parent=self))
            if result.status.get_code() == 0:
                for r in rids:
                    result.result.append(r)
        except Exception as e:
            self.logger.error("add_reservations {}".format(e))
            result.status.set_code(ErrorCodes.ErrorInternalError.value)
            result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result.status = ManagementObject.set_exception_details(result=result.status, e=e)
        return result

    def demand_reservation_rid(self, *, rid: ID, caller: AuthToken) -> ResultAvro:
        """Demand an existing reservation by id on the actor thread."""
        result = ResultAvro()
        if rid is None or caller is None:
            result.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        try:
            class Runner(ABCActorRunnable):
                def __init__(self, *, actor: ABCActorMixin):
                    self.actor = actor

                def run(self):
                    self.actor.demand(rid=rid)
                    return None
            self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client))
        except Exception as e:
            self.logger.error("demand_reservation_rid {}".format(e))
            result.set_code(ErrorCodes.ErrorInternalError.value)
            result.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result = ManagementObject.set_exception_details(result=result, e=e)
        return result

    def demand_reservation(self, *, reservation: ReservationMng, caller: AuthToken) -> ResultAvro:
        """
        Update a reservation from its management view and demand it on the
        actor thread.

        For LeaseReservationAvro inputs, redeem predecessors named in the avro
        are resolved against the actor; unresolvable or wrongly-typed
        predecessors are logged and skipped rather than failing the call.
        """
        result = ResultAvro()
        if reservation is None or caller is None:
            result.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        try:
            class Runner(ABCActorRunnable):
                def __init__(self, *, actor: ABCActorMixin, logger):
                    self.actor = actor
                    self.logger = logger

                def run(self):
                    result = ResultAvro()
                    rid = ID(uid=reservation.get_reservation_id())
                    r = self.actor.get_reservation(rid=rid)
                    if r is None:
                        result.set_code(ErrorCodes.ErrorNoSuchReservation.value)
                        result.set_message(ErrorCodes.ErrorNoSuchReservation.interpret())
                        return result
                    # Copy mutable fields from the management object onto the
                    # kernel reservation before demanding it.
                    ManagementUtils.update_reservation(res_obj=r, rsv_mng=reservation)
                    if isinstance(reservation, LeaseReservationAvro):
                        predecessors = reservation.get_redeem_predecessors()
                        for pred in predecessors:
                            if pred.get_reservation_id() is None:
                                self.logger.warning("Redeem predecessor specified for rid={} "
                                                    "but missing reservation id of predecessor".format(rid))
                                continue
                            predid = ID(uid=pred.get_reservation_id())
                            pr = self.actor.get_reservation(rid=predid)
                            if pr is None:
                                self.logger.warning("Redeem predecessor for rid={} with rid={} does not exist. "
                                                    "Ignoring it!".format(rid, predid))
                                continue
                            if not isinstance(pr, ABCControllerReservation):
                                self.logger.warning("Redeem predecessor for rid={} is not an IControllerReservation: "
                                                    "class={}".format(rid, type(pr)))
                                continue
                            self.logger.debug("Setting redeem predecessor on reservation # {} pred={}".
                                              format(r.get_reservation_id(), pr.get_reservation_id()))
                            r.add_redeem_predecessor(reservation=pr)
                    try:
                        self.actor.get_plugin().get_database().update_reservation(reservation=r)
                    except Exception as e:
                        self.logger.error("Could not commit slice update {}".format(e))
                        result.set_code(ErrorCodes.ErrorDatabaseError.value)
                        result.set_message(ErrorCodes.ErrorDatabaseError.interpret(exception=e))
                    # NOTE(review): demand() still runs even when the database
                    # update above failed - confirm this is intentional.
                    self.actor.demand(rid=rid)
                    return result
            result = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client,
                                                                                  logger=self.logger))
        except Exception as e:
            self.logger.error("demand_reservation {}".format(e))
            result.set_code(ErrorCodes.ErrorInternalError.value)
            result.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result = ManagementObject.set_exception_details(result=result, e=e)
        return result

    def extend_reservation(self, *, reservation: ID, new_end_time: datetime, new_units: int,
                           caller: AuthToken) -> ResultAvro:
        """
        Extend a reservation's term to new_end_time, optionally changing the
        unit count (Constants.EXTEND_SAME_UNITS keeps the current units).

        Note: the annotation was corrected from the builtin ``id`` to ``ID``.
        """
        result = ResultAvro()
        if reservation is None or caller is None or new_end_time is None:
            result.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        try:
            class Runner(ABCActorRunnable):
                def __init__(self, *, actor: ABCActorMixin):
                    self.actor = actor

                def run(self):
                    result = ResultAvro()
                    r = self.actor.get_reservation(rid=reservation)
                    if r is None:
                        result.set_code(ErrorCodes.ErrorNoSuchReservation.value)
                        result.set_message(ErrorCodes.ErrorNoSuchReservation.interpret())
                        return result
                    rset = ResourceSet()
                    if new_units == Constants.EXTEND_SAME_UNITS:
                        rset.set_units(units=r.get_resources().get_units())
                    else:
                        rset.set_units(units=new_units)
                    rset.set_type(rtype=r.get_resources().get_type())
                    # Extend the term: keep the original start, move the end.
                    tmp_start_time = r.get_term().get_start_time()
                    new_term = r.get_term().extend()
                    new_term.set_end_time(date=new_end_time)
                    new_term.set_new_start_time(date=tmp_start_time)
                    new_term.set_start_time(date=tmp_start_time)
                    self.actor.extend(rid=r.get_reservation_id(), resources=rset, term=new_term)
                    return result
            result = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client))
        except Exception as e:
            self.logger.error("extend_reservation {}".format(e))
            result.set_code(ErrorCodes.ErrorInternalError.value)
            result.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result = ManagementObject.set_exception_details(result=result, e=e)
        return result

    def modify_reservation(self, *, rid: ID, modify_properties: dict, caller: AuthToken) -> ResultAvro:
        """Apply modify_properties to an existing reservation on the actor thread."""
        result = ResultAvro()
        if rid is None or modify_properties is None:
            result.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        self.logger.debug("reservation: {} | modifyProperties= {}".format(rid, modify_properties))
        try:
            class Runner(ABCActorRunnable):
                def __init__(self, *, actor: ABCActorMixin):
                    self.actor = actor

                def run(self):
                    result = ResultAvro()
                    # Existence check before delegating to actor.modify().
                    r = self.actor.get_reservation(rid=rid)
                    if r is None:
                        result.set_code(ErrorCodes.ErrorNoSuchReservation.value)
                        result.set_message(ErrorCodes.ErrorNoSuchReservation.interpret())
                        return result
                    self.actor.modify(reservation_id=rid, modify_properties=modify_properties)
                    return result
            result = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client))
        except Exception as e:
            self.logger.error("modify_reservation {}".format(e))
            result.set_code(ErrorCodes.ErrorInternalError.value)
            result.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result = ManagementObject.set_exception_details(result=result, e=e)
        return result

    def claim_delegations(self, *, broker: ID, did: str, caller: AuthToken,
                          id_token: str = None) -> ResultDelegationAvro:
        """
        Claim delegation ``did`` from the given broker on the actor thread;
        the claimed delegation is returned translated to its avro form.
        """
        result = ResultDelegationAvro()
        result.status = ResultAvro()
        if caller is None or did is None or broker is None:
            result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        try:
            if id_token is not None:
                AccessChecker.check_access(action_id=ActionId.query, resource_type=AuthResourceType.delegation,
                                           token=id_token, logger=self.logger, actor_type=self.client.get_type(),
                                           resource_id=did)
            my_broker = self.client.get_broker(guid=broker)
            if my_broker is None:
                result.status.set_code(ErrorCodes.ErrorNoSuchBroker.value)
                result.status.set_message(ErrorCodes.ErrorNoSuchBroker.interpret())
                return result
            class Runner(ABCActorRunnable):
                def __init__(self, *, actor: ABCActorMixin):
                    self.actor = actor

                def run(self):
                    return self.actor.claim_delegation_client(delegation_id=did, broker=my_broker)
            rc = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client))
            if rc is not None:
                result.delegations = []
                delegation = Translate.translate_delegation_to_avro(delegation=rc)
                result.delegations.append(delegation)
            else:
                raise ManageException("Internal Error")
        except Exception as e:
            self.logger.error(traceback.format_exc())
            self.logger.error("claim_delegations {}".format(e))
            result.status.set_code(ErrorCodes.ErrorInternalError.value)
            result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result.status = ManagementObject.set_exception_details(result=result.status, e=e)
        return result

    def reclaim_delegations(self, *, broker: ID, did: str, caller: AuthToken,
                            id_token: str = None) -> ResultDelegationAvro:
        """
        Reclaim delegation ``did`` from the given broker on the actor thread.

        Bug fix: the result object was constructed as ResultReservationAvro;
        it is now ResultDelegationAvro, matching the declared return type and
        claim_delegations().
        """
        result = ResultDelegationAvro()
        result.status = ResultAvro()
        if caller is None or did is None or broker is None:
            result.status.set_code(ErrorCodes.ErrorInvalidArguments.value)
            result.status.set_message(ErrorCodes.ErrorInvalidArguments.interpret())
            return result
        try:
            if id_token is not None:
                # NOTE(review): claim_delegations checks AuthResourceType.delegation
                # while this path checks AuthResourceType.resources - confirm the
                # asymmetry is intentional; behavior kept from the original.
                AccessChecker.check_access(action_id=ActionId.query, resource_type=AuthResourceType.resources,
                                           token=id_token, logger=self.logger, actor_type=self.client.get_type(),
                                           resource_id=did)
            my_broker = self.client.get_broker(guid=broker)
            if my_broker is None:
                result.status.set_code(ErrorCodes.ErrorNoSuchBroker.value)
                result.status.set_message(ErrorCodes.ErrorNoSuchBroker.interpret())
                return result
            class Runner(ABCActorRunnable):
                def __init__(self, *, actor: ABCActorMixin):
                    self.actor = actor

                def run(self):
                    return self.actor.reclaim_delegation_client(delegation_id=did, broker=my_broker)
            rc = self.client.execute_on_actor_thread_and_wait(runnable=Runner(actor=self.client))
            if rc is not None:
                result.delegations = []
                delegation = Translate.translate_delegation_to_avro(delegation=rc)
                result.delegations.append(delegation)
            else:
                raise ManageException("Internal Error")
        except Exception as e:
            self.logger.error(traceback.format_exc())
            self.logger.error("reclaim_delegations {}".format(e))
            result.status.set_code(ErrorCodes.ErrorInternalError.value)
            result.status.set_message(ErrorCodes.ErrorInternalError.interpret(exception=e))
            result.status = ManagementObject.set_exception_details(result=result.status, e=e)
        return result
import unittest
from extensions.middle.GroupNorm import GroupNormToMVN
from mo.front.common.partial_infer.utils import float_array, int64_array
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, result, build_graph_with_edge_attrs, connect, \
regular_op_with_shaped_data, valued_const_with_data, connect_data
shape = int64_array([1, 3, 5, 2])
nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}),
**valued_const_with_data('gamma', float_array([0.5])),
**valued_const_with_data('beta', float_array([0.5])),
**regular_op_with_shaped_data('group_norm', shape,
{'op': 'GroupNorm', 'name': 'group_norm', 'num_groups': 3, 'eps': 1e-9}),
**result('result')
}
edges = [*connect('input:0', '0:group_norm'),
*connect('gamma', '1:group_norm'),
*connect('beta', '2:group_norm'),
*connect('group_norm:0', 'result'),
]
ref_nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}),
**regular_op_with_shaped_data('shape1', int64_array([4]), {'op': 'ShapeOf'}),
**regular_op_with_shaped_data('shape2', int64_array([4]), {'op': 'ShapeOf'}),
**regular_op_with_shaped_data('shape3', int64_array([1]), {'op': 'ShapeOf'}),
**regular_op_with_shaped_data('hcast1', int64_array([4]), {'op': 'Cast'}),
**regular_op_with_shaped_data('cast2', int64_array([2]), {'op': 'Cast'}),
**regular_op_with_shaped_data('cast3', int64_array([4]), {'op': 'Cast'}),
**regular_op_with_shaped_data('gather1', int64_array([2]), {'op': 'Gather'}),
**regular_op_with_shaped_data('gather2', int64_array([1]), {'op': 'Gather'}),
**regular_op_with_shaped_data('gather3', int64_array([1]), {'op': 'Gather'}),
**regular_op_with_shaped_data('mul1', int64_array([1]), {'op': 'Mul'}),
**regular_op_with_shaped_data('mul2', int64_array([1]), {'op': 'Mul'}),
**regular_op_with_shaped_data('mul3', shape, {'op': 'Mul'}),
**regular_op_with_shaped_data('concat', int64_array([4]), {'op': 'Concat'}),
**regular_op_with_shaped_data('reshape1', int64_array([3, 1, 5, 2]), {'op': 'Reshape'}),
**regular_op_with_shaped_data('reshape2', shape, {'op': 'Reshape'}),
**regular_op_with_shaped_data('squeeze', int64_array([]), {'op': 'Squeeze'}),
**regular_op_with_shaped_data('range', int64_array([3]), {'op': 'Range'}),
**regular_op_with_shaped_data('mvn', int64_array([3, 1, 5, 2]), {'op': 'MVN'}),
**regular_op_with_shaped_data('add', shape, {'op': 'Add'}),
**valued_const_with_data('shape/axis1', int64_array(0)),
**valued_const_with_data('shape/ind1', int64_array([2, 3])),
**valued_const_with_data('shape/axis2', int64_array(0)),
**valued_const_with_data('shape/ind2', int64_array([0])),
**valued_const_with_data('shape/axis3', int64_array(0)),
**valued_const_with_data('shape/ind3', int64_array([1])),
**valued_const_with_data('gn/rec', float_array([1./3])),
**valued_const_with_data('group', int64_array([3])),
**valued_const_with_data('squeeze/axis', int64_array([0])),
**valued_const_with_data('range/start', int64_array(1)),
**valued_const_with_data('range/step', int64_array(1)),
**valued_const_with_data('gamma', float_array([[[[0.5]]]])),
**valued_const_with_data('beta', float_array([[[[0.5]]]])),
**result('result')
}
ref_edges = [*connect('input', '0:reshape1'),
*connect('input', 'shape1', skip_data=True),
*connect('shape1:0', '0:gather1'),
*connect('shape1:0', 'hcast1', skip_data=True),
*connect('shape/ind1', '1:gather1'),
*connect('shape/axis1', '2:gather1'),
*connect('gather1', 'cast2'),
*connect('hcast1', '0:gather3'),
*connect('hcast1', '0:gather2', skip_data=True),
*connect('shape/ind2', '1:gather2'),
*connect('shape/axis2', '2:gather2'),
*connect('gather2', '0:mul2'),
*connect('group', '1:mul2'),
*connect('shape/ind3', '1:gather3'),
*connect('shape/axis3', '2:gather3'),
*connect('gather3', '0:mul1'),
*connect('gn/rec', '1:mul1'),
*connect('mul2', '0:concat'),
*connect('mul1', '1:concat'),
*connect('cast2', '2:concat'),
*connect('concat', 'cast3'),
*connect('cast3', '1:reshape1'),
*connect('reshape1', 'shape2'),
*connect('shape2', 'shape3'),
*connect('shape3', '0:squeeze'),
*connect('squeeze/axis', '1:squeeze'),
*connect('range/start', '0:range'),
*connect('squeeze', '1:range'),
*connect('range/step', '2:range'),
*connect('reshape1', '0:mvn', skip_data=True),
*connect('range', '1:mvn'),
*connect('mvn', '0:reshape2'),
*connect('shape1:0', '1:reshape2', skip_data=True),
*connect('reshape2', '0:mul3'),
*connect('gamma', '1:mul3'),
*connect('mul3', '0:add'),
*connect('beta', '1:add'),
*connect('add', 'result')
]
class GroupNormToMVNTest(unittest.TestCase):
def test_group_norm_1(self):
graph = build_graph(nodes, edges)
graph_ref = build_graph(ref_nodes, ref_edges)
graph.graph['layout'] = 'NCHW'
GroupNormToMVN().find_and_replace_pattern(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp) | model-optimizer/extensions/middle/GroupNorm_test.py | import unittest
from extensions.middle.GroupNorm import GroupNormToMVN
from mo.front.common.partial_infer.utils import float_array, int64_array
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, result, build_graph_with_edge_attrs, connect, \
regular_op_with_shaped_data, valued_const_with_data, connect_data
shape = int64_array([1, 3, 5, 2])
nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}),
**valued_const_with_data('gamma', float_array([0.5])),
**valued_const_with_data('beta', float_array([0.5])),
**regular_op_with_shaped_data('group_norm', shape,
{'op': 'GroupNorm', 'name': 'group_norm', 'num_groups': 3, 'eps': 1e-9}),
**result('result')
}
edges = [*connect('input:0', '0:group_norm'),
*connect('gamma', '1:group_norm'),
*connect('beta', '2:group_norm'),
*connect('group_norm:0', 'result'),
]
ref_nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}),
**regular_op_with_shaped_data('shape1', int64_array([4]), {'op': 'ShapeOf'}),
**regular_op_with_shaped_data('shape2', int64_array([4]), {'op': 'ShapeOf'}),
**regular_op_with_shaped_data('shape3', int64_array([1]), {'op': 'ShapeOf'}),
**regular_op_with_shaped_data('hcast1', int64_array([4]), {'op': 'Cast'}),
**regular_op_with_shaped_data('cast2', int64_array([2]), {'op': 'Cast'}),
**regular_op_with_shaped_data('cast3', int64_array([4]), {'op': 'Cast'}),
**regular_op_with_shaped_data('gather1', int64_array([2]), {'op': 'Gather'}),
**regular_op_with_shaped_data('gather2', int64_array([1]), {'op': 'Gather'}),
**regular_op_with_shaped_data('gather3', int64_array([1]), {'op': 'Gather'}),
**regular_op_with_shaped_data('mul1', int64_array([1]), {'op': 'Mul'}),
**regular_op_with_shaped_data('mul2', int64_array([1]), {'op': 'Mul'}),
**regular_op_with_shaped_data('mul3', shape, {'op': 'Mul'}),
**regular_op_with_shaped_data('concat', int64_array([4]), {'op': 'Concat'}),
**regular_op_with_shaped_data('reshape1', int64_array([3, 1, 5, 2]), {'op': 'Reshape'}),
**regular_op_with_shaped_data('reshape2', shape, {'op': 'Reshape'}),
**regular_op_with_shaped_data('squeeze', int64_array([]), {'op': 'Squeeze'}),
**regular_op_with_shaped_data('range', int64_array([3]), {'op': 'Range'}),
**regular_op_with_shaped_data('mvn', int64_array([3, 1, 5, 2]), {'op': 'MVN'}),
**regular_op_with_shaped_data('add', shape, {'op': 'Add'}),
**valued_const_with_data('shape/axis1', int64_array(0)),
**valued_const_with_data('shape/ind1', int64_array([2, 3])),
**valued_const_with_data('shape/axis2', int64_array(0)),
**valued_const_with_data('shape/ind2', int64_array([0])),
**valued_const_with_data('shape/axis3', int64_array(0)),
**valued_const_with_data('shape/ind3', int64_array([1])),
**valued_const_with_data('gn/rec', float_array([1./3])),
**valued_const_with_data('group', int64_array([3])),
**valued_const_with_data('squeeze/axis', int64_array([0])),
**valued_const_with_data('range/start', int64_array(1)),
**valued_const_with_data('range/step', int64_array(1)),
**valued_const_with_data('gamma', float_array([[[[0.5]]]])),
**valued_const_with_data('beta', float_array([[[[0.5]]]])),
**result('result')
}
ref_edges = [*connect('input', '0:reshape1'),
*connect('input', 'shape1', skip_data=True),
*connect('shape1:0', '0:gather1'),
*connect('shape1:0', 'hcast1', skip_data=True),
*connect('shape/ind1', '1:gather1'),
*connect('shape/axis1', '2:gather1'),
*connect('gather1', 'cast2'),
*connect('hcast1', '0:gather3'),
*connect('hcast1', '0:gather2', skip_data=True),
*connect('shape/ind2', '1:gather2'),
*connect('shape/axis2', '2:gather2'),
*connect('gather2', '0:mul2'),
*connect('group', '1:mul2'),
*connect('shape/ind3', '1:gather3'),
*connect('shape/axis3', '2:gather3'),
*connect('gather3', '0:mul1'),
*connect('gn/rec', '1:mul1'),
*connect('mul2', '0:concat'),
*connect('mul1', '1:concat'),
*connect('cast2', '2:concat'),
*connect('concat', 'cast3'),
*connect('cast3', '1:reshape1'),
*connect('reshape1', 'shape2'),
*connect('shape2', 'shape3'),
*connect('shape3', '0:squeeze'),
*connect('squeeze/axis', '1:squeeze'),
*connect('range/start', '0:range'),
*connect('squeeze', '1:range'),
*connect('range/step', '2:range'),
*connect('reshape1', '0:mvn', skip_data=True),
*connect('range', '1:mvn'),
*connect('mvn', '0:reshape2'),
*connect('shape1:0', '1:reshape2', skip_data=True),
*connect('reshape2', '0:mul3'),
*connect('gamma', '1:mul3'),
*connect('mul3', '0:add'),
*connect('beta', '1:add'),
*connect('add', 'result')
]
class GroupNormToMVNTest(unittest.TestCase):
def test_group_norm_1(self):
graph = build_graph(nodes, edges)
graph_ref = build_graph(ref_nodes, ref_edges)
graph.graph['layout'] = 'NCHW'
GroupNormToMVN().find_and_replace_pattern(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp) | 0.634543 | 0.493042 |
class MovieData():
""" This class handle movie's data information """
def __init__(self):
self.get_movie_data()
def get_movie_data(self):
VALID_RATINGS = ["G", "PG", "PG-13", "R"]
return (["2h 34 min", VALID_RATINGS[3], "Crime, Drama","1994",
"Pulp Fiction", "The lives of two mob hit men, a boxer,"
"a gangster wife, and a pair of diner bandits intertwine in"
"four tales of violence and redemption. ",
"http://img05.deviantart.net/4591/i/2015/115/7/2/"
"pulp_fiction_poster_by_riikardo-d50i7n6.jpg",
"https://www.youtube.com/watch?v=s7EdQ4FqbhY"],
["3h 21min", VALID_RATINGS[2], "Adventure, Drama, Fantasy",
"2003", "The Lord of the Rings",
"Gandalf and Aragorn lead the World of Men against Sauron's"
"army to draw his gaze from Frodo and Sam as they approach"
" ount Doom with the One Ring. ",
"http://horrornews.net/wp-content/uploads/2016/09/"
"LOTR-Return-Of-The-King-poster.jpg",
"https://www.youtube.com/watch?v=y2rYRu8UW8M"],
["2h 19min", VALID_RATINGS[3], "Drama", "1999",
"Fight Club", "An insomniac office worker, looking for"
"a way to change his life, crosses paths with a"
"devil-may-care soap maker, forming an underground fight"
"club that evolves into something much, much more. ",
"http://www.impawards.com/1999/posters/fight_club_ver4.jpg",
"https://www.youtube.com/watch?v=SUXWAEX2jlg"],
["2h 4min", VALID_RATINGS[1], "Action, Adventure, Fantasy",
"1980", "Star Wars: Episode V - The Empire Strikes Back",
"After the rebels have been brutally overpowered by the"
"Empire on their newly established base, <NAME>"
"takes advanced Jedi training with <NAME>, while his"
"friends are pursued by <NAME> as part of his plan"
"to capture Luke. ",
"http://i10.photobucket.com/albums/a116/mikepaul1/"
"star-wars-episode-v-the-empire-strikes-back"
"-5229c2d4a1c75_zpsmc0wqr6s.jpg",
"https://www.youtube.com/watch?v=xESiohGGP7g"],
["2h 22min", VALID_RATINGS[2], "Comedy, Drama", "1994",
"<NAME>", "Forrest Gump, while not intelligent, has"
"accidentally been present at many historic moments, but"
"his true love, <NAME>, eludes him. ",
"https://s-media-cache-ak0.pinimg.com/736x/cd/d9/e3/"
"cdd9e3b6c6072fb0f8c6a54bfc4bc7a4.jpg",
"https://www.youtube.com/watch?v=bLvqoHBptjg"],
["2h 28min", VALID_RATINGS[2], "Action, Adventure, Sci-Fi",
"2010", "Inception", "A thief, who steals corporate secrets"
"through use of dream-sharing technology, is given the"
"inverse task of planting an idea into the mind of a CEO.",
"https://encrypted-tbn2.gstatic.com/"
"images?q=tbn:ANd9GcRMyEwdqYKY8VRxYmWFZxVoJJalccdCZ4ksKC"
"iaPH6JYcu2sQhf",
"https://www.youtube.com/watch?v=8hP9D6kZseM"]) | movie-trailler/movie_data.py | class MovieData():
""" This class handle movie's data information """
def __init__(self):
self.get_movie_data()
def get_movie_data(self):
VALID_RATINGS = ["G", "PG", "PG-13", "R"]
return (["2h 34 min", VALID_RATINGS[3], "Crime, Drama","1994",
"Pulp Fiction", "The lives of two mob hit men, a boxer,"
"a gangster wife, and a pair of diner bandits intertwine in"
"four tales of violence and redemption. ",
"http://img05.deviantart.net/4591/i/2015/115/7/2/"
"pulp_fiction_poster_by_riikardo-d50i7n6.jpg",
"https://www.youtube.com/watch?v=s7EdQ4FqbhY"],
["3h 21min", VALID_RATINGS[2], "Adventure, Drama, Fantasy",
"2003", "The Lord of the Rings",
"Gandalf and Aragorn lead the World of Men against Sauron's"
"army to draw his gaze from Frodo and Sam as they approach"
" ount Doom with the One Ring. ",
"http://horrornews.net/wp-content/uploads/2016/09/"
"LOTR-Return-Of-The-King-poster.jpg",
"https://www.youtube.com/watch?v=y2rYRu8UW8M"],
["2h 19min", VALID_RATINGS[3], "Drama", "1999",
"Fight Club", "An insomniac office worker, looking for"
"a way to change his life, crosses paths with a"
"devil-may-care soap maker, forming an underground fight"
"club that evolves into something much, much more. ",
"http://www.impawards.com/1999/posters/fight_club_ver4.jpg",
"https://www.youtube.com/watch?v=SUXWAEX2jlg"],
["2h 4min", VALID_RATINGS[1], "Action, Adventure, Fantasy",
"1980", "Star Wars: Episode V - The Empire Strikes Back",
"After the rebels have been brutally overpowered by the"
"Empire on their newly established base, <NAME>"
"takes advanced Jedi training with <NAME>, while his"
"friends are pursued by <NAME> as part of his plan"
"to capture Luke. ",
"http://i10.photobucket.com/albums/a116/mikepaul1/"
"star-wars-episode-v-the-empire-strikes-back"
"-5229c2d4a1c75_zpsmc0wqr6s.jpg",
"https://www.youtube.com/watch?v=xESiohGGP7g"],
["2h 22min", VALID_RATINGS[2], "Comedy, Drama", "1994",
"<NAME>", "Forrest Gump, while not intelligent, has"
"accidentally been present at many historic moments, but"
"his true love, <NAME>, eludes him. ",
"https://s-media-cache-ak0.pinimg.com/736x/cd/d9/e3/"
"cdd9e3b6c6072fb0f8c6a54bfc4bc7a4.jpg",
"https://www.youtube.com/watch?v=bLvqoHBptjg"],
["2h 28min", VALID_RATINGS[2], "Action, Adventure, Sci-Fi",
"2010", "Inception", "A thief, who steals corporate secrets"
"through use of dream-sharing technology, is given the"
"inverse task of planting an idea into the mind of a CEO.",
"https://encrypted-tbn2.gstatic.com/"
"images?q=tbn:ANd9GcRMyEwdqYKY8VRxYmWFZxVoJJalccdCZ4ksKC"
"iaPH6JYcu2sQhf",
"https://www.youtube.com/watch?v=8hP9D6kZseM"]) | 0.507324 | 0.542924 |
import logging
import struct
import shutil
from pathlib import Path
from typing import Iterable, List
import requests
from dacite import from_dict
from nozomi.data import Post
from nozomi.exceptions import InvalidTagFormat, InvalidUrlFormat
from nozomi.helpers import sanitize_tag, create_tag_filepath, create_post_filepath, parse_post_id
from nozomi import byte_for
_LOGGER = logging.getLogger(__name__)
def get_post(url: str) -> Post:
"""Retrieve a single post.
Args:
url: The URL of the post to retrieve.
Returns:
A post in JSON format if it exists.
"""
_LOGGER.debug('Retrieving a post from URL "%s"', url)
try:
post_id = parse_post_id(url)
post_url = create_post_filepath(post_id)
post_data = requests.get(post_url).json()
_LOGGER.debug(post_data)
return from_dict(data_class=Post, data=post_data)
except InvalidUrlFormat:
raise
except Exception as ex:
_LOGGER.exception(ex)
raise
def get_posts(positive_tags: List[str], negative_tags: List[str]=None) -> Iterable[Post]:
"""Retrieve all post data that contains and doesn't contain certain tags.
Args:
positive_tags: The tags that the posts retrieved must contain.
negative_tags: Optional, blacklisted tags.
Yields:
A post in JSON format, which contains the positive tags and doesn't contain the negative
tags.
"""
if negative_tags is None:
negative_tags = list()
_LOGGER.debug('Retrieving posts with positive_tags=%s and negative_tags=%s',
str(positive_tags), str(negative_tags))
try:
positive_post_urls = _get_post_urls(positive_tags)
negative_post_urls = _get_post_urls(negative_tags)
relevant_post_urls = set(positive_post_urls) - set(negative_post_urls)
for post_url in relevant_post_urls:
post_data = requests.get(post_url).json()
_LOGGER.debug(post_data)
yield from_dict(data_class=Post, data=post_data)
except InvalidTagFormat:
raise
except Exception as ex:
_LOGGER.exception(ex)
raise
def download_media(post: Post, filepath: Path) -> List[str]:
"""Download all media on a post and save it.
Args:
post: The post to download.
filepath: The file directory to save the media. The directory will be created if it doesn't
already exist.
Returns:
The names of the images downloaded.
"""
images_downloaded = []
filepath.mkdir(parents=True, exist_ok=True)
for media_meta_data in post.imageurls:
image_url = media_meta_data.imageurl
image_name = image_url.split('/')[-1]
image_filepath = filepath.joinpath(image_name)
_download_media(image_url, image_filepath)
images_downloaded.append(image_name)
return images_downloaded
def _download_media(image_url: str, filepath: Path):
"""Download an image and save it.
Args:
image_url: The image URL.
filepath: The file directory to save the media. The directory will be created if it doesn't
already exist.
"""
headers = {
'Host': 'i.nozomi.la',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,/;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'DNT': '1',
'Connection': 'keep-alive',
'Referer': 'https://nozomi.la/',
'Upgrade-Insecure-Requests': '1',
'TE': 'Trailers',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'
}
with requests.get(image_url, stream=True, headers=headers) as r:
with open(filepath, 'wb') as f:
shutil.copyfileobj(r.raw, f)
_LOGGER.debug('Image downloaded %s', filepath)
def _get_post_urls(tags: List[str]) -> List[str]:
"""Retrieve the links to all of the posts that contain the tags.
Args:
tags: The tags that the posts must contain.
Returns:
A list of post urls that contain all of the specified tags.
"""
if len(tags) == 0: return tags
_LOGGER.debug('Retrieving all URLs that contain the tags %s', str(tags))
sanitized_tags = [sanitize_tag(tag) for tag in tags]
nozomi_urls = [create_tag_filepath(sanitized_tag) for sanitized_tag in sanitized_tags]
tag_post_ids = [_get_post_ids(nozomi_url) for nozomi_url in nozomi_urls]
tag_post_ids = set.intersection(*map(set, tag_post_ids)) # Flatten list of tuples on intersection
post_urls = [create_post_filepath(post_id) for post_id in tag_post_ids]
_LOGGER.debug('Got %d post urls containing the tags %s', len(tags), str(tags))
return post_urls
def _get_post_ids(tag_filepath_url: str) -> List[int]:
post_ids = []
"""Retrieve the .nozomi data file.
Args:
tag_filepath_url: The URL to a tag's .nozomi file.
Returns:
A list containing all of the post IDs that contain the tag.
"""
_LOGGER.debug('Getting post IDs from %s', tag_filepath_url)
try:
headers={
'Host': 'n.nozomi.la',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0',
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'Referer': 'https://nozomi.la/',
'Origin': 'https://nozomi.la',
'Connection': 'keep-alive',
'TE': 'Trailers',
}
response = requests.get(tag_filepath_url, headers=headers)
_LOGGER.debug('RESPONSE: %s', response)
total_ids = len(response.content) // 4 # divide by the size of uint
_LOGGER.info('Unpacking .nozomi file... Expecting %d post ids.', total_ids)
result = []
byte_for.bt_for(response.content,result)
_LOGGER.debug('Unpacked data... Got %d total post ids! %s', len(result), str(result))
except Exception as ex:
_LOGGER.exception(ex)
return result | nozomi/api.py |
import logging
import struct
import shutil
from pathlib import Path
from typing import Iterable, List
import requests
from dacite import from_dict
from nozomi.data import Post
from nozomi.exceptions import InvalidTagFormat, InvalidUrlFormat
from nozomi.helpers import sanitize_tag, create_tag_filepath, create_post_filepath, parse_post_id
from nozomi import byte_for
_LOGGER = logging.getLogger(__name__)
def get_post(url: str) -> Post:
"""Retrieve a single post.
Args:
url: The URL of the post to retrieve.
Returns:
A post in JSON format if it exists.
"""
_LOGGER.debug('Retrieving a post from URL "%s"', url)
try:
post_id = parse_post_id(url)
post_url = create_post_filepath(post_id)
post_data = requests.get(post_url).json()
_LOGGER.debug(post_data)
return from_dict(data_class=Post, data=post_data)
except InvalidUrlFormat:
raise
except Exception as ex:
_LOGGER.exception(ex)
raise
def get_posts(positive_tags: List[str], negative_tags: List[str]=None) -> Iterable[Post]:
"""Retrieve all post data that contains and doesn't contain certain tags.
Args:
positive_tags: The tags that the posts retrieved must contain.
negative_tags: Optional, blacklisted tags.
Yields:
A post in JSON format, which contains the positive tags and doesn't contain the negative
tags.
"""
if negative_tags is None:
negative_tags = list()
_LOGGER.debug('Retrieving posts with positive_tags=%s and negative_tags=%s',
str(positive_tags), str(negative_tags))
try:
positive_post_urls = _get_post_urls(positive_tags)
negative_post_urls = _get_post_urls(negative_tags)
relevant_post_urls = set(positive_post_urls) - set(negative_post_urls)
for post_url in relevant_post_urls:
post_data = requests.get(post_url).json()
_LOGGER.debug(post_data)
yield from_dict(data_class=Post, data=post_data)
except InvalidTagFormat:
raise
except Exception as ex:
_LOGGER.exception(ex)
raise
def download_media(post: Post, filepath: Path) -> List[str]:
"""Download all media on a post and save it.
Args:
post: The post to download.
filepath: The file directory to save the media. The directory will be created if it doesn't
already exist.
Returns:
The names of the images downloaded.
"""
images_downloaded = []
filepath.mkdir(parents=True, exist_ok=True)
for media_meta_data in post.imageurls:
image_url = media_meta_data.imageurl
image_name = image_url.split('/')[-1]
image_filepath = filepath.joinpath(image_name)
_download_media(image_url, image_filepath)
images_downloaded.append(image_name)
return images_downloaded
def _download_media(image_url: str, filepath: Path):
"""Download an image and save it.
Args:
image_url: The image URL.
filepath: The file directory to save the media. The directory will be created if it doesn't
already exist.
"""
headers = {
'Host': 'i.nozomi.la',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,/;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'DNT': '1',
'Connection': 'keep-alive',
'Referer': 'https://nozomi.la/',
'Upgrade-Insecure-Requests': '1',
'TE': 'Trailers',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'
}
with requests.get(image_url, stream=True, headers=headers) as r:
with open(filepath, 'wb') as f:
shutil.copyfileobj(r.raw, f)
_LOGGER.debug('Image downloaded %s', filepath)
def _get_post_urls(tags: List[str]) -> List[str]:
"""Retrieve the links to all of the posts that contain the tags.
Args:
tags: The tags that the posts must contain.
Returns:
A list of post urls that contain all of the specified tags.
"""
if len(tags) == 0: return tags
_LOGGER.debug('Retrieving all URLs that contain the tags %s', str(tags))
sanitized_tags = [sanitize_tag(tag) for tag in tags]
nozomi_urls = [create_tag_filepath(sanitized_tag) for sanitized_tag in sanitized_tags]
tag_post_ids = [_get_post_ids(nozomi_url) for nozomi_url in nozomi_urls]
tag_post_ids = set.intersection(*map(set, tag_post_ids)) # Flatten list of tuples on intersection
post_urls = [create_post_filepath(post_id) for post_id in tag_post_ids]
_LOGGER.debug('Got %d post urls containing the tags %s', len(tags), str(tags))
return post_urls
def _get_post_ids(tag_filepath_url: str) -> List[int]:
post_ids = []
"""Retrieve the .nozomi data file.
Args:
tag_filepath_url: The URL to a tag's .nozomi file.
Returns:
A list containing all of the post IDs that contain the tag.
"""
_LOGGER.debug('Getting post IDs from %s', tag_filepath_url)
try:
headers={
'Host': 'n.nozomi.la',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0',
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'Referer': 'https://nozomi.la/',
'Origin': 'https://nozomi.la',
'Connection': 'keep-alive',
'TE': 'Trailers',
}
response = requests.get(tag_filepath_url, headers=headers)
_LOGGER.debug('RESPONSE: %s', response)
total_ids = len(response.content) // 4 # divide by the size of uint
_LOGGER.info('Unpacking .nozomi file... Expecting %d post ids.', total_ids)
result = []
byte_for.bt_for(response.content,result)
_LOGGER.debug('Unpacked data... Got %d total post ids! %s', len(result), str(result))
except Exception as ex:
_LOGGER.exception(ex)
return result | 0.708818 | 0.226228 |
import os
import sys
import time
import math
import torch
import copy
import torch.nn as nn
import torch.nn.init as init
class MovingMaximum(object):
def __init__(self):
self.data = [] # data[i] is the maximum val in data[0:i+1]
self.max = 0.0
def push(self, current_data):
if len(self.data) == 0:
self.max = current_data
elif current_data > self.max:
self.max = current_data
self.data.append(self.max)
def get(self):
return self.data
def delta(self, start, end):
try:
res = self.data[end] - self.data[start]
except IndexError:
res = self.data[end]
return res
class ExponentialMovingAverage(object):
def __init__(self, decay=0.95, scale=False):
self.data = []
self.decay = decay
self.number = 0
self.scale = scale
self.avg_val = 0.0
def push(self, current_data):
self.number += 1
self.avg_val = self.decay * self.avg_val + (1 - self.decay) * current_data
self.data.append(self.avg_val)
def get(self):
return self.data
def delta(self, start, end):
try:
res = self.data[end] - self.data[start]
except IndexError:
res = self.data[end]
return res
def average(self):
if self.scale:
return self.avg_val / (1.0 - self.decay ** self.number)
else:
return self.avg_val
class TorchExponentialMovingAverage(object):
def __init__(self, decay=0.999):
self.decay = decay
self.ema = {}
self.number = {}
def push(self, current_data):
assert isinstance(current_data, dict), "current_data should be a dict"
for key in current_data:
if key in self.ema:
# in-place
self.ema[key] -= (1.0 - self.decay) * (self.ema[key] - current_data[key])
self.number[key] += 1
else:
# self.ema[key] = copy.deepcopy(current_data[key])
self.ema[key] = current_data[key] * (1.0 - self.decay)
self.number[key] = 1
def average(self):
scaled_ema = {}
for key in self.ema:
scaled_ema[key] = self.ema[key] / (1.0 - self.decay ** self.number[key])
return scaled_ema
# net: pytorch module
# strict: strict matching for set
def set_named_parameters(net, named_params, strict=True):
assert isinstance(named_params, dict), "named_params should be a dict"
orig_params_data = {}
for n, p in net.named_parameters():
orig_params_data[n] = copy.deepcopy(p.data)
if strict:
assert len(named_params) == len(list(net.named_parameters())), "Unmatched number of params!"
for n, p in net.named_parameters():
if strict:
assert n in named_params, "Unknown param name!"
if n in named_params:
p.data.copy_(named_params[n])
return orig_params_data | cifar10/utils.py | import os
import sys
import time
import math
import torch
import copy
import torch.nn as nn
import torch.nn.init as init
class MovingMaximum(object):
def __init__(self):
self.data = [] # data[i] is the maximum val in data[0:i+1]
self.max = 0.0
def push(self, current_data):
if len(self.data) == 0:
self.max = current_data
elif current_data > self.max:
self.max = current_data
self.data.append(self.max)
def get(self):
return self.data
def delta(self, start, end):
try:
res = self.data[end] - self.data[start]
except IndexError:
res = self.data[end]
return res
class ExponentialMovingAverage(object):
def __init__(self, decay=0.95, scale=False):
self.data = []
self.decay = decay
self.number = 0
self.scale = scale
self.avg_val = 0.0
def push(self, current_data):
self.number += 1
self.avg_val = self.decay * self.avg_val + (1 - self.decay) * current_data
self.data.append(self.avg_val)
def get(self):
return self.data
def delta(self, start, end):
try:
res = self.data[end] - self.data[start]
except IndexError:
res = self.data[end]
return res
def average(self):
if self.scale:
return self.avg_val / (1.0 - self.decay ** self.number)
else:
return self.avg_val
class TorchExponentialMovingAverage(object):
def __init__(self, decay=0.999):
self.decay = decay
self.ema = {}
self.number = {}
def push(self, current_data):
assert isinstance(current_data, dict), "current_data should be a dict"
for key in current_data:
if key in self.ema:
# in-place
self.ema[key] -= (1.0 - self.decay) * (self.ema[key] - current_data[key])
self.number[key] += 1
else:
# self.ema[key] = copy.deepcopy(current_data[key])
self.ema[key] = current_data[key] * (1.0 - self.decay)
self.number[key] = 1
def average(self):
scaled_ema = {}
for key in self.ema:
scaled_ema[key] = self.ema[key] / (1.0 - self.decay ** self.number[key])
return scaled_ema
# net: pytorch module
# strict: strict matching for set
def set_named_parameters(net, named_params, strict=True):
assert isinstance(named_params, dict), "named_params should be a dict"
orig_params_data = {}
for n, p in net.named_parameters():
orig_params_data[n] = copy.deepcopy(p.data)
if strict:
assert len(named_params) == len(list(net.named_parameters())), "Unmatched number of params!"
for n, p in net.named_parameters():
if strict:
assert n in named_params, "Unknown param name!"
if n in named_params:
p.data.copy_(named_params[n])
return orig_params_data | 0.440951 | 0.237101 |
import pyparsing as pp
from functools import lru_cache
def paren_exp(keyword, contents):
    """Build a grammar matching ``keyword( contents )``.

    The matched keyword is stored under the results name ``op``; the
    surrounding parentheses are suppressed from the parse results.
    """
    open_paren = pp.Suppress('(')
    close_paren = pp.Suppress(')')
    return pp.Keyword(keyword)('op') + open_paren + contents + close_paren
def cfg_exp():
    """Build the grammar for a Rust-style ``cfg(...)`` expression.

    Supports bare options (``foo``), assignments (``foo = "bar"``), and
    the ``any(...)``, ``all(...)`` and ``not(...)`` combinators, nested
    arbitrarily inside a top-level ``cfg(...)`` wrapper.
    """
    expr = pp.Forward()
    ident = pp.Word(pp.alphanums + '_')('option')
    quoted = pp.QuotedString('"')('value')
    assignment = (ident + pp.Suppress("=") + quoted)('assign')
    combinator = (paren_exp('any', pp.delimitedList(expr, delim=','))
                  | paren_exp('all', pp.delimitedList(expr, delim=','))
                  | paren_exp('not', expr))
    expr <<= pp.Group(combinator | assignment | ident)
    return paren_exp('cfg', expr)
def multiarch_tuple():
    """Build a grammar for a target tuple like ``x86_64-unknown-linux-gnu``.

    Matches two mandatory dash-separated components plus up to two
    optional ones; the whole match carries the results name ``archtuple``.
    """
    component = pp.Word(pp.alphanums + '_')
    optional_component = pp.Optional(pp.Suppress('-') + component)
    pattern = (component('a') + pp.Suppress('-') + component('b')
               + optional_component('c') + optional_component('d'))
    return pattern('archtuple')
@lru_cache()
def cfg_grammar():
    """Return the (cached) top-level grammar.

    Accepts either a full ``cfg(...)`` expression or a bare multiarch
    tuple, and requires the whole input string to be consumed.
    """
    return (cfg_exp() | multiarch_tuple()) + pp.stringEnd()
def dump_tree(t, level=0, evalf=None):
    """Pretty-print a parse tree for debugging.

    Each node is printed as ``structure <name>``, indented one space per
    nesting level; string leaves are printed with ``repr``.  When
    *evalf* is given, its result for each node is appended in brackets.
    """
    indent = ' ' * level
    suffix = f' [{evalf(t)}]' if evalf else ''
    print(f'{indent}structure {t.getName()}{suffix}')
    for child in t:
        if isinstance(child, str):
            print(f'{indent} {child!r}')
        else:
            dump_tree(child, level + 1, evalf=evalf)
class Evaluator:
    """Decide whether cfg expressions hold for a given set of options.

    Per the Rust reference: configuration options are boolean (on or
    off) and are named either with a single identifier (e.g. ``foo``) or
    an identifier and a string (e.g. ``foo = "bar"``; the quotes are
    required and spaces around the ``=`` are unimportant).
    Similarly-named options, such as ``foo``, ``foo="bar"`` and
    ``foo="baz"``, may each be set or unset independently.
    """

    def __init__(self, options=()):
        # Enabled options: bare names as strings, valued options as
        # (name, value) tuples.
        self.options = options

    def eval_tree(self, tree):
        """Recursively evaluate a parsed cfg tree to a boolean."""
        node_kind = tree.getName()
        assert node_kind
        if node_kind == 'option':
            # Bare identifier: enabled iff present in the option set.
            return tree.option in self.options
        if node_kind == 'assign':
            # name = "value": matched against the set as a tuple.
            return (tree.option, tree.value) in self.options
        if node_kind == 'op':
            operator = tree[0]
            if operator == 'cfg':
                assert(len(tree) == 2)
                return self.eval_tree(tree[1])
            if operator == 'any':
                assert(len(tree) >= 2)
                return any(self.eval_tree(child) for child in tree[1:])
            if operator == 'all':
                assert(len(tree) >= 2)
                return all(self.eval_tree(child) for child in tree[1:])
            if operator == 'not':
                assert(len(tree) == 2)
                return not self.eval_tree(tree[1])
            assert False, f'Unknown operator {operator}'
        if node_kind == 'archtuple':
            # A bare target tuple matches when one component is 'linux'.
            return 'linux' in list(tree)
        assert False, f'Unknown element {node_kind}'

    @classmethod
    def platform(cls):
        """An Evaluator populated with some platform options.

        There is no authoritative list of allowed options; these were
        culled from deps used in crates packaged in Fedora 28.
        """
        return cls(options=('unix',
                            ('target_os', 'linux')))

    def parse_and_eval(self, string):
        """Parse *string* with the cfg grammar and evaluate it."""
        tree = cfg_grammar().parseString(string)
        return self.eval_tree(tree)
import pyparsing as pp
from functools import lru_cache
def paren_exp(keyword, contents):
    """Match ``keyword(contents)``; the parentheses are suppressed."""
    return pp.Keyword(keyword)('op') + pp.Suppress('(') + contents + pp.Suppress(')')
def cfg_exp():
    """Grammar for a rust ``cfg(...)`` expression: nested any/all/not
    operators over bare options and ``name = "value"`` assignments."""
    option = pp.Word(pp.alphanums + '_')('option')
    exp = pp.Forward()  # forward declaration: expressions nest recursively
    assign = (option + pp.Suppress("=") + pp.QuotedString('"')('value'))('assign')
    any_exp = paren_exp('any', pp.delimitedList(exp, delim=','))
    all_exp = paren_exp('all', pp.delimitedList(exp, delim=','))
    not_exp = paren_exp('not', exp)
    exp << pp.Group(any_exp | all_exp | not_exp | assign | option)
    return paren_exp('cfg', exp)
def multiarch_tuple():
    """Grammar for an architecture tuple such as ``x86_64-linux-gnu``:
    two mandatory dash-separated parts, then up to two optional ones."""
    word = pp.Word(pp.alphanums + '_')
    opt = pp.Optional(pp.Suppress('-') + word)
    tup = (word('a') + pp.Suppress('-') + word('b') + opt('c') + opt('d'))('archtuple')
    return tup
@lru_cache()
def cfg_grammar():
    """Return the complete grammar (built once, cached): a ``cfg()``
    expression or a multiarch tuple, anchored at end of input."""
    grammar = (cfg_exp() | multiarch_tuple()) + pp.stringEnd()
    return grammar
def dump_tree(t, level=0, evalf=None):
    """Recursively pretty-print a parse tree, indenting by depth; when
    `evalf` is given, annotate each structural node with ``[evalf(node)]``."""
    print('{}structure {}{}{}{}'.format(' '*level, t.getName(),
                                        ' [' if evalf else '',
                                        evalf(t) if evalf else '',
                                        ']' if evalf else ''))
    for item in t:
        if isinstance(item, str):
            print('{}{!r}'.format(' '*(level+1), item))
        else:
            dump_tree(item, level+1, evalf=evalf)
class Evaluator:
    """Evaluate cfg expressions

    From rust docs:

    Configuration options are boolean (on or off) and are named
    either with a single identifier (e.g. foo) or an identifier and
    a string (e.g. foo = "bar"; the quotes are required and spaces
    around the = are unimportant). Note that similarly-named
    options, such as foo, foo="bar" and foo="baz" may each be set or
    unset independently.
    """
    def __init__(self, options=()):
        # enabled flags: plain strings for boolean options,
        # (name, value) tuples for assignments
        self.options = options
    def eval_tree(self, tree):
        """Recursively evaluate a parse tree from cfg_grammar()."""
        kind = tree.getName()
        assert kind
        if kind == 'option':
            return tree.option in self.options
        elif kind == 'assign':
            option = tree.option, tree.value
            return option in self.options
        elif kind == 'op':
            op = tree[0]
            if op == 'cfg':
                assert(len(tree) == 2)
                return self.eval_tree(tree[1])
            if op == 'any':
                assert(len(tree) >= 2)
                return any(self.eval_tree(item) for item in tree[1:])
            if op == 'all':
                assert(len(tree) >= 2)
                return all(self.eval_tree(item) for item in tree[1:])
            if op == 'not':
                assert(len(tree) == 2)
                return not self.eval_tree(tree[1])
            assert False, f'Unknown operator {op}'
        elif kind == 'archtuple':
            # hard-coded: a tuple matches when any component is 'linux'
            return 'linux' in list(tree)
        else:
            assert False, f'Unknown element {kind}'
    @classmethod
    def platform(cls):
        """An Evaluator populated with some platform options

        I don't see a list that'd specify what the allowed options
        are, so this is culled from deps used in crates packaged in
        Fedora 28.
        """
        return cls(options=('unix',
                            ('target_os', 'linux')))
    def parse_and_eval(self, string):
        """Parse `string` with the cached grammar and evaluate it."""
        g = cfg_grammar()
        t = g.parseString(string)
        return self.eval_tree(t)
# type annotations
from __future__ import annotations
from typing import Optional
# standard libs
import logging
from threading import Thread
from queue import Queue, Empty
# internal libs
from .database.message import Message, publish
# initialize module level logger
log = logging.getLogger(__name__)
# shared parameters
DEFAULT_BATCHSIZE: int = 10  # messages accumulated before a single commit
DEFAULT_TIMEOUT: float = 5.0  # seconds to wait on the queue before flushing
class QueueThread(Thread):
    """Enqueue and flush messages to the database."""
    # Class-level defaults; every instance overwrites these in __init__.
    queue: Optional[Queue] = None  # source of raw message dicts
    timeout: Optional[float] = None  # seconds to block on queue.get
    batchsize: Optional[int] = None  # max messages per publish() call
    terminated: bool = False  # set True by terminate() to stop run()
    def __init__(self, queue: Queue, batchsize: int = DEFAULT_BATCHSIZE,
                 timeout: float = DEFAULT_TIMEOUT) -> None:
        """Initialize publishing thread.

        Args:
            queue (Queue): Source of message dictionaries.
            batchsize (int): Number of messages to accumulate per commit.
            timeout (float): Seconds to wait on the queue before flushing.
        """
        self.queue = queue
        self.timeout = float(timeout)
        self.batchsize = batchsize
        super().__init__(daemon=True)  # daemon: don't block interpreter exit
    def run(self) -> None:
        """Get messages from the queue and publish to the database."""
        log.debug('starting publisher-thread')
        messages = []
        while not self.terminated:
            try:
                messages.clear()
                # Collect up to `batchsize` messages; queue.get raises Empty
                # after `timeout` seconds, which ends the batch early.
                for count in range(self.batchsize):
                    if not self.terminated:
                        message = Message(**self.queue.get(timeout=self.timeout))
                        messages.append(message)
            except Empty:
                pass  # timeout expired: fall through and flush what we have
            finally:
                if messages:
                    publish(messages)
                    log.info(f'added {len(messages)} messages')
                    # One task_done() per consumed item so Queue.join()
                    # (used by Publisher.stop) can unblock.
                    for count, _ in enumerate(messages):
                        self.queue.task_done()
    def terminate(self) -> None:
        """Signal to shut down the thread."""
        log.debug('stopping publisher-thread')
        self.terminated = True
class Publisher:
    """
    A Publisher defines the interface for writing messages to the stream.

    Example:
        >>> with Publisher(batchsize=10) as stream:
        ...     stream.write('hello, world!', topic='example', level='INFO')
    """
    queue: Queue = None
    topic: str = None
    thread: QueueThread = None
    batchsize: Optional[int] = DEFAULT_BATCHSIZE
    timeout: Optional[float] = DEFAULT_TIMEOUT
    def __init__(self, topic: str = None, level: str = None,
                 batchsize: int = DEFAULT_BATCHSIZE, timeout: float = DEFAULT_TIMEOUT) -> None:
        """
        Initialize publisher.

        Args:
            topic (str): Default topic name (optional).
            level (str): Default level name (optional).
            batchsize (int): Number of messages to accumulate before
                committing. Default to `DEFAULT_BATCHSIZE`.
            timeout (float): Seconds to wait on new messages before
                committing. Default to `DEFAULT_TIMEOUT`.
        """
        self.topic = str(topic) if topic is not None else None
        self.level = str(level) if level is not None else None
        self.timeout = float(timeout)
        # bounded queue: producers block rather than growing without limit
        self.queue = Queue(maxsize=2 * batchsize)
        self.thread = QueueThread(queue=self.queue, batchsize=batchsize, timeout=self.timeout)
    def start(self) -> None:
        """Start subscription threads."""
        self.thread.start()
    def stop(self) -> None:
        """Terminate all threads."""
        self.queue.join()  # wait for every queued message to be committed
        self.thread.terminate()
        self.thread.join()
    def __enter__(self) -> Publisher:
        """Start all threads."""
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Join all threads and stop."""
        self.stop()
    def write(self, text: str, level: str = None, topic: str = None) -> None:
        """
        Publish a message.

        Args:
            text: Message text.
            topic: Message topic (optional if specified globally).
            level: Message level (optional if specified globally).
        """
        message = {'text': str(text),
                   'topic': str(topic) if topic is not None else self.topic,
                   'level': str(level) if level is not None else self.level}
        self.queue.put(message)
from __future__ import annotations
from typing import Optional
# standard libs
import logging
from threading import Thread
from queue import Queue, Empty
# internal libs
from .database.message import Message, publish
# initialize module level logger
log = logging.getLogger(__name__)
# shared parameters
DEFAULT_BATCHSIZE: int = 10  # messages accumulated before a single commit
DEFAULT_TIMEOUT: float = 5.0  # seconds to wait on the queue before flushing
class QueueThread(Thread):
    """Enqueue and flush messages to the database."""
    # Class-level defaults; instances overwrite these in __init__.
    queue: Optional[Queue] = None  # source of raw message dicts
    timeout: Optional[float] = None  # seconds to block on queue.get
    batchsize: Optional[int] = None  # max messages per publish() call
    terminated: bool = False  # set True by terminate() to stop run()
    def __init__(self, queue: Queue, batchsize: int = DEFAULT_BATCHSIZE,
                 timeout: float = DEFAULT_TIMEOUT) -> None:
        """Initialize publishing thread."""
        self.queue = queue
        self.timeout = float(timeout)
        self.batchsize = batchsize
        super().__init__(daemon=True)  # daemon: don't block interpreter exit
    def run(self) -> None:
        """Get messages from the queue and publish to the database."""
        log.debug('starting publisher-thread')
        messages = []
        while not self.terminated:
            try:
                messages.clear()
                # collect up to `batchsize` messages; Empty after `timeout`
                # seconds ends the batch early
                for count in range(self.batchsize):
                    if not self.terminated:
                        message = Message(**self.queue.get(timeout=self.timeout))
                        messages.append(message)
            except Empty:
                pass  # timeout expired: flush whatever was collected
            finally:
                if messages:
                    publish(messages)
                    log.info(f'added {len(messages)} messages')
                    # one task_done() per consumed item so Queue.join()
                    # in Publisher.stop can unblock
                    for count, _ in enumerate(messages):
                        self.queue.task_done()
    def terminate(self) -> None:
        """Signal to shut down the thread."""
        log.debug('stopping publisher-thread')
        self.terminated = True
class Publisher:
    """
    A Publisher defines the interface for writing messages to the stream.

    Example:
        >>> with Publisher(batchsize=10) as stream:
        ...     stream.write('hello, world!', topic='example', level='INFO')
    """
    # Class-level defaults; instances overwrite these in __init__.
    queue: Queue = None
    topic: str = None
    thread: QueueThread = None
    batchsize: Optional[int] = DEFAULT_BATCHSIZE
    timeout: Optional[float] = DEFAULT_TIMEOUT
    def __init__(self, topic: str = None, level: str = None,
                 batchsize: int = DEFAULT_BATCHSIZE, timeout: float = DEFAULT_TIMEOUT) -> None:
        """
        Initialize publisher.

        Args:
            topic (str):
                Default topic name (optional).
            level (str):
                Default level name (optional).
            batchsize (int):
                Number of messages to accumulate before committing.
                Default to `DEFAULT_BATCHSIZE`.
            timeout (float):
                Seconds to wait on new messages before committing.
                Default to `DEFAULT_TIMEOUT`.
        """
        self.topic = None if topic is None else str(topic)
        self.level = None if level is None else str(level)
        # bounded queue: producers block instead of growing without limit
        self.queue = Queue(maxsize=2*batchsize)
        self.timeout = float(timeout)
        self.thread = QueueThread(queue=self.queue, batchsize=batchsize, timeout=self.timeout)
    def start(self) -> None:
        """Start subscription threads."""
        self.thread.start()
    def stop(self) -> None:
        """Terminate all threads."""
        self.queue.join()  # wait until every queued message has been committed
        self.thread.terminate()
        self.thread.join()
    def __enter__(self) -> Publisher:
        """Start all threads."""
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Join all threads and stop."""
        self.stop()
    def write(self, text: str, level: str = None, topic: str = None) -> None:
        """
        Publish a message.

        Args:
            text:
                Message text.
            topic:
                Message topic (optional if specified globally).
            level:
                Message level (optional if specified globally).
        """
        self.queue.put({'text': str(text),
                        'topic': self.topic if topic is None else str(topic),
                        'level': self.level if level is None else str(level)})
from inspect import signature, Parameter
import numpy as np
from scipy.stats import rankdata
from sklearn.utils.validation import check_array, _is_arraylike
from ...base import (
MultiAnnotatorPoolQueryStrategy,
SingleAnnotatorPoolQueryStrategy,
)
from ...utils import (
rand_argmax,
check_type,
MISSING_LABEL,
majority_vote,
check_random_state,
check_scalar,
)
class SingleAnnotatorWrapper(MultiAnnotatorPoolQueryStrategy):
    """SingleAnnotatorWrapper

    Implementation of a wrapper class for pool-based active
    learning query strategies with a single annotator such that it transforms
    the query strategy for the single annotator into a query strategy for
    multiple annotators by choosing an annotator randomly or according to the
    parameter `A_perf` and setting the labeled matrix to a labeled vector by an
    aggregation function, e.g., majority voting.

    Parameters
    ----------
    strategy : SingleAnnotatorPoolQueryStrategy
        An active learning strategy for a single annotator.
    y_aggregate : callable, optional (default=None)
        `y_aggregate` is used to transform `y` as a matrix of shape
        (n_samples, n_annotators) into a vector of shape (n_samples) during
        the querying process and is then passed to the given `strategy`.
        If `y_aggregate is None` and `y` is used in the strategy,
        majority_vote is used as `y_aggregate`.
    missing_label : scalar or string or np.nan or None, optional
        (default=np.nan)
        Value to represent a missing label.
    random_state : int or RandomState instance, optional (default=None)
        Controls the randomness of the estimator.
    """

    def __init__(
        self,
        strategy,
        y_aggregate=None,
        missing_label=MISSING_LABEL,
        random_state=None,
    ):
        super().__init__(random_state=random_state, missing_label=missing_label)
        # Parameters are only stored here; validation happens in `query`
        # (scikit-learn convention).
        self.strategy = strategy
        self.y_aggregate = y_aggregate

    def query(
        self,
        X,
        y,
        candidates=None,
        annotators=None,
        batch_size=1,
        query_params_dict=None,
        n_annotators_per_sample=1,
        A_perf=None,
        return_utilities=False,
    ):
        """Determines which candidate sample is to be annotated by which
        annotator. The samples are first and primarily ranked by the given
        strategy as if one unspecified annotator where to annotate the sample.
        Then for each sample the sample-annotator pairs are ranked based either
        on previously set preferences or at random.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data set, usually complete, i.e., including the labeled
            and unlabeled samples.
        y : array-like of shape (n_samples, n_annotators)
            Labels of the training data set for each annotator (possibly
            including unlabeled ones indicated by self.MISSING_LABEL), meaning
            that `y[i, j]` contains the label annotated by annotator `i` for
            sample `j`.
        candidates : None or array-like of shape (n_candidates), dtype=int or
            array-like of shape (n_candidates, n_features),
            optional (default=None)
            If `candidates` is None, the samples from (X,y), for which an
            annotator exists such that the annotator sample pairs is
            unlabeled are considered as sample candidates.
            If `candidates` is of shape (n_candidates) and of type int,
            candidates is considered as the indices of the sample candidates in
            (X,y).
            If `candidates` is of shape (n_candidates, n_features), the
            sample candidates are directly given in candidates (not necessarily
            contained in X). This is not supported by all query strategies.
        annotators : array-like of shape (n_candidates, n_annotators), optional
            (default=None)
            If `annotators` is None, all annotators are considered as available
            annotators.
            If `annotators` is of shape (n_avl_annotators) and of type int,
            `annotators` is considered as the indices of the available
            annotators.
            If candidate samples and available annotators are specified:
            The annotator sample pairs, for which the sample is a candidate
            sample and the annotator is an available annotator are considered
            as candidate annotator-sample-pairs.
            If `annotators` is None and `candidates` is of shape
            (n_candidates), all annotator sample pairs, for which the sample is
            indexed by `candidates` are considered as candidate
            annotator-sample-pairs.
            If `annotators` is a boolean array of shape (n_candidates,
            n_avl_annotators) the annotator sample pairs, for which the sample
            is a candidate sample and the boolean matrix has entry `True` are
            considered as candidate sample pairs.
        batch_size : int, optional (default=1)
            The number of annotators sample pairs to be selected in one AL
            cycle.
        query_params_dict : dict, optional (default=None)
            Dictionary for the parameters of the query method besides `X` and
            the transformed `y`.
        A_perf : array-like, shape (n_samples, n_annotators) or
            (n_annotators,) optional (default=None)
            The performance based ranking of each annotator.
            1.) If `A_perf` is of shape (n_samples, n_annotators) for each
             sample `i` the value-annotators pair `(i, j)` is chosen
             over the pair `(i, k)` if `A_perf[i, j]` is greater or
             equal to `A_perf[i, k]`.
            2.) If `A_perf` is of shape (n_annotators,) for each sample
             `i` the value-annotators pair `(i, j)` is chosen over
             the pair `(i, k)` if `A_perf[j]` is greater or
             equal to `A_perf[k]`.
            3.) If `A_perf` is None, the annotators are chosen at random, with
             a different distribution for each sample.
        return_utilities : bool, optional (default=False)
            If true, also returns the utilities based on the query strategy.
        n_annotators_per_sample : int, array-like, optional (default=1)
            array-like of shape (k,), k <= n_samples
            If `n_annotators_per_sample` is an int, the value indicates
            the number of annotators that are preferably assigned to a
            candidate sample, selected by the query_strategy.
            `Preferably` in this case means depending on how many annotators
            can be assigned to a given candidate sample and how many
            annotator-sample pairs should be assigned considering the
            `batch_size`.
            If `n_annotators_per_sample` is an int array, the values of the
            array are interpreted as follows. The value at the i-th index
            determines the preferred number of annotators for the candidate
            sample at the i-th index in the ranking of the batch.
            The ranking of the batch is given by the `strategy`
            (SingleAnnotatorPoolQueryStrategy). The last index
            of the n_annotators_per_sample array (k-1) indicates the
            preferred number of annotators for all candidate sample at an index
            greater of equal to k-1.

        Returns
        -------
        query_indices : np.ndarray of shape (batchsize, 2)
            The query_indices indicate which candidate sample pairs are to be
            queried is, i. e. which candidate sample is to be annotated by
            which annotator, e.g., `query_indices[:, 0]` indicates the selected
            candidate samples and `query_indices[:, 1]` indicates the
            respectively selected annotators.
        utilities: np.ndarray of shape (batch_size, n_samples, n_annotators) or
            np.ndarray of shape (batch_size, n_candidates, n_annotators)
            The utilities of all candidate samples w.r.t. to the available
            annotators after each selected sample of the batch, e.g.,
            `utilities[0, :, j]` indicates the utilities used for selecting
            the first sample-annotator-pair (with indices `query_indices[0]`).
            If `candidates` is None or of shape (n_candidates,), the indexing
            refers to samples in `X`.
            If `candidates` is of shape (n_candidates, n_features), the
            indexing refers to samples in candidates.
        """
        (
            X,
            y,
            candidates,
            annotators,
            batch_size,
            return_utilities,
        ) = super()._validate_data(
            X,
            y,
            candidates,
            annotators,
            batch_size,
            return_utilities,
            reset=True,
        )
        X_cand, mapping, A_cand = self._transform_cand_annot(
            candidates, annotators, X, y
        )
        random_state = self.random_state_
        # check strategy
        check_type(
            self.strategy, "self.strategy", SingleAnnotatorPoolQueryStrategy
        )
        # check query_params_dict
        if query_params_dict is None:
            query_params_dict = {}
        check_type(query_params_dict, "query_params_dict", dict)
        # aggregate y
        if self.y_aggregate is None:
            y_aggregate = lambda y: majority_vote(y, random_state=random_state)
        else:
            y_aggregate = self.y_aggregate
        if not callable(y_aggregate):
            raise TypeError(
                f"`self.y_aggregate` must be callable. "
                f"`self.y_aggregate` is of type {type(y_aggregate)}"
            )
        # count the number of arguments that have no default value
        n_free_params = len(
            list(
                filter(
                    lambda x: x.default == Parameter.empty,
                    signature(y_aggregate).parameters.values(),
                )
            )
        )
        if n_free_params != 1:
            raise TypeError(
                f"The number of free parameters of the callable has to "
                f"equal one. "
                f"The number of free parameters is {n_free_params}."
            )
        # collapse the (n_samples, n_annotators) label matrix to a vector
        # for the wrapped single-annotator strategy
        y_sq = y_aggregate(y)
        n_candidates = X_cand.shape[0]
        n_annotators = A_cand.shape[1]
        n_samples = X.shape[0]
        # cannot query more distinct samples than there are candidates
        batch_size_sq = min(batch_size, X_cand.shape[0])
        # check n_annotators_per_sample and set pref_n_annotators
        if isinstance(n_annotators_per_sample, (int, np.int_)):
            check_scalar(
                n_annotators_per_sample,
                name="n_annotators_per_sample",
                target_type=int,
                min_val=1,
            )
            pref_n_annotators = n_annotators_per_sample * np.ones(batch_size_sq)
        elif _is_arraylike(n_annotators_per_sample):
            pref_n_annotators = check_array(
                n_annotators_per_sample, ensure_2d=False
            )
            if pref_n_annotators.ndim != 1:
                raise ValueError(
                    "n_annotators_per_sample, if an array, must be of dim "
                    f"1 but, it is of dim {pref_n_annotators.ndim}"
                )
            else:
                # truncate, or pad by repeating the last entry, to match
                # the (possibly reduced) batch size
                pref_length = pref_n_annotators.shape[0]
                if pref_length > batch_size_sq:
                    pref_n_annotators = pref_n_annotators[:batch_size_sq]
                if pref_length < batch_size_sq:
                    appended = pref_n_annotators[-1] * np.ones(
                        batch_size_sq - pref_length
                    )
                    pref_n_annotators = np.append(pref_n_annotators, appended)
        else:
            raise TypeError(
                "n_annotators_per_sample must be array like " "or an integer"
            )
        # check A_perf and set annotator_utilities
        if A_perf is None:
            # random, per-sample annotator preferences in [0, 1)
            annotator_utilities = random_state.rand(
                1, n_candidates, n_annotators
            ).repeat(batch_size_sq, axis=0)
        elif _is_arraylike(A_perf):
            A_perf = check_array(A_perf, ensure_2d=False)
            # ensure A_perf lies in [0, 1)
            if A_perf.min() != A_perf.max():
                A_perf = (
                    1
                    / (A_perf.max() - A_perf.min() + 1)
                    * (A_perf - A_perf.min())
                )
            else:
                A_perf = np.zeros_like(A_perf, dtype=float)
            if A_perf.shape == (n_candidates, n_annotators):
                annotator_utilities = A_perf[np.newaxis, :, :].repeat(
                    batch_size_sq, axis=0
                )
            elif A_perf.shape == (n_annotators,):
                annotator_utilities = (
                    A_perf[np.newaxis, np.newaxis, :]
                    .repeat(n_candidates, axis=1)
                    .repeat(batch_size_sq, axis=0)
                )
            else:
                raise ValueError(
                    f"`A_perf` is of shape {A_perf.shape}, but must be of "
                    f"shape ({n_candidates}, {n_annotators}) or of shape "
                    f"({n_annotators},)."
                )
        else:
            raise TypeError(
                f"`A_perf` is of type {type(A_perf)}, but must be array like "
                f"or of type None."
            )
        # delegate the sample ranking to the wrapped strategy
        candidates_sq = mapping if mapping is not None else X_cand
        re_val = self.strategy.query(
            X=X,
            y=y_sq,
            candidates=candidates_sq,
            **query_params_dict,
            batch_size=batch_size_sq,
            return_utilities=True,
        )
        single_query_indices, w_utilities = re_val
        if mapping is None:
            sample_utilities = w_utilities
        else:
            sample_utilities = w_utilities[:, mapping]
        re_val = self._query_annotators(
            A_cand,
            batch_size,
            sample_utilities,
            annotator_utilities,
            return_utilities,
            pref_n_annotators,
        )
        if mapping is None:
            return re_val
        elif return_utilities:
            # re-embed candidate-local indices/utilities into the full
            # sample space of X
            w_indices, w_utilities = re_val
            utilities = np.full((batch_size, n_samples, n_annotators), np.nan)
            utilities[:, mapping, :] = w_utilities
            indices = np.zeros_like(w_indices)
            indices[:, 0] = mapping[w_indices[:, 0]]
            indices[:, 1] = w_indices[:, 1]
            return indices, utilities
        else:
            w_indices = re_val
            indices = np.zeros_like(w_indices)
            indices[:, 0] = mapping[w_indices[:, 0]]
            indices[:, 1] = w_indices[:, 1]
            return indices

    def _query_annotators(
        self,
        A_cand,
        batch_size,
        sample_utilities,
        annotator_utilities,
        return_utilities,
        pref_n_annotators,
    ):
        """Greedily select `batch_size` sample-annotator pairs from the
        combined sample and annotator utilities, assigning up to
        `pref_n_annotators` annotators per sample."""
        random_state = check_random_state(self.random_state)
        n_annotators = A_cand.shape[1]
        n_samples = A_cand.shape[0]
        re_val = self._get_order_preserving_s_query(
            A_cand, sample_utilities, annotator_utilities
        )
        s_indices, s_utilities = re_val
        n_as_annotators = self._n_to_assign_annotators(
            batch_size, A_cand, s_indices, pref_n_annotators
        )
        utilities = np.zeros((batch_size, n_samples, n_annotators))
        query_indices = np.zeros((batch_size, 2), dtype=int)
        batch_index = 0  # actual batch index
        annotator_ps = 0  # current annotators per sample
        sample_index = 0  # sample batch index
        while batch_index < batch_size:
            utilities[batch_index] = s_utilities[sample_index]
            query_indices[batch_index] = rand_argmax(
                utilities[batch_index], random_state=random_state
            )
            # mark the chosen pair as unavailable for all later selections
            s_utilities[
                :, query_indices[batch_index, 0], query_indices[batch_index, 1]
            ] = np.nan
            batch_index += 1
            annotator_ps += 1
            # move to the next ranked sample once its annotator quota is met
            if annotator_ps >= n_as_annotators[sample_index]:
                sample_index += 1
                annotator_ps = 0
        if return_utilities:
            return query_indices, utilities
        else:
            return query_indices

    @staticmethod
    def _get_order_preserving_s_query(
        A, candidate_utilities, annotator_utilities
    ):
        """Combine sample and annotator utilities without disturbing the
        sample ranking.

        Sample utilities are replaced by their integer ordinal ranks;
        annotator utilities lie in [0, 1) (see `query`), so adding them
        breaks ties between annotators but cannot reorder samples.
        Note: both utility arrays are modified in place.
        """
        nan_indices = np.argwhere(np.isnan(candidate_utilities))
        # temporarily replace NaNs so rankdata puts them at the bottom
        candidate_utilities[nan_indices[:, 0], nan_indices[:, 1]] = -np.inf
        # prepare candidate_utilities
        candidate_utilities = rankdata(
            candidate_utilities, method="ordinal", axis=1
        ).astype(float)
        # calculate indices of maximum sample
        indices = np.argmax(candidate_utilities, axis=1)
        candidate_utilities[nan_indices[:, 0], nan_indices[:, 1]] = np.nan
        # annotator-sample pairs without an available annotator are excluded
        annotator_utilities[:, A == 0] = np.nan
        # combine utilities by addition
        utilities = candidate_utilities[:, :, np.newaxis] + annotator_utilities
        return indices, utilities

    @staticmethod
    def _n_to_assign_annotators(batch_size, A, s_indices, pref_n_annotators):
        """Determine how many annotators each chosen sample receives.

        Starts from the preferred counts (capped by the number of available
        annotators per sample) and raises all counts uniformly until at
        least `batch_size` annotator-sample pairs can be formed.
        """
        n_max_annotators = np.sum(A, axis=1)
        n_max_chosen_annotators = n_max_annotators[s_indices]
        annot_per_sample = np.minimum(
            n_max_chosen_annotators, pref_n_annotators
        )
        n_annotator_sample_pairs = np.sum(annot_per_sample)
        # NOTE(review): assumes batch_size <= total number of available
        # pairs; otherwise this loop would not terminate -- presumably
        # guaranteed by the validation in `query`; confirm against
        # _validate_data.
        while n_annotator_sample_pairs < batch_size:
            annot_per_sample = np.minimum(
                n_max_chosen_annotators, annot_per_sample + 1
            )
            n_annotator_sample_pairs = np.sum(annot_per_sample)
        return annot_per_sample
import numpy as np
from scipy.stats import rankdata
from sklearn.utils.validation import check_array, _is_arraylike
from ...base import (
MultiAnnotatorPoolQueryStrategy,
SingleAnnotatorPoolQueryStrategy,
)
from ...utils import (
rand_argmax,
check_type,
MISSING_LABEL,
majority_vote,
check_random_state,
check_scalar,
)
class SingleAnnotatorWrapper(MultiAnnotatorPoolQueryStrategy):
"""SingleAnnotatorWrapper
Implementation of a wrapper class for pool-based active
learning query strategies with a single annotator such that it transforms
the query strategy for the single annotator into a query strategy for
multiple annotators by choosing an annotator randomly or according to the
parameter `A_pef` and setting the labeled matrix to a labeled vector by an
aggregation function, e.g., majority voting.
Parameters
----------
strategy : SingleAnnotatorPoolQueryStrategy
An active learning strategy for a single annotator.
y_aggregate : callable, optional (default=None)
`y_aggregate` is used to transform `y` as a matrix of shape
(n_samples, n_annotators) into a vector of shape (n_samples) during
the querying process and is then passed to the given `strategy`.
If `y_aggregate is None` and `y` is used in the strategy,
majority_vote is used as `y_aggregate`.
missing_label : scalar or string or np.nan or None, optional
(default=np.nan)
Value to represent a missing label.
random_state : int or RandomState instance, optional (default=None)
Controls the randomness of the estimator.
"""
def __init__(
self,
strategy,
y_aggregate=None,
missing_label=MISSING_LABEL,
random_state=None,
):
super().__init__(random_state=random_state, missing_label=missing_label)
self.strategy = strategy
self.y_aggregate = y_aggregate
def query(
self,
X,
y,
candidates=None,
annotators=None,
batch_size=1,
query_params_dict=None,
n_annotators_per_sample=1,
A_perf=None,
return_utilities=False,
):
"""Determines which candidate sample is to be annotated by which
annotator. The samples are first and primarily ranked by the given
strategy as if one unspecified annotator where to annotate the sample.
Then for each sample the sample-annotator pairs are ranked based either
on previously set preferences or at random.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data set, usually complete, i.e., including the labeled
and unlabeled samples.
y : array-like of shape (n_samples, n_annotators)
Labels of the training data set for each annotator (possibly
including unlabeled ones indicated by self.MISSING_LABEL), meaning
that `y[i, j]` contains the label annotated by annotator `i` for
sample `j`.
candidates : None or array-like of shape (n_candidates), dtype=int or
array-like of shape (n_candidates, n_features),
optional (default=None)
If `candidates` is None, the samples from (X,y), for which an
annotator exists such that the annotator sample pairs is
unlabeled are considered as sample candidates.
If `candidates` is of shape (n_candidates) and of type int,
candidates is considered as the indices of the sample candidates in
(X,y).
If `candidates` is of shape (n_candidates, n_features), the
sample candidates are directly given in candidates (not necessarily
contained in X). This is not supported by all query strategies.
annotators : array-like of shape (n_candidates, n_annotators), optional
(default=None)
If `annotators` is None, all annotators are considered as available
annotators.
If `annotators` is of shape (n_avl_annotators) and of type int,
`annotators` is considered as the indices of the available
annotators.
If candidate samples and available annotators are specified:
The annotator sample pairs, for which the sample is a candidate
sample and the annotator is an available annotator are considered
as candidate annotator-sample-pairs.
If `annotators` is None and `candidates` is of shape
(n_candidates), all annotator sample pairs, for which the sample is
indexed by `candidates` are considered as candidate
annotator-sample-pairs.
If `annotators` is a boolean array of shape (n_candidates,
n_avl_annotators) the annotator sample pairs, for which the sample
is a candidate sample and the boolean matrix has entry `True` are
considered as candidate sample pairs.
batch_size : int, optional (default=1)
The number of annotators sample pairs to be selected in one AL
cycle.
query_params_dict : dict, optional (default=None)
Dictionary for the parameters of the query method besides `X` and
the transformed `y`.
A_perf : array-like, shape (n_samples, n_annotators) or
(n_annotators,) optional (default=None)
The performance based ranking of each annotator.
1.) If `A_perf` is of shape (n_samples, n_annotators) for each
sample `i` the value-annotators pair `(i, j)` is chosen
over the pair `(i, k)` if `A_perf[i, j]` is greater or
equal to `A_perf[i, k]`.
2.) If `A_perf` is of shape (n_annotators,) for each sample
`i` the value-annotators pair `(i, j)` is chosen over
the pair `(i, k)` if `A_perf[j]` is greater or
equal to `A_perf[k]`.
3.) If `A_perf` is None, the annotators are chosen at random, with
a different distribution for each sample.
return_utilities : bool, optional (default=False)
If true, also returns the utilities based on the query strategy.
n_annotators_per_sample : int, array-like, optional (default=1)
array-like of shape (k,), k <= n_samples
If `n_annotators_per_sample` is an int, the value indicates
the number of annotators that are preferably assigned to a
candidate sample, selected by the query_strategy.
`Preferably` in this case means depending on how many annotators
can be assigned to a given candidate sample and how many
annotator-sample pairs should be assigned considering the
`batch_size`.
If `n_annotators_per_sample` is an int array, the values of the
array are interpreted as follows. The value at the i-th index
determines the preferred number of annotators for the candidate
sample at the i-th index in the ranking of the batch.
The ranking of the batch is given by the `strategy`
(SingleAnnotatorPoolQueryStrategy). The last index
of the n_annotators_per_sample array (k-1) indicates the
preferred number of annotators for all candidate sample at an index
greater of equal to k-1.
Returns
-------
query_indices : np.ndarray of shape (batchsize, 2)
The query_indices indicate which candidate sample pairs are to be
queried is, i. e. which candidate sample is to be annotated by
which annotator, e.g., `query_indices[:, 0]` indicates the selected
candidate samples and `query_indices[:, 1]` indicates the
respectively selected annotators.
utilities: np.ndarray of shape (batch_size, n_samples, n_annotators) or
np.ndarray of shape (batch_size, n_candidates, n_annotators)
The utilities of all candidate samples w.r.t. to the available
annotators after each selected sample of the batch, e.g.,
`utilities[0, :, j]` indicates the utilities used for selecting
the first sample-annotator-pair (with indices `query_indices[0]`).
If `candidates` is None or of shape (n_candidates,), the indexing
refers to samples in `X`.
If `candidates` is of shape (n_candidates, n_features), the
indexing refers to samples in candidates.
"""
(
X,
y,
candidates,
annotators,
batch_size,
return_utilities,
) = super()._validate_data(
X,
y,
candidates,
annotators,
batch_size,
return_utilities,
reset=True,
)
X_cand, mapping, A_cand = self._transform_cand_annot(
candidates, annotators, X, y
)
random_state = self.random_state_
# check strategy
check_type(
self.strategy, "self.strategy", SingleAnnotatorPoolQueryStrategy
)
# check query_params_dict
if query_params_dict is None:
query_params_dict = {}
check_type(query_params_dict, "query_params_dict", dict)
# aggregate y
if self.y_aggregate is None:
y_aggregate = lambda y: majority_vote(y, random_state=random_state)
else:
y_aggregate = self.y_aggregate
if not callable(y_aggregate):
raise TypeError(
f"`self.y_aggregate` must be callable. "
f"`self.y_aggregate` is of type {type(y_aggregate)}"
)
# count the number of arguments that have no default value
n_free_params = len(
list(
filter(
lambda x: x.default == Parameter.empty,
signature(y_aggregate).parameters.values(),
)
)
)
if n_free_params != 1:
raise TypeError(
f"The number of free parameters of the callable has to "
f"equal one. "
f"The number of free parameters is {n_free_params}."
)
y_sq = y_aggregate(y)
n_candidates = X_cand.shape[0]
n_annotators = A_cand.shape[1]
n_samples = X.shape[0]
batch_size_sq = min(batch_size, X_cand.shape[0])
# check n_annotators_per_sample and set pref_n_annotators
if isinstance(n_annotators_per_sample, (int, np.int_)):
check_scalar(
n_annotators_per_sample,
name="n_annotators_per_sample",
target_type=int,
min_val=1,
)
pref_n_annotators = n_annotators_per_sample * np.ones(batch_size_sq)
elif _is_arraylike(n_annotators_per_sample):
pref_n_annotators = check_array(
n_annotators_per_sample, ensure_2d=False
)
if pref_n_annotators.ndim != 1:
raise ValueError(
"n_annotators_per_sample, if an array, must be of dim "
f"1 but, it is of dim {pref_n_annotators.ndim}"
)
else:
pref_length = pref_n_annotators.shape[0]
if pref_length > batch_size_sq:
pref_n_annotators = pref_n_annotators[:batch_size_sq]
if pref_length < batch_size_sq:
appended = pref_n_annotators[-1] * np.ones(
batch_size_sq - pref_length
)
pref_n_annotators = np.append(pref_n_annotators, appended)
else:
raise TypeError(
"n_annotators_per_sample must be array like " "or an integer"
)
# check A_perf and set annotator_utilities
if A_perf is None:
annotator_utilities = random_state.rand(
1, n_candidates, n_annotators
).repeat(batch_size_sq, axis=0)
elif _is_arraylike(A_perf):
A_perf = check_array(A_perf, ensure_2d=False)
# ensure A_perf lies in [0, 1)
if A_perf.min() != A_perf.max():
A_perf = (
1
/ (A_perf.max() - A_perf.min() + 1)
* (A_perf - A_perf.min())
)
else:
A_perf = np.zeros_like(A_perf, dtype=float)
if A_perf.shape == (n_candidates, n_annotators):
annotator_utilities = A_perf[np.newaxis, :, :].repeat(
batch_size_sq, axis=0
)
elif A_perf.shape == (n_annotators,):
annotator_utilities = (
A_perf[np.newaxis, np.newaxis, :]
.repeat(n_candidates, axis=1)
.repeat(batch_size_sq, axis=0)
)
else:
raise ValueError(
f"`A_perf` is of shape {A_perf.shape}, but must be of "
f"shape ({n_candidates}, {n_annotators}) or of shape "
f"({n_annotators},)."
)
else:
raise TypeError(
f"`A_perf` is of type {type(A_perf)}, but must be array like "
f"or of type None."
)
candidates_sq = mapping if mapping is not None else X_cand
re_val = self.strategy.query(
X=X,
y=y_sq,
candidates=candidates_sq,
**query_params_dict,
batch_size=batch_size_sq,
return_utilities=True,
)
single_query_indices, w_utilities = re_val
if mapping is None:
sample_utilities = w_utilities
else:
sample_utilities = w_utilities[:, mapping]
re_val = self._query_annotators(
A_cand,
batch_size,
sample_utilities,
annotator_utilities,
return_utilities,
pref_n_annotators,
)
if mapping is None:
return re_val
elif return_utilities:
w_indices, w_utilities = re_val
utilities = np.full((batch_size, n_samples, n_annotators), np.nan)
utilities[:, mapping, :] = w_utilities
indices = np.zeros_like(w_indices)
indices[:, 0] = mapping[w_indices[:, 0]]
indices[:, 1] = w_indices[:, 1]
return indices, utilities
else:
w_indices = re_val
indices = np.zeros_like(w_indices)
indices[:, 0] = mapping[w_indices[:, 0]]
indices[:, 1] = w_indices[:, 1]
return indices
    def _query_annotators(
        self,
        A_cand,
        batch_size,
        sample_utilities,
        annotator_utilities,
        return_utilities,
        pref_n_annotators,
    ):
        """Greedily select `batch_size` (sample, annotator) pairs.

        Parameters
        ----------
        A_cand : np.ndarray of shape (n_samples, n_annotators)
            Non-zero entries mark annotators that may be queried for the
            respective sample.
        batch_size : int
            Number of (sample, annotator) pairs to select.
        sample_utilities : np.ndarray
            Utilities of the candidate samples, one row per batch step.
        annotator_utilities : np.ndarray
            Utilities of the annotators per sample; mutated in place by
            `_get_order_preserving_s_query` (forbidden pairs become NaN).
        return_utilities : bool
            If True, also return the utilities used at each selection step.
        pref_n_annotators : int or np.ndarray
            Preferred number of annotators to assign per selected sample.

        Returns
        -------
        query_indices : np.ndarray of shape (batch_size, 2)
            Rows of (sample index, annotator index) pairs.
        utilities : np.ndarray of shape (batch_size, n_samples, n_annotators)
            Only returned if `return_utilities` is True.
        """
        random_state = check_random_state(self.random_state)
        n_annotators = A_cand.shape[1]
        n_samples = A_cand.shape[0]
        # Rank the samples and add annotator utilities on top, so that the
        # sample ordering is preserved in the combined score.
        re_val = self._get_order_preserving_s_query(
            A_cand, sample_utilities, annotator_utilities
        )
        s_indices, s_utilities = re_val
        # Decide how many annotators each chosen sample receives so the
        # total number of pairs reaches `batch_size`.
        n_as_annotators = self._n_to_assign_annotators(
            batch_size, A_cand, s_indices, pref_n_annotators
        )
        utilities = np.zeros((batch_size, n_samples, n_annotators))
        query_indices = np.zeros((batch_size, 2), dtype=int)
        batch_index = 0  # actual batch index
        annotator_ps = 0  # current annotators per sample
        sample_index = 0  # sample batch index
        while batch_index < batch_size:
            utilities[batch_index] = s_utilities[sample_index]
            # Best remaining pair; ties broken randomly via `random_state`.
            query_indices[batch_index] = rand_argmax(
                utilities[batch_index], random_state=random_state
            )
            # Invalidate the chosen pair across all batch steps so it can
            # never be selected twice.
            s_utilities[
                :, query_indices[batch_index, 0], query_indices[batch_index, 1]
            ] = np.nan
            batch_index += 1
            annotator_ps += 1
            # Advance to the next sample once it has received its intended
            # number of annotators.
            if annotator_ps >= n_as_annotators[sample_index]:
                sample_index += 1
                annotator_ps = 0
        if return_utilities:
            return query_indices, utilities
        else:
            return query_indices
    @staticmethod
    def _get_order_preserving_s_query(
        A, candidate_utilities, annotator_utilities
    ):
        """Rank candidate utilities and combine them with annotator utilities.

        Replacing raw utilities by their per-row ordinal ranks guarantees
        that adding the (smaller-scale) annotator utilities cannot change
        the relative ordering of the samples.

        NOTE(review): this mutates its inputs — NaN entries of the caller's
        ``candidate_utilities`` are overwritten with ``-inf`` (the NaN
        restore below targets the *new* ranked array returned by
        ``rankdata``), and ``annotator_utilities`` has entries with
        ``A == 0`` set to NaN in place.

        Returns
        -------
        indices : np.ndarray
            Per row, the index of the highest-ranked candidate sample.
        utilities : np.ndarray
            Ranked candidate utilities broadcast over annotators plus the
            annotator utilities; NaN marks unavailable pairs.
        """
        nan_indices = np.argwhere(np.isnan(candidate_utilities))
        # Push NaNs to the bottom so they receive the lowest ordinal ranks.
        candidate_utilities[nan_indices[:, 0], nan_indices[:, 1]] = -np.inf
        # prepare candidate_utilities
        candidate_utilities = rankdata(
            candidate_utilities, method="ordinal", axis=1
        ).astype(float)
        # calculate indices of maximum sample
        indices = np.argmax(candidate_utilities, axis=1)
        # Re-mark the formerly-NaN entries (in the ranked array) as NaN.
        candidate_utilities[nan_indices[:, 0], nan_indices[:, 1]] = np.nan
        annotator_utilities[:, A == 0] = np.nan
        # combine utilities by addition
        utilities = candidate_utilities[:, :, np.newaxis] + annotator_utilities
        return indices, utilities
@staticmethod
def _n_to_assign_annotators(batch_size, A, s_indices, pref_n_annotators):
n_max_annotators = np.sum(A, axis=1)
n_max_chosen_annotators = n_max_annotators[s_indices]
annot_per_sample = np.minimum(
n_max_chosen_annotators, pref_n_annotators
)
n_annotator_sample_pairs = np.sum(annot_per_sample)
while n_annotator_sample_pairs < batch_size:
annot_per_sample = np.minimum(
n_max_chosen_annotators, annot_per_sample + 1
)
n_annotator_sample_pairs = np.sum(annot_per_sample)
return annot_per_sample | 0.946578 | 0.631651 |
import pytest
from homeassistant.components.binary_sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.modbus.const import (
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CONF_BINARY_SENSORS,
CONF_INPUT_TYPE,
CONF_INPUTS,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE_CLASS,
CONF_NAME,
CONF_SLAVE,
STATE_OFF,
STATE_ON,
)
from .conftest import base_config_test, base_test
@pytest.mark.parametrize("do_discovery", [False, True])
@pytest.mark.parametrize(
    "do_options",
    [
        {},
        {
            CONF_SLAVE: 10,
            CONF_INPUT_TYPE: CALL_TYPE_DISCRETE,
            CONF_DEVICE_CLASS: "door",
        },
    ],
)
async def test_config_binary_sensor(hass, do_discovery, do_options):
    """Run test for binary sensor."""
    sensor_name = "test_sensor"
    # Base config plus the optional parametrized entries.
    config_sensor = {CONF_NAME: sensor_name, CONF_ADDRESS: 51}
    config_sensor.update(do_options)
    await base_config_test(
        hass,
        config_sensor,
        sensor_name,
        SENSOR_DOMAIN,
        CONF_BINARY_SENSORS,
        CONF_INPUTS,
        method_discovery=do_discovery,
    )
@pytest.mark.parametrize("do_type", [CALL_TYPE_COIL, CALL_TYPE_DISCRETE])
@pytest.mark.parametrize(
    "regs,expected",
    [
        ([0xFF], STATE_ON),
        ([0x01], STATE_ON),
        ([0x00], STATE_OFF),
        ([0x80], STATE_OFF),
        ([0xFE], STATE_OFF),
    ],
)
async def test_all_binary_sensor(hass, do_type, regs, expected):
    """Run test for given config."""
    sensor_name = "modbus_test_binary_sensor"
    sensor_config = {
        CONF_NAME: sensor_name,
        CONF_ADDRESS: 1234,
        CONF_INPUT_TYPE: do_type,
    }
    state = await base_test(
        hass,
        sensor_config,
        sensor_name,
        SENSOR_DOMAIN,
        CONF_BINARY_SENSORS,
        CONF_INPUTS,
        regs,
        expected,
        method_discovery=True,
        scan_interval=5,
    )
    assert state == expected
from homeassistant.components.binary_sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.modbus.const import (
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CONF_BINARY_SENSORS,
CONF_INPUT_TYPE,
CONF_INPUTS,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE_CLASS,
CONF_NAME,
CONF_SLAVE,
STATE_OFF,
STATE_ON,
)
from .conftest import base_config_test, base_test
@pytest.mark.parametrize("do_discovery", [False, True])
@pytest.mark.parametrize(
"do_options",
[
{},
{
CONF_SLAVE: 10,
CONF_INPUT_TYPE: CALL_TYPE_DISCRETE,
CONF_DEVICE_CLASS: "door",
},
],
)
async def test_config_binary_sensor(hass, do_discovery, do_options):
"""Run test for binary sensor."""
sensor_name = "test_sensor"
config_sensor = {
CONF_NAME: sensor_name,
CONF_ADDRESS: 51,
**do_options,
}
await base_config_test(
hass,
config_sensor,
sensor_name,
SENSOR_DOMAIN,
CONF_BINARY_SENSORS,
CONF_INPUTS,
method_discovery=do_discovery,
)
@pytest.mark.parametrize("do_type", [CALL_TYPE_COIL, CALL_TYPE_DISCRETE])
@pytest.mark.parametrize(
"regs,expected",
[
(
[0xFF],
STATE_ON,
),
(
[0x01],
STATE_ON,
),
(
[0x00],
STATE_OFF,
),
(
[0x80],
STATE_OFF,
),
(
[0xFE],
STATE_OFF,
),
],
)
async def test_all_binary_sensor(hass, do_type, regs, expected):
"""Run test for given config."""
sensor_name = "modbus_test_binary_sensor"
state = await base_test(
hass,
{CONF_NAME: sensor_name, CONF_ADDRESS: 1234, CONF_INPUT_TYPE: do_type},
sensor_name,
SENSOR_DOMAIN,
CONF_BINARY_SENSORS,
CONF_INPUTS,
regs,
expected,
method_discovery=True,
scan_interval=5,
)
assert state == expected | 0.507812 | 0.332635 |
import os
#os.add_dll_directory(os.path.join(os.environ['CUDA_PATH'], 'bin'))
import dlib
import cv2
import numpy as np
from .IDetect import IDetect
from .core import Core
class FaceDetectCV2(Core, IDetect):
    """Face detector based on OpenCV's DNN ResNet-10 SSD Caffe model.

    Reads frames from the video source provided by ``Core`` and draws a
    rectangle around every detected face.
    """

    # Minimum confidence for a detection to be accepted.
    CONFIDENCE_THRESHOLD = 0.73

    def __init__(self, source, method):
        """Load the DNN model files and try to enable CUDA acceleration.

        source -- video source handed to ``Core``.
        method -- detection method identifier (stored for callers).
        """
        super().__init__(source=source)
        self.face = []
        self.bboxes = []
        self.method = method
        # dlib 68-landmark predictor (kept for other detection methods).
        self.pose_predictor = dlib.shape_predictor(
            r"../helper/dat/shape_predictor_68_face_landmarks.dat"
        )
        self.modelFile = "../helper/dat/res10_300x300_ssd_iter_140000.caffemodel"
        self.configFile = "../helper/dat/res10_300x300_ssd_iter_140000.prototxt"
        self.net = cv2.dnn.readNetFromCaffe(self.configFile, self.modelFile)
        try:
            self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
            self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
        except Exception:
            # Bug fix: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt. OpenCV falls back to CPU here.
            print("GPU Ayarlanamıyor!")
    # end

    def faceDetect(self, image=np.zeros([10, 10])):
        """Detect faces on ``image`` and draw their bounding boxes.

        Returns a tuple ``(bboxes, face)`` where ``bboxes`` is a list of
        ``(left, top, right, bottom)`` tuples and ``face`` is the annotated
        image as an ``np.ndarray``.
        """
        imgC = image.copy()  # work on a copy so the caller's frame is untouched
        self.face = []
        self.bboxes = []
        h, w = imgC.shape[:2]
        blob = cv2.dnn.blobFromImage(
            cv2.resize(imgC, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0)
        )
        self.net.setInput(blob)
        faces = self.net.forward()
        try:
            for i in range(faces.shape[2]):
                confidence = faces[0, 0, i, 2]
                if confidence > self.CONFIDENCE_THRESHOLD:
                    box = faces[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (left, top, right, bottom) = box.astype("int")
                    imgC = cv2.rectangle(
                        imgC, (left, top), (right, bottom), (50, 200, 200), 2
                    )
                    self.bboxes.append((left, top, right, bottom))
        except Exception:
            # Best effort: keep whatever detections were collected so far
            # (bug fix: narrowed from a bare ``except:``).
            pass
        self.face = np.array(imgC)
        return self.bboxes, self.face
    # end

    def run(self):
        """Show the annotated video stream until 'q' is pressed."""
        while True:
            success, frame = self.vc.read()
            if not success:
                # Bug fix: the success flag was ignored; stop cleanly
                # instead of crashing on a dropped/last frame.
                break
            _, face = self.faceDetect(frame)
            # Bug fix: the original compared an ndarray with ``[]`` which is
            # never True; fall back to the raw frame when there is no image.
            # (The per-frame debug ``print(face)`` was removed.)
            if face is None or face.size == 0:
                face = frame
            cv2.imshow("Ekran", face)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cv2.destroyAllWindows()
        self.vc.release()
    # end
# end::class
#os.add_dll_directory(os.path.join(os.environ['CUDA_PATH'], 'bin'))
import dlib
import cv2
import numpy as np
from .IDetect import IDetect
from .core import Core
class FaceDetectCV2(Core, IDetect):
def __init__(self, source, method):
super().__init__(source = source)
self.face = []
self.bboxes = []
self.method = method
self.pose_predictor = dlib.shape_predictor(r"../helper/dat/shape_predictor_68_face_landmarks.dat")
self.modelFile = "../helper/dat/res10_300x300_ssd_iter_140000.caffemodel"
self.configFile = "../helper/dat/res10_300x300_ssd_iter_140000.prototxt"
self.net = cv2.dnn.readNetFromCaffe(self.configFile, self.modelFile)
try:
self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
except:
print("GPU Ayarlanamıyor!")
#end
def faceDetect(self, image = np.zeros([10,10])):
imgC = image.copy() #Gelen resmin, referans alınmaması için
self.face = []
self.bboxes = []
h, w = imgC.shape[:2];
blob = cv2.dnn.blobFromImage(cv2.resize(imgC, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
self.net.setInput(blob)
faces = self.net.forward()
try:
for i in range(faces.shape[2]):
confidence = faces[0, 0, i, 2]
if confidence > 0.73:
box = faces[0, 0, i, 3:7] * np.array([w, h, w, h])
(left, top, right, bottom) = box.astype("int")
imgC = cv2.rectangle(imgC, (left, top), (right, bottom), (50, 200, 200), 2)
bbox = (left,top,right,bottom)
self.bboxes.append(bbox)
except:
pass
self.face = np.array(imgC)
return self.bboxes, self.face
#end
def run(self):
while True:
success, frame = self.vc.read()
a, face = self.faceDetect(frame)
if face == []:
face = frame
print(face)
cv2.imshow("Ekran", face)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
self.vc.release()
#end
#end::class | 0.206574 | 0.087136 |
from inspect import isclass
from enum import Enum
from sqlalchemy import types
from pulsar.api import ImproperlyConfigured
class ScalarCoercible(object):
    """Mixin for column types that coerce scalar values on assignment."""

    def _coerce(self, value):
        """Coerce *value* into this type's Python representation (abstract)."""
        raise NotImplementedError

    def coercion_listener(self, target, value, oldvalue, initiator):
        """SQLAlchemy attribute 'set' event hook: coerce the incoming value."""
        return self._coerce(value)
class Choice(object):
    """A (code, value) pair that compares equal to its code or to another
    :class:`Choice` with the same code."""

    def __init__(self, code, value):
        self.code = code
        self.value = value

    def __eq__(self, other):
        # Compare codes for Choice instances; otherwise delegate to the
        # other operand's equality against our code.
        return (
            self.code == other.code
            if isinstance(other, Choice)
            else other == self.code
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return 'Choice(code={code}, value={value})'.format(
            code=self.code, value=self.value
        )
class ChoiceType(types.TypeDecorator, ScalarCoercible):
    """SQLAlchemy column type restricted to a fixed set of choices.

    Accepts either a list of ``(code, value)`` pairs or an ``Enum``
    subclass and delegates conversion to the matching implementation.
    """

    impl = types.Unicode(255)

    def __init__(self, choices, impl=None, **kwargs):
        self.choices = choices
        uses_enum = isinstance(choices, type) and issubclass(choices, Enum)
        if uses_enum:
            self.type_impl = EnumTypeImpl(enum_class=choices, **kwargs)
        else:
            self.type_impl = ChoiceTypeImpl(choices=choices, **kwargs)
        if impl:
            # Accept either a type class or a ready-made instance.
            self.impl = impl() if isclass(impl) else impl

    @property
    def python_type(self):
        """Mirror the Python type of the underlying column type."""
        return self.impl.python_type

    def _coerce(self, value):
        return self.type_impl._coerce(value)

    def process_bind_param(self, value, dialect):
        """Convert a Python-side value into its stored representation."""
        return self.type_impl.process_bind_param(value, dialect)

    def process_result_value(self, value, dialect):
        """Convert a stored value back to its Python-side representation."""
        return self.type_impl.process_result_value(value, dialect)
class ChoiceTypeImpl(object):
    """The implementation for the ``Choice`` usage."""

    def __init__(self, choices):
        if not choices:
            raise ImproperlyConfigured(
                'ChoiceType needs list of choices defined.'
            )
        self.choices_dict = dict(choices)

    def _coerce(self, value):
        """Wrap a raw code in a :class:`Choice`; pass through None/Choice."""
        if value is None or isinstance(value, Choice):
            return value
        return Choice(value, self.choices_dict[value])

    def process_bind_param(self, value, dialect):
        """Store the underlying code of a :class:`Choice` instance."""
        # Choice defines no __bool__/__len__, so instances are always truthy.
        if isinstance(value, Choice):
            return value.code
        return value

    def process_result_value(self, value, dialect):
        """Re-wrap a stored code into a :class:`Choice` (falsy values pass)."""
        return Choice(value, self.choices_dict[value]) if value else value
class EnumTypeImpl(object):
    """The implementation for the ``Enum`` usage.

    Converts between ``Enum`` members and their stored values; optionally
    also accepts member *names* (case-insensitively) on bind.

    Bug fix: ``None`` checks now use ``is not None`` so members with falsy
    values (e.g. ``0`` or ``""``) round-trip correctly instead of being
    silently dropped to ``None``.
    """

    def __init__(self, enum_class, bind_by_name=True):
        self.enum_class = enum_class
        # When True, plain strings are matched against member names.
        self.bind_by_name = bind_by_name

    def _coerce(self, value):
        """Coerce a raw value into a member of ``enum_class``."""
        return self.enum_class(value) if value is not None else None

    def process_bind_param(self, value, dialect):
        """Convert *value* into the stored (member value) representation.

        Raises ``ValueError`` (from the ``Enum`` constructor) for values
        that are not valid member values.
        """
        if isinstance(value, Enum):
            return value.value
        if self.bind_by_name and isinstance(value, str):
            for member in self.enum_class:
                if member.name.lower() == value.lower():
                    # Return on first match (names are unique per Enum).
                    return member.value
            return None
        if value is not None:
            return self.enum_class(value).value
        return None

    def process_result_value(self, value, dialect):
        """Convert a stored value back into an ``enum_class`` member."""
        return self.enum_class(value) if value is not None else None
from enum import Enum
from sqlalchemy import types
from pulsar.api import ImproperlyConfigured
class ScalarCoercible(object):
def _coerce(self, value):
raise NotImplementedError
def coercion_listener(self, target, value, oldvalue, initiator):
return self._coerce(value)
class Choice(object):
def __init__(self, code, value):
self.code = code
self.value = value
def __eq__(self, other):
if isinstance(other, Choice):
return self.code == other.code
return other == self.code
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.value)
def __repr__(self):
return 'Choice(code={code}, value={value})'.format(
code=self.code,
value=self.value
)
class ChoiceType(types.TypeDecorator, ScalarCoercible):
impl = types.Unicode(255)
def __init__(self, choices, impl=None, **kwargs):
self.choices = choices
if isinstance(choices, type) and issubclass(choices, Enum):
self.type_impl = EnumTypeImpl(enum_class=choices, **kwargs)
else:
self.type_impl = ChoiceTypeImpl(choices=choices, **kwargs)
if impl:
if isclass(impl):
impl = impl()
self.impl = impl
@property
def python_type(self):
return self.impl.python_type
def _coerce(self, value):
return self.type_impl._coerce(value)
def process_bind_param(self, value, dialect):
return self.type_impl.process_bind_param(value, dialect)
def process_result_value(self, value, dialect):
return self.type_impl.process_result_value(value, dialect)
class ChoiceTypeImpl(object):
"""The implementation for the ``Choice`` usage."""
def __init__(self, choices):
if not choices:
raise ImproperlyConfigured(
'ChoiceType needs list of choices defined.'
)
self.choices_dict = dict(choices)
def _coerce(self, value):
if value is None:
return value
if isinstance(value, Choice):
return value
return Choice(value, self.choices_dict[value])
def process_bind_param(self, value, dialect):
if value and isinstance(value, Choice):
return value.code
return value
def process_result_value(self, value, dialect):
if value:
return Choice(value, self.choices_dict[value])
return value
class EnumTypeImpl(object):
"""The implementation for the ``Enum`` usage."""
def __init__(self, enum_class, bind_by_name=True):
self.enum_class = enum_class
self.bind_by_name = bind_by_name
def _coerce(self, value):
return self.enum_class(value) if value else None
def process_bind_param(self, value, dialect):
ret = None
if isinstance(value, Enum):
ret = value.value
elif self.bind_by_name and isinstance(value, str):
for e in self.enum_class:
if e.name.lower() == value.lower():
ret = e.value
elif value:
ret = self.enum_class(value).value
return ret
def process_result_value(self, value, dialect):
return self.enum_class(value) if value else None | 0.848596 | 0.162945 |
from datetime import datetime
from datetime import timedelta
from datetime import tzinfo
from typing import Optional
from typing import TypeVar
from typing import overload
import pendulum
from pendulum.helpers import local_time
from pendulum.helpers import timestamp
from pendulum.utils._compat import _HAS_FOLD
from .exceptions import AmbiguousTime
from .exceptions import NonExistingTime
from .zoneinfo import read
from .zoneinfo import read_file
from .zoneinfo.transition import Transition
POST_TRANSITION = "post"
PRE_TRANSITION = "pre"
TRANSITION_ERROR = "error"
_datetime = datetime
_D = TypeVar("_D", bound=datetime)
class Timezone(tzinfo):
    """
    Represents a named timezone.
    The accepted names are those provided by the IANA time zone database.
    >>> from pendulum.tz.timezone import Timezone
    >>> tz = Timezone('Europe/Paris')
    """
    def __init__(self, name, extended=True): # type: (str, bool) -> None
        tz = read(name, extend=extended)
        self._name = name
        self._transitions = tz.transitions
        # Cache of the last (timestamp, transition index) lookup, kept
        # separately for wall-clock (False) and UTC (True) searches.
        self._hint = {True: None, False: None}
    @property
    def name(self): # type: () -> str
        return self._name
    def convert(self, dt, dst_rule=None): # type: (_D, Optional[str]) -> _D
        """
        Converts a datetime in the current timezone.
        If the datetime is naive, it will be normalized.
        >>> from datetime import datetime
        >>> from pendulum import timezone
        >>> paris = timezone('Europe/Paris')
        >>> dt = datetime(2013, 3, 31, 2, 30, fold=1)
        >>> in_paris = paris.convert(dt)
        >>> in_paris.isoformat()
        '2013-03-31T03:30:00+02:00'
        If the datetime is aware, it will be properly converted.
        >>> new_york = timezone('America/New_York')
        >>> in_new_york = new_york.convert(in_paris)
        >>> in_new_york.isoformat()
        '2013-03-30T21:30:00-04:00'
        """
        if dt.tzinfo is None:
            return self._normalize(dt, dst_rule=dst_rule)
        return self._convert(dt)
    def datetime(
        self, year, month, day, hour=0, minute=0, second=0, microsecond=0
    ): # type: (int, int, int, int, int, int, int) -> _datetime
        """
        Return a normalized datetime for the current timezone.
        """
        # fold=1 (or POST_TRANSITION without fold support) resolves
        # ambiguous/missing wall times to the post-transition side.
        if _HAS_FOLD:
            return self.convert(
                datetime(year, month, day, hour, minute, second, microsecond, fold=1)
            )
        return self.convert(
            datetime(year, month, day, hour, minute, second, microsecond),
            dst_rule=POST_TRANSITION,
        )
    def _normalize(self, dt, dst_rule=None): # type: (_D, Optional[str]) -> _D
        # Attach this timezone to a naive datetime, resolving ambiguous and
        # non-existing wall times according to `dst_rule` (pre/post/error).
        sec = timestamp(dt)
        fold = 0
        transition = self._lookup_transition(sec)
        if not _HAS_FOLD and dst_rule is None:
            dst_rule = POST_TRANSITION
        if dst_rule is None:
            dst_rule = PRE_TRANSITION
            if dt.fold == 1:
                dst_rule = POST_TRANSITION
        if sec < transition.local:
            if transition.is_ambiguous(sec):
                # Ambiguous time
                if dst_rule == TRANSITION_ERROR:
                    raise AmbiguousTime(dt)
                # We set the fold attribute for later
                if dst_rule == POST_TRANSITION:
                    fold = 1
            elif transition.previous is not None:
                transition = transition.previous
        if transition:
            if transition.is_ambiguous(sec):
                # Ambiguous time
                if dst_rule == TRANSITION_ERROR:
                    raise AmbiguousTime(dt)
                # We set the fold attribute for later
                if dst_rule == POST_TRANSITION:
                    fold = 1
            elif transition.is_missing(sec):
                # Skipped time
                if dst_rule == TRANSITION_ERROR:
                    raise NonExistingTime(dt)
                # We adjust accordingly
                if dst_rule == POST_TRANSITION:
                    sec += transition.fix
                    fold = 1
                else:
                    sec -= transition.fix
        kwargs = {"tzinfo": self}
        if _HAS_FOLD or isinstance(dt, pendulum.DateTime):
            kwargs["fold"] = fold
        return dt.__class__(*local_time(sec, 0, dt.microsecond), **kwargs)
    def _convert(self, dt): # type: (_D) -> _D
        # Convert an aware datetime (possibly from another Timezone) into
        # this timezone, going through a UTC timestamp.
        if dt.tzinfo is self:
            return self._normalize(dt, dst_rule=POST_TRANSITION)
        if not isinstance(dt.tzinfo, Timezone):
            return dt.astimezone(self)
        stamp = timestamp(dt)
        if isinstance(dt.tzinfo, FixedTimezone):
            offset = dt.tzinfo.offset
        else:
            # Determine the source timezone's offset, honoring `fold` for
            # ambiguous wall times.
            transition = dt.tzinfo._lookup_transition(stamp)
            offset = transition.ttype.offset
            if stamp < transition.local and transition.previous is not None:
                if (
                    transition.previous.is_ambiguous(stamp)
                    and getattr(dt, "fold", 1) == 0
                ):
                    pass
                else:
                    offset = transition.previous.ttype.offset
        stamp -= offset
        transition = self._lookup_transition(stamp, is_utc=True)
        if stamp < transition.at and transition.previous is not None:
            transition = transition.previous
        offset = transition.ttype.offset
        stamp += offset
        fold = int(not transition.ttype.is_dst())
        kwargs = {"tzinfo": self}
        if _HAS_FOLD or isinstance(dt, pendulum.DateTime):
            kwargs["fold"] = fold
        return dt.__class__(*local_time(stamp, 0, dt.microsecond), **kwargs)
    def _lookup_transition(
        self, stamp, is_utc=False
    ): # type: (int, bool) -> Transition
        # Binary search over the sorted transition list, seeded by the
        # cached result of the previous lookup (`_hint`).
        lo, hi = 0, len(self._transitions)
        hint = self._hint[is_utc]
        if hint:
            if stamp == hint[0]:
                return self._transitions[hint[1]]
            elif stamp < hint[0]:
                hi = hint[1]
            else:
                lo = hint[1]
        # Wall-clock lookups compare against `.to`, UTC lookups against `.at`.
        if not is_utc:
            while lo < hi:
                mid = (lo + hi) // 2
                if stamp < self._transitions[mid].to:
                    hi = mid
                else:
                    lo = mid + 1
        else:
            while lo < hi:
                mid = (lo + hi) // 2
                if stamp < self._transitions[mid].at:
                    hi = mid
                else:
                    lo = mid + 1
        if lo >= len(self._transitions):
            # Beyond last transition
            lo = len(self._transitions) - 1
        self._hint[is_utc] = (stamp, lo)
        return self._transitions[lo]
    @overload
    def utcoffset(self, dt): # type: (None) -> None
        pass
    @overload
    def utcoffset(self, dt): # type: (_datetime) -> timedelta
        pass
    def utcoffset(self, dt):
        if dt is None:
            return
        transition = self._get_transition(dt)
        return transition.utcoffset()
    def dst(
        self, dt # type: Optional[_datetime]
    ): # type: (...) -> Optional[timedelta]
        if dt is None:
            return
        transition = self._get_transition(dt)
        if not transition.ttype.is_dst():
            return timedelta()
        return timedelta(seconds=transition.fix)
    def tzname(self, dt): # type: (Optional[_datetime]) -> Optional[str]
        if dt is None:
            return
        transition = self._get_transition(dt)
        return transition.ttype.abbreviation
    def _get_transition(self, dt): # type: (_datetime) -> Transition
        # Find the transition governing `dt`, honoring the `fold` attribute
        # for wall times that occur twice.
        if dt.tzinfo is not None and dt.tzinfo is not self:
            dt = dt - dt.utcoffset()
            stamp = timestamp(dt)
            transition = self._lookup_transition(stamp, is_utc=True)
        else:
            stamp = timestamp(dt)
            transition = self._lookup_transition(stamp)
            if stamp < transition.local and transition.previous is not None:
                fold = getattr(dt, "fold", 1)
                if transition.is_ambiguous(stamp):
                    if fold == 0:
                        transition = transition.previous
                elif transition.previous.is_ambiguous(stamp) and fold == 0:
                    pass
                else:
                    transition = transition.previous
        return transition
    def fromutc(self, dt): # type: (_D) -> _D
        # Map a UTC datetime onto this timezone's wall clock.
        stamp = timestamp(dt)
        transition = self._lookup_transition(stamp, is_utc=True)
        if stamp < transition.at and transition.previous is not None:
            transition = transition.previous
        stamp += transition.ttype.offset
        return dt.__class__(*local_time(stamp, 0, dt.microsecond), tzinfo=self)
    def __repr__(self): # type: () -> str
        return "Timezone('{}')".format(self._name)
    def __getinitargs__(self): # type: () -> tuple
        return (self._name,)
class FixedTimezone(Timezone):
    """A timezone with a constant UTC offset and no DST transitions."""
    def __init__(self, offset, name=None):
        # `offset` is the UTC offset in seconds; the default name renders
        # it as "+HH:MM"/"-HH:MM".
        sign = "-" if offset < 0 else "+"
        minutes = offset / 60
        hour, minute = divmod(abs(int(minutes)), 60)
        if not name:
            name = "{0}{1:02d}:{2:02d}".format(sign, hour, minute)
        self._name = name
        self._offset = offset
        self._utcoffset = timedelta(seconds=offset)
    @property
    def offset(self): # type: () -> int
        # UTC offset in seconds.
        return self._offset
    def _normalize(self, dt, dst_rule=None): # type: (_D, Optional[str]) -> _D
        # No DST here, so normalization only attaches this tzinfo (fold is
        # always 0 when the runtime supports it).
        if _HAS_FOLD:
            dt = dt.__class__(
                dt.year,
                dt.month,
                dt.day,
                dt.hour,
                dt.minute,
                dt.second,
                dt.microsecond,
                tzinfo=self,
                fold=0,
            )
        else:
            dt = dt.__class__(
                dt.year,
                dt.month,
                dt.day,
                dt.hour,
                dt.minute,
                dt.second,
                dt.microsecond,
                tzinfo=self,
            )
        return dt
    def _convert(self, dt): # type: (_D) -> _D
        if dt.tzinfo is not self:
            return dt.astimezone(self)
        return dt
    def utcoffset(self, dt): # type: (Optional[_datetime]) -> timedelta
        return self._utcoffset
    def dst(self, dt): # type: (Optional[_datetime]) -> timedelta
        # A fixed-offset zone never observes DST.
        return timedelta()
    def fromutc(self, dt): # type: (_D) -> _D
        # Use the stdlib datetime's add method to avoid infinite recursion
        return (datetime.__add__(dt, self._utcoffset)).replace(tzinfo=self)
    def tzname(self, dt): # type: (Optional[_datetime]) -> Optional[str]
        return self._name
    def __getinitargs__(self): # type: () -> tuple
        return self._offset, self._name
class TimezoneFile(Timezone):
    """A timezone loaded from a tzfile on disk rather than by IANA name."""
    def __init__(self, path):
        tz = read_file(path)
        # File-based zones have no canonical name.
        self._name = ""
        self._transitions = tz.transitions
        self._hint = {True: None, False: None}
UTC = FixedTimezone(0, "UTC") | buildroot-external/rootfs-overlay/usr/lib/python3.8/site-packages/pendulum/tz/timezone.py | from datetime import datetime
from datetime import timedelta
from datetime import tzinfo
from typing import Optional
from typing import TypeVar
from typing import overload
import pendulum
from pendulum.helpers import local_time
from pendulum.helpers import timestamp
from pendulum.utils._compat import _HAS_FOLD
from .exceptions import AmbiguousTime
from .exceptions import NonExistingTime
from .zoneinfo import read
from .zoneinfo import read_file
from .zoneinfo.transition import Transition
POST_TRANSITION = "post"
PRE_TRANSITION = "pre"
TRANSITION_ERROR = "error"
_datetime = datetime
_D = TypeVar("_D", bound=datetime)
class Timezone(tzinfo):
"""
Represents a named timezone.
The accepted names are those provided by the IANA time zone database.
>>> from pendulum.tz.timezone import Timezone
>>> tz = Timezone('Europe/Paris')
"""
def __init__(self, name, extended=True): # type: (str, bool) -> None
tz = read(name, extend=extended)
self._name = name
self._transitions = tz.transitions
self._hint = {True: None, False: None}
@property
def name(self): # type: () -> str
return self._name
def convert(self, dt, dst_rule=None): # type: (_D, Optional[str]) -> _D
"""
Converts a datetime in the current timezone.
If the datetime is naive, it will be normalized.
>>> from datetime import datetime
>>> from pendulum import timezone
>>> paris = timezone('Europe/Paris')
>>> dt = datetime(2013, 3, 31, 2, 30, fold=1)
>>> in_paris = paris.convert(dt)
>>> in_paris.isoformat()
'2013-03-31T03:30:00+02:00'
If the datetime is aware, it will be properly converted.
>>> new_york = timezone('America/New_York')
>>> in_new_york = new_york.convert(in_paris)
>>> in_new_york.isoformat()
'2013-03-30T21:30:00-04:00'
"""
if dt.tzinfo is None:
return self._normalize(dt, dst_rule=dst_rule)
return self._convert(dt)
def datetime(
self, year, month, day, hour=0, minute=0, second=0, microsecond=0
): # type: (int, int, int, int, int, int, int) -> _datetime
"""
Return a normalized datetime for the current timezone.
"""
if _HAS_FOLD:
return self.convert(
datetime(year, month, day, hour, minute, second, microsecond, fold=1)
)
return self.convert(
datetime(year, month, day, hour, minute, second, microsecond),
dst_rule=POST_TRANSITION,
)
def _normalize(self, dt, dst_rule=None): # type: (_D, Optional[str]) -> _D
sec = timestamp(dt)
fold = 0
transition = self._lookup_transition(sec)
if not _HAS_FOLD and dst_rule is None:
dst_rule = POST_TRANSITION
if dst_rule is None:
dst_rule = PRE_TRANSITION
if dt.fold == 1:
dst_rule = POST_TRANSITION
if sec < transition.local:
if transition.is_ambiguous(sec):
# Ambiguous time
if dst_rule == TRANSITION_ERROR:
raise AmbiguousTime(dt)
# We set the fold attribute for later
if dst_rule == POST_TRANSITION:
fold = 1
elif transition.previous is not None:
transition = transition.previous
if transition:
if transition.is_ambiguous(sec):
# Ambiguous time
if dst_rule == TRANSITION_ERROR:
raise AmbiguousTime(dt)
# We set the fold attribute for later
if dst_rule == POST_TRANSITION:
fold = 1
elif transition.is_missing(sec):
# Skipped time
if dst_rule == TRANSITION_ERROR:
raise NonExistingTime(dt)
# We adjust accordingly
if dst_rule == POST_TRANSITION:
sec += transition.fix
fold = 1
else:
sec -= transition.fix
kwargs = {"tzinfo": self}
if _HAS_FOLD or isinstance(dt, pendulum.DateTime):
kwargs["fold"] = fold
return dt.__class__(*local_time(sec, 0, dt.microsecond), **kwargs)
def _convert(self, dt): # type: (_D) -> _D
if dt.tzinfo is self:
return self._normalize(dt, dst_rule=POST_TRANSITION)
if not isinstance(dt.tzinfo, Timezone):
return dt.astimezone(self)
stamp = timestamp(dt)
if isinstance(dt.tzinfo, FixedTimezone):
offset = dt.tzinfo.offset
else:
transition = dt.tzinfo._lookup_transition(stamp)
offset = transition.ttype.offset
if stamp < transition.local and transition.previous is not None:
if (
transition.previous.is_ambiguous(stamp)
and getattr(dt, "fold", 1) == 0
):
pass
else:
offset = transition.previous.ttype.offset
stamp -= offset
transition = self._lookup_transition(stamp, is_utc=True)
if stamp < transition.at and transition.previous is not None:
transition = transition.previous
offset = transition.ttype.offset
stamp += offset
fold = int(not transition.ttype.is_dst())
kwargs = {"tzinfo": self}
if _HAS_FOLD or isinstance(dt, pendulum.DateTime):
kwargs["fold"] = fold
return dt.__class__(*local_time(stamp, 0, dt.microsecond), **kwargs)
def _lookup_transition(
self, stamp, is_utc=False
): # type: (int, bool) -> Transition
lo, hi = 0, len(self._transitions)
hint = self._hint[is_utc]
if hint:
if stamp == hint[0]:
return self._transitions[hint[1]]
elif stamp < hint[0]:
hi = hint[1]
else:
lo = hint[1]
if not is_utc:
while lo < hi:
mid = (lo + hi) // 2
if stamp < self._transitions[mid].to:
hi = mid
else:
lo = mid + 1
else:
while lo < hi:
mid = (lo + hi) // 2
if stamp < self._transitions[mid].at:
hi = mid
else:
lo = mid + 1
if lo >= len(self._transitions):
# Beyond last transition
lo = len(self._transitions) - 1
self._hint[is_utc] = (stamp, lo)
return self._transitions[lo]
@overload
def utcoffset(self, dt): # type: (None) -> None
pass
@overload
def utcoffset(self, dt): # type: (_datetime) -> timedelta
pass
def utcoffset(self, dt):
if dt is None:
return
transition = self._get_transition(dt)
return transition.utcoffset()
def dst(
self, dt # type: Optional[_datetime]
): # type: (...) -> Optional[timedelta]
if dt is None:
return
transition = self._get_transition(dt)
if not transition.ttype.is_dst():
return timedelta()
return timedelta(seconds=transition.fix)
def tzname(self, dt): # type: (Optional[_datetime]) -> Optional[str]
if dt is None:
return
transition = self._get_transition(dt)
return transition.ttype.abbreviation
def _get_transition(self, dt): # type: (_datetime) -> Transition
if dt.tzinfo is not None and dt.tzinfo is not self:
dt = dt - dt.utcoffset()
stamp = timestamp(dt)
transition = self._lookup_transition(stamp, is_utc=True)
else:
stamp = timestamp(dt)
transition = self._lookup_transition(stamp)
if stamp < transition.local and transition.previous is not None:
fold = getattr(dt, "fold", 1)
if transition.is_ambiguous(stamp):
if fold == 0:
transition = transition.previous
elif transition.previous.is_ambiguous(stamp) and fold == 0:
pass
else:
transition = transition.previous
return transition
def fromutc(self, dt): # type: (_D) -> _D
stamp = timestamp(dt)
transition = self._lookup_transition(stamp, is_utc=True)
if stamp < transition.at and transition.previous is not None:
transition = transition.previous
stamp += transition.ttype.offset
return dt.__class__(*local_time(stamp, 0, dt.microsecond), tzinfo=self)
def __repr__(self): # type: () -> str
return "Timezone('{}')".format(self._name)
def __getinitargs__(self): # type: () -> tuple
return (self._name,)
class FixedTimezone(Timezone):
def __init__(self, offset, name=None):
sign = "-" if offset < 0 else "+"
minutes = offset / 60
hour, minute = divmod(abs(int(minutes)), 60)
if not name:
name = "{0}{1:02d}:{2:02d}".format(sign, hour, minute)
self._name = name
self._offset = offset
self._utcoffset = timedelta(seconds=offset)
@property
def offset(self): # type: () -> int
return self._offset
def _normalize(self, dt, dst_rule=None): # type: (_D, Optional[str]) -> _D
if _HAS_FOLD:
dt = dt.__class__(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tzinfo=self,
fold=0,
)
else:
dt = dt.__class__(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tzinfo=self,
)
return dt
def _convert(self, dt): # type: (_D) -> _D
if dt.tzinfo is not self:
return dt.astimezone(self)
return dt
def utcoffset(self, dt): # type: (Optional[_datetime]) -> timedelta
return self._utcoffset
def dst(self, dt): # type: (Optional[_datetime]) -> timedelta
return timedelta()
def fromutc(self, dt): # type: (_D) -> _D
# Use the stdlib datetime's add method to avoid infinite recursion
return (datetime.__add__(dt, self._utcoffset)).replace(tzinfo=self)
def tzname(self, dt): # type: (Optional[_datetime]) -> Optional[str]
return self._name
def __getinitargs__(self): # type: () -> tuple
return self._offset, self._name
class TimezoneFile(Timezone):
def __init__(self, path):
tz = read_file(path)
self._name = ""
self._transitions = tz.transitions
self._hint = {True: None, False: None}
UTC = FixedTimezone(0, "UTC") | 0.860589 | 0.139162 |
from directx.types import *
from directx.d3d import *
#********************************************************************
# Typedefs and constants
#********************************************************************
# Load the D3DX runtime DLL via ctypes.windll (Python 2, Windows only).
try:
    # SDK April 2006 - you can change the
    # .dll to another one if you know what you are doing.
    if D3DDEBUGENABLED:
        print "Debugging enabled, attempting to load the debug D3DX .dll"
        d3dxdll = windll.d3dx9d_43
    else:
        d3dxdll = windll.d3dx9_43
except:
    # Print an installation hint, then re-raise so callers still see the
    # original loader error when the runtime is missing.
    print """
    *****************************************************
    You don't seem to have the D3D end-user runtime installed.
    Visit Microsoft's DirectX web site for latest downloads.
    *****************************************************
    """
    raise
# D3DX library/SDK version constants (must match the loaded .dll).
D3DX_VERSION = 0x0902
D3DX_SDK_VERSION = 43
# Typedef aliases: D3DX reuses the core D3D math and handle types.
D3DXMATRIX = D3DMATRIX
D3DXVECTOR3 = D3DVECTOR
D3DXHANDLE = LPCSTR
#********************************************************************
# Functions
#********************************************************************
def TestHR(hr):
    """Return *hr* unchanged when it signals success; raise WinError on failure.

    D3D HRESULTs are negative on failure.  A proper Dxerr.lib-based
    description lookup is still a TODO (plain HR lookups can be wrong
    and thus confusing).
    """
    if hr >= 0:
        return hr
    raise WinError("Unknown error (not yet implemented)", -1)
#********************************************************************
# Interfaces
#********************************************************************
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXBuffer(IUnknown):
    _iid_ = GUID("{8BA5FB08-5195-40e2-AC58-0D989C3A0102}")
    _methods_ = [
        STDMETHOD(POINTER(None), 'GetBufferPointer'),
        STDMETHOD(DWORD, 'GetBufferSize'),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXSprite(IUnknown):
    _iid_ = GUID("{BA0B762D-7D28-43ec-B9DC-2F84443B0614}")
    _methods_ = [
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'GetTransform', [POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'SetTransform', [POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'SetWorldViewRH', [POINTER(D3DXMATRIX), POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'SetWorldViewLH', [POINTER(D3DXMATRIX), POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'Begin', [DWORD]),
        STDMETHOD(HRESULT, 'Draw', [POINTER(IDirect3DTexture9), POINTER(RECT), POINTER(D3DXVECTOR3), POINTER(D3DXVECTOR3), DWORD]),
        STDMETHOD(HRESULT, 'Flush'),
        STDMETHOD(HRESULT, 'End'),
        STDMETHOD(HRESULT, 'OnLostDevice'),
        STDMETHOD(HRESULT, 'OnResetDevice'),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXFont(IUnknown):
    _iid_ = GUID("{D79DBB70-5F21-4d36-BBC2-FF525C213CDC}")
    _methods_ = [
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'GetDescAXXX', [POINTER(None)]), #Todo
        STDMETHOD(HRESULT, 'GetDescW', [POINTER(D3DXFONT_DESCW)]),
        STDMETHOD(BOOL, 'GetTextMetricsAXXX', [POINTER(None)]), #Todo
        STDMETHOD(BOOL, 'GetTextMetricsW', [POINTER(TEXTMETRICW)]),
        STDMETHOD(HDC, 'GetDC'),
        STDMETHOD(HRESULT, 'GetGlyphData', [UINT, POINTER(IDirect3DTexture9), POINTER(RECT), POINTER(POINT)]),
        STDMETHOD(HRESULT, 'PreloadCharacters', [UINT, UINT]),
        STDMETHOD(HRESULT, 'PreloadGlyphs', [UINT, UINT]),
        STDMETHOD(HRESULT, 'PreloadTextA', [LPCSTR, INT]),
        STDMETHOD(HRESULT, 'PreloadTextW', [LPCWSTR, INT]),
        STDMETHOD(INT, 'DrawTextA', [POINTER(ID3DXSprite), LPCSTR, INT, POINTER(RECT), DWORD, DWORD]),
        STDMETHOD(INT, 'DrawTextW', [POINTER(ID3DXSprite), LPCWSTR, INT, POINTER(RECT), DWORD, DWORD]),
        STDMETHOD(HRESULT, 'OnLostDevice'),
        STDMETHOD(HRESULT, 'OnResetDevice'),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXLine(IUnknown):
    _iid_ = GUID("{D379BA7F-9042-4ac4-9F5E-58192A4C6BD8}")
    _methods_ = [
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'Begin'),
        STDMETHOD(HRESULT, 'Draw', [POINTER(D3DXVECTOR2), DWORD, DWORD]),
        STDMETHOD(HRESULT, 'DrawTransform', [POINTER(D3DXVECTOR3), DWORD, POINTER(D3DMATRIX), DWORD]),
        STDMETHOD(HRESULT, 'SetPattern', [DWORD]),
        STDMETHOD(DWORD, 'GetPattern'),
        STDMETHOD(HRESULT, 'SetPatternScale', [c_float]),
        STDMETHOD(c_float, 'GetPatternScale'),
        STDMETHOD(HRESULT, 'SetWidth', [c_float]),
        STDMETHOD(c_float, 'GetWidth'),
        STDMETHOD(HRESULT, 'SetAntialias', [BOOL]),
        STDMETHOD(BOOL, 'GetAntialias'),
        STDMETHOD(HRESULT, 'SetGLLines', [BOOL]),
        STDMETHOD(BOOL, 'GetGLLines'),
        STDMETHOD(HRESULT, 'End'),
        STDMETHOD(HRESULT, 'OnLostDevice'),
        STDMETHOD(HRESULT, 'OnResetDevice'),
    ]
#********************************************************************
# Interfaces (Effect)
#********************************************************************
# Marker interface: no methods of its own beyond IUnknown.
class ID3DXEffectPool(IUnknown):
    _iid_ = GUID("{9537AB04-3250-412e-8213-FCD2F8677933}")
    _methods_ = [
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
# Shared parameter get/set surface inherited by ID3DXEffect and ID3DXEffectCompiler.
class ID3DXBaseEffect(IUnknown):
    _iid_ = GUID("{017C18AC-103F-4417-8C51-6BF6EF1E56BE}")
    _methods_ = [
        STDMETHOD(HRESULT, 'GetDesc', [POINTER(D3DXEFFECT_DESC)]),
        STDMETHOD(HRESULT, 'GetParameterDesc', [D3DXHANDLE, POINTER(D3DXPARAMETER_DESC)]),
        STDMETHOD(HRESULT, 'GetTechniqueDesc', [D3DXHANDLE, POINTER(D3DXTECHNIQUE_DESC)]),
        STDMETHOD(HRESULT, 'GetPassDesc', [D3DXHANDLE, POINTER(D3DXPASS_DESC)]),
        STDMETHOD(HRESULT, 'GetFunctionDesc', [D3DXHANDLE, POINTER(D3DXFUNCTION_DESC)]),
        STDMETHOD(D3DXHANDLE, 'GetParameter', [D3DXHANDLE, UINT]),
        STDMETHOD(D3DXHANDLE, 'GetParameterByName', [D3DXHANDLE, LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetParameterBySemantic', [D3DXHANDLE, LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetParameterElement', [D3DXHANDLE, UINT]),
        STDMETHOD(D3DXHANDLE, 'GetTechnique', [UINT]),
        STDMETHOD(D3DXHANDLE, 'GetTechniqueByName', [LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetPass', [D3DXHANDLE, UINT]),
        STDMETHOD(D3DXHANDLE, 'GetPassByName', [D3DXHANDLE, LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetFunction', [UINT]),
        STDMETHOD(D3DXHANDLE, 'GetFunctionByName', [LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetAnnotation', [D3DXHANDLE, UINT]),
        STDMETHOD(D3DXHANDLE, 'GetAnnotationByName', [D3DXHANDLE, LPCSTR]),
        STDMETHOD(HRESULT, 'SetValue', [D3DXHANDLE, POINTER(None), UINT]),
        STDMETHOD(HRESULT, 'GetValue', [D3DXHANDLE, POINTER(None), UINT]),
        STDMETHOD(HRESULT, 'SetBool', [D3DXHANDLE, BOOL]),
        STDMETHOD(HRESULT, 'GetBool', [D3DXHANDLE, POINTER(BOOL)]),
        STDMETHOD(HRESULT, 'SetBoolArray', [D3DXHANDLE, POINTER(BOOL), UINT]),
        STDMETHOD(HRESULT, 'GetBoolArray', [D3DXHANDLE, POINTER(BOOL), UINT]),
        STDMETHOD(HRESULT, 'SetInt', [D3DXHANDLE, INT]),
        STDMETHOD(HRESULT, 'GetInt', [D3DXHANDLE, POINTER(INT)]),
        STDMETHOD(HRESULT, 'SetIntArray', [D3DXHANDLE, POINTER(INT), UINT]),
        STDMETHOD(HRESULT, 'GetIntArray', [D3DXHANDLE, POINTER(INT), UINT]),
        STDMETHOD(HRESULT, 'SetFloat', [D3DXHANDLE, c_float]),
        STDMETHOD(HRESULT, 'GetFloat', [D3DXHANDLE, POINTER(c_float)]),
        STDMETHOD(HRESULT, 'SetFloatArray', [D3DXHANDLE, POINTER(c_float), UINT]),
        STDMETHOD(HRESULT, 'GetFloatArray', [D3DXHANDLE, POINTER(c_float), UINT]),
        STDMETHOD(HRESULT, 'SetVector', [D3DXHANDLE, POINTER(D3DXVECTOR4)]),
        STDMETHOD(HRESULT, 'GetVector', [D3DXHANDLE, POINTER(D3DXVECTOR4)]),
        STDMETHOD(HRESULT, 'SetVectorArray', [D3DXHANDLE, POINTER(D3DXVECTOR4), UINT]),
        STDMETHOD(HRESULT, 'GetVectorArray', [D3DXHANDLE, POINTER(D3DXVECTOR4), UINT]),
        STDMETHOD(HRESULT, 'SetMatrix', [D3DXHANDLE, POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'GetMatrix', [D3DXHANDLE, POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'SetMatrixArray', [D3DXHANDLE, POINTER(D3DXMATRIX), UINT]),
        STDMETHOD(HRESULT, 'GetMatrixArray', [D3DXHANDLE, POINTER(D3DXMATRIX), UINT]),
        STDMETHOD(HRESULT, 'SetMatrixPointerArray', [D3DXHANDLE, POINTER(POINTER(D3DXMATRIX)), UINT]),
        STDMETHOD(HRESULT, 'GetMatrixPointerArray', [D3DXHANDLE, POINTER(POINTER(D3DXMATRIX)), UINT]),
        STDMETHOD(HRESULT, 'SetMatrixTranspose', [D3DXHANDLE, POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'GetMatrixTranspose', [D3DXHANDLE, POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'SetMatrixTransposeArray', [D3DXHANDLE, POINTER(D3DXMATRIX), UINT]),
        STDMETHOD(HRESULT, 'GetMatrixTransposeArray', [D3DXHANDLE, POINTER(D3DXMATRIX), UINT]),
        STDMETHOD(HRESULT, 'SetMatrixTransposePointerArray', [D3DXHANDLE, POINTER(POINTER(D3DXMATRIX)), UINT]),
        STDMETHOD(HRESULT, 'GetMatrixTransposePointerArray', [D3DXHANDLE, POINTER(POINTER(D3DXMATRIX)), UINT]),
        STDMETHOD(HRESULT, 'SetString', [D3DXHANDLE, LPCSTR]),
        STDMETHOD(HRESULT, 'GetString', [D3DXHANDLE, POINTER(LPCSTR)]),
        STDMETHOD(HRESULT, 'SetTexture', [D3DXHANDLE, POINTER(IDirect3DBaseTexture9)]),
        STDMETHOD(HRESULT, 'GetTexture', [D3DXHANDLE, POINTER(IDirect3DBaseTexture9)]),
        STDMETHOD(HRESULT, 'GetPixelShader', [D3DXHANDLE, POINTER(IDirect3DPixelShader9)]),
        STDMETHOD(HRESULT, 'GetVertexShader', [D3DXHANDLE, POINTER(IDirect3DVertexShader9)]),
        STDMETHOD(HRESULT, 'SetArrayRange', [D3DXHANDLE, UINT, UINT]),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXEffectStateManager(IUnknown):
    _iid_ = GUID("{79AAB587-6DBC-4fa7-82DE-37FA1781C5CE}")
    _methods_ = [
        STDMETHOD(HRESULT, 'SetTransform', [DWORD, POINTER(D3DMATRIX)]),
        STDMETHOD(HRESULT, 'SetMaterial', [D3DMATERIAL9]),
        STDMETHOD(HRESULT, 'SetLight', [DWORD, D3DLIGHT9]),
        STDMETHOD(HRESULT, 'LightEnable', [DWORD, BOOL]),
        STDMETHOD(HRESULT, 'SetRenderState', [DWORD, DWORD]),
        STDMETHOD(HRESULT, 'SetTexture', [DWORD, POINTER(IDirect3DBaseTexture9)]),
        STDMETHOD(HRESULT, 'SetTextureStageState', [DWORD, DWORD, DWORD]),
        STDMETHOD(HRESULT, 'SetSamplerState', [DWORD, DWORD, DWORD]),
        STDMETHOD(HRESULT, 'SetNPatchMode', [c_float]),
        STDMETHOD(HRESULT, 'SetFVF', [DWORD]),
        STDMETHOD(HRESULT, 'SetVertexShader', [POINTER(IDirect3DVertexShader9)]),
        STDMETHOD(HRESULT, 'SetVertexShaderConstantF', [UINT, c_float, UINT]),
        STDMETHOD(HRESULT, 'SetVertexShaderConstantI', [UINT, INT, UINT]),
        STDMETHOD(HRESULT, 'SetVertexShaderConstantB', [UINT, BOOL, UINT]),
        STDMETHOD(HRESULT, 'SetPixelShader', [POINTER(IDirect3DPixelShader9)]),
        STDMETHOD(HRESULT, 'SetPixelShaderConstantF', [UINT, c_float, UINT]),
        STDMETHOD(HRESULT, 'SetPixelShaderConstantI', [UINT, INT, UINT]),
        STDMETHOD(HRESULT, 'SetPixelShaderConstantB', [UINT, BOOL, UINT]),
    ]
# ctypes COM wrapper: _methods_ extends the ID3DXBaseEffect vtable — do not reorder.
class ID3DXEffect(ID3DXBaseEffect):
    _iid_ = GUID("{F6CEB4B3-4E4C-40dd-B883-8D8DE5EA0CD5}")
    _methods_ = [
        STDMETHOD(HRESULT, 'GetPool', [POINTER(ID3DXEffectPool)]),
        STDMETHOD(HRESULT, 'SetTechnique', [D3DXHANDLE]),
        STDMETHOD(D3DXHANDLE, 'GetCurrentTechnique'),
        STDMETHOD(HRESULT, 'ValidateTechnique', [D3DXHANDLE]),
        STDMETHOD(HRESULT, 'FindNextValidTechnique', [D3DXHANDLE, D3DXHANDLE]),
        STDMETHOD(BOOL, 'IsParameterUsed', [D3DXHANDLE, D3DXHANDLE]),
        STDMETHOD(HRESULT, 'Begin', [POINTER(UINT), DWORD]),
        STDMETHOD(HRESULT, 'BeginPass', [UINT]),
        STDMETHOD(HRESULT, 'CommitChanges'),
        STDMETHOD(HRESULT, 'EndPass'),
        STDMETHOD(HRESULT, 'End'),
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'OnLostDevice'),
        STDMETHOD(HRESULT, 'OnResetDevice'),
        STDMETHOD(HRESULT, 'SetStateManager', [POINTER(ID3DXEffectStateManager)]),
        STDMETHOD(HRESULT, 'GetStateManager', [POINTER(POINTER(ID3DXEffectStateManager))]),
        STDMETHOD(HRESULT, 'BeginParameterBlock'),
        STDMETHOD(D3DXHANDLE, 'EndParameterBlock'),
        STDMETHOD(HRESULT, 'ApplyParameterBlock', [D3DXHANDLE]),
        STDMETHOD(HRESULT, 'DeleteParameterBlock', [D3DXHANDLE]),
        STDMETHOD(HRESULT, 'CloneEffect', [POINTER(IDirect3DDevice9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'SetRawValue', [D3DXHANDLE, POINTER(None), UINT, UINT]),
    ]
# ctypes COM wrapper: _methods_ extends the ID3DXBaseEffect vtable — do not reorder.
# NOTE(review): CompileEffect/CompileShader omit their argument lists — confirm
# against d3dx9effect.h before calling them.
class ID3DXEffectCompiler(ID3DXBaseEffect):
    _iid_ = GUID("{51B8A949-1A31-47e6-BEA0-4B30DB53F1E0}")
    _methods_ = [
        STDMETHOD(HRESULT, 'SetLiteral', [D3DXHANDLE, BOOL]),
        STDMETHOD(HRESULT, 'GetLiteral', [D3DXHANDLE, BOOL]),
        STDMETHOD(HRESULT, 'CompileEffect'),
        STDMETHOD(HRESULT, 'CompileShader'),
    ]
#********************************************************************
# Interfaces (Mesh)
#********************************************************************
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXBaseMesh(IUnknown):
    _iid_ = GUID("{7ED943DD-52E8-40b5-A8D8-76685C406330}")
    _methods_ = [
        STDMETHOD(HRESULT, 'DrawSubset', [DWORD]),
        STDMETHOD(DWORD, 'GetNumFaces'),
        STDMETHOD(DWORD, 'GetNumVertices'),
        STDMETHOD(DWORD, 'GetFVF'),
        STDMETHOD(HRESULT, 'GetDeclaration', [POINTER(D3DVERTEXELEMENT9)]),
        STDMETHOD(DWORD, 'GetNumBytesPerVertex'),
        STDMETHOD(DWORD, 'GetOptions'),
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'CloneMeshFVF', [DWORD, DWORD, POINTER(IDirect3DDevice9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'CloneMesh', [DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(IDirect3DDevice9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'GetVertexBuffer', [POINTER(POINTER(IDirect3DVertexBuffer9))]),
        STDMETHOD(HRESULT, 'GetIndexBuffer', [POINTER(POINTER(IDirect3DIndexBuffer9))]),
        STDMETHOD(HRESULT, 'LockVertexBuffer', [DWORD, POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'UnlockVertexBuffer'),
        STDMETHOD(HRESULT, 'LockIndexBuffer', [DWORD, POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'UnlockIndexBuffer'),
        STDMETHOD(HRESULT, 'GetAttributeTable', [POINTER(D3DXATTRIBUTERANGE), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'ConvertPointRepsToAdjacency', [POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'ConvertAdjacencyToPointReps', [POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GenerateAdjacency', [c_float, POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'UpdateSemantics', [POINTER(D3DVERTEXELEMENT9)]),
    ]
# ctypes COM wrapper: _methods_ extends the ID3DXBaseMesh vtable — do not reorder.
class ID3DXMesh(ID3DXBaseMesh):
    _iid_ = GUID("{4020E5C2-1403-4929-883F-E2E849FAC195}")
    _methods_ = [
        STDMETHOD(HRESULT, 'LockAttributeBuffer', [DWORD, POINTER(POINTER(DWORD))]),
        STDMETHOD(HRESULT, 'UnlockAttributeBuffer'),
        STDMETHOD(HRESULT, 'Optimize', [DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer)), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'OptimizeInplace', [DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer))]),
        STDMETHOD(HRESULT, 'SetAttributeTable', [POINTER(D3DXATTRIBUTERANGE), DWORD]),
    ]
# ctypes COM wrapper: _methods_ extends the ID3DXBaseMesh vtable — do not reorder.
class ID3DXPMesh(ID3DXBaseMesh):
    _iid_ = GUID("{8875769A-D579-4088-AAEB-534D1AD84E96}")
    _methods_ = [
        STDMETHOD(HRESULT, 'ClonePMeshFVF', [DWORD, DWORD, POINTER(IDirect3DDevice9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'ClonePMesh', [DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(IDirect3DDevice9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'SetNumFaces', [DWORD]),
        STDMETHOD(HRESULT, 'SetNumVertices', [DWORD]),
        STDMETHOD(DWORD, 'GetMaxFaces'),
        STDMETHOD(DWORD, 'GetMinFaces'),
        STDMETHOD(DWORD, 'GetMaxVertices'),
        STDMETHOD(DWORD, 'GetMinVertices'),
        STDMETHOD(HRESULT, 'Save', [POINTER(None), POINTER(D3DXMATERIAL), POINTER(None), DWORD]),
        STDMETHOD(HRESULT, 'Optimize', [DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer)), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'OptimizeBaseLOD', [DWORD, POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'TrimByFaces', [DWORD, DWORD, POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'TrimByVertices', [DWORD, DWORD, POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GetAdjacency', [POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GenerateVertexHistory', [POINTER(DWORD)]),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXSPMesh(IUnknown):
    _iid_ = GUID("{667EA4C7-F1CD-4386-B523-7C0290B83CC5}")
    _methods_ = [
        STDMETHOD(DWORD, 'GetNumFaces'),
        STDMETHOD(DWORD, 'GetNumVertices'),
        STDMETHOD(DWORD, 'GetFVF'),
        STDMETHOD(HRESULT, 'GetDeclaration', [POINTER(D3DVERTEXELEMENT9)]),
        STDMETHOD(DWORD, 'GetOptions'),
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'CloneMeshFVF', [DWORD, DWORD, POINTER(IDirect3DDevice9), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXMesh))]),
        STDMETHOD(HRESULT, 'CloneMesh', [DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(IDirect3DDevice9), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXMesh))]),
        STDMETHOD(HRESULT, 'ClonePMeshFVF', [DWORD, DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(DWORD), POINTER(c_float), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'ClonePMesh', [DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(IDirect3DDevice9), POINTER(DWORD), POINTER(c_float), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'ReduceFaces', [DWORD]),
        STDMETHOD(HRESULT, 'ReduceVertices', [DWORD]),
        STDMETHOD(DWORD, 'GetMaxFaces'),
        STDMETHOD(DWORD, 'GetMaxVertices'),
        STDMETHOD(HRESULT, 'GetVertexAttributeWeights', [POINTER(D3DXATTRIBUTEWEIGHTS)]),
        STDMETHOD(HRESULT, 'GetVertexWeights', [c_float]),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXSkinInfo(IUnknown):
    _iid_ = GUID("{11EAA540-F9A6-4d49-AE6A-E19221F70CC4}")
    _methods_ = [
        STDMETHOD(HRESULT, 'SetBoneInfluence', [DWORD, DWORD, POINTER(DWORD), POINTER(c_float)]),
        STDMETHOD(HRESULT, 'SetBoneVertexInfluence', [DWORD, DWORD, c_float]),
        STDMETHOD(DWORD, 'GetNumBoneInfluences', [DWORD]),
        STDMETHOD(HRESULT, 'GetBoneInfluence', [DWORD, POINTER(DWORD), POINTER(c_float)]),
        STDMETHOD(HRESULT, 'GetBoneVertexInfluence', [DWORD, DWORD, POINTER(c_float), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GetMaxVertexInfluences', [POINTER(DWORD)]),
        STDMETHOD(DWORD, 'GetNumBones'),
        STDMETHOD(HRESULT, 'FindBoneVertexInfluenceIndex', [DWORD, DWORD, POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GetMaxFaceInfluences', [POINTER(IDirect3DIndexBuffer9), DWORD, POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'SetMinBoneInfluence', [c_float]),
        STDMETHOD(c_float, 'GetMinBoneInfluence'),
        STDMETHOD(HRESULT, 'SetBoneName', [DWORD, LPCSTR]),
        STDMETHOD(LPCSTR, 'GetBoneName', [DWORD]),
        STDMETHOD(HRESULT, 'SetBoneOffsetMatrix', [DWORD, POINTER(D3DXMATRIX)]),
        STDMETHOD(POINTER(D3DXMATRIX), 'GetBoneOffsetMatrix', [DWORD]),
        STDMETHOD(HRESULT, 'Clone', [POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'Remap', [DWORD, POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'SetFVF', [DWORD]),
        STDMETHOD(HRESULT, 'SetDeclaration', [POINTER(D3DVERTEXELEMENT9)]),
        STDMETHOD(DWORD, 'GetFVF'),
        STDMETHOD(HRESULT, 'GetDeclaration', [POINTER(D3DVERTEXELEMENT9)]),
        STDMETHOD(HRESULT, 'UpdateSkinnedMesh', [POINTER(D3DXMATRIX), POINTER(D3DXMATRIX), POINTER(None), POINTER(None)]),
        STDMETHOD(HRESULT, 'ConvertToBlendedMesh', [POINTER(ID3DXMesh), DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer)), POINTER(DWORD), POINTER(DWORD),
                                                    POINTER(POINTER(ID3DXBuffer)), POINTER(POINTER(ID3DXMesh))]),
        STDMETHOD(HRESULT, 'ConvertToIndexedBlendedMesh', [POINTER(ID3DXMesh), DWORD, DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer)),
                                                           POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer)), POINTER(POINTER(ID3DXMesh))]),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXPatchMesh(IUnknown):
    _iid_ = GUID("{3CE6CC22-DBF2-44f4-894D-F9C34A337139}")
    _methods_ = [
        STDMETHOD(DWORD, 'GetNumPatches'),
        STDMETHOD(DWORD, 'GetNumVertices'),
        STDMETHOD(HRESULT, 'GetDeclaration', [POINTER(D3DVERTEXELEMENT9)]),
        STDMETHOD(DWORD, 'GetControlVerticesPerPatch'),
        STDMETHOD(DWORD, 'GetOptions'),
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'GetPatchInfo', [POINTER(D3DXPATCHINFO)]),
        STDMETHOD(HRESULT, 'GetVertexBuffer', [POINTER(POINTER(IDirect3DVertexBuffer9))]),
        STDMETHOD(HRESULT, 'GetIndexBuffer', [POINTER(POINTER(IDirect3DIndexBuffer9))]),
        STDMETHOD(HRESULT, 'LockVertexBuffer', [DWORD, POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'UnlockVertexBuffer'),
        STDMETHOD(HRESULT, 'LockIndexBuffer', [DWORD, POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'UnlockIndexBuffer'),
        STDMETHOD(HRESULT, 'LockAttributeBuffer', [DWORD, POINTER(POINTER(DWORD))]),
        STDMETHOD(HRESULT, 'UnlockAttributeBuffer'),
        STDMETHOD(HRESULT, 'GetTessSize', [c_float, DWORD, POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GenerateAdjacency', [c_float]),
        STDMETHOD(HRESULT, 'CloneMesh', [DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'Optimize', [DWORD]),
        STDMETHOD(HRESULT, 'SetDisplaceParam', [POINTER(IDirect3DBaseTexture9), DWORD, DWORD, DWORD, DWORD, DWORD]),
        STDMETHOD(HRESULT, 'GetDisplaceParam', [POINTER(POINTER(IDirect3DBaseTexture9)), POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'Tessellate', [c_float, POINTER(ID3DXMesh)]),
        STDMETHOD(HRESULT, 'TessellateAdaptive', [POINTER(D3DXVECTOR4), DWORD, DWORD, POINTER(ID3DXMesh)]),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
# Fix: the original closing bracket had extraction junk ("| directx/d3dx.py | ...")
# fused onto it, which made the module unparseable; it is removed here.
# NOTE(review): GetDesc/GetConstantDesc omit their argument lists — confirm
# against d3dx9shader.h before calling them.
class ID3DXConstantTable(IUnknown):
    _iid_ = GUID("{AB3C758F-093E-4356-B762-4DB18F1B3A01}")
    _methods_ = [
        STDMETHOD(c_void_p, 'GetBufferPointer'),
        STDMETHOD(DWORD, 'GetBufferSize'),
        STDMETHOD(HRESULT, 'GetDesc'),
        STDMETHOD(HRESULT, 'GetConstantDesc'),
        STDMETHOD(UINT, 'GetSamplerIndex',[D3DXHANDLE]),
        STDMETHOD(D3DXHANDLE, 'GetConstant',[D3DXHANDLE,UINT]),
        STDMETHOD(D3DXHANDLE, 'GetConstantByName',[D3DXHANDLE,LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetConstantElement',[D3DXHANDLE,UINT]),
        STDMETHOD(HRESULT,'SetDefaults',[POINTER(IDirect3DDevice9)]),
        STDMETHOD(HRESULT,'SetValue',[POINTER(IDirect3DDevice9),D3DXHANDLE,c_void_p, UINT]),
        STDMETHOD(HRESULT,'SetBool',[POINTER(IDirect3DDevice9), D3DXHANDLE , BOOL]),
        STDMETHOD(HRESULT,'SetBoolArray',[POINTER(IDirect3DDevice9),D3DXHANDLE, POINTER(BOOL), UINT]),
        STDMETHOD(HRESULT,'SetInt',[POINTER(IDirect3DDevice9),D3DXHANDLE , INT]),
        STDMETHOD(HRESULT,'SetIntArray',[POINTER(IDirect3DDevice9),D3DXHANDLE , POINTER(INT), UINT]),
        STDMETHOD(HRESULT,'SetFloat',[POINTER(IDirect3DDevice9),D3DXHANDLE , FLOAT]),
        STDMETHOD(HRESULT,'SetFloatArray',[POINTER(IDirect3DDevice9),D3DXHANDLE , POINTER(FLOAT), UINT]),
        STDMETHOD(HRESULT,'SetVector',[POINTER(IDirect3DDevice9),D3DXHANDLE , POINTER(D3DXVECTOR4)]),
    ]
from directx.d3d import *
#********************************************************************
# Typedefs and constants
#********************************************************************
# Load the D3DX runtime DLL once at import time; a failure here is fatal
# for the whole module, so the exception is re-raised after the hint.
try:
    #SDK April 2006 - you can change the
    #.dll to a another one if you know what you are doing.
    if D3DDEBUGENABLED:
        print "Debugging enabled, attempting to load the debug D3DX .dll"
        d3dxdll = windll.d3dx9d_43
    else:
        d3dxdll = windll.d3dx9_43
except:
    # Most common cause: the D3DX end-user runtime is not installed.
    print """
*****************************************************
You don't seem to have the D3D end-user runtime installed.
Visit Microsoft's DirectX web site for latest downloads.
*****************************************************
    """
    raise
# Version constants matching the d3dx9(_d)_43 DLL loaded above.
D3DX_VERSION = 0x0902
D3DX_SDK_VERSION = 43
# D3DX aliases for the core D3D structures (identical memory layout).
D3DXMATRIX = D3DMATRIX
D3DXVECTOR3 = D3DVECTOR
# Effect/constant-table handles are plain C strings in D3DX.
D3DXHANDLE = LPCSTR
#********************************************************************
# Functions
#********************************************************************
def TestHR(hr):
    """Return *hr* unchanged when it signals success; raise WinError on failure.

    D3D HRESULTs are negative on failure.  A proper Dxerr.lib-based
    description lookup is still a TODO (plain HR lookups can be wrong
    and thus confusing).
    """
    if hr >= 0:
        return hr
    raise WinError("Unknown error (not yet implemented)", -1)
#********************************************************************
# Interfaces
#********************************************************************
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXBuffer(IUnknown):
    _iid_ = GUID("{8BA5FB08-5195-40e2-AC58-0D989C3A0102}")
    _methods_ = [
        STDMETHOD(POINTER(None), 'GetBufferPointer'),
        STDMETHOD(DWORD, 'GetBufferSize'),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXSprite(IUnknown):
    _iid_ = GUID("{BA0B762D-7D28-43ec-B9DC-2F84443B0614}")
    _methods_ = [
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'GetTransform', [POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'SetTransform', [POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'SetWorldViewRH', [POINTER(D3DXMATRIX), POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'SetWorldViewLH', [POINTER(D3DXMATRIX), POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'Begin', [DWORD]),
        STDMETHOD(HRESULT, 'Draw', [POINTER(IDirect3DTexture9), POINTER(RECT), POINTER(D3DXVECTOR3), POINTER(D3DXVECTOR3), DWORD]),
        STDMETHOD(HRESULT, 'Flush'),
        STDMETHOD(HRESULT, 'End'),
        STDMETHOD(HRESULT, 'OnLostDevice'),
        STDMETHOD(HRESULT, 'OnResetDevice'),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXFont(IUnknown):
    _iid_ = GUID("{D79DBB70-5F21-4d36-BBC2-FF525C213CDC}")
    _methods_ = [
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'GetDescAXXX', [POINTER(None)]), #Todo
        STDMETHOD(HRESULT, 'GetDescW', [POINTER(D3DXFONT_DESCW)]),
        STDMETHOD(BOOL, 'GetTextMetricsAXXX', [POINTER(None)]), #Todo
        STDMETHOD(BOOL, 'GetTextMetricsW', [POINTER(TEXTMETRICW)]),
        STDMETHOD(HDC, 'GetDC'),
        STDMETHOD(HRESULT, 'GetGlyphData', [UINT, POINTER(IDirect3DTexture9), POINTER(RECT), POINTER(POINT)]),
        STDMETHOD(HRESULT, 'PreloadCharacters', [UINT, UINT]),
        STDMETHOD(HRESULT, 'PreloadGlyphs', [UINT, UINT]),
        STDMETHOD(HRESULT, 'PreloadTextA', [LPCSTR, INT]),
        STDMETHOD(HRESULT, 'PreloadTextW', [LPCWSTR, INT]),
        STDMETHOD(INT, 'DrawTextA', [POINTER(ID3DXSprite), LPCSTR, INT, POINTER(RECT), DWORD, DWORD]),
        STDMETHOD(INT, 'DrawTextW', [POINTER(ID3DXSprite), LPCWSTR, INT, POINTER(RECT), DWORD, DWORD]),
        STDMETHOD(HRESULT, 'OnLostDevice'),
        STDMETHOD(HRESULT, 'OnResetDevice'),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXLine(IUnknown):
    _iid_ = GUID("{D379BA7F-9042-4ac4-9F5E-58192A4C6BD8}")
    _methods_ = [
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'Begin'),
        STDMETHOD(HRESULT, 'Draw', [POINTER(D3DXVECTOR2), DWORD, DWORD]),
        STDMETHOD(HRESULT, 'DrawTransform', [POINTER(D3DXVECTOR3), DWORD, POINTER(D3DMATRIX), DWORD]),
        STDMETHOD(HRESULT, 'SetPattern', [DWORD]),
        STDMETHOD(DWORD, 'GetPattern'),
        STDMETHOD(HRESULT, 'SetPatternScale', [c_float]),
        STDMETHOD(c_float, 'GetPatternScale'),
        STDMETHOD(HRESULT, 'SetWidth', [c_float]),
        STDMETHOD(c_float, 'GetWidth'),
        STDMETHOD(HRESULT, 'SetAntialias', [BOOL]),
        STDMETHOD(BOOL, 'GetAntialias'),
        STDMETHOD(HRESULT, 'SetGLLines', [BOOL]),
        STDMETHOD(BOOL, 'GetGLLines'),
        STDMETHOD(HRESULT, 'End'),
        STDMETHOD(HRESULT, 'OnLostDevice'),
        STDMETHOD(HRESULT, 'OnResetDevice'),
    ]
#********************************************************************
# Interfaces (Effect)
#********************************************************************
# Marker interface: no methods of its own beyond IUnknown.
class ID3DXEffectPool(IUnknown):
    _iid_ = GUID("{9537AB04-3250-412e-8213-FCD2F8677933}")
    _methods_ = [
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
# Shared parameter get/set surface inherited by ID3DXEffect and ID3DXEffectCompiler.
class ID3DXBaseEffect(IUnknown):
    _iid_ = GUID("{017C18AC-103F-4417-8C51-6BF6EF1E56BE}")
    _methods_ = [
        STDMETHOD(HRESULT, 'GetDesc', [POINTER(D3DXEFFECT_DESC)]),
        STDMETHOD(HRESULT, 'GetParameterDesc', [D3DXHANDLE, POINTER(D3DXPARAMETER_DESC)]),
        STDMETHOD(HRESULT, 'GetTechniqueDesc', [D3DXHANDLE, POINTER(D3DXTECHNIQUE_DESC)]),
        STDMETHOD(HRESULT, 'GetPassDesc', [D3DXHANDLE, POINTER(D3DXPASS_DESC)]),
        STDMETHOD(HRESULT, 'GetFunctionDesc', [D3DXHANDLE, POINTER(D3DXFUNCTION_DESC)]),
        STDMETHOD(D3DXHANDLE, 'GetParameter', [D3DXHANDLE, UINT]),
        STDMETHOD(D3DXHANDLE, 'GetParameterByName', [D3DXHANDLE, LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetParameterBySemantic', [D3DXHANDLE, LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetParameterElement', [D3DXHANDLE, UINT]),
        STDMETHOD(D3DXHANDLE, 'GetTechnique', [UINT]),
        STDMETHOD(D3DXHANDLE, 'GetTechniqueByName', [LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetPass', [D3DXHANDLE, UINT]),
        STDMETHOD(D3DXHANDLE, 'GetPassByName', [D3DXHANDLE, LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetFunction', [UINT]),
        STDMETHOD(D3DXHANDLE, 'GetFunctionByName', [LPCSTR]),
        STDMETHOD(D3DXHANDLE, 'GetAnnotation', [D3DXHANDLE, UINT]),
        STDMETHOD(D3DXHANDLE, 'GetAnnotationByName', [D3DXHANDLE, LPCSTR]),
        STDMETHOD(HRESULT, 'SetValue', [D3DXHANDLE, POINTER(None), UINT]),
        STDMETHOD(HRESULT, 'GetValue', [D3DXHANDLE, POINTER(None), UINT]),
        STDMETHOD(HRESULT, 'SetBool', [D3DXHANDLE, BOOL]),
        STDMETHOD(HRESULT, 'GetBool', [D3DXHANDLE, POINTER(BOOL)]),
        STDMETHOD(HRESULT, 'SetBoolArray', [D3DXHANDLE, POINTER(BOOL), UINT]),
        STDMETHOD(HRESULT, 'GetBoolArray', [D3DXHANDLE, POINTER(BOOL), UINT]),
        STDMETHOD(HRESULT, 'SetInt', [D3DXHANDLE, INT]),
        STDMETHOD(HRESULT, 'GetInt', [D3DXHANDLE, POINTER(INT)]),
        STDMETHOD(HRESULT, 'SetIntArray', [D3DXHANDLE, POINTER(INT), UINT]),
        STDMETHOD(HRESULT, 'GetIntArray', [D3DXHANDLE, POINTER(INT), UINT]),
        STDMETHOD(HRESULT, 'SetFloat', [D3DXHANDLE, c_float]),
        STDMETHOD(HRESULT, 'GetFloat', [D3DXHANDLE, POINTER(c_float)]),
        STDMETHOD(HRESULT, 'SetFloatArray', [D3DXHANDLE, POINTER(c_float), UINT]),
        STDMETHOD(HRESULT, 'GetFloatArray', [D3DXHANDLE, POINTER(c_float), UINT]),
        STDMETHOD(HRESULT, 'SetVector', [D3DXHANDLE, POINTER(D3DXVECTOR4)]),
        STDMETHOD(HRESULT, 'GetVector', [D3DXHANDLE, POINTER(D3DXVECTOR4)]),
        STDMETHOD(HRESULT, 'SetVectorArray', [D3DXHANDLE, POINTER(D3DXVECTOR4), UINT]),
        STDMETHOD(HRESULT, 'GetVectorArray', [D3DXHANDLE, POINTER(D3DXVECTOR4), UINT]),
        STDMETHOD(HRESULT, 'SetMatrix', [D3DXHANDLE, POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'GetMatrix', [D3DXHANDLE, POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'SetMatrixArray', [D3DXHANDLE, POINTER(D3DXMATRIX), UINT]),
        STDMETHOD(HRESULT, 'GetMatrixArray', [D3DXHANDLE, POINTER(D3DXMATRIX), UINT]),
        STDMETHOD(HRESULT, 'SetMatrixPointerArray', [D3DXHANDLE, POINTER(POINTER(D3DXMATRIX)), UINT]),
        STDMETHOD(HRESULT, 'GetMatrixPointerArray', [D3DXHANDLE, POINTER(POINTER(D3DXMATRIX)), UINT]),
        STDMETHOD(HRESULT, 'SetMatrixTranspose', [D3DXHANDLE, POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'GetMatrixTranspose', [D3DXHANDLE, POINTER(D3DXMATRIX)]),
        STDMETHOD(HRESULT, 'SetMatrixTransposeArray', [D3DXHANDLE, POINTER(D3DXMATRIX), UINT]),
        STDMETHOD(HRESULT, 'GetMatrixTransposeArray', [D3DXHANDLE, POINTER(D3DXMATRIX), UINT]),
        STDMETHOD(HRESULT, 'SetMatrixTransposePointerArray', [D3DXHANDLE, POINTER(POINTER(D3DXMATRIX)), UINT]),
        STDMETHOD(HRESULT, 'GetMatrixTransposePointerArray', [D3DXHANDLE, POINTER(POINTER(D3DXMATRIX)), UINT]),
        STDMETHOD(HRESULT, 'SetString', [D3DXHANDLE, LPCSTR]),
        STDMETHOD(HRESULT, 'GetString', [D3DXHANDLE, POINTER(LPCSTR)]),
        STDMETHOD(HRESULT, 'SetTexture', [D3DXHANDLE, POINTER(IDirect3DBaseTexture9)]),
        STDMETHOD(HRESULT, 'GetTexture', [D3DXHANDLE, POINTER(IDirect3DBaseTexture9)]),
        STDMETHOD(HRESULT, 'GetPixelShader', [D3DXHANDLE, POINTER(IDirect3DPixelShader9)]),
        STDMETHOD(HRESULT, 'GetVertexShader', [D3DXHANDLE, POINTER(IDirect3DVertexShader9)]),
        STDMETHOD(HRESULT, 'SetArrayRange', [D3DXHANDLE, UINT, UINT]),
    ]
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXEffectStateManager(IUnknown):
    _iid_ = GUID("{79AAB587-6DBC-4fa7-82DE-37FA1781C5CE}")
    _methods_ = [
        STDMETHOD(HRESULT, 'SetTransform', [DWORD, POINTER(D3DMATRIX)]),
        STDMETHOD(HRESULT, 'SetMaterial', [D3DMATERIAL9]),
        STDMETHOD(HRESULT, 'SetLight', [DWORD, D3DLIGHT9]),
        STDMETHOD(HRESULT, 'LightEnable', [DWORD, BOOL]),
        STDMETHOD(HRESULT, 'SetRenderState', [DWORD, DWORD]),
        STDMETHOD(HRESULT, 'SetTexture', [DWORD, POINTER(IDirect3DBaseTexture9)]),
        STDMETHOD(HRESULT, 'SetTextureStageState', [DWORD, DWORD, DWORD]),
        STDMETHOD(HRESULT, 'SetSamplerState', [DWORD, DWORD, DWORD]),
        STDMETHOD(HRESULT, 'SetNPatchMode', [c_float]),
        STDMETHOD(HRESULT, 'SetFVF', [DWORD]),
        STDMETHOD(HRESULT, 'SetVertexShader', [POINTER(IDirect3DVertexShader9)]),
        STDMETHOD(HRESULT, 'SetVertexShaderConstantF', [UINT, c_float, UINT]),
        STDMETHOD(HRESULT, 'SetVertexShaderConstantI', [UINT, INT, UINT]),
        STDMETHOD(HRESULT, 'SetVertexShaderConstantB', [UINT, BOOL, UINT]),
        STDMETHOD(HRESULT, 'SetPixelShader', [POINTER(IDirect3DPixelShader9)]),
        STDMETHOD(HRESULT, 'SetPixelShaderConstantF', [UINT, c_float, UINT]),
        STDMETHOD(HRESULT, 'SetPixelShaderConstantI', [UINT, INT, UINT]),
        STDMETHOD(HRESULT, 'SetPixelShaderConstantB', [UINT, BOOL, UINT]),
    ]
# ctypes COM wrapper: _methods_ extends the ID3DXBaseEffect vtable — do not reorder.
class ID3DXEffect(ID3DXBaseEffect):
    _iid_ = GUID("{F6CEB4B3-4E4C-40dd-B883-8D8DE5EA0CD5}")
    _methods_ = [
        STDMETHOD(HRESULT, 'GetPool', [POINTER(ID3DXEffectPool)]),
        STDMETHOD(HRESULT, 'SetTechnique', [D3DXHANDLE]),
        STDMETHOD(D3DXHANDLE, 'GetCurrentTechnique'),
        STDMETHOD(HRESULT, 'ValidateTechnique', [D3DXHANDLE]),
        STDMETHOD(HRESULT, 'FindNextValidTechnique', [D3DXHANDLE, D3DXHANDLE]),
        STDMETHOD(BOOL, 'IsParameterUsed', [D3DXHANDLE, D3DXHANDLE]),
        STDMETHOD(HRESULT, 'Begin', [POINTER(UINT), DWORD]),
        STDMETHOD(HRESULT, 'BeginPass', [UINT]),
        STDMETHOD(HRESULT, 'CommitChanges'),
        STDMETHOD(HRESULT, 'EndPass'),
        STDMETHOD(HRESULT, 'End'),
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'OnLostDevice'),
        STDMETHOD(HRESULT, 'OnResetDevice'),
        STDMETHOD(HRESULT, 'SetStateManager', [POINTER(ID3DXEffectStateManager)]),
        STDMETHOD(HRESULT, 'GetStateManager', [POINTER(POINTER(ID3DXEffectStateManager))]),
        STDMETHOD(HRESULT, 'BeginParameterBlock'),
        STDMETHOD(D3DXHANDLE, 'EndParameterBlock'),
        STDMETHOD(HRESULT, 'ApplyParameterBlock', [D3DXHANDLE]),
        STDMETHOD(HRESULT, 'DeleteParameterBlock', [D3DXHANDLE]),
        STDMETHOD(HRESULT, 'CloneEffect', [POINTER(IDirect3DDevice9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'SetRawValue', [D3DXHANDLE, POINTER(None), UINT, UINT]),
    ]
# ctypes COM wrapper: _methods_ extends the ID3DXBaseEffect vtable — do not reorder.
# NOTE(review): CompileEffect/CompileShader omit their argument lists — confirm
# against d3dx9effect.h before calling them.
class ID3DXEffectCompiler(ID3DXBaseEffect):
    _iid_ = GUID("{51B8A949-1A31-47e6-BEA0-4B30DB53F1E0}")
    _methods_ = [
        STDMETHOD(HRESULT, 'SetLiteral', [D3DXHANDLE, BOOL]),
        STDMETHOD(HRESULT, 'GetLiteral', [D3DXHANDLE, BOOL]),
        STDMETHOD(HRESULT, 'CompileEffect'),
        STDMETHOD(HRESULT, 'CompileShader'),
    ]
#********************************************************************
# Interfaces (Mesh)
#********************************************************************
# ctypes COM wrapper: _methods_ order defines the vtable layout — do not reorder.
class ID3DXBaseMesh(IUnknown):
    _iid_ = GUID("{7ED943DD-52E8-40b5-A8D8-76685C406330}")
    _methods_ = [
        STDMETHOD(HRESULT, 'DrawSubset', [DWORD]),
        STDMETHOD(DWORD, 'GetNumFaces'),
        STDMETHOD(DWORD, 'GetNumVertices'),
        STDMETHOD(DWORD, 'GetFVF'),
        STDMETHOD(HRESULT, 'GetDeclaration', [POINTER(D3DVERTEXELEMENT9)]),
        STDMETHOD(DWORD, 'GetNumBytesPerVertex'),
        STDMETHOD(DWORD, 'GetOptions'),
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'CloneMeshFVF', [DWORD, DWORD, POINTER(IDirect3DDevice9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'CloneMesh', [DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(IDirect3DDevice9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'GetVertexBuffer', [POINTER(POINTER(IDirect3DVertexBuffer9))]),
        STDMETHOD(HRESULT, 'GetIndexBuffer', [POINTER(POINTER(IDirect3DIndexBuffer9))]),
        STDMETHOD(HRESULT, 'LockVertexBuffer', [DWORD, POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'UnlockVertexBuffer'),
        STDMETHOD(HRESULT, 'LockIndexBuffer', [DWORD, POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'UnlockIndexBuffer'),
        STDMETHOD(HRESULT, 'GetAttributeTable', [POINTER(D3DXATTRIBUTERANGE), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'ConvertPointRepsToAdjacency', [POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'ConvertAdjacencyToPointReps', [POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GenerateAdjacency', [c_float, POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'UpdateSemantics', [POINTER(D3DVERTEXELEMENT9)]),
    ]
class ID3DXMesh(ID3DXBaseMesh):
    """ctypes COM wrapper for ID3DXMesh (standard mesh on top of ID3DXBaseMesh)."""
    _iid_ = GUID("{4020E5C2-1403-4929-883F-E2E849FAC195}")
    _methods_ = [
        STDMETHOD(HRESULT, 'LockAttributeBuffer', [DWORD, POINTER(POINTER(DWORD))]),
        STDMETHOD(HRESULT, 'UnlockAttributeBuffer'),
        STDMETHOD(HRESULT, 'Optimize', [DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer)), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'OptimizeInplace', [DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer))]),
        STDMETHOD(HRESULT, 'SetAttributeTable', [POINTER(D3DXATTRIBUTERANGE), DWORD]),
    ]
class ID3DXPMesh(ID3DXBaseMesh):
    """ctypes COM wrapper for ID3DXPMesh (progressive / LOD mesh)."""
    _iid_ = GUID("{8875769A-D579-4088-AAEB-534D1AD84E96}")
    _methods_ = [
        STDMETHOD(HRESULT, 'ClonePMeshFVF', [DWORD, DWORD, POINTER(IDirect3DDevice9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'ClonePMesh', [DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(IDirect3DDevice9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'SetNumFaces', [DWORD]),
        STDMETHOD(HRESULT, 'SetNumVertices', [DWORD]),
        STDMETHOD(DWORD, 'GetMaxFaces'),
        STDMETHOD(DWORD, 'GetMinFaces'),
        STDMETHOD(DWORD, 'GetMaxVertices'),
        STDMETHOD(DWORD, 'GetMinVertices'),
        STDMETHOD(HRESULT, 'Save', [POINTER(None), POINTER(D3DXMATERIAL), POINTER(None), DWORD]),
        STDMETHOD(HRESULT, 'Optimize', [DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer)), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'OptimizeBaseLOD', [DWORD, POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'TrimByFaces', [DWORD, DWORD, POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'TrimByVertices', [DWORD, DWORD, POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GetAdjacency', [POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GenerateVertexHistory', [POINTER(DWORD)]),
    ]
class ID3DXSPMesh(IUnknown):
    """ctypes COM wrapper for ID3DXSPMesh (simplification mesh)."""
    _iid_ = GUID("{667EA4C7-F1CD-4386-B523-7C0290B83CC5}")
    _methods_ = [
        STDMETHOD(DWORD, 'GetNumFaces'),
        STDMETHOD(DWORD, 'GetNumVertices'),
        STDMETHOD(DWORD, 'GetFVF'),
        STDMETHOD(HRESULT, 'GetDeclaration', [POINTER(D3DVERTEXELEMENT9)]),
        STDMETHOD(DWORD, 'GetOptions'),
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'CloneMeshFVF', [DWORD, DWORD, POINTER(IDirect3DDevice9), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXMesh))]),
        STDMETHOD(HRESULT, 'CloneMesh', [DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(IDirect3DDevice9), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXMesh))]),
        # NOTE(review): unlike CloneMesh above, ClonePMeshFVF takes no device
        # pointer here — confirm against the d3dx9mesh.h prototype.
        STDMETHOD(HRESULT, 'ClonePMeshFVF', [DWORD, DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(DWORD), POINTER(c_float), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'ClonePMesh', [DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(IDirect3DDevice9), POINTER(DWORD), POINTER(c_float), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'ReduceFaces', [DWORD]),
        STDMETHOD(HRESULT, 'ReduceVertices', [DWORD]),
        STDMETHOD(DWORD, 'GetMaxFaces'),
        STDMETHOD(DWORD, 'GetMaxVertices'),
        STDMETHOD(HRESULT, 'GetVertexAttributeWeights', [POINTER(D3DXATTRIBUTEWEIGHTS)]),
        STDMETHOD(HRESULT, 'GetVertexWeights', [c_float]),
    ]
class ID3DXSkinInfo(IUnknown):
    """ctypes COM wrapper for ID3DXSkinInfo (bone/skinning information)."""
    _iid_ = GUID("{11EAA540-F9A6-4d49-AE6A-E19221F70CC4}")
    _methods_ = [
        STDMETHOD(HRESULT, 'SetBoneInfluence', [DWORD, DWORD, POINTER(DWORD), POINTER(c_float)]),
        STDMETHOD(HRESULT, 'SetBoneVertexInfluence', [DWORD, DWORD, c_float]),
        STDMETHOD(DWORD, 'GetNumBoneInfluences', [DWORD]),
        STDMETHOD(HRESULT, 'GetBoneInfluence', [DWORD, POINTER(DWORD), POINTER(c_float)]),
        STDMETHOD(HRESULT, 'GetBoneVertexInfluence', [DWORD, DWORD, POINTER(c_float), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GetMaxVertexInfluences', [POINTER(DWORD)]),
        STDMETHOD(DWORD, 'GetNumBones'),
        STDMETHOD(HRESULT, 'FindBoneVertexInfluenceIndex', [DWORD, DWORD, POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GetMaxFaceInfluences', [POINTER(IDirect3DIndexBuffer9), DWORD, POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'SetMinBoneInfluence', [c_float]),
        STDMETHOD(c_float, 'GetMinBoneInfluence'),
        STDMETHOD(HRESULT, 'SetBoneName', [DWORD, LPCSTR]),
        STDMETHOD(LPCSTR, 'GetBoneName', [DWORD]),
        STDMETHOD(HRESULT, 'SetBoneOffsetMatrix', [DWORD, POINTER(D3DXMATRIX)]),
        STDMETHOD(POINTER(D3DXMATRIX), 'GetBoneOffsetMatrix', [DWORD]),
        STDMETHOD(HRESULT, 'Clone', [POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'Remap', [DWORD, POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'SetFVF', [DWORD]),
        STDMETHOD(HRESULT, 'SetDeclaration', [POINTER(D3DVERTEXELEMENT9)]),
        STDMETHOD(DWORD, 'GetFVF'),
        STDMETHOD(HRESULT, 'GetDeclaration', [POINTER(D3DVERTEXELEMENT9)]),
        STDMETHOD(HRESULT, 'UpdateSkinnedMesh', [POINTER(D3DXMATRIX), POINTER(D3DXMATRIX), POINTER(None), POINTER(None)]),
        STDMETHOD(HRESULT, 'ConvertToBlendedMesh', [POINTER(ID3DXMesh), DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer)), POINTER(DWORD), POINTER(DWORD),
                  POINTER(POINTER(ID3DXBuffer)), POINTER(POINTER(ID3DXMesh))]),
        STDMETHOD(HRESULT, 'ConvertToIndexedBlendedMesh', [POINTER(ID3DXMesh), DWORD, DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer)),
                  POINTER(DWORD), POINTER(DWORD), POINTER(POINTER(ID3DXBuffer)), POINTER(POINTER(ID3DXMesh))]),
    ]
class ID3DXPatchMesh(IUnknown):
    """ctypes COM wrapper for ID3DXPatchMesh (higher-order patch mesh)."""
    _iid_ = GUID("{3CE6CC22-DBF2-44f4-894D-F9C34A337139}")
    _methods_ = [
        STDMETHOD(DWORD, 'GetNumPatches'),
        STDMETHOD(DWORD, 'GetNumVertices'),
        STDMETHOD(HRESULT, 'GetDeclaration', [POINTER(D3DVERTEXELEMENT9)]),
        STDMETHOD(DWORD, 'GetControlVerticesPerPatch'),
        STDMETHOD(DWORD, 'GetOptions'),
        STDMETHOD(HRESULT, 'GetDevice', [POINTER(POINTER(IDirect3DDevice9))]),
        STDMETHOD(HRESULT, 'GetPatchInfo', [POINTER(D3DXPATCHINFO)]),
        STDMETHOD(HRESULT, 'GetVertexBuffer', [POINTER(POINTER(IDirect3DVertexBuffer9))]),
        STDMETHOD(HRESULT, 'GetIndexBuffer', [POINTER(POINTER(IDirect3DIndexBuffer9))]),
        STDMETHOD(HRESULT, 'LockVertexBuffer', [DWORD, POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'UnlockVertexBuffer'),
        STDMETHOD(HRESULT, 'LockIndexBuffer', [DWORD, POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'UnlockIndexBuffer'),
        STDMETHOD(HRESULT, 'LockAttributeBuffer', [DWORD, POINTER(POINTER(DWORD))]),
        STDMETHOD(HRESULT, 'UnlockAttributeBuffer'),
        STDMETHOD(HRESULT, 'GetTessSize', [c_float, DWORD, POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'GenerateAdjacency', [c_float]),
        STDMETHOD(HRESULT, 'CloneMesh', [DWORD, POINTER(D3DVERTEXELEMENT9), POINTER(POINTER(None))]),
        STDMETHOD(HRESULT, 'Optimize', [DWORD]),
        STDMETHOD(HRESULT, 'SetDisplaceParam', [POINTER(IDirect3DBaseTexture9), DWORD, DWORD, DWORD, DWORD, DWORD]),
        STDMETHOD(HRESULT, 'GetDisplaceParam', [POINTER(POINTER(IDirect3DBaseTexture9)), POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(DWORD)]),
        STDMETHOD(HRESULT, 'Tessellate', [c_float, POINTER(ID3DXMesh)]),
        STDMETHOD(HRESULT, 'TessellateAdaptive', [POINTER(D3DXVECTOR4), DWORD, DWORD, POINTER(ID3DXMesh)]),
    ]
class ID3DXConstantTable(IUnknown):
_iid_ = GUID("{AB3C758F-093E-4356-B762-4DB18F1B3A01}")
_methods_ = [
STDMETHOD(c_void_p, 'GetBufferPointer'),
STDMETHOD(DWORD, 'GetBufferSize'),
STDMETHOD(HRESULT, 'GetDesc'),
STDMETHOD(HRESULT, 'GetConstantDesc'),
STDMETHOD(UINT, 'GetSamplerIndex',[D3DXHANDLE]),
STDMETHOD(D3DXHANDLE, 'GetConstant',[D3DXHANDLE,UINT]),
STDMETHOD(D3DXHANDLE, 'GetConstantByName',[D3DXHANDLE,LPCSTR]),
STDMETHOD(D3DXHANDLE, 'GetConstantElement',[D3DXHANDLE,UINT]),
STDMETHOD(HRESULT,'SetDefaults',[POINTER(IDirect3DDevice9)]),
STDMETHOD(HRESULT,'SetValue',[POINTER(IDirect3DDevice9),D3DXHANDLE,c_void_p, UINT]),
STDMETHOD(HRESULT,'SetBool',[POINTER(IDirect3DDevice9), D3DXHANDLE , BOOL]),
STDMETHOD(HRESULT,'SetBoolArray',[POINTER(IDirect3DDevice9),D3DXHANDLE, POINTER(BOOL), UINT]),
STDMETHOD(HRESULT,'SetInt',[POINTER(IDirect3DDevice9),D3DXHANDLE , INT]),
STDMETHOD(HRESULT,'SetIntArray',[POINTER(IDirect3DDevice9),D3DXHANDLE , POINTER(INT), UINT]),
STDMETHOD(HRESULT,'SetFloat',[POINTER(IDirect3DDevice9),D3DXHANDLE , FLOAT]),
STDMETHOD(HRESULT,'SetFloatArray',[POINTER(IDirect3DDevice9),D3DXHANDLE , POINTER(FLOAT), UINT]),
STDMETHOD(HRESULT,'SetVector',[POINTER(IDirect3DDevice9),D3DXHANDLE , POINTER(D3DXVECTOR4)]),
] | 0.169612 | 0.202818 |
import sys
import argparse
from pathlib import Path
import time
import pyperclip
from collections import defaultdict
from math import prod
def out(result):
    """Print *result* and also copy it to the clipboard via pyperclip.

    The parameter was renamed from ``str``, which shadowed the builtin.
    """
    print(result)
    pyperclip.copy(result)
def read_positions(filename):
    """Parse the two players' starting positions from the puzzle input.

    Each input line looks like ``Player N starting position: P``; the
    result is ``[position_of_player_1, position_of_player_2]``.
    """
    by_player = {}
    with open(filename) as fin:
        for _ in range(2):
            words = fin.readline().split(" ")
            by_player[int(words[1])] = int(words[4])
    return [by_player[1], by_player[2]]
def sim_practice_game(positions):
    """Part 1: play with the deterministic 100-sided die until one player
    reaches 1000 points, then emit (number of rolls) * (losing score).

    Works on a 0-indexed copy of *positions* so the caller's list is no
    longer mutated (the original modified it in place).
    """
    dice = list(range(1, 101))
    rolls = 0
    # 0-indexed board squares 0..9; landing score is square index + 1.
    positions = [p - 1 for p in positions]
    scores = [0] * 2
    while all(s < 1000 for s in scores):
        for i in range(2):
            rolls += 3
            positions[i] = (positions[i] + sum(dice[:3])) % 10
            scores[i] += positions[i] + 1
            # Rotate the deterministic die past the three faces just rolled.
            dice = dice[3:] + dice[:3]
            if scores[i] >= 1000:
                break
    out(rolls * min(scores))
def sim_quantum_game(positions):
    """Part 2: count the universes in which each player wins the Dirac-dice
    game (first to 21), then emit the larger count.

    Replaces the original explicit stack — which revisited identical
    (position, score) states exponentially often — with a memoised
    recursion over the same state space; the emitted result is unchanged.
    """
    from functools import lru_cache

    # (sum of three quantum d3 rolls, number of universes producing it)
    roll_outcomes = ((3, 1), (4, 3), (5, 6), (6, 7), (7, 6), (8, 3), (9, 1))

    @lru_cache(maxsize=None)
    def count_wins(pos_cur, pos_oth, score_cur, score_oth):
        """Return (wins for the player about to move, wins for the other)."""
        wins_cur = wins_oth = 0
        for outcome, universes in roll_outcomes:
            new_pos = (pos_cur + outcome) % 10
            new_score = score_cur + new_pos + 1
            if new_score >= 21:
                wins_cur += universes
            else:
                oth, cur = count_wins(pos_oth, new_pos, score_oth, new_score)
                wins_cur += universes * cur
                wins_oth += universes * oth
        return wins_cur, wins_oth

    wins = count_wins(positions[0] - 1, positions[1] - 1, 0, 0)
    out(max(wins))
def part1(filename):
    """Solve part 1: run the deterministic practice game on *filename*."""
    positions = read_positions(filename)
    sim_practice_game(positions)
def part2(filename):
    """Solve part 2: run the quantum Dirac-dice game on *filename*."""
    positions = read_positions(filename)
    sim_quantum_game(positions)
if __name__ == "__main__":
day = sys.argv[0][-5:-3]
parser = argparse.ArgumentParser(description=f'Solution to Advent of Code 2021 Day {day:s}')
parser.add_argument('file', help='path to input file')
parser.add_argument('--part', dest='part', type=int, default=1, choices=range(1, 3),
help='select part (1) or part (2) solution')
args = parser.parse_args()
file_path = Path(args.file)
if not file_path.exists():
print("ERROR: Input file does not exist", file=sys.stderr)
elif not file_path.is_file():
print("ERROR: Input path is not a file", file=sys.stderr)
else:
start = time.time()
if args.part == 1:
part1(args.file)
else:
part2(args.file)
end = time.time()
print( "%f ms" % ((end-start)*1000)) | day21.py | import sys
import argparse
from pathlib import Path
import time
import pyperclip
from collections import defaultdict
from math import prod
def out(result):
    """Print *result* and also copy it to the clipboard via pyperclip.

    The parameter was renamed from ``str``, which shadowed the builtin.
    """
    print(result)
    pyperclip.copy(result)
def read_positions(filename):
    """Parse the two players' starting positions from the puzzle input.

    Each input line looks like ``Player N starting position: P``; the
    result is ``[position_of_player_1, position_of_player_2]``.
    """
    by_player = {}
    with open(filename) as fin:
        for _ in range(2):
            words = fin.readline().split(" ")
            by_player[int(words[1])] = int(words[4])
    return [by_player[1], by_player[2]]
def sim_practice_game(positions):
    """Part 1: play with the deterministic 100-sided die until one player
    reaches 1000 points, then emit (number of rolls) * (losing score).

    Works on a 0-indexed copy of *positions* so the caller's list is no
    longer mutated (the original modified it in place).
    """
    dice = list(range(1, 101))
    rolls = 0
    # 0-indexed board squares 0..9; landing score is square index + 1.
    positions = [p - 1 for p in positions]
    scores = [0] * 2
    while all(s < 1000 for s in scores):
        for i in range(2):
            rolls += 3
            positions[i] = (positions[i] + sum(dice[:3])) % 10
            scores[i] += positions[i] + 1
            # Rotate the deterministic die past the three faces just rolled.
            dice = dice[3:] + dice[:3]
            if scores[i] >= 1000:
                break
    out(rolls * min(scores))
def sim_quantum_game(positions):
    """Part 2: count the universes in which each player wins the Dirac-dice
    game (first to 21), then emit the larger count.

    Replaces the original explicit stack — which revisited identical
    (position, score) states exponentially often — with a memoised
    recursion over the same state space; the emitted result is unchanged.
    """
    from functools import lru_cache

    # (sum of three quantum d3 rolls, number of universes producing it)
    roll_outcomes = ((3, 1), (4, 3), (5, 6), (6, 7), (7, 6), (8, 3), (9, 1))

    @lru_cache(maxsize=None)
    def count_wins(pos_cur, pos_oth, score_cur, score_oth):
        """Return (wins for the player about to move, wins for the other)."""
        wins_cur = wins_oth = 0
        for outcome, universes in roll_outcomes:
            new_pos = (pos_cur + outcome) % 10
            new_score = score_cur + new_pos + 1
            if new_score >= 21:
                wins_cur += universes
            else:
                oth, cur = count_wins(pos_oth, new_pos, score_oth, new_score)
                wins_cur += universes * cur
                wins_oth += universes * oth
        return wins_cur, wins_oth

    wins = count_wins(positions[0] - 1, positions[1] - 1, 0, 0)
    out(max(wins))
def part1(filename):
    """Solve part 1: run the deterministic practice game on *filename*."""
    positions = read_positions(filename)
    sim_practice_game(positions)
def part2(filename):
    """Solve part 2: run the quantum Dirac-dice game on *filename*."""
    positions = read_positions(filename)
    sim_quantum_game(positions)
if __name__ == "__main__":
day = sys.argv[0][-5:-3]
parser = argparse.ArgumentParser(description=f'Solution to Advent of Code 2021 Day {day:s}')
parser.add_argument('file', help='path to input file')
parser.add_argument('--part', dest='part', type=int, default=1, choices=range(1, 3),
help='select part (1) or part (2) solution')
args = parser.parse_args()
file_path = Path(args.file)
if not file_path.exists():
print("ERROR: Input file does not exist", file=sys.stderr)
elif not file_path.is_file():
print("ERROR: Input path is not a file", file=sys.stderr)
else:
start = time.time()
if args.part == 1:
part1(args.file)
else:
part2(args.file)
end = time.time()
print( "%f ms" % ((end-start)*1000)) | 0.158044 | 0.349449 |
import math
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
from torch.utils import data
from torchvision import transforms as T
# Download URLs for torchvision's ImageNet-pre-trained VGG weights, keyed by
# architecture name; used by the vgg* factory functions below.
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
def make_layers(cfg, batch_norm=False):
    """Build the VGG convolutional feature stack described by *cfg*.

    *cfg* is a sequence where an int means a 3x3 conv with that many output
    channels (followed by ReLU, optionally preceded by BatchNorm) and the
    string 'M' means a 2x2/stride-2 max-pool.  Input is assumed to have 3
    channels.  Returns an ``nn.Sequential``.
    """
    layers = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(spec))
        layers.append(nn.ReLU(inplace=True))
        channels = spec
    return nn.Sequential(*layers)
# Layer configurations: ints are conv output channels, 'M' is a 2x2 max-pool.
# A/B/D/E are the standard VGG 11/13/16/19 layouts; 'F' is a reduced-width
# VGG-16-style layout used by vgg161_4().
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    'F': [16, 16, 'M', 32, 32, 'M', 64, 64, 64, 'M', 128, 128, 128, 'M', 128, 128, 128, 'M'],
}
class truncated_VGG(nn.Module):
    """VGG network truncated to its convolutional feature extractor.

    forward() returns the raw feature maps from *features*; there is no
    classifier head.  ``num_classes`` is accepted for signature
    compatibility but is unused in this class.
    """
    def __init__(self, features, num_classes=1000, init_weights=True):
        super(truncated_VGG, self).__init__()
        self.features = features
        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        # He-style init for convs, (weight=1, bias=0) for batch norms,
        # N(0, 0.01) for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, x):
        x = self.features(x)
        return x
def vgg19(pretrained=False, **kwargs):
    """VGG 19-layer model (configuration "E").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = truncated_VGG(make_layers(cfg['E']), **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['vgg19'])
        model.load_state_dict(state_dict)
    return model
def vgg16(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = truncated_VGG(make_layers(cfg['D']), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
    return model
def vgg161_4(pretrained=False, **kwargs):
    """Reduced-width VGG-16-style model (configuration "F").

    Args:
        pretrained (bool): accepted for API symmetry with vgg16/vgg19, but no
            pre-trained weights exist for this variant — setting it only
            disables random weight initialisation; nothing is loaded.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = truncated_VGG(make_layers(cfg['F']), **kwargs)
    return model
# Registry of ready-made backbones, keyed by name, used by Classificationmodel.
# NOTE(review): all three models are constructed at import time, and the
# pretrained=True entries download ImageNet weights on import — consider
# switching to lazy factories.
vgg_type={
    "vgg16":vgg16(pretrained=True),
    "vgg19": vgg19(pretrained=True),
    "vgg161_4": vgg161_4(pretrained=False),
}
class Classificationmodel(nn.Module):
    """Age-comparison head on top of a truncated VGG feature extractor.

    forward() returns two 70-way outputs: ``fc1`` (sigmoid-activated) and
    ``fc2`` (a softmax distribution computed *from* fc1).
    """
    def __init__(self, vggtype):
        super(Classificationmodel, self).__init__()
        # Backbone is looked up by name in the module-level registry.
        self.vggnet = vgg_type[vggtype]
        self.conv = nn.Conv2d(128, 128, kernel_size=6)
        self.fc1 = nn.Linear(128, 70)
        # BUGFIX: fc2 consumes the 70-dim output of fc1 in forward(), so its
        # in_features must be 70; the previous nn.Linear(128, 70) crashed at
        # runtime with a shape mismatch.
        self.fc2 = nn.Linear(70, 70)
        self._initialize_weights()

    def _initialize_weights(self):
        # He-style init for convs, (1, 0) for batch norms, N(0, 0.01) for linears.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, image):
        x = self.vggnet(image)
        x = self.conv(x)
        # assumes the backbone+conv reduce the input to 128 features per
        # sample (e.g. a 1x1 spatial map) — TODO confirm input image size.
        x = x.reshape(-1, 128)
        fc1 = self.fc1(x)
        fc1 = torch.sigmoid(fc1)  # F.sigmoid is deprecated
        fc2 = self.fc2(fc1)
        fc2 = F.softmax(fc2, dim=-1)
        return fc1, fc2
def getCossloss(fc1, one, cost_one):
    """Cost-weighted loss term: mean of ``cost_one * sqrt(fc1 - one)``.

    NOTE(review): ``torch.sqrt(fc1 - one)`` produces NaN wherever
    ``fc1 < one``; if a squared-error term was intended this should be
    ``(fc1 - one) ** 2`` — confirm against the training code before changing.
    """
    L_hyper=torch.mean(cost_one*torch.sqrt(fc1-one))
    return L_hyper
def getKLloss(fc2, y_sig01):
    """Per-sample mean over classes of ``y_sig01 * log(fc2)``.

    The original first statement called ``fc2.shape()`` — a TypeError, since
    ``shape`` is an attribute — and discarded the ``torch.randn`` result
    anyway; that dead, crashing line has been removed.

    NOTE(review): for a loss this is missing a leading minus (it is the
    negative of a cross-entropy term) — confirm the sign with the caller.
    """
    outresult = torch.mean(y_sig01 * torch.log(fc2), dim=-1)
    return outresult
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
from torch.utils import data
from torchvision import transforms as T
# Download URLs for torchvision's ImageNet-pre-trained VGG weights, keyed by
# architecture name; used by the vgg* factory functions below.
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
def make_layers(cfg, batch_norm=False):
    """Build the VGG convolutional feature stack described by *cfg*.

    *cfg* is a sequence where an int means a 3x3 conv with that many output
    channels (followed by ReLU, optionally preceded by BatchNorm) and the
    string 'M' means a 2x2/stride-2 max-pool.  Input is assumed to have 3
    channels.  Returns an ``nn.Sequential``.
    """
    layers = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(spec))
        layers.append(nn.ReLU(inplace=True))
        channels = spec
    return nn.Sequential(*layers)
# Layer configurations: ints are conv output channels, 'M' is a 2x2 max-pool.
# A/B/D/E are the standard VGG 11/13/16/19 layouts; 'F' is a reduced-width
# VGG-16-style layout used by vgg161_4().
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    'F': [16, 16, 'M', 32, 32, 'M', 64, 64, 64, 'M', 128, 128, 128, 'M', 128, 128, 128, 'M'],
}
class truncated_VGG(nn.Module):
    """VGG network truncated to its convolutional feature extractor.

    forward() returns the raw feature maps from *features*; there is no
    classifier head.  ``num_classes`` is accepted for signature
    compatibility but is unused in this class.
    """
    def __init__(self, features, num_classes=1000, init_weights=True):
        super(truncated_VGG, self).__init__()
        self.features = features
        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        # He-style init for convs, (weight=1, bias=0) for batch norms,
        # N(0, 0.01) for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, x):
        x = self.features(x)
        return x
def vgg19(pretrained=False, **kwargs):
    """VGG 19-layer model (configuration "E").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = truncated_VGG(make_layers(cfg['E']), **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['vgg19'])
        model.load_state_dict(state_dict)
    return model
def vgg16(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = truncated_VGG(make_layers(cfg['D']), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
    return model
def vgg161_4(pretrained=False, **kwargs):
    """Reduced-width VGG-16-style model (configuration "F").

    Args:
        pretrained (bool): accepted for API symmetry with vgg16/vgg19, but no
            pre-trained weights exist for this variant — setting it only
            disables random weight initialisation; nothing is loaded.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = truncated_VGG(make_layers(cfg['F']), **kwargs)
    return model
# Registry of ready-made backbones, keyed by name, used by Classificationmodel.
# NOTE(review): all three models are constructed at import time, and the
# pretrained=True entries download ImageNet weights on import — consider
# switching to lazy factories.
vgg_type={
    "vgg16":vgg16(pretrained=True),
    "vgg19": vgg19(pretrained=True),
    "vgg161_4": vgg161_4(pretrained=False),
}
class Classificationmodel(nn.Module):
    """Age-comparison head on top of a truncated VGG feature extractor.

    forward() returns two 70-way outputs: ``fc1`` (sigmoid-activated) and
    ``fc2`` (a softmax distribution computed *from* fc1).
    """
    def __init__(self, vggtype):
        super(Classificationmodel, self).__init__()
        # Backbone is looked up by name in the module-level registry.
        self.vggnet = vgg_type[vggtype]
        self.conv = nn.Conv2d(128, 128, kernel_size=6)
        self.fc1 = nn.Linear(128, 70)
        # BUGFIX: fc2 consumes the 70-dim output of fc1 in forward(), so its
        # in_features must be 70; the previous nn.Linear(128, 70) crashed at
        # runtime with a shape mismatch.
        self.fc2 = nn.Linear(70, 70)
        self._initialize_weights()

    def _initialize_weights(self):
        # He-style init for convs, (1, 0) for batch norms, N(0, 0.01) for linears.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, image):
        x = self.vggnet(image)
        x = self.conv(x)
        # assumes the backbone+conv reduce the input to 128 features per
        # sample (e.g. a 1x1 spatial map) — TODO confirm input image size.
        x = x.reshape(-1, 128)
        fc1 = self.fc1(x)
        fc1 = torch.sigmoid(fc1)  # F.sigmoid is deprecated
        fc2 = self.fc2(fc1)
        fc2 = F.softmax(fc2, dim=-1)
        return fc1, fc2
def getCossloss(fc1, one, cost_one):
    """Cost-weighted loss term: mean of ``cost_one * sqrt(fc1 - one)``.

    NOTE(review): ``torch.sqrt(fc1 - one)`` produces NaN wherever
    ``fc1 < one``; if a squared-error term was intended this should be
    ``(fc1 - one) ** 2`` — confirm against the training code before changing.
    """
    L_hyper=torch.mean(cost_one*torch.sqrt(fc1-one))
    return L_hyper
def getKLloss(fc2, y_sig01):
    """Per-sample mean over classes of ``y_sig01 * log(fc2)``.

    The original first statement called ``fc2.shape()`` — a TypeError, since
    ``shape`` is an attribute — and discarded the ``torch.randn`` result
    anyway; that dead, crashing line has been removed.

    NOTE(review): for a loss this is missing a leading minus (it is the
    negative of a cross-entropy term) — confirm the sign with the caller.
    """
    outresult = torch.mean(y_sig01 * torch.log(fc2), dim=-1)
    return outresult
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database; the generated messages/services register here.
_sym_db = _symbol_database.Default()

# NOTE: protoc-generated module — do not edit by hand; regenerate from
# text_to_speech.proto instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='text_to_speech.proto',
  package='rero',
  syntax='proto3',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x14text_to_speech.proto\x12\x04rero\"\x1a\n\nTTSRequest\x12\x0c\n\x04text\x18\x01 \x01(\t\"\x1d\n\x0bTTSResponse\x12\x0e\n\x06status\x18\x01 \x01(\x08\x32<\n\x0cTextToSpeech\x12,\n\x03TTS\x12\x10.rero.TTSRequest\x1a\x11.rero.TTSResponse\"\x00\x62\x06proto3'
)

# Descriptor for the TTSRequest message: one string field `text` (field #1).
_TTSREQUEST = _descriptor.Descriptor(
  name='TTSRequest',
  full_name='rero.TTSRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='text', full_name='rero.TTSRequest.text', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=30,
  serialized_end=56,
)

# Descriptor for the TTSResponse message: one bool field `status` (field #1).
_TTSRESPONSE = _descriptor.Descriptor(
  name='TTSResponse',
  full_name='rero.TTSResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='status', full_name='rero.TTSResponse.status', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=58,
  serialized_end=87,
)

DESCRIPTOR.message_types_by_name['TTSRequest'] = _TTSREQUEST
DESCRIPTOR.message_types_by_name['TTSResponse'] = _TTSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes built from the descriptors above.
TTSRequest = _reflection.GeneratedProtocolMessageType('TTSRequest', (_message.Message,), {
  'DESCRIPTOR' : _TTSREQUEST,
  '__module__' : 'text_to_speech_pb2'
  # @@protoc_insertion_point(class_scope:rero.TTSRequest)
  })
_sym_db.RegisterMessage(TTSRequest)

TTSResponse = _reflection.GeneratedProtocolMessageType('TTSResponse', (_message.Message,), {
  'DESCRIPTOR' : _TTSRESPONSE,
  '__module__' : 'text_to_speech_pb2'
  # @@protoc_insertion_point(class_scope:rero.TTSResponse)
  })
_sym_db.RegisterMessage(TTSResponse)

# Service descriptor for rero.TextToSpeech and its single TTS RPC.
_TEXTTOSPEECH = _descriptor.ServiceDescriptor(
  name='TextToSpeech',
  full_name='rero.TextToSpeech',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=89,
  serialized_end=149,
  methods=[
  _descriptor.MethodDescriptor(
    name='TTS',
    full_name='rero.TextToSpeech.TTS',
    index=0,
    containing_service=None,
    input_type=_TTSREQUEST,
    output_type=_TTSRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_TEXTTOSPEECH)
DESCRIPTOR.services_by_name['TextToSpeech'] = _TEXTTOSPEECH

# @@protoc_insertion_point(module_scope)
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database; the generated messages/services register here.
_sym_db = _symbol_database.Default()

# NOTE: protoc-generated module — do not edit by hand; regenerate from
# text_to_speech.proto instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='text_to_speech.proto',
  package='rero',
  syntax='proto3',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x14text_to_speech.proto\x12\x04rero\"\x1a\n\nTTSRequest\x12\x0c\n\x04text\x18\x01 \x01(\t\"\x1d\n\x0bTTSResponse\x12\x0e\n\x06status\x18\x01 \x01(\x08\x32<\n\x0cTextToSpeech\x12,\n\x03TTS\x12\x10.rero.TTSRequest\x1a\x11.rero.TTSResponse\"\x00\x62\x06proto3'
)

# Descriptor for the TTSRequest message: one string field `text` (field #1).
_TTSREQUEST = _descriptor.Descriptor(
  name='TTSRequest',
  full_name='rero.TTSRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='text', full_name='rero.TTSRequest.text', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=30,
  serialized_end=56,
)

# Descriptor for the TTSResponse message: one bool field `status` (field #1).
_TTSRESPONSE = _descriptor.Descriptor(
  name='TTSResponse',
  full_name='rero.TTSResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='status', full_name='rero.TTSResponse.status', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=58,
  serialized_end=87,
)

DESCRIPTOR.message_types_by_name['TTSRequest'] = _TTSREQUEST
DESCRIPTOR.message_types_by_name['TTSResponse'] = _TTSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes built from the descriptors above.
TTSRequest = _reflection.GeneratedProtocolMessageType('TTSRequest', (_message.Message,), {
  'DESCRIPTOR' : _TTSREQUEST,
  '__module__' : 'text_to_speech_pb2'
  # @@protoc_insertion_point(class_scope:rero.TTSRequest)
  })
_sym_db.RegisterMessage(TTSRequest)

TTSResponse = _reflection.GeneratedProtocolMessageType('TTSResponse', (_message.Message,), {
  'DESCRIPTOR' : _TTSRESPONSE,
  '__module__' : 'text_to_speech_pb2'
  # @@protoc_insertion_point(class_scope:rero.TTSResponse)
  })
_sym_db.RegisterMessage(TTSResponse)

# Service descriptor for rero.TextToSpeech and its single TTS RPC.
_TEXTTOSPEECH = _descriptor.ServiceDescriptor(
  name='TextToSpeech',
  full_name='rero.TextToSpeech',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=89,
  serialized_end=149,
  methods=[
  _descriptor.MethodDescriptor(
    name='TTS',
    full_name='rero.TextToSpeech.TTS',
    index=0,
    containing_service=None,
    input_type=_TTSREQUEST,
    output_type=_TTSRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_TEXTTOSPEECH)
DESCRIPTOR.services_by_name['TextToSpeech'] = _TEXTTOSPEECH

# @@protoc_insertion_point(module_scope)
import unittest
import tempfile
import shutil
import os
import io
from format_templates.format_templates import replace_iter, find_iters, render
class TestFormatting(unittest.TestCase):
    """Unit tests for the format_templates find/replace/render helpers.

    ``range`` replaces the Python-2-only ``xrange`` (a NameError on
    Python 3); the templates only index and iterate these sequences, which
    both types support.
    """

    def setUp(self):
        self.data = {
            "name": "World",
            "numbers": range(1, 5),
            "nested": {
                "nested_a": range(1, 5),
                "nested_b": range(6, 11)
            },
            "nested2": [range(1, 5), range(6, 11)]
        }
        self.template = "Hello {name}. {{%numbers {numbers[i]} %}}"
        self.template2 = "{{%nested {{%nested_b {nested_b[i]} %}} %}}"

    def test_find_iters(self):
        self.assertEqual(['numbers'], find_iters(self.template))

    def test_find_nested_iters(self):
        """
        Expected to fail.
        """
        self.assertEqual(['nested', 'nested_b'], find_iters(self.template2))

    def test_replace_iter(self):
        template = "{{%numbers {numbers[i]} %}}"
        self.assertEqual('{numbers[0]}{numbers[1]}{numbers[2]}{numbers[3]}',
                         replace_iter(self.data['numbers'], 'numbers', template))

    def test_render(self):
        rendered_string = "Hello World. 1234"
        self.assertEqual(rendered_string, render(self.data, self.template))
class TestHTMLRender(unittest.TestCase):
    """
    Test render an html page.
    """
    # Directory containing this test module; templates and fixtures are
    # resolved relative to it.
    test_dir = os.path.dirname(__file__)

    def model(self):
        """Return the page model used as render input (imported lazily)."""
        from models.page1 import page
        return page

    def render_template(self, model, template_name):
        """
        Open template file, replace placeholders with data from model
        """
        template_path = os.path.join(self.test_dir, 'templates/' + template_name + '.html')
        with io.open(template_path) as template:
            # NOTE: rebinds the parameter to the file's contents.
            template_name = template.read()
        return render(model, template_name)

    def render_html(self, model):
        """Render *model* with the template named by model['template']."""
        return self.render_template(model, model['template'])

    def test_render(self):
        # Rendered output must match the checked-in test.html fixture.
        test_html_path = os.path.join(self.test_dir, 'test.html')
        rendered_html = self.render_html(self.model())
        with io.open(test_html_path) as test_html:
            self.assertEqual(test_html.read(),rendered_html)
import tempfile
import shutil
import os
import io
from format_templates.format_templates import replace_iter, find_iters, render
class TestFormatting(unittest.TestCase):
    """Unit tests for the format_templates find/replace/render helpers.

    ``range`` replaces the Python-2-only ``xrange`` (a NameError on
    Python 3); the templates only index and iterate these sequences, which
    both types support.
    """

    def setUp(self):
        self.data = {
            "name": "World",
            "numbers": range(1, 5),
            "nested": {
                "nested_a": range(1, 5),
                "nested_b": range(6, 11)
            },
            "nested2": [range(1, 5), range(6, 11)]
        }
        self.template = "Hello {name}. {{%numbers {numbers[i]} %}}"
        self.template2 = "{{%nested {{%nested_b {nested_b[i]} %}} %}}"

    def test_find_iters(self):
        self.assertEqual(['numbers'], find_iters(self.template))

    def test_find_nested_iters(self):
        """
        Expected to fail.
        """
        self.assertEqual(['nested', 'nested_b'], find_iters(self.template2))

    def test_replace_iter(self):
        template = "{{%numbers {numbers[i]} %}}"
        self.assertEqual('{numbers[0]}{numbers[1]}{numbers[2]}{numbers[3]}',
                         replace_iter(self.data['numbers'], 'numbers', template))

    def test_render(self):
        rendered_string = "Hello World. 1234"
        self.assertEqual(rendered_string, render(self.data, self.template))
class TestHTMLRender(unittest.TestCase):
    """
    Test render an html page.
    """
    # Directory containing this test module; templates and fixtures are
    # resolved relative to it.
    test_dir = os.path.dirname(__file__)

    def model(self):
        """Return the page model used as render input (imported lazily)."""
        from models.page1 import page
        return page

    def render_template(self, model, template_name):
        """
        Open template file, replace placeholders with data from model
        """
        template_path = os.path.join(self.test_dir, 'templates/' + template_name + '.html')
        with io.open(template_path) as template:
            # NOTE: rebinds the parameter to the file's contents.
            template_name = template.read()
        return render(model, template_name)

    def render_html(self, model):
        """Render *model* with the template named by model['template']."""
        return self.render_template(model, model['template'])

    def test_render(self):
        # Rendered output must match the checked-in test.html fixture.
        test_html_path = os.path.join(self.test_dir, 'test.html')
        rendered_html = self.render_html(self.model())
        with io.open(test_html_path) as test_html:
            self.assertEqual(test_html.read(),rendered_html)
import io
from setuptools import setup, find_packages
def readme():
    """Return the package long description read from README.md (UTF-8)."""
    handle = io.open('README.md', encoding='utf-8')
    try:
        return handle.read()
    finally:
        handle.close()
def requirements(filename):
    """Read a pip requirements file into a list of requirement strings.

    Fix: blank lines and comment lines (starting with '#') are now
    skipped; previously they produced empty or ``#...`` entries in
    ``install_requires``, which are not valid requirement specifiers.
    """
    reqs = []
    with io.open(filename, encoding='utf-8') as f:
        for line in f:
            req = line.strip()
            if req and not req.startswith('#'):
                reqs.append(req)
    return reqs
# setup() call for the generated project. All {{ cookiecutter.* }}
# placeholders and {%- if %} blocks are substituted by cookiecutter at
# project-generation time; until then this file is a template, not
# directly-runnable Python.
setup(
    name='{{ cookiecutter.package_name }}',
    version='{{ cookiecutter.version }}',
    packages=find_packages(),
    url="https://www.github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.repo_name }}",
    download_url='https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.repo_name }}/archive/{{ cookiecutter.version }}.tar.gz',
    license='{{ cookiecutter.license }}',
    author='{{ cookiecutter.author }}',
    author_email='{{ cookiecutter.email }}',
    description='{{ cookiecutter.project_description }}',
    long_description=readme(),
    long_description_content_type='text/markdown',
    install_requires=requirements(filename='requirements.txt'),
    data_files=[],
    # Console entry point: running `<package_name>` invokes
    # <package_name>.server:run().
    entry_points={
        'console_scripts': [
            '{{ cookiecutter.package_name }}={{ cookiecutter.package_name }}.server:run'
        ],
    },
    include_package_data=True,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        # Exactly one license classifier is emitted, matching the license
        # chosen when the project is generated.
        {%- if cookiecutter.license == "MIT License" %}
        "License :: OSI Approved :: MIT License",
        {%- endif %}
        {%- if cookiecutter.license == "BSD License" %}
        "License :: OSI Approved :: BSD License",
        {%- endif %}
        {%- if cookiecutter.license == "ISC License" %}
        "License :: OSI Approved :: ISC License",
        {%- endif %}
        {%- if cookiecutter.license == "Apache Software License 2.0" %}
        "License :: OSI Approved :: Apache Software License",
        {%- endif %}
        {%- if cookiecutter.license == "GNU General Public License v3" %}
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        {%- endif %}
        "Intended Audience :: Developers"
    ],
    # Test-only dependencies installed via `pip install pkg[tests]`.
    extras_require={
        "tests": requirements(filename='tests/requirements.txt'),
    },
    python_requires='>=3.6',
    project_urls={
        'Bug Reports': 'https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.repo_name }}/issues',
        'Source': 'https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.repo_name }}'
    },
# Dataset residue fused onto this row's closing line; preserved verbatim.
) | {{cookiecutter.repo_name}}/setup.py |
import io
from setuptools import setup, find_packages
def readme():
with io.open('README.md', encoding='utf-8') as f:
return f.read()
def requirements(filename):
reqs = list()
with io.open(filename, encoding='utf-8') as f:
for line in f.readlines():
reqs.append(line.strip())
return reqs
setup(
name='{{ cookiecutter.package_name }}',
version='{{ cookiecutter.version }}',
packages=find_packages(),
url="https://www.github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.repo_name }}",
download_url='https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.repo_name }}/archive/{{ cookiecutter.version }}.tar.gz',
license='{{ cookiecutter.license }}',
author='{{ cookiecutter.author }}',
author_email='{{ cookiecutter.email }}',
description='{{ cookiecutter.project_description }}',
long_description=readme(),
long_description_content_type='text/markdown',
install_requires=requirements(filename='requirements.txt'),
data_files=[],
entry_points={
'console_scripts': [
'{{ cookiecutter.package_name }}={{ cookiecutter.package_name }}.server:run'
],
},
include_package_data=True,
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
{%- if cookiecutter.license == "MIT License" %}
"License :: OSI Approved :: MIT License",
{%- endif %}
{%- if cookiecutter.license == "BSD License" %}
"License :: OSI Approved :: BSD License",
{%- endif %}
{%- if cookiecutter.license == "ISC License" %}
"License :: OSI Approved :: ISC License",
{%- endif %}
{%- if cookiecutter.license == "Apache Software License 2.0" %}
"License :: OSI Approved :: Apache Software License",
{%- endif %}
{%- if cookiecutter.license == "GNU General Public License v3" %}
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
{%- endif %}
"Intended Audience :: Developers"
],
extras_require={
"tests": requirements(filename='tests/requirements.txt'),
},
python_requires='>=3.6',
project_urls={
'Bug Reports': 'https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.repo_name }}/issues',
'Source': 'https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.repo_name }}'
},
) | 0.414069 | 0.147432 |
from conans import DEFAULT_REVISION_V1
from conans.model.ref import PackageReference
class CommonService(object):
    """Shared service logic for the Conan server API.

    Mixin-style base: subclasses are expected to provide
    ``self._server_store`` (revision-aware storage backend) and
    ``self._authorizer`` / ``self._auth_user`` (permission checks).
    """

    def _get_latest_pref(self, pref):
        # Resolve a package reference to its latest recipe revision and
        # latest package revision.
        ref = self._get_latest_ref(pref.ref)
        pref = PackageReference(ref, pref.id)
        tmp = self._server_store.get_last_package_revision(pref)
        if not tmp:
            # No stored package revision: fall back to the v1 placeholder.
            prev = DEFAULT_REVISION_V1
        else:
            prev = tmp.revision
        return pref.copy_with_revs(ref.revision, prev)

    def _get_latest_ref(self, ref):
        # Resolve a recipe reference to its latest stored revision,
        # defaulting to the v1 placeholder when none is recorded.
        tmp = self._server_store.get_last_revision(ref)
        if not tmp:
            rrev = DEFAULT_REVISION_V1
        else:
            rrev = tmp.revision
        return ref.copy_with_rev(rrev)

    def remove_conanfile(self, ref):
        """Delete a recipe (after checking delete permission)."""
        self._authorizer.check_delete_conan(self._auth_user, ref)
        self._server_store.remove_conanfile(ref)

    def remove_packages(self, ref, package_ids_filter):
        """If the revision is not specified it will remove the packages from all the recipes
        (v1 compatibility)"""
        # Authorize each requested package id up front, before deleting any.
        for package_id in package_ids_filter:
            pref = PackageReference(ref, package_id)
            self._authorizer.check_delete_package(self._auth_user, pref)
        if not package_ids_filter:  # Remove all packages, check that we can remove conanfile
            self._authorizer.check_delete_conan(self._auth_user, ref)
        # Apply the removal across every stored recipe revision.
        for rrev in self._server_store.get_recipe_revisions(ref):
            self._server_store.remove_packages(ref.copy_with_rev(rrev.revision),
                                               package_ids_filter)

    def remove_package(self, pref):
        """Delete one package across all recipe and package revisions."""
        self._authorizer.check_delete_package(self._auth_user, pref)
        for rrev in self._server_store.get_recipe_revisions(pref.ref):
            new_pref = pref.copy_with_revs(rrev.revision, pref.revision)
            for prev in self._server_store.get_package_revisions(new_pref):
                full_pref = new_pref.copy_with_revs(rrev.revision, prev.revision)
                self._server_store.remove_package(full_pref)

    def remove_all_packages(self, ref):
        """Delete every package of every recipe revision of *ref*.

        NOTE(review): unlike the other removal methods, no authorizer
        check is performed here — confirm callers authorize first.
        """
        for rrev in self._server_store.get_recipe_revisions(ref):
            self._server_store.remove_all_packages(ref.copy_with_rev(rrev.revision))

    def remove_conanfile_files(self, ref, files):
        """Delete specific files from a recipe folder (with permission check)."""
        self._authorizer.check_delete_conan(self._auth_user, ref)
        self._server_store.remove_conanfile_files(ref, files)

    def remove_conanfile_file(self, ref, path):
        """Delete a single file from a recipe folder."""
        # Dataset residue fused onto this row's last line; preserved verbatim.
        self.remove_conanfile_files(ref, [path]) | conans/server/service/common/common.py | from conans import DEFAULT_REVISION_V1
from conans.model.ref import PackageReference
class CommonService(object):
def _get_latest_pref(self, pref):
ref = self._get_latest_ref(pref.ref)
pref = PackageReference(ref, pref.id)
tmp = self._server_store.get_last_package_revision(pref)
if not tmp:
prev = DEFAULT_REVISION_V1
else:
prev = tmp.revision
return pref.copy_with_revs(ref.revision, prev)
def _get_latest_ref(self, ref):
tmp = self._server_store.get_last_revision(ref)
if not tmp:
rrev = DEFAULT_REVISION_V1
else:
rrev = tmp.revision
return ref.copy_with_rev(rrev)
def remove_conanfile(self, ref):
self._authorizer.check_delete_conan(self._auth_user, ref)
self._server_store.remove_conanfile(ref)
def remove_packages(self, ref, package_ids_filter):
"""If the revision is not specified it will remove the packages from all the recipes
(v1 compatibility)"""
for package_id in package_ids_filter:
pref = PackageReference(ref, package_id)
self._authorizer.check_delete_package(self._auth_user, pref)
if not package_ids_filter: # Remove all packages, check that we can remove conanfile
self._authorizer.check_delete_conan(self._auth_user, ref)
for rrev in self._server_store.get_recipe_revisions(ref):
self._server_store.remove_packages(ref.copy_with_rev(rrev.revision),
package_ids_filter)
def remove_package(self, pref):
self._authorizer.check_delete_package(self._auth_user, pref)
for rrev in self._server_store.get_recipe_revisions(pref.ref):
new_pref = pref.copy_with_revs(rrev.revision, pref.revision)
for prev in self._server_store.get_package_revisions(new_pref):
full_pref = new_pref.copy_with_revs(rrev.revision, prev.revision)
self._server_store.remove_package(full_pref)
def remove_all_packages(self, ref):
for rrev in self._server_store.get_recipe_revisions(ref):
self._server_store.remove_all_packages(ref.copy_with_rev(rrev.revision))
def remove_conanfile_files(self, ref, files):
self._authorizer.check_delete_conan(self._auth_user, ref)
self._server_store.remove_conanfile_files(ref, files)
def remove_conanfile_file(self, ref, path):
self.remove_conanfile_files(ref, [path]) | 0.376967 | 0.115511 |
from __future__ import absolute_import, with_statement
import os
import logging
import functools
import hashlib
from assetman.tools import get_shard_from_list, _utf8
from assetman.manifest import Manifest
class AssetManager(object):
    """AssetManager attempts to provide easy-to-use asset management and
    compilation for Tornado (or other?) templates.

    On the template side, assuming this `assetman` module is available in the
    template context, use in Tornado should be as easy as:

        {% apply assetman.include_js %}
        js/utils.js
        js/lib.js
        js/main.js
        {% end %}

    (Variations can/should be created for other frameworks.)

    With this block in place, each individual JavaScript file will be included
    in the resulting document in development. In production, a single,
    compiled JavaScript will be included instead.
    """

    def __init__(self, rel_url_text, local=False, include_tag=True, src_path=None, settings=None, **attrs):
        """Creates an asset manager from `rel_url_text`, a string which should
        contain space (or newline) separated asset URLs.

        Optional params:

        * local - if True, URLs in rendered output will point at our own
          hosts instead of our CDN.
        * include_tag - if False, the only rendered output will be URLs,
          without full HTML elements (useful, e.g., for mobile app manifests)
        * src_path - an optional annotation for recording where this asset
          manager originated.

        Any extra kwargs will be interpreted as extra HTML params to include
        on the rendered element.
        """
        # Python 2 code base (`filter` yields a list here; see `iteritems`
        # below): rel_urls becomes a list of non-empty UTF-8 paths.
        self.rel_urls = filter(None, _utf8(rel_url_text).split())
        self.local = local
        self.include_tag = include_tag
        self.src_path = src_path
        self.attrs = attrs
        self._manifest = None
        # Settings are required; there is no usable default configuration.
        assert settings
        self.settings = settings
        logging.debug('%s URLs: %r', self.__class__.__name__, self.rel_urls)

    # Lazy-load the manifest attribute
    def get_manifest(self):
        # Loaded at most once per manager instance, then cached.
        if not self._manifest:
            self._manifest = Manifest(self.settings).load()
        return self._manifest

    def set_manifest(self, manifest):
        self._manifest = manifest

    manifest = property(get_manifest, set_manifest)

    def get_hash(self):
        """Gets the md5 hash for the URLs in this block of assets, which will
        be used to refer to the compiled assets in production.
        """
        # md5 of a str works because this is Python 2; under Python 3 the
        # joined URLs would need encoding to bytes first.
        return hashlib.md5('\n'.join(self.rel_urls)).hexdigest()

    def get_ext(self):
        """Returns the file extension (without leading period) to use for the
        compiled version of these assets.
        """
        raise NotImplementedError

    def get_compiled_name(self):
        """Returns the filename for the compiled version of this asset bundle,
        which is composed of its version hash and a file extension.
        """
        name_hash = self.get_hash()
        return self.manifest.blocks[name_hash]['versioned_path']

    def make_asset_url(self, rel_url):
        """Builds a full URL based on the given relative URL."""
        if self.settings['enable_static_compilation']:
            # Development: serve through our own static handler.
            prefix = self.settings['static_url_prefix']
        elif self.local:
            prefix = self.settings.get('local_cdn_url_prefix')
        else:
            # Production: pick a deterministic CDN shard per file name.
            prefix = get_shard_from_list(self.settings['cdn_url_prefix'], os.path.basename(rel_url))
        return prefix.rstrip('/') + '/' + rel_url.lstrip('/')

    def render_attrs(self):
        """Returns this asset block's attrs as an HTML string. Includes a
        leading space.
        """
        # `iteritems` — Python 2 dict API.
        attrs = ' '.join('%s=%r' % (attr, _utf8(val))
                         for attr, val in self.attrs.iteritems())
        return ' ' + attrs if attrs else ''

    def render_asset(self, url):
        """Renders an individual asset at the given URL. The given URL should
        be the full URL of the asset.
        """
        if self.include_tag:
            return self.render_asset_element(url)
        else:
            return url

    def render_asset_element(self, url):
        # Subclasses render the concrete HTML element (script/link tag).
        raise NotImplementedError

    def render(self):
        """Renders these assets. If static compilation is enabled, each asset
        is rendered individually. In a production environment, compilation
        should be disabled and just the compiled asset rendered.
        """
        try:
            if self.settings['enable_static_compilation']:
                urls = map(self.make_asset_url, self.rel_urls)
                return '\n'.join(map(self.render_asset, urls))
            else:
                compiled_name = self.get_compiled_name()
                url = self.make_asset_url(compiled_name)
                return self.render_asset(url)
        except:
            # NOTE(review): bare except — it does re-raise, so nothing is
            # swallowed, but consider narrowing to `except Exception`.
            logging.error('failed getting assets for %s', self.rel_urls)
            raise

    @classmethod
    def include(cls, s=None, **kwargs):
        """A shortcut for creating and rendering an AssetManager, which needs
        to support two styles of invocation in templates:

            {% apply assetman.include_js %}path/to/lib.js{% end %}

        and

            {% apply assetman.include_js(local=True) %}path/to/lib.js{% end %}

        Ie, a "bare" application without any args and one with args. In the
        latter case, we need to return a partial function that can be applied
        to a string.
        """
        if s is None:
            return functools.partial(cls.include, **kwargs)
        return cls(s, **kwargs).render()

    def static_url(self, url_path, local=None):
        """A shortcut for ensuring that the given URL is versioned in
        production.
        """
        # NOTE(review): passing `local` mutates self.local as a side effect,
        # affecting later renders on this manager — confirm intended.
        if local is not None:
            self.local = local
        if self.settings['enable_static_compilation']:
            return self.make_asset_url(url_path)
        else:
            assert url_path in self.manifest.assets, url_path
            versioned_path = self.manifest.assets[url_path]['versioned_path']
            return self.make_asset_url(versioned_path)

    def __str__(self):
        return '<%s src:%s assets:%s>' % (
            self.__class__.__name__,
            self.src_path or '<template>',
            len(self.rel_urls))
class JSManager(AssetManager):
    """Asset manager for JavaScript bundles."""

    def get_ext(self):
        """File extension used for compiled JS bundles."""
        return 'js'

    def render_asset_element(self, url):
        """Render a <script> tag pointing at *url*."""
        extra_attrs = self.render_attrs()
        return '<script src="%s" type="text/javascript"%s></script>' % (url, extra_attrs)
class CSSManager(AssetManager):
    """Asset manager for CSS bundles."""

    def get_ext(self):
        """File extension used for compiled CSS bundles."""
        return 'css'

    def render_asset_element(self, url):
        """Render a stylesheet <link> tag pointing at *url*."""
        extra_attrs = self.render_attrs()
        return '<link href="%s" rel="stylesheet" type="text/css"%s>' % (url, extra_attrs)
class LessManager(CSSManager):
    # No customization needed because the custom static asset compiler handler
    # will take the *.less URL and return the CSS output.
    pass


class SassManager(CSSManager):
    # No customization needed because the custom static asset compiler handler
    # will take the *.scss URL and return the CSS output.
    # Dataset residue fused onto this row's last line; preserved verbatim.
    pass | assetman/managers.py | from __future__ import absolute_import, with_statement
import os
import logging
import functools
import hashlib
from assetman.tools import get_shard_from_list, _utf8
from assetman.manifest import Manifest
class AssetManager(object):
"""AssetManager attempts to provide easy-to-use asset management and
compilation for Tornado (or other?) templates.
On the template side, assuming this `assetman` module is available in the
template context, use in Tornado should be as easy as:
{% apply assetman.include_js %}
js/utils.js
js/lib.js
js/main.js
{% end %}
(Variations can/should be created for other frameworks.)
With this block in place, each individual JavaScript file will be included
in the resulting document in development. In production, a single,
compiled JavaScript will be included instead.
"""
def __init__(self, rel_url_text, local=False, include_tag=True, src_path=None, settings=None, **attrs):
"""Creates an asset manager from `rel_url_text`, a string which should
contain space (or newline) separated asset URLs.
Optional params:
* local - if True, URLs in rendered output will point at our own
hosts instead of our CDN.
* include_tag - if False, the only rendered output will be URLs,
without full HTML elements (useful, e.g., for mobile app manifests)
* src_path - an optional annotation for recording where this asset
manager originated.
Any extra kwargs will be interpreted as extra HTML params to include
on the rendered element.
"""
self.rel_urls = filter(None, _utf8(rel_url_text).split())
self.local = local
self.include_tag = include_tag
self.src_path = src_path
self.attrs = attrs
self._manifest = None
assert settings
self.settings = settings
logging.debug('%s URLs: %r', self.__class__.__name__, self.rel_urls)
# Lazy-load the manifest attribute
def get_manifest(self):
if not self._manifest:
self._manifest = Manifest(self.settings).load()
return self._manifest
def set_manifest(self, manifest):
self._manifest = manifest
manifest = property(get_manifest, set_manifest)
def get_hash(self):
"""Gets the md5 hash for the URLs in this block of assets, which will
be used to refer to the compiled assets in production.
"""
return hashlib.md5('\n'.join(self.rel_urls)).hexdigest()
def get_ext(self):
"""Returns the file extension (without leading period) to use for the
compiled version of these assets.
"""
raise NotImplementedError
def get_compiled_name(self):
"""Returns the filename for the compiled version of this asset bundle,
which is composed of its version hash and a filed extension.
"""
name_hash = self.get_hash()
return self.manifest.blocks[name_hash]['versioned_path']
def make_asset_url(self, rel_url):
"""Builds a full URL based the given relative URL."""
if self.settings['enable_static_compilation']:
prefix = self.settings['static_url_prefix']
elif self.local:
prefix = self.settings.get('local_cdn_url_prefix')
else:
prefix = get_shard_from_list(self.settings['cdn_url_prefix'], os.path.basename(rel_url))
return prefix.rstrip('/') + '/' + rel_url.lstrip('/')
def render_attrs(self):
"""Returns this asset block's attrs as an HTML string. Includes a
leading space.
"""
attrs = ' '.join('%s=%r' % (attr, _utf8(val))
for attr, val in self.attrs.iteritems())
return ' ' + attrs if attrs else ''
def render_asset(self, url):
"""Renders an individual asset at the given URL. The given URL should
be the full URL to of the asset.
"""
if self.include_tag:
return self.render_asset_element(url)
else:
return url
def render_asset_element(self, url):
raise NotImplementedError
def render(self):
"""Renders these assets. If static compilation is enabled, each asset is
rendered individually. In a production environment, this should be disabled and
just the compiled asset should rendered.
"""
try:
if self.settings['enable_static_compilation']:
urls = map(self.make_asset_url, self.rel_urls)
return '\n'.join(map(self.render_asset, urls))
else:
compiled_name = self.get_compiled_name()
url = self.make_asset_url(compiled_name)
return self.render_asset(url)
except:
logging.error('failed getting assets for %s', self.rel_urls)
raise
@classmethod
def include(cls, s=None, **kwargs):
"""A shortcut for creating and rendering an AssetManager, which needs
to support two styles of invocation in templates:
{% apply assetman.include_js %}path/to/lib.js{% end %}
and
{% apply assetman.include_js(local=True) %}path/to/lib.js{% end %}
Ie, a "bare" application without any args and one with args. In the
latter case, we need to return a partial function that can be applied
to a string.
"""
if s is None:
return functools.partial(cls.include, **kwargs)
return cls(s, **kwargs).render()
def static_url(self, url_path, local=None):
"""A shortcut for ensuring that the given URL is versioned in production.
"""
if local is not None:
self.local = local
if self.settings['enable_static_compilation']:
return self.make_asset_url(url_path)
else:
assert url_path in self.manifest.assets, url_path
versioned_path = self.manifest.assets[url_path]['versioned_path']
return self.make_asset_url(versioned_path)
def __str__(self):
return '<%s src:%s assets:%s>' % (
self.__class__.__name__,
self.src_path or '<template>',
len(self.rel_urls))
class JSManager(AssetManager):
def get_ext(self):
return 'js'
def render_asset_element(self, url):
return '<script src="%s" type="text/javascript"%s></script>' % (
url, self.render_attrs())
class CSSManager(AssetManager):
def get_ext(self):
return 'css'
def render_asset_element(self, url):
return '<link href="%s" rel="stylesheet" type="text/css"%s>' % (
url, self.render_attrs())
class LessManager(CSSManager):
# No customization needed because the custom static asset compiler handler
# will take the *.less URL and return the CSS output.
pass
class SassManager(CSSManager):
# No customization needed because the custom static asset compiler handler
# will take the *.scss URL and return the CSS output.
pass | 0.814607 | 0.194062 |
import numpy as np
import abc
def action(f):
    """Decorator for world action methods.

    After the wrapped action runs, the world's satisfied-condition count
    is refreshed and the action's result is normalized to a
    ``(reward, done, stats)`` tuple. A ``None`` result means "no explicit
    outcome": reward 0, stats taken from the world, and done only when
    the interface reports a time-out.
    """
    def wrapper(*args):
        world = args[0]
        outcome = f(*args)
        world.check_n_satisfied()
        if outcome is not None:
            reward, done, stats = outcome
        else:
            reward = 0
            stats = world.stats
            done = bool(world.interface.time_out)
        return reward, done, stats
    return wrapper
class TaskObject(object):
    """One named category of task object and its simulated instances.

    Tracks every instance spawned for this category, supports masking
    instances out of consideration, and can lock onto the instance whose
    position (reported by the interface) is nearest to the gripper.
    """

    def __init__(self, name, index, interface):
        self._name = name
        self._index = index
        self._interface = interface
        self._instances = []
        self._masked_instances = []
        self._locked_instance = None

    @property
    def name(self):
        """Category name of this task object."""
        return self._name

    @property
    def instances(self):
        """Names of all spawned instances of this object."""
        return self._instances

    @property
    def index(self):
        """Position of this object in the world's task-object list."""
        return self._index

    def add_instance(self, spec):
        """Spawn one more instance from *spec*, suffixing a unique name."""
        body_spec = spec.copy()
        body_spec['name'] += '_%i' % len(self.instances)
        self._interface.add_body(body_spec)
        self._instances.append(body_spec['name'])

    def has_instance(self, instance):
        """True if *instance* belongs to this task object."""
        return instance in self._instances

    def reset(self):
        """Forget all instances, masks and the current lock."""
        self._instances = []
        self._locked_instance = None
        self._masked_instances = []

    def mask_instance(self, instance):
        """Exclude *instance* from future nearest-instance locking."""
        assert(instance in self._instances)
        self._masked_instances.append(instance)

    @property
    def locked_instance(self):
        """Name of the currently locked instance (None until locked)."""
        return self._locked_instance

    def lock_nearest_instance(self):
        """Lock onto the unmasked instance nearest to the gripper.

        Falls back to considering every instance when all are masked.
        """
        candidates = [o for o in self.instances
                      if o not in self._masked_instances]
        if not candidates:
            candidates = self.instances
        distance = lambda name: np.linalg.norm(
            self._interface.relative_pos(name))
        self._locked_instance = min(candidates, key=distance)
class BaseWorld(object):
    """Base class for simulated manipulation worlds.

    Wraps a physics/robot ``interface`` plus a scene specification, and
    exposes task objects, observation state (object poses, agent state,
    camera image) and high-level actions (move/grasp/drop/press/release),
    each wrapped by the ``action`` decorator.
    """

    def __init__(self, interface, scene_specs):
        self.interface = interface
        self._dimensions = None  # lazily computed in `dimensions`
        self.scene_specs = scene_specs
        # One TaskObject per named entry in the scene spec, in order.
        self.task_objects = []
        for i, o in enumerate(scene_specs['task_objects']):
            self.task_objects.append(TaskObject(o, i, interface))

    def start_world(self):
        # Subclass responsibility.
        raise NotImplementedError

    def reset_world(self):
        # Subclass responsibility.
        raise NotImplementedError

    @property
    def state(self):
        """Full observation dict: object poses, agent state, camera image."""
        return {'object_state': self.object_state,
                'agent_state': self.agent_state,
                'images': self.image}

    @abc.abstractproperty
    def image(self):
        """Rendered camera image from the simulator."""
        # NOTE(review): abstractproperty has no effect here because the
        # class does not use ABCMeta as its metaclass — confirm intent.
        return np.array(self.interface.bullet.capture_image())

    @property
    def depth(self):
        """Depth buffer from the simulator camera."""
        return np.array(self.interface.bullet.depth)

    @property
    def object_state(self):
        """Positions of all locked instances relative to the gripper.

        Returns a float32 array stacked one row per task object
        (presumably shape ``(n_objects, 3)`` given `dimensions` below).
        """
        poses = []
        for tobj in self.task_objects:
            poses.append(self.interface.relative_pos(tobj.locked_instance))
        a = np.vstack(poses).astype(np.float32)
        return a

    @property
    def agent_state(self):
        """One-hot encoding (length 2) of the gripper state."""
        state = np.zeros(2, dtype=np.float32)
        state[self.interface.gripper.state] = 1
        return state

    @property
    def dimensions(self):
        """Shapes of each observation component (computed once, cached)."""
        if not self._dimensions:
            self._dimensions = {'object_state': [len(self.task_objects), 3],
                                'agent_state': self.agent_state.shape,
                                'images': self.image.shape}
        return self._dimensions

    @property
    def all_object_instances(self):
        """Flat list of every instance name across all task objects."""
        all_inst = []
        for tobj in self.task_objects:
            all_inst += tobj.instances
        return all_inst

    def ind_to_name(self, query_idx):
        """
        object index to task object name
        """
        return self.task_objects[query_idx].name

    def name_to_ind(self, query_name):
        """
        object name to task object index (None if not found)
        """
        for i, tobj in enumerate(self.task_objects):
            if tobj.name == query_name:
                return i
        return None

    def get_task_object(self, query):
        """Look up a TaskObject by name or by index."""
        # `basestring` — Python 2 only.
        if isinstance(query, basestring):
            query = self.name_to_ind(query)
        return self.task_objects[query]

    def get_object_instance(self, query):
        """Locked instance name of the task object identified by *query*."""
        return self.get_task_object(query).locked_instance

    def set_callback(self, callback_func, freq):
        # Forwarded to the interface; invoked at the given frequency.
        self.interface.set_callback(callback_func, freq)

    def unset_callback(self):
        self.interface.unset_callback()

    @action
    def action_move_to(self, obj):
        """move to above"""
        obj = self.get_object_instance(obj)
        self.interface.move_to_above(obj)

    @action
    def action_grasp(self, obj):
        """reach and grip"""
        obj = self.get_object_instance(obj)
        self.interface.reach_to_grasp(obj)
        self.interface.grip(obj)

    @action
    def action_drop(self, obj):
        """reach and release"""
        obj = self.get_object_instance(obj)
        self.interface.reach_to_drop(obj)
        self.interface.release()

    @action
    def action_press(self, obj, obj2, loc):
        """reach to press obj against obj2 at loc, then release"""
        obj = self.get_object_instance(obj)
        obj2 = self.get_object_instance(obj2)
        self.interface.reach_to_press(obj, obj2, loc)
        self.interface.release()

    @action
    def action_release(self):
        """release the gripped object"""
        self.interface.wait(10)
        self.interface.release()

    @action
    def action_noop(self):
        # Returning None lets the `action` wrapper synthesize the default
        # outcome (reward 0, done only on time-out).
        return None

    def satisfied(self, cstr):
        """
        check if a condition is satisfied;
        returns (all_satisfied, n_satisfied)
        """
        all_satisfied = True
        n_satisfied = 0
        if cstr['type'] == 'on_top':
            # Every instance of `src` must rest on the first `target`
            # instance for the condition to be fully satisfied.
            src_objs = self.get_task_object(cstr['src']).instances
            tgt_obj = self.get_task_object(cstr['target']).instances[0]
            for so in src_objs:
                if not self.interface.is_on_top_of(so, tgt_obj):
                    all_satisfied = False
                else:
                    n_satisfied += 1
        else:
            raise NotImplementedError(
                'cannot check condition! %s' % cstr['type'])
        return all_satisfied, n_satisfied

    def mask_object(self, oname):
        """
        mask out an object with the given name
        """
        for tobj in self.task_objects:
            if tobj.has_instance(oname):
                tobj.mask_instance(oname)

    def lock_task_objects(self):
        """
        for each task object, find the nearest instance and lock it
        """
        for tobj in self.task_objects:
            tobj.lock_nearest_instance()

    def add_instance(self, spec):
        """Spawn a new instance of the task object named in *spec*."""
        tobj = self.get_task_object(spec['name'])
        # Dataset residue fused onto this row's last line; preserved verbatim.
        tobj.add_instance(spec) | vat/envs/base_world.py | import numpy as np
import abc
def action(f):
def wrapper(*args):
result = f(*args)
world = args[0]
world.check_n_satisfied()
if result is None:
done = False
reward = 0
stats = world.stats
if world.interface.time_out:
done = True
else:
reward, done, stats = result
return reward, done, stats
return wrapper
class TaskObject(object):
def __init__(self, name, index, interface):
self._name = name
self._index = index
self._interface = interface
self._instances = []
self._masked_instances = []
self._locked_instance = None
@property
def name(self):
return self._name
@property
def instances(self):
return self._instances
@property
def index(self):
return self._index
def add_instance(self, spec):
spec = spec.copy()
pfix = '_%i' % len(self.instances)
spec['name'] += pfix
self._interface.add_body(spec)
self._instances.append(spec['name'])
def has_instance(self, instance):
return instance in self._instances
def reset(self):
self._instances = []
self._locked_instance = None
self._masked_instances = []
def mask_instance(self, instance):
assert(instance in self._instances)
self._masked_instances.append(instance)
@property
def locked_instance(self):
return self._locked_instance
def lock_nearest_instance(self):
instances = []
for o in self.instances:
if o not in self._masked_instances:
instances.append(o)
if len(instances) == 0:
instances = self.instances
comp_func = lambda name: np.linalg.norm(
self._interface.relative_pos(name))
ni = min(instances, key=comp_func)
self._locked_instance = ni
class BaseWorld(object):
def __init__(self, interface, scene_specs):
self.interface = interface
self._dimensions = None
self.scene_specs = scene_specs
self.task_objects = []
for i, o in enumerate(scene_specs['task_objects']):
self.task_objects.append(TaskObject(o, i, interface))
def start_world(self):
raise NotImplementedError
def reset_world(self):
raise NotImplementedError
@property
def state(self):
return {'object_state': self.object_state,
'agent_state': self.agent_state,
'images': self.image}
@abc.abstractproperty
def image(self):
"""return object position relative to the gripper"""
return np.array(self.interface.bullet.capture_image())
@property
def depth(self):
"""return object position relative to the gripper"""
return np.array(self.interface.bullet.depth)
@property
def object_state(self):
"""return object position relative to the gripper"""
poses = []
for tobj in self.task_objects:
poses.append(self.interface.relative_pos(tobj.locked_instance))
a = np.vstack(poses).astype(np.float32)
return a
@property
def agent_state(self):
state = np.zeros(2, dtype=np.float32)
state[self.interface.gripper.state] = 1
return state
@property
def dimensions(self):
if not self._dimensions:
self._dimensions = {'object_state': [len(self.task_objects), 3],
'agent_state': self.agent_state.shape,
'images': self.image.shape}
return self._dimensions
@property
def all_object_instances(self):
all_inst = []
for tobj in self.task_objects:
all_inst += tobj.instances
return all_inst
def ind_to_name(self, query_idx):
"""
object index to task object name
"""
return self.task_objects[query_idx].name
def name_to_ind(self, query_name):
"""
object name to task object index
"""
for i, tobj in enumerate(self.task_objects):
if tobj.name == query_name:
return i
return None
def get_task_object(self, query):
if isinstance(query, basestring):
query = self.name_to_ind(query)
return self.task_objects[query]
def get_object_instance(self, query):
return self.get_task_object(query).locked_instance
def set_callback(self, callback_func, freq):
self.interface.set_callback(callback_func, freq)
def unset_callback(self):
self.interface.unset_callback()
@action
def action_move_to(self, obj):
"""move to above"""
obj = self.get_object_instance(obj)
self.interface.move_to_above(obj)
@action
def action_grasp(self, obj):
"""reach and grip"""
obj = self.get_object_instance(obj)
self.interface.reach_to_grasp(obj)
self.interface.grip(obj)
@action
def action_drop(self, obj):
"""reach and release"""
obj = self.get_object_instance(obj)
self.interface.reach_to_drop(obj)
self.interface.release()
@action
def action_press(self, obj, obj2, loc):
"""reach and release"""
obj = self.get_object_instance(obj)
obj2 = self.get_object_instance(obj2)
self.interface.reach_to_press(obj, obj2, loc)
self.interface.release()
@action
def action_release(self):
"""release the gripped object"""
self.interface.wait(10)
self.interface.release()
@action
def action_noop(self):
return None
def satisfied(self, cstr):
"""
check if a condition is satisfied
"""
all_satisfied = True
n_satisfied = 0
if cstr['type'] == 'on_top':
src_objs = self.get_task_object(cstr['src']).instances
tgt_obj = self.get_task_object(cstr['target']).instances[0]
for so in src_objs:
if not self.interface.is_on_top_of(so, tgt_obj):
all_satisfied = False
else:
n_satisfied += 1
else:
raise NotImplementedError(
'cannot check condition! %s' % cstr['type'])
return all_satisfied, n_satisfied
def mask_object(self, oname):
"""
mask out an object with the given name
"""
for tobj in self.task_objects:
if tobj.has_instance(oname):
tobj.mask_instance(oname)
def lock_task_objects(self):
"""
for each task object, find the nearest instance and lock it
"""
for tobj in self.task_objects:
tobj.lock_nearest_instance()
def add_instance(self, spec):
tobj = self.get_task_object(spec['name'])
tobj.add_instance(spec) | 0.64232 | 0.310028 |
import json
from urllib.parse import quote_plus as url_encode
import csv
from elsapy.elsclient import ElsClient
con_file = open("config.json")
config = json.load(con_file)
con_file.close()
## Initialize client
client = ElsClient(config['apikey']['mark'])
codes = ['ALL','ABS','AF-ID','AFFIL','AFFILCITY','AFFILCOUNTRY','AFFILORG','AU-ID',
'AUTHOR-NAME','AUTH','AUTHFIRST','AUTHLASTNAME', 'AUTHCOLLAB','AUTHKEY','CASREGNUMBER','CHEM',
'CHEMNAME','CODEN','CONF','CONFLOC','CONFNAME','CONFSPONSORS',
'DOI','EDFIRST','EDITOR','EDLASTNAME','EISSN','EXACTSRCTITLE','FIRSTAUTH','FUND-SPONSOR','FUND-ACR',
'FUND-NO','ISBN','ISSN','ISSNP','ISSUE','KEY','LANGUAGE','MANUFACTURER',
'PMID','PUBLISHER','REF','REFAUTH','REFTITLE',
'REFSRCTITLE','SEQBANK','SEQNUMBER','SRCTITLE',
'TITLE','TITLE-ABS-KEY','TITLE-ABS-KEY-AUTH','TRADENAME','VOLUME','WEBSITE']
search_count = {'ALL': 0,'ABS': 0,'AF-ID': 0,'AFFIL': 0,'AFFILCITY': 0,'AFFILCOUNTRY': 0,'AFFILORG': 0,
'AU-ID': 0,'AUTHOR-NAME': 0,'AUTH': 0,'AUTHFIRST': 0,'AUTHLASTNAME': 0,'AUTHCOLLAB': 0,
'AUTHKEY': 0,'CASREGNUMBER': 0,'CHEM': 0,'CHEMNAME': 0,'CODEN': 0,'CONF': 0,'CONFLOC': 0,
'CONFNAME': 0,'CONFSPONSORS': 0,'DOI': 0,'EDFIRST': 0,'EDITOR': 0,'EDLASTNAME': 0,'EISSN': 0,
'EXACTSRCTITLE': 0,'FIRSTAUTH': 0,'FUND-SPONSOR': 0,'FUND-ACR': 0,'FUND-NO': 0,
'ISBN': 0,'ISSN': 0,'ISSNP': 0,'ISSUE': 0,'KEY': 0,'LANGUAGE': 0,'MANUFACTURER': 0,'PMID': 0,
'PUBLISHER': 0,'REF': 0,'REFAUTH': 0,'REFTITLE': 0,'REFSRCTITLE': 0,'SEQBANK': 0,'SEQNUMBER': 0,
'SRCTITLE': 0,'TITLE': 0,'TITLE-ABS-KEY': 0,'TITLE-ABS-KEY-AUTH': 0,'TRADENAME': 0,'VOLUME': 0,
'WEBSITE': 0}
for code in codes:
query = code + "(emotion) OR " + code + "(emotional) OR " + code + "(emotions) OR " + code + "(empathy) OR " + code + "(empathic) OR " + code + "(feeling) " \
"OR " + code + "(feelings) OR " + code + "(mood) OR " + code + "(moods) OR " + code + "(motivation) OR " + code + "(motivations) OR " + code + "(preference) " \
"OR " + code + "(preferences) OR " + code + "(stress) OR " + code + "(well-being) " \
"AND PUBYEAR < 2021"
uri = u'https://api.elsevier.com/content/search/scopus?query=' + url_encode(query) + "&count=1"
api_response = client.exec_request(uri)
search_count[code] = int(api_response['search-results']['opensearch:totalResults'])
with open('output/resultsCount.csv', 'w') as f:
for key in search_count.keys():
f.write("%s,%s\n"%(key,search_count[key])) | findResultsCount.py | import json
from urllib.parse import quote_plus as url_encode
import csv
from elsapy.elsclient import ElsClient
con_file = open("config.json")
config = json.load(con_file)
con_file.close()
## Initialize client
client = ElsClient(config['apikey']['mark'])
codes = ['ALL','ABS','AF-ID','AFFIL','AFFILCITY','AFFILCOUNTRY','AFFILORG','AU-ID',
'AUTHOR-NAME','AUTH','AUTHFIRST','AUTHLASTNAME', 'AUTHCOLLAB','AUTHKEY','CASREGNUMBER','CHEM',
'CHEMNAME','CODEN','CONF','CONFLOC','CONFNAME','CONFSPONSORS',
'DOI','EDFIRST','EDITOR','EDLASTNAME','EISSN','EXACTSRCTITLE','FIRSTAUTH','FUND-SPONSOR','FUND-ACR',
'FUND-NO','ISBN','ISSN','ISSNP','ISSUE','KEY','LANGUAGE','MANUFACTURER',
'PMID','PUBLISHER','REF','REFAUTH','REFTITLE',
'REFSRCTITLE','SEQBANK','SEQNUMBER','SRCTITLE',
'TITLE','TITLE-ABS-KEY','TITLE-ABS-KEY-AUTH','TRADENAME','VOLUME','WEBSITE']
search_count = {'ALL': 0,'ABS': 0,'AF-ID': 0,'AFFIL': 0,'AFFILCITY': 0,'AFFILCOUNTRY': 0,'AFFILORG': 0,
'AU-ID': 0,'AUTHOR-NAME': 0,'AUTH': 0,'AUTHFIRST': 0,'AUTHLASTNAME': 0,'AUTHCOLLAB': 0,
'AUTHKEY': 0,'CASREGNUMBER': 0,'CHEM': 0,'CHEMNAME': 0,'CODEN': 0,'CONF': 0,'CONFLOC': 0,
'CONFNAME': 0,'CONFSPONSORS': 0,'DOI': 0,'EDFIRST': 0,'EDITOR': 0,'EDLASTNAME': 0,'EISSN': 0,
'EXACTSRCTITLE': 0,'FIRSTAUTH': 0,'FUND-SPONSOR': 0,'FUND-ACR': 0,'FUND-NO': 0,
'ISBN': 0,'ISSN': 0,'ISSNP': 0,'ISSUE': 0,'KEY': 0,'LANGUAGE': 0,'MANUFACTURER': 0,'PMID': 0,
'PUBLISHER': 0,'REF': 0,'REFAUTH': 0,'REFTITLE': 0,'REFSRCTITLE': 0,'SEQBANK': 0,'SEQNUMBER': 0,
'SRCTITLE': 0,'TITLE': 0,'TITLE-ABS-KEY': 0,'TITLE-ABS-KEY-AUTH': 0,'TRADENAME': 0,'VOLUME': 0,
'WEBSITE': 0}
for code in codes:
query = code + "(emotion) OR " + code + "(emotional) OR " + code + "(emotions) OR " + code + "(empathy) OR " + code + "(empathic) OR " + code + "(feeling) " \
"OR " + code + "(feelings) OR " + code + "(mood) OR " + code + "(moods) OR " + code + "(motivation) OR " + code + "(motivations) OR " + code + "(preference) " \
"OR " + code + "(preferences) OR " + code + "(stress) OR " + code + "(well-being) " \
"AND PUBYEAR < 2021"
uri = u'https://api.elsevier.com/content/search/scopus?query=' + url_encode(query) + "&count=1"
api_response = client.exec_request(uri)
search_count[code] = int(api_response['search-results']['opensearch:totalResults'])
with open('output/resultsCount.csv', 'w') as f:
for key in search_count.keys():
f.write("%s,%s\n"%(key,search_count[key])) | 0.138404 | 0.053403 |
import requests
import datetime as dt
bcb_urls = {
'IPCA-Serviços': 'https://api.bcb.gov.br/dados/serie/bcdata.sgs.10844/dados?formato=json',
'IPCA-Bens não-duráveis': 'https://api.bcb.gov.br/dados/serie/bcdata.sgs.10841/dados?formato=json',
'IPCA-Bens semi-duráveis': 'https://api.bcb.gov.br/dados/serie/bcdata.sgs.10842/dados?formato=json',
'IPCA-Bens duráveis': 'https://api.bcb.gov.br/dados/serie/bcdata.sgs.10842/dados?formato=json',
} # URLs from the Brazilian Central Bank (BCB)
quandl_urls = {
'IGP-M': 'https://www.quandl.com/api/v3/datasets/BCB/189.json?api_key=<KEY>',
'INPC': 'https://www.quandl.com/api/v3/datasets/BCB/188.json?api_key=<KEY>',
'IPCA': 'https://www.quandl.com/api/v3/datasets/BCB/433.json?api_key=<KEY>',
'IPA': 'https://www.quandl.com/api/v3/datasets/BCB/225.json?api_key=<KEY>',
'IGP-DI': 'https://www.quandl.com/api/v3/datasets/BCB/190.json?api_key=<KEY>'
} # URLs from Quandl data marketplace
def parse_quandl_json(url, ignore_day=True):
"""
Returns a list of dictionaries using the 'date':'value' schema using the Quandl's API.
:param url: a url referenced as a value in quandl_urls dict
:param ignore_day: ignores the day when parsing data. IGP usually is delivered in the last day of the month.
:return:
:rtype: list
>>> parse_quandl_json(quandl_urls['IGP-M'], ignore_day=False)[0]
{'date': datetime.datetime(1989, 6, 30, 0, 0), 'value': 19.68}
"""
global quandl_urls
assert url in quandl_urls.values(), f'{url} is not a quandl url'
data = requests.get(url).json()
output = list()
for item in data['dataset']['data']:
date, rate = item
date = dt.datetime.strptime(date, '%Y-%m-%d') #Quandl uses '2009-09-30' date style
if ignore_day:
date = dt.datetime(date.year, date.month, 1) #Sets day to 1
new_item = {'date': date, 'value': float(rate)}
output.append(new_item)
output = sorted(output, key=lambda k: k['date']) #Sorts the list. Newest date first.
return output
def parse_bcb_json(url):
"""
Returns a list of dictionaries using the 'date':'value' schema using Brazil Central Bank API.
:param url: A string pointing to BCB's JSON API
:type url: str
:return: Sorted list of dicts
:rtype: list
>>> parse_bcb_json(bcb_urls['IPCA-Serviços'])[0]
{'date': datetime.datetime(1992, 1, 1, 0, 0), 'value': '25.84'}
"""
global bcb_urls
assert url in bcb_urls.values(), f'{url} is not a Central Bank url'
data = requests.get(url).json()
output = list()
for item in data:
date, rate = item['data'], item['valor']
date = dt.datetime.strptime(date, '%d/%m/%Y')#BCB uses '30/09/2009' date style
new_item = {'date': date, 'value': float(rate)}
output.append(new_item)
output = sorted(output, key=lambda k: k['date']) #Sorts the list. Newest date first.
return output
def get_cumulative_inflation(inflation_index, start_date, end_date=None, ignore_day=True):
"""
Returns the cumulative inflation as float, using the chosen inflation_index.
If no end_date is defined, returns month's inflation.
:param inflation_index: Name of the Inflation Index.
:type inflation_index: str
:param start_date: Starting date, which is included.
:type start_date: dt.datetime
:param end_date: Ending date, which is included.
:type end_date: dt.datetime
:param ignore_day: Ignores days when parsing the data. Choose this if you want monthly results.
:type ignore_day: bool
:return: The total inflation of time.
:rtype: float
:Examples:
>>> get_cumulative_inflation('IPCA-Serviços', dt.datetime(1995,1,1), dt.datetime(2000,1,1))
2.0355712526263328
>>> get_cumulative_inflation('IGP-M' , dt.datetime(1997,1,1), dt.datetime(1997,1,1))
1.0177
"""
if (end_date is None) or (end_date == start_date):
end_date = start_date # Defaults to the same date
else:
assert start_date < end_date, f'{start_date} must be equal or earlier then {end_date}.'
assert (type(start_date) is dt.datetime) and (type(end_date) is dt.datetime), 'Dates must be datetime datatype.'
value = 1
if inflation_index in bcb_urls.keys():
inflation_data = parse_bcb_json(bcb_urls[inflation_index])
elif inflation_index in quandl_urls.keys():
inflation_data = parse_quandl_json(quandl_urls[inflation_index], ignore_day)
else:
indexes_names = ', '.join(list(bcb_urls.keys())) + ', ' + ', '.join(list(quandl_urls.keys()))
raise ValueError(f'{inflation_index} is not a valid option. Try the following instead:\n{indexes_names}')
for rate in inflation_data:
if (rate['date'] >= start_date) and (rate['date'] <= end_date):
new_value = rate['value'] / 100 + 1 # Turns the value into a percentile
value *= new_value
else:
pass
if value == 1:
if (not ignore_day) and (inflation_index in quandl_urls.keys()):
raise ValueError(f'Invalid output {value}. Probably should pass True to ignore_day.')
raise ValueError(f'Unknown error error. Try other dates or indexes.')
return value | inflationtools/main.py | import requests
import datetime as dt
bcb_urls = {
'IPCA-Serviços': 'https://api.bcb.gov.br/dados/serie/bcdata.sgs.10844/dados?formato=json',
'IPCA-Bens não-duráveis': 'https://api.bcb.gov.br/dados/serie/bcdata.sgs.10841/dados?formato=json',
'IPCA-Bens semi-duráveis': 'https://api.bcb.gov.br/dados/serie/bcdata.sgs.10842/dados?formato=json',
'IPCA-Bens duráveis': 'https://api.bcb.gov.br/dados/serie/bcdata.sgs.10842/dados?formato=json',
} # URLs from the Brazilian Central Bank (BCB)
quandl_urls = {
'IGP-M': 'https://www.quandl.com/api/v3/datasets/BCB/189.json?api_key=<KEY>',
'INPC': 'https://www.quandl.com/api/v3/datasets/BCB/188.json?api_key=<KEY>',
'IPCA': 'https://www.quandl.com/api/v3/datasets/BCB/433.json?api_key=<KEY>',
'IPA': 'https://www.quandl.com/api/v3/datasets/BCB/225.json?api_key=<KEY>',
'IGP-DI': 'https://www.quandl.com/api/v3/datasets/BCB/190.json?api_key=<KEY>'
} # URLs from Quandl data marketplace
def parse_quandl_json(url, ignore_day=True):
"""
Returns a list of dictionaries using the 'date':'value' schema using the Quandl's API.
:param url: a url referenced as a value in quandl_urls dict
:param ignore_day: ignores the day when parsing data. IGP usually is delivered in the last day of the month.
:return:
:rtype: list
>>> parse_quandl_json(quandl_urls['IGP-M'], ignore_day=False)[0]
{'date': datetime.datetime(1989, 6, 30, 0, 0), 'value': 19.68}
"""
global quandl_urls
assert url in quandl_urls.values(), f'{url} is not a quandl url'
data = requests.get(url).json()
output = list()
for item in data['dataset']['data']:
date, rate = item
date = dt.datetime.strptime(date, '%Y-%m-%d') #Quandl uses '2009-09-30' date style
if ignore_day:
date = dt.datetime(date.year, date.month, 1) #Sets day to 1
new_item = {'date': date, 'value': float(rate)}
output.append(new_item)
output = sorted(output, key=lambda k: k['date']) #Sorts the list. Newest date first.
return output
def parse_bcb_json(url):
"""
Returns a list of dictionaries using the 'date':'value' schema using Brazil Central Bank API.
:param url: A string pointing to BCB's JSON API
:type url: str
:return: Sorted list of dicts
:rtype: list
>>> parse_bcb_json(bcb_urls['IPCA-Serviços'])[0]
{'date': datetime.datetime(1992, 1, 1, 0, 0), 'value': '25.84'}
"""
global bcb_urls
assert url in bcb_urls.values(), f'{url} is not a Central Bank url'
data = requests.get(url).json()
output = list()
for item in data:
date, rate = item['data'], item['valor']
date = dt.datetime.strptime(date, '%d/%m/%Y')#BCB uses '30/09/2009' date style
new_item = {'date': date, 'value': float(rate)}
output.append(new_item)
output = sorted(output, key=lambda k: k['date']) #Sorts the list. Newest date first.
return output
def get_cumulative_inflation(inflation_index, start_date, end_date=None, ignore_day=True):
"""
Returns the cumulative inflation as float, using the chosen inflation_index.
If no end_date is defined, returns month's inflation.
:param inflation_index: Name of the Inflation Index.
:type inflation_index: str
:param start_date: Starting date, which is included.
:type start_date: dt.datetime
:param end_date: Ending date, which is included.
:type end_date: dt.datetime
:param ignore_day: Ignores days when parsing the data. Choose this if you want monthly results.
:type ignore_day: bool
:return: The total inflation of time.
:rtype: float
:Examples:
>>> get_cumulative_inflation('IPCA-Serviços', dt.datetime(1995,1,1), dt.datetime(2000,1,1))
2.0355712526263328
>>> get_cumulative_inflation('IGP-M' , dt.datetime(1997,1,1), dt.datetime(1997,1,1))
1.0177
"""
if (end_date is None) or (end_date == start_date):
end_date = start_date # Defaults to the same date
else:
assert start_date < end_date, f'{start_date} must be equal or earlier then {end_date}.'
assert (type(start_date) is dt.datetime) and (type(end_date) is dt.datetime), 'Dates must be datetime datatype.'
value = 1
if inflation_index in bcb_urls.keys():
inflation_data = parse_bcb_json(bcb_urls[inflation_index])
elif inflation_index in quandl_urls.keys():
inflation_data = parse_quandl_json(quandl_urls[inflation_index], ignore_day)
else:
indexes_names = ', '.join(list(bcb_urls.keys())) + ', ' + ', '.join(list(quandl_urls.keys()))
raise ValueError(f'{inflation_index} is not a valid option. Try the following instead:\n{indexes_names}')
for rate in inflation_data:
if (rate['date'] >= start_date) and (rate['date'] <= end_date):
new_value = rate['value'] / 100 + 1 # Turns the value into a percentile
value *= new_value
else:
pass
if value == 1:
if (not ignore_day) and (inflation_index in quandl_urls.keys()):
raise ValueError(f'Invalid output {value}. Probably should pass True to ignore_day.')
raise ValueError(f'Unknown error error. Try other dates or indexes.')
return value | 0.671255 | 0.533762 |
from trac.config import IntOption
from trac.core import Component, implements
from trac.env import ISystemInfoProvider
from trac.util import get_pkginfo
from tracspamfilter.api import IFilterStrategy, N_
from spambayes.hammie import Hammie
from spambayes.storage import SQLClassifier
class BayesianFilterStrategy(Component):
"""Bayesian filtering strategy based on SpamBayes."""
implements(IFilterStrategy, ISystemInfoProvider)
karma_points = IntOption('spam-filter', 'bayes_karma', '15',
"""By what factor Bayesian spam probability score affects the overall
karma of a submission.""", doc_domain='tracspamfilter')
min_training = IntOption('spam-filter', 'bayes_min_training', '25',
"""The minimum number of submissions in the training database required
for the filter to start impacting the karma of submissions.""",
doc_domain='tracspamfilter')
min_dbcount = IntOption('spam-filter', 'bayes_min_dbcount', '5',
"""Entries with a count less than this value get removed from the
database when calling the reduce function.""",
doc_domain='tracspamfilter')
# IFilterStrategy implementation
def is_external(self):
return False
def test(self, req, author, content, ip):
hammie = self._get_hammie()
nspam = hammie.bayes.nspam
nham = hammie.bayes.nham
if author is not None:
testcontent = author + '\n' + content
else:
testcontent = content
if min(nspam, nham) < self.min_training:
self.log.info("Bayes filter strategy requires more training. "
"It currently has only %d words marked as ham, and "
"%d marked as spam, but requires at least %d for "
"each.", nham, nspam, self.min_training)
return
if nham - nspam > min(nham, nspam) * 2:
self.log.warn("The difference between the number of ham versus "
"spam submissions in the training database is "
"large, results may be bad.")
score = hammie.score(testcontent.encode('utf-8'))
self.log.debug("SpamBayes reported spam probability of %s", score)
points = -int(round(self.karma_points * (score * 2 - 1)))
if points != 0:
return (points,
N_("SpamBayes determined spam probability of %s%%"),
("%3.2f" % (score * 100)))
def train(self, req, author, content, ip, spam=True):
if author is not None:
testcontent = author + '\n' + content
else:
testcontent = content
self.log.info("Training SpamBayes, marking content as %s",
spam and "spam" or "ham")
hammie = self._get_hammie()
hammie.train(testcontent.encode('utf-8', 'ignore'), spam)
hammie.store()
return 1
# ISystemInfoProvider methods
def get_system_info(self):
import spambayes
yield 'SpamBayes', get_pkginfo(spambayes)['version']
# Internal methods
def _get_hammie(self):
try: # 1.0
return Hammie(TracDbClassifier(self.env, self.log))
except TypeError: # 1.1
return Hammie(TracDbClassifier(self.env, self.log), 'c')
def _get_numbers(self):
hammie = self._get_hammie()
return hammie.nspam, hammie.nham
# used by admin panel
def reduce(self):
self.env.db_transaction("""
DELETE FROM spamfilter_bayes
WHERE nspam+nham < %s AND NOT word = 'saved state'
""", (self.min_dbcount,))
def dblines(self):
total = self.env.db_query("""
SELECT COUNT(*) FROM spamfilter_bayes
WHERE NOT word = 'saved state'
""")[0][0]
spam = self.env.db_query("""
SELECT COUNT(*) FROM spamfilter_bayes
WHERE nham = 0 AND NOT word = 'saved state'
""")[0][0]
ham = self.env.db_query("""
SELECT COUNT(*) FROM spamfilter_bayes
WHERE nspam = 0 AND NOT word = 'saved state'
""")[0][0]
reduce = self.env.db_query("""
SELECT COUNT(*) FROM spamfilter_bayes
WHERE nspam+nham < %s AND NOT word = 'saved state'
""", (self.min_dbcount,))[0][0]
return total, spam, ham, reduce
class TracDbClassifier(SQLClassifier):
# FIXME: This thing is incredibly slow
def __init__(self, env_db, log):
self.env_db = env_db
self.log = log
self.nham = None
self.nspam = None
SQLClassifier.__init__(self, 'Trac')
def load(self):
if self._has_key(self.statekey):
row = self._get_row(self.statekey)
self.nspam = row['nspam']
self.nham = row['nham']
else: # new database
self.nspam = self.nham = 0
def _sanitize(self, text):
if isinstance(text, unicode):
return text
# Remove invalid byte sequences from utf-8 encoded text
return text.decode('utf-8', 'ignore')
def _get_row(self, word):
word = self._sanitize(word)
for row in self.env_db.db_query("""
SELECT nspam,nham FROM spamfilter_bayes WHERE word=%s
""", (word,)):
break
else:
return {}
# prevent assertion - happens when there are failures in training and
# the count is not updated due to an exception
if word != self.statekey:
if row[0] > self.nspam:
self.log.warn("Reset SPAM count from %d to %d due to keyword "
"'%s'.", self.nspam, row[0], word)
self.nspam = row[0]
self.store()
if row[1] > self.nham:
self.log.warn("Reset HAM count from %d to %d due to keyword "
"'%s'.", self.nham, row[1], word)
self.nham = row[1]
self.store()
return {'nspam': row[0], 'nham': row[1]}
def _set_row(self, word, nspam, nham):
word = self._sanitize(word)
with self.env_db.db_transaction as db:
if self._has_key(word):
db("UPDATE spamfilter_bayes SET nspam=%s,nham=%s "
"WHERE word=%s", (nspam, nham, word))
else:
db("INSERT INTO spamfilter_bayes (word,nspam,nham) "
"VALUES (%s,%s,%s)", (word, nspam, nham))
def _delete_row(self, word):
word = self._sanitize(word)
self.env_db.db_transaction("""
DELETE FROM spamfilter_bayes WHERE word=%s
""", (word,))
def _has_key(self, key):
key = self._sanitize(key)
for count, in self.env_db.db_query("""
SELECT COUNT(*) FROM spamfilter_bayes WHERE word=%s
""", (key,)):
return bool(count)
def _wordinfoget(self, word):
row = self._get_row(word)
if row:
item = self.WordInfoClass()
item.__setstate__((row['nspam'], row['nham']))
return item
def _wordinfokeys(self):
words = []
for word, in self.env_db.db_query("""
SELECT word FROM spamfilter_bayes
"""):
words.append(word)
return words | files/spam-filter/tracspamfilter/filters/bayes.py |
from trac.config import IntOption
from trac.core import Component, implements
from trac.env import ISystemInfoProvider
from trac.util import get_pkginfo
from tracspamfilter.api import IFilterStrategy, N_
from spambayes.hammie import Hammie
from spambayes.storage import SQLClassifier
class BayesianFilterStrategy(Component):
"""Bayesian filtering strategy based on SpamBayes."""
implements(IFilterStrategy, ISystemInfoProvider)
karma_points = IntOption('spam-filter', 'bayes_karma', '15',
"""By what factor Bayesian spam probability score affects the overall
karma of a submission.""", doc_domain='tracspamfilter')
min_training = IntOption('spam-filter', 'bayes_min_training', '25',
"""The minimum number of submissions in the training database required
for the filter to start impacting the karma of submissions.""",
doc_domain='tracspamfilter')
min_dbcount = IntOption('spam-filter', 'bayes_min_dbcount', '5',
"""Entries with a count less than this value get removed from the
database when calling the reduce function.""",
doc_domain='tracspamfilter')
# IFilterStrategy implementation
def is_external(self):
return False
def test(self, req, author, content, ip):
hammie = self._get_hammie()
nspam = hammie.bayes.nspam
nham = hammie.bayes.nham
if author is not None:
testcontent = author + '\n' + content
else:
testcontent = content
if min(nspam, nham) < self.min_training:
self.log.info("Bayes filter strategy requires more training. "
"It currently has only %d words marked as ham, and "
"%d marked as spam, but requires at least %d for "
"each.", nham, nspam, self.min_training)
return
if nham - nspam > min(nham, nspam) * 2:
self.log.warn("The difference between the number of ham versus "
"spam submissions in the training database is "
"large, results may be bad.")
score = hammie.score(testcontent.encode('utf-8'))
self.log.debug("SpamBayes reported spam probability of %s", score)
points = -int(round(self.karma_points * (score * 2 - 1)))
if points != 0:
return (points,
N_("SpamBayes determined spam probability of %s%%"),
("%3.2f" % (score * 100)))
def train(self, req, author, content, ip, spam=True):
if author is not None:
testcontent = author + '\n' + content
else:
testcontent = content
self.log.info("Training SpamBayes, marking content as %s",
spam and "spam" or "ham")
hammie = self._get_hammie()
hammie.train(testcontent.encode('utf-8', 'ignore'), spam)
hammie.store()
return 1
# ISystemInfoProvider methods
def get_system_info(self):
import spambayes
yield 'SpamBayes', get_pkginfo(spambayes)['version']
# Internal methods
def _get_hammie(self):
try: # 1.0
return Hammie(TracDbClassifier(self.env, self.log))
except TypeError: # 1.1
return Hammie(TracDbClassifier(self.env, self.log), 'c')
def _get_numbers(self):
hammie = self._get_hammie()
return hammie.nspam, hammie.nham
# used by admin panel
def reduce(self):
self.env.db_transaction("""
DELETE FROM spamfilter_bayes
WHERE nspam+nham < %s AND NOT word = 'saved state'
""", (self.min_dbcount,))
def dblines(self):
total = self.env.db_query("""
SELECT COUNT(*) FROM spamfilter_bayes
WHERE NOT word = 'saved state'
""")[0][0]
spam = self.env.db_query("""
SELECT COUNT(*) FROM spamfilter_bayes
WHERE nham = 0 AND NOT word = 'saved state'
""")[0][0]
ham = self.env.db_query("""
SELECT COUNT(*) FROM spamfilter_bayes
WHERE nspam = 0 AND NOT word = 'saved state'
""")[0][0]
reduce = self.env.db_query("""
SELECT COUNT(*) FROM spamfilter_bayes
WHERE nspam+nham < %s AND NOT word = 'saved state'
""", (self.min_dbcount,))[0][0]
return total, spam, ham, reduce
class TracDbClassifier(SQLClassifier):
# FIXME: This thing is incredibly slow
def __init__(self, env_db, log):
self.env_db = env_db
self.log = log
self.nham = None
self.nspam = None
SQLClassifier.__init__(self, 'Trac')
def load(self):
if self._has_key(self.statekey):
row = self._get_row(self.statekey)
self.nspam = row['nspam']
self.nham = row['nham']
else: # new database
self.nspam = self.nham = 0
def _sanitize(self, text):
if isinstance(text, unicode):
return text
# Remove invalid byte sequences from utf-8 encoded text
return text.decode('utf-8', 'ignore')
def _get_row(self, word):
word = self._sanitize(word)
for row in self.env_db.db_query("""
SELECT nspam,nham FROM spamfilter_bayes WHERE word=%s
""", (word,)):
break
else:
return {}
# prevent assertion - happens when there are failures in training and
# the count is not updated due to an exception
if word != self.statekey:
if row[0] > self.nspam:
self.log.warn("Reset SPAM count from %d to %d due to keyword "
"'%s'.", self.nspam, row[0], word)
self.nspam = row[0]
self.store()
if row[1] > self.nham:
self.log.warn("Reset HAM count from %d to %d due to keyword "
"'%s'.", self.nham, row[1], word)
self.nham = row[1]
self.store()
return {'nspam': row[0], 'nham': row[1]}
def _set_row(self, word, nspam, nham):
word = self._sanitize(word)
with self.env_db.db_transaction as db:
if self._has_key(word):
db("UPDATE spamfilter_bayes SET nspam=%s,nham=%s "
"WHERE word=%s", (nspam, nham, word))
else:
db("INSERT INTO spamfilter_bayes (word,nspam,nham) "
"VALUES (%s,%s,%s)", (word, nspam, nham))
def _delete_row(self, word):
word = self._sanitize(word)
self.env_db.db_transaction("""
DELETE FROM spamfilter_bayes WHERE word=%s
""", (word,))
def _has_key(self, key):
key = self._sanitize(key)
for count, in self.env_db.db_query("""
SELECT COUNT(*) FROM spamfilter_bayes WHERE word=%s
""", (key,)):
return bool(count)
def _wordinfoget(self, word):
row = self._get_row(word)
if row:
item = self.WordInfoClass()
item.__setstate__((row['nspam'], row['nham']))
return item
def _wordinfokeys(self):
words = []
for word, in self.env_db.db_query("""
SELECT word FROM spamfilter_bayes
"""):
words.append(word)
return words | 0.378229 | 0.13852 |
import pygame
import logging
import copy
import time
import sys
from board import SudokuBoard
from tile import TileText
# Background fill colour for the whole window (white, RGB).
DEFAULT_BG_COL = (255, 255, 255)
# Maximum number of board snapshots kept in the undo history.
MAX_BACKUP_LENGTH = 100
class Button(object):
    """A clickable UI button: an outlined rectangle with a text label."""

    DEFAULT_COL = (0, 0, 0)
    DEFAULT_TEXTCOL = (0, 0, 0)

    def __init__(self, text):
        """Remember the label rendered whenever the button is drawn."""
        self.text = text

    def draw(self, screen, x, y, width, height):
        """Draw the outlined button with its label onto *screen*.

        (x, y) is the top-left corner; *width* and *height* give the box
        size and also scale the label font and its offset inside the box.
        """
        outline = pygame.Rect(x, y, width, height)
        pygame.draw.rect(screen, self.DEFAULT_COL, outline, width=2)
        # Font size tracks the button height so the label fills the box.
        label_font = pygame.font.SysFont(None, int(9 * height / 10))
        label = label_font.render(self.text, True, self.DEFAULT_TEXTCOL)
        label_pos = (int(x + width / 20), int(y + height // 4))
        screen.blit(label, label_pos)
class TextBox(object):
    """A borderless text label drawn directly onto the screen."""

    DEFAULT_COL = (0, 0, 0)
    DEFAULT_TEXTCOL = (0, 0, 0)

    def __init__(self, text):
        """Remember the text rendered whenever the box is drawn."""
        self.text = text

    def draw(self, screen, x, y, width, height):
        """Render the text at (x, y); *width*/*height* scale font and offsets.

        The original also called pygame.draw.rect(..., width=-1); per the
        pygame documentation a negative width draws nothing, so that dead
        call has been removed — only the text is blitted.
        """
        font = pygame.font.SysFont(None, int(8.5 * height / 10))
        img = font.render(self.text, True, self.DEFAULT_TEXTCOL)
        screen.blit(img, (int(x + width / 20), int(y + height // 4)))
def main():
    """Run the Sidekus sudoku GUI.

    Initializes pygame, loads a board (path from ``sys.argv[1]`` when given,
    else ``data/example1.txt``), then loops: handling keyboard digit entry
    and pencil marks, mouse selection, the check/undo/redo/reset buttons,
    and redrawing the board, control hints and elapsed-time clock each
    frame, until the window is closed.
    """
    # Set up logs
    logging.basicConfig()
    logging.info("Successfully imported pygame")
    pygame.init()
    pygame.font.init()
    logging.info("Successfully initialized pygame")
    # Set up sudoku board
    filename = 'data/example1.txt'
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    board = SudokuBoard(input_file=filename)
    # Set up display
    screen_size = (1600, 900)
    screen = pygame.display.set_mode(size=screen_size,
                                     flags=pygame.RESIZABLE)
    pygame.display.set_caption("Sidekus")
    screen.fill(DEFAULT_BG_COL)
    # Implement reset button
    reset_button_x = int(3.25 * screen_size[0] / 4)
    reset_button_y = int(0.25 * screen_size[1])
    resb_width = int(screen_size[0] / 9)
    resb_height = int(0.05 * screen_size[1])
    # Implement check button
    check_button_x = int(3.25 * screen_size[0] / 4)
    check_button_y = int(0.35 * screen_size[1])
    cb_width = int(screen_size[0] / 7)
    cb_height = int(0.05 * screen_size[1])
    # Implement undo button
    undo_button_x = int(3.25 * screen_size[0] / 4)
    undo_button_y = int(0.45 * screen_size[1])
    ub_width = int(screen_size[0] / 10)
    ub_height = int(0.05 * screen_size[1])
    # Implement redo button
    redo_button_x = int(3.25 * screen_size[0] / 4)
    redo_button_y = int(0.55 * screen_size[1])
    rb_width = int(screen_size[0] / 10)
    rb_height = int(0.05 * screen_size[1])
    # Instructions boxes
    inst_start_x = 0
    inst_start_y = int(3 * screen_size[1] / 18)
    inst_width = int(screen_size[0] / 8)
    inst_height = int(0.05 * screen_size[1])
    inst_title = TextBox("Controls")
    inst_1 = TextBox("1-9 : enter digit")
    inst_2 = TextBox("Ctrl+1-9 : green pencil mark")
    inst_3 = TextBox("Shift+1-9 : red pencil mark")
    inst_4 = TextBox("Space : highlight repeats")
    inst_5 = TextBox("Mouse click: select cell")
    inst_6 = TextBox("Ctrl+click : select cells")
    controls = [inst_title, inst_1, inst_2, inst_3, inst_4, inst_5, inst_6]
    # Clock
    clock_title = TextBox("Time elapsed")
    time_button = 0  # placeholder; replaced by a TextBox on the first frame
    # Set up game loop
    done = False
    is_highlight = False  # True while the mouse button is held (drag-select)
    tiles_to_update = set()  # currently selected tile indices
    check_button = Button("Check Solution")
    undo_button = Button("Undo Move")
    redo_button = Button("Redo Move")
    show_solved_button = False
    solved = False
    logging.info("Initializing display")
    solved_button = None
    board_backup = []  # undo history, newest snapshot first
    board_backup.append(copy.deepcopy(board))
    initial_state = board_backup[0]  # pristine copy kept for "Reset board"
    reset_button = Button("Reset board")
    redo_list = []  # redo history, newest snapshot first
    solve_time = None  # clock string frozen once the puzzle is solved
    start = time.time()
    while not done:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                done = True
                break
            elif event.type == pygame.KEYDOWN:
                keys = pygame.key.get_pressed()
                # Key presses only act on a selection; nothing selected -> skip.
                if len(tiles_to_update) == 0:
                    continue
                move_made = False
                if keys[pygame.K_SPACE]:
                    # Space highlights repeats of the single selected tile.
                    if len(tiles_to_update) == 1:
                        tile = tiles_to_update.pop()
                        tile_x, tile_y = tile
                        board.highlight_repeats(tile_x, tile_y)
                        tiles_to_update.add(tile)
                # Check for movement of cursor
                if (keys[pygame.K_LEFT] or keys[pygame.K_RIGHT] or
                        keys[pygame.K_UP] or keys[pygame.K_DOWN]):  # noqa: E129
                    if len(tiles_to_update) == 1:
                        tile = tiles_to_update.pop()
                        board.highlighted[tile] = False
                        # Arrow keys move the selection, wrapping modulo 9.
                        if keys[pygame.K_UP]:
                            board.highlighted[tile[0], (tile[1]-1) % 9] = True
                            tiles_to_update.add((tile[0], (tile[1]-1) % 9))
                        elif keys[pygame.K_DOWN]:
                            board.highlighted[tile[0], (tile[1]+1) % 9] = True
                            tiles_to_update.add((tile[0], (tile[1]+1) % 9))
                        elif keys[pygame.K_LEFT]:
                            board.highlighted[(tile[0]-1) % 9, tile[1]] = True
                            tiles_to_update.add(((tile[0]-1) % 9, tile[1]))
                        elif keys[pygame.K_RIGHT]:
                            board.highlighted[(tile[0]+1) % 9, tile[1]] = True
                            tiles_to_update.add(((tile[0]+1) % 9, tile[1]))
                # If no modifiers, then just write the digit
                if not (keys[pygame.K_LCTRL] or keys[pygame.K_RCTRL] or
                        keys[pygame.K_LSHIFT] or keys[pygame.K_RSHIFT]):
                    dig = None
                    for i, k in enumerate(range(pygame.K_1, pygame.K_9+1)):
                        if keys[k]:
                            dig = i+1
                            break
                    else:
                        # for/else: no digit key pressed -> next event.
                        continue
                    tile_text = TileText(dig=dig, user=True)
                    for tile in tiles_to_update:
                        # Only user-entered tiles may be overwritten (givens
                        # are protected).
                        if board.tiles[tile[0]][tile[1]].text.user:
                            board.update_tile(tile[0], tile[1], tile_text)
                            move_made = True
                else:
                    # Pencil marks
                    digs = []
                    for i, k in enumerate(range(pygame.K_1, pygame.K_9+1)):
                        if keys[k]:
                            digs.append(i+1)
                    if len(digs) == 0:
                        continue
                    # Center pencil mark
                    if keys[pygame.K_LCTRL] or keys[pygame.K_RCTRL]:
                        tile_text = TileText(dig=None, center=digs, user=True)
                    # Top pencil mark
                    elif keys[pygame.K_LSHIFT] or keys[pygame.K_RSHIFT]:
                        tile_text = TileText(top=digs, user=True)
                    for tile in tiles_to_update:
                        if board.tiles[tile[0]][tile[1]].text.user:
                            board.update_tile(tile[0], tile[1], tile_text)
                            move_made = True
                # Add board backups so we can undo moves
                if move_made:
                    # History is bounded; drop the oldest snapshot at the cap.
                    if (len(board_backup) == MAX_BACKUP_LENGTH and
                            board_backup[-1] is not None):  # noqa : E129
                        board_backup.pop()
                    board_backup.insert(0, copy.deepcopy(board))
            elif event.type == pygame.MOUSEBUTTONDOWN:
                keys = pygame.key.get_pressed()
                mpos = pygame.mouse.get_pos()
                if keys[pygame.K_LCTRL] or keys[pygame.K_RCTRL]:
                    # Ctrl+click adds the clicked tile to the selection.
                    tile_idx = board.get_clicked(mpos)
                    if tile_idx is not None:
                        tiles_to_update.add(tile_idx)
                else:
                    # Plain click starts a fresh (possibly dragged) selection.
                    is_highlight = True
                    tiles_to_update.clear()
                    board.reset_highlight()
                # Implement check solution
                if check_button_x < mpos[0] < check_button_x + cb_width:
                    if check_button_y < mpos[1] < check_button_y + cb_height:
                        solved = board.check_solve()
                        if solved:
                            solved_button = TextBox("Looks good!")
                        else:
                            solved_button = TextBox("Nah mate you're off")
                        show_solved_button = True
                # Implement undo
                if undo_button_x < mpos[0] < undo_button_x + ub_width:
                    if undo_button_y < mpos[1] < undo_button_y + ub_height:
                        if len(board_backup) > 0:
                            board = board_backup.pop(0)
                            redo_list.insert(0, copy.deepcopy(board))
                            board.draw(screen)
                # Implement redo
                if redo_button_x < mpos[0] < redo_button_x + rb_width:
                    if redo_button_y < mpos[1] < redo_button_y + rb_height:
                        if len(redo_list) > 0:
                            board = redo_list.pop(0)
                            board_backup.insert(0, copy.deepcopy(board))
                            board.draw(screen)
                # Implement reset
                if reset_button_x < mpos[0] < reset_button_x + resb_width:
                    if reset_button_y < mpos[1] < reset_button_y + resb_height:
                        # Restore the pristine board and restart the clock.
                        board = copy.deepcopy(initial_state)
                        board_backup = []
                        board_backup.append(copy.deepcopy(board))
                        board.draw(screen)
                        start = time.time()
            elif event.type == pygame.MOUSEBUTTONUP:
                is_highlight = False
        # While dragging, keep adding the tile under the cursor to the
        # selection.
        if is_highlight:
            mpos = pygame.mouse.get_pos()
            tile_idx = board.get_clicked(mpos)
            if tile_idx is not None:
                tiles_to_update.add(tile_idx)
        # Draw on screen
        screen.fill(DEFAULT_BG_COL)
        # Recompute all layout each frame so the RESIZABLE window reflows.
        screen_size = screen.get_size()
        # Reset buttons
        # Implement reset button
        reset_button_x = int(3.25 * screen_size[0] / 4)
        reset_button_y = int(0.25 * screen_size[1])
        resb_width = int(screen_size[0] / 9)
        resb_height = int(0.05 * screen_size[1])
        # Implement check button
        check_button_x = int(3.25 * screen_size[0] / 4)
        check_button_y = int(0.35 * screen_size[1])
        cb_width = int(screen_size[0] / 7)
        cb_height = int(0.05 * screen_size[1])
        # Implement undo button
        undo_button_x = int(3.25 * screen_size[0] / 4)
        undo_button_y = int(0.45 * screen_size[1])
        ub_width = int(screen_size[0] / 10)
        ub_height = int(0.05 * screen_size[1])
        # Implement redo button
        redo_button_x = int(3.25 * screen_size[0] / 4)
        redo_button_y = int(0.55 * screen_size[1])
        rb_width = int(screen_size[0] / 10)
        rb_height = int(0.05 * screen_size[1])
        # Instructions boxes
        inst_start_x = 0
        inst_start_y = int(3 * screen_size[1] / 18)
        inst_width = int(screen_size[0] / 8)
        inst_height = int(0.05 * screen_size[1])
        # Print controls
        for i, ctrl in enumerate(controls):
            inst_y = inst_start_y + int(i * 1.25 * inst_height)
            ctrl.draw(screen, inst_start_x, inst_y, inst_width, inst_height)
        clock_y = inst_start_y + int(1.25 * inst_height * (len(controls)+1))
        clock_title.draw(screen, inst_start_x, clock_y,
                         inst_width, inst_height)
        if not solved:
            time_elapsed = time.time() - start
            mins, secs = int(time_elapsed // 60), int(time_elapsed % 60)
            # Slice the fractional-second digits out of the string repr.
            # NOTE(review): this breaks if the repr uses scientific notation
            # (very small fractions) — consider format() instead; confirm.
            millisecs = str((time_elapsed % 60) - secs)
            millisecs = millisecs[2: min(5, len(millisecs))]
            time_str = "{:d}:{:02d}:{}".format(mins, secs, millisecs)
            time_button = TextBox(time_str)
        else:
            # Once solved, freeze the clock at the solve time.
            if solve_time is None:
                time_elapsed = time.time() - start
                mins, secs = int(time_elapsed // 60), int(time_elapsed % 60)
                millisecs = str((time_elapsed % 60) - secs)
                millisecs = millisecs[2: min(5, len(millisecs))]
                solve_time = "{:d}:{:02d}:{}".format(mins, secs, millisecs)
            time_button = TextBox(solve_time)
        time_button.draw(screen, inst_start_x,
                         clock_y + int(1.25 * inst_height),
                         inst_width, inst_height)
        # Deal with buttons
        check_button.draw(screen, check_button_x, check_button_y,
                          cb_width, cb_height)
        undo_button.draw(screen, undo_button_x, undo_button_y,
                         ub_width, ub_height)
        reset_button.draw(screen, reset_button_x, reset_button_y,
                          resb_width, resb_height)
        # Redo is only shown when there is something to redo.
        if len(redo_list):
            redo_button.draw(screen, redo_button_x, redo_button_y,
                             rb_width, rb_height)
        if show_solved_button:
            if solved:
                solved_button.draw(screen, check_button_x - 0.1*cb_width,
                                   check_button_y + 6 * cb_height,
                                   0.8 * cb_width,
                                   cb_height)
            else:
                solved_button.draw(screen, check_button_x - 0.1*cb_width,
                                   check_button_y + 6 * cb_height,
                                   1.25 * cb_width,
                                   cb_height)
        board.draw(screen)
        pygame.display.update()
    logging.info("Display loop ended, program quitting")
# Script entry point. The original call line was corrupted by an extraction
# artifact ("| src/main.py | import pygame" fused onto it); restored here.
if __name__ == '__main__':
    main()
import logging
import copy
import time
import sys
from board import SudokuBoard
from tile import TileText
DEFAULT_BG_COL = (255, 255, 255)
MAX_BACKUP_LENGTH = 100
class Button(object):
    """Clickable label: a 2-pixel rectangular outline with text inside.

    Purely visual -- hit-testing against the button's area is done by the
    caller; this class only knows how to render itself.
    """

    DEFAULT_COL = (0, 0, 0)      # outline colour (black)
    DEFAULT_TEXTCOL = (0, 0, 0)  # label colour (black)

    def __init__(self, text):
        """Remember the label rendered on every draw call."""
        self.text = text

    def draw(self, screen, x, y, width, height):
        """Render the outline and label onto *screen* at (x, y)."""
        outline = pygame.Rect(x, y, width, height)
        pygame.draw.rect(screen, self.DEFAULT_COL, outline, width=2)
        # Font height tracks the button height so the label scales with it.
        label_font = pygame.font.SysFont(None, int(9*height/10))
        label = label_font.render(self.text, True, self.DEFAULT_TEXTCOL)
        label_pos = (int(x + width/20), int(y + height//4))
        screen.blit(label, label_pos)
class TextBox(object):
    """Plain text label.

    Unlike Button the bounding rect is drawn with width=-1 -- NOTE(review):
    in pygame a negative width draws nothing, so the box itself is
    invisible; presumably intentional (label only), confirm against
    pygame.draw.rect semantics.
    """

    DEFAULT_COL = (0, 0, 0)      # (unused visually given width=-1)
    DEFAULT_TEXTCOL = (0, 0, 0)  # text colour (black)

    def __init__(self, text):
        """Remember the text rendered on every draw call."""
        self.text = text

    def draw(self, screen, x, y, width, height):
        """Render the (invisible) box and its text onto *screen*."""
        box = pygame.Rect(x, y, width, height)
        pygame.draw.rect(screen, self.DEFAULT_COL, box, width=-1)
        text_font = pygame.font.SysFont(None, int(8.5*height/10))
        rendered = text_font.render(self.text, True, self.DEFAULT_TEXTCOL)
        text_pos = (int(x + width/20), int(y + height//4))
        screen.blit(rendered, text_pos)
def main():
    """Run the Sidekus sudoku GUI.

    Builds the board and side-panel widgets, then loops handling
    keyboard/mouse events and redrawing every frame until the window
    is closed. The puzzle file may be overridden via argv[1].
    """
    # Set up logs
    logging.basicConfig()
    logging.info("Successfully imported pygame")
    pygame.init()
    pygame.font.init()
    logging.info("Successfully initialized pygame")
    # Set up sudoku board (optional puzzle path as first CLI argument)
    filename = 'data/example1.txt'
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    board = SudokuBoard(input_file=filename)
    # Set up display
    screen_size = (1600, 900)
    screen = pygame.display.set_mode(size=screen_size,
                                     flags=pygame.RESIZABLE)
    pygame.display.set_caption("Sidekus")
    screen.fill(DEFAULT_BG_COL)
    # Widget geometry is expressed as fractions of the window size; it is
    # recomputed each frame below so a resized window lays out correctly.
    # Implement reset button
    reset_button_x = int(3.25 * screen_size[0] / 4)
    reset_button_y = int(0.25 * screen_size[1])
    resb_width = int(screen_size[0] / 9)
    resb_height = int(0.05 * screen_size[1])
    # Implement check button
    check_button_x = int(3.25 * screen_size[0] / 4)
    check_button_y = int(0.35 * screen_size[1])
    cb_width = int(screen_size[0] / 7)
    cb_height = int(0.05 * screen_size[1])
    # Implement undo button
    undo_button_x = int(3.25 * screen_size[0] / 4)
    undo_button_y = int(0.45 * screen_size[1])
    ub_width = int(screen_size[0] / 10)
    ub_height = int(0.05 * screen_size[1])
    # Implement redo button
    redo_button_x = int(3.25 * screen_size[0] / 4)
    redo_button_y = int(0.55 * screen_size[1])
    rb_width = int(screen_size[0] / 10)
    rb_height = int(0.05 * screen_size[1])
    # Instructions boxes
    inst_start_x = 0
    inst_start_y = int(3 * screen_size[1] / 18)
    inst_width = int(screen_size[0] / 8)
    inst_height = int(0.05 * screen_size[1])
    inst_title = TextBox("Controls")
    inst_1 = TextBox("1-9 : enter digit")
    inst_2 = TextBox("Ctrl+1-9 : green pencil mark")
    inst_3 = TextBox("Shift+1-9 : red pencil mark")
    inst_4 = TextBox("Space : highlight repeats")
    inst_5 = TextBox("Mouse click: select cell")
    inst_6 = TextBox("Ctrl+click : select cells")
    controls = [inst_title, inst_1, inst_2, inst_3, inst_4, inst_5, inst_6]
    # Clock
    clock_title = TextBox("Time elapsed")
    # Placeholder; replaced by a TextBox on the first pass through the
    # draw code below.
    time_button = 0
    # Set up game loop
    done = False
    is_highlight = False
    tiles_to_update = set()  # currently selected (col, row) cells
    check_button = Button("Check Solution")
    undo_button = Button("Undo Move")
    redo_button = Button("Redo Move")
    show_solved_button = False
    solved = False
    logging.info("Initializing display")
    solved_button = None
    # board_backup[0] is always the most recent snapshot (undo stack);
    # redo_list[0] is the most recently undone state.
    board_backup = []
    board_backup.append(copy.deepcopy(board))
    initial_state = board_backup[0]
    reset_button = Button("Reset board")
    redo_list = []
    solve_time = None
    start = time.time()
    while not done:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                done = True
                break
            elif event.type == pygame.KEYDOWN:
                keys = pygame.key.get_pressed()
                if len(tiles_to_update) == 0:
                    continue
                move_made = False
                # Space: highlight repeats of the selected cell's digit
                if keys[pygame.K_SPACE]:
                    if len(tiles_to_update) == 1:
                        tile = tiles_to_update.pop()
                        tile_x, tile_y = tile
                        board.highlight_repeats(tile_x, tile_y)
                        tiles_to_update.add(tile)
                # Check for movement of cursor (arrows wrap around mod 9)
                if (keys[pygame.K_LEFT] or keys[pygame.K_RIGHT] or
                        keys[pygame.K_UP] or keys[pygame.K_DOWN]):  # noqa: E129
                    if len(tiles_to_update) == 1:
                        tile = tiles_to_update.pop()
                        board.highlighted[tile] = False
                        if keys[pygame.K_UP]:
                            board.highlighted[tile[0], (tile[1]-1) % 9] = True
                            tiles_to_update.add((tile[0], (tile[1]-1) % 9))
                        elif keys[pygame.K_DOWN]:
                            board.highlighted[tile[0], (tile[1]+1) % 9] = True
                            tiles_to_update.add((tile[0], (tile[1]+1) % 9))
                        elif keys[pygame.K_LEFT]:
                            board.highlighted[(tile[0]-1) % 9, tile[1]] = True
                            tiles_to_update.add(((tile[0]-1) % 9, tile[1]))
                        elif keys[pygame.K_RIGHT]:
                            board.highlighted[(tile[0]+1) % 9, tile[1]] = True
                            tiles_to_update.add(((tile[0]+1) % 9, tile[1]))
                # If no modifiers, then just write the digit
                if not (keys[pygame.K_LCTRL] or keys[pygame.K_RCTRL] or
                        keys[pygame.K_LSHIFT] or keys[pygame.K_RSHIFT]):
                    dig = None
                    for i, k in enumerate(range(pygame.K_1, pygame.K_9+1)):
                        if keys[k]:
                            dig = i+1
                            break
                    else:
                        # No digit key in this event: nothing to write
                        continue
                    tile_text = TileText(dig=dig, user=True)
                    for tile in tiles_to_update:
                        # Only user-entered cells may be overwritten; the
                        # puzzle's given clues stay fixed
                        if board.tiles[tile[0]][tile[1]].text.user:
                            board.update_tile(tile[0], tile[1], tile_text)
                            move_made = True
                else:
                    # Pencil marks
                    digs = []
                    for i, k in enumerate(range(pygame.K_1, pygame.K_9+1)):
                        if keys[k]:
                            digs.append(i+1)
                    if len(digs) == 0:
                        continue
                    # Center pencil mark
                    if keys[pygame.K_LCTRL] or keys[pygame.K_RCTRL]:
                        tile_text = TileText(dig=None, center=digs, user=True)
                    # Top pencil mark
                    elif keys[pygame.K_LSHIFT] or keys[pygame.K_RSHIFT]:
                        tile_text = TileText(top=digs, user=True)
                    for tile in tiles_to_update:
                        if board.tiles[tile[0]][tile[1]].text.user:
                            board.update_tile(tile[0], tile[1], tile_text)
                            move_made = True
                # Add board backups so we can undo moves
                if move_made:
                    if (len(board_backup) == MAX_BACKUP_LENGTH and
                            board_backup[-1] is not None):  # noqa : E129
                        board_backup.pop()
                    board_backup.insert(0, copy.deepcopy(board))
            elif event.type == pygame.MOUSEBUTTONDOWN:
                keys = pygame.key.get_pressed()
                mpos = pygame.mouse.get_pos()
                if keys[pygame.K_LCTRL] or keys[pygame.K_RCTRL]:
                    # Ctrl+click adds to the multi-cell selection
                    tile_idx = board.get_clicked(mpos)
                    if tile_idx is not None:
                        tiles_to_update.add(tile_idx)
                else:
                    # Plain click starts a fresh (possibly dragged) selection
                    is_highlight = True
                    tiles_to_update.clear()
                    board.reset_highlight()
                # Implement check solution
                if check_button_x < mpos[0] < check_button_x + cb_width:
                    if check_button_y < mpos[1] < check_button_y + cb_height:
                        solved = board.check_solve()
                        if solved:
                            solved_button = TextBox("Looks good!")
                        else:
                            solved_button = TextBox("Nah mate you're off")
                        show_solved_button = True
                # Implement undo
                if undo_button_x < mpos[0] < undo_button_x + ub_width:
                    if undo_button_y < mpos[1] < undo_button_y + ub_height:
                        if len(board_backup) > 0:
                            # NOTE(review): board_backup[0] is the snapshot
                            # taken *after* the latest move, so the first
                            # undo restores the current state (a no-op) and
                            # pushes that same state onto redo_list -- looks
                            # off by one; confirm intended semantics.
                            board = board_backup.pop(0)
                            redo_list.insert(0, copy.deepcopy(board))
                            board.draw(screen)
                # Implement redo
                if redo_button_x < mpos[0] < redo_button_x + rb_width:
                    if redo_button_y < mpos[1] < redo_button_y + rb_height:
                        if len(redo_list) > 0:
                            board = redo_list.pop(0)
                            board_backup.insert(0, copy.deepcopy(board))
                            board.draw(screen)
                # Implement reset
                if reset_button_x < mpos[0] < reset_button_x + resb_width:
                    if reset_button_y < mpos[1] < reset_button_y + resb_height:
                        # Restore the untouched starting grid, restart clock
                        board = copy.deepcopy(initial_state)
                        board_backup = []
                        board_backup.append(copy.deepcopy(board))
                        board.draw(screen)
                        start = time.time()
            elif event.type == pygame.MOUSEBUTTONUP:
                is_highlight = False
        # While the mouse button is held, keep adding the hovered cell so a
        # drag selects a run of cells
        if is_highlight:
            mpos = pygame.mouse.get_pos()
            tile_idx = board.get_clicked(mpos)
            if tile_idx is not None:
                tiles_to_update.add(tile_idx)
        # Draw on screen
        screen.fill(DEFAULT_BG_COL)
        # Re-read the window size and recompute widget geometry every frame
        # so the RESIZABLE window lays out correctly
        screen_size = screen.get_size()
        # Reset buttons
        # Implement reset button
        reset_button_x = int(3.25 * screen_size[0] / 4)
        reset_button_y = int(0.25 * screen_size[1])
        resb_width = int(screen_size[0] / 9)
        resb_height = int(0.05 * screen_size[1])
        # Implement check button
        check_button_x = int(3.25 * screen_size[0] / 4)
        check_button_y = int(0.35 * screen_size[1])
        cb_width = int(screen_size[0] / 7)
        cb_height = int(0.05 * screen_size[1])
        # Implement undo button
        undo_button_x = int(3.25 * screen_size[0] / 4)
        undo_button_y = int(0.45 * screen_size[1])
        ub_width = int(screen_size[0] / 10)
        ub_height = int(0.05 * screen_size[1])
        # Implement redo button
        redo_button_x = int(3.25 * screen_size[0] / 4)
        redo_button_y = int(0.55 * screen_size[1])
        rb_width = int(screen_size[0] / 10)
        rb_height = int(0.05 * screen_size[1])
        # Instructions boxes
        inst_start_x = 0
        inst_start_y = int(3 * screen_size[1] / 18)
        inst_width = int(screen_size[0] / 8)
        inst_height = int(0.05 * screen_size[1])
        # Print controls
        for i, ctrl in enumerate(controls):
            inst_y = inst_start_y + int(i * 1.25 * inst_height)
            ctrl.draw(screen, inst_start_x, inst_y, inst_width, inst_height)
        clock_y = inst_start_y + int(1.25 * inst_height * (len(controls)+1))
        clock_title.draw(screen, inst_start_x, clock_y,
                         inst_width, inst_height)
        if not solved:
            # Clock still running: format elapsed time as M:SS:frac
            time_elapsed = time.time() - start
            mins, secs = int(time_elapsed // 60), int(time_elapsed % 60)
            # NOTE(review): fractional digits extracted by string slicing
            # ("0.1234" -> "123"); short fractions yield fewer digits
            millisecs = str((time_elapsed % 60) - secs)
            millisecs = millisecs[2: min(5, len(millisecs))]
            time_str = "{:d}:{:02d}:{}".format(mins, secs, millisecs)
            time_button = TextBox(time_str)
        else:
            if solve_time is None:
                # Freeze the clock at the first frame after the solve
                time_elapsed = time.time() - start
                mins, secs = int(time_elapsed // 60), int(time_elapsed % 60)
                millisecs = str((time_elapsed % 60) - secs)
                millisecs = millisecs[2: min(5, len(millisecs))]
                solve_time = "{:d}:{:02d}:{}".format(mins, secs, millisecs)
            time_button = TextBox(solve_time)
        time_button.draw(screen, inst_start_x,
                         clock_y + int(1.25 * inst_height),
                         inst_width, inst_height)
        # Deal with buttons
        check_button.draw(screen, check_button_x, check_button_y,
                          cb_width, cb_height)
        undo_button.draw(screen, undo_button_x, undo_button_y,
                         ub_width, ub_height)
        reset_button.draw(screen, reset_button_x, reset_button_y,
                          resb_width, resb_height)
        # Redo is only drawn while there is something to redo
        if len(redo_list):
            redo_button.draw(screen, redo_button_x, redo_button_y,
                             rb_width, rb_height)
        if show_solved_button:
            # Width differs because the two messages differ in length
            if solved:
                solved_button.draw(screen, check_button_x - 0.1*cb_width,
                                   check_button_y + 6 * cb_height,
                                   0.8 * cb_width,
                                   cb_height)
            else:
                solved_button.draw(screen, check_button_x - 0.1*cb_width,
                                   check_button_y + 6 * cb_height,
                                   1.25 * cb_width,
                                   cb_height)
        board.draw(screen)
        pygame.display.update()
    logging.info("Display loop ended, program quitting")
if __name__ == '__main__':
main() | 0.318273 | 0.170715 |
import logging
from pants.backend.python.goals.setup_py import SetupKwargs, SetupKwargsRequest
from pants.engine.fs import DigestContents, GlobMatchErrorBehavior, PathGlobs
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.target import Target
from pants.engine.unions import UnionRule
logger = logging.getLogger(__name__)
# These setup.py arguments will be used by ALL Python distributions
# created in this repository. They are merged into each target's
# `provides` kwargs by setup_kwargs_plugin below, which rejects any
# target that tries to override one of these keys.
# NOTE(review): "author"/"author_email" hold redaction placeholders
# ("<NAME>.", "<EMAIL>") -- confirm the real values before publishing.
HARDCODED_KWARGS = {
    "author": "<NAME>.",
    "author_email": "<EMAIL>",
    "maintainer": "Grapl, Inc.",
    "maintainer_email": "<EMAIL>",
    "url": "https://github.com/grapl-security/grapl",
    "project_urls": {
        "Documentation": "https://grapl.readthedocs.io",
        "Source": "https://github.com/grapl-security/grapl",
        "Tracker": "https://github.com/grapl-security/grapl/issues",
    },
    "license": "MIT",
    "classifiers": [
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
}
class GraplSetupKwargsRequest(SetupKwargsRequest):
    """Repo-wide request type that opts every Python distribution into the
    custom setup.py kwargs plugin defined below."""

    @classmethod
    def is_applicable(cls, _: Target) -> bool:
        """Use this for ALL Python distributions created in this repository."""
        # Unconditional: a single kwargs policy covers the whole repo.
        return True
@rule
async def setup_kwargs_plugin(request: GraplSetupKwargsRequest) -> SetupKwargs:
    """Assemble the final setup.py kwargs for a python_distribution target.

    Validates the user-supplied `provides` kwargs, injects a
    `long_description` read from a README file (default: a README.md next
    to the target's BUILD file, overridable via the custom `readme` key),
    and merges in the repo-wide HARDCODED_KWARGS.

    Raises:
        ValueError: if a required key is missing, `long_description` is
            set explicitly, or a hardcoded key is overridden.
    """
    explicit_kwargs = request.explicit_kwargs
    # Each distribution must declare these itself; everything shared is
    # injected below. (Deduplicated from three copy-pasted checks; the
    # rendered error messages are unchanged.)
    for key in ("name", "description", "version"):
        if key not in explicit_kwargs:
            raise ValueError(
                f"Must provide a `{key}` key in the `provides` field for {request.target.address}"
            )
    # Look for a README.md file as a sibling to the BUILD file this
    # target is defined in.
    default_readme_path = f"{request.target.address.spec_path}/README.md"
    if "long_description" in explicit_kwargs:
        raise ValueError(
            f"Do not provide a `long_description` in the `provides` field for {request.target.address}. "
            f"Instead, either place a `README.md` file at {default_readme_path} "
            "OR specify a path to an appropriate Markdown file, relative to the Pants root "
            "in the `readme` key in the `provides` field"
        )
    # "readme" is a key that we (Grapl) use; it's not in standard
    # Pants. There may be some changes coming soon, though:
    # https://github.com/pantsbuild/pants/issues/11554
    # dict.pop with a default replaces the original conditional expression.
    readme_path = explicit_kwargs.pop("readme", default_readme_path)
    logger.info(f"Reading long_description from {readme_path}")
    # glob_match_error_behavior=error makes a missing README a hard failure.
    digest_contents = await Get(
        DigestContents,
        PathGlobs(
            [readme_path],
            description_of_origin=f"README resolution in `setup_py()` plugin ({__file__}) for {request.target.address}",
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
        ),
    )
    long_description = "\n".join(
        file_content.content.decode() for file_content in digest_contents
    )
    explicit_kwargs["long_description"] = long_description
    # Set hardcoded values, raising an exception if any of them are
    # overridden by the user.
    conflicts = set(explicit_kwargs.keys()).intersection(HARDCODED_KWARGS.keys())
    if conflicts:
        raise ValueError(
            f"These kwargs should not be set in the `provides` field for {request.target.address} "
            "because our internal plugin will automatically set them: "
            f"{sorted(conflicts)}"
        )
    explicit_kwargs.update(HARDCODED_KWARGS)
    return SetupKwargs(explicit_kwargs, address=request.target.address)
def rules():
return [
*collect_rules(),
UnionRule(SetupKwargsRequest, GraplSetupKwargsRequest),
] | pants-plugins/grapl_setup_py/grapl_setupargs.py | import logging
from pants.backend.python.goals.setup_py import SetupKwargs, SetupKwargsRequest
from pants.engine.fs import DigestContents, GlobMatchErrorBehavior, PathGlobs
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.target import Target
from pants.engine.unions import UnionRule
logger = logging.getLogger(__name__)
# These setup.py arguments will be used by ALL Python distributions
# created in this repository.
HARDCODED_KWARGS = {
"author": "<NAME>.",
"author_email": "<EMAIL>",
"maintainer": "Grapl, Inc.",
"maintainer_email": "<EMAIL>",
"url": "https://github.com/grapl-security/grapl",
"project_urls": {
"Documentation": "https://grapl.readthedocs.io",
"Source": "https://github.com/grapl-security/grapl",
"Tracker": "https://github.com/grapl-security/grapl/issues",
},
"license": "MIT",
"classifiers": [
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
}
class GraplSetupKwargsRequest(SetupKwargsRequest):
@classmethod
def is_applicable(cls, _: Target) -> bool:
"""Use this for ALL Python distributions created in this repository."""
return True
@rule
async def setup_kwargs_plugin(request: GraplSetupKwargsRequest) -> SetupKwargs:
explicit_kwargs = request.explicit_kwargs
if "name" not in explicit_kwargs:
raise ValueError(
f"Must provide a `name` key in the `provides` field for {request.target.address}"
)
if "description" not in explicit_kwargs:
raise ValueError(
f"Must provide a `description` key in the `provides` field for {request.target.address}"
)
if "version" not in explicit_kwargs:
raise ValueError(
f"Must provide a `version` key in the `provides` field for {request.target.address}"
)
# Look for a README.md file as a sibling to the BUILD file this
# target is defined in.
default_readme_path = f"{request.target.address.spec_path}/README.md"
if "long_description" in explicit_kwargs:
raise ValueError(
f"Do not provide a `long_description` in the `provides` field for {request.target.address}. "
f"Instead, either place a `README.md` file at {default_readme_path} "
"OR specify a path to an appropriate Markdown file, relative to the Pants root "
"in the `readme` key in the `provides` field"
)
# "readme" is a key that we (Grapl) use; it's not in standard
# Pants. There may be some changes coming soon, though:
# https://github.com/pantsbuild/pants/issues/11554
readme_path = (
explicit_kwargs.pop("readme")
if "readme" in explicit_kwargs
else default_readme_path
)
logger.info(f"Reading long_description from {readme_path}")
digest_contents = await Get(
DigestContents,
PathGlobs(
[readme_path],
description_of_origin=f"README resolution in `setup_py()` plugin ({__file__}) for {request.target.address}",
glob_match_error_behavior=GlobMatchErrorBehavior.error,
),
)
long_description = "\n".join(
file_content.content.decode() for file_content in digest_contents
)
explicit_kwargs["long_description"] = long_description
# Set hardcoded values, raising an exception if any of them are
# overridden by the user.
conflicts = set(explicit_kwargs.keys()).intersection(HARDCODED_KWARGS.keys())
if conflicts:
raise ValueError(
f"These kwargs should not be set in the `provides` field for {request.target.address} "
"because our internal plugin will automatically set them: "
f"{sorted(conflicts)}"
)
explicit_kwargs.update(HARDCODED_KWARGS)
return SetupKwargs(explicit_kwargs, address=request.target.address)
def rules():
return [
*collect_rules(),
UnionRule(SetupKwargsRequest, GraplSetupKwargsRequest),
] | 0.822296 | 0.265333 |
import copy
import sys
from geometry_msgs.msg import Pose, Point, Quaternion
import moveit_commander
import intera_interface
import rospy
from tf import TransformListener
from copy import deepcopy
from get_task_srv.srv import get_task
class Robot(object):
    """Thin wrapper around an Intera (Sawyer) limb and gripper.

    Exposes joint-space moves guarded against failed IK, plus a helper
    that drives the arm to a named start pose.
    """

    def __init__(self, limb='right', tip_name="right_gripper_tip"):
        self._limb_name = limb
        self._limb = intera_interface.Limb(limb)
        self.gripper = intera_interface.Gripper(limb)
        self._tip_name = tip_name  # end-effector frame used for IK requests

    def move_to_start(self, start_angles=None):
        """Move the arm to *start_angles* (joint-name -> angle dict).

        Falls back to all-zero joint angles when no dict is given.
        """
        print("Moving the {0} arm to start pose...".format(self._limb_name))
        if not start_angles:
            # BUG FIX: the original read self._joint_names, which is never
            # assigned anywhere in this class and raised AttributeError.
            # The Limb object exposes the joint names directly.
            start_angles = dict(zip(self._limb.joint_names(), [0]*7))
        self._guarded_move_to_joint_position(start_angles)

    def approach(self, pose):
        """IK-solve *pose* for the tip frame and move there in joint space."""
        joint_angles = self._limb.ik_request(pose, self._tip_name)
        self._guarded_move_to_joint_position(joint_angles)

    def _guarded_move_to_joint_position(self, joint_angles, timeout=60.0):
        """Execute a joint move only when IK produced a solution.

        ik_request returns a falsy value on failure, in which case the arm
        stays put and an error is logged instead of raising.
        """
        # Slow speed for safe, repeatable demo motion.
        self._limb.set_joint_position_speed(0.1)
        if joint_angles:
            self._limb.move_to_joint_positions(joint_angles, timeout=timeout)
        else:
            rospy.logerr("No Joint Angles provided for move_to_joint_positions. Staying put.")
class Runner:
    """Scripted pick-peg / box-lid demo sequence for the Sawyer arm.

    Each phase method bumps `task`, a monotonically increasing phase index
    served over the `get_task` ROS service so an external observer can
    tell which sub-task is active. Target poses are looked up via tf;
    `_cur_gripper_pose` is mutated in place between phases, so the phase
    methods must run in the order `get_obs` calls them.
    """

    def __init__(self, robot):
        self.task = 0  # current phase index (0..6)
        # tf frame names; small Cartesian offsets below are hand-tuned
        # calibration corrections -- TODO confirm against the rig.
        self._robot_frame = "base_d"
        self._peg_frame = "peg"
        self._hole_frame = "hole"
        self._cur_gripper_pose = Pose()
        self._prev_gripper_pose = Pose()
        self._tf_listener = TransformListener()
        self.robot = robot

    def handle_get_task(self, req):
        """Service callback: report the current phase index."""
        return self.task

    def reach_peg(self, use_wp1=False, use_wp2=False, peg_hover=0.10): #hover_dis = 0.15, 0.2
        """Phase 0: open gripper and hover above the peg (via optional waypoints)."""
        self.robot.gripper.open()
        self._tf_listener.waitForTransform(self._robot_frame, self._peg_frame, rospy.Time(0), rospy.Duration(2))
        trans, _ = self._tf_listener.lookupTransform(self._robot_frame, self._peg_frame, rospy.Time(0))
        peg_position = Point(x=trans[0], y=trans[1], z=trans[2])
        if use_wp1:
            wp1 = Pose()
            wp1.position = Point(0.657, 0.372, 0.15)
            wp1.orientation = Quaternion(0, 1, 0, 0)  # gripper pointing down
            self.robot.approach(wp1)
        if use_wp2:
            wp2 = Pose()
            wp2.position = Point(0.672, 0.170, 0.15)
            wp2.orientation = Quaternion(0, 1, 0, 0)
            self.robot.approach(wp2)
        print("Reaching Peg")
        self._cur_gripper_pose = Pose()
        self._cur_gripper_pose.position = peg_position
        self._cur_gripper_pose.position.z += peg_hover
        self._cur_gripper_pose.position.y -= 0.01
        self._cur_gripper_pose.position.x += 0.02
        self._cur_gripper_pose.orientation.x = 0
        self._cur_gripper_pose.orientation.y = 1
        self._cur_gripper_pose.orientation.z = 0
        self._cur_gripper_pose.orientation.w = 0
        self.robot.approach(self._cur_gripper_pose)

    def pick_up_peg(self, peg_hover=0.10): #hover_dis = 0.2, 0.3
        """Phase 1: descend past the hover height and close the gripper."""
        self.task += 1
        down = peg_hover + 0.02
        self._cur_gripper_pose.position.z -= down
        print("Picking up block")
        self.robot.approach(self._cur_gripper_pose)
        self.robot.gripper.close()

    def reach_box_lid(self, use_wp1=False, use_wp2=False, use_wp3=False, use_wp4=False, peg_hover=0.1, lid_hover=0.22): #peg_hover = 0.2, 0.3 lid_hover=0.17, 0.27
        """Phase 2: lift the peg, traverse optional waypoints, hover over the hole."""
        self.task += 1
        self._cur_gripper_pose.position.z += peg_hover
        print("Going up")
        self.robot.approach(self._cur_gripper_pose)
        if use_wp1:
            wp1 = Pose()
            wp1.position = Point(0.91, 0.207, 0.2)
            wp1.orientation = Quaternion(0, 1, 0, 0)
            self.robot.approach(wp1)
        if use_wp2:
            wp2 = Pose()
            wp2.position = Point(0.677, 0.244, 0.2)
            wp2.orientation = Quaternion(0, 1, 0, 0)
            self.robot.approach(wp2)
        if use_wp3:
            wp3 = Pose()
            wp3.position = Point(0.622, 0.291, 0.2)
            wp3.orientation = Quaternion(0, 1, 0, 0)
            self.robot.approach(wp3)
        if use_wp4:
            wp4 = Pose()
            wp4.position = Point(0.73, 0.0, 0.3)
            wp4.orientation = Quaternion(0, 1, 0, 0)
            self.robot.approach(wp4)
        self._tf_listener.waitForTransform(self._robot_frame, self._hole_frame, rospy.Time(0), rospy.Duration(2))
        trans, _ = self._tf_listener.lookupTransform(self._robot_frame, self._hole_frame, rospy.Time(0))
        hole_position = Point(x=trans[0], y=trans[1], z=trans[2])
        self._cur_gripper_pose = Pose()
        self._cur_gripper_pose.position = hole_position
        self._cur_gripper_pose.position.z += lid_hover
        self._cur_gripper_pose.position.x -= 0.01
        #self._cur_gripper_pose.position.y -= 0.01
        self._cur_gripper_pose.orientation.x = 0
        self._cur_gripper_pose.orientation.y = 1
        self._cur_gripper_pose.orientation.z = 0
        self._cur_gripper_pose.orientation.w = 0
        print("Going above hole")
        self.robot.approach(self._cur_gripper_pose)

    def insert_peg_in_lid(self, lid_hover=0.22): #lid_hover = 0.17, 0.27
        """Phase 3: lower the peg into the lid hole; remember the insert pose."""
        self.task += 1
        print("Going down")
        down = lid_hover - 0.06
        self._cur_gripper_pose.position.z -= down
        self.robot.approach(self._cur_gripper_pose)
        # Saved so close_lid can return to the exact insertion pose.
        self._prev_gripper_pose = deepcopy(self._cur_gripper_pose)

    def open_lid(self):
        """Phase 4: slide the lid open using the box frame as reference."""
        self.task += 1
        self._tf_listener.waitForTransform(self._robot_frame, "box", rospy.Time(0), rospy.Duration(2))
        trans, _ = self._tf_listener.lookupTransform(self._robot_frame, "box", rospy.Time(0))
        self._cur_gripper_pose.position.x = trans[0] - 0.01
        self._cur_gripper_pose.position.y = trans[1] + 0.06
        self.robot.approach(self._cur_gripper_pose)

    def close_lid(self):
        """Phase 5: slide the lid back to the remembered insertion pose."""
        self.task += 1
        self._prev_gripper_pose.position.x -= 0.01
        self.robot.approach(self._prev_gripper_pose)
        self._cur_gripper_pose = deepcopy(self._prev_gripper_pose)

    def remove_peg(self, lid_hover=0.22): #lid_hover = 0.17, 0.27
        """Phase 6: retract the peg straight up out of the lid hole."""
        self.task += 1
        up = lid_hover - 0.06
        self._cur_gripper_pose.position.z += up
        self.robot.approach(self._cur_gripper_pose)

    def get_obs(self, peg_use_wp1, peg_use_wp2, box_use_wp1, box_use_wp2, box_use_wp3, box_use_wp4, peg_hover, lid_hover):
        """Run the full 7-phase demo sequence in order."""
        self.reach_peg(use_wp1=peg_use_wp1, use_wp2=peg_use_wp2, peg_hover=peg_hover) #0
        self.pick_up_peg(peg_hover=peg_hover) #1
        self.reach_box_lid(use_wp1=box_use_wp1, use_wp2=box_use_wp2, use_wp3=box_use_wp3, use_wp4=box_use_wp4, peg_hover=peg_hover, lid_hover=lid_hover) #2
        self.insert_peg_in_lid(lid_hover=lid_hover) #3
        self.open_lid() #4
        self.close_lid() #5
        # BUG FIX: the original called remove_peg() without lid_hover, so the
        # retract distance used the 0.22 default and disagreed with the
        # insertion depth whenever lid_hover was configured differently.
        self.remove_peg(lid_hover=lid_hover) #6
if __name__ == '__main__':
INITIAL_ROBOT_JOINT_POS = {
'right_j0': -0.140923828125,
'right_j1': -1.2789248046875,
'right_j2': -3.043166015625,
'right_j3': -2.139623046875,
'right_j4': -0.047607421875,
'right_j5': -0.7052822265625,
'right_j6': -1.4102060546875,
}
#CONFIG
peg_use_wp1 = False
peg_use_wp2 = False
peg_hover = 0.2 #0.10, 0.15, 0.2
box_use_wp1 = False
box_use_wp2 = False
box_use_wp3 = False
box_use_wp4 = True
lid_hover = 0.17 #0.17, 0.19, 0.22
rospy.init_node('demo_toyworld', anonymous=True)
robot = Robot()
robot.move_to_start(start_angles=INITIAL_ROBOT_JOINT_POS)
usr_input = raw_input("Type 'done' when you are ready.").lower()
if usr_input == 'done':
runner = Runner(robot)
print("Starting get_task service.")
rospy.Service('get_task', get_task, runner.handle_get_task)
runner.get_obs(peg_use_wp1, peg_use_wp2, box_use_wp1, box_use_wp2, box_use_wp3, box_use_wp4, peg_hover, lid_hover)
elif usr_input == 'exit':
sys.exit() | docker/sawyer-robot/internal/temp/data_collection/run_toy_task.py |
import copy
import sys
from geometry_msgs.msg import Pose, Point, Quaternion
import moveit_commander
import intera_interface
import rospy
from tf import TransformListener
from copy import deepcopy
from get_task_srv.srv import get_task
class Robot(object):
def __init__(self, limb='right', tip_name="right_gripper_tip"):
self._limb_name = limb
self._limb = intera_interface.Limb(limb)
self.gripper = intera_interface.Gripper(limb)
self._tip_name = tip_name
def move_to_start(self, start_angles=None):
print("Moving the {0} arm to start pose...".format(self._limb_name))
if not start_angles:
start_angles = dict(zip(self._joint_names, [0]*7))
self._guarded_move_to_joint_position(start_angles)
def approach(self, pose):
joint_angles = self._limb.ik_request(pose, self._tip_name)
self._guarded_move_to_joint_position(joint_angles)
def _guarded_move_to_joint_position(self, joint_angles, timeout=60.0):
self._limb.set_joint_position_speed(0.1)
if joint_angles:
self._limb.move_to_joint_positions(joint_angles, timeout=timeout)
else:
rospy.logerr("No Joint Angles provided for move_to_joint_positions. Staying put.")
class Runner:
def __init__(self, robot):
self.task = 0
self._robot_frame = "base_d"
self._peg_frame = "peg"
self._hole_frame = "hole"
self._cur_gripper_pose = Pose()
self._prev_gripper_pose = Pose()
self._tf_listener = TransformListener()
self.robot = robot
def handle_get_task(self, req):
return self.task
def reach_peg(self, use_wp1=False, use_wp2=False, peg_hover=0.10): #hover_dis = 0.15, 0.2
self.robot.gripper.open()
self._tf_listener.waitForTransform(self._robot_frame, self._peg_frame, rospy.Time(0), rospy.Duration(2))
trans, _ = self._tf_listener.lookupTransform(self._robot_frame, self._peg_frame, rospy.Time(0))
peg_position = Point(x=trans[0], y=trans[1], z=trans[2])
if use_wp1:
wp1 = Pose()
wp1.position = Point(0.657, 0.372, 0.15)
wp1.orientation = Quaternion(0, 1, 0, 0)
self.robot.approach(wp1)
if use_wp2:
wp2 = Pose()
wp2.position = Point(0.672, 0.170, 0.15)
wp2.orientation = Quaternion(0, 1, 0, 0)
self.robot.approach(wp2)
print("Reaching Peg")
self._cur_gripper_pose = Pose()
self._cur_gripper_pose.position = peg_position
self._cur_gripper_pose.position.z += peg_hover
self._cur_gripper_pose.position.y -= 0.01
self._cur_gripper_pose.position.x += 0.02
self._cur_gripper_pose.orientation.x = 0
self._cur_gripper_pose.orientation.y = 1
self._cur_gripper_pose.orientation.z = 0
self._cur_gripper_pose.orientation.w = 0
self.robot.approach(self._cur_gripper_pose)
def pick_up_peg(self, peg_hover=0.10): #hover_dis = 0.2, 0.3
self.task += 1
down = peg_hover + 0.02
self._cur_gripper_pose.position.z -= down
print("Picking up block")
self.robot.approach(self._cur_gripper_pose)
self.robot.gripper.close()
def reach_box_lid(self, use_wp1=False, use_wp2=False, use_wp3=False, use_wp4=False, peg_hover=0.1, lid_hover=0.22): #peg_hover = 0.2, 0.3 lid_hover=0.17, 0.27
self.task += 1
self._cur_gripper_pose.position.z += peg_hover
print("Going up")
self.robot.approach(self._cur_gripper_pose)
if use_wp1:
wp1 = Pose()
wp1.position = Point(0.91, 0.207, 0.2)
wp1.orientation = Quaternion(0, 1, 0, 0)
self.robot.approach(wp1)
if use_wp2:
wp2 = Pose()
wp2.position = Point(0.677, 0.244, 0.2)
wp2.orientation = Quaternion(0, 1, 0, 0)
self.robot.approach(wp2)
if use_wp3:
wp3 = Pose()
wp3.position = Point(0.622, 0.291, 0.2)
wp3.orientation = Quaternion(0, 1, 0, 0)
self.robot.approach(wp3)
if use_wp4:
wp4 = Pose()
wp4.position = Point(0.73, 0.0, 0.3)
wp4.orientation = Quaternion(0, 1, 0, 0)
self.robot.approach(wp4)
self._tf_listener.waitForTransform(self._robot_frame, self._hole_frame, rospy.Time(0), rospy.Duration(2))
trans, _ = self._tf_listener.lookupTransform(self._robot_frame, self._hole_frame, rospy.Time(0))
hole_position = Point(x=trans[0], y=trans[1], z=trans[2])
self._cur_gripper_pose = Pose()
self._cur_gripper_pose.position = hole_position
self._cur_gripper_pose.position.z += lid_hover
self._cur_gripper_pose.position.x -= 0.01
#self._cur_gripper_pose.position.y -= 0.01
self._cur_gripper_pose.orientation.x = 0
self._cur_gripper_pose.orientation.y = 1
self._cur_gripper_pose.orientation.z = 0
self._cur_gripper_pose.orientation.w = 0
print("Going above hole")
self.robot.approach(self._cur_gripper_pose)
def insert_peg_in_lid(self, lid_hover=0.22): #lid_hover = 0.17, 0.27
self.task += 1
print("Going down")
down = lid_hover - 0.06
self._cur_gripper_pose.position.z -= down
self.robot.approach(self._cur_gripper_pose)
self._prev_gripper_pose = deepcopy(self._cur_gripper_pose)
def open_lid(self):
self.task += 1
self._tf_listener.waitForTransform(self._robot_frame, "box", rospy.Time(0), rospy.Duration(2))
trans, _ = self._tf_listener.lookupTransform(self._robot_frame, "box", rospy.Time(0))
self._cur_gripper_pose.position.x = trans[0] - 0.01
self._cur_gripper_pose.position.y = trans[1] + 0.06
self.robot.approach(self._cur_gripper_pose)
def close_lid(self):
self.task += 1
self._prev_gripper_pose.position.x -= 0.01
self.robot.approach(self._prev_gripper_pose)
self._cur_gripper_pose = deepcopy(self._prev_gripper_pose)
def remove_peg(self, lid_hover=0.22): #lid_hover = 0.17, 0.27
self.task += 1
up = lid_hover - 0.06
self._cur_gripper_pose.position.z += up
self.robot.approach(self._cur_gripper_pose)
def get_obs(self, peg_use_wp1, peg_use_wp2, box_use_wp1, box_use_wp2, box_use_wp3, box_use_wp4, peg_hover, lid_hover):
self.reach_peg(use_wp1=peg_use_wp1, use_wp2=peg_use_wp2, peg_hover=peg_hover) #0
self.pick_up_peg(peg_hover=peg_hover) #1
self.reach_box_lid(use_wp1=box_use_wp1, use_wp2=box_use_wp2, use_wp3=box_use_wp3, use_wp4=box_use_wp4, peg_hover=peg_hover, lid_hover=lid_hover) #2
self.insert_peg_in_lid(lid_hover=lid_hover) #3
self.open_lid() #4
self.close_lid() #5
self.remove_peg() #6
if __name__ == '__main__':
INITIAL_ROBOT_JOINT_POS = {
'right_j0': -0.140923828125,
'right_j1': -1.2789248046875,
'right_j2': -3.043166015625,
'right_j3': -2.139623046875,
'right_j4': -0.047607421875,
'right_j5': -0.7052822265625,
'right_j6': -1.4102060546875,
}
#CONFIG
peg_use_wp1 = False
peg_use_wp2 = False
peg_hover = 0.2 #0.10, 0.15, 0.2
box_use_wp1 = False
box_use_wp2 = False
box_use_wp3 = False
box_use_wp4 = True
lid_hover = 0.17 #0.17, 0.19, 0.22
rospy.init_node('demo_toyworld', anonymous=True)
robot = Robot()
robot.move_to_start(start_angles=INITIAL_ROBOT_JOINT_POS)
usr_input = raw_input("Type 'done' when you are ready.").lower()
if usr_input == 'done':
runner = Runner(robot)
print("Starting get_task service.")
rospy.Service('get_task', get_task, runner.handle_get_task)
runner.get_obs(peg_use_wp1, peg_use_wp2, box_use_wp1, box_use_wp2, box_use_wp3, box_use_wp4, peg_hover, lid_hover)
elif usr_input == 'exit':
sys.exit() | 0.38827 | 0.211824 |
"""Module to talk to EtherpadLite API."""
import json
import urllib
import urllib2
class APIClient:
"""Client to talk to EtherpadLite API."""
API_VERSION = "1.2.8"
CODE_OK = 0
CODE_INVALID_PARAMETERS = 1
CODE_INTERNAL_ERROR = 2
CODE_INVALID_FUNCTION = 3
CODE_INVALID_API_KEY = 4
TIMEOUT = 20
apiKey = ""
baseUrl = "http://localhost:9001/api"
def __init__(self, apiKey=None, baseUrl=None, post=False):
if apiKey:
self.apiKey = apiKey
if baseUrl:
self.baseUrl = baseUrl
self.post = post
def call(self, function, arguments=None):
"""Create a dictionary of all parameters"""
url = '%s/%s/%s' % (self.baseUrl, self.API_VERSION, function)
params = arguments or {}
params.update({'apikey': self.apiKey})
data = urllib.urlencode(params, True)
try:
opener = urllib2.build_opener()
request = url + "?" + data
if self.post:
request = urllib2.Request(url=url, data=data)
response = opener.open(request, timeout=self.TIMEOUT)
result = response.read()
response.close()
except urllib2.HTTPError:
raise
result = json.loads(result)
if result is None:
raise ValueError("JSON response could not be decoded")
return self.handleResult(result)
def handleResult(self, result):
"""Handle API call result"""
if 'code' not in result:
raise Exception("API response has no code")
if 'message' not in result:
raise Exception("API response has no message")
if 'data' not in result:
result['data'] = None
if result['code'] == self.CODE_OK:
return result['data']
elif result['code'] == self.CODE_INVALID_PARAMETERS or result['code'] == self.CODE_INVALID_API_KEY:
raise ValueError(result['message'])
elif result['code'] == self.CODE_INTERNAL_ERROR:
raise Exception(result['message'])
elif result['code'] == self.CODE_INVALID_FUNCTION:
raise Exception(result['message'])
else:
raise Exception("An unexpected error occurred whilst handling the response")
# GLOBALS
def listAllPads(self):
"""returns all pads (1.2.7)"""
return self.call("listAllPads")
def listAllGroups(self):
"""returns all groups"""
return self.call("listAllGroups")
def checkToken(self):
""""""
return self.call("checkToken")
# GROUPS
# Pads can belong to a group. There will always be public pads that do not belong to a group (or we give this group the id 0)
def createGroup(self):
"""creates a new group"""
return self.call("createGroup")
def createGroupIfNotExistsFor(self, groupMapper):
"""this functions helps you to map your application group ids to etherpad lite group ids"""
return self.call("createGroupIfNotExistsFor", {
"groupMapper": groupMapper
})
def deleteGroup(self, groupID):
"""deletes a group"""
return self.call("deleteGroup", {
"groupID": groupID
})
def listPads(self, groupID):
"""returns all pads of this group"""
return self.call("listPads", {
"groupID": groupID
})
def createGroupPad(self, groupID, padName, text=''):
"""creates a new pad in this group"""
params = {
"groupID": groupID,
"padName": padName,
}
if text:
params['text'] = text
return self.call("createGroupPad", params)
# AUTHORS
# Theses authors are bind to the attributes the users choose (color and name).
def createAuthor(self, name=''):
"""creates a new author"""
params = {}
if name:
params['name'] = name
return self.call("createAuthor", params)
def createAuthorIfNotExistsFor(self, authorMapper, name=''):
"""this functions helps you to map your application author ids to etherpad lite author ids"""
params = {
'authorMapper': authorMapper
}
if name:
params['name'] = name
return self.call("createAuthorIfNotExistsFor", params)
def getAuthorName(self, authorID):
return self.call("getAuthorName", {
"authorID": authorID
})
def listPads(self, groupID):
"""returns all pads of this author (1.2.7)"""
return self.call("listPadsOfAuthor", {
"authorID": authorID
})
# SESSIONS
# Sessions can be created between a group and a author. This allows
# an author to access more than one group. The sessionID will be set as
# a cookie to the client and is valid until a certain date.
def createSession(self, groupID, authorID, validUntil):
"""creates a new session"""
return self.call("createSession", {
"groupID": groupID,
"authorID": authorID,
"validUntil": validUntil
})
def deleteSession(self, sessionID):
"""deletes a session"""
return self.call("deleteSession", {
"sessionID": sessionID
})
def getSessionInfo(self, sessionID):
"""returns informations about a session"""
return self.call("getSessionInfo", {
"sessionID": sessionID
})
def listSessionsOfGroup(self, groupID):
"""returns all sessions of a group"""
return self.call("listSessionsOfGroup", {
"groupID": groupID
})
def listSessionsOfAuthor(self, authorID):
"""returns all sessions of an author"""
return self.call("listSessionsOfAuthor", {
"authorID": authorID
})
# PAD CONTENT
# Pad content can be updated and retrieved through the API
def getAttributePool(self, padID):
"""returns the text of a pad"""
return self.call("getAttributePool", {"padID": padID})
def getText(self, padID, rev=None):
"""returns the text of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getText", params)
# introduced with pull request merge
def getHtml(self, padID, rev=None):
"""returns the html of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getHTML", params)
def setText(self, padID, text):
"""sets the text of a pad"""
return self.call("setText", {
"padID": padID,
"text": text
})
def setHtml(self, padID, html):
"""sets the text of a pad from html"""
return self.call("setHTML", {
"padID": padID,
"html": html
})
# PAD
# Group pads are normal pads, but with the name schema
# GROUPID$PADNAME. A security manager controls access of them and its
# forbidden for normal pads to include a in the name.
def createPad(self, padID, text=''):
"""creates a new pad"""
params = {
"padID": padID,
}
if text:
params['text'] = text
return self.call("createPad", params)
def createDiffHTML(self, padID, startRev, endRev):
"""creates a diff between two pads"""
params = {
"padID": padID,
"startRev": startRev,
"endRev": endRev
}
return self.call("createDiffHTML", params)
def getLastEdited(self, padID):
return self.call("getLastEdited", {
"padID": padID
})
def getRevisionChangeset(self, padID, rev=None):
"""return the changeset at given rev of this pad"""
params = {"padID": padID}
if rev:
params["rev"] = rev
return self.call("getRevisionChangeset", params)
def getRevisionsCount(self, padID):
"""returns the number of revisions of this pad"""
return self.call("getRevisionsCount", {
"padID": padID
})
def listAuthorsOfPad(self, padID):
"""returns the list of the authors of this pad"""
return self.call("listAuthorsOfPad", {
"padID": padID
})
def padUsers(self, padID):
"""returns the list of users of this pad"""
return self.call("padUsers", {
"padID": padID
})
def padUsersCount(self, padID):
"""returns the number of users of this pad"""
return self.call("padUsersCount", {
"padID": padID
})
def deletePad(self, padID):
"""deletes a pad"""
return self.call("deletePad", {
"padID": padID
})
def getReadOnlyID(self, padID):
"""returns the read only link of a pad"""
return self.call("getReadOnlyID", {
"padID": padID
})
def setPublicStatus(self, padID, publicStatus):
"""sets a boolean for the public status of a pad"""
return self.call("setPublicStatus", {
"padID": padID,
"publicStatus": publicStatus
})
def getPublicStatus(self, padID):
"""return true of false"""
return self.call("getPublicStatus", {
"padID": padID
})
def setPassword(self, padID, password):
"""returns ok or a error message"""
return self.call("setPassword", {
"padID": padID,
"password": password
})
def isPasswordProtected(self, padID):
"""returns true or false"""
return self.call("isPasswordProtected", {
"padID": padID
})
# MESSAGE
def sendClientsMessage(self, padID, msg):
return self.call("sendClientsMessage", {
"padID": padID,
"msg": msg
})
def getChatHistory(self, padID, start=None, end=None):
params = {
"padID": padID
}
if not start is None and not end is None:
params["start"] = start
params["end"] = end
return self.call("getChatHistory", params)
def getChatHead(self, padID):
return self.call("getChatHead", {
"padID": padID
}) | src/py_etherpad/APIClient.py | """Module to talk to EtherpadLite API."""
import json
import urllib
import urllib2
class APIClient:
"""Client to talk to EtherpadLite API."""
API_VERSION = "1.2.8"
CODE_OK = 0
CODE_INVALID_PARAMETERS = 1
CODE_INTERNAL_ERROR = 2
CODE_INVALID_FUNCTION = 3
CODE_INVALID_API_KEY = 4
TIMEOUT = 20
apiKey = ""
baseUrl = "http://localhost:9001/api"
def __init__(self, apiKey=None, baseUrl=None, post=False):
if apiKey:
self.apiKey = apiKey
if baseUrl:
self.baseUrl = baseUrl
self.post = post
def call(self, function, arguments=None):
"""Create a dictionary of all parameters"""
url = '%s/%s/%s' % (self.baseUrl, self.API_VERSION, function)
params = arguments or {}
params.update({'apikey': self.apiKey})
data = urllib.urlencode(params, True)
try:
opener = urllib2.build_opener()
request = url + "?" + data
if self.post:
request = urllib2.Request(url=url, data=data)
response = opener.open(request, timeout=self.TIMEOUT)
result = response.read()
response.close()
except urllib2.HTTPError:
raise
result = json.loads(result)
if result is None:
raise ValueError("JSON response could not be decoded")
return self.handleResult(result)
def handleResult(self, result):
"""Handle API call result"""
if 'code' not in result:
raise Exception("API response has no code")
if 'message' not in result:
raise Exception("API response has no message")
if 'data' not in result:
result['data'] = None
if result['code'] == self.CODE_OK:
return result['data']
elif result['code'] == self.CODE_INVALID_PARAMETERS or result['code'] == self.CODE_INVALID_API_KEY:
raise ValueError(result['message'])
elif result['code'] == self.CODE_INTERNAL_ERROR:
raise Exception(result['message'])
elif result['code'] == self.CODE_INVALID_FUNCTION:
raise Exception(result['message'])
else:
raise Exception("An unexpected error occurred whilst handling the response")
# GLOBALS
def listAllPads(self):
"""returns all pads (1.2.7)"""
return self.call("listAllPads")
def listAllGroups(self):
"""returns all groups"""
return self.call("listAllGroups")
def checkToken(self):
""""""
return self.call("checkToken")
# GROUPS
# Pads can belong to a group. There will always be public pads that do not belong to a group (or we give this group the id 0)
def createGroup(self):
"""creates a new group"""
return self.call("createGroup")
def createGroupIfNotExistsFor(self, groupMapper):
"""this functions helps you to map your application group ids to etherpad lite group ids"""
return self.call("createGroupIfNotExistsFor", {
"groupMapper": groupMapper
})
def deleteGroup(self, groupID):
"""deletes a group"""
return self.call("deleteGroup", {
"groupID": groupID
})
def listPads(self, groupID):
"""returns all pads of this group"""
return self.call("listPads", {
"groupID": groupID
})
def createGroupPad(self, groupID, padName, text=''):
"""creates a new pad in this group"""
params = {
"groupID": groupID,
"padName": padName,
}
if text:
params['text'] = text
return self.call("createGroupPad", params)
# AUTHORS
# Theses authors are bind to the attributes the users choose (color and name).
def createAuthor(self, name=''):
"""creates a new author"""
params = {}
if name:
params['name'] = name
return self.call("createAuthor", params)
def createAuthorIfNotExistsFor(self, authorMapper, name=''):
"""this functions helps you to map your application author ids to etherpad lite author ids"""
params = {
'authorMapper': authorMapper
}
if name:
params['name'] = name
return self.call("createAuthorIfNotExistsFor", params)
def getAuthorName(self, authorID):
return self.call("getAuthorName", {
"authorID": authorID
})
def listPads(self, groupID):
"""returns all pads of this author (1.2.7)"""
return self.call("listPadsOfAuthor", {
"authorID": authorID
})
# SESSIONS
# Sessions can be created between a group and a author. This allows
# an author to access more than one group. The sessionID will be set as
# a cookie to the client and is valid until a certain date.
def createSession(self, groupID, authorID, validUntil):
"""creates a new session"""
return self.call("createSession", {
"groupID": groupID,
"authorID": authorID,
"validUntil": validUntil
})
def deleteSession(self, sessionID):
"""deletes a session"""
return self.call("deleteSession", {
"sessionID": sessionID
})
def getSessionInfo(self, sessionID):
"""returns informations about a session"""
return self.call("getSessionInfo", {
"sessionID": sessionID
})
def listSessionsOfGroup(self, groupID):
"""returns all sessions of a group"""
return self.call("listSessionsOfGroup", {
"groupID": groupID
})
def listSessionsOfAuthor(self, authorID):
"""returns all sessions of an author"""
return self.call("listSessionsOfAuthor", {
"authorID": authorID
})
# PAD CONTENT
# Pad content can be updated and retrieved through the API
def getAttributePool(self, padID):
"""returns the text of a pad"""
return self.call("getAttributePool", {"padID": padID})
def getText(self, padID, rev=None):
"""returns the text of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getText", params)
# introduced with pull request merge
def getHtml(self, padID, rev=None):
"""returns the html of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getHTML", params)
def setText(self, padID, text):
"""sets the text of a pad"""
return self.call("setText", {
"padID": padID,
"text": text
})
def setHtml(self, padID, html):
"""sets the text of a pad from html"""
return self.call("setHTML", {
"padID": padID,
"html": html
})
# PAD
# Group pads are normal pads, but with the name schema
# GROUPID$PADNAME. A security manager controls access of them and its
# forbidden for normal pads to include a in the name.
def createPad(self, padID, text=''):
"""creates a new pad"""
params = {
"padID": padID,
}
if text:
params['text'] = text
return self.call("createPad", params)
def createDiffHTML(self, padID, startRev, endRev):
"""creates a diff between two pads"""
params = {
"padID": padID,
"startRev": startRev,
"endRev": endRev
}
return self.call("createDiffHTML", params)
def getLastEdited(self, padID):
return self.call("getLastEdited", {
"padID": padID
})
def getRevisionChangeset(self, padID, rev=None):
"""return the changeset at given rev of this pad"""
params = {"padID": padID}
if rev:
params["rev"] = rev
return self.call("getRevisionChangeset", params)
def getRevisionsCount(self, padID):
"""returns the number of revisions of this pad"""
return self.call("getRevisionsCount", {
"padID": padID
})
def listAuthorsOfPad(self, padID):
"""returns the list of the authors of this pad"""
return self.call("listAuthorsOfPad", {
"padID": padID
})
def padUsers(self, padID):
"""returns the list of users of this pad"""
return self.call("padUsers", {
"padID": padID
})
def padUsersCount(self, padID):
"""returns the number of users of this pad"""
return self.call("padUsersCount", {
"padID": padID
})
def deletePad(self, padID):
"""deletes a pad"""
return self.call("deletePad", {
"padID": padID
})
def getReadOnlyID(self, padID):
"""returns the read only link of a pad"""
return self.call("getReadOnlyID", {
"padID": padID
})
def setPublicStatus(self, padID, publicStatus):
"""sets a boolean for the public status of a pad"""
return self.call("setPublicStatus", {
"padID": padID,
"publicStatus": publicStatus
})
def getPublicStatus(self, padID):
"""return true of false"""
return self.call("getPublicStatus", {
"padID": padID
})
def setPassword(self, padID, password):
"""returns ok or a error message"""
return self.call("setPassword", {
"padID": padID,
"password": password
})
def isPasswordProtected(self, padID):
"""returns true or false"""
return self.call("isPasswordProtected", {
"padID": padID
})
# MESSAGE
def sendClientsMessage(self, padID, msg):
return self.call("sendClientsMessage", {
"padID": padID,
"msg": msg
})
def getChatHistory(self, padID, start=None, end=None):
params = {
"padID": padID
}
if not start is None and not end is None:
params["start"] = start
params["end"] = end
return self.call("getChatHistory", params)
def getChatHead(self, padID):
return self.call("getChatHead", {
"padID": padID
}) | 0.630912 | 0.249853 |
from typing import Awaitable, Any, Callable, Dict, List, Optional, Union, TYPE_CHECKING
if TYPE_CHECKING:
from cripy import ConnectionType, SessionType
__all__ = ["Fetch"]
class Fetch:
"""
A domain for letting clients substitute browser's network layer with client code.
Domain Dependencies:
* Network
* IO
* Page
Status: Experimental
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch`
"""
__slots__ = ["client"]
def __init__(self, client: Union["ConnectionType", "SessionType"]) -> None:
"""Initialize a new instance of Fetch
:param client: The client instance to be used to communicate with the remote browser instance
"""
self.client: Union["ConnectionType", "SessionType"] = client
def disable(self) -> Awaitable[Dict]:
"""
Disables the fetch domain.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-disable`
:return: The results of the command
"""
return self.client.send("Fetch.disable", {})
def enable(
self,
patterns: Optional[List[Dict[str, Any]]] = None,
handleAuthRequests: Optional[bool] = None,
) -> Awaitable[Dict]:
"""
Enables issuing of requestPaused events. A request will be paused until client
calls one of failRequest, fulfillRequest or continueRequest/continueWithAuth.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-enable`
:param patterns: If specified, only requests matching any of these patterns will produce
fetchRequested event and will be paused until clients response. If not set,
all requests will be affected.
:param handleAuthRequests: If true, authRequired events will be issued and requests will be paused
expecting a call to continueWithAuth.
:return: The results of the command
"""
msg = {}
if patterns is not None:
msg["patterns"] = patterns
if handleAuthRequests is not None:
msg["handleAuthRequests"] = handleAuthRequests
return self.client.send("Fetch.enable", msg)
def failRequest(self, requestId: str, errorReason: str) -> Awaitable[Dict]:
"""
Causes the request to fail with specified reason.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-failRequest`
:param requestId: An id the client received in requestPaused event.
:param errorReason: Causes the request to fail with the given reason.
:return: The results of the command
"""
return self.client.send(
"Fetch.failRequest", {"requestId": requestId, "errorReason": errorReason}
)
def fulfillRequest(
self,
requestId: str,
responseCode: int,
responseHeaders: List[Dict[str, Any]],
body: Optional[str] = None,
responsePhrase: Optional[str] = None,
) -> Awaitable[Dict]:
"""
Provides response to the request.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-fulfillRequest`
:param requestId: An id the client received in requestPaused event.
:param responseCode: An HTTP response code.
:param responseHeaders: Response headers.
:param body: A response body.
:param responsePhrase: A textual representation of responseCode.
If absent, a standard phrase mathcing responseCode is used.
:return: The results of the command
"""
msg = {
"requestId": requestId,
"responseCode": responseCode,
"responseHeaders": responseHeaders,
}
if body is not None:
msg["body"] = body
if responsePhrase is not None:
msg["responsePhrase"] = responsePhrase
return self.client.send("Fetch.fulfillRequest", msg)
def continueRequest(
self,
requestId: str,
url: Optional[str] = None,
method: Optional[str] = None,
postData: Optional[str] = None,
headers: Optional[List[Dict[str, Any]]] = None,
) -> Awaitable[Dict]:
"""
Continues the request, optionally modifying some of its parameters.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-continueRequest`
:param requestId: An id the client received in requestPaused event.
:param url: If set, the request url will be modified in a way that's not observable by page.
:param method: If set, the request method is overridden.
:param postData: If set, overrides the post data in the request.
:param headers: If set, overrides the request headrts.
:return: The results of the command
"""
msg = {"requestId": requestId}
if url is not None:
msg["url"] = url
if method is not None:
msg["method"] = method
if postData is not None:
msg["postData"] = postData
if headers is not None:
msg["headers"] = headers
return self.client.send("Fetch.continueRequest", msg)
def continueWithAuth(
self, requestId: str, authChallengeResponse: Dict[str, Any]
) -> Awaitable[Dict]:
"""
Continues a request supplying authChallengeResponse following authRequired event.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-continueWithAuth`
:param requestId: An id the client received in authRequired event.
:param authChallengeResponse: Response to with an authChallenge.
:return: The results of the command
"""
return self.client.send(
"Fetch.continueWithAuth",
{"requestId": requestId, "authChallengeResponse": authChallengeResponse},
)
def getResponseBody(self, requestId: str) -> Awaitable[Dict]:
"""
Causes the body of the response to be received from the server and
returned as a single string. May only be issued for a request that
is paused in the Response stage and is mutually exclusive with
takeResponseBodyForInterceptionAsStream. Calling other methods that
affect the request or disabling fetch domain before body is received
results in an undefined behavior.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-getResponseBody`
:param requestId: Identifier for the intercepted request to get body for.
:return: The results of the command
"""
return self.client.send("Fetch.getResponseBody", {"requestId": requestId})
def takeResponseBodyAsStream(self, requestId: str) -> Awaitable[Dict]:
"""
Returns a handle to the stream representing the response body.
The request must be paused in the HeadersReceived stage.
Note that after this command the request can't be continued
as is -- client either needs to cancel it or to provide the
response body.
The stream only supports sequential read, IO.read will fail if the position
is specified.
This method is mutually exclusive with getResponseBody.
Calling other methods that affect the request or disabling fetch
domain before body is received results in an undefined behavior.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-takeResponseBodyAsStream`
:param requestId: The requestId
:return: The results of the command
"""
return self.client.send(
"Fetch.takeResponseBodyAsStream", {"requestId": requestId}
)
def requestPaused(
self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None
) -> Any:
"""
Issued when the domain is enabled and the request URL matches the
specified filter. The request is paused until the client responds
with one of continueRequest, failRequest or fulfillRequest.
The stage of the request can be determined by presence of responseErrorReason
and responseStatusCode -- the request is at the response stage if either
of these fields is present and in the request stage otherwise.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#event-requestPaused`
:param listener: Optional listener function
:return: If a listener was supplied the return value is a callable that
will remove the supplied listener otherwise a future that resolves
with the value of the event
"""
event_name = "Fetch.requestPaused"
if listener is None:
future = self.client.loop.create_future()
def _listener(event: Optional[Dict] = None) -> None:
future.set_result(event)
self.client.once(event_name, _listener)
return future
self.client.on(event_name, listener)
return lambda: self.client.remove_listener(event_name, listener)
def authRequired(
self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None
) -> Any:
"""
Issued when the domain is enabled with handleAuthRequests set to true.
The request is paused until client responds with continueWithAuth.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#event-authRequired`
:param listener: Optional listener function
:return: If a listener was supplied the return value is a callable that
will remove the supplied listener otherwise a future that resolves
with the value of the event
"""
event_name = "Fetch.authRequired"
if listener is None:
future = self.client.loop.create_future()
def _listener(event: Optional[Dict] = None) -> None:
future.set_result(event)
self.client.once(event_name, _listener)
return future
self.client.on(event_name, listener)
return lambda: self.client.remove_listener(event_name, listener) | cripy/protocol/fetch.py | from typing import Awaitable, Any, Callable, Dict, List, Optional, Union, TYPE_CHECKING
if TYPE_CHECKING:
from cripy import ConnectionType, SessionType
__all__ = ["Fetch"]
class Fetch:
"""
A domain for letting clients substitute browser's network layer with client code.
Domain Dependencies:
* Network
* IO
* Page
Status: Experimental
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch`
"""
__slots__ = ["client"]
def __init__(self, client: Union["ConnectionType", "SessionType"]) -> None:
"""Initialize a new instance of Fetch
:param client: The client instance to be used to communicate with the remote browser instance
"""
self.client: Union["ConnectionType", "SessionType"] = client
def disable(self) -> Awaitable[Dict]:
"""
Disables the fetch domain.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-disable`
:return: The results of the command
"""
return self.client.send("Fetch.disable", {})
def enable(
self,
patterns: Optional[List[Dict[str, Any]]] = None,
handleAuthRequests: Optional[bool] = None,
) -> Awaitable[Dict]:
"""
Enables issuing of requestPaused events. A request will be paused until client
calls one of failRequest, fulfillRequest or continueRequest/continueWithAuth.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-enable`
:param patterns: If specified, only requests matching any of these patterns will produce
fetchRequested event and will be paused until clients response. If not set,
all requests will be affected.
:param handleAuthRequests: If true, authRequired events will be issued and requests will be paused
expecting a call to continueWithAuth.
:return: The results of the command
"""
msg = {}
if patterns is not None:
msg["patterns"] = patterns
if handleAuthRequests is not None:
msg["handleAuthRequests"] = handleAuthRequests
return self.client.send("Fetch.enable", msg)
def failRequest(self, requestId: str, errorReason: str) -> Awaitable[Dict]:
"""
Causes the request to fail with specified reason.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-failRequest`
:param requestId: An id the client received in requestPaused event.
:param errorReason: Causes the request to fail with the given reason.
:return: The results of the command
"""
return self.client.send(
"Fetch.failRequest", {"requestId": requestId, "errorReason": errorReason}
)
def fulfillRequest(
self,
requestId: str,
responseCode: int,
responseHeaders: List[Dict[str, Any]],
body: Optional[str] = None,
responsePhrase: Optional[str] = None,
) -> Awaitable[Dict]:
"""
Provides response to the request.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-fulfillRequest`
:param requestId: An id the client received in requestPaused event.
:param responseCode: An HTTP response code.
:param responseHeaders: Response headers.
:param body: A response body.
:param responsePhrase: A textual representation of responseCode.
If absent, a standard phrase mathcing responseCode is used.
:return: The results of the command
"""
msg = {
"requestId": requestId,
"responseCode": responseCode,
"responseHeaders": responseHeaders,
}
if body is not None:
msg["body"] = body
if responsePhrase is not None:
msg["responsePhrase"] = responsePhrase
return self.client.send("Fetch.fulfillRequest", msg)
def continueRequest(
self,
requestId: str,
url: Optional[str] = None,
method: Optional[str] = None,
postData: Optional[str] = None,
headers: Optional[List[Dict[str, Any]]] = None,
) -> Awaitable[Dict]:
"""
Continues the request, optionally modifying some of its parameters.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-continueRequest`
:param requestId: An id the client received in requestPaused event.
:param url: If set, the request url will be modified in a way that's not observable by page.
:param method: If set, the request method is overridden.
:param postData: If set, overrides the post data in the request.
:param headers: If set, overrides the request headrts.
:return: The results of the command
"""
msg = {"requestId": requestId}
if url is not None:
msg["url"] = url
if method is not None:
msg["method"] = method
if postData is not None:
msg["postData"] = postData
if headers is not None:
msg["headers"] = headers
return self.client.send("Fetch.continueRequest", msg)
def continueWithAuth(
self, requestId: str, authChallengeResponse: Dict[str, Any]
) -> Awaitable[Dict]:
"""
Continues a request supplying authChallengeResponse following authRequired event.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-continueWithAuth`
:param requestId: An id the client received in authRequired event.
:param authChallengeResponse: Response to with an authChallenge.
:return: The results of the command
"""
return self.client.send(
"Fetch.continueWithAuth",
{"requestId": requestId, "authChallengeResponse": authChallengeResponse},
)
def getResponseBody(self, requestId: str) -> Awaitable[Dict]:
"""
Causes the body of the response to be received from the server and
returned as a single string. May only be issued for a request that
is paused in the Response stage and is mutually exclusive with
takeResponseBodyForInterceptionAsStream. Calling other methods that
affect the request or disabling fetch domain before body is received
results in an undefined behavior.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-getResponseBody`
:param requestId: Identifier for the intercepted request to get body for.
:return: The results of the command
"""
return self.client.send("Fetch.getResponseBody", {"requestId": requestId})
def takeResponseBodyAsStream(self, requestId: str) -> Awaitable[Dict]:
"""
Returns a handle to the stream representing the response body.
The request must be paused in the HeadersReceived stage.
Note that after this command the request can't be continued
as is -- client either needs to cancel it or to provide the
response body.
The stream only supports sequential read, IO.read will fail if the position
is specified.
This method is mutually exclusive with getResponseBody.
Calling other methods that affect the request or disabling fetch
domain before body is received results in an undefined behavior.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-takeResponseBodyAsStream`
:param requestId: The requestId
:return: The results of the command
"""
return self.client.send(
"Fetch.takeResponseBodyAsStream", {"requestId": requestId}
)
def requestPaused(
self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None
) -> Any:
"""
Issued when the domain is enabled and the request URL matches the
specified filter. The request is paused until the client responds
with one of continueRequest, failRequest or fulfillRequest.
The stage of the request can be determined by presence of responseErrorReason
and responseStatusCode -- the request is at the response stage if either
of these fields is present and in the request stage otherwise.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#event-requestPaused`
:param listener: Optional listener function
:return: If a listener was supplied the return value is a callable that
will remove the supplied listener otherwise a future that resolves
with the value of the event
"""
event_name = "Fetch.requestPaused"
if listener is None:
future = self.client.loop.create_future()
def _listener(event: Optional[Dict] = None) -> None:
future.set_result(event)
self.client.once(event_name, _listener)
return future
self.client.on(event_name, listener)
return lambda: self.client.remove_listener(event_name, listener)
def authRequired(
self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None
) -> Any:
"""
Issued when the domain is enabled with handleAuthRequests set to true.
The request is paused until client responds with continueWithAuth.
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#event-authRequired`
:param listener: Optional listener function
:return: If a listener was supplied the return value is a callable that
will remove the supplied listener otherwise a future that resolves
with the value of the event
"""
event_name = "Fetch.authRequired"
if listener is None:
future = self.client.loop.create_future()
def _listener(event: Optional[Dict] = None) -> None:
future.set_result(event)
self.client.once(event_name, _listener)
return future
self.client.on(event_name, listener)
return lambda: self.client.remove_listener(event_name, listener) | 0.83762 | 0.255828 |
from functools import partial
from typing import NamedTuple
import jax
import jax.numpy as jnp
import jax.random as jrandom
from jax.scipy.special import logsumexp
from slzero.dataset import Batch, BatchStreamer
from slzero.decoding import viterbi
class Params(NamedTuple):
weight: jnp.ndarray
transitions: jnp.ndarray
def init_params(rng_key: jrandom.KeyArray, num_labels: int, num_tokens: int) -> Params:
weight_key, transitions_key = jrandom.split(rng_key)
return Params(
jrandom.uniform(weight_key, (num_tokens, num_labels)),
jrandom.uniform(transitions_key, (num_labels, num_labels)),
)
def train(
params: Params,
batch_stream: BatchStreamer,
num_epochs: int,
learning_rate: float,
) -> Params:
for i in range(1, num_epochs + 1):
print(f"Epoch: {i}")
total_loss = 0.0
for batch in batch_stream():
loss, params = train_step(params, batch, learning_rate)
total_loss += float(loss)
print(f"Loss: {total_loss:.4f}")
return params
@partial(jax.jit, static_argnums=(2,))
def train_step(
params: Params,
batch: Batch,
learning_rate: float,
) -> tuple[jnp.ndarray, Params]:
loss, grads = jax.value_and_grad(compute_loss)(params, *batch)
params = jax.tree_map(
lambda param, grad: param - learning_rate * grad, params, grads
)
return loss, params
def compute_loss(
params: Params, X_batch: jnp.ndarray, y_batch: jnp.ndarray, masks: jnp.ndarray
) -> jnp.ndarray:
logits = params.weight[X_batch] * masks[..., None]
loss = -jax.vmap(log_likelihood, in_axes=(None, 0, 0, 0))(
params.transitions, logits, y_batch, masks
)
return jnp.sum(loss)
def log_likelihood(
transitions: jnp.ndarray, xs: jnp.ndarray, ys: jnp.ndarray, masks: jnp.ndarray
) -> jnp.ndarray:
score = sequence_score(transitions, xs, ys, masks)
log_Z = forward_algorithm(transitions, xs, masks)
return score - log_Z
def sequence_score(
transitions: jnp.ndarray, xs: jnp.ndarray, ys: jnp.ndarray, masks: jnp.ndarray
) -> jnp.ndarray:
def step(
carry: jnp.ndarray,
inputs: tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray],
) -> tuple[jnp.ndarray, None]:
x, y_from, y_to, mask = inputs
return carry + (x[y_to] + transitions[y_from, y_to]) * mask, None
init = xs[0, ys[0]] * masks[0]
score, _ = jax.lax.scan(step, init, (xs[1:], ys[:-1], ys[1:], masks[1:]))
return score
def forward_algorithm(
transitions: jnp.ndarray, xs: jnp.ndarray, masks: jnp.ndarray
) -> jnp.ndarray:
def step(
carry: jnp.ndarray, inputs: tuple[jnp.ndarray, jnp.ndarray]
) -> tuple[jnp.ndarray, None]:
x, mask = inputs
log_alpha = (
logsumexp(carry[:, None] + transitions, axis=0) + x
) * mask + carry * (~mask)
return log_alpha, None
init = xs[0] * masks[0]
log_alpha, _ = jax.lax.scan(step, init, (xs[1:], masks[1:]))
return logsumexp(log_alpha, axis=0)
@partial(jax.jit, static_argnums=(3,))
def predict(
params: Params, X_batch: jnp.ndarray, masks: jnp.ndarray, ignore_label: int = -1
) -> tuple[jnp.ndarray, jnp.ndarray]:
logits = params.weight[X_batch] * masks[..., None]
return jax.vmap(viterbi, in_axes=(None, 0, 0, None))(
params.transitions, logits, masks, ignore_label
)
def evaluate(
params: Params, batch_stream: BatchStreamer, ignore_label: int = -1
) -> float:
correct = 0
total = 0
for X_batch, y_batch, masks in batch_stream():
_, predictions = predict(params, X_batch, masks, ignore_label)
ignores = (~masks).sum()
correct += jnp.equal(y_batch, predictions).sum() - ignores
total += y_batch.size - ignores
return correct / total | slzero/crf/functions.py | from functools import partial
from typing import NamedTuple
import jax
import jax.numpy as jnp
import jax.random as jrandom
from jax.scipy.special import logsumexp
from slzero.dataset import Batch, BatchStreamer
from slzero.decoding import viterbi
class Params(NamedTuple):
weight: jnp.ndarray
transitions: jnp.ndarray
def init_params(rng_key: jrandom.KeyArray, num_labels: int, num_tokens: int) -> Params:
weight_key, transitions_key = jrandom.split(rng_key)
return Params(
jrandom.uniform(weight_key, (num_tokens, num_labels)),
jrandom.uniform(transitions_key, (num_labels, num_labels)),
)
def train(
params: Params,
batch_stream: BatchStreamer,
num_epochs: int,
learning_rate: float,
) -> Params:
for i in range(1, num_epochs + 1):
print(f"Epoch: {i}")
total_loss = 0.0
for batch in batch_stream():
loss, params = train_step(params, batch, learning_rate)
total_loss += float(loss)
print(f"Loss: {total_loss:.4f}")
return params
@partial(jax.jit, static_argnums=(2,))
def train_step(
params: Params,
batch: Batch,
learning_rate: float,
) -> tuple[jnp.ndarray, Params]:
loss, grads = jax.value_and_grad(compute_loss)(params, *batch)
params = jax.tree_map(
lambda param, grad: param - learning_rate * grad, params, grads
)
return loss, params
def compute_loss(
params: Params, X_batch: jnp.ndarray, y_batch: jnp.ndarray, masks: jnp.ndarray
) -> jnp.ndarray:
logits = params.weight[X_batch] * masks[..., None]
loss = -jax.vmap(log_likelihood, in_axes=(None, 0, 0, 0))(
params.transitions, logits, y_batch, masks
)
return jnp.sum(loss)
def log_likelihood(
transitions: jnp.ndarray, xs: jnp.ndarray, ys: jnp.ndarray, masks: jnp.ndarray
) -> jnp.ndarray:
score = sequence_score(transitions, xs, ys, masks)
log_Z = forward_algorithm(transitions, xs, masks)
return score - log_Z
def sequence_score(
transitions: jnp.ndarray, xs: jnp.ndarray, ys: jnp.ndarray, masks: jnp.ndarray
) -> jnp.ndarray:
def step(
carry: jnp.ndarray,
inputs: tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray],
) -> tuple[jnp.ndarray, None]:
x, y_from, y_to, mask = inputs
return carry + (x[y_to] + transitions[y_from, y_to]) * mask, None
init = xs[0, ys[0]] * masks[0]
score, _ = jax.lax.scan(step, init, (xs[1:], ys[:-1], ys[1:], masks[1:]))
return score
def forward_algorithm(
transitions: jnp.ndarray, xs: jnp.ndarray, masks: jnp.ndarray
) -> jnp.ndarray:
def step(
carry: jnp.ndarray, inputs: tuple[jnp.ndarray, jnp.ndarray]
) -> tuple[jnp.ndarray, None]:
x, mask = inputs
log_alpha = (
logsumexp(carry[:, None] + transitions, axis=0) + x
) * mask + carry * (~mask)
return log_alpha, None
init = xs[0] * masks[0]
log_alpha, _ = jax.lax.scan(step, init, (xs[1:], masks[1:]))
return logsumexp(log_alpha, axis=0)
@partial(jax.jit, static_argnums=(3,))
def predict(
params: Params, X_batch: jnp.ndarray, masks: jnp.ndarray, ignore_label: int = -1
) -> tuple[jnp.ndarray, jnp.ndarray]:
logits = params.weight[X_batch] * masks[..., None]
return jax.vmap(viterbi, in_axes=(None, 0, 0, None))(
params.transitions, logits, masks, ignore_label
)
def evaluate(
params: Params, batch_stream: BatchStreamer, ignore_label: int = -1
) -> float:
correct = 0
total = 0
for X_batch, y_batch, masks in batch_stream():
_, predictions = predict(params, X_batch, masks, ignore_label)
ignores = (~masks).sum()
correct += jnp.equal(y_batch, predictions).sum() - ignores
total += y_batch.size - ignores
return correct / total | 0.915235 | 0.681528 |
import unittest
import os,sys
import pickle
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import rdChemReactions, AllChem
from rdkit import Geometry
from rdkit import RDConfig
import itertools, time
test_data = [("good", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M RGP 1 2 1
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M RGP 1 1 2
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
11.9811 -6.9292 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
1 2 1 0 0 0 0
M RGP 2 1 1 2 2
M END'''),
("bad", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M RGP 1 2 1
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M RGP 1 1 2
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M RGP 2 1 1 2 2
M END'''),
# chemdraw style
("bad", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M END'''),
("fail", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R3 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M END'''),
]
unused_rlabel_in_product = """$RXN
bug.rxn
ChemDraw06121709062D
1 1
$MOL
2 1 0 0 0 0 0 0 0 0999 V2000
0.1604 0.3798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.1604 -0.3798 0.0000 R 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0
M END
$MOL
2 1 0 0 0 0 0 0 0 0999 V2000
-1.2690 -1.3345 0.0000 R 0 0 0 0 0 0 0 0 0 1 0 0
1.2690 1.3345 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0
M END
"""
kekule_rxn = """$RXN
bug.rxn
ChemDraw06121709062D
1 1
$MOL
RDKit 2D
6 6 0 0 0 0 0 0 0 0999 V2000
1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 2 0
3 4 1 0
4 5 2 0
5 6 1 0
6 1 2 0
M END
$MOL
RDKit 2D
6 6 0 0 0 0 0 0 0 0999 V2000
1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 2 0
3 4 1 0
4 5 2 0
5 6 1 0
6 1 2 0
M END
"""
good_res = (0,0,2,1,(((0, 'halogen.bromine.aromatic'),), ((1, 'boronicacid'),)))
bad_res = (3,0,2,1,(((0, 'halogen.bromine.aromatic'),), ((1, 'boronicacid'),)))
class TestCase(unittest.TestCase) :
def test_sanitize(self):
for status, block in test_data:
print("*"*44)
rxna = AllChem.ReactionFromRxnBlock(block)
rxnb = AllChem.ReactionFromRxnBlock(block)
rxna.Initialize()
res = rdChemReactions.PreprocessReaction(rxna)
print(AllChem.ReactionToRxnBlock(rxna))
if status == "good":
self.assertEquals(res, good_res)
elif status == "bad":
self.assertEquals(res, bad_res)
print (">"*44)
rxnb.Initialize()
try:
rdChemReactions.SanitizeRxn(rxnb)
res = rdChemReactions.PreprocessReaction(rxnb)
print(AllChem.ReactionToRxnBlock(rxnb))
self.assertEquals(res, good_res)
assert not status == "fail"
except Exception:
print ("$RXN Failed")
if status == "fail":
continue
raise
def test_unused_rlabel_in_product(self):
rxn = AllChem.ReactionFromRxnBlock(unused_rlabel_in_product)
# test was for a seg fault
rdChemReactions.SanitizeRxn(rxn)
def test_only_aromatize_if_possible(self):
rxn = AllChem.ReactionFromRxnBlock(kekule_rxn)
# test was for a seg fault
groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
print(groups)
self.assertFalse(len(groups))
# check normal sanitization
rdChemReactions.SanitizeRxn(rxn)
groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
self.assertTrue(len(groups[0]))
# now check adjustparams with ONLY aromatize if possible
rxn = AllChem.ReactionFromRxnBlock(kekule_rxn)
rdChemReactions.SanitizeRxn(rxn)
groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
self.assertTrue(len(groups[0]))
def test_github_4162(self):
rxn = rdChemReactions.ReactionFromSmarts(
"[C:1](=[O:2])-[OD1].[N!H0:3]>>[C:1](=[O:2])[N:3]")
rxn_copy = rdChemReactions.ChemicalReaction(rxn)
rdChemReactions.SanitizeRxn(rxn)
rdChemReactions.SanitizeRxn(rxn_copy)
pkl = rxn.ToBinary()
rxn_from_pickle = rdChemReactions.ChemicalReaction(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
pkl = pickle.dumps(rxn)
rxn_from_pickle = pickle.loads(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
pkl = rxn_from_pickle.ToBinary()
rxn_from_pickle = rdChemReactions.ChemicalReaction(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
pkl = pickle.dumps(rxn_from_pickle)
rxn_from_pickle = pickle.loads(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
if __name__ == '__main__':
unittest.main() | Code/GraphMol/ChemReactions/Wrap/testSanitize.py |
import unittest
import os,sys
import pickle
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import rdChemReactions, AllChem
from rdkit import Geometry
from rdkit import RDConfig
import itertools, time
test_data = [("good", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M RGP 1 2 1
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M RGP 1 1 2
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
11.9811 -6.9292 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
1 2 1 0 0 0 0
M RGP 2 1 1 2 2
M END'''),
("bad", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M RGP 1 2 1
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M RGP 1 1 2
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M RGP 2 1 1 2 2
M END'''),
# chemdraw style
("bad", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M END'''),
("fail", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R3 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M END'''),
]
unused_rlabel_in_product = """$RXN
bug.rxn
ChemDraw06121709062D
1 1
$MOL
2 1 0 0 0 0 0 0 0 0999 V2000
0.1604 0.3798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.1604 -0.3798 0.0000 R 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0
M END
$MOL
2 1 0 0 0 0 0 0 0 0999 V2000
-1.2690 -1.3345 0.0000 R 0 0 0 0 0 0 0 0 0 1 0 0
1.2690 1.3345 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0
M END
"""
kekule_rxn = """$RXN
bug.rxn
ChemDraw06121709062D
1 1
$MOL
RDKit 2D
6 6 0 0 0 0 0 0 0 0999 V2000
1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 2 0
3 4 1 0
4 5 2 0
5 6 1 0
6 1 2 0
M END
$MOL
RDKit 2D
6 6 0 0 0 0 0 0 0 0999 V2000
1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 2 0
3 4 1 0
4 5 2 0
5 6 1 0
6 1 2 0
M END
"""
good_res = (0,0,2,1,(((0, 'halogen.bromine.aromatic'),), ((1, 'boronicacid'),)))
bad_res = (3,0,2,1,(((0, 'halogen.bromine.aromatic'),), ((1, 'boronicacid'),)))
class TestCase(unittest.TestCase) :
def test_sanitize(self):
for status, block in test_data:
print("*"*44)
rxna = AllChem.ReactionFromRxnBlock(block)
rxnb = AllChem.ReactionFromRxnBlock(block)
rxna.Initialize()
res = rdChemReactions.PreprocessReaction(rxna)
print(AllChem.ReactionToRxnBlock(rxna))
if status == "good":
self.assertEquals(res, good_res)
elif status == "bad":
self.assertEquals(res, bad_res)
print (">"*44)
rxnb.Initialize()
try:
rdChemReactions.SanitizeRxn(rxnb)
res = rdChemReactions.PreprocessReaction(rxnb)
print(AllChem.ReactionToRxnBlock(rxnb))
self.assertEquals(res, good_res)
assert not status == "fail"
except Exception:
print ("$RXN Failed")
if status == "fail":
continue
raise
def test_unused_rlabel_in_product(self):
rxn = AllChem.ReactionFromRxnBlock(unused_rlabel_in_product)
# test was for a seg fault
rdChemReactions.SanitizeRxn(rxn)
def test_only_aromatize_if_possible(self):
rxn = AllChem.ReactionFromRxnBlock(kekule_rxn)
# test was for a seg fault
groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
print(groups)
self.assertFalse(len(groups))
# check normal sanitization
rdChemReactions.SanitizeRxn(rxn)
groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
self.assertTrue(len(groups[0]))
# now check adjustparams with ONLY aromatize if possible
rxn = AllChem.ReactionFromRxnBlock(kekule_rxn)
rdChemReactions.SanitizeRxn(rxn)
groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
self.assertTrue(len(groups[0]))
def test_github_4162(self):
rxn = rdChemReactions.ReactionFromSmarts(
"[C:1](=[O:2])-[OD1].[N!H0:3]>>[C:1](=[O:2])[N:3]")
rxn_copy = rdChemReactions.ChemicalReaction(rxn)
rdChemReactions.SanitizeRxn(rxn)
rdChemReactions.SanitizeRxn(rxn_copy)
pkl = rxn.ToBinary()
rxn_from_pickle = rdChemReactions.ChemicalReaction(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
pkl = pickle.dumps(rxn)
rxn_from_pickle = pickle.loads(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
pkl = rxn_from_pickle.ToBinary()
rxn_from_pickle = rdChemReactions.ChemicalReaction(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
pkl = pickle.dumps(rxn_from_pickle)
rxn_from_pickle = pickle.loads(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
if __name__ == '__main__':
unittest.main() | 0.201853 | 0.169543 |
import argparse
import pandas as pd
from dpmModule.character.characterKernel import ItemedCharacter, JobGenerator
from dpmModule.character.characterTemplate import get_template_generator
from dpmModule.jobs import jobMap, weaponList
from dpmModule.kernel import core
from dpmModule.status.ability import Ability_grade
from .loader import load_data
from .preset import get_preset
from .saver import save_data
def get_args():
parser = argparse.ArgumentParser("Optimization hint argument")
parser.add_argument(
"--id", type=str, help="Target preset id to calculate statistics"
)
parser.add_argument("--ulevel", type=int, default=8000)
parser.add_argument("--cdr", type=int, default=0)
parser.add_argument("--time", type=int, default=1800)
parser.add_argument("--task", default="dpm")
parser.add_argument("--calc", action="store_true")
return parser.parse_args()
def armor_percent_to_float(num: float):
return (100 - num) / 100
def armor_float_to_percent(num: float):
return 100 - num * 100
def get_modifier(args) -> core.CharacterModifier:
preset = get_preset(args.id)
template = get_template_generator("high_standard")().get_template(args.ulevel)
target: ItemedCharacter = template(weaponList[preset.job], args.cdr)
gen: JobGenerator = jobMap[preset.job].JobGenerator()
v_builder = core.AlwaysMaximumVBuilder()
graph = gen.package(
target,
v_builder,
options=preset.options,
ulevel=args.ulevel,
weaponstat=[4, 9],
ability_grade=Ability_grade(4, 1),
)
return graph.get_default_buff_modifier()
def optimization_hint(args, df: pd.DataFrame):
buff_modifier = get_modifier(args)
df = df[["name", "deal", "mdf"]]
df = df.loc[df["deal"] > 0]
deal_total = df["deal"].sum()
df["crit_damage"] = df["mdf"].apply(lambda x: x["crit_damage"])
df["pdamage"] = df["mdf"].apply(lambda x: x["pdamage"])
df["boss_pdamage"] = df["mdf"].apply(lambda x: x["boss_pdamage"])
df["armor_ignore"] = df["mdf"].apply(lambda x: x["armor_ignore"])
df["patt"] = df["mdf"].apply(lambda x: x["patt"])
grouped = df.groupby(["name"])
df = pd.DataFrame()
df["share"] = grouped["deal"].sum() / deal_total
df["crit_damage"] = grouped["crit_damage"].mean()
df["pdamage"] = grouped["pdamage"].mean()
df["boss_pdamage"] = grouped["boss_pdamage"].mean()
df["armor_ignore"] = grouped["armor_ignore"].mean()
df["patt"] = grouped["patt"].mean()
print(df)
crit_damage = (df["crit_damage"] * df["share"]).sum()
pdamage = (df["pdamage"] * df["share"]).sum()
boss_pdamage = (df["boss_pdamage"] * df["share"]).sum()
armor_ignore = (df["armor_ignore"] * df["share"]).sum()
patt = (df["patt"] * df["share"]).sum()
print(
{
"crit_damage": crit_damage - buff_modifier.crit_damage,
"pdamage": pdamage - buff_modifier.pdamage,
"boss_pdamage": boss_pdamage - buff_modifier.boss_pdamage,
"armor_ignore": armor_float_to_percent(
armor_percent_to_float(armor_ignore)
/ armor_percent_to_float(buff_modifier.armor_ignore)
/ armor_percent_to_float(20)
),
"patt": patt - buff_modifier.patt,
}
)
if __name__ == "__main__":
args = get_args()
if args.calc:
data = save_data(args)
else:
data = load_data(args)
optimization_hint(args, data) | statistics/optimization_hint.py | import argparse
import pandas as pd
from dpmModule.character.characterKernel import ItemedCharacter, JobGenerator
from dpmModule.character.characterTemplate import get_template_generator
from dpmModule.jobs import jobMap, weaponList
from dpmModule.kernel import core
from dpmModule.status.ability import Ability_grade
from .loader import load_data
from .preset import get_preset
from .saver import save_data
def get_args():
parser = argparse.ArgumentParser("Optimization hint argument")
parser.add_argument(
"--id", type=str, help="Target preset id to calculate statistics"
)
parser.add_argument("--ulevel", type=int, default=8000)
parser.add_argument("--cdr", type=int, default=0)
parser.add_argument("--time", type=int, default=1800)
parser.add_argument("--task", default="dpm")
parser.add_argument("--calc", action="store_true")
return parser.parse_args()
def armor_percent_to_float(num: float):
return (100 - num) / 100
def armor_float_to_percent(num: float):
return 100 - num * 100
def get_modifier(args) -> core.CharacterModifier:
preset = get_preset(args.id)
template = get_template_generator("high_standard")().get_template(args.ulevel)
target: ItemedCharacter = template(weaponList[preset.job], args.cdr)
gen: JobGenerator = jobMap[preset.job].JobGenerator()
v_builder = core.AlwaysMaximumVBuilder()
graph = gen.package(
target,
v_builder,
options=preset.options,
ulevel=args.ulevel,
weaponstat=[4, 9],
ability_grade=Ability_grade(4, 1),
)
return graph.get_default_buff_modifier()
def optimization_hint(args, df: pd.DataFrame):
buff_modifier = get_modifier(args)
df = df[["name", "deal", "mdf"]]
df = df.loc[df["deal"] > 0]
deal_total = df["deal"].sum()
df["crit_damage"] = df["mdf"].apply(lambda x: x["crit_damage"])
df["pdamage"] = df["mdf"].apply(lambda x: x["pdamage"])
df["boss_pdamage"] = df["mdf"].apply(lambda x: x["boss_pdamage"])
df["armor_ignore"] = df["mdf"].apply(lambda x: x["armor_ignore"])
df["patt"] = df["mdf"].apply(lambda x: x["patt"])
grouped = df.groupby(["name"])
df = pd.DataFrame()
df["share"] = grouped["deal"].sum() / deal_total
df["crit_damage"] = grouped["crit_damage"].mean()
df["pdamage"] = grouped["pdamage"].mean()
df["boss_pdamage"] = grouped["boss_pdamage"].mean()
df["armor_ignore"] = grouped["armor_ignore"].mean()
df["patt"] = grouped["patt"].mean()
print(df)
crit_damage = (df["crit_damage"] * df["share"]).sum()
pdamage = (df["pdamage"] * df["share"]).sum()
boss_pdamage = (df["boss_pdamage"] * df["share"]).sum()
armor_ignore = (df["armor_ignore"] * df["share"]).sum()
patt = (df["patt"] * df["share"]).sum()
print(
{
"crit_damage": crit_damage - buff_modifier.crit_damage,
"pdamage": pdamage - buff_modifier.pdamage,
"boss_pdamage": boss_pdamage - buff_modifier.boss_pdamage,
"armor_ignore": armor_float_to_percent(
armor_percent_to_float(armor_ignore)
/ armor_percent_to_float(buff_modifier.armor_ignore)
/ armor_percent_to_float(20)
),
"patt": patt - buff_modifier.patt,
}
)
if __name__ == "__main__":
args = get_args()
if args.calc:
data = save_data(args)
else:
data = load_data(args)
optimization_hint(args, data) | 0.562657 | 0.129761 |
import os
def add_slash(m):
"""
Helper function that appends a / if one does not exist.
Prameters:
m: The string to append to.
"""
if m[-1] != "/":
return m + "/"
else:
return m
class Directory:
path_string = None
def __init__(self, in_string, ignore=False):
if not ignore and not os.path.isdir(os.path.normpath(in_string)):
raise OSError("Not a vaild path: " + in_string)
self.path_string = add_slash(in_string)
def as_string(self):
return os.path.normpath(self.path_string)
def basename(self):
res = os.path.basename(os.path.normpath(self.path_string))
return res
def parent_dir(self, ignore=False):
res_text = os.path.dirname(os.path.normpath(self.path_string).rstrip("/"))
if not ignore and not os.path.isdir(res_text):
raise OSError("Not a vaild path")
return Directory(res_text, ignore=ignore)
def append_dir(self, app_string, ignore=False):
res_text = os.path.join(self.path_string, app_string)
if not ignore and not os.path.isdir(res_text):
raise OSError("Not a vaild path")
return Directory(res_text, ignore=ignore)
def append_file(self, app_string, ignore=False):
res_text = os.path.join(self.path_string, app_string)
if not ignore and not os.path.isfile(res_text):
raise OSError("Not a vaild file")
return Directory(res_text, ignore=ignore)
class File:
file_string = None
def __init__(self, in_string, ignore=False):
if not ignore and not os.path.isfile(in_string):
raise OSError("Not a vaild file: " + in_string)
self.file_string = add_slash(in_string)
def as_string(self):
return os.path.normpath(self.file_string)
def basename(self):
res = os.path.basename(os.path.normpath(self.file_string))
return res
def extension(self):
res = self.basename()
res = os.path.splitext(res)[-1]
return res
def without_extension(self):
res = self.basename()
res = os.path.splitext(res)[0]
return res
def parent_dir(self, ignore=False):
res_text = os.path.dirname(os.path.normpath(self.file_string).rstrip("/"))
if not ignore and not os.path.isfile(res_text):
raise OSError("Not a vaild directory")
return Directory(res_text, ignore=ignore) | ProQuest2Bepress/paths.py | import os
def add_slash(m):
"""
Helper function that appends a / if one does not exist.
Prameters:
m: The string to append to.
"""
if m[-1] != "/":
return m + "/"
else:
return m
class Directory:
path_string = None
def __init__(self, in_string, ignore=False):
if not ignore and not os.path.isdir(os.path.normpath(in_string)):
raise OSError("Not a vaild path: " + in_string)
self.path_string = add_slash(in_string)
def as_string(self):
return os.path.normpath(self.path_string)
def basename(self):
res = os.path.basename(os.path.normpath(self.path_string))
return res
def parent_dir(self, ignore=False):
res_text = os.path.dirname(os.path.normpath(self.path_string).rstrip("/"))
if not ignore and not os.path.isdir(res_text):
raise OSError("Not a vaild path")
return Directory(res_text, ignore=ignore)
def append_dir(self, app_string, ignore=False):
res_text = os.path.join(self.path_string, app_string)
if not ignore and not os.path.isdir(res_text):
raise OSError("Not a vaild path")
return Directory(res_text, ignore=ignore)
def append_file(self, app_string, ignore=False):
res_text = os.path.join(self.path_string, app_string)
if not ignore and not os.path.isfile(res_text):
raise OSError("Not a vaild file")
return Directory(res_text, ignore=ignore)
class File:
file_string = None
def __init__(self, in_string, ignore=False):
if not ignore and not os.path.isfile(in_string):
raise OSError("Not a vaild file: " + in_string)
self.file_string = add_slash(in_string)
def as_string(self):
return os.path.normpath(self.file_string)
def basename(self):
res = os.path.basename(os.path.normpath(self.file_string))
return res
def extension(self):
res = self.basename()
res = os.path.splitext(res)[-1]
return res
def without_extension(self):
res = self.basename()
res = os.path.splitext(res)[0]
return res
def parent_dir(self, ignore=False):
res_text = os.path.dirname(os.path.normpath(self.file_string).rstrip("/"))
if not ignore and not os.path.isfile(res_text):
raise OSError("Not a vaild directory")
return Directory(res_text, ignore=ignore) | 0.40439 | 0.197232 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler.internal import _pywrap_profiler
from tensorflow.python.util.tf_export import tf_export
_profiler = None
_profiler_lock = threading.Lock()
@tf_export('profiler.experimental.ProfilerOptions', v1=[])
class ProfilerOptions(
    collections.namedtuple(
        'ProfilerOptions',
        ['host_tracer_level', 'python_tracer_level', 'device_tracer_level'])):
  """Options for finer control over the profiler.
  Use `tf.profiler.ProfilerOptions` to control `tf.profiler`
  behavior.
  Fields:
    host_tracer_level: Adjust CPU tracing level. Values are: 1 - critical info
      only, 2 - info, 3 - verbose. [default value is 2]
    python_tracer_level: Toggle tracing of Python function calls. Values are: 1
      - enabled, 0 - disabled [default value is 0]
    device_tracer_level: Adjust device (TPU/GPU) tracing level. Values are: 1 -
      enabled, 0 - disabled [default value is 1]
  """

  # __new__ (rather than __init__) because namedtuple instances are immutable;
  # it only supplies the documented default values.
  def __new__(cls,
              host_tracer_level=2,
              python_tracer_level=0,
              device_tracer_level=1):
    """Create an immutable options tuple with the documented defaults."""
    return super(ProfilerOptions,
                 cls).__new__(cls, host_tracer_level, python_tracer_level,
                              device_tracer_level)
@tf_export('profiler.experimental.start', v1=[])
def start(logdir, options=None):
  """Start profiling TensorFlow performance.

  Only one profiler session may be active per process; the module-level
  `_profiler` singleton (guarded by `_profiler_lock`) enforces this.

  Args:
    logdir: Profiling results log directory.
    options: `ProfilerOptions` namedtuple to specify miscellaneous profiler
      options. See example usage below.
  Raises:
    AlreadyExistsError: If a profiling session is already running.
  Example usage:
  ```python
  options = tf.profiler.experimental.ProfilerOptions(host_tracer_level = 3,
                                                     python_tracer_level = 1,
                                                     device_tracer_level = 1)
  tf.profiler.experimental.start('logdir_path', options = options)
  # Training code here
  tf.profiler.experimental.stop()
  ```
  To view the profiling results, launch TensorBoard and point it to `logdir`.
  Open your browser and go to `localhost:6006/#profile` to view profiling
  results.
  """
  global _profiler
  with _profiler_lock:
    if _profiler is not None:
      raise errors.AlreadyExistsError(None, None,
                                      'Another profiler is running.')
    _profiler = _pywrap_profiler.ProfilerSession()
    try:
      # pybind11 cannot receive a namedtuple directly; pass a plain dict.
      opts = dict(options._asdict()) if options is not None else {}
      _profiler.start(logdir, opts)
    except errors.AlreadyExistsError:
      logging.warning('Another profiler session is running which is probably '
                      'created by profiler server. Please avoid using profiler '
                      'server and profiler APIs at the same time.')
      # BUG FIX: reset the singleton before re-raising. The session created
      # above never started; leaving it assigned would make a later stop()
      # export an unstarted session and would block every future start()
      # (the generic handler below already performs this cleanup).
      _profiler = None
      raise errors.AlreadyExistsError(None, None,
                                      'Another profiler is running.')
    except Exception:
      # Any other startup failure: clear the singleton so start() can retry.
      _profiler = None
      raise
@tf_export('profiler.experimental.stop', v1=[])
def stop(save=True):
  """Stops the current profiling session.
  The profiler session will be stopped and profile results can be saved.
  Args:
    save: An optional variable to save the results to TensorBoard. Default True.
  Raises:
    UnavailableError: If there is no active profiling session.
  """
  global _profiler
  with _profiler_lock:
    if _profiler is None:
      raise errors.UnavailableError(
          None, None,
          'Cannot export profiling results. No profiler is running.')
    if save:
      try:
        _profiler.export_to_tb()
      except Exception:
        # Drop the failed session before propagating so a new start() works.
        _profiler = None
        raise
    # Session finished: clear the singleton so start() can run again.
    _profiler = None
def warmup():
  """Warm-up the profiler session.
  The profiler session will set up profiling context, including loading CUPTI
  library for GPU profiling. This is used for improving the accuracy of
  the profiling results.
  """
  # An empty logdir is acceptable: results are discarded via stop(save=False).
  start('')
  stop(save=False)
@tf_export('profiler.experimental.server.start', v1=[])
def start_server(port):
  """Start a profiler grpc server that listens to given port.
  The profiler server will exit when the process finishes. The service is
  defined in tensorflow/core/profiler/profiler_service.proto.
  Args:
    port: port profiler server listens to.
  Example usage:
  ```python
  tf.profiler.experimental.server.start('6009')
  # do your training here.
  ```
  """
  _pywrap_profiler.start_server(port)
@tf_export('profiler.experimental.Profile', v1=[])
class Profile(object):
  """Context-manager profile API.
  Profiling will start when entering the scope, and stop and save the results to
  the logdir when exits the scope. Open TensorBoard profile tab to view results.
  Example usage:
  ```python
  with tf.profiler.experimental.Profile("/path/to/logdir"):
    # do some work
  ```
  """
  def __init__(self, logdir, options=None):
    """Creates a context manager object for profiler API.
    Args:
      logdir: profile data will save to this directory.
      options: An optional tf.profiler.ProfilerOptions can be provided to fine
        tune the profiler's behavior.
    """
    self._logdir = logdir
    self._options = options
  def __enter__(self):
    # Delegates to the module-level start(); raises AlreadyExistsError when a
    # session is already active.
    start(self._logdir, self._options)
  def __exit__(self, typ, value, tb):
    # Always stops and saves, even if the body raised (the exception still
    # propagates because this returns None).
    stop()
from __future__ import division
from __future__ import print_function
import collections
import threading
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler.internal import _pywrap_profiler
from tensorflow.python.util.tf_export import tf_export
_profiler = None
_profiler_lock = threading.Lock()
@tf_export('profiler.experimental.ProfilerOptions', v1=[])
class ProfilerOptions(
collections.namedtuple(
'ProfilerOptions',
['host_tracer_level', 'python_tracer_level', 'device_tracer_level'])):
"""Options for finer control over the profiler.
Use `tf.profiler.ProfilerOptions` to control `tf.profiler`
behavior.
Fields:
host_tracer_level: Adjust CPU tracing level. Values are: 1 - critical info
only, 2 - info, 3 - verbose. [default value is 2]
python_tracer_level: Toggle tracing of Python function calls. Values are: 1
- enabled, 0 - disabled [default value is 0]
device_tracer_level: Adjust device (TPU/GPU) tracing level. Values are: 1 -
enabled, 0 - disabled [default value is 1]
"""
def __new__(cls,
host_tracer_level=2,
python_tracer_level=0,
device_tracer_level=1):
return super(ProfilerOptions,
cls).__new__(cls, host_tracer_level, python_tracer_level,
device_tracer_level)
@tf_export('profiler.experimental.start', v1=[])
def start(logdir, options=None):
"""Start profiling TensorFlow performance.
Args:
logdir: Profiling results log directory.
options: `ProfilerOptions` namedtuple to specify miscellaneous profiler
options. See example usage below.
Raises:
AlreadyExistsError: If a profiling session is already running.
Example usage:
```python
options = tf.profiler.experimental.ProfilerOptions(host_tracer_level = 3,
python_tracer_level = 1,
device_tracer_level = 1)
tf.profiler.experimental.start('logdir_path', options = options)
# Training code here
tf.profiler.experimental.stop()
```
To view the profiling results, launch TensorBoard and point it to `logdir`.
Open your browser and go to `localhost:6006/#profile` to view profiling
results.
"""
global _profiler
with _profiler_lock:
if _profiler is not None:
raise errors.AlreadyExistsError(None, None,
'Another profiler is running.')
_profiler = _pywrap_profiler.ProfilerSession()
try:
# support for namedtuple in pybind11 is missing, we change it to
# dict type first.
opts = dict(options._asdict()) if options is not None else {}
_profiler.start(logdir, opts)
except errors.AlreadyExistsError:
logging.warning('Another profiler session is running which is probably '
'created by profiler server. Please avoid using profiler '
'server and profiler APIs at the same time.')
raise errors.AlreadyExistsError(None, None,
'Another profiler is running.')
except Exception:
_profiler = None
raise
@tf_export('profiler.experimental.stop', v1=[])
def stop(save=True):
"""Stops the current profiling session.
The profiler session will be stopped and profile results can be saved.
Args:
save: An optional variable to save the results to TensorBoard. Default True.
Raises:
UnavailableError: If there is no active profiling session.
"""
global _profiler
with _profiler_lock:
if _profiler is None:
raise errors.UnavailableError(
None, None,
'Cannot export profiling results. No profiler is running.')
if save:
try:
_profiler.export_to_tb()
except Exception:
_profiler = None
raise
_profiler = None
def warmup():
"""Warm-up the profiler session.
The profiler session will set up profiling context, including loading CUPTI
library for GPU profiling. This is used for improving the accuracy of
the profiling results.
"""
start('')
stop(save=False)
@tf_export('profiler.experimental.server.start', v1=[])
def start_server(port):
"""Start a profiler grpc server that listens to given port.
The profiler server will exit when the process finishes. The service is
defined in tensorflow/core/profiler/profiler_service.proto.
Args:
port: port profiler server listens to.
Example usage: ```python tf.profiler.experimental.server.start('6009') # do
your training here.
"""
_pywrap_profiler.start_server(port)
@tf_export('profiler.experimental.Profile', v1=[])
class Profile(object):
"""Context-manager profile API.
Profiling will start when entering the scope, and stop and save the results to
the logdir when exits the scope. Open TensorBoard profile tab to view results.
Example usage:
```python
with tf.profiler.experimental.Profile("/path/to/logdir"):
# do some work
```
"""
def __init__(self, logdir, options=None):
"""Creates a context manager object for profiler API.
Args:
logdir: profile data will save to this directory.
options: An optional tf.profiler.ProfilerOptions can be provided to fine
tune the profiler's behavior.
"""
self._logdir = logdir
self._options = options
def __enter__(self):
start(self._logdir, self._options)
def __exit__(self, typ, value, tb):
stop() | 0.874104 | 0.365457 |
from __future__ import absolute_import, unicode_literals
import logging
from rest_framework import viewsets, serializers, status
from rest_framework.response import Response
from django.contrib.sites.models import Site
from dbaas.middleware import UserMiddleware
from logical import models
from logical.forms import DatabaseForm
from physical.models import Plan, Environment
from account.models import Team
from .credential import CredentialSerializer
from notification.tasks import TaskRegister
LOG = logging.getLogger(__name__)
class DatabaseSerializer(serializers.HyperlinkedModelSerializer):
    """REST serializer for logical Database objects (old DRF 2.x API style).

    Size and lock status are exposed via SerializerMethodField so that an
    infra error on one database does not break serializing the whole list.
    """
    # Only active plans may be chosen when creating a database.
    plan = serializers.HyperlinkedRelatedField(
        source='plan', view_name='plan-detail',
        queryset=Plan.objects.filter(is_active=True)
    )
    replication_topology_id = serializers.Field(
        source='databaseinfra.plan.replication_topology.id'
    )
    environment = serializers.HyperlinkedRelatedField(
        source='environment', view_name='environment-detail',
        queryset=Environment.objects
    )
    team = serializers.HyperlinkedRelatedField(
        source='team', view_name='team-detail', queryset=Team.objects
    )
    endpoint = serializers.Field(source='endpoint')
    infra_endpoint = serializers.Field(source='databaseinfra.endpoint')
    quarantine_dt = serializers.Field(source='quarantine_dt')
    # total_size_in_bytes = serializers.Field(source='total_size')
    total_size_in_bytes = serializers.SerializerMethodField('get_total_size')
    credentials = CredentialSerializer(many=True, read_only=True)
    status = serializers.Field(source='status')
    # used_size_in_bytes = serializers.Field(source='used_size_in_bytes')
    used_size_in_bytes = serializers.SerializerMethodField(
        'get_used_size_in_bytes'
    )
    engine = serializers.CharField(source='infra.engine', read_only=True)
    is_locked = serializers.SerializerMethodField('get_is_locked')
    class Meta:
        model = models.Database
        fields = (
            'url', 'id', 'name', 'infra_endpoint', 'endpoint', 'plan',
            'environment', 'project', 'team', 'quarantine_dt',
            'total_size_in_bytes', 'credentials', 'description', 'status',
            'used_size_in_bytes', 'subscribe_to_email_events',
            'created_at', 'engine', 'replication_topology_id',
            'is_locked'
        )
        read_only = ('credentials', 'status', 'used_size_in_bytes')
    def __init__(self, *args, **kwargs):
        """Make identity fields writable only on creation (POST requests)."""
        super(DatabaseSerializer, self).__init__(*args, **kwargs)
        request = self.context.get('request', None)
        if request:
            creating = request.method == 'POST'
            # name/plan/environment are immutable once the database exists.
            self.fields['plan'].read_only = not creating
            self.fields['environment'].read_only = not creating
            self.fields['name'].read_only = not creating
            self.fields['credentials'].read_only = True
    def _get_or_none_if_error(self, database, prop_name):
        """Read *prop_name* from *database*; log and return None on any error."""
        try:
            val = getattr(database, prop_name)
        except Exception as e:
            LOG.error("Error get {} of database with id {}, error: {}".format(
                prop_name, database.id, e
            ))
            return
        return val
    def get_total_size(self, database):
        # May be None when the infra backend is unreachable (see helper above).
        return self._get_or_none_if_error(database, 'total_size')
    def get_used_size_in_bytes(self, database):
        return self._get_or_none_if_error(database, 'used_size_in_bytes')
    def get_is_locked(self, database):
        # Locked == some task currently holds the database exclusively.
        return bool(database.current_locked_task)
class DatabaseAPI(viewsets.ModelViewSet):
    """
    * ### __List databases__
    __GET__ /api/database/
    * ### __To create a new database__
    __POST__ /api/database/
        {
            "name": "{name}",
            "plan": "{api_url}/plan/{plan_id}/",
            "environment": "{api_url}/environment/{environment_id}/",
            "project": "{api_url}/project/{project_id}/",
            "team": "{api_url}/team/{team_id}/",
            "description": "{description}",
            "subscribe_to_email_events": "{subscribe_to_email_events}",
            "contacts": "{contacts}"
        }
    * ### __Show details about a database__
    __GET__ /api/database/`database_id`/
    * ### __To delete a database (will put it on quarantine)__
    __DELETE__ /api/database/`database_id`/
    * ### __To change database project__
    __PUT__ /api/database/`database_id`/
        {
            "project": "{api_url}/project/{project_id}/",
            "description": "{description}",
            "subscribe_to_email_events": "{subscribe_to_email_events}",
            "contacts": "{contacts}"
        }
    """
    serializer_class = DatabaseSerializer
    queryset = models.Database.objects.all()

    def create(self, request):
        """Validate the payload and register an async database-creation task.

        Returns HTTP 201 with a URL for polling the task on success, or
        HTTP 400 with the serializer errors.
        """
        serializer = self.get_serializer(
            data=request.DATA, files=request.FILES)
        if serializer.is_valid():
            self.pre_save(serializer.object)
            data = serializer.restore_fields(request.DATA, request.FILES)
            # Randomized defaults spread backup/maintenance load over the day;
            # explicit values in the payload take precedence below.
            backup_hour, maintenance_hour, maintenance_day = (
                DatabaseForm.randomize_backup_and_maintenance_hour()
            )
            # BUG FIX: this dump of the request payload was logged with
            # LOG.error("{}".format(data)) although it is not an error;
            # log it lazily at DEBUG level instead.
            LOG.debug("%s", data)
            result = TaskRegister.database_create(
                name=data['name'], plan=data['plan'],
                environment=data['environment'], team=data['team'],
                project=data['project'], description=data['description'],
                backup_hour=data.get('backup_hour', backup_hour),
                maintenance_window=data.get(
                    'maintenance_window', maintenance_hour
                ),
                maintenance_day=data.get('maintenance_day', maintenance_day),
                subscribe_to_email_events=data['subscribe_to_email_events'],
                user=request.user,
                register_user=False
            )
            headers = self.get_success_headers(data)
            task_url = Site.objects.get_current().domain + \
                '/api/task?task_id=%s' % str(result.id)
            return Response(
                {"task": task_url}, status=status.HTTP_201_CREATED,
                headers=headers
            )
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, *args, **kwargs):
        """Delete (quarantine) a database unless it is protected or already
        quarantined.

        NOTE(review): 401 UNAUTHORIZED is returned for what is semantically a
        *forbidden* operation (403); kept as-is because API clients may depend
        on the current status code.
        """
        instance = self.get_object()
        UserMiddleware.set_current_user(request.user)
        if instance.is_in_quarantine or instance.is_protected:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
import logging
from rest_framework import viewsets, serializers, status
from rest_framework.response import Response
from django.contrib.sites.models import Site
from dbaas.middleware import UserMiddleware
from logical import models
from logical.forms import DatabaseForm
from physical.models import Plan, Environment
from account.models import Team
from .credential import CredentialSerializer
from notification.tasks import TaskRegister
LOG = logging.getLogger(__name__)
class DatabaseSerializer(serializers.HyperlinkedModelSerializer):
plan = serializers.HyperlinkedRelatedField(
source='plan', view_name='plan-detail',
queryset=Plan.objects.filter(is_active=True)
)
replication_topology_id = serializers.Field(
source='databaseinfra.plan.replication_topology.id'
)
environment = serializers.HyperlinkedRelatedField(
source='environment', view_name='environment-detail',
queryset=Environment.objects
)
team = serializers.HyperlinkedRelatedField(
source='team', view_name='team-detail', queryset=Team.objects
)
endpoint = serializers.Field(source='endpoint')
infra_endpoint = serializers.Field(source='databaseinfra.endpoint')
quarantine_dt = serializers.Field(source='quarantine_dt')
# total_size_in_bytes = serializers.Field(source='total_size')
total_size_in_bytes = serializers.SerializerMethodField('get_total_size')
credentials = CredentialSerializer(many=True, read_only=True)
status = serializers.Field(source='status')
# used_size_in_bytes = serializers.Field(source='used_size_in_bytes')
used_size_in_bytes = serializers.SerializerMethodField(
'get_used_size_in_bytes'
)
engine = serializers.CharField(source='infra.engine', read_only=True)
is_locked = serializers.SerializerMethodField('get_is_locked')
class Meta:
model = models.Database
fields = (
'url', 'id', 'name', 'infra_endpoint', 'endpoint', 'plan',
'environment', 'project', 'team', 'quarantine_dt',
'total_size_in_bytes', 'credentials', 'description', 'status',
'used_size_in_bytes', 'subscribe_to_email_events',
'created_at', 'engine', 'replication_topology_id',
'is_locked'
)
read_only = ('credentials', 'status', 'used_size_in_bytes')
def __init__(self, *args, **kwargs):
super(DatabaseSerializer, self).__init__(*args, **kwargs)
request = self.context.get('request', None)
if request:
creating = request.method == 'POST'
self.fields['plan'].read_only = not creating
self.fields['environment'].read_only = not creating
self.fields['name'].read_only = not creating
self.fields['credentials'].read_only = True
def _get_or_none_if_error(self, database, prop_name):
try:
val = getattr(database, prop_name)
except Exception as e:
LOG.error("Error get {} of database with id {}, error: {}".format(
prop_name, database.id, e
))
return
return val
def get_total_size(self, database):
return self._get_or_none_if_error(database, 'total_size')
def get_used_size_in_bytes(self, database):
return self._get_or_none_if_error(database, 'used_size_in_bytes')
def get_is_locked(self, database):
return bool(database.current_locked_task)
class DatabaseAPI(viewsets.ModelViewSet):
"""
* ### __List databases__
__GET__ /api/database/
* ### __To create a new database__
__POST__ /api/database/
{
"name": "{name}",
"plan": "{api_url}/plan/{plan_id}/",
"environment": "{api_url}/environment/{environment_id}/",
"project": "{api_url}/project/{project_id}/",
"team": "{api_url}/team/{team_id}/",
"description": "{description}",
"subscribe_to_email_events": "{subscribe_to_email_events}",
"contacts": "{contacts}"
}
* ### __Show details about a database__
__GET__ /api/database/`database_id`/
* ### __To delete a database (will put it on quarantine)__
__DELETE__ /api/database/`database_id`/
* ### __To change database project__
__PUT__ /api/database/`database_id`/
{
"project": "{api_url}/project/{project_id}/",
"description": "{description}",
"subscribe_to_email_events": "{subscribe_to_email_events}",
"contacts": "{contacts}"
}
"""
serializer_class = DatabaseSerializer
queryset = models.Database.objects.all()
def create(self, request):
serializer = self.get_serializer(
data=request.DATA, files=request.FILES)
if serializer.is_valid():
self.pre_save(serializer.object)
data = serializer.restore_fields(request.DATA, request.FILES)
backup_hour, maintenance_hour, maintenance_day = (
DatabaseForm.randomize_backup_and_maintenance_hour()
)
LOG.error("{}".format(data))
result = TaskRegister.database_create(
name=data['name'], plan=data['plan'],
environment=data['environment'], team=data['team'],
project=data['project'], description=data['description'],
backup_hour=data.get('backup_hour', backup_hour),
maintenance_window=data.get(
'maintenance_window', maintenance_hour
),
maintenance_day=data.get('maintenance_day', maintenance_day),
subscribe_to_email_events=data['subscribe_to_email_events'],
user=request.user,
register_user=False
)
headers = self.get_success_headers(data)
task_url = Site.objects.get_current().domain + \
'/api/task?task_id=%s' % str(result.id)
return Response(
{"task": task_url}, status=status.HTTP_201_CREATED,
headers=headers
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
UserMiddleware.set_current_user(request.user)
if instance.is_in_quarantine or instance.is_protected:
return Response(status=status.HTTP_401_UNAUTHORIZED)
instance.delete()
return Response(status=status.HTTP_204_NO_CONTENT) | 0.637595 | 0.09122 |
import argparse
import logging
import logging.handlers
import time
import json
import paho.mqtt.client as mqtt
import eiscp
# Bridge version reported in the startup log line below.
version="0.7"
parser = argparse.ArgumentParser(description='Bridge between onkyo-eiscp and MQTT')
parser.add_argument('--mqtt-host', default='localhost', help='MQTT server address. Defaults to "localhost"')
parser.add_argument('--mqtt-port', default='1883', type=int, help='MQTT server port. Defaults to 1883')
parser.add_argument('--mqtt-topic', default='onkyo/', help='Topic prefix to be used for subscribing/publishing. Defaults to "onkyo/"')
parser.add_argument('--onkyo-address', help='IP or hostname of the AVR. Defaults to autodiscover')
parser.add_argument('--onkyo-id', help='Device identifier of AVR to connecct to. Uses autodiscover')
parser.add_argument('--log', help='set log level to the specified value. Defaults to WARNING. Try DEBUG for maximum detail')
parser.add_argument('--syslog', action='store_true', help='enable logging to syslog')
args=parser.parse_args()
# --log takes a level name (e.g. DEBUG); setLevel accepts those strings.
if args.log:
    logging.getLogger().setLevel(args.log)
if args.syslog:
    logging.getLogger().addHandler(logging.handlers.SysLogHandler())
# Normalize the prefix so topic names can simply be appended to it.
topic=args.mqtt_topic
if not topic.endswith("/"):
    topic+="/"
# Timestamp of the last command sent to the AVR; used by sendavr() to
# rate-limit commands to one per 50 ms.
lastSend=0
logging.info('Starting onkyo2mqtt V%s with topic prefix \"%s\"' %(version, topic))
def sendavr(cmd):
    """Send a raw eISCP command to the AVR, enforcing a 50 ms minimum gap.

    Uses the module-level `receiver` connection and the `lastSend` timestamp.
    """
    global lastSend
    now=time.time()
    # Sleep just long enough to keep at least 50 ms between commands.
    if now-lastSend<0.05:
        time.sleep(0.05-(now-lastSend))
    receiver.send(cmd)
    lastSend=time.time()
    logging.info("Sent command %s" % (cmd))
def msghandler(mqc,userdata,msg):
    """MQTT on_message callback: forward incoming MQTT commands to the AVR."""
    try:
        global topic
        # Ignore retained messages so stale commands are not replayed on
        # (re)connect.
        if msg.retain:
            return
        mytopic=msg.topic[len(topic):]
        if mytopic=="command":
            # Raw eISCP command, passed through unchanged.
            sendavr(msg.payload)
        elif mytopic[0:4]=="set/":
            # High-level command name; translate to a low-level ISCP command.
            llcmd=eiscp.core.command_to_iscp(msg.payload.decode("utf-8"))
            sendavr(llcmd)
    except Exception as e:
        # A bad message must not kill the MQTT loop; log and carry on.
        logging.warning("Error processing message %s" % e)
def connecthandler(mqc,userdata,rc,properties=None):
    """MQTT on_connect callback: (re)subscribe and announce availability.

    Subscribing here (not at startup) ensures subscriptions are restored
    after an automatic reconnect.
    """
    logging.info("Connected to MQTT broker with rc=%s" % (rc))
    mqc.subscribe(topic+"set/#",qos=0)
    mqc.subscribe(topic+"command",qos=0)
    # connected status: 2 after connect, 1 at startup, 0 via last-will --
    # presumably the mqtt-smarthome convention; verify against consumers.
    mqc.publish(topic+"connected",2,qos=1,retain=True)
def disconnecthandler(mqc,userdata,rc):
    """MQTT on_disconnect callback: back off 5 s before paho reconnects."""
    logging.warning("Disconnected from MQTT broker with rc=%s" % (rc))
    time.sleep(5)
# --- MQTT client setup ---------------------------------------------------
mqc=mqtt.Client()
mqc.on_message=msghandler
mqc.on_connect=connecthandler
mqc.on_disconnect=disconnecthandler
# Last-will: mark the bridge offline (connected=0) on an unclean disconnect.
mqc.will_set(topic+"connected",0,qos=2,retain=True)
mqc.connect(args.mqtt_host,args.mqtt_port,60)
mqc.publish(topic+"connected",1,qos=1,retain=True)

# --- AVR discovery / connection ------------------------------------------
if args.onkyo_address:
    receiver=eiscp.eISCP(args.onkyo_address)
else:
    logging.info('Starting auto-discovery of Onkyo AVRs')
    receivers=eiscp.eISCP.discover()
    for receiver in receivers:
        # BUG FIX: corrected typo in log message ("Disocvered").
        logging.info("Discovered %s at %s:%s with id %s" % (
            receiver.info['model_name'], receiver.host, receiver.port, receiver.info['identifier']))
    if args.onkyo_id:
        receivers=[r for r in receivers
                   if args.onkyo_id in r.info['identifier']]
    if len(receivers)==0:
        logging.warning("No specified AVRs discovered")
        exit(1)
    elif len(receivers)!=1:
        # BUG FIX: corrected typo in log message ("explicitely").
        logging.warning("More than one AVR discovered, please specify explicitly using --onkyo-address or --onkyo-id")
        exit(1)
    receiver=receivers.pop(0)
    logging.info('Discovered AVR at %s',receiver)
# Query some initial values so the retained status topics are populated
# right after startup.
for icmd in ("PWR","MVL","SLI","SLA","LMD"):
    sendavr(icmd+"QSTN")
# Run the MQTT network loop in a background thread; the main thread blocks
# on receiver.get() below.
mqc.loop_start()
def publish(suffix, val, raw):
    """Publish a status value as retained JSON to <topic>status/<suffix>.

    The payload carries the parsed value under "val" and, when available,
    the untouched eISCP message under "onkyo_raw".
    """
    global topic, mqc
    payload = {"val": val}
    if raw is not None:
        payload["onkyo_raw"] = raw
    mqc.publish(topic + "status/" + suffix, json.dumps(payload), qos=0, retain=True)
# Main loop: relay every status message from the AVR to MQTT.
while True:
    # Block for up to an hour waiting for an (unsolicited) status message.
    msg=receiver.get(3600)
    if msg is not None:
        try:
            parsed=eiscp.core.iscp_to_command(msg)
            # Either part of the parsed command can be a list
            if isinstance(parsed[1],str) or isinstance(parsed[1],int):
                val=parsed[1]
            else:
                val=parsed[1][0]
            if isinstance(parsed[0],str):
                publish(parsed[0],val,msg)
            else:
                for pp in parsed[0]:
                    publish(pp,val,msg)
        # BUG FIX: was a bare "except:" which also swallowed SystemExit and
        # KeyboardInterrupt. Messages the eiscp library cannot parse are
        # still deliberately published raw (3-char command + parameter).
        except Exception:
            publish(msg[:3],msg[3:],msg)
import argparse
import logging
import logging.handlers
import time
import json
import paho.mqtt.client as mqtt
import eiscp
version="0.7"
parser = argparse.ArgumentParser(description='Bridge between onkyo-eiscp and MQTT')
parser.add_argument('--mqtt-host', default='localhost', help='MQTT server address. Defaults to "localhost"')
parser.add_argument('--mqtt-port', default='1883', type=int, help='MQTT server port. Defaults to 1883')
parser.add_argument('--mqtt-topic', default='onkyo/', help='Topic prefix to be used for subscribing/publishing. Defaults to "onkyo/"')
parser.add_argument('--onkyo-address', help='IP or hostname of the AVR. Defaults to autodiscover')
parser.add_argument('--onkyo-id', help='Device identifier of AVR to connecct to. Uses autodiscover')
parser.add_argument('--log', help='set log level to the specified value. Defaults to WARNING. Try DEBUG for maximum detail')
parser.add_argument('--syslog', action='store_true', help='enable logging to syslog')
args=parser.parse_args()
if args.log:
logging.getLogger().setLevel(args.log)
if args.syslog:
logging.getLogger().addHandler(logging.handlers.SysLogHandler())
topic=args.mqtt_topic
if not topic.endswith("/"):
topic+="/"
lastSend=0
logging.info('Starting onkyo2mqtt V%s with topic prefix \"%s\"' %(version, topic))
def sendavr(cmd):
global lastSend
now=time.time()
if now-lastSend<0.05:
time.sleep(0.05-(now-lastSend))
receiver.send(cmd)
lastSend=time.time()
logging.info("Sent command %s" % (cmd))
def msghandler(mqc,userdata,msg):
try:
global topic
if msg.retain:
return
mytopic=msg.topic[len(topic):]
if mytopic=="command":
sendavr(msg.payload)
elif mytopic[0:4]=="set/":
llcmd=eiscp.core.command_to_iscp(msg.payload.decode("utf-8"))
sendavr(llcmd)
except Exception as e:
logging.warning("Error processing message %s" % e)
def connecthandler(mqc,userdata,rc,properties=None):
logging.info("Connected to MQTT broker with rc=%s" % (rc))
mqc.subscribe(topic+"set/#",qos=0)
mqc.subscribe(topic+"command",qos=0)
mqc.publish(topic+"connected",2,qos=1,retain=True)
def disconnecthandler(mqc,userdata,rc):
logging.warning("Disconnected from MQTT broker with rc=%s" % (rc))
time.sleep(5)
mqc=mqtt.Client()
mqc.on_message=msghandler
mqc.on_connect=connecthandler
mqc.on_disconnect=disconnecthandler
mqc.will_set(topic+"connected",0,qos=2,retain=True)
mqc.connect(args.mqtt_host,args.mqtt_port,60)
mqc.publish(topic+"connected",1,qos=1,retain=True)
if args.onkyo_address:
receiver=eiscp.eISCP(args.onkyo_address)
else:
logging.info('Starting auto-discovery of Onkyo AVRs')
receivers=eiscp.eISCP.discover()
for receiver in receivers:
logging.info("Disocvered %s at %s:%s with id %s" % (
receiver.info['model_name'], receiver.host, receiver.port, receiver.info['identifier']))
if args.onkyo_id:
receivers=[r for r in receivers
if args.onkyo_id in r.info['identifier']]
if len(receivers)==0:
logging.warning("No specified AVRs discovered")
exit(1)
elif len(receivers)!=1:
logging.warning("More than one AVR discovered, please specify explicitely using --onkyo-address or --onkyo-id")
exit(1)
receiver=receivers.pop(0)
logging.info('Discovered AVR at %s',receiver)
# Query some initial values
for icmd in ("PWR","MVL","SLI","SLA","LMD"):
sendavr(icmd+"QSTN")
mqc.loop_start()
def publish(suffix,val,raw):
global topic,mqc
robj={}
robj["val"]=val
if raw is not None:
robj["onkyo_raw"]=raw
mqc.publish(topic+"status/"+suffix,json.dumps(robj),qos=0,retain=True)
while True:
msg=receiver.get(3600)
if msg is not None:
try:
parsed=eiscp.core.iscp_to_command(msg)
# Either part of the parsed command can be a list
if isinstance(parsed[1],str) or isinstance(parsed[1],int):
val=parsed[1]
else:
val=parsed[1][0]
if isinstance(parsed[0],str):
publish(parsed[0],val,msg)
else:
for pp in parsed[0]:
publish(pp,val,msg)
except:
publish(msg[:3],msg[3:],msg) | 0.163679 | 0.058319 |
from chainer import cuda
from chainer import functions
from chainer import gradient_check
import numpy
import pytest
from chainer_chemistry.config import MAX_ATOMIC_NUM
from chainer_chemistry.links.readout.general_readout import GeneralReadout
from chainer_chemistry.utils.permutation import permute_node
# Fixture dimensions: 5 atoms per molecule, 7 hidden features, batch of 2.
atom_size = 5
hidden_dim = 7
batch_size = 2
@pytest.fixture
def readouts():
    """Yield one GeneralReadout per supported aggregation mode.

    NOTE(review): this returns a generator, so it is consumable only once
    per test -- fine here since each test loops over it a single time.
    """
    modes = ['sum', 'max', 'summax']
    return (GeneralReadout(mode=mode) for mode in modes)
@pytest.fixture
def data():
    """Return deterministic float32 (atom_data, y_grad) arrays (fixed seed)."""
    numpy.random.seed(0)
    atom_data = numpy.random.uniform(
        0, high=MAX_ATOMIC_NUM, size=(batch_size, atom_size, hidden_dim)
    ).astype('f')
    y_grad = numpy.random.uniform(-1, 1, (batch_size, hidden_dim)).astype('f')
    return atom_data, y_grad
def check_forward(readout, atom_data):
    """Assert the readout output shape for the readout's mode.

    'sum' and 'max' keep the hidden dimension; 'summax' concatenates both
    aggregations and therefore doubles it.
    """
    y_actual = cuda.to_cpu(readout(atom_data).data)
    # BUG FIX: the original condition was `readout.mode == ('sum' and 'max')`,
    # which evaluates to `readout.mode == 'max'` and silently skipped the
    # shape assertion for 'sum' mode.
    if readout.mode in ('sum', 'max'):
        assert y_actual.shape == (batch_size, hidden_dim)
    elif readout.mode == 'summax':
        assert y_actual.shape == (batch_size, hidden_dim * 2)
def test_forward_cpu(readouts, data):
    """Forward pass on CPU yields correctly shaped outputs for every mode."""
    atom_data = data[0]
    for readout in readouts:
        check_forward(readout, atom_data)
@pytest.mark.gpu
def test_forward_gpu(readouts, data):
    """Forward pass on GPU yields correctly shaped outputs for every mode."""
    atom_data = cuda.to_gpu(data[0])
    for readout in readouts:
        readout.to_gpu()
        check_forward(readout, atom_data)
def test_forward_cpu_assert_raises(data):
    """An unknown readout mode raises ValueError when the readout is run."""
    atom_data = data[0]
    readout = GeneralReadout(mode='invalid')
    with pytest.raises(ValueError):
        cuda.to_cpu(readout(atom_data).data)
def test_backward_cpu(readouts, data):
    """Numerical gradient check on CPU for every readout mode."""
    atom_data, y_grad = data
    for readout in readouts:
        # 'summax' outputs twice the hidden size, so the upstream gradient
        # must be doubled too ('summax' is the last mode, so earlier
        # iterations are unaffected by this rebinding).
        if readout.mode == 'summax':
            y_grad = functions.concat((y_grad, y_grad), axis=1).data
        gradient_check.check_backward(
            readout, atom_data, y_grad, atol=1e-2, rtol=1e-2)
@pytest.mark.gpu
def test_backward_gpu(readouts, data):
    """Numerical gradient check on GPU for every readout mode."""
    atom_data, y_grad = map(cuda.to_gpu, data)
    for readout in readouts:
        readout.to_gpu()
        # See test_backward_cpu: 'summax' needs a doubled upstream gradient.
        if readout.mode == 'summax':
            y_grad = functions.concat((y_grad, y_grad), axis=1).data
        # TODO(nakago): check why tolerance is so high.
        gradient_check.check_backward(
            readout, atom_data, y_grad, atol=1e-1, rtol=1e-1)
def test_forward_cpu_graph_invariant(readouts, data):
    """Readout output must be invariant to permutation of the node axis."""
    atom_data = data[0]
    permutation_index = numpy.random.permutation(atom_size)
    permute_atom_data = permute_node(atom_data, permutation_index, axis=1)
    for readout in readouts:
        y_actual = cuda.to_cpu(readout(atom_data).data)
        permute_y_actual = cuda.to_cpu(readout(permute_atom_data).data)
        numpy.testing.assert_allclose(
            y_actual, permute_y_actual, rtol=1e-5, atol=1e-5)
if __name__ == '__main__':
    # Allow running this test module directly, outside a pytest invocation.
    pytest.main([__file__, '-v', '-s'])
from chainer import functions
from chainer import gradient_check
import numpy
import pytest
from chainer_chemistry.config import MAX_ATOMIC_NUM
from chainer_chemistry.links.readout.general_readout import GeneralReadout
from chainer_chemistry.utils.permutation import permute_node
atom_size = 5
hidden_dim = 7
batch_size = 2
@pytest.fixture
def readouts():
modes = ['sum', 'max', 'summax']
return (GeneralReadout(mode=mode) for mode in modes)
@pytest.fixture
def data():
numpy.random.seed(0)
atom_data = numpy.random.uniform(
0, high=MAX_ATOMIC_NUM, size=(batch_size, atom_size, hidden_dim)
).astype('f')
y_grad = numpy.random.uniform(-1, 1, (batch_size, hidden_dim)).astype('f')
return atom_data, y_grad
def check_forward(readout, atom_data):
y_actual = cuda.to_cpu(readout(atom_data).data)
if readout.mode == ('sum' and 'max'):
assert y_actual.shape == (batch_size, hidden_dim)
elif readout.mode == 'summax':
assert y_actual.shape == (batch_size, hidden_dim * 2)
def test_forward_cpu(readouts, data):
atom_data = data[0]
for readout in readouts:
check_forward(readout, atom_data)
@pytest.mark.gpu
def test_forward_gpu(readouts, data):
atom_data = cuda.to_gpu(data[0])
for readout in readouts:
readout.to_gpu()
check_forward(readout, atom_data)
def test_forward_cpu_assert_raises(data):
atom_data = data[0]
readout = GeneralReadout(mode='invalid')
with pytest.raises(ValueError):
cuda.to_cpu(readout(atom_data).data)
def test_backward_cpu(readouts, data):
atom_data, y_grad = data
for readout in readouts:
if readout.mode == 'summax':
y_grad = functions.concat((y_grad, y_grad), axis=1).data
gradient_check.check_backward(
readout, atom_data, y_grad, atol=1e-2, rtol=1e-2)
@pytest.mark.gpu
def test_backward_gpu(readouts, data):
atom_data, y_grad = map(cuda.to_gpu, data)
for readout in readouts:
readout.to_gpu()
if readout.mode == 'summax':
y_grad = functions.concat((y_grad, y_grad), axis=1).data
# TODO(nakago): check why tolerance is so high.
gradient_check.check_backward(
readout, atom_data, y_grad, atol=1e-1, rtol=1e-1)
def test_forward_cpu_graph_invariant(readouts, data):
atom_data = data[0]
permutation_index = numpy.random.permutation(atom_size)
permute_atom_data = permute_node(atom_data, permutation_index, axis=1)
for readout in readouts:
y_actual = cuda.to_cpu(readout(atom_data).data)
permute_y_actual = cuda.to_cpu(readout(permute_atom_data).data)
numpy.testing.assert_allclose(
y_actual, permute_y_actual, rtol=1e-5, atol=1e-5)
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s']) | 0.457137 | 0.585931 |
import os
import json
from math import pow,log2,ceil
def ordering(wantedlist,inital_penalty = 0):
if wantedlist == []:
return "Cannot enchant: no enchantment given"
sortedlist, numlist = enchantment_split(wantedlist)
total_step = int(log2(len(sortedlist)))+1
penalty = inital_penalty + total_step
if penalty > 6:
return "Cannot enchant: final penalty larger than 6"
# the factor of enchantment (first 16 books, should be enough though), no idea to form an equation or function
multiplyfactor = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
ordering_num = [[] for x in range(total_step)]
ordering = []
# multiplied enchatment factor
xp_extra_level = 0
priority_list = []
# generate base enchantment level list
# i.e. add the sum of penalty level by item and merged books
# also, count the max_step
total_enchantment = len(numlist)
xp_list, max_step = enchant_layer(total_step, total_enchantment, inital_penalty)
while numlist:
temp_xp_list = []
for i in range(len(max_step)):
if max_step[i] == 0:
temp_xp_list.append(1000)
else:
temp_xp_list.append(xp_list[i])
step = temp_xp_list.index(min(temp_xp_list))
existed_num = len(ordering_num[step])
tobe_enchanted = max(numlist)
ordering_num[step].append(tobe_enchanted)
xp_list[step] += tobe_enchanted
max_step[step] -= 1
# combining enchantments cause the level counted more than one times
if existed_num != 0:
xp_extra_level += tobe_enchanted * multiplyfactor[existed_num]
numlist.remove(tobe_enchanted)
xp_max = max(xp_list)
if xp_max > 39:
return "Cannot enchant! max xp larger than 39"
#penalty of merged books
xp_penalty_book = 0
for element in ordering_num:
#penalty for merge book
xp_penalty_book += merged_penalty_book(len(element))
#list steps with name
for j in element:
for k in sortedlist:
if k[1] == j:
ordering.append(k)
sortedlist.remove(k)
break
xp_sum = sum(xp_list)+xp_extra_level+xp_penalty_book
return ordering, xp_max, penalty, xp_sum
def enchant_layer(total_step,total_enchantment, inital_penalty):
xp_list = []
max_step = []
for i in range(total_step):
# add the penalty level by item
xp_list.append(2** i+inital_penalty -1)
num_of_enchantment = min(2**i, total_enchantment)
max_step.append(num_of_enchantment)
total_enchantment -= num_of_enchantment
merged_books_penalty = 2**ceil(log2(num_of_enchantment)) -1
# add the penalty level by merged books
xp_list[i] += merged_books_penalty
return xp_list,max_step
def merged_penalty_book(num):
if num == 0:
return 0
xp = 0
p_list = [0 for x in range(num)]
while len(p_list) != 1 :
new_list = []
for i in range(num//2):
xp += sum([2**x-1 for x in p_list[i*2:i*2+2]])
new_list.append(max(p_list[i*2:i*2+2])+1)
if num %2 != 0:
new_list.append(p_list[-1])
p_list = new_list[:]
num = len(p_list)
return xp
def enchantment_split(wantedlist):
#get the xp required and in(tobe_enchanted it
numlist = [x[1] for x in wantedlist]
numlist.sort(reverse=True)
sortedlist = []
for num in numlist:
for element in wantedlist:
if element[1] == num:
sortedlist.append(element)
wantedlist.remove(element)
return sortedlist,numlist | ordering.py | import os
import json
from math import pow,log2,ceil
def ordering(wantedlist,inital_penalty = 0):
if wantedlist == []:
return "Cannot enchant: no enchantment given"
sortedlist, numlist = enchantment_split(wantedlist)
total_step = int(log2(len(sortedlist)))+1
penalty = inital_penalty + total_step
if penalty > 6:
return "Cannot enchant: final penalty larger than 6"
# the factor of enchantment (first 16 books, should be enough though), no idea to form an equation or function
multiplyfactor = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
ordering_num = [[] for x in range(total_step)]
ordering = []
# multiplied enchatment factor
xp_extra_level = 0
priority_list = []
# generate base enchantment level list
# i.e. add the sum of penalty level by item and merged books
# also, count the max_step
total_enchantment = len(numlist)
xp_list, max_step = enchant_layer(total_step, total_enchantment, inital_penalty)
while numlist:
temp_xp_list = []
for i in range(len(max_step)):
if max_step[i] == 0:
temp_xp_list.append(1000)
else:
temp_xp_list.append(xp_list[i])
step = temp_xp_list.index(min(temp_xp_list))
existed_num = len(ordering_num[step])
tobe_enchanted = max(numlist)
ordering_num[step].append(tobe_enchanted)
xp_list[step] += tobe_enchanted
max_step[step] -= 1
# combining enchantments cause the level counted more than one times
if existed_num != 0:
xp_extra_level += tobe_enchanted * multiplyfactor[existed_num]
numlist.remove(tobe_enchanted)
xp_max = max(xp_list)
if xp_max > 39:
return "Cannot enchant! max xp larger than 39"
#penalty of merged books
xp_penalty_book = 0
for element in ordering_num:
#penalty for merge book
xp_penalty_book += merged_penalty_book(len(element))
#list steps with name
for j in element:
for k in sortedlist:
if k[1] == j:
ordering.append(k)
sortedlist.remove(k)
break
xp_sum = sum(xp_list)+xp_extra_level+xp_penalty_book
return ordering, xp_max, penalty, xp_sum
def enchant_layer(total_step,total_enchantment, inital_penalty):
xp_list = []
max_step = []
for i in range(total_step):
# add the penalty level by item
xp_list.append(2** i+inital_penalty -1)
num_of_enchantment = min(2**i, total_enchantment)
max_step.append(num_of_enchantment)
total_enchantment -= num_of_enchantment
merged_books_penalty = 2**ceil(log2(num_of_enchantment)) -1
# add the penalty level by merged books
xp_list[i] += merged_books_penalty
return xp_list,max_step
def merged_penalty_book(num):
if num == 0:
return 0
xp = 0
p_list = [0 for x in range(num)]
while len(p_list) != 1 :
new_list = []
for i in range(num//2):
xp += sum([2**x-1 for x in p_list[i*2:i*2+2]])
new_list.append(max(p_list[i*2:i*2+2])+1)
if num %2 != 0:
new_list.append(p_list[-1])
p_list = new_list[:]
num = len(p_list)
return xp
def enchantment_split(wantedlist):
#get the xp required and in(tobe_enchanted it
numlist = [x[1] for x in wantedlist]
numlist.sort(reverse=True)
sortedlist = []
for num in numlist:
for element in wantedlist:
if element[1] == num:
sortedlist.append(element)
wantedlist.remove(element)
return sortedlist,numlist | 0.113113 | 0.229503 |
import concurrent.futures
import sys
import threading
from concurrent.futures import ThreadPoolExecutor
import tinify
import os
# tinify.key = "<KEY>"
total_pic_old_size = 0
total_pic_new_size = 0
total_pic_num = 0
# all thread pool tasks futures
futures = []
# tinypng API key cache file
API_KEY_CACHE_PATH = "key_cache.txt"
# indicate weather the program running into error
error = False
def create_thread_pool():
core_num = get_cpu_core_num()
pool = ThreadPoolExecutor(max_workers=core_num, thread_name_prefix="thread_for_tiny")
return pool
def get_cpu_core_num():
return os.cpu_count()
def get_file_size_KB(file):
file_size = os.path.getsize(file) / 1024.0
return round(file_size, 2)
def get_file_size_MB(file):
file_size = os.path.getsize(file) / 1024.0 / 1024.0
return round(file_size, 2)
def colored(r, g, b, text):
return "\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(r, g, b, text)
def compress(path, thread_pool):
global total_pic_old_size
global total_pic_new_size
global total_pic_num
global futures
if not os.path.isdir(path):
file = path
old_file_size = get_file_size_KB(file)
old_file_size_str = str(round(old_file_size, 2))
if is_pic(file):
total_pic_num += 1
total_pic_old_size += old_file_size
future = thread_pool.submit(tiny_task, file, old_file_size)
futures.append(future)
future.add_done_callback(tiny_task_result_callback)
else:
print("current file: {0}, size: {1}KB".format(file, old_file_size_str))
else:
for file in os.listdir(path):
sub_file = os.path.join(path, file)
compress(sub_file, thread_pool)
def is_pic(file):
return file.endswith(".jpg") \
or file.endswith(".jpeg") \
or file.endswith(".png")
def tiny_task(file, old_file_size):
print("\ncurrent file is pic: {0}, size: {1}KB, tinify it...".format(file, str(round(old_file_size, 2))))
print("current tiny thread is {0}".format(threading.current_thread().name))
try:
source = tinify.from_file(file)
source.to_file(file)
return get_file_size_KB(file), old_file_size, file
except tinify.AccountError:
global error
error = True
print(colored(255, 25, 25, "\nyour API key is invalid! try a new one!"))
def tiny_task_result_callback(future):
global total_pic_old_size
global total_pic_new_size
result = future.result()
if result is None:
return
new_file_size = result[0]
old_file_size = result[1]
file = result[2]
total_pic_new_size += new_file_size
new_file_size_str = str(round(new_file_size, 2))
percent_str = str(round(100 - 100 * new_file_size / old_file_size, 2))
print(colored(25, 255, 25, "\nfile {0} tinify done! now the pic size: {1}KB, shrunk by {2}%".format(
file, new_file_size_str, percent_str)))
if __name__ == '__main__':
if len(sys.argv) <= 1:
# no API key, try cache
if not os.path.exists(API_KEY_CACHE_PATH):
raise Exception("no API key input and no cached key found!")
else:
with open("./" + API_KEY_CACHE_PATH, 'r') as f:
key = f.read()
print("find cached API key: {0}".format(key))
tinify.key = key
else:
key = sys.argv[1]
tinify.key = key
with open("./" + API_KEY_CACHE_PATH, 'w+') as f:
f.write(key)
print("current path: " + os.path.curdir)
thread_pool = create_thread_pool()
compress(os.path.curdir, thread_pool)
# wait until all tasks done
concurrent.futures.wait(futures)
if error:
print(colored(255, 25, 25, "\nError occurred, please check if you run this program properly."))
elif total_pic_num == 0:
print("\nDone! But no pics were found in the directory.")
else:
print(colored(255, 198, 35,
"\nCompress done! All {5} pics shrunk from {0}KB({1}MB) to {2}KB({3}MB), shrunk by {4}%.".format(
round(total_pic_old_size, 2),
round(total_pic_old_size / 1024, 2), round(total_pic_new_size, 2),
round(total_pic_new_size / 1024, 2),
str(round(100 - 100 * total_pic_new_size / total_pic_old_size, 2)), total_pic_num))) | main.py | import concurrent.futures
import sys
import threading
from concurrent.futures import ThreadPoolExecutor
import tinify
import os
# tinify.key = "<KEY>"
total_pic_old_size = 0
total_pic_new_size = 0
total_pic_num = 0
# all thread pool tasks futures
futures = []
# tinypng API key cache file
API_KEY_CACHE_PATH = "key_cache.txt"
# indicate weather the program running into error
error = False
def create_thread_pool():
core_num = get_cpu_core_num()
pool = ThreadPoolExecutor(max_workers=core_num, thread_name_prefix="thread_for_tiny")
return pool
def get_cpu_core_num():
return os.cpu_count()
def get_file_size_KB(file):
file_size = os.path.getsize(file) / 1024.0
return round(file_size, 2)
def get_file_size_MB(file):
file_size = os.path.getsize(file) / 1024.0 / 1024.0
return round(file_size, 2)
def colored(r, g, b, text):
return "\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(r, g, b, text)
def compress(path, thread_pool):
global total_pic_old_size
global total_pic_new_size
global total_pic_num
global futures
if not os.path.isdir(path):
file = path
old_file_size = get_file_size_KB(file)
old_file_size_str = str(round(old_file_size, 2))
if is_pic(file):
total_pic_num += 1
total_pic_old_size += old_file_size
future = thread_pool.submit(tiny_task, file, old_file_size)
futures.append(future)
future.add_done_callback(tiny_task_result_callback)
else:
print("current file: {0}, size: {1}KB".format(file, old_file_size_str))
else:
for file in os.listdir(path):
sub_file = os.path.join(path, file)
compress(sub_file, thread_pool)
def is_pic(file):
return file.endswith(".jpg") \
or file.endswith(".jpeg") \
or file.endswith(".png")
def tiny_task(file, old_file_size):
print("\ncurrent file is pic: {0}, size: {1}KB, tinify it...".format(file, str(round(old_file_size, 2))))
print("current tiny thread is {0}".format(threading.current_thread().name))
try:
source = tinify.from_file(file)
source.to_file(file)
return get_file_size_KB(file), old_file_size, file
except tinify.AccountError:
global error
error = True
print(colored(255, 25, 25, "\nyour API key is invalid! try a new one!"))
def tiny_task_result_callback(future):
global total_pic_old_size
global total_pic_new_size
result = future.result()
if result is None:
return
new_file_size = result[0]
old_file_size = result[1]
file = result[2]
total_pic_new_size += new_file_size
new_file_size_str = str(round(new_file_size, 2))
percent_str = str(round(100 - 100 * new_file_size / old_file_size, 2))
print(colored(25, 255, 25, "\nfile {0} tinify done! now the pic size: {1}KB, shrunk by {2}%".format(
file, new_file_size_str, percent_str)))
if __name__ == '__main__':
if len(sys.argv) <= 1:
# no API key, try cache
if not os.path.exists(API_KEY_CACHE_PATH):
raise Exception("no API key input and no cached key found!")
else:
with open("./" + API_KEY_CACHE_PATH, 'r') as f:
key = f.read()
print("find cached API key: {0}".format(key))
tinify.key = key
else:
key = sys.argv[1]
tinify.key = key
with open("./" + API_KEY_CACHE_PATH, 'w+') as f:
f.write(key)
print("current path: " + os.path.curdir)
thread_pool = create_thread_pool()
compress(os.path.curdir, thread_pool)
# wait until all tasks done
concurrent.futures.wait(futures)
if error:
print(colored(255, 25, 25, "\nError occurred, please check if you run this program properly."))
elif total_pic_num == 0:
print("\nDone! But no pics were found in the directory.")
else:
print(colored(255, 198, 35,
"\nCompress done! All {5} pics shrunk from {0}KB({1}MB) to {2}KB({3}MB), shrunk by {4}%.".format(
round(total_pic_old_size, 2),
round(total_pic_old_size / 1024, 2), round(total_pic_new_size, 2),
round(total_pic_new_size / 1024, 2),
str(round(100 - 100 * total_pic_new_size / total_pic_old_size, 2)), total_pic_num))) | 0.197832 | 0.079496 |