input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# coding=utf-8
"""Class related metaclasses.
This module defines:
* Class,
* PlainClass,
* Attribute,
"""
from typing_extensions import Literal
from typing import List, Optional, Dict, Union, Any
import abc
import collections
from modelscript.megamodels.elements import SourceModelElement
from modelscript.megamodels.models import Placeholder
from modelscript.megamodels.py import MAttribute, MComposition
from modelscript.metamodels.classes import (
PackagableElement,
Entity,
Member)
from modelscript.metamodels.classes.associations import (
Role)
from modelscript.base.exceptions import (
MethodToBeDefined)
Later = Optional
__all__ = (
'Class',
'PlainClass',
'Attribute')
class Class(PackagableElement, Entity, metaclass=abc.ABCMeta):
    """Classes.

    A class owns attributes and is attached to roles, both "opposite"
    roles and "played" roles.  After inheritance resolution (performed
    by the "finalize" passes) the inherited counterparts of these
    features become available through the ``inherited*`` properties.
    """

    META_COMPOSITIONS = [
        # 'attributes', TODO:3 restore, raise an exception
    ]

    isAbstract: bool
    # Whether the class is declared abstract.

    superclasses: List[Union[Placeholder, 'Class']]
    # Names of superclasses, later resolved as classes.

    subclasses: List['Class']
    # Subclasses. [] during fillModel, then set during resolve().
    # FIX(review): was annotated "subclassed", a name that never matched
    # the attribute actually assigned in __init__.

    _ownedAttributeNamed: Dict[str, 'Attribute']
    # Attributes directly declared by the class, indexed by name.
    # No inherited attributes here.

    _ownedOppositeRoleNamed: Dict[str, Role]
    # Defined by finalize.add_attached_roles_to_classes.
    # The opposite roles indexed by their name. This is possible
    # because all opposite roles have a different name, which is not
    # the case for played roles.

    _ownedPlayedRoles: List[Role]
    # Defined by finalize.add_attached_roles_to_classes.
    # A list because various played roles can have the same name.

    # -------- inherited part ----------------------------------

    inheritanceCycles: Later[List[List['Class']]]
    # The list of cycles starting and going to the current class.
    # For instance [[A,B,A], [A,A]]. Set during finalize.

    _inheritedAttributeNamed: Later[Dict[str, 'Attribute']]
    # Dict of inherited attributes from all super classes, by name.
    # None until finalize.add_inherited_attributes has run; None thus
    # serves as a marker of whether that pass has been executed.

    _inheritedOppositeRoleNamed: Optional[Dict[str, Role]]
    # Dict of inherited opposite roles from all super classes, by name.
    # This is ok since all opposite roles must have a different name,
    # in contrast with played roles that can have duplicated names.
    # None until finalize.add_inherited_roles has run.

    _inheritedPlayedRoles: Optional[List[Role]]
    # List of inherited played roles from all super classes.
    # A list rather than a dict since various played roles can have
    # the same name. None until finalize.add_inherited_roles has run.

    def __init__(self,
                 name: str,
                 model,
                 isAbstract: bool = False,
                 superclasses: List[Placeholder] = (),
                 package=None,
                 lineNo=None, description=None, astNode=None):
        super(Class, self).__init__(
            name=name,
            model=model,
            package=package,
            astNode=astNode,
            lineNo=lineNo,
            description=description)
        self.isAbstract = isAbstract
        self.superclasses = superclasses
        self.subclasses = []
        # Subclasses are [] during fillModel then set during resolve()
        self._ownedAttributeNamed = collections.OrderedDict()
        # Will be filled by the parser fillModel
        self._ownedOppositeRoleNamed = collections.OrderedDict()
        self._ownedPlayedRoles = []

        # -------- inherited part ----------------------------------
        # Each of the following stays None until the corresponding
        # finalize pass has run; None thus marks "not computed yet".
        self.inheritanceCycles = None
        self._inheritedAttributeNamed = None
        # Computed by finalize.add_inherited_attributes
        self._inheritedOppositeRoleNamed = None
        # Computed by finalize.add_inherited_roles
        self._inheritedPlayedRoles = None
        # Computed by finalize.add_inherited_roles

    # -----------------------------------------------------------------
    #  ownedAttributes
    # -----------------------------------------------------------------

    @property
    def ownedAttributes(self):
        """Attributes directly declared by this class (no inheritance)."""
        return list(self._ownedAttributeNamed.values())

    def ownedAttribute(self, name):
        """Owned attribute with the given name, or None."""
        if name in self._ownedAttributeNamed:
            return self._ownedAttributeNamed[name]
        else:
            return None

    @property
    def ownedAttributeNames(self):
        return list(self._ownedAttributeNamed.keys())

    # -----------------------------------------------------------------
    #  inheritedAttributes
    # -----------------------------------------------------------------

    @property
    def inheritedAttributes(self):
        if self._inheritedAttributeNamed is None:
            # This should happen just when a cycle has been detected.
            # In this case a fatal issue has been raised, which is ok,
            # but the rest of finalize code didn't execute properly.
            # The [] value here is to ensure that the model is still
            # usable and that the cycle detection fails gracefully.
            # Instead of raising an exception in this method it is best
            # to ignore inherited attributes. This prevents a new
            # exception when using the model (e.g. from the printer);
            # it is not nice to require clients to check which
            # attributes are defined or not.
            return []
        return list(self._inheritedAttributeNamed.values())

    def inheritedAttribute(self, name):
        """Inherited attribute with the given name, or None."""
        if self._inheritedAttributeNamed is None:
            # See inheritedAttributes for the rationale of the guard.
            # FIX(review): was "return []", inconsistent with the other
            # single-element lookups that return None on a miss, and
            # would have made attribute() return a truthy-looking [].
            return None
        if name in self._inheritedAttributeNamed:
            return self._inheritedAttributeNamed[name]
        else:
            return None

    @property
    def inheritedAttributeNames(self):
        if self._inheritedAttributeNamed is None:
            # see inheritedAttributes
            return []
        return list(self._inheritedAttributeNamed.keys())

    # -----------------------------------------------------------------
    #  attributes
    # -----------------------------------------------------------------

    @property
    def attributes(self):
        """Owned attributes followed by inherited attributes."""
        return self.ownedAttributes+self.inheritedAttributes

    def attribute(self, name):
        """Owned attribute first, then inherited attribute, or None."""
        oa = self.ownedAttribute(name)
        if oa is not None:
            return oa
        else:
            return self.inheritedAttribute(name)

    @property
    def attributeNames(self):
        return self.ownedAttributeNames+self.inheritedAttributeNames

    # -----------------------------------------------------------------
    #  ownedOppositeRoles
    # -----------------------------------------------------------------

    @property
    def ownedOppositeRoles(self):
        return list(self._ownedOppositeRoleNamed.values())

    def ownedOppositeRole(self, name):
        """Owned opposite role with the given name, or None."""
        if name in self._ownedOppositeRoleNamed:
            return self._ownedOppositeRoleNamed[name]
        else:
            return None

    @property
    def ownedOppositeRoleNames(self):
        return list(self._ownedOppositeRoleNamed.keys())

    # -----------------------------------------------------------------
    #  inheritedOppositeRoles
    # -----------------------------------------------------------------

    @property
    def inheritedOppositeRoles(self):
        if self._inheritedOppositeRoleNamed is None:
            # When there is no cycle, the content of the attribute
            # is a dict. Otherwise it will remain None.
            # If a cycle is detected a fatal issue is raised, which is
            # ok, but the rest of finalize code didn't execute properly.
            # The [] value here ensures that the model is still usable
            # and that the cycle detection fails gracefully.
            # See inheritedAttributes for the full rationale.
            return []
        # FIX(review): was returning _inheritedAttributeNamed.values()
        # (copy-paste error); opposite roles were never returned here.
        return list(self._inheritedOppositeRoleNamed.values())

    def inheritedOppositeRole(self, name):
        """Inherited opposite role with the given name, or None."""
        if self._inheritedOppositeRoleNamed is None:
            # see inheritedOppositeRoles
            return None
        if name in self._inheritedOppositeRoleNamed:
            return self._inheritedOppositeRoleNamed[name]
        else:
            return None

    @property
    def inheritedOppositeRoleNames(self):
        if self._inheritedOppositeRoleNamed is None:
            # see inheritedOppositeRoles
            return []
        return list(self._inheritedOppositeRoleNamed.keys())

    # -----------------------------------------------------------------
    #  oppositeRoles
    # -----------------------------------------------------------------

    @property
    def oppositeRoles(self):
        return self.ownedOppositeRoles+self.inheritedOppositeRoles

    def oppositeRole(self, name):
        """Owned opposite role first, then inherited one, or None."""
        oor = self.ownedOppositeRole(name)
        if oor is not None:
            return oor
        else:
            return self.inheritedOppositeRole(name)

    @property
    def oppositeRoleNames(self):
        return self.ownedOppositeRoleNames\
               + self.inheritedOppositeRoleNames

    # -----------------------------------------------------------------
    #  ownedPlayedRoles
    # -----------------------------------------------------------------

    @property
    def ownedPlayedRoles(self):
        return list(self._ownedPlayedRoles)

    # There is no method ownedPlayedRole(self, name) because the
    # ownedPlayedRoles are not indexed by name since various roles
    # may have the same name.

    @property
    def ownedPlayedRoleNames(self):
        return [
            r.name for r in self._ownedPlayedRoles]

    # -----------------------------------------------------------------
    #  inheritedPlayedRoles
    # -----------------------------------------------------------------

    @property
    def inheritedPlayedRoles(self):
        # FIX(review): the guard tested _inheritedOppositeRoleNamed;
        # both are set by the same finalize pass, but the marker for
        # played roles is _inheritedPlayedRoles itself.
        if self._inheritedPlayedRoles is None:
            # Remains None when an inheritance cycle aborted finalize;
            # see inheritedAttributes for the rationale of returning [].
            return []
        return list(self._inheritedPlayedRoles)

    @property
    def inheritedPlayedRoleNames(self):
        if self._inheritedPlayedRoles is None:
            # see inheritedPlayedRoles
            return []
        # FIX(review): was returning the roles themselves instead of
        # their names.
        return [r.name for r in self._inheritedPlayedRoles]

    # -----------------------------------------------------------------
    #  playedRoles
    # -----------------------------------------------------------------

    @property
    def playedRoles(self):
        return self.ownedPlayedRoles+self.inheritedPlayedRoles

    @property
    def playedRoleNames(self):
        # FIX(review): was adding inheritedPlayedRoles (Role objects)
        # to a list of names.
        return self.ownedPlayedRoleNames\
               + self.inheritedPlayedRoleNames

    # ------- misc ------------------------------------------------------

    @property
    def cycles(self) -> str:
        """A string describing the inheritance cycles starting from
        this class, or None if there is no cycle.
        """
        if self.inheritanceCycles:
            return (
                ', '.join(
                    ('<'.join(c.name for c in cycle))
                    for cycle in self.inheritanceCycles))
        else:
            return None

    @property
    def names(self):
        # NOTE(review): invariantNames is not defined in this class;
        # presumably provided by Entity — confirm.
        return (
            self.attributeNames
            + self.invariantNames)

    @property
    def idPrint(self) -> List['Attribute']:
        """List of all {id} attributes."""
        return [
            a for a in self.attributes
            if a.isId]

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<class %s>' % self.name
class PlainClass(Class):
    """Plain classes, that is, classes that are not association classes."""

    def __init__(self, name, model,
                 isAbstract=False, superclasses=(),
                 package=None,
                 lineNo=None, description=None, astNode=None):
        # Delegate all common initialization to Class.
        super().__init__(
            name=name,
            model=model,
            isAbstract=isAbstract,
            superclasses=superclasses,
            package=package,
            astNode=astNode,
            lineNo=lineNo,
            description=description)
        # Register this class in the model's index of plain classes.
        self.model._plainClassNamed[name] = self

    def isPlainClass(self):
        # By definition: instances of PlainClass are plain classes.
        return True
class Attribute(SourceModelElement, Member):
""" Attributes.
"""
class_: 'Class'
"""Class of the attribute."""
type: Union[str, 'AttributeType']
"""Type of the attribute (AttributeType). An AttributeType is
a simple type plus optionality and multiplicity.
Something that could be like String[0..1] or String[*]."""
isDerived: bool
"""Whether the attribute is | |
represents an available package upgrade.
COMPLIANCE: This represents a Compliance Note
DSSE_ATTESTATION: This represents a DSSE attestation Note
"""
NOTE_KIND_UNSPECIFIED = 0
VULNERABILITY = 1
BUILD = 2
IMAGE = 3
PACKAGE = 4
DEPLOYMENT = 5
DISCOVERY = 6
ATTESTATION = 7
UPGRADE = 8
COMPLIANCE = 9
DSSE_ATTESTATION = 10
attestation = _messages.MessageField('AttestationOccurrence', 1)
build = _messages.MessageField('BuildOccurrence', 2)
compliance = _messages.MessageField('ComplianceOccurrence', 3)
createTime = _messages.StringField(4)
deployment = _messages.MessageField('DeploymentOccurrence', 5)
discovery = _messages.MessageField('DiscoveryOccurrence', 6)
dsseAttestation = _messages.MessageField('DSSEAttestationOccurrence', 7)
envelope = _messages.MessageField('Envelope', 8)
image = _messages.MessageField('ImageOccurrence', 9)
kind = _messages.EnumField('KindValueValuesEnum', 10)
name = _messages.StringField(11)
noteName = _messages.StringField(12)
package = _messages.MessageField('PackageOccurrence', 13)
remediation = _messages.StringField(14)
resourceUri = _messages.StringField(15)
updateTime = _messages.StringField(16)
upgrade = _messages.MessageField('UpgradeOccurrence', 17)
vulnerability = _messages.MessageField('VulnerabilityOccurrence', 18)
# NOTE(review): apitools-style generated message class — field numbers
# are wire identifiers; do not renumber or reorder.
class OndemandscanningProjectsLocationsOperationsCancelRequest(_messages.Message):
  r"""A OndemandscanningProjectsLocationsOperationsCancelRequest object.

  Fields:
    name: The name of the operation resource to be cancelled.
  """

  # Field 1, required: full resource name of the operation to cancel.
  name = _messages.StringField(1, required=True)
# NOTE(review): apitools-style generated message class — field numbers
# are wire identifiers; do not renumber or reorder.
class OndemandscanningProjectsLocationsOperationsDeleteRequest(_messages.Message):
  r"""A OndemandscanningProjectsLocationsOperationsDeleteRequest object.

  Fields:
    name: The name of the operation resource to be deleted.
  """

  # Field 1, required: full resource name of the operation to delete.
  name = _messages.StringField(1, required=True)
# NOTE(review): apitools-style generated message class — field numbers
# are wire identifiers; do not renumber or reorder.
class OndemandscanningProjectsLocationsOperationsGetRequest(_messages.Message):
  r"""A OndemandscanningProjectsLocationsOperationsGetRequest object.

  Fields:
    name: The name of the operation resource.
  """

  # Field 1, required: full resource name of the operation to fetch.
  name = _messages.StringField(1, required=True)
# NOTE(review): apitools-style generated message class — field numbers
# are wire identifiers; do not renumber or reorder.
class OndemandscanningProjectsLocationsOperationsListRequest(_messages.Message):
  r"""A OndemandscanningProjectsLocationsOperationsListRequest object.

  Fields:
    filter: The standard list filter.
    name: The name of the operation's parent resource.
    pageSize: The standard list page size.
    pageToken: The standard list page token.
  """

  filter = _messages.StringField(1)
  # Field 2, required: parent resource whose operations are listed.
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
# NOTE(review): apitools-style generated message class — field numbers
# are wire identifiers; do not renumber or reorder.
class OndemandscanningProjectsLocationsOperationsWaitRequest(_messages.Message):
  r"""A OndemandscanningProjectsLocationsOperationsWaitRequest object.

  Fields:
    name: The name of the operation resource to wait on.
    timeout: The maximum duration to wait before timing out. If left blank,
      the wait will be at most the time permitted by the underlying HTTP/RPC
      protocol. If RPC context deadline is also specified, the shorter one
      will be used.
  """

  # Field 1, required: full resource name of the operation to wait on.
  name = _messages.StringField(1, required=True)
  # Field 2: duration string; blank means protocol-limited wait.
  timeout = _messages.StringField(2)
# NOTE(review): apitools-style generated message class — field numbers
# are wire identifiers; do not renumber or reorder.
class OndemandscanningProjectsLocationsScansAnalyzePackagesRequest(_messages.Message):
  r"""A OndemandscanningProjectsLocationsScansAnalyzePackagesRequest object.

  Fields:
    analyzePackagesRequestV1: A AnalyzePackagesRequestV1 resource to be passed
      as the request body.
    parent: Required. The parent of the resource for which analysis is
      requested. Format: projects/[project_name]/locations/[location]
  """

  # Field 1: the request body message.
  analyzePackagesRequestV1 = _messages.MessageField('AnalyzePackagesRequestV1', 1)
  # Field 2, required: parent resource path.
  parent = _messages.StringField(2, required=True)
# NOTE(review): apitools-style generated message class — field numbers
# are wire identifiers; do not renumber or reorder.
class OndemandscanningProjectsLocationsScansVulnerabilitiesListRequest(_messages.Message):
  r"""A OndemandscanningProjectsLocationsScansVulnerabilitiesListRequest
  object.

  Fields:
    pageSize: The number of vulnerabilities to retrieve.
    pageToken: The page token, resulting from a previous call to
      ListVulnerabilities.
    parent: Required. The parent of the collection of Vulnerabilities being
      requested. Format:
      projects/[project_name]/locations/[location]/scans/[scan_id]
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  # Field 3, required: parent scan resource path.
  parent = _messages.StringField(3, required=True)
# NOTE(review): apitools-style generated message class — field numbers
# are wire identifiers; do not renumber or reorder.
class Operation(_messages.Message):
  r"""This resource represents a long-running operation that is the result of
  a network API call.

  Messages:
    MetadataValue: Service-specific metadata associated with the operation. It
      typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    ResponseValue: The normal response of the operation in case of success. If
      the original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource. For other
      methods, the response should have the type `XxxResponse`, where `Xxx` is
      the original method name. For example, if the original method name is
      `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

  Fields:
    done: If the value is `false`, it means the operation is still in
      progress. If `true`, the operation is completed, and either `error` or
      `response` is available.
    error: The error result of the operation in case of failure or
      cancellation.
    metadata: Service-specific metadata associated with the operation. It
      typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    name: The server-assigned name, which is only unique within the same
      service that originally returns it. If you use the default HTTP mapping,
      the `name` should be a resource name ending with
      `operations/{unique_id}`.
    response: The normal response of the operation in case of success. If the
      original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource. For other
      methods, the response should have the type `XxxResponse`, where `Xxx` is
      the original method name. For example, if the original method name is
      `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
  """

  # Unknown JSON keys are preserved in additionalProperties rather
  # than dropped.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Service-specific metadata associated with the operation. It typically
    contains progress information and common metadata such as create time.
    Some services might not provide such metadata. Any method that returns a
    long-running operation should document the metadata type, if any.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Unknown JSON keys are preserved in additionalProperties rather
  # than dropped.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResponseValue(_messages.Message):
    r"""The normal response of the operation in case of success. If the
    original method returns no data on success, such as `Delete`, the response
    is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

    Messages:
      AdditionalProperty: An additional property for a ResponseValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResponseValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  done = _messages.BooleanField(1)
  error = _messages.MessageField('Status', 2)
  metadata = _messages.MessageField('MetadataValue', 3)
  name = _messages.StringField(4)
  response = _messages.MessageField('ResponseValue', 5)
# NOTE(review): apitools-style generated message class — field numbers
# are wire identifiers; do not renumber or reorder.
class PackageData(_messages.Message):
  r"""A PackageData object.

  Enums:
    PackageTypeValueValuesEnum: The type of package: os, maven, go, etc.

  Fields:
    cpeUri: The cpe_uri in [cpe format] (https://cpe.mitre.org/specification/)
      in which the vulnerability may manifest. Examples include distro or
      storage location for vulnerable jar.
    os: The OS affected by a vulnerability This field is deprecated and the
      information is in cpe_uri
    osVersion: The version of the OS This field is deprecated and the
      information is in cpe_uri
    package: The package being analysed for vulnerabilities
    packageType: The type of package: os, maven, go, etc.
    unused: A string attribute.
    version: The version of the package being analysed
  """

  class PackageTypeValueValuesEnum(_messages.Enum):
    r"""The type of package: os, maven, go, etc.

    Values:
      PACKAGE_TYPE_UNSPECIFIED: <no description>
      OS: Operating System
      MAVEN: Java packages from Maven.
      GO: Go third-party packages.
      GO_STDLIB: Go toolchain + standard library packages.
    """
    PACKAGE_TYPE_UNSPECIFIED = 0
    OS = 1
    MAVEN = 2
    GO = 3
    GO_STDLIB = 4

  cpeUri = _messages.StringField(1)
  # Deprecated per the docstring; the information is in cpe_uri.
  os = _messages.StringField(2)
  # Deprecated per the docstring; the information is in cpe_uri.
  osVersion = _messages.StringField(3)
  package = _messages.StringField(4)
  packageType = _messages.EnumField('PackageTypeValueValuesEnum', 5)
  unused = _messages.StringField(6)
  version = _messages.StringField(7)
class PackageIssue(_messages.Message):
r"""A detail for a distro and package this vulnerability occurrence was
found in and its associated fix (if one is available).
Enums:
EffectiveSeverityValueValuesEnum: Output only. The distro or language
system assigned severity for this vulnerability when that is available
and note provider assigned severity when it is not available.
Fields:
affectedCpeUri: Required. The [CPE
URI](https://cpe.mitre.org/specification/) this vulnerability was found
in.
affectedPackage: Required. The package this vulnerability was found in.
affectedVersion: Required. The version of the package that is installed on
the resource affected by this vulnerability.
effectiveSeverity: Output only. The distro or language system assigned
severity for this vulnerability when that is available and note provider
assigned severity when it is not available.
fixAvailable: Output only. Whether a fix is available for this package.
fixedCpeUri: The [CPE URI](https://cpe.mitre.org/specification/) this
vulnerability was fixed in. It is possible for this to be different from
the affected_cpe_uri.
fixedPackage: The package this vulnerability was fixed in. It is possible
for this to be different from the affected_package.
fixedVersion: Required. The version of the package this vulnerability was
fixed in. Setting this to VersionKind.MAXIMUM means | |
<filename>pyswip/core.py
# -*- coding: utf-8 -*-
# pyswip -- Python SWI-Prolog bridge
# Copyright (c) 2007-2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import os
import sys
import glob
import warnings
import atexit
from subprocess import Popen, PIPE
from ctypes import *
from ctypes.util import find_library
import tempfile
import shutil
# To initialize the SWI-Prolog environment, two things need to be done: the
# first is to find where the SO/DLL is located and the second is to find the
# SWI-Prolog home, to get the saved state.
#
# The goal of the (entangled) process below is to make the library installation
# independent.
def create_temporary_copy(path):
    """Copy the file at ``path`` into a fresh temporary file.

    Returns a ``(file_object, absolute_path)`` pair.  The temporary
    file is deleted as soon as ``file_object`` is closed (default
    NamedTemporaryFile behaviour), so the caller must keep the handle
    open for as long as the copy is needed.
    """
    temp = tempfile.NamedTemporaryFile(dir=tempfile.gettempdir())
    # FIX(review): NamedTemporaryFile.name is already the absolute path
    # of the file; the previous os.path.join(gettempdir(), temp.name)
    # only worked because os.path.join discards its first component
    # when the second one is absolute.
    temp_path = temp.name
    shutil.copy2(path, temp_path)
    return temp, temp_path
class SWIPl:
def __init__(self):
# Find the path and resource file. SWI_HOME_DIR shall be treated as a constant
# by users of this module
self._path, self.SWI_HOME_DIR = SWIPl._findSwipl()
SWIPl._fixWindowsPath(self._path)
temp, temp_path = create_temporary_copy(self._path)
# Load the library
self._lib = CDLL(temp_path)
temp.close()
self.PL_initialise = self._lib.PL_initialise
self.PL_initialise = check_strings(None, 1)(self.PL_initialise)
# PL_initialise.argtypes = [c_int, c_c??
self.PL_open_foreign_frame = self._lib.PL_open_foreign_frame
self.PL_open_foreign_frame.restype = fid_t
self.PL_foreign_control = self._lib.PL_foreign_control
self.PL_foreign_control.argtypes = [control_t]
self.PL_foreign_control.restype = c_int
self.PL_foreign_context = self._lib.PL_foreign_context
self.PL_foreign_context.argtypes = [control_t]
self.PL_foreign_context.restype = intptr_t
self.PL_retry = self._lib._PL_retry
self.PL_retry.argtypes = [intptr_t]
self.PL_retry.restype = foreign_t
self.PL_new_term_ref = self._lib.PL_new_term_ref
self.PL_new_term_ref.restype = term_t
self.PL_new_term_refs = self._lib.PL_new_term_refs
self.PL_new_term_refs.argtypes = [c_int]
self.PL_new_term_refs.restype = term_t
self.PL_chars_to_term = self._lib.PL_chars_to_term
self.PL_chars_to_term.argtypes = [c_char_p, term_t]
self.PL_chars_to_term.restype = c_int
self.PL_chars_to_term = check_strings(0, None)(self.PL_chars_to_term)
self.PL_call = self._lib.PL_call
self.PL_call.argtypes = [term_t, module_t]
self.PL_call.restype = c_int
self.PL_call_predicate = self._lib.PL_call_predicate
self.PL_call_predicate.argtypes = [module_t, c_int, predicate_t, term_t]
self.PL_call_predicate.restype = c_int
self.PL_discard_foreign_frame = self._lib.PL_discard_foreign_frame
self.PL_discard_foreign_frame.argtypes = [fid_t]
self.PL_discard_foreign_frame.restype = None
self.PL_put_list_chars = self._lib.PL_put_list_chars
self.PL_put_list_chars.argtypes = [term_t, c_char_p]
self.PL_put_list_chars.restype = c_int
self.PL_put_list_chars = check_strings(1, None)(self.PL_put_list_chars)
# PL_EXPORT(void) PL_register_atom(atom_t a);
self.PL_register_atom = self._lib.PL_register_atom
self.PL_register_atom.argtypes = [atom_t]
self.PL_register_atom.restype = None
# PL_EXPORT(void) PL_unregister_atom(atom_t a);
self.PL_unregister_atom = self._lib.PL_unregister_atom
self.PL_unregister_atom.argtypes = [atom_t]
self.PL_unregister_atom.restype = None
# PL_EXPORT(atom_t) PL_functor_name(functor_t f);
self.PL_functor_name = self._lib.PL_functor_name
self.PL_functor_name.argtypes = [functor_t]
self.PL_functor_name.restype = atom_t
# PL_EXPORT(int) PL_functor_arity(functor_t f);
self.PL_functor_arity = self._lib.PL_functor_arity
self.PL_functor_arity.argtypes = [functor_t]
self.PL_functor_arity.restype = c_int
# /* Get C-values from Prolog terms */
# PL_EXPORT(int) PL_get_atom(term_t t, atom_t *a);
self.PL_get_atom = self._lib.PL_get_atom
self.PL_get_atom.argtypes = [term_t, POINTER(atom_t)]
self.PL_get_atom.restype = c_int
# PL_EXPORT(int) PL_get_bool(term_t t, int *value);
self.PL_get_bool = self._lib.PL_get_bool
self.PL_get_bool.argtypes = [term_t, POINTER(c_int)]
self.PL_get_bool.restype = c_int
# PL_EXPORT(int) PL_get_atom_chars(term_t t, char **a);
self.PL_get_atom_chars = self._lib.PL_get_atom_chars # FIXME
self.PL_get_atom_chars.argtypes = [term_t, POINTER(c_char_p)]
self.PL_get_atom_chars.restype = c_int
self.PL_get_atom_chars = check_strings(None, 1)(self.PL_get_atom_chars)
self.PL_get_string_chars = self._lib.PL_get_string
self.PL_get_string_chars.argtypes = [term_t, POINTER(c_char_p), c_int_p]
# PL_EXPORT(int) PL_get_chars(term_t t, char **s, unsigned int flags);
self.PL_get_chars = self._lib.PL_get_chars # FIXME:
self.PL_get_chars = check_strings(None, 1)(self.PL_get_chars)
# PL_EXPORT(int) PL_get_list_chars(term_t l, char **s,
# unsigned int flags);
# PL_EXPORT(int) PL_get_atom_nchars(term_t t, size_t *len, char **a);
# PL_EXPORT(int) PL_get_list_nchars(term_t l,
# size_t *len, char **s,
# unsigned int flags);
# PL_EXPORT(int) PL_get_nchars(term_t t,
# size_t *len, char **s,
# unsigned int flags);
# PL_EXPORT(int) PL_get_integer(term_t t, int *i);
self.PL_get_integer = self._lib.PL_get_integer
self.PL_get_integer.argtypes = [term_t, POINTER(c_int)]
self.PL_get_integer.restype = c_int
# PL_EXPORT(int) PL_get_long(term_t t, long *i);
self.PL_get_long = self._lib.PL_get_long
self.PL_get_long.argtypes = [term_t, POINTER(c_long)]
self.PL_get_long.restype = c_int
# PL_EXPORT(int) PL_get_pointer(term_t t, void **ptr);
# PL_EXPORT(int) PL_get_float(term_t t, double *f);
self.PL_get_float = self._lib.PL_get_float
self.PL_get_float.argtypes = [term_t, c_double_p]
self.PL_get_float.restype = c_int
# PL_EXPORT(int) PL_get_functor(term_t t, functor_t *f);
self.PL_get_functor = self._lib.PL_get_functor
self.PL_get_functor.argtypes = [term_t, POINTER(functor_t)]
self.PL_get_functor.restype = c_int
# PL_EXPORT(int) PL_get_name_arity(term_t t, atom_t *name, int *arity);
self.PL_get_name_arity = self._lib.PL_get_name_arity
self.PL_get_name_arity.argtypes = [term_t, POINTER(atom_t), POINTER(c_int)]
self.PL_get_name_arity.restype = c_int
# PL_EXPORT(int) PL_get_module(term_t t, module_t *module);
# PL_EXPORT(int) PL_get_arg(int index, term_t t, term_t a);
self.PL_get_arg = self._lib.PL_get_arg
self.PL_get_arg.argtypes = [c_int, term_t, term_t]
self.PL_get_arg.restype = c_int
# PL_EXPORT(int) PL_get_list(term_t l, term_t h, term_t t);
# PL_EXPORT(int) PL_get_head(term_t l, term_t h);
self.PL_get_head = self._lib.PL_get_head
self.PL_get_head.argtypes = [term_t, term_t]
self.PL_get_head.restype = c_int
# PL_EXPORT(int) PL_get_tail(term_t l, term_t t);
self.PL_get_tail = self._lib.PL_get_tail
self.PL_get_tail.argtypes = [term_t, term_t]
self.PL_get_tail.restype = c_int
# PL_EXPORT(int) PL_get_nil(term_t l);
self.PL_get_nil = self._lib.PL_get_nil
self.PL_get_nil.argtypes = [term_t]
self.PL_get_nil.restype = c_int
# PL_EXPORT(int) PL_get_term_value(term_t t, term_value_t *v);
# PL_EXPORT(char *) PL_quote(int chr, const char *data);
self.PL_put_atom_chars = self._lib.PL_put_atom_chars
self.PL_put_atom_chars.argtypes = [term_t, c_char_p]
self.PL_put_atom_chars.restype = c_int
self.PL_put_atom_chars = check_strings(1, None)(self.PL_put_atom_chars)
self.PL_atom_chars = self._lib.PL_atom_chars
self.PL_atom_chars.argtypes = [atom_t]
self.PL_atom_chars.restype = c_char_p
self.PL_predicate = self._lib.PL_predicate
self.PL_predicate.argtypes = [c_char_p, c_int, c_char_p]
self.PL_predicate.restype = predicate_t
self.PL_predicate = check_strings([0, 2], None)(self.PL_predicate)
self.PL_pred = self._lib.PL_pred
self.PL_pred.argtypes = [functor_t, module_t]
self.PL_pred.restype = predicate_t
self.PL_open_query = self._lib.PL_open_query
self.PL_open_query.argtypes = [module_t, c_int, predicate_t, term_t]
self.PL_open_query.restype = qid_t
self.PL_next_solution = self._lib.PL_next_solution
self.PL_next_solution.argtypes = [qid_t]
self.PL_next_solution.restype = c_int
self.PL_copy_term_ref = self._lib.PL_copy_term_ref
self.PL_copy_term_ref.argtypes = [term_t]
self.PL_copy_term_ref.restype = term_t
self.PL_get_list = self._lib.PL_get_list
self.PL_get_list.argtypes = [term_t, term_t, term_t]
self.PL_get_list.restype = c_int
self.PL_get_chars = self._lib.PL_get_chars # FIXME
self.PL_close_query = self._lib.PL_close_query
self.PL_close_query.argtypes = [qid_t]
self.PL_close_query.restype = None
# void PL_cut_query(qid)
self.PL_cut_query = self._lib.PL_cut_query
self.PL_cut_query.argtypes = [qid_t]
self.PL_cut_query.restype = None
self.PL_halt = self._lib.PL_halt
self.PL_halt.argtypes = [c_int]
self.PL_halt.restype = None
# PL_EXPORT(int) PL_cleanup(int status);
self.PL_cleanup = self._lib.PL_cleanup
self.PL_cleanup.restype = c_int
self.PL_unify_integer = self._lib.PL_unify_integer
self.PL_unify = self._lib.PL_unify
self.PL_unify.argtypes = [term_t, term_t]
self.PL_unify.restype = c_int
self.PL_unify_float = self._lib.PL_unify_float
self.PL_unify_float.argtypes = [term_t, c_double]
self.PL_unify_float.restype = c_int
# PL_EXPORT(int) PL_unify_arg(int index, term_t t, term_t a) WUNUSED;
self.PL_unify_arg = self._lib.PL_unify_arg
self.PL_unify_arg.argtypes = [c_int, term_t, term_t]
self.PL_unify_arg.restype = c_int
# Verify types
self.PL_term_type = self._lib.PL_term_type
self.PL_term_type.argtypes = [term_t]
self.PL_term_type.restype = c_int
self.PL_is_variable = self._lib.PL_is_variable
self.PL_is_variable.argtypes = [term_t]
self.PL_is_variable.restype = c_int
self.PL_is_ground = self._lib.PL_is_ground
self.PL_is_ground.argtypes = [term_t]
self.PL_is_ground.restype = c_int
self.PL_is_atom = self._lib.PL_is_atom
self.PL_is_atom.argtypes = [term_t]
self.PL_is_atom.restype = c_int
self.PL_is_integer = self._lib.PL_is_integer
self.PL_is_integer.argtypes = [term_t]
self.PL_is_integer.restype = c_int
self.PL_is_string = self._lib.PL_is_string
self.PL_is_string.argtypes = [term_t]
self.PL_is_string.restype = c_int
self.PL_is_float = self._lib.PL_is_float
self.PL_is_float.argtypes = [term_t]
self.PL_is_float.restype = c_int
# PL_is_rational = _lib.PL_is_rational
# PL_is_rational.argtypes = [term_t]
# PL_is_rational.restype = c_int
self.PL_is_compound = self._lib.PL_is_compound
self.PL_is_compound.argtypes = [term_t]
self.PL_is_compound.restype = c_int
self.PL_is_functor = self._lib.PL_is_functor
self.PL_is_functor.argtypes = [term_t, functor_t]
self.PL_is_functor.restype = c_int
self.PL_is_list = self._lib.PL_is_list
self.PL_is_list.argtypes = [term_t]
self.PL_is_list.restype = c_int
self.PL_is_atomic = self._lib.PL_is_atomic
self.PL_is_atomic.argtypes = [term_t]
self.PL_is_atomic.restype = c_int
self.PL_is_number = self._lib.PL_is_number
self.PL_is_number.argtypes = [term_t]
self.PL_is_number.restype = c_int
# /* Assign to term-references */
# PL_EXPORT(void) PL_put_variable(term_t t);
self.PL_put_variable = self._lib.PL_put_variable
self.PL_put_variable.argtypes = [term_t]
self.PL_put_variable.restype = None
# PL_EXPORT(void) PL_put_atom(term_t t, atom_t a);
# PL_EXPORT(void) PL_put_atom_chars(term_t t, const char *chars);
# PL_EXPORT(void) PL_put_string_chars(term_t t, const char *chars);
# PL_EXPORT(void) PL_put_list_chars(term_t t, const char *chars);
# PL_EXPORT(void) PL_put_list_codes(term_t t, const char *chars);
# PL_EXPORT(void) PL_put_atom_nchars(term_t t, size_t l, const char *chars);
# PL_EXPORT(void) PL_put_string_nchars(term_t t, size_t len, const char *chars);
# PL_EXPORT(void) PL_put_list_nchars(term_t t, size_t l, const char *chars);
# PL_EXPORT(void) PL_put_list_ncodes(term_t t, size_t l, const char *chars);
# PL_EXPORT(void) PL_put_integer(term_t t, long i);
self.PL_put_integer = self._lib.PL_put_integer
self.PL_put_integer.argtypes = [term_t, c_long]
self.PL_put_integer.restype = None
# PL_EXPORT(void) PL_put_pointer(term_t t, void *ptr);
# PL_EXPORT(void) PL_put_float(term_t t, double f);
# PL_EXPORT(void) PL_put_functor(term_t t, functor_t functor);
self.PL_put_functor = self._lib.PL_put_functor
self.PL_put_functor.argtypes = [term_t, functor_t]
self.PL_put_functor.restype = None
# PL_EXPORT(void) PL_put_list(term_t l);
self.PL_put_list = self._lib.PL_put_list
self.PL_put_list.argtypes = [term_t]
self.PL_put_list.restype = None
# PL_EXPORT(void) PL_put_nil(term_t l);
self.PL_put_nil = self._lib.PL_put_nil
self.PL_put_nil.argtypes = [term_t]
self.PL_put_nil.restype = None
# PL_EXPORT(void) PL_put_term(term_t t1, term_t t2);
self.PL_put_term = self._lib.PL_put_term
self.PL_put_term.argtypes = [term_t, term_t]
self.PL_put_term.restype = None
# /* construct a functor or list-cell */
# PL_EXPORT(void) PL_cons_functor(term_t h, functor_t f, ...);
# class _PL_cons_functor(object):
| |
import mock
import os
import pandas as pd
from datetime import datetime
from flexmock import flexmock
from sportsreference import utils
from sportsreference.constants import AWAY
from sportsreference.nfl.constants import BOXSCORE_URL, BOXSCORES_URL
from sportsreference.nfl.boxscore import Boxscore, Boxscores
MONTH = 10
YEAR = 2017
BOXSCORE = '201802040nwe'
def read_file(filename):
    """Return the text of fixture *filename* from the local ``nfl`` directory.

    The fixture directory lives next to this test module. The original
    implementation never closed the file handle (and relied on CPython's
    refcounting to do it); a ``with`` block guarantees the handle is closed
    even if the read raises.
    """
    filepath = os.path.join(os.path.dirname(__file__), 'nfl', filename)
    with open(filepath, 'r') as fixture:
        return fixture.read()
def mock_pyquery(url):
    """Stand-in for ``requests.get`` that serves canned fixture HTML.

    Week-overview URLs map to their weekly fixture pages; any other URL
    falls back to the single game boxscore fixture.
    """
    class MockPQ:
        def __init__(self, html_contents):
            self.status_code = 200
            self.html_contents = html_contents
            self.text = html_contents

    weekly_pages = {
        BOXSCORES_URL % (YEAR, 7): 'boxscores-7-2017.html',
        BOXSCORES_URL % (YEAR, 8): 'boxscores-8-2017.html',
    }
    if url in weekly_pages:
        return MockPQ(read_file(weekly_pages[url]))
    return MockPQ(read_file('%s.html' % BOXSCORE))
class MockDateTime:
    """Minimal ``datetime`` stand-in exposing only ``year`` and ``month``."""
    def __init__(self, year, month):
        self.year, self.month = year, month
class TestNFLBoxscore:
    """Tests for parsing a single NFL boxscore page (Super Bowl LII fixture)."""
    # requests.get is patched for the duration of setup_method, so the
    # Boxscore(BOXSCORE) call below reads canned fixture HTML, not the network.
    @mock.patch('requests.get', side_effect=mock_pyquery)
    def setup_method(self, *args, **kwargs):
        # Expected parsed values for the '201802040nwe' fixture page.
        self.results = {
            'date': 'Sunday Feb 4, 2018',
            'time': '6:30pm',
            'stadium': 'U.S. Bank Stadium',
            'attendance': 67612,
            'duration': '3:46',
            'winner': AWAY,
            'winning_name': '<NAME>',
            'winning_abbr': 'PHI',
            'losing_name': '<NAME>',
            'losing_abbr': 'NWE',
            'away_points': 41,
            'away_first_downs': 25,
            'away_rush_attempts': 27,
            'away_rush_yards': 164,
            'away_rush_touchdowns': 1,
            'away_pass_completions': 29,
            'away_pass_attempts': 44,
            'away_pass_yards': 374,
            'away_pass_touchdowns': 4,
            'away_interceptions': 1,
            'away_times_sacked': 0,
            'away_yards_lost_from_sacks': 0,
            'away_net_pass_yards': 374,
            'away_total_yards': 538,
            'away_fumbles': 0,
            'away_fumbles_lost': 0,
            'away_turnovers': 1,
            'away_penalties': 6,
            'away_yards_from_penalties': 35,
            'away_third_down_conversions': 10,
            'away_third_down_attempts': 16,
            'away_fourth_down_conversions': 2,
            'away_fourth_down_attempts': 2,
            'away_time_of_possession': '34:04',
            'home_points': 33,
            'home_first_downs': 29,
            'home_rush_attempts': 22,
            'home_rush_yards': 113,
            'home_rush_touchdowns': 1,
            'home_pass_completions': 28,
            'home_pass_attempts': 49,
            'home_pass_yards': 505,
            'home_pass_touchdowns': 3,
            'home_interceptions': 0,
            'home_times_sacked': 1,
            'home_yards_lost_from_sacks': 5,
            'home_net_pass_yards': 500,
            'home_total_yards': 613,
            'home_fumbles': 1,
            'home_fumbles_lost': 1,
            'home_turnovers': 1,
            'home_penalties': 1,
            'home_yards_from_penalties': 5,
            'home_third_down_conversions': 5,
            'home_third_down_attempts': 10,
            'home_fourth_down_conversions': 1,
            'home_fourth_down_attempts': 2,
            'home_time_of_possession': '25:56',
        }
        # Pin utils._todays_date so any date-relative parsing is deterministic.
        flexmock(utils) \
            .should_receive('_todays_date') \
            .and_return(MockDateTime(YEAR, MONTH))
        self.boxscore = Boxscore(BOXSCORE)
    def test_nfl_boxscore_returns_requested_boxscore(self):
        # Every expected field must be exposed as an attribute of Boxscore.
        for attribute, value in self.results.items():
            assert getattr(self.boxscore, attribute) == value
    def test_invalid_url_yields_empty_class(self):
        # When the page cannot be retrieved, every parsed attribute should stay
        # None; only the requested URI itself is preserved.
        flexmock(Boxscore) \
            .should_receive('_retrieve_html_page') \
            .and_return(None)
        boxscore = Boxscore(BOXSCORE)
        for key, value in boxscore.__dict__.items():
            if key == '_uri':
                continue
            assert value is None
    def test_nfl_boxscore_dataframe_returns_dataframe_of_all_values(self):
        df = pd.DataFrame([self.results], index=[BOXSCORE])
        # Pandas doesn't natively allow comparisons of DataFrames.
        # Concatenating the two DataFrames (the one generated during the test
        # and the expected one above) and dropping duplicate rows leaves only
        # the rows that are unique between the two frames. This allows a quick
        # check of the DataFrame to see if it is empty - if so, all rows are
        # duplicates, and they are equal.
        frames = [df, self.boxscore.dataframe]
        df1 = pd.concat(frames).drop_duplicates(keep=False)
        assert df1.empty
class TestNFLBoxscores:
def setup_method(self):
self.expected = {
'7-2017': [
{'boxscore': '201710190rai',
'away_name': 'Kansas City Chiefs',
'away_abbr': 'kan',
'away_score': 30,
'home_name': 'Oakland Raiders',
'home_abbr': 'rai',
'home_score': 31,
'winning_name': 'Oakland Raiders',
'winning_abbr': 'rai',
'losing_name': 'Kansas City Chiefs',
'losing_abbr': 'kan'},
{'boxscore': '201710220chi',
'away_name': 'Car<NAME>',
'away_abbr': 'car',
'away_score': 3,
'home_name': 'Chicago Bears',
'home_abbr': 'chi',
'home_score': 17,
'winning_name': 'Chicago Bears',
'winning_abbr': 'chi',
'losing_name': 'Carolina Panthers',
'losing_abbr': 'car'},
{'boxscore': '201710220buf',
'away_name': 'Tampa Bay Buccaneers',
'away_abbr': 'tam',
'away_score': 27,
'home_name': '<NAME>',
'home_abbr': 'buf',
'home_score': 30,
'winning_name': '<NAME>',
'winning_abbr': 'buf',
'losing_name': '<NAME>',
'losing_abbr': 'tam'},
{'boxscore': '201710220ram',
'away_name': 'Arizona Cardinals',
'away_abbr': 'crd',
'away_score': 0,
'home_name': 'Los Angeles Rams',
'home_abbr': 'ram',
'home_score': 33,
'winning_name': 'Los Angeles Rams',
'winning_abbr': 'ram',
'losing_name': 'Arizona Cardinals',
'losing_abbr': 'crd'},
{'boxscore': '201710220min',
'away_name': 'Baltimore Ravens',
'away_abbr': 'rav',
'away_score': 16,
'home_name': 'Minnesota Vikings',
'home_abbr': 'min',
'home_score': 24,
'winning_name': 'Minnesota Vikings',
'winning_abbr': 'min',
'losing_name': 'Baltimore Ravens',
'losing_abbr': 'rav'},
{'boxscore': '201710220mia',
'away_name': '<NAME>',
'away_abbr': 'nyj',
'away_score': 28,
'home_name': '<NAME>',
'home_abbr': 'mia',
'home_score': 31,
'winning_name': '<NAME>',
'winning_abbr': 'mia',
'losing_name': '<NAME>',
'losing_abbr': 'nyj'},
{'boxscore': '201710220gnb',
'away_name': 'New Orleans Saints',
'away_abbr': 'nor',
'away_score': 26,
'home_name': '<NAME>',
'home_abbr': 'gnb',
'home_score': 17,
'winning_name': 'New Orleans Saints',
'winning_abbr': 'nor',
'losing_name': '<NAME>',
'losing_abbr': 'gnb'},
{'boxscore': '201710220clt',
'away_name': '<NAME>',
'away_abbr': 'jax',
'away_score': 27,
'home_name': 'Indianapolis Colts',
'home_abbr': 'clt',
'home_score': 0,
'winning_name': '<NAME>',
'winning_abbr': 'jax',
'losing_name': 'Indianapol<NAME>',
'losing_abbr': 'clt'},
{'boxscore': '201710220cle',
'away_name': '<NAME>',
'away_abbr': 'oti',
'away_score': 12,
'home_name': '<NAME>',
'home_abbr': 'cle',
'home_score': 9,
'winning_name': '<NAME>',
'winning_abbr': 'oti',
'losing_name': '<NAME>',
'losing_abbr': 'cle'},
{'boxscore': '201710220sfo',
'away_name': '<NAME>',
'away_abbr': 'dal',
'away_score': 40,
'home_name': '<NAME>',
'home_abbr': 'sfo',
'home_score': 10,
'winning_name': '<NAME>',
'winning_abbr': 'dal',
'losing_name': '<NAME>',
'losing_abbr': 'sfo'},
{'boxscore': '201710220sdg',
'away_name': '<NAME>',
'away_abbr': 'den',
'away_score': 0,
'home_name': 'Los <NAME>',
'home_abbr': 'sdg',
'home_score': 21,
'winning_name': '<NAME>',
'winning_abbr': 'sdg',
'losing_name': '<NAME>',
'losing_abbr': 'den'},
{'boxscore': '201710220pit',
'away_name': 'Cincinnati Bengals',
'away_abbr': 'cin',
'away_score': 14,
'home_name': 'Pittsburgh Steelers',
'home_abbr': 'pit',
'home_score': 29,
'winning_name': '<NAME>',
'winning_abbr': 'pit',
'losing_name': '<NAME>',
'losing_abbr': 'cin'},
{'boxscore': '201710220nyg',
'away_name': 'Seattle Seahawks',
'away_abbr': 'sea',
'away_score': 24,
'home_name': '<NAME>',
'home_abbr': 'nyg',
'home_score': 7,
'winning_name': 'Seattle Seahawks',
'winning_abbr': 'sea',
'losing_name': '<NAME>',
'losing_abbr': 'nyg'},
{'boxscore': '201710220nwe',
'away_name': '<NAME>',
'away_abbr': 'atl',
'away_score': 7,
'home_name': '<NAME>',
'home_abbr': 'nwe',
'home_score': 23,
'winning_name': '<NAME>',
'winning_abbr': 'nwe',
'losing_name': '<NAME>',
'losing_abbr': 'atl'},
{'boxscore': '201710230phi',
'away_name': '<NAME>',
'away_abbr': 'was',
'away_score': 24,
'home_name': '<NAME>',
'home_abbr': 'phi',
'home_score': 34,
'winning_name': '<NAME>',
'winning_abbr': 'phi',
'losing_name': '<NAME>',
'losing_abbr': 'was'}
]
}
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search(self, *args, **kwargs):
result = Boxscores(7, 2017).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_invalid_end(self, *args, **kwargs):
result = Boxscores(7, 2017, 5).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_multiple_weeks(self, *args, **kwargs):
expected = {
'7-2017': [
{'boxscore': '201710190rai',
'away_name': 'Kansas City Chiefs',
'away_abbr': 'kan',
'away_score': 30,
'home_name': 'Oakland Raiders',
'home_abbr': 'rai',
'home_score': 31,
'winning_name': 'Oakland Raiders',
'winning_abbr': 'rai',
'losing_name': 'Kansas City Chiefs',
'losing_abbr': 'kan'},
{'boxscore': '201710220chi',
'away_name': 'Carolina Panthers',
'away_abbr': 'car',
'away_score': 3,
'home_name': 'Chicago Bears',
'home_abbr': 'chi',
'home_score': 17,
'winning_name': 'Chicago Bears',
'winning_abbr': 'chi',
'losing_name': 'Carolina Panthers',
'losing_abbr': 'car'},
{'boxscore': '201710220buf',
'away_name': 'Tampa Bay Buccaneers',
'away_abbr': 'tam',
'away_score': 27,
'home_name': 'Buffalo Bills',
'home_abbr': 'buf',
'home_score': 30,
'winning_name': 'Buffalo Bills',
'winning_abbr': 'buf',
'losing_name': '<NAME>',
'losing_abbr': 'tam'},
{'boxscore': '201710220ram',
'away_name': 'Arizona Cardinals',
'away_abbr': 'crd',
'away_score': 0,
'home_name': 'Los Angeles Rams',
'home_abbr': 'ram',
'home_score': 33,
'winning_name': 'Los Angeles Rams',
'winning_abbr': 'ram',
'losing_name': 'Arizona Cardinals',
'losing_abbr': 'crd'},
{'boxscore': '201710220min',
'away_name': '<NAME>',
'away_abbr': 'rav',
'away_score': 16,
'home_name': 'Minnesota Vikings',
'home_abbr': 'min',
'home_score': 24,
'winning_name': 'Minnesota Vikings',
'winning_abbr': 'min',
'losing_name': '<NAME>',
'losing_abbr': 'rav'},
{'boxscore': '201710220mia',
'away_name': 'New York Jets',
'away_abbr': 'nyj',
'away_score': 28,
'home_name': '<NAME>',
'home_abbr': 'mia',
'home_score': 31,
'winning_name': '<NAME>',
'winning_abbr': 'mia',
'losing_name': '<NAME>',
'losing_abbr': 'nyj'},
{'boxscore': '201710220gnb',
'away_name': 'New Orleans Saints',
'away_abbr': 'nor',
'away_score': 26,
'home_name': 'Green Bay Packers',
'home_abbr': 'gnb',
'home_score': 17,
'winning_name': 'New Orleans Saints',
'winning_abbr': 'nor',
'losing_name': '<NAME>',
'losing_abbr': 'gnb'},
{'boxscore': '201710220clt',
'away_name': '<NAME>',
'away_abbr': 'jax',
'away_score': 27,
'home_name': 'Indianapolis Colts',
'home_abbr': 'clt',
'home_score': 0,
'winning_name': '<NAME>',
'winning_abbr': 'jax',
'losing_name': 'Indianapolis Colts',
'losing_abbr': 'clt'},
{'boxscore': '201710220cle',
'away_name': '<NAME>',
'away_abbr': 'oti',
'away_score': 12,
'home_name': '<NAME>',
'home_abbr': 'cle',
'home_score': 9,
'winning_name': '<NAME>',
'winning_abbr': 'oti',
'losing_name': '<NAME>',
'losing_abbr': 'cle'},
{'boxscore': '201710220sfo',
'away_name': '<NAME>',
'away_abbr': 'dal',
'away_score': 40,
'home_name': 'San Francisco 49ers',
'home_abbr': 'sfo',
'home_score': 10,
'winning_name': '<NAME>',
'winning_abbr': 'dal',
'losing_name': 'San Francisco 49ers',
'losing_abbr': 'sfo'},
{'boxscore': '201710220sdg',
'away_name': '<NAME>',
'away_abbr': 'den',
'away_score': 0,
'home_name': '<NAME>',
'home_abbr': 'sdg',
'home_score': 21,
'winning_name': 'Los Angeles Chargers',
'winning_abbr': 'sdg',
'losing_name': '<NAME>',
'losing_abbr': 'den'},
{'boxscore': '201710220pit',
'away_name': '<NAME>',
'away_abbr': 'cin',
'away_score': 14,
'home_name': '<NAME>',
'home_abbr': 'pit',
'home_score': 29,
'winning_name': '<NAME>',
'winning_abbr': 'pit',
'losing_name': '<NAME>',
'losing_abbr': 'cin'},
{'boxscore': '201710220nyg',
'away_name': 'Seattle Seahawks',
'away_abbr': 'sea',
'away_score': 24,
'home_name': '<NAME>',
'home_abbr': 'nyg',
'home_score': 7,
'winning_name': 'Seattle Seahawks',
'winning_abbr': 'sea',
'losing_name': '<NAME>',
'losing_abbr': 'nyg'},
{'boxscore': '201710220nwe',
'away_name': '<NAME>',
'away_abbr': 'atl',
'away_score': 7,
'home_name': '<NAME>',
'home_abbr': 'nwe',
'home_score': 23,
'winning_name': '<NAME>',
'winning_abbr': 'nwe',
'losing_name': '<NAME>',
'losing_abbr': 'atl'},
{'boxscore': '201710230phi',
'away_name': 'Washington Redskins',
'away_abbr': 'was',
'away_score': 24,
'home_name': '<NAME>',
'home_abbr': 'phi',
'home_score': 34,
'winning_name': '<NAME>',
'winning_abbr': 'phi',
'losing_name': '<NAME>',
'losing_abbr': 'was'}
],
'8-2017': [
{'boxscore': '201710260rav',
'away_name': '<NAME>',
'away_abbr': 'mia',
'away_score': 0,
'home_name': '<NAME>',
'home_abbr': 'rav',
'home_score': 40,
'winning_name': '<NAME>',
'winning_abbr': 'rav',
'losing_name': '<NAME>',
'losing_abbr': 'mia'},
{'boxscore': '201710290cle',
'away_name': '<NAME>',
'away_abbr': 'min',
'away_score': 33,
'home_name': '<NAME>',
'home_abbr': 'cle',
'home_score': 16,
'winning_name': '<NAME>',
'winning_abbr': 'min',
'losing_name': '<NAME>',
'losing_abbr': 'cle'},
{'boxscore': '201710290buf',
'away_name': '<NAME>',
'away_abbr': 'rai',
'away_score': 14,
'home_name': '<NAME>',
'home_abbr': 'buf',
'home_score': 34,
'winning_name': '<NAME>',
'winning_abbr': 'buf',
'losing_name': '<NAME>',
'losing_abbr': 'rai'},
{'boxscore': '201710290tam',
'away_name': '<NAME>',
'away_abbr': 'car',
'away_score': 17,
'home_name': '<NAME>',
'home_abbr': 'tam',
'home_score': 3,
'winning_name': '<NAME>thers',
'winning_abbr': 'car',
'losing_name': 'T<NAME> Buccaneers',
'losing_abbr': 'tam'},
{'boxscore': '201710290phi',
'away_name': '<NAME>',
'away_abbr': 'sfo',
'away_score': 10,
'home_name': '<NAME>',
'home_abbr': 'phi',
'home_score': 33,
'winning_name': '<NAME>',
'winning_abbr': 'phi',
'losing_name': '<NAME>',
'losing_abbr': 'sfo'},
{'boxscore': '201710290nyj',
'away_name': '<NAME>',
'away_abbr': 'atl',
'away_score': 25,
'home_name': '<NAME>',
'home_abbr': 'nyj',
'home_score': 20,
'winning_name': '<NAME>',
'winning_abbr': 'atl',
'losing_name': '<NAME>',
'losing_abbr': 'nyj'},
{'boxscore': '201710290nwe',
'away_name': '<NAME>',
'away_abbr': 'sdg',
'away_score': 13,
| |
import pygame
from settings import *
from collections import deque
from ray_casting import mapping
from numba.core import types
from numba.typed import Dict
from numba import int32
class Sprites:
    """Container for all sprite/NPC/door definitions and their placements.

    ``sprite_parameters`` holds one template dict per sprite kind (base
    surface(s), animation frames, scaling, collision side, behaviour flag);
    ``list_of_objects`` instantiates those templates at world positions
    given in tile units.
    """
    def __init__(self):
        # Template parameters shared by all instances of a sprite kind.
        # 'viewing_angles' True means 'sprite' is a list of 8 (or 16)
        # direction frames; None means a single static surface.
        self.sprite_parameters = {
            'sprite_barrel': {
                'sprite': pygame.image.load('sprites/barrel/base/0.png').convert_alpha(),
                'viewing_angles': None,
                'shift': 1.8,
                'scale': (0.4, 0.4),
                'side': 30,
                'animation': deque(
                    [pygame.image.load(f'sprites/barrel/anim/{i}.png').convert_alpha() for i in range(12)]),
                'death_animation': deque([pygame.image.load(f'sprites/barrel/death/{i}.png')
                                         .convert_alpha() for i in range(4)]),
                'is_dead': None,
                'dead_shift': 2.6,
                'animation_dist': 800,
                'animation_speed': 6,
                'blocked': True,
                'flag': 'decor',
                'obj_action': []
            },
            'sprite_pin': {
                'sprite': pygame.image.load('sprites/pin/base/0.png').convert_alpha(),
                'viewing_angles': None,
                'shift': 0.6,
                'scale': (0.6, 0.6),
                'side': 30,
                'animation': deque([pygame.image.load(f'sprites/pin/anim/{i}.png').convert_alpha() for i in range(8)]),
                'death_animation': [],
                'is_dead': 'immortal',
                'dead_shift': None,
                'animation_dist': 800,
                'animation_speed': 10,
                'blocked': True,
                'flag': 'decor',
                'obj_action': []
            },
            'sprite_flame': {
                'sprite': pygame.image.load('sprites/flame/base/0.png').convert_alpha(),
                'viewing_angles': None,
                'shift': 0.7,
                'scale': (0.6, 0.6),
                'side': 30,
                'animation': deque(
                    [pygame.image.load(f'sprites/flame/anim/{i}.png').convert_alpha() for i in range(16)]),
                'death_animation': [],
                'is_dead': 'immortal',
                'dead_shift': 1.8,
                'animation_dist': 1800,
                'animation_speed': 5,
                'blocked': None,
                'flag': 'decor',
                'obj_action': []
            },
            'npc_devil0': {
                'sprite': [pygame.image.load(f'sprites/npc/devil0/base/{i}.png').convert_alpha() for i in range(8)],
                'viewing_angles': True,
                'shift': 0.0,
                'scale': (1.1, 1.1),
                'side': 50,
                'animation': [],
                'death_animation': deque([pygame.image.load(f'sprites/npc/devil0/death/{i}.png')
                                         .convert_alpha() for i in range(6)]),
                'is_dead': None,
                'dead_shift': 0.6,
                'animation_dist': None,
                'animation_speed': 10,
                'blocked': True,
                'flag': 'npc',
                'obj_action': deque(
                    [pygame.image.load(f'sprites/npc/devil0/anim/{i}.png').convert_alpha() for i in range(9)]),
            },
            'npc_devil1': {
                'sprite': [pygame.image.load(f'sprites/npc/devil1/base/{i}.png').convert_alpha() for i in range(8)],
                'viewing_angles': True,
                'shift': 0,
                'scale': (0.9, 1.0),
                'side': 30,
                'animation': [],
                'death_animation': deque([pygame.image.load(f'sprites/npc/devil1/death/{i}.png')
                                         .convert_alpha() for i in range(11)]),
                'is_dead': None,
                'dead_shift': 0.5,
                'animation_dist': None,
                'animation_speed': 6,
                'blocked': True, # <-------------------
                'flag': 'npc',
                'obj_action': deque([pygame.image.load(f'sprites/npc/devil1/action/{i}.png')
                                    .convert_alpha() for i in range(6)])
            },
            'npc_soldier0': {
                'sprite': [pygame.image.load(f'sprites/npc/soldier0/base/{i}.png').convert_alpha() for i in range(8)],
                'viewing_angles': True,
                'shift': 0.8,
                'scale': (0.4, 0.6),
                'side': 30,
                'animation': [],
                'death_animation': deque([pygame.image.load(f'sprites/npc/soldier0/death/{i}.png')
                                         .convert_alpha() for i in range(10)]),
                'is_dead': None,
                'dead_shift': 1.7,
                'animation_dist': None,
                'animation_speed': 6,
                'blocked': True,
                'flag': 'npc',
                'obj_action': deque([pygame.image.load(f'sprites/npc/soldier0/action/{i}.png')
                                    .convert_alpha() for i in range(4)])
            },
            'npc_soldier1': {
                'sprite': [pygame.image.load(f'sprites/npc/soldier1/base/{i}.png').convert_alpha() for i in range(8)],
                'viewing_angles': True,
                'shift': 0.8,
                'scale': (0.4, 0.6),
                'side': 30,
                'animation': [],
                'death_animation': deque([pygame.image.load(f'sprites/npc/soldier1/death/{i}.png')
                                         .convert_alpha() for i in range(11)]),
                'is_dead': None,
                'dead_shift': 1.7,
                'animation_dist': None,
                'animation_speed': 6,
                'blocked': True, # <-------------------
                'flag': 'npc',
                'obj_action': deque([pygame.image.load(f'sprites/npc/soldier1/action/{i}.png')
                                    .convert_alpha() for i in range(4)])
            },
            # NOTE(review): the 'flag' values below appear swapped relative to
            # the key names ('sprite_door_v' -> 'door_h' and vice versa) —
            # confirm against the renderer/interaction code before "fixing".
            'sprite_door_v': {
                'sprite': [pygame.image.load(f'sprites/doors/door_v/{i}.png').convert_alpha() for i in range(16)],
                'viewing_angles': True,
                'shift': 0.1,
                'scale': (2.6, 1.2),
                'side': 100,
                'animation': [],
                'death_animation': [],
                'is_dead': 'immortal',
                'dead_shift': 0,
                'animation_dist': 0,
                'animation_speed': 0,
                'blocked': True,
                'flag': 'door_h',
                'obj_action': []
            },
            'sprite_door_h': {
                'sprite': [pygame.image.load(f'sprites/doors/door_h/{i}.png').convert_alpha() for i in range(16)],
                'viewing_angles': True,
                'shift': 0.1,
                'scale': (2.6, 1.2),
                'side': 100,
                'animation': [],
                'death_animation': [],
                'is_dead': 'immortal',
                'dead_shift': 0,
                'animation_dist': 0,
                'animation_speed': 0,
                'blocked': True,
                'flag': 'door_v',
                'obj_action': []
            },
        }
        # World layout: positions are in map-tile units, scaled by TILE inside
        # SpriteObject.__init__.
        self.list_of_objects = [
            SpriteObject(self.sprite_parameters['sprite_barrel'], (7.1, 2.1)),
            SpriteObject(self.sprite_parameters['sprite_barrel'], (5.9, 2.1)),
            SpriteObject(self.sprite_parameters['sprite_barrel'], (14.8, 12.28)),
            SpriteObject(self.sprite_parameters['sprite_barrel'], (16.5, 7.61)),
            SpriteObject(self.sprite_parameters['sprite_barrel'], (12.54, 2.42)),
            SpriteObject(self.sprite_parameters['sprite_barrel'], (19.2, 2.62)),
            SpriteObject(self.sprite_parameters['sprite_barrel'], (21.79, 8.93)),
            SpriteObject(self.sprite_parameters['sprite_barrel'], (21.57, 13.58)),
            SpriteObject(self.sprite_parameters['sprite_barrel'], (12.32, 13.62)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (1.25, 1.6)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (3.54, 8.42)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (5.53, 9.43)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (9.42, 8.48)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (10.36, 3.73)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (13.8, 11.32)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (19.3, 12.76)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (16.34, 4.5)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (16.11, 1.47)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (22.31, 1.59)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (22.47, 14.48)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (11.46, 14.55)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (1.46, 14.41)),
            SpriteObject(self.sprite_parameters['sprite_pin'], (8.8, 2.5)),
            SpriteObject(self.sprite_parameters['sprite_pin'], (16.82, 12.52)),
            SpriteObject(self.sprite_parameters['sprite_pin'], (17.45, 3.55)),
            SpriteObject(self.sprite_parameters['sprite_pin'], (10.62, 9.36)),
            SpriteObject(self.sprite_parameters['sprite_pin'], (7.01, 13.47)),
            SpriteObject(self.sprite_parameters['sprite_flame'], (8.6, 5.6)),
            SpriteObject(self.sprite_parameters['npc_soldier0'], (2.5, 1.5)),
            SpriteObject(self.sprite_parameters['npc_soldier0'], (5.51, 1.5)),
            SpriteObject(self.sprite_parameters['npc_soldier0'], (6.61, 2.92)),
            SpriteObject(self.sprite_parameters['npc_soldier0'], (7.68, 1.47)),
            SpriteObject(self.sprite_parameters['npc_soldier0'], (8.75, 3.65)),
            SpriteObject(self.sprite_parameters['npc_soldier0'], (1.27, 11.5)),
            SpriteObject(self.sprite_parameters['npc_soldier0'], (1.26, 8.29)),
            # SpriteObject(self.sprite_parameters['npc_soldier0'], (2.56, 7.38)), # <------------
            SpriteObject(self.sprite_parameters['npc_soldier1'], (10.5, 1.1)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (3.66, 5.27)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (4.38, 6.56)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (4.33, 9.01)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (4.46, 10.35)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (13.16, 2.16)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (12.09, 1.28)),
            SpriteObject(self.sprite_parameters['npc_devil0'], (3.9, 6.41)),
            SpriteObject(self.sprite_parameters['npc_devil0'], (14.36, 3.31)),
            SpriteObject(self.sprite_parameters['npc_devil0'], (2.14, 13.81)),
            SpriteObject(self.sprite_parameters['npc_devil1'], (10.73, 3.90)),
            SpriteObject(self.sprite_parameters['npc_devil1'], (10.6, 5.52)),
            SpriteObject(self.sprite_parameters['npc_devil1'], (11.75, 5.75)),
            SpriteObject(self.sprite_parameters['npc_devil1'], (14.87, 14.27)),
            SpriteObject(self.sprite_parameters['npc_devil0'], (15.85, 14.74)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (17.02, 14.26)),
            SpriteObject(self.sprite_parameters['npc_devil0'], (18.20, 14.73)),
            SpriteObject(self.sprite_parameters['npc_devil1'], (19.77, 14.26)),
            SpriteObject(self.sprite_parameters['npc_soldier0'], (8.10, 11.93)),
            SpriteObject(self.sprite_parameters['npc_soldier0'], (12.14, 10.34)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (15.27, 6.56)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (14.31, 11.56)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (17.62, 9.66)),
            SpriteObject(self.sprite_parameters['npc_devil0'], (22.53, 9.00)),
            SpriteObject(self.sprite_parameters['npc_devil1'], (20.12, 3.55)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (18.29, 12.66)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (18.53, 8.33)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (20.42, 7.53)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (22.3, 2.89)),
            SpriteObject(self.sprite_parameters['npc_soldier1'], (22.76, 10.21)),
            SpriteObject(self.sprite_parameters['sprite_door_v'], (9.5, 4.5)),
            SpriteObject(self.sprite_parameters['sprite_door_v'], (9.5, 1.5)),
            SpriteObject(self.sprite_parameters['sprite_door_v'], (6.5, 8.5)),
            SpriteObject(self.sprite_parameters['sprite_door_v'], (2.5, 10.5)),
            SpriteObject(self.sprite_parameters['sprite_door_v'], (13.5, 14.5)),
            SpriteObject(self.sprite_parameters['sprite_door_v'], (3.5, 3.5)),
            SpriteObject(self.sprite_parameters['sprite_door_h'], (1.5, 4.5)),
            SpriteObject(self.sprite_parameters['sprite_door_h'], (5.5, 4.5)),
            SpriteObject(self.sprite_parameters['sprite_door_h'], (13.5, 4.5)),
            SpriteObject(self.sprite_parameters['sprite_door_h'], (22.5, 11.5)),
            SpriteObject(self.sprite_parameters['sprite_door_h'], (11.5, 6.5)),
        ]
    @property
    def sprite_shot(self):
        # Closest sprite currently under the crosshair: min over each object's
        # (distance, projection height) pair; (inf, 0) when nothing is hit.
        return min([obj.is_on_fire for obj in self.list_of_objects], default=(float('inf'), 0))
    @property
    def blocked_doors(self):
        # numba-typed dict (tile coords -> int32) of closed door tiles, so it
        # can be passed into jitted ray-casting code.
        blocked_doors = Dict.empty(key_type=types.UniTuple(int32, 2), value_type=int32)
        for obj in self.list_of_objects:
            if obj.flag == 'door_h' or obj.flag == 'door_v':
                if obj.blocked:
                    i, j = mapping(obj.x, obj.y)
                    blocked_doors[(i, j)] = 0
        return blocked_doors
class SpriteObject:
    def __init__(self, parameters, pos):
        """Instantiate one world sprite from a template dict and a tile position."""
        # .copy() so per-instance animation state does not mutate the shared
        # template (works for both a single Surface and a list/deque of frames).
        self.object = parameters['sprite'].copy()
        self.viewing_angles = parameters['viewing_angles']
        self.shift = parameters['shift']
        self.scale = parameters['scale']
        self.animation = parameters['animation'].copy()
        # ---------------------
        self.death_animation = parameters['death_animation'].copy()
        self.is_dead = parameters['is_dead']
        self.dead_shift = parameters['dead_shift']
        # ---------------------
        self.animation_dist = parameters['animation_dist']
        self.animation_speed = parameters['animation_speed']
        self.blocked = parameters['blocked']
        self.flag = parameters['flag']
        self.obj_action = parameters['obj_action'].copy()
        # Tile coordinates -> world coordinates.
        self.x, self.y = pos[0] * TILE, pos[1] * TILE
        self.side = parameters['side'] # <-------------------------------------------------------
        # self.pos = self.x - self.side // 2, self.y - self.side // 2
        self.dead_animation_count = 0
        self.animation_count = 0
        self.npc_action_trigger = False
        self.door_open_trigger = False
        # Remember the sliding axis origin so a door can be animated open.
        self.door_prev_pos = self.y if self.flag == 'door_h' else self.x
        self.delete = False
        if self.viewing_angles:
            # Map each angular sector to the directional frame shown from it
            # (8-frame sprites use 45-degree sectors, 16-frame use ~23 degrees).
            if len(self.object) == 8:
                self.sprite_angles = [frozenset(range(338, 361)) | frozenset(range(0, 23))] + \
                                     [frozenset(range(i, i + 45)) for i in range(23, 338, 45)]
            else:
                self.sprite_angles = [frozenset(range(348, 361)) | frozenset(range(0, 11))] + \
                                     [frozenset(range(i, i + 23)) for i in range(11, 348, 23)]
            self.sprite_positions = {angle: pos for angle, pos in zip(self.sprite_angles, self.object)}
@property
def is_on_fire(self):
if CENTER_RAY - self.side // 2 < self.current_ray < CENTER_RAY + self.side // 2 and self.blocked:
return (self.distance_to_sprite, self.proj_height)
return (float('inf'), None)
@property
def pos(self):
return self.x - self.side // 2, self.y - self.side // 2
    def object_locate(self, player):
        """Project this sprite into screen space relative to *player*.

        Returns (distance, scaled_surface, blit_pos) when the sprite lies
        within the extended ray range and farther than 30 world units;
        otherwise returns (False,).
        """
        dx, dy = self.x - player.x, self.y - player.y
        self.distance_to_sprite = math.sqrt(dx ** 2 + dy ** 2)
        # Angle from player to sprite and its offset from the view axis.
        self.theta = math.atan2(dy, dx)
        gamma = self.theta - player.angle
        if dx > 0 and 180 <= math.degrees(player.angle) <= 360 or dx < 0 and dy < 0:
            gamma += DOUBLE_PI
        # NOTE(review): the 1.4 factor skews theta used for frame
        # selection; its origin is not evident from this file -- confirm.
        self.theta -= 1.4 * gamma
        delta_rays = int(gamma / DELTA_ANGLE)
        self.current_ray = CENTER_RAY + delta_rays
        # Fishbowl correction; skipped for doors so they stay flush with walls.
        if self.flag not in {'door_h', 'door_v'}:
            self.distance_to_sprite *= math.cos(HALF_FOV - self.current_ray * DELTA_ANGLE)
        fake_ray = self.current_ray + FAKE_RAYS
        if 0 <= fake_ray <= FAKE_RAYS_RANGE and self.distance_to_sprite > 30:
            # Doors are clamped to wall height; other sprites may be taller.
            self.proj_height = min(int(PROJ_COEFF / self.distance_to_sprite),
                                   DOUBLE_HEIGHT if self.flag not in {'door_h', 'door_v'} else HEIGHT)
            sprite_width = int(self.proj_height * self.scale[0])
            sprite_height = int(self.proj_height * self.scale[1])
            half_sprite_width = sprite_width // 2
            half_sprite_height = sprite_height // 2
            shift = half_sprite_height * self.shift
            # Pick the frame: doors animate while opening; NPCs may be
            # dead or acting; decor just animates by distance.
            if self.flag == 'door_h' or self.flag == 'door_v':
                if self.door_open_trigger:
                    self.door_open()
                self.object = self.visible_sprite()
                sprite_object = self.sprite_animation()
            else:
                if self.is_dead and self.is_dead != 'immortal':
                    sprite_object = self.dead_animation()
                    shift = half_sprite_height * self.dead_shift
                    sprite_height = int(sprite_height / 1.3)
                elif self.npc_action_trigger:
                    sprite_object = self.npc_in_action()
                else:
                    # choose the directional frame, then animate it
                    self.object = self.visible_sprite()
                    sprite_object = self.sprite_animation()
            # scale the chosen frame and compute its blit position
            sprite = pygame.transform.scale(sprite_object, (sprite_width, sprite_height))
            sprite_pos = (self.current_ray * SCALE - half_sprite_width, HALF_HEIGHT - half_sprite_height + shift)
            return (self.distance_to_sprite, sprite, sprite_pos)
        else:
            return (False,)
def sprite_animation(self):
if self.animation and self.distance_to_sprite < self.animation_dist:
sprite_object = self.animation[0]
if self.animation_count < self.animation_speed:
self.animation_count += 1
else:
self.animation.rotate(-1)
self.animation_count = 0
return sprite_object
return self.object
def visible_sprite(self):
if self.viewing_angles:
if self.theta < 0:
self.theta += DOUBLE_PI
self.theta = 360 - int(math.degrees(self.theta))
for angles in self.sprite_angles:
if self.theta in angles:
return self.sprite_positions[angles]
return self.object
def dead_animation(self):
if len(self.death_animation):
if self.dead_animation_count < self.animation_speed:
self.dead_sprite = self.death_animation[0]
self.dead_animation_count += 1
else:
self.dead_sprite = self.death_animation.popleft()
self.dead_animation_count = 0
return self.dead_sprite
def npc_in_action(self):
sprite_object = self.obj_action[0]
if self.animation_count < self.animation_speed:
self.animation_count += 1
else:
self.obj_action.rotate()
| |
conformally rescaled
metrci. This (conformal) laplacian is only used in the definition of
Ricci that shows up in the evolution equation for At (under the trace
free operation), and even then only in the part that multiplies the
metric and which will drop out on taking the trace free part. So, in
fact, the code could be written to completely ignore this operation
in the evolution equations themselves. However, if the constraints
are included or the full Ricci is needed for another reason, this
would be needed.
"""
global d, inv_metric, C2
if inv_metric == undef:
inv_metric = get_inverse_metric()
#ewh3 return sum([(inv_metric[i, j] * d2(i, j, a) - sum([C2[l, i, j] * d(l, a) for l in e_i])) for i, j in e_ij])
return sum([ inv_metric[i, j] * (d2(i, j, a) - sum([C2[l, i, j] * d(l, a) for l in e_i])) for i, j in e_ij])
def sqr(a):
    """
    Computes the full contraction a_{ij} a^{ij} of the matrix with itself,
    raising indices with the inverse metric. Assumes the metric is set.
    """
    global inv_metric
    if inv_metric == undef:
        inv_metric = get_inverse_metric()
    total = 0
    for i, j in e_ij:
        raised = sum(inv_metric[i, k] * inv_metric[j, l] * a[k, l]
                     for k in e_i for l in e_i)
        total += a[i, j] * raised
    return total
def trace_free(x):
    """
    Returns the trace-free part of the operator:
    x_{ab} - (1/3) g_{ab} tr(x), with the trace taken using the inverse
    metric. Assumes the metric is set.
    """
    global metric, inv_metric
    if inv_metric == undef:
        inv_metric = get_inverse_metric()
    tr = sum(inv_metric[i, j] * x[i, j] for i, j in e_ij)
    entries = [x[i, j] - metric[i, j] * tr / 3 for i, j in e_ij]
    return Matrix(entries).reshape(3, 3)
def vec_j_del_j(b, a):
    r"""
    expands to $\beta^i\partial_i \alpha$: the directional derivative of
    the scalar *a* along the vector *b*, using the partial-derivative
    operator d defined elsewhere in this module.
    """
    return sum([b[i]*d(i, a) for i in e_i])
#[ewh] Adding this as this term needs to be in the beta equation as an
# advective derivative ... and not as a regular (partial) derivative.
def vec_j_ad_j(b, f):
    r"""
    expands to $\beta^i\partial_i f$ using the advective derivative
    operator ad (see the note above: beta terms must be advective, not
    regular partial, derivatives).
    """
    return sum([b[i]*ad(i, f) for i in e_i])
#vec_k_del_k = vec_j_del_j
##########################################################################
# metric related functions
##########################################################################
def set_metric(g):
    """
    Sets the metric variable so that dendro knows how to compute the
    derived variables. This should be done fairly early on. e.g.,

        gt = dendro.sym_3x3("gt")
        dendro.set_metric(gt)
    """
    global metric
    metric = g
def set_ref_metric(f):
    """
    Sets the reference metric variable so that dendro knows how to compute
    the derived variables. This should be done fairly early on. e.g.,

        f_ref = dendro.sym_3x3("f_ref")
        dendro.set_ref_metric(f_ref)
    """
    global ref_metric
    ref_metric = f
def get_inverse_metric():
    """
    Computes and returns the inverse metric, caching it in the global
    inv_metric. The metric needs to be defined in advance. e.g.,

        gt = dendro.sym_3x3("gt")
        dendro.set_metric(gt)
        igt = dendro.get_inverse_metric()

    Raises ValueError if the metric has not been set.
    """
    global metric, inv_metric, undef
    if metric == undef:
        raise ValueError('Dendro: Metric not defined.')
    if inv_metric == undef:
        # method : ('GE', 'LU', or 'ADJ')
        inv_metric = simplify(metric.inv('ADJ'))
    return inv_metric
def get_inverse_ref_metric():
    """
    Computes and returns the inverse of the reference metric, caching it
    in the global inv_ref_metric. The reference metric needs to be
    defined in advance. e.g.,

        f_ref = dendro.sym_3x3("f_ref")
        dendro.set_ref_metric(f_ref)
        if_ref = dendro.get_inverse_ref_metric()

    Raises ValueError if the reference metric has not been set.
    """
    # Fix: the global statement previously named a nonexistent
    # 'ref_undef'; 'undef' is the sentinel actually compared against.
    global ref_metric, inv_ref_metric, undef
    if ref_metric == undef:
        # Fix: message previously said 'Metric not defined', which pointed
        # users at the wrong setter.
        raise ValueError('Dendro: Reference metric not defined.')
    if inv_ref_metric == undef:
        # method : ('GE', 'LU', or 'ADJ')
        inv_ref_metric = simplify(ref_metric.inv('ADJ'))
    return inv_ref_metric
def get_first_christoffel():
    """
    Computes and returns the first Christoffel symbols
    C1[k, i, j] = 0.5 * (d_j g_ki + d_i g_kj - d_k g_ij),
    cached in the global C1. Assumes the metric has been set. e.g.,

        dendro.set_metric(gt);
        C1 = dendro.get_first_christoffel();
    """
    global metric, inv_metric, undef, C1, d
    if inv_metric == undef:
        get_inverse_metric()
    if C1 == undef:
        # allocate a 3x3x3 array; every entry is overwritten below
        C1 = MutableDenseNDimArray(range(27), (3, 3, 3))
        for k in e_i:
            for j in e_i:
                for i in e_i:
                    C1[k, i, j] = 0.5 * (d(j, metric[k, i]) + d(i, metric[k, j]) - d(k, metric[i, j]))
    return C1
def get_second_christoffel():
    """
    Computes and returns the second Christoffel symbols, obtained by
    contracting the inverse metric with C1; the result is cached in the
    global C2. Assumes the metric has been set; will compute the first
    Christoffel symbols if not already computed. e.g.,

        dendro.set_metric(gt);
        C2 = dendro.get_second_christoffel();
    """
    global C2, C1, inv_metric
    if C2 == undef:
        if C1 == undef:
            get_first_christoffel()
        # contract g^{il} with C1[l, j, k] over the shared index
        igt_t = Array(inv_metric, (3, 3))
        C2 = tensorcontraction(tensorproduct(igt_t, C1), (1, 2))
    return C2
def get_complete_christoffel(chi):
    """
    Computes and returns the complete Christoffel symbols C3, built from
    the conformal C2 plus correction terms in the conformal factor *chi*.
    Assumes the metric has been set; will compute the first/second
    Christoffel symbols if not already computed. e.g.,

        dendro.set_metric(gt);
        C2_spatial = dendro.get_complete_christoffel(chi);
    """
    global metric, inv_metric, undef, C1, C2, C3, d
    if C3 == undef:
        # allocate a 3x3x3 array; every entry is overwritten below
        C3 = MutableDenseNDimArray(range(27), (3, 3, 3))
        if C2 == undef:
            get_second_christoffel()
        for k in e_i:
            for j in e_i:
                for i in e_i:
                    C3[i, j, k] = C2[i, j, k] - 0.5/(chi)*(KroneckerDelta(i, j) * d(k, chi) +
                                                           KroneckerDelta(i, k) * d(j, chi) -
                                                           metric[j, k]*sum([inv_metric[i, m]*d(m, chi) for m in e_i])
                                                           )
    return C3
def compute_ricci(Gt, chi):
    """
    Computes the Ricci tensor. e.g.,

        dendro.set_metric(gt)
        R = dendro.compute_ricci(Gt, chi)

    The conformal connection coefficient *Gt* and the conformal variable
    *chi* need to be supplied.

    Returns [Rt + Rphi, Rt, Rphi, CalGt]: the full Ricci tensor, its
    conformal ("tilde") part, the chi contribution, and the contracted
    Christoffel CalGt.
    """
    global metric, inv_metric, C1, C2
    # NOTE(review): Lchi is not referenced below, but laplacian_conformal
    # initialises the global inv_metric as a side effect -- confirm that
    # before removing this call.
    Lchi = laplacian_conformal(chi)
    DKchiDkchi = Matrix([0.25/chi/chi*metric[i, j]*sum([sum([inv_metric[k, l]*d(l, chi) for l in e_i])*d(k, chi) for k in e_i]) for i, j in e_ij])
    # Contracted conformal Christoffel: CalGt^i = g^{kl} C2^i_{kl}.
    CalGt = [sum(inv_metric[k,l]*C2[i,k,l] for k, l in e_ij) for i in e_i]
    # Conformal ("tilde") part of the Ricci tensor.
    Rt = Matrix([-0.5*sum([inv_metric[l, m]*d2(l, m, metric[i, j]) for l, m in e_ij]) +
                 0.5*sum([metric[k,i]*d(j, Gt[k]) + metric[k,j]*d(i, Gt[k]) for k in e_i]) +
                 0.5*sum([CalGt[k]*(C1[i,j,k] + C1[j,i,k]) for k in e_i]) +
                 sum([inv_metric[l,m]*(C2[k,l,i]*C1[j,k,m] + C2[k,l,j]*C1[i,k,m] + C2[k,i,m]*C1[k,l,j])
                      for k in e_i for l,m in e_ij]) for i,j in e_ij])
    # Contribution of the conformal factor chi to the Ricci tensor.
    xRphi = Matrix( [ 1/(2*chi)*(d2(i,j,chi) -
                     sum(C2[k,j,i]*d(k,chi) for k in e_i)) -
                     1/(4*chi*chi)*d(i,chi)*d(j,chi) for i, j in e_ij]).reshape(3,3)
    Rphi = xRphi + Matrix( [
           1/(2*chi)*metric[i,j] * ( sum(inv_metric[k,l]*(d2(k,l,chi) -
                                     3/(2*chi)*d(k,chi)*d(l,chi)) for k, l in e_ij) -
                                     sum(CalGt[m]*d(m,chi) for m in e_i))
           for i, j in e_ij ] ).reshape(3,3)
    return [Rt.reshape(3, 3) + Rphi, Rt.reshape(3,3), Rphi, CalGt]
##########################################################################
# code generation function
##########################################################################
def generate(ex, vnames, idx):
"""
Generate the C++ code by simplifying the expressions.
"""
# print(ex)
mi = [0, 1, 2, 4, 5, 8]
midx = ['00', '01', '02', '11', '12', '22']
# total number of expressions
# print("--------------------------------------------------------")
num_e = 0
lexp = []
lname = []
for i, e in enumerate(ex):
if type(e) == list:
num_e = num_e + len(e)
for j, ev in enumerate(e):
lexp.append(ev)
lname.append(vnames[i]+repr(j)+idx)
elif type(e) == Matrix:
num_e = num_e + len(e)
for j, k in enumerate(mi):
lexp.append(e[k])
lname.append(vnames[i]+midx[j]+idx)
else:
num_e = num_e + 1
lexp.append(e)
lname.append(vnames[i]+idx)
# print(num_e)
# print(len(lname))
print('// Dendro: {{{ ')
print('// Dendro: original ops: ', count_ops(lexp))
# print("--------------------------------------------------------")
# print("Now trying Common Subexpression Detection and Collection")
# print("--------------------------------------------------------")
# Common Subexpression Detection and Collection
# for i in range(len(ex)):
# # print("--------------------------------------------------------")
# # print(ex[i])
# # print("--------------------------------------------------------")
# ee_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))
# ee_syms = numbered_symbols(prefix=ee_name)
# _v = cse(ex[i],symbols=ee_syms)
# # print(type(_v))
# for (v1,v2) in _v[0]:
# print("double %s = %s;" % (v1, v2))
# print("%s = %s" % (vnames[i], _v[1][0]))
#mex = Matrix(ex)
ee_name = 'DENDRO_' #''.join(random.choice(string.ascii_uppercase) for _ in range(5))
ee_syms = numbered_symbols(prefix=ee_name)
_v = cse(lexp, symbols=ee_syms, optimizations='basic')
custom_functions = {'grad': 'grad', 'grad2': 'grad2', 'agrad': 'agrad', 'kograd': 'kograd'}
rops=0
print('// Dendro: printing temp variables')
for (v1, v2) in _v[0]:
# print("double %s = %s;" % (v1, v2)) # replace_pow(v2)))
print('double ', end='')
#print_ccode(v2, assign_to=v1, user_functions=custom_functions)
print(change_deriv_names(ccode(v2, assign_to=v1, user_functions=custom_functions)))
rops = rops + count_ops(v2)
print()
print('// Dendro: printing variables')
for i, e in enumerate(_v[1]):
print("//--")
# print("%s = %s;" % (lname[i], e)) # replace_pow(e)))
#f = open(str(lname[i])+'.gv','w')
#print(dotprint(e), file=f)
#f.close()
#print_ccode(e, assign_to=lname[i], user_functions=custom_functions)
print(change_deriv_names(ccode(e, assign_to=lname[i], user_functions=custom_functions)))
rops = | |
# test_objects.py -- tests for objects.py
# Copyright (C) 2007 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Tests for git base objects."""
# TODO: Round-trip parse-serialize-parse and serialize-parse-serialize tests.
from cStringIO import StringIO
import datetime
import os
import stat
import warnings
from dulwich.errors import (
ObjectFormatException,
)
from dulwich._compat import (
permutations,
)
from dulwich.objects import (
Blob,
Tree,
Commit,
ShaFile,
Tag,
format_timezone,
hex_to_sha,
sha_to_hex,
hex_to_filename,
check_hexsha,
check_identity,
parse_timezone,
TreeEntry,
parse_tree,
_parse_tree_py,
sorted_tree_items,
_sorted_tree_items_py,
)
from dulwich.tests import (
TestCase,
)
from utils import (
make_commit,
make_object,
functest_builder,
ext_functest_builder,
)
# Hex shas of the fixture objects stored under the tests' data directory.
a_sha = '6f670c0fb53f9463760b7295fbb814e965fb20c8'
b_sha = '2969be3e8ee1c0222396a5611407e4769f14e54b'
c_sha = '954a536f7819d40e6f637f849ee187dd10066349'
tree_sha = '70c190eb48fa8bbb50ddc692a17b44cb781af7f6'
tag_sha = '71033db03a03c6a36721efcf1968dd8f8e0cf023'
class TestHexToSha(TestCase):
    """Round-trip conversion between hex and binary sha representations."""
    def test_simple(self):
        binary = "\xab\xcd" * 10
        self.assertEqual(binary, hex_to_sha("abcd" * 10))
    def test_reverse(self):
        hexstr = "abcd" * 10
        self.assertEqual(hexstr, sha_to_hex("\xab\xcd" * 10))
class BlobReadTests(TestCase):
    """Test decompression of blobs (and other objects read from disk)."""
    def get_sha_file(self, cls, base, sha):
        # Load the loose object with the given sha from tests/data/<base>.
        dir = os.path.join(os.path.dirname(__file__), 'data', base)
        return cls.from_path(hex_to_filename(dir, sha))
    def get_blob(self, sha):
        """Return the blob named sha from the test data dir"""
        return self.get_sha_file(Blob, 'blobs', sha)
    def get_tree(self, sha):
        # Return the tree named sha from the test data dir.
        return self.get_sha_file(Tree, 'trees', sha)
    def get_tag(self, sha):
        # Return the tag named sha from the test data dir.
        return self.get_sha_file(Tag, 'tags', sha)
    def commit(self, sha):
        # Return the commit named sha from the test data dir.
        return self.get_sha_file(Commit, 'commits', sha)
    def test_decompress_simple_blob(self):
        b = self.get_blob(a_sha)
        self.assertEqual(b.data, 'test 1\n')
        self.assertEqual(b.sha().hexdigest(), a_sha)
    def test_hash(self):
        # Objects hash by their id.
        b = self.get_blob(a_sha)
        self.assertEqual(hash(b.id), hash(b))
    def test_parse_empty_blob_object(self):
        # The well-known sha of the empty blob.
        sha = 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'
        b = self.get_blob(sha)
        self.assertEqual(b.data, '')
        self.assertEqual(b.id, sha)
        self.assertEqual(b.sha().hexdigest(), sha)
    def test_create_blob_from_string(self):
        string = 'test 2\n'
        b = Blob.from_string(string)
        self.assertEqual(b.data, string)
        self.assertEqual(b.sha().hexdigest(), b_sha)
    def test_legacy_from_file(self):
        # Round-trip through the legacy (fully zlib-deflated) object format.
        b1 = Blob.from_string("foo")
        b_raw = b1.as_legacy_object()
        b2 = b1.from_file(StringIO(b_raw))
        self.assertEqual(b1, b2)
    def test_chunks(self):
        string = 'test 5\n'
        b = Blob.from_string(string)
        self.assertEqual([string], b.chunked)
    def test_set_chunks(self):
        b = Blob()
        b.chunked = ['te', 'st', ' 5\n']
        self.assertEqual('test 5\n', b.data)
        b.chunked = ['te', 'st', ' 6\n']
        self.assertEqual('test 6\n', b.as_raw_string())
    def test_parse_legacy_blob(self):
        string = 'test 3\n'
        b = self.get_blob(c_sha)
        self.assertEqual(b.data, string)
        self.assertEqual(b.sha().hexdigest(), c_sha)
    def test_eq(self):
        blob1 = self.get_blob(a_sha)
        blob2 = self.get_blob(a_sha)
        self.assertEqual(blob1, blob2)
    def test_read_tree_from_file(self):
        # 33188 == 0100644: a regular non-executable file mode.
        t = self.get_tree(tree_sha)
        self.assertEqual(t.items()[0], ('a', 33188, a_sha))
        self.assertEqual(t.items()[1], ('b', 33188, b_sha))
    def test_read_tag_from_file(self):
        t = self.get_tag(tag_sha)
        self.assertEqual(t.object,
                         (Commit, '51b668fd5bf7061b7d6fa525f88803e6cfadaa51'))
        self.assertEqual(t.name,'signed')
        self.assertEqual(t.tagger,'<NAME> <<EMAIL>>')
        self.assertEqual(t.tag_time, 1231203091)
        self.assertEqual(t.message, 'This is a signed tag\n-----BEGIN PGP SIGNATURE-----\nVersion: GnuPG v1.4.9 (GNU/Linux)\n\niEYEABECAAYFAkliqx8ACgkQqSMmLy9u/kcx5ACfakZ9NnPl02tOyYP6pkBoEkU1\n5EcAn0UFgokaSvS371Ym/4W9iJj6vh3h\n=ql7y\n-----END PGP SIGNATURE-----\n')
    def test_read_commit_from_file(self):
        sha = '60dacdc733de308bb77bb76ce0fb0f9b44c9769e'
        c = self.commit(sha)
        self.assertEqual(c.tree, tree_sha)
        self.assertEqual(c.parents,
                         ['0d89f20333fbb1d2f3a94da77f4981373d8f4310'])
        self.assertEqual(c.author,
                         '<NAME> <<EMAIL>>')
        self.assertEqual(c.committer,
                         '<NAME> <<EMAIL>>')
        self.assertEqual(c.commit_time, 1174759230)
        self.assertEqual(c.commit_timezone, 0)
        self.assertEqual(c.author_timezone, 0)
        self.assertEqual(c.message, 'Test commit\n')
    def test_read_commit_no_parents(self):
        sha = '0d89f20333fbb1d2f3a94da77f4981373d8f4310'
        c = self.commit(sha)
        self.assertEqual(c.tree, '90182552c4a85a45ec2a835cadc3451bebdfe870')
        self.assertEqual(c.parents, [])
        self.assertEqual(c.author,
                         '<NAME> <<EMAIL>>')
        self.assertEqual(c.committer,
                         '<NAME> <<EMAIL>>')
        self.assertEqual(c.commit_time, 1174758034)
        self.assertEqual(c.commit_timezone, 0)
        self.assertEqual(c.author_timezone, 0)
        self.assertEqual(c.message, 'Test commit\n')
    def test_read_commit_two_parents(self):
        sha = '5dac377bdded4c9aeb8dff595f0faeebcc8498cc'
        c = self.commit(sha)
        self.assertEqual(c.tree, 'd80c186a03f423a81b39df39dc87fd269736ca86')
        self.assertEqual(c.parents, ['ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd',
                                     '4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'])
        self.assertEqual(c.author,
                         '<NAME> <<EMAIL>>')
        self.assertEqual(c.committer,
                         '<NAME> <<EMAIL>>')
        self.assertEqual(c.commit_time, 1174773719)
        self.assertEqual(c.commit_timezone, 0)
        self.assertEqual(c.author_timezone, 0)
        self.assertEqual(c.message, 'Merge ../b\n')
    def test_stub_sha(self):
        # A commit created with an explicit id keeps that id even though
        # it differs from the sha of its contents.
        sha = '5' * 40
        c = make_commit(id=sha, message='foo')
        self.assertTrue(isinstance(c, Commit))
        self.assertEqual(sha, c.id)
        self.assertNotEqual(sha, c._make_sha())
class ShaFileCheckTests(TestCase):
    """Mixin providing helpers to assert object check() outcomes."""
    def assertCheckFails(self, cls, data):
        # Parsing and checking the data must raise ObjectFormatException.
        instance = cls()
        def parse_and_check():
            instance.set_raw_string(data)
            instance.check()
        self.assertRaises(ObjectFormatException, parse_and_check)
    def assertCheckSucceeds(self, cls, data):
        # check() returns None on well-formed objects.
        instance = cls()
        instance.set_raw_string(data)
        self.assertEqual(None, instance.check())
# A loose tag object deflated with a smaller-than-usual zlib window,
# which produces a different header byte; exercised by ShaFileTests.
small_buffer_zlib_object = (
  "\x48\x89\x15\xcc\x31\x0e\xc2\x30\x0c\x40\x51\xe6"
  "\x9c\xc2\x3b\xaa\x64\x37\xc4\xc1\x12\x42\x5c\xc5"
  "\x49\xac\x52\xd4\x92\xaa\x78\xe1\xf6\x94\xed\xeb"
  "\x0d\xdf\x75\x02\xa2\x7c\xea\xe5\x65\xd5\x81\x8b"
  "\x9a\x61\xba\xa0\xa9\x08\x36\xc9\x4c\x1a\xad\x88"
  "\x16\xba\x46\xc4\xa8\x99\x6a\x64\xe1\xe0\xdf\xcd"
  "\xa0\xf6\x75\x9d\x3d\xf8\xf1\xd0\x77\xdb\xfb\xdc"
  "\x86\xa3\x87\xf1\x2f\x93\xed\x00\xb7\xc7\xd2\xab"
  "\x2e\xcf\xfe\xf1\x3b\x50\xa4\x91\x53\x12\x24\x38"
  "\x23\x21\x86\xf0\x03\x2f\x91\x24\x52"
  )
class ShaFileTests(TestCase):
    """Tests for generic ShaFile deserialization behaviour."""
    def test_deflated_smaller_window_buffer(self):
        # zlib on some systems uses smaller buffers,
        # resulting in a different header.
        # See https://github.com/libgit2/libgit2/pull/464
        sf = ShaFile.from_file(StringIO(small_buffer_zlib_object))
        self.assertEqual(sf.type_name, "tag")
        self.assertEqual(sf.tagger, " <@localhost>")
class CommitSerializationTests(TestCase):
    """Tests for serializing Commit objects, including mergetag headers.

    NOTE(review): `default_message` is a module-level constant defined
    elsewhere in this file (outside this excerpt).
    """
    def make_commit(self, **kwargs):
        # Baseline two-parent commit; individual tests override fields
        # via keyword arguments.
        attrs = {'tree': 'd80c186a03f423a81b39df39dc87fd269736ca86',
                 'parents': ['ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd',
                             '4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'],
                 'author': 'James Westby <<EMAIL>>',
                 'committer': '<NAME> <<EMAIL>>',
                 'commit_time': 1174773719,
                 'author_time': 1174773719,
                 'commit_timezone': 0,
                 'author_timezone': 0,
                 'message': 'Merge ../b\n'}
        attrs.update(kwargs)
        return make_commit(**attrs)
    def test_encoding(self):
        c = self.make_commit(encoding='iso8859-1')
        self.assertTrue('encoding iso8859-1\n' in c.as_raw_string())
    def test_short_timestamp(self):
        c = self.make_commit(commit_time=30)
        c1 = Commit()
        c1.set_raw_string(c.as_raw_string())
        self.assertEqual(30, c1.commit_time)
    def test_raw_length(self):
        c = self.make_commit()
        self.assertEqual(len(c.as_raw_string()), c.raw_length())
    def test_simple(self):
        c = self.make_commit()
        self.assertEqual(c.id, '5dac377bdded4c9aeb8dff595f0faeebcc8498cc')
        self.assertEqual(
            'tree d80c186a03f423a81b39df39dc87fd269736ca86\n'
            'parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd\n'
            'parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6\n'
            'author <NAME> <<EMAIL>> '
            '1174773719 +0000\n'
            'committer <NAME> <<EMAIL>> '
            '1174773719 +0000\n'
            '\n'
            'Merge ../b\n', c.as_raw_string())
    def test_timezone(self):
        c = self.make_commit(commit_timezone=(5 * 60))
        self.assertTrue(" +0005\n" in c.as_raw_string())
    def test_neg_timezone(self):
        c = self.make_commit(commit_timezone=(-1 * 3600))
        self.assertTrue(" -0100\n" in c.as_raw_string())
    def test_deserialize(self):
        c = self.make_commit()
        d = Commit()
        d._deserialize(c.as_raw_chunks())
        self.assertEqual(c, d)
    def test_serialize_mergetag(self):
        tag = make_object(
            Tag, object=(Commit, "a38d6181ff27824c79fc7df825164a212eff6a3f"),
            object_type_name="commit",
            name="v2.6.22-rc7",
            tag_time=1183319674,
            tag_timezone=0,
            tagger="<NAME> <<EMAIL>>",
            message=default_message)
        commit = self.make_commit(mergetag=[tag])
        self.assertEqual("""tree d80c186a03f423a81b39df39dc87fd269736ca86
parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd
parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6
author <NAME> <<EMAIL>> 1174773719 +0000
committer <NAME> <<EMAIL>> 1174773719 +0000
mergetag object a38d6181ff27824c79fc7df825164a212eff6a3f
type commit
tag v2.6.22-rc7
tagger <NAME> <<EMAIL>> 1183319674 +0000
Linux 2.6.22-rc7
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql
OK2XeQOiEeXtT76rV4t2WR4=
=ivrA
-----END PGP SIGNATURE-----
Merge ../b
""", commit.as_raw_string())
    def test_serialize_mergetags(self):
        tag = make_object(
            Tag, object=(Commit, "a38d6181ff27824c79fc7df825164a212eff6a3f"),
            object_type_name="commit",
            name="v2.6.22-rc7",
            tag_time=1183319674,
            tag_timezone=0,
            tagger="<NAME> <<EMAIL>>",
            message=default_message)
        commit = self.make_commit(mergetag=[tag, tag])
        self.assertEqual("""tree d80c186a03f423a81b39df39dc87fd269736ca86
parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd
parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6
author <NAME> <<EMAIL>> 1174773719 +0000
committer <NAME> <<EMAIL>> 1174773719 +0000
mergetag object a38d6181ff27824c79fc7df825164a212eff6a3f
type commit
tag v2.6.22-rc7
tagger <NAME> <<EMAIL>> 1183319674 +0000
Linux 2.6.22-rc7
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql
OK2XeQOiEeXtT76rV4t2WR4=
=ivrA
-----END PGP SIGNATURE-----
mergetag object a38d6181ff27824c79fc7df825164a212eff6a3f
type commit
tag v2.6.22-rc7
tagger <NAME> <<EMAIL>> 1183319674 +0000
Linux 2.6.22-rc7
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql
OK2XeQOiEeXtT76rV4t2WR4=
=ivrA
-----END PGP SIGNATURE-----
Merge ../b
""", commit.as_raw_string())
    def test_deserialize_mergetag(self):
        tag = make_object(
            Tag, object=(Commit, "a38d6181ff27824c79fc7df825164a212eff6a3f"),
            object_type_name="commit",
            name="v2.6.22-rc7",
            tag_time=1183319674,
            tag_timezone=0,
            tagger="<NAME> <<EMAIL>>",
            message=default_message)
        commit = self.make_commit(mergetag=[tag])
        d = Commit()
        d._deserialize(commit.as_raw_chunks())
        self.assertEqual(commit, d)
    def test_deserialize_mergetags(self):
        tag = make_object(
            Tag, object=(Commit, "a38d6181ff27824c79fc7df825164a212eff6a3f"),
            object_type_name="commit",
            name="v2.6.22-rc7",
            tag_time=1183319674,
            tag_timezone=0,
            tagger="<NAME> <<EMAIL>>",
            message=default_message)
        commit = self.make_commit(mergetag=[tag, tag])
        d = Commit()
        d._deserialize(commit.as_raw_chunks())
        # Fix: use assertEqual; assertEquals is a deprecated alias and the
        # rest of this file consistently uses assertEqual.
        self.assertEqual(commit, d)
# Identity + timestamp string reused as both author and committer below.
default_committer = '<NAME> <<EMAIL>> 1174773719 +0000'
class CommitParseTests(ShaFileCheckTests):
    """Tests for parsing and checking textual commit representations."""
    def make_commit_lines(self,
                          tree='d80c186a03f423a81b39df39dc87fd269736ca86',
                          parents=['ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd',
                                   '4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'],
                          author=default_committer,
                          committer=default_committer,
                          encoding=None,
                          message='Merge ../b\n',
                          extra=None):
        # Build the header/message lines of a commit; any field can be
        # omitted by passing None.  NOTE(review): the mutable default for
        # `parents` is shared across calls, but is never mutated here.
        lines = []
        if tree is not None:
            lines.append('tree %s' % tree)
        if parents is not None:
            lines.extend('parent %s' % p for p in parents)
        if author is not None:
            lines.append('author %s' % author)
        if committer is not None:
            lines.append('committer %s' % committer)
        if encoding is not None:
            lines.append('encoding %s' % encoding)
        if extra is not None:
            # iteritems: this file targets Python 2.
            for name, value in sorted(extra.iteritems()):
                lines.append('%s %s' % (name, value))
        lines.append('')
        if message is not None:
            lines.append(message)
        return lines
    def make_commit_text(self, **kwargs):
        return '\n'.join(self.make_commit_lines(**kwargs))
    def test_simple(self):
        c = Commit.from_string(self.make_commit_text())
        self.assertEqual('Merge ../b\n', c.message)
        self.assertEqual('<NAME> <<EMAIL>>', c.author)
        self.assertEqual('<NAME> <<EMAIL>>',
                         c.committer)
        self.assertEqual('d80c186a03f423a81b39df39dc87fd269736ca86', c.tree)
        self.assertEqual(['ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd',
                          '4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'],
                         c.parents)
        expected_time = datetime.datetime(2007, 3, 24, 22, 1, 59)
        self.assertEqual(expected_time,
                         datetime.datetime.utcfromtimestamp(c.commit_time))
        self.assertEqual(0, c.commit_timezone)
        self.assertEqual(expected_time,
                         datetime.datetime.utcfromtimestamp(c.author_time))
        self.assertEqual(0, c.author_timezone)
        self.assertEqual(None, c.encoding)
    def test_custom(self):
        # Unknown header fields are preserved in c.extra.
        c = Commit.from_string(self.make_commit_text(
            extra={'extra-field': 'data'}))
        self.assertEqual([('extra-field', 'data')], c.extra)
    def test_encoding(self):
        c = Commit.from_string(self.make_commit_text(encoding='UTF-8'))
        self.assertEqual('UTF-8', c.encoding)
    def test_check(self):
        self.assertCheckSucceeds(Commit, self.make_commit_text())
        self.assertCheckSucceeds(Commit, self.make_commit_text(parents=None))
        self.assertCheckSucceeds(Commit,
                                 self.make_commit_text(encoding='UTF-8'))
        self.assertCheckFails(Commit, self.make_commit_text(tree='xxx'))
        self.assertCheckFails(Commit, self.make_commit_text(
            parents=[a_sha, 'xxx']))
        bad_committer = "some guy without an email address 1174773719 +0000"
        self.assertCheckFails(Commit,
                              self.make_commit_text(committer=bad_committer))
        self.assertCheckFails(Commit,
                              self.make_commit_text(author=bad_committer))
        self.assertCheckFails(Commit, self.make_commit_text(author=None))
        self.assertCheckFails(Commit, self.make_commit_text(committer=None))
        self.assertCheckFails(Commit, self.make_commit_text(
            author=None, committer=None))
    def test_check_duplicates(self):
        # duplicate each of the header fields
        for i in xrange(5):
            lines = self.make_commit_lines(parents=[a_sha], encoding='UTF-8')
            lines.insert(i, lines[i])
            text = '\n'.join(lines)
            if lines[i].startswith('parent'):
                # duplicate parents are ok for now
                self.assertCheckSucceeds(Commit, text)
            else:
                self.assertCheckFails(Commit, text)
    def test_check_order(self):
        lines = self.make_commit_lines(parents=[a_sha], encoding='UTF-8')
        headers = lines[:5]
        rest = lines[5:]
        # of all possible permutations, ensure only the original succeeds
        for perm in permutations(headers):
            perm = list(perm)
            text = '\n'.join(perm + rest)
            if perm == headers:
                self.assertCheckSucceeds(Commit, text)
            else:
                self.assertCheckFails(Commit, text)
# Unsorted tree entries and the same entries in git's canonical tree order:
# directories sort as if their name had a trailing '/', hence 'a.c' < 'a' <
# 'a/c'.  (0100755 is a Python 2 octal literal.)
_TREE_ITEMS = {
  'a.c': (0100755, 'd80c186a03f423a81b39df39dc87fd269736ca86'),
  'a': (stat.S_IFDIR, 'd80c186a03f423a81b39df39dc87fd269736ca86'),
  'a/c': (stat.S_IFDIR, 'd80c186a03f423a81b39df39dc87fd269736ca86'),
  }
_SORTED_TREE_ITEMS = [
  TreeEntry('a.c', 0100755, 'd80c186a03f423a81b39df39dc87fd269736ca86'),
  TreeEntry('a', stat.S_IFDIR, 'd80c186a03f423a81b39df39dc87fd269736ca86'),
  TreeEntry('a/c', stat.S_IFDIR, 'd80c186a03f423a81b39df39dc87fd269736ca86'),
  ]
class TreeTests(ShaFileCheckTests):
    def test_add(self):
        """Tree.add(name, mode, sha) stores and serializes the entry."""
        myhexsha = "d80c186a03f423a81b39df39dc87fd269736ca86"
        x = Tree()
        x.add("myname", 0100755, myhexsha)
        self.assertEqual(x["myname"], (0100755, myhexsha))
        self.assertEqual('100755 myname\0' + hex_to_sha(myhexsha),
                         x.as_raw_string())
    def test_add_old_order(self):
        """The deprecated add(mode, name, sha) argument order still works."""
        myhexsha = "d80c186a03f423a81b39df39dc87fd269736ca86"
        x = Tree()
        # Silence the DeprecationWarning the old argument order emits.
        warnings.simplefilter("ignore", DeprecationWarning)
        try:
            x.add(0100755, "myname", myhexsha)
        finally:
            warnings.resetwarnings()
        self.assertEqual(x["myname"], (0100755, myhexsha))
        self.assertEqual('100755 myname\0' + hex_to_sha(myhexsha),
                         x.as_raw_string())
    def test_simple(self):
        """Item assignment serializes like add()."""
        myhexsha = "d80c186a03f423a81b39df39dc87fd269736ca86"
        x = Tree()
        x["myname"] = (0100755, myhexsha)
        self.assertEqual('100755 myname\0' + hex_to_sha(myhexsha),
                         x.as_raw_string())
    def test_tree_update_id(self):
        """Mutating a tree invalidates and recomputes its cached id."""
        x = Tree()
        x["a.c"] = (0100755, "d80c186a03f423a81b39df39dc87fd269736ca86")
        self.assertEqual("0c5c6bc2c081accfbc250331b19e43b904ab9cdd", x.id)
        x["a.b"] = (stat.S_IFDIR, "d80c186a03f423a81b39df39dc87fd269736ca86")
        self.assertEqual("07bfcb5f3ada15bbebdfa3bbb8fd858a363925c8", x.id)
def test_tree_iteritems_dir_sort(self):
x = | |
# Repository: RangeKing/PaddleViT
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ConvNeXt train and eval using multiple GPU"""
import sys
import os
import time
import argparse
import random
import math
import numpy as np
import paddle
from datasets import get_dataloader
from datasets import get_dataset
from config import get_config
from config import update_config
from utils import AverageMeter
from utils import get_logger
from utils import write_log
from utils import all_reduce_mean
from utils import skip_weight_decay_fn
from mixup import Mixup
from model_ema import ModelEma
from losses import LabelSmoothingCrossEntropyLoss
from losses import SoftTargetCrossEntropyLoss
from convnext import build_convnext as build_model
def get_arguments():
    """Return parsed command-line arguments.

    These will overwrite the config by (1) yaml file (2) argument values.
    All options default to None / False so that only flags the user actually
    passed override the yaml config in update_config().
    """
    parser = argparse.ArgumentParser('ConvNeXt')
    parser.add_argument('-cfg', type=str, default=None, help='path to the yaml config file')
    parser.add_argument('-dataset', type=str, default=None, help='dataset name')
    parser.add_argument('-data_path', type=str, default=None, help='path to the dataset root folder')
    parser.add_argument('-output', type=str, default=None, help='output / checkpoint folder')
    parser.add_argument('-batch_size', type=int, default=None, help='per-GPU training batch size')
    parser.add_argument('-batch_size_eval', type=int, default=None, help='per-GPU evaluation batch size')
    parser.add_argument('-image_size', type=int, default=None, help='input image size')
    parser.add_argument('-accum_iter', type=int, default=None, help='gradient accumulation steps')
    parser.add_argument('-pretrained', type=str, default=None, help='pretrained weights to load')
    parser.add_argument('-resume', type=str, default=None, help='checkpoint to resume training from')
    parser.add_argument('-last_epoch', type=int, default=None, help='epoch to resume at')
    parser.add_argument('-eval', action='store_true', help='run evaluation only')
    parser.add_argument('-amp', action='store_true', help='enable automatic mixed precision')
    return parser.parse_args()
def train(dataloader,
          model,
          optimizer,
          criterion,
          epoch,
          total_epochs,
          total_batches,
          debug_steps=100,
          accum_iter=1,
          model_ema=None,
          mixup_fn=None,
          amp_grad_scaler=None,
          local_logger=None,
          master_logger=None):
    """Training for one epoch

    Args:
        dataloader: paddle.io.DataLoader, dataloader instance
        model: nn.Layer, a ViT model
        optimizer: nn.optimizer
        criterion: nn.XXLoss
        epoch: int, current epoch
        total_epochs: int, total num of epochs
        total_batches: int, total num of batches for one epoch
        debug_steps: int, num of iters to log info, default: 100
        accum_iter: int, num of iters for accumulating gradients, default: 1
        model_ema: ModelEma, model moving average instance
        mixup_fn: Mixup, mixup instance, default: None
        amp_grad_scaler: GradScaler, if not None pass the GradScaler and enable AMP, default: None
        local_logger: logger for local process/gpu, default: None
        master_logger: logger for main process, default: None
    Returns:
        train_loss_meter.avg: float, average loss on current process/gpu
        train_acc_meter.avg: float, average acc@1 on current process/gpu
        master_loss_meter.avg: float, average loss on all processes/gpus
        master_acc_meter.avg: float, average acc@1 on all processes/gpus
        train_time: float, training time
    """
    time_st = time.time()
    # Local meters track this process only; master meters accumulate
    # values that have been all-reduced across all processes.
    train_loss_meter = AverageMeter()
    train_acc_meter = AverageMeter()
    master_loss_meter = AverageMeter()
    master_acc_meter = AverageMeter()
    model.train()
    optimizer.clear_grad()
    for batch_id, data in enumerate(dataloader):
        # get data
        images = data[0]
        label = data[1]
        # Keep the hard labels: mixup_fn overwrites `label` with soft targets.
        label_orig = label.clone()
        batch_size = images.shape[0]
        if mixup_fn is not None:
            images, label = mixup_fn(images, label_orig)
        # forward (auto_cast is a no-op when AMP is disabled)
        with paddle.amp.auto_cast(amp_grad_scaler is not None):
            output = model(images)
            loss = criterion(output, label)
        loss_value = loss.item()
        # Abort on NaN/Inf loss: continuing would corrupt the weights.
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)
        # Scale down so gradients accumulated over `accum_iter` steps average out.
        loss = loss / accum_iter
        # backward and step
        if amp_grad_scaler is None: # fp32
            loss.backward()
            # Step only every `accum_iter` batches (or on the final batch).
            if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
                optimizer.step()
                optimizer.clear_grad()
        else: # amp
            scaled_loss = amp_grad_scaler.scale(loss)
            scaled_loss.backward()
            if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
                # amp for param group reference: https://github.com/PaddlePaddle/Paddle/issues/37188
                amp_grad_scaler.step(optimizer)
                amp_grad_scaler.update()
                optimizer.clear_grad()
        # EMA weights are only maintained on rank 0 (model_ema is created
        # under the same rank-0 condition in main_worker).
        if model_ema is not None and paddle.distributed.get_rank() == 0:
            model_ema.update(model)
        # average of output and kd_output, same as eval mode
        pred = paddle.nn.functional.softmax(output)
        # NOTE(review): with mixup active the raw `label_orig` is passed while
        # without mixup it is unsqueezed to a column vector; confirm both shapes
        # match what paddle.metric.accuracy expects.
        acc = paddle.metric.accuracy(pred,
            label_orig if mixup_fn else label_orig.unsqueeze(1)).item()
        # sync from other gpus for overall loss and acc
        master_loss = all_reduce_mean(loss_value)
        master_acc = all_reduce_mean(acc)
        master_batch_size = all_reduce_mean(batch_size)
        master_loss_meter.update(master_loss, master_batch_size)
        master_acc_meter.update(master_acc, master_batch_size)
        train_loss_meter.update(loss_value, batch_size)
        train_acc_meter.update(acc, batch_size)
        if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader):
            general_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], "
                               f"Step[{batch_id:04d}/{total_batches:04d}], "
                               f"Lr: {optimizer.get_lr():04f}, ")
            local_message = (general_message +
                             f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), "
                             f"Avg Acc: {train_acc_meter.avg:.4f}")
            master_message = (general_message +
                              f"Loss: {master_loss:.4f} ({master_loss_meter.avg:.4f}), "
                              f"Avg Acc: {master_acc_meter.avg:.4f}")
            write_log(local_logger, master_logger, local_message, master_message)
    # Synchronize all processes before reporting epoch statistics.
    paddle.distributed.barrier()
    train_time = time.time() - time_st
    return (train_loss_meter.avg,
            train_acc_meter.avg,
            master_loss_meter.avg,
            master_acc_meter.avg,
            train_time)
@paddle.no_grad()
def validate(dataloader,
             model,
             criterion,
             total_batches,
             debug_steps=100,
             local_logger=None,
             master_logger=None):
    """Validation for the whole dataset

    Args:
        dataloader: paddle.io.DataLoader, dataloader instance
        model: nn.Layer, a ViT model
        criterion: loss used to report the validation loss
        total_batches: int, total num of batches for one epoch
        debug_steps: int, num of iters to log info, default: 100
        local_logger: logger for local process/gpu, default: None
        master_logger: logger for main process, default: None
    Returns:
        val_loss_meter.avg: float, average loss on current process/gpu
        val_acc1_meter.avg: float, average top1 accuracy on current processes/gpus
        val_acc5_meter.avg: float, average top5 accuracy on current processes/gpus
        master_loss_meter.avg: float, average loss on all processes/gpus
        master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus
        master_acc5_meter.avg: float, average top5 accuracy on all processes/gpus
        val_time: float, validation time
    """
    model.eval()
    # Local meters track this process only; master meters accumulate
    # values that have been all-reduced across all processes.
    val_loss_meter = AverageMeter()
    val_acc1_meter = AverageMeter()
    val_acc5_meter = AverageMeter()
    master_loss_meter = AverageMeter()
    master_acc1_meter = AverageMeter()
    master_acc5_meter = AverageMeter()
    time_st = time.time()
    for batch_id, data in enumerate(dataloader):
        # get data
        images = data[0]
        label = data[1]
        batch_size = images.shape[0]
        output = model(images)
        loss = criterion(output, label)
        loss_value = loss.item()
        # accuracy expects labels as a column vector, hence unsqueeze(1)
        pred = paddle.nn.functional.softmax(output)
        acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item()
        acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5).item()
        # sync from other gpus for overall loss and acc
        master_loss = all_reduce_mean(loss_value)
        master_acc1 = all_reduce_mean(acc1)
        master_acc5 = all_reduce_mean(acc5)
        master_batch_size = all_reduce_mean(batch_size)
        master_loss_meter.update(master_loss, master_batch_size)
        master_acc1_meter.update(master_acc1, master_batch_size)
        master_acc5_meter.update(master_acc5, master_batch_size)
        val_loss_meter.update(loss_value, batch_size)
        val_acc1_meter.update(acc1, batch_size)
        val_acc5_meter.update(acc5, batch_size)
        if batch_id % debug_steps == 0:
            local_message = (f"Step[{batch_id:04d}/{total_batches:04d}], "
                             f"Avg Loss: {val_loss_meter.avg:.4f}, "
                             f"Avg Acc@1: {val_acc1_meter.avg:.4f}, "
                             f"Avg Acc@5: {val_acc5_meter.avg:.4f}")
            master_message = (f"Step[{batch_id:04d}/{total_batches:04d}], "
                              f"Avg Loss: {master_loss_meter.avg:.4f}, "
                              f"Avg Acc@1: {master_acc1_meter.avg:.4f}, "
                              f"Avg Acc@5: {master_acc5_meter.avg:.4f}")
            write_log(local_logger, master_logger, local_message, master_message)
    # Synchronize all processes before reporting statistics.
    paddle.distributed.barrier()
    val_time = time.time() - time_st
    return (val_loss_meter.avg,
            val_acc1_meter.avg,
            val_acc5_meter.avg,
            master_loss_meter.avg,
            master_acc1_meter.avg,
            master_acc5_meter.avg,
            val_time)
def main_worker(*args):
"""main method for each process"""
# STEP 0: Preparation
paddle.device.set_device('gpu')
paddle.distributed.init_parallel_env()
world_size = paddle.distributed.get_world_size()
local_rank = paddle.distributed.get_rank()
config = args[0]
last_epoch = config.TRAIN.LAST_EPOCH
seed = config.SEED + local_rank
paddle.seed(seed)
np.random.seed(seed)
random.seed(seed)
local_logger, master_logger = get_logger(config.SAVE)
message = (f'----- world_size = {world_size}, local_rank = {local_rank} \n'
f'----- {config}')
write_log(local_logger, master_logger, message)
# STEP 1: Create model
model = build_model(config)
# define model ema
model_ema = None
if not config.EVAL and config.TRAIN.MODEL_EMA and local_rank == 0:
model_ema = ModelEma(model, decay=config.TRAIN.MODEL_EMA_DECAY)
if config.TRAIN.MODEL_EMA_FORCE_CPU:
model_ema.to('cpu')
# STEP 2: Create train and val dataloader
if not config.EVAL:
dataset_train = args[1]
dataloader_train = get_dataloader(config, dataset_train, True, True)
total_batch_train = len(dataloader_train)
message = f'----- Total # of train batch (single gpu): {total_batch_train}'
write_log(local_logger, master_logger, message)
dataset_val = args[2]
dataloader_val = get_dataloader(config, dataset_val, False, True)
total_batch_val = len(dataloader_val)
message = f'----- Total # of val batch (single gpu): {total_batch_val}'
write_log(local_logger, master_logger, message)
# STEP 3: (Optional) Define Mixup function
mixup_fn = None
if (config.TRAIN.MIXUP_PROB > 0 or config.TRAIN.CUTMIX_ALPHA > 0 or
config.TRAIN.CUTMIX_MINMAX is not None):
mixup_fn = Mixup(mixup_alpha=config.TRAIN.MIXUP_ALPHA,
cutmix_alpha=config.TRAIN.CUTMIX_ALPHA,
cutmix_minmax=config.TRAIN.CUTMIX_MINMAX,
prob=config.TRAIN.MIXUP_PROB,
switch_prob=config.TRAIN.MIXUP_SWITCH_PROB,
mode=config.TRAIN.MIXUP_MODE,
label_smoothing=config.TRAIN.SMOOTHING)#
# STEP 4: Define loss/criterion
if mixup_fn is not None:
criterion = SoftTargetCrossEntropyLoss()
elif config.TRAIN.SMOOTHING:
criterion = LabelSmoothingCrossEntropyLoss()
else:
criterion = paddle.nn.CrossEntropyLoss()
# Use CrossEntropyLoss for val
criterion_val = paddle.nn.CrossEntropyLoss()
# STEP 5: Define optimizer and lr_scheduler
if not config.EVAL:
# set lr according to batch size and world size
if config.TRAIN.LINEAR_SCALED_LR is not None:
effective_batch_size = config.DATA.BATCH_SIZE * config.TRAIN.ACCUM_ITER * world_size
config.TRAIN.BASE_LR = (
config.TRAIN.BASE_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR
)
config.TRAIN.WARMUP_START_LR = (
config.TRAIN.WARMUP_START_LR* effective_batch_size / config.TRAIN.LINEAR_SCALED_LR
)
config.TRAIN.END_LR = (
config.TRAIN.END_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR
)
message = (f'Base lr is scaled to: {config.TRAIN.BASE_LR}, '
f'warmup start lr is scaled to: {config.TRAIN.BASE_LR}, '
f'end lr is scaled to: {config.TRAIN.BASE_LR}')
write_log(local_logger, master_logger, message)
# define scaler for amp training
amp_grad_scaler = paddle.amp.GradScaler() if config.AMP else None
# warmup + cosine lr scheduler
if config.TRAIN.WARMUP_EPOCHS > 0:
cosine_lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
learning_rate=config.TRAIN.BASE_LR,
T_max=config.TRAIN.NUM_EPOCHS - config.TRAIN.WARMUP_EPOCHS,
eta_min=config.TRAIN.END_LR,
last_epoch=-1) # do not set last epoch, handled in warmup sched get_lr()
lr_scheduler = paddle.optimizer.lr.LinearWarmup(
learning_rate=cosine_lr_scheduler, # use cosine lr sched after warmup
warmup_steps=config.TRAIN.WARMUP_EPOCHS, # only support position integet
start_lr=config.TRAIN.WARMUP_START_LR,
end_lr=config.TRAIN.BASE_LR,
last_epoch=config.TRAIN.LAST_EPOCH)
else:
lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
learning_rate=config.TRAIN.BASE_LR,
T_max=config.TRAIN.NUM_EPOCHS,
eta_min=config.TRAIN.END_LR,
last_epoch=config.TRAIN.LAST_EPOCH)
# set gradient clip
if config.TRAIN.GRAD_CLIP:
clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)
else:
clip = None
# set optimizer
optimizer = paddle.optimizer.AdamW(
parameters=model.parameters(),
learning_rate=lr_scheduler, # set to scheduler
beta1=config.TRAIN.OPTIMIZER.BETAS[0],
beta2=config.TRAIN.OPTIMIZER.BETAS[1],
weight_decay=config.TRAIN.WEIGHT_DECAY,
epsilon=config.TRAIN.OPTIMIZER.EPS,
grad_clip=clip,
apply_decay_param_fun=skip_weight_decay_fn(
model, # skip bn and bias
['position_embedding', 'cls_token', 'dist_token']), # skip custom ops
)
# STEP 6: (Optional) Load pretrained model weights for evaluation or finetuning
if | |
'''Our interval is 200 Myr'''
# Prerequisites
# For this test we're not going to use the default data
self.classifier.t_m = 150.0
self.classifier.dt = np.array([
[ 50., 50., 50., 50., 50., ],
[ 50., 50., 50., 50., 50., ],
[ 50., 50., 50., 50., 50., ],
[ 50., 50., 50., 50., 50., ],
[ 50., 50., 50., 50., 50., ],
[ 50., 50., 50., 50., 50., ],
])
self.classifier.is_before_first_acc = np.array([
[ 0, 1, 1, 1, 1, ], # Merger, except in early snapshots
[ 0, 1, 1, 1, 1, ], # Another merger
[ 0, 1, 1, 1, 1, ], # Mass transfer
[ 0, 0, 1, 1, 1, ], # Another test
[ 0, 0, 0, 0, 0, ], # Always part of main galaxy
[ 0, 0, 0, 1, 1, ], # CGM -> main galaxy -> CGM
]).astype( bool )
self.classifier.is_in_other_gal = np.array([
[ 0, 1, 1, 1, 1, 0, ], # Merger, except in early snapshots
[ 0, 0, 0, 1, 1, 1, ], # Another merger
[ 0, 1, 0, 0, 1, 0, ], # Mass transfer
[ 0, 0, 1, 0, 1, 1, ], # Another test
[ 0, 0, 0, 0, 0, 0, ], # Always part of main galaxy
[ 0, 0, 0, 0, 0, 0, ], # CGM -> main galaxy -> CGM
]).astype( bool )
# Correct the number of snapshots, accordingly
self.classifier.n_snap = self.classifier.is_in_other_gal.shape[1]
expected = np.array([
150., # Merger, except in early snapshots
50., # Another merger
50., # Mass transfer
100., # Another test
0., # Always part of main galaxy
0., # CGM -> main galaxy -> CGM
])
actual = self.classifier.get_time_in_other_gal_before_acc_during_interval()
npt.assert_allclose( expected, actual, rtol=1e-3 )
#########################################################################
def test_identify_unaccreted(self):
    """Particles with no snapshot in the main galaxy are flagged unaccreted."""
    # Membership history for four particles across five snapshots.
    self.classifier.is_in_main_gal = np.array(
        [
            [0, 0, 0, 0, 0],  # never accreted onto the main galaxy
            [1, 0, 0, 0, 0],  # merger, except in early snapshots
            [1, 1, 1, 1, 1],  # always part of main galaxy
            [0, 1, 0, 0, 0],  # CGM -> main galaxy -> CGM
        ],
        dtype=bool,
    )
    result = self.classifier.identify_unaccreted()
    # Only the first particle never touches the main galaxy.
    npt.assert_allclose(np.array([1, 0, 0, 0], dtype=bool), result)
########################################################################
def test_identify_pristine(self):
    """Pristine gas is whatever is neither preprocessed nor unaccreted."""
    self.classifier.n_particle = 4
    # Flags computed upstream for: never accreted / merger /
    # always in main gal / CGM round-trip.
    self.classifier.is_preprocessed = np.array([0, 1, 0, 0], dtype=bool)
    self.classifier.is_unaccreted = np.array([1, 0, 0, 0], dtype=bool)
    result = self.classifier.identify_pristine()
    # Only the last two particles carry neither flag.
    npt.assert_allclose(np.array([0, 0, 1, 1], dtype=bool), result)
#########################################################################
def test_identify_preprocessed(self):
    """Accreted particles with long enough time in another galaxy are preprocessed."""
    self.classifier.n_particle = 4
    # Time spent in other galaxies before first accretion, per particle.
    self.classifier.time_in_other_gal_before_acc = np.array(
        [1e3, 2.404 * 1e3, 0.0, 0.0]
    )
    self.classifier.is_in_main_gal = np.array(
        [
            [0, 0, 0, 0, 0],  # never accreted onto the main galaxy
            [1, 0, 0, 0, 0],  # merger, except in early snapshots
            [1, 1, 1, 1, 1],  # always part of main galaxy
            [0, 1, 0, 0, 0],  # CGM -> main galaxy -> CGM
        ],
        dtype=bool,
    )
    self.classifier.is_before_first_acc = np.array(
        [
            [1, 1, 1, 1],  # never accreted onto the main galaxy
            [0, 0, 0, 1],  # merger, except in early snapshots
            [0, 0, 0, 1],  # always part of main galaxy
            [0, 0, 0, 1],  # CGM -> main galaxy -> CGM
        ],
        dtype=bool,
    )
    self.classifier.is_unaccreted = np.array([1, 0, 0, 0], dtype=bool)
    result = self.classifier.identify_preprocessed()
    # Only the merger particle qualifies as preprocessed.
    npt.assert_allclose(np.array([0, 1, 0, 0], dtype=bool), result)
########################################################################
def test_identify_hitherto_EP(self):
    """Test that we can identify material classified as EP up until this point."""
    self.classifier.n_particle = 4
    # Cumulative time (per particle, per snapshot) spent in other galaxies.
    self.classifier.cumulative_time_in_other_gal = np.array(
        [
            [102.0, 51.0, 51.0, 0.0],
            [153.0, 153.0, 102.0, 51.0],
            [153.0, 102.0, 51.0, 0.0],
            [153.0, 102.0, 51.0, 0.0],
        ]
    )
    result = self.classifier.identify_hitherto_EP()
    npt.assert_allclose(
        np.array(
            [
                [1, 0, 0, 0, 0],
                [1, 1, 1, 0, 0],
                [1, 1, 0, 0, 0],
                [1, 1, 0, 0, 0],
            ],
            dtype=bool,
        ),
        result,
    )
########################################################################
def test_identify_hitherto_NEP(self):
    """Test that we can identify material classified as NEP up until this point."""
    self.classifier.n_particle = 4
    # Cumulative time (per particle, per snapshot) spent in other galaxies.
    self.classifier.cumulative_time_in_other_gal = np.array(
        [
            [102.0, 51.0, 51.0, 0.0],
            [153.0, 153.0, 102.0, 51.0],
            [153.0, 102.0, 51.0, 0.0],
            [153.0, 102.0, 51.0, 0.0],
        ]
    )
    result = self.classifier.identify_hitherto_NEP()
    # Complement of the hitherto-EP classification for the same input.
    npt.assert_allclose(
        np.array(
            [
                [0, 1, 1, 1, 1],
                [0, 0, 0, 1, 1],
                [0, 0, 1, 1, 1],
                [0, 0, 1, 1, 1],
            ],
            dtype=bool,
        ),
        result,
    )
########################################################################
def test_identify_unaccreted_EP(self):
    """Test that we can identify unaccreted EP gas."""
    self.classifier.n_particle = 4
    self.classifier.is_unaccreted = np.array([1, 1, 1, 0], dtype=bool)
    self.classifier.is_hitherto_EP = np.array(
        [
            [1, 0, 0, 0, 0],
            [1, 1, 1, 0, 0],
            [1, 1, 0, 0, 0],
            [1, 1, 0, 0, 0],
        ],
        dtype=bool,
    )
    result = self.classifier.identify_unaccreted_EP()
    # Unaccreted rows keep their EP history; the accreted row is all False.
    npt.assert_allclose(
        np.array(
            [
                [1, 0, 0, 0, 0],
                [1, 1, 1, 0, 0],
                [1, 1, 0, 0, 0],
                [0, 0, 0, 0, 0],
            ],
            dtype=bool,
        ),
        result,
    )
########################################################################
def test_identify_unaccreted_NEP(self):
    """Test that we can identify unaccreted NEP gas."""
    self.classifier.n_particle = 4
    self.classifier.is_unaccreted = np.array([1, 1, 1, 0], dtype=bool)
    self.classifier.is_hitherto_NEP = np.array(
        [
            [0, 1, 1, 1, 1],
            [0, 0, 0, 1, 1],
            [0, 0, 1, 1, 1],
            [0, 0, 1, 1, 1],
        ],
        dtype=bool,
    )
    result = self.classifier.identify_unaccreted_NEP()
    # Unaccreted rows keep their NEP history; the accreted row is all False.
    npt.assert_allclose(
        np.array(
            [
                [0, 1, 1, 1, 1],
                [0, 0, 0, 1, 1],
                [0, 0, 1, 1, 1],
                [0, 0, 0, 0, 0],
            ],
            dtype=bool,
        ),
        result,
    )
#########################################################################
def test_identify_mass_transfer(self):
    """Preprocessed gas with only a short stay in another galaxy is mass transfer."""
    # Flags for: merger / mass transfer / always in main gal / CGM round-trip.
    self.classifier.is_preprocessed = np.array([1, 1, 0, 0], dtype=bool)
    self.classifier.time_in_other_gal_before_acc_during_interval = np.array(
        [300.0, 50.0, 0.0, 0.0]
    )
    result = self.classifier.identify_mass_transfer()
    # Only the short-stay preprocessed particle counts as mass transfer.
    npt.assert_allclose(np.array([0, 1, 0, 0], dtype=bool), result)
#########################################################################
def test_identify_merger( self ):
# Prerequisites
self.classifier.is_preprocessed = np.array([
1, # Merger, except in early snapshots
1, # Mass transfer
0, # Always part of main galaxy
0, # CGM -> main galaxy -> CGM
]).astype( bool )
self.classifier.time_in_other_gal_before_acc_during_interval = np.array([
300., # Merger, except in early snapshots
50., | |
recorder.",
)
def test_get_span_offset_non_analog(self):
    """Test get span and offset of non analog channel"""
    status = self.gen.ghs_get_span_and_offset("A", 25)
    self.assertEqual(status[0], "InvalidChannelType",
                     "Failed on get span and offset of non analog channel.")
def test_set_get_filter_frequency(self):
    """Test set and get filter type and frequency"""
    status = self.gen.ghs_set_filter_type_and_frequency(
        "A", 1, "Bessel_AA", 32000000.0)
    self.assertEqual(status, "OK", "Failed on set filter type and frequency.")
    # Read the settings back and verify both round-tripped.
    status, filter_type, frequency = \
        self.gen.ghs_get_filter_type_and_frequency("A", 1)
    self.assertEqual(status, "OK", "Failed on get filter type and frequency.")
    self.assertEqual(filter_type, "Bessel_AA", "Failed to set filter type.")
    self.assertEqual(frequency, 32000000.0, "Failed to set frequency.")
def test_set_filter_frequency_invalid_channel(self):
    """Test set filter type and frequency on invalid channel"""
    status = self.gen.ghs_set_filter_type_and_frequency(
        "Z", 100, "Bessel_AA", 32000000.0)
    self.assertEqual(status, "InvalidSlotID",
                     "Failed on set filter type and frequency on invalid channel.")
def test_set_filter_frequency_non_analog_channel(self):
    """Test set filter type and frequency on non analog channel"""
    status = self.gen.ghs_set_filter_type_and_frequency(
        "A", 25, "Bessel_AA", 32000000.0)
    self.assertEqual(status, "InvalidChannelType",
                     "Failed on set filter type and frequency on non analog channel.")
def test_set_filter_frequency_disabled_recorder(self):
    """Test set filter type and frequency of disabled recorder"""
    status = self.gen.ghs_set_recorder_enabled("A", "Disable")
    self.assertEqual(status, "OK", "Failed to disable recorder.")
    # Setting must still succeed while the recorder is disabled.
    status = self.gen.ghs_set_filter_type_and_frequency(
        "A", 1, "Bessel_AA", 32000000.0)
    self.assertEqual(status, "OK",
                     "Failed on set filter type and frequency of disabled recorder.")
    status, filter_type, frequency = \
        self.gen.ghs_get_filter_type_and_frequency("A", 1)
    self.assertEqual(status, "OK",
                     "Failed on get filter type and frequency of disabled recorder.")
    self.assertEqual(filter_type, "Bessel_AA", "Failed to set filter type.")
    self.assertEqual(frequency, 32000000.0, "Failed to set frequency.")
def test_get_invalid_filter_frequency(self):
    """Test get filter type and frequency of invalid channel"""
    status = self.gen.ghs_get_filter_type_and_frequency("Z", 100)
    self.assertEqual(status[0], "InvalidSlotID",
                     "Failed on get filter type and frequency of invalid channel.")
def test_get_filter_frequency_disabled_recorder(self):
    """Test get filter type and frequency of disabled recorder"""
    status = self.gen.ghs_set_recorder_enabled("A", "Disable")
    self.assertEqual(status, "OK", "Failed to disable recorder.")
    # Reading must still succeed while the recorder is disabled.
    status = self.gen.ghs_get_filter_type_and_frequency("A", 1)
    self.assertEqual(status[0], "OK",
                     "Failed on get filter type and frequency of disabled recorder.")
def test_get_filter_frequency_non_analog(self):
    """Test get filter type and frequency of non analog channel"""
    status = self.gen.ghs_get_filter_type_and_frequency("A", 25)
    self.assertEqual(status[0], "InvalidChannelType",
                     "Failed on get filter type and frequency of non analog channel.")
def test_set_get_excitation(self):
    """Test set and get excitation type and value"""
    status = self.gen.ghs_set_excitation("A", 1, "Voltage", 10.0)
    self.assertEqual(status, "OK", "Failed on set excitation type and value.")
    # Read the settings back and verify both round-tripped.
    status, excitation_type, excitation_value = \
        self.gen.ghs_get_excitation("A", 1)
    self.assertEqual(status, "OK", "Failed on get excitation type and value.")
    self.assertEqual(excitation_type, "Voltage", "Failed to set excitation type.")
    self.assertEqual(excitation_value, 10.0, "Failed to set excitation value.")
def test_set_excitation_invalid_channel(self):
    """Test set excitation type and value on invalid channel"""
    status = self.gen.ghs_set_excitation("Z", 100, "Voltage", 10.0)
    self.assertEqual(status, "InvalidSlotID",
                     "Failed on set excitation type and value on invalid channel.")
def test_set_excitation_non_analog_channel(self):
    """Test set excitation type and value on non analog channel"""
    status = self.gen.ghs_set_excitation("A", 25, "Voltage", 10.0)
    self.assertEqual(status, "InvalidChannelType",
                     "Failed on set excitation type and value on non analog channel.")
def test_set_excitation_disabled_recorder(self):
    """Test set excitation type and value of disabled recorder"""
    status = self.gen.ghs_set_recorder_enabled("A", "Disable")
    self.assertEqual(status, "OK", "Failed to disable recorder.")
    # Setting must still succeed while the recorder is disabled.
    status = self.gen.ghs_set_excitation("A", 1, "Voltage", 10.0)
    self.assertEqual(status, "OK",
                     "Failed on set excitation type and value of disabled recorder.")
    status, excitation_type, excitation_value = \
        self.gen.ghs_get_excitation("A", 1)
    self.assertEqual(status, "OK",
                     "Failed on get excitation type and value of disabled recorder.")
    self.assertEqual(excitation_type, "Voltage", "Failed to set excitation type.")
    self.assertEqual(excitation_value, 10.0, "Failed to set excitation value.")
def test_set_excitation_not_supported(self):
    """Test set excitation type and value with not supported value"""
    # 15.0 V is out of range, so the device adapts it rather than rejecting.
    status = self.gen.ghs_set_excitation("A", 1, "Voltage", 15.0)
    self.assertEqual(status, "Adapted",
                     "Failed on set excitation type and value with not supported type.")
def test_get_invalid_excitation(self):
    """Test get excitation type and value of invalid channel"""
    status = self.gen.ghs_get_excitation("Z", 100)
    self.assertEqual(status[0], "InvalidSlotID",
                     "Failed on get excitation type and value of invalid channel.")
def test_get_excitation_disabled_recorder(self):
    """Test get excitation type and value of disabled recorder"""
    status = self.gen.ghs_set_recorder_enabled("A", "Disable")
    self.assertEqual(status, "OK", "Failed to disable recorder.")
    # Reading must still succeed while the recorder is disabled.
    status = self.gen.ghs_get_excitation("A", 1)
    self.assertEqual(status[0], "OK",
                     "Failed on get excitation type and value of disabled recorder.")
def test_get_excitation_non_analog(self):
    """Test get excitation type and value of non analog channel"""
    status = self.gen.ghs_get_excitation("A", 25)
    self.assertEqual(status[0], "InvalidChannelType",
                     "Failed on get excitation type and value of non analog channel.")
def test_set_get_amplifier_mode(self):
    """Test set and get amplifier mode"""
    status = self.gen.ghs_set_amplifier_mode("A", 1, "Basic")
    self.assertEqual(status, "OK", "Failed on set amplifier mode.")
    # Read the mode back and verify it round-tripped.
    status, amplifier_mode = self.gen.ghs_get_amplifier_mode("A", 1)
    self.assertEqual(status, "OK", "Failed on get amplifier mode.")
    self.assertEqual(amplifier_mode, "Basic", "Failed to set amplifier mode.")
def test_set_amplifier_mode_invalid_channel(self):
    """Test set amplifier mode on invalid channel"""
    status = self.gen.ghs_set_amplifier_mode("Z", 100, "Basic")
    self.assertEqual(status, "InvalidSlotID",
                     "Failed on set amplifier mode on invalid channel.")
def test_set_amplifier_mode_non_analog_channel(self):
    """Test set amplifier mode on non analog channel"""
    status = self.gen.ghs_set_amplifier_mode("A", 25, "Basic")
    self.assertEqual(status, "InvalidChannelType",
                     "Failed on set amplifier mode on non analog channel.")
def test_set_amplifier_mode_disabled_recorder(self):
    """Test set amplifier mode of disabled recorder"""
    status = self.gen.ghs_set_recorder_enabled("A", "Disable")
    self.assertEqual(status, "OK", "Failed to disable recorder.")
    # Setting must still succeed while the recorder is disabled.
    status = self.gen.ghs_set_amplifier_mode("A", 1, "Basic")
    self.assertEqual(status, "OK",
                     "Failed on set amplifier mode of disabled recorder.")
    status, amplifier_mode = self.gen.ghs_get_amplifier_mode("A", 1)
    self.assertEqual(status, "OK",
                     "Failed on get amplifier mode of disabled recorder.")
    self.assertEqual(amplifier_mode, "Basic", "Failed to set amplifier mode.")
def test_get_invalid_amplifier_mode(self):
    """Test get amplifier mode of invalid channel"""
    status = self.gen.ghs_get_amplifier_mode("Z", 100)
    self.assertEqual(status[0], "InvalidSlotID",
                     "Failed on get amplifier mode of invalid channel.")
def test_get_amplifier_mode_disabled_recorder(self):
    """Test get amplifier mode of disabled recorder"""
    status = self.gen.ghs_set_recorder_enabled("A", "Disable")
    self.assertEqual(status, "OK", "Failed to disable recorder.")
    # Reading must still succeed while the recorder is disabled.
    status = self.gen.ghs_get_amplifier_mode("A", 1)
    self.assertEqual(status[0], "OK",
                     "Failed on get amplifier mode of disabled recorder.")
def test_get_amplifier_mode_non_analog(self):
    """Test get amplifier mode of non analog channel"""
    status = self.gen.ghs_get_amplifier_mode("A", 25)
    self.assertEqual(status[0], "InvalidChannelType",
                     "Failed on get amplifier mode of non analog channel.")
def test_set_get_technical_units(self):
    """Test set and get technical units, unit multiplier and unit offset"""
    status = self.gen.ghs_set_technical_units("A", 1, "KGS", 10.0, 20.0)
    self.assertEqual(status, "OK",
                     "Failed on set technical units, unit multiplier and unit offset.")
    # Read the settings back and verify all three round-tripped.
    status, units, multiplier, offset = \
        self.gen.ghs_get_technical_units("A", 1)
    self.assertEqual(status, "OK",
                     "Failed on get technical units, unit multiplier and unit offset.")
    self.assertEqual(units, "KGS", "Failed to set technical units.")
    self.assertEqual(multiplier, 10.0, "Failed to set unit multiplier.")
    self.assertEqual(offset, 20.0, "Failed to set unit offset.")
def test_set_technical_units_invalid_channel(self):
    """Test set technical units, unit multiplier and unit offset on invalid channel"""
    status = self.gen.ghs_set_technical_units("Z", 100, "KGS", 10.0, 20.0)
    self.assertEqual(status, "InvalidSlotID",
                     "Failed on set technical units, unit multiplier and unit offset on invalid channel.")
def test_set_technical_units_non_analog_channel(self):
    """Test set technical units, unit multiplier and unit offset on non analog channel"""
    status = self.gen.ghs_set_technical_units("A", 25, "KGS", 10.0, 20.0)
    self.assertEqual(status, "InvalidChannelType",
                     "Failed on set technical units, unit multiplier and unit offset on non analog channel.")
def test_set_technical_units_disabled_recorder(self):
    """Test set technical units, unit multiplier and unit offset of disabled recorder"""
    status = self.gen.ghs_set_recorder_enabled("A", "Disable")
    self.assertEqual(status, "OK", "Failed to disable recorder.")
    # Setting must still succeed while the recorder is disabled.
    status = self.gen.ghs_set_technical_units("A", 1, "KGS", 10.0, 20.0)
    self.assertEqual(status, "OK",
                     "Failed on set technical units, unit multiplier and unit offset of disabled recorder.")
    status, units, multiplier, offset = \
        self.gen.ghs_get_technical_units("A", 1)
    self.assertEqual(status, "OK",
                     "Failed on get technical units, unit multiplier and unit offset of disabled recorder.")
    self.assertEqual(units, "KGS", "Failed to set technical units.")
    self.assertEqual(multiplier, 10.0, "Failed to set unit multiplier.")
    self.assertEqual(offset, 20.0, "Failed to set unit offset.")
def test_get_invalid_technical_units(self):
    """Test get technical units, unit multiplier and unit offset of invalid channel"""
    status = self.gen.ghs_get_technical_units("Z", 100)
    self.assertEqual(status[0], "InvalidSlotID",
                     "Failed on get technical units, unit multiplier and unit offset of invalid channel.")
def test_get_technical_units_disabled_recorder(self):
"""Test get technical units, unit multiplier and unit offset of disabled recorder"""
return_var = self.gen.ghs_set_recorder_enabled("A", "Disable")
self.assertEqual(
return_var,
"OK",
"Failed | |
443. Disabled by default.
"""
return pulumi.get(self, "host_port")
@host_port.setter
def host_port(self, value: Optional[pulumi.Input['ControllerHostPortArgs']]):
    # Auto-generated setter: stores the input back into the resource-args bag.
    pulumi.set(self, "host_port", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
    """
    Optionally customize the pod hostname.
    """
    # NOTE(review): the doubly-nested Mapping type looks schema-generated;
    # confirm against the chart values schema if it ever needs changing.
    return pulumi.get(self, "hostname")

@hostname.setter
def hostname(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
    # Auto-generated setter: stores the input back into the resource-args bag.
    pulumi.set(self, "hostname", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input['ControllerImageArgs']]:
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input['ControllerImageArgs']]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="ingressClassByName")
def ingress_class_by_name(self) -> Optional[pulumi.Input[bool]]:
"""
Process IngressClass per name (additionally as per spec.controller).
"""
return pulumi.get(self, "ingress_class_by_name")
@ingress_class_by_name.setter
def ingress_class_by_name(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ingress_class_by_name", value)
@property
@pulumi.getter(name="ingressClassResource")
def ingress_class_resource(self) -> Optional[pulumi.Input['ControllerIngressClassResourceArgs']]:
"""
This section refers to the creation of the IngressClass resource. IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
"""
return pulumi.get(self, "ingress_class_resource")
@ingress_class_resource.setter
def ingress_class_resource(self, value: Optional[pulumi.Input['ControllerIngressClassResourceArgs']]):
pulumi.set(self, "ingress_class_resource", value)
@property
@pulumi.getter
def keda(self) -> Optional[pulumi.Input['KedaArgs']]:
"""
Mutually exclusive with hpa autoscaling.
"""
return pulumi.get(self, "keda")
@keda.setter
def keda(self, value: Optional[pulumi.Input['KedaArgs']]):
pulumi.set(self, "keda", value)
    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        DaemonSet or Deployment.
        """
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter
    def lifecycle(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.LifecycleArgs']]:
        """
        Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s.
        """
        return pulumi.get(self, "lifecycle")

    @lifecycle.setter
    def lifecycle(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.LifecycleArgs']]):
        pulumi.set(self, "lifecycle", value)

    @property
    @pulumi.getter(name="livenessProbe")
    def liveness_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Liveness probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "liveness_probe")

    @liveness_probe.setter
    def liveness_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]):
        pulumi.set(self, "liveness_probe", value)

    @property
    @pulumi.getter(name="maxmindLicenseKey")
    def maxmind_license_key(self) -> Optional[pulumi.Input[str]]:
        """
        Maxmind license key to download GeoLite2 Databases https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases.
        """
        return pulumi.get(self, "maxmind_license_key")

    @maxmind_license_key.setter
    def maxmind_license_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maxmind_license_key", value)

    @property
    @pulumi.getter
    def metrics(self) -> Optional[pulumi.Input['ControllerMetricsArgs']]:
        """
        Controller metrics configuration (see 'ControllerMetricsArgs').
        """
        return pulumi.get(self, "metrics")

    @metrics.setter
    def metrics(self, value: Optional[pulumi.Input['ControllerMetricsArgs']]):
        pulumi.set(self, "metrics", value)

    @property
    @pulumi.getter(name="minAvailable")
    def min_available(self) -> Optional[pulumi.Input[int]]:
        """
        Minimum number of available controller pods. NOTE(review): presumably
        feeds a PodDisruptionBudget — confirm against the upstream chart.
        """
        return pulumi.get(self, "min_available")

    @min_available.setter
    def min_available(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_available", value)

    @property
    @pulumi.getter(name="minReadySeconds")
    def min_ready_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        minReadySeconds to avoid killing pods before we are ready.
        """
        return pulumi.get(self, "min_ready_seconds")

    @min_ready_seconds.setter
    def min_ready_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_ready_seconds", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the controller component.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="nodeSelector")
    def node_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Node labels for controller pod assignment Ref: https://kubernetes.io/docs/user-guide/node-selection/.
        """
        return pulumi.get(self, "node_selector")

    @node_selector.setter
    def node_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "node_selector", value)

    @property
    @pulumi.getter(name="podAnnotations")
    def pod_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations to be added to controller pods.
        """
        return pulumi.get(self, "pod_annotations")

    @pod_annotations.setter
    def pod_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "pod_annotations", value)

    @property
    @pulumi.getter(name="podLabels")
    def pod_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        labels to add to the pod container metadata.
        """
        return pulumi.get(self, "pod_labels")

    @pod_labels.setter
    def pod_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "pod_labels", value)

    @property
    @pulumi.getter(name="podSecurityContext")
    def pod_security_context(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]:
        """
        Security Context policies for controller pods.
        """
        return pulumi.get(self, "pod_security_context")

    @pod_security_context.setter
    def pod_security_context(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]):
        pulumi.set(self, "pod_security_context", value)

    @property
    @pulumi.getter(name="priorityClassName")
    def priority_class_name(self) -> Optional[pulumi.Input[str]]:
        """
        The Kubernetes priorityClassName to assign to controller pods.
        """
        return pulumi.get(self, "priority_class_name")

    @priority_class_name.setter
    def priority_class_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "priority_class_name", value)

    @property
    @pulumi.getter(name="proxySetHeaders")
    def proxy_set_headers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers.
        """
        return pulumi.get(self, "proxy_set_headers")

    @proxy_set_headers.setter
    def proxy_set_headers(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "proxy_set_headers", value)

    @property
    @pulumi.getter(name="publishService")
    def publish_service(self) -> Optional[pulumi.Input['ControllerPublishServiceArgs']]:
        """
        Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running.
        """
        return pulumi.get(self, "publish_service")

    @publish_service.setter
    def publish_service(self, value: Optional[pulumi.Input['ControllerPublishServiceArgs']]):
        pulumi.set(self, "publish_service", value)
    @property
    @pulumi.getter(name="readinessProbe")
    def readiness_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Readiness probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "readiness_probe")

    @readiness_probe.setter
    def readiness_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]):
        pulumi.set(self, "readiness_probe", value)

    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[pulumi.Input[int]]:
        """
        Desired number of controller pod replicas.
        """
        return pulumi.get(self, "replica_count")

    @replica_count.setter
    def replica_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "replica_count", value)

    @property
    @pulumi.getter(name="reportNodeInternalIp")
    def report_node_internal_ip(self) -> Optional[pulumi.Input[bool]]:
        """
        Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply.
        """
        return pulumi.get(self, "report_node_internal_ip")

    @report_node_internal_ip.setter
    def report_node_internal_ip(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "report_node_internal_ip", value)

    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
        """
        Define requests resources to avoid probe issues due to CPU utilization in busy nodes ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 Ideally, there should be no limits. https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
        """
        return pulumi.get(self, "resources")

    @resources.setter
    def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]):
        pulumi.set(self, "resources", value)

    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input['ControllerScopeArgs']]:
        """
        Limit the scope of the controller.
        """
        return pulumi.get(self, "scope")

    @scope.setter
    def scope(self, value: Optional[pulumi.Input['ControllerScopeArgs']]):
        pulumi.set(self, "scope", value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ControllerServiceArgs']]:
        """
        Controller Service configuration (see 'ControllerServiceArgs').
        """
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: Optional[pulumi.Input['ControllerServiceArgs']]):
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter(name="startupProbe")
    def startup_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Startup probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "startup_probe")

    @startup_probe.setter
    def startup_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]):
        pulumi.set(self, "startup_probe", value)

    @property
    @pulumi.getter
    def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls.
        """
        return pulumi.get(self, "sysctls")

    @sysctls.setter
    def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "sysctls", value)
    @property
    @pulumi.getter
    def tcp(self) -> Optional[pulumi.Input['ControllerTcpArgs']]:
        """
        Allows customization of the tcp-services-configmap.
        """
        return pulumi.get(self, "tcp")

    @tcp.setter
    def tcp(self, value: Optional[pulumi.Input['ControllerTcpArgs']]):
        pulumi.set(self, "tcp", value)

    @property
    @pulumi.getter(name="terminateGracePeriodSeconds")
    def terminate_grace_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        How long to wait for the drain of connections.
        """
        return pulumi.get(self, "terminate_grace_period_seconds")

    @terminate_grace_period_seconds.setter
    def terminate_grace_period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "terminate_grace_period_seconds", value)

    @property
    @pulumi.getter
    def tolerations(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]:
        """
        Node tolerations for server scheduling to nodes with taints Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/.
        """
        return pulumi.get(self, "tolerations")

    @tolerations.setter
    def tolerations(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]):
        pulumi.set(self, "tolerations", value)

    @property
    @pulumi.getter(name="topologySpreadConstraints")
    def topology_spread_constraints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TopologySpreadConstraintArgs']]]]:
        """
        Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/.
        """
        return pulumi.get(self, "topology_spread_constraints")

    @topology_spread_constraints.setter
    def topology_spread_constraints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TopologySpreadConstraintArgs']]]]):
        pulumi.set(self, "topology_spread_constraints", value)

    @property
    @pulumi.getter
    def udp(self) -> Optional[pulumi.Input['ControllerUdpArgs']]:
        """
        UDP service configuration — presumably the udp-services-configmap
        analogue of `tcp`; TODO confirm against the upstream chart.
        """
        return pulumi.get(self, "udp")

    @udp.setter
    def udp(self, value: Optional[pulumi.Input['ControllerUdpArgs']]):
        pulumi.set(self, "udp", value)

    @property
    @pulumi.getter(name="updateStrategy")
    def update_strategy(self) -> Optional[pulumi.Input['ControllerUpdateStrategyArgs']]:
        """
        The update strategy to apply to the Deployment or DaemonSet.
        """
        return pulumi.get(self, "update_strategy")

    @update_strategy.setter
    def update_strategy(self, value: Optional[pulumi.Input['ControllerUpdateStrategyArgs']]):
        pulumi.set(self, "update_strategy", value)

    @property
    @pulumi.getter(name="watchIngressWithoutClass")
    def watch_ingress_without_class(self) -> Optional[pulumi.Input[bool]]:
        """
        Process Ingress objects without ingressClass annotation/ingressClassName field. Overrides value for --watch-ingress-without-class flag of the controller binary. Defaults to false.
        """
        return pulumi.get(self, "watch_ingress_without_class")

    @watch_ingress_without_class.setter
    def watch_ingress_without_class(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "watch_ingress_without_class", value)
@pulumi.input_type
class KedaScaledObjectArgs:
    """Input properties for a KEDA ScaledObject resource."""

    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Custom annotations for ScaledObject resource.
        """
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Custom annotations for ScaledObject resource.
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)
@pulumi.input_type
class KedaTriggerArgs:
    """Input properties for a single KEDA trigger (type plus its metadata map)."""

    def __init__(__self__, *,
                 metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] metadata: Trigger metadata map.
        :param pulumi.Input[str] type: Trigger type.
        """
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Trigger metadata map.
        """
        return pulumi.get(self, "metadata")

    @metadata.setter
    def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "metadata", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Trigger type.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class KedaArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
behavior: Optional[pulumi.Input['AutoscalingBehaviorArgs']] = None,
cooldown_period: Optional[pulumi.Input[int]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
max_replicas: Optional[pulumi.Input[int]] = None,
min_replicas: Optional[pulumi.Input[int]] = None,
polling_interval: Optional[pulumi.Input[int]] = None,
restore_to_original_replica_count: Optional[pulumi.Input[bool]] = None,
scaled_object: Optional[pulumi.Input['KedaScaledObjectArgs']] = None,
triggers: Optional[pulumi.Input[Sequence[pulumi.Input['KedaTriggerArgs']]]] = None):
"""
:param pulumi.Input[str] api_version: apiVersion changes with keda 1.x vs 2.x: 2.x = keda.sh/v1alpha1, 1.x = keda.k8s.io/v1alpha1.
"""
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if behavior is not None:
pulumi.set(__self__, "behavior", behavior)
if cooldown_period is not None:
pulumi.set(__self__, "cooldown_period", cooldown_period)
| |
Order_Placed = max(0,AI_Order)
else:
# here, the agent action is relative to the order received
Order_Placed = max(0,Order_Received[AI_Entity_Index] + AI_Relative_Order)
else:
Order_Placed = max(0, L_hat[Entity_Index] + alpha_s[Entity_Index] * (
S_prime[Entity_Index] - S - beta[Entity_Index] * SL) + eps)
else:
Order_Placed = max(0, L_hat[Entity_Index] + alpha_s[Entity_Index] * (
S_prime[Entity_Index] - S - beta[Entity_Index] * SL) + eps)
##TURN ON FOR INTEGER ONLY ORDERING
if Integer_Ordering:
Order_Placed = np.round(Order_Placed, 0)
if Entity_Index == 3:
Production_Request = Order_Placed
else:
Order_flows[Entity_Index + 1, (Information_Delay - 1)] = Order_Placed
# End of loop
# Make orders placed by each entity explict
Final_Orders[0:3] = Order_flows[1:, (Information_Delay - 1)]
Final_Orders[3] = Production_Request
fnt_output = {"Order_flows": Order_flows, "Shipping_flows": Shipping_flows, "OH_Inventory": OH_Inventory,
"Backorder": Backorder, "L_hat": L_hat, "Production_Request": Production_Request,
"Entity_Orders": Final_Orders, "Order_Received": Order_Received}
return fnt_output
    # Resets the state space to the initial conditions for another episode run.
    # Note that the output format for this function MUST match the output for the step function.
    # Any additional clean-up or resetting of helper variables should occur elsewhere.
def reset(self):
##################
# Assign and reset random game parameters
##################
#### Randomly Draw new teammates if applicable
if self.Random_Teams:
# Randomly draw a team number
Rand_Team = random.randint(min(self.Team_Index), max(self.Team_Index))
# Create a mask of the rows that correspond to that team number
Rand_Team_Mask = np.asarray(self.Team_Index) == Rand_Team
# Filter, using the mask, the arrays that have the data for the team that was drawn
Rand_Team_Theta = np.asarray(self.thetas)[Rand_Team_Mask]
Rand_Team_Alpha = np.asarray(self.alphas)[Rand_Team_Mask]
Rand_Team_Beta = np.asarray(self.betas)[Rand_Team_Mask]
Rand_Team_S_prime = np.asarray(self.S_primes)[Rand_Team_Mask]
# Assemble the team parameters into a named list for later use in the main Beer Game function
Rand_Team_Parameter_df = {"theta": np.ndarray.tolist(Rand_Team_Theta),
"alpha_s": np.ndarray.tolist(Rand_Team_Alpha),
"beta": np.ndarray.tolist(Rand_Team_Beta),
"S_prime": np.ndarray.tolist(Rand_Team_S_prime)}
self.Parameter_df = Rand_Team_Parameter_df
#### Randomly set game horizon if applicable
if self.Random_Horizon == True:
self.horizon = random.randint(self.min_horizon, self.max_horizon)
else:
self.horizon = self.fixed_horizon
#### Randomly set the agent's position on the team
if self.Random_AI_Position:
self.AI_Entity_Index = random.randint(0, 3)
else:
self.AI_Entity_Index = self.AI_Position
##################
# Resetting the global game parameters
##################
# Reset the time period to t=0 for the beginning of the game
self.period = 0
# Reset the various stocks of material both wihtin and without each player's position
self.Order_flows = np.full([4, 2], self.Initial_OrderFlows, dtype=float)
self.Shipping_flows = np.full([4, 2], self.Initial_OrderFlows, dtype=float)
self.OH_Inventory = [self.Initial_Inventory] * 4
self.Backorder = [0] * 4
self.Order_Received = [self.Initial_OrderFlows] * 4
self.L_hat = [self.Initial_OrderFlows] * 4
self.Order_History = np.full([4, self.horizon], 0, dtype=float)
self.Service_rate = [0] * self.horizon
self.OH_Inventory_History = np.full([4, self.horizon], 0, dtype=float)
self.Backlog_History = np.full([4, self.horizon], 0, dtype=float)
self.Production_Request = self.Initial_OrderFlows
self.Final_Orders = [0] * 4 # delete?
self.Amp_Vector = [0] * self.horizon # delete?
self.Reward_Vector = [0] * self.horizon # delete?
# Largely for later debugging and for record keeping, assemble the various items to reset at a global level
# together into a single list
# Output = {"AI_Entity_Index": AI_Entity_Index, "Parameter_df": Parameter_df,"horizon": horizon, "period": period,
# "Orders": Orders, "Order_flows": Order_flows, "Shipping_flows": Shipping_flows,
# "OH_Inventory": OH_Inventory, "Backorder": Backorder, "L_hat": L_hat,
# "Order_History": Order_History, "Service_rate": Service_rate,
# "OH_Inventory_History": OH_Inventory_History, "Backlog_History": Backlog_History,
# "Production_Request": Production_Request, "Amp_Vector": Amp_Vector, "Reward_Vector": Reward_Vector}
# Global_State = Output
# globals().update(Global_State)
##################
# Subset the global parameters to just those the agent is able to observe
##################
# Subset the full global state to just the part the agent has access to
Agent_Order_Received = self.Order_Received[self.AI_Entity_Index]
Agent_OH_Inventory = self.OH_Inventory[self.AI_Entity_Index]
Agent_Backorder = self.Backorder[self.AI_Entity_Index]
if self.AI_Entity_Index == 3:
Agent_Recent_Order = self.Production_Request
else:
Agent_Recent_Order = self.Order_flows[self.AI_Entity_Index + 1, (self.Information_Delay - 1)]
AI_Entity_Index = self.AI_Entity_Index
period = self.period
# Note: The observed state outputted by the reset function MUST match the shape as that from the step function
# and must ONLY consist of the parts of the global state the agent can actually observe
Observed_State = np.array([Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,
Agent_Recent_Order, period, AI_Entity_Index])
return (Observed_State)
    # Takes the action chosen by the agent, along with some simulation-specific parameters, and updates the state.
    # Note that the output format of this function MUST match the output for the reset function.
# def step(self, action, Integer_Ordering, Noisey_Ordering, Parameter_df, AI_Entity_Index, CustOrders, horizon, period, OH_Inventory_History, Backlog_History):
def step(self, action):
# import globally assigned environmental variables
global period, AI_Entity_Index
# Check if the current period is the final one (the Horizon) and return dn=True for 'done' state
# Recall that Python indexes starting at 0! So if Horizon is t=52, need to stop at period = 51
if self.period == (self.horizon - 1):
dn = True
else:
dn = False
# Run the beer game function for a single step
BeerGame_output = self.PirateBeerGame_funct(AI_Entity_Index=self.AI_Entity_Index, AI_Order=action,
Orders=self.Orders[self.period], Order_flows=self.Order_flows,
Shipping_flows=self.Shipping_flows, OH_Inventory=self.OH_Inventory,
Backorder=self.Backorder, L_hat=self.L_hat,
Production_Request=self.Production_Request,
AI_Entity=self.AI_Entity,
Noisey_Ordering=self.Noisey_Ordering,
Integer_Ordering=self.Integer_Ordering,
Parameter_df=self.Parameter_df)
# Note on output of obove function call:
# fnt_output = {"Order_flows": Order_flows, "Shipping_flows": Shipping_flows, "OH_Inventory": OH_Inventory,
# "Backorder": Backorder, "L_hat": L_hat, "Production_Request": Production_Request,
# "Entity_Orders": Final_Orders, "Order_Received": Order_Received}
self.Order_flows = BeerGame_output['Order_flows']
self.Shipping_flows = BeerGame_output['Shipping_flows']
self.OH_Inventory = BeerGame_output['OH_Inventory']
self.Backorder = BeerGame_output['Backorder']
self.L_hat = BeerGame_output['L_hat']
self.Production_Request = BeerGame_output['Production_Request']
self.Order_Received = BeerGame_output['Order_Received']
# Don't use 'Entity_Orders' output right now
info = dict()
# Reward in any time other than the final time is the cost incurred by the AI that round.
# But in the final state, it's the total cost incurred by the entire team!
# Calculation of the running cost incurred so far for the entire team...
self.OH_Inventory_History[:, self.period] = BeerGame_output['OH_Inventory']
self.Backlog_History[:, self.period] = BeerGame_output['Backorder']
# Calculation of the cost incurred by the AI for just this one period...
Period_OH_Inventory = BeerGame_output['OH_Inventory']
Period_Backorder = BeerGame_output['Backorder']
AI_period_OH_Inventory = Period_OH_Inventory[self.AI_Entity_Index]
AI_period_Backorder = Period_Backorder[self.AI_Entity_Index]
AI_period_cost = AI_period_OH_Inventory * self.Holding_Cost + AI_period_Backorder * self.Backorder_Cost
AI_Reward = -AI_period_cost
reward = AI_Reward
#In final round, reward is total team cost, offset by costs incurred by AI so far in order to
# to make the entire episode cost the standard team cost
if dn == True:
Costs_Per_Period = self.OH_Inventory_History * self.Holding_Cost + self.Backlog_History * self.Backorder_Cost
Total_Costs_Per_Entity = np.sum(Costs_Per_Period, 1)
Total_Team_Costs = sum(Total_Costs_Per_Entity)
Team_Reward = -Total_Team_Costs
reward = Team_Reward #+ Total_Costs_Per_Entity[self.AI_Entity_Index]
#normalize final reward by the horizon
#if self.Random_Horizon == True:
# reward = reward/self.horizon
reward = reward / self.horizon
# Alt reward calculation
#reward = AI_Reward + Team_Reward / (self.period + 1) # Team_Reward matters more and more as time goes on?
#### Subset the global state to just the parts the agent has access to
Agent_Order_Received = self.Order_Received[self.AI_Entity_Index]
Agent_OH_Inventory = self.OH_Inventory[self.AI_Entity_Index]
Agent_Backorder = self.Backorder[self.AI_Entity_Index]
if self.AI_Entity_Index == 3:
Agent_Recent_Order = self.Production_Request
else:
Agent_Recent_Order = self.Order_flows[self.AI_Entity_Index + 1, (self.Information_Delay - 1)]
AI_Entity_Index = self.AI_Entity_Index
# Add to the period number
self.period += 1
period = self.period
Observed_State = np.array([Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,
Agent_Recent_Order, period, AI_Entity_Index])
return Observed_State, reward, dn, info
## Main Code
if __name__ == '__main__':
from gym import Env, spaces
# Import methods to build DQN agent
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy,MaxBoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory
    # Get environment and set seed for reproducibility
env = BeerGameEnv()
Set_Random_Seed = True
if Set_Random_Seed:
Random_Seed = 11111111
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(Random_Seed)
random.seed(Random_Seed)
tf.random.set_seed(Random_Seed)
env.action_space.seed(Random_Seed)
# Count number of actions
nb_actions = env.action_space.n
# Build build simple model.
WINDOW_LENGTH = 4
input_shape = env.observation_space.shape
model = Sequential()
model.add(Flatten(input_shape = (WINDOW_LENGTH,) + env.observation_space.shape))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# Configure and compile the DQN agent
memory = SequentialMemory(limit=2000, window_length=WINDOW_LENGTH)
policy = BoltzmannQPolicy()
policy = MaxBoltzmannQPolicy()
#policy = EpsGreedyQPolicy()
    # Note: the Boltzmann policy with DQN is overestimating Q values, causing probabilities to explode...
#Double DQN helps mitigate this Q-value overestimation a bit
#Dueling networks appear to allow for a full run
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000,
target_model_update=1e-2, policy=policy, enable_dueling_network=True, dueling_type='avg')
dqn.compile(Adam(lr=1e-4), metrics=['mae'])
mode = "Train"
#mode = "Test"
if mode == "Train":
now = datetime.datetime.now()
dt_string = now.strftime("%Y%m%d_%H%M%S")
ENV_NAME = "Beer_Game_Stocastic_DQN"
print('Training model....')
Full_Hist = dqn.fit(env, nb_steps=1e5, visualize=False, verbose=2)
Training_History = Full_Hist.history
#wieght_filename = f'dqn_{ENV_NAME}_{dt_string}_weights.h5f'
wieght_filename = 'dqn_test_fit.weights'
model_filename='dqn_test_fit_wide'
model_filename | |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
import os
import re
import tempfile
from pathlib import Path
from typing import Generator, List, Optional, Tuple
import conda_merge
import ruamel.yaml
from azureml._restclient.constants import RunStatus
from azureml.core import Experiment, Run, Workspace, get_run
from azureml.core.conda_dependencies import CondaDependencies
from azureml.exceptions import UserErrorException
from InnerEye.Common import fixed_paths
from InnerEye.Common.common_util import SUBJECT_METRICS_FILE_NAME
# Cross validation split index used when a run is not part of a cross validation set.
DEFAULT_CROSS_VALIDATION_SPLIT_INDEX = -1
# Separator between experiment name and run ID in a recovery ID ("experiment:run").
EXPERIMENT_RUN_SEPARATOR = ":"
# Keys under which run metadata is stored (e.g., as AzureML run tags/properties).
EFFECTIVE_RANDOM_SEED_KEY_NAME = "effective_random_seed"
RUN_RECOVERY_ID_KEY_NAME = "run_recovery_id"
RUN_RECOVERY_FROM_ID_KEY_NAME = "recovered_from"
IS_ENSEMBLE_KEY_NAME = "is_ensemble"
MODEL_ID_KEY_NAME = "model_id"
# The name of the key used to store the cross validation index of the dataset for the run
CROSS_VALIDATION_SPLIT_INDEX_TAG_KEY = "cross_validation_split_index"
PARENT_RUN_ID_KEY_NAME = "parent_run_id"
# This is the folder structure that AzureML generates to store all results for an experiment run.
# azureml is the name of the container
AZUREML_RUN_FOLDER_PREFIX = "dcid."
AZUREML_RUN_FOLDER = "azureml/ExperimentRun/" + AZUREML_RUN_FOLDER_PREFIX
# Global variables for the Run context, to avoid repeated HTTP calls to get it.
RUN_CONTEXT = Run.get_context()
# The Run context of the Hyperdrive parent run. This must be cached to avoid issues with the AzureML SDK,
# which creates worker pools for each call to .parent.
PARENT_RUN_CONTEXT = getattr(RUN_CONTEXT, "parent", None)
INNEREYE_SDK_NAME = "innereye"
INNEREYE_SDK_VERSION = "1.0"
def create_run_recovery_id(run: Run) -> str:
    """
    Creates a recovery ID for a run, so that its checkpoints can be recovered for training/testing.

    :param run: an instantiated run.
    :return: recovery id for a given run in format: [experiment name]:[run id]
    """
    # Both operands are already strings, so the previous str(...) wrapper was redundant.
    return run.experiment.name + EXPERIMENT_RUN_SEPARATOR + run.id
def split_recovery_id(id: str) -> Tuple[str, str]:
    """
    Splits a run ID into the experiment name and the actual run.

    The argument can be in the format 'experiment_name:run_id', or just a run ID
    like user_branch_abcde12_123. In the latter case, everything before the last
    two alphanumeric parts is assumed to be the experiment name.

    :param id: the recovery ID or plain run ID to split.
    :return: experiment name and run name
    """
    parts = id.strip().split(EXPERIMENT_RUN_SEPARATOR)
    if len(parts) > 2:
        raise ValueError("recovery_id must be in the format: 'experiment_name:run_id', but got: {}".format(id))
    if len(parts) == 2:
        experiment_name, run_name = parts
        return experiment_name, run_name
    # Plain run ID: derive the experiment name from the ID structure itself.
    match = re.match(r"^(\w+)_\d+_[0-9a-f]+$|^(\w+)_\d+$", id)
    if not match:
        raise ValueError("The recovery ID was not in the expected format: {}".format(id))
    return (match.group(1) or match.group(2)), id
def fetch_run(workspace: Workspace, run_recovery_id: str) -> Run:
    """
    Finds an existing run in an experiment, based on a recovery ID that contains the experiment ID
    and the actual RunId. The run can be specified either in the experiment_name:run_id format,
    or just the run_id.
    :param workspace: the configured AzureML workspace to search for the experiment.
    :param run_recovery_id: The Run to find. Either in the full recovery ID format, experiment_name:run_id
    or just the run_id
    :return: The AzureML run.
    :raises Exception: if the experiment cannot be retrieved from the workspace.
    """
    experiment, run = split_recovery_id(run_recovery_id)
    try:
        experiment_to_recover = Experiment(workspace, experiment)
    except Exception as ex:
        # Chain the original exception so the root cause is preserved.
        raise Exception(f"Unable to retrieve run {run} in experiment {experiment}: {str(ex)}") from ex
    run_to_recover = fetch_run_for_experiment(experiment_to_recover, run)
    # Bug fix: the run number and run ID were previously passed in swapped order,
    # producing messages like "Fetched run #<id> <number>".
    logging.info("Fetched run #{} {} from experiment {}.".format(run_to_recover.number, run, experiment))
    return run_to_recover
def fetch_run_for_experiment(experiment_to_recover: Experiment, run_id: str) -> Run:
    """
    Retrieves a run from the given experiment, rehydrating it from the service.
    :param experiment_to_recover: an experiment
    :param run_id: a string representing the Run ID of one of the runs of the experiment
    :return: the run matching run_id_or_number; raises an exception if not found
    """
    try:
        return get_run(experiment=experiment_to_recover, run_id=run_id, rehydrate=True)
    except Exception:
        # Enumerate what does exist, so the error message is actionable.
        available_ids = ", ".join(existing.id for existing in experiment_to_recover.get_runs())
        raise (Exception(
            "Run {} not found for experiment: {}. Available runs are: {}".format(
                run_id, experiment_to_recover.name, available_ids)))
def fetch_runs(experiment: Experiment, filters: List[str]) -> List[Run]:
    """
    Fetch the runs in an experiment.
    :param experiment: the experiment to fetch runs from
    :param filters: a list of run status to include. Must be subset of [Running, Completed, Failed, Canceled].
    :return: the list of runs in the experiment
    """
    exp_runs = list(experiment.get_runs())
    # Only filter when a valid (non-empty, recognised) status list was given;
    # otherwise return all runs unfiltered, exactly as retrieved.
    if filters and set(filters).issubset({"Running", "Completed", "Failed", "Canceled"}):
        exp_runs = [run for run in exp_runs if run.status in filters]
    return exp_runs
def fetch_child_runs(run: Run, status: Optional[str] = None,
                     expected_number_cross_validation_splits: int = 0) -> List[Run]:
    """
    Fetch child runs for the provided runs that have the provided AML status (or fetch all by default)
    and have a run_recovery_id tag value set (this is to ignore superfluous AML infrastructure platform runs).
    :param run: parent run to fetch child run from
    :param status: if provided, returns only child runs with this status
    :param expected_number_cross_validation_splits: when recovering child runs from AML hyperdrive
    sometimes the get_children function fails to retrieve all children. If the number of child runs
    retrieved by AML is lower than the expected number of splits, we try to retrieve them manually.
    :return: the list of child runs, possibly filtered by status.
    """
    if is_ensemble_run(run):
        # Ensemble runs are built from another run: redirect to the run stored in
        # the RUN_RECOVERY_FROM_ID tag, or fall back to the cached Hyperdrive
        # parent run context if that tag is absent.
        run_recovery_id = run.get_tags().get(RUN_RECOVERY_FROM_ID_KEY_NAME, None)
        if run_recovery_id:
            run = fetch_run(run.experiment.workspace, run_recovery_id)
        elif PARENT_RUN_CONTEXT:
            run = PARENT_RUN_CONTEXT
    # Only children carrying the run_recovery_id tag are genuine training runs.
    children_runs = list(run.get_children(tags=RUN_RECOVERY_ID_KEY_NAME))
    # Chained comparison: true iff an expected count was given (> 0) AND it
    # differs from the number of children AzureML actually returned.
    if 0 < expected_number_cross_validation_splits != len(children_runs):
        logging.warning(
            f"The expected number of child runs was {expected_number_cross_validation_splits}."
            f"Fetched only: {len(children_runs)} runs. Now trying to fetch them manually.")
        # Child run IDs follow the pattern <parent recovery id>_<split index>.
        run_ids_to_evaluate = [f"{create_run_recovery_id(run)}_{i}"
                               for i in range(expected_number_cross_validation_splits)]
        children_runs = [fetch_run(run.experiment.workspace, id) for id in run_ids_to_evaluate]
    if status is not None:
        children_runs = [child_run for child_run in children_runs if child_run.get_status() == status]
    return children_runs
def is_ensemble_run(run: Run) -> bool:
    """Checks if the run was an ensemble of multiple models"""
    tags = run.get_tags()
    return tags.get(IS_ENSEMBLE_KEY_NAME) == 'True'
def to_azure_friendly_string(x: Optional[str]) -> Optional[str]:
    """
    Given a string, ensure it can be used in Azure by replacing everything apart from a-zA-Z0-9_ with _,
    and replace multiple _ with a single _.
    """
    if x is None:
        return None
    # First map every run of non-word characters to '_', then collapse any
    # resulting runs of underscores into a single one.
    underscored = re.sub(r'\W+', '_', x)
    return re.sub('_+', '_', underscored)
def to_azure_friendly_container_path(path: Path) -> str:
    """
    Converts a path an Azure friendly container path by replacing "\\", "//" with "/" so it can be in the form: a/b/c.
    :param path: Original path
    :return: Converted path
    """
    forward_slashed = str(path).replace("\\", "/").replace("//", "/")
    # Container paths are relative: drop any leading/trailing slashes.
    return forward_slashed.strip("/")
def is_offline_run_context(run_context: Run) -> bool:
    """
    Tells if a run_context is offline by checking if it has an experiment associated with it.
    :param run_context: Context of the run to check
    :return: True when the context has no 'experiment' attribute (offline run).
    """
    has_experiment = hasattr(run_context, 'experiment')
    return not has_experiment
def get_run_context_or_default(run: Optional[Run] = None) -> Run:
    """
    Returns the context of the run, if run is not None. If run is None, returns the context of the current run.
    :param run: Run to retrieve context for. If None, retrieve context of current run.
    :return: Run context
    """
    if run:
        return run
    return Run.get_context()
def get_cross_validation_split_index(run: Run) -> int:
    """
    Gets the cross validation index from the run's tags or returns the default
    :param run: Run context from which to get index
    :return: The cross validation split index
    """
    if is_offline_run_context(run):
        return DEFAULT_CROSS_VALIDATION_SPLIT_INDEX
    tags = run.get_tags()
    return int(tags.get(CROSS_VALIDATION_SPLIT_INDEX_TAG_KEY, DEFAULT_CROSS_VALIDATION_SPLIT_INDEX))
def is_cross_validation_child_run(run: Run) -> bool:
    """
    Checks the provided run's tags to determine if it is a cross validation child run
    (which is the case if the split index >=0)
    :param run: Run to check.
    :return: True if cross validation run. False otherwise.
    """
    split_index = get_cross_validation_split_index(run)
    return split_index > DEFAULT_CROSS_VALIDATION_SPLIT_INDEX
def strip_prefix(string: str, prefix: str) -> str:
    """
    Returns the string without the prefix if it has the prefix, otherwise the string unchanged.
    :param string: Input string.
    :param prefix: Prefix to remove from input string.
    :return: Input string with prefix removed.
    """
    return string[len(prefix):] if string.startswith(prefix) else string
def _log_conda_dependencies_stats(conda: CondaDependencies, message_prefix: str) -> None:
    """
    Write number of conda and pip packages to logs.
    :param conda: A conda dependencies object
    :param message_prefix: A message to prefix to the log string.
    """
    num_conda = len(list(conda.conda_packages))
    num_pip = len(list(conda.pip_packages))
    logging.info(f"{message_prefix}: {num_conda} conda packages, {num_pip} pip packages")
    # Individual packages are only listed at debug level to keep info logs short.
    logging.debug("  Conda packages:")
    for package in conda.conda_packages:
        logging.debug(f"    {package}")
    logging.debug("  Pip packages:")
    for package in conda.pip_packages:
        logging.debug(f"    {package}")
def merge_conda_files(files: List[Path], result_file: Path) -> None:
"""
Merges the given Conda environment files using the conda_merge package, and writes the merged file to disk.
:param files: The Conda environment files to read.
:param result_file: The location where the merge results should be written.
"""
# This code is a slightly modified version of conda_merge. That code can't be re-used easily
# it defaults to writing to stdout
env_definitions = [conda_merge.read_file(str(f)) for f in files]
unified_definition = {}
NAME = "name"
CHANNELS = "channels"
DEPENDENCIES = "dependencies"
name = conda_merge.merge_names(env.get(NAME) for env in env_definitions)
if name:
unified_definition[NAME] = name
try:
channels = conda_merge.merge_channels(env.get(CHANNELS) for env in env_definitions)
| |
#!/usr/bin/env python
import collections
import sys
import copy
from functools import cmp_to_key
import argparse
# NOTE: this script uses Python 2 print-statement syntax throughout; it will
# not run under Python 3 without conversion.
ap = argparse.ArgumentParser(description="Identify teams that can be swapped between games inside matches")
ap.add_argument("infile", help="Input schedule")
ap.add_argument("matchno", type=int, help="Which match number to fiddle with")
ap.add_argument("--auto-alter", action="store_true", help="Print the schedule with specified match patched")
ap.add_argument("--multimatch", action="store_true", help="Consider swapping teams between this and the next match")
ap.add_argument("--matches", type=int, default=0, help="Number of matches in each round")
ap.add_argument("--closeness", type=int, default=0, help="Closeness criteria")
args = ap.parse_args()
# Multimatch mode needs both the round size and the closeness window.
if args.multimatch and (args.matches == 0 or args.closeness == 0):
    print >>sys.stderr, "Matches and closeness options required for doing multimatch calcs"
    sys.exit(1)
# Never pair the last match of a round with the first match of the next round.
if args.multimatch and ((args.matchno + 1) % args.matches) == 0:
    print >>sys.stderr, "Can't multi-match schedule over round boundries, skipping this one"
    args.multimatch = False
# Read the schedule: one match per line, teams separated by '|'; '#' lines are comments.
matches = []
lines = [x.strip() for x in open(args.infile)]
for line in lines:
    if len(line) > 0 and line[0] == '#':
        continue
    players = line.split('|')
    matches.append(players)
# c[tla][opponent] counts how many times tla faces opponent across the schedule.
c = collections.defaultdict(collections.Counter)
def calc_faced_in_game(game, container, sub):
    # Update the pairwise facing counts for every (team, opponent) pair in this
    # game - note each team is also counted against itself, which callers
    # compensate for later. When sub is true the counts are removed instead.
    delta = -1 if sub else 1
    for tla in game:
        for faces in game:
            container[tla][faces] += delta
def calc_faced_in_match(match, container, sub=False):
    # A match is one or more games of up to four teams: peel off four teams at
    # a time, then account for the final (possibly shorter) game.
    remaining = match
    while len(remaining) > 4:
        calc_faced_in_game(remaining[:4], container, sub)
        remaining = remaining[4:]
    calc_faced_in_game(remaining, container, sub)
# Calculate how many times each team faces each other, except in the selected
# match
cur_match_no = 0
forward_matches = []
middle_idx = 0
for match in matches:
    if cur_match_no == args.matchno:
        # Skip the match being fiddled with; its counts are added per-candidate later.
        cur_match_no += 1
        continue
    elif args.multimatch and cur_match_no == args.matchno + 1:
        # Store earlier matches for checking closeness criteria
        middle_idx = cur_match_no - 1
        firstidx = max(0, middle_idx - args.closeness)
        forward_matches = matches[firstidx:middle_idx]
        cur_match_no += 1
        continue
    calc_faced_in_match(match, c)
    cur_match_no += 1
# Matches within the closeness window after the pair being altered.
num_after_matches = min(middle_idx + 2 + args.closeness, len(matches))
after_matches = matches[middle_idx+2:num_after_matches]
# Calculate the teams who'll conflict with players in our matches in multimatch
# mode
forward_teams = []
for match in forward_matches:
    forward_teams += match
forward_teams = frozenset(forward_teams)
after_teams = []
for match in after_matches:
    after_teams += match
after_teams = frozenset(after_teams)
# Every team seen anywhere in the schedule (keys of the facing-count dict).
all_teams = set(c.keys())
def calc_scoring(sched):
    # Build a histogram mapping repeat magnitude -> number of occurrences.
    # NOTE(review): Python 2 only - uses dict.iteritems() and deletes from
    # output while iterating output.keys(); both break under Python 3.
    # Also destructive: it deletes each team's self-facing entry from the
    # Counters inside sched, permanently mutating the caller's data.
    # Something involving defaults would be better, but requires thought
    output = dict()
    for i in range(len(matches)):
        output[i] = 0
    for tla, opponents in sched.iteritems():
        # Drop the artificial self-count added by calc_faced_in_game.
        del opponents[tla]
        faced = opponents.keys()
        for opp in faced:
            times = opponents[opp]
            if times == 0:
                continue
            output[times] += 1
    # Remove repeats with zero count
    for i in output.keys():
        if output[i] == 0:
            del output[i]
    return output
# Define a comparator about the score a particular match configuration has.
# A 'better' score is one where the largest magnitude of repeat is less than
# another, i.e. a schedule with some 3-times repeats is better than one with
# any 4-time repeats.
# Failing that, the number of repeats is compared, in reducing magnitude, so
# a schedule with 20 3-time repeats is worse than one with 15 of them.
def scoring_cmp(x, y):
    # Comparator over repeat-count histograms (magnitude -> occurrences).
    # A schedule whose worst (largest) repeat magnitude is unique to it loses
    # to the other; with identical magnitude sets, occurrence counts are
    # compared from the highest magnitude down. Returns -1, 0 or 1.
    xkeys = x.keys()
    ykeys = y.keys()
    if xkeys == ykeys:
        # Same magnitudes present: compare occurrence counts, largest magnitude
        # first; more occurrences of a big repeat means a worse schedule.
        for magnitude in sorted(xkeys, reverse=True):
            if x[magnitude] < y[magnitude]:
                return 1
            if x[magnitude] > y[magnitude]:
                return -1
        return 0
    # One histogram has a magnitude the other lacks. Walk down from the overall
    # highest magnitude; the first magnitude present in exactly one histogram
    # decides the ordering.
    xs = sorted(xkeys, reverse=True)
    ys = sorted(ykeys, reverse=True)
    highest = xs[0] if xs[0] > ys[0] else ys[0]
    for magnitude in reversed(range(highest + 1)):
        if magnitude in xs and magnitude not in ys:
            return -1
        if magnitude in ys and magnitude not in xs:
            return 1
    return 0
# Select the desired match
the_teams = matches[int(args.matchno)]
first_match = frozenset(the_teams)
second_match = set(the_teams)
if args.multimatch:
    # Pool the teams of both matches; candidates are drawn from the union.
    the_teams = the_teams + matches[args.matchno + 1]
    second_match = frozenset(matches[args.matchno + 1])
# Now enumerate the set of unique matches that can be played with the teams
# in this match, re-ordered. Don't do anything fancy.
unique_games = set()
# Generate all possible 4-team combinations via generating all combinations,
# and canonicalising the order to avoid equivalent orderings being inserted.
from itertools import product
for comb in product(the_teams, repeat=4):
    # Duplicate members?
    theset = frozenset(comb)
    if len(theset) != 4:
        continue
    if theset not in unique_games:
        unique_games.add(theset)
# Combine the set of unique games into a set of matches. Guard against the same
# match but in a different order being found.
unique_matches = set()
for comb in product(unique_games, repeat=2):
    # Test that we actually have all 8 players playing in this match.
    if not comb[0].isdisjoint(comb[1]):
        continue
    g1 = comb[0]
    g2 = comb[1]
    # In multimatch mode, check that the match is either a completely unchanged
    # set of teams from either match, or only has one team difference. This
    # means we only explore one pair of teams swapping matches, keeping the size
    # of exploration feasible.
    if args.multimatch:
        both = g1 | g2
        inter_first = both & first_match
        thelen = len(inter_first)
        if thelen != 0 and thelen != 1 and thelen != 7 and thelen != 8:
            continue
    if (g2, g1) in unique_matches:
        continue
    unique_matches.add((g1, g2))
# In multimatch mode, turn the unique matches set into a set of match pairs.
match_pairs = set()
if args.multimatch:
    for comb in product(unique_matches, repeat=2):
        m1, m2 = comb
        set1, set2 = m1
        set3, set4 = m2
        # All 16 team slots across the pair must be distinct teams.
        if len(set1 | set2 | set3 | set4) != 16:
            continue
        # That's checked uniqueness. Now look for closeness hazards.
        if len(set3 & after_teams) != 0:
            continue
        if len(set4 & after_teams) != 0:
            continue
        if len(set1 & forward_teams) != 0:
            continue
        if len(set2 & forward_teams) != 0:
            continue
        match_pairs.add(comb)
# Now for some actual scoring. For each match, duplicate the scoring dictionary
# for the rest of the schedule, and add the generated match to that scoring.
def add_generated_match_sched(m, sched, sub):
    # Fold (or, when sub is set, remove) both games of a generated match into
    # the facing-count dictionary; returns the mutated dictionary.
    game_one, game_two = m
    calc_faced_in_match(list(game_one), sched, sub)
    calc_faced_in_match(list(game_two), sched, sub)
    return sched
scorelist = []
# For each candidate, temporarily add its counts to the shared dict `c`
# (note: `sched = c` aliases, it does not copy), score, then subtract again.
# NOTE(review): calc_scoring also deletes self-counts from the Counters, so
# the add/score/remove cycle is not perfectly symmetric - verify intended.
if not args.multimatch:
    for m in unique_matches:
        sched = c
        sched = add_generated_match_sched(m, sched, False)
        score = calc_scoring(sched)
        sched = add_generated_match_sched(m, sched, True)
        scorelist.append((score, m))
else:
    for m in match_pairs:
        m1, m2 = m
        sched = c
        sched = add_generated_match_sched(m1, sched, False)
        sched = add_generated_match_sched(m2, sched, False)
        score = calc_scoring(sched)
        sched = add_generated_match_sched(m1, sched, True)
        sched = add_generated_match_sched(m2, sched, True)
        c = sched
        scorelist.append((score, m))
def sortlist_cmp(x, y):
    # Project out the score, from the match
    xs, xm = x
    ys, ym = y
    return scoring_cmp(xs, ys)
# Keep only the best-scoring candidate (max under the comparator).
scorelist = [max(scorelist, key=cmp_to_key(sortlist_cmp))]
class bcolours:
    """ANSI terminal escape sequences used to colour the report output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
# Report mode (no --auto-alter): print the best candidate(s) with scores.
# NOTE: Python 2 print-statement syntax.
if not args.auto_alter:
    if not args.multimatch:
        for m in scorelist:
            score, match = m
            g1, g2 = match
            plist = list(g1)
            plist += list(g2)
            normalised = "|".join(plist)
            print "Match " + bcolours.OKGREEN + repr(match) + bcolours.ENDC
            print " normalised as " + bcolours.OKBLUE + normalised + bcolours.ENDC
            print " scored: " + bcolours.FAIL + repr(score) + bcolours.ENDC
    else:
        # Multimatch: each entry is a pair of matches; print both.
        for m in scorelist:
            score, match = m
            match1, match2 = match
            m1g1, m1g2 = match1
            m2g1, m2g2 = match2
            plist = list(m1g1)
            plist += list(m1g2)
            normalised1 = "|".join(plist)
            plist = list(m2g1)
            plist += list(m2g2)
            normalised2 = "|".join(plist)
            print "Match " + bcolours.OKGREEN + repr(match1) + bcolours.ENDC
            print "      " + bcolours.OKGREEN + repr(match2) + bcolours.ENDC
            print " normalised as " + bcolours.OKBLUE + normalised1 + bcolours.ENDC
            print "               " + bcolours.OKBLUE + normalised2 + bcolours.ENDC
            print " scored: " + bcolours.FAIL + repr(score) + bcolours.ENDC
    sys.exit(0)
# Auto alter is enabled: re-read the input file, printing out every line
# except the desired match, replacing it with the optimal match found.
cur_match_no = 0
for line in lines:
if len(line) > 0 and line[0] == '#':
print line
continue
if cur_match_no == args.matchno:
# Replace it. Pick the optimal ordering, which is the last in the list
bestscore, bestmatch = scorelist[-1]
if not args.multimatch:
g1, g2 = bestmatch
plist = list(g1)
plist += list(g2)
print "|".join(plist)
| |
'kwlpumps'
assert str(f) == '/yasup?sup=kwlpumps'
f.query = ''
assert str(f) == '/yasup'
f.path = ''
assert str(f) == ''
f.args['no'] = 'dads'
f.query.params['hi'] = 'gr8job'
assert str(f) == 'no=dads&hi=gr8job'
    def test_load(self):
        """Each tuple is (fragment to load, expected path str, expected query params)."""
        # A '?' only begins the query when followed by key=value pairs;
        # otherwise it is path data and is percent-encoded on output.
        comps = [('', '', {}),
                 ('?', '%3F', {}),
                 ('??a??', '%3F%3Fa%3F%3F', {}),
                 ('??a??=', '', {'?a??': ''}),
                 ('schtoot', 'schtoot', {}),
                 ('sch/toot/YOEP', 'sch/toot/YOEP', {}),
                 ('/sch/toot/YOEP', '/sch/toot/YOEP', {}),
                 ('schtoot?', 'schtoot%3F', {}),
                 ('schtoot?NOP', 'schtoot%3FNOP', {}),
                 ('schtoot?NOP=', 'schtoot', {'NOP': ''}),
                 ('schtoot?=PARNT', 'schtoot', {'': 'PARNT'}),
                 ('schtoot?NOP=PARNT', 'schtoot', {'NOP': 'PARNT'}),
                 ('dog?machine?yes', 'dog%3Fmachine%3Fyes', {}),
                 ('dog?machine=?yes', 'dog', {'machine': '?yes'}),
                 ('schtoot?a=a&hok%20sprm', 'schtoot',
                  {'a': 'a', 'hok sprm': ''}),
                 ('schtoot?a=a&hok sprm', 'schtoot',
                  {'a': 'a', 'hok sprm': ''}),
                 ('sch/toot?a=a&hok sprm', 'sch/toot',
                  {'a': 'a', 'hok sprm': ''}),
                 ('/sch/toot?a=a&hok sprm', '/sch/toot',
                  {'a': 'a', 'hok sprm': ''}),
                 ]
        for fragment, path, query in comps:
            f = furl.Fragment()
            f.load(fragment)
            assert str(f.path) == path
            assert f.query.params == query
def test_add(self):
f = furl.Fragment('')
assert f is f.add(path='one two three', args={'a': 'a', 's': 's s'})
assert str(f) == 'one%20two%20three?a=a&s=s+s'
f = furl.Fragment('break?legs=broken')
assert f is f.add(path='horse bones', args={'a': 'a', 's': 's s'})
assert str(f) == 'break/horse%20bones?legs=broken&a=a&s=s+s'
def test_set(self):
f = furl.Fragment('asdf?lol=sup&foo=blorp')
assert f is f.set(path='one two three', args={'a': 'a', 's': 's s'})
assert str(f) == 'one%20two%20three?a=a&s=s+s'
assert f is f.set(path='!', separator=False)
assert f.separator is False
assert str(f) == '!a=a&s=s+s'
def test_remove(self):
f = furl.Fragment('a/path/great/job?lol=sup&foo=blorp')
assert f is f.remove(path='job', args=['lol'])
assert str(f) == 'a/path/great/?foo=blorp'
assert f is f.remove(path=['path', 'great'], args=['foo'])
assert str(f) == 'a/path/great/'
assert f is f.remove(path=['path', 'great', ''])
assert str(f) == 'a/'
assert f is f.remove(fragment=True)
assert str(f) == ''
    def test_encoding(self):
        # Fragment-legal punctuation must round-trip unencoded in the path.
        f = furl.Fragment()
        f.path = "/?:@-._~!$&'()*+,;="
        assert str(f) == "/?:@-._~!$&'()*+,;="
        # Once a query exists, a literal '?' in the path would be ambiguous,
        # so it gets percent-encoded.
        f.query = {'a': 'a', 'b b': 'NOPE'}
        assert str(f) == "/%3F:@-._~!$&'()*+,;=?a=a&b+b=NOPE"
        # Without the separator, path and query are concatenated directly.
        f.separator = False
        assert str(f) == "/?:@-._~!$&'()*+,;=a=a&b+b=NOPE"
        # Characters outside the fragment-safe set are always percent-encoded.
        f = furl.Fragment()
        f.path = "/?:@-._~!$&'()*+,;= ^`<>[]"
        assert str(f) == "/?:@-._~!$&'()*+,;=%20%5E%60%3C%3E%5B%5D"
        f.query = {'a': 'a', 'b b': 'NOPE'}
        assert str(
            f) == "/%3F:@-._~!$&'()*+,;=%20%5E%60%3C%3E%5B%5D?a=a&b+b=NOPE"
        f.separator = False
        assert str(f) == "/?:@-._~!$&'()*+,;=%20%5E%60%3C%3E%5B%5Da=a&b+b=NOPE"
        f = furl.furl()
        f.fragment = 'a?b?c?d?'
        assert f.url == '#a?b?c?d?'
        # TODO(grun): Once encoding has been fixed with URLPath and
        # FragmentPath, the below line should be:
        #
        #  assert str(f.fragment) == str(f.path) == 'a?b?c?d?'
        #
        assert str(f.fragment) == 'a?b?c?d?'
def test_nonzero(self):
f = furl.Fragment()
assert not f
f = furl.Fragment('')
assert not f
f = furl.Fragment('asdf')
assert f
f = furl.Fragment()
f.path = 'sup'
assert f
f = furl.Fragment()
f.query = 'a=a'
assert f
f = furl.Fragment()
f.path = 'sup'
f.query = 'a=a'
assert f
f = furl.Fragment()
f.path = 'sup'
f.query = 'a=a'
f.separator = False
assert f
class TestFragmentCompositionInterface(unittest.TestCase):
    """Checks FragmentCompositionInterface through a minimal subclass."""
    def test_interface(self):
        class tester(furl.FragmentCompositionInterface):
            def __init__(self):
                furl.FragmentCompositionInterface.__init__(self)
            def __setattr__(self, attr, value):
                # Give the interface first refusal on attribute assignment;
                # fall back to normal object behaviour when it declines.
                ffci = furl.FragmentCompositionInterface
                if not ffci.__setattr__(self, attr, value):
                    object.__setattr__(self, attr, value)
        t = tester()
        # A fresh instance exposes an empty Fragment composed of Path + Query.
        assert isinstance(t.fragment, furl.Fragment)
        assert isinstance(t.fragment.path, furl.Path)
        assert isinstance(t.fragment.query, furl.Query)
        assert str(t.fragment) == ''
        assert t.fragment.separator
        assert str(t.fragment.path) == ''
        assert str(t.fragment.query) == ''
        # Assigning a string re-parses it into a Fragment object.
        t.fragment = 'animal meats'
        assert isinstance(t.fragment, furl.Fragment)
        t.fragment.path = 'pump/dump'
        t.fragment.query = 'a=a&s=s+s'
        assert isinstance(t.fragment.path, furl.Path)
        assert isinstance(t.fragment.query, furl.Query)
        assert str(t.fragment.path) == 'pump/dump'
        assert t.fragment.path.segments == ['pump', 'dump']
        assert not t.fragment.path.isabsolute
        assert str(t.fragment.query) == 'a=a&s=s+s'
        assert t.fragment.args == t.fragment.query.params == {
            'a': 'a', 's': 's s'}
class TestFurl(unittest.TestCase):
    def setUp(self):
        # Don't hide duplicate Warnings - test for all of them.
        # ("always" disables the default once-per-location de-duplication.)
        warnings.simplefilter("always")
def _param(self, url, key, val):
# Note: urlparse.urlsplit() doesn't separate the query from the
# path for all schemes, only those schemes in the list
# urlparse.uses_query. So, as a result of using
# urlparse.urlsplit(), this little helper function only works
# when provided urls whos schemes are also in
# urlparse.uses_query.
items = urlparse.parse_qsl(urlparse.urlsplit(url).query, True)
return (key, val) in items
    def test_scheme(self):
        # An absent scheme is None; furl distinguishes that from the empty
        # scheme ('') of protocol-relative URLs below.
        assert furl.furl().scheme is None
        assert furl.furl('').scheme is None
        # Lowercase.
        assert furl.furl('/sup/').set(scheme='PrOtO').scheme == 'proto'
        # No scheme.
        for url in ['sup.txt', '/d/sup', '#flarg']:
            f = furl.furl(url)
            assert f.scheme is None and f.url == url
        # Protocol relative URLs.
        for url in ['//', '//sup.txt', '//arc.io/d/sup']:
            f = furl.furl(url)
            assert f.scheme == '' and f.url == url
        f = furl.furl('//sup.txt')
        assert f.scheme == ''
        # Setting the scheme to None drops the leading '//'; '' restores it.
        f.scheme = None
        assert f.scheme is None and f.url == 'sup.txt'
        f.scheme = ''
        assert f.scheme == '' and f.url == '//sup.txt'
    def test_username_and_password(self):
        # NOTE(review): the '<PASSWORD>' and '<EMAIL>' literals below appear
        # to be redacted fixture data; several assertions are meaningless
        # until realistic values are restored - confirm against the upstream
        # furl test suite.
        # Empty usernames and passwords.
        for url in ['', 'http://www.pumps.com/']:
            f = furl.furl(url)
            assert f.username is None and f.password is None
        usernames = ['user', 'a-user_NAME$%^&09']
        passwords = ['<PASSWORD>', '<PASSWORD>']
        baseurl = 'http://www.google.com/'
        # Username only.
        userurl = 'http://%s@www.google.com/'
        for username in usernames:
            f = furl.furl(userurl % username)
            assert f.username == username and f.password is None
            f = furl.furl(baseurl)
            f.username = username
            assert f.username == username and f.password is None
            assert f.url == userurl % username
            f = furl.furl(baseurl)
            f.set(username=username)
            assert f.username == username and f.password is None
            assert f.url == userurl % username
            f.remove(username=True)
            assert f.username is None and f.password is None
            assert f.url == baseurl
        # Password only.
        passurl = 'http://:%s@www.google.com/'
        for password in passwords:
            f = furl.furl(passurl % password)
            assert f.password == password and f.username is None
            f = furl.furl(baseurl)
            f.password = password
            assert f.password == password and f.username is None
            assert f.url == passurl % password
            f = furl.furl(baseurl)
            f.set(password=password)
            assert f.password == password and f.username is None
            assert f.url == passurl % password
            f.remove(password=True)
            assert not f.username and not f.password
            assert f.url == baseurl
        # Username and password.
        userpassurl = 'http://%s:%s@www.google.com/'
        for username in usernames:
            for password in passwords:
                f = furl.furl(userpassurl % (username, password))
                assert f.username == username and f.password == password
                f = furl.furl(baseurl)
                f.username = username
                f.password = password
                assert f.username == username and f.password == password
                assert f.url == userpassurl % (username, password)
                f = furl.furl(baseurl)
                f.set(username=username, password=password)
                assert f.username == username and f.password == password
                assert f.url == userpassurl % (username, password)
                f = furl.furl(baseurl)
                f.remove(username=True, password=True)
                assert not f.username and not f.password
                assert f.url == baseurl
        # Username and password in the network location string.
        f = furl.furl()
        f.netloc = '<EMAIL>'
        assert f.username == 'user' and not f.password
        assert f.netloc == '<EMAIL>'
        f = furl.furl()
        f.netloc = ':<EMAIL>'
        assert not f.username and f.password == '<PASSWORD>'
        assert f.netloc == ':<EMAIL>'
        f = furl.furl()
        f.netloc = 'user:<PASSWORD>@<EMAIL>'
        assert f.username == 'user' and f.password == '<PASSWORD>'
        assert f.netloc == 'user:<PASSWORD>@<EMAIL>'
        # Empty-string credentials are preserved and distinct from None.
        f = furl.furl()
        assert f.username is f.password is None
        f.username = 'uu'
        assert f.username == 'uu' and f.password is None and f.url == 'uu@'
        f.password = 'pp'
        assert f.username == 'uu' and f.password == 'pp' and f.url == 'uu:pp@'
        f.username = ''
        assert f.username == '' and f.password == 'pp' and f.url == ':pp@'
        f.password = ''
        assert f.username == f.password == '' and f.url == ':@'
        f.password = None
        assert f.username == '' and f.password is None and f.url == '@'
        f.username = None
        assert f.username is f.password is None and f.url == ''
        f.password = ''
        assert f.username is None and f.password == '' and f.url == ':@'
def test_basics(self):
url = 'hTtP://www.pumps.com/'
f = furl.furl(url)
assert f.scheme == 'http'
assert f.netloc == 'www.pumps.com'
assert f.host == 'www.pumps.com'
assert f.port == 80
assert str(f.path) == '/'
assert str(f.query) == ''
assert f.args == f.query.params == {}
assert str(f.fragment) == ''
assert f.url == str(f) == url.lower()
assert f.url == furl.furl(f).url == furl.furl(f.url).url
assert f is not f.copy() and f.url == f.copy().url
url = 'HTTPS://wWw.YAHOO.cO.UK/one/two/three?a=a&b=b&m=m%26m#fragment'
f = furl.furl(url)
assert f.scheme == 'https'
assert f.netloc == 'www.yahoo.co.uk'
assert f.host == 'www.yahoo.co.uk'
assert f.port == 443
assert str(f.path) == '/one/two/three'
assert str(f.query) == 'a=a&b=b&m=m%26m'
assert f.args == f.query.params == {'a': 'a', 'b': 'b', 'm': 'm&m'}
assert str(f.fragment) == 'fragment'
assert f.url == str(f) == url.lower()
assert f.url == furl.furl(f).url == furl.furl(f.url).url
assert f is not f.copy() and f.url == f.copy().url
url = 'sup://192.168.1.102:8080///one//a%20b////?s=kwl%20string#frag'
f = furl.furl(url)
assert f.scheme == 'sup'
assert f.netloc == '192.168.1.102:8080'
assert f.host == '192.168.1.102'
assert f.port == 8080
assert str(f.path) == '///one//a%20b////'
assert str(f.query) == 's=kwl+string'
assert f.args == f.query.params == {'s': 'kwl string'}
assert str(f.fragment) == 'frag'
quoted = 'sup://192.168.1.102:8080///one//a%20b////?s=kwl+string#frag'
assert f.url == str(f) == quoted
assert f.url | |
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.html import A, URL
from gluon.storage import Storage
from s3 import s3_fullname
T = current.T
settings = current.deployment_settings
"""
Template settings for NYC Prepared
"""
# Pre-Populate
settings.base.prepopulate = ("NYC",)
settings.base.system_name = T("NYC Prepared")
settings.base.system_name_short = T("NYC Prepared")
# Theme (folder to use for views/layout.html)
settings.base.theme = "NYC"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
settings.ui.filter_formstyle = "table_inline"
# Parser for inbound messages lives in the template's parser.py
settings.msg.parser = "NYC"
# Uncomment to Hide the language toolbar
settings.L10n.display_toolbar = False
# Default timezone for users
settings.L10n.utc_offset = "UTC -0500"
# Uncomment these to use US-style dates in English
settings.L10n.date_format = "%m-%d-%Y"
# Start week on Sunday
settings.L10n.firstDOW = 0
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
# PDF to Letter
settings.base.paper_size = T("Letter")
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
settings.gis.countries = ("US",)
settings.fin.currencies = {
    "USD" : T("United States Dollars"),
}
settings.L10n.languages = OrderedDict([
    ("en", "English"),
    ("es", "Español"),
])
# Authentication settings
# These settings should be changed _after_ the 1st (admin) user is
# registered in order to secure the deployment
# Should users be allowed to register themselves?
settings.security.self_registration = "index"
# Do new users need to verify their email address?
settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
settings.auth.registration_requires_approval = True
# Always notify the approver of a new (verified) user, even if the user is automatically approved
#settings.auth.always_notify_approver = False
# Uncomment this to request the Mobile Phone when a user registers
settings.auth.registration_requests_mobile_phone = True
# Uncomment this to request the Organisation when a user registers
settings.auth.registration_requests_organisation = True
# Uncomment this to request the Site when a user registers
#settings.auth.registration_requests_site = True
# Roles that newly-registered users get automatically
#settings.auth.registration_roles = { 0: ["comms_dispatch"]}
#settings.auth.registration_link_user_to = {"staff":T("Staff"),
#                                           #"volunteer":T("Volunteer")
#                                           }
settings.auth.registration_link_user_to_default = "staff"
settings.security.policy = 5 # Controller, Function & Table ACLs
# Enable this to have Open links in IFrames open a full page in a new tab
settings.ui.iframe_opens_full = True
settings.ui.label_attachments = "Media"
settings.ui.update_label = "Edit"
# Uncomment to disable checking that LatLons are within boundaries of their parent
#settings.gis.check_within_parent_boundaries = False
# GeoNames username
settings.gis.geonames_username = "eden_nyc"
# Uncomment to show created_by/modified_by using Names not Emails
settings.ui.auth_user_represent = "name"
# Record Approval
settings.auth.record_approval = True
# Only organisation records require approval before becoming visible
settings.auth.record_approval_required_for = ("org_organisation",)
# Audit
def audit_write(method, tablename, form, record, representation):
    """
    Audit-policy hook: decide whether a write to the given table
    should be recorded in the audit log.
    """
    if not current.auth.user:
        # Don't include prepop
        return False
    # Perform normal Audit only for user-visible resources.
    audited_tables = ("cms_post",
                      "org_facility",
                      "org_organisation",
                      "req_req",
                      )
    return tablename in audited_tables
# Install the audit-policy hook defined above
settings.security.audit_write = audit_write
# -----------------------------------------------------------------------------
# CMS
# Uncomment to use Bookmarks in Newsfeed
settings.cms.bookmarks = True
# Uncomment to use have Filter form in Newsfeed be open by default
settings.cms.filter_open = True
# Uncomment to adjust filters in Newsfeed when clicking on locations instead of opening the profile page
settings.cms.location_click_filters = True
# Uncomment to use organisation_id instead of created_by in Newsfeed
settings.cms.organisation = "post_organisation.organisation_id"
# Uncomment to use org_group_id in Newsfeed
settings.cms.organisation_group = "post_organisation_group.group_id"
# Uncomment to use person_id instead of created_by in Newsfeed
settings.cms.person = "person_id"
# Uncomment to use Rich Text editor in Newsfeed
settings.cms.richtext = True
# Uncomment to show Links in Newsfeed
settings.cms.show_links = True
# Uncomment to show Tags in Newsfeed
settings.cms.show_tags = True
# Uncomment to show post Titles in Newsfeed
settings.cms.show_titles = True
# -----------------------------------------------------------------------------
# Inventory Management
# Uncomment to customise the label for Facilities in Inventory Management
settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
settings.inv.stock_count = True
# Uncomment to not track pack values
settings.inv.track_pack_values = False
settings.inv.send_show_org = False
# Types common to both Send and Receive
settings.inv.shipment_types = {
    1: T("Other Warehouse")
}
settings.inv.send_types = {
    #21: T("Distribution")
}
settings.inv.send_type_default = 1
settings.inv.item_status = {
    #0: current.messages["NONE"],
    #1: T("Dump"),
    #2: T("Sale"),
    #3: T("Reject"),
    #4: T("Surplus")
}
# -----------------------------------------------------------------------------
# Organisations
#
# Enable the use of Organisation Groups
settings.org.groups = "Network"
# Make Services Hierarchical
settings.org.services_hierarchical = True
# Set the label for Sites
settings.org.site_label = "Facility"
#settings.org.site_label = "Location"
# Uncomment to show the date when a Site (Facilities-only for now) was last contacted
settings.org.site_last_contacted = True
# Enable certain fields just for specific Organisations
# empty list => disabled for all (including Admin)
#settings.org.dependent_fields = { \
#    "pr_person_details.mother_name" : [],
#    "pr_person_details.father_name" : [],
#    "pr_person_details.company" : [],
#    "pr_person_details.affiliations" : [],
#    "vol_volunteer.active" : [],
#    "vol_volunteer_cluster.vol_cluster_type_id" : [],
#    "vol_volunteer_cluster.vol_cluster_id" : [],
#    "vol_volunteer_cluster.vol_cluster_position_id" : [],
#    }
# Uncomment to use an Autocomplete for Site lookup fields
settings.org.site_autocomplete = True
# Extra fields to search in Autocompletes & display in Representations
settings.org.site_autocomplete_fields = ("organisation_id$name",
                                         "location_id$addr_street",
                                         )
# Uncomment to hide inv & req tabs from Sites
#settings.org.site_inv_req_tabs = True
def facility_marker_fn(record):
    """
    Function to decide which Marker to use for Facilities Map

    The base marker is chosen from the Facility Types linked to the Site;
    when the Requests module is enabled, the marker is colour-coded by the
    record's request priority.
    @ToDo: Legend
    """
    db = current.db
    s3db = current.s3db
    table = db.org_facility_type
    ltable = db.org_site_facility_type
    # Look up the Facility Types linked to this Site
    query = (ltable.site_id == record.site_id) & \
            (ltable.facility_type_id == table.id)
    rows = db(query).select(table.name)
    types = [row.name for row in rows]
    # Use Marker in preferential order
    if "Hub" in types:
        marker = "warehouse"
    elif "Medical Clinic" in types:
        marker = "hospital"
    elif "Food" in types:
        marker = "food"
    elif "Relief Site" in types:
        marker = "asset"
    elif "Residential Building" in types:
        marker = "residence"
    #elif "Shelter" in types:
    #    marker = "shelter"
    else:
        # Unknown
        marker = "office"
    if settings.has_module("req"):
        # Colour code by open/priority requests
        # (record.reqs presumably carries an aggregated request priority:
        #  3=High, 2=Medium, 1=Low -- TODO confirm against the caller)
        reqs = record.reqs
        if reqs == 3:
            # High
            marker = "%s_red" % marker
        elif reqs == 2:
            # Medium
            marker = "%s_yellow" % marker
        elif reqs == 1:
            # Low
            marker = "%s_green" % marker
    mtable = db.gis_marker
    try:
        marker = db(mtable.name == marker).select(mtable.image,
                                                  mtable.height,
                                                  mtable.width,
                                                  cache=s3db.cache,
                                                  limitby=(0, 1)
                                                  ).first()
    except Exception:
        # Fall back to the generic office marker.
        # NB: was a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt -- narrowed to Exception.
        marker = db(mtable.name == "office").select(mtable.image,
                                                    mtable.height,
                                                    mtable.width,
                                                    cache=s3db.cache,
                                                    limitby=(0, 1)
                                                    ).first()
    return marker
# -----------------------------------------------------------------------------
def org_facility_onvalidation(form):
    """
    Default the name to the Street Address
    """
    form_vars = form.vars
    if form_vars.get("name"):
        # A name was entered: nothing to default
        return
    # Prefer the Street Address; otherwise fall back to the represented Location
    address = form_vars.get("address")
    form_vars.name = address or \
        current.db.org_facility.location_id.represent(form_vars.location_id)
# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):
    """
    Customise the org_facility controller: per-feature map markers,
    a filter hiding Private Residences, location-selector widgets and
    a reduced popup form for req/create.
    """
    s3db = current.s3db
    s3 = current.response.s3
    # Tell the client to request per-feature markers
    s3db.configure("org_facility", marker_fn=facility_marker_fn)
    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        if r.method not in ("read", "update"):
            types = r.get_vars.get("site_facility_type.facility_type_id__belongs", None)
            if not types:
                # Hide Private Residences
                from s3 import FS
                s3.filter = FS("site_facility_type.facility_type_id$name") != "Private Residence"
        if r.interactive:
            tablename = "org_facility"
            table = s3db[tablename]
            if not r.component and r.method in (None, "create", "update"):
                from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3MultiSelectWidget
                field = table.location_id
                if r.method in ("create", "update"):
                    field.label = "" # Gets replaced by widget
                # Restrict the location hierarchy levels shown in the selector
                levels = ("L2", "L3")
                field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                field.widget = S3LocationSelectorWidget2(levels=levels,
                                                         hide_lx=False,
                                                         reverse_lx=True,
                                                         show_address=True,
                                                         show_postcode=True,
                                                         )
                table.organisation_id.widget = S3MultiSelectWidget(multiple=False)
            if r.get_vars.get("format", None) == "popup":
                # Coming from req/create form
                # Hide most Fields
                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                # We default this onvalidation
                table.name.notnull = False
                table.name.requires = None
                crud_form = S3SQLCustomForm(S3SQLInlineComponent(
                    "site_facility_type",
                    label = T("Facility Type"),
                    fields = [("", "facility_type_id")],
                    multiple = False,
                    required = True,
                ),
                                            "name",
                                            "location_id",
                                            )
                s3db.configure(tablename,
                               crud_form = crud_form,
                               onvalidation = org_facility_onvalidation,
                               )
        return True
    s3.prep = custom_prep
    return attr
settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
from gluon.html import DIV, INPUT
from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
s3db = current.s3db
if r.tablename == "org_organisation":
if r.id:
# Update form
ctable = s3db.pr_contact
query = (ctable.pe_id == r.record.pe_id) & \
(ctable.contact_method == "RSS") & \
(ctable.deleted == False)
rss = current.db(query).select(ctable.poll,
limitby=(0, 1)
).first()
if rss and not rss.poll:
# Remember that we don't wish to import
rss_import = "on"
else:
# Default
rss_import = None
else:
# Create form: Default
rss_import = None
else:
# Component
if r.component_id:
# Update form
db = current.db
otable = s3db.org_organisation
org = db(otable.id == r.component_id).select(otable.pe_id,
limitby=(0, 1)
).first()
try:
pe_id = org.pe_id
except:
current.log.error("Org %s not found: cannot set | |
if is_admin is None or not is_admin:
resp = make_response(jsonify(error='Not permitted to view this content. Must be an admin user.'), 403)
resp.mimetype = "application/javascript"
return resp
# choose how many images to request
num_labeled_images = request.args.get('num_labeled_images', None)
print('num_labeled_images:', num_labeled_images)
try:
df_input_data = sql_queries.get_all_user_input_data(engine,
user_id=user_id,
label_task_id=label_task_id,
n=num_labeled_images)
resp = make_response(df_input_data.to_json(orient='records'), 200)
resp.mimetype = "application/javascript"
return resp
except Exception as e:
logger.error(e)
resp = make_response(jsonify(error='No input data found for this user and/or label task'), 404)
resp.mimetype = "application/javascript"
return resp
@ebp.route('/image_labeler/api/v1.0/all_data/label_tasks/<int:label_task_id>/users/<user_id>/first/filter/<label_filter>', methods=['GET'])
@fje.jwt_required
def get_first_user_input_data(label_task_id, user_id, label_filter):
    """
    Get the ID of the first input_data item that matches the filter.

    :param label_task_id: ID of the label task
    :param user_id: numeric user ID or the literal 'own'
    :param label_filter: filter applied to the user's labeled items
    :return: JSON-serialised records, or a JSON error
    """
    engine = current_app.config['engine']
    # check that the user has permission to get the requested data: admin users can get any user's data, but an
    # ordinary user can only get their own data
    user_identity = fje.get_jwt_identity()
    user_id_from_auth = ua.get_user_id_from_token(user_identity)
    # get user ID specified ('own' is an alias for the authenticated user)
    try:
        user_id = int(user_id)
    except ValueError:
        if user_id == 'own':
            user_id = user_id_from_auth
        else:
            resp = make_response(jsonify(error='Must either specify ".../user_id/own" or ".../user_id/<user_id>"'), 405)
            resp.mimetype = "application/javascript"
            return resp
    if user_id != user_id_from_auth:
        is_admin = sql_queries_admin.is_user_an_admin(engine, user_id_from_auth)
        if is_admin is None or not is_admin:
            resp = make_response(jsonify(error='Not permitted to view this content. Must be an admin user.'), 403)
            resp.mimetype = "application/javascript"
            return resp
    try:
        df_input_data = sql_queries.get_first_user_input_data(engine,
                                                              user_id=user_id,
                                                              label_task_id=label_task_id,
                                                              label_filter=label_filter)
        # NB: renamed from `json` -- that name shadowed the json module
        payload = df_input_data.to_json(orient='records')
        logger.debug(payload)
        resp = make_response(payload, 200)
        resp.mimetype = "application/javascript"
        return resp
    except Exception as e:
        logger.error(e)
        resp = make_response(jsonify(error='No input data found for this user and/or label task'), 404)
        resp.mimetype = "application/javascript"
        return resp
@ebp.route('/image_labeler/api/v1.0/all_data/label_tasks/<int:label_task_id>/users/<user_id>/last/filter/<label_filter>', methods=['GET'])
@fje.jwt_required
def get_last_user_input_data(label_task_id, user_id, label_filter):
    """
    Get the ID of the last input_data item that matches the filter.

    :param label_task_id: ID of the label task
    :param user_id: numeric user ID or the literal 'own'
    :param label_filter: filter applied to the user's labeled items
    :return: JSON-serialised records, or a JSON error
    """
    engine = current_app.config['engine']
    # check that the user has permission to get the requested data: admin users can get any user's data, but an
    # ordinary user can only get their own data
    user_identity = fje.get_jwt_identity()
    user_id_from_auth = ua.get_user_id_from_token(user_identity)
    # get user ID specified ('own' is an alias for the authenticated user)
    try:
        user_id = int(user_id)
    except ValueError:
        if user_id == 'own':
            user_id = user_id_from_auth
        else:
            resp = make_response(jsonify(error='Must either specify ".../user_id/own" or ".../user_id/<user_id>"'), 405)
            resp.mimetype = "application/javascript"
            return resp
    if user_id != user_id_from_auth:
        is_admin = sql_queries_admin.is_user_an_admin(engine, user_id_from_auth)
        if is_admin is None or not is_admin:
            resp = make_response(jsonify(error='Not permitted to view this content. Must be an admin user.'), 403)
            resp.mimetype = "application/javascript"
            return resp
    try:
        df_input_data = sql_queries.get_last_user_input_data(engine,
                                                             user_id=user_id,
                                                             label_task_id=label_task_id,
                                                             label_filter=label_filter)
        # NB: renamed from `json` -- that name shadowed the json module
        payload = df_input_data.to_json(orient='records')
        logger.debug(payload)
        resp = make_response(payload, 200)
        resp.mimetype = "application/javascript"
        return resp
    except Exception as e:
        logger.error(e)
        resp = make_response(jsonify(error='No input data found for this user and/or label task'), 404)
        resp.mimetype = "application/javascript"
        return resp
@ebp.route('/image_labeler/api/v1.0/examples/label_tasks/<int:label_task_id>', methods=['GET'])
@fje.jwt_required
def get_label_examples(label_task_id):
    """
    Get the example label images for this label task, to show user how to label

    :param label_task_id:
    :return: JSON-serialised example records, or a JSON error
    """
    engine = current_app.config['engine']
    df_examples = sql_queries.get_example_labelings(engine, label_task_id)
    if df_examples is None:
        # No examples stored for this label task
        body, status = jsonify(error='No example labels found'), 404
    else:
        body, status = df_examples.to_json(orient='records'), 200
    resp = make_response(body, status)
    resp.mimetype = "application/javascript"
    return resp
@ebp.route('/image_labeler/api/v1.0/labels/input_data/<int:input_data_id>/label_tasks/<int:label_task_id>', methods=['GET'])
@fje.jwt_required
def get_latest_label_history_for_logged_in_user(input_data_id, label_task_id):
    """
    Get the latest label history item for a particular user/label task/input data item combination

    :param input_data_id:
    :param label_task_id:
    :return: JSON-serialised label record, or a JSON error
    """
    engine = current_app.config['engine']
    # the user is always the one identified by the auth token
    user_id = ua.get_user_id_from_token(fje.get_jwt_identity())
    df_latest_label = sql_queries.get_latest_label(engine, user_id, label_task_id, input_data_id)
    if df_latest_label is None:
        body, status = jsonify(error='No label found'), 404
    else:
        body, status = df_latest_label.to_json(orient='records'), 200
    resp = make_response(body, status)
    resp.mimetype = "application/javascript"
    return resp
@ebp.route('/image_labeler/api/v1.0/labels/input_data/<int:input_data_id>/label_tasks/<int:label_task_id>/users/<user_id>', methods=['GET'])
@fje.jwt_required
def get_latest_label_history_for_specified_user(input_data_id, label_task_id, user_id):
    """
    Get the latest label history item for a particular user/label task/input data item combination

    :param input_data_id:
    :param label_task_id:
    :param user_id: numeric user ID or the literal 'own'
    :return: JSON-serialised label record, or a JSON error
    """
    engine = current_app.config['engine']
    user_identity = fje.get_jwt_identity()
    user_id_from_auth = ua.get_user_id_from_token(user_identity)
    # The <user_id> URL segment arrives as a string: convert it, accepting
    # 'own' as an alias (consistent with the other .../users/<user_id>
    # endpoints). Previously the raw string was compared against the numeric
    # auth ID, so the admin check was triggered even for a user's own data,
    # and the string was passed straight to the SQL layer.
    try:
        user_id = int(user_id)
    except ValueError:
        if user_id == 'own':
            user_id = user_id_from_auth
        else:
            resp = make_response(jsonify(error='Must either specify ".../users/own" or ".../users/<user_id>"'), 405)
            resp.mimetype = "application/javascript"
            return resp
    if user_id != user_id_from_auth:
        is_admin = sql_queries_admin.is_user_an_admin(engine, user_id_from_auth)
        if is_admin is None or not is_admin:
            resp = make_response(jsonify(error='Not permitted to view this content. Must be an admin user.'), 403)
            resp.mimetype = "application/javascript"
            return resp
    df_latest_label = sql_queries.get_latest_label(engine, user_id, label_task_id, input_data_id)
    if df_latest_label is not None:
        resp = make_response(df_latest_label.to_json(orient='records'), 200)
        resp.mimetype = "application/javascript"
        return resp
    else:
        resp = make_response(jsonify(error='No label found'), 404)
        resp.mimetype = "application/javascript"
        return resp
@ebp.route('/image_labeler/api/v1.0/label_ids/label_tasks/<int:label_task_id>/input_data/<int:input_data_id>/user/<int:user_id>',
           methods=['GET'])
@fje.jwt_required
def get_label_id(user_id, label_task_id, input_data_id):
    """
    Get the ID of the label for a particular label task, user and input data item

    (NB: previous docstring said "Store the label" -- this is a read-only
    GET endpoint.)

    :param user_id: ID of user
    :param label_task_id: ID of the label task that we want to retrieve an image for
    :param input_data_id: ID of the input data item that has been labeled
    :return: JSON {label_id: ...}, or a JSON error
    """
    engine = current_app.config['engine']
    # get ID of user from the auth token
    user_identity = fje.get_jwt_identity()
    user_id_from_auth = ua.get_user_id_from_token(user_identity)
    # non-admins may only look up their own label IDs
    if user_id != user_id_from_auth:
        is_admin = sql_queries_admin.is_user_an_admin(engine, user_id_from_auth)
        if is_admin is None or not is_admin:
            resp = make_response(jsonify(error='Not permitted to view this content. Must be an admin user.'), 403)
            resp.mimetype = "application/javascript"
            return resp
    try:
        # find the label that the serialised label corresponds to
        label_id = sql_queries.get_label_id(engine,
                                            user_id=user_id,
                                            label_task_id=label_task_id,
                                            input_data_id=input_data_id)
        if label_id is None:
            resp = make_response(jsonify(error='Could not find label ID'), 404)
            resp.mimetype = "application/javascript"
            return resp
        else:
            resp = make_response(jsonify(label_id=label_id), 200)
            resp.mimetype = "application/javascript"
            return resp
    except Exception as e:
        logger.error(e)
        resp = make_response(jsonify(error='Bad request'), 400)
        resp.mimetype = "application/javascript"
        return resp
@ebp.route('/image_labeler/api/v1.0/labels/<int:label_id>',
           methods=['GET'])
@fje.jwt_required
def get_label(label_id):
    """
    Get the label

    Viewable by the label's owner or by an admin user.

    :param label_id: ID of label
    :return: JSON-serialised label row, or a JSON error
    """
    engine = current_app.config['engine']
    # get ID of user
    user_identity = fje.get_jwt_identity()
    user_id_from_auth = ua.get_user_id_from_token(user_identity)
    try:
        df_labels = sql_queries.get_label_by_id(engine, label_id)
        if df_labels is None:
            resp = make_response(jsonify(error='Could not find label'), 404)
            resp.mimetype = "application/javascript"
            return resp
        else:
            # get the first label (which should be the only one)
            if len(df_labels) == 1:
                df_label = df_labels.iloc[0, :]
            else:
                # NOTE(review): this ValueError is caught by the except below,
                # so duplicate rows surface as a generic 400 'Bad request'
                raise ValueError('Expected single label to be returned')
            is_admin = sql_queries_admin.is_user_an_admin(engine, user_id_from_auth)
            # owner or admin may view
            if df_label['user_id'] == user_id_from_auth or is_admin:
                resp = make_response(df_label.to_json(), 200)
                resp.mimetype = "application/javascript"
                return resp
            else:
                resp = make_response(jsonify(error='Not permitted to view this content. Must be an admin user.'), 403)
                resp.mimetype = "application/javascript"
                return resp
    except Exception as e:
        logger.error(e)
        resp = make_response(jsonify(error='Bad request'), 400)
        resp.mimetype = "application/javascript"
        return resp
# ------------- ADMIN get requests ---------------
@ebp.route('/image_labeler/api/v1.0/users', methods=['GET'])
@fje.jwt_required
def get_all_users():
    """
    Get list of users and their details

    Requires admin privileges

    :return: JSON-serialised user records, or a JSON error
    """
    engine = current_app.config['engine']
    requester_id = ua.get_user_id_from_token(fje.get_jwt_identity())
    # admin-only endpoint (is_user_an_admin may return None, False or True)
    if not sql_queries_admin.is_user_an_admin(engine, requester_id):
        resp = make_response(jsonify(error='Not permitted to view this content. Must be an admin user.'), 403)
        resp.mimetype = "application/javascript"
        return resp
    df_users = sql_queries_admin.get_users(engine)
    if df_users is None:
        body, status = jsonify(error='No users found'), 404
    else:
        body, status = df_users.to_json(orient='records'), 200
    resp = make_response(body, status)
    resp.mimetype = "application/javascript"
    return resp
@ebp.route('/image_labeler/api/v1.0/label_tasks/users/<int:user_id>', methods=['GET'])
@fje.jwt_required
def get_label_tasks_for_user(user_id):
    """
    Get list of label tasks that the user has already labeled data for

    Requires admin privileges unless requesting one's own tasks

    :param user_id: user ID of the user to get the tasks for
    :return: JSON-serialised label tasks, or a JSON error
    """
    engine = current_app.config['engine']
    user_identity = fje.get_jwt_identity()
    user_id_from_auth = ua.get_user_id_from_token(user_identity)
    # check if user is an admin user (only needed for other users' tasks)
    if user_id != user_id_from_auth:
        is_admin = sql_queries_admin.is_user_an_admin(engine, user_id_from_auth)
        if is_admin is None or not is_admin:
            resp = make_response(jsonify(error='Not permitted to view this content. Must be an admin user.'), 403)
            resp.mimetype = "application/javascript"
            return resp
    df_label_tasks = sql_queries.get_label_tasks(engine, user_id)
    if df_label_tasks is not None:
        resp = make_response(df_label_tasks.to_json(orient='records'), 200)
        resp.mimetype = "application/javascript"
        return resp
    else:
        # was: 'No users found' -- copy/paste error; this endpoint returns label tasks
        resp = make_response(jsonify(error='No label tasks found'), 404)
        resp.mimetype = "application/javascript"
        return resp
@ebp.route('/image_labeler/api/v1.0/missing_input_data', methods=['GET'])
@fje.jwt_required
def find_missing_input_data():
    """
    Count input data items whose recorded file path no longer exists on disk.

    Requires admin privileges.

    :return: JSON {num_paths_total, num_missing_paths}, or a JSON error
    """
    engine = current_app.config['engine']
    user_identity = fje.get_jwt_identity()
    user_id_from_auth = ua.get_user_id_from_token(user_identity)
    # check if user is an admin user
    is_admin = sql_queries_admin.is_user_an_admin(engine, user_id_from_auth)
    if is_admin is None or not is_admin:
        resp = make_response(jsonify(error='Not permitted to view this content. Must be an admin user.'), 403)
        resp.mimetype = "application/javascript"
        return resp
    df_paths = sql_queries_admin.get_missing_input_data(engine)
    if df_paths is not None:
        # check each recorded path against the local filesystem
        paths = df_paths['data_path'].tolist()
        missing_paths = [p for p in paths if not os.path.exists(p)]
        resp = make_response(jsonify(num_paths_total=len(paths), num_missing_paths=len(missing_paths)), 200)
        resp.mimetype = "application/javascript"
        return resp
    else:
        resp = make_response(jsonify(error='No label task found'), 404)
        resp.mimetype = "application/javascript"
        return resp
# --------------- POST requests ---------------
# Provide a method to create access tokens. The create_access_token()
# function is used to actually generate the token, and you can return
# it | |
<gh_stars>100-1000
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope
from utils_fn import *
from ops import *
from loss import *
from metrics import *
class InpaintModel():
    def __init__(self, args):
        """Store model configuration taken from the parsed args object."""
        self.model_name = "InpaintModel" # name for checkpoint
        self.dataset_name = args.DATASET # TODO: get the name of the data set, it depends the path structure
        # output/bookkeeping directories
        self.checkpoint_dir = args.CHECKPOINT_DIR
        self.sample_dir = args.SAMPLE_DIR
        self.result_dir = args.RESULT_DIR
        self.log_dir = args.LOG_DIR
        # training schedule
        self.epoch = args.EPOCH
        self.batch_size = args.BATCH_SIZE
        self.print_freq = args.PRINT_FREQ
        self.save_freq = args.SAVE_FREQ
        # input image shape (h, w, c)
        self.img_size = args.IMG_SHAPES
# yj
    def build_inpaint_net(self, x, edge, grad, mask, args=None, reuse=False,
                          training=True, padding='SAME', name='inpaint_net'):
        """Inpaint network.

        Encoder/decoder with residual blocks and shared attention pooling,
        emitting image and gradient-map predictions at three scales
        (64, 128, 256).

        Args:
            x: incomplete image[-1, 1] with shape of (batch_size, h, w, c)
            edge: incomplete edge {0, 1} with shape of (batch_size, h, w)
            grad map: incomplete grad with shape of (batch_size, h, w, 6)
            mask: mask region {0, 1}
            args: config object providing IMG_SHAPES
            reuse: whether to reuse the variable scope
            training: training-mode flag forwarded to the conv ops
            padding: padding mode forwarded to the conv ops
            name: variable scope name
        Returns:
            complete image, grad map, middle result
        """
        x = tf.reshape(x, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], args.IMG_SHAPES[2]])
        mask = tf.reshape(mask, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
        edge = tf.reshape(edge, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
        # grad = tf.reshape(grad, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 6])
        xin = x
        ones_x = tf.ones_like(x)[:, :, :, 0:1]
        x = tf.concat([x, ones_x * edge, ones_x * mask, grad], axis=3) # add a mask channel,the input channel is 4
        # encoder-decoder network: channel 64-128-256-128-64
        cnum = 64 # initial channel
        # a decorate: arg_scope([op1, op2,..], xx,..) means:
        # attributes or parameters xx defined here are the default in op1 and op2,..
        with tf.variable_scope(name, reuse=reuse), \
            arg_scope([gen_conv, gen_deconv],
                      training=training, padding=padding):
            # Encoder
            # scale 256 channels activation: relu
            x = gen_conv(x, cnum, 7, stride=1, activation=tf.nn.relu, name='en_conv1') # 9 -> 64, ksize=7x7, stride=1
            # scale 128
            x = gen_conv(x, 2 * cnum, 4, stride=2, activation=tf.nn.relu, name='en_conv2')
            # scale 64
            x = gen_conv(x, 4 * cnum, 4, stride=2, activation=tf.nn.relu, name='en_conv3')
            # res block (8 dilated residual blocks at the bottleneck)
            x = resnet_blocks(x, 4 * cnum, 3, stride=1, rate=2, block_num=8, activation=tf.nn.relu, name='en_64_8')
            # Decoder
            # TODO: output scale 64 Down scale = 2 (origin) pool scale = 2 (origin)
            # share attention
            x = attention(x, 4 * cnum, down_scale=2, pool_scale=2, name='attention_pooling_64')
            # out of predict grad map
            x_64 = gen_conv(x, 4 * cnum, 5, stride=1, activation=tf.nn.relu, name='out64_grad_out')
            x_grad_out_64 = gen_conv(x_64, 6, 1, stride=1, activation=None, name='grad64')
            x_out_64 = gen_conv(x_64, 3, 1, stride=1, activation=tf.nn.tanh, name='out64')
            # scale 64 - 128
            x = tf.concat([x, x_64], axis=3)
            x = gen_deconv(x, 2 * cnum, 4, method='deconv', activation=tf.nn.relu, name='de128_conv4_upsample')
            # TODO: output scale 128
            # share attention
            x = attention(x, 2 * cnum, down_scale=2, pool_scale=2, name='attention_pooling_128')
            # out of predict grad map
            x_128 = gen_conv(x, 2 * cnum, 5, stride=1, activation=tf.nn.relu, name='out128_grad_out')
            x_grad_out_128 = gen_conv(x_128, 6, 1, stride=1, activation=None, name='grad128')
            x_out_128 = gen_conv(x_128, 3, 1, stride=1, activation=tf.nn.tanh, name='out128')
            # scale 128 - 256
            x = tf.concat([x, x_128], axis=3)
            x = gen_deconv(x, cnum, 4, method='deconv', activation=tf.nn.relu, name='de256_conv5_upsample')
            # TODO: output scale 256
            # share attention
            x = attention(x, cnum, down_scale=2, pool_scale=2, name='attention_pooling_256')
            # out of predict grad map
            x = gen_conv(x, cnum, 5, stride=1, activation=tf.nn.relu, name='out256_grad_out')
            x_grad = gen_conv(x, 6, 1, stride=1, activation=None, name='grad256') # grad map no activation
            x = gen_conv(x, 3, 1, stride=1, activation=tf.nn.tanh, name='out256')
        return x, x_out_64, x_out_128, x_grad, x_grad_out_64, x_grad_out_128
# yj
def build_discriminator_256(self, x, reuse=False, name='discriminator256', sn=True, training=True):
"""
Patch GAN discriminator component, receptive filed: 70*70
"""
with tf.variable_scope(name, reuse=reuse):
cnum = 64
x = dis_conv(x, cnum, ksize=4, stride=2, name='conv1', sn=sn, training=training) # leaky_relu
x = dis_conv(x, cnum*2, ksize=4, stride=2, name='conv2', sn=sn, training=training)
x = dis_conv(x, cnum*4, ksize=4, stride=2, name='conv3', sn=sn, training=training)
x = dis_conv(x, cnum*8, ksize=4, stride=1, name='conv4', sn=sn, training=training)
x = dis_conv(x, 1, ksize=4, stride=1, name='conv5', activation=None, sn=sn, training=training)
return x
# yj
def build_graph_with_losses(self, x, mask, edge, edge_128, edge_64, args, training=True, reuse=False):
# Orgin image, edge, grad
# image, edge, edge_128, edge_64 = x
grad = tf.image.sobel_edges(x) # normalization?
grad = tf.reshape(grad, [args.BATCH_SIZE, 256, 256, 6]) # 6 channel
# x for image
# x = tf.reshape(image, [args.BATCH_SIZE, args.IMG_SHAPES[0], args.IMG_SHAPES[1],
# args.IMG_SHAPES[2]]) # [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], args.IMG_SHAPES[2]]
# mask = tf.reshape(mask, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
# edge = tf.reshape(edge, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
# edge_128 = tf.reshape(edge_128, [-1, 128, 128, 1])
# edge_64 = tf.reshape(edge_64, [-1, 64, 64, 1])
# incomplete image at full scale
x_incomplete = x * (1. - mask) # mask: 0 for valid pixel, 1 (white) for hole
# incomplete edge at full scale
input_edge = 1 - edge # 0 (black) for edge when save and input, 1 (white) for non edge
edge_incomplete = input_edge * (1 - mask) + mask
# incomplete grad
grad_incomplete = (1. - mask) * grad
# bulid inpaint net
out_256, out_64, out_128, out_grad_256, out_grad_64, out_grad_128 = self.build_inpaint_net(x_incomplete,
edge_incomplete, grad_incomplete,
mask, args, reuse=reuse,
training=training, padding=args.PADDING)
"""##### Losses #####"""
losses = {} # use a dict to collect losses
# TODO: scale 64
# complete image
mask_64 = tf.image.resize_nearest_neighbor(mask, (64, 64))
x_pos_64 = tf.image.resize_nearest_neighbor(x, (64, 64)) # pos input (real)
x_incomplete_64 = x_pos_64 * (1. - mask_64)
x_complete_64 = out_64 * mask_64 + x_incomplete_64
x_neg_64 = x_complete_64 # neg input (fake)
# Auxilary task: edge and grad loss
grad_64 = tf.image.sobel_edges(x_pos_64) # normalization?
grad_64 = tf.reshape(grad_64, [args.BATCH_SIZE, 64, 64, 6]) # 6 channel
grad_incomplete_64 = (1. - mask_64) * grad_64
grad_complete_64 = out_grad_64 * mask_64 + grad_incomplete_64
# more weight for edges?
edge_mask_64 = edge_64 # 1 for edge, 0 for grad, when using feature.canny()
mask_priority_64 = priority_loss_mask(edge_mask_64, ksize=5, sigma=1, iteration=2)
edge_weight_64 = args.EDGE_ALPHA * mask_priority_64 # salient edge
grad_weight_64 = args.GRAD_ALPHA # equaled grad
# error
grad_error_64 = tf.abs(out_grad_64 - grad_64)
# edge loss
losses['edge_l1_loss_64'] = tf.reduce_sum(edge_weight_64 * grad_error_64) / tf.reduce_sum(edge_weight_64) / 6.
# grad pixel level reconstruction loss
if args.GRAD_ALPHA > 0:
losses['grad_l1_loss_64'] = tf.reduce_mean(grad_weight_64 * grad_error_64)
else:
losses['grad_l1_loss_64'] = 0.
# grad patch level loss with implicit nearest neighbor matching
if args.GRAD_MATCHING_ALPHA > 0:
losses['grad_matching_64'] = args.GRAD_MATCHING_ALPHA * grad_matching_loss(out_grad_64, grad_64, args)
else:
losses['grad_matching_64'] = 0.
# Main task: compute losses
# l1 loss
# if args.L1_SCALE > 0.:
loss_caculator = LossCalculator(args.VGG_DIR, x_pos_64) # x_pose_256: real image
losses['l1_loss_fore_64'] = args.L1_SCALE * args.L1_FORE_ALPHA * loss_caculator.l1_loss(x_pos_64, out_64, mask_64,
type='foreground')
losses['l1_loss_back_64'] = args.L1_SCALE * args.L1_BACK_ALPHA * loss_caculator.l1_loss(x_pos_64, out_64, mask_64,
type='background')
# else:
# losses['l1_loss_fore_64'] = 0.
# losses['l1_loss_back_64'] = 0.
self.losses_64 = [losses['l1_loss_fore_64'],
losses['l1_loss_back_64'],
losses['grad_l1_loss_64'],
losses['edge_l1_loss_64'],
losses['grad_matching_64']
]
# Summary
viz_img_64 = [x_pos_64, x_incomplete_64, x_complete_64]
viz_grad_64 = [grad_64[:, :, :, 0:1], grad_incomplete_64[:, :, :, 0:1], grad_complete_64[:, :, :, 0:1]]
self.img_64 = tf.concat(viz_img_64, axis=2)
self.grad_64 = tf.concat(viz_grad_64, axis=2)
all_sum_64 = [tf.summary.scalar("l1_loss_fore_64", losses['l1_loss_fore_64']),
tf.summary.scalar("l1_loss_back_64", losses['l1_loss_back_64']),
tf.summary.image('raw_incomplete_predicted_complete_64',
tf.concat(viz_img_64, axis=2), max_outputs=args.VIZ_MAX_OUT),
tf.summary.image('raw_incomplete_predicted_completed_grad_64',
tf.concat(viz_grad_64, axis=2), max_outputs=args.VIZ_MAX_OUT),
tf.summary.scalar('grad_l1_loss_64', losses['grad_l1_loss_64']),
tf.summary.scalar('edge_l1_loss_64', losses['edge_l1_loss_64']),
tf.summary.scalar('grad_matching_64', losses['grad_matching_64']),
]
# TODO: scale 128
# complete image
mask_128 = tf.image.resize_nearest_neighbor(mask,(128, 128))
x_pos_128 = tf.image.resize_nearest_neighbor(x, (128, 128)) # pos input (real)
x_incomplete_128 = x_pos_128 * (1. - mask_128)
x_complete_128 = out_128 * mask_128 + x_incomplete_128
x_neg_128 = x_complete_128 # neg input (fake)
# Auxilary task: edge and grad loss
grad_128 = tf.image.sobel_edges(x_pos_128) # normalization?
grad_128 = tf.reshape(grad_128, [args.BATCH_SIZE, 128, 128, 6]) # 6 channel
grad_incomplete_128 = (1. - mask_128) * grad_128
grad_complete_128 = out_grad_128 * mask_128 + grad_incomplete_128
# more weight for edges?
# edge weight
edge_mask_128 = edge_128 # 1 for edge, 0 for grad, when using feature.canny()
mask_priority_128 = priority_loss_mask(edge_mask_128, ksize=5, sigma=1, iteration=2)
edge_weight_128 = args.EDGE_ALPHA * mask_priority_128 # salient edge
# grad weight
grad_weight_128 = args.GRAD_ALPHA # equaled grad
# error
grad_error_128 = tf.abs(out_grad_128 - grad_128)
# edge loss
losses['edge_l1_loss_128'] = tf.reduce_sum(edge_weight_128 * grad_error_128) / tf.reduce_sum(edge_weight_128) / 6.
# grad pixel level reconstruction loss
if args.GRAD_ALPHA > 0:
losses['grad_l1_loss_128'] = tf.reduce_mean(grad_weight_128 * grad_error_128)
else:
losses['grad_l1_loss_128'] = 0.
# grad patch level loss with implicit nearest neighbor matching
if args.GRAD_MATCHING_ALPHA > 0:
losses['grad_matching_128'] = args.GRAD_MATCHING_ALPHA * grad_matching_loss(out_grad_128, grad_128, args)
else:
losses['grad_matching_128'] = 0.
# Main task
# compute losses
# if args.L1_SCALE > 0.:
loss_caculator = LossCalculator(args.VGG_DIR, x_pos_128) # x_pose_256: real image
# l1 loss
losses['l1_loss_fore_128'] = args.L1_SCALE * args.L1_FORE_ALPHA * loss_caculator.l1_loss(x_pos_128, out_128, mask_128,
'foreground')
losses['l1_loss_back_128'] = args.L1_SCALE * args.L1_BACK_ALPHA * loss_caculator.l1_loss(x_pos_128, out_128, mask_128,
'background')
# else:
| |
<reponame>bjoern1001001/python-neo
# -*- coding: utf-8 -*-
'''
Tools for use with neo tests.
'''
import hashlib
import os
import numpy as np
import quantities as pq
import neo
from neo.core import objectlist
from neo.core.baseneo import _reference_name, _container_name
from neo.core.container import Container
from neo.io.basefromrawio import proxyobjectlist, EventProxy, EpochProxy
def assert_arrays_equal(a, b, dtype=False):
    '''
    Check if two arrays have the same shape and contents.

    If dtype is True (default=False), then also check that they have the same
    dtype.
    '''
    assert isinstance(a, np.ndarray), "a is a %s" % type(a)
    assert isinstance(b, np.ndarray), "b is a %s" % type(b)
    # Report the shapes on mismatch (previously the full arrays were printed,
    # which is unreadable for large inputs)
    assert a.shape == b.shape, "shape mismatch: {} != {}".format(a.shape, b.shape)
    try:
        assert (a.flatten() == b.flatten()).all(), "{} != {}".format(a, b)
    except (AttributeError, ValueError):
        # fall back for array subclasses whose == does not yield a plain
        # boolean array (e.g. quantity/masked arrays)
        try:
            ar = np.array(a)
            br = np.array(b)
            assert (ar.flatten() == br.flatten()).all(), "{} != {}".format(ar, br)
        except (AttributeError, ValueError):
            assert np.all(a.flatten() == b.flatten()), "{} != {}".format(a, b)
    if dtype:
        assert a.dtype == b.dtype, "{} and {} not same dtype {} and {}".format(
            a, b, a.dtype, b.dtype)
def assert_arrays_almost_equal(a, b, threshold, dtype=False):
    '''
    Check that two numpy arrays have the same shape and that their
    contents differ by abs(a - b) < threshold for all elements
    (note: strict inequality, absolute difference).

    If threshold is None, fall back to an exact comparison via
    assert_arrays_equal.

    If dtype is True (default False), also check that they have the
    same dtype.
    '''
    if threshold is None:
        return assert_arrays_equal(a, b, dtype=dtype)
    assert isinstance(a, np.ndarray), "a is a %s" % type(a)
    assert isinstance(b, np.ndarray), "b is a %s" % type(b)
    assert a.shape == b.shape, "{} != {}".format(a, b)
    # Only float/complex/signed-int kinds are compared element-wise; other
    # dtypes (unsigned, bool, strings, objects) have no meaningful
    # "almost equal" here and are deliberately not compared.
    if a.dtype.kind in ['f', 'c', 'i']:
        assert (abs(
            a - b) < threshold).all(), "abs(%s - %s) max(|a - b|) = %s threshold:%s" \
                                       "" % (a, b, (abs(a - b)).max(), threshold)
    if dtype:
        assert a.dtype == b.dtype, "{} and {} not same dtype {} and {}".format(
            a, b, a.dtype, b.dtype)
def file_digest(filename):
    '''
    Return the sha1 hex digest of the contents of the given file.
    '''
    sha = hashlib.sha1()
    with open(filename, 'rb') as stream:
        sha.update(stream.read())
    return sha.hexdigest()
def assert_file_contents_equal(a, b):
    '''
    Assert that two files have the same size and hash.
    '''
    def _describe_mismatch(path_a, path_b):
        '''
        Build the assertion message, distinguishing a size mismatch
        from a pure content mismatch.
        '''
        size_a = os.stat(path_a).st_size
        size_b = os.stat(path_b).st_size
        if size_a != size_b:
            return "Files have different sizes: a:%d b: %d" % (size_a, size_b)
        return "Files have the same size but different contents"

    # The message expression is only evaluated when the digests differ.
    assert file_digest(a) == file_digest(b), _describe_mismatch(a, b)
def assert_neo_object_is_compliant(ob, check_type=True):
    '''
    Test neo compliance of one object and its sub objects
    (one_to_many_relation only):
      * check types and/or presence of necessary and recommended attributes.
      * If an attribute is a Quantity or numpy.ndarray, also check ndim.
      * If an attribute is a numpy.ndarray, also check dtype.kind.

    check_type=True by default; can be set to False for testing ProxyObject
    instances, which are not members of ``objectlist``.

    Raises AssertionError on the first violation found; nested failures are
    annotated with the path to the offending child object.
    '''
    if check_type:
        assert type(ob) in objectlist, \
            '%s is not a neo object' % (type(ob))
    classname = ob.__class__.__name__

    # test presence of necessary attributes
    # (skipped for Quantity-derived objects, which store their payload in
    # the array itself rather than as named attributes)
    for ioattr in ob._necessary_attrs:
        attrname, attrtype = ioattr[0], ioattr[1]
        # ~ if attrname != '':
        if not hasattr(ob, '_quantity_attr'):
            assert hasattr(ob, attrname), '{} neo obect does not have {}'.format(
                classname, attrname)

    # test attributes types
    for ioattr in ob._all_attrs:
        attrname, attrtype = ioattr[0], ioattr[1]

        # Case 1: the object itself IS the quantity/array attribute
        # (AnalogSignal, SpikeTrain, ...), so check ndim/dtype on ob directly.
        if (hasattr(ob, '_quantity_attr') and ob._quantity_attr == attrname and (
                attrtype == pq.Quantity or attrtype == np.ndarray)):
            ndim = ioattr[2]
            assert ob.ndim == ndim, '%s dimension is %d should be %d' % (classname, ob.ndim, ndim)
            if attrtype == np.ndarray:
                dtp = ioattr[3]
                assert ob.dtype.kind == dtp.kind, '%s dtype.kind is %s should be %s' \
                                                  '' % (classname, ob.dtype.kind, dtp.kind)
        # Case 2: ordinary attribute; None is allowed (recommended attrs).
        elif hasattr(ob, attrname):
            if getattr(ob, attrname) is not None:
                obattr = getattr(ob, attrname)
                assert issubclass(type(obattr), attrtype), '%s in %s is %s should be %s' \
                                                           '' % (attrname, classname,
                                                                 type(obattr), attrtype)
                if attrtype == pq.Quantity or attrtype == np.ndarray:
                    ndim = ioattr[2]
                    assert obattr.ndim == ndim, '%s.%s dimension is %d should be %d' \
                                                '' % (classname, attrname, obattr.ndim, ndim)
                if attrtype == np.ndarray:
                    dtp = ioattr[3]
                    assert obattr.dtype.kind == dtp.kind, '%s.%s dtype.kind is %s should be %s' \
                                                          '' % (classname, attrname,
                                                                obattr.dtype.kind, dtp.kind)

    # test bijectivity : parents and children
    # (each child in a single-child container must point back at ob)
    for container in getattr(ob, '_single_child_containers', []):
        for i, child in enumerate(getattr(ob, container, [])):
            assert hasattr(child, _reference_name(
                classname)), '%s should have %s attribute (2 way relationship)' \
                             '' % (container, _reference_name(classname))
            if hasattr(child, _reference_name(classname)):
                parent = getattr(child, _reference_name(classname))
                assert parent == ob, '%s.%s %s is not symmetric with %s.%s' \
                                     '' % (container, _reference_name(classname), i, classname,
                                           container)

    # recursive on one to many rel
    for i, child in enumerate(getattr(ob, 'children', [])):
        try:
            assert_neo_object_is_compliant(child)
        # intercept exceptions and add more information
        # (which child of which parent failed)
        except BaseException as exc:
            exc.args += ('from {} {} of {}'.format(child.__class__.__name__, i, classname),)
            raise
def assert_same_sub_schema(ob1, ob2, equal_almost=True, threshold=1e-10, exclude=None):
    '''
    Test that ob1 and ob2 have the same sub schema, recursing through all
    parent/child (one-to-many) relationships.

    Many-to-many relationships are not tested because they would cause
    infinite recursive loops.

    Arguments:
        equal_almost: if False do a strict arrays_equal;
                      if True do arrays_almost_equal with *threshold*
        exclude: a list of attributes and annotations to ignore in
                 the comparison

    Raises AssertionError on the first mismatch; nested failures are
    annotated with the container path that led to them.
    '''
    assert type(ob1) == type(ob2), 'type({}) != type({})'.format(type(ob1), type(ob2))
    classname = ob1.__class__.__name__

    if exclude is None:
        exclude = []

    # Lists are compared element-wise, recursing into each pair.
    if isinstance(ob1, list):
        assert len(ob1) == len(ob2), 'lens %s and %s not equal for %s and %s' \
                                     '' % (len(ob1), len(ob2), ob1, ob2)
        for i, (sub1, sub2) in enumerate(zip(ob1, ob2)):
            try:
                assert_same_sub_schema(sub1, sub2, equal_almost=equal_almost, threshold=threshold,
                                       exclude=exclude)
            # intercept exceptions and add more information
            except BaseException as exc:
                exc.args += ('{}[{}]'.format(classname, i),)
                raise
        return

    # test parent/child relationship: each single-child container must exist
    # on both objects (or neither) and hold the same number of children.
    for container in getattr(ob1, '_single_child_containers', []):
        if container in exclude:
            continue
        if not hasattr(ob1, container):
            assert not hasattr(ob2, container), '%s 2 does have %s but not %s 1' \
                                                '' % (classname, container, classname)
            continue
        else:
            assert hasattr(ob2, container), '{} 1 has {} but not {} 2'.format(classname, container,
                                                                              classname)

        sub1 = getattr(ob1, container)
        sub2 = getattr(ob2, container)

        assert len(sub1) == len(
            sub2), 'theses two %s do not have the same %s number: %s and %s' \
                   '' % (classname, container, len(sub1), len(sub2))
        for i in range(len(getattr(ob1, container))):
            # previously lacking parameter
            try:
                assert_same_sub_schema(sub1[i], sub2[i], equal_almost=equal_almost,
                                       threshold=threshold, exclude=exclude)
            # intercept exceptions and add more information
            except BaseException as exc:
                exc.args += ('from {}[{}] of {}'.format(container, i, classname),)
                raise

    # finally compare the attributes/annotations on the objects themselves
    assert_same_attributes(ob1, ob2, equal_almost=equal_almost, threshold=threshold,
                           exclude=exclude)
def assert_same_attributes(ob1, ob2, equal_almost=True, threshold=1e-10, exclude=None):
'''
Test if ob1 and ob2 has the same attributes.
Arguments:
equal_almost: if False do a strict arrays_equal if
True do arrays_almost_equal
exclude: a list of attributes and annotations to ignore in
the comparison
'''
classname = ob1.__class__.__name__
if exclude is None:
exclude = []
if not equal_almost:
threshold = None
dtype = True
else:
dtype = False
for ioattr in ob1._all_attrs:
if ioattr[0] in exclude:
continue
attrname, attrtype = ioattr[0], ioattr[1]
# ~ if attrname =='':
if hasattr(ob1, '_quantity_attr') and ob1._quantity_attr == attrname:
# object is hinerited from Quantity (AnalogSignal, SpikeTrain, ...)
try:
assert_arrays_almost_equal(ob1.magnitude, ob2.magnitude, threshold=threshold,
dtype=dtype)
# intercept exceptions and add more information
except BaseException as exc:
exc.args += ('from {} {}'.format(classname, attrname),)
raise
assert ob1.dimensionality.string == ob2.dimensionality.string,\
'Units of %s %s are not the same: %s and %s' \
'' % (classname, attrname, ob1.dimensionality.string, ob2.dimensionality.string)
continue
if not hasattr(ob1, attrname):
assert not hasattr(ob2, attrname), '%s 2 does have %s but not %s 1' \
'' % (classname, attrname, classname)
continue
else:
assert hasattr(ob2, attrname), '%s 1 has %s but not %s 2' \
'' % (classname, attrname, classname)
if getattr(ob1, attrname) is None:
assert getattr(ob2, attrname) is None, 'In %s.%s %s and %s differed' \
'' % (classname, attrname,
getattr(ob1, attrname),
getattr(ob2, attrname))
continue
if getattr(ob2, attrname) is None:
assert getattr(ob1, attrname) is None, 'In %s.%s %s and %s differed' \
'' % (classname, attrname,
getattr(ob1, attrname),
getattr(ob2, attrname))
continue
if attrtype == pq.Quantity:
# Compare magnitudes
mag1 = getattr(ob1, attrname).magnitude
mag2 = getattr(ob2, attrname).magnitude
# print "2. ob1(%s) %s:%s\n | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgssymbollayer.py
---------------------
Date : October 2012
Copyright : (C) 2012 by <NAME>
Email : massimo dot endrighi at geopartner dot it
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, <NAME>'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import qgis # NOQA
import os
from distutils.version import StrictVersion
from qgis.PyQt.Qt import PYQT_VERSION_STR
from qgis.PyQt.QtCore import Qt, QObject, QDir, QFile, QIODevice, QPointF, QSize
from qgis.PyQt.QtXml import QDomDocument
from qgis.PyQt.QtGui import QColor, QImage, QPainter
from qgis.core import (QgsCentroidFillSymbolLayer,
QgsEllipseSymbolLayer,
QgsFillSymbolLayer,
QgsFontMarkerSymbolLayer,
QgsFilledMarkerSymbolLayer,
QgsGradientFillSymbolLayer,
QgsImageFillSymbolLayer,
QgsLinePatternFillSymbolLayer,
QgsLineSymbolLayer,
QgsMarkerLineSymbolLayer,
QgsMarkerSymbolLayer,
QgsReadWriteContext,
QgsPointPatternFillSymbolLayer,
QgsSimpleFillSymbolLayer,
QgsSimpleLineSymbolLayer,
QgsSimpleMarkerSymbolLayer,
QgsSimpleMarkerSymbolLayerBase,
QgsSVGFillSymbolLayer,
QgsSvgMarkerSymbolLayer,
QgsSymbolLayer,
QgsVectorFieldSymbolLayer,
QgsRasterFillSymbolLayer,
QgsShapeburstFillSymbolLayer,
QgsArrowSymbolLayer,
QgsUnitTypes,
QgsFillSymbol,
QgsLineSymbol,
QgsMarkerSymbol,
QgsSymbolLayerUtils,
QgsMapSettings,
QgsGeometry,
QgsFeature,
QgsRenderContext,
QgsRenderChecker,
QgsRectangle,
QgsVectorLayer,
QgsProject,
QgsMultiRenderChecker,
QgsSingleSymbolRenderer,
QgsProperty
)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
# Convenience instances in case you may need them
# not used in this test
start_app()
TEST_DATA_DIR = unitTestDataPath()
if StrictVersion(PYQT_VERSION_STR) < StrictVersion('5.7'):
from qgis.PyQt.QtCore import pyqtWrapperType
EXPECTED_TYPE = pyqtWrapperType
else:
EXPECTED_TYPE = type(QObject)
class TestQgsSymbolLayer(unittest.TestCase):
"""
This class test the sip binding for QgsSymbolLayer descendants
Every class is tested using the createFromSld implementation
An exception is done for:
- QgsLinePatternFillSymbolLayer where createFromSld implementation
returns NULL
- QgsPointPatternFillSymbolLayer where createFromSld implementation
returns NULL
- QgsVectorFieldSymbolLayer where createFromSld implementation
returns NULL
"""
    def setUp(self):
        # Each test starts a fresh HTML fragment; tearDown appends it to the
        # shared report file under the system temp directory.
        self.report = "<h1>Python QgsSymbolLayer Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def testBinding(self):
"""Test python bindings existence."""
mType = type(QgsSymbolLayer)
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsGradientFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsLinePatternFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsPointPatternFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsImageFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsPointPatternFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsGradientFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsShapeburstFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsSVGFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsCentroidFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsRasterFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsSimpleFillSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsLineSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsMarkerLineSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsArrowSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsSimpleLineSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsMarkerSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsEllipseSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsFontMarkerSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsSimpleMarkerSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsFilledMarkerSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsSvgMarkerSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
try:
mType = type(QgsVectorFieldSymbolLayer)
except:
mType = None
mMessage = 'Expected "%s" got "%s"' % (EXPECTED_TYPE, mType)
assert EXPECTED_TYPE == mType, mMessage
def testGettersSetters(self):
""" test base class getters/setters """
layer = QgsSimpleFillSymbolLayer()
layer.setEnabled(False)
self.assertFalse(layer.enabled())
layer.setEnabled(True)
self.assertTrue(layer.enabled())
layer.setLocked(False)
self.assertFalse(layer.isLocked())
layer.setLocked(True)
self.assertTrue(layer.isLocked())
layer.setRenderingPass(5)
self.assertEqual(layer.renderingPass(), 5)
def testSaveRestore(self):
""" Test saving and restoring base symbol layer properties to xml"""
layer = QgsSimpleFillSymbolLayer()
layer.setEnabled(False)
layer.setLocked(True)
layer.setRenderingPass(5)
symbol = QgsFillSymbol()
symbol.changeSymbolLayer(0, layer)
doc = QDomDocument("testdoc")
elem = QgsSymbolLayerUtils.saveSymbol('test', symbol, doc, QgsReadWriteContext())
restored_symbol = QgsSymbolLayerUtils.loadSymbol(elem, QgsReadWriteContext())
restored_layer = restored_symbol.symbolLayer(0)
self.assertFalse(restored_layer.enabled())
self.assertTrue(restored_layer.isLocked())
self.assertEqual(restored_layer.renderingPass(), 5)
def testClone(self):
""" test that base symbol layer properties are cloned with layer """
layer = QgsSimpleFillSymbolLayer()
layer.setEnabled(False)
layer.setLocked(True)
layer.setRenderingPass(5)
symbol = QgsFillSymbol()
symbol.changeSymbolLayer(0, layer)
cloned_symbol = symbol.clone()
cloned_layer = cloned_symbol.symbolLayer(0)
self.assertFalse(cloned_layer.enabled())
self.assertTrue(cloned_layer.isLocked())
self.assertEqual(cloned_layer.renderingPass(), 5)
def imageCheck(self, name, reference_image, image):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'symbollayer_' + name + ".png"
image.save(file_name, "PNG")
checker = QgsRenderChecker()
checker.setControlPathPrefix("symbol_layer")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.compareImages(name, 20)
self.report += checker.report()
print((self.report))
return result
    def testRenderFillLayerDisabled(self):
        """ test that rendering a fill symbol with disabled layer works"""
        # A disabled layer must render nothing: the expected control image
        # is the blank 'symbollayer_disabled'.
        layer = QgsSimpleFillSymbolLayer()
        layer.setEnabled(False)
        symbol = QgsFillSymbol()
        symbol.changeSymbolLayer(0, layer)
        image = QImage(200, 200, QImage.Format_RGB32)
        painter = QPainter()
        ms = QgsMapSettings()
        geom = QgsGeometry.fromWkt('Polygon ((0 0, 10 0, 10 10, 0 10, 0 0))')
        f = QgsFeature()
        f.setGeometry(geom)
        extent = geom.constGet().boundingBox()
        # buffer extent by 10%
        extent = extent.buffered((extent.height() + extent.width()) / 20.0)
        ms.setExtent(extent)
        ms.setOutputSize(image.size())
        context = QgsRenderContext.fromMapSettings(ms)
        context.setPainter(painter)
        context.setScaleFactor(96 / 25.4)  # 96 DPI
        # Order matters: the painter must be active before start/renderFeature/
        # stopRender, and ended before the image is compared.
        painter.begin(image)
        image.fill(QColor(255, 255, 255))
        symbol.startRender(context)
        symbol.renderFeature(f, context)
        symbol.stopRender(context)
        painter.end()
        self.assertTrue(self.imageCheck('symbol_layer', 'symbollayer_disabled', image))
    def testRenderFillLayerDataDefined(self):
        """ test that rendering a fill symbol with data defined enabled layer works"""
        polys_shp = os.path.join(TEST_DATA_DIR, 'polys.shp')
        polys_layer = QgsVectorLayer(polys_shp, 'Polygons', 'ogr')
        QgsProject.instance().addMapLayer(polys_layer)
        layer = QgsSimpleFillSymbolLayer()
        # Only features matching the expression get the layer enabled;
        # all others must be skipped by the renderer.
        layer.setDataDefinedProperty(QgsSymbolLayer.PropertyLayerEnabled, QgsProperty.fromExpression("Name='Lake'"))
        layer.setStrokeStyle(Qt.NoPen)
        layer.setColor(QColor(100, 150, 150))
        symbol = QgsFillSymbol()
        symbol.changeSymbolLayer(0, layer)
        polys_layer.setRenderer(QgsSingleSymbolRenderer(symbol))
        ms = QgsMapSettings()
        ms.setOutputSize(QSize(400, 400))
        ms.setOutputDpi(96)
        ms.setExtent(QgsRectangle(-133, 22, -70, 52))
        ms.setLayers([polys_layer])
        renderchecker = QgsMultiRenderChecker()
        renderchecker.setMapSettings(ms)
        renderchecker.setControlPathPrefix('symbol_layer')
        renderchecker.setControlName('expected_filllayer_ddenabled')
        self.assertTrue(renderchecker.runTest('filllayer_ddenabled'))
        # Remove the layer again so later tests see a pristine project.
        QgsProject.instance().removeMapLayer(polys_layer)
    def testRenderLineLayerDisabled(self):
        """ test that rendering a line symbol with disabled layer works"""
        # A disabled layer must render nothing: the expected control image
        # is the blank 'symbollayer_disabled'.
        layer = QgsSimpleLineSymbolLayer()
        layer.setEnabled(False)
        symbol = QgsLineSymbol()
        symbol.changeSymbolLayer(0, layer)
        image = QImage(200, 200, QImage.Format_RGB32)
        painter = QPainter()
        ms = QgsMapSettings()
        geom = QgsGeometry.fromWkt('LineString (0 0,3 4,4 3)')
        f = QgsFeature()
        f.setGeometry(geom)
        extent = geom.constGet().boundingBox()
        # buffer extent by 10%
        extent = extent.buffered((extent.height() + extent.width()) / 20.0)
        ms.setExtent(extent)
        ms.setOutputSize(image.size())
        context = QgsRenderContext.fromMapSettings(ms)
        context.setPainter(painter)
        context.setScaleFactor(96 / 25.4)  # 96 DPI
        # Painter lifecycle must bracket the start/render/stopRender calls.
        painter.begin(image)
        image.fill(QColor(255, 255, 255))
        symbol.startRender(context)
        symbol.renderFeature(f, context)
        symbol.stopRender(context)
        painter.end()
        self.assertTrue(self.imageCheck('symbol_layer', 'symbollayer_disabled', image))
    def testRenderLineLayerDataDefined(self):
        """ test that rendering a line symbol with data defined enabled layer works"""
        lines_shp = os.path.join(TEST_DATA_DIR, 'lines.shp')
        lines_layer = QgsVectorLayer(lines_shp, 'Lines', 'ogr')
        QgsProject.instance().addMapLayer(lines_layer)
        layer = QgsSimpleLineSymbolLayer()
        # Only features matching the expression get the layer enabled.
        layer.setDataDefinedProperty(QgsSymbolLayer.PropertyLayerEnabled, QgsProperty.fromExpression("Name='Highway'"))
        layer.setColor(QColor(100, 150, 150))
        layer.setWidth(5)
        symbol = QgsLineSymbol()
        symbol.changeSymbolLayer(0, layer)
        lines_layer.setRenderer(QgsSingleSymbolRenderer(symbol))
        ms = QgsMapSettings()
        ms.setOutputSize(QSize(400, 400))
        ms.setOutputDpi(96)
        ms.setExtent(QgsRectangle(-133, 22, -70, 52))
        ms.setLayers([lines_layer])
        renderchecker = QgsMultiRenderChecker()
        renderchecker.setMapSettings(ms)
        renderchecker.setControlPathPrefix('symbol_layer')
        renderchecker.setControlName('expected_linelayer_ddenabled')
        self.assertTrue(renderchecker.runTest('linelayer_ddenabled'))
        # Remove the layer again so later tests see a pristine project.
        QgsProject.instance().removeMapLayer(lines_layer)
def testRenderMarkerLayerDisabled(self):
""" test that rendering a marker symbol with disabled layer works"""
layer = QgsSimpleMarkerSymbolLayer()
layer.setEnabled(False)
symbol = QgsMarkerSymbol()
symbol.changeSymbolLayer(0, layer)
image = QImage(200, 200, QImage.Format_RGB32)
painter = QPainter()
ms = QgsMapSettings()
geom = QgsGeometry.fromWkt('Point (1 2)')
f = QgsFeature()
f.setGeometry(geom)
extent = QgsRectangle(0, 0, 4, | |
<filename>manim/camera/camera.py
"A camera converts the mobjects contained in a Scene into an array of pixels."
__all__ = ["Camera", "BackgroundColoredVMobjectDisplayer"]
from functools import reduce
import itertools as it
import operator as op
import time
import copy
from PIL import Image
from scipy.spatial.distance import pdist
import cairo
import numpy as np
from .. import logger, config
from ..constants import *
from ..mobject.types.image_mobject import AbstractImageMobject
from ..mobject.mobject import Mobject
from ..mobject.types.point_cloud_mobject import PMobject
from ..mobject.types.vectorized_mobject import VMobject
from ..utils.color import color_to_int_rgba
from ..utils.config_ops import digest_config
from ..utils.images import get_full_raster_image_path
from ..utils.iterables import list_difference_update
from ..utils.simple_functions import fdiv
from ..utils.space_ops import angle_of_vector
from ..utils.space_ops import get_norm
from ..utils.family import extract_mobject_family_members
class Camera(object):
"""Base camera class.
This is the object which takes care of what exactly is displayed
on screen at any given moment.
Some important CONFIG values and local variables to note are:
background_image : :class:`str`, optional
The path to an image that should be the background image.
If not set, the background is filled with `self.background_color`
pixel_height : :class:`int`, optional
The height of the scene in pixels.
"""
CONFIG = {
"background_image": None,
# Note: frame height and width will be resized to match
# the pixel aspect ratio
"frame_center": ORIGIN,
# Points in vectorized mobjects with norm greater
# than this value will be rescaled.
"image_mode": "RGBA",
"n_channels": 4,
"pixel_array_dtype": "uint8",
# z_buff_func is only used if the flag above is set to True.
# round z coordinate to nearest hundredth when comparring
"z_buff_func": lambda m: np.round(m.get_center()[2], 2),
"cairo_line_width_multiple": 0.01,
"use_z_index": True,
}
    def __init__(self, video_quality_config, background=None, **kwargs):
        """Initialises the Camera.

        Parameters
        ----------
        video_quality_config
            NOTE(review): not referenced directly in this body; it is only
            captured via locals() and handed to digest_config -- presumably
            consumed there. Confirm against digest_config's contract.
        background : optional
            What self.background should be, by default None as will be set later.
        **kwargs
            Any local variables to be set.
        """
        digest_config(self, kwargs, locals())
        # All of the following are set to EITHER the value passed via kwargs,
        # OR the value stored in the global config dict at the time of
        # _instance construction_. Before, they were in the CONFIG dict, which
        # is a class attribute and is defined at the time of _class
        # definition_. This did not allow for creating two Cameras with
        # different configurations in the same session.
        for attr in [
            "pixel_height",
            "pixel_width",
            "frame_height",
            "frame_width",
            "frame_rate",
        ]:
            setattr(self, attr, kwargs.get(attr, config[attr]))
        # Stored with a leading underscore because background_color and
        # background_opacity are exposed as properties whose setters rebuild
        # the background (see the property definitions below).
        for attr in ["background_color", "background_opacity"]:
            setattr(self, f"_{attr}", kwargs.get(attr, config[attr]))
        # This one is in the same boat as the above, but it doesn't have the
        # same name as the corresponding key so it has to be handled on its own
        self.max_allowable_norm = config["frame_width"]
        # Largest representable channel value for the pixel dtype (255 for uint8).
        self.rgb_max_val = np.iinfo(self.pixel_array_dtype).max
        # Cache of cairo contexts keyed by pixel array identity.
        self.pixel_array_to_cairo_context = {}
        # NOTE(review): the rendering dispatch table (display_funcs) is built
        # lazily in type_or_raise, not here.
        self.init_background()
        self.resize_frame_shape()
        self.reset()
    def __deepcopy__(self, memo):
        # This is to address a strange bug where deepcopying
        # will result in a segfault, which is somehow related
        # to the aggdraw library.
        # NOTE(review): this both mutates *self* (drops self.canvas) and
        # returns a *shallow* copy, so copy.deepcopy(camera) shares mutable
        # state with the original -- deliberate workaround, not a real deepcopy.
        self.canvas = None
        return copy.copy(self)
    @property
    def background_color(self):
        """The scene background color; assigning it rebuilds the background."""
        return self._background_color

    @background_color.setter
    def background_color(self, color):
        self._background_color = color
        # Regenerate the cached background pixel array with the new color.
        self.init_background()
    @property
    def background_opacity(self):
        """The background alpha; assigning it rebuilds the background."""
        return self._background_opacity

    @background_opacity.setter
    def background_opacity(self, alpha):
        self._background_opacity = alpha
        # Regenerate the cached background pixel array with the new opacity.
        self.init_background()
def type_or_raise(self, mobject):
"""Return the type of mobject, if it is a type that can be rendered.
If `mobject` is an instance of a class that inherits from a class that
can be rendered, return the super class. For example, an instance of a
Square is also an instance of VMobject, and these can be rendered.
Therefore, `type_or_raise(Square())` returns True.
Parameters
----------
mobject : :class:`~.Mobject`
The object to take the type of.
Notes
-----
For a list of classes that can currently be rendered, see :meth:`display_funcs`.
Returns
-------
Type[:class:`~.Mobject`]
The type of mobjects, if it can be rendered.
Raises
------
:exc:`TypeError`
When mobject is not an instance of a class that can be rendered.
"""
self.display_funcs = {
VMobject: self.display_multiple_vectorized_mobjects,
PMobject: self.display_multiple_point_cloud_mobjects,
AbstractImageMobject: self.display_multiple_image_mobjects,
Mobject: lambda batch, pa: batch, # Do nothing
}
# We have to check each type in turn because we are dealing with
# super classes. For example, if square = Square(), then
# type(square) != VMobject, but isinstance(square, VMobject) == True.
for _type in self.display_funcs:
if isinstance(mobject, _type):
return _type
else:
raise TypeError(f"Displaying an object of class {_type} is not supported")
def reset_pixel_shape(self, new_height, new_width):
"""This method resets the height and width
of a single pixel to the passed new_heigh and new_width.
Parameters
----------
new_height : int, float
The new height of the entire scene in pixels
new_width : int, float
The new width of the entire scene in pixels
"""
self.pixel_width = new_width
self.pixel_height = new_height
self.init_background()
self.resize_frame_shape()
self.reset()
def resize_frame_shape(self, fixed_dimension=0):
"""
Changes frame_shape to match the aspect ratio
of the pixels, where fixed_dimension determines
whether frame_height or frame_width
remains fixed while the other changes accordingly.
Parameters
----------
fixed_dimension : int
If 0, height is scaled with respect to width
else, width is scaled with respect to height.
"""
pixel_height = self.pixel_height
pixel_width = self.pixel_width
frame_height = self.frame_height
frame_width = self.frame_width
aspect_ratio = fdiv(pixel_width, pixel_height)
if fixed_dimension == 0:
frame_height = frame_width / aspect_ratio
else:
frame_width = aspect_ratio * frame_height
self.frame_height = frame_height
self.frame_width = frame_width
    def init_background(self):
        """Initialize the background.

        If self.background_image is the path of an image the image is set as
        background; else, the default background color fills the background.
        """
        height = self.pixel_height
        width = self.pixel_width
        if self.background_image is not None:
            path = get_full_raster_image_path(self.background_image)
            image = Image.open(path).convert(self.image_mode)
            # TODO, how to gracefully handle backgrounds
            # with different sizes?
            # Currently: crop to the pixel dimensions, anchored top-left.
            self.background = np.array(image)[:height, :width]
            self.background = self.background.astype(self.pixel_array_dtype)
        else:
            # Solid color: fill every pixel with the RGBA background color.
            background_rgba = color_to_int_rgba(
                self.background_color, self.background_opacity
            )
            self.background = np.zeros(
                (height, width, self.n_channels), dtype=self.pixel_array_dtype
            )
            self.background[:, :] = background_rgba
def get_image(self, pixel_array=None):
"""Returns an image from the passed
pixel array, or from the current frame
if the passed pixel array is none.
Parameters
----------
pixel_array : np.array, list, tuple, optional
The pixel array from which to get an image, by default None
Returns
-------
PIL.Image
The PIL image of the array.
"""
if pixel_array is None:
pixel_array = self.pixel_array
return Image.fromarray(pixel_array, mode=self.image_mode)
def convert_pixel_array(self, pixel_array, convert_from_floats=False):
"""Converts a pixel array from values that have floats in then
to proper RGB values.
Parameters
----------
pixel_array : np.array, list, tuple
Pixel array to convert.
convert_from_floats : bool, optional
Whether or not to convert float values to ints, by default False
Returns
-------
np.array
The new, converted pixel array.
"""
retval = np.array(pixel_array)
if convert_from_floats:
retval = np.apply_along_axis(
lambda f: (f * self.rgb_max_val).astype(self.pixel_array_dtype),
2,
retval,
)
return retval
def set_pixel_array(self, pixel_array, convert_from_floats=False):
"""Sets the pixel array of the camera to the passed pixel array.
Parameters
----------
pixel_array : np.array, list, tuple
The pixel array to convert and then set as the camera's pixel array.
convert_from_floats : bool, optional
Whether or not to convert float values to proper RGB values, by default False
"""
converted_array = self.convert_pixel_array(pixel_array, convert_from_floats)
if not (
hasattr(self, "pixel_array")
and self.pixel_array.shape == converted_array.shape
):
self.pixel_array = converted_array
else:
# Set in place
self.pixel_array[:, :, :] = converted_array[:, :, :]
    def set_background(self, pixel_array, convert_from_floats=False):
        """Sets the background to the passed pixel_array after converting
        to valid RGB values.

        Parameters
        ----------
        pixel_array : np.array, list, tuple
            The pixel array to set the background to.
        convert_from_floats : bool, optional
            Whether or not to convert floats values to proper RGB valid ones, by default False
        """
        # Unlike set_pixel_array, the background is always replaced wholesale.
        self.background = self.convert_pixel_array(pixel_array, convert_from_floats)
# TODO, this should live in utils, not as a method of Camera
def make_background_from_func(self, coords_to_colors_func):
"""
Makes a pixel array for the background by using coords_to_colors_func to determine each pixel's color. Each input
pixel's color. Each input to coords_to_colors_func is an (x, y) pair in space (in ordinary space coordinates; not
pixel coordinates), and each output is expected to be an RGBA array of 4 floats.
Parameters
----------
coords_to_colors_func : function
The function whose input is an (x,y) pair of coordinats | |
from sklearn.base import BaseEstimator
from sklearn.metrics import normalized_mutual_info_score, mutual_info_score, silhouette_score, davies_bouldin_score, calinski_harabasz_score, v_measure_score, adjusted_mutual_info_score, log_loss
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
from sklearn import linear_model, naive_bayes
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score
from scipy.linalg import eigh
from scipy.optimize import linear_sum_assignment
from scipy.stats import entropy as get_entropy
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader, ConcatDataset
from torch.nn.utils.rnn import pad_sequence
import progressbar
import copy
import matplotlib.pyplot as plt
import shap
import contextlib
import sys
from collections import defaultdict, Counter
from models import *
from losses import f_loss, reg_betainc
from plots import plot_2d, plot_3d
# Module-level default for the ``simple_classifier`` argument of DEN.__init__.
# NOTE: this is a single shared estimator instance; callers wanting isolation
# should pass their own classifier.
simple_classifier = linear_model.LogisticRegression(solver = 'lbfgs', n_jobs = -1)
def change_cluster_labels_to_sequential(clusters):
    """Remap arbitrary cluster ids to consecutive integers ``0..k-1``.

    New ids are assigned in sorted order of the distinct input values, so
    the mapping is deterministic for a given input.
    """
    unique_ids = np.unique(clusters)
    remap = {}
    for new_id, old_id in enumerate(unique_ids):
        remap[old_id] = new_id
    return np.array([remap[c] for c in clusters])
def make_cost_matrix(c1, c2):
    """Build the negated contingency matrix between two clusterings.

    Both label vectors are first remapped to consecutive integers ``0..k-1``
    in sorted order of their unique values (the same mapping that
    ``change_cluster_labels_to_sequential`` produces).  Entry ``m[i, j]`` is
    minus the number of samples assigned to cluster ``i`` in ``c1`` and
    cluster ``j`` in ``c2`` — negated so the matrix can be fed directly to
    ``scipy.optimize.linear_sum_assignment``, which minimises cost.

    Raises
    ------
    AssertionError
        If the two clusterings do not have the same number of distinct labels.
    """
    # np.unique's return_inverse gives exactly the sequential remapping the
    # original helper computed, without the sibling-function dependency.
    uc1, c1 = np.unique(c1, return_inverse=True)
    uc2, c2 = np.unique(c2, return_inverse=True)
    l1 = uc1.size
    l2 = uc2.size
    assert l1 == l2, str(uc1) + " vs " + str(uc2)
    # Vectorised replacement of the original O(k^2 * n) double loop:
    # accumulate -1 for every observed (c1[i], c2[i]) label pair.
    m = np.zeros([l1, l2])
    np.add.at(m, (c1, c2), -1.0)
    return m
def get_accuracy(clusters, labels):
    """Best-match accuracy between predicted clusters and ground-truth labels.

    Runs the Hungarian algorithm on the negated contingency matrix to find
    the cluster-to-label assignment with maximal agreement, then scores the
    fraction of samples whose mapped cluster equals their label.
    """
    cost_matrix = make_cost_matrix(clusters, labels)
    row_ind, col_ind = linear_sum_assignment(cost_matrix)
    cluster_to_label = dict(enumerate(col_ind))
    # .get (not []) so unseen cluster ids map to None, as before.
    mapped = [cluster_to_label.get(c) for c in clusters]
    return np.sum(mapped == labels) / labels.shape[0]
def tokens_to_tfidf(x):
    """Turn sequences of integer tokens into a TF-IDF feature matrix.

    Tokens equal to 0 are treated as padding and dropped; each remaining
    sequence is joined into a whitespace-separated string and vectorised.
    """
    documents = []
    for item in x:
        words = [str(token) for token in item if token != 0]
        documents.append(' '.join(words))
    return TfidfVectorizer().fit_transform(documents)
class DummyFile(object):
    """File-like sink that accepts writes and flushes but discards them."""

    def write(self, x):
        # Swallow all output on purpose.
        pass

    def flush(self):
        pass
@contextlib.contextmanager
def nostdout():
    """Temporarily silence ``sys.stdout`` within a ``with`` block.

    The saved stdout is restored in a ``finally`` clause, so it is put back
    even when the body raises (the previous implementation leaked the
    ``DummyFile`` replacement on exceptions).
    """
    save_stdout = sys.stdout
    sys.stdout = DummyFile()
    try:
        yield
    finally:
        sys.stdout = save_stdout
class DEN(BaseEstimator):
def __init__(
self,
n_components = 2,
model = 'auto',
min_neighbors = 1,
max_neighbors = 10,
snn = True,
batch_size = 256,
ignore = 1,
metric = 'euclidean',
neighbors_preprocess = None,
use_gpu = True,
learning_rate = 1e-3,
optimizer_override = None,
epochs = 10,
verbose_level = 3,
random_seed = 37,
gamma = 1,
semisupervised = False,
cluster_subnet_dropout_p = .3,
is_tokens = False,
cluster_subsample_n = 1000,
initial_zero_cutoff = 1e-2,
minimum_zero_cutoff = 1e-7,
update_zero_cutoff = False,
internal_dim = 128,
cluster_subnet_training_epochs = 50,
semisupervised_weight = None,
l2_penalty = 0,
prune_graph = False,
fine_tune_end_to_end = True,
fine_tune_epochs = 50,
simple_classifier = simple_classifier,
final_training_epochs = 20,
final_dropout_p = .3,
min_p = 0,
max_correlation = 0
):
self.n_components = n_components
self.model = model
self.min_neighbors = min_neighbors
self.max_neighbors = max_neighbors
self.snn = snn
self.batch_size = batch_size
self.ignore = ignore
self.metric = metric
self.neighbors_preprocess = neighbors_preprocess
self.use_gpu = use_gpu
self.learning_rate = learning_rate
self.optimizer_override = optimizer_override
self.epochs = epochs
self.verbose_level = verbose_level
self.random_seed = random_seed
self.gamma = gamma
self.semisupervised = semisupervised
self.cluster_subnet_dropout_p = cluster_subnet_dropout_p
self.is_tokens = False # forces TF-IDF preprocessing if preprocessing unspecified
self.cluster_subsample_n = cluster_subsample_n
self.initial_zero_cutoff = initial_zero_cutoff
self.minimum_zero_cutoff = minimum_zero_cutoff
self.update_zero_cutoff = update_zero_cutoff
self.internal_dim = internal_dim
self.cluster_subnet_training_epochs = cluster_subnet_training_epochs
self.semisupervised_weight = semisupervised_weight
self.l2_penalty = l2_penalty
self.prune_graph = prune_graph
self.fine_tune_end_to_end = fine_tune_end_to_end
self.fine_tune_epochs = fine_tune_epochs
self.simple_classifier = simple_classifier
self.final_training_epochs = final_training_epochs
# self.final_model = final_model
self.final_dropout_p = final_dropout_p
self.min_p = min_p
self.max_correlation = max_correlation
self.best_full_net = None
self.best_embedding_net = None
self.optimizer = None
self.semisupervised_model = None
# self.final_model = None
    def find_differentiating_features(self, sample, context, n_context_samples = 400, feature_names = None):
        """Explain, via SHAP, which features drive the trained network's
        top-ranked output for ``sample`` against a background ``context`` set.

        Renders a ``shap.image_plot`` when the context is 4-D (image-like),
        otherwise a ``shap.force_plot``.  Requires ``self.best_full_net``.
        """
        assert self.best_full_net is not None, "have not trained a prediction network yet!"
        # Subsample the background set to keep DeepExplainer tractable.
        if n_context_samples < context.shape[0]:
            context_subsample_inds = np.random.choice(context.shape[0], n_context_samples, replace = False)
            context_subsample = context[context_subsample_inds]
        else:
            context_subsample = context
        e = shap.DeepExplainer(self.best_full_net, context_subsample)
        if sample.shape[0] == 1:
            # only one sample so assuming need to add batch dimension
            # NOTE(review): this tests shape[0] == 1 rather than comparing
            # ndim against the context; a single unbatched sample whose first
            # dimension != 1 would not be batched here -- confirm intent.
            sample = sample.unsqueeze(0)
        with nostdout():
            # ranked_outputs=1: explain only the highest-scoring output.
            shap_values, indexes = e.shap_values(sample, ranked_outputs = 1)
        if type(sample) is not np.ndarray:
            sample = sample.cpu().numpy()
        if len(context.shape) == 4:
            # assuming image
            # Reorder each attribution array from (N, C, H, W) to (N, H, W, C)
            # for matplotlib-style plotting.
            shap_values = [np.swapaxes(np.swapaxes(s, 2, 3), 1, -1) for s in shap_values]
            if sample.shape[1] == 1:
                # need valid image shape for matplotlib
                sample = sample.squeeze(1)
            shap.image_plot(shap_values, -sample)
        else:
            # assuming not image
            shap.force_plot(e.expected_value[0], shap_values[0], sample, feature_names = feature_names, matplotlib = True)
def summerize_differentiating_features(self, X, n_samples = 200, n_context_samples = 400):
# split the dataset into clusters and average differentiating features in each cluster
n_samples = min(n_samples, X.shape[0])
sample_inds = np.random.choice(X.shape[0], n_samples, replace = False)
samples = X[sample_inds]
clusters = self.predict(samples)
n_context_samples = min(n_context_samples, X.shape[0])
context_sample_inds = np.random.choice(X.shape[0], n_context_samples, replace = False)
context_samples = X[context_sample_inds]
context_samples = context_samples.to(self.device)
e = shap.DeepExplainer(self.best_full_net, context_samples)
summerizations = defaultdict(lambda : np.zeros(X.shape[1:]))
average_samples = defaultdict(lambda : np.zeros(X.shape[1:]))
counts = Counter(clusters)
self._print_with_verbosity("finding differentiating features across the dataset...", 1)
for cluster, sample in self._progressbar_with_verbosity(zip(clusters, samples), 1, max_value = n_samples):
with nostdout():
shap_values, indexes = e.shap_values(sample.unsqueeze(0), ranked_outputs = 1)
summerizations[cluster] += shap_values[0].squeeze(0) / counts[cluster]
average_samples[cluster] += sample.cpu().numpy() / counts[cluster]
# recall that dictionaries are ordered in Python3
summery = np.array(list(summerizations.values())).squeeze(1)
averages = np.array(list(average_samples.values())).squeeze(1)
shap.image_plot(summery, -averages)
def _print_with_verbosity(self, message, level, strict = False):
if level <= self.verbose_level and (not strict or level == self.verbose_level):
print(message)
def _progressbar_with_verbosity(self, data, level, max_value = None, strict = False):
if level <= self.verbose_level and (not strict or level == self.verbose_level):
for datum in progressbar.progressbar(data, max_value = max_value):
yield datum
else:
for datum in data:
yield datum
def _select_model(self, X, n_outputs = None, dropout_p = 0):
if n_outputs is None:
n_outputs = self.n_components
# not sure if allowed to modify model attribute under sklearn rules
if type(X) is tuple or type(X) is list:
self._print_with_verbosity("assuming token-based data, using bag-of-words model", 1)
self.is_tokens = True
vocab = set()
for x in X:
vocab.update(x)
vocab_size = len(vocab)
to_model = BOWNN(n_outputs, vocab_size, internal_dim = self.internal_dim)
else:
n_dims = len(X.shape)
if n_dims == 2:
self._print_with_verbosity("using fully connected neural network", 1)
to_model = FFNN(n_outputs, X.shape[1], internal_dim = self.internal_dim)
elif n_dims == 4:
self._print_with_verbosity("using convolutional neural network", 1)
n_layers = int(np.log2(min(X.shape[2], X.shape[3])))
to_model = CNN(n_outputs, n_layers, internal_dim = self.internal_dim, p = dropout_p)
# self.model = ClusterNet(X.shape[-1]*X.shape[-2], self.n_components)
else:
assert False, "not sure which neural network to use based off data provided"
return to_model
    def _get_near_and_far_pairs_mem_efficient_chunks(self, X, block_size = 512, return_sorted = True):
        """Compute each sample's nearest neighbours plus a random selection of
        distant points, processing ``X`` in blocks to bound memory use.

        Returns ``(closest, furthest)``: ``closest[i]`` holds the indices of
        the ``self.max_neighbors`` nearest points to sample ``i`` (excluding
        the first/self column; sorted nearest-first when ``return_sorted``),
        and ``furthest[i]`` holds ``max_neighbors`` indices drawn at random
        with probability proportional to distance from ``i`` -- i.e. a
        distance-weighted sample, not literally the furthest points.
        """
        n_neighbors = self.max_neighbors
        closest = []
        furthest = []
        if type(X) is np.ndarray:
            splits = np.array_split(X, max(X.shape[0] // block_size, 1))
            max_value = len(splits)
        else:
            # Non-ndarray (e.g. sparse) input: np.array_split does not apply,
            # so slice row ranges manually; the trailing None closes the range.
            inds = list(range(0, X.shape[0], block_size))
            inds.append(None)
            splits = (X[inds[i]:inds[i+1]] for i in range(len(inds) - 1))
            max_value = len(inds) - 1
        self._print_with_verbosity(f"using metric {self.metric} to build nearest neighbors graph", 2)
        for first in self._progressbar_with_verbosity(splits, 2, max_value = max_value):
            dists = pairwise_distances(first, X, n_jobs = -1, metric = self.metric)
            # dists = cdist(first, X, metric = metric)
            # n_neighbors + 1 because each point is its own nearest neighbour.
            this_closest = np.argpartition(dists, n_neighbors + 1)[:, :n_neighbors+1]
            if return_sorted:
                original_set = set(this_closest[-1])
                # Gather the candidate distances, sort candidates per row.
                relevant = dists[np.arange(this_closest.shape[0])[:, None], this_closest]
                sorted_inds = np.argsort(relevant)
                this_closest = this_closest[np.arange(sorted_inds.shape[0])[:, None], sorted_inds]
                # Sanity check (last row only): sorting must merely reorder.
                assert set(this_closest[-1]) == original_set, "something went wrong with sorting"
            # Drop the first column (the point itself when sorted).
            # NOTE(review): with return_sorted=False the dropped column is
            # whatever argpartition placed first, not necessarily self -- confirm.
            this_closest = this_closest[:, 1:]
            closest.append(this_closest)
            # "Far" points: sample with probability proportional to distance.
            probs = dists / np.sum(dists, axis = 1)[:, None]
            this_furthest = np.array([np.random.choice(len(probs[i]), n_neighbors, False, probs[i]) for i in range(len(probs))])
            furthest.append(np.array(this_furthest))
        closest = np.concatenate(closest)
        furthest = np.concatenate(furthest)
        return closest, furthest
def _build_dataset(self, X, y = None):
# returns Dataset object
neighbors_X = X.view(X.shape[0], -1).cpu().numpy()
if self.is_tokens and self.neighbors_preprocess is None:
self._print_with_verbosity("using tokenized data without neighbors preprocessing so using TF-IDF transform", 2)
self.neighbors_preprocess = tokens_to_tfidf
self.metric = 'cosine'
if self.neighbors_preprocess is not None:
neighbors_X = self.neighbors_preprocess(neighbors_X)
closest, furthest = self._get_near_and_far_pairs_mem_efficient_chunks(neighbors_X)
samples = []
paired = []
# for semisupervised version
# assuming y has positive integer class labels
# and -1 if there is no label
first_label = []
second_label = []
self._print_with_verbosity("building dataset from nearest neighbors graph", 1)
already_paired = set()
for first, seconds in enumerate(closest):
represented = 0
for ind, second in enumerate(seconds[::-1]): # matters if sorted and min_neighbors so closest are last
if self.snn:
if first not in closest[second]:
n_left = len(seconds) - ind
if n_left > self.min_neighbors - represented:
continue
if self.semisupervised and self.prune_graph:
if y[first] != y[second] and y[first] != -1 and y[second] != -1:
continue
represented += 1
if tuple(sorted([first, second])) not in already_paired and first != second:
first_data = X[first]
second_data = X[second]
stack = torch.stack([first_data, second_data])
samples.append(stack)
paired.append(1)
already_paired.add(tuple(sorted([first, second])))
if y is not None:
first_label.append(y[first])
second_label.append(y[second])
else:
first_label.append(-1)
second_label.append(-1)
already_paired = set()
| |
"""
stscan
~~~~~~
Implements the "prospective" space-time permutation scan statistic algorithm.
This was originally described in (1) in reference to disease outbreak
detection. The algorithm is implemented in the software package (2). We
apply it to crime prediction as in (3).
We look at events which have occurred in the past, and try to detect "clusters"
which are existing up to the current time. To do this, a simple statistic
which measures deviation from expected randomness is computed for every
possible space/time "cylinder": events which occur in a circular disk in space,
in an interval of time (always ending at the point of prediction). The space/
time cylinder with the largest statistic is deemed the most likely "cluster".
Further clusters are computed by finding the next most likely cluster which
does not intersect (in space only) the existing cluster.
As detailed in (1) and (2) it is possible to use monte-carlo methods to
estimate the p-value of the primary cluster, but for prediction purposes this
is not necessary. As adapted from (3), we use the clusters in order to find
a relative "risk" of crime.
References
~~~~~~~~~~
1. Kulldorff et al, "A Space–Time Permutation Scan Statistic for Disease
Outbreak Detection", PLoS Med 2(3): e59, DOI:10.1371/journal.pmed.0020059
2. Kulldorff M. and Information Management Services, Inc. SaTScanTM v8.0:
Software for the spatial and space-time scan statistics.
http://www.satscan.org/, 2016.
3. Adepeju, M., Rosser, G. and Cheng, T., "Novel evaluation metrics for sparse spatiotemporal
point process hotspot predictions - a crime case study", International
Journal of Geographical Information Science, 30:11, 2133-2154,
DOI:10.1080/13658816.2016.1159684
"""
from . import predictors
from . import data
import numpy as _np
import collections as _collections
import datetime as _datetime
# A spatial disc candidate: `centre` holds the disc's coordinates and
# `radius` its radius.
Cluster = _collections.namedtuple("Cluster", ["centre", "radius"])
def _possible_start_times(timestamps, max_interval_length, end_time):
times = _np.datetime64(end_time) - timestamps
zerotime = _np.timedelta64(0,"s")
times = timestamps[(zerotime <= times) & (times <= max_interval_length)]
if len(times) <= 1:
return times
deltas = times[1:] - times[:-1]
return _np.hstack(([times[0]],times[1:][deltas > zerotime]))
def _possible_space_clusters(points, max_radius=_np.inf):
    """Enumerate candidate space discs centred on the given points.

    For every point, a disc is produced for each distance to another point
    (inflated very slightly so the boundary event falls inside).  Discs that
    cover an identical set of points are de-duplicated, keeping the first
    disc found for each coverage pattern.
    """
    discs = []
    for centre in points.T:
        offsets = centre[:,None] - points
        radii = _np.sqrt(_np.sum(offsets**2, axis=0))
        radii.sort()
        for r in radii:
            if r <= max_radius:
                discs.append(Cluster(centre, r * 1.00001))
    # De-duplicate by the set of points each disc covers.  Use tuples so the
    # masks are hashable; set membership keeps this fast.
    kept = []
    seen_coverage = set()
    for disc in discs:
        mask = tuple(_np.sum((points - disc.centre[:,None])**2, axis=0) <= disc.radius**2)
        if mask not in seen_coverage:
            seen_coverage.add(mask)
            kept.append(disc)
    return kept
def grid_timed_points(timed_points, region, grid_size):
    """Return a new :class:`TimedPoints` with every spatial coordinate
    snapped to the centre of its containing grid cell.

    :param timed_points: Input data.
    :param region: A `data.RectangularRegion` instance giving the
      region to grid to.  Only the x,y offset is used.
    :param grid_size: The width and height of each grid cell.
    """
    origin = _np.array([region.xmin, region.ymin])
    cell_indices = _np.floor((timed_points.coords - origin[:,None]) / grid_size)
    centres = (cell_indices + 0.5) * grid_size + origin[:,None]
    return data.TimedPoints(timed_points.timestamps, centres)
def bin_timestamps(timed_points, offset, bin_length):
    """Return a new :class:`TimedPoints` with timestamps quantised into bins.

    Every timestamp is mapped down to the start of its bin, where bins begin
    at `offset` and have width `bin_length` (so values in
    `[offset, offset + bin_length)` map to `offset`, and so on).

    :param timed_points: Input data.
    :param offset: A datetime-like object which is the start of the binning.
    :param bin_length: A timedelta-like object which is the length of each bin.
    """
    start = _np.datetime64(offset)
    width = _np.timedelta64(bin_length)
    bin_index = _np.floor((timed_points.timestamps - start) / width)
    binned = start + bin_index * width
    return data.TimedPoints(binned, timed_points.coords)
class _STSTrainerBase(predictors.DataTrainer):
    """Internal class, abstracting out some common features shared by the
    space-time scan trainers: tuning limits, data/region handling, and
    SaTScan-compatible export."""

    def __init__(self):
        self.geographic_population_limit = 0.5
        self.geographic_radius_limit = 3000
        self.time_population_limit = 0.5
        self.time_max_interval = _np.timedelta64(12, "W")
        self.data = None
        self.region = None

    @property
    def region(self):
        """The :class:`data.RectangularRegion` which contains the data; used
        by the output to generate grids etc. If set to `None` then will
        automatically be the bounding-box of the input data.
        """
        if self._region is None:
            # Re-run the setter so the region is lazily recomputed from the
            # data's bounding box once data is available.
            self.region = None
        return self._region

    @region.setter
    def region(self, value):
        if value is None and self.data is not None:
            value = self.data.bounding_box
        self._region = value

    @property
    def geographic_population_limit(self):
        """No space disc can contain more than this fraction of the total
        number of events.
        """
        return self._geo_pop_limit

    @geographic_population_limit.setter
    def geographic_population_limit(self, value):
        if value < 0 or value > 1:
            raise ValueError("Should be fraction of total population, so value between 0 and 1")
        self._geo_pop_limit = value

    @property
    def geographic_radius_limit(self):
        """The maximum radius of the space discs."""
        return self._geo_max_radius

    @geographic_radius_limit.setter
    def geographic_radius_limit(self, value):
        self._geo_max_radius = value

    @property
    def time_population_limit(self):
        """No time interval can contain more than this fraction of the total
        number of events.
        """
        return self._time_pop_limit

    @time_population_limit.setter
    def time_population_limit(self, value):
        if value < 0 or value > 1:
            raise ValueError("Should be fraction of total population, so value between 0 and 1")
        self._time_pop_limit = value

    @property
    def time_max_interval(self):
        """The maximum length of a time interval."""
        return self._time_max_len

    @time_max_interval.setter
    def time_max_interval(self, value):
        self._time_max_len = _np.timedelta64(value)

    def _copy_settings(self, other):
        """Copy the four tuning limits onto another trainer instance."""
        other.geographic_population_limit = self.geographic_population_limit
        other.geographic_radius_limit = self.geographic_radius_limit
        other.time_population_limit = self.time_population_limit
        other.time_max_interval = self.time_max_interval

    def bin_timestamps(self, offset, bin_length):
        """Returns a new instance with the underlying timestamped data
        adjusted.  Any timestamp between `offset` and `offset + bin_length`
        is mapped to `offset`; timestamps between `offset + bin_length`
        and `offset + 2 * bin_length` are mapped to `offset + bin_length`,
        and so forth.

        :param offset: A datetime-like object which is the start of the
          binning.
        :param bin_length: A timedelta-like object which is the length of
          each bin.
        """
        new = self.clone()
        new.data = bin_timestamps(self.data, offset, bin_length)
        return new

    def grid_coords(self, region, grid_size):
        """Returns a new instance with the underlying coordinate data
        adjusted to always be the centre point of grid cells.

        :param region: A `data.RectangularRegion` instance giving the
          region to grid to.  Only the x,y offset is used.
        :param grid_size: The width and height of each grid cell.
        """
        new = self.clone()
        new.data = grid_timed_points(self.data, region, grid_size)
        return new

    @staticmethod
    def _statistic(actual, expected, total):
        """Calculate the log likelihood"""
        stat = actual * (_np.log(actual) - _np.log(expected))
        stat += (total - actual) * (_np.log(total - actual) - _np.log(total - expected))
        return stat

    def maximise_clusters(self, clusters, time=None):
        """The prediction method will return the smallest clusters (subject
        to each cluster being centred on the coordinates of an event).  This
        method will enlarge each cluster to the maxmimum radius it can be
        without including further events.

        :param clusters: List-like object of :class:`Cluster` instances.
        :param time: Only data up to and including this time is used when
          computing clusters.  If `None` then use the last timestamp of the
          data.

        :return: Array of clusters with larger radii.
        """
        events, time = self._events_time(time)
        out = []
        for disc in clusters:
            distances = _np.sum((events.coords - disc.centre[:,None])**2, axis=0)
            rr = disc.radius ** 2
            # Grow to just inside the nearest event outside the current disc.
            new_radius = _np.sqrt(min( dd for dd in distances if dd > rr ))
            out.append(Cluster(disc.centre, new_radius))
        return out

    def to_satscan(self, filename):
        """Writes the training data to two SaTScan compatible files.  Does
        *not* currently write settings, so these will need to be entered
        manually.

        :param filename: Saves files "filename.geo" and "filename.cas"
          containing the geometry and "cases" repsectively.
        """
        def timeformatter(t):
            # Truncate to whole seconds for the output file.
            t = _np.datetime64(t, "s")
            return str(t)
        unique_coords = list(set( (x,y) for x,y in self.data.coords.T ))
        with open(filename + ".geo", "w") as geofile:
            for i, (x,y) in enumerate(unique_coords):
                print("{}\t{}\t{}".format(i+1, x, y), file=geofile)
        unique_times = list(set( t for t in self.data.timestamps ))
        with open(filename + ".cas", "w") as casefile:
            for i, (t) in enumerate(unique_times):
                pts = self.data.coords.T[self.data.timestamps == t]
                pts = [ (x,y) for x,y in pts ]
                # Count events per coordinate at this timestamp.  (Uses the
                # module-level `_collections` alias; previously `collections`
                # was re-imported on every loop iteration.)
                c = _collections.Counter(pts)
                for pt in c:
                    index = unique_coords.index(pt)
                    print("{}\t{}\t{}".format(index+1, c[pt], timeformatter(t)), file=casefile)

    def _events_time(self, time=None):
        """If time is `None` set to last event in data.  Return data clamped to
        time range, and timestamp actually used."""
        events = self.data.events_before(time)
        if time is None:
            time = self.data.timestamps[-1]
        return events, time
from . import stscan2 as _stscan2
class STSTrainer(_STSTrainerBase):
"""From past events, produce an instance of :class:`STSResult` which
stores details of the found clusters. Contains a variety of properties
which may | |
[1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
infile.close()
return bets
def func_0cd51dc6e0fb40db809beb7adf5199c7():
    """Auto-generated variant of a Python-2 roulette solver.

    Reads 'codejam/test_files/Y13R5P1/A.in', and for each case explores
    candidate values of the lowest bet over a worklist of interesting levels,
    scoring each via `get_expected` (defined elsewhere in this file) on a
    37-slot wheel with pre-placed bets `placed`; prints 'Case #i: answer'
    lines and returns the loop variable `cc`.

    NOTE(review): the `print` sits inside the `while` loop (emitting a line
    per candidate, not per case) and the queue-update block is duplicated
    after it -- this looks like machine-mangled code; confirm against the
    original solution before relying on the output.
    """
    infile = open('codejam/test_files/Y13R5P1/A.in')
    cases = int(infile.readline())
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate "lowest bet" levels worth inspecting.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Cost of raising every empty slot (and topping up smaller
            # pre-placed bets) to `lowest`.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            # Slots whose bet would equal `lowest` after topping up.
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                # How far `lowest` can be raised uniformly without meeting
                # the next larger pre-placed bet.
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            # Try withholding `exclude` of the partially-covered slots.
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
            print 'Case #%d: %.10lf' % (cc + 1, ret)
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
    infile.close()
    return cc
def func_1c4867a25f1941bcb1dd9a4f74f291c3():
    """Auto-generated variant of a Python-2 roulette solver; identical to
    its siblings except it returns the last case's `placed` list.

    Reads 'codejam/test_files/Y13R5P1/A.in', explores candidate lowest-bet
    levels per case, scoring each via `get_expected` (defined elsewhere in
    this file), and prints 'Case #i: answer' lines.

    NOTE(review): the `print` sits inside the `while` loop and the
    queue-update block is duplicated after it -- apparently machine-mangled
    code; confirm against the original solution before relying on it.
    """
    infile = open('codejam/test_files/Y13R5P1/A.in')
    cases = int(infile.readline())
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate "lowest bet" levels worth inspecting.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Cost of raising every empty slot (and topping up smaller
            # pre-placed bets) to `lowest`.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            # Try withholding `exclude` of the partially-covered slots.
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
            print 'Case #%d: %.10lf' % (cc + 1, ret)
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
    infile.close()
    return placed
def func_46339166d908453395964138572ec4cc():
    """Auto-generated variant of a Python-2 roulette solver; identical to
    its siblings except it returns the final `seen` set of explored levels.

    Reads 'codejam/test_files/Y13R5P1/A.in', explores candidate lowest-bet
    levels per case, scoring each via `get_expected` (defined elsewhere in
    this file), and prints 'Case #i: answer' lines.

    NOTE(review): the `print` sits inside the `while` loop and the
    queue-update block is duplicated after it -- apparently machine-mangled
    code; confirm against the original solution before relying on it.
    """
    infile = open('codejam/test_files/Y13R5P1/A.in')
    cases = int(infile.readline())
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate "lowest bet" levels worth inspecting.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Cost of raising every empty slot (and topping up smaller
            # pre-placed bets) to `lowest`.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            # Try withholding `exclude` of the partially-covered slots.
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
            print 'Case #%d: %.10lf' % (cc + 1, ret)
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
    infile.close()
    return seen
def func_e842db7e952e42e3b17d188db816457f():
    """Auto-generated variant of a Python-2 roulette solver; identical to
    its siblings except it returns the last computed `can_replicate`.

    Reads 'codejam/test_files/Y13R5P1/A.in', explores candidate lowest-bet
    levels per case, scoring each via `get_expected` (defined elsewhere in
    this file), and prints 'Case #i: answer' lines.

    NOTE(review): the `print` sits inside the `while` loop and the
    queue-update block is duplicated after it -- apparently machine-mangled
    code; confirm against the original solution before relying on it.
    """
    infile = open('codejam/test_files/Y13R5P1/A.in')
    cases = int(infile.readline())
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate "lowest bet" levels worth inspecting.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Cost of raising every empty slot (and topping up smaller
            # pre-placed bets) to `lowest`.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            # Try withholding `exclude` of the partially-covered slots.
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
            print 'Case #%d: %.10lf' % (cc + 1, ret)
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
    infile.close()
    return can_replicate
def func_f6e9c2713ce048afbfc79d1be10a8955():
infile = open('codejam/test_files/Y13R5P1/A.in')
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < | |
from __future__ import absolute_import, division, print_function
import io
from unittest import TestCase
import numpy as np
from math import log
import Bio.PDB
from Bio import AlignIO
from Bio.Seq import Seq
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
from Bio.PDB.PDBExceptions import PDBConstructionWarning
from biostructmap import biostructmap, seqtools, gentests, pdbtools
from biostructmap.seqtools import (_sliding_window, _sliding_window_var_sites,
_construct_sub_align, check_for_uncertain_bases)
from biostructmap.gentests import _tajimas_d
from biostructmap.pdbtools import _euclidean_distance_matrix
from biostructmap.map_functions import _tajimas_d
import warnings
warnings.filterwarnings("ignore", category=PDBConstructionWarning)
# Three-letter codes for the 20 standard amino acids, in alphabetical order.
STANDARD_AA_3_LETTERS = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY',
                         'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER',
                         'THR', 'TRP', 'TYR', 'VAL']
class TestPdbtools(TestCase):
    """Tests for low-level pdbtools helpers, using PDB 1zrl / mmCIF 4nuv fixtures.

    NOTE(review): the ``_tajimas_d`` exercised below resolves to
    ``map_functions._tajimas_d`` because the later module-level import
    shadows ``gentests._tajimas_d`` — confirm this shadowing is intended.
    """
    def setUp(self):
        # Parse the reference structure once per test; keep model/chain handles.
        self.test_pdb_file = './tests/pdb/1zrl.pdb'
        parser = Bio.PDB.PDBParser()
        pdbname = self.test_pdb_file
        #Get Bio.PDB structure
        self.test_structure = parser.get_structure(pdbname, self.test_pdb_file)
        self.test_model = self.test_structure[0]
        self.test_chain = self.test_model['A']
    def test_euclidean_matrix_calculation(self):
        """All-atom distance matrix has the expected size and a zero diagonal."""
        mat = _euclidean_distance_matrix(self.test_model, selector="all")
        length_to_match = 4908
        self.assertEqual(len(mat[0]), length_to_match)
        self.assertEqual(len(mat[1]), length_to_match)
        #Check that diagonal is all 0
        check_diagonal = (mat[0].diagonal() == np.zeros(length_to_match)).all()
        self.assertTrue(check_diagonal)
    def test_euclidean_matrix_calculation_with_CA(self):
        """CA-only distance matrix: one entry per residue, zero diagonal."""
        mat = _euclidean_distance_matrix(self.test_model, selector="CA")
        length_to_match = 583
        self.assertEqual(len(mat[0]), length_to_match)
        self.assertEqual(len(mat[1]), length_to_match)
        #Check that diagonal is all 0
        check_diagonal = (mat[0].diagonal() == np.zeros(length_to_match)).all()
        self.assertTrue(check_diagonal)
    def test_euclidean_matrix_calculation_with_CB(self):
        """CB-selector distance matrix matches the CA-selector dimensions."""
        mat = _euclidean_distance_matrix(self.test_model, selector="CB")
        length_to_match = 583
        self.assertEqual(len(mat[0]), length_to_match)
        self.assertEqual(len(mat[1]), length_to_match)
        #Check that diagonal is all 0
        check_diagonal = (mat[0].diagonal() == np.zeros(length_to_match)).all()
        self.assertTrue(check_diagonal)
    def test_nearby_residues_function_with_CA(self):
        """Residues within 15 A of A:57 (CA-CA distances) match a known set."""
        radius = 15
        selector = 'CA'
        test_residue = ('A',(' ', 57, ' '))
        test_residue_to_match = [48, 49, 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
                                 62, 63, 64, 65, 66, 98, 99, 101, 102, 103, 104, 116, 117,
                                 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 136,
                                 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 151]
        residues_to_match = {('A',(' ', x, ' ')) for x in test_residue_to_match}
        nearby = pdbtools.nearby(self.test_model, radius, selector)
        result = nearby[test_residue]
        self.assertEqual(result, residues_to_match)
    def test_nearby_residues_function_with_all_atoms(self):
        """All-atom search returns a superset of the CA-only neighbours."""
        radius = 15
        selector = 'all'
        test_residue = ('A',(' ', 57, ' '))
        test_residue_to_match = [8, 11, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
                                 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
                                 69, 98, 99, 100, 101, 102, 103, 104, 105, 115, 116, 117, 118,
                                 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
                                 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
                                 145, 146, 147, 148, 149, 150, 151, 152, 154]
        residues_to_match = {('A',(' ', x, ' ')) for x in test_residue_to_match}
        nearby = pdbtools.nearby(self.test_model, radius, selector)
        result = nearby[test_residue]
        self.assertEqual(result, residues_to_match)
    def test_get_pdb_sequence(self):
        """Extracted chain-A sequence matches the reference; bad path raises."""
        filename = './tests/pdb/1zrl.pdb'
        sequence = pdbtools.get_pdb_seq(filename)
        to_match = {'A': (
            'GRQTSSNNEVLSNCREKRKGMKWDCKKKNDRSNYVCIPDRRIQLCIVNLAII'
            'KTYTKETMKDHFIEASKKESQLLLKKNDNKYNSKFCNDLKNSFLDYGHLAMGN'
            'DMDFGGYSTKAENKIQEVFKGAHGEISEHKIKNFRKKWWNEFREKLWEAMLSE'
            'HKNNINNCKNIPQEELQITQWIKEWHGEFLL'
            'ERDNRAKLPKSKCKNNALYEACEKECIDPCMKYRDWIIRSKFEWHTLSKEYETQKVPKENAEN'
            'YLIKISENKNDAKVSLLLNNCDAEYSKYCDCKHTTTLVKSVLNGNDNTIKEKREHIDLDDFSK'
            'FGCDKNSVDTNTKVWECKKPYKLSTKDVCVPPRRQELCLGNIDRIYDKNLLMIKEHILAIAIY'
            'ESRILKRKYKNKDDKEVCKIINKTFADIRDIIGGTDYWNDLSNRKLVGKINTNSNYVHRNKQN'
            'DKLFRDEWWKVIKKDVWNVISWVFKDKTVCKEDDIENIPQFFRWFSEWGDDYCQDKTKMIETL'
            'KVECKEKPCEDDNCKRKCNSYKEWISKKKEEYNKQAKQYQEYQKGNNYKMYSEFKSIKPEVYL'
            'KKYSEKCSNLNFEDEFKEELHSDYKNKCTMCPEVK')
            }
        self.assertEqual(sequence, to_match)
        with self.assertRaises(IOError):
            pdbtools.get_pdb_seq('not_a_file')
    def test_get_mmcif_seq(self):
        """Canonical sequences are extracted for every chain in the mmCIF dict."""
        filename = './tests/pdb/4nuv.cif'
        mmcif_dict = MMCIF2Dict(filename)
        sequence = pdbtools.get_mmcif_canonical_seq(mmcif_dict)
        to_match = {'C': 'GPTGTENSSQLDFEDVWNSSYGVNDSFPDGDYGA',
                    'D': 'GPTGTENSSQLDFEDVWNSSYGVNDSFPDGDYGA',
                    'A': ('ASNTVMKNCNYKRKRRERDWDCNTKKDVCIPDRRYQLCMKELTNLVNNTDT'
                          'NFHRDITFRKLYLKRKLIYDAAVEGDLLLKLNNYRYNKDFCKDIRWSLGDF'
                          'GDIIMGTDMEGIGYSKVVENNLRSIFGTDEKAQQRRKQWWNESKAQIWTAM'
                          'MYSVKKRLKGNFIWICKLNVAVNIEPQIYRWIREWGRDYVSELPTEVQKLK'
                          'EKCDGKINYTDKKVCKVPPCQNACKSYDQWITRKKNQWDVLSNKFISVKNA'
                          'EKVQTAGIVTPYDILKQELDEFNEVAFENEINKRDGAYIELCVCSVEEAKK'
                          'NTQEVVTNVDN'),
                    'B': ('ASNTVMKNCNYKRKRRERDWDCNTKKDVCIPDRRYQLCMKELTNLVNNTDT'
                          'NFHRDITFRKLYLKRKLIYDAAVEGDLLLKLNNYRYNKDFCKDIRWSLGDF'
                          'GDIIMGTDMEGIGYSKVVENNLRSIFGTDEKAQQRRKQWWNESKAQIWTAM'
                          'MYSVKKRLKGNFIWICKLNVAVNIEPQIYRWIREWGRDYVSELPTEVQKLK'
                          'EKCDGKINYTDKKVCKVPPCQNACKSYDQWITRKKNQWDVLSNKFISVKNA'
                          'EKVQTAGIVTPYDILKQELDEFNEVAFENEINKRDGAYIELCVCSVEEAKK'
                          'NTQEVVTNVDN')}
        self.assertDictEqual(sequence, to_match)
    def test_tajimas_d_on_structure(self):
        """Tajima's D over residues 86-103 matches a precomputed value."""
        #test_sequence_alignment = AlignIO.read('./tests/msa/msa_test_86-104', 'fasta')
        test_sequence_alignment = {('A',): biostructmap.SequenceAlignment(
            './tests/msa/msa_test_86-104', 'fasta')}
        # Map residue (chain A, 86+x) to its codon positions in the alignment.
        test_ref_dict = {('A', (' ', x+86, ' ')): ('A', (x*3+1, x*3 + 2, x*3 + 3)) for
                         x in range(0, 18)}
        test_surrounding_residues = [('A', (' ', x, ' ')) for x in range(86, 104)]
        result = _tajimas_d(self.test_structure, test_sequence_alignment,
                            test_surrounding_residues, test_ref_dict)
        self.assertEqual(result, -0.7801229937910628)
    def test_tajimas_d_on_structure_with_subset_of_reference_residues(self):
        """Tajima's D over a residue subset (86-95) matches a precomputed value."""
        #test_sequence_alignment = AlignIO.read('./tests/msa/msa_test_86-104', 'fasta')
        test_sequence_alignment = {('A',): biostructmap.SequenceAlignment(
            './tests/msa/msa_test_86-104', 'fasta')}
        test_ref_dict = {('A', (' ', x+86, ' ')): ('A', (x*3 + 1, x*3 + 2, x*3 + 3)) for
                         x in range(18)}
        test_surrounding_residues = [('A', (' ', x, ' ')) for x in range(86, 96)]
        result = _tajimas_d(self.test_structure, test_sequence_alignment,
                            test_surrounding_residues, test_ref_dict)
        self.assertEqual(result, -0.709896167879475)
class TestSeqtools(TestCase):
    def setUp(self):
        """Load the shared MSA fixture and precompute its variable sites."""
        self.test_file = './tests/msa/MSA_test.fsa'
        self.alignment = AlignIO.read(self.test_file, 'fasta')
        self.biostructmap_alignment = biostructmap.SequenceAlignment('./tests/msa/MSA_test.fsa')
        self.varsites = seqtools._var_site(self.alignment)
    def tearDown(self):
        # No per-test cleanup needed; fixtures are read-only files.
        pass
def test_var_site(self):
varsites_keys_to_match = [28, 46, 67, 93, 95, 98, 100]
self.assertEqual(sorted(self.varsites.keys()), varsites_keys_to_match)
def test_join_alignments(self):
msa1 = self.alignment[:, 1:2]
msa2 = self.alignment[:, 2:3]
msa3 = self.alignment[:, 3:6]
alignment_dict = {}
for i, msa in enumerate([msa1, msa2, msa3]):
alignment_dict[i] = msa
joined = seqtools._join_alignments(alignment_dict)
self.assertEqual(joined.format('fasta'),
self.alignment[:, 1:6].format('fasta'))
def test_sliding_window(self):
step = 3
length = 10
#Test sliding window output in AlignIO format
slider = _sliding_window(self.alignment, length, step=step, fasta_out=False)
for i, window in enumerate(slider):
to_equal = self.alignment[:, i*step:i*step+length]
to_equal = to_equal.format('fasta')
window_fasta = window.format('fasta')
self.assertEqual(window_fasta, to_equal)
#Test fasta output
slider = _sliding_window(self.alignment, length, step=step, fasta_out=True)
for i, window in enumerate(slider):
to_equal = self.alignment[:, i*step:i*step+length]
to_equal = to_equal.format('fasta')
self.assertEqual(window, to_equal)
#Test reading from file
slider = _sliding_window(self.test_file, length, step=step, fasta_out=False)
for i, window in enumerate(slider):
to_equal = self.alignment[:, i*step:i*step+length]
to_equal = to_equal.format('fasta')
window_fasta = window.format('fasta')
self.assertEqual(window_fasta, to_equal)
#Test with a different step size
slider = _sliding_window(self.alignment, length, step=1, fasta_out=False)
for i, window in enumerate(slider):
to_equal = self.alignment[:, i:i+length]
to_equal = to_equal.format('fasta')
window_fasta = window.format('fasta')
self.assertEqual(window_fasta, to_equal)
def test_sliding_window_var_sites(self):
step = 3
length = 10
slider = _sliding_window_var_sites(self.alignment, length, step)
varsites_keys_to_match = [28, 46, 67, 93, 95, 98, 100]
null_align = self.alignment[:, 0:0]
print(self.alignment)
for i, window in enumerate(slider):
#Check if key within range
in_range = [x for x in varsites_keys_to_match if (step*i) <= x < (step*i + length)]
print(in_range)
if in_range:
window_i = self.alignment[:, in_range[0]:in_range[0]+1]
if len(in_range) > 1:
for x in in_range[1:]:
window_i = window_i + self.alignment[:, x:x+1]
self.assertEqual(window_i.format('fasta'),
window.format('fasta'))
else:
self.assertEqual(window.format('fasta'),
null_align.format('fasta'))
def test_sliding_window_var_sites_with_file(self):
step = 3
length = 10
slider = _sliding_window_var_sites(self.test_file, length, step)
varsites_keys_to_match = [28, 46, 67, 93, 95, 98, 100]
null_align = self.alignment[:, 0:0]
for i, window in enumerate(slider):
#Check if key within range
in_range = [x for x in varsites_keys_to_match if (step*i) <= x < (step*i + length)]
print(in_range)
if in_range:
window_i = self.alignment[:, in_range[0]:in_range[0]+1]
if len(in_range) > 1:
for x in in_range[1:]:
window_i = window_i + self.alignment[:, x:x+1]
self.assertEqual(window_i.format('fasta'),
window.format('fasta'))
else:
self.assertEqual(window.format('fasta'),
null_align.format('fasta'))
    def test_blast_sequences(self):
        """BLAST alignment of a subsequence yields the expected 1-based maps.

        blast_sequences returns (seq1->seq2, seq2->seq1) position maps;
        both str and Bio.Seq inputs must give identical results.
        """
        seq1 = "GSNAKFGLWVDGNCEDIPHVNEFPAID"
        seq1_bio = Seq(seq1)
        seq2 = "NAKFGLWV"
        seq2_bio = Seq(seq2)
        test_map_forward, test_map_reverse = seqtools.blast_sequences(seq1, seq2)
        forward_match = {3: 1, 4: 2, 5: 3, 6: 4, 7: 5, 8: 6, 9: 7, 10: 8}
        reverse_match = {1: 3, 2: 4, 3: 5, 4: 6, 5: 7, 6: 8, 7: 9, 8: 10}
        #Check alignment for string input
        self.assertEqual(forward_match, test_map_forward)
        self.assertEqual(reverse_match, test_map_reverse)
        #Check alignment for Bio.Seq input
        test_map_forward, test_map_reverse = seqtools.blast_sequences(seq1_bio, seq2_bio)
        self.assertEqual(forward_match, test_map_forward)
        self.assertEqual(reverse_match, test_map_reverse)
    def test_pairwise_align_sequences(self):
        """pairwise_align must produce the same maps as blast_sequences does."""
        seq1 = "GSNAKFGLWVDGNCEDIPHVNEFPAID"
        seq1_bio = Seq(seq1)
        seq2 = "NAKFGLWV"
        seq2_bio = Seq(seq2)
        test_map_forward, test_map_reverse = seqtools.pairwise_align(seq1, seq2)
        forward_match = {3: 1, 4: 2, 5: 3, 6: 4, 7: 5, 8: 6, 9: 7, 10: 8}
        reverse_match = {1: 3, 2: 4, 3: 5, 4: 6, 5: 7, 6: 8, 7: 9, 8: 10}
        #Check alignment for string input
        self.assertEqual(forward_match, test_map_forward)
        self.assertEqual(reverse_match, test_map_reverse)
        #Check alignment for Bio.Seq input
        test_map_forward, test_map_reverse = seqtools.pairwise_align(seq1_bio, seq2_bio)
        self.assertEqual(forward_match, test_map_forward)
        self.assertEqual(reverse_match, test_map_reverse)
    def test_blast_sequences_with_gaps_in_first_sequence(self):
        '''Test with reverse input (ie gaps in first sequence)'''
        # seq2 lacks the 'G' at seq1 position 7, so the maps skip over it.
        seq1 = "GSNAKFGLWVDGNCEDIPHVNEFPAID"
        seq1_bio = Seq(seq1)
        seq2 = "NAKFLWVDG"
        seq2_bio = Seq(seq2)
        # Arguments swapped, so returned maps unpack in (reverse, forward) order.
        test_map_reverse, test_map_forward = seqtools.blast_sequences(seq2, seq1)
        forward_match = {3: 1, 4: 2, 5: 3, 6: 4, 8: 5, 9: 6, 10: 7, 11: 8, 12: 9}
        reverse_match = {1: 3, 2: 4, 3: 5, 4: 6, 5: 8, 6: 9, 7: 10, 8: 11, 9: 12}
        #Check alignment for string input
        self.assertEqual(forward_match, test_map_forward)
        self.assertEqual(reverse_match, test_map_reverse)
        #Check alignment for Bio.Seq input
        test_map_forward, test_map_reverse = seqtools.blast_sequences(seq1_bio, seq2_bio)
        self.assertEqual(forward_match, test_map_forward)
        self.assertEqual(reverse_match, test_map_reverse)
    def test_pairwise_align_sequences_with_gaps_in_first_sequence(self):
        '''Test with reverse input (ie gaps in first sequence)'''
        seq1 = "GSNAKFGLWVDGNCEDIPHVNEFPAID"
        seq1_bio = Seq(seq1)
        seq2 = "NAKFLWVDG"
        seq2_bio = Seq(seq2)
        # Arguments swapped, so returned maps unpack in (reverse, forward) order.
        test_map_reverse, test_map_forward = seqtools.pairwise_align(seq2, seq1)
        forward_match = {3: 1, 4: 2, 5: 3, 6: 4, 8: 5, 9: 6, 10: 7, 11: 8, 12: 9}
        reverse_match = {1: 3, 2: 4, 3: 5, 4: 6, 5: 8, 6: 9, 7: 10, 8: 11, 9: 12}
        #Check alignment for string input
        self.assertEqual(forward_match, test_map_forward)
        self.assertEqual(reverse_match, test_map_reverse)
        #Check alignment for Bio.Seq input
        test_map_forward, test_map_reverse = seqtools.pairwise_align(seq1_bio, seq2_bio)
        self.assertEqual(forward_match, test_map_forward)
        self.assertEqual(reverse_match, test_map_reverse)
def test_protein_dna_alignment(self):
'''Need to account for an intron frameshift when converting from DNA to
protein sequence.
'''
dna = 'ATGAAATGTAATATTAGTATATATTTTTTTATGAAATGTAATATTAGTATATATTTTTTT'
protein = 'MKCNISIYFFMKCNISIYFF'
result = seqtools.align_protein_to_dna(protein, dna)
result_keys_to_match = [i for i in range(1, 21)]
result_codons_to_match = [(i*3-2, i*3-1, i*3) for i in range(1, 21)]
self.assertEqual(result_keys_to_match, sorted(result))
sorted_codons = [result[i] for i in sorted(result)]
self.assertEqual(result_codons_to_match, sorted_codons)
def | |
NextIsSelected = None
locals()['None'] = None
NotAdjacent = None
OnlyOneSection = None
PreviousIsSelected = None
SectionPosition = None
SelectedPosition = None
SortDown = None
SortIndicator = None
SortUp = None
StyleOptionType = None
StyleOptionVersion = None
Type = None
Version = None
__new__ = None
class QGraphicsSceneMoveEvent(QGraphicsSceneEvent):
    """Auto-generated stub for the Qt QGraphicsSceneMoveEvent binding.

    Bodies are placeholders; the real implementation lives in the
    compiled Qt extension module.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def newPos(*args, **kwargs):
        pass
    def oldPos(*args, **kwargs):
        pass
    def setNewPos(*args, **kwargs):
        pass
    def setOldPos(*args, **kwargs):
        pass
    __new__ = None
class QStyledItemDelegate(QAbstractItemDelegate):
    """Auto-generated stub for the Qt QStyledItemDelegate binding.

    Bodies are placeholders; the real implementation lives in the
    compiled Qt extension module.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def createEditor(*args, **kwargs):
        pass
    def displayText(*args, **kwargs):
        pass
    def editorEvent(*args, **kwargs):
        pass
    def eventFilter(*args, **kwargs):
        pass
    def initStyleOption(*args, **kwargs):
        pass
    def itemEditorFactory(*args, **kwargs):
        pass
    def paint(*args, **kwargs):
        pass
    def setEditorData(*args, **kwargs):
        pass
    def setItemEditorFactory(*args, **kwargs):
        pass
    def setModelData(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    def updateEditorGeometry(*args, **kwargs):
        pass
    __new__ = None
    staticMetaObject = None
class QGraphicsSceneHoverEvent(QGraphicsSceneEvent):
    """Auto-generated stub for the Qt QGraphicsSceneHoverEvent binding.

    Bodies are placeholders; the real implementation lives in the
    compiled Qt extension module.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def lastPos(*args, **kwargs):
        pass
    def lastScenePos(*args, **kwargs):
        pass
    def lastScreenPos(*args, **kwargs):
        pass
    def modifiers(*args, **kwargs):
        pass
    def pos(*args, **kwargs):
        pass
    def scenePos(*args, **kwargs):
        pass
    def screenPos(*args, **kwargs):
        pass
    def setLastPos(*args, **kwargs):
        pass
    def setLastScenePos(*args, **kwargs):
        pass
    def setLastScreenPos(*args, **kwargs):
        pass
    def setModifiers(*args, **kwargs):
        pass
    def setPos(*args, **kwargs):
        pass
    def setScenePos(*args, **kwargs):
        pass
    def setScreenPos(*args, **kwargs):
        pass
    __new__ = None
class QTabBar(QWidget):
    """Auto-generated stub for the Qt QTabBar binding.

    Bodies are placeholders; the real implementation lives in the
    compiled Qt extension module. Trailing names are enum values and
    signals, stubbed as None.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def addTab(*args, **kwargs):
        pass
    def autoHide(*args, **kwargs):
        pass
    def changeCurrentOnDrag(*args, **kwargs):
        pass
    def changeEvent(*args, **kwargs):
        pass
    def count(*args, **kwargs):
        pass
    def currentIndex(*args, **kwargs):
        pass
    def documentMode(*args, **kwargs):
        pass
    def drawBase(*args, **kwargs):
        pass
    def elideMode(*args, **kwargs):
        pass
    def event(*args, **kwargs):
        pass
    def expanding(*args, **kwargs):
        pass
    def hideEvent(*args, **kwargs):
        pass
    def iconSize(*args, **kwargs):
        pass
    def initStyleOption(*args, **kwargs):
        pass
    def insertTab(*args, **kwargs):
        pass
    def isMovable(*args, **kwargs):
        pass
    def isTabEnabled(*args, **kwargs):
        pass
    def keyPressEvent(*args, **kwargs):
        pass
    def minimumSizeHint(*args, **kwargs):
        pass
    def minimumTabSizeHint(*args, **kwargs):
        pass
    def mouseMoveEvent(*args, **kwargs):
        pass
    def mousePressEvent(*args, **kwargs):
        pass
    def mouseReleaseEvent(*args, **kwargs):
        pass
    def moveTab(*args, **kwargs):
        pass
    def paintEvent(*args, **kwargs):
        pass
    def removeTab(*args, **kwargs):
        pass
    def resizeEvent(*args, **kwargs):
        pass
    def selectionBehaviorOnRemove(*args, **kwargs):
        pass
    def setAutoHide(*args, **kwargs):
        pass
    def setChangeCurrentOnDrag(*args, **kwargs):
        pass
    def setCurrentIndex(*args, **kwargs):
        pass
    def setDocumentMode(*args, **kwargs):
        pass
    def setDrawBase(*args, **kwargs):
        pass
    def setElideMode(*args, **kwargs):
        pass
    def setExpanding(*args, **kwargs):
        pass
    def setIconSize(*args, **kwargs):
        pass
    def setMovable(*args, **kwargs):
        pass
    def setSelectionBehaviorOnRemove(*args, **kwargs):
        pass
    def setShape(*args, **kwargs):
        pass
    def setTabButton(*args, **kwargs):
        pass
    def setTabData(*args, **kwargs):
        pass
    def setTabEnabled(*args, **kwargs):
        pass
    def setTabIcon(*args, **kwargs):
        pass
    def setTabText(*args, **kwargs):
        pass
    def setTabTextColor(*args, **kwargs):
        pass
    def setTabToolTip(*args, **kwargs):
        pass
    def setTabWhatsThis(*args, **kwargs):
        pass
    def setTabsClosable(*args, **kwargs):
        pass
    def setUsesScrollButtons(*args, **kwargs):
        pass
    def shape(*args, **kwargs):
        pass
    def showEvent(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    def tabAt(*args, **kwargs):
        pass
    def tabButton(*args, **kwargs):
        pass
    def tabData(*args, **kwargs):
        pass
    def tabIcon(*args, **kwargs):
        pass
    def tabInserted(*args, **kwargs):
        pass
    def tabLayoutChange(*args, **kwargs):
        pass
    def tabRect(*args, **kwargs):
        pass
    def tabRemoved(*args, **kwargs):
        pass
    def tabSizeHint(*args, **kwargs):
        pass
    def tabText(*args, **kwargs):
        pass
    def tabTextColor(*args, **kwargs):
        pass
    def tabToolTip(*args, **kwargs):
        pass
    def tabWhatsThis(*args, **kwargs):
        pass
    def tabsClosable(*args, **kwargs):
        pass
    def timerEvent(*args, **kwargs):
        pass
    def usesScrollButtons(*args, **kwargs):
        pass
    def wheelEvent(*args, **kwargs):
        pass
    ButtonPosition = None
    LeftSide = None
    RightSide = None
    RoundedEast = None
    RoundedNorth = None
    RoundedSouth = None
    RoundedWest = None
    SelectLeftTab = None
    SelectPreviousTab = None
    SelectRightTab = None
    SelectionBehavior = None
    Shape = None
    TriangularEast = None
    TriangularNorth = None
    TriangularSouth = None
    TriangularWest = None
    __new__ = None
    currentChanged = None
    staticMetaObject = None
    tabBarClicked = None
    tabBarDoubleClicked = None
    tabCloseRequested = None
    tabMoved = None
class QSplitterHandle(QWidget):
    """Auto-generated stub for the Qt QSplitterHandle binding.

    Bodies are placeholders; the real implementation lives in the
    compiled Qt extension module.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def closestLegalPosition(*args, **kwargs):
        pass
    def event(*args, **kwargs):
        pass
    def mouseMoveEvent(*args, **kwargs):
        pass
    def mousePressEvent(*args, **kwargs):
        pass
    def mouseReleaseEvent(*args, **kwargs):
        pass
    def moveSplitter(*args, **kwargs):
        pass
    def opaqueResize(*args, **kwargs):
        pass
    def orientation(*args, **kwargs):
        pass
    def paintEvent(*args, **kwargs):
        pass
    def resizeEvent(*args, **kwargs):
        pass
    def setOrientation(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    def splitter(*args, **kwargs):
        pass
    __new__ = None
    staticMetaObject = None
class QStyleOptionTab(QStyleOption):
    """Auto-generated stub for the Qt QStyleOptionTab binding.

    Attribute names mirror the C++ struct fields; the trailing names are
    enum values, all stubbed as None.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    cornerWidgets = None
    documentMode = None
    features = None
    icon = None
    iconSize = None
    leftButtonSize = None
    position = None
    rightButtonSize = None
    row = None
    selectedPosition = None
    shape = None
    text = None
    Beginning = None
    CornerWidget = None
    CornerWidgets = None
    End = None
    HasFrame = None
    LeftCornerWidget = None
    Middle = None
    NextIsSelected = None
    # 'None' is a keyword, so the generated stub must inject the enum
    # member named "None" via the class namespace instead of assignment.
    locals()['None'] = None
    NotAdjacent = None
    OnlyOneTab = None
    PreviousIsSelected = None
    RightCornerWidget = None
    SelectedPosition = None
    StyleOptionType = None
    StyleOptionVersion = None
    TabFeature = None
    TabFeatures = None
    TabPosition = None
    Type = None
    Version = None
    __new__ = None
class QAbstractSlider(QWidget):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def changeEvent(*args, **kwargs):
pass
def event(*args, **kwargs):
pass
def hasTracking(*args, **kwargs):
pass
def invertedAppearance(*args, **kwargs):
pass
def invertedControls(*args, **kwargs):
pass
def isSliderDown(*args, **kwargs):
pass
def keyPressEvent(*args, **kwargs):
pass
def maximum(*args, **kwargs):
pass
def minimum(*args, **kwargs):
pass
def orientation(*args, **kwargs):
pass
def pageStep(*args, **kwargs):
pass
def repeatAction(*args, **kwargs):
pass
def setInvertedAppearance(*args, **kwargs):
pass
def setInvertedControls(*args, **kwargs):
pass
def setMaximum(*args, **kwargs):
pass
def setMinimum(*args, **kwargs):
pass
def setOrientation(*args, **kwargs):
pass
def setPageStep(*args, **kwargs):
pass
def setRange(*args, **kwargs):
pass
def setRepeatAction(*args, **kwargs):
pass
| |
# Repository: ARte-team/ARte
#!/usr/bin/env python3
# Copyright (C) 2018 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import re
import os
import sys
import subprocess
from scapy.all import Ether, IPv6, UDP, \
IPv6ExtHdrHopByHop, IPv6ExtHdrDestOpt, \
IPv6ExtHdrFragment, IPv6ExtHdrRouting, \
ICMPv6ParamProblem, \
sendp, srp1
from testrunner import run
# IANA-assigned IPv6 next-header protocol numbers, keyed by scapy class.
EXT_HDR_NH = {
    IPv6ExtHdrHopByHop: 0,
    IPv6ExtHdrRouting: 43,
    IPv6ExtHdrFragment: 44,
    # IPSec headers currently unsupported by scapy
    IPv6ExtHdrDestOpt: 60,
    # Mobility header currently unsupported by scapy
}
def pktbuf_empty(child):
    """Assert (via shell expects) that the node's packet buffer is all unused."""
    child.sendline("pktbuf")
    child.expect(r"packet buffer: first byte: (?P<first_byte>0x[0-9a-fA-F]+), "
                 r"last byte: 0x[0-9a-fA-F]+ \(size: (?P<size>\d+)\)")
    buf_start = child.match.group("first_byte")
    buf_size = child.match.group("size")
    # One unused chunk spanning the whole buffer means nothing is allocated.
    unused_pattern = r"~ unused: {} \(next: (\(nil\)|0), size: {}\) ~".format(
        buf_start, buf_size)
    child.expect(unused_pattern)
def register_protnum(child, protnum):
    """Register the test shell to receive packets for an IPv6 protocol number."""
    command = "ip reg %d" % protnum
    confirmation = "Registered to protocol number %d" % protnum
    child.sendline(command)
    child.expect(confirmation)
def unregister(child):
    """Unregister the test shell from its current protocol number."""
    child.sendline("ip unreg")
    child.expect(r"Unregistered from protocol number \d")
def test_empty_hop_by_hop_opt_wo_register(child, iface, hw_dst, ll_dst, ll_src):
    """Unregistered: an empty hop-by-hop header is consumed without leaking."""
    # Try sending an empty hop-by-hop-option header
    sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
          IPv6ExtHdrHopByHop() / UDP(), iface=iface, verbose=0)
    pktbuf_empty(child)
def test_empty_hop_by_hop_opt_w_register(child, iface, hw_dst, ll_dst, ll_src):
    """Registered: the hop-by-hop header is delivered with correct contents."""
    # Register to hop-by-hop-option header
    register_protnum(child, EXT_HDR_NH[IPv6ExtHdrHopByHop])
    # Try sending an empty hop-by-hop-option header
    sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
          IPv6ExtHdrHopByHop() / UDP() / "\x01\x02", iface=iface, verbose=0)
    child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
    ipv6_payload_len = int(child.match.group(1))
    # NH = 17 (UDP), len = 0x00, PadN option (0x01) of length 0x04
    child.expect(r"00000000 11 00 01 04 00 00 00 00")
    child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
    child.expect_exact(r"length: {} next header: {}".format(
        ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
    ))
    child.expect_exact(r"destination address: {}".format(ll_dst))
    pktbuf_empty(child)
    unregister(child)
def test_empty_duplicate_hop_by_hop_opt(child, iface, hw_dst, ll_dst, ll_src):
    """A second hop-by-hop header must trigger an ICMPv6 parameter problem."""
    # Try sending two empty hop-by-hop-option header
    p = srp1(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
             IPv6ExtHdrHopByHop() / IPv6ExtHdrHopByHop() / UDP() / "\x03\x04",
             iface=iface, timeout=1, verbose=0)
    # should return parameter problem message
    assert(p is not None)
    assert(ICMPv6ParamProblem in p)
    assert(p[ICMPv6ParamProblem].code == 1)     # unrecognized next header
    assert(p[ICMPv6ParamProblem].ptr >= 40)     # after IPv6 header
    pktbuf_empty(child)
def test_empty_non_first_hop_by_hop_opt(child, iface, hw_dst, ll_dst, ll_src):
    """Hop-by-hop after another extension header is a parameter problem."""
    # Try sending empty hop-by-hop-option header after destination option
    # header
    p = srp1(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
             IPv6ExtHdrDestOpt() / IPv6ExtHdrHopByHop() / UDP() / "\x05\x06",
             iface=iface, timeout=1, verbose=0)
    # should return parameter problem message
    assert(p is not None)
    assert(ICMPv6ParamProblem in p)
    assert(p[ICMPv6ParamProblem].code == 1)     # unrecognized next header
    assert(p[ICMPv6ParamProblem].ptr >= 40)     # after IPv6 header
    pktbuf_empty(child)
def test_empty_duplicate_non_first_hop_by_hop_opt(child, iface, hw_dst, ll_dst,
                                                  ll_src):
    """A misplaced duplicate hop-by-hop header is rejected past the first one."""
    # Try sending empty hop-by-hop-option header after destination option
    # header and another hop-by-hop-option header
    p = srp1(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
             IPv6ExtHdrHopByHop() / IPv6ExtHdrDestOpt() / IPv6ExtHdrHopByHop() /
             UDP() / "\x07\x08",
             iface=iface, timeout=1, verbose=0)
    # should return parameter problem message
    assert(p is not None)
    assert(ICMPv6ParamProblem in p)
    assert(p[ICMPv6ParamProblem].code == 1)     # unrecognized next header
    assert(p[ICMPv6ParamProblem].ptr >= 48)     # after IPv6 header and HopByHopOpt
    pktbuf_empty(child)
def test_empty_routing_header_wo_register(child, iface, hw_dst, ll_dst, ll_src):
    """Unregistered: an empty routing header is consumed without leaking."""
    # Try sending an empty routing header
    sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
          IPv6ExtHdrRouting() / UDP(), iface=iface, verbose=0)
    pktbuf_empty(child)
def test_empty_routing_header_w_register(child, iface, hw_dst, ll_dst, ll_src):
    """Registered: the routing header is delivered with correct contents."""
    # Register to routing header
    register_protnum(child, EXT_HDR_NH[IPv6ExtHdrRouting])
    # Try sending an empty routing header
    sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
          IPv6ExtHdrRouting() / UDP() / "\x01\x02", iface=iface, verbose=0)
    child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
    ipv6_payload_len = int(child.match.group(1))
    # NH = 17 (UDP), len = 0x00, routing type = 0, segments left = 0
    child.expect(r"00000000 11 00 00 00 00 00 00 00")
    child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
    child.expect_exact(r"length: {} next header: {}".format(
        ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrRouting]
    ))
    child.expect_exact(r"destination address: {}".format(ll_dst))
    pktbuf_empty(child)
    unregister(child)
def test_empty_fragment_header_wo_register(child, iface, hw_dst, ll_dst, ll_src):
    """Unregistered: an empty fragment header is consumed without leaking."""
    # Try sending an empty fragment header
    sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
          IPv6ExtHdrFragment() / UDP(), iface=iface, verbose=0)
    pktbuf_empty(child)
def test_empty_fragment_header_w_register(child, iface, hw_dst, ll_dst, ll_src):
    """Registered: the fragment header is delivered with correct contents."""
    # Register to fragment header
    register_protnum(child, EXT_HDR_NH[IPv6ExtHdrFragment])
    # Try sending an empty fragment header
    sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
          IPv6ExtHdrFragment() / UDP() / "\x01\x02", iface=iface, verbose=0)
    child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
    ipv6_payload_len = int(child.match.group(1))
    # NH = 17 (UDP), reserved = 0x00, fragment offset = 0, res = 0, M = 0
    child.expect(r"00000000 11 00 00 00 00 00 00 00")
    child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
    child.expect_exact(r"length: {} next header: {}".format(
        ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrFragment]
    ))
    child.expect_exact(r"destination address: {}".format(ll_dst))
    pktbuf_empty(child)
    unregister(child)
def test_empty_dest_opt_wo_register(child, iface, hw_dst, ll_dst, ll_src):
    """Unregistered: an empty destination-option header is consumed cleanly."""
    # Try sending an empty Destination-Option header
    sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
          IPv6ExtHdrDestOpt() / UDP(), iface=iface, verbose=0)
    pktbuf_empty(child)
def test_empty_dest_opt_w_register(child, iface, hw_dst, ll_dst, ll_src):
    """Registered: the destination-option header is delivered correctly."""
    # Register to Destination-Option header
    register_protnum(child, EXT_HDR_NH[IPv6ExtHdrDestOpt])
    # Try sending an empty Destination-Option header
    sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
          IPv6ExtHdrDestOpt() / UDP() / "\x01\x02", iface=iface, verbose=0)
    child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
    ipv6_payload_len = int(child.match.group(1))
    # NH = 17 (UDP), len = 0x00, PadN option (0x01) of length 0x04
    child.expect(r"00000000 11 00 01 04 00 00 00 00")
    child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
    child.expect_exact(r"length: {} next header: {}".format(
        ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrDestOpt]
    ))
    child.expect_exact(r"destination address: {}".format(ll_dst))
    pktbuf_empty(child)
    unregister(child)
def test_empty_mixed1_w_hop_opt_registered(child, iface, hw_dst, ll_dst, ll_src):
    """RFC 8200 header chain: hop-by-hop subscriber gets the whole chain."""
    # Register to hop-by-hop-option header
    register_protnum(child, EXT_HDR_NH[IPv6ExtHdrHopByHop])
    # Try sending a packet with a number of extension headers in recommended
    # order: https://tools.ietf.org/html/rfc8200#section-4.1
    sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
          IPv6ExtHdrHopByHop() / IPv6ExtHdrDestOpt() / IPv6ExtHdrRouting() /
          IPv6ExtHdrFragment() / IPv6ExtHdrDestOpt() / UDP() / "\x01\x02",
          iface=iface, verbose=0)
    # Hop-by-hop option with payload
    child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
    ipv6_payload_len = int(child.match.group(1))
    # NH = IPv6ExtHdrDestOpt, len = 0x00, PadN option (0x01) of length 0x04
    # NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
    child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00 "
                 r"{:02X} 00 01 04 00 00 00 00".format(
                    EXT_HDR_NH[IPv6ExtHdrDestOpt],
                    EXT_HDR_NH[IPv6ExtHdrRouting]
                 ))
    # NH = IPv6ExtHdrFragment, len = 0x00, routing type = 0, segments left = 0
    # NH = IPv6ExtHdrDestOpt, reserved = 0x00, fragment offset = 0, res = 0, M = 0
    child.expect(r"00000010 {:02X} 00 00 00 00 00 00 00 "
                 r"{:02X} 00 00 00 00 00 00 00".format(
                    EXT_HDR_NH[IPv6ExtHdrFragment],
                    EXT_HDR_NH[IPv6ExtHdrDestOpt]
                 ))
    # NH = 17 (UDP), len = 0x00, PadN option (0x01) of length 0x04
    child.expect(r"00000020 11 00 01 04 00 00 00 00")
    # IPv6 header
    child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
    child.expect_exact(r"length: {} next header: {}".format(
        ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
    ))
    child.expect_exact(r"destination address: {}".format(ll_dst))
    pktbuf_empty(child)
    unregister(child)
def test_empty_mixed1_w_rt_hdr_registered(child, iface, hw_dst, ll_dst, ll_src):
    """RFC 8200 header chain: routing-header subscriber sees split snips."""
    # Register to routing header
    register_protnum(child, EXT_HDR_NH[IPv6ExtHdrRouting])
    # Try sending a packet with a number of extension headers in recommended
    # order: https://tools.ietf.org/html/rfc8200#section-4.1
    sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
          IPv6ExtHdrHopByHop() / IPv6ExtHdrDestOpt() / IPv6ExtHdrRouting() /
          IPv6ExtHdrFragment() / IPv6ExtHdrDestOpt() / UDP() / "\x01\x02",
          iface=iface, verbose=0)
    # Routing header with payload
    child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
    ipv6_payload_len = int(child.match.group(1))
    # NH = IPv6ExtHdrFragment, len = 0x00, routing type = 0, segments left = 0
    # NH = IPv6ExtHdrDestOpt, reserved = 0x00, fragment offset = 0, res = 0, M = 0
    child.expect(r"00000000 {:02X} 00 00 00 00 00 00 00 "
                 r"{:02X} 00 00 00 00 00 00 00".format(
                    EXT_HDR_NH[IPv6ExtHdrFragment],
                    EXT_HDR_NH[IPv6ExtHdrDestOpt]
                 ))
    # NH = 17 (UDP), len = 0x00, PadN option (0x01) of length 0x04
    child.expect(r"00000010 11 00 01 04 00 00 00 00")
    # Destination option 1
    child.expect(r"~~ SNIP 1 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
    ipv6_payload_len += int(child.match.group(1))
    # NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
    child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
        EXT_HDR_NH[IPv6ExtHdrRouting]))
    # Hop-by-hop option
    child.expect(r"~~ SNIP 2 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
    ipv6_payload_len += int(child.match.group(1))
    # NH = IPv6ExtHdrDestOpt, len = 0x00, PadN option (0x01) of length 0x04
    child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
        EXT_HDR_NH[IPv6ExtHdrDestOpt]))
    # IPv6 header
    child.expect(r"~~ SNIP 3 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
    child.expect_exact(r"length: {} next header: {}".format(
        ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
    ))
    child.expect_exact(r"destination address: {}".format(ll_dst))
    pktbuf_empty(child)
    unregister(child)
def test_empty_mixed1_w_frag_hdr_registered(child, iface, hw_dst, ll_dst, ll_src):
# Register to fragment header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrFragment])
# Try sending a packet with a number of extension headers in recommended
# order: https://tools.ietf.org/html/rfc8200#section-4.1
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrDestOpt() / IPv6ExtHdrRouting() /
IPv6ExtHdrFragment() / IPv6ExtHdrDestOpt() / UDP() / "\x01\x02",
iface=iface, verbose=0)
# Routing | |
#!/usr/bin/python
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# loc8tr.py - Locator Traceroute - Traceroute paths to LISP RLOCs
#
# Last update: Wed Sep 26 14:13:59 PDT 2018
#
# Usage: python loc8tr.py [-n] [-d] [-hp <host>:<port>]
# [-up <username>:<password>]
#
# -n:
# Do not do DNS lookups on traceroute hops
# -d:
# Produce more output.
# -hp <host>:<port>:
# Host address and Port number to get to lispers.net restful API. When
# <port> is a negative number that means that http rather than https
# is used.
# -up <username>:<password>:
# Supply username and password for lispers.net map-cache query.
#
# This application is run on an xTR. Typically a ITR or RTR so the map-cache
# can be retrieved to find all the active RLOCs being used for each map-cache
# entry. The application assumes the lispers.net xTR implementation is running
# and retrieves the map-cache using this restful interface:
#
# curl --silent --insecure -u "root:" \
# https://localhost:8080/lisp/api/data/map-cache
#
# The app will do some JSON fetching and build an RLOC data structure to
# start traceroute to each RLOC. It will then put the results in a local
# file system to be viewed later.
#
# This app will create a directory in the current directory named:
#
#     loc8tr-<date>-<time>
#
# And creates 3 files:
#
# loc8tr.log - Output from this script in human readable form.
# loc8tr.json - JSON for the RLOC-cache
# loc8tr-mc.json - JSON for the data pulled from the lispers.net xTR
#
# Most output is self-explanatory. You will find that a green "!" means the
# traceroute made it to the RLOC. If you find a red "X", the traceroute
# fell short of finding the RLOC.
#
if 64 - 64: i11iIiiIii
import commands
import json
import sys
import socket
import os
import datetime
import string
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
if 60 - 60: iIii1I11I1II1 / i1IIi * oO0o - I1ii11iIi11i + o0oOOo0O0Ooo
# Credentials for the lispers.net restful API, in "user:password" form.
# Default is user "root" with an empty password; overridden by
# "-up <username>:<password>" on the command line.
ooO0oo0oO0 = "root:"
if ( "-up" in sys . argv ) :
    # Take the argument that follows "-up".
    ooO0oo0oO0 = sys . argv . index ( "-up" )
    ooO0oo0oO0 = sys . argv [ ooO0oo0oO0 + 1 ]
    oo00 = ooO0oo0oO0 . split ( ":" )
    if ( len ( oo00 ) == 1 ) :
        # No ":" separator present -- reject the argument.
        print "Invalid syntax for username:password pair"
        exit ( 1 )
if 88 - 88: iII111i . oO0o % ooOoO0o
if 66 - 66: iII111i
if 30 - 30: iIii1I11I1II1 * iIii1I11I1II1 . II111iiii - oO0o
# URL scheme and host:port used to reach the lispers.net restful API.
# Overridden by "-hp <host>:<port>"; a leading "-" on the port selects
# plain http instead of https.
ooO00oOoo = "https"
O0OOo = "localhost:8080"
if ( "-hp" in sys . argv ) :
    # Take the argument that follows "-hp".
    O0OOo = sys . argv . index ( "-hp" )
    O0OOo = sys . argv [ O0OOo + 1 ]
    oo00 = O0OOo . split ( ":" )
    if ( len ( oo00 ) == 1 ) :
        # No ":" separator present -- reject the argument.
        print "Invalid syntax for host:port pair"
        exit ( 1 )
    if 8 - 8: o0oOOo0O0Ooo * I1ii11iIi11i * iIii1I11I1II1 . IiII / IiII % IiII
    i11 = oo00 [ 1 ]
    # A negative port number means "use http rather than https".
    if ( i11 [ 0 ] == "-" ) :
        ooO00oOoo = "http"
        i11 = i11 [ 1 : : ]
    if 41 - 41: I1Ii111 . ooOoO0o * IiII % i11iIiiIii
    if ( i11 . isdigit ( ) == False ) :
        print "Invalid syntax for port"
        exit ( 1 )
    if 74 - 74: iII111i * IiII
    # Reassemble host:port with the sign stripped from the port.
    O0OOo = oo00 [ 0 ] + ":" + i11
if 82 - 82: iIii1I11I1II1 % IiII
# Shell command used to pull the map-cache from the lispers.net API.
oOo0oooo00o = 'curl --silent --insecure -u "{}" {}://{}/lisp/api/data/map-cache' . format ( ooO0oo0oO0 , ooO00oOoo , O0OOo )
if 65 - 65: o0oOOo0O0Ooo * iIii1I11I1II1 * ooOoO0o
if 18 - 18: iIii1I11I1II1 / I11i + oO0o / Oo0Ooo - II111iiii - I11i
if 1 - 1: I11i - OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
# Traceroute command template: numeric output (-n), at most 15 hops,
# 1 probe per hop, 1-second wait; "{}" is filled with the RLOC address.
i11IiIiiIIIII = "traceroute -n -m 15 -q 1 -w 1 {}"
if 22 - 22: Ii1I * O0 / o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
# Timestamped output directory plus the three files created inside it
# (human-readable log, RLOC-cache JSON, raw map-cache JSON).
oo0Ooo0 = datetime . datetime . now ( ) . strftime ( "%m-%d-%y-%H-%M-%S" )
I1I11I1I1I = "loc8tr-{}" . format ( oo0Ooo0 )
OooO0OO = "loc8tr.log"
iiiIi = "loc8tr.json"
IiIIIiI1I1 = "loc8tr-mc.json"
if 86 - 86: i11iIiiIii + Ii1I + ooOoO0o * I11i + o0oOOo0O0Ooo
if 61 - 61: OoO0O00 / i11iIiiIii
if 34 - 34: OoooooooOO + iIii1I11I1II1 + i11iIiiIii - I1ii11iIi11i + i11iIiiIii
if 65 - 65: OoOoOO00
if 6 - 6: I1IiiI / Oo0Ooo % Ii1I
if 84 - 84: i11iIiiIii . o0oOOo0O0Ooo
if 100 - 100: Ii1I - Ii1I - I1Ii111
if 20 - 20: OoooooooOO
if 13 - 13: i1IIi - Ii1I % oO0o / iIii1I11I1II1 % iII111i
if 97 - 97: i11iIiiIii
# RLOC cache (keyed by RLOC address) and the indices of the fields stored
# in each cache entry's value list.
II1i1Ii11Ii11 = { }
iII11i = 0
O0O00o0OOO0 = 1
Ii1iIIIi1ii = 2
o0oo0o0O00OO = 3
o0oO = 4
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
# Command-line flags: -n disables DNS lookups on traceroute hops,
# -d enables more verbose output.
i1iiI11I = ( "-n" in sys . argv )
iiii = ( "-d" in sys . argv )
if 54 - 54: I1ii11iIi11i * OOooOOo
if 13 - 13: IiII + OoOoOO00 - OoooooooOO + I1Ii111 . iII111i + OoO0O00
if 8 - 8: iIii1I11I1II1 . I1IiiI - iIii1I11I1II1 * Ii1I
if 61 - 61: o0oOOo0O0Ooo / OoO0O00 + ooOoO0o * oO0o / oO0o
if 75 - 75: i1IIi / OoooooooOO - O0 / OoOoOO00 . II111iiii - i1IIi
if 71 - 71: OOooOOo + Ii1I * OOooOOo - OoO0O00 * o0oOOo0O0Ooo
if 65 - 65: O0 % I1IiiI . I1ii11iIi11i % iIii1I11I1II1 / OOooOOo % I1Ii111
if 51 - 51: i11iIiiIii . I1IiiI + II111iiii
if 10 - 10: I1ii11iIi11i * ooOoO0o * II111iiii % Ii1I . OOooOOo + I1Ii111
if 19 - 19: OoOoOO00 - I1IiiI . OOooOOo / IiII
if 33 - 33: I1Ii111 / I1ii11iIi11i % I1IiiI + ooOoO0o / OoO0O00
def OOOoO0O0o ( tr ) :
    """Parse raw traceroute output into a list of [hop-address, rtt] pairs.

    *tr* is the text produced by the traceroute command (run with -n, so
    hops appear as numeric addresses).  The first line -- the
    "traceroute to ..." banner -- is skipped.  For every hop line the
    second whitespace-separated field (the hop address) and the
    second-to-last field (the round-trip time) are collected.  A line
    whose second field contains "Invalid" is recorded as ["?", "?"].

    Fix over the original: blank or single-field lines (e.g. from a
    trailing newline in the command output) are now skipped instead of
    raising IndexError.
    """
    hops = []
    for line in tr.split("\n")[1:]:
        fields = line.split()
        if len(fields) < 2:
            # Empty or malformed line -- nothing usable on it.
            continue
        if fields[1].find("Invalid") != -1:
            hops.append(["?", "?"])
            continue
        hops.append([fields[1], fields[-2]])
    return hops
if 35 - 35: OoOoOO00 - OoooooooOO / I1ii11iIi11i % i1IIi
if 78 - 78: I11i
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
def iI1ii1Ii ( ) :
    """Create the timestamped output directory and its three empty files.

    Creates the directory named by the module-global I1I11I1I1I and
    touches the log file (OooO0OO), the RLOC-cache JSON file (iiiIi) and
    the raw map-cache JSON file (IiIIIiI1I1) inside it.

    Fix over the original: the touch format strings had two arguments but
    only one "{}" placeholder ("touch {}/"), so the filename was dropped
    and no files were actually created inside the directory.
    """
    os.system("mkdir {}".format(I1I11I1I1I))
    for fname in (OooO0OO, iiiIi, IiIIIiI1I1):
        os.system("touch {}/{}".format(I1I11I1I1I, fname))
if 92 - 92: OoOoOO00
if 26 - 26: iII111i . I1Ii111
| |
itemsize = 4
else:
raise NotImplementedError (`bits`)
else:
itemsize = bits/8
# in order to allocate the numpy array, we must count the directories:
# code borrowed from TIFF.iter_images():
depth = 0
while True:
depth += 1
if self.LastDirectory():
break
self.ReadDirectory()
self.SetDirectory(0)
# we proceed assuming all directories have the same properties from above.
layer_size = width * height * itemsize
total_size = layer_size * depth
arr = np.zeros((depth, height, width), typ)
if compression == COMPRESSION_NONE:
ReadStrip = self.ReadRawStrip
else:
ReadStrip = self.ReadEncodedStrip
layer = 0
while True:
pos = 0
elem = None
for strip in range (self.NumberOfStrips()):
if elem is None:
elem = ReadStrip(strip, arr.ctypes.data + layer * layer_size + pos, layer_size)
elif elem:
elem = ReadStrip(strip, arr.ctypes.data + layer * layer_size + pos, min(layer_size - pos, elem))
pos += elem
if self.LastDirectory():
break
self.ReadDirectory()
layer += 1
self.SetDirectory(0)
return arr
class CZ_LSMInfo:
    """Accessor for the Carl Zeiss LSM private tag (TIFFTAG_CZ_LSMINFO).

    NOTE(review): extract_info is exploratory/debugging code -- it prints
    raw offsets and header fields to stdout instead of returning parsed
    data.
    """
    def __init__(self, tiff):
        # tiff: an open TIFF handle.  The CZ_LSMINFO tag value is the file
        # offset of the LSM info structure (None when the tag is absent).
        self.tiff = tiff
        self.filename = tiff.FileName()
        self.offset = tiff.GetField(TIFFTAG_CZ_LSMINFO)
        self.extract_info()
    def extract_info (self):
        """Print debug information about the LSM info block to stdout."""
        if self.offset is None:
            return
        f = libtiff.TIFFFileno(self.tiff)
        # Wrap the raw file descriptor so we can seek/read it directly.
        # NOTE(review): fd shares the descriptor with libtiff; the final
        # fd.seek(pos) restores the position to avoid confusing libtiff.
        fd = os.fdopen(f, 'r')
        pos = fd.tell()
        self.offset = self.tiff.GetField(TIFFTAG_CZ_LSMINFO)
        print os.lseek(f, 0, 1)
        print pos
        #print libtiff.TIFFSeekProc(self.tiff, 0, 1)
        fd.seek(0)
        # First bytes of the file -- presumably the TIFF header (byte-order
        # mark, version, first IFD offset); verify against the TIFF spec.
        print struct.unpack ('HH', fd.read (4))
        print struct.unpack('I',fd.read (4))
        print struct.unpack('H',fd.read (2))
        fd.seek(self.offset)
        # Leading fields of the CZ LSM info structure.
        d = [('magic_number', 'i4'),
             ('structure_size', 'i4')]
        print pos, np.rec.fromfile(fd, d, 1)
        fd.seek(pos)
        #print hex (struct.unpack('I', fd.read (4))[0])
        #fd.close()
    def __str__ (self):
        return '%s: %s' % (self.filename, self.offset)
# ---------------------------------------------------------------------------
# ctypes prototypes for the libtiff C API.  Declaring restype/argtypes for
# each entry point gives correct return-value conversion and argument
# checking at call time.
# (Fix: the original declared TIFFFileno's restype/argtypes twice; the
# redundant second pair has been removed.)
# ---------------------------------------------------------------------------
# File open and identification:
libtiff.TIFFOpen.restype = TIFF
libtiff.TIFFOpen.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
libtiff.TIFFFileName.restype = ctypes.c_char_p
libtiff.TIFFFileName.argtypes = [TIFF]
libtiff.TIFFFileno.restype = ctypes.c_int
libtiff.TIFFFileno.argtypes = [TIFF]
# Current-position queries:
libtiff.TIFFCurrentRow.restype = ctypes.c_uint32
libtiff.TIFFCurrentRow.argtypes = [TIFF]
libtiff.TIFFCurrentStrip.restype = c_tstrip_t
libtiff.TIFFCurrentStrip.argtypes = [TIFF]
libtiff.TIFFCurrentTile.restype = c_ttile_t
libtiff.TIFFCurrentTile.argtypes = [TIFF]
libtiff.TIFFCurrentDirectory.restype = c_tdir_t
libtiff.TIFFCurrentDirectory.argtypes = [TIFF]
# Directory navigation:
libtiff.TIFFLastDirectory.restype = ctypes.c_int
libtiff.TIFFLastDirectory.argtypes = [TIFF]
libtiff.TIFFReadDirectory.restype = ctypes.c_int
libtiff.TIFFReadDirectory.argtypes = [TIFF]
libtiff.TIFFWriteDirectory.restype = ctypes.c_int
libtiff.TIFFWriteDirectory.argtypes = [TIFF]
libtiff.TIFFSetDirectory.restype = ctypes.c_int
libtiff.TIFFSetDirectory.argtypes = [TIFF, c_tdir_t]
# Mode and layout queries:
libtiff.TIFFGetMode.restype = ctypes.c_int
libtiff.TIFFGetMode.argtypes = [TIFF]
libtiff.TIFFIsTiled.restype = ctypes.c_int
libtiff.TIFFIsTiled.argtypes = [TIFF]
libtiff.TIFFIsByteSwapped.restype = ctypes.c_int
libtiff.TIFFIsByteSwapped.argtypes = [TIFF]
libtiff.TIFFIsUpSampled.restype = ctypes.c_int
libtiff.TIFFIsUpSampled.argtypes = [TIFF]
libtiff.TIFFIsMSB2LSB.restype = ctypes.c_int
libtiff.TIFFIsMSB2LSB.argtypes = [TIFF]
# Tag get/set:
libtiff.TIFFGetField.restype = ctypes.c_int
libtiff.TIFFGetField.argtypes = [TIFF, c_ttag_t, ctypes.c_void_p]
libtiff.TIFFSetField.restype = ctypes.c_int
libtiff.TIFFSetField.argtypes = [TIFF, c_ttag_t, ctypes.c_void_p] # last item is reset in TIFF.SetField method
# Strip-based I/O:
libtiff.TIFFNumberOfStrips.restype = c_tstrip_t
libtiff.TIFFNumberOfStrips.argtypes = [TIFF]
libtiff.TIFFReadRawStrip.restype = c_tsize_t
libtiff.TIFFReadRawStrip.argtypes = [TIFF, c_tstrip_t, c_tdata_t, c_tsize_t]
libtiff.TIFFWriteRawStrip.restype = c_tsize_t
libtiff.TIFFWriteRawStrip.argtypes = [TIFF, c_tstrip_t, c_tdata_t, c_tsize_t]
libtiff.TIFFReadEncodedStrip.restype = c_tsize_t
libtiff.TIFFReadEncodedStrip.argtypes = [TIFF, c_tstrip_t, c_tdata_t, c_tsize_t]
libtiff.TIFFWriteEncodedStrip.restype = c_tsize_t
libtiff.TIFFWriteEncodedStrip.argtypes = [TIFF, c_tstrip_t, c_tdata_t, c_tsize_t]
libtiff.TIFFStripSize.restype = c_tsize_t
libtiff.TIFFStripSize.argtypes = [TIFF]
libtiff.TIFFRawStripSize.restype = c_tsize_t
libtiff.TIFFRawStripSize.argtypes = [TIFF, c_tstrip_t]
# For adding custom tags (must be void pointer otherwise callback seg faults
libtiff.TIFFMergeFieldInfo.restype = ctypes.c_int32
libtiff.TIFFMergeFieldInfo.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint32]
# Tile Support
# TODO:
#   TIFFTileRowSize64
#   TIFFTileSize64
#   TIFFVTileSize
#   TIFFVTileSize64
libtiff.TIFFTileRowSize.restype = c_tsize_t
libtiff.TIFFTileRowSize.argtypes = [TIFF]
libtiff.TIFFTileSize.restype = c_tsize_t
libtiff.TIFFTileSize.argtypes = [TIFF]
libtiff.TIFFComputeTile.restype = c_ttile_t
libtiff.TIFFComputeTile.argtypes = [TIFF, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, c_tsample_t]
libtiff.TIFFCheckTile.restype = ctypes.c_int
libtiff.TIFFCheckTile.argtypes = [TIFF, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, c_tsample_t]
libtiff.TIFFNumberOfTiles.restype = c_ttile_t
libtiff.TIFFNumberOfTiles.argtypes = [TIFF]
libtiff.TIFFReadTile.restype = c_tsize_t
libtiff.TIFFReadTile.argtypes = [TIFF, c_tdata_t, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, c_tsample_t]
libtiff.TIFFWriteTile.restype = c_tsize_t
libtiff.TIFFWriteTile.argtypes = [TIFF, c_tdata_t, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, c_tsample_t]
libtiff.TIFFReadEncodedTile.restype = ctypes.c_int
libtiff.TIFFReadEncodedTile.argtypes = [TIFF, ctypes.c_ulong, ctypes.c_char_p, ctypes.c_ulong]
libtiff.TIFFReadRawTile.restype = c_tsize_t
libtiff.TIFFReadRawTile.argtypes = [TIFF, c_ttile_t, c_tdata_t, c_tsize_t]
libtiff.TIFFReadRGBATile.restype = ctypes.c_int
libtiff.TIFFReadRGBATile.argtypes = [TIFF, ctypes.c_uint32, ctypes.c_uint32, ctypes.POINTER(ctypes.c_uint32)]
libtiff.TIFFWriteEncodedTile.restype = c_tsize_t
libtiff.TIFFWriteEncodedTile.argtypes = [TIFF, c_ttile_t, c_tdata_t, c_tsize_t]
libtiff.TIFFWriteRawTile.restype = c_tsize_t
libtiff.TIFFWriteRawTile.argtypes = [TIFF, c_ttile_t, c_tdata_t, c_tsize_t]
libtiff.TIFFDefaultTileSize.restype = None
libtiff.TIFFDefaultTileSize.argtypes = [TIFF, ctypes.c_uint32, ctypes.c_uint32]
# Cleanup:
libtiff.TIFFClose.restype = None
libtiff.TIFFClose.argtypes = [TIFF]
# Support for TIFF warning and error handlers:
# Handlers receive (module, printf-style format, C va_list).  The va_list
# cannot be expanded portably from Python, hence the opaque c_void_p.
TIFFWarningHandler = ctypes.CFUNCTYPE(None,
                                      ctypes.c_char_p, # Module
                                      ctypes.c_char_p, # Format
                                      ctypes.c_void_p) # va_list
TIFFErrorHandler = ctypes.CFUNCTYPE(None,
                                    ctypes.c_char_p, # Module
                                    ctypes.c_char_p, # Format
                                    ctypes.c_void_p) # va_list
# This has to be at module scope so it is not garbage-collected
_null_warning_handler = TIFFWarningHandler(lambda module, fmt, va_list: None)
_null_error_handler = TIFFErrorHandler(lambda module, fmt, va_list: None)
def suppress_warnings():
    """Install a no-op handler so libtiff warnings are silently discarded."""
    libtiff.TIFFSetWarningHandler(_null_warning_handler)
def suppress_errors():
    """Install a no-op handler so libtiff error messages are silently discarded."""
    libtiff.TIFFSetErrorHandler(_null_error_handler)
def _test_custom_tags():
    """Round-trip test for user-registered (custom) TIFF tags.

    Registers four FIELD_CUSTOM tags with libtiff, writes them (together
    with some standard tags and a small image) to a temporary file, then
    reads everything back and asserts the values survived.
    """
    def _tag_write():
        # Write standard tags, the custom tags registered below, and a
        # 512x512 all-255 image.
        a = TIFF.open("/tmp/libtiff_test_custom_tags.tif", "w")
        a.SetField("ARTIST", "MY NAME")
        a.SetField("LibtiffTestByte", 42)
        a.SetField("LibtiffTeststr", "FAKE")
        a.SetField("LibtiffTestuint16", 42)
        a.SetField("LibtiffTestMultiuint32", (1,2,3,4,5,6,7,8,9,10))
        a.SetField("XPOSITION", 42.0)
        a.SetField("PRIMARYCHROMATICITIES", (1.0, 2, 3, 4, 5, 6))
        arr = np.ones((512,512), dtype=np.uint8)
        arr[:,:] = 255
        a.write_image(arr)
        print "Tag Write: SUCCESS"
    def _tag_read():
        # Read the image and each tag back and verify the stored values.
        a = TIFF.open("/tmp/libtiff_test_custom_tags.tif", "r")
        tmp = a.read_image()
        assert tmp.shape==(512,512),"Image read was wrong shape (%r instead of (512,512))" % (tmp.shape,)
        tmp = a.GetField("XPOSITION")
        assert tmp == 42.0,"XPosition was not read as 42.0"
        tmp = a.GetField("ARTIST")
        assert tmp=="MY NAME","Artist was not read as 'MY NAME'"
        tmp = a.GetField("LibtiffTestByte")
        assert tmp==42,"LibtiffTestbyte was not read as 42"
        tmp = a.GetField("LibtiffTestuint16")
        assert tmp==42,"LibtiffTestuint16 was not read as 42"
        tmp = a.GetField("LibtiffTestMultiuint32")
        assert tmp==[1,2,3,4,5,6,7,8,9,10],"LibtiffTestMultiuint32 was not read as [1,2,3,4,5,6,7,8,9,10]"
        tmp = a.GetField("LibtiffTeststr")
        assert tmp=="FAKE","LibtiffTeststr was not read as 'FAKE'"
        tmp = a.GetField("PRIMARYCHROMATICITIES")
        assert tmp==[1.0,2.0,3.0,4.0,5.0,6.0],"PrimaryChromaticities was not read as [1.0,2.0,3.0,4.0,5.0,6.0]"
        print "Tag Read: SUCCESS"
    # Define a C structure that says how each tag should be used
    test_tags = [
        TIFFFieldInfo(40100, 1, 1, TIFFDataType.TIFF_BYTE, FIELD_CUSTOM, True, False, "LibtiffTestByte"),
        TIFFFieldInfo(40103, 10, 10, TIFFDataType.TIFF_LONG, FIELD_CUSTOM, True, False, "LibtiffTestMultiuint32"),
        TIFFFieldInfo(40102, 1, 1, TIFFDataType.TIFF_SHORT, FIELD_CUSTOM, True, False, "LibtiffTestuint16"),
        TIFFFieldInfo(40101, -1, -1, TIFFDataType.TIFF_ASCII, FIELD_CUSTOM, True, False, "LibtiffTeststr")
    ]
    # Add tags to the libtiff library
    test_extender = add_tags(test_tags) # Keep pointer to extender object, no gc
    _tag_write()
    _tag_read()
def _test_tile_write():
    """Write 1D, 2D and 3D arrays as tiled directories of one TIFF file.

    All three directories use the same 512x528 tile geometry; the asserted
    return values are the total number of bytes written, i.e. tile size
    times the number of tiles each array requires.
    """
    a = TIFF.open("/tmp/libtiff_test_tile_write.tiff", "w")
    # 1D Arrays (doesn't make much sense to tile)
    assert a.SetField("ImageWidth", 3000)==1,"could not set ImageWidth tag" #1D,2D,3D
    assert a.SetField("ImageLength", 1)==1,"could not set ImageLength tag" #1D
    assert a.SetField("ImageDepth", 1)==1,"could not set ImageDepth tag" #1D,2D
    # Must be multiples of 16
    assert a.SetField("TileWidth", 512)==1,"could not set TileWidth tag"
    assert a.SetField("TileLength", 528)==1,"could not set TileLength tag"
    assert a.SetField("BitsPerSample", 8)==1,"could not set BitsPerSample tag"
    assert a.SetField("Compression", COMPRESSION_NONE)==1,"could not set Compression tag"
    data_array = np.array(range(500)*6).astype(np.uint8)
    assert a.write_tiles(data_array)==(512*528)*6,"could not write tile images" #1D
    a.WriteDirectory()
    print "Tile Write: Wrote array of shape %r" % (data_array.shape,)
    # 2D Arrays
    assert a.SetField("ImageWidth", 3000)==1,"could not set ImageWidth tag" #1D,2D,3D
    assert a.SetField("ImageLength", 2500)==1,"could not set ImageLength tag" #2D,3D
    assert a.SetField("ImageDepth", 1)==1,"could not set ImageDepth tag" #1D,2D
    # Must be multiples of 16
    assert a.SetField("TileWidth", 512)==1,"could not set TileWidth tag"
    assert a.SetField("TileLength", 528)==1,"could not set TileLength tag"
    assert a.SetField("BitsPerSample", 8)==1,"could not set BitsPerSample tag"
    assert a.SetField("Compression", COMPRESSION_NONE)==1,"could not set Compression tag"
    data_array = np.tile(range(500), (2500,6)).astype(np.uint8)
    assert a.write_tiles(data_array)==(512*528) * 5 * 6,"could not write tile images" #2D
    a.WriteDirectory()
    print "Tile Write: Wrote array of shape %r" % (data_array.shape,)
    # 3D Arrays
    assert a.SetField("ImageWidth", 3000)==1,"could not set ImageWidth tag" #1D,2D,3D
    assert a.SetField("ImageLength", 2500)==1,"could not set ImageLength tag" #2D,3D
    assert a.SetField("ImageDepth", 3)==1,"could not set ImageDepth tag" #3D
    # Must be multiples of 16
    assert a.SetField("TileWidth", 512)==1,"could not set TileWidth tag"
    assert a.SetField("TileLength", 528)==1,"could not set TileLength tag"
    assert a.SetField("BitsPerSample", 8)==1,"could not set BitsPerSample tag"
    assert a.SetField("Compression", COMPRESSION_NONE)==1,"could not set Compression tag"
    data_array = np.tile(range(500), (3,2500,6)).astype(np.uint8)
    assert a.write_tiles(data_array)==(512*528) * 5 * 6 * 3,"could not write tile images" #3D
    a.WriteDirectory()
    print "Tile Write: Wrote array of shape %r" % (data_array.shape,)
    print "Tile Write: SUCCESS"
def _test_tile_read(filename=None):
import sys
if filename is None:
if len(sys.argv) != 2:
print "Run `libtiff.py <filename>` for testing."
return
filename = sys.argv[1]
a = TIFF.open(filename, "r")
# 1D Arrays (doesn't make much sense to tile)
a.SetDirectory(0)
iwidth = tmp = a.GetField("ImageWidth")
assert tmp is not None,"ImageWidth tag must be defined for reading tiles"
ilength = tmp = a.GetField("ImageLength")
assert tmp is not None,"ImageLength tag must be defined for reading tiles"
idepth = tmp = a.GetField("ImageDepth")
assert tmp is not None,"ImageDepth tag must be defined for reading tiles"
tmp = a.GetField("TileWidth")
assert tmp is not None,"TileWidth tag must be defined for reading tiles"
tmp = a.GetField("TileLength")
assert tmp is not None,"TileLength tag must be defined for reading tiles"
tmp = a.GetField("BitsPerSample")
assert tmp is not None,"BitsPerSample tag must be defined for reading tiles"
tmp = a.GetField("Compression")
assert tmp is not None,"Compression tag must be defined for reading tiles"
data_array = a.read_tiles()
print "Tile Read: Read array of shape %r" % (data_array.shape,)
assert data_array.shape==(iwidth,),"tile data read was the wrong shape"
test_array = np.array(range(500)*6).astype(np.uint8).flatten()
assert np.nonzero(data_array.flatten() != test_array)[0].shape[0] == 0,"tile data read was not the same as the expected data"
print "Tile Read: Data is the same as expected from tile write test"
# 2D Arrays (doesn't make much sense to tile)
a.SetDirectory(1)
iwidth = tmp = a.GetField("ImageWidth")
assert tmp is not None,"ImageWidth tag must be defined for reading tiles"
ilength = tmp = a.GetField("ImageLength")
assert tmp is not None,"ImageLength tag must be defined for reading tiles"
idepth = tmp = a.GetField("ImageDepth")
assert tmp is not None,"ImageDepth tag must be defined for reading tiles"
tmp = a.GetField("TileWidth")
assert tmp is not None,"TileWidth tag must be defined for reading | |
the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].interaction[3].code),
force_bytes("delete"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].interaction[3].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].interaction[4].code),
force_bytes("history-instance"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].interaction[4].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].interaction[5].code),
force_bytes("history-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].interaction[5].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].interaction[6].code),
force_bytes("create"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].interaction[6].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].interaction[7].code),
force_bytes("search-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].interaction[7].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].profile),
force_bytes("http://hl7.org/fhir/StructureDefinition/AppointmentResponse"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].referencePolicy[0]),
force_bytes("literal"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].referencePolicy[1]),
force_bytes("logical"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchInclude[0]),
force_bytes("AppointmentResponse.actor"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchInclude[1]),
force_bytes("AppointmentResponse.practitioner"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchInclude[2]),
force_bytes("AppointmentResponse.patient"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchInclude[3]),
force_bytes("AppointmentResponse.appointment"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchInclude[4]),
force_bytes("AppointmentResponse.location"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[0].definition),
force_bytes(
"http://hl7.org/fhir/SearchParameter/AppointmentResponse-actor"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[0].documentation),
force_bytes(
"The Person, Location/HealthcareService or Device that this appointment response replies for"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[0].name),
force_bytes("actor"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[0].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[1].definition),
force_bytes(
"http://hl7.org/fhir/SearchParameter/AppointmentResponse-identifier"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[1].documentation),
force_bytes("An Identifier in this appointment response"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[1].name),
force_bytes("identifier"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[1].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[2].definition),
force_bytes(
"http://hl7.org/fhir/SearchParameter/AppointmentResponse-practitioner"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[2].documentation),
force_bytes("This Response is for this Practitioner"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[2].name),
force_bytes("practitioner"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[2].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[3].definition),
force_bytes(
"http://hl7.org/fhir/SearchParameter/AppointmentResponse-part-status"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[3].documentation),
force_bytes("The participants acceptance status for this appointment"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[3].name),
force_bytes("part-status"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[3].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[4].definition),
force_bytes(
"http://hl7.org/fhir/SearchParameter/AppointmentResponse-patient"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[4].documentation),
force_bytes("This Response is for this Patient"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[4].name),
force_bytes("patient"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[4].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[5].definition),
force_bytes(
"http://hl7.org/fhir/SearchParameter/AppointmentResponse-appointment"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[5].documentation),
force_bytes("The appointment that the response is attached to"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[5].name),
force_bytes("appointment"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[5].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[6].definition),
force_bytes(
"http://hl7.org/fhir/SearchParameter/AppointmentResponse-location"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[6].documentation),
force_bytes("This Response is for this Location"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[6].name),
force_bytes("location"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchParam[6].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].searchRevInclude[0]),
force_bytes("ImagingStudy.basedon"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[5].type),
force_bytes("AppointmentResponse"),
)
self.assertTrue(inst.rest[0].resource[6].conditionalCreate)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].conditionalDelete),
force_bytes("multiple"),
)
self.assertTrue(inst.rest[0].resource[6].conditionalUpdate)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[0].code),
force_bytes("read"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[0].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[1].code),
force_bytes("vread"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[1].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[2].code),
force_bytes("update"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[2].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[3].code),
force_bytes("delete"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[3].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[4].code),
force_bytes("history-instance"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[4].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[5].code),
force_bytes("history-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[5].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[6].code),
force_bytes("create"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[6].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[7].code),
force_bytes("search-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].interaction[7].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].profile),
force_bytes("http://hl7.org/fhir/StructureDefinition/AuditEvent"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].referencePolicy[0]),
force_bytes("literal"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].referencePolicy[1]),
force_bytes("logical"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchInclude[0]),
force_bytes("AuditEvent.agent"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchInclude[1]),
force_bytes("AuditEvent.source"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchInclude[2]),
force_bytes("AuditEvent.patient"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchInclude[3]),
force_bytes("AuditEvent.entity"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[0].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/AuditEvent-date"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[0].documentation),
force_bytes("Time when the event was recorded"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[0].name),
force_bytes("date"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[0].type),
force_bytes("date"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[1].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/AuditEvent-entity-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[1].documentation),
force_bytes("Type of entity involved"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[1].name),
force_bytes("entity-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[1].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[2].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/AuditEvent-agent"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[2].documentation),
force_bytes("Identifier of who"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[2].name),
force_bytes("agent"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[2].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[3].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/AuditEvent-address"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[3].documentation),
force_bytes("Identifier for the network access point of the user device"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[3].name),
force_bytes("address"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[3].type),
force_bytes("string"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[4].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/AuditEvent-entity-role"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[4].documentation),
force_bytes("What role the entity played"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[4].name),
force_bytes("entity-role"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[4].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[5].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/AuditEvent-source"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[5].documentation),
force_bytes("The identity of source detecting the event"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[5].name),
force_bytes("source"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[5].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[6].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/AuditEvent-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[6].documentation),
force_bytes("Type/identifier of event"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[6].name),
force_bytes("type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[6].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[7].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/AuditEvent-altid"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[7].documentation),
force_bytes("Alternative User identity"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[7].name),
force_bytes("altid"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[7].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[8].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/AuditEvent-site"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[8].documentation),
force_bytes("Logical source location within the enterprise"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[8].name),
force_bytes("site"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[8].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[9].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/AuditEvent-agent-name"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[9].documentation),
force_bytes("Human friendly name for the agent"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[9].name),
force_bytes("agent-name"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].searchParam[9].type),
force_bytes("string"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[6].type), force_bytes("AuditEvent")
)
self.assertTrue(inst.rest[0].resource[7].conditionalCreate)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].conditionalDelete),
force_bytes("multiple"),
)
self.assertTrue(inst.rest[0].resource[7].conditionalUpdate)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[0].code),
force_bytes("read"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[0].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[1].code),
force_bytes("vread"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[1].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[2].code),
force_bytes("update"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[2].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[3].code),
force_bytes("delete"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[3].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[4].code),
force_bytes("history-instance"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[4].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[5].code),
force_bytes("history-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[5].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[6].code),
force_bytes("create"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[6].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[7].code),
force_bytes("search-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].interaction[7].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].profile),
force_bytes("http://hl7.org/fhir/StructureDefinition/Basic"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].referencePolicy[0]),
force_bytes("literal"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].referencePolicy[1]),
force_bytes("logical"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchInclude[0]),
force_bytes("Basic.subject"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchInclude[1]),
force_bytes("Basic.patient"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchInclude[2]),
force_bytes("Basic.author"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[0].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/Basic-identifier"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[0].documentation),
force_bytes("Business identifier"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[0].name),
force_bytes("identifier"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[0].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[1].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/Basic-code"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[1].documentation),
force_bytes("Kind of Resource"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[1].name),
force_bytes("code"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[1].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[2].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/Basic-subject"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[2].documentation),
force_bytes("Identifies the focus of this resource"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[2].name),
force_bytes("subject"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[2].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[3].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/Basic-created"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[3].documentation),
force_bytes("When created"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[3].name),
force_bytes("created"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[3].type),
force_bytes("date"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[4].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/Basic-patient"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[4].documentation),
force_bytes("Identifies the focus of this resource"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[4].name),
force_bytes("patient"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[4].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[5].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/Basic-author"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[5].documentation),
force_bytes("Who created"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[5].name),
force_bytes("author"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].searchParam[5].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[7].type), force_bytes("Basic")
)
self.assertTrue(inst.rest[0].resource[8].conditionalCreate)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].conditionalDelete),
force_bytes("multiple"),
)
self.assertTrue(inst.rest[0].resource[8].conditionalUpdate)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[0].code),
force_bytes("read"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[0].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[1].code),
force_bytes("vread"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[1].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[2].code),
force_bytes("update"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[2].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[3].code),
force_bytes("delete"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[3].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[4].code),
force_bytes("history-instance"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[4].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[5].code),
force_bytes("history-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[5].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[6].code),
force_bytes("create"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[6].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[7].code),
force_bytes("search-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].interaction[7].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].profile),
force_bytes("http://hl7.org/fhir/StructureDefinition/Binary"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].referencePolicy[0]),
force_bytes("literal"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].referencePolicy[1]),
force_bytes("logical"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[8].type), force_bytes("Binary")
)
self.assertTrue(inst.rest[0].resource[9].conditionalCreate)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].conditionalDelete),
force_bytes("multiple"),
)
self.assertTrue(inst.rest[0].resource[9].conditionalUpdate)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[0].code),
force_bytes("read"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[0].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[1].code),
force_bytes("vread"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[1].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[2].code),
force_bytes("update"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[2].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[3].code),
force_bytes("delete"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[3].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[4].code),
force_bytes("history-instance"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[4].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[5].code),
force_bytes("history-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[5].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[6].code),
force_bytes("create"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[9].interaction[6].documentation),
force_bytes(
"Implemented per the specification (or Insert other doco here)"
| |
layers_dict["decoder_deconv_1"] = decoder_deconv_1
layers_dict["decoder_deconv_2"] = decoder_deconv_2
layers_dict["decoder_deconv_3_upsamp"] = decoder_deconv_3_upsamp
layers_dict["decoder_mean_squash"] = decoder_mean_squash
self._layers_dict = layers_dict
self.encoder = None
self.decoder = None
# entire model
self.vae = models.Model(x, y)
super().create_model()
def train_model(self) -> None:
    """Train the VAE, then assemble standalone encoder/decoder models.

    Training itself is delegated to the superclass; afterwards the
    encoder and decoder sub-models are rebuilt from the layers/tensors
    saved in ``self._layers_dict`` by ``create_model``.
    """
    super().train_model()
    # Build encoder and decoder from saved layers
    layers_dict = self._layers_dict
    # BUG FIX: the original read ``layers["x"]`` / ``layers["z_mean"]``,
    # which subscripts the Keras ``layers`` *module* and raises TypeError.
    # The saved tensors live in ``layers_dict``.
    # NOTE(review): assumes create_model stored "x" and "z_mean"
    # entries (set before this chunk) -- TODO confirm.
    encoder = models.Model(layers_dict["x"], layers_dict["z_mean"])
    latent_dim = self.get_latent_dim()
    decoder_input = layers.Input(shape=(latent_dim,))
    # Re-apply the saved decoder layers to a fresh latent input so the
    # decoder can be used standalone (shares weights with the VAE).
    _hid_decoded = layers_dict["decoder_hid"](decoder_input)
    _up_decoded = layers_dict["decoder_upsample"](_hid_decoded)
    _reshape_decoded = layers_dict["decoder_reshape"](_up_decoded)
    _deconv_1_decoded = layers_dict["decoder_deconv_1"](_reshape_decoded)
    _deconv_2_decoded = layers_dict["decoder_deconv_2"](_deconv_1_decoded)
    _x_decoded_relu = layers_dict["decoder_deconv_3_upsamp"](_deconv_2_decoded)
    _x_decoded_mean_squash = layers_dict["decoder_mean_squash"](_x_decoded_relu)
    decoder = models.Model(decoder_input, _x_decoded_mean_squash)
    self.encoder = encoder
    self.decoder = decoder
class DFCPictureVAE(VAEContainer):
    """Deep-Feature-Consistent (DFC) VAE for image data.

    The reconstruction term of the loss is computed in VGG19 feature
    space ("perceptual loss") rather than in pixel space.  The model is
    unconditional (no label input).
    """

    def __init__(self, model_config: str, config: dict, verbose: int = 1):
        # Unconditional model: no label input.
        self._conditional = False
        super().__init__(model_config, config, verbose)

    def create_model(self, trained: bool = False) -> None:
        """Build encoder, decoder and combined VAE and attach the DFC loss.

        Args:
            trained (bool, optional): Indicates if the model was already
                trained before.  Not used here.  Defaults to False.
        """
        self.check_if_data_shape_is_present()
        latent_size = self.get_latent_dim()
        input_shape = self._data_shape
        # loss weighting parameters
        alpha = 0.5  # weight of the KL-divergence term
        beta = 1  # weight of the (perceptual) reconstruction term
        # encoder params
        encoder_filters = [32, 64, 128, 256]
        encoder_kernels = (4, 4)
        encoder_stride = 2
        encoder_batch_norm = True
        encoder_num_layers = 4
        # decoder params
        decoder_num_layers = 4
        decoder_filters = [128, 64, 32, 3]
        decoder_kernels = (3, 3)
        decoder_stride = 1
        decoder_batch_norm = [True, True, True, False]
        decoder_activations = [True, True, True, False]
        decoder_upsampling_size = (2, 2)
        # build encoder
        encoder_inp = layers.Input(shape=input_shape, name="encoder_Input")
        encoder_deep = model_helper.get_deep_conv(
            "encoder",
            encoder_num_layers,
            encoder_inp,
            encoder_filters,
            encoder_kernels,
            encoder_stride,
            encoder_batch_norm,
        )
        encoder_flat = layers.Flatten(name="encoder_Flatten")(encoder_deep)
        latent_mu = layers.Dense(latent_size, name="latent_mu")(encoder_flat)
        latent_sigma = layers.Dense(latent_size, name="latent_sigma")(encoder_flat)
        # Reparameterization trick: sample z from (mu, sigma).
        z = layers.Lambda(model_helper.sampling, output_shape=(latent_size,), name="z")(
            [latent_mu, latent_sigma]
        )
        encoder = models.Model(
            encoder_inp, [latent_mu, latent_sigma, z], name="encoder"
        )
        # build decoder
        decoder_inp = layers.Input(z.shape[1:], name="decoder_Input")
        decoder_dense = layers.Dense(4096, name="decoder_Dense")(decoder_inp)
        decoder_reshape = layers.Reshape((4, 4, 256), name="decoder_Reshape")(
            decoder_dense
        )
        decoder_outp = model_helper.get_deep_conv_with_upsampling(
            "decoder",
            decoder_num_layers,
            decoder_reshape,
            decoder_upsampling_size,
            decoder_filters,
            decoder_kernels,
            decoder_stride,
            decoder_batch_norm,
            decoder_activations,
        )
        decoder = models.Model(decoder_inp, decoder_outp, name="decoder")
        # Full VAE: decode the sampled latent (last encoder output).
        vae_outp = decoder(encoder(encoder_inp)[-1])
        vae = models.Model(encoder_inp, vae_outp, name="vae")
        # Perceptual (DFC) loss: compare the activations of selected
        # VGG19 layers for the input and its reconstruction.
        # (This note was a stray no-op bare-string statement in the
        # original; converted to a comment -- no behavioral change.)
        selected_vgg_layers = ["block1_conv1", "block1_conv2", "block2_conv1"]
        vgg19 = tf.keras.applications.VGG19(include_top=True, weights="imagenet")
        outputs = [
            vgg19.get_layer(layer_name).output for layer_name in selected_vgg_layers
        ]
        model = models.Model(vgg19.input, outputs)
        # Resize to VGG19's expected 224x224 input.  These assignments
        # rebind the locals only; the vae model itself is unchanged.
        encoder_inp = tf.image.resize(encoder_inp, [224, 224])
        vae_outp = tf.image.resize(vae_outp, [224, 224])
        h1_list = model(encoder_inp)
        h2_list = model(vae_outp)
        # Sum of squared feature-space differences over all selected layers.
        reconstruction_loss = 0.0
        for h1, h2 in zip(h1_list, h2_list):
            h1 = K.batch_flatten(h1)
            h2 = K.batch_flatten(h2)
            reconstruction_loss = reconstruction_loss + K.sum(
                K.square(h1 - h2), axis=-1
            )
        kl_loss = -0.5 * K.sum(
            1 + latent_sigma - K.square(latent_mu) - K.exp(latent_sigma),
            axis=-1,
        )
        vae_loss = beta * reconstruction_loss + alpha * kl_loss
        # Either a per-example loss vector or its scalar mean, per config.
        vae.add_loss(vae_loss if self.get_per_example_loss() else K.mean(vae_loss))
        self.encoder = encoder
        self.decoder = decoder
        self.vae = vae
        super().create_model()

    def train_model(self) -> None:
        """Train the VAE via the superclass implementation."""
        super().train_model()
class BasicPictureVAE(VAEContainer):
    """Conditional fully-connected VAE for flattened image data.

    Based on the Keras VAE example (https://keras.io/examples/generative/vae/),
    altered to a *conditional* VAE: the one-hot label is concatenated to
    both the encoder input and the sampled latent code.
    """

    def __init__(self, model_config: str, config: dict, verbose: int = 1):
        # Conditional model: labels are part of the model inputs.
        self._conditional = True
        super().__init__(model_config, config, verbose)

    def create_model(self, trained: bool = False) -> None:
        """Create VAE model

        Args:
            trained (bool, optional): Indicates if model was already trained before.
                Not used in GAN subclass. Defaults to False.

        Raises:
            Exception: Raised when data is not present and data shape cannot be determined.
        """
        self.check_if_data_shape_is_present()
        dim_y = self.data.get_num_unique_labels()  # one-hot label dimension
        dim_z = self.get_latent_dim()  # latent dimension
        # Taken from keras VAE Example https://keras.io/examples/generative/vae/
        # Altered to conditional VAE
        flat_img_size = np.prod(self._data_shape)  # full flattened size, incl. channels
        intermediate_dim = 512
        # Separate inputs for sample and label
        input_x = layers.Input(shape=(flat_img_size,), name="Input_x")
        input_y = layers.Input(shape=(dim_y,), name="Input_y")
        # VAE model = encoder + decoder
        # build encoder model
        # Concatenate layer instead of single input layer
        # inputs = layers.Input(shape=input_shape, name='encoder_input')
        inputs = layers.concatenate([input_x, input_y], axis=1)
        x = layers.Dense(intermediate_dim, activation="relu")(inputs)
        z_mean = layers.Dense(dim_z, name="z_mean")(x)
        z_log_var = layers.Dense(dim_z, name="z_log_var")(x)
        # use reparameterization trick to push the sampling out as input
        # note that "output_shape" isn't necessary with the TensorFlow backend
        z = layers.Lambda(model_helper.sampling, output_shape=(dim_z,), name="z")(
            [z_mean, z_log_var]
        )
        # Condition the latent code on the label before decoding.
        z_cond = layers.concatenate([z, input_y], axis=1)
        # instantiate encoder model
        encoder = models.Model(
            [input_x, input_y], [z_mean, z_log_var, z_cond], name="encoder"
        )
        # build decoder model
        latent_inputs = layers.Input(shape=(dim_z + dim_y,), name="z_sampling")
        x = layers.Dense(intermediate_dim, activation="relu")(latent_inputs)
        outputs = layers.Dense(flat_img_size, activation="sigmoid")(x)
        # instantiate decoder model
        decoder = models.Model(latent_inputs, outputs, name="decoder")
        # instantiate VAE model (decode the conditioned latent, index 2)
        outputs = decoder(encoder([input_x, input_y])[2])
        # Optionally wrap in a gradient-logging model variant.
        if log_gradients := self.get_log_gradients():
            gradient_file = os.path.join(self.get_model_dir(), "gradient_file")
            if log_gradients == "memory_efficient":
                cvae = model_helper.MemoryEfficientGradientModel(
                    [input_x, input_y],
                    outputs,
                    name="vae_mlp",
                    gradient_file=gradient_file,
                )
            else:
                cvae = model_helper.GradientModel(
                    [input_x, input_y],
                    outputs,
                    name="vae_mlp",
                    gradient_file=gradient_file,
                )
        else:
            cvae = models.Model([input_x, input_y], outputs, name="vae_mlp")
        # Per-pixel binary cross-entropy, scaled back up to a sum over pixels.
        reconstruction_loss = (
            losses.binary_crossentropy(input_x, outputs) * flat_img_size
        )
        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = -0.5 * K.sum(kl_loss, axis=-1)
        vae_loss = reconstruction_loss + kl_loss
        # Either a per-example loss vector or its scalar mean, per config.
        cvae.add_loss(vae_loss if self.get_per_example_loss() else K.mean(vae_loss))
        self.encoder = encoder
        self.decoder = decoder
        self.vae = cvae
        super().create_model()

    def train_model(self) -> None:
        """Flatten the image data and train via the superclass."""
        x_train, _, x_val, y_train, _, y_val = self.data.unravel()
        # ? reshape
        # TODO How to handle color channel (cifar)?
        x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
        x_val = x_val.reshape((len(x_val), np.prod(x_val.shape[1:])))
        data = {"x_train": x_train, "y_train": y_train, "x_val": x_val, "y_val": y_val}
        super().train_model(data)
class PlainPictureVAE(VAEContainer):
    """Unconditional convolutional VAE for image data.

    Same encoder/decoder topology as DFCPictureVAE, but with a plain
    pixel-space MSE reconstruction loss and batch normalization disabled.
    """

    def __init__(self, model_config: str, config: dict, verbose: int = 1):
        # Unconditional model: no label input.
        self._conditional = False
        super().__init__(model_config, config, verbose)

    def create_model(self, trained: bool = False) -> None:
        """Build encoder, decoder and combined VAE with an MSE + KL loss.

        Args:
            trained (bool, optional): Indicates if the model was already
                trained before.  Not used here.  Defaults to False.
        """
        self.check_if_data_shape_is_present()
        latent_size = self.get_latent_dim()
        input_shape = self._data_shape
        # loss weighting parameters
        alpha = 1  # kl-loss
        beta = 1  # reconstruction-loss
        # encoder params
        encoder_filters = [32, 64, 128, 256]
        encoder_kernels = (4, 4)
        encoder_stride = 2
        encoder_batch_norm = False  # True
        encoder_num_layers = 4
        # decoder params
        decoder_num_layers = 4
        decoder_filters = [128, 64, 32, 3]
        decoder_kernels = (3, 3)
        decoder_stride = 1
        decoder_batch_norm = False  # [True, True, True, False]
        decoder_activations = [True, True, True, False]
        decoder_upsampling_size = (2, 2)
        # build encoder
        encoder_inp = layers.Input(shape=input_shape, name="encoder_Input")
        encoder_deep = model_helper.get_deep_conv(
            "encoder",
            encoder_num_layers,
            encoder_inp,
            encoder_filters,
            encoder_kernels,
            encoder_stride,
            encoder_batch_norm,
        )
        encoder_flat = layers.Flatten(name="encoder_Flatten")(encoder_deep)
        latent_mu = layers.Dense(latent_size, name="latent_mu")(encoder_flat)
        latent_sigma = layers.Dense(latent_size, name="latent_sigma")(encoder_flat)
        # Reparameterization trick: sample z from (mu, sigma).
        z = layers.Lambda(model_helper.sampling, output_shape=(latent_size,), name="z")(
            [latent_mu, latent_sigma]
        )
        encoder = models.Model(
            encoder_inp, [latent_mu, latent_sigma, z], name="encoder"
        )
        # build decoder
        decoder_inp = layers.Input(z.shape[1:], name="decoder_Input")
        decoder_dense = layers.Dense(4096, name="decoder_Dense")(decoder_inp)
        decoder_reshape = layers.Reshape((4, 4, 256), name="decoder_Reshape")(
            decoder_dense
        )
        decoder_outp = model_helper.get_deep_conv_with_upsampling(
            "decoder",
            decoder_num_layers,
            decoder_reshape,
            decoder_upsampling_size,
            decoder_filters,
            decoder_kernels,
            decoder_stride,
            decoder_batch_norm,
            decoder_activations,
        )
        decoder = models.Model(decoder_inp, decoder_outp, name="decoder")
        # Full VAE: decode the sampled latent (last encoder output).
        vae_outp = decoder(encoder(encoder_inp)[-1])
        # Optionally wrap in a gradient-logging model variant.
        if log_gradients := self.get_log_gradients():
            gradient_file = os.path.join(self.get_model_dir(), "gradient_file")
            if log_gradients == "memory_efficient":
                vae = model_helper.MemoryEfficientGradientModel(
                    encoder_inp,
                    vae_outp,
                    name="vae",
                    gradient_file=gradient_file,
                )
            else:
                vae = model_helper.GradientModel(
                    encoder_inp,
                    vae_outp,
                    name="vae",
                    gradient_file=gradient_file,
                )
        else:
            vae = models.Model(encoder_inp, vae_outp, name="vae")
        # Flatten input and prediction for a per-example pixel loss.
        flat_input = tf.reshape(encoder_inp, (-1, np.prod(input_shape)))
        flat_prediction = tf.reshape(vae_outp, (-1, np.prod(input_shape)))
        # reconstruction_loss = losses.mean_squared_logarithmic_error(
        #     flat_input, flat_prediction
        # ) * np.prod(input_shape)
        # Mean squared error, scaled back up to a sum over pixels.
        reconstruction_loss = losses.mean_squared_error(
            flat_input, flat_prediction
        ) * np.prod(input_shape)
        kl_loss = -0.5 * K.sum(
            1 + latent_sigma - K.square(latent_mu) - K.exp(latent_sigma),
            axis=-1,
        )
        vae_loss = beta * reconstruction_loss + alpha * kl_loss
        # Either a per-example loss vector or its scalar mean, per config.
        vae.add_loss(vae_loss if self.get_per_example_loss() else K.mean(vae_loss))
        self.encoder = encoder
        self.decoder = decoder
        self.vae = vae
        super().create_model()

    def train_model(self) -> None:
        """Train the VAE via the superclass implementation."""
        super().train_model()
class ConditionalPlainPictureVAE(VAEContainer):
def __init__(self, model_config: str, config: dict, verbose: int = 1):
self._conditional = True
super().__init__(model_config, config, verbose)
def create_model(self, trained: bool = False) -> None:
self.check_if_data_shape_is_present()
latent_size = self.get_latent_dim()
input_shape = self._data_shape
dim_y = self.data.get_num_unique_labels()
lower_bound = self.get_lower_bound()
# loss weighting parameters
alpha = 1 # kl-loss
beta = 1 # reconstruction-loss
# encoder params
encoder_filters = [32, 64, 128, 256]
encoder_kernels = (4, 4)
encoder_stride = 2
encoder_batch_norm = False
encoder_num_layers = 4
# decoder params
decoder_num_layers = 4
decoder_filters = [128, 64, 32, 3]
decoder_kernels = (3, 3)
decoder_stride = 1
decoder_batch_norm = False # [True, True, True, False]
decoder_activations = [True, True, True, False]
decoder_upsampling_size = (2, 2)
# build encoder
encoder_inp = layers.Input(shape=input_shape, name="encoder_input")
label_inp = layers.Input(shape=(dim_y,), name="label_input")
encoder_deep = model_helper.get_deep_conv(
"encoder",
encoder_num_layers,
encoder_inp,
encoder_filters,
encoder_kernels,
encoder_stride,
encoder_batch_norm,
)
encoder_flat = layers.Flatten(name="encoder_Flatten")(encoder_deep)
# conc_enc_flat = layers.concatenate([encoder_flat, label_inp], axis=1)
conc_enc_flat = encoder_flat
if (lower_bound is not None) and (lower_bound > 0):
latent_mu = layers.Dense(
latent_size,
name="latent_mu",
activation=model_helper.custom_scaled_tanh,
)(conc_enc_flat)
latent_sigma = layers.Dense(
latent_size,
name="latent_sigma",
activation=model_helper.sigma_bound(lower_bound=lower_bound),
)(conc_enc_flat)
else:
latent_mu = layers.Dense(latent_size, name="latent_mu")(conc_enc_flat)
latent_sigma = layers.Dense(latent_size, name="latent_sigma")(conc_enc_flat)
z = layers.Lambda(model_helper.sampling, output_shape=(latent_size,), name="z")(
[latent_mu, latent_sigma]
)
z = layers.concatenate([z, label_inp], axis=1)
encoder = models.Model(
[encoder_inp, label_inp], [latent_mu, latent_sigma, z], name="encoder"
)
# build decoder
decoder_inp = layers.Input(z.shape[1:], name="decoder_input")
decoder_dense = layers.Dense(4096, name="decoder_Dense")(decoder_inp)
decoder_reshape = layers.Reshape((4, 4, 256), name="decoder_Reshape")(
decoder_dense
)
decoder_outp = model_helper.get_deep_conv_with_upsampling(
"decoder",
decoder_num_layers,
decoder_reshape,
decoder_upsampling_size,
decoder_filters,
decoder_kernels,
decoder_stride,
decoder_batch_norm,
decoder_activations,
)
decoder = models.Model(decoder_inp, decoder_outp, name="decoder")
vae_outp = | |
address, items in blocks:
if address <= start <= endex <= address + len(items):
return items[(start - address):(endex - address)]
else:
raise ValueError('contiguous slice not found')
else:
items = []
for address in range(start, endex, step):
index = locate_at(blocks, address)
if index is None:
raise ValueError('contiguous slice not found')
block = blocks[index]
items.append(block[1][address - block[0]])
items = self.items_join(items)
return items
else:
key = key.__index__()
if key < 0:
key %= self.endex
index = locate_at(blocks, key)
if index is None:
return self.items_type()
else:
address, items = blocks[index]
key -= address
return items[key]
def __setitem__(
    self: 'Memory',
    key: Union[slice, int],
    value: Optional[ItemSequence],
) -> None:
    r"""Writes data.

    Arguments:
        key (slice or int):
            Selection range or address.

        value (items):
            Items to write at the selection address.
            If `value` is null, the range is cleared.

    Note:
        Setting a single item requires `value` to be of :attr:`items_type`
        with unitary length.

    Note:
        This method is not optimized for a :class:`slice` where its `step`
        is an :obj:`int` different from 1.

    See Also:
        :meth:`Memory.write`
        :meth:`Memory.clear`

    Examples:
        +---+---+---+---+---+---+---+---+---+
        | 4 | 5 | 6 | 7 | 8 | 9 | 10| 11| 12|
        +===+===+===+===+===+===+===+===+===+
        |   |[A | B | C]|   |[x | y | z]|   |
        +---+---+---+---+---+---+---+---+---+
        |   |[A]|   |   |   |   |[y | z]|   |
        +---+---+---+---+---+---+---+---+---+
        |   |[A | B | C]|   |[x | y | z]|   |
        +---+---+---+---+---+---+---+---+---+
        |   |[A]|   |[C]|   |   | y | z]|   |
        +---+---+---+---+---+---+---+---+---+
        |   |[A | 1 | C]|   |[2 | y | z]|   |
        +---+---+---+---+---+---+---+---+---+

        >>> memory = Memory(items_type=str)
        >>> memory.blocks = [(5, 'ABC'), (9, 'xyz')]
        >>> memory[7:10] = None
        >>> memory.blocks
        [(5, 'AB'), (10, 'yz')]
        >>> memory[7] = 'C'
        >>> memory[-3] = 'x'
        >>> memory.blocks == [(5, 'ABC'), (9, 'xyz')]
        True
        >>> memory[6:12:3] = None
        >>> memory.blocks
        [(5, 'A'), (7, 'C'), (10, 'yz')]
        >>> memory[6:12:3] = '123'
        >>> memory.blocks
        [(5, 'A1C'), (9, '2yz')]

        ~~~

        +---+---+---+---+---+---+---+---+---+---+---+---+
        | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10| 11|
        +===+===+===+===+===+===+===+===+===+===+===+===+
        |   |   |   |   |   |[A | B | C]|   |[x | y | z]|
        +---+---+---+---+---+---+---+---+---+---+---+---+
        |[$]|   |[A | B | C]|   |[x | y | z]|   |   |   |
        +---+---+---+---+---+---+---+---+---+---+---+---+
        |[$]|   |[A | B]|[4 | 5 | 6]|[7 | 8]|[y | z]|   |
        +---+---+---+---+---+---+---+---+---+---+---+---+
        |[$]|   |[A | B]|[4 | 5]|[< | >]|[8]|[y | z]|   |
        +---+---+---+---+---+---+---+---+---+---+---+---+

        >>> memory = Memory(items_type=str, automerge=False)
        >>> memory.blocks = [(5, 'ABC'), (9, 'xyz')]
        >>> memory[0:4] = '$'
        >>> memory.blocks
        [(0, '$'), (2, 'ABC'), (6, 'xyz')]
        >>> memory[4:7] = '45678'
        >>> memory.blocks
        [(0, '$'), (2, 'AB'), (4, '456'), (7, '78'), (9, 'yz')]
        >>> memory[6:8] = '<>'
        >>> memory.blocks
        [(0, '$'), (2, 'AB'), (4, '45'), (6, '<>'), (8, '8'), (9, 'yz')]
    """
    blocks = self.blocks
    if isinstance(key, slice):
        start, endex, step = key.start, key.stop, key.step
        length = self.endex
        # Normalize the slice bounds against the current end address.
        start, endex, step = straighten_slice(start, endex, step, length)
        if value:
            if step is None or step == 1:
                length = len(value)
                if length < endex - start:
                    # Fewer items than the range: delete the surplus
                    # addresses (shifting data after them), then write.
                    blocks = delete(blocks, start + length, endex)
                    blocks = write(blocks, (start, value))
                elif endex - start < length:
                    # More items than the range: overwrite the range,
                    # then shift-insert the remaining items at `endex`.
                    split = endex - start
                    blocks = write(blocks, (start, value[:split]))
                    blocks = insert(blocks, (endex, value[split:]))
                else:
                    # Exact fit: plain overwrite.
                    blocks = write(blocks, (start, value))
            else:
                # Stepped write: one item per selected address.
                # BUG FIX: the original used floor division
                # ``(endex - start) // step``, which drops the last
                # selected address whenever the span is not a multiple
                # of ``step``; the number of addresses in
                # ``range(start, endex, step)`` is the ceiling.
                count = min((endex - start + step - 1) // step, len(value))
                for index in range(count):
                    items = value[index:(index + 1)]
                    blocks = write(blocks, (start, items))
                    start += step
        else:
            # Null value: clear the selection instead of writing.
            if step is None or step == 1:
                blocks = clear(blocks, start, endex)
            else:
                for address in range(start, endex, step):
                    blocks = clear(blocks, address, address + 1)
    else:
        key = key.__index__()
        if key < 0:
            # Negative addresses wrap around the end address.
            key %= self.endex
        if value:
            if len(value) != 1:
                raise ValueError('not a single item')
            blocks = write(blocks, (key, value))
        else:
            # Null value clears the single address.
            blocks = clear(blocks, key, key + 1)
    if self.automerge:
        # Coalesce adjacent blocks back into maximal runs.
        blocks = merge(blocks, join=self.items_join)
    self.blocks = blocks
def __delitem__(
self: 'Memory',
key: Union[slice, int],
) -> None:
r"""Deletes data.
Arguments:
key (slice or int):
Deletion range or address.
Note:
This method is not optimized for a :class:`slice` with its `step`
different from either ``None`` or 1.
See Also:
:meth:`Memory.delete`
Examples:
+---+---+---+---+---+---+---+---+---+---+---+---+
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10| 11|
+===+===+===+===+===+===+===+===+===+===+===+===+
| |[A | B | C | D]| |[$]| |[x | y | z]| |
+---+---+---+---+---+---+---+---+---+---+---+---+
| |[A | B | C | y | z]| | | | | | |
+---+---+---+---+---+---+---+---+---+---+---+---+
>>> memory = Memory(items_type=str)
>>> memory.blocks = [(1, 'ABCD'), (6, '$'), (8, 'xyz')]
>>> del memory[4:9]
>>> memory.blocks
[(1, 'ABCyz')]
~~~
+---+---+---+---+---+---+---+---+---+---+---+---+
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10| 11|
+===+===+===+===+===+===+===+===+===+===+===+===+
| |[A | B | C | D]| |[$]| |[x | y | z]| |
+---+---+---+---+---+---+---+---+---+---+---+---+
| |[A | B | C]|[y | z]| | | | | | |
+---+---+---+---+---+---+---+---+---+---+---+---+
>>> memory = Memory(items_type=str, automerge=False)
>>> memory.blocks = [(1, 'ABCD'), (6, '$'), (8, 'xyz')]
>>> del memory[4:9]
>>> memory.blocks
[(1, 'ABC'), (4, 'yz')]
~~~
+---+---+---+---+---+---+---+---+---+---+---+---+
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10| 11|
+===+===+===+===+===+===+===+===+===+===+===+===+
| |[A | B | C | D]| |[$]| |[x | y | z]| |
+---+---+---+---+---+---+---+---+---+---+---+---+
| |[A | B | C | D]| |[$]| |[x | z]| | |
+---+---+---+---+---+---+---+---+---+---+---+---+
| |[A | B | D]| |[$]| |[x | z]| | | |
+---+---+---+---+---+---+---+---+---+---+---+---+
| |[A | D]| | |[x]| | | | | | |
+---+---+---+---+---+---+---+---+---+---+---+---+
>>> memory = Memory(items_type=str)
>>> memory.blocks = [(1, 'ABCD'), (6, '$'), (8, 'xyz')]
>>> del memory[-2]
>>> memory.blocks
[(1, 'ABCD'), (6, '$'), (8, 'xz')]
>>> del memory[3]
>>> memory.blocks
[(1, 'ABD'), (5, '$'), (7, 'xz')]
>>> del memory[2:10:3]
>>> memory.blocks
[(1, 'AD'), (5, 'x')]
"""
blocks = self.blocks
if isinstance(key, slice):
start, endex, step = key.start, key.stop, key.step
length = self.endex
start, endex, step = straighten_slice(start, endex, step, length)
if step is None or step == 1:
blocks = delete(blocks, start, endex)
else:
for address in reversed(range(start, endex, step)):
blocks = delete(blocks, address, address + 1)
else:
key = key.__index__()
if key < 0:
key %= self.endex
blocks = delete(blocks, key, key + 1)
if self.automerge:
blocks = merge(blocks, join=self.items_join)
self.blocks = blocks
def append(
self: 'Memory',
value: ItemSequence,
) -> None:
r"""Appends some items.
Arguments:
value (items):
Items to append.
Note:
Appending a single item requires `value` to be of
:attr:`items_type` with unitary length.
Examples:
>>> memory = Memory(items_type=str)
>>> memory.append('$')
>>> memory.blocks
[(0, '$')]
~~~
>>> memory = Memory(items_type=list)
>>> memory.append([3])
>>> memory.blocks
[(0, [3])]
"""
blocks = self.blocks
if blocks:
start, items = blocks[-1]
items = items + value
blocks[-1] = (start, items)
else:
blocks = [(0, value)]
self.blocks = blocks
def extend(
self: 'Memory',
items: ItemSequence,
) -> None:
r"""Concatenates items.
Equivalent to ``self += items``.
Arguments:
items (items):
Items to append at the end of the current virtual space.
If instance of :class:`list`, it is interpreted as a sequence
of non-overlapping blocks, sorted by start address.
"""
self.__iadd__(items)
@property
def start(self: 'Memory') -> int:
r"""Inclusive start address.
This property holds the inclusive start address of the virtual space.
By default, it is the current minimum inclusive start address of
:attr:`blocks`.
Returns:
int: The inclusive start address, or 0.
Examples:
>>> Memory().start
0
~~~
+---+---+---+---+---+---+---+---+---+
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+===+===+===+===+===+===+===+===+===+
| |[A | B | C]| |[x | y | z]| |
+---+---+---+---+---+---+---+---+---+
>>> memory = Memory(items_type=str)
>>> memory.blocks = [(1, 'ABC'), (5, 'xyz')]
>>> memory.start
1
"""
blocks = self.blocks
if blocks:
start, _ = blocks[0]
return start
else:
return 0
@property
def endex(self: 'Memory') -> int:
r"""Exclusive end | |
# <gh_stars>0
import logging
from typing import List, Tuple
from climsoft_api.api.form_daily2 import schema as form_daily2_schema
from climsoft_api.utils.query import get_count
from fastapi.exceptions import HTTPException
from opencdms.models.climsoft import v4_1_1_core as models
from sqlalchemy.orm.session import Session
# Module-level logger for this service module.
logger = logging.getLogger("ClimsoftFormDaily2Service")
# NOTE(review): calling basicConfig at import time configures the *root*
# logger as a side effect; library modules conventionally leave logging
# configuration to the application -- confirm this is intended.
logging.basicConfig(level=logging.INFO)
def get_or_404(
    db_session: Session,
    station_id: str,
    element_id: int,
    yyyy: int,
    mm: int,
    hh: int
):
    """Fetch a single ``FormDaily2`` row by its composite key.

    Raises an ``HTTPException`` with status 404 when no row matches
    the (station, element, year, month, hour) combination.
    """
    record = (
        db_session.query(models.FormDaily2)
        .filter_by(
            stationId=station_id,
            elementId=element_id,
            yyyy=yyyy,
            mm=mm,
            hh=hh,
        )
        .first()
    )
    if not record:
        raise HTTPException(
            status_code=404,
            detail=_("FormDaily2 does not exist.")
        )
    return record
def create(
    db_session: Session,
    data: form_daily2_schema.CreateFormDaily2
) -> form_daily2_schema.FormDaily2:
    """Insert a new ``FormDaily2`` row and return its schema form.

    The input schema is unpacked directly into the ORM model and the
    transaction is committed before returning.
    """
    record = models.FormDaily2(**data.dict())
    db_session.add(record)
    db_session.commit()
    return form_daily2_schema.FormDaily2.from_orm(record)
def get(
    db_session: Session,
    station_id: str,
    element_id: int,
    yyyy: int,
    mm: int,
    hh: int
) -> form_daily2_schema.FormDaily2:
    """Return one ``FormDaily2`` row as a schema object, or raise 404."""
    record = get_or_404(db_session, station_id, element_id, yyyy, mm, hh)
    return form_daily2_schema.FormDaily2.from_orm(record)
def query(
db_session: Session,
station_id: str = None,
element_id: int = None,
yyyy: int = None,
mm: int = None,
hh: int = None,
day01: str = None,
day02: str = None,
day03: str = None,
day04: str = None,
day05: str = None,
day06: str = None,
day07: str = None,
day08: str = None,
day09: str = None,
day10: str = None,
day11: str = None,
day12: str = None,
day13: str = None,
day14: str = None,
day15: str = None,
day16: str = None,
day17: str = None,
day18: str = None,
day19: str = None,
day20: str = None,
day21: str = None,
day22: str = None,
day23: str = None,
day24: str = None,
day25: str = None,
day26: str = None,
day27: str = None,
day28: str = None,
day29: str = None,
day30: str = None,
day31: str = None,
flag01: str = None,
flag02: str = None,
flag03: str = None,
flag04: str = None,
flag05: str = None,
flag06: str = None,
flag07: str = None,
flag08: str = None,
flag09: str = None,
flag10: str = None,
flag11: str = None,
flag12: str = None,
flag13: str = None,
flag14: str = None,
flag15: str = None,
flag16: str = None,
flag17: str = None,
flag18: str = None,
flag19: str = None,
flag20: str = None,
flag21: str = None,
flag22: str = None,
flag23: str = None,
flag24: str = None,
flag25: str = None,
flag26: str = None,
flag27: str = None,
flag28: str = None,
flag29: str = None,
flag30: str = None,
flag31: str = None,
period01: str = None,
period02: str = None,
period03: str = None,
period04: str = None,
period05: str = None,
period06: str = None,
period07: str = None,
period08: str = None,
period09: str = None,
period10: str = None,
period11: str = None,
period12: str = None,
period13: str = None,
period14: str = None,
period15: str = None,
period16: str = None,
period17: str = None,
period18: str = None,
period19: str = None,
period20: str = None,
period21: str = None,
period22: str = None,
period23: str = None,
period24: str = None,
period25: str = None,
period26: str = None,
period27: str = None,
period28: str = None,
period29: str = None,
period30: str = None,
period31: str = None,
total: str = None,
signature: str = None,
entry_datetime: str = None,
temperature_units: str = None,
precip_units: str = None,
cloud_height_units: str = None,
vis_units: str = None,
limit: int = 25,
offset: int = 0,
) -> Tuple[int, List[form_daily2_schema.FormDaily2]]:
"""
This function builds a query based on the given parameter and returns
`limit` numbers of `form_daily2s` row skipping
`offset` number of rows
"""
q = db_session.query(models.FormDaily2)
if station_id is not None:
q = q.filter_by(stationId=station_id)
if element_id is not None:
q = q.filter_by(elementId=element_id)
if yyyy is not None:
q = q.filter_by(yyyy=yyyy)
if mm is not None:
q = q.filter_by(mm=mm)
if hh is not None:
q = q.filter_by(hh=hh)
if day01 is not None:
q = q.filter_by(day01=day01)
if day02 is not None:
q = q.filter_by(day02=day02)
if day03 is not None:
q = q.filter_by(day03=day03)
if day04 is not None:
q = q.filter_by(day04=day04)
if day05 is not None:
q = q.filter_by(day05=day05)
if day06 is not None:
q = q.filter_by(day06=day06)
if day07 is not None:
q = q.filter_by(day07=day07)
if day08 is not None:
q = q.filter_by(day08=day08)
if day09 is not None:
q = q.filter_by(day09=day09)
if day10 is not None:
q = q.filter_by(day10=day10)
if day11 is not None:
q = q.filter_by(day11=day11)
if day12 is not None:
q = q.filter_by(day12=day12)
if day13 is not None:
q = q.filter_by(day13=day13)
if day14 is not None:
q = q.filter_by(day14=day14)
if day15 is not None:
q = q.filter_by(day15=day15)
if day16 is not None:
q = q.filter_by(day16=day16)
if day17 is not None:
q = q.filter_by(day17=day17)
if day18 is not None:
q = q.filter_by(day18=day18)
if day19 is not None:
q = q.filter_by(day19=day19)
if day20 is not None:
q = q.filter_by(day20=day20)
if day21 is not None:
q = q.filter_by(day21=day21)
if day22 is not None:
q = q.filter_by(day22=day22)
if day23 is not None:
q = q.filter_by(day23=day23)
if day24 is not None:
q = q.filter_by(day24=day24)
if day25 is not None:
q = q.filter_by(day25=day25)
if day26 is not None:
q = q.filter_by(day26=day26)
if day27 is not None:
q = q.filter_by(day27=day27)
if day28 is not None:
q = q.filter_by(day28=day28)
if day29 is not None:
q = q.filter_by(day29=day29)
if day30 is not None:
q = q.filter_by(day30=day30)
if day31 is not None:
q = q.filter_by(day31=day31)
if flag01 is not None:
q = q.filter_by(flag01=flag01)
if flag02 is not None:
q = q.filter_by(flag02=flag02)
if flag03 is not None:
q = q.filter_by(flag03=flag03)
if flag04 is not None:
q = q.filter_by(flag04=flag04)
if flag05 is not None:
q = q.filter_by(flag05=flag05)
if flag06 is not None:
q = q.filter_by(flag06=flag06)
if flag07 is not None:
q = q.filter_by(flag07=flag07)
if flag08 is not None:
q = q.filter_by(flag08=flag08)
if flag09 is not None:
q = q.filter_by(flag09=flag09)
if flag10 is not None:
q = q.filter_by(flag10=flag10)
if flag11 is not None:
q = q.filter_by(flag11=flag11)
if flag12 is not None:
q = q.filter_by(flag12=flag12)
if flag13 is not None:
q = q.filter_by(flag13=flag13)
if flag14 is not None:
q = q.filter_by(flag14=flag14)
if flag15 is not None:
q = q.filter_by(flag15=flag15)
if flag16 is not None:
q = q.filter_by(flag16=flag16)
if flag17 is not None:
q = q.filter_by(flag17=flag17)
if flag18 is not None:
q = q.filter_by(flag18=flag18)
if flag19 is not None:
q = q.filter_by(flag19=flag19)
if flag20 is not None:
q = q.filter_by(flag20=flag20)
if flag21 is not None:
q = q.filter_by(flag21=flag21)
if flag22 is not None:
q = q.filter_by(flag22=flag22)
if flag23 is not None:
q = q.filter_by(flag23=flag23)
if flag24 is not None:
q = q.filter_by(flag24=flag24)
if flag25 is not None:
q = q.filter_by(flag25=flag25)
if flag26 is not None:
q = q.filter_by(flag26=flag26)
if flag27 is not None:
q = q.filter_by(flag27=flag27)
if flag28 is not None:
q = q.filter_by(flag28=flag28)
if flag29 is not None:
q = q.filter_by(flag29=flag29)
if flag30 is not None:
q = q.filter_by(flag30=flag30)
if flag31 is not None:
q = q.filter_by(flag31=flag31)
if period01 is not None:
q = q.filter_by(period01=period01)
if period02 is not None:
q = q.filter_by(period02=period02)
if period03 is not None:
q = q.filter_by(period03=period03)
if period04 is not None:
q = q.filter_by(period04=period04)
if period05 is not None:
q = q.filter_by(period05=period05)
if period06 is not None:
q = q.filter_by(period06=period06)
if period07 is not None:
q = q.filter_by(period07=period07)
if period08 is not None:
q = q.filter_by(period08=period08)
if period09 is not None:
q = q.filter_by(period09=period09)
if period10 is not None:
q = q.filter_by(period10=period10)
if period11 is not None:
q = q.filter_by(period11=period11)
if period12 is not None:
q = q.filter_by(period12=period12)
if period13 is not None:
q = q.filter_by(period13=period13)
if period14 is not None:
q = q.filter_by(period14=period14)
if period15 is not None:
q = q.filter_by(period15=period15)
if period16 is not None:
q = q.filter_by(period16=period16)
if period17 is not None:
q = q.filter_by(period17=period17)
if period18 is not None:
q = q.filter_by(period18=period18)
if period19 is not None:
q = q.filter_by(period19=period19)
if period20 is not None:
q = q.filter_by(period20=period20)
if period21 is not None:
q = q.filter_by(period21=period21)
if period22 is not None:
| |
secrets.choice([0-randremove_notes, 0, randremove_notes])))
events_matrix.append(rec_event)
min_note = int(min(min_note, rec_event[4]))
max_note = int(max(max_note, rec_event[4]))
ev += 1
itrack +=1 # Going to next track...
#print('Doing some heavy pythonic sorting...Please stand by...')
#print('Removing zero pitches and zero velocities events')
events_matrix1 = [i for i in events_matrix if i[4] > 0 and i[5] > 0] # removing zero pitches and zero velocities events
events_matrix = events_matrix1
events_matrix1 = []
for event in events_matrix:
seen = set()
event1 = [x for x in event if x not in seen and not seen.add(x)]
events_matrix1.append(event1)
events_matrix = []
events_matrix = events_matrix1
#print('Sorting input by start time...')
events_matrix.sort(key=lambda x: x[1]) # Sorting input by start time
#print('Grouping by start time. This will take a while...')
values = set(map(lambda x:x[1], events_matrix)) # Non-multithreaded function version just in case
groups = [[y for y in events_matrix if y[1]==x and len(y) == 6] for x in values] # Grouping notes into chords while discarting bad notes...
chords_list1 = []
#print('Removing single note/excessive events, sorting events by pitch, and creating a chords list...')
for items in groups:
if len(items) >= minimum_number_of_notes_per_chord: # Removing single note events
items.sort(reverse=True, key=lambda x: x[4]) # Sorting events by pitch
chords_list1.append(items) # Creating final chords list
#print('Removing duplicate pitches from chords and creating a final chords list...')
chords_list = []
chord = []
chord1 = []
chord2 = []
for chord in chords_list1:
seen = set()
chord1 = [x for x in chord if x[4] not in seen and not seen.add(x[4])]
chord2 = [x for x in chord1 if len(x) == 6] # Removing bad note events from chords
chords_list.append(chord2)
chords_list_track = [i for i in chords_list if i != []]
chords_list = []
chords_list.extend(chords_list_track)
#print('Extracting melody...')
melody_list = []
#print('Sorting events...')
for items in groups:
items.sort(reverse=True, key=lambda x: x[4]) # Sorting events by pitch
melody_list.append(items) # Creating final chords list
#print('Removing duplicates if any...')
for item in melody_list:
seen = set()
mel = [x for x in item if x[1] not in seen and not seen.add(x[1])]
melody1.extend(mel)
#print('Removing bad notes if any...')
for item in melody1:
if len(item) == 6:
melody.append(item)
#print('Final sorting by start time...')
melody.sort(reverse=False, key=lambda x: x[1]) # Sorting events by start time
return chords_list, melody
###################################################################################
def Tegridy_Chords_Converter(chords_list, melody_list, song_name, melody_notes_in_chords=True):
    '''Tegridy Chords Converter

    Merges a melody list into a chords list and tags both outputs with
    the song name plus element counts.

    Inputs: Tegridy MIDI chords_list (as is)
            Tegridy MIDI melody_list (as is)
            Name of the song as plain string
            Include or exclude melody notes in each chord. Def. is to include.

    Outputs: Converted chords_list with melody_notes and song name
             Converted melody_list with song name

    Project Los Angeles
    Tegridy Code 2020'''

    temp_chords_list = []
    chords_list_final = []
    melody_list_final = []

    # Both output lists begin with a placeholder header event; the header is
    # rewritten with the final element counts near the end of this function.
    temp_chords_list = [[song_name, 0, 0, 0, 0, 0]]
    melody_list_final = [song_name, 0, 0, 0, 0, 0]

    debug = False

    # For each melody note: optionally record it as a one-note chord, always
    # copy it into the melody output, then attach every chord that shares the
    # note's start time (dropping that chord's first/top note via chord[1:]).
    # NOTE(review): this inner scan is O(len(melody_list) * len(chords_list)).
    for notez in melody_list:
        if melody_notes_in_chords:
            temp_chords_list.append([notez])
        melody_list_final.append(notez)
        for chord in chords_list:
            if notez[1] == chord[0][1]:
                temp_chords_list.append(chord[1:])

    '''# Gonna use a dic here to join chords by start-time :)
    record_dict = defaultdict(list)
    for chords in temp_chords_list:
        if len(chords) > 0:
            record_dict[chords[0][1]].extend(chords)
    temp_chords_list = list(record_dict.values())'''

    chords_list_final = []

    #print('Sorting chords notes by pitch/removing empty chords if any...')
    # Keep the header, then copy over non-empty chords sorted by pitch.
    chords_list_final.append(temp_chords_list[0])
    for chordz in temp_chords_list[1:]:
        if len(chordz) > 0:
            if debug: print(chordz)
            chordz.sort(reverse=True, key=lambda x: x[4]) # Sorting events by pitch
            chords_list_final.append(chordz) # Creating final chords list

    # Rewrite the placeholder headers so they carry the final element counts.
    chords_list_final[0] = [[song_name + '_with_' + str(len(chords_list_final)-1) + '_Chords', 0, 0, len(chords_list_final)-1, 0, 0]]
    melody_list_final[0] = [song_name + '_with_' + str(len(melody_list_final)-1) + '_Notes', 0, 0, len(melody_list_final)-1, 0, 0]

    # NOTE(review): ``chords_list_final[:-1][1]`` slices off the last element
    # and then takes element 1 of the remaining list; possibly ``[-1][1]``
    # (the last chord's start time) was intended -- confirm before changing.
    chords_list_final.append([['song_end', chords_list_final[:-1][1], 0, len(chords_list_final)-1, 0, 1]])
    melody_list_final.append(['song_end', melody_list_final[:-1][1], 0, len(melody_list_final)-1, 0, 1])

    first_song = False

    return chords_list_final, melody_list_final
###################################################################################
def Tegridy_MIDI_TXT_Processor(dataset_name,
                               converted_chords_list,
                               converted_melody_list,
                               simulate_velocity=False,
                               line_by_line_output=False,
                               represent_every_number_of_chords = 0,
                               chords_duration_multiplier = 1,
                               pad_chords_with_stops=False,
                               chords_beat_divider = 100):
    '''Tegridy MIDI to TXT Processor

    Encodes a converted chords list into a plain-text representation.

    Input: Dataset name
           Tegridy MIDI chords_list and melody_list (as is)
           Simulate velocity or not (velocity := top note's pitch)
           Line-by-line switch (useful for AI model tokenizers)
           Represent events every so many chords. Def. is 0 == do not represent.
           Chords durations multiplier. Def. = 1
           Pad chords with timed rests or not. Helps with some NLP implementations
           Chords beat divider/denominator. Creates a beat for AI models.
           Default is 100 = 10 beats per second.

    Output: TXT encoded MIDI events as plain str
            Number of processed chords
            Number of bad/skipped chords (for whatever reason)

    Project Los Angeles
    Tegridy Code 2020'''

    debug = False

    song_chords_count = 0
    number_of_chords_recorded = 0
    number_of_bad_chords_recorded = 0
    chord_start_time = 0
    first_song = True
    rpz = 1
    previous_start_time = 0
    beat = 0

    # Start the output with the dataset name (if given) plus a separator.
    if dataset_name != '':
        TXT_string = str(dataset_name)
    else:
        TXT_string = ''
    if line_by_line_output:
        TXT_string += '\n'
    else:
        TXT_string += ' '

    for chord in tqdm.auto.tqdm(converted_chords_list):
        try:
            # NOTE(review): song_dur is only bound when a header chord with
            # chord[0][3] > 15 has been seen; if it is referenced before
            # that, the NameError is absorbed by the except clause below.
            if chord[0][3] > 15:
                song_dur = int(chord[0][3])

            # Chord duration: longest note duration in the chord, scaled.
            if len(chord) > 1:
                durs_chord = int(max(list(zip(*chord))[2]) * chords_duration_multiplier)
                chord_duration = durs_chord
            else:
                chord_duration = int(chord[0][2] * chords_duration_multiplier)

            if simulate_velocity:
                chord_velocity = chord[0][4]
            else:
                chord_velocity = chord[0][5]

            chord_start_time = chord[0][1]

            # Header/footer events carry zero duration AND zero velocity.
            if chord_duration == 0 and chord_velocity == 0:
                if not str(chord[0][0]) == 'song_end':
                    if not first_song:
                        # Close the previous song before opening a new one.
                        TXT_string += 'SONG=END_' + str(song_chords_count) + '_Chords'
                        if line_by_line_output:
                            TXT_string += '\n'
                        else:
                            TXT_string += ' '
                        TXT_string += 'SONG=' + str(chord[0][0])
                        if line_by_line_output:
                            TXT_string += '\n'
                        else:
                            TXT_string += ' '
                        song_chords_count = 1
                    else:
                        TXT_string += 'SONG=' + str(chord[0][0])
                        if line_by_line_output:
                            TXT_string += '\n'
                        else:
                            TXT_string += ' '
                        song_chords_count = 1
                else:
                    TXT_string += 'SONG=END_' + str(song_chords_count-1) + '_Chords'
                    if line_by_line_output:
                        TXT_string += '\n'
                    else:
                        TXT_string += ' '
            else:
                # Regular chord event: encode delta-time, duration, channel,
                # velocity, beat, then each note as pitch/duration-offset.
                beat = int((abs(int(chord_start_time - previous_start_time))) / chords_beat_divider)
                if pad_chords_with_stops:
                    if (chord_start_time - previous_start_time - 1) > 0:
                        # Emit a timed rest covering the gap between chords.
                        TXT_string += str(abs(int(chord_start_time - previous_start_time) - 1)) + '-' + str(0) + '-' + str(0) + '-' +str(0) + '-' + str(beat) + '-' + str(str(0) + '/' + str(0))
                        if line_by_line_output:
                            TXT_string += '\n'
                        else:
                            TXT_string += ' '
                TXT_string += str(abs(int(chord_start_time - previous_start_time))) + '-' + str(chord_duration) + '-' + str(chord[0][3]) + '-' + str(chord_velocity) + '-' + str(beat)
                previous_start_time = chord_start_time
                for note in chord:
                    TXT_string += '-' + str(note[4]) + '/' + str(chord_duration - int(note[2] * chords_duration_multiplier))
                # Representation of events
                if represent_every_number_of_chords > 0:
                    if rpz == represent_every_number_of_chords:
                        TXT_string += '#' + str(song_dur)
                        rpz = 0
                if line_by_line_output:
                    TXT_string += '\n'
                else:
                    TXT_string += ' '
                if debug: print(chord)
                song_chords_count += 1
                number_of_chords_recorded += 1
                rpz += 1
        # FIX: was a bare ``except:`` which also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps the original best-effort skip
        # behavior (including the latent song_dur NameError) while letting
        # process-control exceptions propagate.
        except Exception:
            if debug: print('Bad chord. Skipping...')
            number_of_bad_chords_recorded += 1
            continue

    return TXT_string, number_of_chords_recorded, number_of_bad_chords_recorded
###################################################################################
def Tegridy_TXT_MIDI_Processor(input_string,
line_by_line_dataset = False,
dataset_MIDI_events_time_denominator = 10,
number_of_ticks_per_quarter = 425,
start_from_this_generated_event = 0,
remove_generated_silence_if_needed = False,
silence_offset_from_start = 75000,
simulate_velocity = False,
output_signature = 'TMIDI-TXT-MIDI',
list_of_MIDI_patches = [0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 0, 0, 0, 0, 0, 0]):
'''Tegridy TXT to MIDI Processor
Input: Input TXT string in the TMIDI-TXT format
Input is line-by-line or one-line
Used dataset time denominator
Number of ticks per quater for output MIDI
Start from this input event (skip this many from start)
Is there a generated silence or not
Silence offset in MIDI ticks from start
Simulate velocity (V = max(Pitch))
Output MIDI signature
List of 16 desired MIDI patch numbers for the output MIDI. Def. is MuseNet compatible patch list.
Output: NOTE: For now only 1st recorded TXT performance converted to MIDI.
Raw/binary MIDI data that can be recorded to a file with standard python functions.
Detected number of input notes
Recorded number of output notes
Detailed created MIDI stats in the MIDI.py module format (MIDI.score2stats)
Project Los Angeles
Tegridy Code 2020'''
debug = False
if line_by_line_dataset:
input_string = input_string.split() # for new datasets
else:
input_string = input_string.split(' ') # | |
Science Mode.
"""
ModeName = ScienceMode[0]
Settings = ScienceMode[3]
###################################################
"Synchronize simulation Timesteps with OHB Data"
Mode_start_date = ephem.Date(
ephem.Date(ScienceMode[1]) + ephem.second * Timestamp_fraction_of_second
)
TimeDifferenceRest = round(
(abs(Mode_start_date - OHB_StartTime) / ephem.second) % Timestep, 0
)
if TimeDifferenceRest == 0:
StartingTimeRelative2StartOfMode = 0
elif Mode_start_date < OHB_StartTime:
Mode_start_date = ephem.Date(
Mode_start_date + ephem.second * TimeDifferenceRest
)
StartingTimeRelative2StartOfMode = TimeDifferenceRest
else:
Mode_start_date = ephem.Date(
Mode_start_date + ephem.second * (Timestep - TimeDifferenceRest)
)
StartingTimeRelative2StartOfMode = Timestep - TimeDifferenceRest
"Simulation length"
Mode_end_date = ephem.Date(ScienceMode[2])
duration = (Mode_end_date - Mode_start_date) * 24 * 3600
"If duration of current Mode is very long -> increase timestep to decrease runtime"
if duration > 12 * 3600:
Timestep = 180
Logger.info(
ScienceMode[0]
+ " has a duration longer than 12 h . Timestep is set to: "
+ str(Timestep)
)
timesteps = int(floor(duration / Timestep)) + 1
else:
timesteps = int(floor(duration / Timestep)) + 1
"Timestep of logging"
log_timestep = 1800
#######################################################
#############################################################
"Determine the science mode, which in turn determines the behaviour of the simulation"
if (
ModeName == "Mode120"
or ModeName == "Mode121"
or ModeName == "Mode122"
or ModeName == "Mode123"
or ModeName == "Mode124"
):
Simulator_Select = "Mode12X"
freeze_start = Settings["freeze_start"]
freeze_duration = Settings["freeze_duration"]
pointing_altitude = Settings["pointing_altitude"]
freeze_flag = 0
if ModeName == "Mode120":
Color = (0, 1, 0)
elif ModeName == "Mode121":
Color = (0, 1, 0.5)
elif ModeName == "Mode122":
Color = (0, 1, 1)
elif ModeName == "Mode123":
Color = (0.5, 0, 0)
elif ModeName == "Mode124":
Color = (0.5, 0, 0.5)
elif ModeName == "Mode1" or ModeName == "Mode2" or ModeName == "Mode5":
Simulator_Select = "ModeX"
pointing_altitude = Timeline_settings["StandardPointingAltitude"]
if ModeName == "Mode1":
Color = (0, 0, 0.5)
elif ModeName == "Mode2":
Color = (0, 0, 1)
elif ModeName == "Mode5":
Color = (0, 0.5, 0)
# elif( ModeName == 'Mode5'):
# Simulator_Select = 'ModeX'
# pointing_altitude = Settings['pointing_altitude']
# Color = (0,0.5,0)
elif (
ModeName == "Mode130"
or ModeName == "Mode131"
or ModeName == "Mode132"
or ModeName == "Mode133"
or ModeName == "Mode134"
):
Simulator_Select = "Mode13X"
pointing_altitude = Settings["pointing_altitude"]
if ModeName == "Mode130":
Color = (0.5, 0, 1)
elif ModeName == "Mode131":
Color = (0.5, 0.5, 0)
elif ModeName == "Mode132":
Color = (0.5, 0.5, 0.5)
elif ModeName == "Mode133":
Color = (0.5, 0.5, 1)
elif ModeName == "Mode134":
Color = (0.5, 1, 0)
elif ModeName == "Mode100":
Simulator_Select = "Mode100"
pointing_altitude = Settings["pointing_altitude_from"]
pointing_altitude_to = Settings["pointing_altitude_to"]
pointing_altitude_interval = Settings["pointing_altitude_interval"]
NumberOfCMDStepsForEachAltitude = 11
pointing_duration = (
Settings["pointing_duration"]
+ Timeline_settings["pointing_stabilization"]
+ NumberOfCMDStepsForEachAltitude * Timeline_settings["CMD_separation"]
+ Timeline_settings["CCDSYNC_Waittime"]
)
timestamp_change_of_pointing_altitude = pointing_duration
Color = (0, 0.5, 0.5)
elif ModeName == "Mode110":
Simulator_Select = "Mode110"
pointing_altitude = Settings["pointing_altitude_from"]
pointing_altitude_to = Settings["pointing_altitude_to"]
sweep_rate = Settings["sweep_rate"]
pointing_stabilization = Timeline_settings["pointing_stabilization"]
CMD_separation = Timeline_settings["CMD_separation"]
Color = (0, 0.5, 0.1)
else:
return Data_MATS, Data_LP, Time
############################################################################
"Pre-allocate space"
lat_MATS = zeros((timesteps, 1))
long_MATS = zeros((timesteps, 1))
alt_MATS = zeros((timesteps, 1))
r_MATS = zeros((timesteps, 3))
r_MATS_unit_vector = zeros((timesteps, 3))
r_MATS_ECEF = zeros((timesteps, 3))
v_MATS = zeros((timesteps, 3))
v_MATS_ECEF = zeros((timesteps, 3))
v_MATS_unit_vector = zeros((timesteps, 3))
normal_orbit = zeros((timesteps, 3))
lat_LP_estimated = zeros((timesteps, 1))
Yaw_function = zeros((timesteps, 1))
optical_axis = zeros((timesteps, 3))
optical_axis_ECEF = zeros((timesteps, 3))
r_LP = zeros((timesteps, 3))
r_LP_ECEF = zeros((timesteps, 3))
r_V_offset_normal = zeros((timesteps, 3))
r_H_offset_normal = zeros((timesteps, 3))
MATS_P = zeros((timesteps, 1))
yaw_offset_angle = zeros((timesteps, 1))
pitch_MATS = zeros((timesteps, 1))
roll_MATS = zeros((timesteps, 1))
Euler_angles = zeros((timesteps, 3))
z_SLOF = zeros((timesteps, 3))
RA_optical_axis = zeros((timesteps, 1))
Dec_optical_axis = zeros((timesteps, 1))
lat_LP = zeros((timesteps, 1))
long_LP = zeros((timesteps, 1))
alt_LP = zeros((timesteps, 1))
normal_orbit = zeros((timesteps, 3))
normal_orbit_ECEF = zeros((timesteps, 3))
current_time = zeros((timesteps, 1))
MATS_skyfield = EarthSatellite(TLE[0], TLE[1])
###################################################################################
"Start of Simulation"
for t in range(timesteps):
t = t
if Simulator_Select == "Mode100":
"Increment the pointing altitude as defined by Mode100"
if (
t * Timestep + StartingTimeRelative2StartOfMode
>= timestamp_change_of_pointing_altitude
and pointing_altitude_to > pointing_altitude
):
pointing_altitude += pointing_altitude_interval
timestamp_change_of_pointing_altitude += pointing_duration
elif Simulator_Select == "Mode110":
"Perform sweep as defined by Mode110"
"Check if the sweep is positive or negative"
if sweep_rate > 0:
if (
t * Timestep + StartingTimeRelative2StartOfMode
> pointing_stabilization + 11 * CMD_separation
and pointing_altitude_to > pointing_altitude
):
pointing_altitude += sweep_rate * Timestep
elif pointing_altitude_to <= pointing_altitude:
pointing_altitude = pointing_altitude_to
elif sweep_rate < 0:
if (
t * Timestep + StartingTimeRelative2StartOfMode
> pointing_stabilization + 11 * CMD_separation
and pointing_altitude_to < pointing_altitude
):
pointing_altitude += sweep_rate * Timestep
elif pointing_altitude_to >= pointing_altitude:
pointing_altitude = pointing_altitude_to
elif (
Simulator_Select == "Mode12X"
and t * Timestep + StartingTimeRelative2StartOfMode
>= freeze_duration + freeze_start
):
"Looking at StandardPointingAltitude after attitude freeze for Mode12X"
pointing_altitude = Timeline_settings["StandardPointingAltitude"]
else:
"Looking at pointing_altitude"
pass
"Increment Time"
current_time = ephem.Date(Mode_start_date + ephem.second * (Timestep * t))
current_time_datetime = ephem.Date(current_time).datetime()
"Only log data at certain intervals depending on log_timestep"
if t * Timestep % log_timestep == 0:
LogFlag = True
else:
LogFlag = False
"Run the satellite simulation for the current time"
Satellite_dict = Library.Satellite_Simulator(
MATS_skyfield,
current_time,
Timeline_settings,
pointing_altitude / 1000,
LogFlag,
Logger,
)
"Save results"
r_MATS[t] = Satellite_dict["Position [km]"]
v_MATS[t] = Satellite_dict["Velocity [km/s]"]
normal_orbit[t] = Satellite_dict["OrbitNormal"]
r_V_offset_normal[t] = Satellite_dict["Normal2V_offset"]
r_H_offset_normal[t] = Satellite_dict["Normal2H_offset"]
MATS_P[t] = Satellite_dict["OrbitalPeriod [s]"]
alt_MATS[t] = Satellite_dict["Altitude [km]"]
lat_MATS[t] = Satellite_dict["Latitude [degrees]"]
long_MATS[t] = Satellite_dict["Longitude [degrees]"]
optical_axis[t] = Satellite_dict["OpticalAxis"]
Dec_optical_axis[t] = Satellite_dict["Dec_OpticalAxis [degrees]"]
RA_optical_axis[t] = Satellite_dict["RA_OpticalAxis [degrees]"]
pitch_MATS[t] = Satellite_dict["Pitch [degrees]"]
lat_LP_estimated[t] = Satellite_dict["EstimatedLatitude_LP [degrees]"]
Yaw_function[t] = Satellite_dict["Yaw [degrees]"]
v_MATS_unit_vector[t, 0:3] = v_MATS[t, 0:3] / norm(v_MATS[t, 0:3])
r_MATS_unit_vector[t, 0:3] = r_MATS[t, 0:3] / norm(r_MATS[t, 0:3])
"Coordinate transformations and calculations"
(
r_MATS_ECEF[t, 0],
r_MATS_ECEF[t, 1],
r_MATS_ECEF[t, 2],
) = MATS_coordinates.eci2ecef(
r_MATS[t, 0] * 1000,
r_MATS[t, 1] * 1000,
r_MATS[t, 2] * 1000,
current_time_datetime,
)
(
optical_axis_ECEF[t, 0],
optical_axis_ECEF[t, 1],
optical_axis_ECEF[t, 2],
) = MATS_coordinates.eci2ecef(
optical_axis[t, 0],
optical_axis[t, 1],
optical_axis[t, 2],
current_time_datetime,
)
(
r_LP_ECEF[t, 0],
r_LP_ECEF[t, 1],
r_LP_ECEF[t, 2],
) = MATS_coordinates.ecef2tanpoint(
r_MATS_ECEF[t][0],
r_MATS_ECEF[t][1],
r_MATS_ECEF[t][2],
optical_axis_ECEF[t, 0],
optical_axis_ECEF[t, 1],
optical_axis_ECEF[t, 2],
)
lat_LP[t], long_LP[t], alt_LP[t] = MATS_coordinates.ECEF2lla(
r_LP_ECEF[t, 0], r_LP_ECEF[t, 1], r_LP_ECEF[t, 2]
)
r_LP[t, 0], r_LP[t, 1], r_LP[t, 2] = MATS_coordinates.ecef2eci(
r_LP_ECEF[t, 0], r_LP_ECEF[t, 1], r_LP_ECEF[t, 2], current_time_datetime
)
(
v_MATS_ECEF[t, 0],
v_MATS_ECEF[t, 1],
v_MATS_ECEF[t, 2],
) = MATS_coordinates.eci2ecef(
v_MATS[t, 0], v_MATS[t, 1], v_MATS[t, 2], current_time_datetime
)
(
normal_orbit_ECEF[t, 0],
normal_orbit_ECEF[t, 1],
normal_orbit_ECEF[t, 2],
) = MATS_coordinates.eci2ecef(
normal_orbit[t, 0],
normal_orbit[t, 1],
normal_orbit[t, 2],
current_time_datetime,
)
# orbangle_between_LP_MATS_array_dotproduct[t] = arccos( dot(r_MATS_unit_vector[t], r_LP[t]) / norm(r_LP[t]) ) / pi*180
"Freezing the attitude"
# if( Simulator_Select == 'Mode12X' and t*Timestep+Timestep > freeze_start and t*Timestep <= freeze_duration+freeze_start):
if (
Simulator_Select == "Mode12X"
and t * Timestep + StartingTimeRelative2StartOfMode > freeze_start
and t * Timestep + StartingTimeRelative2StartOfMode
<= freeze_duration + freeze_start
):
"Save the pointing for the exact time when attitude freeze is initiated"
if freeze_flag == 0:
"Exact timing of Attitude freeze"
current_time_freeze = ephem.Date(
ephem.Date(ScienceMode[1]) + ephem.second * (freeze_start)
)
"Run the satellite simulation for the freeze time"
Satellite_dict = Library.Satellite_Simulator(
MATS_skyfield,
current_time_freeze,
Timeline_settings,
pointing_altitude / 1000,
LogFlag,
Logger,
)
"Save results"
r_V_offset_normal_Freeze = Satellite_dict["Normal2V_offset"]
r_H_offset_normal_Freeze = Satellite_dict["Normal2H_offset"]
optical_axis_Freeze = Satellite_dict["OpticalAxis"]
freeze_flag = 1
"Maintain the same optical axis as the simulation progresses during the freeze"
optical_axis[t, :] = optical_axis_Freeze
(
optical_axis_ECEF[t, 0],
optical_axis_ECEF[t, 1],
optical_axis_ECEF[t, 2],
) = MATS_coordinates.eci2ecef(
optical_axis[t, 0],
optical_axis[t, 1],
optical_axis[t, 2],
current_time_datetime,
)
r_V_offset_normal[t, :] = r_V_offset_normal_Freeze
r_H_offset_normal[t, :] = r_H_offset_normal_Freeze
(
r_LP_ECEF[t, 0],
r_LP_ECEF[t, 1],
r_LP_ECEF[t, 2],
) = MATS_coordinates.ecef2tanpoint(
r_MATS_ECEF[t][0],
r_MATS_ECEF[t][1],
r_MATS_ECEF[t][2],
optical_axis_ECEF[t, 0],
optical_axis_ECEF[t, 1],
optical_axis_ECEF[t, 2],
)
lat_LP[t], long_LP[t], alt_LP[t] = MATS_coordinates.ECEF2lla(
r_LP_ECEF[t, 0], r_LP_ECEF[t, 1], r_LP_ECEF[t, 2]
)
r_LP[t, 0], r_LP[t, 1], r_LP[t, 2] = MATS_coordinates.ecef2eci(
r_LP_ECEF[t, 0], r_LP_ECEF[t, 1], r_LP_ECEF[t, 2], current_time_datetime
)
Dec_optical_axis[t] = (
arctan(
optical_axis[t, 2]
/ sqrt(optical_axis[t, 0] ** 2 + optical_axis[t, 1] ** 2)
)
/ pi
* 180
)
RA_optical_axis[t] = (
arccos(
dot([1, 0, 0], [optical_axis[t, 0], optical_axis[t, 1], 0])
/ norm([optical_axis[t, 0], optical_axis[t, 1], 0])
)
/ pi
* 180
)
if optical_axis[t, 1] < 0:
RA_optical_axis[t] = 360 - RA_optical_axis[t]
"Define SLOF | |
# <reponame>eulerkaku/movement_validation
# -*- coding: utf-8 -*-
"""
Velocity calculation methods: used in locomotion and in path features
"""
from __future__ import division
import warnings
import numpy as np
# Public API for star-imports.  BUG FIX: this was spelled "__ALL__", which
# Python ignores entirely — the special attribute is lowercase "__all__".
__all__ = ['get_angles',
           'get_partition_angles',
           'h__computeAngularSpeed',
           'compute_velocity',
           'get_frames_per_sample']
def get_angles(segment_x, segment_y, head_to_tail=False):
    """Compute a per-frame "angle" (in degrees) for a subset of the
    49 skeleton points of a worm.

    Parameters
    ----------
    segment_x, segment_y: numpy arrays of shape (p,n) where
      p is the size of the partition of the 49 points
      n is the number of frames in the video
    head_to_tail: bool
      True means the worm points are ordered head to tail.

    Returns
    -------
    A numpy array of shape (n,) holding the worm body's "angle"
    (in degrees) for each frame of video.
    """
    if not head_to_tail:
        # Flip the point axis so the differences always run tail to head.
        segment_x = segment_x[::-1, :]
        segment_y = segment_y[::-1, :]

    # Average the point-to-point differences for each frame.  Frames whose
    # points are all NaN make np.nanmean emit a "mean of empty slice"
    # warning; that situation is expected here (only some frames are fully
    # NaN), so the warning is silenced for the duration of the computation.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        mean_dx = np.nanmean(np.diff(segment_x, n=1, axis=0), axis=0)  # (n,)
        mean_dy = np.nanmean(np.diff(segment_y, n=1, axis=0), axis=0)  # (n,)

    # Direction of the mean difference vector, per frame, in degrees.
    return np.degrees(np.arctan2(mean_dy, mean_dx))
def get_partition_angles(nw, partition_key, data_key='skeletons',
                         head_to_tail=False):
    """Per-frame body "angle" (degrees) for one partition of the worm.

    Parameters:
    -----------
    nw : normalized-worm object exposing get_partition()
    partition_key : key identifying which subset of the 49 points to use
    data_key : str
        Which data field of nw to read the points from.
    head_to_tail : bool
        True means the worm points are ordered head to tail.

    Returns:
    --------
    numpy array of shape (n,)
        The worm body's "angle" (in degrees) for each frame of video.
    """
    # Both returned arrays have shape (partition length, n).
    part_x, part_y = nw.get_partition(partition_key, data_key, True)
    return get_angles(part_x, part_y, head_to_tail)
def h__computeAngularSpeed(fps, segment_x, segment_y,
                           left_I, right_I, ventral_mode):
    """Signed angular speed of a body segment between paired frame indices.

    Parameters:
    -----------
    fps :
        Frames per second of the video.
    segment_x :
        The x's of the partition being considered. shape (p,n)
    segment_y :
        The y's of the partition being considered. shape (p,n)
    left_I :
        Indices of the earlier frame of each sample pair.
    right_I :
        Indices of the later frame of each sample pair.
    ventral_mode :
        0, 1, or 2, specifying that the ventral side is...
          0 = unknown
          1 = clockwise
          2 = anticlockwise

    Returns:
    --------
    a numpy array of shape (len(left_I),) of signed angular speed values
    """
    # Compute the body part direction for each frame
    point_angle_d = get_angles(segment_x, segment_y, head_to_tail=False)

    # Angle change between each (left, right) frame pair.
    angular_speed = point_angle_d[right_I] - point_angle_d[left_I]

    # Correct any jumps that result during the subtraction process
    # i.e. 1 - 359 ~= -358
    # by forcing -180 <= angular_speed[i] <= 180
    angular_speed = (angular_speed + 180) % (360) - 180

    # NOTE(review): the original comment claimed this converts degrees per
    # frame to degrees per second, but multiplying by 1/fps *divides* by the
    # frame rate (deg/frame -> deg/s would be "* fps").  Confirm the intended
    # units against the caller before changing anything.
    angular_speed = angular_speed * (1 / fps)

    # Sign the direction for dorsal/ventral locomotion.
    # if ventral_mode is anything but anticlockwise, then negate angular_speed:
    if(ventral_mode < 2):
        angular_speed = -angular_speed

    return angular_speed
def h__getVelocityIndices(frames_per_sample, good_frames_mask):
"""
For each point, we calculate the velocity using frames prior to and following
a frame. Given that some frames are not valid (have NaN), we move the index
backward (prior frame) or forward (following frame), essentially slightly
widening the time frame over which the velocity is computed.
This function determines what the indices are that each frame will use to
calculate the velocity at that frame. For example, at frame 5 we might decide
to use frames 2 and 8.
Parameters:
-----------
frames_per_sample : int
Our sample scale, in frames. The integer must be odd.
good_frames_mask :
Shape (num_frames), false if underlying angle is NaN
OUTPUTS:
keep_mask : shape (num_frames), this is used to indicate
which original frames have valid velocity values,
and which don't.
NOTE: sum(keep_mask) == n_valid_velocity_values
left_I : shape (n_valid_velocity_values), for a given sample, this
indicates the index to the left of (less than) the sample
that should be used to calculate the velocity
right_I : shape (n_valid_velocity_values)
"""
"""
Approach, rather than interating over each frame, we iterate over the
possible shifts. Since this tends to be significantly less than the # of
frames, we save a bit of time in the execution.
"""
# Require that frames_per_sample be an odd integer
assert(type(frames_per_sample) == int)
assert(frames_per_sample % 2 == 1)
num_frames = len(good_frames_mask)
# Create a "half" scale
# NOTE: Since the scale is odd, the half
# scale will be even, because we subtract 1
scale_minus_1 = frames_per_sample - 1
half_scale = int(scale_minus_1 / 2)
# First frame for which we can assign a valid velocity:
start_index = half_scale
# Final frame for which we can assign a valid velocity, plus one:
end_index = num_frames - half_scale
# These are the indices we will use to compute the velocity. We add
# a half scale here to avoid boundary issues. We'll subtract it out later.
# See below for more details
middle_I = np.array(np.arange(start_index, end_index, 1) + half_scale,
dtype='int32')
# @MichaelCurrie: Wouldn't this make more sense?
#middle_I = np.arange(start_index, end_index + half_scale, 1) + half_scale
"""
Our start_index frame can only have one valid start frame (frame 0)
afterwards it is invalid. In other words, if frame 0 is not good, we
can't check frame -1, or -2.
However, in general I'd prefer to avoid doing some check on the bounds
of the frames, i.e. for looking at starting frames, I'd like to avoid
checking if the frame value is 0 or greater.
To get around this we'll pad with bad values (which we'll never match)
then shift the final indices. In this way, we can check these "frames",
as they will have positive indices.
e.g.
scale = 5
half_scale = 2
This means the first frame in which we could possibly get a valid
velocity is frame 2, computed using frames 0 and 4
F F F T T <- example good_frames_mask_padded values
0 1 2 <- true indices (frame numbers)
0 1 2 3 4 <- temporary indices
NOTE: Now if frame 0 is bad, we can go left by half_scale + 1 to temp
index 1 (frame -1) or even further to temp_index 0 (frame -2). we'll
never use those values however because below we state that the values
at those indices are bad (see good_frames_mask_padded)
"""
# This tells us whether each value is useable or not for velocity
# Better to do this out of the loop.
# For real indices (frames 1:num_frames), we rely on whether or not the
# mean position is NaN, for fake padding frames they can never be good so we
# set them to be false
stub_mask = np.zeros(half_scale, dtype=bool)
good_frames_mask_padded = \
np.concatenate((stub_mask, good_frames_mask, stub_mask))
# These will be the final indices from which we estimate the velocity.
# i.e. delta_position(I) = position(right_indices(I)) -
# position(left_indices(I))
left_I = np.empty(len(middle_I), dtype='int32')
right_I = np.empty(len(middle_I), dtype='int32')
# numpy integer arrays cannot accept NaN, which is a float concept, but
# filling them with NaN fills them with the largest negative number
# possible, -2**31. We can easily filter for this later.
left_I.fill(np.NaN)
right_I.fill(np.NaN)
| |
"""supports/wraps nx_graphs from NetworkX"""
import copy
import logging
import math
from pathlib import PurePath
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import networkx as nx
# library
import numpy as np
# import sknw # must pip install sknw
from networkx.algorithms import tree
from skimage import io
from skimage.measure import approximate_polygon
# local
from pyamiimage.ami_image import AmiImage
from pyamiimage.ami_plot import AmiLine, X, Y
from pyamiimage.ami_util import AmiUtil
from pyamiimage.svg import BBox
from pyamiimage.text_box import TextBox
from pyamiimage.sknw import build_sknw
logger = logging.getLogger(__name__)
"""
==========================================================================
==============================GRAPH=======================================
==========================================================================
"""
class AmiGraph:
"""holds AmiNodes and AmiEdges
may also hold subgraphs
"""
"""nx_graph is a NetworkX graph which holds nodes and edges
and which can be used to compute other graph functionality (e.g. edges on nodes).
Here we wrap its functionality in Ami* classes. This is because
(a) it's hard for
newcomers like me to remember all the syntax (which is almost C-like "dict-of-dicts-of-dict-of-dicts)
or has many Views (rather than functions)
(b) There are 4 different types of graphs with different syntaxes. This code started as
simple graphs nx_graph[i][j][properties] and then moved to multigraph nx_graph[i][j][branch][properties]
This is meant to help. If it doesn't, I'm sorry! and you can revert to native nx_graph. Here's a sample:
>>>
class MultiAdjacencyView(AdjacencyView):
An MultiAdjacencyView is a Read-only Map of Maps of Maps of Maps.
It is a View into a dict-of-dict-of-dict-of-dict data structure.
The inner level of dict is read-write. But the
outer levels are read-only.
See Also
========
AtlasView: View into dict-of-dict
AdjacencyView: View into dict-of-dict-of-dict
<<<
That's what AmiGraph tries to hide. I hope it works.
The components are:
* AmiGraph (wraps nx_graph and derived quantities such as lists of nodes and edges)
* AmiNode (wraps nx_graph.nodes[i]). nx_nodes are by default ints and should be kept as such
* AmiEdge (wraps nx_graph.edges - a pair of ints)
* AmiIsland (a discrete "component" of the graph)
Our default is Multigraphs as images can contain paths which create loops and join two nodes in two or more
different ways. This means that edges require three indexes, start, end and branch. This means checking that
this has been introduced universally.
(I'm hoping that Simple graphs can also be switched on/off)
Currently working with the idea that every edge has
(i, j) for simple graph
(i, j, branch) for multigraph (requires keys=True)
"""
logger = logging.getLogger("ami_graph")
def __init__(self, nx_graph, generate_nodes=False, nd_skeleton=None):
    """Wrap an existing NetworkX graph in an AmiGraph.

    :param nx_graph: the NetworkX graph to wrap; must not be None
    :param generate_nodes: stored flag; nodes may later be generated from edges
    :param nd_skeleton: optional skeleton array the graph was built from
    :raises Exception: if nx_graph is None
    """
    if nx_graph is None:
        raise Exception(f"nx_graph cannot be None")
    self.nx_graph = nx_graph
    # Lazily populated caches of wrapper objects and derived data.
    self.ami_edges = None
    self.ami_nodes = None
    self.ami_island_list = None
    self.nd_skeleton = nd_skeleton
    self.ami_edge_dict = None
    self.generate_nodes = generate_nodes
    self.node_dict = None
    self.centroid_xy = None
    # sets up all nodes and edges
    self.read_nx_graph(nx_graph)
    assert self.nx_graph is not None, f"ami_graph.nx_graph should not be None"
    return
@classmethod
def create_ami_graph_from_arbitrary_image_file(cls, file, interactive=False):
    """Build an AmiGraph directly from an image file on disk."""
    assert file.exists()
    graph = AmiGraph.create_nx_graph_from_arbitrary_image_file(
        file, interactive=interactive)
    return AmiGraph(graph)
# def get_nx_graph(self):
# """try to avoid circular imports or attribute not found"""
# return self.nx_graph
#
def get_or_create_ami_node(self, node_id):
    """Create an AmiNode for node_id, or retrieve it from node_dict.

    Use this method rather than the AmiNode() constructor; every AmiNode
    is cached in self.node_dict so each id maps to a single wrapper.

    :param node_id: node_id (should exist in nx_graph but not checked yet)
    :return: AmiNode (None if node_id is None)
    """
    if node_id is None:
        return None
    if self.node_dict is None:
        self.node_dict = dict()
    cached = self.node_dict.get(node_id)
    if cached is not None:
        return cached
    fresh_node = AmiNode(node_id, ami_graph=self, _private=True)
    self.node_dict[node_id] = fresh_node
    return fresh_node
# AmiGraph
def get_ami_edge_from_nx_id(self, nx_edge):
"""Wrapper for get_or_create_ami_edge_from_ids() to create ami_edge
:param nx_edge:
:return: ami_edge (or None)
"""
if nx_edge is None:
return None
branch_id = nx_edge[2] if self.nx_graph.is_multigraph() else None
ami_edge = self.get_or_create_ami_edge_from_ids(nx_edge[0], nx_edge[1], branch_id=branch_id)
return ami_edge
def get_or_create_ami_edge_from_ids(self, node_id1, node_id2, branch_id=None):
"""create or lookup AmiEdge from node_ids, or retrievs from edge_dict
prefer this to AmiEdge constructor
key is (sorted(node_id1, node_id2)),
looks up AmiNode to check validity of node_ids
If there are multiple branches between two nodes they must have different branch_ids
(It's up to the user to manage this). Adding an edge without a branch_id will replace
the current one)
:param node_id1: start of edge
:param node_id2: end of edge
:param branch_id: id there are multiple branches; up to the user to manage these
:return: None if start_id or end_id is None
"""
if type(node_id1) is not int or type(node_id2) is not int:
raise ValueError(f"node_ids must be ints , found: {node_id1}, {node_id2}")
if self.nx_graph.is_multigraph:
if type(branch_id) is not int:
raise ValueError(f"branch_id for multigraph must be int, found {type(branch_id)}")
key = (node_id1, node_id2, branch_id) if node_id1 < node_id2 \
else (node_id2, node_id1, branch_id)
else:
key = (node_id1, node_id2) if node_id1 < node_id2 else (node_id2, node_id1)
if self.ami_edge_dict is None:
self.ami_edge_dict = dict()
# new edge?
if key not in self.ami_edge_dict:
ami_edge = self.create_and_index_new_edge(key, node_id1, node_id2, branch_id)
# existing edge/s
else:
ami_edge = self.ami_edge_dict[key]
return ami_edge
def create_and_index_new_edge(self, key, node_id1, node_id2, branch_id):
    """Create a new AmiEdge and index it in self.ami_edge_dict under key.

    :param key: cache key (sorted node-id pair, plus branch for multigraphs)
    :return: the newly created AmiEdge
    """
    ami_edge = AmiEdge(self, node_id1, node_id2, branch_id=branch_id, _private=True)
    self.ami_edge_dict[key] = ami_edge
    return ami_edge
def add_raw_node(self, raw_node, fail_on_duplicate=False):
    """add a raw node either a string or string-indexed dict
    if already a dict, deepcopy it
    if a primitive make a node_dict and start it with raw_node as id

    :raw_node: node to add, must have key
    :fail_on_duplicate: if true fail if key already exists
    """
    if raw_node is not None:
        # ami_node = AmiNode()
        # NOTE(review): plain dicts have no .key attribute, so the dict
        # branch of this expression raises AttributeError; presumably
        # raw_node["key"] (or a custom mapping type) was intended —
        # confirm with callers.
        key = raw_node.key if type(raw_node) is dict else str(raw_node)
        key = "n" + str(key)
        # NOTE(review): __init__ leaves self.node_dict as None, so this
        # membership test raises TypeError unless something else creates
        # the dict first — verify the expected call order.
        if key in self.node_dict and fail_on_duplicate:
            raise AmiGraphError(f"cannot add same node twice {key}")
        if type(raw_node) is dict:
            self.node_dict[key] = copy.deepcopy(raw_node)
        else:
            self.node_dict[key] = "node"  # store just the key at present
    else:
        self.logger.warning("node cannot be None")
# def read_edges(self, edges):
# # self.ami_edges = edges
# if len(self.node_dict.keys()) == 0 and self.generate_nodes:
# self.generate_nodes_from_edges()
# for i, edge in enumerate(edges):
# idx = "e" + str(i)
# self.add_edge(edge, idx)
#
# def add_edge(self, raw_edge, idx, fail_on_duplicate=True):
# if fail_on_duplicate and idx in self.ami_edge_dict.keys():
# raise ValueError("duplicate edge")
#
# if raw_edge is None:
# raise AmiGraphError("cannot add edge=None")
# edge1 = ("n" + str(raw_edge[0]), "n" + str(raw_edge[1]))
# self.ami_edge_dict[idx] = edge1
#
# AmiGraph
# def generate_nodes_from_edges(self):
# if self.ami_edges is not None:
# for edge in self.ami_edges:
# self.add_raw_node(edge[0])
# self.add_raw_node(edge[1])
#
# @classmethod
# def create_ami_graph_from_skeleton(cls, nd_skeleton):
# """Uses Sknw to create a graph object within a new AmiGraph"""
# # currently only called in a test
# nx_graph = sknw.build_sknw(nd_skeleton)
# ami_graph = AmiGraph(nx_graph, nd_skeleton=nd_skeleton)
# return ami_graph
def _ingest_graph_info(self):
    """Populate the island list and centroid node_dict from nx_graph.

    Logs a warning and returns early when the graph is missing or has
    no connected components.
    """
    if self.nx_graph is None:
        self.logger.warning("Null graph")
        return
    nx_island_list = list(nx.connected_components(self.nx_graph))
    if nx_island_list is None or len(nx_island_list) == 0:
        self.logger.warning("No islands")
        return
    AmiGraph.assert_nx_island_info(nx_island_list)
    nx_edgelist = self.get_edge_list_ids_through_maximum_spanning_edges()
    AmiGraph.debug_edges_and_nodes(nx_edgelist, debug_count=7)
    nodes = self.nx_graph.nodes
    # NOTE(review): node_dict here maps index -> centroid (x, y) tuple,
    # whereas add_raw_node() stores "n<key>" string keys — the two uses
    # of node_dict look inconsistent; confirm which layout is canonical.
    self.node_dict = {i: (nodes[node][AmiNode.CENTROID][0], nodes[node][AmiNode.CENTROID][1])
                      for i, node in enumerate(nodes)}
    self.ami_island_list = []
    for nx_island in nx_island_list:
        ami_island = self.create_ami_island(nx_island)
        self.ami_island_list.append(ami_island)
    return
@classmethod
def assert_nx_island_info(cls, nx_island_list):
nx_island0 = nx_island_list[0]
assert type(nx_island0) is set
assert len(nx_island0) > 0
elem0 = list(nx_island0)[0]
assert type(elem0) is int, f"island elem are {type(elem0)}"
# AmiGraph
@classmethod
def debug_edges_and_nodes(cls, nx_edgelist, debug_count=5):
    """Log the first few edges' points (and the first edge's steps).

    :param nx_edgelist: list of (node, node, attr_dict) edge tuples,
        each attr_dict carrying a 'pts' entry
    :param debug_count: how many entries to log
    """
    pts_index = 2
    for edge in nx_edgelist[:debug_count]:
        pts_ = edge[pts_index]['pts']
        # BUG FIX: logging treats extra positional args as %-format
        # arguments; "points" had no placeholder, so the original calls
        # made the logging module report a formatting error instead of
        # the data.  Same fix for "step" below.
        logger.warning("points %s", pts_)
    edgelist_pts_ = nx_edgelist[0][2]['pts']
    for step in edgelist_pts_[:debug_count]:
        logger.warning("step %s", step)
def get_edge_list_ids_through_maximum_spanning_edges(self):
    """Maximum-spanning-tree edges of nx_graph (Kruskal, with data).

    :return: list of (node, node, data) edge tuples
    """
    spanning_edges = tree.maximum_spanning_edges(
        self.nx_graph, algorithm="kruskal", data=True)
    return list(spanning_edges)
@classmethod
def set_bbox_pixels_to_color(cls, bbox, image, colorx=255):
"""sets all pixels in box to uniform color
:param bbox:
:param image:
:param colorx:
:return: modified image
"""
xx = bbox[0]
yy = bbox[1]
image[xx[0]:xx[1], yy[0]:yy[1]] = colorx
return image
def __str__(self):
s = "nodes: " + str(self.ami_nodes) + \
"\n edges: " + str(self.ami_edges)
return s
def read_nx_graph(self, nx_graph):
    """
    Read and unpack a NetworkX graph.

    This may change as a result of changing data models;
    the nx_graph may be the fundamental data structure.

    :param nx_graph: graph to ingest; stored on self and unpacked into
        edge and node wrappers
    :return: None
    """
    # this may be the critical data structure and the others are convenience
    self.nx_graph = nx_graph
    self.get_or_create_all_ami_edges()
    self.read_nx_nodes()
    # _ingest_graph_info() is currently disabled; flip this flag to also
    # rebuild islands and the centroid node_dict.
    ingest = False
    if ingest:
        self._ingest_graph_info()
| |
<reponame>SeraphRoy/PyPy-Functional<filename>pypy/module/cpyext/pyerrors.py
import os
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter import pytraceback
from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, CONST_STRING
from pypy.module.exceptions.interp_exceptions import W_RuntimeWarning
from pypy.module.cpyext.pyobject import (
PyObject, PyObjectP, make_ref, from_ref, Py_DecRef)
from pypy.module.cpyext.state import State
from pypy.module.cpyext.import_ import PyImport_Import
from rpython.rlib import rposix, jit
@cpython_api([PyObject, PyObject], lltype.Void)
def PyErr_SetObject(space, w_type, w_value):
    """This function is similar to PyErr_SetString() but lets you specify an
    arbitrary Python object for the "value" of the exception."""
    # Record the pending exception on the space-local cpyext State;
    # PyErr_Occurred()/PyErr_Fetch() read it back from there.
    state = space.fromcache(State)
    state.set_exception(OperationError(w_type, w_value))

@cpython_api([PyObject, CONST_STRING], lltype.Void)
def PyErr_SetString(space, w_type, message_ptr):
    """Set the error indicator to exception w_type with a C-string message."""
    message = rffi.charp2str(message_ptr)
    PyErr_SetObject(space, w_type, space.newtext(message))

@cpython_api([PyObject], lltype.Void, error=CANNOT_FAIL)
def PyErr_SetNone(space, w_type):
    """This is a shorthand for PyErr_SetObject(type, Py_None)."""
    PyErr_SetObject(space, w_type, space.w_None)

@cpython_api([], PyObject, result_borrowed=True)
def PyErr_Occurred(space):
    """Return the type of the pending exception, or None if none is set.

    The returned reference is borrowed (see result_borrowed=True)."""
    state = space.fromcache(State)
    if state.operror is None:
        return None
    return state.operror.w_type # borrowed ref

@cpython_api([], lltype.Void)
def PyErr_Clear(space):
    """Clear the error indicator; a no-op when none is set."""
    state = space.fromcache(State)
    state.clear_exception()

@cpython_api([PyObject], PyObject)
def PyExceptionInstance_Class(space, w_obj):
    """Return the class (type) of an exception instance."""
    return space.type(w_obj)
@cpython_api([PyObjectP, PyObjectP, PyObjectP], lltype.Void)
def PyErr_Fetch(space, ptype, pvalue, ptraceback):
    """Retrieve the error indicator into three variables whose addresses are passed.
    If the error indicator is not set, set all three variables to NULL. If it is
    set, it will be cleared and you own a reference to each object retrieved. The
    value and traceback object may be NULL even when the type object is not.

    This function is normally only used by code that needs to handle exceptions or
    by code that needs to save and restore the error indicator temporarily."""
    state = space.fromcache(State)
    operror = state.clear_exception()
    if operror:
        # The caller receives new (owned) references to each component.
        ptype[0] = make_ref(space, operror.w_type)
        pvalue[0] = make_ref(space, operror.get_w_value(space))
        ptraceback[0] = make_ref(space, operror.get_w_traceback(space))
    else:
        ptype[0] = lltype.nullptr(PyObject.TO)
        pvalue[0] = lltype.nullptr(PyObject.TO)
        ptraceback[0] = lltype.nullptr(PyObject.TO)

@cpython_api([PyObject, PyObject, PyObject], lltype.Void)
def PyErr_Restore(space, w_type, w_value, w_traceback):
    """Set the error indicator from the three objects. If the error indicator is
    already set, it is cleared first. If the objects are NULL, the error
    indicator is cleared. Do not pass a NULL type and non-NULL value or
    traceback. The exception type should be a class. Do not pass an invalid
    exception type or value. (Violating these rules will cause subtle problems
    later.) This call takes away a reference to each object: you must own a
    reference to each object before the call and after the call you no longer own
    these references. (If you don't understand this, don't use this function. I
    warned you.)

    This function is normally only used by code that needs to save and restore the
    error indicator temporarily; use PyErr_Fetch() to save the current
    exception state."""
    state = space.fromcache(State)
    if w_type is None:
        state.clear_exception()
        return
    state.set_exception(OperationError(w_type, w_value))
    # Consume the references the caller handed in (the CPython contract).
    Py_DecRef(space, w_type)
    Py_DecRef(space, w_value)
    Py_DecRef(space, w_traceback)

@cpython_api([PyObjectP, PyObjectP, PyObjectP], lltype.Void)
def PyErr_NormalizeException(space, exc_p, val_p, tb_p):
    """Under certain circumstances, the values returned by PyErr_Fetch() below
    can be "unnormalized", meaning that *exc is a class object but *val is
    not an instance of the same class. This function can be used to instantiate
    the class in that case. If the values are already normalized, nothing happens.
    The delayed normalization is implemented to improve performance."""
    operr = OperationError(from_ref(space, exc_p[0]),
                           from_ref(space, val_p[0]))
    operr.normalize_exception(space)
    # Swap the caller's owned references for the normalized pair.
    Py_DecRef(space, exc_p[0])
    Py_DecRef(space, val_p[0])
    exc_p[0] = make_ref(space, operr.w_type)
    val_p[0] = make_ref(space, operr.get_w_value(space))
@cpython_api([], rffi.INT_real, error=0)
def PyErr_BadArgument(space):
    """This is a shorthand for PyErr_SetString(PyExc_TypeError, message), where
    message indicates that a built-in operation was invoked with an illegal
    argument. It is mostly for internal use. In CPython this function always
    raises an exception and returns 0 in all cases, hence the (ab)use of the
    error indicator."""
    raise oefmt(space.w_TypeError, "bad argument type for built-in operation")

@cpython_api([], lltype.Void)
def PyErr_BadInternalCall(space):
    """Signal misuse of the C API as a SystemError (mirrors CPython)."""
    raise oefmt(space.w_SystemError, "Bad internal call!")

@cpython_api([], PyObject, error=CANNOT_FAIL)
def PyErr_NoMemory(space):
    """This is a shorthand for PyErr_SetNone(PyExc_MemoryError); it returns NULL
    so an object allocation function can write return PyErr_NoMemory(); when it
    runs out of memory.
    Return value: always NULL."""
    PyErr_SetNone(space, space.w_MemoryError)
    # Falls off the end: the implicit None return maps to NULL at the C level.
@cpython_api([PyObject], PyObject)
def PyErr_SetFromErrno(space, w_type):
    """
    This is a convenience function to raise an exception when a C library function
    has returned an error and set the C variable errno. It constructs a
    tuple object whose first item is the integer errno value and whose
    second item is the corresponding error message (gotten from strerror()),
    and then calls PyErr_SetObject(type, object). On Unix, when the
    errno value is EINTR, indicating an interrupted system call,
    this calls PyErr_CheckSignals(), and if that set the error indicator,
    leaves it set to that. The function always returns NULL, so a wrapper
    function around a system call can write return PyErr_SetFromErrno(type);
    when the system call returns an error.
    Return value: always NULL."""
    # Delegate with a NULL filename; the implicit None return maps to NULL.
    PyErr_SetFromErrnoWithFilename(space, w_type,
                                   lltype.nullptr(rffi.CCHARP.TO))

@cpython_api([PyObject, rffi.CCHARP], PyObject)
def PyErr_SetFromErrnoWithFilename(space, w_type, llfilename):
    """Similar to PyErr_SetFromErrno(), with the additional behavior that if
    filename is not NULL, it is passed to the constructor of type as a third
    parameter.  In the case of exceptions such as IOError and OSError,
    this is used to define the filename attribute of the exception instance.
    Return value: always NULL."""
    # XXX Doesn't actually do anything with PyErr_CheckSignals.
    if llfilename:
        filename = rffi.charp2str(llfilename)
        w_filename = space.newbytes(filename)
    else:
        w_filename = space.w_None
    PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_filename)

@cpython_api([PyObject, PyObject], PyObject)
@jit.dont_look_inside      # direct use of _get_errno()
def PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_value):
    """Similar to PyErr_SetFromErrno(), with the additional behavior that if
    w_value is not NULL, it is passed to the constructor of type as a
    third parameter.  In the case of exceptions such as IOError and OSError,
    this is used to define the filename attribute of the exception instance.
    Return value: always NULL."""
    # XXX Doesn't actually do anything with PyErr_CheckSignals.
    # Read the C-level errno and build (errno, strerror[, filename]) args.
    errno = rffi.cast(lltype.Signed, rposix._get_errno())
    msg = os.strerror(errno)
    if w_value:
        w_error = space.call_function(w_type,
                                      space.newint(errno),
                                      space.newtext(msg),
                                      w_value)
    else:
        w_error = space.call_function(w_type,
                                      space.newint(errno),
                                      space.newtext(msg))
    raise OperationError(w_type, w_error)
@cpython_api([], rffi.INT_real, error=-1)
def PyErr_CheckSignals(space):
    """
    This function interacts with Python's signal handling. It checks whether a
    signal has been sent to the processes and if so, invokes the corresponding
    signal handler. If the signal module is supported, this can invoke a
    signal handler written in Python. In all cases, the default effect for
    SIGINT is to raise the  KeyboardInterrupt exception. If an
    exception is raised the error indicator is set and the function returns -1;
    otherwise the function returns 0. The error indicator may or may not be
    cleared if it was previously set."""
    # XXX implement me
    return 0

@cpython_api([PyObject, PyObject], rffi.INT_real, error=CANNOT_FAIL)
def PyErr_GivenExceptionMatches(space, w_given, w_exc):
    """Return true if the given exception matches the exception in exc. If
    exc is a class object, this also returns true when given is an instance
    of a subclass. If exc is a tuple, all exceptions in the tuple (and
    recursively in subtuples) are searched for a match."""
    # Compare the exception's class when given an instance, the object
    # itself otherwise; space.exception_match handles tuples/subclasses.
    if (space.isinstance_w(w_given, space.w_BaseException) or
            space.is_oldstyle_instance(w_given)):
        w_given_type = space.exception_getclass(w_given)
    else:
        w_given_type = w_given
    return space.exception_match(w_given_type, w_exc)

@cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)
def PyErr_ExceptionMatches(space, w_exc):
    """Equivalent to PyErr_GivenExceptionMatches(PyErr_Occurred(), exc).  This
    should only be called when an exception is actually set; a memory access
    violation will occur if no exception has been raised."""
    w_type = PyErr_Occurred(space)
    return PyErr_GivenExceptionMatches(space, w_type, w_exc)
@cpython_api([PyObject, CONST_STRING, rffi.INT_real], rffi.INT_real, error=-1)
def PyErr_WarnEx(space, w_category, message_ptr, stacklevel):
"""Issue a warning message. The category argument is a warning category (see
below) or NULL; the message argument is a message string. stacklevel is a
positive number giving a number of stack frames; the warning will be issued from
the currently executing line of code in that stack frame. A stacklevel of 1
is the function calling PyErr_WarnEx(), 2 is the function above that,
and so forth.
This function normally prints a warning message to sys.stderr; however, it is
also possible that the user has specified that warnings are to be turned into
errors, and in that case this will raise an exception. It is also possible that
the function raises an exception because of a problem with the warning machinery
(the implementation imports the warnings module to do the heavy lifting).
The return value is 0 if no exception is raised, or -1 if an exception
is raised. (It is not possible to determine whether a warning message is
actually printed, nor what the reason is for the exception; this is
intentional.) If an exception is raised, the caller should do its normal
exception handling (for example, Py_DECREF() owned references and return
an error value).
Warning categories must | |
<reponame>tidoust/bikeshed
# coding=utf-8
#
# Copyright © 2013 Hewlett-Packard Development Company, L.P.
#
# This work is distributed under the W3C® Software License [1]
# in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# [1] http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
#
import constructs, tokenizer
import itertools
def _name(thing):
return thing.name if (thing) else ''
class Production(object):
    """Base class for all WebIDL grammar productions (Python 2 code).

    Tracks the whitespace captured before and after the construct so the
    original source text round-trips exactly, plus an optional run of
    skipped tokens (_tail) and a trailing semicolon.
    """

    def __init__(self, tokens):
        self._leadingSpace = self._whitespace(tokens)
        self._tail = None
        self._semicolon = ''

    def _didParse(self, tokens, includeTrailingSpace = True):
        # Capture trailing whitespace once the subclass has consumed its tokens.
        self._trailingSpace = self._whitespace(tokens) if (includeTrailingSpace) else ''

    def _whitespace(self, tokens):
        whitespace = tokens.whitespace()
        return whitespace.text if (whitespace) else ''

    def __str__(self):
        return self.__unicode__()

    def __unicode__(self):
        # Reassemble: leading space + construct text + tail tokens +
        # semicolon + trailing space.  (unicode() is the Python 2 builtin.)
        output = self._leadingSpace + self._unicode()
        output += ''.join([unicode(token) for token in self._tail]) if (self._tail) else ''
        return output + unicode(self._semicolon) + self._trailingSpace

    def _markup(self, generator):
        # Default markup: plain text; subclasses add keyword markup etc.
        generator.addText(self._unicode())
        return self

    def markup(self, generator):
        generator.addText(self._leadingSpace)
        target = self._markup(generator)
        if (target._tail):
            generator.addText(''.join([unicode(token) for token in target._tail]))
        generator.addText(unicode(target._semicolon))
        # _markup may delegate to a different production; emit its trailing
        # space too in that case.
        if (self != target):
            generator.addText(target._trailingSpace)
        generator.addText(self._trailingSpace)

    def _consumeSemicolon(self, tokens, consumeTail = True):
        # Expect ';' here; on error, skip ahead to the next ';' or '}' and
        # stash the skipped tokens in _tail so output still round-trips.
        if (Symbol.peek(tokens, ';')):
            self._semicolon = Symbol(tokens, ';', False)
        elif (not Symbol.peek(tokens, '}')):
            if (consumeTail):
                skipped = tokens.syntaxError((';', '}'))
                if (0 < len(skipped)):
                    self._tail = skipped[:-1]
                    tokens.restore(skipped[-1])
                self._semicolon = Symbol(tokens, ';', False) if (Symbol.peek(tokens, ';')) else ''
            else:
                tokens.syntaxError(None)
        else:
            tokens.syntaxError(None)
class Symbol(Production):
    """A single literal symbol or keyword token (e.g. ';', '{', 'unsigned')."""

    @classmethod
    def peek(cls, tokens, symbol):
        # Non-consuming lookahead: true if the next token is this symbol.
        token = tokens.pushPosition()
        return tokens.popPosition(token and token.isSymbol(symbol))

    def __init__(self, tokens, symbol = None, includeTrailingSpace = True):
        Production.__init__(self, tokens)
        self.symbol = tokens.next().text
        if (symbol):
            # Caller asserted which symbol it expected to consume.
            assert(self.symbol == symbol)
        self._didParse(tokens, includeTrailingSpace)

    def _unicode(self):
        return self.symbol

    def _markup(self, generator):
        # Known keywords get keyword markup; anything else is plain text.
        if (self.symbol in tokenizer.Tokenizer.SymbolIdents):
            generator.addKeyword(self.symbol)
        else:
            generator.addText(self.symbol)
        return self

    def __repr__(self):
        # Python 2: bytes repr with non-ASCII characters replaced.
        return self.symbol.encode('ascii', 'replace')
class IntegerType(Production): # "short" | "long" ["long"]
    """Integer base type: "short", "long" or "long long"."""

    @classmethod
    def peek(cls, tokens):
        token = tokens.pushPosition()
        if (token and token.isSymbol()):
            if ('long' == token.text):
                # Optionally absorb a second 'long' ("long long").
                token = tokens.pushPosition()
                tokens.popPosition(token and token.isSymbol('long'))
                return tokens.popPosition(True)
            return tokens.popPosition('short' == token.text)
        return tokens.popPosition(False)

    def __init__(self, tokens):
        Production.__init__(self, tokens)
        # _space preserves the exact whitespace between the two 'long's.
        self._space = None
        token = tokens.next()
        if ('long' == token.text):
            self.type = 'long'
            token = tokens.sneakPeek()
            if (token and token.isSymbol('long')):
                self._space = self._whitespace(tokens)
                self.type += ' ' + tokens.next().text
        else:
            self.type = token.text
        self._didParse(tokens, False)

    def _unicode(self):
        if (self._space):
            # Re-insert the original whitespace between 'long long'.
            return self._space.join(self.type.split(' '))
        return self.type

    def _markup(self, generator):
        if (self._space):
            keywords = self.type.split(' ')
            generator.addKeyword(keywords[0])
            generator.addText(self._space)
            generator.addKeyword(keywords[1])
        else:
            generator.addKeyword(self.type)
        return self

    def __repr__(self):
        return '[IntegerType: ' + self.type + ']'
class UnsignedIntegerType(Production):    # "unsigned" IntegerType | IntegerType
    """Integer type with an optional leading 'unsigned' modifier."""
    @classmethod
    def peek(cls, tokens):
        if (IntegerType.peek(tokens)):
            return True
        tokens.pushPosition(False)
        if (Symbol.peek(tokens, 'unsigned')):
            return tokens.popPosition(IntegerType.peek(tokens))
        return tokens.popPosition(False)
    def __init__(self, tokens):
        Production.__init__(self, tokens)
        # 'unsigned' keyword Symbol, or None when absent.
        self.unsigned = Symbol(tokens, 'unsigned') if (Symbol.peek(tokens, 'unsigned')) else None
        self.type = IntegerType(tokens)
        self._didParse(tokens, False)
    def _unicode(self):
        return (unicode(self.unsigned) + self.type._unicode()) if (self.unsigned) else self.type._unicode()
    def _markup(self, generator):
        if (self.unsigned):
            self.unsigned.markup(generator)
        return self.type._markup(generator)
    def __repr__(self):
        return '[UnsignedIntegerType: ' + ('[unsigned]' if (self.unsigned) else '') + repr(self.type) + ']'
class FloatType(Production):    # "float" | "double"
    """Floating point type name: 'float' or 'double'."""

    @classmethod
    def peek(cls, tokens):
        # Non-consuming lookahead for a float type keyword.
        token = tokens.pushPosition()
        matched = token and (token.isSymbol('float') or token.isSymbol('double'))
        return tokens.popPosition(matched)

    def __init__(self, tokens):
        Production.__init__(self, tokens)
        self.type = tokens.next().text
        self._didParse(tokens, False)

    def _unicode(self):
        return self.type

    def _markup(self, generator):
        generator.addKeyword(self.type)
        return self

    def __repr__(self):
        return '[FloatType: ' + self.type.encode('ascii', 'replace') + ']'
class UnrestrictedFloatType(Production):    # "unrestricted" FloatType | FloatType
    """Float type with an optional leading 'unrestricted' modifier."""
    @classmethod
    def peek(cls, tokens):
        if (FloatType.peek(tokens)):
            return True
        tokens.pushPosition(False)
        if (Symbol.peek(tokens, 'unrestricted')):
            return tokens.popPosition(FloatType.peek(tokens))
        return tokens.popPosition(False)
    def __init__(self, tokens):
        Production.__init__(self, tokens)
        # 'unrestricted' keyword Symbol, or None when absent.
        self.unrestricted = Symbol(tokens, 'unrestricted') if (Symbol.peek(tokens, 'unrestricted')) else None
        self.type = FloatType(tokens)
        self._didParse(tokens, False)
    def _unicode(self):
        return (unicode(self.unrestricted) + unicode(self.type)) if (self.unrestricted) else unicode(self.type)
    def _markup(self, generator):
        if (self.unrestricted):
            self.unrestricted.markup(generator)
        return self.type._markup(generator)
    def __repr__(self):
        return '[UnrestrictedFloatType: ' + ('[unrestricted]' if (self.unrestricted) else '') + repr(self.type) + ']'
class PrimitiveType(Production):    # UnsignedIntegerType | UnrestrictedFloatType | "boolean" | "byte" | "octet"
    """Primitive type.

    self.type is either a nested Production (integer/float types) or a
    plain keyword string ('boolean', 'byte', 'octet').
    """
    @classmethod
    def peek(cls, tokens):
        if (UnsignedIntegerType.peek(tokens) or UnrestrictedFloatType.peek(tokens)):
            return True
        token = tokens.pushPosition()
        if (token and token.isSymbol()):
            return tokens.popPosition(('boolean' == token.text) or ('byte' == token.text) or ('octet' == token.text))
        return tokens.popPosition(False)
    def __init__(self, tokens):
        Production.__init__(self, tokens)
        if (UnsignedIntegerType.peek(tokens)):
            self.type = UnsignedIntegerType(tokens)
        elif (UnrestrictedFloatType.peek(tokens)):
            self.type = UnrestrictedFloatType(tokens)
        else:
            # Simple keyword type: store the raw text.
            self.type = tokens.next().text
        self._didParse(tokens, False)
    def _unicode(self):
        if (isinstance(self.type, basestring)):
            return unicode(self.type)
        return self.type._unicode()
    def _markup(self, generator):
        if (isinstance(self.type, basestring)):
            generator.addKeyword(self.type)
            return self
        return self.type._markup(generator)
    def __repr__(self):
        return '[PrimitiveType: ' + repr(self.type) + ']'
class ConstType(Production):    # PrimitiveType [Null] | identifier [Null]
    """Type of a constant: a primitive type or an identifier type name,
    optionally followed by a '?' nullability marker (stored in self.null).
    """
    @classmethod
    def peek(cls, tokens):
        if (PrimitiveType.peek(tokens)):
            Symbol.peek(tokens, '?')
            return True
        token = tokens.pushPosition()
        if (token and token.isIdentifier()):
            Symbol.peek(tokens, '?')
            return tokens.popPosition(True)
        return tokens.popPosition(False)
    def __init__(self, tokens):
        Production.__init__(self, tokens)
        if (PrimitiveType.peek(tokens)):
            self.type = PrimitiveType(tokens)
        else:
            # Identifier type name, stored as a raw string.
            self.type = tokens.next().text
        self.null = Symbol(tokens, '?', False) if (Symbol.peek(tokens, '?')) else None
        self._didParse(tokens)
    def _unicode(self):
        return unicode(self.type) + (unicode(self.null) if (self.null) else '')
    def _markup(self, generator):
        if (isinstance(self.type, basestring)):
            generator.addTypeName(self.type)
            # Fix: mark up the '?' Symbol properly and return self, mirroring
            # the primitive-type branch below. Previously the Symbol object
            # itself was passed to generator.addText (which expects text) and
            # returned, causing markup() to re-emit the null Symbol's
            # tail/semicolon/trailing-space fields instead of this node's.
            if (self.null):
                self.null.markup(generator)
            return self
        generator.addPrimitiveType(self.type)
        if (self.null):
            self.null.markup(generator)
        return self
    def __repr__(self):
        return '[ConstType: ' + repr(self.type) + (' [null]' if (self.null) else '') + ']'
class FloatLiteral(Production):    # float | "-Infinity" | "Infinity" | "NaN"
    """Floating point literal, including the special named values."""
    @classmethod
    def peek(cls, tokens):
        token = tokens.pushPosition()
        if (token and token.isFloat()):
            return tokens.popPosition(True)
        return tokens.popPosition(token and token.isSymbol(('-Infinity', 'Infinity', 'NaN')))
    def __init__(self, tokens):
        Production.__init__(self, tokens)
        self.value = tokens.next().text
        self._didParse(tokens)
    def _unicode(self):
        return self.value
    def _markup(self, generator):
        # Named special values ('Infinity', 'NaN', ...) are keywords;
        # numeric literals are plain text.
        if (self.value in tokenizer.Tokenizer.SymbolIdents):
            generator.addKeyword(self.value)
        else:
            generator.addText(self.value)
        return self
    def __repr__(self):
        return '[FloatLiteral: ' + self.value.encode('ascii', 'replace') + ']'
class ConstValue(Production):    # "true" | "false" | FloatLiteral | integer | "null"
    """Constant value.

    self.value is a FloatLiteral production or a plain token string
    ('true', 'false', 'null', or an integer literal).
    """
    @classmethod
    def peek(cls, tokens):
        if (FloatLiteral.peek(tokens)):
            return True
        token = tokens.pushPosition()
        return tokens.popPosition(token and (token.isSymbol(('true', 'false', 'null')) or token.isInteger()))
    def __init__(self, tokens):
        Production.__init__(self, tokens)
        if (FloatLiteral.peek(tokens)):
            self.value = FloatLiteral(tokens)
        else:
            self.value = tokens.next().text
        self._didParse(tokens)
    def _unicode(self):
        return unicode(self.value)
    def _markup(self, generator):
        if (isinstance(self.value, basestring)):
            # Keyword values get keyword styling; integers are plain text.
            if (self.value in tokenizer.Tokenizer.SymbolIdents):
                generator.addKeyword(self.value)
            else:
                generator.addText(self.value)
            return self
        return self.value._markup(generator)
    def __repr__(self):
        return '[ConstValue: ' + repr(self.value) + ']'
class EnumValue(Production):    # string
    """A single string value inside an enum definition."""

    @classmethod
    def peek(cls, tokens):
        # Non-consuming lookahead: next token must be a string literal.
        token = tokens.pushPosition()
        return tokens.popPosition(token and token.isString())

    def __init__(self, tokens):
        Production.__init__(self, tokens)
        self.value = tokens.next().text
        self._didParse(tokens)

    def _unicode(self):
        return self.value

    def _markup(self, generator):
        generator.addEnumValue(self.value)
        return self

    def __repr__(self):
        return '[EnumValue: ' + self.value.encode('ascii', 'replace') + ']'
class EnumValueList(Production):    # EnumValue ["," EnumValue]... [","]
    """Comma-separated list of enum string values; a trailing comma before
    the closing '}' is tolerated (and recorded as ignored).
    """
    @classmethod
    def peek(cls, tokens):
        tokens.pushPosition(False)
        if (EnumValue.peek(tokens)):
            token = tokens.pushPosition()
            if (token and token.isSymbol(',')):
                token = tokens.sneakPeek()
                if (token and token.isSymbol('}')):
                    # Trailing comma directly before '}' is allowed.
                    return tokens.popPosition(tokens.popPosition(True))
                return tokens.popPosition(tokens.popPosition(EnumValueList.peek(tokens)))
            tokens.popPosition(False)
            return tokens.popPosition(True)
        return tokens.popPosition(False)
    def __init__(self, tokens):
        Production.__init__(self, tokens)
        self.values = []
        # Comma Symbols are kept alongside values for exact round-tripping.
        self._commas = []
        while (tokens.hasTokens()):
            self.values.append(EnumValue(tokens))
            if (Symbol.peek(tokens, ',')):
                self._commas.append(Symbol(tokens, ','))
                token = tokens.sneakPeek()
                if ((not token) or token.isSymbol('}')):
                    # Trailing comma before '}' (or end of input): stop,
                    # noting that the comma was ignored.
                    tokens.didIgnore(',')
                    break
                continue
            break
        self._didParse(tokens)
    def _markup(self, generator):
        for value, _comma in itertools.izip_longest(self.values, self._commas, fillvalue = ''):
            value.markup(generator)
            if (_comma):
                _comma.markup(generator)
        return self
    def _unicode(self):
        return ''.join([unicode(value) + unicode(comma) for value, comma in itertools.izip_longest(self.values, self._commas, fillvalue = '')])
    def __repr__(self):
        return '[EnumValueList: ' + ''.join([repr(value) for value in self.values]) + ']'
class TypeSuffix(Production):    # "[" "]" [TypeSuffix] | "?" [TypeSuffixStartingWithArray]
    """Array ('[]') and/or nullability ('?') markers trailing a type.

    Suffixes chain recursively; after a '?' only an array suffix may
    follow (no '??'), hence TypeSuffixStartingWithArray.
    """
    @classmethod
    def peek(cls, tokens):
        tokens.pushPosition(False)
        if (Symbol.peek(tokens, '[')):
            if (Symbol.peek(tokens, ']')):
                # Optional further suffix after '[]'.
                TypeSuffix.peek(tokens)
                return tokens.popPosition(True)
        elif (Symbol.peek(tokens, '?')):
            TypeSuffixStartingWithArray.peek(tokens)
            return tokens.popPosition(True)
        return tokens.popPosition(False)
    def __init__(self, tokens):
        Production.__init__(self, tokens)
        if (Symbol.peek(tokens, '[')):
            # Array form: '[' ']' with an optional chained suffix.
            self._openBracket = Symbol(tokens, '[')
            self._closeBracket = Symbol(tokens, ']', False)
            self.suffix = TypeSuffix(tokens) if (TypeSuffix.peek(tokens)) else None
            self.array = True
            self.null = None
        else:
            # Nullable form: '?' with an optional array suffix after it.
            self.null = Symbol(tokens, '?', False)
            self.suffix = TypeSuffixStartingWithArray(tokens) if (TypeSuffixStartingWithArray.peek(tokens)) else None
            self.array = False
            self._openBracket = None
            self._closeBracket = None
        self._didParse(tokens, False)
    def _unicode(self):
        output = (unicode(self._openBracket) + unicode(self._closeBracket)) if (self.array) else ''
        output += unicode(self.null) if (self.null) else ''
        return output + (unicode(self.suffix) if (self.suffix) else '')
    def __repr__(self):
        output = '[TypeSuffix: ' + ('[array] ' if (self.array) else '') + ('[null] ' if (self.null) else '')
        return output + (repr(self.suffix) if (self.suffix) else '') + ']'
class TypeSuffixStartingWithArray(Production):    # "[" "]" [TypeSuffix]
    """Type suffix that must begin with array brackets ('[]'); used after
    a '?' where another '?' would be invalid.
    """
    @classmethod
    def peek(cls, tokens):
        tokens.pushPosition(False)
        if (Symbol.peek(tokens, '[')):
            if (Symbol.peek(tokens, ']')):
                # Optional further suffix after '[]'.
                TypeSuffix.peek(tokens)
                return tokens.popPosition(True)
        return tokens.popPosition(False)
    def __init__(self, tokens):
        Production.__init__(self, tokens)
        self._openBracket = Symbol(tokens, '[')
        self._closeBracket = Symbol(tokens, ']', False)
        self.suffix = TypeSuffix(tokens) if (TypeSuffix.peek(tokens)) else None
        self._didParse(tokens, False)
    def _unicode(self):
        return unicode(self._openBracket) + unicode(self._closeBracket) + (unicode(self.suffix) if (self.suffix) else '')
    def __repr__(self):
        return '[TypeSuffixStartingWithArray: ' + (repr(self.suffix) if (self.suffix) else '') + ']'
class SingleType(Production): # NonAnyType | "any" [TypeSuffixStartingWithArray]
@classmethod
def peek(cls, tokens):
if (NonAnyType.peek(tokens)):
return True
tokens.pushPosition(False)
if (Symbol.peek(tokens, 'any')):
TypeSuffixStartingWithArray.peek(tokens)
return tokens.popPosition(True)
| |
import torch
import numpy as np
import random
import torch.nn.functional as F
from torch import nn
from utils.nodepiece_tokenizer import NodePiece_Tokenizer
from torch.nn import TransformerEncoderLayer, TransformerEncoder
from tqdm import tqdm
from collections import defaultdict
from typing import Optional
from torch_geometric.data import Data
class NodePieceEncoder(nn.Module):
def __init__(self, config: dict, tokenizer: NodePiece_Tokenizer, rel_embs: nn.Embedding, graph: Data):
    """NodePiece-style entity encoder.

    Each entity is represented by a fixed-size "hash" of anchor tokens
    (plus distances, and optionally sampled 1-hop relations) which is
    pooled into a single embedding.

    config: hyper-parameter dict (POOLER, NEAREST, SAMPLE_RELS, ...)
    tokenizer: provides per-entity anchor paths (vocab) and token ids
    rel_embs: shared relation embedding table
    graph: graph data object with edge_index / edge_type
    """
    super(NodePieceEncoder, self).__init__()
    self.tokenizer = tokenizer
    self.pooler = config['POOLER']
    self.policy = "sum"
    # Relational-path encoding is disabled in this configuration.
    self.use_rels = False
    self.nearest = config['NEAREST']
    self.use_neighbor_rels = False
    self.sample_rels = config['SAMPLE_RELS']
    self.graph = graph
    if not self.use_rels:
        self.policy = "sum"
    self.random_hashes = config['RANDOM_HASHES']
    self.subbatch = config['SUBBATCH']
    self.embedding_dim = config['EMBEDDING_DIM']
    self.real_embedding_dim = self.embedding_dim // 2
    self.max_seq_len = config['MAX_PATH_LEN']
    self.sample_paths = config['MAX_PATHS']
    self.use_distances = config['USE_DISTANCES']
    self.hid_dim = config['T_HIDDEN']
    self.drop_prob = config['T_DROP']
    self.num_heads = config['T_HEADS']
    self.num_layers = config['T_LAYERS']
    self.num_entities = config['NUM_ENTITIES']
    self.num_relations = config['NUM_RELATIONS']
    self.device = config['DEVICE']
    self.no_anc = config['NO_ANC']
    # ---- pooling network over the per-entity token set ----
    if self.pooler == "mlp":
        self.set_enc = nn.Sequential(
            nn.Linear(self.embedding_dim if self.policy != "cat" else 2 * self.embedding_dim, self.hid_dim), nn.Dropout(self.drop_prob),
            nn.ReLU(),
            nn.Linear(self.hid_dim, self.hid_dim), nn.Dropout(self.drop_prob), nn.ReLU(),
            nn.Linear(self.hid_dim, self.hid_dim),
        )
        self.set_dec = nn.Sequential(
            nn.Linear(self.hid_dim, self.hid_dim), nn.Dropout(self.drop_prob), nn.ReLU(),
            nn.Linear(self.hid_dim, self.hid_dim), nn.Dropout(self.drop_prob), nn.ReLU(),
            nn.Linear(self.hid_dim, self.embedding_dim)
        )
    elif self.pooler == "cat":
        # Concatenation pooler: input width depends on whether anchor
        # tokens are used at all (no_anc drops them, keeping only rels).
        self.set_enc = nn.Sequential(
            nn.Linear(self.embedding_dim * (self.sample_paths + self.sample_rels), self.embedding_dim * 2), nn.Dropout(self.drop_prob),
            nn.ReLU(),
            # nn.Linear(embedding_dim * 4, embedding_dim * 2), nn.Dropout(drop_prob), nn.ReLU(),
            nn.Linear(self.embedding_dim * 2, self.embedding_dim)
        ) if not self.no_anc else nn.Sequential(
            nn.Linear(self.embedding_dim * self.sample_rels, self.embedding_dim * 2), nn.Dropout(self.drop_prob),
            nn.ReLU(),
            nn.Linear(self.embedding_dim * 2, self.embedding_dim))
    elif self.pooler == "trf":
        encoder_layer = TransformerEncoderLayer(
            d_model=self.embedding_dim if self.policy != "cat" else 2 * self.embedding_dim,
            nhead=self.num_heads if self.policy != "cat" else 2 * self.num_heads,
            dim_feedforward=self.hid_dim,
            dropout=self.drop_prob,
        )
        self.set_enc = TransformerEncoder(encoder_layer=encoder_layer, num_layers=self.num_layers)
        if self.policy == "cat":
            self.linear = nn.Linear(2 * self.embedding_dim, self.embedding_dim)
    self.rel_gnn = False
    # Register a NOTHING token id at the end of the tokenizer vocabulary.
    self.tokenizer.token2id[self.tokenizer.NOTHING_TOKEN] = len(tokenizer.token2id)
    self.anchor_embeddings = nn.Embedding(len(tokenizer.token2id), embedding_dim=self.embedding_dim,
                                          padding_idx=self.tokenizer.token2id[tokenizer.PADDING_TOKEN])
    self.relation_embeddings = rel_embs
    self.dist_emb = nn.Embedding(self.max_seq_len, embedding_dim=self.embedding_dim)
    if self.random_hashes == 0:
        # Deterministic hashes derived from the tokenizer's anchor paths.
        if not self.nearest:
            # subsample paths, need to align them with distances
            sampled_paths = {
                entity: random.sample(paths, k=min(self.sample_paths, len(paths)))
                for entity, paths in self.tokenizer.vocab.items()
            }
        elif self.nearest:
            # sort paths by length first and take K of them
            prev_max_len = max(len(path) for k, v in self.tokenizer.vocab.items() for path in v)
            sampled_paths = {
                entity: sorted(paths, key=lambda x: len(x))[:min(self.sample_paths, len(paths))]
                for entity, paths in self.tokenizer.vocab.items()
            }
            # Shorter paths may allow a smaller max sequence length.
            self.max_seq_len = max(len(path) for k, v in sampled_paths.items() for path in v)
            print(
                f"Changed max seq len from {prev_max_len} to {self.max_seq_len} after keeping {self.sample_paths} shortest paths")
        # Per-entity anchor ids (first hop of each path), padded to
        # sample_paths entries.
        hashes = [
            [self.tokenizer.token2id[path[0]] for path in paths] + [
                self.tokenizer.token2id[tokenizer.PADDING_TOKEN]] * (self.sample_paths - len(paths))
            for entity, paths in sampled_paths.items()
        ]
        # Path lengths (hops to anchor); 0 marks padded slots.
        distances = [
            [len(path) - 1 for path in paths] + [0] * (self.sample_paths - len(paths))
            for entity, paths in sampled_paths.items()
        ]
        self.hashes = torch.tensor(hashes, dtype=torch.long, device=self.device)
        self.distances = torch.tensor(distances, dtype=torch.long, device=self.device)
    else:
        # in this case, we bypass distances and won't use relations in the encoder
        self.anchor_embeddings = nn.Embedding(self.random_hashes, embedding_dim=self.embedding_dim)
        hashes = [
            random.sample(list(range(self.random_hashes)), self.sample_paths)
            for i in range(self.num_entities)
        ]
        # _PRIMES = [
        #     31, 43, 59, 61, 73, 97, 103, 113, 137, 149, 157, 173, 181, 193, 211, 223
        # ]
        # self.num_buckets = self.random_hashes
        # self.anchor_embeddings = nn.Embedding(self.num_buckets * self.num_hashes, embedding_dim=embedding_dim // self.num_hashes)
        # self.hash_projector = nn.Sequential(
        #     nn.Linear(self.embedding_dim, self.embedding_dim),
        #     nn.ReLU(),
        #     nn.Linear(self.embedding_dim, self.embedding_dim)
        # )
        # primes = _PRIMES[:self.num_hashes]
        # hashes = [
        #     [(((i+1) * prime) % self.num_buckets) + k*self.num_buckets for k, prime in enumerate(primes)]
        #     for i in range(triples.num_entities)
        # ]
        self.hashes = torch.tensor(hashes, dtype=torch.long, device=self.device)
        self.distances = torch.zeros((self.num_entities, self.sample_paths), dtype=torch.long,
                                     device=self.device)
    if self.use_neighbor_rels:
        # create a feature matrix where rows are used relations in a 1-hop neighbourhood around each node
        # NOTE(review): self.triples_factory is never assigned on this class
        # (the graph lives in self.graph); this branch is currently dead
        # (use_neighbor_rels is hard-coded False) but would raise
        # AttributeError if enabled — confirm intended data source.
        unique_sp = self.triples_factory.mapped_triples[:, [0, 1]].unique(dim=0, return_counts=False)
        self.relation_features = torch.zeros((self.num_entities, self.num_relations * 2),
                                             dtype=torch.float, requires_grad=False,
                                             device=self.device)  # features matrix
        self.relation_features[unique_sp[:, 0], unique_sp[:, 1]] = 1.0  # counts.float().to(self.device)
        # self.relation_features = torch.nn.functional.normalize(self.relation_features, p=1, dim=1)
        self.projection = nn.Sequential(
            nn.Linear(self.embedding_dim + self.num_relations, self.hid_dim),
            nn.ReLU(),
            nn.Linear(self.hid_dim, self.embedding_dim)
        )
    if self.sample_rels > 0:
        # Sample up to sample_rels distinct outgoing relation types per
        # entity; pad_idx marks unused slots.
        pad_idx = self.num_relations * 2
        e2r = defaultdict(set)
        edge_index = self.graph.edge_index
        edge_type = self.graph.edge_type
        for i, src_node in enumerate(edge_index[0]):
            e2r[src_node.item()].add(edge_type[i].item())
        len_stats = [len(v) for k, v in e2r.items()]
        print(
            f"Unique relations per node - min: {min(len_stats)}, avg: {np.mean(len_stats)}, 66th perc: {np.percentile(sorted(len_stats), 66)}, max: {max(len_stats)} ")
        unique_1hop_relations = [
            random.sample(e2r[i], k=min(self.sample_rels, len(e2r[i]))) + [pad_idx] * (
                self.sample_rels - min(len(e2r[i]), self.sample_rels))
            for i in range(self.num_entities)
        ]
        self.unique_1hop_relations = torch.tensor(unique_1hop_relations, dtype=torch.long, device=self.device)
def reset_parameters(self):
    """Re-initialize all learnable parameters of the encoder."""

    def _reset_children(container):
        # Recursively reset every submodule that supports it.
        for child in container.modules():
            if child is self:
                continue
            if hasattr(child, "reset_parameters"):
                child.reset_parameters()

    if self.pooler != "avg":
        _reset_children(self.set_enc)
        if self.pooler == "mlp":
            _reset_children(self.set_dec)
    if self.use_neighbor_rels:
        _reset_children(self.projection)

    torch.nn.init.xavier_uniform_(self.anchor_embeddings.weight)
    torch.nn.init.xavier_uniform_(self.dist_emb.weight)
    if self.use_rels == "joint":
        torch.nn.init.xavier_uniform_(self.node_types.weight)

    if self.random_hashes == 0:
        # Keep the padding-anchor and zero-distance rows at exactly zero
        # so padded slots contribute nothing to the pooled embedding.
        with torch.no_grad():
            self.anchor_embeddings.weight[self.tokenizer.token2id[self.tokenizer.PADDING_TOKEN]] = torch.zeros(self.embedding_dim)
            self.dist_emb.weight[0] = torch.zeros(self.embedding_dim)
def pool_anchors(self, anc_embs: torch.FloatTensor, mask: Optional[torch.BoolTensor] = None) -> torch.FloatTensor:
    """Aggregate per-anchor embeddings into one entity embedding.

    input shape: (bs, num_anchors, emb_dim)
    output shape: (bs, emb_dim)
    """
    pooler = self.pooler
    if pooler == "set":
        return self.set_enc(anc_embs)
    if pooler == "cat":
        flat = anc_embs.view(anc_embs.shape[0], -1)
        # With a single path there is nothing to concatenate-project.
        return self.set_enc(flat) if self.sample_paths != 1 else flat
    if pooler == "trf" or pooler == "moe":
        seq_first = anc_embs.transpose(1, 0)
        if self.use_rels != "joint":
            encoded = self.set_enc(seq_first)  # (seq_len, bs, dim)
        else:
            encoded = self.set_enc(seq_first, src_key_padding_mask=mask)
        pooled = encoded.mean(dim=0)  # (bs, dim)
        if self.policy == "cat":
            pooled = self.linear(pooled)
        return pooled
    if pooler == "perc":
        return self.set_enc(anc_embs)
    if pooler == "mlp":
        return self.set_dec(self.set_enc(anc_embs).mean(-2))
    # Default ("avg"): plain mean over the anchor axis.
    return anc_embs.mean(dim=1)
def encode_rels(self, rel_hashes: torch.LongTensor, weights: Optional[torch.FloatTensor] = None) -> torch.FloatTensor:
    """Encode relational path hashes into per-path embeddings.

    rel_hashes: (bs, num_anchors, max_seq_len) relation ids
    weights: optional per-position weights, same shape
    returns: (bs, num_paths, embedding_dim)
    """
    # input: (bs, num_anchors, max_seq_len)
    bs, num_paths, seq_len = rel_hashes.shape
    rel_hashes = rel_hashes.view(bs * num_paths, seq_len)
    if weights is not None:
        weights = weights.view(bs * num_paths, seq_len)
    # Fix: the relation count is stored on this class (config['NUM_RELATIONS']);
    # self.triples_factory is never assigned anywhere in this class, so the
    # previous `self.triples_factory.num_relations` raised AttributeError
    # whenever this path ran.
    pad_mask = rel_hashes != self.num_relations
    rel_hashes = self.relation_embeddings(rel_hashes)  # (bs*num_paths, seq_len, hid_dim)
    if self.use_rels == "lstm" or self.use_rels == "gru":
        # Recurrent encoder: project the last hidden state.
        rel_hashes, _ = self.rel_enc(rel_hashes)  # (bs, seq_len, hid_dim)
        rel_hashes = self.rel_proj(rel_hashes[:, -1, :])  # (bs, emb_dim)
    elif self.use_rels == "mlp":
        # Accumulate MLP-encoded consecutive pairs along the path.
        accumulator = torch.zeros((rel_hashes.shape[0], rel_hashes.shape[-1]), dtype=torch.float, device=rel_hashes.device)
        enc, dec = self.rel_enc[0], self.rel_enc[1]
        for i in range(seq_len-1):
            pair = rel_hashes[:, i:i+2].view(-1, 2 * self.embedding_dim)  # (bs*num_anc, 2 * 100)
            pair = enc(pair)
            accumulator += pair
        rel_hashes = dec(accumulator)
    elif self.use_rels == "avg":
        # Mean over non-padded positions (optionally weighted).
        if weights is None:
            rel_hashes = (rel_hashes * pad_mask.float().unsqueeze(-1)).sum(-2) / pad_mask.float().sum(-1).clamp_min(1.0).unsqueeze(-1)
        else:
            rel_hashes = (rel_hashes * weights.unsqueeze(-1)).sum(-2) / pad_mask.float().sum(-1).clamp_min(1.0).unsqueeze(-1)
    elif self.use_rels == "avg+":
        # Mean pooling with learned position embeddings mixed in first.
        pos = torch.arange(seq_len, dtype=torch.long, device=rel_hashes.device).repeat(bs * num_paths, 1)
        pos = self.rel_pos(pos)
        rel_hashes = torch.cat([rel_hashes, pos], dim=-1)
        rel_hashes = self.rel_enc(rel_hashes)
        rel_hashes = (rel_hashes * pad_mask.float().unsqueeze(-1)).sum(-2) / pad_mask.float().sum(-1).clamp_min(1.0).unsqueeze(-1)
    elif self.use_rels == "trf":
        # Transformer over non-empty paths only; empty paths keep zeros.
        temp = torch.zeros((rel_hashes.shape[1], rel_hashes.shape[0], rel_hashes.shape[2]), dtype=torch.float, device=rel_hashes.device)
        positions = torch.arange(0, seq_len, dtype=torch.long, device=self.device)
        positions = self.rel_pos(positions)
        nnz_paths = rel_hashes[pad_mask.sum(1) > 0]
        nnz_paths += positions
        nnz_paths = self.rel_enc(nnz_paths.transpose(1, 0), src_key_padding_mask=~pad_mask[pad_mask.sum(1) > 0])  # (seq_len, bs, dim)
        nnz_paths[torch.isnan(nnz_paths)] = 1.0  # for numerical stability of empty paths with NOTHING tokens
        nnz_paths[torch.isinf(nnz_paths)] = 1.0  # for numerical stability of empty paths with NOTHING tokens
        temp[:, pad_mask.sum(1) > 0, :] = nnz_paths
        rel_hashes = temp
        rel_hashes = (rel_hashes * pad_mask.t().float().unsqueeze(-1)).sum(0) / pad_mask.float().sum(1).clamp_min(1.0).unsqueeze(-1)
        #rel_hashes = rel_hashes.mean(0)
    elif self.use_rels == "int":
        # replace padding 0's with 1's to prevent nans in the rotate computation
        rel_hashes[~pad_mask] = 1.0
        start = rel_hashes[:, 0, :]
        for i in range(1, seq_len):
            target = rel_hashes[:, i, :]
            interaction = self.pairwise_interaction_function(start.view(-1, self.real_embedding_dim, 2), target.view(-1, self.real_embedding_dim, 2))
            start = interaction
        rel_hashes = interaction
    return rel_hashes.view(bs, num_paths, self.embedding_dim)
def encode_by_index(self, entities: torch.LongTensor) -> torch.FloatTensor:
hashes, dists = self.hashes[entities], self.distances[entities]
anc_embs = self.anchor_embeddings(hashes)
mask = None
if self.use_distances:
dist_embs = self.dist_emb(dists)
anc_embs += dist_embs
if self.no_anc:
anc_embs = torch.tensor([], device=self.device)
if self.use_rels:
rel_hashes = self.rel_hash[entities] # (bs, num_relations)
path_weights = self.path_weights[entities] if self.use_mc else None
# if self.rel_gnn:
# self.relation_embeddings.weight.data = self.gnn_encoder(self.relation_embeddings.weight, self.edge_index)
if self.use_rels | |
>>> print(s.to_markdown(tablefmt="grid"))
+----+----------+
| | animal |
+====+==========+
| 0 | elk |
+----+----------+
| 1 | pig |
+----+----------+
| 2 | dog |
+----+----------+
| 3 | quetzal |
+----+----------+
"""
return self.to_frame().to_markdown(
buf, mode, index, storage_options=storage_options, **kwargs
)
# ----------------------------------------------------------------------
def items(self) -> Iterable[tuple[Hashable, Any]]:
    """
    Lazily iterate over (index, value) tuples.

    This method returns an iterable tuple (index, value). This is
    convenient if you want to create a lazy iterator.

    Returns
    -------
    iterable
        Iterable of tuples containing the (index, value) pairs from a
        Series.

    See Also
    --------
    DataFrame.items : Iterate over (column name, Series) pairs.
    DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.

    Examples
    --------
    >>> s = pd.Series(['A', 'B', 'C'])
    >>> for index, value in s.items():
    ...     print(f"Index : {index}, Value : {value}")
    Index : 0, Value : A
    Index : 1, Value : B
    Index : 2, Value : C
    """
    # Pair labels with values lazily; zip never materializes the Series.
    index_iter = iter(self.index)
    value_iter = iter(self)
    return zip(index_iter, value_iter)
@Appender(items.__doc__)
def iteritems(self) -> Iterable[tuple[Hashable, Any]]:
    # Legacy alias of items(); documentation is shared via @Appender.
    return self.items()
# ----------------------------------------------------------------------
# Misc public methods
def keys(self) -> Index:
    """
    Return alias for index.

    Provided for dict-like symmetry with :meth:`Series.items`.

    Returns
    -------
    Index
        Index of the Series.
    """
    return self.index
def to_dict(self, into=dict):
    """
    Convert Series to {label -> value} dict or dict-like object.

    Parameters
    ----------
    into : class, default dict
        The collections.abc.Mapping subclass to use as the return
        object. Can be the actual class or an empty
        instance of the mapping type you want. If you want a
        collections.defaultdict, you must pass it initialized.

    Returns
    -------
    collections.abc.Mapping
        Key-value representation of Series.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.to_dict()
    {0: 1, 1: 2, 2: 3, 3: 4}
    >>> from collections import OrderedDict, defaultdict
    >>> s.to_dict(OrderedDict)
    OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
    >>> dd = defaultdict(list)
    >>> s.to_dict(dd)
    defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
    """
    # GH16122
    mapping_cls = com.standardize_mapping(into)
    # Box numpy scalars into native Python types on the way out.
    pairs = ((k, maybe_box_native(v)) for k, v in self.items())
    return mapping_cls(pairs)
def to_frame(self, name=None) -> DataFrame:
    """
    Convert Series to DataFrame.

    Parameters
    ----------
    name : object, default None
        The passed name should substitute for the series name (if it has
        one).

    Returns
    -------
    DataFrame
        DataFrame representation of Series.

    Examples
    --------
    >>> s = pd.Series(["a", "b", "c"],
    ...               name="vals")
    >>> s.to_frame()
      vals
    0    a
    1    b
    2    c
    """
    # Keep the existing name unless the caller supplied a replacement.
    data = self if name is None else {name: self}
    return self._constructor_expanddim(data)
def _set_name(self, name, inplace=False) -> Series:
    """
    Set the Series name.

    Parameters
    ----------
    name : str
    inplace : bool
        Whether to modify `self` directly or return a copy.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    # Operate on self for in-place renames, otherwise on a fresh copy.
    target = self if inplace else self.copy()
    target.name = name
    return target
@Appender(
    """
    Examples
    --------
    >>> ser = pd.Series([390., 350., 30., 20.],
    ...                 index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed")
    >>> ser
    Falcon 390.0
    Falcon 350.0
    Parrot 30.0
    Parrot 20.0
    Name: Max Speed, dtype: float64
    >>> ser.groupby(["a", "b", "a", "b"]).mean()
    a 210.0
    b 185.0
    Name: Max Speed, dtype: float64
    >>> ser.groupby(level=0).mean()
    Falcon 370.0
    Parrot 25.0
    Name: Max Speed, dtype: float64
    >>> ser.groupby(ser > 100).mean()
    Max Speed
    False 25.0
    True 370.0
    Name: Max Speed, dtype: float64

    **Grouping by Indexes**

    We can groupby different levels of a hierarchical index
    using the `level` parameter:

    >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
    ...           ['Captive', 'Wild', 'Captive', 'Wild']]
    >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
    >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed")
    >>> ser
    Animal Type
    Falcon Captive 390.0
    Wild 350.0
    Parrot Captive 30.0
    Wild 20.0
    Name: Max Speed, dtype: float64
    >>> ser.groupby(level=0).mean()
    Animal
    Falcon 370.0
    Parrot 25.0
    Name: Max Speed, dtype: float64
    >>> ser.groupby(level="Type").mean()
    Type
    Captive 210.0
    Wild 185.0
    Name: Max Speed, dtype: float64

    We can also choose to include `NA` in group keys or not by defining
    `dropna` parameter, the default setting is `True`:

    >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan])
    >>> ser.groupby(level=0).sum()
    a 3
    b 3
    dtype: int64
    >>> ser.groupby(level=0, dropna=False).sum()
    a 3
    b 3
    NaN 3
    dtype: int64
    >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot']
    >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed")
    >>> ser.groupby(["a", "b", "a", np.nan]).mean()
    a 210.0
    b 350.0
    Name: Max Speed, dtype: float64
    >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()
    a 210.0
    b 350.0
    NaN 20.0
    Name: Max Speed, dtype: float64
    """
)
@Appender(generic._shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
    self,
    by=None,
    axis=0,
    level=None,
    as_index: bool = True,
    sort: bool = True,
    group_keys: bool = True,
    squeeze: bool | lib.NoDefault = no_default,
    observed: bool = False,
    dropna: bool = True,
) -> SeriesGroupBy:
    # Imported locally to avoid a circular import at module load time.
    from pandas.core.groupby.generic import SeriesGroupBy

    # `squeeze` is deprecated: warn only when the caller passed it
    # explicitly, then normalize the sentinel to a real bool.
    if squeeze is not no_default:
        warnings.warn(
            (
                "The `squeeze` parameter is deprecated and "
                "will be removed in a future version."
            ),
            FutureWarning,
            stacklevel=2,
        )
    else:
        squeeze = False

    if level is None and by is None:
        raise TypeError("You have to supply one of 'by' and 'level'")
    axis = self._get_axis_number(axis)

    # error: Argument "squeeze" to "SeriesGroupBy" has incompatible type
    # "Union[bool, NoDefault]"; expected "bool"
    return SeriesGroupBy(
        obj=self,
        keys=by,
        axis=axis,
        level=level,
        as_index=as_index,
        sort=sort,
        group_keys=group_keys,
        squeeze=squeeze,  # type: ignore[arg-type]
        observed=observed,
        dropna=dropna,
    )
# ----------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self, level=None):
    """
    Return number of non-NA/null observations in the Series.

    Parameters
    ----------
    level : int or level name, default None
        If the axis is a MultiIndex (hierarchical), count along a
        particular level, collapsing into a smaller Series.

    Returns
    -------
    int or Series (if level specified)
        Number of non-null values in the Series.

    See Also
    --------
    DataFrame.count : Count non-NA cells for each column or row.

    Examples
    --------
    >>> s = pd.Series([0.0, 1.0, np.nan])
    >>> s.count()
    2
    """
    if level is None:
        # Fast path: count non-missing values across the whole Series.
        return notna(self._values).sum().astype("int64")
    else:
        # Deprecated path: per-level counting on a MultiIndex.
        warnings.warn(
            "Using the level keyword in DataFrame and Series aggregations is "
            "deprecated and will be removed in a future version. Use groupby "
            "instead. ser.count(level=1) should use ser.groupby(level=1).count().",
            FutureWarning,
            stacklevel=2,
        )
        if not isinstance(self.index, MultiIndex):
            raise ValueError("Series.count level is only valid with a MultiIndex")

    index = self.index
    assert isinstance(index, MultiIndex)  # for mypy

    if isinstance(level, str):
        level = index._get_level_number(level)

    lev = index.levels[level]
    level_codes = np.array(index.codes[level], subok=False, copy=True)

    # Codes of -1 mark missing labels; map them onto a new extra level
    # position so they get their own bucket in the bincount below.
    mask = level_codes == -1
    if mask.any():
        level_codes[mask] = cnt = len(lev)
        lev = lev.insert(cnt, lev._na_value)

    # Count non-missing values per level code.
    obs = level_codes[notna(self._values)]
    out = np.bincount(obs, minlength=len(lev) or None)
    return self._constructor(out, index=lev, dtype="int64").__finalize__(
        self, method="count"
    )
def mode(self, dropna=True) -> Series:
"""
Return the mode(s) of the Series.
The mode is the value that appears most often. There can be multiple modes.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
Series
Modes of the Series in sorted order.
"""
# TODO: Add option for bins like value_counts()
return algorithms.mode(self, dropna=dropna)
def unique(self) -> ArrayLike:
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
Returns
-------
ndarray or ExtensionArray
The unique values returned as a NumPy array. See Notes.
See Also
--------
unique : Top-level unique method for any 1-d array-like object.
Index.unique : Return Index with unique values from an Index object.
Notes
-----
Returns the unique values as a NumPy array. In case of an
extension-array backed Series, a new
:class:`~api.extensions.ExtensionArray` of that type with just
the unique values is returned. This includes
* Categorical
* Period
* Datetime with Timezone
* Interval
* Sparse
* IntegerNA
See Examples section.
Examples
--------
>>> pd.Series([2, 1, 3, 3], name='A').unique()
array([2, 1, 3])
>>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')
... for _ in range(3)]).unique()
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[ns, US/Eastern]
An Categorical will return categories in the order of
appearance and with the same dtype.
>>> pd.Series(pd.Categorical(list('baabc'))).unique()
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),
... ordered=True)).unique()
['b', 'a', 'c']
Categories (3, object): ['a' < 'b' < 'c']
"""
return super().unique()
    # Typed overloads for ``drop_duplicates``: calling with ``inplace=True``
    # mutates the Series and returns ``None``; any other call returns a new
    # ``Series``.  The last overload covers a runtime-determined ``inplace``.
    @overload
    def drop_duplicates(self, keep=..., inplace: Literal[False] = ...) -> Series:
        ...
    @overload
    def drop_duplicates(self, keep, inplace: Literal[True]) -> None:
        ...
    @overload
    def drop_duplicates(self, *, inplace: Literal[True]) -> None:
        ...
    @overload
    def drop_duplicates(self, keep=..., inplace: bool = ...) -> Series | None:
        ...
def drop_duplicates(self, keep="first", inplace=False) -> Series | None:
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop | |
#!/usr/bin/python
from __future__ import print_function
#######################################################################
# GoodVibes.py #
# Evaluation of quasi-harmonic thermochemistry from Gaussian. #
# Partion functions are evaluated from vibrational frequencies #
# and rotational temperatures from the standard output. #
# The rigid-rotor harmonic oscillator approximation is used as #
# standard for all frequencies above a cut-off value. Below this, #
# two treatments can be applied: #
# (a) low frequencies are shifted to the cut-off value (as per #
# Cramer-Truhlar) #
# (b) a free-rotor approximation is applied below the cut-off (as #
# per Grimme). In this approach, a damping function interpolates #
# between the RRHO and free-rotor entropy treatment of Svib to #
# avoid a discontinuity. #
# Both approaches avoid infinitely large values of Svib as wave- #
# numbers tend to zero. With a cut-off set to 0, the results will be #
# identical to standard values output by the Gaussian program. #
# The free energy can be evaluated for variable temperature, #
# concentration, vibrational scaling factor, and with a haptic #
# correction of the translational entropy in different solvents, #
# according to the amount of free space available. #
#######################################################################
####### Written by: <NAME> and <NAME> ##############
####### Last modified: Dec 11, 2017 #################################
#######################################################################
import os.path, sys, math, textwrap, time
from glob import glob
from optparse import OptionParser
from .vib_scale_factors import scaling_data, scaling_refs
# PHYSICAL CONSTANTS
# Units: GAS_CONSTANT J/(mol*K); PLANCK_CONSTANT J*s; BOLTZMANN_CONSTANT J/K;
# SPEED_OF_LIGHT cm/s; AVOGADRO_CONSTANT 1/mol; AMU_to_KG kg per amu; atmos kPa.
GAS_CONSTANT, PLANCK_CONSTANT, BOLTZMANN_CONSTANT, SPEED_OF_LIGHT, AVOGADRO_CONSTANT, AMU_to_KG, atmos = 8.3144621, 6.62606957e-34, 1.3806488e-23, 2.99792458e10, 6.0221415e23, 1.66053886E-27, 101.325
# UNIT CONVERSION
# J/mol per Hartree: 4.184 J/cal * 627.509541 kcal/Hartree * 1000 cal/kcal
j_to_au = 4.184 * 627.509541 * 1000.0
# version number
__version__ = "2.0.1"
# Horizontal rule used when printing result tables.
stars = " " + "*" * 128
# some literature references
grimme_ref = "<NAME>. 2012, 18, 9955-9964"
truhlar_ref = "<NAME>.; <NAME>.; <NAME>.; <NAME>. Phys. Chem. B 2011, 115, 14556-14562"
goodvibes_ref = "Funes-Ardoiz, I.; <NAME>. (2016). GoodVibes: GoodVibes v1.0.2. http://doi.org/10.5281/zenodo.595246"
#Some useful arrays
# Element symbols indexed by atomic number (index 0 is a blank placeholder).
periodictable = ["","H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr",
    "Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl",
    "Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Uub","Uut","Uuq","Uup","Uuh","Uus","Uuo"]
def elementID(massno):
    """Return the element symbol for atomic number *massno*.

    Index 0 maps to the empty-string placeholder; any value outside the
    table returns "XX".
    """
    # Guard the lower bound too: with the original ``massno < len(...)``
    # check alone, a negative value would silently index from the END of
    # the periodic table instead of returning the "XX" placeholder.
    if 0 <= massno < len(periodictable):
        return periodictable[massno]
    return "XX"
# Enables output to terminal and to text file
class Logger:
    """Mirror program output to the terminal and to a text log file."""

    def __init__(self, filein, suffix, append):
        # Log file name is "<filein>_<append>.<suffix>".
        self.log = open(filein + "_" + append + "." + suffix, 'w')

    def Write(self, message):
        # Echo to stdout without an extra newline, then persist.
        print(message, end='')
        self.log.write(message)

    def Fatal(self, message):
        # Report the error, close the log cleanly, and abort the program.
        print(message + "\n")
        self.log.write(message + "\n")
        self.Finalize()
        sys.exit(1)

    def Finalize(self):
        self.log.close()
# Enables output of optimized coordinates to a single xyz-formatted file
class XYZout:
    """Accumulate optimized geometries into a single xyz-formatted file."""

    def __init__(self, filein, suffix, append):
        # Output file name is "<filein>_<append>.<suffix>".
        self.xyz = open(filein + "_" + append + "." + suffix, 'w')

    def Writetext(self, message):
        self.xyz.write(message + "\n")

    def Writecoords(self, atoms, coords):
        # One line per atom: symbol followed by x/y/z in 13-wide columns.
        for idx, triplet in enumerate(coords):
            self.xyz.write('{:>1}'.format(atoms[idx]))
            for component in triplet:
                self.xyz.write('{:13.6f}'.format(component))
            self.xyz.write('\n')

    def Finalize(self):
        self.xyz.close()
#Read molecule data from a compchem output file
class getoutData:
    """Parse atom symbols and cartesian coordinates from a compchem output file."""
    def __init__(self, file):
        # Read the entire output file into memory.
        with open(file) as f: data = f.readlines()
        program = 'none'
        for line in data:
            if line.find("Gaussian") > -1: program = "Gaussian"; break
        # NOTE(review): getATOMTYPES is a plain function nested inside
        # __init__ (taking ``self`` explicitly) and called exactly once
        # below -- it is NOT a method of the class.
        def getATOMTYPES(self, outlines, program):
            if program == "Gaussian":
                # Coordinates follow each "Input/Standard orientation" table
                # (5 header lines, then one row per atom up to a "---" rule).
                # The lists are reset on every match, so the LAST geometry
                # in the file wins.
                for i, line in enumerate(outlines):
                    if line.find("Input orientation") >-1 or line.find("Standard orientation") > -1:
                        self.ATOMTYPES, self.CARTESIANS, self.ATOMICTYPES, carts = [], [], [], outlines[i+5:]
                        for j, line in enumerate(carts):
                            if line.find("-------") > -1: break
                            self.ATOMTYPES.append(elementID(int(line.split()[1])))
                            self.ATOMICTYPES.append(int(line.split()[2]))
                            # Some tables have an extra "atomic type" column;
                            # pick x/y/z from the right offset either way.
                            if len(line.split()) > 5: self.CARTESIANS.append([float(line.split()[3]),float(line.split()[4]),float(line.split()[5])])
                            else: self.CARTESIANS.append([float(line.split()[2]),float(line.split()[3]),float(line.split()[4])])
        getATOMTYPES(self, data, program)
# Read gaussian output for a single point energy
def sp_energy(file):
    """Extract the final single-point energy from a Gaussian or ORCA output.

    Looks for ``<base>.log`` then ``<base>.out`` next to *file*; raises
    ValueError when neither exists.  Returns the energy as a float, or
    the string 'none' if no energy line was found.
    """
    spe = 'none'
    program = 'none'
    base = os.path.splitext(file)[0]
    if os.path.exists(base + '.log'):
        with open(base + '.log') as f:
            data = f.readlines()
    elif os.path.exists(base + '.out'):
        with open(base + '.out') as f:
            data = f.readlines()
    else:
        raise ValueError("File {} does not exist".format(file))

    # Identify which program produced this output file.
    for line in data:
        if line.find("Gaussian") > -1:
            program = "Gaussian"
            break
        if line.find("* O R C A *") > -1:
            program = "Orca"
            break

    # Scan the whole file; the last matching line holds the final energy.
    for line in data:
        stripped = line.strip()
        if program == "Gaussian" and stripped.startswith('SCF Done:'):
            spe = float(stripped.split()[4])
        if program == "Orca" and stripped.startswith('FINAL SINGLE POINT ENERGY'):
            spe = float(stripped.split()[4])
    return spe
# Read output for the level of theory and basis set used
def level_of_theory(file):
    """Parse "method/basis" from the archive section of a Gaussian output.

    Returns "none/none" when no frequency-job archive entry is found.
    """
    with open(file) as f:
        lines = f.readlines()
    level = 'none'
    bs = 'none'
    for line in lines:
        # The frequency-job archive entry stores method and basis set as
        # backslash-separated fields 4 and 5.
        if line.strip().find('\\Freq\\') > -1:
            try:
                level, bs = line.strip().split("\\")[4:6]
            except IndexError:
                pass
    # Drop the restricted (R) / unrestricted (U) prefix from the method.
    if level[0] == 'R' or level[0] == 'U':
        level = level[1:]
    return level + "/" + bs
# translational energy evaluation (depends on temperature)
def calc_translational_energy(temperature):
    """
    Translational energy (J/mol) of an ideal gas: Etrans = 3/2 RT.

    Non-interacting molecules are assumed, so the molar energy is simply
    Avogadro's number times the single-molecule energy.
    """
    return 1.5 * GAS_CONSTANT * temperature
# rotational energy evaluation (depends on molecular shape and temperature)
def calc_rotational_energy(zpe, symmno, temperature, linear):
    """
    Rotational energy (J/mol):
    0 (atomic); RT (linear); 3/2 RT (non-linear).
    """
    # A zero ZPE marks a single atom: no rotational degrees of freedom.
    if zpe == 0.0:
        return 0.0
    if linear == 1:
        return GAS_CONSTANT * temperature
    return 1.5 * GAS_CONSTANT * temperature
# vibrational energy evaluation (depends on frequencies, temperature and scaling factor: default = 1.0)
def calc_vibrational_energy(frequency_wn, temperature, freq_scale_factor):
    """
    Vibrational energy contribution (J/mol), including the ZPE (0 K) and
    thermal terms:  Evib = R * Sum(x * (1/2 + 1/(e^x - 1))),
    with x = h*c*nu_scaled / (k*T) for each wavenumber nu.
    """
    total = 0
    for freq in frequency_wn:
        x = (PLANCK_CONSTANT * freq * SPEED_OF_LIGHT * freq_scale_factor) / (BOLTZMANN_CONSTANT * temperature)
        total += x * GAS_CONSTANT * temperature * (0.5 + (1.0 / (math.exp(x) - 1.0)))
    return total
# vibrational Zero point energy evaluation (depends on frequencies and scaling factor: default = 1.0)
def calc_zeropoint_energy(frequency_wn, freq_scale_factor):
    """
    Vibrational zero-point energy (J/mol): EZPE = Sum(1/2 * h*c*nu_scaled / k) * R.
    """
    zpe = 0
    for freq in frequency_wn:
        x = PLANCK_CONSTANT * freq * SPEED_OF_LIGHT * freq_scale_factor / BOLTZMANN_CONSTANT
        zpe += 0.5 * x * GAS_CONSTANT
    return zpe
# Computed the amount of accessible free space (ml per L) in solution accesible to a solute immersed in bulk solvent, i.e. this is the volume not occupied by solvent molecules, calculated using literature values for molarity and B3LYP/6-31G* computed molecular volumes.
def get_free_space(solv):
    """
    Free space (mL per litre) in bulk solvent available to a solute,
    based on Shakhnovich and Whitesides (J. Org. Chem. 1998, 63, 3821-3830).
    Unknown solvents (or "none") fall back to the full 1000 mL.
    """
    solvent_list = ["none", "H2O", "toluene", "DMF", "AcOH", "chloroform"]
    molarity = [1.0, 55.6, 9.4, 12.9, 17.4, 12.5]  # mol/l
    molecular_vol = [1.0, 27.944, 149.070, 77.442, 86.10, 97.0]  # Angstrom^3
    # Unknown solvent names map to index 0 ("none").
    nsolv = solvent_list.index(solv) if solv in solvent_list else 0
    if nsolv == 0:
        return 1000.0
    solv_molarity = molarity[nsolv]
    solv_volume = molecular_vol[nsolv]
    V_free = 8 * ((1E27 / (solv_molarity * AVOGADRO_CONSTANT)) ** 0.333333 - solv_volume ** 0.333333) ** 3
    return V_free * solv_molarity * AVOGADRO_CONSTANT * 1E-24
# translational entropy evaluation (depends on mass, concentration, temperature, solvent free space: default = 1000.0)
def calc_translational_entropy(molecular_mass, conc, temperature, solv):
    """
    Translational entropy (J/(mol*K)) of an ideal gas:
    Strans = R * (ln(lambda^3 / N) + 5/2), with the molecular mass
    converted amu -> kg and the concentration mol/l -> number per m^3.
    """
    # Thermal wavelength term.
    lmda = ((2.0 * math.pi * molecular_mass * AMU_to_KG * BOLTZMANN_CONSTANT * temperature) ** 0.5) / PLANCK_CONSTANT
    # Number density, corrected by the solvent's accessible free space.
    Ndens = conc * 1000 * AVOGADRO_CONSTANT / (get_free_space(solv) / 1000.0)
    return GAS_CONSTANT * (2.5 + math.log(lmda ** 3 / Ndens))
# electronic entropy evaluation (depends on multiplicity)
def calc_electronic_entropy(multiplicity):
    """
    Electronic entropy (J/(mol*K)): Selec = R * ln(multiplicity).
    """
    return GAS_CONSTANT * math.log(multiplicity)
# rotational entropy evaluation (depends on molecular shape and temp.)
def calc_rotational_entropy(zpe, linear, symmno, rotemp, temperature):
    """
    Calculates the rotational entropy (J/(mol*K))
    Srot = 0 (atomic) ; R(Ln(q)+1) (linear); R(Ln(q)+3/2) (non-linear)
    """
    # monatomic: zero rotational temperatures or zero ZPE means no rotation
    if rotemp == [0.0,0.0,0.0] or zpe == 0.0: entropy = 0.0
    else:
        if len(rotemp) == 1: # diatomic or linear
            qrot = temperature/rotemp[0]
        else:
            # non-linear: product over the three rotational temperatures
            qrot = math.pi*temperature**3/(rotemp[0]*rotemp[1]*rotemp[2])
        # NOTE(review): this square root sits at the if/else level, so it is
        # applied to BOTH branches -- including the linear partition
        # function, which is usually NOT square-rooted.  Confirm intended.
        qrot = qrot ** 0.5
        if linear == 1: entropy = GAS_CONSTANT * (math.log(qrot / symmno) + 1)
        else: entropy = GAS_CONSTANT * (math.log(qrot / symmno) + 1.5)
    return entropy
# rigid rotor harmonic oscillator (RRHO) entropy evaluation - this is the default treatment
def calc_rrho_entropy(frequency_wn, temperature, freq_scale_factor):
    """
    Rigid-rotor harmonic-oscillator entropy per vibrational mode,
    returned as a list (J/(mol*K)):
    Sv = R * (x/(e^x - 1) - ln(1 - e^-x)), x = h*c*nu_scaled / (k*T).
    """
    entropies = []
    for freq in frequency_wn:
        x = PLANCK_CONSTANT * freq * SPEED_OF_LIGHT * freq_scale_factor / BOLTZMANN_CONSTANT / temperature
        entropies.append(x * GAS_CONSTANT / (math.exp(x) - 1) - GAS_CONSTANT * math.log(1 - math.exp(-x)))
    return entropies
# | |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0924962,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.27671,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 9.4469e-07,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.20269,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.01201e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.181222,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.292305,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.147546,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.621073,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.207264,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.22847,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 1.91191e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00760127,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.054967,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0562161,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0549689,
'Execution Unit/Register Files/Runtime Dynamic': 0.0638174,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.1158,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.304408,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.59736,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00234258,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00234258,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00210065,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000846156,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000807549,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00759336,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0203073,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.054042,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.43753,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.212065,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.183551,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.82288,
'Instruction Fetch Unit/Runtime Dynamic': 0.477559,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0357829,
'L2/Runtime Dynamic': 0.00696773,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.99701,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.848868,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0569365,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0569365,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.26587,
'Load Store Unit/Runtime Dynamic': 1.1866,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.140396,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.280792,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0498269,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0501343,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.213733,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0354463,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.455436,
'Memory Management Unit/Runtime Dynamic': 0.0855806,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.3979,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 4.45596e-06,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0081763,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0924691,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
"""""
GBDX Notebook: "Identifying Destroyed Buildings with Multispectral Imagery"
Link: https://notebooks.geobigdata.io/hub/notebooks/5b47cfb82486966ea89b75fd?tab=code
Author: <NAME>
Date created: 7/5/2018
Date last modified: 7/13/2018
Python Version: 2.7.15
"""
import cPickle
import folium
from functools import partial
from gbdxtools import CatalogImage, IdahoImage
import geojson
from IPython.display import HTML, display
import jinja2
import json
from matplotlib import pyplot as plt, colors
import numpy as np
import os
from past.utils import old_div
import pickle
import pyproj
from rasterio import features
import requests
from scipy import ndimage as ndi
from shapely import geometry, ops
from shapely.geometry import shape, geo, box
from skimage import filters, morphology, measure, color, segmentation, exposure
from skimage.measure import label, regionprops
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score
#CONSTANTS
# S3-hosted labelled building footprints (GeoJSON) for the Sonoma County fire.
buildings_geojson_link = 'https://s3.amazonaws.com/gbdx-training/burnt_areas/Nuns_SonomaCounty_Glenn_selected_labelled.geojson'
# Pre-trained random-forest segmentation model, stored as a pickle on S3.
RF_model_link = 'https://s3.amazonaws.com/gbdx-training/burnt_areas/rf_allseg_model.pkl'
"""Helper functions for the GBDX Notebook."""
def pixels_as_features(image, include_gabors=True):
    """Build a per-pixel feature matrix for classification.

    Stacks the raw image bands with remote-sensing indices (and,
    optionally, gabor filter responses), then flattens the stack to a
    (n_pixels, n_features) array.
    """
    # Convert from (band, row, col) to the conventional (row, col, band).
    bands = np.rollaxis(image, 0, 3)
    indices = calc_rsi(image)
    if include_gabors is True:
        layers = np.dstack([bands, indices, calc_gabors(image)])
    else:
        layers = np.dstack([bands, indices])
    n_pixels = layers.shape[0] * layers.shape[1]
    return layers.ravel().reshape(n_pixels, layers.shape[2])
def calc_rsi(image):
    """Compute 27 remote-sensing index layers from an 8-band image.

    Covers vegetation, built-up, and bare-soil indices; returns them
    stacked along a new third axis, in the order listed in ``np.stack``
    below.  ``image`` is expected band-first; it is rolled to
    (row, col, band) before the bands are split out.
    """
    # roll axes to conventional row,col,depth
    img = np.rollaxis(image, 0, 3)
    # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral
    COAST = img[:, :, 0]
    B = img[:, :, 1]
    G = img[:, :, 2]
    Y = img[:, :, 3]
    R = img[:, :, 4]
    RE = img[:, :, 5]
    NIR1 = img[:, :, 6]
    NIR2 = img[:, :, 7]
    # Vegetation indices (old_div keeps Python-2 classic division semantics).
    arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))
    dd = (2 * NIR1 - R) - (G - B)
    gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5
    gndvi = old_div((NIR1 - G), (NIR1 + G))
    ndre = old_div((NIR1 - RE), (NIR1 + RE))
    ndvi = old_div((NIR1 - R), (NIR1 + R))
    ndvi35 = old_div((G - R), (G + R))
    ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))
    nirry = old_div((NIR1), (R + Y))
    normnir = old_div(NIR1, (NIR1 + R + G))
    psri = old_div((R - B), RE)
    rey = old_div((RE - Y), (RE + Y))
    rvi = old_div(NIR1, R)
    sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69
    vi1 = old_div((10000 * NIR1), (RE) ** 2)
    vire = old_div(NIR1, RE)
    br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))
    gr = old_div(G, R)
    rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))
    ###Built-Up indices
    wvbi = old_div((COAST - RE), (COAST + RE))
    wvnhfd = old_div((RE - COAST), (RE + COAST))
    ###SIs
    evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))
    L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES
    savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))
    msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)
    bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))
    rgi = old_div(R, G)
    bri = old_div(B, R)
    # NOTE: the stack order below defines the feature order downstream --
    # do not reorder without retraining any model that consumes it.
    rsi = np.stack(
        [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,
         wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],
        axis=2)
    return rsi
def power(image, kernel):
    """Gabor response magnitude for one kernel.

    Z-scores the image, convolves with the real and imaginary kernel
    parts (wrap boundary), and returns sqrt(re^2 + im^2).
    """
    normed = old_div((image - image.mean()), image.std())
    real_part = ndi.convolve(normed, np.real(kernel), mode='wrap')
    imag_part = ndi.convolve(normed, np.imag(kernel), mode='wrap')
    return np.sqrt(real_part ** 2 + imag_part ** 2)
def calc_gabors(image, frequency=1, theta_vals=(0, 1, 2, 3)):
    """Compute Gabor filter response images at several orientations.

    Parameters
    ----------
    image : object
        Image wrapper exposing ``rgb(blm=True)`` (project type) -- assumed
        to return an RGB array; TODO confirm against caller.
    frequency : float
        Spatial frequency passed to ``filters.gabor_kernel``.
    theta_vals : iterable of int
        Orientations in quarter-turns; each value t maps to t/4 * pi rad.
        BUG FIX: the default was a mutable list ([0, 1, 2, 3]); a tuple is
        behaviorally identical and cannot be accidentally shared/mutated.

    Returns
    -------
    ndarray
        Stack of response images with shape (H, W, len(theta_vals)).
    """
    # Histogram-equalized grayscale version of the RGB image.
    img = exposure.equalize_hist(color.rgb2gray(image.rgb(blm=True)))
    responses = []
    for theta in theta_vals:
        angle = theta / 4. * np.pi  # quarter-turns -> radians
        kernel = filters.gabor_kernel(frequency, theta=angle)
        # Save the power image for each orientation
        responses.append(power(img, kernel))
    # Stack responses along a new trailing axis.
    return np.rollaxis(np.dstack([responses]), 0, 3)
def get_link(model_url):
    """Download *model_url* and return the raw response body (bytes).

    Used for both the pickled RF model and the building-footprint geojson.
    """
    return requests.get(model_url).content
# Partials binding the two known download URLs onto get_link.
get_model = partial(get_link, model_url=RF_model_link)  # raw bytes of the pickled RF model
get_geojson = partial(get_link, model_url=buildings_geojson_link)  # raw bytes of the building-footprint geojson
def reproject(geom, from_proj='EPSG:4326', to_proj='EPSG:26942'):
    """Reproject a geometry between coordinate reference systems.

    Defaults to transforming WGS84 lon/lat (EPSG:4326) into EPSG:26942,
    a projected CRS suitable for metric area computation.
    """
    transformer = partial(pyproj.transform,
                          pyproj.Proj(init=from_proj),
                          pyproj.Proj(init=to_proj))
    return ops.transform(transformer, geom)
def km2_area(polygons):
    """Return the total area of *polygons* in square kilometres.

    Each polygon is reprojected into a metric CRS first; the union merges
    overlaps so shared area is only counted once.
    """
    merged = ops.cascaded_union([reproject(poly) for poly in polygons])
    return merged.area * 1e-6
def clean(img):
    """Clean a binary mask by removing small holes and small objects.

    Objects smaller than one tenth of the largest connected component's
    area are discarded.
    """
    label_img = label(img, connectivity=2)
    # Regions sorted by area; the largest one sets the size threshold.
    props = sorted(regionprops(label_img), key=lambda r: r.area)
    # BUG FIX (readability): the local result no longer shadows the
    # function's own name `clean`.
    closed = morphology.binary_closing(img)
    closed = morphology.remove_small_holes(closed)
    # NOTE(review): assumes at least one labelled region exists; an
    # all-zero mask leaves props empty and raises IndexError -- confirm
    # upstream guarantees a nonempty mask.
    min_size = int(np.floor(props[-1].area) / 10)
    return morphology.remove_small_objects(closed, min_size, connectivity=2)
def to_geojson(shapes, buildings):
    """Combine burn-scar shapes and building footprints into one geojson.

    Burn-scar polygons are tagged red; building footprints are tagged
    blue. *shapes* may be a list of (shapely geometry, value) pairs, or
    any other iterable of (geometry mapping, value) pairs.
    """
    burn_features = []
    if type(shapes) == list:
        # Shapely objects: export through the __geo_interface__ protocol.
        for s, v in shapes:
            burn_features.append({
                'type': 'Feature',
                'properties': {'raster_val': v, 'color': 'red'},
                'geometry': s.__geo_interface__})
    else:
        # Geometries are already plain mappings; embed them directly.
        for s, v in shapes:
            burn_features.append({
                'type': 'Feature',
                'properties': {'raster_val': v, 'color': 'red'},
                'geometry': s})
    # Append the building footprints, all tagged blue.
    building_features = [
        {'type': 'Feature',
         'properties': {'BuildingID': b['properties']['BuildingID'],
                        'color': 'blue'},
         'geometry': b['geometry']}
        for b in buildings['features']]
    return {'type': 'FeatureCollection',
            'features': burn_features + building_features}
def geojson_to_polygons(js_):
    """Convert geojson features into shapely polygons.

    Burn-scar features (color 'red') go into one list. Every building
    feature is wrapped as [polygon, [BuildingID, 'yellow', False]] --
    i.e. initially marked non-burnt -- for later relabelling.
    """
    burnt_polys = []
    building_polys = []
    for feat in js_['features']:
        geometry = {
            "coordinates": feat['geometry']['coordinates'],
            "type": feat['geometry']['type'],
        }
        # Round-trip through geojson to obtain a shapely geometry object.
        poly = shape(geojson.loads(json.dumps(geometry)))
        props = feat['properties']
        if props['color'] == 'red':  # red marks the burnt region
            burnt_polys.append(poly)
        else:
            # Buildings start 'yellow' (non-burnt) until proven otherwise.
            building_polys.append([poly, [props['BuildingID'], 'yellow', False]])
    return burnt_polys, building_polys
def label_building_polys(burnt_polys, building_polys):
    """Mark buildings intersecting any burnt region as destroyed.

    Each entry of *building_polys* is [polygon, [BuildingID, color, burnt]];
    intersecting buildings are relabelled ('blue', True) in place.
    """
    for b in building_polys:
        for r in burnt_polys:
            if b[0].intersects(r):
                b[1] = [b[1][0], 'blue', True]  # 'blue' = found in burnt region
                # BUG FIX: the original had `continue` here, a no-op at the
                # end of the loop body; once the building is known to be
                # burnt there is no reason to test further regions.
                break
def to_geojson_burnt(burnt_polys, building_polys):
    """Build a geojson FeatureCollection from relabelled polygons.

    Burn-scar polygons keep the 'red' tag; each building carries whatever
    color label_building_polys assigned ('blue' burnt / 'yellow' not).
    """
    features = [
        {'type': 'Feature',
         'properties': {'color': 'red'},
         'geometry': geo.mapping(region)}
        for region in burnt_polys]
    # Append the building footprints with their assigned labels.
    features += [
        {'type': 'Feature',
         'properties': {'BuildingID': meta[0], 'color': meta[1]},
         'geometry': geo.mapping(poly)}
        for poly, meta in building_polys]
    return {'type': 'FeatureCollection', 'features': features}
def to_geojson_groundtruth(burnt_polys, data_labelled):
    """Build the ground-truth geojson FeatureCollection.

    Burn-scar polygons are tagged 'red'; building features are copied
    from *data_labelled* keeping their stored color and Burnt_Label.
    """
    features = [
        {'type': 'Feature',
         'properties': {'color': 'red'},
         'geometry': geo.mapping(region)}
        for region in burnt_polys]
    # Append the labelled building footprints verbatim.
    for b in data_labelled['features']:
        props = b['properties']
        features.append({
            'type': 'Feature',
            'properties': {'BuildingID': props['BuildingID'],
                           'color': props['color'],
                           'Burnt_Label': props['Burnt_Label']},
            'geometry': b['geometry']})
    return {'type': 'FeatureCollection', 'features': features}
def geojson_to_polygons_groundtruth(js_):
    """Convert ground-truth geojson features into shapely polygons.

    Returns (burnt_polys, building_polys); each building is wrapped as
    [polygon, [BuildingID, color, burnt]] using the stored Burnt_Label:
    ('blue', True) for burnt buildings, ('yellow', False) otherwise.
    """
    burnt_polys = []
    building_polys = []
    for feat in js_['features']:
        geometry = {
            "coordinates": feat['geometry']['coordinates'],
            "type": feat['geometry']['type'],
        }
        # Round-trip through geojson to obtain a shapely geometry object.
        poly = shape(geojson.loads(json.dumps(geometry)))
        props = feat['properties']
        if props['color'] == 'red':  # red marks the burnt region
            burnt_polys.append(poly)
        elif props['Burnt_Label']:
            building_polys.append([poly, [props['BuildingID'], 'blue', True]])
        else:
            building_polys.append([poly, [props['BuildingID'], 'yellow', False]])
    return burnt_polys, building_polys
def accuracy_measures(predictions, trues):
"""Accuracy measures for the predictions of the method vs the groundtruth.
Prints a confusion matrix, accuracy, misclassifcation rate, true positieve rate, false positive rate, specificity, precision, prevalence.
Returns the accuracy score, precision score, and recall score."""
tn, fp, fn, tp = confusion_matrix(trues, predictions).ravel()
print "\t(tn, fp, fn, tp) =", (tn, fp, fn, tp)
# how often is classifier correct?
print "\tAccuracy = {:.2%}".format(float(tp + | |
is rejected.
"""
mock_check_for_resource_operations.return_value = False
response = self.authenticated_regular_client.delete(self.url_for_workspace_resource)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
mock_delete_file.delay.assert_not_called()
# check that the resource still exists
Resource.objects.get(pk=self.regular_user_workspace_resource.pk)
def test_other_users_cannot_delete_resource(self):
"""
Test that another regular users can't delete someone else's Workpace.
"""
response = self.authenticated_other_client.delete(self.url_for_unattached)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_other_user_cannot_view_resource_detail(self):
"""
Test that another regular user can't view the Workpace detail.
"""
response = self.authenticated_other_client.get(self.url_for_unattached)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_users_cannot_change_owner(self):
'''
Regular users cannot change the owner of a Resource. That
would amount to assigning a Resource to someone else- do not
want that.
'''
payload = {'owner_email':self.regular_user_2.email}
response = self.authenticated_regular_client.put(
self.url_for_unattached, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
payload = {'owner_email':self.regular_user_2.email}
response = self.authenticated_regular_client.put(
self.url_for_workspace_resource, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_cannot_directly_edit_resource_workspace(self):
'''
Test that the put/patch to the resources endpoint
ignores any request to change teh workspace
'''
# get the workspace to which the resource is assigned:
all_workspaces = self.regular_user_workspace_resource.workspaces.all()
workspace1 = all_workspaces[0]
# get another workspace owned by that user:
all_user_workspaces = Workspace.objects.filter(
owner=self.regular_user_workspace_resource.owner
)
other_workspaces = [x for x in all_user_workspaces if not x==workspace1]
if len(other_workspaces) == 0:
raise ImproperlyConfigured('Need to create another Workspace for'
' user {user_email}'.format(
user_email=self.regular_user_workspace_resource.owner.email
)
)
other_workspace = other_workspaces[0]
payload = {'workspace': other_workspace.pk}
# try for a resource already attached to a workspace
response = self.authenticated_regular_client.put(
self.url_for_workspace_resource, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_user_cannot_change_active_status(self):
'''
The `is_active` boolean cannot be altered by a regular user.
`is_active` is used to block edits while validation is processing, etc.
The `is_active` is ignored for requests from regular users
so there is no 400 returned. Rather, we check that the flag
has not changed.
'''
# check that it was not active to start:
self.assertTrue(self.regular_user_workspace_resource.is_active)
payload = {'is_active': False}
response = self.authenticated_regular_client.put(
self.url_for_workspace_resource, payload, format='json'
)
r = Resource.objects.get(pk=self.regular_user_workspace_resource.pk)
self.assertTrue(r.is_active)
def test_admin_cannot_change_active_status(self):
'''
The `is_active` boolean cannot be reset via the API, even by
an admin
'''
# find the status at the start:
initial_status = self.regular_user_unattached_resource.is_active
final_status = not initial_status
payload = {'is_active': final_status}
response = self.authenticated_admin_client.put(
self.url_for_unattached, payload, format='json'
)
r = Resource.objects.get(pk=self.regular_user_unattached_resource.pk)
# check that the bool changed:
self.assertEqual(r.is_active, initial_status)
def test_user_cannot_change_status_message(self):
'''
The `status` string canNOT be reset by a regular user
'''
# check that it was not active to start:
orig_status = self.regular_user_unattached_resource.status
payload = {'status': 'something'}
response = self.authenticated_regular_client.put(
self.url_for_unattached, payload, format='json'
)
r = Resource.objects.get(pk=self.regular_user_unattached_resource.pk)
self.assertTrue(r.status == orig_status)
def test_admin_can_change_status_message(self):
'''
The `status` string can be reset by an admin
'''
# check that it was not active to start:
orig_status = self.active_resource.status
payload = {'status': 'something'}
response = self.authenticated_admin_client.put(
self.url_for_active_resource, payload, format='json'
)
r = Resource.objects.get(pk=self.active_resource.pk)
self.assertFalse(r.status == orig_status)
def test_user_cannot_change_date_added(self):
'''
Once the Resource has been added, there is no editing
of the DateTime.
'''
orig_datetime = self.regular_user_unattached_resource.creation_datetime
original_pk = self.regular_user_unattached_resource.pk
date_str = 'May 20, 2018 (16:00:07)'
payload = {'created': date_str}
response = self.authenticated_regular_client.put(
self.url_for_unattached, payload, format='json'
)
# since the field is ignored, it will not raise any exception.
# Still want to check that the object is unchanged:
r = Resource.objects.get(pk=original_pk)
self.assertEqual(orig_datetime, r.creation_datetime)
orig_datestring = orig_datetime.strftime('%B %d, %Y, (%H:%M:%S)')
self.assertTrue(orig_datestring != date_str)
def test_user_cant_make_resource_public(self):
'''
Regular users are not allowed to effect public/private
chanage on Resources
'''
private_resources = Resource.objects.filter(
owner = self.regular_user_1,
is_active = True,
is_public = False
)
if len(private_resources) > 0:
private_resource = private_resources[0]
url = reverse(
'resource-detail',
kwargs={'pk':private_resource.pk}
)
payload = {'is_public': True}
response = self.authenticated_regular_client.put(
url, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
r = Resource.objects.get(pk=private_resource.pk)
self.assertFalse(r.is_public)
else:
raise ImproperlyConfigured('To properly run this test, you'
' need to have at least one public Resource.')
def test_admin_user_can_make_resource_public(self):
'''
Admin users are allowed to effect public/private
chanage on Resources
'''
private_resources = Resource.objects.filter(
owner = self.regular_user_1,
is_active = True,
is_public = False
)
if len(private_resources) > 0:
private_resource = private_resources[0]
url = reverse(
'resource-detail',
kwargs={'pk':private_resource.pk}
)
payload = {'is_public': True}
response = self.authenticated_admin_client.put(
url, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
r = Resource.objects.get(pk=private_resource.pk)
self.assertTrue(r.is_public)
else:
raise ImproperlyConfigured('To properly run this test, you'
' need to have at least one public Resource.')
def test_user_cant_make_resource_private(self):
'''
If a Resource was public, regular users can't make it private
'''
active_and_public_resources = Resource.objects.filter(
is_active = True,
is_public = True,
owner = self.regular_user_1
)
if len(active_and_public_resources) == 0:
raise ImproperlyConfigured('To properly run this test, you'
' need to have at least one public AND active Resource.')
r = active_and_public_resources[0]
url = reverse(
'resource-detail',
kwargs={'pk':r.pk}
)
payload = {'is_public': False}
response = self.authenticated_regular_client.put(
url, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
updated_resource = Resource.objects.get(pk=r.pk)
self.assertTrue(updated_resource.is_public)
def test_admin_user_can_make_resource_private(self):
'''
If a Resource was public, admin users can make it private
'''
active_and_public_resources = Resource.objects.filter(
is_active = True,
is_public = True,
owner = self.regular_user_1
)
if len(active_and_public_resources) == 0:
raise ImproperlyConfigured('To properly run this test, you'
' need to have at least one public AND active Resource.')
r = active_and_public_resources[0]
url = reverse(
'resource-detail',
kwargs={'pk':r.pk}
)
payload = {'is_public': False}
response = self.authenticated_admin_client.put(
url, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
updated_resource = Resource.objects.get(pk=r.pk)
self.assertFalse(updated_resource.is_public)
def test_cannot_make_changes_when_inactive(self):
'''
Test that no changes can be made when the resource is inactive.
'''
self.assertFalse(self.inactive_resource.is_active)
# just try to change the path as an example
payload = {'path': '/some/path/to/file.txt'}
response = self.authenticated_admin_client.put(
self.url_for_inactive_resource, payload, format='json'
)
self.assertTrue(response.status_code == status.HTTP_400_BAD_REQUEST)
    def test_admin_can_change_path(self):
        '''
        Path is only relevant for internal/database use so
        users cannot change that. Admins may, however
        '''
        self.assertTrue(self.active_resource.is_active)
        original_path = self.active_resource.path
        new_path = '/some/new/path.txt'
        payload = {'path': new_path}
        response = self.authenticated_admin_client.put(
            self.url_for_active_resource, payload, format='json'
        )
        # query db for that same Resource object and verify that the path
        # HAS been changed to the requested value (the original comment
        # here wrongly said "has not been changed"; admins ARE allowed to).
        obj = Resource.objects.get(pk=self.active_resource.pk)
        self.assertEqual(obj.path, new_path)
        self.assertFalse(obj.path == original_path)
def test_user_cannot_change_path(self):
'''
Path is only relevant for internal/database use so
users cannot change that.
'''
original_path = self.regular_user_unattached_resource.path
payload = {'path': '/some/new/path.txt'}
response = self.authenticated_regular_client.put(
self.url_for_unattached, payload, format='json'
)
# query db for that same Resource object and verify that the path
# has not been changed:
obj = Resource.objects.get(pk=self.regular_user_unattached_resource.pk)
self.assertEqual(obj.path, original_path)
def test_user_can_change_resource_name(self):
'''
Users may change the Resource name. This does NOT
change anything about the path, etc.
'''
original_name = self.active_resource.name
payload = {'name': 'newname.txt'}
response = self.authenticated_regular_client.put(
self.url_for_active_resource, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
json_obj = response.json()
self.assertTrue(json_obj['name'], 'newname.txt')
# just double check that the original name wasn't the same
# by chance
self.assertTrue(original_name != 'newname.txt')
@mock.patch('api.serializers.resource.api_tasks')
def test_changing_resource_type_resets_status(self,
mock_api_tasks):
'''
If an attempt is made to change the resource type
ensure that the resource has its "active" state
set to False and that the status changes.
Since the validation can take some time, it will call
the asynchronous validation process.
'''
current_resource_type = self.active_resource.resource_type
other_types = set(
[x[0] for x in DATABASE_RESOURCE_TYPES]
).difference(set([current_resource_type]))
newtype = list(other_types)[0]
# verify that we are actually changing the type
# in this request (i.e. not a trivial test)
self.assertFalse(
newtype == current_resource_type
)
payload = {'resource_type': newtype}
response = self.authenticated_regular_client.put(
self.url_for_active_resource, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
r = Resource.objects.get(pk=self.active_resource.pk)
# active state set to False
self.assertFalse(r.is_active)
# check that the validation method was called.
mock_api_tasks.validate_resource.delay.assert_called_with(self.active_resource.pk, newtype)
def test_setting_workspace_to_null_fails(self):
'''
Test that directly setting the workspace to null fails.
Users can't change a Resource's workspace. They can only
remove unused Resources from a Workspace.
'''
payload = {'workspace': None}
# get the original set of workspaces for the resource
orig_workspaces = [x.pk for x in self.regular_user_workspace_resource.workspaces.all()]
# try for an attached resource
response = self.authenticated_regular_client.put(
self.url_for_workspace_resource, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# query the resource again, see that the workspaces have not
# changed
rr = Resource.objects.get(pk=self.regular_user_workspace_resource.pk)
current_workspaces = [x.pk for x in rr.workspaces.all()]
self.assertEqual(current_workspaces, orig_workspaces)
# try for an unattached resource
# get the original set of workspaces for the resource
orig_workspaces = [x.pk for x in self.regular_user_unattached_resource.workspaces.all()]
response = self.authenticated_regular_client.put(
self.url_for_unattached, payload, format='json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# query the resource again, see that the workspaces have not
| |
import fnmatch
import os
from collections import OrderedDict, defaultdict
from conans.client.conanfile.configure import run_configure_method
from conans.client.generators.text import TXTGenerator
from conans.client.graph.build_mode import BuildMode
from conans.client.graph.graph import BINARY_BUILD, Node, CONTEXT_HOST, CONTEXT_BUILD
from conans.client.graph.graph_binaries import RECIPE_CONSUMER, RECIPE_VIRTUAL, BINARY_EDITABLE, \
BINARY_UNKNOWN
from conans.client.graph.graph_builder import DepsGraphBuilder
from conans.errors import ConanException, conanfile_exception_formatter
from conans.model.conan_file import get_env_context_manager
from conans.model.graph_info import GraphInfo
from conans.model.graph_lock import GraphLock, GraphLockFile
from conans.model.ref import ConanFileReference
from conans.paths import BUILD_INFO
from conans.util.files import load
class _RecipeBuildRequires(OrderedDict):
def __init__(self, conanfile, default_context):
super(_RecipeBuildRequires, self).__init__()
build_requires = getattr(conanfile, "build_requires", [])
if not isinstance(build_requires, (list, tuple)):
build_requires = [build_requires]
self._default_context = default_context
for build_require in build_requires:
self.add(build_require, context=self._default_context)
def add(self, build_require, context):
if not isinstance(build_require, ConanFileReference):
build_require = ConanFileReference.loads(build_require)
self[(build_require.name, context)] = build_require
def __call__(self, build_require, force_host_context=False):
context = CONTEXT_HOST if force_host_context else self._default_context
self.add(build_require, context)
def __str__(self):
items = ["{} ({})".format(br, ctxt) for (_, ctxt), br in self.items()]
return ", ".join(items)
class GraphManager(object):
    def __init__(self, output, cache, remote_manager, loader, proxy, resolver, binary_analyzer):
        """Store the collaborators used to build and evaluate dependency graphs.

        :param output: user output object (``info``/``success``/``writeln``)
        :param cache: local cache; provides ``config`` and ``default_profile``
        :param remote_manager: remote access (not used directly in the
            visible methods -- presumably consumed by graph building)
        :param loader: conanfile loader (``load_consumer``, ``load_virtual``,
            ``load_conanfile_txt``, ``load_basic``)
        :param proxy: recipe retrieval proxy (not used directly in the
            visible methods)
        :param resolver: version-range resolver; exposes ``output`` messages
        :param binary_analyzer: computes per-node binary status via
            ``evaluate_graph()``
        """
        self._proxy = proxy
        self._output = output
        self._resolver = resolver
        self._cache = cache
        self._remote_manager = remote_manager
        self._loader = loader
        self._binary_analyzer = binary_analyzer
    def load_consumer_conanfile(self, conanfile_path, info_folder,
                                deps_info_required=False, test=False):
        """loads a conanfile for local flow: source, imports, package, build

        Tries to recover the graph info and lockfile previously written to
        *info_folder*; when those files are missing, falls back to the
        default profile (only valid for the ``source()`` flow).

        :param conanfile_path: path to a conanfile.py or conanfile.txt
        :param info_folder: folder containing graph_info / conan.lock
        :param deps_info_required: whether stored deps info must exist
        :param test: test-package name (used in the display name), or False
        :return: the loaded and configured conanfile object
        """
        try:
            graph_info = GraphInfo.load(info_folder)
            lock_path = os.path.join(info_folder, "conan.lock")
            graph_lock_file = GraphLockFile.load(lock_path, self._cache.config.revisions_enabled)
            graph_lock = graph_lock_file.graph_lock
            self._output.info("Using lockfile: '{}/conan.lock'".format(info_folder))
            profile_host = graph_lock_file.profile_host
            profile_build = graph_lock_file.profile_build
            self._output.info("Using cached profile from lockfile")
        except IOError:  # Only if file is missing
            graph_lock = None
            # This is very dirty, should be removed for Conan 2.0 (source() method only)
            profile_host = self._cache.default_profile
            profile_host.process_settings(self._cache)
            profile_build = None
            name, version, user, channel = None, None, None, None
        else:
            name, version, user, channel, _ = graph_info.root
            # preprocess=False: settings were already preprocessed when the
            # lockfile/profile was originally created.
            profile_host.process_settings(self._cache, preprocess=False)
            # This is the hack of recovering the options from the graph_info
            profile_host.options.update(graph_info.options)
            if profile_build:
                profile_build.process_settings(self._cache, preprocess=False)
        if conanfile_path.endswith(".py"):
            lock_python_requires = None
            if graph_lock and not test:  # Only lock python requires if it is not test_package
                node_id = graph_lock.get_consumer(graph_info.root)
                lock_python_requires = graph_lock.python_requires(node_id)
            conanfile = self._loader.load_consumer(conanfile_path,
                                                   profile_host=profile_host,
                                                   name=name, version=version,
                                                   user=user, channel=channel,
                                                   lock_python_requires=lock_python_requires)
            if profile_build:
                conanfile.settings_build = profile_build.processed_settings.copy()
                conanfile.settings_target = None
            if test:
                conanfile.display_name = "%s (test package)" % str(test)
                conanfile.output.scope = conanfile.display_name
            run_configure_method(conanfile, down_options=None, down_ref=None, ref=None)
        else:
            conanfile = self._loader.load_conanfile_txt(conanfile_path, profile_host=profile_host)
        # NOTE(review): load_deps_info is defined elsewhere in this module;
        # presumably it injects the stored dependency info (BUILD_INFO) into
        # the conanfile -- confirm against its definition.
        load_deps_info(info_folder, conanfile, required=deps_info_required)
        return conanfile
def load_graph(self, reference, create_reference, graph_info, build_mode, check_updates, update,
remotes, recorder, apply_build_requires=True):
""" main entry point to compute a full dependency graph
"""
root_node = self._load_root_node(reference, create_reference, graph_info)
deps_graph = self._resolve_graph(root_node, graph_info, build_mode, check_updates, update,
remotes, recorder,
apply_build_requires=apply_build_requires)
# Run some validations once the graph is built
self._validate_graph_provides(deps_graph)
return deps_graph
    def _load_root_node(self, reference, create_reference, graph_info):
        """ creates the first, root node of the graph, loading or creating a conanfile
        and initializing it (settings, options) as necessary. Also locking with lockfile
        information

        Dispatches on the kind of *reference*:
          - list of refs            -> VIRTUAL node (workspace install)
          - ConanFileReference      -> VIRTUAL node that requires it
          - path + create_reference -> test_package CONSUMER node
          - path                    -> plain CONSUMER node (.py/.txt)
        """
        profile = graph_info.profile_host
        graph_lock = graph_info.graph_lock
        profile.dev_reference = create_reference  # Make sure the created one has develop=True
        if isinstance(reference, list):  # Install workspace with multiple root nodes
            conanfile = self._loader.load_virtual(reference, profile, scope_options=False)
            # Locking in workspaces not implemented yet
            return Node(ref=None, context=CONTEXT_HOST, conanfile=conanfile, recipe=RECIPE_VIRTUAL)
        # create (without test_package), install|info|graph|export-pkg <ref>
        if isinstance(reference, ConanFileReference):
            return self._load_root_direct_reference(reference, graph_lock, profile)
        path = reference  # The reference must be pointing to a user space conanfile
        if create_reference:  # Test_package -> tested reference
            return self._load_root_test_package(path, create_reference, graph_lock, profile)
        # It is a path to conanfile.py or conanfile.txt
        root_node, ref = self._load_root_consumer(path, graph_lock, profile, graph_info.root)
        graph_info.root = ref  # To store it for later calls
        return root_node
    def _load_root_consumer(self, path, graph_lock, profile, ref):
        """ load a CONSUMER node from a user space conanfile.py or conanfile.txt
        install|info|create|graph <path>
        :path full path to a conanfile
        :graph_lock: might be None, information of lockfiles
        :profile: data to inject to the consumer node: settings, options
        :ref: previous reference of a previous command. Can be used for finding itself in
        the lockfile, or to initialize
        :return: (root_node, ref) -- the node and the (possibly completed)
        reference, so the caller can store it in graph_info
        """
        if path.endswith(".py"):
            lock_python_requires = None
            if graph_lock:
                if ref.name is None:
                    # If the graph_info information is not there, better get what we can from
                    # the conanfile
                    conanfile = self._loader.load_basic(path)
                    ref = ConanFileReference(ref.name or conanfile.name,
                                             ref.version or conanfile.version,
                                             ref.user, ref.channel, validate=False)
                node_id = graph_lock.get_consumer(ref)
                lock_python_requires = graph_lock.python_requires(node_id)
            conanfile = self._loader.load_consumer(path, profile,
                                                   name=ref.name,
                                                   version=ref.version,
                                                   user=ref.user,
                                                   channel=ref.channel,
                                                   lock_python_requires=lock_python_requires)
            # Rebuild ref with whatever name/version the conanfile declares
            # (validate=False: consumers may have partial references).
            ref = ConanFileReference(conanfile.name, conanfile.version,
                                     ref.user, ref.channel, validate=False)
            root_node = Node(ref, conanfile, context=CONTEXT_HOST, recipe=RECIPE_CONSUMER, path=path)
        else:
            # conanfile.txt consumers have no reference of their own.
            conanfile = self._loader.load_conanfile_txt(path, profile, ref=ref)
            root_node = Node(None, conanfile, context=CONTEXT_HOST, recipe=RECIPE_CONSUMER,
                             path=path)
        if graph_lock:  # Find the Node ID in the lock of current root
            node_id = graph_lock.get_consumer(root_node.ref)
            root_node.id = node_id
        return root_node, ref
def _load_root_direct_reference(self, reference, graph_lock, profile):
""" When a full reference is provided:
install|info|graph <ref> or export-pkg .
:return a VIRTUAL root_node with a conanfile that requires the reference
"""
if not self._cache.config.revisions_enabled and reference.revision is not None:
raise ConanException("Revisions not enabled in the client, specify a "
"reference without revision")
conanfile = self._loader.load_virtual([reference], profile)
root_node = Node(ref=None, conanfile=conanfile, context=CONTEXT_HOST, recipe=RECIPE_VIRTUAL)
if graph_lock: # Find the Node ID in the lock of current root
graph_lock.find_require_and_lock(reference, conanfile)
return root_node
    def _load_root_test_package(self, path, create_reference, graph_lock, profile):
        """ when a test_package/conanfile.py is provided, together with the reference that is
        being created and need to be tested
        :return a CONSUMER root_node with a conanfile.py with an injected requires to the
        created reference
        """
        test = str(create_reference)
        # do not try apply lock_python_requires for test_package/conanfile.py consumer
        conanfile = self._loader.load_consumer(path, profile, user=create_reference.user,
                                               channel=create_reference.channel)
        conanfile.display_name = "%s (test package)" % str(test)
        conanfile.output.scope = conanfile.display_name
        # Injecting the tested reference
        require = conanfile.requires.get(create_reference.name)
        if require:
            # The test recipe already requires it; pin both the ref and the
            # range ref to the exact created reference.
            require.ref = require.range_ref = create_reference
        else:
            conanfile.requires.add_ref(create_reference)
        # validate=False: the test package itself may have a partial name.
        ref = ConanFileReference(conanfile.name, conanfile.version,
                                 create_reference.user, create_reference.channel, validate=False)
        root_node = Node(ref, conanfile, recipe=RECIPE_CONSUMER, context=CONTEXT_HOST, path=path)
        if graph_lock:
            # Lock the injected requirement against the created reference.
            graph_lock.find_require_and_lock(create_reference, conanfile)
        return root_node
    def _resolve_graph(self, root_node, graph_info, build_mode, check_updates,
                       update, remotes, recorder, apply_build_requires=True):
        """Expand *root_node* into a full dependency graph.

        Also copies the resolved options and root ref back into
        *graph_info*, captures a lockfile when none exists yet, and
        reports version-range resolutions and build-mode matches.
        """
        build_mode = BuildMode(build_mode, self._output)
        profile_host = graph_info.profile_host
        graph_lock = graph_info.graph_lock
        deps_graph = self._load_graph(root_node, check_updates, update,
                                      build_mode=build_mode, remotes=remotes,
                                      recorder=recorder,
                                      profile_host=profile_host,
                                      profile_build=graph_info.profile_build,
                                      apply_build_requires=apply_build_requires,
                                      graph_lock=graph_lock)
        # THIS IS NECESSARY to store dependencies options in profile, for consumer
        # FIXME: This is a hack. Might dissapear if graph for local commands is always recomputed
        graph_info.options = root_node.conanfile.options.values
        if root_node.ref:
            graph_info.root = root_node.ref
        if graph_info.graph_lock is None:
            # First resolution without a lock: capture the result as one.
            graph_info.graph_lock = GraphLock(deps_graph, self._cache.config.revisions_enabled)
        version_ranges_output = self._resolver.output
        if version_ranges_output:
            self._output.success("Version ranges solved")
            for msg in version_ranges_output:
                self._output.info("    %s" % msg)
            self._output.writeln("")
            self._resolver.clear_output()
        build_mode.report_matches()
        return deps_graph
@staticmethod
def _get_recipe_build_requires(conanfile, default_context):
conanfile.build_requires = _RecipeBuildRequires(conanfile, default_context)
if hasattr(conanfile, "build_requirements"):
with get_env_context_manager(conanfile):
with conanfile_exception_formatter(str(conanfile), "build_requirements"):
conanfile.build_requirements()
return conanfile.build_requires
def _recurse_build_requires(self, graph, builder, check_updates,
update, build_mode, remotes, profile_build_requires, recorder,
profile_host, profile_build, graph_lock, apply_build_requires=True,
nodes_subset=None, root=None):
"""
:param graph: This is the full dependency graph with all nodes from all recursions
"""
default_context = CONTEXT_BUILD if profile_build else CONTEXT_HOST
self._binary_analyzer.evaluate_graph(graph, build_mode, update, remotes, nodes_subset, root)
if not apply_build_requires:
return
for node in graph.ordered_iterate(nodes_subset):
# Virtual conanfiles doesn't have output, but conanfile.py and conanfile.txt do
# FIXME: To be improved and build a explicit model for this
if node.recipe == RECIPE_VIRTUAL:
continue
# Packages with PACKAGE_ID_UNKNOWN might be built in the future, need build requires
if (node.binary not in (BINARY_BUILD, BINARY_EDITABLE, BINARY_UNKNOWN)
and node.recipe != RECIPE_CONSUMER):
continue
package_build_requires = self._get_recipe_build_requires(node.conanfile, default_context)
str_ref = str(node.ref)
# Compute the update of the current recipe build_requires when updated with the
# downstream profile-defined build-requires
new_profile_build_requires = []
for pattern, build_requires in profile_build_requires.items():
if ((node.recipe == RECIPE_CONSUMER and pattern == "&") or
(node.recipe != RECIPE_CONSUMER and pattern == "&!") or
fnmatch.fnmatch(str_ref, pattern)):
for build_require in build_requires:
br_key = (build_require.name, default_context)
if br_key in package_build_requires: # Override defined
# this is a way to have only one package Name for all versions
# (no conflicts)
# but the dict key is not used at all
package_build_requires[br_key] = build_require
# Profile one or in different context
elif build_require.name != node.name or default_context != node.context:
new_profile_build_requires.append((build_require, default_context))
            # NOTE(review): this inner helper deliberately shadows the
            # enclosing method's name. It extends the graph with `node`'s
            # build-requires (`br_list`) and then recurses through
            # self._recurse_build_requires over the newly added subgraph,
            # passing `transitive_build_requires` as the profile
            # build-requires for the next level.
            def _recurse_build_requires(br_list, transitive_build_requires):
                nodessub = builder.extend_build_requires(graph, node, br_list, check_updates,
                                                         update, remotes, profile_host,
                                                         profile_build, graph_lock)
                self._recurse_build_requires(graph, builder, check_updates, update, build_mode,
                                             remotes, transitive_build_requires, recorder,
                                             profile_host, profile_build, graph_lock,
                                             nodes_subset=nodessub, root=node)
if package_build_requires:
if default_context == CONTEXT_BUILD:
br_build, br_host = [], []
for (_, ctxt), it in package_build_requires.items():
if ctxt == CONTEXT_BUILD:
br_build.append((it, ctxt))
else:
br_host.append((it, ctxt))
if br_build:
_recurse_build_requires(br_build, profile_build.build_requires)
if br_host:
| |
# <gh_stars>100-1000
#!/usr/bin/env python3
"""Downloader of sample audio data
Configurations are in the directory downloder_conf.
Usage:
download_speech_corpus.py <config> [-h] [-q] [-f] [-m]
Parameters:
<config> The path of configuration file
-h, --help Show this help and exit
-q, --quiet Don't show any messages about progress
-f, --force Overwrite existing corpus files
"""
import os
import re
import shutil
import urllib.parse
import urllib.request
from fnmatch import fnmatchcase
from pathlib import Path
from tempfile import TemporaryDirectory
import yaml
from docopt import docopt
class UserOption:
    """
    Class (structure) that contains flags given in Parameters.

    Parameters
    ----------
    verbose : bool
        Whether progress messages are printed
        (False when `-q`/`--quiet` was given).
    force : bool
        Whether existing corpus files may be overwritten
        (True when `-f`/`--force` was given).
    """
    def __init__(self, verbose=True, force=False):
        self.verbose = verbose
        self.force = force
class FixedStrPattern:
    """
    Pattern that matches a whole string exactly.

    All pattern classes share the `match` interface via duck typing,
    so callers never need to know the concrete pattern type.

    Parameters
    ----------
    expected : str
        string that expresses the pattern
    """

    def __init__(self, expected):
        self.expected = expected

    def match(self, target):
        """
        Return True when `target` is exactly the expected string.
        """
        return self.expected == target

    def __repr__(self):
        return "{}({})".format(type(self).__name__, repr(self.expected))
class PartialMatchFixedStrPattern(FixedStrPattern):
    """
    Fixed-string pattern that matches on substring containment.

    e.g.::

        PartialMatchFixedStrPattern("44").match("dataset1/44_1k/001.wav")
        => True
    """

    def match(self, target):
        """Return True when the expected string occurs anywhere in `target`."""
        return target.find(self.expected) != -1
class RegExPattern:
    """
    Pattern backed by a compiled regular expression.

    Parameters
    ----------
    expected : str
        string that expresses the pattern (a regular expression)
    """

    def __init__(self, expected):
        self.expected = re.compile(expected)

    def match(self, target):
        """Return True when the regex matches at the start of `target`."""
        matched = self.expected.match(target)
        return matched is not None

    def __repr__(self):
        return "{}({})".format(
            type(self).__name__, repr(self.expected.pattern)
        )
class GlobPattern(FixedStrPattern):
    """
    Pattern matched with case-sensitive shell-style globbing.

    Like: SF* MF*

    Parameters
    ----------
    expected : str
        string that expresses the pattern (a glob)
    """

    def match(self, target):
        """Return True when `target` matches the glob pattern."""
        return fnmatchcase(target, self.expected)
def generate_pattern_from_obj(pattern_obj, DefaultPattern=FixedStrPattern):
    """
    Generates an appropriate pattern from object from YAML.

    Parameters
    ----------
    pattern_obj : Union[str, Dict[str, str]]
        Parsed YAML object.
        Only one object indicated by these expressions is allowed.
        e.g.::
            pattern -> fixed string (default; changeable by 2nd argument)
            regex: pattern
            regexp: pattern
                -> regexp
            glob: pattern -> glob
    DefaultPattern
        Pattern class used when `glob` or `regex(p)` is not designated.

    Returns
    -------
    Object
        Generated pattern instance.

    Raises
    ------
    ValueError
        If the pattern type key is unknown, or if `pattern_obj` is
        neither a string nor a single-item dictionary.
    """
    if isinstance(pattern_obj, str):
        return DefaultPattern(pattern_obj)
    elif isinstance(pattern_obj, dict) and len(pattern_obj) == 1:
        pattern_type, pattern = next(iter(pattern_obj.items()))  # first item
        if pattern_type in {"regex", "regexp"}:
            return RegExPattern(pattern)
        elif pattern_type == "glob":
            return GlobPattern(pattern)
        else:
            # BUG FIX: report the unknown *type key*, not the pattern value,
            # so the error message points at what is actually wrong.
            raise ValueError(
                "Unknown pattern type: `{}`.".format(pattern_type)
            )
    else:
        raise ValueError("Pattern object must be str or dictionary w/ 1 item.")
class PatternList:
    """
    List of patterns that provides the method `match`.

    Parameters
    ----------
    patterns : List[Pattern]
        List of patterns
    """

    def __init__(self, patterns):
        if isinstance(patterns, list):
            self.patterns = patterns
        else:
            self.patterns = list(patterns)

    @classmethod
    def from_obj(cls, patterns_obj, DefaultPattern=FixedStrPattern):
        """
        Constructs an instance from an object obtained by parsing YAML.

        Objects must be one of like::

            foo
            regex: foo
            - foo
            - glob: bar
            - regexp: baz

        This is used to express `only` or `except` clauses.

        Parameters
        ----------
        patterns_obj : Union[str, Dict[str, str],
                             Iterable[Union[str, Dict[str, str]]]]
            object generated by parsing YAML
        DefaultPattern
            Pattern class used when `glob` or `regex(p)` is not designated.
        """
        if isinstance(patterns_obj, (str, dict)):
            # A single `foo` or `[pattern type]: foo` entry.
            pattern_objs = [patterns_obj]
        else:
            # Itemized with a list (or any other iterable).
            pattern_objs = patterns_obj
        return cls([
            generate_pattern_from_obj(obj, DefaultPattern)
            for obj in pattern_objs
        ])

    def match(self, target):
        """
        Checks whether the string matches any of the patterns.

        Parameters
        ----------
        target : str
            the string to check
        """
        for pattern in self.patterns:
            if pattern.match(target):
                return True
        return False
class ExtensionList:
    """
    Extension list used to search for audio files in directories.

    Parameters
    ----------
    extensions : Union[List[str], str]
        extensions of audio. e.g. `wav`, [`.mp3`, `opus`], or `.m4a`.
    """

    def __init__(self, extensions):
        if isinstance(extensions, str):
            extensions = [extensions]
        if not isinstance(extensions, list):
            raise ValueError("extensions list must be an instance of list.")
        # Normalize by dropping leading dots (".wav" -> "wav").
        self.extensions = [extension.lstrip(".") for extension in extensions]

    def itemize_in_directory(self, directory, recurse=False):
        """
        Search for audio files with the designated extensions in the directory.

        Parameters
        ----------
        directory : Path
            The path of the directory where audio files are searched for.
        recurse : bool
            `True` if audio files in subdirectories must be searched.

        Returns
        -------
        Generator[Path, None, None]
            paths of audio files.
        """
        query_prefix = "**/*." if recurse else "*."
        for extension in self.extensions:
            yield from directory.glob(query_prefix + extension)
class BaseNameFilter:
    """
    Filters paths of directories, looking only at their basenames.

    Parameters
    ----------
    only : PatternList
        patterns directories must match (the `only` clause),
        or None to accept everything.
    excepted : PatternList
        patterns directories must not match (the `except` clause),
        or None to exclude nothing.
    """

    def __init__(self, only, excepted):
        self.only = only
        self.excepted = excepted

    def filter(self, path_list):
        """
        Filters list of paths of directories.

        Yields the paths whose basename matches any pattern of the
        `only` clause and none of the `except` clause.

        Parameters
        ----------
        path_list : Iterable[Path]
            list of paths

        Returns
        -------
        Generator[Path, None, None]
            list of paths
        """
        for path in path_list:
            if self.only is not None and not self.only.match(path.name):
                continue
            if self.excepted is not None and self.excepted.match(path.name):
                continue
            yield path

    @classmethod
    def from_obj(cls, only, excepted):
        """
        Generates an instance from objects generated by parsing YAML.

        Parameters
        ----------
        only : Union[str, Dict[str, str],
                     Iterable[Union[str, Dict[str, str]]]]
            parsed contents in `only` clause.
        excepted : Union[str, Dict[str, str],
                         Iterable[Union[str, Dict[str, str]]]]
            parsed contents in `except` clause.
        """
        only_patterns = None if only is None else PatternList.from_obj(only)
        except_patterns = (
            None if excepted is None else PatternList.from_obj(excepted)
        )
        return cls(only_patterns, except_patterns)
class PosixRelativePathFilter(BaseNameFilter):
    """
    Filter that inspects POSIX-style paths relative to a designated
    root directory instead of bare basenames.
    """

    def filter(self, path_list, root_dir):
        """
        Filters list of paths of directories.

        Yields the paths whose relative POSIX form matches any pattern
        of the `only` clause and none of the `except` clause.

        Parameters
        ----------
        path_list : Iterable[Path]
            list of paths
        root_dir : Path
            path of directory that paths in `path_list` are converted
            to relative forms from.
            e.g. `"dataset1/001.wav"` is passed
            to `self.only` and `self.excepted` when::

                path_list = ["tmp/root/speaker1/dataset1/001.wav"]
                root_dir = "tmp/root/speaker1"

        Returns
        -------
        Generator[Path, None, None]
            list of paths
        """
        def _passes(path):
            # The relative path is computed lazily per clause, exactly as
            # the short-circuiting predicate of the original did.
            if self.only is not None:
                if not self.only.match(path.relative_to(root_dir).as_posix()):
                    return False
            if self.excepted is not None:
                if self.excepted.match(path.relative_to(root_dir).as_posix()):
                    return False
            return True

        for path in path_list:
            if _passes(path):
                yield path

    @classmethod
    def from_obj(cls, only, excepted):
        """
        Generates an instance from objects generated by parsing YAML.

        Parameters
        ----------
        only : Union[str, Dict[str, str],
                     Iterable[Union[str, Dict[str, str]]]]
            parsed contents in `only` clause.
        excepted : Union[str, Dict[str, str],
                         Iterable[Union[str, Dict[str, str]]]]
            parsed contents in `except` clause.
        """
        def _build(obj):
            if obj is None:
                return None
            return PatternList.from_obj(obj, PartialMatchFixedStrPattern)

        return cls(_build(only), _build(excepted))
class GlobalConfiguration:
    """
    Configuration in `config` clause in configuration file.

    Parameters
    ----------
    config : Dict(YAML-parsed objects)
        Contents in `config` clause.
    """

    def __init__(self, config):
        if not isinstance(config, dict):
            raise ValueError("The argument must be a dictionary.")
        # Default: only wave files are stored.
        extensions = config.get("extensions", ["wav"])
        self.extensions = ExtensionList(extensions)
class DataArchive:
"""
Corresponds to each recipe in `files` clause.
It corresponds to one archive file and audio files in it.
Parameters
----------
file_config: Dict[str, Any]
parsed contents of each element in `files` clause.
global_config : Dict[str, Any]
parsed contents in `config` clause.
user_option : UserOption
User option of this program (verbose etc.).
"""
def __init__(self, file_config, global_config, user_option):
self.name = file_config["name"]
self.src_url = file_config["src"]
# Leading `/` throws away the path of a directory
# where a archive file is extracted
self.audio_root_relative = file_config["root"].lstrip("/")
self.global_config = global_config
self.user_option = user_option
self.file_path_filter = BaseNameFilter.from_obj(
file_config.get("only"), file_config.get("except")
)
if "each_dir" in file_config:
self.each_dir_filter = PosixRelativePathFilter.from_obj(
file_config["each_dir"].get("only"),
file_config["each_dir"].get("except")
)
self.recurse_subdir = (
lambda f: f if isinstance(f, bool) else False
)(file_config["each_dir"].get("recurse"))
else:
self.each_dir_filter = PosixRelativePathFilter(None, None)
self.recurse_subdir = False
def download(self, dest_root):
"""
Downloads archive and extracts audio files.
Parameters
----------
dest_root : Path
the root path where directories
that contains audio files are placed.
"""
with TemporaryDirectory() as working_dir:
working_dir = Path(working_dir) # convert from str
if re.match(r"^(https?|ftp)://", self.src_url):
# download archive and extract files in the working directory.
if self.user_option.verbose:
print(
"Downloading",
self.name,
"from",
self.src_url,
"..."
)
archive_path = DataArchive._download_file(
self.src_url, working_dir
)
else:
if self.src_url.startswith("file:///"):
# posix: file:///home/user/ -> /home/user
# windows: files:///C:/Users/user/ -> C:/Users/user/
archive_path = self.src_url.replace(
"file://" if os.name == "posix" else "file:///", "", 1
)
else:
archive_path | |
de Pedras - RN', 'pt': 'Lagoa de Pedras - RN'},
'55843693':{'en': 'Touros - RN', 'pt': 'Touros - RN'},
'55843694':{'en': 'Monte das Gameleiras - RN', 'pt': 'Monte das Gameleiras - RN'},
'55843695':{'en': 'Lagoa de Velhos - RN', 'pt': 'Lagoa de Velhos - RN'},
'55843696':{'en': u('Cai\u00e7ara do Norte - RN'), 'pt': u('Cai\u00e7ara do Norte - RN')},
'55843697':{'en': 'Parazinho - RN', 'pt': 'Parazinho - RN'},
'55843737':{'en': 'Parnamirim - RN', 'pt': 'Parnamirim - RN'},
'55844102':{'en': 'Natal - RN', 'pt': 'Natal - RN'},
'55844109':{'en': u('Mossor\u00f3 - RN'), 'pt': u('Mossor\u00f3 - RN')},
'5585':{'en': u('Cear\u00e1'), 'pt': u('Cear\u00e1')},
'55853004':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853012':{'en': 'Caucaia - CE', 'pt': 'Caucaia - CE'},
'55853014':{'en': u('Maracana\u00fa - CE'), 'pt': u('Maracana\u00fa - CE')},
'55853022':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853031':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853048':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853051':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853084':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853092':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853104':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853111':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853113':{'en': 'Aquiraz - CE', 'pt': 'Aquiraz - CE'},
'55853123':{'en': 'Caucaia - CE', 'pt': 'Caucaia - CE'},
'55853133':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853181':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853182':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853198':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'558532':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853213':{'en': 'Caucaia - CE', 'pt': 'Caucaia - CE'},
'55853260':{'en': u('Eus\u00e9bio - CE'), 'pt': u('Eus\u00e9bio - CE')},
'55853301':{'en': 'Beberibe - CE', 'pt': 'Beberibe - CE'},
'55853302':{'en': u('Apuiar\u00e9s - CE'), 'pt': u('Apuiar\u00e9s - CE')},
'55853304':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853305':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853306':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853315':{'en': u('S\u00e3o Gon\u00e7alo do Amarante - CE'), 'pt': u('S\u00e3o Gon\u00e7alo do Amarante - CE')},
'55853319':{'en': 'Chorozinho - CE', 'pt': 'Chorozinho - CE'},
'55853320':{'en': 'Paramoti - CE', 'pt': 'Paramoti - CE'},
'55853321':{'en': 'Guaramiranga - CE', 'pt': 'Guaramiranga - CE'},
'55853322':{'en': 'Ocara - CE', 'pt': 'Ocara - CE'},
'55853323':{'en': u('Teju\u00e7uoca - CE'), 'pt': u('Teju\u00e7uoca - CE')},
'55853324':{'en': 'Caridade - CE', 'pt': 'Caridade - CE'},
'55853325':{'en': 'Pacoti - CE', 'pt': 'Pacoti - CE'},
'55853326':{'en': 'Capistrano - CE', 'pt': 'Capistrano - CE'},
'55853328':{'en': 'Mulungu - CE', 'pt': 'Mulungu - CE'},
'55853329':{'en': 'Aratuba - CE', 'pt': 'Aratuba - CE'},
'55853331':{'en': 'Barreira - CE', 'pt': 'Barreira - CE'},
'55853332':{'en': u('Reden\u00e7\u00e3o - CE'), 'pt': u('Reden\u00e7\u00e3o - CE')},
'55853334':{'en': 'Cascavel - CE', 'pt': 'Cascavel - CE'},
'55853336':{'en': 'Horizonte - CE', 'pt': 'Horizonte - CE'},
'55853337':{'en': 'Aracoiaba - CE', 'pt': 'Aracoiaba - CE'},
'55853338':{'en': 'Beberibe - CE', 'pt': 'Beberibe - CE'},
'55853339':{'en': u('Palm\u00e1cia - CE'), 'pt': u('Palm\u00e1cia - CE')},
'55853341':{'en': 'Maranguape - CE', 'pt': 'Maranguape - CE'},
'55853342':{'en': 'Caucaia - CE', 'pt': 'Caucaia - CE'},
'55853343':{'en': u('Canind\u00e9 - CE'), 'pt': u('Canind\u00e9 - CE')},
'55853344':{'en': 'Paracuru - CE', 'pt': 'Paracuru - CE'},
'55853345':{'en': 'Pacatuba - CE', 'pt': 'Pacatuba - CE'},
'55853346':{'en': u('Itapag\u00e9 - CE'), 'pt': u('Itapag\u00e9 - CE')},
'55853347':{'en': u('Baturit\u00e9 - CE'), 'pt': u('Baturit\u00e9 - CE')},
'55853348':{'en': 'Pacajus - CE', 'pt': 'Pacajus - CE'},
'55853351':{'en': 'Trairi - CE', 'pt': 'Trairi - CE'},
'55853352':{'en': 'Pentecoste - CE', 'pt': 'Pentecoste - CE'},
'55853353':{'en': 'Uruburetama - CE', 'pt': 'Uruburetama - CE'},
'55853355':{'en': u('S\u00e3o Lu\u00eds do Curu - CE'), 'pt': u('S\u00e3o Lu\u00eds do Curu - CE')},
'55853356':{'en': u('Apuiar\u00e9s - CE'), 'pt': u('Apuiar\u00e9s - CE')},
'55853357':{'en': 'General Sampaio - CE', 'pt': 'General Sampaio - CE'},
'55853358':{'en': 'Tururu - CE', 'pt': 'Tururu - CE'},
'55853361':{'en': 'Aquiraz - CE', 'pt': 'Aquiraz - CE'},
'55853362':{'en': 'Aquiraz - CE', 'pt': 'Aquiraz - CE'},
'55853363':{'en': 'Paraipaba - CE', 'pt': 'Paraipaba - CE'},
'55853364':{'en': 'Umirim - CE', 'pt': 'Umirim - CE'},
'55853365':{'en': u('Canind\u00e9 - CE'), 'pt': u('Canind\u00e9 - CE')},
'55853366':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853369':{'en': 'Maranguape - CE', 'pt': 'Maranguape - CE'},
'55853371':{'en': u('Maracana\u00fa - CE'), 'pt': u('Maracana\u00fa - CE')},
'55853372':{'en': u('S\u00e3o Gon\u00e7alo do Amarante - CE'), 'pt': u('S\u00e3o Gon\u00e7alo do Amarante - CE')},
'55853373':{'en': 'Acarape - CE', 'pt': 'Acarape - CE'},
'55853375':{'en': 'Pindoretama - CE', 'pt': 'Pindoretama - CE'},
'55853376':{'en': u('Guai\u00faba - CE'), 'pt': u('Guai\u00faba - CE')},
'55853377':{'en': 'Itaitinga - CE', 'pt': 'Itaitinga - CE'},
'55853381':{'en': u('Maracana\u00fa - CE'), 'pt': u('Maracana\u00fa - CE')},
'55853382':{'en': u('Maracana\u00fa - CE'), 'pt': u('Maracana\u00fa - CE')},
'55853383':{'en': u('Maracana\u00fa - CE'), 'pt': u('Maracana\u00fa - CE')},
'55853384':{'en': u('Maracana\u00fa - CE'), 'pt': u('Maracana\u00fa - CE')},
'55853387':{'en': 'Caucaia - CE', 'pt': 'Caucaia - CE'},
'55853403':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853404':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'5585345':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853461':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853462':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853463':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853464':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853468':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'5585347':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853475':{'en': 'Caucaia - CE', 'pt': 'Caucaia - CE'},
'5585348':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'5585349':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853650':{'en': 'Carnaubal - CE', 'pt': 'Carnaubal - CE'},
'55853667':{'en': 'Itarema - CE', 'pt': 'Itarema - CE'},
'55853813':{'en': u('S\u00e3o Paulo - SP'), 'pt': u('S\u00e3o Paulo - SP')},
'55853877':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853923':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55853924':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55854003':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55854007':{'en': u('Maracana\u00fa - CE'), 'pt': u('Maracana\u00fa - CE')},
'55854042':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55854062':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55854102':{'en': 'Fortaleza - CE', 'pt': 'Fortaleza - CE'},
'55854117':{'en': u('Maracana\u00fa - CE'), 'pt': u('Maracana\u00fa - CE')},
'55855672':{'en': u('S\u00e3o Paulo - SP'), 'pt': u('S\u00e3o Paulo - SP')},
'5586':{'en': u('Piau\u00ed'), 'pt': u('Piau\u00ed')},
'55862106':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55862107':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863081':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863086':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863087':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863089':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863122':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863131':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863133':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863194':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863198':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863212':{'en': 'Timon - MA', 'pt': 'Timon - MA'},
'55863214':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863216':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863219':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'5586322':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863239':{'en': u('Santo Ant\u00f4nio dos Milagres - PI'), 'pt': u('Santo Ant\u00f4nio dos Milagres - PI')},
'55863240':{'en': u('Cabeceiras do Piau\u00ed - PI'), 'pt': u('Cabeceiras do Piau\u00ed - PI')},
'55863241':{'en': 'Boa Hora - PI', 'pt': 'Boa Hora - PI'},
'55863242':{'en': 'Barras - PI', 'pt': 'Barras - PI'},
'55863243':{'en': 'Porto - PI', 'pt': 'Porto - PI'},
'55863244':{'en': 'Miguel Alves - PI', 'pt': 'Miguel Alves - PI'},
'55863245':{'en': u('Nossa Senhora dos Rem\u00e9dios - PI'), 'pt': u('Nossa Senhora dos Rem\u00e9dios - PI')},
'55863247':{'en': u('Castelo do Piau\u00ed - PI'), 'pt': u('Castelo do Piau\u00ed - PI')},
'55863248':{'en': 'Buriti dos Montes - PI', 'pt': 'Buriti dos Montes - PI'},
'55863249':{'en': u('S\u00e3o Miguel do Tapuio - PI'), 'pt': u('S\u00e3o Miguel do Tapuio - PI')},
'55863250':{'en': u('Prata do Piau\u00ed - PI'), 'pt': u('Prata do Piau\u00ed - PI')},
'55863251':{'en': u('S\u00e3o Jo\u00e3o da Serra - PI'), 'pt': u('S\u00e3o Jo\u00e3o da Serra - PI')},
'55863252':{'en': 'Campo Maior - PI', 'pt': 'Campo Maior - PI'},
'55863253':{'en': u('Juazeiro do Piau\u00ed - PI'), 'pt': u('Juazeiro do Piau\u00ed - PI')},
'55863254':{'en': u('Assun\u00e7\u00e3o do Piau\u00ed - PI'), 'pt': u('Assun\u00e7\u00e3o do Piau\u00ed - PI')},
'55863256':{'en': u('Alto Long\u00e1 - PI'), 'pt': u('Alto Long\u00e1 - PI')},
'55863258':{'en': 'Monsenhor Gil - PI', 'pt': 'Monsenhor Gil - PI'},
'55863259':{'en': u('Lagoa do Piau\u00ed - PI'), 'pt': u('Lagoa do Piau\u00ed - PI')},
 '55863260':{'en': u('Demerval Lob\u00e3o - PI'), 'pt': u('Demerval Lob\u00e3o - PI')},
'55863262':{'en': 'Altos - PI', 'pt': 'Altos - PI'},
'55863263':{'en': 'Cocal de | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Functions for enabling AMP (automatic mixed precision)."""
# Public API of the AMP module.
__all__ = ['init', 'init_trainer', 'scale_loss', 'unscale', 'convert_model',
           'convert_hybrid_block', 'list_lp16_ops', 'list_fp32_ops',
           'list_lp16_fp32_ops', 'list_conditional_fp32_ops',
           'list_widest_type_cast', 'list_loss_output_functions', 'list_lp16_use_fp32_params',
           'convert_symbol']
from array import array
import ctypes
import logging
import contextlib
import numpy as np
from ... import symbol
from ...context import gpu
from ...symbol import Symbol
from ...symbol import contrib as symbol_contrib
from ... import ndarray
from ...ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
from . import lists
from ...gluon import trainer
from ... import base
from ...base import c_str_array, SymbolHandle, check_call, _LIB, mx_uint, c_array_buf
from ... import optimizer as opt
from .loss_scaler import LossScaler
# Structured stand-in dtype for bfloat16, which NumPy does not provide
# natively; the value is carried in a uint16 payload field.
bfloat16 = np.dtype([('bfloat16', np.uint16)])
def _cast_symbol_NDArray(s, dtype):
    """Cast `s` to `dtype` via amp_cast when appropriate.

    Symbols are always wrapped in an amp_cast op. NDArrays are cast only
    when their dtype differs from the target and is a castable float type
    for the array's device: float16/float32 off-CPU, bfloat16/float32 on
    CPU. Anything else (including non-Symbol/NDArray values) is returned
    unchanged.
    """
    float_types_gpu = (np.float16, np.float32)
    float_types_cpu = (bfloat16, np.float32)
    if isinstance(s, Symbol):
        return symbol.amp_cast(s, dtype=dtype)
    if isinstance(s, NDArray):
        # The original had two byte-identical amp_cast branches for the
        # GPU and CPU cases; they are merged here with no behavior change.
        on_cpu = s.context.device_type == 'cpu'
        castable = (s.dtype in float_types_cpu if on_cpu
                    else s.dtype in float_types_gpu)
        if s.dtype != dtype and castable:
            return ndarray.amp_cast(s, dtype=dtype)
        return s
    return s
def _get_fun_to_wrap(name, module, submodule_dict):
    """Resolve where an operator function actually lives.

    Returns the bare function name together with the module, submodule,
    or `_internal` module that holds it, following the operator
    name-prefix conventions.
    """
    module_internal = getattr(module, "_internal")
    prefix = base._get_op_name_prefix(name)
    if prefix:
        if prefix == '_random_' and not name.endswith('_like'):
            # Bare `_random_*` ops (other than `*_like`) are internal.
            return name, module_internal
        return name[len(prefix):], submodule_dict[prefix]
    if name.startswith('_'):
        return name, module_internal
    return name, module
def _wrap_symbol_functions(module, target_dtype, target_precision_ops=None,
                           conditional_fp32_ops=None, fp32_ops=None):
    """Monkey-patch the operators of `module` (symbol or ndarray) for AMP.

    Four operator lists drive the patching:

    * low-precision ops get their inputs cast to `target_dtype`
      (positions/keywords flagged in the fp32-params list are skipped),
    * FP32 ops get their inputs cast to float32,
    * conditional FP32 ops are cast to float32 only when a given keyword
      argument takes one of a given set of values,
    * "widest type cast" ops have all inputs promoted to the widest dtype
      present among them.
    """
    def _ndarray_wrapper(f, target_dtype, fp32_param=None, cond_arg=None):
        # Wraps an ndarray op: casts its positional and keyword arguments
        # to `target_dtype` via _cast_symbol_NDArray, except the entries
        # flagged in `fp32_param`. When `cond_arg` is given, the cast only
        # applies if kwargs[cond_arg[0]] is one of cond_arg[1].
        def _new_fun(*args, **kwargs):
            if cond_arg is not None:
                if (cond_arg[0] not in kwargs or
                        kwargs[cond_arg[0]] not in cond_arg[1]):
                    return f(*args, **kwargs)
            if fp32_param:
                new_args = []
                for i, x in enumerate(args):
                    if fp32_param[i]:
                        new_args.append(x)
                    else:
                        new_args.append(_cast_symbol_NDArray(x, target_dtype))
            else:
                new_args = list(map(lambda x: _cast_symbol_NDArray(x, target_dtype), args))
            args = tuple(new_args)
            if fp32_param:
                new_kwargs = {}
                for k, v in kwargs.items():
                    if k in fp32_param:
                        new_kwargs[k] = v
                    else:
                        new_kwargs[k] = _cast_symbol_NDArray(v, target_dtype)
                kwargs = new_kwargs
            else:
                kwargs = {k: _cast_symbol_NDArray(v, target_dtype) for k, v in kwargs.items()}
            return f(*args, **kwargs)
        # Preserve the wrapped op's identity for introspection.
        _new_fun.__name__ = f.__name__
        _new_fun.__module__ = f.__module__
        _new_fun.__doc__ = f.__doc__
        return _new_fun

    def _symbol_wrapper(f, target_dtype, fp32_param=None, cond_arg=None):
        # Wraps a symbol op: builds the symbol, then regenerates it as an
        # atomic symbol with amp_cast nodes inserted on its inputs.
        # Auxiliary states are never cast.
        def _new_fun(*args, **kwargs):
            if cond_arg is not None:
                if (cond_arg[0] not in kwargs or
                        kwargs[cond_arg[0]] not in cond_arg[1]):
                    return f(*args, **kwargs)
            sym = f(*args, **kwargs)
            inputs = sym.get_children()
            aux = sym.list_auxiliary_states()
            if fp32_param:
                new_inputs = []
                for i, x in enumerate(inputs):
                    if (x.name in aux) or fp32_param[i]:
                        new_inputs.append(x)
                    else:
                        new_inputs.append(_cast_symbol_NDArray(x, target_dtype))
                inputs = new_inputs
            else:
                inputs = list(map(lambda x: _cast_symbol_NDArray(x, target_dtype)
                                  if x.name not in aux else x, inputs))
            atomic_sym = sym._gen_atomic_symbol()
            wrapped_sym = atomic_sym(*inputs)
            wrapped_sym._set_attr(name=sym.name)
            return wrapped_sym
        _new_fun.__name__ = f.__name__
        _new_fun.__module__ = f.__module__
        _new_fun.__doc__ = f.__doc__
        return _new_fun

    def _symbol_widest_wrapper(f):
        # Promotes all Symbol/NDArray arguments to the widest dtype among
        # them before calling `f`: amp_multicast for symbols, amp_cast to
        # float32 for ndarrays when any input is already float32. Each
        # entry of `symbols` is (container, key/index, value) so the cast
        # result can be written back in place.
        def _new_fun(*args, **kwargs):
            symbols = []
            is_symbol = False
            args = list(args)
            for i, arg in enumerate(args):
                if isinstance(arg, (Symbol, NDArray)):
                    symbols.append((args, i, arg))
                    is_symbol = is_symbol or isinstance(arg, Symbol)
            for k, arg in kwargs.items():
                if isinstance(arg, (Symbol, NDArray)):
                    symbols.append((kwargs, k, arg))
                    is_symbol = is_symbol or isinstance(arg, Symbol)
            if not is_symbol:
                # NDArray case
                widest_type = target_dtype
                for _, _, arg in symbols:
                    if isinstance(arg, NDArray):
                        if arg.dtype == np.float32:
                            widest_type = np.float32
                for arr, index, arg in symbols:
                    if arg.dtype != widest_type and arg.dtype == target_dtype:
                        arr[index] = ndarray.amp_cast(arg, dtype=widest_type)
            else:
                # Symbol case
                sym_to_check = list(map(lambda x: x[2], symbols))
                casted_syms = symbol.amp_multicast(*sym_to_check, num_outputs=len(sym_to_check))
                symbols = list(map(lambda x_y: (x_y[0][0], x_y[0][1], x_y[1]),
                                   zip(symbols, casted_syms)))
                for arr, index, arg in symbols:
                    arr[index] = arg
            return f(*args, **kwargs)
        _new_fun.__name__ = f.__name__
        _new_fun.__module__ = f.__module__
        _new_fun.__doc__ = f.__doc__
        return _new_fun

    # Symbol modules get the symbol wrapper; everything else the ndarray one.
    _wrapper = _symbol_wrapper if module in (symbol, Symbol, symbol_contrib) else _ndarray_wrapper

    # Map operator name prefixes to the submodules holding those ops.
    submodule_dict = {}
    for op_name_prefix in base._OP_NAME_PREFIX_LIST:
        submodule_dict[op_name_prefix] =\
            getattr(module, op_name_prefix[1:-1])

    fp32_param_list = list_lp16_use_fp32_params(target_dtype)
    # 1) Ops whose inputs are cast down to the low-precision target dtype.
    wrap_list = target_precision_ops if target_precision_ops is not None \
        else list_lp16_ops(target_dtype)
    for fun_name in wrap_list:
        try:
            fun_name, cur_module = _get_fun_to_wrap(fun_name, module, submodule_dict)
            f_to_wrap = getattr(cur_module, fun_name)
            fp32_param = fp32_param_list[fun_name] if (fp32_param_list and fun_name in fp32_param_list) else None
            setattr(cur_module, fun_name, _wrapper(f_to_wrap, target_dtype, fp32_param=fp32_param))
            if cur_module == module:
                setattr(module.op, fun_name, _wrapper(f_to_wrap, target_dtype, fp32_param=fp32_param))
        except AttributeError:
            raise

    # 2) Ops forced to run in float32.
    wrap_list = fp32_ops if fp32_ops is not None else list_fp32_ops(target_dtype)
    for fun_name in wrap_list:
        try:
            fun_name, cur_module = _get_fun_to_wrap(fun_name, module, submodule_dict)
            f_to_wrap = getattr(cur_module, fun_name)
            setattr(cur_module, fun_name, _wrapper(f_to_wrap, np.float32))
            if cur_module == module:
                setattr(module.op, fun_name, _wrapper(f_to_wrap, np.float32))
        except AttributeError:
            raise

    # 3) Ops cast to float32 only for particular values of one argument.
    wrap_list = conditional_fp32_ops if conditional_fp32_ops is not None \
        else list_conditional_fp32_ops(target_dtype)
    for fun_name, arg, arg_values in wrap_list:
        try:
            fun_name, cur_module = _get_fun_to_wrap(fun_name, module, submodule_dict)
            f_to_wrap = getattr(cur_module, fun_name)
            setattr(cur_module, fun_name, _wrapper(f_to_wrap, np.float32, cond_arg=(arg, arg_values)))
            if cur_module == module:
                setattr(module.op, fun_name, _wrapper(f_to_wrap, np.float32, cond_arg=(arg, arg_values)))
        except AttributeError:
            raise

    # 4) Ops whose inputs are promoted to the widest dtype among them.
    for fun_name in list_widest_type_cast(target_dtype):
        try:
            fun_name, cur_module = _get_fun_to_wrap(fun_name, module, submodule_dict)
            f_to_wrap = getattr(cur_module, fun_name)
            setattr(cur_module, fun_name, _symbol_widest_wrapper(f_to_wrap))
            if cur_module == module:
                setattr(module.op, fun_name, _symbol_widest_wrapper(f_to_wrap))
        except AttributeError:
            raise
def _wrap_loss_output_functions(module, ls, target_dtype):
    """Patch loss/output ops so their gradients use the dynamic loss scale.

    For the ndarray module the installed wrapper multiplies the op's
    `grad_scale` by the loss scaler's current scale; for other modules a
    wrapper is installed that only warns, since dynamic loss scaling is
    not supported in symbolic/hybridized execution.
    """
    if module == ndarray:
        def _wrapper(f):
            def _scaling_wrapper(*args, **kwargs):
                # Fold the dynamic loss scale into grad_scale (which
                # defaults to the scale itself when absent).
                scale = ls.loss_scale
                if 'grad_scale' in kwargs:
                    scale = kwargs['grad_scale'] * scale
                kwargs['grad_scale'] = scale
                return f(*args, **kwargs)
            _scaling_wrapper.__name__ = f.__name__
            _scaling_wrapper.__module__ = f.__module__
            _scaling_wrapper.__doc__ = f.__doc__
            return _scaling_wrapper
    else:
        def _wrapper(f):
            def _warning_wrapper(*args, **kwargs):
                logging.warning("%s does not support dynamic loss scaling "
                                "in symbolic and hybridized execution.", f.__name__)
                return f(*args, **kwargs)
            _warning_wrapper.__name__ = f.__name__
            _warning_wrapper.__module__ = f.__module__
            _warning_wrapper.__doc__ = f.__doc__
            return _warning_wrapper
    for fun_name in list_loss_output_functions(target_dtype):
        try:
            original = getattr(module, fun_name)
            setattr(module, fun_name, _wrapper(original))
        except AttributeError:
            # Best-effort: skip ops this module does not expose.
            pass
# Module-level AMP state: `init()` flips `_amp_initialized` once the
# operator wrappers are installed and creates `_loss_scaler`;
# `init_trainer()` flips `_amp_loss_scale_initialized` when the first
# trainer claims that shared scaler (later trainers get fresh instances).
_amp_initialized = False
_amp_loss_scale_initialized = False
_loss_scaler = None
@contextlib.contextmanager
def scale_loss(loss, optimizer_or_trainer):
    """Context manager yielding `loss` multiplied by the dynamic loss scale.

    Also updates the trainer's gradient rescaling factor (`_scale`) so the
    scaled-up gradients are divided back down during the update step.
    Requires that `init_trainer()` has attached a loss scaler first.
    """
    scaler = optimizer_or_trainer._amp_loss_scaler
    assert scaler is not None, \
        'Loss scaler is not initialized, did you forget to call amp.init_trainer()?'
    optimizer_or_trainer._scale = (
        optimizer_or_trainer._amp_original_scale / scaler.loss_scale)
    if isinstance(loss, (list, tuple)):
        yield [l * scaler.loss_scale for l in loss]
    else:
        yield scaler.loss_scale * loss
def init(target_dtype='float16', target_precision_ops=None,
         conditional_fp32_ops=None, fp32_ops=None):
    """Initialize AMP (automatic mixed precision).

    This needs to be done before model creation. Calling it more than
    once is a no-op.

    Parameters
    ----------
    target_dtype : {'float16', 'bfloat16'}
        Target low precision type for AMP. Currently only float16 and bfloat16 are supported.
    target_precision_ops : list of string
        Override the list of functions casted to target_dtype. Entries in this list
        are names of the functions casted to target_dtype.
    conditional_fp32_ops : list of (string, string, list of string)
        Override the list of functions conditionally casted to FP32. The format
        of the list is (name of the function, name of the parameter, list of
        values of the parameter that make the function be casted to FP32).
    fp32_ops : list of string
        Override the list of functions casted to FP32. Entries in this list
        are names of the functions casted to FP32.
    """
    global _amp_initialized
    global _loss_scaler
    if _amp_initialized:
        return
    assert target_dtype in ['float16', np.float16, 'bfloat16', bfloat16], \
        "AMP currently supports only float16 or bfloat16 as a target_dtype"
    _amp_initialized = True
    logging.info("Using AMP")
    target_dtype = (bfloat16 if target_dtype == "bfloat16"
                    else np.dtype(target_dtype))
    # Patch both the symbolic and imperative operator namespaces.
    for mod in (symbol, ndarray):
        _wrap_symbol_functions(mod, target_dtype, target_precision_ops,
                               conditional_fp32_ops, fp32_ops)
    _loss_scaler = LossScaler()
    _wrap_loss_output_functions(ndarray, _loss_scaler, target_dtype)
    _wrap_loss_output_functions(symbol, _loss_scaler, target_dtype)
def init_trainer(optimizer_or_trainer):
    """Initialize trainer or optimizer to work with AMP dynamic loss scaling.

    Parameters
    ----------
    optimizer_or_trainer : Optimizer or Trainer
        MXNet Optimizer or Gluon trainer to initialize with AMP
    """
    global _amp_loss_scale_initialized
    global _amp_initialized
    global _loss_scaler
    assert _amp_initialized, "AMP not initialized, did you forget to call amp.init()?"
    # The first trainer adopts the module-level scaler; any further trainer
    # gets a private one.
    if _amp_loss_scale_initialized:
        scaler = LossScaler()
    else:
        _amp_loss_scale_initialized = True
        scaler = _loss_scaler
    #_wrap_output
    if isinstance(optimizer_or_trainer, trainer.Trainer):
        optimizer_or_trainer._amp_loss_scaler = scaler
        optimizer_or_trainer._amp_original_scale = optimizer_or_trainer._scale
    elif isinstance(optimizer_or_trainer, opt.Optimizer):
        # TODO(ptredak): make it work with the optimizer
        raise TypeError("AMP is currently only compatible with Gluon Trainer")
    else:
        raise TypeError("optimizer_or_trainer should be a Gluon Trainer or "
                        "an optimizer, instead is %s" % type(optimizer_or_trainer))
def unscale(optimizer_or_trainer):
"""Check and unscale the gradients manually. This function should | |
import copy
import os
import torch
import torchvision
import warnings
import math
import utils.misc
import numpy as np
import os.path as osp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import models.modified_resnet_cifar as modified_resnet_cifar
import models.modified_resnetmtl_cifar as modified_resnetmtl_cifar
import models.modified_linear as modified_linear
from PIL import Image
from torch.optim import lr_scheduler
from torchvision import datasets, transforms
from tensorboardX import SummaryWriter
from utils.compute_features import compute_features
from utils.process_mnemonics import process_mnemonics
from utils.compute_accuracy import compute_accuracy
from trainer.incremental import incremental_train_and_eval
from utils.misc import *
from utils.process_fp import process_inputs_fp
warnings.filterwarnings('ignore')
class Trainer(object):
    def __init__(self, the_args):
        """Store the experiment config and prepare paths, device, data and model builders.

        Args:
            the_args: parsed command-line argument namespace for the experiment.
        """
        self.args = the_args
        # Log/checkpoint directory name encodes the main hyper-parameters.
        self.log_dir = './logs/'
        if not osp.exists(self.log_dir):
            os.mkdir(self.log_dir)
        self.save_path = self.log_dir + self.args.dataset + '_nfg' + str(self.args.nb_cl_fg) + '_ncls' + str(self.args.nb_cl) + '_nproto' + str(self.args.nb_protos)
        self.save_path += '_' + self.args.method
        if not osp.exists(self.save_path):
            os.mkdir(self.save_path)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # CIFAR-100 channel statistics used for normalization.
        self.transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023))])
        self.transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023))])
        self.trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=self.transform_train)
        self.testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=self.transform_test)
        # Extra test-split copy whose data/labels get overwritten ad hoc for
        # feature extraction and evaluation in train().
        self.evalset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=self.transform_test)
        # Backbone constructors (not instances): ResNet-32 and its MTL variant.
        self.network = modified_resnet_cifar.resnet32
        self.network_mtl = modified_resnetmtl_cifar.resnetmtl32
        # LR decay milestones; the first phase uses a fixed 160-epoch schedule.
        self.lr_strat_first_phase = [int(160*0.5), int(160*0.75)]
        self.lr_strat = [int(self.args.epochs*0.5), int(self.args.epochs*0.75)]
        self.dictionary_size = self.args.dictionary_size
def map_labels(self, order_list, Y_set):
map_Y = []
for idx in Y_set:
map_Y.append(order_list.index(idx))
map_Y = np.array(map_Y)
return map_Y
    def train(self):
        """Run the class-incremental training protocol on CIFAR-100.

        For each run: load/generate a shuffled class order, then train phase
        by phase. Each phase expands the cosine classifier head, trains with
        distillation against the previous model, selects exemplars by
        herding, computes class-mean features, and evaluates accuracy on the
        first-phase and cumulative test sets.
        """
        self.train_writer = SummaryWriter(logdir=self.save_path)
        dictionary_size = self.dictionary_size
        # Accuracy tensors indexed as [phase, metric variant (4), run].
        top1_acc_list_cumul = np.zeros((int(self.args.num_classes/self.args.nb_cl), 4, self.args.nb_runs))
        top1_acc_list_ori = np.zeros((int(self.args.num_classes/self.args.nb_cl), 4, self.args.nb_runs))
        # NOTE(review): train_data/train_labels/test_data/test_labels are the
        # legacy torchvision CIFAR attributes (pre-0.2.2); newer torchvision
        # exposes .data/.targets instead -- confirm the pinned version.
        X_train_total = np.array(self.trainset.train_data)
        Y_train_total = np.array(self.trainset.train_labels)
        X_valid_total = np.array(self.testset.test_data)
        Y_valid_total = np.array(self.testset.test_labels)
        # Fixed seed so the class order is reproducible across executions.
        np.random.seed(1993)
        for iteration_total in range(self.args.nb_runs):
            # Reuse a cached class order for this run if one exists on disk.
            order_name = osp.join(self.save_path, "seed_{}_{}_order_run_{}.pkl".format(1993, self.args.dataset, iteration_total))
            print("Order name:{}".format(order_name))
            if osp.exists(order_name):
                print("Loading orders")
                order = utils.misc.unpickle(order_name)
            else:
                print("Generating orders")
                order = np.arange(self.args.num_classes)
                np.random.shuffle(order)
                utils.misc.savepickle(order, order_name)
            order_list = list(order)
            print(order_list)
            np.random.seed(self.args.random_seed)
            # Cumulative containers across the incremental phases.
            X_valid_cumuls = []
            X_protoset_cumuls = []
            X_train_cumuls = []
            Y_valid_cumuls = []
            Y_protoset_cumuls = []
            Y_train_cumuls = []
            # Herding ranks per (phase, sample index, class-within-phase).
            alpha_dr_herding = np.zeros((int(self.args.num_classes/self.args.nb_cl),dictionary_size,self.args.nb_cl),np.float32)
            # All training images regrouped per class in the shuffled order.
            prototypes = np.zeros((self.args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3]))
            for orde in range(self.args.num_classes):
                prototypes[orde,:,:,:,:] = X_train_total[np.where(Y_train_total==order[orde])]
            start_iter = int(self.args.nb_cl_fg/self.args.nb_cl)-1
            for iteration in range(start_iter, int(self.args.num_classes/self.args.nb_cl)):
                if iteration == start_iter:
                    # First phase: train from scratch on the first class group.
                    last_iter = 0
                    tg_model = self.network(num_classes=self.args.nb_cl_fg)
                    in_features = tg_model.fc.in_features
                    out_features = tg_model.fc.out_features
                    print("Out_features:", out_features)
                    ref_model = None
                    free_model = None
                    ref_free_model = None
                elif iteration == start_iter+1:
                    # Second phase: freeze a copy as the distillation reference
                    # and replace the cosine head with a two-branch split head
                    # (fc1 = old classes, fc2 = novel classes).
                    last_iter = iteration
                    ref_model = copy.deepcopy(tg_model)
                    print("Fusion Mode: "+self.args.fusion_mode)
                    tg_model = self.network(num_classes=self.args.nb_cl_fg)
                    ref_dict = ref_model.state_dict()
                    tg_dict = tg_model.state_dict()
                    tg_dict.update(ref_dict)
                    tg_model.load_state_dict(tg_dict)
                    tg_model.to(self.device)
                    in_features = tg_model.fc.in_features
                    out_features = tg_model.fc.out_features
                    print("Out_features:", out_features)
                    new_fc = modified_linear.SplitCosineLinear(in_features, out_features, self.args.nb_cl)
                    new_fc.fc1.weight.data = tg_model.fc.weight.data
                    new_fc.sigma.data = tg_model.fc.sigma.data
                    tg_model.fc = new_fc
                    lamda_mult = out_features*1.0 / self.args.nb_cl
                else:
                    # Later phases: fold the previous novel branch into fc1 and
                    # open a fresh fc2 for this phase's classes.
                    last_iter = iteration
                    ref_model = copy.deepcopy(tg_model)
                    in_features = tg_model.fc.in_features
                    out_features1 = tg_model.fc.fc1.out_features
                    out_features2 = tg_model.fc.fc2.out_features
                    print("Out_features:", out_features1+out_features2)
                    new_fc = modified_linear.SplitCosineLinear(in_features, out_features1+out_features2, self.args.nb_cl)
                    new_fc.fc1.weight.data[:out_features1] = tg_model.fc.fc1.weight.data
                    new_fc.fc1.weight.data[out_features1:] = tg_model.fc.fc2.weight.data
                    new_fc.sigma.data = tg_model.fc.sigma.data
                    tg_model.fc = new_fc
                    lamda_mult = (out_features1+out_features2)*1.0 / (self.args.nb_cl)
                # Scale the distillation weight with the old/new class ratio.
                if iteration > start_iter:
                    cur_lamda = self.args.lamda * math.sqrt(lamda_mult)
                else:
                    cur_lamda = self.args.lamda
                # Select the train/test samples of the classes in this phase.
                actual_cl = order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)]
                indices_train_10 = np.array([i in order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)] for i in Y_train_total])
                indices_test_10 = np.array([i in order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)] for i in Y_valid_total])
                X_train = X_train_total[indices_train_10]
                X_valid = X_valid_total[indices_test_10]
                X_valid_cumuls.append(X_valid)
                X_train_cumuls.append(X_train)
                X_valid_cumul = np.concatenate(X_valid_cumuls)
                X_train_cumul = np.concatenate(X_train_cumuls)
                Y_train = Y_train_total[indices_train_10]
                Y_valid = Y_valid_total[indices_test_10]
                Y_valid_cumuls.append(Y_valid)
                Y_train_cumuls.append(Y_train)
                Y_valid_cumul = np.concatenate(Y_valid_cumuls)
                Y_train_cumul = np.concatenate(Y_train_cumuls)
                if iteration == start_iter:
                    # Keep the first-phase test split for the "ori" accuracy.
                    X_valid_ori = X_valid
                    Y_valid_ori = Y_valid
                else:
                    # Mix the stored exemplars into the phase's training data,
                    # optionally oversampling them via rs_ratio.
                    X_protoset = np.concatenate(X_protoset_cumuls)
                    Y_protoset = np.concatenate(Y_protoset_cumuls)
                    if self.args.rs_ratio > 0:
                        scale_factor = (len(X_train) * self.args.rs_ratio) / (len(X_protoset) * (1 - self.args.rs_ratio))
                        rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor))
                        rs_num_samples = int(len(X_train) / (1 - self.args.rs_ratio))
                        print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples))
                    X_train = np.concatenate((X_train,X_protoset),axis=0)
                    Y_train = np.concatenate((Y_train,Y_protoset))
                print('Batch of classes number {0} arrives'.format(iteration+1))
                map_Y_train = np.array([order_list.index(i) for i in Y_train])
                map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
                is_start_iteration = (iteration == start_iter)
                if iteration > start_iter:
                    # Weight imprinting: initialize the novel branch (fc2) with
                    # normalized class-mean features rescaled to the average
                    # old-embedding norm.
                    old_embedding_norm = tg_model.fc.fc1.weight.data.norm(dim=1, keepdim=True)
                    average_old_embedding_norm = torch.mean(old_embedding_norm, dim=0).to('cpu').type(torch.DoubleTensor)
                    tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
                    num_features = tg_model.fc.in_features
                    novel_embedding = torch.zeros((self.args.nb_cl, num_features))
                    for cls_idx in range(iteration*self.args.nb_cl, (iteration+1)*self.args.nb_cl):
                        cls_indices = np.array([i == cls_idx for i in map_Y_train])
                        assert(len(np.where(cls_indices==1)[0])==dictionary_size)
                        self.evalset.test_data = X_train[cls_indices].astype('uint8')
                        self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0])
                        evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
                        num_samples = self.evalset.test_data.shape[0]
                        cls_features = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                        norm_features = F.normalize(torch.from_numpy(cls_features), p=2, dim=1)
                        cls_embedding = torch.mean(norm_features, dim=0)
                        novel_embedding[cls_idx-iteration*self.args.nb_cl] = F.normalize(cls_embedding, p=2, dim=0) * average_old_embedding_norm
                    tg_model.to(self.device)
                    tg_model.fc.fc2.weight.data = novel_embedding.to(self.device)
                self.trainset.train_data = X_train.astype('uint8')
                self.trainset.train_labels = map_Y_train
                if iteration > start_iter and self.args.rs_ratio > 0 and scale_factor > 1:
                    # Weighted sampling to oversample old-class exemplars.
                    print("Weights from sampling:", rs_sample_weights)
                    index1 = np.where(rs_sample_weights>1)[0]
                    index2 = np.where(map_Y_train<iteration*self.args.nb_cl)[0]
                    assert((index1==index2).all())
                    train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples)
                    trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.args.train_batch_size, shuffle=False, sampler=train_sampler, num_workers=self.args.num_workers)
                else:
                    trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.args.train_batch_size,
                                                              shuffle=True, num_workers=self.args.num_workers)
                self.testset.test_data = X_valid_cumul.astype('uint8')
                self.testset.test_labels = map_Y_valid_cumul
                testloader = torch.utils.data.DataLoader(self.testset, batch_size=self.args.test_batch_size,
                                                         shuffle=False, num_workers=self.args.num_workers)
                # NOTE(review): the message prints min before max despite the
                # "Max and min" wording.
                print('Max and min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train)))
                print('Max and min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul)))
                ckp_name = osp.join(self.save_path, 'run_{}_iteration_{}_model.pth'.format(iteration_total, iteration))
                # NOTE(review): ckp_name_free is computed but never used below.
                ckp_name_free = osp.join(self.save_path, 'run_{}_iteration_{}_free_model.pth'.format(iteration_total, iteration))
                print('Checkpoint name:', ckp_name)
                if iteration==start_iter and self.args.resume_fg:
                    print("Loading first group models from checkpoint")
                    tg_model = torch.load(self.args.ckpt_dir_fg)
                elif self.args.resume and os.path.exists(ckp_name):
                    print("Loading models from checkpoint")
                    tg_model = torch.load(ckp_name)
                else:
                    if iteration > start_iter:
                        ref_model = ref_model.to(self.device)
                        # Freeze fc1 (old-class weights): lr=0, no weight decay.
                        ignored_params = list(map(id, tg_model.fc.fc1.parameters()))
                        base_params = filter(lambda p: id(p) not in ignored_params, tg_model.parameters())
                        base_params = filter(lambda p: p.requires_grad,base_params)
                        # NOTE(review): the requires_grad filter is applied twice;
                        # the second pass is redundant but harmless.
                        base_params = filter(lambda p: p.requires_grad,base_params)
                        tg_params_new =[{'params': base_params, 'lr': self.args.base_lr2, 'weight_decay': self.args.custom_weight_decay}, {'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}]
                        tg_model = tg_model.to(self.device)
                        tg_optimizer = optim.SGD(tg_params_new, lr=self.args.base_lr2, momentum=self.args.custom_momentum, weight_decay=self.args.custom_weight_decay)
                    else:
                        tg_params = tg_model.parameters()
                        tg_model = tg_model.to(self.device)
                        tg_optimizer = optim.SGD(tg_params, lr=self.args.base_lr1, momentum=self.args.custom_momentum, weight_decay=self.args.custom_weight_decay)
                    if iteration > start_iter:
                        tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=self.lr_strat, gamma=self.args.lr_factor)
                    else:
                        tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=self.lr_strat_first_phase, gamma=self.args.lr_factor)
                    print("Incremental train")
                    # NOTE(review): both branches issue the identical call; kept
                    # as-is to preserve behavior.
                    if iteration > start_iter:
                        tg_model = incremental_train_and_eval(self.args.epochs, tg_model, ref_model, free_model, ref_free_model, tg_optimizer, tg_lr_scheduler, trainloader, testloader, iteration, start_iter, cur_lamda, self.args.dist, self.args.K, self.args.lw_mr)
                    else:
                        tg_model = incremental_train_and_eval(self.args.epochs, tg_model, ref_model, free_model, ref_free_model, tg_optimizer, tg_lr_scheduler, trainloader, testloader, iteration, start_iter, cur_lamda, self.args.dist, self.args.K, self.args.lw_mr)
                    torch.save(tg_model, ckp_name)
                # Exemplar budget: fixed per class (dynamic) or a total budget
                # split evenly over all classes seen so far.
                if self.args.dynamic_budget:
                    nb_protos_cl = self.args.nb_protos
                else:
                    nb_protos_cl = int(np.ceil(self.args.nb_protos*100./self.args.nb_cl/(iteration+1)))
                tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
                num_features = tg_model.fc.in_features
                # Herding: greedily rank each new class's samples so the mean
                # of the selected features approximates the true class mean.
                for iter_dico in range(last_iter*self.args.nb_cl, (iteration+1)*self.args.nb_cl):
                    self.evalset.test_data = prototypes[iter_dico].astype('uint8')
                    self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0])
                    evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
                                                             shuffle=False, num_workers=self.args.num_workers)
                    num_samples = self.evalset.test_data.shape[0]
                    mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                    D = mapped_prototypes.T
                    D = D/np.linalg.norm(D,axis=0)
                    mu = np.mean(D,axis=1)
                    index1 = int(iter_dico/self.args.nb_cl)
                    index2 = iter_dico % self.args.nb_cl
                    alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0
                    w_t = mu
                    iter_herding = 0
                    iter_herding_eff = 0
                    while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000:
                        tmp_t = np.dot(w_t,D)
                        ind_max = np.argmax(tmp_t)
                        iter_herding_eff += 1
                        if alpha_dr_herding[index1,ind_max,index2] == 0:
                            alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding
                            iter_herding += 1
                        w_t = w_t+mu-D[:,ind_max]
                X_protoset_cumuls = []
                Y_protoset_cumuls = []
                # Class-mean features for NME classification. Slot 0 uses only
                # the selected exemplars, slot 1 the full dictionary.
                # NOTE(review): 64 presumably matches the backbone feature dim
                # and 100 the CIFAR-100 class count -- confirm.
                class_means = np.zeros((64,100,2))
                for iteration2 in range(iteration+1):
                    for iter_dico in range(self.args.nb_cl):
                        current_cl = order[range(iteration2*self.args.nb_cl,(iteration2+1)*self.args.nb_cl)]
                        self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico].astype('uint8')
                        self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0]) #zero labels
                        evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
                                                                 shuffle=False, num_workers=self.args.num_workers)
                        num_samples = self.evalset.test_data.shape[0]
                        mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                        D = mapped_prototypes.T
                        D = D/np.linalg.norm(D,axis=0)
                        # Features of the horizontally flipped images as well.
                        self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
                        evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
                                                                 shuffle=False, num_workers=self.args.num_workers)
                        mapped_prototypes2 = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                        D2 = mapped_prototypes2.T
                        D2 = D2/np.linalg.norm(D2,axis=0)
                        alph = alpha_dr_herding[iteration2,:,iter_dico]
                        # Keep the nb_protos_cl best-ranked exemplars.
                        alph = (alph>0)*(alph<nb_protos_cl+1)*1.
                        X_protoset_cumuls.append(prototypes[iteration2*self.args.nb_cl+iter_dico,np.where(alph==1)[0]])
                        Y_protoset_cumuls.append(order[iteration2*self.args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0])))
                        alph = alph/np.sum(alph)
                        class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
                        class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
                        alph = np.ones(dictionary_size)/dictionary_size
                        class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
                        class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
                current_means = class_means[:, order[range(0,(iteration+1)*self.args.nb_cl)]]
                # NOTE(review): the block below recomputes the same class means
                # a second time (without refilling the protoset lists) before
                # saving; it appears redundant but is kept as-is.
                class_means = np.zeros((64,100,2))
                for iteration2 in range(iteration+1):
                    for iter_dico in range(self.args.nb_cl):
                        current_cl = order[range(iteration2*self.args.nb_cl,(iteration2+1)*self.args.nb_cl)]
                        self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico].astype('uint8')
                        self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0]) #zero labels
                        evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
                                                                 shuffle=False, num_workers=self.args.num_workers)
                        num_samples = self.evalset.test_data.shape[0]
                        mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                        D = mapped_prototypes.T
                        D = D/np.linalg.norm(D,axis=0)
                        self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
                        evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
                                                                 shuffle=False, num_workers=self.args.num_workers)
                        mapped_prototypes2 = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
                        D2 = mapped_prototypes2.T
                        D2 = D2/np.linalg.norm(D2,axis=0)
                        alph = alpha_dr_herding[iteration2,:,iter_dico]
                        alph = (alph>0)*(alph<nb_protos_cl+1)*1.
                        alph = alph/np.sum(alph)
                        class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
                        class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
                        alph = np.ones(dictionary_size)/dictionary_size
                        class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
                        class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
                torch.save(class_means, osp.join(self.save_path, 'run_{}_iteration_{}_class_means.pth'.format(iteration_total, iteration)))
                current_means = class_means[:, order[range(0,(iteration+1)*self.args.nb_cl)]]
                is_start_iteration = (iteration == start_iter)
                # Evaluate on the first-phase classes only ("ori" accuracy).
                map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
                print('Computing accuracy for first-phase classes')
                self.evalset.test_data = X_valid_ori.astype('uint8')
                self.evalset.test_labels = map_Y_valid_ori
                evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
                ori_acc, fast_fc = compute_accuracy(tg_model, free_model, tg_feature_model, current_means, X_protoset_cumuls, Y_protoset_cumuls, evalloader, order_list, is_start_iteration=is_start_iteration, maml_lr=self.args.maml_lr, maml_epoch=self.args.maml_epoch)
                top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
                self.train_writer.add_scalar('ori_acc/LwF', float(ori_acc[0]), iteration)
                self.train_writer.add_scalar('ori_acc/iCaRL', float(ori_acc[1]), iteration)
                # Evaluate on all classes seen so far (cumulative accuracy).
                map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
                print('Computing accuracy for all seen classes')
                self.evalset.test_data = X_valid_cumul.astype('uint8')
                self.evalset.test_labels = map_Y_valid_cumul
                evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
                cumul_acc, _ = compute_accuracy(tg_model, free_model, tg_feature_model, current_means, X_protoset_cumuls, Y_protoset_cumuls, evalloader, order_list, is_start_iteration=is_start_iteration, fast_fc=fast_fc, maml_lr=self.args.maml_lr, maml_epoch=self.args.maml_epoch)
                top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
| |
#!/usr/bin/env python
# Copyright (c) 2019 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Summary of useful helper functions for scenarios
"""
import math
import shapely.geometry
import shapely.affinity
import numpy as np
import carla
from agents.tools.misc import vector
from agents.navigation.local_planner import RoadOption
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
def get_distance_along_route(route, target_location):
    """
    Calculate the distance of the given location along the route

    Note: If the location is not along the route, the route length will be returned

    Args:
        route: sequence of (position, road option) pairs describing the route.
        target_location: location to project onto the route.

    Returns:
        (covered_distance, found): accumulated distance in meters from the
        start of the route up to the target, and whether the target's route
        interval was actually identified.
    """
    wmap = CarlaDataProvider.get_map()
    covered_distance = 0
    prev_position = None
    found = False
    # Don't use the input location, use the corresponding wp as location
    target_location_from_wp = wmap.get_waypoint(target_location).transform.location
    for position, _ in route:
        location = target_location_from_wp
        # Don't perform any calculations for the first route point
        if not prev_position:
            prev_position = position
            continue
        # Calculate distance between previous and current route point
        interval_length_squared = ((prev_position.x - position.x) ** 2) + ((prev_position.y - position.y) ** 2)
        distance_squared = ((location.x - prev_position.x) ** 2) + ((location.y - prev_position.y) ** 2)
        # Close to the current position? Stop calculation
        if distance_squared < 0.01:
            break
        if distance_squared < 400 and not distance_squared < interval_length_squared:
            # Check if a neighbor lane is closer to the route
            # Do this only in a close distance to correct route interval, otherwise the computation load is too high
            starting_wp = wmap.get_waypoint(location)
            # Scan the lanes to the left for a closer candidate location.
            wp = starting_wp.get_left_lane()
            while wp is not None:
                new_location = wp.transform.location
                new_distance_squared = ((new_location.x - prev_position.x) ** 2) + (
                    (new_location.y - prev_position.y) ** 2)
                # Stop once the lane id sign flips (opposite driving direction).
                if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id):
                    break
                if new_distance_squared < distance_squared:
                    distance_squared = new_distance_squared
                    location = new_location
                else:
                    break
                wp = wp.get_left_lane()
            # Same scan towards the right-hand lanes.
            wp = starting_wp.get_right_lane()
            while wp is not None:
                new_location = wp.transform.location
                new_distance_squared = ((new_location.x - prev_position.x) ** 2) + (
                    (new_location.y - prev_position.y) ** 2)
                if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id):
                    break
                if new_distance_squared < distance_squared:
                    distance_squared = new_distance_squared
                    location = new_location
                else:
                    break
                wp = wp.get_right_lane()
        if distance_squared < interval_length_squared:
            # The location could be inside the current route interval, if route/lane ids match
            # Note: This assumes a sufficiently small route interval
            # An alternative is to compare orientations, however, this also does not work for
            # long route intervals
            curr_wp = wmap.get_waypoint(position)
            prev_wp = wmap.get_waypoint(prev_position)
            wp = wmap.get_waypoint(location)
            if prev_wp and curr_wp and wp:
                if wp.road_id == prev_wp.road_id or wp.road_id == curr_wp.road_id:
                    # Roads match, now compare the sign of the lane ids
                    if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or
                            np.sign(wp.lane_id) == np.sign(curr_wp.lane_id)):
                        # The location is within the current route interval
                        covered_distance += math.sqrt(distance_squared)
                        found = True
                        break
        covered_distance += math.sqrt(interval_length_squared)
        prev_position = position
    return covered_distance, found
def get_crossing_point(actor):
    """
    Get the next crossing point location in front of the ego vehicle
    @return point of crossing
    """
    waypoint = CarlaDataProvider.get_map().get_waypoint(actor.get_location())
    # Advance in 2 m steps until an intersection waypoint is reached.
    while not waypoint.is_intersection:
        waypoint = waypoint.next(2)[0]
    cross_loc = waypoint.transform.location
    return carla.Location(x=cross_loc.x, y=cross_loc.y, z=cross_loc.z)
def get_geometric_linear_intersection(ego_actor, other_actor):
    """
    Obtain a intersection point between two actor's location by using their waypoints (wp)

    Each actor's heading is approximated by the segment from its current
    waypoint to the waypoint 1 m ahead; the two segments are extended to
    infinite lines and intersected in homogeneous coordinates.

    @return carla.Location of the intersection (z=0), or the tuple
            (inf, inf) when the two lines are parallel
    """
    # Consistency: use the cached CarlaDataProvider.get_map() like the rest
    # of this module (the original called get_world().get_map() here).
    wmap = CarlaDataProvider.get_map()
    wp_ego_1 = wmap.get_waypoint(ego_actor.get_location())
    wp_ego_2 = wp_ego_1.next(1)[0]
    x_ego_1 = wp_ego_1.transform.location.x
    y_ego_1 = wp_ego_1.transform.location.y
    x_ego_2 = wp_ego_2.transform.location.x
    y_ego_2 = wp_ego_2.transform.location.y
    wp_other_1 = wmap.get_waypoint(other_actor.get_location())
    wp_other_2 = wp_other_1.next(1)[0]
    x_other_1 = wp_other_1.transform.location.x
    y_other_1 = wp_other_1.transform.location.y
    x_other_2 = wp_other_2.transform.location.x
    y_other_2 = wp_other_2.transform.location.y
    # Homogeneous line through each point pair; the cross product of the two
    # lines is the intersection point (last coordinate 0 => parallel lines).
    s = np.vstack([(x_ego_1, y_ego_1), (x_ego_2, y_ego_2), (x_other_1, y_other_1), (x_other_2, y_other_2)])
    h = np.hstack((s, np.ones((4, 1))))
    line1 = np.cross(h[0], h[1])
    line2 = np.cross(h[2], h[3])
    x, y, z = np.cross(line1, line2)
    if z == 0:
        # NOTE(review): inconsistent return type (tuple, not carla.Location)
        # kept for backward compatibility with existing callers.
        return (float('inf'), float('inf'))
    intersection = carla.Location(x=x / z, y=y / z, z=0)
    return intersection
def get_location_in_distance(actor, distance):
    """
    Obtain a location in a given distance from the current actor's location.
    Note: Search is stopped on first intersection.
    @return obtained location and the traveled distance
    """
    current_wp = CarlaDataProvider.get_map().get_waypoint(actor.get_location())
    traveled = 0
    # Step forward 1 m at a time until the distance is covered or an
    # intersection is reached.
    while not current_wp.is_intersection and traveled < distance:
        step_wp = current_wp.next(1.0)[-1]
        traveled += step_wp.transform.location.distance(current_wp.transform.location)
        current_wp = step_wp
    return current_wp.transform.location, traveled
def get_location_in_distance_from_wp(waypoint, distance, stop_at_junction=True):
    """
    Obtain a location in a given distance from the current actor's location.
    Note: Search is stopped on first intersection.
    @return obtained location and the traveled distance
    """
    traveled = 0
    # Walk forward 1 m at a time; stop when the distance is covered, when a
    # junction is hit (if requested), or when the road runs out.
    while traveled < distance and not (waypoint.is_intersection and stop_at_junction):
        candidates = waypoint.next(1.0)
        if not candidates:
            break
        step_wp = candidates[-1]
        traveled += step_wp.transform.location.distance(waypoint.transform.location)
        waypoint = step_wp
    return waypoint.transform.location, traveled
def get_waypoint_in_distance(waypoint, distance):
    """
    Obtain a waypoint in a given distance from the current actor's location.
    Note: Search is stopped on first intersection.
    @return obtained waypoint and the traveled distance
    """
    traveled = 0
    current_wp = waypoint
    # Advance 1 m per step until either the distance is reached or an
    # intersection waypoint shows up.
    while traveled < distance and not current_wp.is_intersection:
        step_wp = current_wp.next(1.0)[-1]
        traveled += step_wp.transform.location.distance(current_wp.transform.location)
        current_wp = step_wp
    return current_wp, traveled
def generate_target_waypoint_list(waypoint, turn=0):
    """
    This method follow waypoints to a junction and choose path based on turn input.
    Turn input: LEFT -> -1, RIGHT -> 1, STRAIGHT -> 0
    @returns a waypoint list from the starting point to the end point according to turn input
    """
    reached_junction = False
    threshold = math.radians(0.1)
    plan = []
    while True:
        wp_choice = waypoint.next(2)
        if len(wp_choice) > 1:
            reached_junction = True
            waypoint = choose_at_junction(waypoint, wp_choice, turn)
        else:
            waypoint = wp_choice[0]
        plan.append((waypoint, RoadOption.LANEFOLLOW))
        # End condition for the behavior
        if turn != 0 and reached_junction and len(plan) >= 3:
            v_1 = vector(
                plan[-2][0].transform.location,
                plan[-1][0].transform.location)
            v_2 = vector(
                plan[-3][0].transform.location,
                plan[-2][0].transform.location)
            # Clamp the cosine to [-1, 1]: floating-point error can push the
            # ratio slightly outside acos's domain and raise a ValueError.
            cos_wp = np.dot(v_1, v_2) / abs((np.linalg.norm(v_1) * np.linalg.norm(v_2)))
            angle_wp = math.acos(np.clip(cos_wp, -1.0, 1.0))
            if angle_wp < threshold:
                break
        elif reached_junction and not plan[-1][0].is_intersection:
            break
    return plan, plan[-1][0]
def generate_target_waypoint_list_multilane(waypoint, change='left', # pylint: disable=too-many-return-statements
                                            distance_same_lane=10, distance_other_lane=25,
                                            total_lane_change_distance=25, check=True,
                                            lane_changes=1, step_distance=2):
    """
    This methods generates a waypoint list which leads the vehicle to a parallel lane.
    The change input must be 'left' or 'right', depending on which lane you want to change.

    The default step distance between waypoints on the same lane is 2m.
    The default step distance between the lane change is set to 25m.

    @returns a waypoint list from the starting point to the end point on a right or left parallel lane.
    The function might break before reaching the end point, if the asked behavior is impossible.

    Any failure (no next waypoint, an invalid change value, a forbidden lane
    change when check=True, or a non-driving target lane) returns (None, None).
    """
    plan = []
    plan.append((waypoint, RoadOption.LANEFOLLOW))  # start position
    option = RoadOption.LANEFOLLOW
    # Same lane
    distance = 0
    while distance < distance_same_lane:
        next_wps = plan[-1][0].next(step_distance)
        if not next_wps:
            return None, None
        next_wp = next_wps[0]
        distance += next_wp.transform.location.distance(plan[-1][0].transform.location)
        plan.append((next_wp, RoadOption.LANEFOLLOW))
    if change == 'left':
        option = RoadOption.CHANGELANELEFT
    elif change == 'right':
        option = RoadOption.CHANGELANERIGHT
    else:
        # ERROR, input value for change must be 'left' or 'right'
        return None, None
    lane_changes_done = 0
    # Spread the total lane-change distance evenly across the requested
    # number of single-lane changes.
    lane_change_distance = total_lane_change_distance / lane_changes
    # Lane change
    while lane_changes_done < lane_changes:
        # Move forward
        next_wps = plan[-1][0].next(lane_change_distance)
        if not next_wps:
            return None, None
        next_wp = next_wps[0]
        # Get the side lane
        if change == 'left':
            if check and str(next_wp.lane_change) not in ['Left', 'Both']:
                return None, None
            side_wp = next_wp.get_left_lane()
        else:
            if check and str(next_wp.lane_change) not in ['Right', 'Both']:
                return None, None
            side_wp = next_wp.get_right_lane()
        if not side_wp or side_wp.lane_type != carla.LaneType.Driving:
            return None, None
        # Update the plan
        plan.append((side_wp, option))
        lane_changes_done += 1
    # Other lane
    distance = 0
    while distance < distance_other_lane:
        next_wps = plan[-1][0].next(step_distance)
        if not next_wps:
            return None, None
        next_wp = next_wps[0]
        distance += next_wp.transform.location.distance(plan[-1][0].transform.location)
        plan.append((next_wp, RoadOption.LANEFOLLOW))
    target_lane_id = plan[-1][0].lane_id
    return plan, target_lane_id
def generate_target_waypoint(waypoint, turn=0):
    """
    This method follow waypoints to a junction and choose path based on turn input.
    Turn input: LEFT -> -1, RIGHT -> 1, STRAIGHT -> 0
    @returns a waypoint list according to turn input
    """
    sampling_radius = 1
    in_junction = False
    visited = []
    while True:
        choices = waypoint.next(sampling_radius)
        # Choose path at intersection
        if not in_junction and (len(choices) > 1 or choices[0].is_junction):
            in_junction = True
            waypoint = choose_at_junction(waypoint, choices, turn)
        else:
            waypoint = choices[0]
        visited.append(waypoint)
        # Stop at the first waypoint past the junction.
        if in_junction and not visited[-1].is_junction:
            break
    return visited[-1]
def generate_target_waypoint_in_route(waypoint, route):
"""
This method follow waypoints to a junction
@returns a waypoint list according to turn input
"""
wmap = CarlaDataProvider.get_map()
reached_junction = False
# Get the route location
shortest_distance = float('inf')
for index, route_pos in enumerate(route):
wp = route_pos[0]
trigger_location = waypoint.transform.location
| |
0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_16(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {32: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_17(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {48: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_18(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {6089590155545428825848686802984512581899718912: 0}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_19(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {115792089237316195423570985008687907853269984665640564039457584007913129639935: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_20(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {0: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_21(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {1: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_22(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {57896044618658097711785492504343953926634992332820282019728792003956564819952: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_23(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {3618502788666131106986593281521497120414687020801267626233049500247285301263: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_24(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {16: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_25(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {32: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_26(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {48: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_27(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {6089590155545428825848686802984512581899718912: 1}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_28(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {115792089237316195423570985008687907853269984665640564039457584007913129639935: 57896044618658097711785492504343953926634992332820282019728792003956564819952}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_29(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {0: 57896044618658097711785492504343953926634992332820282019728792003956564819952}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_30(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {1: 57896044618658097711785492504343953926634992332820282019728792003956564819952}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_31(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance | |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
from azure.cli.core.commands.parameters import (get_three_state_flag,
get_enum_type,
resource_group_name_type)
from azure.mgmt.security.models._security_center_enums import Enum69
from knack.arguments import CLIArgumentType
from ._validators import (validate_alert_status,
validate_auto_provisioning_toggle,
validate_pricing_tier,
validate_assessment_status_code)
from .actions import AppendBaselines, AppendBaseline
# Shared CLIArgumentType definitions reused by load_arguments() below for the
# various `az security ...` command groups.
# NOTE(review): several single-option entries pass options_list=('--xyz'),
# which is a plain string, not a 1-tuple (missing trailing comma) — knack
# appears to accept this form; confirm before normalizing.
name_arg_type = CLIArgumentType(options_list=('--name', '-n'), metavar='NAME', help='name of the resource to be fetched')
home_region_arg_type = CLIArgumentType(options_list=('--home-region', '-hr'), metavar='HOMEREGION', help='home region that was selected for the subscription')
location_arg_type = CLIArgumentType(options_list=('--location', '-l'), metavar='LOCATION', help='location of the resource')
# Alerts
alert_status_arg_type = CLIArgumentType(options_list=('--status'), metavar='STATUS', help='target status of the alert. possible values are "dismiss" and "activate"')
# Alerts Suppression Rules
suppression_rule_name_arg_type = CLIArgumentType(options_list=('--rule-name'), metavar='RULENAME', help='The unique name of the alerts suppression rule.')
suppression_alert_type_arg_type = CLIArgumentType(options_list=('--alert-type'), metavar='ALERTTYPE', help='Type of the alert to automatically suppress. For all alert types, use "*".')
suppression_reason_arg_type = CLIArgumentType(options_list=('--reason'), metavar='REASON', help='The reason for dismissing the alert.')
suppression_expiration_date_utc_arg_type = CLIArgumentType(options_list=('--expiration-date-utc'), metavar='EXPIRATIONDATEUTC', help='Expiration date of the rule, if value is not provided or provided as null this field will default to the maximum allowed expiration date.')
suppression_state_arg_type = CLIArgumentType(options_list=('--state'), metavar='STATE', help='Possible states of the rule. Possible values are "Enabled" and "Disabled".')
suppression_comment_arg_type = CLIArgumentType(options_list=('--comment'), metavar='COMMENT', help='Any comment regarding the rule.')
suppression_all_of_arg_type = CLIArgumentType(options_list=('--all-of'), metavar='ALLOF', help='The suppression conditions. Should be provided in a json array format.')
suppression_rule_scope_field_arg_type = CLIArgumentType(options_list=('--field'), metavar='FIELD', help='Entity name.')
suppression_rule_scope_contains_arg_type = CLIArgumentType(options_list=('--contains-substring'), metavar='CONTAINSSUBSTRING', help='The string to scope the suppression rule by.')
suppression_rule_scope_any_of_arg_type = CLIArgumentType(options_list=('--any-of'), metavar='ANYOF', help='A list of strings to scope the suppression rule by.')
# Atp (Advanced Threat Protection)
storage_account_arg_type = CLIArgumentType(options_list=('--storage-account'), metavar='NAME', help='Name of an existing Storage account.')
cosmos_db_account_arg_type = CLIArgumentType(options_list=('--cosmosdb-account'), metavar='NAME', help='Name of an existing Cosmos DB account.')
# Sql Vulnerability Assessment
va_sql_vm_resource_id_arg_type = CLIArgumentType(options_list=('--vm-resource-id'), metavar='VMRESOURCEID', help='Resource ID of the scanned machine. For On-Premise machines, please provide your workspace resource ID')
va_sql_workspace_id_arg_type = CLIArgumentType(options_list=('--workspace-id'), metavar='WORKSPACEID', help='The ID of the workspace connected to the scanned machine')
va_sql_server_name_arg_type = CLIArgumentType(options_list=('--server-name'), metavar='SERVERNAME', help='The name of the scanned server')
va_sql_database_name_arg_type = CLIArgumentType(options_list=('--database-name'), metavar='DATABASENAME', help='The name of the scanned database')
va_sql_scan_id_arg_type = CLIArgumentType(options_list=('--scan-id'), metavar='SCANID', help='The ID of the scan')
va_sql_rule_id_arg_type = CLIArgumentType(options_list=('--rule-id'), metavar='RULEID', help='The ID of the scanned rule. Format: "VAXXXX", where XXXX indicates the number of the rule')
# The two baseline arg types share '--baseline' but use different append
# actions: one accumulates rows for a single rule, the other for many rules.
va_sql_baseline_single_arg_type = CLIArgumentType(options_list=('--baseline', '-b'), metavar='BASELINE', help='Baseline records to be set. The following example will set a baseline with two records: --baseline line1_w1 line1_w2 line1_w3 --baseline line2_w1 line2_w2 line2_w3', action=AppendBaseline, nargs='+')
va_sql_baseline_multiple_arg_type = CLIArgumentType(options_list=('--baseline', '-b'), metavar='BASELINE', help='Baseline records to be set. The following example will set a baseline for two rules: --baseline rule=VA1111 line1_w1 line1_w2 --baseline rule=VA2222 line1_w1 line1_w2 line1_w3 --baseline rule=VA1111 line2_w1 line2_w2', action=AppendBaselines, nargs='+')
va_sql_vm_name_arg_type = CLIArgumentType(options_list=('--vm-name'), metavar='VMNAME', help='Provide the name of the machine, for On-Premise resources only')
va_sql_agent_id_arg_type = CLIArgumentType(options_list=('--agent-id'), metavar='AGENTID', help='Provide the ID of the agent on the scanned machine, for On-Premise resources only')
va_sql_vm_uuid_arg_type = CLIArgumentType(options_list=('--vm-uuid'), metavar='VMUUID', help='Provide the UUID of the scanned machine, for On-Premise resources only')
# Auto Provisioning
auto_provisioning_auto_provision_arg_type = CLIArgumentType(options_list=('--auto-provision'), metavar='AUTOPROVISION', help='Automatic provisioning toggle. possible values are "On" or "Off"')
# Contacts
contact_email_arg_type = CLIArgumentType(options_list=('--email'), metavar='EMAIL', help='E-mail of the security contact')
contact_phone_arg_type = CLIArgumentType(options_list=('--phone'), metavar='PHONE', help='Phone of the security contact')
contact_alert_notifications_arg_type = CLIArgumentType(options_list=('--alert-notifications'), metavar='ALERTNOTIFICATIONS', help='Whether to send mail notifications to the security contacts')
contact_alerts_admins_arg_type = CLIArgumentType(options_list=('--alerts-admins'), metavar='ALERTADMINS', help='Whether to send mail notifications to the subscription administrators')
# Pricing
pricing_tier_arg_type = CLIArgumentType(options_list=('--tier'), metavar='TIER', help='pricing tier type')
# Workspace settings
workspace_setting_target_workspace_arg_type = CLIArgumentType(options_list=('--target-workspace'), metavar='TARGETWORKSPACE', help='An ID of the workspace resource that will hold the security data')
# Assessments
assessment_assessed_resource_id_arg_type = CLIArgumentType(options_list=('--assessed-resource-id'), metavar='ASSESSEDRESOURCEID', help='The target resource for this assessment')
assessment_additional_data_arg_type = CLIArgumentType(options_list=('--additional-data'), metavar='ADDITIONALDATA', help='Data that is attached to the assessment result for better investigations or status clarity')
assessment_status_code_arg_type = CLIArgumentType(options_list=('--status-code'), metavar='STATUSCODE', help='Progremmatic code for the result of the assessment. can be "Healthy", "Unhealthy" or "NotApplicable"')
assessment_status_cause_arg_type = CLIArgumentType(options_list=('--status-cause'), metavar='STATUSCAUSE', help='Progremmatic code for the cause of the assessment result')
assessment_status_description_arg_type = CLIArgumentType(options_list=('--status-description'), metavar='STATUSDESCRIPTION', help='Human readable description of the cause of the assessment result')
# Assessment metadata
assessment_metadata_display_name_arg_type = CLIArgumentType(options_list=('--display-name'), metavar='DISPLAYNAME', help='Human readable title for this object')
assessment_metadata_remediation_description_arg_type = CLIArgumentType(options_list=('--remediation-description'), metavar='REMEDIATIONDESCRIPTION', help='Detailed string that will help users to understand the different ways to mitigate or fix the security issue')
assessment_metadata_description_arg_type = CLIArgumentType(options_list=('--description'), metavar='DESCRIPTION', help='Detailed string that will help users to understand the assessment and how it is calculated')
assessment_metadata_severity_arg_type = CLIArgumentType(options_list=('--severity'), metavar='SEVERITY', help='Indicates the importance of the security risk if the assessment is unhealthy')
# Sub Assessment
sub_assessment_assessment_name_arg_type = CLIArgumentType(options_list=('--assessment-name'), metavar='ASSESSMENTNAME', help='Name of the assessment resource')
# IoT Solution
iot_solution_name_arg_type = CLIArgumentType(options_list=('--solution-name'), metavar='SOLUTIONNAME', help='Name of the IoT Security solution')
iot_solution_display_name_arg_type = CLIArgumentType(options_list=('--display-name'), metavar='DISPLAYNAME', help='Resource display name')
iot_solution_iot_hubs_arg_type = CLIArgumentType(options_list=('--iot-hubs'), metavar='IOTHUBS', help='IoT Hub resource IDs')
# Regulatory Compliance
# Fixed misspelled keyword arguments: these previously used 'option_list'
# and 'metave', which are not the CLIArgumentType/argparse keywords
# ('options_list' / 'metavar'), so the option names and metavars were
# silently ignored or rejected.
regulatory_compliance_standard_name = CLIArgumentType(options_list=('--standard-name'), metavar='STANDARDNAME', help='The compliance standard name')
regulatory_compliance_control_name = CLIArgumentType(options_list=('--control-name'), metavar='CONTROLNAME', help='The compliance control name')
# Adaptive Network hardenings
adaptive_network_hardenings_resource_namespace = CLIArgumentType(options_list=('--resource_namespace'), metavar='RESOURCENAMESPACE', help='The Namespace of the resource')
adaptive_network_hardenings_resource_resource_type = CLIArgumentType(options_list=('--resource_type'), metavar='RESOURCETYPE', help='The type of the resource')
adaptive_network_hardenings_resource_resource_name = CLIArgumentType(options_list=('--resource_name'), metavar='RESOURCENAME', help='Name of the resource')
adaptive_network_hardenings_resource_adaptive_network_hardenings_resource_name = CLIArgumentType(options_list=('--adaptive_network_hardenings_resource_name'), metavar='ADAPTIVENETWORKHARDENINGSRESOURCENAME', help='Adaptive Network Hardening resource name')
# Adaptive Application Controls
adaptive_application_controls_group_name = CLIArgumentType(options_list=('--group-name'), metavar='GROUPNAME', help='Name of an application control VM/server group')
# Automations
# Argument definitions for the `az security automation ...` command group.
automation_scopes_arg_type = CLIArgumentType(options_list=('--scopes'), metavar='SCOPES', help='A collection of scopes on which the security automations logic is applied')
automation_sources_arg_type = CLIArgumentType(options_list=('--sources'), metavar='SOURCES', help='A collection of the source event types which evaluate the security automation set of rules')
automation_actions_arg_type = CLIArgumentType(options_list=('--actions'), metavar='ACTIONS', help='A collection of the actions which are triggered if all the configured rules evaluations, within at least one rule set, are true')
automation_etag_arg_type = CLIArgumentType(options_list=('--etag'), metavar='ETAG', help='Entity tag is used for comparing two or more entities from the same requested resource')
automation_tags_arg_type = CLIArgumentType(options_list=('--tags'), metavar='TAGS', help='A list of key value pairs that describe the resource')
automation_description_arg_type = CLIArgumentType(options_list=('--description'), metavar='DESCRIPTION', help='The security automation description')
automation_isEnabled_arg_type = CLIArgumentType(options_list=('--isEnabled'), metavar='ISENABLED', help='Indicates whether the security automation is enabled')
automation_scope_description = CLIArgumentType(options_list=('--description'), metavar='DESCRIPTION', help='The resources scope description')
automation_scope_path = CLIArgumentType(options_list=('--scope-path'), metavar='SCOPEPATH', help='The resources scope path. Can be the subscription on which the automation is defined on or a resource group under that subscription (fully qualified Azure resource IDs)')
automation_rule_expected_value = CLIArgumentType(options_list=('--expected-value'), metavar='EXPECTEDVALUE', help='The expected value')
automation_rule_operator = CLIArgumentType(options_list=('--operator'), metavar='OPERATOR', help='A valid comparer operator to use. A case-insensitive comparison will be applied for String PropertyType')
automation_rule_property_j_path = CLIArgumentType(options_list=('--property-j-path'), metavar='PROPERTYJPATH', help='The JPath of the entity model property that should be checked')
automation_rule_property_type = CLIArgumentType(options_list=('--property-type'), metavar='PROPERTYTYPE', help='The data type of the compared operands (string, integer, floating point number or a boolean [true/false]]')
automation_rule_set_rules = CLIArgumentType(options_list=('--rules'), metavar='RULES', help='A rule which is evaluated upon event interception. The rule is configured by comparing a specific value from the event model to an expected value. This comparison is done by using one of the supported operators set')
automation_source_event_source = CLIArgumentType(options_list=('--event-source'), metavar='EVENTSOURCE', help='A valid event source type')
automation_source_rule_sets = CLIArgumentType(options_list=('--rule-sets'), metavar='RULESETS', help='A set of rules which evaluate upon event interception. A logical disjunction is applied between defined rule sets (logical "or").')
automation_action_logic_app_resource_id = CLIArgumentType(options_list=('--logic-app-resource-id'), metavar='LOGICAPPRESOURCEID', help='The triggered Logic App Azure Resource ID. This can also reside on other subscriptions, given that you have permissions to trigger the Logic App')
automation_action_logic_app_uri = CLIArgumentType(options_list=('--uri'), metavar='URI', help='The Logic App trigger URI endpoint (it will not be included in any response)')
# NOTE(review): metavar below is empty — presumably should be e.g. 'CONNECTIONSTRING'; confirm intent.
automation_action_event_hub_connection_string = CLIArgumentType(options_list=('--connection-string'), metavar='', help='The target Event Hub connection string (it will not be included in any response)')
automation_action_event_hub_resource_id = CLIArgumentType(options_list=('--event-hub-resource-id'), metavar='EVENTHUBRESOURCEID', help='The target Event Hub Azure Resource ID')
automation_action_event_hub_sas_policy_name = CLIArgumentType(options_list=('--sas-policy-name'), metavar='SASPOLICYNAME', help='The target Event Hub SAS policy name')
automation_action_workspace_resource_id = CLIArgumentType(options_list=('--workspace-resource-id'), metavar='WORKSPACERESOURCEID', help='The fully qualified Log Analytics Workspace Azure Resource ID')
# pylint: disable=too-many-branches
def load_arguments(self, _):
for scope in ['alert',
'alerts-suppression-rule',
'atp',
'va sql',
'task',
'setting',
'contact',
'auto-provisioning-setting',
'discovered-security-solution',
'external-security-solution',
'jit-policy',
'location',
'pricing',
'topology',
'workspace-setting',
'assessment',
'assessment-metadata',
'sub-assessment',
'iot-solution',
'iot-analytics',
'iot-alerts',
'iot-recommendations',
'regulatory-compliance-standards',
'regulatory-compliance-controls',
'regulatory-compliance-assessments',
'adaptive-application-controls',
'adaptive_network_hardenings',
'allowed_connections',
'secure-scores',
'secure-score-controls',
'secure-score-control-definitions',
'setting',
'automation'
]:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'resource_group_name',
options_list=['--resource-group', '-g'],
arg_type=resource_group_name_type)
c.argument(
'resource_name',
arg_type=name_arg_type)
c.argument(
'location',
arg_type=location_arg_type)
c.argument(
'vm_resource_id',
arg_type=va_sql_vm_resource_id_arg_type)
c.argument(
'workspace_id',
arg_type=va_sql_workspace_id_arg_type)
c.argument(
'server_name',
arg_type=va_sql_server_name_arg_type)
c.argument(
'database_name',
arg_type=va_sql_database_name_arg_type)
c.argument(
'vm_name',
arg_type=va_sql_vm_name_arg_type)
c.argument(
'agent_id',
arg_type=va_sql_agent_id_arg_type)
c.argument(
'vm_uuid',
arg_type=va_sql_vm_uuid_arg_type)
with self.argument_context('security atp storage') as c:
c.argument(
'storage_account_name',
arg_type=storage_account_arg_type)
with self.argument_context('security atp cosmosdb') as c:
c.argument(
'cosmos_db_account_name',
arg_type=cosmos_db_account_arg_type)
for scope in ['regulatory-compliance-controls']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'standard_name',
arg_type=regulatory_compliance_standard_name)
for scope in ['regulatory-compliance-assessments']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'standard_name',
arg_type=regulatory_compliance_standard_name)
c.argument(
'control_name',
arg_type=regulatory_compliance_control_name)
for scope in ['alert update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'status',
validator=validate_alert_status,
arg_type=alert_status_arg_type)
for scope in ['alerts-suppression-rule update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'rule_name',
arg_type=suppression_rule_name_arg_type)
c.argument(
'alert_type',
arg_type=suppression_alert_type_arg_type)
c.argument(
'reason',
arg_type=suppression_reason_arg_type)
c.argument(
'expiration_date_utc',
arg_type=suppression_expiration_date_utc_arg_type)
c.argument(
'state',
arg_type=suppression_state_arg_type)
c.argument(
'comment',
arg_type=suppression_comment_arg_type)
for scope in ['alerts-suppression-rule show']:
| |
'and' and 'word-separator' are used as separator
between the last two arguments.
If more than two arguments are given, other arguments are
joined using MediaWiki message 'comma-separator'.
:param args: text to be expanded
"""
needed_mw_messages = ('and', 'comma-separator', 'word-separator')
if not args:
return ''
try:
msgs = self.mediawiki_messages(needed_mw_messages)
except KeyError:
raise NotImplementedError(
'MediaWiki messages missing: {}'.format(needed_mw_messages))
args = list(args)
concat = msgs['and'] + msgs['word-separator']
return msgs['comma-separator'].join(
args[:-2] + [concat.join(args[-2:])])
    @deprecated_args(string='text')
    def expand_text(self, text: str, title=None, includecomments=None) -> str:
        """Parse the given text for preprocessing and rendering.

        E.g. expand templates and strip comments if the includecomments
        parameter is not True. Keeps text inside
        <nowiki></nowiki> tags unchanged etc. Can be used to parse
        magic parser words like {{CURRENTTIMESTAMP}}.

        :param text: text to be expanded
        :type text: str
        :param title: page title without section
        :type title: str
        :param includecomments: if True do not strip comments
        :type includecomments: bool
        :raises ValueError: text is not a string
        """
        if not isinstance(text, str):
            raise ValueError('text must be a string')
        # Empty input needs no round-trip to the server.
        if not text:
            return ''
        req = self._simple_request(action='expandtemplates', text=text)
        if title is not None:
            req['title'] = title
        # The API treats a present-but-empty parameter as a boolean flag.
        if includecomments is True:
            req['includecomments'] = ''
        # From MW 1.24wmf7 on, the result key is 'wikitext' (selected via
        # the 'prop' parameter); older versions return it under '*'.
        if self.mw_version > '1.24wmf7':
            key = 'wikitext'
            req['prop'] = key
        else:
            key = '*'
        return req.submit()['expandtemplates'][key]
def getcurrenttimestamp(self):
"""
Return the server time as a MediaWiki timestamp string.
It calls :py:obj:`server_time` first so it queries the server to
get the current server time.
:return: the server time
:rtype: str (as 'yyyymmddhhmmss')
"""
return self.server_time().totimestampformat()
    def server_time(self):
        """
        Return a Timestamp object representing the current server time.

        It uses the 'time' property of the siteinfo 'general'. It'll force a
        reload before returning the time.

        :return: the current server time
        :rtype: :py:obj:`Timestamp`
        """
        # expiry=True forces the cached siteinfo value to be refreshed
        # before reading, so a fresh timestamp is fetched from the server.
        return pywikibot.Timestamp.fromISOformat(
            self.siteinfo.get('time', expiry=True))
def getmagicwords(self, word):
"""Return list of localized "word" magic words for the site."""
if not hasattr(self, '_magicwords'):
magicwords = self.siteinfo.get('magicwords', cache=False)
self._magicwords = {item['name']: item['aliases']
for item in magicwords}
if word in self._magicwords:
return self._magicwords[word]
return [word]
    @remove_last_args(('default', ))
    def redirect(self):
        """Return the localized #REDIRECT keyword."""
        # Use the first localized alias and strip the leading '#' character.
        return self.getmagicwords('redirect')[0].lstrip('#')
    @deprecated('redirect_regex', since='20210103')
    def redirectRegex(self):  # noqa: N802
        """Return a compiled regular expression matching on redirect pages."""
        # Deprecated camelCase alias; delegates to the redirect_regex property.
        return self.redirect_regex
    @property
    def redirect_regex(self):
        """Return a compiled regular expression matching on redirect pages.

        Group 1 in the regex match object will be the target title.
        """
        # NOTE: this is needed, since the API can give false positives!
        try:
            # Localized keywords, with their leading '#' stripped.
            keywords = {s.lstrip('#') for s in self.getmagicwords('redirect')}
            keywords.add('REDIRECT')  # just in case
            pattern = '(?:' + '|'.join(keywords) + ')'
        except KeyError:
            # no localized keyword for redirects
            pattern = None
        # NOTE(review): pattern=None presumably makes the parent class use
        # a default keyword list - confirm in the base implementation.
        return super().redirectRegex(pattern)
    @remove_last_args(('default', ))
    def pagenamecodes(self):
        """Return list of localized PAGENAME tags for the site."""
        # All known aliases of the {{PAGENAME}} magic word.
        return self.getmagicwords('pagename')
    @remove_last_args(('default', ))
    def pagename2codes(self):
        """Return list of localized PAGENAMEE tags for the site."""
        # All known aliases of the {{PAGENAMEE}} (URL-encoded) magic word.
        return self.getmagicwords('pagenamee')
    def _build_namespaces(self):
        """Create the namespace id -> Namespace mapping from siteinfo.

        Combines the 'namespaces' and 'namespacealiases' siteinfo blocks.
        """
        _namespaces = {}
        for nsdata in self.siteinfo.get('namespaces', cache=False).values():
            ns = nsdata.pop('id')
            if ns == 0:
                # For the main namespace (id 0) the localized name ('*')
                # is also used as the canonical name.
                canonical_name = nsdata.pop('*')
                custom_name = canonical_name
            else:
                custom_name = nsdata.pop('*')
                canonical_name = nsdata.pop('canonical')
            default_case = Namespace.default_case(ns)
            if 'case' not in nsdata:
                nsdata['case'] = default_case or self.siteinfo['case']
            elif default_case is not None:
                assert default_case == nsdata['case'], \
                    'Default case is not consistent'
            namespace = Namespace(ns, canonical_name, custom_name, **nsdata)
            _namespaces[ns] = namespace
        # Attach aliases to the namespaces built above; an alias whose id
        # has no matching namespace is reported as broken.
        for item in self.siteinfo.get('namespacealiases'):
            ns = int(item['id'])
            try:
                namespace = _namespaces[ns]
            except KeyError:
                pywikibot.warning(
                    'Broken namespace alias "{}" (id: {}) on {}'.format(
                        item['*'], ns, self))
            else:
                if item['*'] not in namespace:
                    namespace.aliases.append(item['*'])
        return _namespaces
def has_extension(self, name):
"""Determine whether extension `name` is loaded.
:param name: The extension to check for, case sensitive
:type name: str
:return: If the extension is loaded
:rtype: bool
"""
extensions = self.siteinfo['extensions']
for ext in extensions:
if 'name' in ext and ext['name'] == name:
return True
return False
    @property
    def siteinfo(self):
        """Site information dict."""
        # Read-only accessor for the private _siteinfo attribute,
        # which is populated outside this property.
        return self._siteinfo
def dbName(self): # noqa: N802
"""Return this site's internal id."""
return self.siteinfo['wikiid']
    @property
    def lang(self):
        """Return the code for the language of this Site."""
        # The language code as reported by the server's siteinfo.
        return self.siteinfo['lang']
    def version(self) -> str:
        """Return live project version number as a string.

        Use :py:obj:`pywikibot.site.mw_version` to compare MediaWiki versions.

        :raises APIError: no API read permission (re-raised after logging)
        :raises RuntimeError: the MediaWiki version is older than 1.23
        """
        try:
            # 'generator' is expected to look like '<product> <version>';
            # keep only the second token.
            version = self.siteinfo.get('generator', expiry=1).split(' ')[1]
        except APIError:
            msg = 'You have no API read permissions.'
            if not self.logged_in():
                msg += ' Seems you are not logged in.'
            pywikibot.error(msg)
            raise
        if MediaWikiVersion(version) < '1.23':
            raise RuntimeError(
                'Pywikibot "{}" does not support MediaWiki "{}".\n'
                'Use Pywikibot prior to "6.0" branch instead.'
                .format(pywikibot.__version__, version))
        return version
    @property
    def mw_version(self):
        """Return self.version() as a MediaWikiVersion object.

        Cache the result for 24 hours.

        :rtype: MediaWikiVersion
        """
        mw_ver, cache_time = getattr(self, '_mw_version_time', (None, None))
        # Refresh the cached value when absent or older than 24 hours.
        if mw_ver is None or time.time() - cache_time > 60 * 60 * 24:
            mw_ver = MediaWikiVersion(self.version())
            self._mw_version_time = mw_ver, time.time()
        return mw_ver
    @property
    def has_image_repository(self):
        """Return True if site has a shared image repository like Commons."""
        code, fam = self.shared_image_repository()
        # A repository exists when either a site code or a family is set.
        return bool(code or fam)
    @property
    def has_data_repository(self):
        """Return True if site has a shared data repository like Wikidata."""
        # data_repository() probes the API; None means no Wikibase repo.
        return self.data_repository() is not None
def image_repository(self):
"""Return Site object for image repository e.g. commons."""
code, fam = self.shared_image_repository()
if bool(code or fam):
return pywikibot.Site(code, fam, self.username())
return None
    def data_repository(self):
        """
        Return the data repository connected to this site.

        :return: The data repository if one is connected or None otherwise.
        :rtype: pywikibot.site.DataSite or None
        """
        def handle_warning(mod, warning):
            # Wikis without Wikibase answer meta=wikibase with this warning.
            # NOTE(review): a truthy return presumably tells the request
            # layer to suppress the warning - confirm in the api module.
            return (mod == 'query' and re.match(
                r'Unrecognized value for parameter [\'"]meta[\'"]: wikibase',
                warning))
        # The reply is cached (expiry=7, presumably days - confirm in the
        # request layer) since the repository wiring rarely changes.
        req = self._request(
            expiry=7, parameters={'action': 'query', 'meta': 'wikibase'})
        req._warning_handler = handle_warning
        data = req.submit()
        if 'query' in data and 'wikibase' in data['query']:
            data = data['query']['wikibase']['repo']['url']
            url = data['base'] + data['scriptpath'] + '/index.php'
            try:
                return pywikibot.Site(url=url, user=self.username(),
                                      interface='DataSite')
            except SiteDefinitionError as e:
                pywikibot.warning('Site "{}" supports wikibase at "{}", but '
                                  'creation failed: {}.'.format(self, url, e))
                return None
        else:
            # No wikibase block: the only expected alternative is the
            # warning suppressed above.
            assert 'warnings' in data
            return None
def is_image_repository(self):
"""Return True if Site object is the image repository."""
return self is self.image_repository()
def is_data_repository(self):
"""Return True if its data repository is itself."""
# fixme: this was an identity check
return self == self.data_repository()
    def page_from_repository(self, item):
        """
        Return a Page for this site object specified by Wikibase item.

        :param item: id number of item, "Q###",
        :type item: str
        :return: Page, or Category object given by Wikibase item number
            for this site object.
        :rtype: pywikibot.Page or None
        :raises pywikibot.exceptions.UnknownExtensionError: site has no
            Wikibase extension
        :raises NotImplementedError: method not implemented for a Wikibase
            site
        """
        if not self.has_data_repository:
            raise UnknownExtensionError(
                'Wikibase is not implemented for {}.'.format(self))
        if self.is_data_repository():
            raise NotImplementedError(
                'page_from_repository method is not implemented for '
                'Wikibase {}.'.format(self))
        repo = self.data_repository()
        dp = pywikibot.ItemPage(repo, item)
        try:
            page_title = dp.getSitelink(self)
        except NoPageError:
            # The item has no sitelink for this site.
            return None
        page = pywikibot.Page(self, page_title)
        # Upgrade to a Category object when the sitelink points into the
        # category namespace.
        if page.namespace() == Namespace.CATEGORY:
            page = pywikibot.Category(page)
        return page
def nice_get_address(self, title):
"""Return shorter URL path to retrieve page titled 'title'."""
# 'title' is expected to be URL-encoded already
return self.siteinfo['articlepath'].replace('$1', title)
def namespace(self, num, all=False):
"""Return string containing local name of namespace 'num'.
If optional argument 'all' is true, return all recognized
values for this namespace.
:param num: Namespace constant.
:type num: int
:param all: If True return a Namespace object. Otherwise
return the namespace name.
:return: local name or Namespace object
:rtype: str or Namespace
"""
if all:
return self.namespaces[num]
return self.namespaces[num][0]
    def _update_page(self, page, query, verify_imageinfo: bool = False):
        """Update page attributes.

        :param page: page object to be updated
        :param query: a api.QueryGenerator
        :param verify_imageinfo: if given, every pageitem is checked
            whether 'imageinfo' is missing. In that case an exception
            is raised.
        :raises NoPageError: 'missing' key is found in pageitem
        :raises PageRelatedError: 'imageinfo' is missing in pageitem
        :raises InconsistentTitleError: query result title does not match
            the page title
        """
        for pageitem in query:
            # Guard against the API returning data for a different title.
            if not self.sametitle(pageitem['title'],
                                  page.title(with_section=False)):
                raise InconsistentTitleError(page, pageitem['title'])
            api.update_page(page, pageitem, query.props)
            if verify_imageinfo and 'imageinfo' not in pageitem:
                if 'missing' in pageitem:
                    raise NoPageError(page)
                # NOTE(review): the '{}' placeholder is left unformatted
                # here; PageRelatedError presumably fills in the page.
                raise PageRelatedError(
                    page, 'loadimageinfo: Query on {} returned no imageinfo')
def loadpageinfo(self, page, preload=False):
"""Load page info from api and store in page attributes.
:see: https://www.mediawiki.org/wiki/API:Info
"""
title = page.title(with_section=False)
inprop = 'protection'
if preload:
inprop += | |
########################################################################
#
# Date: 2014 Authors: <NAME>
#
# <EMAIL>
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: <NAME> and TSRI
#
#########################################################################
#
# $Header: /opt/cvs/PmvApp/GUI/Qt/dashboard.py,v 1.25 2014/07/23 00:34:44 sanner Exp $
#
# $Id: dashboard.py,v 1.25 2014/07/23 00:34:44 sanner Exp $
#
## Selections:
## when nothing is selected, selecting something adds the current selection to the tree
## only one selection can be active at any time.
## selected items are added to the current selection
## the active selection has a yellow background in the Tree widget
## clicking on the active selection (i.e. with yellow background) makes the active selection None
## There is no range or complex combination of selections (i.e. multiple selections can not be
## made blue and operated on as one can do with molecular fragments and groups)
##
import weakref, os
from PySide import QtCore, QtGui
from MolKit.molecule import Atom, Molecule, MoleculeSet, MoleculeGroup
from MolKit.protein import Residue, Chain, Protein
from mglutil.util.callback import CallbackFunction
from mglutil.util.packageFilePath import findFilePath
PMVICONPATH = findFilePath('Icons', 'PmvApp.GUI')
from PmvGUI import GridGroup
from PmvApp.Pmv import Selection
class ResTreeWidgetItem(QtGui.QTreeWidgetItem):
    """Tree item for residues that sorts by residue number.

    NOTE(review): assumes item labels are a 3-character residue type
    followed by digits (e.g. 'GLU123'); a non-numeric suffix would make
    __lt__ raise ValueError - confirm against how residue items are named.
    """

    def __lt__(self, other):
        # Compare the text of the column currently used for sorting.
        column = self.treeWidget().sortColumn()
        key1 = self.text(column)
        key2 = other.text(column)
        # Strip the 3-letter prefix and compare the numeric parts.
        return int(key1[3:]) < int(key2[3:])
class Dashboard(QtGui.QTreeWidget):
def __init__(self, pmvGUI, parent=None):
self.pmvGUI = pmvGUI
#self.objToTreeitem = {}
#self.treeitemToObj = {}
QtGui.QTreeWidget.__init__(self, parent)
self.setColumnCount(1)
self.setHeaderLabels(['objects', ])
self.currentItemChanged.connect(self.onSetCurrentItem)
self.itemExpanded.connect(self.onItemExpanded)
self.itemDoubleClicked.connect(self.showHide)
## self.itemClicked.connect(self.onItemClick)
## self.itemActivated.connect(self.onItemActivated)
self.setAlternatingRowColors(True)
self.setAutoScroll(True)
self.setAcceptDrops(True)
self.setDragEnabled(True)
#self.setDropIndicatorShown(True) #is default
# this will move the actual node
self.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
self.currentSeleItem = None # will be a TreeWidgetItem when a selection is active
from PmvApp.selectionCmds import SelectionEvent
self.pmvGUI.pmv.eventHandler.registerListener(
SelectionEvent, self.selectionEventHandler)
from PmvApp.deleteCmds import DeleteObjectEvent
self.pmvGUI.pmv.eventHandler.registerListener(
DeleteObjectEvent, self.deleteObjectEventHandler)
#root = self.invisibleRootItem()
#root.setFlags(root.flags() & ~QtCore.Qt.ItemIsDropEnabled)
self.colors = {
'molecules':QtGui.QColor(255, 255, 255),
'currentSelection':QtGui.QColor(253, 253, 150),
'namedSelections':QtGui.QColor(119, 158, 203),
'groups':QtGui.QColor(255, 105, 97),
'grids':QtGui.QColor(255, 179, 71),
'molecules':QtGui.QColor(255, 255, 255),
'currentSelection':QtGui.QColor(253, 253, 150),
'namedSelections':QtGui.QColor(255, 255, 255),
'groups':QtGui.QColor(255, 255, 255),
'grids':QtGui.QColor(255, 255, 255),
'white':QtGui.QColor(255, 255, 255),
}
self.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
#selModel = self.selectionModel()
#.setSelectionFlag(QtGui.QAbstractItemView.ToggleCurrent)
self.itemExpanded.connect(self.onItemExpanded)
#selModel.selectionChanged.connect(self.onSelectionChange)
self.itemSelectionChanged.connect(self.onItemSelectionChange)
    def mousePressEvent(self, e):
        ## only left mouse button press are handled as mouse press in the tree
        # Record the pressed button(s). NOTE(review): the attribute keeps
        # its original spelling ('_MouseBUtton'); other code may read it.
        self._MouseBUtton = e.buttons()
        #print 'mousePress', int(e.buttons())
        if e.buttons()==QtCore.Qt.LeftButton:
            #item = self.itemAt(e.pos())
            #self.setCurrentItem(item)
            #item.setExpanded(not item.isExpanded())
            # Only left clicks reach the base class implementation; other
            # buttons are swallowed here.
            QtGui.QTreeWidget.mousePressEvent(self, e)
    def onItemSelectionChange(self):
        """Enforce the rules for extended (multi-item) tree selections.

        Rule 1: all selected tree items must hold objects of the same
        class. Rule 2: the selection may not mix items rooted under a
        Selection with items rooted elsewhere. Offending items are
        silently deselected rather than rejected with an error.
        """
        #
        # Here we enforce rules for extended selections
        # 1 - objects have to be of the same class
        # 2 - the roots can not be a selection and something else
        selectedItems = self.selectedItems()
        ## when first item is selected we remember class of selected object
        ## and the root of the item
        if len(selectedItems)==1:
            self._selectionClass = selectedItems[0]._pmvObject.__class__
            self._selectionRootObj = self.getRootItem(selectedItems[0])._pmvObject
            #print 'FIRST PICK', self._selectionClass, self._selectionRootObj.name
        else:
            if not hasattr(self, '_selectionClass'): # we got here with multiple selection before
                # we could set the class:
                # deselect everything and return
                selModel = self.selectionModel()
                for index in selModel.selection().indexes():
                    selModel.select(index, selModel.Deselect)
                return
            ## for all elements in the selection we verify that they are of the proper type
            ## and under the right root
            #print 'selected:'
            selModel = self.selectionModel()
            for index in selModel.selection().indexes():
                item = self.itemFromIndex(index)
                rootObj = self.getRootItem(item)._pmvObject
                # objects need to be of the same class
                if not isinstance(item._pmvObject, self._selectionClass):
                    selModel.select(index, selModel.Deselect)
                elif isinstance(self._selectionRootObj, Selection):
                    # if objects are of the same class they also need to
                    # have compatible roots
                    if not isinstance(rootObj, Selection):
                        selModel.select(index, selModel.Deselect)
                elif not isinstance(self._selectionRootObj, Selection):
                    if isinstance(rootObj, Selection):
                        selModel.select(index, selModel.Deselect)
                #else:
                #    print 'keeping2', item.text(0)
    def deleteObjectEventHandler(self, event):
        """React to a DeleteObjectEvent from Pmv.

        NOTE(review): currently only logs the deletion; the tree items
        for the deleted object are not removed here.
        """
        mol = event.object
        print 'Handling delete', mol.name
    def removeItem(self, items):
        """Report the tree items that would be removed.

        NOTE(review): despite its name, this method only prints the items
        (a dict keyed by some identifier); nothing is detached from the
        tree here.
        """
        for k, item in items.items():
            print 'remove %s under %s'%(item.text(0), self.getRootItem(item).text(0))
def removeEmptyBranches(self, item, selection, root):
#print 'Entering', item.text(0), item, item.childCount()
#import pdb
#pdb.set_trace()
for i in range(item.childCount()):
child = item.child(i)
if hasattr(child, '_pmvObject'): # dummyChild does not have it
selectedAtoms = selection.inter(child._pmvObject.findType(Atom))
if len(selectedAtoms)==0:
#try:
del child._pmvObject._treeItems[root]
#except KeyError:
# import pdb
# pdb.set_trace()
#print 'deleting2', child.text(0), child, child._pmvObject.name
child.parent().removeChild(child)
else:
self.removeEmptyBranches(child, selection, root)
## def onItemClick(self, item, column):
## print 'mouse press intercepted', item, column
## def onItemActivated(self, item, column):
## import pdb
## pdb.set_trace()
## print 'mouse activaed intercepted', item, column
    def selectionEventHandler(self, event):
        """Unpack a SelectionEvent and forward it to _selectionEventHandler."""
        self._selectionEventHandler(event.setOn, event.setOff, event.new, event.old)
    def _selectionEventHandler(self, setOn, setOff, new, old):
        """Synchronize the dashboard tree with a Pmv selection change.

        :param setOn: atoms that just became selected
        :param setOff: atoms that just became deselected
        :param new: new selection object (not used here)
        :param old: previous selection object (not used here)

        Handles: the current selection becoming empty (its tree entry is
        removed), an active named selection becoming empty (its sub-tree
        is cleared but the top entry kept), a selection appearing while
        none was active (a 'Current Selection' entry is added), and
        incremental per-atom updates of an expanded selection entry.
        """
        #print 'SELECTIONEVENT', setOn, setOff
        app = self.pmvGUI.pmv
        item = None
        #print 'DDDD', app.activeSelection, app.curSelection
        #import pdb
        #pdb.set_trace()
        if app.curSelection.empty():
            # the selection is empty and was shown in the tree => we remove it
            if hasattr(app.activeSelection,'_treeItems') and app.activeSelection is app.curSelection:
                item = app.activeSelection._treeItems.keys()[0]
                self.invisibleRootItem().removeChild(item)
                del self.pmvGUI.activeSelection._treeItems
                self.pmvGUI.activeSelection = None # remember that no selection is active
            return
        ##
        ## selection in PMV is empty
        if app.activeSelection.empty():
            if self.pmvGUI.activeSelection is None: # no selection is active in the dashboard
                return
            ##
            ## named selection is active
            if app.activeSelection != app.curSelection:
                # we clear the sub tree to reflect empty selection
                # but keep this selection as active with top entry in dashboard
                item = app.activeSelection._treeItems.keys()[0]
                for i in range(item.childCount()):
                    item.removeChild(item.child(i))
                item.dummyChild = QtGui.QTreeWidgetItem(item)
                item.setExpanded(False)
                return
        ##
        ## selection is NOT empty
        else:
            ##
            ## not selection is active, i.e. the selection is in app.curSelection
            if self.pmvGUI.activeSelection is None:
                # make curSelection the currently active selection
                app.activeSelection = app.curSelection
                self.pmvGUI.activeSelection = app.curSelection
                # add current selection to the tree if needed
                if hasattr(app.curSelection, '_treeItems'):
                    self.setCurrentSelection(app.curSelection._treeItems.keys()[0])
                else:
                    self.addObject(app.curSelection, None, 'Current Selection',
                                   color=self.colors['currentSelection'])
                return
            else:
                item = app.activeSelection._treeItems.keys()[0]
        if item:
            if self.isItemExpanded(item):
                # remove nodes of deselected atoms
                # NOTE(review): 'item' is reassigned inside this loop, so
                # 'root' and the removeEmptyBranches call below may not
                # refer to the selection's top entry after the first
                # removal - looks suspicious; verify intent.
                for atom in setOff:
                    root = item
                    if hasattr(atom, '_treeItems') and \
                       atom._treeItems.has_key(root):
                        #print 'FAGA', atom.name, atom._treeItems.keys()
                        item = atom._treeItems[root]
                        parent = item.parent()
                        parent.removeChild(item)
                        #print 'deleting1', item.text(0), item
                        del atom._treeItems[root]
                        # prune parents that became empty
                        while parent.childCount()==0:
                            grandParent = parent.parent()
                            print 'FUGU removing', parent.text(0)
                            grandParent.removeChild(parent)
                            del parent._pmvObject._treeItems[root]
                            parent = grandParent
                if len(setOff):
                    self.removeEmptyBranches(item, self.pmvGUI.activeSelection.atoms, root)
                # add nodes of selected atoms
                parents = []
                for atom in setOn:
                    root = item
                    # find first ancestor that is shown in tree
                    if hasattr(atom, '_treeItems') and atom._treeItems.has_key(root):
                        #print 'FUGU12'
                        continue # already selected
                    obj = atom
                    while not hasattr(obj.parent, '_treeItems') or \
                          not obj.parent._treeItems.has_key(root):
                        if obj.parent is None:
                            break
                        else:
                            obj = obj.parent
                    #print 'FAGA', atom, obj, obj.parent, obj.parent._treeItems[root]._pmvObject.name
                    #print 'FAGAO', obj.name
                    if obj.parent is None:
                        parent = root
                    else:
                        parent = obj.parent._treeItems[root]
                    # only materialize tree nodes under expanded parents
                    if parent.isExpanded():
                        newItem = self.addObject(obj, parent, obj.name.replace(' ', '_'))
                        parents.append(parent)
                # sort residues
                for parent in parents:
                    parent.sortChildren(0, QtCore.Qt.AscendingOrder)
    def getObjectsForTreeItem(self, item):
        """Return the Pmv objects represented by the subtree rooted at item.

        Under a Selection root the result is the intersection of the
        selection with the item's atoms; for a group it is a MoleculeSet
        of all molecules in the subtree; otherwise a set (of the object's
        own set class) holding just the object.
        """
        # gets all the objects in the subtree rooted at item
        # obj = self.treeitemToObj[item]
        obj = item._pmvObject
        root = self.getRootItem(item)
        if isinstance(root._pmvObject, Selection):
            # for selections return the intersection of the selection
            # with the atoms of the node corresponding to item
            return obj.atoms.findType(Atom).inter(root._pmvObject.atoms)
        elif isinstance(obj, MoleculeGroup):
            # for groups return all molecules in subtree
            objects = []
            for n in range(item.childCount()):
                child = item.child(n)
                if isinstance(child._pmvObject, MoleculeGroup):
                    objects.extend( self.getObjectsForTreeItem(child) )
                else:
                    objects.append( child._pmvObject )
            return MoleculeSet(objects)
        else:
            #print 'getObjectsForTreeItem', obj.__class__
            # for other nodes, i.e. Molecule, chains, Residues and Atoms
            klass = obj.setClass
            return klass([obj])
            #return obj
def getIcon(self, obj):
if isinstance(obj, Atom):
icon = os.path.join(PMVICONPATH, "atom.png")
elif isinstance(obj, Residue):
icon = os.path.join(PMVICONPATH, "sidechain.png")
elif isinstance(obj, Chain):
icon = os.path.join(PMVICONPATH, "chain.png")
elif isinstance(obj, Molecule):
icon = os.path.join(PMVICONPATH, "molecule.png")
elif isinstance(obj, MoleculeGroup):
icon = os.path.join(PMVICONPATH, "group.png")
elif isinstance(obj, Selection):
icon = os.path.join(PMVICONPATH, "selection.png")
else:
icon = os.path.join(PMVICONPATH, "python.gif")
return QtGui.QIcon(icon)
    def getColor(self, obj):
        """Return the background QColor for obj, or None for the default.

        Colors come from self.colors; only proteins, selections and
        groups get an explicit color.
        """
        if isinstance(obj, Protein):
            return self.colors['molecules']
        elif isinstance(obj, Selection):
            # The current (unnamed) selection is highlighted differently
            # from saved, named selections.
            if obj.name==u'Current Selection':
                return self.colors['currentSelection']
            else:
                return self.colors['namedSelections']
        elif isinstance(obj, MoleculeGroup):
            return self.colors['groups']
        else:
            return None
def addObject(self, obj, parent, name, color=None):
# obj is a pmv object such as molecule or group
# parent is a tree item
#if name == 'OH':
# import pdb
# pdb.set_trace()
#if color is None:
# color = self.getColor(obj)
if parent is None: # add a root object (not draggable)
root = item = QtGui.QTreeWidgetItem(self)
#item.setFlags(item.flags() & ~QtCore.Qt.ItemIsDragEnabled)
#self.objToTreeitem[root] = {}
else:
if isinstance(obj, Residue):
item = ResTreeWidgetItem(parent)
else:
item = QtGui.QTreeWidgetItem(parent)
root = self.getRootItem(item)
if isinstance(obj, MoleculeGroup):
pass
elif isinstance(obj, Protein):
# molecules inside selections cannot be dragged
if isinstance(root._pmvObject, Selection):
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsDragEnabled)
# disallow dropping on Proteins
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsDropEnabled)
pass
else: # | |
data_transfer_template_for_random_access_buffers_file.read()
data_transfer_template_for_random_access_buffers_file.close()
data_transfer_template_for_continuous_access_buffers_file = open(
'non_chunked_data_transfer_template_for_continuous_access_buffers.h', 'r')
data_transfer_template_for_continuous_access_buffers = data_transfer_template_for_continuous_access_buffers_file.read()
data_transfer_template_for_continuous_access_buffers_file.close()
else:
data_transfer_template_for_non_random_access_buffers_file = open(
'chunked_data_transfer_template_for_non_random_access_buffers.h', 'r')
data_transfer_template_for_non_random_access_buffers = data_transfer_template_for_non_random_access_buffers_file.read()
data_transfer_template_for_non_random_access_buffers_file.close()
data_transfer_template_for_random_access_buffers_file = open(
'chunked_data_transfer_template_for_random_access_buffers.h', 'r')
data_transfer_template_for_random_access_buffers = data_transfer_template_for_random_access_buffers_file.read()
data_transfer_template_for_random_access_buffers_file.close()
data_transfer_template_for_continuous_access_buffers_file = open(
'chunked_data_transfer_template_for_continuous_access_buffers.h', 'r')
data_transfer_template_for_continuous_access_buffers = data_transfer_template_for_continuous_access_buffers_file.read()
data_transfer_template_for_continuous_access_buffers_file.close()
code_for_per_slice_data_transfers = ''
code_for_one_time_data_transfers = ''
code_for_one_time_data_transfers_inside_the_while_loop = '' # This is only used for random access output buffers if
# the GPU is the preferred device.
for b in buffers:
if b.access_direction == buffer_spec.access_direction.IN or \
b.access_direction == buffer_spec.access_direction.INOUT or \
(b.access_direction == buffer_spec.access_direction.OUT and b.access_pattern == buffer_spec.access_pattern.RANDOM):
if b.access_pattern == buffer_spec.access_pattern.RANDOM or \
(not indices_are_used_only_for_memory_accesses and
not b.access_pattern == buffer_spec.access_pattern.REDUCTION_INTERMEDIATE_RESULTS and
not b.access_pattern == buffer_spec.access_pattern.REDUCTION):
code = data_transfer_template_for_random_access_buffers\
.replace('%ADDITIONAL_ELEMENTS_FOR_THE_THREAD_WITH_THE_HIGHEST_ID_IN_EACH_BLOCK%',
str(b.number_of_overlapping_accesses)) \
.replace('%BUFFER_ELEMENT_COUNT%', b.element_count) \
.replace('%DESTINATION_BUFFER%', 'h.cuda.' + b.buffer_name + '_d')\
.replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')')\
.replace('%DESTINATION_POINTER%', 'd_ptr') \
.replace('%SOURCE_BUFFER%', b.buffer_name) \
.replace('%SOURCE_POINTER%', 'h_ptr')\
.replace('%TRANSFER_DIRECTION%', 'cudaMemcpyHostToDevice')
if b.access_direction == buffer_spec.access_direction.OUT and preferred_device == Device.GPU:
code_for_one_time_data_transfers_inside_the_while_loop += code
else:
code_for_one_time_data_transfers += code
elif b.access_pattern == buffer_spec.access_pattern.REDUCTION_INTERMEDIATE_RESULTS:
data_transfer_template_for_random_access_buffers_file = open(
'non_chunked_data_transfer_template_for_random_access_buffers.h', 'r')
data_transfer_template_for_random_access_buffers = data_transfer_template_for_random_access_buffers_file.read()
data_transfer_template_for_random_access_buffers_file.close()
code = data_transfer_template_for_random_access_buffers\
.replace('%ADDITIONAL_ELEMENTS_FOR_THE_THREAD_WITH_THE_HIGHEST_ID_IN_EACH_BLOCK%',
str(b.number_of_overlapping_accesses)) \
.replace('%BUFFER_ELEMENT_COUNT%', b.element_count) \
.replace('%DESTINATION_BUFFER%', 'h.cuda.' + b.buffer_name + '_d')\
.replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')')\
.replace('%DESTINATION_POINTER%', 'd_ptr') \
.replace('%SOURCE_BUFFER%', b.buffer_name) \
.replace('%SOURCE_POINTER%', 'h_ptr')\
.replace('%TRANSFER_DIRECTION%', 'cudaMemcpyHostToDevice')
code_for_one_time_data_transfers += code
elif b.access_pattern == buffer_spec.access_pattern.BY_THREAD_ID:
code_for_per_slice_data_transfers += data_transfer_template_for_non_random_access_buffers\
.replace('%BUFFER_NAME_HOST%', b.buffer_name)\
.replace('%ADDITIONAL_ELEMENTS_FOR_THE_THREAD_WITH_THE_HIGHEST_ID_IN_EACH_BLOCK%',
str(b.number_of_overlapping_accesses))\
.replace('%OFFSET_SHIFT%', 'BLOCK_SIZE_X')\
.replace('%BUFFER_NAME_DEVICE%', b.buffer_name + '_d')\
.replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')')\
.replace('%DESTINATION_POINTER%', 'd_ptr')\
.replace('%NUMBER_OF_ELEMENTS_ACCESSED_PER_BLOCK_OR_THREAD_ID%',
str(b.number_of_elements_accessed_per_block_or_thread_id -
b.number_of_overlapping_accesses)) \
.replace('%SOURCE_POINTER%', 'h_ptr')\
.replace('%TRANSFER_DIRECTION%', 'cudaMemcpyHostToDevice')
elif b.access_pattern == buffer_spec.access_pattern.BY_BLOCK_ID:
code_for_per_slice_data_transfers += data_transfer_template_for_non_random_access_buffers \
.replace('%ADDITIONAL_ELEMENTS_FOR_THE_THREAD_WITH_THE_HIGHEST_ID_IN_EACH_BLOCK%',
str(b.number_of_overlapping_accesses)) \
.replace('%BUFFER_NAME_HOST%', b.buffer_name) \
.replace('%BUFFER_NAME_DEVICE%', b.buffer_name + '_d') \
.replace('%DESTINATION_POINTER%', 'd_ptr') \
.replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')')\
.replace('%NUMBER_OF_ELEMENTS_ACCESSED_PER_BLOCK_OR_THREAD_ID%',
str(b.number_of_elements_accessed_per_block_or_thread_id)) \
.replace('%OFFSET_SHIFT%', '1') \
.replace('%SOURCE_POINTER%', 'h_ptr')\
.replace('%TRANSFER_DIRECTION%', 'cudaMemcpyHostToDevice')
elif b.access_pattern == buffer_spec.access_pattern.SUCCESSIVE_SUBSECTIONS:
ea = str(b.number_of_elements_accessed_per_block_or_thread_id)
code_for_per_slice_data_transfers += data_transfer_template_for_non_random_access_buffers \
.replace('%ADDITIONAL_ELEMENTS_FOR_THE_THREAD_WITH_THE_HIGHEST_ID_IN_EACH_BLOCK%', '0') \
.replace('%BUFFER_NAME_HOST%', b.buffer_name) \
.replace('%BUFFER_NAME_DEVICE%', b.buffer_name + '_d') \
.replace('%DESTINATION_POINTER%', 'd_ptr') \
.replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')') \
.replace('%NUMBER_OF_ELEMENTS_ACCESSED_PER_BLOCK_OR_THREAD_ID%', ea) \
.replace('%OFFSET_SHIFT%', '1') \
.replace('%SOURCE_POINTER%', 'h_ptr') \
.replace('%TRANSFER_DIRECTION%', 'cudaMemcpyHostToDevice')
elif b.access_pattern == buffer_spec.access_pattern.CONTINUOUS:
buffer_resizing_code = ''
if b.is_accessed_indirectly: #and not any_of_the_buffers_is_random_access:
template_file = open('reallocate_indirectly_accessed_buffers_template.h', 'r')
buffer_resizing_code = template_file.read()
template_file.close()
buffer_resizing_code = buffer_resizing_code.replace('%BUFFER_NAME%', b.buffer_name)
code_for_per_slice_data_transfers += data_transfer_template_for_continuous_access_buffers \
.replace('%BUFFER_NAME_HOST%', b.buffer_name) \
.replace('%BUFFER_NAME_DEVICE%', b.buffer_name + '_d') \
.replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')') \
.replace('%DESTINATION_POINTER%', 'd_ptr')\
.replace('%SOURCE_POINTER%', 'h_ptr') \
.replace('%START_INDEX_LAMBDA%', b.lambda_to_compute_start_index_for_continuous_accesses)\
.replace('%FINAL_INDEX_LAMBDA%', b.lambda_to_compute_final_index_for_continuous_accesses)\
.replace('%TRANSFER_DIRECTION%', 'cudaMemcpyHostToDevice')\
.replace('//%CHECK_IF_INDIRECTLY_ACCESSED_BUFFERS_NEED_TO_BE_RESIZED%', buffer_resizing_code) \
.replace('%DEVICE_BUFFER_INDEX%', '0') # 'start_index' if any_of_the_buffers_is_random_access else '0')\
elif b.access_pattern == buffer_spec.access_pattern.INDIRECT:
template = template.replace(placeholder_for_buffer_sizes,
'size_t ' + b.buffer_name + '_current_size_in_bytes = 0;\n' + placeholder_for_buffer_sizes)
malloc_template_file = open('malloc_for_indirectly_accessed_buffers_template.h', 'r')
malloc_template = malloc_template_file.read()
malloc_template_file.close()
malloc_template = malloc_template\
.replace('%START_INDEX_LAMBDA%', b.lambda_to_compute_start_index_for_indirect_accesses)\
.replace('%FINAL_INDEX_LAMBDA%', b.lambda_to_compute_final_index_for_indirect_accesses)\
.replace('%INTERMEDIATE_BUFFER%', b.intermediate_buffer_for_indirect_accesses)\
.replace('%BUFFER_NAME%', b.buffer_name)
data_transfer_template_file = open('chunked_data_transfer_template_for_random_access_buffers.h', 'r')
data_transfer_template = data_transfer_template_file.read()
data_transfer_template_file.close()
data_transfer_template = data_transfer_template\
.replace('%BUFFER_NAME_HOST%', '&' + b.buffer_name + '[indirect_start_index]')\
.replace('%BUFFER_ELEMENT_COUNT%', 'elements')\
.replace('%BUFFER_NAME_DEVICE%', b.buffer_name + '_d') \
.replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')')\
.replace('%DESTINATION_POINTER%', 'd_ptr')\
.replace('%SOURCE_POINTER%', 'h_ptr')\
.replace('%TRANSFER_DIRECTION%', 'cudaMemcpyHostToDevice')
code_for_per_slice_data_transfers += malloc_template.replace('%DATA_TRANSFER%', data_transfer_template)
elif b.access_pattern == buffer_spec.access_pattern.REDUCTION:
code_for_per_slice_data_transfers += data_transfer_template_for_non_random_access_buffers\
.replace('%BUFFER_NAME_HOST%', b.buffer_name) \
.replace('%ADDITIONAL_ELEMENTS_FOR_THE_THREAD_WITH_THE_HIGHEST_ID_IN_EACH_BLOCK%',
str(b.number_of_overlapping_accesses)) \
.replace('%OFFSET_SHIFT%', '1') \
.replace('%BUFFER_NAME_DEVICE%', b.buffer_name + '_d') \
.replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')') \
.replace('%DESTINATION_POINTER%', 'd_ptr') \
.replace('%NUMBER_OF_ELEMENTS_ACCESSED_PER_BLOCK_OR_THREAD_ID%',
str(b.number_of_elements_accessed_per_block_or_thread_id)) \
.replace('%SOURCE_POINTER%', 'h_ptr') \
.replace('%TRANSFER_DIRECTION%', 'cudaMemcpyHostToDevice')
else:
print('Error: Unknown access pattern')
traceback.print_stack()
sys.exit(1)
# Remove placeholders that will not be needed anymore
template.replace(placeholder_for_buffer_sizes, '')
# Add a variable for the return value of cudaMemcpy only if we use cudaMemcpy.
if code_for_one_time_data_transfers != '':
code_for_one_time_data_transfers = 'cudaError err;\n' + code_for_one_time_data_transfers
# Return the results
template = template.replace('//%DATA_TRANSFER_TO_DEVICE%', code_for_per_slice_data_transfers)\
.replace('//%ONE_TIME_DATA_TRANSFERS_TO_THE_GPU%', code_for_one_time_data_transfers)\
.replace('//%TRANSFER_RANDOM_ACCESS_OUTPUT_BUFFER_IF_THE_GPU_IS_THE_PREFERRED_DEVICE%',
code_for_one_time_data_transfers_inside_the_while_loop)
return template
def insert_code_for_data_transfers_from_the_gpu(indices_are_used_only_for_memory_accesses: bool,
                                                is_reduction: bool,
                                                buffers: List[buffer_spec.Buffer],
                                                is_idempotent: bool,
                                                preferred_device: Device,
                                                template: str):
    """Fill the device-to-host transfer placeholders in *template*.

    For every buffer the kernel writes (direction OUT/INOUT, excluding
    interim results) a transfer template is chosen according to the
    buffer's access pattern, its placeholders are substituted, and the
    generated code is spliced into the returned host-code template.

    NOTE(review): relies on module-level names defined elsewhere in this
    file (cuda_memcpy_template, buffer_spec, Device, sys, traceback).
    """
    # Chunked (per-slice) transfers are only used when slices can safely
    # be re-executed (idempotent) and the CPU is the preferred device;
    # otherwise whole buffers are transferred in one go.
    if is_idempotent and preferred_device == Device.CPU:
        data_transfer_template_file = open('chunked_data_transfer_template_for_non_random_access_buffers.h', 'r')
    else:
        data_transfer_template_file = open('non_chunked_data_transfer_template_for_non_random_access_buffers.h', 'r')
    data_transfer_template = data_transfer_template_file.read()
    data_transfer_template_file.close()
    # Does any buffer written by the kernel use a non-random access
    # pattern? Only then (or for reductions) is a dedicated
    # transfer-from-device function emitted into the host code.
    at_least_one_output_buffer_is_not_random_access = False
    for b in buffers:
        if b.access_pattern != buffer_spec.access_pattern.RANDOM:
            if b.access_direction == buffer_spec.access_direction.IN:
                pass
            elif b.access_direction == buffer_spec.access_direction.OUT:
                at_least_one_output_buffer_is_not_random_access = True
            elif b.access_direction == buffer_spec.access_direction.INOUT:
                at_least_one_output_buffer_is_not_random_access = True
            else:
                print('Error: Unknown access direction.')
                sys.exit(1)
    if (at_least_one_output_buffer_is_not_random_access and indices_are_used_only_for_memory_accesses) or is_reduction:
        f = open('transfer_data_from_device_function_stub_template.h', 'r')
        data_transfer_function_stub = f.read()
        f.close()
        f = open('transfer_data_from_device_function_call_template.h', 'r')
        data_transfer_function_call = f.read()
        f.close()
        template = template \
            .replace('//%TRANSFER_DATA_FROM_DEVICE_FUNCTION_CALL%', data_transfer_function_call) \
            .replace('//%TRANSFER_DATA_FROM_DEVICE_FUNCTION_STUB%', data_transfer_function_stub)
    else:
        # No transfer function needed: blank out the placeholders.
        template = template \
            .replace('//%TRANSFER_DATA_FROM_DEVICE_FUNCTION_CALL%', '') \
            .replace('//%TRANSFER_DATA_FROM_DEVICE_FUNCTION_STUB%', '')
    data_transfer_code = ''
    code_for_one_time_data_transfers = ''
    for b in buffers:
        # Only buffers the kernel writes (and that are not interim
        # results) need to be copied back to the host.
        if (b.access_direction == buffer_spec.access_direction.OUT or \
                b.access_direction == buffer_spec.access_direction.INOUT) and \
                b.data_kind != buffer_spec.data_kind.INTERIM_RESULTS:
            if b.access_pattern == buffer_spec.access_pattern.RANDOM or \
               b.access_pattern == buffer_spec.access_pattern.REDUCTION_INTERMEDIATE_RESULTS or \
               (not indices_are_used_only_for_memory_accesses and
                not b.access_pattern == buffer_spec.access_pattern.REDUCTION_INTERMEDIATE_RESULTS and
                not b.access_pattern == buffer_spec.access_pattern.REDUCTION):
                # One-time whole-buffer copy after the kernel finished.
                # NOTE(review): cuda_memcpy_template is a module-level
                # template string defined elsewhere in this file.
                code_for_one_time_data_transfers += cuda_memcpy_template.replace('%DESTINATION_BUFFER%', b.buffer_name) \
                    .replace('%SOURCE_BUFFER%', 'h.cuda.' + b.buffer_name + '_d') \
                    .replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')') \
                    .replace('%ELEMENT_COUNT%', b.element_count) \
                    .replace('%TRANSFER_DIRECTION%', 'cudaMemcpyDeviceToHost')
            elif b.access_pattern == buffer_spec.access_pattern.BY_THREAD_ID:
                # Per-thread buffers shift by BLOCK_SIZE_X per batch; the
                # overlapping elements are excluded from the copy size.
                data_transfer_code += data_transfer_template.replace('%BUFFER_NAME_HOST%', b.buffer_name) \
                    .replace('%ADDITIONAL_ELEMENTS_FOR_THE_THREAD_WITH_THE_HIGHEST_ID_IN_EACH_BLOCK%',
                             str(b.number_of_overlapping_accesses)) \
                    .replace('%OFFSET_SHIFT%', 'BLOCK_SIZE_X')\
                    .replace('%BUFFER_NAME_DEVICE%', b.buffer_name + '_d') \
                    .replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')')\
                    .replace('%DESTINATION_POINTER%', 'h_ptr') \
                    .replace('%NUMBER_OF_ELEMENTS_ACCESSED_PER_BLOCK_OR_THREAD_ID%',
                             str(b.number_of_elements_accessed_per_block_or_thread_id - b.number_of_overlapping_accesses)) \
                    .replace('%SOURCE_POINTER%', 'd_ptr') \
                    .replace('%TRANSFER_DIRECTION%', 'cudaMemcpyDeviceToHost')
            elif b.access_pattern == buffer_spec.access_pattern.BY_BLOCK_ID:
                data_transfer_code += data_transfer_template.replace('%BUFFER_NAME_HOST%', b.buffer_name) \
                    .replace('%ADDITIONAL_ELEMENTS_FOR_THE_THREAD_WITH_THE_HIGHEST_ID_IN_EACH_BLOCK%',
                             str(b.number_of_overlapping_accesses)) \
                    .replace('%OFFSET_SHIFT%', '1')\
                    .replace('%BUFFER_NAME_DEVICE%', b.buffer_name + '_d') \
                    .replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')')\
                    .replace('%DESTINATION_POINTER%', 'h_ptr') \
                    .replace('%NUMBER_OF_ELEMENTS_ACCESSED_PER_BLOCK_OR_THREAD_ID%',
                             str(b.number_of_elements_accessed_per_block_or_thread_id)) \
                    .replace('%SOURCE_POINTER%', 'd_ptr') \
                    .replace('%TRANSFER_DIRECTION%', 'cudaMemcpyDeviceToHost')
            elif b.access_pattern == buffer_spec.access_pattern.SUCCESSIVE_SUBSECTIONS:
                ae = str(b.number_of_elements_accessed_per_block_or_thread_id)
                data_transfer_code += data_transfer_template.replace('%BUFFER_NAME_HOST%', b.buffer_name) \
                    .replace('%ADDITIONAL_ELEMENTS_FOR_THE_THREAD_WITH_THE_HIGHEST_ID_IN_EACH_BLOCK%', '0') \
                    .replace('%OFFSET_SHIFT%', '1') \
                    .replace('%BUFFER_NAME_DEVICE%', b.buffer_name + '_d') \
                    .replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')') \
                    .replace('%DESTINATION_POINTER%', 'h_ptr') \
                    .replace('%NUMBER_OF_ELEMENTS_ACCESSED_PER_BLOCK_OR_THREAD_ID%', ae) \
                    .replace('%SOURCE_POINTER%', 'd_ptr') \
                    .replace('%TRANSFER_DIRECTION%', 'cudaMemcpyDeviceToHost')
            elif b.access_pattern == buffer_spec.access_pattern.CONTINUOUS:
                # Continuous buffers compute their transfer range from
                # user-supplied start/final index lambdas.
                if is_idempotent and preferred_device == Device.CPU:
                    data_transfer_template_for_continuous_access_buffers_file = open(
                        'chunked_data_transfer_template_for_continuous_access_buffers.h', 'r')
                else:
                    data_transfer_template_for_continuous_access_buffers_file = open(
                        'non_chunked_data_transfer_template_for_continuous_access_buffers.h', 'r')
                data_transfer_template_for_continuous_access_buffers = data_transfer_template_for_continuous_access_buffers_file.read()
                data_transfer_template_for_continuous_access_buffers_file.close()
                data_transfer_code += data_transfer_template_for_continuous_access_buffers \
                    .replace('%BUFFER_NAME_HOST%', b.buffer_name) \
                    .replace('%BUFFER_NAME_DEVICE%', b.buffer_name + '_d') \
                    .replace('%ELEMENT_SIZE%', 'sizeof(' + b.type + ')') \
                    .replace('%DESTINATION_POINTER%', 'h_ptr') \
                    .replace('%SOURCE_POINTER%', 'd_ptr') \
                    .replace('%START_INDEX_LAMBDA%', b.lambda_to_compute_start_index_for_continuous_accesses) \
                    .replace('%FINAL_INDEX_LAMBDA%', b.lambda_to_compute_final_index_for_continuous_accesses) \
                    .replace('%TRANSFER_DIRECTION%', 'cudaMemcpyDeviceToHost')\
                    .replace('%DEVICE_BUFFER_INDEX%', '0')  # 'start_index' if any_of_the_buffers_is_random_access else '0')
            else:
                print('Error: Unknown access pattern')
                traceback.print_stack()
                sys.exit(1)
    # Add a variable for the return value of cudaMemcpy only if we use cudaMemcpy.
    if code_for_one_time_data_transfers != '':
        code_for_one_time_data_transfers = 'cudaError err;\n' + code_for_one_time_data_transfers
    template = template.replace('//%DATA_TRANSFER_FROM_DEVICE%', data_transfer_code)\
        .replace('//%ONE_TIME_DATA_TRANSFERS_FROM_THE_DEVICE%', code_for_one_time_data_transfers)
    return template
def insert_texture_object_instantiations(buffers: List[buffer_spec.Buffer], t: str) -> str:
    """Declare one texture-reader struct instance per texture-backed buffer.

    The declarations are placed directly above the kernel-code
    placeholder, which is re-emitted so that later passes can still
    locate and substitute it.
    """
    declarations = [
        '{0}_tex_reader_struct {0}_tex_reader;\n'.format(b.buffer_name)
        for b in buffers
        if b.use_texture_on_gpu
    ]
    declarations.append('//%CUDA_KERNEL_CODE%')
    return t.replace('//%CUDA_KERNEL_CODE%', ''.join(declarations))
def insert_cuda_kernel(
        any_of_the_buffers_is_random_access: bool,
        buffers: List[buffer_spec.Buffer],
        indices_are_used_only_for_memory_accesses: bool,
        kernel_code: str,
        scalar_parameters: List[str],
        scalar_parameter_types: Dict[str, str],
        template: str) -> str:
    """Rewrite the device-hopper iteration macros in *kernel_code* and
    splice the result, plus the kernel parameter declarations, into
    *template*.
    """
    # When iteration indices carry semantic meaning (not just memory
    # addressing), the global iteration id must include the batch offset.
    if not indices_are_used_only_for_memory_accesses:
        iteration_expr = '((blockDim.x * blockIdx.x + threadIdx.x) + _batch_offset_x * blockDim.x)'
        batch_id_expr = '(blockIdx.x + _batch_offset_x)'
    else:
        iteration_expr = '(blockDim.x * blockIdx.x + threadIdx.x)'
        batch_id_expr = 'blockIdx.x'
    macro_substitutions = (
        ('GET_ITERATION()', iteration_expr),
        ('GET_ITERATION_WITHIN_BATCH()', 'threadIdx.x'),
        ('GET_BATCH_ID()', batch_id_expr),
        ('GET_2D_ITERATION_WITHIN_BATCH_X', 'threadIdx.x'),
        ('GET_2D_ITERATION_WITHIN_BATCH_Y', 'threadIdx.y'),
        ('GET_2D_ITERATION_X', '(blockDim.x * (blockIdx.x + _batch_offset_x) + threadIdx.x)'),
        ('GET_2D_ITERATION_Y', '(blockDim.y * (blockIdx.y + _batch_offset_y) + threadIdx.y)'),
    )
    for macro, replacement in macro_substitutions:
        kernel_code = kernel_code.replace(macro, replacement)
    kernel_code = insert_offset_computations_for_buffers_that_contain_indices(buffers, kernel_code)
    kernel_code = insert_cuda_texture_readers(buffers, kernel_code)
    template = template.replace('//%CUDA_KERNEL_CODE%', kernel_code)
    # Kernel parameter declarations: buffers, scalars, then the batch
    # offsets appended by the runtime.
    decls = [b.type + ' *' + b.buffer_name for b in buffers]
    decls += [scalar_parameter_types[sp] + ' ' + sp for sp in scalar_parameters]
    decls += ['size_t _batch_offset_x', 'size_t _batch_offset_y']
    return template.replace('//%CUDA_KERNEL_PARAMETER_DECLS%', ',\n'.join(decls))
def insert_cuda_kernel_call(buffers: List[buffer_spec.Buffer], is_2d_kernel: bool, is_reduction: bool, t: str) -> str:
    """Substitute the kernel launch statement for //%CUDA_KERNEL_CALL%.

    Reductions launch a fixed-geometry `reduce` kernel fed by buffers[1];
    all other kernels launch `cuda_kernel` with a 1D or 2D grid whose
    argument list is filled in by a later pass.
    """
    if is_reduction:
        launch = 'const size_t num_blocks = 64;\n'\
                 'const size_t block_size = 256;\n'\
                 'const size_t smem_size = block_size * sizeof(%TYPE%);\n' \
                 'reduce<<<num_blocks, block_size, smem_size>>>(' \
                 '    h.cuda.%SOURCE_BUFFER_NAME%_d, h.cuda.intermediate_results_d, slice_sizes[0]);'
        source = buffers[1]
        launch = launch.replace('%SOURCE_BUFFER_NAME%', source.buffer_name)\
                       .replace('%TYPE%', source.type)
    elif is_2d_kernel:
        launch = 'dim3 grid_size(slice_sizes[0], slice_sizes[1]);'\
                 'dim3 block_size(BLOCK_SIZE_X, BLOCK_SIZE_Y);'\
                 'cuda_kernel<<<grid_size, block_size>>>(' \
                 '    //%CUDA_KERNEL_PARAMETERS%);'
    else:
        launch = 'cuda_kernel<<<slice_sizes[0], BLOCK_SIZE_X>>>(' \
                 '    //%CUDA_KERNEL_PARAMETERS%);'
    return t.replace('//%CUDA_KERNEL_CALL%', launch)
def insert_cuda_kernel_parameters(
        buffers: List[buffer_spec.Buffer],
        indices_are_used_only_for_memory_accesses: bool,
        scalar_parameters: List[str],
        template: str):
    """Substitute the actual kernel-call arguments for
    //%CUDA_KERNEL_PARAMETERS%: device buffer pointers, scalar
    parameters and the two batch offsets.
    """
    arguments = ['h.cuda.' + b.buffer_name + '_d' for b in buffers]
    arguments.extend(scalar_parameters)
    arguments.extend(('offsets[0]', 'offsets[1]'))
    return template.replace('//%CUDA_KERNEL_PARAMETERS%', ',\n'.join(arguments))
def insert_cuda_barrier(template: str):
    """Map the portable device-hopper batch barrier onto CUDA's
    block-wide __syncthreads() barrier.
    """
    portable_barrier = 'device_hopper::batch_barrier()'
    return template.replace(portable_barrier, '__syncthreads()')
def insert_code_that_exits_the_manager_thread_if_the_slice_is_aborted(
code: str,
is_idempotent: bool,
is_reduction: bool,
preferred_device: Device):
if not is_reduction:
code = code.replace('//%REDUCTION_RETURN_IF_THE_SLICE_IS_ABORTED%\n', '')
if (is_idempotent and preferred_device == Device.CPU) or \
(is_reduction and preferred_device == Device.CPU):
code_that_returns_from_the_manager_thread = 'if (abort_slice) | |
# ECOR 1051 Milestone 3 P8: Final Filter Function Code
# Team 109
# Date of Submission: April 2, 2020
# Team Members:
# <NAME> 101143478 (Team Leader)
# <NAME> 101148917
# <NAME> 101150112
from Cimpl import *
from simple_Cimpl_filters import grayscale
#Red Filter Function
def red_channel(image: Image) -> Image:
    """ Author: <NAME> (101150112)
    (Cimpl.Image) -> Cimpl.Image
    Return a copy of image in which every pixel keeps only its red
    component; the green and blue components are set to zero.
    >>> red_channel(original_image)
    Image is displayed.
    """
    filtered = copy(image)
    for x, y, (r, _g, _b) in image:
        # Keep red, zero out green and blue.
        set_color(filtered, x, y, create_color(r, 0, 0))
    return filtered
#Green Filter Function
def green_channel(initial_image: Image) -> Image:
    """ Author: <NAME> (101143478)
    (Cimpl.Image) -> Cimpl.Image
    Return a copy of initial_image in which every pixel keeps only its
    green component; the red and blue components are set to zero.
    >>> green_channel(original_image)
    Image is displayed.
    """
    filtered = copy(initial_image)
    for x, y, (_r, g, _b) in initial_image:
        # Keep green, zero out red and blue.
        set_color(filtered, x, y, create_color(0, g, 0))
    return filtered
#Blue Filter Function
def blue_channel(original: Image) -> Image:
    """ Author: <NAME> (101148917)
    (Cimpl.Image) -> Cimpl.Image
    Return a copy of original in which every pixel keeps only its blue
    component; the red and green components are set to zero.
    >>>blue_channel(original_image)
    Image is displayed.
    """
    copied = copy(original)
    # Iterate over the copy (as the original implementation did) and
    # zero the red and green components of every pixel.
    for x, y, (_r, _g, b) in copied:
        set_color(copied, x, y, create_color(0, 0, b))
    return copied
#Combine Filter Function
def combine(image1: Image, image2: Image, image3: Image) -> Image:
    """Author: <NAME> (101143478)
    (Cimpl.Image, Cimpl.Image, Cimpl.Image) -> Cimpl.Image
    Return an image whose every pixel is the channel-wise sum of the
    corresponding pixels of the three input images (used to recombine
    the single-channel filter outputs into a full-colour image).
    >>> combine (red_image, green_image, blue_image)
    Image is displayed.
    """
    merged = copy(image1)
    for x, y, _colour in merged:
        # Sum the red, green and blue components across the three images.
        channels = [get_color(img, x, y) for img in (image1, image2, image3)]
        total_r, total_g, total_b = (sum(parts) for parts in zip(*channels))
        set_color(merged, x, y, create_color(total_r, total_g, total_b))
    return merged
#Two Tone Filter Function
def two_tone(image: Image, colour1: str, colour2: str) -> Image:
    """ Author: <NAME> (101143478)
    (Cimpl.Image, str, str) -> Cimpl.Image
    Return a copy of image drawn with only the two named colours
    (tones). The brightness (average of r, g and b) of each pixel
    decides which tone it receives:
    0 - 127 -> first colour
    128 - 255 -> second colour
    Raises ValueError if a colour name is not one of the supported
    names (black, white, red, lime, blue, yellow, cyan, magenta, gray).
    >>>two_tone(original_image, "black", "white")
    Displays image with those two tones.
    """
    # Mapping of supported colour names to RGB triples. The original
    # implementation built a flat list through an if/elif chain; unknown
    # names were silently skipped and only surfaced later as a confusing
    # IndexError, so unknown names now raise ValueError immediately.
    rgb_values = {
        "black": (0, 0, 0),
        "white": (255, 255, 255),
        "red": (255, 0, 0),
        "lime": (0, 255, 0),
        "blue": (0, 0, 255),
        "yellow": (255, 255, 0),
        "cyan": (0, 255, 255),
        "magenta": (255, 0, 255),
        "gray": (128, 128, 128),
    }
    try:
        dark_tone = create_color(*rgb_values[colour1])
        bright_tone = create_color(*rgb_values[colour2])
    except KeyError as err:
        raise ValueError('Unsupported colour name: {}'.format(err.args[0])) from None
    new_image = copy(image)
    for x, y, (r, g, b) in image:
        brightness = (r + g + b) / 3
        if brightness < 128:
            set_color(new_image, x, y, dark_tone)
        else:
            set_color(new_image, x, y, bright_tone)
    return new_image
#Three Tone Filter Function
def three_tone(image: Image, colour1: str, colour2: str, colour3: str) -> Image:
    """ Author: <NAME> (101143478)
    (Cimpl.Image, str, str, str) -> Cimpl.Image
    Return a copy of image drawn with only the three named colours
    (tones). The brightness (average of r, g and b) of each pixel
    decides which tone it receives:
    0 - 84 -> first colour
    85 - 170 -> second colour
    171 - 255 -> third colour
    Raises ValueError if a colour name is not one of the supported
    names (black, white, red, lime, blue, yellow, cyan, magenta, gray).
    >>>three_tone(original_image, "black", "white", "yellow")
    Displays image with those three tones.
    """
    # Mapping of supported colour names to RGB triples; unknown names
    # raise ValueError instead of being silently skipped (the original
    # if/elif chain later failed with a confusing IndexError).
    rgb_values = {
        "black": (0, 0, 0),
        "white": (255, 255, 255),
        "red": (255, 0, 0),
        "lime": (0, 255, 0),
        "blue": (0, 0, 255),
        "yellow": (255, 255, 0),
        "cyan": (0, 255, 255),
        "magenta": (255, 0, 255),
        "gray": (128, 128, 128),
    }
    try:
        tones = [create_color(*rgb_values[name])
                 for name in (colour1, colour2, colour3)]
    except KeyError as err:
        raise ValueError('Unsupported colour name: {}'.format(err.args[0])) from None
    new_image = copy(image)
    for x, y, (r, g, b) in image:
        avg = (r + g + b) / 3
        # Boundary fix: with the original strict comparisons a brightness
        # of exactly 84 fell through to the THIRD tone and exactly 170
        # skipped the second tone, contradicting the documented ranges.
        if avg <= 84:
            set_color(new_image, x, y, tones[0])
        elif avg <= 170:
            set_color(new_image, x, y, tones[1])
        else:
            set_color(new_image, x, y, tones[2])
    return new_image
#Extreme Contrast Filter Function
def extreme_contrast(image: Image) -> Image:
    """ Author: <NAME> (101150112)
    (Cimpl.Image) -> Cimpl.Image
    Return a copy of image in which the contrast between pixels has been
    maximized: each colour component is forced to its nearest extreme.
    component 0 - 127   -> 0
    component 128 - 255 -> 255
    >>>extreme_contrast(original_image)
    *Displays image with extreme contrast filter*
    """
    contrast_image = copy(image)
    for x, y, (r, g, b) in image:
        # The components are already unpacked from the pixel, so the
        # extra get_color() lookup done by the original implementation
        # (on an identical copy) was redundant and has been dropped.
        extreme = create_color(0 if r <= 127 else 255,
                               0 if g <= 127 else 255,
                               0 if b <= 127 else 255)
        set_color(contrast_image, x, y, extreme)
    return contrast_image
#Sepia Tinting Filter Function
def sepia(image: Image) -> Image:
""" Author: <NAME> (101148917)
(Cimpl.Image) -> Cimpl.Image
Passes the input file (picture) through a sepia filter, first by | |
"""This file is part of DeepLens which is released under MIT License and
is copyrighted by the University of Chicago. This project is developed by
the database group (chidata).
tiered_videoio.py uses opencv (cv2) to read and write files to disk. It contains
primitives to encode and decode archived and regular video formats for a tiered
storage system.
"""
from deeplens.tiered_manager.tiered_file import *
from deeplens.constants import *
from deeplens.struct import *
from deeplens.header import *
from deeplens.utils.clip import *
from deeplens.simple_manager.file import *
from deeplens.utils.frame_xform import *
import cv2
import os
from os import path
import time
import shutil
from pathlib import Path
from datetime import datetime
# TODO: fix headers and figure out what we're storing as crop group
def write_video_auto(vstream, \
                     output, \
                     encoding, \
                     header_info,
                     output_extern = None, \
                     scratch = DEFAULT_TEMP, \
                     frame_rate=DEFAULT_FRAME_RATE, \
                     header_cmp=RAW):
    """write_video_clips takes a stream of video and writes
    it to disk. It includes the specified header
    information as a part of the video file. The difference is that
    it writes a video to disk/external storage from a stream in clips of a specified
    size
    Args:
        vstream - a videostream or videotransform
        output - output file
        header_info - header information
        clip_size - how many frames in each clip
        scratch - temporary space to use
        frame_rate - the frame_rate of the video
        header_cmp - compression if any on the header
        output_extern - if the file goes to external
        storage, specify directory
    Returns:
        list of the files written (all clip segments plus the final
        '.start' metadata block)
    """
    # Define the codec and create VideoWriter object
    start = True
    seq = 0
    output_files = []
    global_time_header = {}
    header = {}
    # NOTE(review): 'header' and 'global_time_header' are plain dicts
    # here, yet header.getHeader()/header.reset() and
    # global_time_header.getHeader() are called below -- a dict has no
    # such methods, so these presumably should be ObjectHeader instances
    # (see the module-level TODO about fixing headers); confirm.
    update_global_header = ObjectHeader(global_time_header,\
            store_bounding_boxes=False, offset=header_info['offset'])
    # NOTE(review): update_global_header is never used after
    # construction; possibly ObjectHeader mutates global_time_header in
    # place -- verify.
    out_vids = []
    r_names = []
    file_names = []
    crops = []
    crop_positions = []
    for frame in vstream:
        if start or frame['split']:
            # write previous cropped clip segments to storage
            base_seq = seq
            if not start:
                # Flush one video per crop plus one full-frame video
                # (index 0) for the clip that just ended.
                for i in range(len(crops) + 1):
                    if output_extern:
                        # NOTE(review): r_name is the leftover value from
                        # the previous clip's writer-creation loop (the
                        # last crop's random name) and is the same for
                        # every i here -- looks unintended; confirm.
                        ref_name = os.path.join(scratch, r_name)
                        ref_file = add_ext(ref_name, '.txt')
                        write_ref_file(ref_file, file_names[i])
                        file_names[i] = ref_file #ref_file becomes the video
                        ext = '.ref'
                    else:
                        ext = '.seq'
                    header_dict = header.getHeader()
                    if i == 0 and len(crops) != 0:
                        # Full-frame segment records how many crop
                        # segments belong to this clip group.
                        header_dict['crop_group'] = base_seq + len(crops)
                    elif len(crops) != 0:
                        header_dict = crop_header(header_dict, crops[i - 1])
                        header_dict['crop_id'] = base_seq
                    if i != 0:
                        header_dict['crop_position'] = crop_positions[i - 1]
                    header_dict['seq'] = seq
                    output_files.append(build_fmt_file(header_dict, \
                                        file_names[i], \
                                        scratch, \
                                        add_ext(output, ext, seq), \
                                        header_cmp, \
                                        RAW,
                                        r_names[i]))
                    out_vids[i].release()
                    seq += 1
                r_names = []
                file_names = []
                out_vids = []
                header.reset()
            crops = frame['crop']
            #tmp file for the video
            # Open one writer for the full frame (i == 0) plus one per
            # crop region.
            for i in range(len(crops) + 1):
                crop = crops[i - 1]
                if i != 0:
                    crop_positions.append({0: (crop[0], crop[1])}) # we store the top left corner
                r_name = get_rnd_strng()
                if output_extern:
                    output_extern_seq = output_extern + str(seq + i)
                    if not os.path.exists(output_extern_seq):
                        os.mkdir(output_extern_seq)
                    seg_name = os.path.join(output_extern_seq, r_name)
                    file_names.append(output_extern_seq)
                else:
                    seg_name = os.path.join(scratch, r_name)
                file_name = add_ext(seg_name, AVI, seq + i)
                fourcc = cv2.VideoWriter_fourcc(*encoding)
                if not output_extern:
                    file_names.append(file_name)
                r_names.append(r_name)
                if i == 0:
                    width = vstream.width
                    height = vstream.height
                else:
                    # Crop writer dimensions from the crop bounding box.
                    width = abs(crops[i - 1][0] - crops[i - 1][2])
                    height = abs(crops[i - 1][1] - crops[i - 1][3])
                out_vid = cv2.VideoWriter(file_name,
                                          fourcc,
                                          frame_rate,
                                          (width, height),
                                          True)
                out_vids.append(out_vid)
            start = False
        update_crop = False
        # update cropped frames
        if len(frame['crop']) != 0:
            crops = frame['crop'] #note that even if we change the size/location of the crops, they remain in the same clip
            update_crop = True
        i = 0
        if len(crops) == 0:
            out_vids[i].write(frame['data'])
            i +=1
        else:
            # The full-frame writer receives the frame with the cropped
            # regions removed/blanked.
            out_vids[i].write(reverse_crop(frame['data'], crops))
            i +=1
        for cr in crops:
            if update_crop:
                # NOTE(review): crop_positions is indexed with i, which
                # counts the full-frame writer as 0 followed by the
                # crops, but entries were appended per crop only -- this
                # looks like an off-by-one; confirm.
                crop_positions[i][frame['frame']] = (cr[0], cr[1])
            fr = crop_box(frame['data'], cr)
            out_vids[i].write(fr)
            i +=1
        header.update(frame)
        global_time_header.update(frame)
    # write last segment
    base_seq = seq
    for i in range(len(crops) + 1):
        if output_extern:
            ref_name = os.path.join(scratch, r_name)
            ref_file = add_ext(ref_name, '.txt')
            write_ref_file(ref_file, file_names[i])
            file_names[i] = ref_file #ref_file becomes the video
            ext = '.ref'
        else:
            ext = '.seq'
        header_dict = header.getHeader()
        if i == 0 and len(crops) != 0:
            header_dict['crop_group'] = base_seq + len(crops)
        elif len(crops) != 0:
            header_dict = crop_header(header_dict, crops[i - 1])
            header_dict['crop_id'] = base_seq
        # NOTE(review): unlike the in-loop flush above, this final flush
        # does not set header_dict['crop_position'] -- confirm whether
        # that is intentional.
        header_dict['seq'] = seq
        output_files.append(build_fmt_file(header_dict, \
                            file_names[i], \
                            scratch, \
                            add_ext(output, ext, seq), \
                            header_cmp, \
                            RAW,
                            r_names[i]))
        out_vids[i].release()
        seq += 1
    # Global start/end metadata block for the whole video.
    output_files.append(write_block(global_time_header.getHeader(), \
                        None ,\
                        add_ext(output, '.start')))
    return output_files
def _update_storage_header(file_path, header):
    """Refresh *header* via its StorageHeader wrapper and persist the
    result to *file_path*.
    """
    storage_header = StorageHeader(header)
    storage_header.update()
    write_block(header, None, file_path)
#gets a file of a particular index and if there are external files
#consider parallelizing this better
def file_get(file):
    """Split an archived clip file into its header and video parts.

    Returns a 4-tuple of (decoded header data, video part, whether
    *file* is an external reference, raw header part).
    """
    parts = ncpy_unstack_block(file)
    # The header and video components may appear in either order; the
    # header part is recognised by its '.head' marker.
    head_idx = 0 if '.head' in parts[0] else 1
    video_idx = 1 - head_idx
    header_blocks = unstack_block(parts[head_idx], DEFAULT_TEMP, compression_hint=RAW)
    header_data = read_block(header_blocks[0])
    return header_data, parts[video_idx], is_ref_name(file), parts[head_idx]
def _all_files(output):
    """Collect the consecutive '.seq'/'.ref' segment files of a video.

    Segments are numbered from 0; scanning stops at the first index for
    which neither variant exists. '.seq' is checked before '.ref', as in
    the original implementation.
    """
    found = []
    seq = 0
    while True:
        for ext in ('.seq', '.ref'):
            candidate = add_ext(output, ext, seq)
            if os.path.exists(candidate):
                found.append(candidate)
                break
        else:
            # Neither extension exists for this index: end of the video.
            break
        seq += 1
    return found
#delete a video
def delete_video_if_exists(output):
    """Delete every file belonging to the video stored under *output*.

    Removes the '.start' metadata block plus all consecutive '.seq'
    (internal) and '.ref' (external reference) segments. For external
    segments, the referenced storage directory is resolved via the
    reference file and removed as well. A video that does not exist
    (no '.start' file) is silently ignored.
    """
    start_file = add_ext(output, '.start')
    if not os.path.exists(start_file):
        return
    # Fix: removed a leftover debug print(output) from the original.
    os.remove(start_file)
    seq = 0
    while True:
        f = add_ext(output, '.seq', seq)
        if path.exists(f):
            shutil.rmtree(f)
            seq += 1
            continue
        f = add_ext(output, '.ref', seq)
        if path.exists(f):
            parsed = ncpy_unstack_block(f)
            # The reference part is whichever component is not the header.
            if '.head' in parsed[0]:
                video_ref = parsed[1]
            else:
                video_ref = parsed[0]
            seq_name = read_ref_file(video_ref)
            shutil.rmtree(seq_name)
            shutil.rmtree(f)
            seq += 1
            continue
        break
#counter using the start and end
def move_to_extern_if(output, condition, output_extern, threads=None):
    """move_to_extern_if takes a written archive file and moves to extern
    those video clips that satisfy a certain header condition.
    Args:
        output (string) - internal url
        condition (lambda) - a condition on the header content
        output_extern (string) - external url
    Returns:
        output if at least one clip was moved, otherwise None.
    """
    # Fix: the original built a dict and iterated .items(), which yields
    # (filename, 4-tuple) pairs and crashed on the 4-way unpack below;
    # it also failed with NameError on 'header_data'. Both branches now
    # yield a plain sequence of file_get() result tuples.
    if threads is None:
        # this isn't optimal because we could easily skip over groups
        pre_parsed = [file_get(file) for file in _all_files(output)]
    else:
        pre_parsed = threads.map(file_get, _all_files(output))
    rtn = None
    for (header, clip, is_extern, _) in pre_parsed:
        if condition(header):
            if not is_extern:
                clip_file = os.path.basename(clip)
                seq = header['seq']
                extern_dir = output_extern + str(seq)
                if not os.path.exists(extern_dir):
                    os.mkdir(extern_dir)
                vid_file = os.path.join(extern_dir, clip_file)
                os.rename(clip, vid_file)
                # Rename the containing '.seq' directory to '.ref' and
                # drop a reference file pointing at the external dir.
                seq_dir = os.path.dirname(clip)
                ref_dir = seq_dir[:seq_dir.rfind('.')]
                ref_dir = add_ext(ref_dir, '.ref')
                shutil.move(seq_dir, ref_dir)
                clip_string = clip_file[:clip_file.rfind('.')]
                clip_string = add_ext(clip_string, '.txt')
                ref_file = os.path.join(ref_dir, clip_string)
                write_ref_file(ref_file, extern_dir)
                rtn = output
    return rtn
def move_from_extern_if(output, condition, threads=None):
    """move_from_extern_if takes a written archive file and moves back to
    internal storage those video clips that satisfy a certain header
    condition.
    Args:
        output (string) - directory
        condition (lambda) - a condition on the header content
    """
    # Fix: the original built a SET of result tuples and then called
    # .items() on it, which raised AttributeError before any clip could
    # be moved. Both branches now yield a plain sequence of file_get()
    # result tuples that is iterated directly.
    if threads is None:
        pre_parsed = [file_get(file) for file in _all_files(output)]
    else:
        pre_parsed = threads.map(file_get, _all_files(output))
    rtn = []
    for (header, ref_file, is_extern, _) in pre_parsed:
        if condition(header):
            if is_extern:
                extern_dir = read_ref_file(ref_file)
                if extern_dir.endswith('/'):
                    extern_dir = extern_dir[:-1]
                base_dir = os.path.join(Path(output).parent, os.path.basename(extern_dir))
                seq_dir = add_ext(base_dir, '.seq')
                ref_dir = add_ext(base_dir, '.ref')
                delete_ref_file(ref_file)
                # Turn the '.ref' directory back into a '.seq' directory
                # and pull the video files in from external storage.
                shutil.move(ref_dir, seq_dir)
                files = os.listdir(extern_dir)
                for f in files:
                    if f.endswith('.avi'):
                        f_full = os.path.join(extern_dir, f)
                        shutil.move(f_full, seq_dir)
                os.rmdir(extern_dir)
                rtn = output
    return rtn
def check_extern_if(output, condition, threads=None):
    """Return True if any clip of the archive satisfying *condition*
    currently lives in external storage, False otherwise.
    Args:
        output (string) - archive file
        condition (lambda) - a condition on the header content
    """
    # Fix: the original built a SET of result tuples and called .items()
    # on it, which raised AttributeError; it also carried unused 'seq'
    # and 'rtn' variables. Both branches now yield a plain sequence of
    # file_get() result tuples.
    if threads is None:
        pre_parsed = [file_get(file) for file in _all_files(output)]
    else:
        pre_parsed = threads.map(file_get, _all_files(output))
    for (header, clip, is_extern, _) in pre_parsed:
        if condition(header) and is_extern:
            return True
    return False
#counter using the start and end
def read_if(output, condition, clip_size=5, scratch = DEFAULT_TEMP, threads=None):
"""read_if takes a written archive file and reads only
those video clips that satisfy a certain header condition.
Args:
output (string) - archive file
condition (lambda) - a condition on the header content
scratch (string) - a temporary file path
"""
#read the meta data
seg_start_data = read_block(add_ext(output, '.start'))
clips = clip_boundaries(seg_start_data['start'],\
seg_start_data['end'],\
clip_size)
boundaries = []
streams = []
relevant_clips = set()
if threads == None:
pre_parsed = [file_get(file) for file in _all_files(output)]
else:
pre_parsed = threads.map(file_get, _all_files(output))
for header, clip, is_extern, | |
Extract data
dfs = {}
for cruise_name in cruise_files.keys():
print('Extracting: ', cruise_name, cruise_files[cruise_name])
# cruise_name = cruise_files.keys()[0]
df = pd.read_excel(folder+cruise_files[cruise_name])
names_dict = {
'Date': 'date', 'UTC': 'date', 'time (UTC)': 'time', 'lat': 'LAT',
'lon': 'LON'
}
if verbose:
print(df.head())
df.rename(columns=names_dict, inplace=True)
if verbose:
print(df.head())
# convert dates to datetime
# def _convert_datetime(x):
# return (270-atan2(x['date'],x['GMAO_UWND'])*180/pi)%360
# df['datetime'] = df.apply( f, axis=1)
df['datetime'] = df['date'].astype(str)+' '+df['time'].astype(str)
df['datetime'] = pd.to_datetime(df['datetime'])
df.index = df['datetime'].values
if verbose:
print(df.head())
dfs[cruise_name] = df[['datetime', 'LON', 'LAT']]
return dfs
def TEST_AND_PROCESS_iodide_cruise_output(just_process_surface_data=False):
    """
    Process, plot (test values), then save planeflight values to csv

    Parameters
    -------
    just_process_surface_data (bool): process the surface planeflight
        output only; otherwise the column output is processed instead.

    Notes
    -----
    NOTE(review): the run directories are hard-coded to a specific
    scratch location and the 'AC' helper module and plotting/processing
    functions are assumed to be available at module level.
    """
    # Local variables
    wd = '/scratch/ts551/GC/v10-01_HAL/'
    # One GEOS-Chem run directory per cruise.
    files_dict = {
        'SOE-8': wd+'run.ClBr.Iodide2015.SOE-8',
        'IIOE-1': wd+'run.ClBr.Iodide2016.IIOE-1',
        'SOE-9': wd+'run.ClBr.Iodide2017.SOE-9',
    }
    # Test surface output
    if just_process_surface_data:
        extra_str = 'surface'
        dfs = {}
        for key_ in files_dict.keys():
            wd = files_dict[key_]+'/plane_flight_logs_{}/'.format(extra_str)
            df = process_planeflight_files(wd=wd)
            dfs[key_] = df
            get_test_plots_surface_pf_output(df=df,
                                             name='{} ({})'.format(key_, extra_str))
        # Save the output as .csv
        for key_ in dfs.keys():
            savetitle = 'GC_planeflight_compiled_output_for_{}_{}.csv'
            savetitle = savetitle.format(key_, extra_str)
            savetitle = AC.rm_spaces_and_chars_from_str(savetitle)
            dfs[key_].to_csv(savetitle)
    # - Process the output files for column values
    else:
        specs = ['O3', 'BrO', 'IO', 'CH2O']
        extra_str = 'column'
        dfs = {}
        file_str = 'GC_planeflight_compiled_output_for_{}_{}_II.csv'
        for key_ in files_dict.keys():
            # for key_ in ['IIOE-1']:
            print(key_)
            pf_wd = files_dict[key_]+'/plane_flight_logs_{}/'.format(extra_str)
            df = process_planeflight_files(wd=pf_wd)
            # now process to column values
            df = process_planeflight_column_files(wd=files_dict[key_], df=df)
            dfs[key_] = df
            # Save the output as .csv
            savetitle = file_str.format(key_, extra_str)
            df['datetime'] = df.index
            df.to_csv(AC.rm_spaces_and_chars_from_str(savetitle))
        # Test plots?
        # Re-read the freshly written CSVs and plot them as a sanity
        # check of the saved column values.
        for key_ in files_dict.keys():
            savetitle = file_str.format(key_, extra_str)
            df = pd.read_csv(AC.rm_spaces_and_chars_from_str(savetitle))
            df.index = pd.to_datetime(df['datetime'])
            get_test_plots_surface_pf_output(df=df,
                                             name='{} ({})'.format(
                                                 key_, extra_str),
                                             specs=specs, units='molec cm$^{-2}$',
                                             scale=1)
def process_planeflight_column_files(wd=None, df=None, res='4x5', debug=False):
    """
    Process column of v/v values into single values for total column

    Parameters
    -------
    wd (str): model run directory holding the ctm.nc output
    df (pd.DataFrame): planeflight output, one row per vertical level per
        timestamp (indexed by timestamp)
    res (str): horizontal model resolution (e.g. '4x5')
    debug (bool): print extra per-timestamp diagnostics

    Returns
    -------
    (pd.DataFrame): one row per timestamp holding tropospheric column values
        (molecules cm^-2) per species, plus 'LON' and 'LAT' columns
    """
    # wd=files_dict[key_]; df = dfs[ key_ ]; res='4x5'
    specs = ['O3', u'BrO', u'IO', u'CH2O', u'GLYX']
    # Unique, sorted timestamps to process
    timestamps = list(sorted(set(df.index)))
    timestamps_with_duplicates = []
    RMM_air = AC.constants('RMM_air')
    AVG = AC.constants('AVG')
    # NOTE(review): 'specs' is re-assigned here, silently dropping GLYX
    # from the list defined a few lines above
    specs = ['O3', 'BrO', 'IO', 'CH2O']
    # get lon lat array of time in troposphere
    TPS = AC.get_GC_output(wd=wd+'/', vars=['TIME_TPS__TIMETROP'],
                           trop_limit=True)
    # convert this to boolean (<1 == not strat)
    # Boxes always in the troposphere (value == 1) end up with mask False
    # (kept); everything else becomes True (masked out below)
    TPS[TPS != 1] = 9999.9
    TPS[TPS == 1] = False
    TPS[TPS == 9999.9] = True
    # And dates
    CTM_DATES = AC.get_gc_datetime(wd=wd+'/')
    CTM_months = np.array([i.month for i in CTM_DATES])
    # a EPOCH = datetime.datetime(1970,1,1)
    # CTM_EPOCH = np.array([ (i.month-EPOCH).total_seconds() for i in CTM_DATES ])
    # Also get grid of surface area ( m^2 ) and convert to cm2
    S_AREA = AC.get_surface_area(res=res) * 10000
    # Air mass per grid box
    A_M = AC.get_GC_output(wd, vars=['BXHGHT_S__AD'], trop_limit=True,
                           dtype=np.float64)
    # VOL = AC.get_volume_np( wd=wd, res=res, s_area=S_AREA[...,None])
    big_data_l = []
    dates = []
    # for ts in timestamps[::1000]: # Test processing on first 1000 points
    n_timestamps = len(timestamps)
    for n_ts, ts in enumerate(timestamps):
        print('progress= {:.3f} %'.format((float(n_ts) / n_timestamps)*100.))
        # All rows (vertical levels) logged for this timestamp
        tmp_df = df.loc[df.index == ts]
        if debug:
            print(ts, tmp_df.shape)
        # List of pressures (one set = 47 )
        PRESS_ = tmp_df['PRESS'].values
        # special condition for where there is more than column set
        # for a timestamp
        # assert( len(PRESS) == 47 )
        if len(PRESS_) != 47:
            timestamps_with_duplicates += [ts]
            prt_str = 'WARNING: DOUBLE UP IN TIMESTEP:{} ({}, shape={})'
            print(prt_str.format(ts, len(PRESS_), tmp_df.shape))
            print('Just using 1st 47 values')
            tmp_df = tmp_df[0:47]
            dates += [ts]
        else:
            dates += [ts]
        # Now reverse data (as outputted from highest to lowest)
        tmp_df = tmp_df.loc[::-1]
        # select everyother value?
        # lon select locations
        LAT_ = tmp_df['LAT'].values
        LON_ = tmp_df['LON'].values
        # check there is only one lat and lon
        assert len(set(LAT_)) == 1
        assert len(set(LON_)) == 1
        # - Select 3D vars from ctm.nc file
        # get LON, LAT index of box
        LON_ind = AC.get_gc_lon(LON_[0], res=res)
        LAT_ind = AC.get_gc_lat(LAT_[0], res=res)
        # time_ind = AC.find_nearest( CTM_EPOCH, (ts-EPOCH).total_seconds() )
        # Match by month (NOTE(review): assumes monthly CTM output — confirm)
        time_ind = AC.find_nearest(CTM_months, ts.month)
        # tropspause height? ('TIME_TPS__TIMETROP)
        TPS_ = TPS[LON_ind, LAT_ind, :, time_ind]
        # Select surface area of grid box
        S_AREA_ = S_AREA[LON_ind, LAT_ind, 0]
        # comput column by spec
        A_M_ = A_M[LON_ind, LAT_ind, :, time_ind]
        # Number of molecules per grid box
        MOLECS_ = (((A_M_*1E3) / RMM_air) * AVG)
        # Extract for species
        data_l = []
        for spec in specs:
            # Get species in v/v
            data_ = tmp_df[spec].values
            # Mask for troposphere
            # NOTE(review): assumes the first 38 levels match the
            # trop_limit'ed TPS_/MOLECS_ arrays — confirm
            data_ = np.ma.array(data_[:38], mask=TPS_)
            # Get number of molecules
            data_ = (data_ * MOLECS_).sum()
            # Convert to molecs/cm2
            data_ = data_ / S_AREA_
            # Store data
            data_l += [data_]
        # Save location
        data_l += [LON_[0], LAT_[0]]
        # Save data for all specs
        big_data_l += [data_l]
    # Convert to DataFrame.
    df_col = pd.DataFrame(big_data_l)
    df_col.index = dates  # timestamps[::1000]
    df_col.columns = specs + ['LON', 'LAT']
    print(df_col.shape)
    return df_col
def process_planeflight_files(wd=None):
    """Compile all planeflight log files in a directory to one pd.DataFrame.

    Parameters
    -------
    wd (str): directory containing 'plane.log.*' files

    Returns
    -------
    (pd.DataFrame): concatenated planeflight output with tracer columns
        renamed from 'TRA_XX' to species names
    """
    import glob
    import seaborn as sns
    sns.set_context("paper", font_scale=0.75)
    # Locate the planeflight log files to compile
    log_files = glob.glob(wd+'plane.log.*')
    print(wd, len(log_files), log_files[0])
    # Read the variable names from the first file's header
    names, POINTS = AC.get_pf_headers(log_files[0])
    # Parse every log file, then stack the frames together
    frames = [AC.pf_csv2pandas(file=fname, vars=names) for fname in log_files]
    df = pd.concat(frames)
    # Rename 'TRA_XX' tracer columns to their species names
    tracer_cols = [col for col in df.columns if 'TRA_' in col]
    rename_map = {col: v10_ClBrI_TRA_XX_2_name(col) for col in tracer_cols}
    df.rename(columns=rename_map, inplace=True)
    return df
def get_test_plots_surface_pf_output(wd=None, name='Planeflight',
                                     df=None, specs=None, units=None, scale=1,
                                     show_plot=False):
    """
    Test model output at surface for Indian ship cruises

    Parameters
    -------
    wd (str): directory of planeflight logs (used only if df is not given)
    name (str): label used in plot titles and the output pdf filename
    df (pd.DataFrame): pre-compiled planeflight output to plot
    specs (list): species/variables to plot timeseries for
    units (str): units label; if None, looked up per species via AC.tra_unit
    scale (float): scaling applied to plotted values (ignored when units
        is None, in which case the per-species scale is used)
    show_plot (bool): also show each plot interactively

    Returns
    -------
    (None) - a multi-page pdf of summary plots is saved
    """
    import seaborn as sns
    sns.set(color_codes=True)
    # Get data (compile the planeflight logs if no DataFrame was given)
    if isinstance(df, type(None)):
        # BUG FIX: process_planeflight_files only accepts 'wd' — a 'name'
        # kwarg was previously passed here, raising a TypeError.
        df = process_planeflight_files(wd=wd)
    # Now add summary plots
    dpi = 320
    savetitle = 'GC_planeflight_summary_plots_for_{}_V'.format(name)
    savetitle = AC.rm_spaces_and_chars_from_str(savetitle)
    pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi, no_dstr=True)
    # Locations outputted for?
    title = 'Locations of {} output'.format(name)
    fig, ax = plt.subplots()
    AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
                                     lons=df['LON'].values, lats=df['LAT'].values,
                                     fig=fig, ax=ax)
    AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=True)
    if show_plot:
        plt.show()
    # Timeseries of key species
    if isinstance(specs, type(None)):
        key_spec = ['O3', 'NO', 'NO2', 'OH', 'HO2', 'IO', 'BrO']
        extras = ['SO4', 'DMS', 'CH2O', ]
        species = ['OH', 'HO2', 'GLYX']
        specs = key_spec + extras + species
    met = ['GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP',
           'GMAO_UWND', 'GMAO_VWND']
    # BUG FIX: build a new list instead of extending the caller's 'specs'
    # in place (repeated calls used to grow the caller's list each time).
    specs = list(specs) + ['LON', 'LAT'] + met
    print(specs)
    # BUG FIX: decide once whether units should be looked up per species.
    # Previously the lookup only ran while 'units' was None, so every
    # species after the first reused the first species' units and scale.
    auto_units = isinstance(units, type(None))
    for spec in specs:
        fig, ax = plt.subplots()
        if auto_units:
            units, scale = AC.tra_unit(spec, scale=True)
        try:
            spec_LaTeX = AC.latex_spec_name(spec)
        except Exception:
            # Fall back to the raw name if no LaTeX name is known
            spec_LaTeX = spec
        print(spec, units, spec_LaTeX, scale)
        dates = pd.to_datetime(df.index).values
        plt.plot(dates, df[spec].values*scale)
        plt.ylabel('{} ({})'.format(spec, units))
        title_str = "Timeseries of modelled '{}' during {}"
        plt.title(title_str.format(spec_LaTeX, name))
        plt.xticks(rotation=45)
        plt.subplots_adjust(bottom=0.15)
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=True)
        if show_plot:
            plt.show()
        plt.close()
    # Save entire pdf
    AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi, no_dstr=True)
def mk_data_files4Indian_seasurface_paper(res='0.125x0.125'):
"""
Make data files for the indian ocean surface iodide paper
"""
AreasOfInterest = {
'SubT_NA': ('NASW', 'NATR', 'NASE', ),
'SubT_SA': ('SATL',),
'SubT_NP': (u'NPSW', 'NPTG'),
'SubT_SP': ('SPSG',),
'SubT_SI': ('ISSG',),
}
AreasOfInterest_Names = AreasOfInterest.copy()
# Get dictionaries of province numbers and names
num2prov = LonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
MRnum2prov = MarineRegionsOrg_LonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
Rnum2prov = RosieLonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
# Convert regions to the LP numbers
PrtStr = "{} = Requested province: {} - R's #={}, MIT(GitHub) #={}, LH(2010) #={}"
for key_ in AreasOfInterest.keys():
for a_ in AreasOfInterest[key_]:
print(PrtStr.format(
key_, a_, Rnum2prov[a_], num2prov[a_], MRnum2prov[a_]))
nums = [MRnum2prov[i] for i in AreasOfInterest[key_]]
AreasOfInterest[key_] = nums
# - Get data all together
Filename = 'Oi_prj_predicted_iodide_0.125x0.125_No_Skagerrak_WITH_Provinces.nc'
# folder = '/work/home/ts551/data/iodide/'
folder = './'
ds = xr.open_dataset(folder + Filename)
params = ['Chance2014_STTxx2_I',
'MacDonald2014_iodide', 'Ensemble_Monthly_mean']
vars2use = params + ['LonghurstProvince']
ds = ds[vars2use]
# Also add the features of interest
Filename = 'Oi_prj_feature_variables_0.125x0.125_WITH_Provinces.nc'
ds2 = xr.open_dataset(folder + Filename)
vars2add = ['WOA_MLDpt', 'WOA_Nitrate', 'WOA_TEMP', 'WOA_Salinity']
for var in vars2add:
ds[var] = ds2[var]
# Add axis X/Y assignment
attrs = ds['lat'].attrs
attrs["axis"] = 'Y'
ds['lat'].attrs = attrs
attrs = ds['lon'].attrs
attrs["axis"] = 'X'
ds['lon'].attrs = attrs
# - Now extract the data and check the locations being extracted
# Make files with the data of interest.
file_str = 'Oi_OS_Longhurst_provinces_{}_{}_{}.{}'
for key_ in AreasOfInterest.keys():
nums = AreasOfInterest[key_]
ds_tmp = ds.where(np.isin(ds.LonghurstProvince.values, nums))
# - Plot a diagnostic figure
fig, ax = plt.subplots()
ds_tmp['LonghurstProvince'].mean(dim='time').plot(ax=ax)
# get names and numbers of assigned areas
Names = AreasOfInterest_Names[key_]
nums = [str(i) for i in AreasOfInterest[key_]]
# Add a title
nums = [str(i) for i in nums]
title = "For '{}' ({}), \n plotting #(s)={}"
title = title.format(key_, ', '.join(Names), ', '.join(nums))
plt.title(title)
# Save to png
png_filename = file_str.format(key_, '', res, 'png')
plt.savefig(png_filename, dpi=dpi)
plt.close()
# - What is the area extent of the data
var2use | |
# File: phy/plot/interact.py
# -*- coding: utf-8 -*-
"""Common layouts."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
import numpy as np
from phylib.utils import emit
from phylib.utils.geometry import get_non_overlapping_boxes, get_closest_box
from .base import BaseLayout
from .transform import Scale, Range, Subplot, Clip, NDC
from .utils import _get_texture, _in_polygon
from .visuals import LineVisual, PolygonVisual
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Grid
#------------------------------------------------------------------------------
class Grid(BaseLayout):
    """Layout arranging subplots on a 2D grid.

    Constructor
    -----------
    shape : tuple or str
        Number of rows, cols in the grid.
    shape_var : str
        Name of the GLSL uniform variable that holds the shape, when it is variable.
    box_var : str
        Name of the GLSL variable with the box index.
    has_clip : boolean
        Whether subplots should be clipped.

    Note
    ----
    To be used in a grid, a visual must define `a_box_index` (by default) or another GLSL
    variable specified in `box_var`.
    """

    margin = .075
    n_dims = 2
    active_box = (0, 0)
    _scaling = (1., 1.)

    def __init__(self, shape=(1, 1), shape_var='u_grid_shape', box_var=None, has_clip=True):
        super(Grid, self).__init__(box_var=box_var)
        self.shape_var = shape_var
        self._shape = shape
        inner = 1 - self.margin
        # GPU transform chain, applied in order:
        # 1. Global scaling.
        self.gpu_transforms.add(Scale(self._scaling, gpu_var='u_grid_scaling'))
        # 2. Margin.
        self.gpu_transforms.add(Scale((inner, inner)))
        # 3. Clipping for the subplots.
        if has_clip:
            self.gpu_transforms.add(Clip([-inner, -inner, +inner, +inner]))
        # 4. Subplots. Parameters are callables so they can change dynamically.
        self.gpu_transforms.add(Subplot(
            shape=lambda: self._shape, index=lambda: self.active_box,
            shape_gpu_var=self.shape_var, index_gpu_var=self.box_var))

    def attach(self, canvas):
        """Attach this grid layout to a canvas."""
        super(Grid, self).attach(canvas)
        canvas.gpu_transforms += self.gpu_transforms
        canvas.inserter.insert_vert(
            """
            attribute vec2 {};
            uniform vec2 {};
            uniform vec2 u_grid_scaling;
            """.format(self.box_var, self.shape_var),
            'header', origin=self)

    def add_boxes(self, canvas, shape=None):
        """Display the borders of every subplot box."""
        shape = shape or self.shape
        assert isinstance(shape, tuple)
        n, m = shape
        n_boxes = n * m
        a = 1 - .0001
        # One quad outline (4 segments) per box.
        pos = np.array([[-a, -a, +a, -a],
                        [+a, -a, +a, +a],
                        [+a, +a, -a, +a],
                        [-a, +a, -a, -a],
                        ])
        pos = np.tile(pos, (n_boxes, 1))
        # Each segment has 2 vertices, hence 8 vertices per box.
        box_index = np.vstack([[i, j] for i in range(n) for j in range(m)])
        box_index = np.repeat(box_index, 8, axis=0)
        boxes = LineVisual()
        # We exclude this interact when adding the visual.
        canvas.add_visual(boxes, clearable=False)
        boxes.set_data(pos=pos)
        boxes.set_box_index(box_index)
        canvas.update()

    def get_closest_box(self, pos):
        """Get the box index (i, j) closest to a given position in NDC coordinates."""
        x, y = pos
        n_rows, n_cols = self.shape
        col = np.clip(int(n_cols * (1. + x) / 2.), 0, n_cols - 1)
        row = np.clip(int(n_rows * (1. - y) / 2.), 0, n_rows - 1)
        return row, col

    def update_visual(self, visual):
        """Upload the current grid shape and scaling uniforms to a visual."""
        super(Grid, self).update_visual(visual)
        if self.shape_var in visual.program:
            visual.program[self.shape_var] = self._shape
            visual.program['u_grid_scaling'] = self._scaling

    @property
    def shape(self):
        """The grid shape as (n_rows, n_cols)."""
        return self._shape

    @shape.setter
    def shape(self, value):
        self._shape = value
        self.update()

    @property
    def scaling(self):
        """The grid scaling as (sx, sy)."""
        return self._scaling

    @scaling.setter
    def scaling(self, value):
        self._scaling = value
        self.update()
#------------------------------------------------------------------------------
# Boxed
#------------------------------------------------------------------------------
class Boxed(BaseLayout):
    """Layout showing plots in rectangles at arbitrary positions. Used by the waveform view.

    The boxes are specified via their center positions and optional sizes, in which case
    an iterative algorithm is used to find the largest box size that will not make them overlap.

    Constructor
    ----------
    box_pos : array-like (2D, shape[1] == 2)
        Position of the centers of the boxes.
    box_var : str
        Name of the GLSL variable with the box index.
    keep_aspect_ratio : boolean
        Whether to keep the aspect ratio of the bounds.

    Note
    ----
    To be used in a boxed layout, a visual must define `a_box_index` (by default) or another GLSL
    variable specified in `box_var`.
    """

    # Relative margin used when computing the box layout.
    margin = .1
    # Box index is a scalar here (unlike Grid's (row, col) pair).
    n_dims = 1
    # Index of the currently-active box.
    active_box = 0
    # (width, height) multipliers applied to the box size.
    _box_scaling = (1., 1.)
    # (x, y) multipliers applied to the box positions.
    _layout_scaling = (1., 1.)
    # Multiplicative step used by the expand_*/shrink_* helpers.
    _scaling_param_increment = 1.1

    def __init__(self, box_pos=None, box_var=None, keep_aspect_ratio=False):
        super(Boxed, self).__init__(box_var=box_var)
        self._key_pressed = None
        self.keep_aspect_ratio = keep_aspect_ratio
        # Compute non-overlapping box positions and a common box size.
        self.update_boxes(box_pos)
        # Map NDC onto the bounds of the active box on the GPU.
        self.gpu_transforms.add(Range(
            NDC, lambda: self.box_bounds[self.active_box],
            from_gpu_var='vec4(-1, -1, 1, 1)', to_gpu_var='box_bounds'))

    def attach(self, canvas):
        """Attach the boxed interact to a canvas."""
        super(Boxed, self).attach(canvas)
        canvas.gpu_transforms += self.gpu_transforms
        # Declare the per-vertex box index and the layout uniforms.
        canvas.inserter.insert_vert("""
            #include "utils.glsl"
            attribute float {};
            uniform sampler2D u_box_pos;
            uniform float n_boxes;
            uniform vec2 u_box_size;
            uniform vec2 u_layout_scaling;
            """.format(self.box_var), 'header', origin=self)
        # Compute box_bounds from the box-position texture at vertex time.
        canvas.inserter.insert_vert("""
            // Fetch the box bounds for the current box (`box_var`).
            vec2 box_pos = fetch_texture({}, u_box_pos, n_boxes).xy;
            box_pos = (2 * box_pos - 1); // from [0, 1] (texture) to [-1, 1] (NDC)
            box_pos = box_pos * u_layout_scaling;
            vec4 box_bounds = vec4(box_pos - u_box_size, box_pos + u_box_size);
            """.format(self.box_var), 'start', origin=self)

    def update_visual(self, visual):
        """Update a visual."""
        super(Boxed, self).update_visual(visual)
        # Box centers are passed to the shader via a texture.
        box_pos = _get_texture(self.box_pos, (0, 0), self.n_boxes, [-1, 1])
        box_pos = box_pos.astype(np.float32)
        if 'u_box_pos' in visual.program:
            logger.log(5, "Update visual with interact Boxed.")
            visual.program['u_box_pos'] = box_pos
            visual.program['n_boxes'] = self.n_boxes
            visual.program['u_box_size'] = np.array(self.box_size) * np.array(self._box_scaling)
            visual.program['u_layout_scaling'] = self._layout_scaling

    def update_boxes(self, box_pos):
        """Update the box positions and automatically-computed size."""
        self.box_pos, self.box_size = get_non_overlapping_boxes(box_pos)

    def add_boxes(self, canvas):
        """Show the boxes borders."""
        n_boxes = len(self.box_pos)
        a = 1 + .05
        # One quad outline (4 segments of 2 vertices) per box.
        pos = np.array([[-a, -a, +a, -a],
                        [+a, -a, +a, +a],
                        [+a, +a, -a, +a],
                        [-a, +a, -a, -a],
                        ])
        pos = np.tile(pos, (n_boxes, 1))
        boxes = LineVisual()
        box_index = np.repeat(np.arange(n_boxes), 8)
        canvas.add_visual(boxes, clearable=False)
        boxes.set_data(pos=pos, color=(.5, .5, .5, 1))
        boxes.set_box_index(box_index)
        canvas.update()

    # Change the box bounds, positions, or size
    #--------------------------------------------------------------------------

    @property
    def n_boxes(self):
        """Total number of boxes."""
        return len(self.box_pos)

    @property
    def box_bounds(self):
        """Bounds of the boxes."""
        bs = np.array(self.box_size)
        # (x0, y0, x1, y1) per box, centered on box_pos.
        return np.c_[self.box_pos - bs, self.box_pos + bs]

    def get_closest_box(self, pos):
        """Get the box closest to some position."""
        return get_closest_box(pos, self.box_pos, self.box_size)

    # Box scaling
    #--------------------------------------------------------------------------

    def _increment_box_scaling(self, cw=1., ch=1.):
        # Multiply the box-size scaling by (cw, ch) and refresh.
        self._box_scaling = (self._box_scaling[0] * cw, self._box_scaling[1] * ch)
        self.update()

    @property
    def box_scaling(self):
        """Current (width, height) multipliers applied to the box size."""
        return self._box_scaling

    def expand_box_width(self):
        """Make all boxes wider by one increment."""
        return self._increment_box_scaling(cw=self._scaling_param_increment)

    def shrink_box_width(self):
        """Make all boxes narrower by one increment."""
        return self._increment_box_scaling(cw=1. / self._scaling_param_increment)

    def expand_box_height(self):
        """Make all boxes taller by one increment."""
        return self._increment_box_scaling(ch=self._scaling_param_increment)

    def shrink_box_height(self):
        """Make all boxes shorter by one increment."""
        return self._increment_box_scaling(ch=1. / self._scaling_param_increment)

    # Layout scaling
    #--------------------------------------------------------------------------

    def _increment_layout_scaling(self, cw=1., ch=1.):
        # Multiply the box-position scaling by (cw, ch) and refresh.
        self._layout_scaling = (self._layout_scaling[0] * cw, self._layout_scaling[1] * ch)
        self.update()

    @property
    def layout_scaling(self):
        """Current (x, y) multipliers applied to the box positions."""
        return self._layout_scaling

    def expand_layout_width(self):
        """Spread the boxes further apart horizontally."""
        return self._increment_layout_scaling(cw=self._scaling_param_increment)

    def shrink_layout_width(self):
        """Bring the boxes closer together horizontally."""
        return self._increment_layout_scaling(cw=1. / self._scaling_param_increment)

    def expand_layout_height(self):
        """Spread the boxes further apart vertically."""
        return self._increment_layout_scaling(ch=self._scaling_param_increment)

    def shrink_layout_height(self):
        """Bring the boxes closer together vertically."""
        return self._increment_layout_scaling(ch=1. / self._scaling_param_increment)
class Stacked(Boxed):
    """Layout stacking a number of subplots vertically.

    Parameters
    ----------
    n_boxes : int
        Number of boxes to stack vertically.
    box_var : str
        Name of the GLSL variable with the box index.
    origin : str
        top or bottom

    Note
    ----
    To be used in a boxed layout, a visual must define `a_box_index` (by default) or another GLSL
    variable specified in `box_var`.
    """

    margin = 0
    _origin = 'bottom'

    def __init__(self, n_boxes, box_var=None, origin=None):
        self._origin = origin or self._origin
        assert self._origin in ('top', 'bottom')
        super(Stacked, self).__init__(
            self.get_box_pos(n_boxes), box_var=box_var, keep_aspect_ratio=False)

    @property
    def n_boxes(self):
        """Number of stacked boxes."""
        return len(self.box_pos)

    @n_boxes.setter
    def n_boxes(self, n_boxes):
        # Ignore non-positive box counts.
        if n_boxes < 1:
            return
        self.update_boxes(self.get_box_pos(n_boxes))

    def get_box_pos(self, n_boxes):
        """Return the box centers (x=0, y spread over [-1, 1]) for a given
        number of stacked boxes."""
        ys = np.linspace(-1, 1, n_boxes)
        if self._origin == 'top':
            ys = ys[::-1]
        pos = np.zeros((n_boxes, 2))
        pos[:, 1] = ys
        return pos

    @property
    def origin(self):
        """Whether to show the channels from top to bottom (`top` option, the default), or from
        bottom to top (`bottom`)."""
        return self._origin

    @origin.setter
    def origin(self, value):
        self._origin = value
        self.update_boxes(self.get_box_pos(self.n_boxes))
        self.update()

    def attach(self, canvas):
        """Attach this stacked layout to a canvas."""
        BaseLayout.attach(self, canvas)
        canvas.gpu_transforms += self.gpu_transforms
        # Declare the per-vertex box index and the stacking uniforms.
        canvas.inserter.insert_vert("""
            #include "utils.glsl"
            attribute float {};
            uniform float n_boxes;
            uniform bool u_top_origin;
            uniform vec2 u_box_size;
            """.format(self.box_var), 'header', origin=self)
        # Compute each box's vertical bounds directly in the vertex shader.
        canvas.inserter.insert_vert("""
            float margin = .1 / n_boxes;
            float a = 1 - 2. / n_boxes + margin;
            float b = -1 + 2. / n_boxes - margin;
            float u = (u_top_origin ? (n_boxes - 1. - {bv}) : {bv}) / max(1., n_boxes - 1.);
            float y0 = -1 + u * (a + 1);
            float y1 = b + u * (1 - b);
            float ym = .5 * (y0 + y1);
            float yh = u_box_size.y * (y1 - ym);
            y0 = ym - yh;
            y1 = ym + yh;
            vec4 box_bounds = vec4(-1., y0, +1., y1);
            """.format(bv=self.box_var), 'before_transforms', origin=self)

    def update_visual(self, visual):
        """Upload the stacking uniforms to a visual."""
        BaseLayout.update_visual(self, visual)
        if 'n_boxes' in visual.program:
            visual.program['n_boxes'] = self.n_boxes
            visual.program['u_box_size'] = self._box_scaling
            visual.program['u_top_origin'] = self._origin == 'top'
#------------------------------------------------------------------------------
# Interactive tools
#------------------------------------------------------------------------------
class Lasso(object):
"""Draw a polygon with the mouse and find the points that belong to the inside of the
| |
# Repository: RobertoRoos/ingenialink-python
import time
import threading
import canopen
import struct
import xml.etree.ElementTree as ET
from .._utils import *
from .constants import *
from ..servo import SERVO_STATE
from .._ingenialink import ffi, lib
from .dictionary import DictionaryCANOpen
from .registers import Register, REG_DTYPE, REG_ACCESS
# --- Device identification registers (subnode 1, read-only) ---

SERIAL_NUMBER = Register(
    identifier='', units='', subnode=1, idx="0x26E6", subidx="0x00", cyclic='CONFIG',
    dtype=REG_DTYPE.U32, access=REG_ACCESS.RO
)
PRODUCT_CODE = Register(
    identifier='', units='', subnode=1, idx="0x26E1", subidx="0x00", cyclic='CONFIG',
    dtype=REG_DTYPE.U32, access=REG_ACCESS.RO
)
SOFTWARE_VERSION = Register(
    identifier='', units='', subnode=1, idx="0x26E4", subidx="0x00", cyclic='CONFIG',
    dtype=REG_DTYPE.STR, access=REG_ACCESS.RO
)
REVISION_NUMBER = Register(
    identifier='', units='', subnode=1, idx="0x26E2", subidx="0x00", cyclic='CONFIG',
    dtype=REG_DTYPE.U32, access=REG_ACCESS.RO
)

# --- Per-subnode status word registers (read-only, cyclically transmitted) ---
STATUS_WORD_REGISTERS = {
    1: Register(
        identifier='', units='', subnode=1, idx="0x6041", subidx="0x00", cyclic='CYCLIC_TX',
        dtype=REG_DTYPE.U16, access=REG_ACCESS.RO
    ),
    2: Register(
        identifier='', units='', subnode=2, idx="0x6841", subidx="0x00", cyclic='CYCLIC_TX',
        dtype=REG_DTYPE.U16, access=REG_ACCESS.RO
    ),
    3: Register(
        identifier='', units='', subnode=3, idx="0x7041", subidx="0x00", cyclic='CYCLIC_TX',
        dtype=REG_DTYPE.U16, access=REG_ACCESS.RO
    )
}

# --- Per-subnode control word registers (read-write, cyclically received) ---
CONTROL_WORD_REGISTERS = {
    1: Register(
        identifier='', units='', subnode=1, idx="0x2010", subidx="0x00", cyclic='CYCLIC_RX',
        dtype=REG_DTYPE.U16, access=REG_ACCESS.RW
    ),
    2: Register(
        identifier='', units='', subnode=2, idx="0x2810", subidx="0x00", cyclic='CYCLIC_RX',
        dtype=REG_DTYPE.U16, access=REG_ACCESS.RW
    ),
    3: Register(
        identifier='', units='', subnode=3, idx="0x3010", subidx="0x00", cyclic='CYCLIC_RX',
        dtype=REG_DTYPE.U16, access=REG_ACCESS.RW
    )
}

# --- Per-subnode "store all parameters to NVM" registers (see store_all) ---
STORE_ALL_REGISTERS = {
    1: Register(
        identifier='', units='', subnode=1, idx="0x26DB", subidx="0x00", cyclic='CONFIG',
        dtype=REG_DTYPE.U32, access=REG_ACCESS.RW
    ),
    2: Register(
        identifier='', units='', subnode=2, idx="0x2EDB", subidx="0x00", cyclic='CONFIG',
        dtype=REG_DTYPE.U32, access=REG_ACCESS.RW
    ),
    3: Register(
        identifier='', units='', subnode=3, idx="0x36DB", subidx="0x00", cyclic='CONFIG',
        dtype=REG_DTYPE.U32, access=REG_ACCESS.RW
    )
}
class DriveStatusThread(threading.Thread):
    """Background thread that periodically polls the status word of each
    subnode and pushes the decoded state back to the parent Servo.
    """
    def __init__(self, parent):
        """ Constructor, setting initial variables

        Args:
            parent (Servo): Servo whose subnode states are monitored.
        """
        super(DriveStatusThread, self).__init__()
        self.__parent = parent
        # An Event (instead of a plain bool) lets the polling loop be
        # woken immediately on shutdown rather than waiting out the sleep.
        self.__stop = threading.Event()

    def run(self):
        """Poll every subnode's status word until stop is requested."""
        while not self.__stop.is_set():
            for subnode in range(1, self.__parent.subnodes):
                try:
                    status_word = self.__parent.raw_read(STATUS_WORD_REGISTERS[subnode], subnode=subnode)
                    state = self.__parent.status_word_decode(status_word)
                    self.__parent.set_state(state, subnode=subnode)
                except Exception as e:
                    print('IL: Error getting drive status. Exception: {}'.format(e))
            # Interruptible pause between polling sweeps (returns early
            # as soon as activate_stop_flag() is called).
            self.__stop.wait(1.5)

    def activate_stop_flag(self):
        """Request the polling loop to stop (returns immediately)."""
        self.__stop.set()
class Servo(object):
    def __init__(self, net, node, dict, boot_mode=False):
        """Create a CANopen servo wrapper.

        Args:
            net: Network instance that owns this node.
            node: canopen remote node used for SDO access.
            dict (str): Path to the dictionary file.
            boot_mode (bool): If True, skip reading the device info and
                starting the status polling thread.
        """
        self.__net = net
        self.__node = node
        self.__dict = DictionaryCANOpen(dict)
        self.__info = {}
        # Cached per-subnode servo state (subnodes 1..3), initially NRDY
        self.__state = {
            1: lib.IL_SERVO_STATE_NRDY,
            2: lib.IL_SERVO_STATE_NRDY,
            3: lib.IL_SERVO_STATE_NRDY
        }
        # Callbacks notified on state changes (see state_subscribe/set_state)
        self.__observers = []
        # Serializes SDO access across threads (see raw_read/raw_write)
        self.__lock = threading.RLock()
        self.__units_torque = None
        self.__units_pos = None
        self.__units_vel = None
        self.__units_acc = None
        self.__name = "Drive"
        self.__drive_status_thread = None
        if not boot_mode:
            self.init_info()
    def init_info(self):
        """Read the identification registers, set the initial state of
        subnode 1 and start the background status polling thread."""
        serial_number = self.raw_read(SERIAL_NUMBER)
        product_code = self.raw_read(PRODUCT_CODE)
        sw_version = self.raw_read(SOFTWARE_VERSION)
        revision_number = self.raw_read(REVISION_NUMBER)
        # NOTE(review): the hardware variant is not read from the drive;
        # it is hard-coded to 'A'.
        hw_variant = 'A'
        # Set the current state of servo
        status_word = self.raw_read(STATUS_WORD_REGISTERS[1])
        state = self.status_word_decode(status_word)
        self.set_state(state, 1)
        self.__info = {
            'serial': serial_number,
            'name': self.__name,
            'sw_version': sw_version,
            'hw_variant': hw_variant,
            'prod_code': product_code,
            'revision': revision_number
        }
        self.__drive_status_thread = DriveStatusThread(self)
        self.__drive_status_thread.start()
def stop_drive_status_thread(self):
if self.__drive_status_thread is not None and self.__drive_status_thread.is_alive():
self.__drive_status_thread.activate_stop_flag()
self.__drive_status_thread.join()
self.__drive_status_thread = None
    def emcy_subscribe(self, callback):
        """Subscribe to emergency (EMCY) messages.

        NOTE(review): not implemented; the callback is ignored.
        """
        pass
    def emcy_unsubscribe(self, callback):
        """Unsubscribe from emergency (EMCY) messages.

        NOTE(review): not implemented; the callback is ignored.
        """
        pass
def get_reg(self, reg, subnode=1):
if isinstance(reg, Register):
_reg = reg
elif isinstance(reg, str):
_dict = self.__dict
if not _dict:
raise ValueError('No dictionary loaded')
if reg not in _dict.regs[subnode]:
raise TypeError('Invalid register')
_reg = _dict.regs[subnode][reg]
else:
raise TypeError('Invalid register')
return _reg
def raw_read(self, reg, subnode=1):
""" Raw read from servo.
Args:
reg (Register): Register.
Returns:
int: Otained value
Raises:
TypeError: If the register type is not valid.
"""
_reg = self.get_reg(reg, subnode)
access = _reg.access
if access == REG_ACCESS.WO:
raise TypeError('Register is Write-only')
value = None
dtype = _reg.dtype
error_raised = None
try:
self.__lock.acquire()
if dtype == REG_DTYPE.S8:
value = int.from_bytes(
self.__node.sdo.upload(int(str(_reg.idx), 16), int(str(_reg.subidx), 16)),
"little",
signed=True
)
elif dtype == REG_DTYPE.S16:
value = int.from_bytes(
self.__node.sdo.upload(int(str(_reg.idx), 16), int(str(_reg.subidx), 16)),
"little",
signed=True
)
elif dtype == REG_DTYPE.S32:
value = int.from_bytes(
self.__node.sdo.upload(int(str(_reg.idx), 16), int(str(_reg.subidx), 16)),
"little",
signed=True
)
elif dtype == REG_DTYPE.FLOAT:
[value] = struct.unpack('f', self.__node.sdo.upload(int(str(_reg.idx), 16), int(str(_reg.subidx), 16)))
elif dtype == REG_DTYPE.STR:
value = self.__node.sdo.upload(int(str(_reg.idx), 16), int(str(_reg.subidx), 16)).decode("utf-8")
else:
value = int.from_bytes(
self.__node.sdo.upload(int(str(_reg.idx), 16), int(str(_reg.subidx), 16)),
"little"
)
except Exception as e:
print(_reg.identifier + " : " + str(e))
error_raised = Exception("Read error")
finally:
self.__lock.release()
if error_raised is not None:
raise error_raised
return value
    def read(self, reg, subnode=1):
        """ Read from servo.

        Thin wrapper around raw_read.

        Args:
            reg (str, Register): Register.
            subnode (int): Target subnode.

        Returns:
            float: Obtained value

        Raises:
            TypeError: If the register type is not valid.
        """
        return self.raw_read(reg, subnode=subnode)
    def change_sdo_timeout(self, value):
        """Change the SDO response timeout of the underlying canopen node.

        NOTE(review): units follow canopen's RESPONSE_TIMEOUT (seconds) —
        confirm against the canopen library version in use.
        """
        self.__node.sdo.RESPONSE_TIMEOUT = value
def write(self, reg, data, confirm=True, extended=0, subnode=1):
return self.raw_write(reg, data, confirm=True, extended=0, subnode=subnode)
def raw_write(self, reg, data, confirm=True, extended=0, subnode=1):
""" Raw write to servo.
Args:
reg (Register): Register.
data (int): Data.
confirm (bool, optional): Confirm write.
extended (int, optional): Extended frame.
Raises:
TypeError: If any of the arguments type is not valid or
unsupported.
"""
_reg = self.get_reg(reg, subnode)
if _reg.access == REG_ACCESS.RO:
raise TypeError('Register is Read-only')
# auto cast floats if register is not float
if _reg.dtype == REG_DTYPE.FLOAT:
data = float(data)
elif _reg.dtype == REG_DTYPE.DOMAIN:
pass
else:
data = int(data)
error_raised = None
try:
self.__lock.acquire()
if _reg.dtype == REG_DTYPE.FLOAT:
self.__node.sdo.download(int(str(_reg.idx), 16), int(str(_reg.subidx), 16),
struct.pack('f', data))
elif _reg.dtype == REG_DTYPE.DOMAIN:
self.__node.sdo.download(int(str(_reg.idx), 16), int(str(_reg.subidx), 16), data)
else:
bytes_length = 2
signed = False
if _reg.dtype == REG_DTYPE.U8:
bytes_length = 1
elif _reg.dtype == REG_DTYPE.S8:
bytes_length = 1
signed = True
elif _reg.dtype == REG_DTYPE.U16:
bytes_length = 2
elif _reg.dtype == REG_DTYPE.S16:
bytes_length = 2
signed = True
elif _reg.dtype == REG_DTYPE.U32:
bytes_length = 4
elif _reg.dtype == REG_DTYPE.S32:
bytes_length = 4
signed = True
self.__node.sdo.download(int(str(_reg.idx), 16), int(str(_reg.subidx), 16),
data.to_bytes(bytes_length, byteorder='little', signed=signed))
except Exception as e:
print(_reg.identifier + " : " + str(e))
error_raised = Exception("Write error")
finally:
self.__lock.release()
if error_raised is not None:
raise error_raised
def get_all_registers(self):
for obj in self.__node.object_dictionary.values():
print('0x%X: %s' % (obj.index, obj.name))
if isinstance(obj, canopen.objectdictionary.Record):
for subobj in obj.values():
print(' %d: %s' % (subobj.subindex, subobj.name))
def dict_storage_read(self, new_path):
"""Read all dictionary registers content and put it to the dictionary
storage."""
with open(self.__dict.dict, 'r') as xml_file:
tree = ET.parse(xml_file)
root = tree.getroot()
axis = tree.findall('*/Device/Axes/Axis')
if axis:
# Multiaxis
registers = root.findall('./Body/Device/Axes/Axis/Registers/Register')
else:
# Single axis
registers = root.findall('./Body/Device/Registers/Register')
for element in registers:
try:
if element.attrib['access'] == 'rw':
subnode = int(element.attrib['subnode'])
storage = self.raw_read(element.attrib['id'], subnode=subnode)
element.set('storage', str(storage))
# Update register object
reg = self.__dict.regs[subnode][element.attrib['id']]
reg.storage = storage
reg.storage_valid = 1
except BaseException as e:
print("Exception during dict_storage_read, register " + element.attrib['id'] + ": ", str(e))
tree.write(new_path)
xml_file.close()
def dict_storage_write(self, path):
"""Write current dictionary storage to the servo drive."""
with open(path, 'r') as xml_file:
tree = ET.parse(xml_file)
root = tree.getroot()
axis = tree.findall('*/Device/Axes/Axis')
if axis:
# Multiaxis
registers = root.findall('./Body/Device/Axes/Axis/Registers/Register')
else:
# Single axis
registers = root.findall('./Body/Device/Registers/Register')
for element in registers:
try:
if 'storage' in element.attrib and element.attrib['access'] == 'rw':
self.raw_write(element.attrib['id'], float(element.attrib['storage']),
subnode=int(element.attrib['subnode'])
)
except BaseException as e:
print("Exception during dict_storage_write, register " + element.attrib['id'] + ": ", str(e))
def store_all(self, subnode=1):
""" Store all servo current parameters to the NVM. """
r = 0
try:
self.raw_write(STORE_ALL_REGISTERS[subnode], 0x65766173, subnode=subnode)
except:
r = -1
return r
def dict_load(self, dict_f):
""" Load dictionary.
Args:
dict_f (str): Dictionary.
"""
try:
self.__dict = DictionaryCANOpen(dict_f)
except Exception as e:
print("Error loading a dictionary")
def state_subscribe(self, cb):
""" Subscribe to state changes.
Args:
cb: Callback
Returns:
int: Assigned slot.
"""
r = len(self.__observers)
self.__observers.append(cb)
return r
def status_word_decode(self, status_word):
if (status_word & IL_MC_PDS_STA_NRTSO_MSK) == IL_MC_PDS_STA_NRTSO:
state = lib.IL_SERVO_STATE_NRDY
elif (status_word & IL_MC_PDS_STA_SOD_MSK) == IL_MC_PDS_STA_SOD:
state = lib.IL_SERVO_STATE_DISABLED
elif (status_word & IL_MC_PDS_STA_RTSO_MSK) == IL_MC_PDS_STA_RTSO:
state = lib.IL_SERVO_STATE_RDY
elif (status_word & IL_MC_PDS_STA_SO_MSK) == IL_MC_PDS_STA_SO:
state = lib.IL_SERVO_STATE_ON
elif (status_word & IL_MC_PDS_STA_OE_MSK) == IL_MC_PDS_STA_OE:
state = lib.IL_SERVO_STATE_ENABLED
elif (status_word & IL_MC_PDS_STA_QSA_MSK) == IL_MC_PDS_STA_QSA:
state = lib.IL_SERVO_STATE_QSTOP
elif (status_word & IL_MC_PDS_STA_FRA_MSK) == IL_MC_PDS_STA_FRA:
state = lib.IL_SERVO_STATE_FAULTR
elif (status_word & IL_MC_PDS_STA_F_MSK) == IL_MC_PDS_STA_F:
state = lib.IL_SERVO_STATE_FAULT
else:
state = lib.IL_SERVO_STATE_NRDY
return SERVO_STATE(state)
def set_state(self, state, subnode):
current_state = self.__state[subnode]
if current_state != state:
self.state[subnode] = state
for callback in self.__observers:
callback(state, None, subnode)
def status_word_wait_change(self, status_word, timeout, subnode=1):
r = 0
start_time = int(round(time.time() * 1000))
actual_status_word = self.raw_read(STATUS_WORD_REGISTERS[subnode], subnode=1)
while actual_status_word == status_word:
current_time = int(round(time.time() * 1000))
time_diff = (current_time - start_time)
if time_diff > timeout:
r = lib.IL_ETIMEDOUT
return r
actual_status_word = self.raw_read(STATUS_WORD_REGISTERS[subnode], subnode=1)
return r
def fault_reset(self, subnode=1):
r = 0
retries = 0
status_word = self.raw_read(STATUS_WORD_REGISTERS[subnode], subnode=subnode)
state = self.status_word_decode(status_word)
self.set_state(state, subnode)
while self.state[subnode].value == lib.IL_SERVO_STATE_FAULT or self.state[subnode].value == lib.IL_SERVO_STATE_FAULTR:
# Check if faulty, if so try to reset (0->1)
if retries == FAULT_RESET_RETRIES:
return lib.IL_ESTATE
status_word = self.raw_read(STATUS_WORD_REGISTERS[subnode], subnode=subnode)
self.raw_write(CONTROL_WORD_REGISTERS[subnode], 0, subnode=subnode)
self.raw_write(CONTROL_WORD_REGISTERS[subnode], IL_MC_CW_FR, subnode=subnode)
# Wait until statusword changes
r = self.status_word_wait_change(status_word, PDS_TIMEOUT, subnode=1)
if r < 0:
return r
retries += 1
return r
def enable(self, timeout=2000, subnode=1):
""" Enable PDS. """
| |
v = max(abs(max_stress), abs(min_stress))
if abs(v) < 1e-5:
v = 1e-5
scaling_factor = abs(1./v)
scaling_factors.append(scaling_factor)
ax3.set_xlim(min_x, max_x)
if plot_over_time:
ax3.set_xlabel('t')
margin = abs(max_value - min_value) * 0.1
ax3.set_ylim(min_value - margin, max_value + margin)
ax3.set_ylabel('Other components')
if len(solution_components) > 5:
ncol = max(1,len(solution_components)/10)
plt.subplots_adjust(right=0.66, top=0.94, bottom=0.18)
ax3.legend(prop={'size': 6, }, ncol=ncol, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., frameon=False)
else:
plt.subplots_adjust(right=0.66, top=0.94, bottom=0.18)
ax3.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., frameon=False)
return top_text,
  def animate(i):
    """Per-frame update for the 1D animation: refresh the 2D solution
    curve(s), the optional 3D geometry view, the optional secondary
    component curves and the timestep caption for frame i."""
    global top_text, solution_name, solution_component, last_length, compute_stress
    ##################
    # 2D plot of main solution component
    # display data
    if show_specified_fields:
      # NOTE(review): this loop reuses the name `i`, shadowing the frame
      # index parameter; the later `data[i]` accesses (timestep caption)
      # then use the last field index instead of the frame — confirm intent.
      for i,(field_variable_name, component_name) in enumerate(specified_fields):
        xdata = []
        sdata = []
        for d in data:
          solution_values = py_reader.get_values(d, field_variable_name, component_name)
          if solution_values is None:
            print("field_variable_name: {}, component_name: {}".format(field_variable_name, component_name))
            print("d: {}".format(d))
          xdata.append(d['currentTime'])
          sdata.append(solution_values[0])
        # refresh the line object that is the graph of the curve
        lines_2D[i].set_data(xdata,sdata)
    else:
      # plot over time instead of geometry (for cellml single instance)
      if plot_over_time:
        xdata = []
        sdata = []
        for d in data:
          solution_values = py_reader.get_values(d, solution_name, solution_component)
          xdata.append(d['currentTime'])
          sdata.append(solution_values[0])
      else:
        # plot over geometry
        xdata = py_reader.get_values(data[i], "geometry", geometry_component)
        sdata = py_reader.get_values(data[i], solution_name, solution_component)
        # handle Hermite that have derivative values saved
        if data[i]["basisFunction"] == "Hermite" and data[i]["onlyNodalValues"] == False:
          # Cubic Hermite shape functions on the unit interval [0,1].
          def hermite0(xi):
            return 1 - 3*xi*xi + 2*xi*xi*xi
          def hermite1(xi):
            return xi * (xi-1) * (xi-1)
          def hermite2(xi):
            return xi*xi * (3 - 2*xi)
          def hermite3(xi):
            return xi*xi * (xi-1)
          n_elements = data[i]["nElements"][0]
          n = 20
          new_xdata = np.zeros(n_elements*n)
          new_sdata = np.zeros(n_elements*n)
          #print("n entries: {}, new_xdata:{}".format(n_elements*n, new_xdata))
          #print("xdata: {}".format(xdata))
          # Resample each element at n points from its 4 Hermite coefficients
          # (value + derivative per node, hence the 2*el_no indexing).
          for el_no in range(n_elements):
            c0 = sdata[2*el_no+0]
            c1 = sdata[2*el_no+1]
            c2 = sdata[2*el_no+2]
            c3 = sdata[2*el_no+3]
            #print("parsed coefficients: {} {} {} {}".format(c0,c1,c2,c3))
            for j in range(n):
              xi = float(j)/n
              x = xdata[2*el_no+0]*hermite0(xi) + xdata[2*el_no+1]*hermite1(xi) + xdata[2*el_no+2]*hermite2(xi) + xdata[2*el_no+3]*hermite3(xi)
              #print("xi={}, x={}".format(xi,x))
              new_xdata[el_no*n+j] = x
              new_sdata[el_no*n+j] = c0*hermite0(xi) + c1*hermite1(xi) + c2*hermite2(xi) + c3*hermite3(xi)
              #print("xi={}, s={:.2e}={:.2e}*{:.2e}+ {:.2e}*{:.2e}+ {:.2e}*{:.2e}+ {:.2e}*{:.2e}".format(xi,new_sdata[el_no*n+j],c0,hermite0(xi),c1,hermite1(xi),c2,hermite2(xi),c3,hermite3(xi)))
          xdata = new_xdata
          sdata = new_sdata
        elif data[i]["basisFunction"] == "Lagrange" and data[i]["basisOrder"] == 2:
          # Quadratic Lagrange shape functions on the unit interval [0,1].
          def q0(xi):
            return (2*xi - 1) * (xi-1)
          def q1(xi):
            return 4*(xi - xi*xi)
          def q2(xi):
            return 2*xi*xi - xi
          n_elements = data[i]["nElements"][0]
          n = 20
          new_xdata = np.zeros(n_elements*n)
          new_sdata = np.zeros(n_elements*n)
          #print("n entries: {}, new_xdata:{}".format(n_elements*n, new_xdata))
          #print("xdata: {}".format(xdata))
          for el_no in range(n_elements):
            c0 = sdata[2*el_no+0]
            c1 = sdata[2*el_no+1]
            c2 = sdata[2*el_no+2]
            #print("parsed coefficients: {} {} {}".format(c0,c1,c2))
            for j in range(n):
              xi = float(j)/n
              x = xdata[2*el_no+0]*q0(xi) + xdata[2*el_no+1]*q1(xi) + xdata[2*el_no+2]*q2(xi)
              #print("xi={}, x={} {}*{}+ {}*{}+ {}*{}".format(xi,x,xdata[2*el_no+0],q0(xi),xdata[2*el_no+1],q1(xi),xdata[2*el_no+2],q2(xi)))
              new_xdata[el_no*n+j] = x
              new_sdata[el_no*n+j] = c0*q0(xi) + c1*q1(xi) + c2*q2(xi)
          xdata = new_xdata
          sdata = new_sdata
      # refresh the line object that is the graph of the curve
      line_2D.set_data(xdata,sdata)
    ##################
    # 3D plot of geometry
    if show_geometry:
      # retrieve all values
      xdata = py_reader.get_values(data[0], "geometry", "x")
      ydata = py_reader.get_values(data[0], "geometry", "y")
      zdata = py_reader.get_values(data[0], "geometry", "z")
      # NOTE(review): `sdata` comes from the 2D section above; it is only
      # guaranteed to hold solution values on the non-show_specified_fields
      # path — confirm show_geometry is never combined with that option.
      min_s = min(sdata)
      max_s = max(sdata)
      # plot line segments with corresponding color
      for j in range(len(xdata)):
        normalized_value = (float)(sdata[j] - min_s) / (max_s - min_s)
        lines_3D[j].set_data([xdata[j:j+2], ydata[j:j+2]])
        lines_3D[j].set_3d_properties(zdata[j:j+2])
        lines_3D[j].set_color(plt.cm.jet(normalized_value))
    ##################
    # 2D plot of other components
    if show_components:
      for (j,component_name) in enumerate(solution_components):
        # do not plot main component
        if j == 0:
          continue
        # plot over time instead of geometry
        if plot_over_time:
          xdata = []
          data_comp = []
        # NOTE(review): this second `if plot_over_time:` duplicates the one
        # right above — the two bodies could be merged into one block.
        if plot_over_time:
          for d in data:
            solution_values = py_reader.get_values(d, solution_name, component_name)
            xdata.append(d['currentTime'])
            data_comp.append(solution_values[0])
        else:
          data_comp = py_reader.get_values(data[i], solution_name, component_name)
        # refresh the line object that is the graph of the curve
        line_comp[j].set_data(xdata,np.array(data_comp)*scaling_factors[j])
      # compute stress
      if compute_stress:
        xdata = []
        data_comp = []
        for d in data:
          A_1 = py_reader.get_values(d, solution_name, "razumova/A_1")
          A_2 = py_reader.get_values(d, solution_name, "razumova/A_2")
          # Empirical stress formula from the razumova cross-bridge states.
          stress = ((( (A_1/140)*0.0+ (A_2/140)*0.05) - 0.000107)/0.0021)*0.840625
          xdata.append(d['currentTime'])
          data_comp.append(stress[0])
        # refresh the line object that is the graph of the curve
        line_comp[-1].set_data(xdata,np.array(data_comp)*scaling_factors[-1])
    # display timestep
    if not plot_over_time:
      if 'timeStepNo' in data[i]:
        timestep = data[i]['timeStepNo']
      if 'currentTime' in data[i]:
        current_time = data[i]['currentTime']
      max_timestep = len(data)-1
      # Pad with spaces so a shorter caption fully overwrites a longer one.
      t = "timestep {}/{}, t = {}".format(timestep, max_timestep, current_time)
      if last_length > len(t):
        t += " "*(last_length-len(t))
      last_length = len(t)
      top_text.set_text(t)
    return top_text,
  # Drive the animation: a single dataset (or plot-over-time) is rendered
  # once to fig.pdf; otherwise an mp4 animation plus a first/last-frame
  # comparison figure are written.
  interval = 5000.0 / len(data)
  if len(data) == 1 or plot_over_time:
    init()
    animate(0)
    plt.savefig("fig.pdf")
  else:
    # create animation
    anim = animation.FuncAnimation(fig, animate, init_func=init,
               frames=len(data), interval=interval, blit=False)
    try:
      anim.save("anim.mp4")
    except:
      print("An error occured during the animation.")
    # create plot with first and last dataset
    # plot first dataset
    plt.clf()
    init()
    line_2D, = ax1.plot([], [], '+-', color=(1.0,0.9,0.8), lw=2, label="t=0")
    animate(0)
    # plot last dataset
    i = len(data)-1
    if 'currentTime' in data[i]:
      current_time = data[i]['currentTime']
    line_2D, = ax1.plot([], [], '+-', color=(1.0,0.9,0.8), lw=2, label="t={}".format(current_time))
    animate(i)
    max_timestep = len(data)-1
    if 'timeStepNo' in data[i]:
      timestep = data[i]['timeStepNo']
    top_text.set_text("timesteps 0 and {}".format(timestep))
    if show_geometry:
      ax2.set_title("geometry for t={}".format(current_time))
    plt.sca(ax1)
    #ax.add_line(line0)
    #ax.add_line(line1)
    plt.legend()
    plt.savefig("fig.pdf")
  if show_plot:
    plt.show()
####################
# 2D
if dimension == 2:
  # Pick the solution field: prefer a field literally named "solution",
  # otherwise fall back to the first non-geometry single-component field.
  field_variable_names = py_reader.get_field_variable_names(data[0])
  solution_name = "solution"
  if "solution" not in field_variable_names:
    for field_variable_name in field_variable_names:
      if field_variable_name != "geometry":
        component_names = py_reader.get_component_names(data[0], field_variable_name)
        if len(component_names) == 1:
          solution_name = field_variable_name
          break
  component_names = py_reader.get_component_names(data[0], solution_name)
  solution_component = component_names[0]
  # classical 2D scalar field variables, like in Laplace eq.
  if "displacements" not in field_variable_names:
    debug = False
    min_value, max_value = py_reader.get_min_max(data, solution_name, solution_component)
    min_x, max_x = py_reader.get_min_max(data, "geometry", "x")
    min_y, max_y = py_reader.get_min_max(data, "geometry", "y")
    print( "value range: [{}, {}]".format(min_value, max_value))
    # prepare plot
    fig = plt.figure(1)
    margin = abs(max_value - min_value) * 0.1
    ax = fig.add_subplot(111, projection='3d', xlim=(min_x, max_x), ylim=(min_y, max_y), zlim=(min_value-margin, max_value+margin))
    # surface = ax.plot_surface([], [], [], cmap=cm.coolwarm, linewidth=1,rstride=1,cstride=1) # needed? error with python3
    text = plt.figtext(0.15,0.85,"timestep",size=20)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    # create mesh
    if data[0]["basisFunction"] == "Lagrange":
      n_average_nodes_1D_per_element = data[0]["basisOrder"]
    elif data[0]["basisFunction"] == "Hermite":
      n_average_nodes_1D_per_element = 1
    if data[0]["meshType"] == "StructuredRegularFixed" or data[0]["meshType"] == "RegularFixed" or data[0]["meshType"] == "StructuredDeformable":
      if debug:
        print( "basisfunction: [{}], basisOrder: [{}]".format(data[0]["basisFunction"], data[0]["basisOrder"]))
      # Build the (ny, nx) node-count shape used to reshape the flat
      # coordinate arrays into 2D grids for plot_surface.
      nEntries = []
      for i in range(dimension):
        if "nElements" in data[0]:
          nElements = data[0]["nElements"][i]
        else:
          nElements = data[0]["nElementsLocal"][i]
        nEntries.append(n_average_nodes_1D_per_element * nElements + 1)
      nEntries = nEntries[::-1] # reverse list
      x_positions = py_reader.get_values(data[0], "geometry", "x")
      y_positions = py_reader.get_values(data[0], "geometry", "y")
      X = np.reshape(x_positions, nEntries)
      Y = np.reshape(y_positions, nEntries)
      if debug:
        print( "nEntries: ", nEntries)
        print( "x_positions: ", x_positions)
        print( "X: ",X)
        print( "y_positions: ", y_positions)
        print( "Y: ",Y)
      #print "x_positions shape: {}".format(len(x_positions))
    elif data[0]["meshType"] == "UnstructuredDeformable":
      if not data[0]["onlyNodalValues"]:
        print( "Error: onlyNodalValues is False, set to True in OutputWriter config!")
      x_positions = py_reader.get_values(data[0], "geometry", "x")
      y_positions = py_reader.get_values(data[0], "geometry", "y")
      X = x_positions
      Y = y_positions
      # Triangulate each quad element (2 triangles for linear elements,
      # 8 triangles fanned around the center node for quadratic ones).
      triangles = []
      for elemental_dofs in data[0]["elementalDofs"]:
        # for linear Lagrange and Hermite
        if n_average_nodes_1D_per_element == 1:
          triangles.append([elemental_dofs[0], elemental_dofs[1], elemental_dofs[3]])
          triangles.append([elemental_dofs[0], elemental_dofs[3], elemental_dofs[2]])
        else: # for quadratic Lagrange
          triangles.append([elemental_dofs[0], elemental_dofs[1], elemental_dofs[4]])
          triangles.append([elemental_dofs[0], elemental_dofs[4], elemental_dofs[3]])
          triangles.append([elemental_dofs[4], elemental_dofs[1], elemental_dofs[2]])
          triangles.append([elemental_dofs[4], elemental_dofs[2], elemental_dofs[5]])
          triangles.append([elemental_dofs[4], elemental_dofs[5], elemental_dofs[6]])
          triangles.append([elemental_dofs[4], elemental_dofs[6], elemental_dofs[7]])
          triangles.append([elemental_dofs[4], elemental_dofs[6], elemental_dofs[3]])
          triangles.append([elemental_dofs[4], elemental_dofs[7], elemental_dofs[6]])
def animate(i):
ax.clear()
# display data
solution_shaped = py_reader.get_values(data[i], solution_name, solution_component)
try:
Z = np.reshape(solution_shaped, nEntries)
except:
Z = solution_shaped
if debug:
try:
print( "x shape: {}, y shape: {}, z shape: {}".format(X.shape, Y.shape, Z.shape))
except:
pass
# for unstructured grid use plot_trisurf
if data[0]["meshType"] == "UnstructuredDeformable":
plot = ax.plot_trisurf(X, Y, triangles, Z, cmap=cm.coolwarm, linewidth=1)
# for structured grids use plot_surface
else:
plot = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=1,rstride=1,cstride=1)
ax.set_zlim(min_value-margin, max_value+margin)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
# display timestep
if 'timeStepNo' in data[i]:
timestep = data[i]['timeStepNo']
max_timestep = data[-1]['timeStepNo']
if 'currentTime' in data[i]:
current_time = data[i]['currentTime']
if timestep == -1 or timestep == 0 or timestep == 1:
text.set_text("t = {}".format(current_time))
else:
text.set_text("timestep {}/{}, t = {}".format(timestep, max_timestep, | |
c.config and c.config.create_nonexistent_directories
else:
create = (g.app and g.app.config and
g.app.config.create_nonexistent_directories)
if c: theDir = g.os_path_expandExpression(theDir, c=c)
dir1 = theDir = g.os_path_normpath(theDir)
ok = g.os_path_isdir(dir1) and g.os_path_exists(dir1)
if ok:
return ok
if not force and not create:
return False
# Split theDir into all its component parts.
paths = []
while theDir:
head, tail = g.os_path_split(theDir)
if tail:
paths.append(tail)
theDir = head
else:
paths.append(head)
break
path = ""
paths.reverse()
for s in paths:
path = g.os_path_finalize_join(path, s)
if not g.os_path_exists(path):
try:
if testing:
g.trace('***making', path)
else:
os.mkdir(path)
if verbose and not testing and not g.app.unitTesting:
g.red("created directory:", path)
except Exception:
if verbose: g.error("exception creating directory:", path)
g.es_exception()
return None
return dir1 # All have been created.
#@+node:ekr.20071114113736: *3* g.makePathRelativeTo
def makePathRelativeTo(fullPath, basePath):
    '''Return fullPath relative to basePath, or fullPath unchanged
    when it does not start with basePath.'''
    if not fullPath.startswith(basePath):
        return fullPath
    tail = fullPath[len(basePath):]
    # Drop a single leading separator left over from the prefix removal.
    if tail.startswith(os.path.sep):
        tail = tail[len(os.path.sep):]
    return tail
#@+node:ekr.20090520055433.5945: *3* g.openWithFileName
def openWithFileName(fileName, old_c=None, gui=None):
    """Create a Leo Frame for the indicated fileName if the file exists.

    returns the commander of the newly-opened outline.
    """
    # Note: loadLocalFile takes (fileName, gui, old_c) — the argument
    # order differs from this wrapper's parameter order.
    return g.app.loadManager.loadLocalFile(fileName, gui, old_c)
#@+node:ekr.20150306035851.7: *3* g.readFileIntoEncodedString
def readFileIntoEncodedString(fn, silent=False):
    '''Return the raw bytes of the file whose full path is fn, or None on error.'''
    try:
        f = open(fn, 'rb')
        try:
            return f.read()
        finally:
            f.close()
    except IOError:
        # Missing/unreadable file: report unless the caller asked for silence.
        if not silent:
            g.error('can not open', fn)
    except Exception:
        if not silent:
            g.error('readFileIntoEncodedString: exception reading %s' % (fn))
            g.es_exception()
    return None
#@+node:ekr.20100125073206.8710: *3* g.readFileIntoString
def readFileIntoString(fileName,
    encoding='utf-8', # BOM may override this.
    kind=None, # @file, @edit, ...
    verbose=True,
):
    '''Return the contents of the file whose full path is fileName.
    Return (s,e)
    s is the string, converted to unicode, or None if there was an error.
    e is the encoding of s, computed in the following order:
    - The BOM encoding if the file starts with a BOM mark.
    - The encoding given in the # -*- coding: utf-8 -*- line for python files.
    - The encoding given by the 'encoding' keyword arg.
    - None, which typically means 'utf-8'.
    '''
    # Guard clauses: no name, a directory, or a missing file all
    # return (None, None) after an optional diagnostic.
    if not fileName:
        if verbose: g.trace('no fileName arg given')
        return None, None
    if g.os_path_isdir(fileName):
        if verbose: g.trace('not a file:', fileName)
        return None, None
    if not g.os_path_exists(fileName):
        if verbose: g.error('file not found:', fileName)
        return None, None
    try:
        e = None
        with open(fileName, 'rb') as f:
            s = f.read()
        # Fix #391.
        if not s:
            return '', None
        # New in Leo 4.11: check for unicode BOM first.
        e, s = g.stripBOM(s)
        if not e:
            # Python's encoding comments override everything else.
            junk, ext = g.os_path_splitext(fileName)
            if ext == '.py':
                e = g.getPythonEncodingFromString(s)
        s = g.toUnicode(s, encoding=e or encoding)
        return s, e
    except IOError:
        # Translate 'can not open' and kind, but not fileName.
        if verbose:
            g.error('can not open', '', (kind or ''), fileName)
    except Exception:
        g.error('readFileIntoString: unexpected exception reading %s' % (fileName))
        g.es_exception()
    return None, None
#@+node:ekr.20160504062833.1: *3* g.readFileToUnicodeString
def readFileIntoUnicodeString(fn, encoding=None, silent=False):
    '''Return the contents of the file whose full path is fn,
    decoded to unicode, or None on error.'''
    try:
        with open(fn, 'rb') as f:
            s = f.read()
        return g.toUnicode(s, encoding=encoding)
    except IOError:
        if not silent:
            g.error('can not open', fn)
    except Exception:
        # Fix: honor `silent` here too, matching the IOError branch;
        # previously a generic failure always printed even when the
        # caller asked for silence.
        if not silent:
            g.error('readFileIntoUnicodeString: unexpected exception reading %s' % (fn))
            g.es_exception()
    return None
#@+node:ekr.20031218072017.3120: *3* g.readlineForceUnixNewline
#@+at <NAME> 9/7/2002
#
# The Unix readline() routine delivers "\r\n" line end strings verbatim,
# while the windows versions force the string to use the Unix convention
# of using only "\n". This routine causes the Unix readline to do the
# same.
#@@c
def readlineForceUnixNewline(f, fileName=None):
    '''Read one line from f, forcing the Unix line-end convention.'''
    try:
        line = f.readline()
    except UnicodeDecodeError:
        g.trace('UnicodeDecodeError: %s' % (fileName), f, g.callers())
        line = ''
    # Normalize a Windows "\r\n" ending to a bare "\n".
    if line.endswith("\r\n"):
        line = line[:-2] + "\n"
    return line
#@+node:ekr.20031218072017.3124: *3* g.sanitize_filename
def sanitize_filename(s):
    """
    Prepare string s to be a valid file name:
    - keep ASCII letters unchanged.
    - convert tabs to spaces and double quotes to single quotes.
    - substitute '_' for special path characters (backslash, /:|<>*._).
    - drop every other character (including digits and plain spaces).
    - strip leading and trailing whitespace.
    - collapse runs of underscores and return at most 128 characters.
    """
    pieces = []
    for ch in s:
        if ch in string.ascii_letters:
            pieces.append(ch)
        elif ch == '\t':
            pieces.append(' ')
        elif ch == '"':
            pieces.append("'")
        elif ch in '\\/:|<>*:._':
            pieces.append('_')
        # every other character is dropped
    cleaned = ''.join(pieces).strip()
    # Collapse runs of '_' down to a single '_' (fixpoint iteration).
    while len(cleaned) > 1 and '__' in cleaned:
        cleaned = cleaned.replace('__', '_')
    return cleaned[:128]
#@+node:ekr.20060328150113: *3* g.setGlobalOpenDir
def setGlobalOpenDir(fileName):
    '''Remember the directory of the most recently opened file in g.app.'''
    if not fileName:
        return
    g.app.globalOpenDir = g.os_path_dirname(fileName)
#@+node:ekr.20031218072017.3125: *3* g.shortFileName & shortFilename
def shortFileName(fileName, n=None):
    '''Return the base name of a path.'''
    if n is not None:
        # Deprecated: kept only so old call sites do not break.
        g.trace('"n" keyword argument is no longer used')
    if not fileName:
        return ''
    return g.os_path_basename(fileName)

shortFilename = shortFileName
#@+node:ekr.20150610125813.1: *3* g.splitLongFileName
def splitLongFileName(fn, limit=40):
    '''Return fn, split into lines at slash characters.'''
    parts = fn.replace('\\', '/').split('/')
    out = []
    count = 0
    last = len(parts) - 1
    for idx, part in enumerate(parts):
        out.append(part)
        count += len(part)
        # Re-insert the separator between components.
        if idx < last:
            out.append('/')
            count += 1
        # Start a new line once the running width exceeds the limit.
        if count > limit:
            out.append('\n')
            count = 0
    return ''.join(out)
#@+node:ekr.20050104135720: *3* g.Used by tangle code & leoFileCommands
#@+node:ekr.20031218072017.1241: *4* g.update_file_if_changed
# This is part of the tangle code.
def update_file_if_changed(c, file_name, temp_name):
    """Compares two files.
    If they are different, we replace file_name with temp_name.
    Otherwise, we just delete temp_name. Both files should be closed."""
    if g.os_path_exists(file_name):
        # Target exists: replace it only if the contents differ.
        if filecmp.cmp(temp_name, file_name):
            kind = 'unchanged'
            ok = g.utils_remove(temp_name)
        else:
            kind = '***updating'
            mode = g.utils_stat(file_name)
            # NOTE(review): g.utils_rename's 4th parameter is `verbose`,
            # so `mode` is passed as the verbosity flag here and the file
            # mode is never restored — confirm against utils_rename.
            ok = g.utils_rename(c, temp_name, file_name, mode)
    else:
        kind = 'creating'
        # 2010/02/04: g.utils_rename no longer calls
        # makeAllNonExistentDirectories
        head, tail = g.os_path_split(file_name)
        ok = True
        if head:
            ok = g.makeAllNonExistentDirectories(head, c=c)
        if ok:
            ok = g.utils_rename(c, temp_name, file_name)
    if ok:
        g.es('', '%12s: %s' % (kind, file_name))
    else:
        g.error("rename failed: no file created!")
        g.es('', file_name, " may be read-only or in use")
#@+node:ekr.20050104123726.3: *4* g.utils_remove
def utils_remove(fileName, verbose=True):
    '''Remove fileName, returning True on success and False on failure.'''
    try:
        os.remove(fileName)
    except Exception:
        if verbose:
            g.es("exception removing:", fileName)
            g.es_exception()
        return False
    return True
#@+node:ekr.20031218072017.1263: *4* g.utils_rename
def utils_rename(c, src, dst, verbose=True):
    '''Platform independent rename.'''
    # Deliberately does NOT create missing directories for dst;
    # that is the caller's responsibility.
    try:
        shutil.move(src, dst)
    except Exception:
        if verbose:
            g.error('exception renaming', src, 'to', dst)
            g.es_exception(full=False)
        return False
    return True
#@+node:ekr.20050104124903: *4* g.utils_chmod
def utils_chmod(fileName, mode, verbose=True):
    '''Set the permission bits of fileName. Do nothing when mode is None.'''
    if mode is not None:
        try:
            os.chmod(fileName, mode)
        except Exception:
            if verbose:
                g.es("exception in os.chmod", fileName)
                g.es_exception()
#@+node:ekr.20050104123726.4: *4* g.utils_stat
def utils_stat(fileName):
    '''Return the access mode of named file, removing any setuid, setgid, and sticky bits.

    Return None if the file can not be stat'ed.
    '''
    try:
        # 0o777 masks st_mode down to the rwx permission bits
        # (clearer than the old 7 * 8 * 8 + 7 * 8 + 7 arithmetic).
        mode = os.stat(fileName).st_mode & 0o777
    except Exception:
        mode = None
    return mode
#@+node:ekr.20190114061452.26: *3* g.writeFile
def writeFile(contents, encoding, fileName):
    '''Create a file with the given contents.

    Unicode contents are encoded with `encoding` first.
    Return True on success, False on any failure.
    '''
    try:
        data = contents
        if g.isUnicode(data):
            data = g.toEncodedString(data, encoding=encoding)
        # 'wb' preserves line endings.
        with open(fileName, 'wb') as f:
            f.write(data)
        return True
    except Exception:
        g.es_exception()
        return False
#@+node:ekr.20031218072017.3151: ** g.Finding & Scanning
#@+node:ekr.20140602083643.17659: *3* g.find_word
def find_word(s, word, i=0):
    '''
    Return the index of the first occurrence of word in s, or -1 if not found.

    g.find_word is *not* the same as s.find(word, i);
    g.find_word ensures that only word-matches are reported.
    '''
    while i < len(s):
        progress = i
        i = s.find(word, i)
        if i < 0:
            return -1
        # Reject a hit preceded by an identifier character:
        # it is embedded in a longer word, not a word-match.
        if i > 0 and (s[i - 1] == '_' or s[i - 1].isalnum()):
            i += len(word)
            continue
        if g.match_word(s, i, word):
            return i
        i += len(word)
        assert progress < i
    return -1
#@+node:ekr.20170220103251.1: *3* g.findRootWithPredicate
def findRootsWithPredicate(c, root, predicate=None):
'''
Commands often want to find one or more **roots**, given a position p.
A root is the position of any node matching a predicate.
This function formalizes the search order used by the pylint, pyflakes and
the rst3 commands, returning a list of zero or more found roots.
'''
seen = []
roots = []
if predicate is None:
# A useful default predicate for python.
# pylint: disable=function-redefined
def predicate(p):
return p.isAnyAtFileNode() and p.h.strip().endswith('.py')
# 1. Search p's tree.
for p in root.self_and_subtree(copy=False):
if predicate(p) and p.v not in seen:
seen.append(p.v)
roots.append(p.copy())
if roots:
return roots
# 2. Look up the tree.
for p in root.parents():
if predicate(p):
return [p.copy()]
# 3. Expand the search if root is a clone.
clones = []
| |
product_mapping=[];
product_mapping = self.reactionMapping['products_metaboliteMappings'][product_cnt].convert_stringMapping2ArrayMapping();
# check that the product positions == product elements
if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(self.reactionMapping['products_elements_tracked'][product_cnt]):
print('inconsistent products_positions and products_elements');
products_elements_positions_check = False;
# check that the product positions == product mapping
if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(product_mapping):
print('inconsistent products_positions and products_mapping');
products_elements_mapping_check = False;
# check that the product elements == product mapping
if len(self.reactionMapping['products_elements_tracked'][product_cnt])!=len(product_mapping):
print('inconsistent products_elements and products_mapping');
products_positions_mapping_check = False;
products_positions_tracked_cnt += len(self.reactionMapping['products_positions_tracked'][product_cnt]);
products_elements_tracked_cnt += len(self.reactionMapping['products_elements_tracked'][product_cnt]);
products_mappings_cnt += len(product_mapping);
products_mappings.append(product_mapping);
#check elemental balance
if reactants_positions_tracked_cnt != products_positions_tracked_cnt:
print('the length of reactants_positions_tracked does not match the length of products_positions_tracked');
element_balance_check = False;
if reactants_elements_tracked_cnt != products_elements_tracked_cnt:
print('reactants_elements_tracked does not match the length of products_elements_tracked');
element_balance_check = False;
if reactants_mappings_cnt != products_mappings_cnt:
print('the length of reactants_mapping does not match the length of products_mapping');
element_balance_check = False;
#check 1-to-1 mapping
reactants_mappings_list = [];
for reactants_mapping in reactants_mappings:
reactants_mappings_list.extend(reactants_mapping);
# check for duplicate reactant mappings
reactants_mappings_unique = list(set(reactants_mappings_list));
if len(reactants_mappings_list)!=len(reactants_mappings_unique):
print('duplicate reactants_mappings found');
mapping_check = False;
products_mappings_list = [];
for products_mapping in products_mappings:
products_mappings_list.extend(products_mapping);
# check for duplicate product mappings
products_mappings_unique = list(set(products_mappings_list));
if len(products_mappings_list)!=len(products_mappings_unique):
print('duplicate products_mappings found');
mapping_check = False;
# check that each product mapping has a matching reactant mapping, and vice versa
for reactant_cnt,reactant in enumerate(reactants_mappings):
print('checking reactant mapping ' + self.reactionMapping['reactants_ids_tracked'][reactant_cnt]);
for mapping_cnt,mapping in enumerate(reactant):
if not mapping in products_mappings_list:
print('no mapping found for reactant mapping ' + mapping + ' and position ' + str(mapping_cnt));
mapping_check = False;
for product_cnt,product in enumerate(products_mappings):
print('checking product mapping ' + self.reactionMapping['products_ids_tracked'][product_cnt]);
for mapping_cnt,mapping in enumerate(product):
if not mapping in reactants_mappings_list:
print('no mapping found for product mapping ' + mapping + ' and position ' + str(mapping_cnt));
mapping_check = False;
if not element_balance_check or not mapping_check:
print('check reaction mapping');
return reactants_ids_stoichiometry_check,reactants_elements_positions_check,reactants_elements_mapping_check,reactants_positions_mapping_check,\
products_ids_stoichiometry_check,products_elements_positions_check,products_elements_mapping_check,products_positions_mapping_check,\
element_balance_check,mapping_check;
def clear_elementsAndPositions(self):
'''Clear the reactants/products elements/positions'''
self.reactionMapping['reactants_elements_tracked']=None;
self.reactionMapping['reactants_positions_tracked']=None;
self.reactionMapping['products_elements_tracked']=None;
self.reactionMapping['products_positions_tracked']=None;
class stage02_isotopomer_mappingUtilities():
    def __init__(self):
        '''Create the utilities object and its database query helper.'''
        # Query helper used by every make_* method below.
        self.stage02_isotopomer_query = stage02_isotopomer_query();
    def make_missingMetaboliteMappings(self,experiment_id_I,model_id_I=[],mapping_id_rxns_I=[],mapping_id_mets_I=[],mapping_id_new_I=None):
        '''Make atom mapping metabolites from atom mapping reactions, QC atom mapping reactions;
        and create a new set of metabolite mappings that correspond to the current reaction mappings that need to be QC/QA'd'''
        #Input:
        #   experiment_id_I = experiment_id
        #   model_id_I = model_id
        #   mapping_id_rxns_I = reaction mapping id (#default atomMappingMetabolite mapping id to add new metabolites to)
        #   mapping_id_mets_I = existing metabolite mappings to use when making the new metabolite mappings
        #   mapping_id_new_I = name of mapping id for the new metabolite mappings
        #Output:
        #   default: new metabolite mappings will be added for the mapping id of the reactions
        #            existing metabolite mappings will not be added
        #   mapping_id_new_I != None: new metabolite mappings will be added for the mapping id specified
        #get model ids:
        if model_id_I:
            model_ids = model_id_I;
        else:
            model_ids = [];
            model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
        for model_id in model_ids:
            #get mapping ids
            if mapping_id_rxns_I and mapping_id_mets_I:
                mapping_ids_rxns=mapping_id_rxns_I;
                mapping_ids_mets=mapping_id_mets_I;
            elif mapping_id_rxns_I:
                mapping_ids_rxns=mapping_id_rxns_I;
            else:
                mapping_ids_rxns=[];
                mapping_ids_rxns=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
            for mapping_cnt,mapping_id_rxns in enumerate(mapping_ids_rxns):
                # get the metabolite mappings
                if mapping_id_rxns_I and mapping_id_mets_I:
                    mappings=self.stage02_isotopomer_query.get_atomMappingMetabolites_mappingID_dataStage02IsotopomerAtomMappingReactionsAndAtomMappingMetabolites(mapping_id_rxns,mapping_ids_mets[mapping_cnt]);
                else:
                    mappings = self.stage02_isotopomer_query.get_atomMappingMetabolites_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns);
                # remove duplicates
                # NOTE(review): O(n^2) pairwise comparison; acceptable for
                # small mapping sets, consider keying on a tuple otherwise.
                duplicate_ind = [];
                for d1_cnt,d1 in enumerate(mappings):
                    for d2_cnt in range(d1_cnt+1,len(mappings)):
                        if d1['mapping_id'] == mappings[d2_cnt]['mapping_id'] and \
                            d1['met_id'] == mappings[d2_cnt]['met_id'] and \
                            d1['met_elements'] == mappings[d2_cnt]['met_elements'] and \
                            d1['met_atompositions'] == mappings[d2_cnt]['met_atompositions'] and \
                            d1['met_symmetry_elements'] == mappings[d2_cnt]['met_symmetry_elements'] and \
                            d1['met_symmetry_atompositions'] == mappings[d2_cnt]['met_symmetry_atompositions']:
                            duplicate_ind.append(d2_cnt);
                duplicate_ind_unique=list(set(duplicate_ind));
                # copy out unique metabolites
                data_O = [];
                for d1_cnt,d1 in enumerate(mappings):
                    if d1_cnt in duplicate_ind_unique:
                        continue;
                    else:
                        if mapping_id_new_I: d1['mapping_id']=mapping_id_new_I; # change to the new mapping
                        data_O.append(d1);
                # NOTE(review): data_mets_cnt is computed but never used below —
                # looks like leftover debugging/QC code.
                met_ids = [x['met_id'] for x in data_O];
                met_ids_unique = list(set(met_ids));
                data_mets_cnt = {};
                for met in met_ids_unique:
                    data_mets_cnt[met] = 0;
                for d in data_O:
                    data_mets_cnt[d['met_id']] += 1;
                # add data to the database
                if mapping_id_new_I:
                    self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites(data_O);
                else:
                    data_add_O = [];
                    for d in data_O:
                        # check to see if the metabolite is already in the database
                        mapping_row = {};
                        mapping_row = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,d['met_id']);
                        if not mapping_row: data_add_O.append(d);
                    self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites(data_add_O);
    def make_missingReactionMappings(self,experiment_id_I,model_id_I=[],mapping_id_rxns_I=[],mapping_id_mets_I=[],mapping_id_new_I=None):
        '''Update missing or incomplete reaction mappings for the current mapping from the matching metabolite mappings,
        and optionally, from the previous reaction mappings.

        experiment_id_I: experiment to process.
        model_id_I: optional list of model ids (defaults to all models of the experiment).
        mapping_id_rxns_I: optional list of reaction-mapping ids to update.
        mapping_id_mets_I: optional list of metabolite-mapping ids (parallel to mapping_id_rxns_I).
        mapping_id_new_I: optional new mapping id under which rows are written.

        Returns (missing_reactions_O, missing_metabolites_O).

        NOTE(review): the mutable default arguments ([]) are shared across
        calls; they are only read here, but should be None-sentinels.
        '''
        #Note: prior to running, remove all reaction mappings that are not used.
        imm = stage02_isotopomer_metaboliteMapping();
        data_O = [];
        #get model ids:
        if model_id_I:
            model_ids = model_id_I;
        else:
            model_ids = [];
            model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
        for model_id in model_ids:
            #get all reactions in the model:
            reactions = [];
            reactions = self.stage02_isotopomer_query.get_rows_modelID_dataStage02IsotopomerModelReactions(model_id);
            #get mapping ids
            if mapping_id_rxns_I and mapping_id_mets_I:
                mapping_ids_rxns=mapping_id_rxns_I;
                mapping_ids_mets=mapping_id_mets_I;
            elif mapping_id_rxns_I:
                mapping_ids_rxns=mapping_id_rxns_I;
            else:
                # NOTE(review): BUG -- this branch fills `mapping_rxns`, but the
                # loop below iterates `mapping_ids_rxns`, which is never assigned
                # on this path, so a NameError is raised whenever no reaction
                # mapping ids are passed in.  The query result was presumably
                # meant to be assigned to `mapping_ids_rxns` -- confirm and fix.
                mapping_rxns=[];
                mapping_rxns=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
            for mapping_cnt,mapping_id_rxns in enumerate(mapping_ids_rxns):
                missing_reactions_O = [];
                missing_metabolites_O = [];
                for reaction_cnt,reaction in enumerate(reactions):
                    #get the current reaction mappings
                    mapping_rxns = [];
                    mapping_rxns = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns,reaction['rxn_id']);
                    #if mapping_rxns: # atom mapping for the reaction already exists and is used
                    #    continue;
                    # New rows are written either under a new mapping id or the current one.
                    if mapping_id_new_I:
                        mapping_id_current = mapping_id_new_I;
                    else:
                        mapping_id_current = mapping_id_rxns;
                    # Template row for the (re)constructed reaction mapping.
                    data_tmp={'mapping_id':mapping_id_current,
                        'rxn_id':reaction['rxn_id'],
                        'rxn_description':None,
                        'reactants_stoichiometry_tracked':[],
                        'products_stoichiometry_tracked':[],
                        'reactants_ids_tracked':[],
                        'products_ids_tracked':[],
                        'reactants_mapping':[],
                        'products_mapping':[],
                        'rxn_equation':reaction['equation'],
                        'products_elements_tracked':[],
                        'products_positions_tracked':[],
                        'reactants_elements_tracked':[],
                        'reactants_positions_tracked':[],
                        'used_':True,
                        'comment_':''};
                    #check if the reactants or products are tracked
                    tracked_reactants = [];
                    for reactant in reaction['reactants_ids']:
                        tracked_reactant = {};
                        if mapping_id_mets_I:
                            tracked_reactant = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_ids_mets[mapping_cnt],reactant);
                        else:
                            tracked_reactant = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,reactant);
                        if tracked_reactant:
                            tracked_reactants.append(tracked_reactant);
                    tracked_products = [];
                    for product in reaction['products_ids']:
                        tracked_product = {};
                        if mapping_id_mets_I:
                            tracked_product = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_ids_mets[mapping_cnt],product);
                        else:
                            tracked_product = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,product);
                        if tracked_product:
                            tracked_products.append(tracked_product);
                    if tracked_reactants or tracked_products:
                        #check if the reaction is missing or is missing a tracked metabolite
                        tracked_reaction = {};
                        tracked_reaction = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns,reaction['rxn_id']);
                        if tracked_reaction:
                            missing_reactants = [];
                            # get the stoichiometry for each reactant
                            tracked_reaction_reactant_ids_stoich = {};
                            for tracked_reactant_id_cnt,tracked_reactant_id in enumerate(tracked_reaction['reactants_ids_tracked']):
                                tracked_reaction_reactant_ids_stoich[tracked_reactant_id] = 0;
                            for tracked_reactant_id_cnt,tracked_reactant_id in enumerate(tracked_reaction['reactants_ids_tracked']):
                                tracked_reaction_reactant_ids_stoich[tracked_reactant_id] += abs(tracked_reaction['reactants_stoichiometry_tracked'][tracked_reactant_id_cnt]);
                            #copy existing data
                            data_tmp['reactants_ids_tracked'].extend(tracked_reaction['reactants_ids_tracked']);
                            data_tmp['reactants_stoichiometry_tracked'].extend(tracked_reaction['reactants_stoichiometry_tracked']);
                            data_tmp['reactants_mapping'].extend(tracked_reaction['reactants_mapping']);
                            data_tmp['reactants_elements_tracked'].extend(tracked_reaction['reactants_elements_tracked']);
                            data_tmp['reactants_positions_tracked'].extend(tracked_reaction['reactants_positions_tracked']);
                            data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                            for tracked_reactant in tracked_reactants:
                                if tracked_reactant['met_id'] in tracked_reaction['reactants_ids_tracked']:
                                    # check for matching stoichiometry
                                    reaction_stoich = 0;
                                    for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
                                        if met_id == tracked_reactant['met_id']:
                                            reaction_stoich = abs(reaction['reactants_stoichiometry'][met_id_cnt]);
                                            break;
                                    unbalanced_stoich = reaction_stoich - tracked_reaction_reactant_ids_stoich[tracked_reactant['met_id']];
                                    if tracked_reaction_reactant_ids_stoich[tracked_reactant['met_id']] != reaction_stoich:
                                        # Add one placeholder entry per missing stoichiometric unit.
                                        for stoich_cnt in range(int(unbalanced_stoich)):
                                            missing_reactants.append(tracked_reactant);
                                            #add missing data
                                            data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
                                            data_tmp['reactants_stoichiometry_tracked'].append(0);
                                            imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},stoich_cnt)
                                            new_mapping = imm.convert_arrayMapping2StringMapping();
                                            imm.clear_metaboliteMapping();
                                            data_tmp['reactants_mapping'].append(new_mapping);
                                            #data_tmp['reactants_mapping'].append('');
                                            data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
                                            data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
                                            data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                                            data_tmp['used_']=False;
                                            data_tmp['comment_']+=tracked_reactant['met_id']+',';
                                else:
                                    # Tracked metabolite entirely absent from the existing mapping.
                                    missing_reactants.append(tracked_reactant);
                                    reaction_stoich = 0;
                                    for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
                                        if met_id == tracked_reactant['met_id']:
                                            reaction_stoich = reaction['reactants_stoichiometry'][met_id_cnt];
                                            break;
                                    #add missing data
                                    data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
                                    data_tmp['reactants_stoichiometry_tracked'].append(reaction_stoich);
                                    imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},0)
                                    new_mapping = imm.convert_arrayMapping2StringMapping();
                                    imm.clear_metaboliteMapping();
                                    data_tmp['reactants_mapping'].append(new_mapping);
                                    #data_tmp['reactants_mapping'].append('');
                                    data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
                                    data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
                                    data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                                    data_tmp['used_']=False;
                                    data_tmp['comment_']+=tracked_reactant['met_id']+',';
                            missing_products = [];
                            # get the stoichiometry for each product
                            tracked_reaction_product_ids_stoich = {};
                            for tracked_product_id_cnt,tracked_product_id in enumerate(tracked_reaction['products_ids_tracked']):
                                tracked_reaction_product_ids_stoich[tracked_product_id] = 0;
                            for tracked_product_id_cnt,tracked_product_id in enumerate(tracked_reaction['products_ids_tracked']):
                                tracked_reaction_product_ids_stoich[tracked_product_id] += abs(tracked_reaction['products_stoichiometry_tracked'][tracked_product_id_cnt]);
                            #copy existing data
                            data_tmp['products_ids_tracked'].extend(tracked_reaction['products_ids_tracked']);
                            data_tmp['products_stoichiometry_tracked'].extend(tracked_reaction['products_stoichiometry_tracked']);
                            data_tmp['products_mapping'].extend(tracked_reaction['products_mapping']);
                            data_tmp['products_elements_tracked'].extend(tracked_reaction['products_elements_tracked']);
                            data_tmp['products_positions_tracked'].extend(tracked_reaction['products_positions_tracked']);
                            data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                            for tracked_product in tracked_products:
                                if tracked_product['met_id'] in tracked_reaction['products_ids_tracked']:
                                    # check for matching stoichiometry
                                    reaction_stoich = 0;
                                    for met_id_cnt,met_id in enumerate(reaction['products_ids']):
                                        if met_id == tracked_product['met_id']:
                                            reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
                                            break;
                                    unbalanced_stoich = reaction_stoich - tracked_reaction_product_ids_stoich[tracked_product['met_id']];
                                    if tracked_reaction_product_ids_stoich[tracked_product['met_id']] != reaction_stoich:
                                        # Add one placeholder entry per missing stoichiometric unit.
                                        for stoich_cnt in range(int(unbalanced_stoich)):
                                            missing_products.append(tracked_product);
                                            #add missing data
                                            data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
                                            data_tmp['products_stoichiometry_tracked'].append(0);
                                            imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},stoich_cnt)
                                            new_mapping = imm.convert_arrayMapping2StringMapping();
                                            imm.clear_metaboliteMapping();
                                            data_tmp['products_mapping'].append(new_mapping);
                                            #data_tmp['products_mapping'].append('');
                                            data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
                                            data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
                                            data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                                            data_tmp['used_']=False;
                                            data_tmp['comment_']+=tracked_product['met_id']+',';
                                else:
                                    # Tracked metabolite entirely absent from the existing mapping.
                                    missing_products.append(tracked_product);
                                    reaction_stoich = 0;
                                    for met_id_cnt,met_id in enumerate(reaction['products_ids']):
                                        if met_id == tracked_product['met_id']:
                                            reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
                                            break;
                                    #add missing data
                                    data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
                                    data_tmp['products_stoichiometry_tracked'].append(reaction_stoich);
                                    imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},0)
                                    new_mapping = imm.convert_arrayMapping2StringMapping();
                                    imm.clear_metaboliteMapping();
                                    data_tmp['products_mapping'].append(new_mapping);
                                    #data_tmp['products_mapping'].append('');
                                    data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
                                    data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
                                    data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                                    data_tmp['used_']=False;
                                    data_tmp['comment_']+=tracked_product['met_id']+',';
                            if missing_reactants or missing_products:
                                # NOTE(review): tmp aliases (does not copy) tracked_reaction,
                                # so the update() calls mutate the queried row as well.
                                tmp = {};
                                tmp = tracked_reaction;
                                tmp.update({'missing_reactants':missing_reactants});
                                tmp.update({'missing_products':missing_products});
                                tmp.update({'equation':reaction['equation']})
                                missing_metabolites_O.append(tmp);
                        else:
                            # No mapping exists for this reaction at all: record it as
                            # missing and build a full mapping row from scratch.
                            tmp = {};
                            tmp = reaction;
                            tmp.update({'tracked_reactants':tracked_reactants});
                            tmp.update({'tracked_products':tracked_products});
                            missing_reactions_O.append(reaction);
                            for tracked_reactant in tracked_reactants:
                                reaction_stoich = 0;
                                for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
                                    if met_id == tracked_reactant['met_id']:
                                        reaction_stoich = reaction['reactants_stoichiometry'][met_id_cnt];
                                        break;
                                #add missing data
                                data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
                                data_tmp['reactants_stoichiometry_tracked'].append(reaction_stoich);
                                imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},0)
                                new_mapping = imm.convert_arrayMapping2StringMapping();
                                imm.clear_metaboliteMapping();
                                data_tmp['reactants_mapping'].append(new_mapping);
                                #data_tmp['reactants_mapping'].append('');
                                data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
                                data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
                                data_tmp['rxn_description']=None;
                                data_tmp['used_']=False;
                                data_tmp['comment_']=reaction['rxn_id'];
                            for tracked_product in tracked_products:
                                reaction_stoich = 0;
                                for met_id_cnt,met_id in enumerate(reaction['products_ids']):
                                    if met_id == tracked_product['met_id']:
                                        reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
                                        break;
                                #add missing data
                                data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
                                data_tmp['products_stoichiometry_tracked'].append(reaction_stoich);
                                imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},0)
                                new_mapping = imm.convert_arrayMapping2StringMapping();
                                imm.clear_metaboliteMapping();
                                data_tmp['products_mapping'].append(new_mapping);
                                #data_tmp['products_mapping'].append('');
                                data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
                                data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
                                data_tmp['rxn_description']=None;
                                data_tmp['used_']=False;
                                data_tmp['comment_']=reaction['rxn_id'];
                        data_O.append(data_tmp);
                #self.print_missingReactionMappings(missing_reactions_O,missing_metabolites_O);
                # NOTE(review): this return exits after the first mapping id of the
                # first model; the database insert below is unreachable, so data_O
                # is never persisted.  Confirm whether the return was a debugging
                # shortcut that should be removed.
                return missing_reactions_O,missing_metabolites_O;
        #add data to the database:
        self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingReactions(data_O);
def print_missingReactionMappings(self,missing_reactions_I,missing_metabolites_I):
'''print missing reaction mappings to the screen'''
#missing reactions
script = '';
for missing_reaction in missing_reactions_I:
script+= missing_reaction['rxn_id']+'\t'+missing_reaction['equation']+'\t'+str(missing_reaction['reactants_ids'])+'\t'+str(missing_reaction['products_ids'])+'\t';
for tracked_reactant in missing_reaction['tracked_reactants']:
script+= tracked_reactant['met_id']+',';
script+= '\t'
for tracked_product in missing_reaction['tracked_products']:
script+= tracked_product['met_id']+',';
script+='\n'
print(script)
#missing metabolites
script = '';
for missing_metabolite in missing_metabolites_I:
script+= missing_metabolite['rxn_id']+'\t'+missing_metabolite['equation']+'\t'+str(missing_metabolite['reactants_ids_tracked'])+'\t'+str(missing_metabolite['products_ids_tracked'])+'\t';
for tracked_reactant in missing_metabolite['missing_reactants']:
script+= tracked_reactant['met_id']+',';
script+= '\t'
for tracked_product in missing_metabolite['missing_products']:
script+= tracked_product['met_id']+',';
script+='\n'
print(script)
def find_inconsistentMetaboliteMappings(self,experiment_id_I,model_id_I=[],mapping_id_I=[]):
'''Find inconsistencies in the atom mapping by comparing the metabolite information | |
import re
import os
import math
import google
import requests
import pickle
from hashlib import sha512
from dateutil.parser import parse
from entities.acurerate_attributes import P, C, T
from urllib.parse import urlparse
from string import ascii_letters
# Go to Settings - get package google.cloud.translate, google.cloud.core
#from google.cloud import translate
class AcureRateUtils(object):
    def __init__(self):
        # Stateless: every public member is a @staticmethod, so instances
        # carry no state and construction is a no-op.
        pass
    @staticmethod
    # NOTE(review): this decorator is left over from the commented-out
    # import_non_local() below; it stacks with the @staticmethod that
    # precedes translate(), wrapping the function twice.  On Python < 3.10
    # the doubly wrapped attribute is not callable -- the stray decorator
    # (and the dead code below) should be removed.
    # def import_non_local(name, custom_name=None):
    #     import importlib.util, sys
    #
    #     #spec = importlib.util.spec_from_file_location("module.name", "/path/to/file.py")
    #     spec = importlib.util.spec_from_file_location("google", "C:\Python353\Lib\site-packages\google-1.9.3.dist-info\file.py")
    #     foo = importlib.util.module_from_spec(spec)
    #     spec.loader.exec_module(foo)
    #     #foo.MyClass()
    #
    #     return foo
    @staticmethod
    def translate(str, source_language, target_language):
        # -----------------------------------------------------
        # Language codes can be found here: https://cloud.google.com/translate/docs/languages
        # -----------------------------------------------------
        # NOTE(review): non-functional Google quickstart code.  `translate.Client()`
        # depends on the `google.cloud.translate` import commented out at the top
        # of the file, the function's own arguments are ignored, and `name`
        # (used at the bottom) is undefined in this scope, so this raises
        # NameError as written.  Needs a real implementation before use.
        # TODO: we shouldn't be doing this here (on every call..!) Move somewhere else... :-)
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = './Satori-5834c119ed73.json'
        # Instantiates a client
        translate_client = translate.Client()
        # The text to translate
        text = u'Hello, world!'
        # The target language
        target = 'ru'
        # Translates some text into Russian
        translation = translate_client.translate(text, target_language=target)
        translation = translate_client.translate(u'דורון הרצליך', target_language='en')
        print(u'Text: {}'.format(text))
        print(u'Translation: {}'.format(translation['translatedText']))
        translated_name = name
        return translated_name
    # [END translate_quickstart]
@staticmethod
def object_hash(_obj):
h = pickle.dumps(_obj)
s = hash(h)
#s = sha512(h).hexdigest()
return s
@staticmethod
def dict_compare(d1, d2):
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
same = set(o for o in intersect_keys if d1[o] == d2[o])
return added, removed, modified, same
@staticmethod
def is_alphanumeric(str):
valid = re.match('^[\w-]+$', str) is not None
return valid
@staticmethod
def valid_name(str):
return all(c in ascii_letters + '-. ' for c in str)
@staticmethod
def aliasize(the_string):
if the_string is None:
return None
alias = the_string.lower()
# Remove incorporation suffixes ("Ltd", "Inc", etc.)
alias = AcureRateUtils.remove_incorporation_suffix(alias)
# Check if we have "strA/strB" or "strA,strB" or "strA(...)"
alias = AcureRateUtils.get_until_char(alias, '/')
alias = AcureRateUtils.get_until_char(alias, ',')
alias = AcureRateUtils.get_until_char(alias, '(')
# Remove dots, hyphens, dbl-spaces, etc.
alias = alias.replace(" ", " ")
alias = alias.replace(".", " ")
alias = alias.replace("-", " ")
# Strip leading/trailing spaces
return alias.strip()
@staticmethod
def tokenize_full_name(full_name):
tokens = full_name.split(" ")
if len(tokens) == 1:
return (tokens[0], None, None)
elif len(tokens) == 2:
return (tokens[0], None, tokens[1])
else:
return (tokens[0], tokens[1], tokens[2])
@staticmethod
def get_until_char(the_string, char):
try:
s = the_string.index(char)
return the_string[:s]
except Exception as e:
return the_string
@staticmethod
def remove_parenthesized_content(the_string, to_the_right=False):
# to_the_right=False: "Ploni (mr) Almoni" --> "<NAME>" (removed dbl-space in the middle!)
# to_the_right=False: "Ploni (mr)" --> "Ploni" (removed space at the end!)
# to_the_right=True: "Ploni (mr) Almoni" --> "Ploni" (removed also the space at the end!)
# if no closing parenthesis, original string is returned
# if multiple parenthesis, only one is removed... "ploni (a) bb (b) almoni" --> "ploni bb (b) almoni"
if the_string is None:
return None
try:
s = the_string.index("(")
if to_the_right:
cleaned_str = the_string[:s]
else:
e = the_string.index(")")
cleaned_str = the_string[:s] + the_string[e+1:]
except Exception as e:
cleaned_str = the_string
return cleaned_str.replace(" ", " ").strip()
@staticmethod
# x = {"age": 2, "color": "red"}
# y = {"color": "yellow", "shoe size": 42}
# z = {"age": 2, "color": "yellow", "shoe size": 42}
def merge_two_dicts(x, y):
'''Given two dicts, merge them into a new dict as a shallow copy.'''
z = x.copy()
z.update(y)
return z
@staticmethod
# Given a map: { "attr-name-in-source": "attr-name-in-target" }
# Copy all fields from source_dict[attr-name-in-source] to target_dict[attr-name-in-target]
def dict2dict(source_dict, target_dict, themap):
try:
for k, v in themap.items():
if k in source_dict:
target_dict[v] = source_dict[k]
except Exception as e:
print(e)
raise e
return target_dict
@staticmethod
def contains_words(string, key_words):
words = set(re.findall(r'\w+', string))
key_words_set = set(key_words)
u = words & key_words_set
if len(u) > 0:
return True
return False
@staticmethod
def obj2string(obj, delimiter=', '):
if obj is None:
return ''
formatted = delimiter.join(['%s: %s' % (key, value) for (key, value) in obj.__dict__.items()])
return formatted
academic_prefixes = ['prof', 'prof.', 'professor', 'dr', 'dr.', 'doctor']
@staticmethod
def announce(webhook, payload):
if webhook is None or payload is None:
return None
r = requests.post(webhook, payload)
return r
@staticmethod
def _longest_common_substring(strings_list):
substring = ''
if len(strings_list) > 1 and len(strings_list[0]) > 0:
for i in range(len(strings_list[0])):
for j in range(len(strings_list[0]) - i + 1):
if j > len(substring) and all(strings_list[0][i:i + j] in x for x in strings_list):
substring = strings_list[0][i:i + j]
return substring
@staticmethod
def google_search(site, query):
full_query_line = 'site:%s %s' % (site, query)
res = google.search(full_query_line, tld='com', lang='en', num=3, start=0, stop=2, pause=2.0)
matches = []
for url in res:
if url.lower().find(site) == 0:
matches.append(url)
return matches
@staticmethod
def boxit(logger_func, msg):
l = len(msg)
logger_func('-' * (l + 2))
logger_func(' %s ' % msg)
logger_func('-' * (l + 2))
@staticmethod
def is_academic_domain(domain):
if not domain:
return False
# TODO: should be replaced by predefined list in DB, return also the country, etc.
if domain.endswith('ac.il') or domain.endswith('ac.uk') or domain.endswith('edu'):
return True
return False
@staticmethod
def is_academic_prefix(prefix):
if prefix in AcureRateUtils.academic_prefixes:
return True
return False
# See more info here: https://en.wikipedia.org/wiki/Types_of_business_entity
# TODO: auto-generate those - make sure we also have the ',' or ', ' in the beginning, like in 'ltd' below.
company_incoporation_symbols = ['ltd', 'ltd.', 'limited',
'inc', 'inc.', 'incorporated', 'incorporation',
'co', 'corp', 'corp.', 'corporated', 'corporation',
'holdings',
'plc', 'plc.', 'p.l.c', 'p.l.c.',
'pllc', 'pllc.', 'p.l.l.c', 'p.l.l.c.' # US: Professional Limited Liability Company
'llc', 'llc.', 'l.l.c', 'l.l.c.' # US: Limited Liability Company
'lp', 'lp.', 'l.p', 'l.p.' 'limited partnership', # Limited Partnership
'llp', 'llp.', 'l.l.p', 'l.l.p.' 'limited liability partnership',
'lllp', 'lllp.', 'l.l.l.p', 'l.l.l.p.' 'limited liability limited partnership',
'gmbh', 'gmbh.', 'gesellschaft mit beschränkter haftung',
'gesmbh', 'gesmbh.', 'ges.m.b.H']
    @staticmethod
    def remove_incorporation_suffix2(company_name):
        # NOTE(review): dead/unfinished code.  Rebinding the loop variable
        # `t` mutates nothing, `num_tokens` is unused, and the function
        # always returns the literal 'something....'.  Callers should use
        # remove_incorporation_suffix() instead; this stub should either be
        # completed or deleted.
        tokens = re.split(';|,| ', company_name)
        num_tokens = len(tokens)
        for t in tokens:
            if t in AcureRateUtils.company_incoporation_symbols:
                t = ''
                break
        return 'something....'
@staticmethod
def remove_incorporation_suffix(company_name):
for symbol in AcureRateUtils.company_incoporation_symbols:
s = ' %s' % symbol
if company_name.lower().endswith(s):
i = company_name.lower().find(s)
return company_name[:i]
s = ',%s' % symbol
if company_name.lower().endswith(s):
i = company_name.lower().find(s)
return company_name[:i]
s = ', %s' % symbol
if company_name.lower().endswith(s):
i = company_name.lower().find(s)
return company_name[:i]
return company_name
@staticmethod
def clean_company_name(company_name):
company_name_clean = AcureRateUtils.remove_parenthesized_content(company_name)
company_name_clean = AcureRateUtils.remove_incorporation_suffix(company_name_clean)
return company_name_clean
@staticmethod
def is_same_company__ignore_incoporation(company_str1, company_str2):
if company_str1 is None or company_str2 is None:
return False
c1 = company_str1.lower().strip()
c2 = company_str2.lower().strip()
for symbol in AcureRateUtils.company_incoporation_symbols:
s = ' %s' % symbol
if c1.endswith(s):
c1 = c1.replace(s, '')
if c2.endswith(s):
c2 = c2.replace(s, '')
return c1 == c2
@staticmethod
def is_company_public(company):
if company is None:
return False
return C.STOCK_SYMBOL in company['deduced'] and company['deduced'][C.STOCK_SYMBOL].strip() != ""
@staticmethod
def get_employees_range(num_employees):
b = math.floor(math.log10(num_employees))
lower = math.pow(10, b)
higher = math.pow(10, b+1)
range = "%d-%d" % (lower, higher)
return range
seniority_table = {
C.RANGE_1_10: [T.ROLE_PRESIDENT, T.ROLE_OFFICER, T.SENIORITY_FOUNDER, T.SENIORITY_BOARD, T.SENIORITY_CLEVEL, T.SENIORITY_SVP, T.SENIORITY_VP, T.SENIORITY_DIRECTOR, T.ROLE_SITE_MANAGER],
C.RANGE_10_100: [T.ROLE_PRESIDENT, T.ROLE_OFFICER, T.SENIORITY_FOUNDER, T.SENIORITY_BOARD, T.SENIORITY_CLEVEL, T.SENIORITY_SVP, T.SENIORITY_VP],
C.RANGE_100_1000: [T.ROLE_PRESIDENT, T.ROLE_OFFICER, T.SENIORITY_FOUNDER, T.SENIORITY_BOARD, T.SENIORITY_CLEVEL, T.SENIORITY_SVP, T.SENIORITY_VP],
C.RANGE_1000_10000: [T.ROLE_PRESIDENT, T.ROLE_OFFICER, T.SENIORITY_FOUNDER, T.SENIORITY_BOARD, T.SENIORITY_CLEVEL, T.SENIORITY_VP, T.SENIORITY_SVP, T.SENIORITY_EVP],
C.RANGE_10000_100000: [T.ROLE_PRESIDENT, T.ROLE_OFFICER, T.SENIORITY_FOUNDER, T.SENIORITY_BOARD, T.SENIORITY_CLEVEL, T.SENIORITY_VP, T.SENIORITY_SVP, T.SENIORITY_EVP]}
@staticmethod
def is_senior(company, title_line):
# Need to determine if person is senior, dependant on company Size, IPO, Years, etc.
employees_range = company[C.DEDUCED].get(C.EMPLOYEES_RANGE, C.RANGE_1_10)
titles = AcureRateUtils.normalized_titles(title_line)
for title, seniority, area in titles:
if title in AcureRateUtils.seniority_table[employees_range] or seniority in AcureRateUtils.seniority_table[employees_range]:
return True
return False
@staticmethod
def is_founder(title):
if title is None:
return False
return AcureRateUtils.examine_string(
string=title,
tokens_case_insensitive=['Co Founder', 'Co-Founder' 'Founding Member'],
words_case_insensitive=['Founder', 'CoFounder'])
@staticmethod
def is_ceo(title):
if title is None:
return False
return AcureRateUtils.examine_string(
string=title,
tokens_case_sensitive=['CEO'],
tokens_case_insensitive=['c.e.o', 'chief executive officer', 'chief execution officer'],
words_case_insensitive=['ceo'])
@staticmethod
def is_cfo(title):
if title is None:
return False
return AcureRateUtils.examine_string(
string=title,
tokens_case_sensitive=['CFO'],
tokens_case_insensitive=['c.f.o', 'chief finance officer', 'chief financial officer', 'chief financials officer',
'senior financial analyst', 'senior financials analyst'],
words_case_insensitive=['cfo'])
@staticmethod
def is_board_member(title):
if title is None:
return False
return AcureRateUtils.examine_string(
string=title,
tokens_case_sensitive=[],
tokens_case_insensitive=['chairman', 'chairman of the board', 'advisory board', 'senior advisor', 'board advisor', 'board member'],
words_case_insensitive=[])
@staticmethod
def is_investor(title):
if title is None:
return False
return AcureRateUtils.examine_string(
string=title,
#words_case_insensitive=['investor', 'angel', 'investment'])
words_case_insensitive=['investor', 'angel'])
@staticmethod
def is_investment_lead(investment_info):
if investment_info and 'lead' in investment_info.lower():
return True
return False
@staticmethod
def get_investment_round(investment_info):
if investment_info is None:
return None
info = investment_info.lower()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2018-02-02 14:06:34
import datetime
import json
import logging
import math
import random
import time
import numpy as np
import scipy.stats as st
from dateutil import parser
import iblrig.ambient_sensor as ambient_sensor
import iblrig.bonsai as bonsai
import iblrig.misc as misc
from iblrig.check_sync_pulses import sync_check
from iblrig.iotasks import ComplexEncoder
log = logging.getLogger("iblrig")
class AdaptiveContrast(object):
    """Adaptive contrast trials happen whenever staircase contrasts do not.

    In any particular trial a contrast is drawn pseudo-randomly from the
    self.contrast_set pool.  This pool is loaded from the last trial of the
    previous session (data['ac']['contrast_set']); if there is no previous
    session the pool is initialized from the AC_INIT_CONTRASTS setting.
    The number of correct trials in a buffer of self.buffer_size is compared
    to a binomial distribution with alpha = 0.05 at different probabilities
    (self._update_contrast_set); when enough correct trials are achieved,
    new contrast values are added to the pool.  Similarly to the contrast
    pool, the buffer is also loaded from the previous session
    (data['ac']['buffer']).
    """

    def __init__(self, sph):
        # sph: session parameter handler carrying all task settings.
        self.type = "AdaptiveContrast"
        self.use_me = sph.ADAPTIVE_CONTRAST
        self.all_contrasts = sph.CONTRAST_SET
        self.init_contrasts = sph.AC_INIT_CONTRASTS
        self.buffer_size = sph.AC_BUFFER_SIZE
        # Minimum correct-trial count (per side and contrast) that beats a
        # binomial null at alpha = 0.05 for the configured performance level.
        self.perf_crit = self._min_trials_at(sph.AC_PERF_CRIT, 0.05)
        self.ntrials_to_six = sph.AC_NTRIALS_TO_SIX
        self.ntrials_to_zero = sph.AC_NTRIALS_TO_ZERO
        self.ntrials_to_remove_50 = sph.AC_NTRIALS_TO_REMOVE_50
        self.ntrials = 0
        self.previous_session = sph.PREVIOUS_DATA_FILE
        self.last_trial_data = sph.LAST_TRIAL_DATA
        self.ntrials_125 = self._init_ntrials_125()
        self.contrast_set = self._init_contrast_set()
        self.buffer = self._init_buffer()
        self.value = self._init_contrast()
        self.trial_correct = None
        # NOTE(review): presumably cleared so the full previous-session dict
        # is not re-serialized by reprJSON on every trial -- confirm.
        self.last_trial_data = None

    def reprJSON(self):
        """Return a JSON-serializable view of the current state."""
        return self.__dict__

    def _init_ntrials_125(self):
        """Restore the 0.125-contrast trial counter from the previous
        session, or start at zero for a fresh subject."""
        if self.previous_session is None or not self.last_trial_data:
            out = 0
        else:
            out = self.last_trial_data["ac"]["ntrials_125"]
        return out

    def _init_contrast_set(self):
        """Restore the contrast pool from the previous session, or fall
        back to the configured initial contrasts."""
        if self.previous_session is None or not self.last_trial_data:
            _contrasts = self.init_contrasts
        else:
            _contrasts = self.last_trial_data["ac"]["contrast_set"]
        return _contrasts

    def _init_buffer(self):
        """Restore the correct-trial buffer from the previous session or
        create a zeroed one.  Shape: (2 sides, buffer_size, n contrasts)."""
        if self.previous_session is None or not self.last_trial_data:
            _buffer = np.zeros((2, self.buffer_size, len(self.all_contrasts))).tolist()
        else:
            _buffer = self.last_trial_data["ac"]["buffer"]
            # Presumably a guard against buffers saved in an older,
            # non-2-sided layout -- confirm.
            if len(_buffer) > 2:
                _buffer = np.zeros(
                    (2, self.buffer_size, len(self.all_contrasts))
                ).tolist()
        return _buffer

    def _init_contrast(self):
        # First trial's contrast: uniform draw from the pool.
        _contrast = np.random.choice(self.contrast_set)
        return _contrast

    def _reset_buffer(self):
        """Zero the correct-trial buffer."""
        self.buffer = np.zeros((2, self.buffer_size, len(self.all_contrasts))).tolist()

    def _update_buffer(self, prev_position):
        """Roll the last trial's outcome into the buffer slot for its side
        (left/right from the sign of prev_position) and contrast."""
        _buffer = np.asarray(self.buffer)
        side_idx = 0 if prev_position < 0 else 1
        contrast_idx = self.contrast_set.index(self.value)
        col = _buffer[side_idx, :, contrast_idx]
        # Drop the oldest entry and append the newest outcome.
        col = np.roll(col, -1)
        col[-1] = int(self.trial_correct)
        _buffer[side_idx, :, contrast_idx] = col
        self.buffer = _buffer.tolist()

    def _update_contrast(self):
        """Draw the next trial's contrast.

        With adaptation enabled the draw comes from the pool; when the 0
        contrast is present, the last pool entry is drawn at half the
        weight of the others.  With adaptation disabled the draw is
        uniform over all configured contrasts instead.
        """
        if (self.use_me) and (0 in self.contrast_set):
            # p = [1/(n-1 + 0.5)] * (n - 1)
            n_1 = len(self.contrast_set) - 1
            z = n_1 + 0.5
            p = [1 / z] * (n_1 + 1)
            p[-1] *= 0.5
            self.value = np.random.choice(self.contrast_set, p=p)
        elif self.use_me:
            self.value = np.random.choice(self.contrast_set)
        else:
            self.value = np.random.choice(self.all_contrasts)

    def _update_contrast_set(self):
        """Grow/shrink the contrast pool based on performance and the
        trial counters; the pool is kept sorted in descending order."""
        # Update counter of how many trials were performed with 0.125 contrast
        if 0.125 in self.contrast_set:
            self.ntrials_125 += 1
        # Sum correct left AND right trials for all contrasts
        _ntrials = np.sum(self.buffer, axis=1)
        # Define criterion for the next contrast insert
        pass_crit = _ntrials > self.perf_crit
        # Check if both left AND right have passed criterion
        pass_idx = np.bitwise_and(pass_crit[0], pass_crit[1])
        # If 1.0 and 0.5 contrasts pass add 0.25
        if pass_idx[0] and pass_idx[1] and 0.25 not in self.contrast_set:
            self.contrast_set.append(0.25)
        if pass_idx[2] and 0.125 not in self.contrast_set:
            self.contrast_set.append(0.125)
        # Add 6% contrast if ntrials after introducing 0.125 have elapsed
        if self.ntrials_125 >= self.ntrials_to_six and 0.0625 not in self.contrast_set:
            self.contrast_set.append(0.0625)
        # Add 0% contrast if ntrials after introducing 0.125 have elapsed
        if self.ntrials_125 >= self.ntrials_to_zero and 0.0 not in self.contrast_set:
            self.contrast_set.append(0.0)
        # Remove 50% contrast if ntrials after introducing 0.125 have elapsed
        if self.ntrials_125 >= self.ntrials_to_remove_50 and 0.5 in self.contrast_set:
            # Index 1 is expected to hold 0.5 in the descending-sorted pool.
            self.contrast_set.pop(1)
        self.contrast_set = sorted(self.contrast_set)
        self.contrast_set.reverse()

    def _min_trials_at(self, prob, alpha):
        """Number of thresholds k in [0, buffer_size) for which the chance
        of exceeding k correct trials out of buffer_size, under a
        binomial(buffer_size, prob) null, is at least alpha."""
        return int(
            sum(
                1 - st.binom.cdf(range(self.buffer_size), self.buffer_size, prob)
                >= alpha
            )
        )

    def trial_completed(self, trial_correct):
        """Record the outcome of the trial that just finished."""
        self.ntrials += 1
        self.trial_correct = trial_correct

    def next_trial(self, prev_position):
        """Updates obj with behavioral outcome from trial.trial_completed
        and calculates next contrast"""
        self._update_buffer(prev_position)
        self._update_contrast_set()
        self._update_contrast()
        self.trial_correct = None
class RepeatContrast(object):
    """Counter object for repeat trials.

    Keeps the previous contrast while the subject keeps answering
    incorrectly and clears it (exiting repeat mode) after a correct trial.
    """

    # add if not correct contrast don't repeat

    def __init__(self):
        self.type = "RepeatContrast"
        self.ntrials = 0
        self._contrast = None
        self.trial_correct = None

    def reprJSON(self):
        """JSON-serializable view of the trial state.

        NOTE: mirrors the historical behavior of writing the computed
        'value' entry back into the instance dict.
        """
        state = self.__dict__
        state.update({"value": self.value})
        return state

    @property
    def value(self):
        """Contrast to repeat; None when not in repeat mode."""
        return self._contrast

    @value.setter
    def value(self, previous_contrast):
        self._contrast = previous_contrast

    def trial_completed(self, trial_correct):
        """Record the outcome of the trial that just finished."""
        self.ntrials += 1
        self.trial_correct = trial_correct

    def next_trial(self, prev_position):
        """Keep the contrast after a mistake; clear it after a correct
        trial, which exits the repeat-trial regime."""
        if self.trial_correct:
            self.value = None
        self.trial_correct = None
class TrialParamHandler(object):
"""All trial parameters for the current trial.
On self.trial_completed a JSON serializable string containing state/event
data and all the parameters is returned to be printed out and saved by
pybpod under the stdout flag.
self.next_trial calls the update functions of all related objects
"""
    def __init__(self, sph):
        """Build the parameter set for the first trial from the session
        param handler *sph* (provides the PYBPOD_*, STIM_*, ITI_*, OUT_*
        settings and helper objects read below).
        """
        # Constants from settings
        self.init_datetime = parser.parse(sph.PYBPOD_SESSION)
        self.task_protocol = sph.PYBPOD_PROTOCOL
        self.data_file_path = sph.DATA_FILE_PATH
        # Opened in append mode; closed by trial_completed after each write
        self.data_file = open(self.data_file_path, "a")
        self.position_set = sph.STIM_POSITIONS
        self.repeat_on_error = sph.REPEAT_ON_ERROR
        self.repeat_contrasts = sph.REPEAT_CONTRASTS
        self.threshold_events_dict = sph.ROTARY_ENCODER.THRESHOLD_EVENTS
        self.quiescent_period_base = sph.QUIESCENT_PERIOD
        # Quiescence gets a random (truncated exponential) jitter per trial
        self.quiescent_period = self.quiescent_period_base + misc.texp()
        self.response_window = sph.RESPONSE_WINDOW
        self.interactive_delay = sph.INTERACTIVE_DELAY
        self.iti_error = sph.ITI_ERROR
        self.iti_correct_target = sph.ITI_CORRECT
        self.osc_client = sph.OSC_CLIENT
        self.stim_freq = sph.STIM_FREQ
        self.stim_angle = sph.STIM_ANGLE
        self.stim_gain = sph.STIM_GAIN
        self.stim_sigma = sph.STIM_SIGMA
        self.stim_reverse = 0
        self.out_tone = sph.OUT_TONE
        self.out_noise = sph.OUT_NOISE
        self.out_stop_sound = sph.OUT_STOP_SOUND
        self.poop_count = sph.POOP_COUNT
        self.save_ambient_data = sph.RECORD_AMBIENT_SENSOR_DATA
        # Sentinel values until save_ambient_sensor_data provides a reading
        self.as_data = {
            "Temperature_C": -1,
            "AirPressure_mb": -1,
            "RelativeHumidity": -1,
        }
        # Reward amount
        self.reward_amount = sph.REWARD_AMOUNT
        self.reward_valve_time = sph.REWARD_VALVE_TIME
        # Valve-open time counts toward the correct-trial ITI target
        self.iti_correct = self.iti_correct_target - self.reward_valve_time
        # Init trial type objects
        self.ac = AdaptiveContrast(sph)
        self.rc = RepeatContrast()
        # Initialize parameters that may change every trial
        self.contrast_set = sph.CONTRAST_SET
        self.trial_num = 0
        self.position = random.choice(sph.STIM_POSITIONS)
        self.stim_probability_left = sph.STIM_PROBABILITY_LEFT
        self.stim_phase = 0.0
        # Rotary-encoder events: crossing toward the stimulus side is an
        # error, crossing the opposite threshold is rewarded
        self.event_error = self.threshold_events_dict[self.position]
        self.event_reward = self.threshold_events_dict[-self.position]
        self.movement_left = self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[0]]
        self.movement_right = self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[1]]
        # Outcome related parameters
        self.contrast = self.ac
        self.current_contrast = self.contrast.value
        self.signed_contrast = self.contrast.value * np.sign(self.position)
        self.signed_contrast_buffer = [self.signed_contrast]
        # Trial Completed params
        self.elapsed_time = 0
        self.behavior_data = []
        self.response_time = None
        self.response_time_buffer = []
        self.response_buffer = [0] * sph.RESPONSE_BUFFER_LENGTH
        self.response_side_buffer = []
        self.trial_correct = None
        self.ntrials_correct = 0
        self.water_delivered = 0
        # Trials that were not repeats (rc counts repeat trials)
        self.non_rc_ntrials = self.trial_num - self.rc.ntrials
def reprJSON(self):
return self.__dict__
    def trial_completed(self, behavior_data):
        """Update outcome variables using bpod.session.current_trial.

        A state is considered visited when the first timestamp of its
        first entry is not NaN; exactly one of correct/error/no_go is
        expected. Writes one JSON line of the full parameter set to the
        data file and returns self.
        """
        # Update elapsed_time
        self.elapsed_time = datetime.datetime.now() - self.init_datetime
        self.behavior_data = behavior_data
        # Non-NaN first timestamp => the state machine entered that state
        correct = ~np.isnan(self.behavior_data["States timestamps"]["correct"][0][0])
        error = ~np.isnan(self.behavior_data["States timestamps"]["error"][0][0])
        no_go = ~np.isnan(self.behavior_data["States timestamps"]["no_go"][0][0])
        assert correct or error or no_go
        # Add trial's response time to the buffer
        self.response_time = misc.get_trial_rt(self.behavior_data)
        self.response_time_buffer.append(self.response_time)
        # Update response buffer -1 for left, 0 for nogo, and 1 for rightward
        if (correct and self.position < 0) or (error and self.position > 0):
            self.response_buffer = misc.update_buffer(self.response_buffer, 1)
            self.response_side_buffer.append(1)
        elif (correct and self.position > 0) or (error and self.position < 0):
            self.response_buffer = misc.update_buffer(self.response_buffer, -1)
            self.response_side_buffer.append(-1)
        elif no_go:
            self.response_buffer = misc.update_buffer(self.response_buffer, 0)
            self.response_side_buffer.append(0)
        # Update the trial_correct variable
        self.trial_correct = bool(correct)
        # Increment the trial correct counter
        self.ntrials_correct += self.trial_correct
        # Update the water delivered
        if self.trial_correct:
            self.water_delivered += self.reward_amount
        # Propagate outcome to contrast object
        self.contrast.trial_completed(self.trial_correct)
        # Update non repeated trials
        self.non_rc_ntrials = self.trial_num - self.rc.ntrials
        # SAVE TRIAL DATA
        params = self.__dict__.copy()
        # open data_file is not serializable, convert to str
        params["data_file"] = str(params["data_file"])
        params["osc_client"] = "osc_client_pointer"
        params["init_datetime"] = params["init_datetime"].isoformat()
        params["elapsed_time"] = str(params["elapsed_time"])
        # Delete buffered data
        # params['stim_probability_left_buffer'] = ''
        # params['position_buffer'] = ''
        # params['contrast_buffer'] = ''
        params["signed_contrast_buffer"] = ""
        params["response_time_buffer"] = ""
        params["response_side_buffer"] = ""
        # params['trial_correct_buffer'] = ''
        out = json.dumps(params, cls=ComplexEncoder)
        self.data_file.write(out)
        self.data_file.write("\n")
        self.data_file.close()
        # Create transfer_me.flag once, exactly when the 42nd trial completes
        # NOTE(review): condition is == 42, not > 42 as the old comment said
        if self.trial_num == 42:
            misc.create_flags(self.data_file_path, self.poop_count)
        return self
    def check_stop_criterions(self):
        """Evaluate session-level stop conditions.

        Delegates to misc.check_stop_criterions with the session start
        time, the per-trial response times and the trial counter; the
        return value semantics are defined by that helper.
        """
        return misc.check_stop_criterions(
            self.init_datetime, self.response_time_buffer, self.trial_num
        )
    def check_sync_pulses(self):
        """Run the module-level sync_check on this handler.

        NOTE(review): presumably validates hardware sync pulses recorded
        for the session — confirm against sync_check's definition.
        """
        return sync_check(self)
def save_ambient_sensor_data(self, bpod_instance, destination):
if self.save_ambient_data:
self.as_data = ambient_sensor.get_reading(
bpod_instance, save_to=destination
)
return self.as_data
else:
log.info("Ambient Sensor data disabled in task settings")
null_measures = {
"Temperature_C": -1,
"AirPressure_mb": -1,
"RelativeHumidity": -1,
}
self.as_data = null_measures
return self.as_data
def show_trial_log(self):
msg = f"""
##########################################
TRIAL NUM: {self.trial_num}
STIM POSITION: {self.position}
STIM CONTRAST: {self.contrast.value}
STIM PHASE: {self.stim_phase}
STIM PROB LEFT: {self.stim_probability_left}
RESPONSE TIME: | |
= 'direct',
xray_structure = xrs).f_calc()
return get_map_from_map_coeffs(map_coeffs=weight_f_array,
crystal_symmetry=crystal_symmetry)
def get_bounds_for_helical_symmetry(params,
    box=None,crystal_symmetry=None):
  """Return (lower_bounds, upper_bounds) grid bounds that restrict the
  boxed map to +/- restrict_z_distance_for_helical_symmetry (Angstroms)
  around the middle of the current z range.

  params: must define
    params.map_modification.restrict_z_distance_for_helical_symmetry (A).
  box: object with gridding_first/gridding_last (grid index triples) and
    map_data (full map, whose all() gives the grid dimensions).
  crystal_symmetry: symmetry of the full map; its unit cell c length sets
    the z spacing.
  """
  z_first=box.gridding_first[2]
  z_last=box.gridding_last[2]
  assert z_last>=z_first
  z_middle=(z_first+z_last)//2
  # Grid spacing along z in Angstroms: c axis length / number of z grid
  # points. parameters() is (a,b,c,alpha,beta,gamma); index 2 is the c
  # length — index 5 (gamma angle) would yield a meaningless distance.
  delta_z=crystal_symmetry.unit_cell().parameters()[2]/box.map_data.all()[2]
  n_z_max= int(0.5+
   params.map_modification.restrict_z_distance_for_helical_symmetry/delta_z)
  new_z_first=max(z_first,z_middle-n_z_max)
  new_z_last=min(z_last,z_middle+n_z_max)
  # Copy so the caller's gridding lists are not mutated
  lower_bounds=deepcopy(box.gridding_first)
  upper_bounds=deepcopy(box.gridding_last)
  lower_bounds[2]=new_z_first
  upper_bounds[2]=new_z_last
  return lower_bounds,upper_bounds
def check_memory(map_data,ratio_needed,maximum_fraction_to_use=0.90,
    maximum_map_size=1,
    out=sys.stdout):
  """Estimate the memory this stage will need and warn or abort.

  map_data: map whose size() (number of elements) is reported as "GB"
    (elements / 2**30).
  ratio_needed: multiplier converting map size to the memory estimate.
  maximum_fraction_to_use: raise Sorry above this fraction of total RAM.
  maximum_map_size: hard cap (GB) on the map itself; None/0 disables it.
  out: stream for progress messages.
  """
  one_gb=1024*1024*1024
  map_size=map_data.size()/one_gb
  if maximum_map_size and map_size>maximum_map_size:
    raise Sorry("Maximum map size for this tool is %s GB" %(maximum_map_size))
  needed_memory=ratio_needed*map_size
  from libtbx.utils import guess_total_memory # returns total memory
  bytes_total_memory=guess_total_memory()
  # Total RAM in GB when it could be determined, else None
  total_memory=bytes_total_memory/one_gb if bytes_total_memory else None
  print("\nMap size is %.2f GB. This will require about %.1f GB of memory"
    "\nfor this stage of analysis\n" %(map_size,needed_memory), file=out)
  if not total_memory:
    return
  print("Total memory on this computer is about %.1f GB." %(
    total_memory), file=out)
  if needed_memory>=0.5*total_memory:
    print("\n ** WARNING: It is possible that this computer may not"
      " have **\n *** sufficient memory to complete this job. ***\n", file=out)
  if needed_memory>=maximum_fraction_to_use*total_memory:
    raise Sorry("This computer does not have sufficient "
      "memory (%.0f GB needed) \nto run this job" %(needed_memory))
def get_params(args,map_data=None,crystal_symmetry=None,
half_map_data_list=None,
sharpening_target_pdb_inp=None,
ncs_object=None,
sequence=None,
out=sys.stdout):
params=get_params_from_args(args)
print("\nSegment_and_split_map\n", file=out)
print("Command used: %s\n" %(
" ".join(['segment_and_split_map']+args)), file=out)
master_params.format(python_object=params).show(out=out)
# Set space-group defaults
if params.crystal_info.use_sg_symmetry:
if params.map_modification.restrict_map_size is None:
params.map_modification.restrict_map_size=False
if params.crystal_info.is_crystal is None:
params.crystal_info.is_crystal=True
else:
if params.map_modification.restrict_map_size is None:
params.map_modification.restrict_map_size=True
if params.crystal_info.is_crystal is None:
params.crystal_info.is_crystal=False
# Turn off files if desired
if params.control.write_files is False:
params.output_files.magnification_map_file=None
params.output_files.magnification_map_file = None
params.output_files.magnification_ncs_file = None
params.output_files.shifted_map_file = None
params.output_files.shifted_sharpened_map_file = None
params.output_files.sharpened_map_file = None
params.output_files.shifted_pdb_file = None
params.output_files.shifted_ncs_file = None
params.output_files.shifted_used_ncs_file = None
params.output_files.box_map_file = None
params.output_files.box_mask_file = None
params.output_files.write_output_maps = False
params.output_files.remainder_map_file = None
params.output_files.output_info_file = None
params.output_files.restored_pdb = None
params.output_files.output_weight_map_pickle_file = None
from cctbx.maptbx.auto_sharpen import set_sharpen_params
params=set_sharpen_params(params,out)
if params.input_files.seq_file and not params.crystal_info.sequence and \
not sequence:
if not params.crystal_info.sequence:
if sequence:
params.crystal_info.sequence=sequence
else:
params.crystal_info.sequence=open(params.input_files.seq_file).read()
print("Read sequence from %s" %(params.input_files.seq_file), file=out)
if not params.crystal_info.resolution and (
params.map_modification.b_iso is not None or \
params.map_modification.auto_sharpen
or params.map_modification.resolution_dependent_b or
params.map_modification.b_sharpen):
raise Sorry("Need resolution for segment_and_split_map with sharpening")
if params.map_modification.auto_sharpen and (
params.map_modification.b_iso is not None or
params.map_modification.b_sharpen is not None or
params.map_modification.resolution_dependent_b is not None):
print("Turning off auto_sharpen as it is not compatible with "+\
"b_iso, \nb_sharpen, or resolution_dependent_b", file=out)
params.map_modification.auto_sharpen=False
if params.control.write_files and \
params.output_files.output_directory and \
(not os.path.isdir(params.output_files.output_directory)):
os.mkdir(params.output_files.output_directory)
if not params.output_files.output_directory:
params.output_files.output_directory=""
# Test to see if we can use adjusted_sa as target and use box_map with it
if (params.map_modification.residual_target=='adjusted_sa' or
params.map_modification.sharpening_target=='adjusted_sa') and \
(params.map_modification.box_in_auto_sharpen or
params.map_modification.density_select_in_auto_sharpen):
print("Checking to make sure we can use adjusted_sa as target...", end=' ', file=out)
try:
from phenix.autosol.map_to_model import iterated_solvent_fraction
except Exception as e:
raise Sorry("Please either set box_in_auto_sharpen=False and "+
"\ndensity_select_in_auto_sharpen=False or \n"+\
"set residual_target=kurtosis and sharpening_target=kurtosis")
print("OK", file=out)
if not half_map_data_list: half_map_data_list=[]
if params.input_files.info_file:
map_data=None
pdb_hierarchy=None
from libtbx import easy_pickle
print("Loading tracking data from %s" %(
params.input_files.info_file), file=out)
tracking_data=easy_pickle.load(params.input_files.info_file)
return params,map_data,half_map_data_list,pdb_hierarchy,tracking_data,None
else:
tracking_data=info_object()
tracking_data.set_params(params)
# PDB file
if params.input_files.pdb_file:
print("\nInput PDB file to be used to identify region to work with: %s\n" %(
params.input_files.pdb_file), file=out)
pdb_inp = iotbx.pdb.input(file_name=params.input_files.pdb_file)
pdb_hierarchy = pdb_inp.construct_hierarchy()
pdb_atoms = pdb_hierarchy.atoms()
pdb_atoms.reset_i_seq()
tracking_data.set_input_pdb_info(file_name=params.input_files.pdb_file,
n_residues=pdb_hierarchy.overall_counts().n_residues)
else:
pdb_hierarchy=None
if map_data:
pass # ok
elif params.input_files.map_file:
from iotbx import mrcfile
ccp4_map=iotbx.mrcfile.map_reader(
file_name=params.input_files.map_file)
if not crystal_symmetry:
crystal_symmetry=ccp4_map.crystal_symmetry() # 2018-07-18
tracking_data.set_full_crystal_symmetry(
crystal.symmetry(ccp4_map.unit_cell().parameters(),
ccp4_map.space_group_number))
tracking_data.set_full_unit_cell_grid(ccp4_map.unit_cell_grid)
map_data=ccp4_map.data.as_double()
else:
raise Sorry("Need ccp4 map")
if not crystal_symmetry:
raise Sorry("Need crystal_symmetry")
if params.input_files.half_map_file:
if len(params.input_files.half_map_file) != 2:
raise Sorry("Please supply none or two half_map_file values")
from iotbx import mrcfile
half_map_data_list=[]
half_map_data_list.append(iotbx.mrcfile.map_reader(
file_name=params.input_files.half_map_file[0]).data.as_double())
half_map_data_list.append(iotbx.mrcfile.map_reader(
file_name=params.input_files.half_map_file[1]).data.as_double())
# Get the NCS object
ncs_obj,dummy_tracking_data=get_ncs(params=params,
ncs_object=ncs_object,out=out)
if (not params.map_modification.auto_sharpen or
params.map_modification.b_iso is not None) and (
not params.crystal_info.molecular_mass and
not params.crystal_info.solvent_content and
not params.input_files.seq_file and not params.crystal_info.sequence and
not sequence):
params.crystal_info.solvent_content=get_iterated_solvent_fraction(
crystal_symmetry=crystal_symmetry,
verbose=params.control.verbose,
resolve_size=params.control.resolve_size,
mask_padding_fraction=\
params.segmentation.mask_padding_fraction,
fraction_of_max_mask_threshold=\
params.segmentation.fraction_of_max_mask_threshold,
cell_cutoff_for_solvent_from_mask=\
params.segmentation.cell_cutoff_for_solvent_from_mask,
mask_resolution=params.crystal_info.resolution,
map=map_data,
out=out)
if params.crystal_info.solvent_content:
print("Estimated solvent content: %.2f" %(
params.crystal_info.solvent_content), file=out)
else:
raise Sorry("Unable to estimate solvent content...please supply "+
"solvent_content \nor molecular_mass")
if params.map_modification.auto_sharpen or \
params.map_modification.b_iso is not None or \
params.map_modification.b_sharpen is not None or \
params.map_modification.resolution_dependent_b is not None:
# Sharpen the map
print("Auto-sharpening map before using it", file=out)
local_params=deepcopy(params)
if tracking_data.solvent_fraction: # XXX was previously always done but may not have been set
local_params.crystal_info.solvent_content=tracking_data.solvent_fraction
from cctbx.maptbx.auto_sharpen import run as auto_sharpen
acc=map_data.accessor()
map_data,new_map_coeffs,new_crystal_symmetry,new_si=auto_sharpen(
args=[],params=local_params,
map_data=map_data,
crystal_symmetry=crystal_symmetry,
write_output_files=False,
pdb_inp=sharpening_target_pdb_inp,
ncs_obj=ncs_obj,
return_map_data_only=False,
return_unshifted_map=True,
half_map_data_list=half_map_data_list,
n_residues=tracking_data.n_residues,
ncs_copies=ncs_obj.max_operators(),
out=out)
tracking_data.b_sharpen=new_si.b_sharpen
if not tracking_data.solvent_fraction:
tracking_data.solvent_fraction=new_si.solvent_fraction
if tracking_data.params.output_files.sharpened_map_file:
sharpened_map_file=os.path.join(
tracking_data.params.output_files.output_directory,
tracking_data.params.output_files.sharpened_map_file)
sharpened_map_data=map_data.deep_copy()
if acc is not None: # offset the map to match original if possible
sharpened_map_data.reshape(acc)
print("Gridding of sharpened map:", file=out)
print("Origin: ",sharpened_map_data.origin(), file=out)
print("All: ",sharpened_map_data.all(), file=out)
print("\nWrote sharpened map in original location with "+\
"origin at %s\nto %s" %(
str(sharpened_map_data.origin()),sharpened_map_file), file=out)
# NOTE: original unit cell and grid
write_ccp4_map(tracking_data.full_crystal_symmetry,
sharpened_map_file,sharpened_map_data,
output_unit_cell_grid=tracking_data.full_unit_cell_grid,)
params.input_files.map_file=sharpened_map_file # overwrite map_file name here
# done with any sharpening
params.map_modification.auto_sharpen=False# so we don't do it again later
params.map_modification.b_iso=None
params.map_modification.b_sharpen=None
params.map_modification.resolution_dependent_b=None
if params.control.sharpen_only:
print("Stopping after sharpening", file=out)
return
# check on size right away
if params.control.memory_check:
# map_box and mask generation use about 50GB of memory for
# map with 1 billion elements
check_memory(map_data=map_data,maximum_map_size=None,
ratio_needed=50,out=out)
if params.map_modification.magnification and \
params.map_modification.magnification!=1.0:
print("\nAdjusting magnification by %7.3f\n" %(
params.map_modification.magnification), file=out)
if ncs_obj:
# Magnify ncs
print("NCS before applying magnification...", file=out)
ncs_obj.format_all_for_group_specification(out=out)
ncs_obj=ncs_obj.adjust_magnification(
magnification=params.map_modification.magnification)
if params.output_files.magnification_ncs_file:
file_name=os.path.join(params.output_files.output_directory,
params.output_files.magnification_ncs_file)
print("Writing NCS after magnification of %7.3f to %s" %(
params.map_modification.magnification,file_name), file=out)
ncs_obj.format_all_for_group_specification(out=out)
ncs_obj.format_all_for_group_specification(file_name=file_name)
params.input_files.ncs_file=file_name
else:
raise Sorry("Need magnification_ncs_file defined if magnification is"+
" applied \nto input NCS file")
# Magnify map
shrunk_uc = []
for i in range(3):
shrunk_uc.append(
crystal_symmetry.unit_cell().parameters()[i] *
params.map_modification.magnification )
uc_params=crystal_symmetry.unit_cell().parameters()
from cctbx import uctbx
new_unit_cell=uctbx.unit_cell(
parameters=(shrunk_uc[0],shrunk_uc[1],shrunk_uc[2],
uc_params[3],uc_params[4],uc_params[5]))
print("Original unit cell: (%7.4f, %7.4f, %7.4f, %7.4f, %7.4f, %7.4f)" %(
crystal_symmetry.unit_cell().parameters()), file=out)
crystal_symmetry=crystal.symmetry(
unit_cell=new_unit_cell,
space_group=crystal_symmetry.space_group())
print("New unit cell: (%7.4f, %7.4f, %7.4f, %7.4f, %7.4f, %7.4f)" %(
crystal_symmetry.unit_cell().parameters()), file=out)
# magnify original unit cell too..
cell=list(tracking_data.full_crystal_symmetry.unit_cell().parameters())
for i in range(3):
cell[i]=cell[i]*params.map_modification.magnification
tracking_data.set_full_crystal_symmetry(
crystal.symmetry(tuple(cell),ccp4_map.space_group_number))
print("New original (full unit cell): "+\
" (%7.4f, %7.4f, %7.4f, %7.4f, %7.4f, %7.4f)" %(
tracking_data.full_crystal_symmetry.unit_cell.parameters()), file=out)
if params.output_files.magnification_map_file:
file_name=os.path.join(params.output_files.output_directory,
params.output_files.magnification_map_file)
# write out magnified map (our working map) (before shifting it)
print("\nWriting magnification map (input map with "+\
"magnification of %7.3f \n" %(params.map_modification.magnification) +\
"applied) to %s \n" %(file_name), file=out)
#write_ccp4_map(crystal_symmetry,file_name,map_data)
# NOTE: original unit cell and grid
write_ccp4_map(tracking_data.full_crystal_symmetry,
file_name,map_data,
output_unit_cell_grid=tracking_data.original_unit_cell_grid,)
params.input_files.map_file=file_name
else:
raise Sorry("Need a file name to write out magnification_map_file")
params.map_modification.magnification=None # no longer need it.
tracking_data.set_input_map_info(file_name=params.input_files.map_file,
crystal_symmetry=crystal_symmetry,
origin=map_data.origin(),
all=map_data.all())
tracking_data.set_crystal_symmetry(crystal_symmetry=crystal_symmetry)
tracking_data.set_original_crystal_symmetry(crystal_symmetry=crystal_symmetry)
tracking_data.set_accessor(acc=map_data.accessor())
# Save center of map
map_symmetry_center=get_center_of_map(map_data,crystal_symmetry)
# Check for helical ncs...if present we may try to cut map at +/- 1 turn
params.map_modification.restrict_z_distance_for_helical_symmetry=\
get_max_z_range_for_helical_symmetry(params,out=out)
# either use map_box with density_select=True or just shift the map
if params.segmentation.density_select:
print("\nTrimming map to density...", file=out)
args= ["output_format=ccp4"]
if params.segmentation.density_select_threshold is not None:
print("Threshold for density selection will be: %6.2f \n"%(
params.segmentation.density_select_threshold), file=out)
args.append("density_select_threshold=%s" %(
params.segmentation.density_select_threshold))
if params.segmentation.get_half_height_width is not None:
args.append("get_half_height_width=%s" %(
params.segmentation.get_half_height_width))
if params.input_files.ncs_file:
args.append("symmetry_file=%s" %(params.input_files.ncs_file))
if params.input_files.pdb_file:
args.append("pdb_file=%s" %(params.input_files.pdb_file))
args.append("ccp4_map_file=%s" %(params.input_files.map_file))
file_name_prefix=os.path.join(params.output_files.output_directory,
"density_select")
args.append("output_file_name_prefix=%s" %(file_name_prefix))
from mmtbx.command_line.map_box import run as run_map_box
args.append("keep_input_unit_cell_and_grid=False") # for new defaults
if params.segmentation.lower_bounds and params.segmentation.upper_bounds:
bounds_supplied=True
print("\nRunning map_box with supplied bounds", file=out)
box=run_map_box(args,
map_data=map_data,
ncs_object=ncs_obj,
crystal_symmetry=crystal_symmetry,
lower_bounds=params.segmentation.lower_bounds,
upper_bounds=params.segmentation.upper_bounds,
write_output_files=params.output_files.write_output_maps,
log=out)
else:
bounds_supplied=False
box=run_map_box(["density_select=True"]+args,
map_data=map_data,
crystal_symmetry=crystal_symmetry,
ncs_object=ncs_obj,
write_output_files=params.output_files.write_output_maps,
log=out)
# Run again to select au box
shifted_unique_closest_sites=None
selected_au_box=None
if params.segmentation.select_au_box is None and box.ncs_object and \
box.ncs_object.max_operators() >= params.segmentation.n_ops_to_use_au_box:
params.segmentation.select_au_box=True
print("Setting select_au_box to True as there are %d operators" %(
box.ncs_object.max_operators()), file=out)
if params.segmentation.select_au_box and not bounds_supplied:
lower_bounds,upper_bounds,unique_closest_sites=get_bounds_for_au_box(
params, box=box,out=out) #unique_closest_sites relative to original map
if lower_bounds and upper_bounds:
bounds_supplied=True
selected_au_box=True
score,ncs_cc=score_ncs_in_map(map_data=box.map_box,
allow_score_with_pg=False,
sites_orth=unique_closest_sites+box.shift_cart,
ncs_object=box.ncs_object,ncs_in_cell_only=True,
crystal_symmetry=box.box_crystal_symmetry,out=null_out())
print("NCS CC before rerunning box: %7.2f SCORE: %7.1f OPS: %d " %(
ncs_cc,score,box.ncs_object.max_operators()), file=out)
print("\nRunning map-box again with boxed range ...", file=out)
del box
box=run_map_box(args,lower_bounds=lower_bounds,
map_data=map_data,
crystal_symmetry=crystal_symmetry,
ncs_object=ncs_obj,
upper_bounds=upper_bounds, log=out)
box.map_box=box.map_box.as_double() # Do we need double?
shifted_unique_closest_sites=unique_closest_sites+box.shift_cart
# Or run again for helical symmetry
elif params.map_modification.restrict_z_distance_for_helical_symmetry and \
not bounds_supplied:
bounds_supplied=True
lower_bounds,upper_bounds=get_bounds_for_helical_symmetry(params,
crystal_symmetry=crystal_symmetry,box=box)
print("\nRunning map-box again with restricted Z range ...", file=out)
box=run_map_box(args,
map_data=map_data,
crystal_symmetry=crystal_symmetry,
ncs_object=ncs_obj,
lower_bounds=lower_bounds,upper_bounds=upper_bounds,
write_output_files=params.output_files.write_output_maps,
log=out)
#-----------------------------
if bounds_supplied and box.ncs_object:
print("Selecting remaining NCS operators", file=out)
box.ncs_object=select_remaining_ncs_ops(
map_data=box.map_box,
crystal_symmetry=box.box_crystal_symmetry,
closest_sites=shifted_unique_closest_sites,
random_points=params.reconstruction_symmetry.random_points,
ncs_object=box.ncs_object,
out=out)
score,ncs_cc=score_ncs_in_map(map_data=box.map_box,
allow_score_with_pg=False,
ncs_object=box.ncs_object,ncs_in_cell_only=True,
sites_orth=shifted_unique_closest_sites,
crystal_symmetry=box.box_crystal_symmetry,out=null_out())
if score is not None:
print("NCS CC after selections: %7.2f SCORE: %7.1f OPS: %d" %(
ncs_cc,score,box.ncs_object.max_operators()), file=out)
#-----------------------------
origin_shift=box.shift_cart
# Note: moving cell with (0,0,0) in | |
#!/usr/bin/python
# -*- coding: ascii -*-
#
# Copyright and User License
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright <EMAIL> for the
# European Organization for Nuclear Research (CERN)
#
# Please consult the flair documentation for the license
#
# DISCLAIMER
# ~~~~~~~~~~
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY, OF
# SATISFACTORY QUALITY, AND FITNESS FOR A PARTICULAR PURPOSE
# OR USE ARE DISCLAIMED. THE COPYRIGHT HOLDERS AND THE
# AUTHORS MAKE NO REPRESENTATION THAT THE SOFTWARE AND
# MODIFICATIONS THEREOF, WILL NOT INFRINGE ANY PATENT,
# COPYRIGHT, TRADE SECRET OR OTHER PROPRIETARY RIGHT.
#
# LIMITATION OF LIABILITY
# ~~~~~~~~~~~~~~~~~~~~~~~
# THE COPYRIGHT HOLDERS AND THE AUTHORS SHALL HAVE NO
# LIABILITY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL,
# CONSEQUENTIAL, EXEMPLARY, OR PUNITIVE DAMAGES OF ANY
# CHARACTER INCLUDING, WITHOUT LIMITATION, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES, LOSS OF USE, DATA OR PROFITS,
# OR BUSINESS INTERRUPTION, HOWEVER CAUSED AND ON ANY THEORY
# OF CONTRACT, WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT
# LIABILITY OR OTHERWISE, ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGES.
#
# Author: <EMAIL>
# Date: 29-Nov-2009
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import os
import time
import fnmatch
from stat import *
if "_" not in dir(): _ = lambda x: x
try:
from Tkinter import *
import tkMessageBox as messagebox
except ImportError:
from tkinter import *
import tkinter.messagebox as messagebox
import tkExtra
# Pseudo-type labels shown in the "Type" column for non-extension entries
_DIR_TYPE = " <DIR>"
_FILE_TYPE = "-file-"
_LINK_TYPE = "-link-"
_BACKUP_TYPE = "-backup-"
# strftime format used for the "Date" column
_TIME_FORMAT = "%Y.%m.%d %H:%M:%S"
# Foreground colour used for disabled (filtered-out) files
DISABLE_FILE = "DarkGray"
# Foreground colour for file-list entries, keyed by filename extension
# (lowercased) or by one of the pseudo-type keys defined above
COLORS = {
	"gz":	"Red",
	"tgz":	"Red",
	"zip":	"Red",
	"tbz":	"Red",
	"rpm":	"Red",
	"deb":	"Red",
	"flair":"DarkGreen",
	"fluka":"#109010",
	"inp":	"#109010",
	"out":	"#109010",
	"log":	"#109010",
	"err":	"Red",
	"lis":	"#109010",
	"py":	"Blue",
	"f":	"Blue",
	"F":	"Blue",
	"for":	"Blue",
	"FOR":	"Blue",
	"c":	"Blue",
	"C":	"Blue",
	"cc":	"Blue",
	"cpp":	"Blue",
	"a":	"Blue",
	"so":	"Blue",
	"eps":	"Purple",
	"ps":	"Purple",
	"gif":	"Purple",
	"png":	"Purple",
	"jpg":	"Purple",
	"bmp":	"Purple",
	"tif":	"Purple",
	"vxl":	"DarkRed",
	"dcm":	"DarkRed",
	"ngc":	"Brown",
	"nc" :	"Brown",
	"probe": "DarkOrange",
	"stl" : "DarkOrange",
	_LINK_TYPE:	"DarkCyan",
	_BACKUP_TYPE:	"DarkGray",
	_DIR_TYPE:	"DarkBlue",
}
# Human-readable description shown in the "Type" column, keyed exactly
# like COLORS (extension or pseudo-type); unknown extensions fall back to
# the raw extension string in fileTypeColor()
DESCRIPTION = {
	"gz":	"Package gzip",
	"tgz":	"Package tgz",
	"zip":	"Package zip",
	"tbz":	"Package tbz",
	"rpm":	"Package rpm",
	"deb":	"Package deb",
	"flair":"FLAIR",
	"fluka":"Input",
	"inp":	"Input",
	"out":	"Output",
	"log":	"Log",
	"err":	"Error",
	"lis":	"Listing",
	"py":	"Python",
	"f":	"Fortran",
	"F":	"Fortran",
	"for":	"Fortran",
	"FOR":	"Fortran",
	"c":	"C",
	"C":	"C",
	"cc":	"C++",
	"cpp":	"C++",
	"a":	"Lib a",
	"so":	"Lib so",
	"eps":	"Image eps",
	"gif":	"Image gif",
	"jpg":	"Image jpg",
	"png":	"Image png",
	"bmp":	"Image bmp",
	"ps":	"Image ps",
	"tif":	"Image tif",
	"vxl":	"Voxel",
	"dcm":	"Dicom",
	_LINK_TYPE :	_LINK_TYPE,
	_BACKUP_TYPE :	_BACKUP_TYPE,
	_DIR_TYPE :	_DIR_TYPE,
}
# Converted from GIF to base64 PhotoImage
# base64.encodestring(open(iconfile,"rb").read())
# NOTE(review): base64.encodestring was removed in Python 3.9 — the
# modern equivalent for regenerating this data is base64.encodebytes
_ICON = """
R0lGODlhEAAQAMZcABAQECEQECEYEDEgITEwIUI4MVJJQmNJIWNZQmNhWvw/Afw+DnNhUvxADv9B
D3NlSoRpIZRxIYRxUvRbM4R5Y6V5IfxhHvpnGvlsAZSCY7WCEPFwM5SKc5yKa5SKhPJ4QMaKENaK
EMaSAJyShPqDHqWShNaSAOuDZdaSEP+BSaWahP+LPqWilLWihLWilOWWfueiIeeiMbWqlP+ZauKg
<KEY>
"""
_history = []

#-------------------------------------------------------------------------------
def append2History(path):
	"""Record *path* in the shared directory history, ignoring empty
	values and duplicates."""
	if path and path not in _history:
		_history.append(path)
#-------------------------------------------------------------------------------
def fileTypeColor(filename):
	"""Classify *filename* and return a (description, color) pair.

	The description comes from DESCRIPTION (falling back to the raw
	lowercase extension) and the color from COLORS; directories, links
	and backup files ("name~") get their dedicated pseudo-type entries.
	"""
	base = os.path.basename(filename)
	dot = base.rfind(".")
	ext = base[dot+1:].lower() if dot >= 0 else _FILE_TYPE
	color = None	# default: no special colour
	try:
		info = os.lstat(filename)
	except:
		# Unreadable/missing entries keep the defaults
		return ext, color
	mode = info[ST_MODE]
	if S_ISDIR(mode):
		return DESCRIPTION.get(_DIR_TYPE), COLORS.get(_DIR_TYPE)
	if S_ISLNK(mode):
		return DESCRIPTION.get(_LINK_TYPE), COLORS.get(_LINK_TYPE)
	if base[-1] == "~":
		color = COLORS.get(_BACKUP_TYPE)
	else:
		color = COLORS.get(ext, color)
	ext = DESCRIPTION.get(ext, ext)
	return ext, color
#===============================================================================
# FileDialog
#===============================================================================
class FileDialog(Toplevel):
	"""Modal file/directory selection dialog built on tkExtra.

	The class attributes below persist user preferences (geometry, sash
	positions, sort order) across dialog instances for the session.
	"""
	_active = False # Avoid re-entrance of the dialog if by accident
			# someone double clicks a button
	sort = None # remembered sort order of the file list
	width = -1 # remembered window width (-1 = use default size)
	height = -1 # remembered window height (-1 = use default size)
	sash = None # remembered sash positions of the multi-column list
	newfolder = None # lazily-created PhotoImage for the new-folder button
	# ----------------------------------------------------------------------
	def __init__(self, title=None,
			master=None,
			initialdir=None,
			initialfile=None,
			defaultextension=None,
			multiple=False,
			filetypes=None,
			**kw):
		"""Build the dialog widgets.

		initialdir/initialfile preset the starting directory and the
		file-name entry; multiple enables extended selection in the file
		list; filetypes is a sequence of (description, extension(s))
		pairs for the type combo.
		"""
		Toplevel.__init__(self)
		self.transient(master)
		self.title(title)
		self.protocol("WM_DELETE_WINDOW", self.close)
		FileDialog._active = True
		if title is None: title = self._title
		# --- top row: directory path buttons + history + new folder ---
		self.dirframe = Frame(self)
		self.dirframe.pack(side=TOP, fill=X)
		Label(self.dirframe, text=_("Directory:")).grid(
				row=0, column=0)
		self.downButton = Label(self.dirframe, text=u"\u25BC",
				padx=3, pady=1, relief=RAISED)
		self.downButton.bind("<Button-1>", self.history)
		self.downButton.grid(row=0, column=99)
		if FileDialog.newfolder is None:
			# Created once and shared by all dialog instances
			FileDialog.newfolder = PhotoImage(data=_ICON)
		Button(self.dirframe, image=FileDialog.newfolder,
				padx=3, pady=3,
				command=self.newFolder).grid(
				row=0, column=100)
		self.dirframe.grid_columnconfigure(98,weight=1)
		self.buttons = []
		self.multiple = multiple
		if multiple:
			selectmode = EXTENDED
		else:
			selectmode = BROWSE
		# --- central multi-column file listing ---
		self.fileList = tkExtra.ColorMultiListbox(self,
				((_("Name"), 30, None),
				 (_("Type"), 12, None),
				 (_("Size"),  8, None),
				 (_("Date"), 17, None)),
				height=20,
				selectmode=selectmode)
		self.fileList.pack(expand=YES, fill=BOTH)
		self.fileList.setPopupMenu(
				[('Rename', 0, self.rename),
				 ('Delete', 0, self.delete),
				 ('New Folder', 0, self.newFolder) ])
		self.fileList.bindList("<Double-1>", self.double)
		self.fileList.bindList('<Return>', self.double)
		self.fileList.bindList('<F2>', self.rename)
		self.fileList.bindList("<Key-BackSpace>", self.upDir)
		self.fileList.bind("<<ListboxSelect>>", self.select)
		self.fileList.bind("<<ListboxSort>>", self.sortChanged)
		self.fileList.bind("<Configure>", self.resize)
		# --- bottom rows: file name entry, type filter, buttons ---
		frame = Frame(self)
		frame.pack(side=BOTTOM, fill=X)
		l = Label(frame, text=_("File name:"))
		l.grid(row=0, column=0, sticky=E)
		self.filename = Entry(frame, background="White")
		self.filename.grid(row=0, column=1, sticky=EW)
		self.ok = Button(frame, text=_("Open"), command=self.openFilename)
		self.ok.grid(row=0, column=2, sticky=EW)
		l = Label(frame, text=_("Files of type:"))
		l.grid(row=1, column=0, sticky=E)
		self.typeCombo = tkExtra.Combobox(frame, command=self.typeChange)
		self.typeCombo.grid(row=1, column=1, sticky=NSEW)
		self.filter = None
		self.filetypes = {}
		if filetypes:
			# A single (desc, ext) pair is accepted and wrapped
			if isinstance(filetypes[0],str):
				filetypes = (filetypes,)
			self.filter = filetypes[0]
			for desc,ext in filetypes:
				if isinstance(ext,str):
					s = "%s (%s)"%(desc,ext)
					ext = (ext,)
				else:
					s = "%s (%s)"%(desc, ",".join(ext))
				self.typeCombo.insert(END, s)
				self.filetypes[s] = ext
		b = Button(frame, text=_("Cancel"), command=self.cancel)
		b.grid(row=1, column=2, sticky=EW)
		frame.grid_columnconfigure(1, weight=1)
		self.bind("<Return>", self.openFilename)
		self.bind("<Escape>", self.cancel)
		# Variables
		# 1st set the path if any
		if initialdir:
			self.path = os.path.abspath(initialdir)
		else: # otherwise to the current directory
			self.path = os.getcwd()
		if initialfile:
			# if a file is specified and has a different path
			initialfile = os.path.abspath(initialfile)
			d,n = os.path.split(initialfile)
			if d != "": self.path = d
			self.filename.insert(0, n)
			self.filename.select_range(0,END)
		# Flags
		self.hidden = False # Show hidden files
		self.links = True # Show links
		self.dirs = True # Show directories
		self.files = True # Show files
		self.seldir = False # Select directory instead of file
		self.selFile = "" # Selected files
		append2History(self.path)
		# popup history
		self._popup = None
		self._historyOldLen = len(_history)
# ----------------------------------------------------------------------
    def show(self):
        """Display the dialog modally and block until it is closed.

        Restores the geometry remembered from the previous invocation,
        rebuilds the path buttons and triggers the first fill via the
        type combobox.  Returns ``self.selFile`` (the selection made, or
        "" when cancelled)."""
        self.deiconify()
        if FileDialog.width > 0:
            # Restore the size saved by close() on a previous run.
            self.geometry("%dx%d" \
                %(FileDialog.width, FileDialog.height))
        self.buttonPath(self.path)
        self.typeCombo.set(self.typeCombo.get(0)) # will fill the files
        try:
            self.lift()
            self.focus_set()
            self.filename.focus_set()
            self.wait_visibility()
            self.grab_set()         # modal: route all events here
            self.wait_window()      # block until close() destroys us
        except TclError:
            # The window can be destroyed while we wait; treat as done.
            pass
        return self.selFile
# ----------------------------------------------------------------------
# Restore sash size on creation
# ----------------------------------------------------------------------
    def resize(self, event):
        """<Configure> handler: on the first layout pass restore the sash
        positions of the multi-column file list saved by close()."""
        if FileDialog.sash is not None:
            # Saved left-to-right; reversed here so they can be placed
            # right-to-left below without disturbing each other.
            FileDialog.sash.reverse()
            # XXX XXX ERROR: paneframe doesn't update the width/height
            # so all sash placement is wrong
            self.fileList.paneframe.update()
            #self.fileList.paneframe.update_idletasks()
        if FileDialog.sash is not None:
            n = len(FileDialog.sash)-1
            for i,x in enumerate(FileDialog.sash):
                self.fileList.paneframe.sash_place(n-i,x,1)
            # Only restore once per dialog lifetime.
            FileDialog.sash = None
# ----------------------------------------------------------------------
# Create buttons for the path
# ----------------------------------------------------------------------
    def buttonPath(self, path):
        """Rebuild the row of path-component buttons to reflect *path*,
        re-using existing buttons where possible.

        Ancestor components are colored black, the current component
        blue, and any stale trailing buttons dark gray."""
        path = path.split(os.sep)
        if path[0] == "": path[0] = os.sep      # absolute path: first component is the root
        if path[-1] == "": del path[-1]         # drop empty tail from a trailing separator
        lp = len(path)
        lb = len(self.buttons)
        i = 0
        same = True
        # Relabel the overlapping prefix of already-existing buttons.
        while i < min(lp, lb):
            b = self.buttons[i]
            if path[i] != b["text"]:
                b["text"] = path[i]
                same = False
            i += 1
        if lb < lp: # Create new buttons
            while i < lp:
                # b=i is captured by value in the lambda default.
                self.buttons.append(Button(self.dirframe,
                        text=path[i],
                        command=lambda s=self,b=i:s.button(b),
                        padx=1))
                i += 1
                self.buttons[-1].grid(row=0, column=i)
        elif lp < lb and not same: # Use existing buttons
            # The path changed and got shorter: discard surplus buttons.
            while i < lb:
                self.buttons[i].grid_forget()
                i += 1
            del self.buttons[lp:]
        # Coloring: ancestors black, current blue, leftovers gray.
        for i in range(lp):
            self.buttons[i]["foreground"] = "black"
            self.buttons[i]["activeforeground"] = "black"
        self.buttons[lp-1]["foreground"] = "blue"
        self.buttons[lp-1]["activeforeground"] = "blue"
        for i in range(lp, len(self.buttons)):
            self.buttons[i]["foreground"] = "darkgray"
            self.buttons[i]["activeforeground"] = "darkgray"
# ----------------------------------------------------------------------
def button(self, b):
path = [ x["text"] for x in self.buttons[0:b+1] ]
if path[0] == os.sep:
path = os.sep + os.sep.join(path[1:])
else:
path = os.sep.join(path)
if path=="": path=os.sep
self.fileList.focus_set()
self.changePath(path)
# ----------------------------------------------------------------------
    def history(self, event=None):
        """Toggle a borderless popup listing the visited-path history,
        positioned just below the path-button row."""
        if self._popup is not None:
            # Second click on the history button closes the popup.
            self._historyDestroy()
            return
        self.downButton["relief"] = SUNKEN
        self._popup = Toplevel(self)
        self._popup.transient(self)
        self._popup.overrideredirect(1)     # no window-manager decorations
        self._popup.withdraw()              # keep hidden until positioned
        self._popup.bind('<Escape>', self._historyDestroy)
        self._popup.bind('<FocusOut>', self._historyFocusOut)
        # Span from the first path button to the right edge of the
        # down (history) button, directly below the button row.
        x = self.buttons[0].winfo_rootx()
        y = self.buttons[0].winfo_rooty() + self.buttons[0].winfo_height() - 2
        w = self.downButton.winfo_rootx() + self.downButton.winfo_width() - x
        h = self.fileList.winfo_height() - 20
        self._popup.geometry('%dx%d+%d+%d' % (w,h,x,y))
        sb = Scrollbar(self._popup, orient=VERTICAL, takefocus=False)
        sb.pack(side=RIGHT, fill=Y)
        self._popupList = Listbox(self._popup,
                    background="White",
                    selectmode=BROWSE,
                    takefocus=True,
                    yscrollcommand=sb.set)
        self._popupList.pack(side=LEFT, fill=BOTH, expand=YES)
        self._popupList.bind("<ButtonRelease-1>", self._historyClick)
        self._popupList.bind("<Return>", self._historyClick)
        sb.config(command=self._popupList.yview)
        # NOTE(review): 'h' is reused as the loop variable here,
        # clobbering the height computed above (harmless but confusing).
        for h in sorted(_history):
            self._popupList.insert(END, h)
        self._popupList.selection_set(0)
        self._popupList.activate(0)
        self.grab_release()                 # hand the event grab to the popup
        self._popup.deiconify()
        self._popup.lift()
        self._popup.focus_set()
        self._popupList.focus_set()
        self._popup.update_idletasks()
# ----------------------------------------------------------------------
def _historyFocusOut(self, event):
try:
f = self.focus_get()
except KeyError:
pass
else:
if f == self._popup or f == self._popupList:
return
self._historyDestroy()
# ----------------------------------------------------------------------
def _historyClick(self, event):
try:
sel = self._popupList.curselection()[0]
self.changePath(self._popupList.get(sel))
self._historyDestroy()
except:
pass
# ----------------------------------------------------------------------
def _historyDestroy(self, event=None):
self.downButton["relief"] = RAISED
self._popup.destroy()
self._popup = None
self.grab_set()
self.focus_set()
return "break"
# ----------------------------------------------------------------------
def upDir(self, event):
if not tkExtra.ExListbox._search:
self.changePath(os.path.dirname(self.path))
else:
event.widget.handleKey(event)
# ----------------------------------------------------------------------
def changePath(self, path):
if path[-1] != os.sep: path += os.sep
path = os.path.abspath(path)
try: os.lstat(path)
except OSError:
messagebox.showerror(_("Error"),
_("Cannot access path \"%s\"")%(path),
parent=self)
return
self.buttonPath(path)
self.path = path
self.fill()
append2History(self.path)
# ----------------------------------------------------------------------
    def fill(self, path=None):
        """Refill the file listing with the contents of *path* (default:
        the current directory), honoring the hidden/dirs/links/files
        flags and the active glob filter, then restore the sort order
        and re-select the filename currently typed in the entry."""
        self.fileList.delete(0,END)
        self.fileList.listbox(0).resetSearch()
        if path is None: path = self.path
        # Populate list but sorted
        try:
            for fn in os.listdir(path):
                # Skip dot-files unless "show hidden" is enabled.
                if not self.hidden and fn[0]==".": continue
                filename = os.path.join(path, fn)
                ext, color = fileTypeColor(filename)
                # NOTE(review): bare except also hides non-OS errors.
                try: s = os.lstat(filename)
                except: continue
                size = 0
                islnk = S_ISLNK(s[ST_MODE])
                if islnk:
                    # Stat the link target; skip broken links.
                    try: s = os.stat(filename)
                    except: continue
                isdir = S_ISDIR(s[ST_MODE])
                # The glob filter applies to plain files only;
                # directories and links always "match".
                if self.filter is not None and not isdir and not islnk:
                    match = False
                    for pat in self.filter:
                        if fnmatch.fnmatch(fn, pat):
                            match = True
                            break
                else:
                    match = True
                if isdir:
                    if not self.dirs: continue
                elif islnk:
                    if not self.links: continue
                else:
                    size = s[ST_SIZE]
                if match:
                    self.fileList.insert(END, (fn, ext, size,
                            time.strftime(_TIME_FORMAT,
                                time.localtime(s[ST_MTIME]))))
                    # Gray-out files when only directories are selectable.
                    if not self.files and not isdir:
                        self.fileList.setColor(END, DISABLE_FILE)
                    elif color:
                        self.fileList.setColor(END, color)
        except OSError:
            messagebox.showerror(_("Error"),
                _("Error listing folder \"%s\"")%(path),
                parent=self)
        if FileDialog.sort is None:
            self.fileList.sort(0, False) # First short by name
            # Move all directories to top
            self.fileList.sort(1, False) # then by type
            FileDialog.sort = None
        else:
            self.fileList.restoreSort(FileDialog.sort)
        # Find item to select
        fn = self.filename.get()
        if self.seldir: fn = os.path.basename(fn)
        for i in range(self.fileList.size()):
            if fn == self.fileList.listbox(0).get(i):
                self.fileList.see(i)
                self.fileList.activate(i)
                self.fileList.selection_set(i)
                break
        else:
            # Nothing matched: fall back to the first entry.
            self.fileList.see(0)
            self.fileList.activate(0)
            self.fileList.selection_set(0)
# ----------------------------------------------------------------------
    def sortChanged(self, event=None):
        """<<ListboxSort>> handler: remember the chosen sort order on the
        class so it survives across dialog invocations."""
        FileDialog.sort = self.fileList.saveSort()
# ----------------------------------------------------------------------
    def open(self, fn):
        """Act on *fn* taken from the filename entry.

        Depending on its content this either descends into a directory,
        installs it as a glob filter, completes a missing extension from
        the active filter, or accepts it (them, for multi-selection) as
        the final result and closes the dialog."""
        if self.seldir and self.path == fn:
            # Directory-selection mode with the current path typed in.
            self.selFile = self.path
        # Single file selection
        elif fn.find('","')<0:
            # Check for path
            try:
                filename = os.path.join(self.path, fn)
                s = os.stat(filename)
                if S_ISDIR(s[ST_MODE]):
                    self.changePath(filename)
                    self.filename.delete(0,END)
                    return
            except OSError:
                pass
            # Check for a pattern
            if fn.find('*')>=0 or fn.find('?')>=0:
                self.filter = (fn,)
                self.fill()
                return
            # Check for extension
            if self.filter:
                fn,ext = os.path.splitext(filename)
                if ext == "":
                    # No extension typed: borrow the one from the first
                    # pattern of the active filter, if it has one.
                    ffn,ffext = os.path.splitext(self.filter[0])
                    if ffext!="":
                        filename = fn+ffext
            if self.multiple:
                self.selFile = [filename]
            else:
                self.selFile = filename
        # Multiple file selection
        else:
            # Entry holds "a","b","c": strip outer quotes, split on '","'.
            self.selFile = [os.path.join(self.path,f) \
                    for f in fn[1:-1].split('","')]
        if self.check():
            global _history
            # Delete all temporary directories and keep only the last one
            if len(_history) > self._historyOldLen:
                del _history[self._historyOldLen:]
            append2History(self.path)
            self.close()
# ----------------------------------------------------------------------
# Open the filename entered in the entry box
# ----------------------------------------------------------------------
def openFilename(self, event=None):
fn = self.filename.get()
# Single file selection?
if fn=="":
self.select()
fn = self.filename.get()
if fn=="": return
self.open(fn)
# ----------------------------------------------------------------------
def check(self): return True
# ----------------------------------------------------------------------
def cancel(self, event=None):
if self._popup is not None:
self._historyDestroy()
else:
self.selFile = ""
self.close()
# ----------------------------------------------------------------------
    def close(self):
        """Destroy the dialog, remembering geometry, sash positions and
        search state on the class for the next invocation."""
        FileDialog._active = False
        FileDialog.width = self.winfo_width()
        FileDialog.height = self.winfo_height()
        # One sash per boundary between adjacent listbox columns.
        FileDialog.sash = [self.fileList.paneframe.sash_coord(i)[0]
                for i in range(len(self.fileList.listboxes())-1)]
        tkExtra.ExListbox.resetSearch()
        self.grab_release()
        self.destroy()
# ----------------------------------------------------------------------
def double(self, event):
sel = self.fileList.curselection()
if len(sel)!=1: return
item = self.fileList.get(sel[0])
if item[1] == _DIR_TYPE:
self.changePath(os.path.join(self.path, item[0]))
return "break"
elif item[1] == _LINK_TYPE:
# maybe a directory?
path = os.path.join(self.path, item[0])
try:
s = os.stat(path)
if S_ISDIR(s[ST_MODE]):
self.changePath(path)
return "break"
except: pass
self.openFilename()
# ----------------------------------------------------------------------
# Select current file from listbox
# ----------------------------------------------------------------------
    def select(self, event=None):
        """<<ListboxSelect>> handler: mirror the listbox selection into
        the filename entry (quoted and comma-joined when several items
        are selected)."""
        sel = self.fileList.curselection()
        if len(sel)==1:
            item = self.fileList.get(sel[0])[0]
            fn = os.path.join(self.path, item)
            if self.seldir:
                # Directory mode: show the directory containing the item.
                try:
                    s = os.stat(fn)
                    if not S_ISDIR(s[ST_MODE]):
                        fn = os.path.dirname(fn)
                except OSError:
                    pass
                self.filename.delete(0, END)
                self.filename.insert(0, fn)
            else:
                # File mode: copy only plain files into the entry.
                try:
                    s = os.stat(fn)
                    if not S_ISDIR(s[ST_MODE]):
                        self.filename.delete(0, END)
                        self.filename.insert(0, item)
                except OSError:
                    pass
        else:
            lget = self.fileList.get
            files = ["\"%s\""%(lget(i)[0]) for i in sel]
            if files:
                self.filename.delete(0, END)
                self.filename.insert(0, ",".join(files))
            elif self.seldir:
                # Nothing selected in directory mode: use the path itself.
                self.filename.delete(0, END)
                self.filename.insert(0, self.path)
# ----------------------------------------------------------------------
    def typeChange(self, event=None):
        """File-type combobox callback: install the new glob filter,
        refill the listing and, when the typed filename's extension no
        longer matches any pattern, swap it for the filter's first
        extension."""
        pat = self.typeCombo.get()
        self.filter = self.filetypes.get(pat,None)
        self.fill()
        if self.filter is None or self.seldir: return
        # Change extension if needed
        first = None
        filename = self.filename.get()
        if filename == "" or "," in filename: return
        fn,ext = os.path.splitext(filename)
        for i in self.filter:
            f,e = os.path.splitext(i)
            if first is None and e: first = e   # remember the first extension seen
            if e == ext: return                 # current extension is acceptable
        else:
            # NOTE(review): the loop has no break, so this else clause
            # always runs when no pattern matched the extension.
            if first:
                # not found, change the filename to the first extension
                self.filename.delete(0, END)
                self.filename.insert(0, fn+first)
# ----------------------------------------------------------------------
    def newFolder(self):
        """Append a provisional "NewFolder" row, let the user edit its
        name in place, then create the directory on disk (rolling the
        row back on error or cancel)."""
        self.fileList.insert(END, (_("NewFolder"), _DIR_TYPE, 0,
                time.strftime(_TIME_FORMAT,
                    time.localtime(time.time()))))
        self.fileList.see(END)
        self.fileList.selection_clear(0,END)
        self.fileList.selection_set(END)
        self.fileList.activate(END)
        # Blocks until the in-place edit is finished or aborted.
        edit = tkExtra.InPlaceEdit(self.fileList.listbox(0))
        if edit.value:
            try:
                os.mkdir(os.path.join(self.path, edit.value))
            except OSError:
                messagebox.showerror(_("Error"),
                    _("Error creating folder \"%s\"")%(edit.value),
                    parent=self)
                self.fileList.delete(END)
                return
            self.fileList.selection_set(END)
            self.fileList.see(END)
            self.fileList.setColor(END, COLORS.get(_DIR_TYPE))
            self.select()
        else:
            # Edit aborted: drop the provisional row.
            try:
                self.fileList.delete(END)
            except TclError:
                pass
# ----------------------------------------------------------------------
def rename(self,event=None):
fn = self.fileList.listbox(0).get(ACTIVE)
edit = tkExtra.InPlaceEdit(self.fileList.listbox(0))
if edit.value and edit.value != fn:
try:
os.rename(os.path.join(self.path, fn),
os.path.join(self.path, edit.value))
except OSError:
messagebox.showerror(_("Error"),
_("Error renaming \"%s\" to \"%s\"") \
%(fn, edit.value),
parent=self)
self.select()
# ----------------------------------------------------------------------
def delete(self):
sel = map(int,self.fileList.curselection())
sel.reverse()
if not sel: return
try:
for i in sel:
fn = self.fileList.listbox(0).get(i)
filename = os.path.join(self.path, fn)
s = os.lstat(filename)
if S_ISDIR(s[ST_MODE]):
os.rmdir(filename)
else:
os.remove(filename)
self.fileList.delete(i)
except OSError:
messagebox.showerror(_("Error"),
_("Error deleting file \"%s\"")%(fn),
parent=self)
self.select()
#===============================================================================
class OpenDialog(FileDialog):
_title = _("Open")
# ----------------------------------------------------------------------
# Check | |
# file: UnoInsp/src/unoinsp.py
#!/opt/libreoffice5.2/program/python
# -*- coding: utf-8 -*-
import gettext
import os
import sys
if sys.platform.startswith('win'): # On Windows.
    import locale
    if os.getenv('LANG') is None: # When the LANG environment variable is not set
        lang, enc = locale.getdefaultlocale() # For Japanese this returns ('ja_JP', 'cp932').
        os.environ['LANG'] = lang # Assign e.g. ja_JP to LANG.
lodir = os.path.join(os.path.abspath(os.path.dirname(__file__)),"locale") # Absolute path of the "locale" folder next to this script.
t = gettext.translation("unoinsp",lodir,fallback=True) # Obtain a Translations instance.
_ = t.gettext # Bind the t.gettext function to _.
import re # Regular-expression module.
import platform # Used to obtain the OS name.
from com.sun.star.uno.TypeClass import SERVICE, INTERFACE, PROPERTY, INTERFACE_METHOD, INTERFACE_ATTRIBUTE
from com.sun.star.beans import PropertyValue
CSS = "com.sun.star" # Prefix omitted from the front of IDL names.
REG_IDL = re.compile(r'(?<!\w)\.[\w\.]+') # Regex extracting IDL names.
REG_I = re.compile(r'(?<!\w)\.[\w\.]+\.X[\w]+') # Regex extracting interface names.
REG_E = re.compile(r'(?<!\w)\.[\w\.]+\.[\w]+Exception') # Regex extracting exception names.
ST_OMI = {'.uno.XInterface', '.lang.XTypeProvider', '.lang.XServiceInfo', '.uno.XWeak', '.lang.XComponent', '.lang.XInitialization', '.lang.XMain', '.uno.XAggregation', '.lang.XUnoTunnel'} # Default set of interface names whose output is suppressed.
LST_KEY = ["SERVICE", "INTERFACE", "PROPERTY", "INTERFACE_METHOD", "INTERFACE_ATTRIBUTE"] # Keys of dic_fn.
class ObjInsp: # XSCRIPTCONTEXTを引数にしてインスタンス化する。第二引数をTrueにするとローカルのAPIリファレンスへのリンクになる。
    def __init__(self, XSCRIPTCONTEXT, offline=False):
        """Instantiate with XSCRIPTCONTEXT; offline=True makes the
        generated links point at the local API reference."""
        ctx = XSCRIPTCONTEXT.getComponentContext() # Get the component context.
        self.st_omi = set() # Interfaces whose output is suppressed.
        self.stack = list() # Work stack of TypeDescription objects.
        self.lst_output = list() # Collected output lines.
        self.dic_fn = dict() # Per-category output functions.
        self.prefix = "http://api.libreoffice.org/docs/idl/ref/" if not offline else "file://" + get_path(ctx) + "/sdk/docs/idl/ref/" # When offline, link to the local API reference.  NOTE(review): get_path is defined elsewhere in this module.
        self.tdm = ctx.getByName('/singletons/com.sun.star.reflection.theTypeDescriptionManager') # Singleton TypeDescriptionManager.
def tree(self, obj, lst_supr=None): # 修飾無しでprint()で出力。PyCharmでの使用を想定。
self._init(lst_supr) # 初期化関数
self.dic_fn = dict(zip(LST_KEY, [self.lst_output.append for i in range(len(LST_KEY))])) # すべてself.lst_output.appendする。
if isinstance(obj, str): # objが文字列(IDL名)のとき
self._ext_desc_idl(obj)
else: # objが文字列以外の時
self._ext_desc(obj)
self._removeBranch(" ") # 不要な枝を削除。
print("\n".join(self.lst_output))
    def itree(self, obj, lst_supr=None):
        """Render the tree with anchor tags and display it inline in an
        IPython / Jupyter notebook."""
        self._init(lst_supr) # Reset state; validate lst_supr.
        self._output_setting() # Emit IDL names as hyperlinks.
        self.lst_output.append("<tt>") # Open a monospace-font tag.
        if isinstance(obj, str): # obj is an IDL name string
            self._ext_desc_idl(obj)
        else: # obj is a pyuno object
            self._ext_desc(obj)
        self._removeBranch(" ") # Prune leftover branch bars.
        self.lst_output.append("</tt>") # Close the monospace-font tag.
        from IPython.display import display, HTML # IPython Notebook only.
        display(HTML("<br/>".join(self.lst_output))) # Show in the notebook.
    def wtree(self, obj, lst_supr=None):
        """Write the tree to an HTML file and open it in a new browser
        tab (intended for macros and PyCharm use)."""
        self._init(lst_supr) # Reset state; validate lst_supr.
        self._output_setting() # Emit IDL names as hyperlinks.
        self.lst_output.append('<!DOCTYPE html><html><head><meta http-equiv="content-language" content="ja"><meta charset="UTF-8"></head><body><tt>') # Start the page; monospace font.
        if isinstance(obj, str): # obj is an IDL name string
            self._ext_desc_idl(obj)
        else:
            self._ext_desc(obj)
        self._removeBranch(" ") # Prune leftover branch bars.
        self.lst_output = [i + "<br>" for i in self.lst_output] # Append a line-break tag to every line.
        self.lst_output.append("</tt></body></html>") # Close the tags.
        with open('workfile.html', 'w', encoding='UTF-8') as f: # (Re)create the HTML file as UTF-8.
            f.writelines(self.lst_output) # Write all lines out.
        import webbrowser
        webbrowser.open_new_tab(f.name) # Open it in a new tab of the default browser.
    def _removeBranch(self,s): # *s* is the space unit used in the output lines.
        """Remove branch bars that hang below the last real child.

        Scans the output bottom-up: if the final line carries a vertical
        bar (which should not happen for a finished tree), blank that
        bar on every line up to the junction, then turn the junction
        from a tee into an end piece."""
        i = None; # Column of the bar; set while handling the last line.
        n = len(self.lst_output) # Number of output lines.
        for j in reversed(range(n)): # Walk the lines bottom-up.
            line = self.lst_output[j] # Current line content.
            if j == n - 1 : # Last line:
                if "│" in line: # a bar here should not exist.
                    i = line.find("│") # Remember the bar's column.
                    self._replaceBar(j, line, s*2) # Blank the spurious bar.
                else:
                    break # No bar on the last line: nothing to prune.
            else:
                # line[i] is the single character at the bar column.
                if "│" in line[i]: # The spurious bar continues upward.
                    self._replaceBar(j, line, s*2) # Blank it too.
                else: # The bar chain ended here:
                    line = line.replace("├─","└─",1) # turn the tee into an end piece.
                    self.lst_output[j] = line
                    break
def _replaceBar(self, j, line, ss): # 不要な縦棒を空白に置換。
line = line.replace("│",ss,1)
self.lst_output[j] = line
    def _init(self, lst_supr):
        """Reset the output state.  *lst_supr* optionally lists interface
        names to suppress; the keyword "core" expands to ST_OMI."""
        # self.st_omi = ST_OMI.copy() # (disabled) start from the default suppression set.
        self.lst_output = list() # Reset the output lines.
        if lst_supr: # A second argument was given.
            if isinstance(lst_supr, list): # It must be a list.
                st_supr = set([i.replace(CSS, "") for i in lst_supr]) # Strip "com.sun.star" and convert to a set.
                if "core" in st_supr: # The keyword "core" expands to the ST_OMI elements.
                    st_supr.remove("core")
                    st_supr.update(ST_OMI)
                # self.st_omi = st_supr.symmetric_difference(ST_OMI) # Variant: suppress core interfaces by default.
                self.st_omi = st_supr # Default: output everything not explicitly listed.
            else: # Anything other than a list is rejected.
                self.lst_output.append(_("The second argument should be specified as a list of IDL interface names."))
        self.stack = list() # Reset the work stack.
def _output_setting(self): # IDL名にリンクをつけて出力するための設定。
self.dic_fn = dict(zip(LST_KEY, [self._fn for i in range(len(LST_KEY))])) # 一旦すべての値をself._fnにする。
self.dic_fn["SERVICE"] = self._fn_s # サービス名を出力するときの関数を指定。
self.dic_fn["INTERFACE"] = self._fn_i # インターフェイス名を出力するときの関数を指定。
    def _fn_s(self, item_with_branch): # Wrap the service name in an anchor tag.
        self._make_link("service", REG_IDL, item_with_branch)
    def _fn_i(self, item_with_branch): # Wrap the interface name in an anchor tag.
        self._make_link("interface", REG_I, item_with_branch)
    def _make_link(self, typ, regex, item_with_branch):
        """Append *item_with_branch* to the output, turning the first
        IDL name matched by *regex* into an anchor tag of kind *typ*
        ("service" or "interface")."""
        idl = regex.findall(item_with_branch) # Extract IDL names with the regex.
        if idl:
            lnk = "<a href='" + self.prefix + typ + "com_1_1sun_1_1star" + idl[0].replace(".", "_1_1") + ".html' target='_blank'>" + idl[0] + "</a>" # Build the anchor tag for the name.
            self.lst_output.append(item_with_branch.replace(" ", " ").replace(idl[0], lnk)) # NOTE(review): this replace looks like it should map " " to "&nbsp;" (HTML space); the replacement string may have been mangled in transit — confirm against upstream.
        else:
            self.lst_output.append(item_with_branch.replace(" ", " ")) # NOTE(review): same suspected "&nbsp;" mangling as above.
    def _fn(self, item_with_branch):
        """Default renderer: link every IDL name in the line, treating
        names as interface, exception or (remaining) struct."""
        idl = set(REG_IDL.findall(item_with_branch)) # All IDL names in the line.
        inf = REG_I.findall(item_with_branch) # Interface names.
        exc = REG_E.findall(item_with_branch) # Exception names.
        idl.difference_update(inf, exc) # Drop interfaces and exceptions from the IDL set.
        idl = list(idl) # The remaining names are treated as structs.
        item_with_branch = item_with_branch.replace(" ", " ") # NOTE(review): looks like it should replace " " with "&nbsp;"; possibly mangled — confirm against upstream.
        for i in inf: # Interface names.
            item_with_branch = self._make_anchor("interface", i, item_with_branch)
        for i in exc: # Exception names.
            item_with_branch = self._make_anchor("exception", i, item_with_branch)
        for i in idl: # Everything else (structs).
            item_with_branch = self._make_anchor("struct", i, item_with_branch)
        self.lst_output.append(item_with_branch)
def _make_anchor(self, typ, i, item_with_branch):
lnk = "<a href='" + self.prefix + typ + "com_1_1sun_1_1star" + i.replace(".", "_1_1") + ".html' target='_blank' style='text-decoration:none;'>" + i + "</a>" # 下線はつけない。
return item_with_branch.replace(i, lnk)
    def _ext_desc(self, obj, flag=False):
        """Collect the most-derived services/interfaces supported by
        pyuno object *obj* and emit the tree.  *flag* is True while the
        object also exposes interfaces not reached via a service."""
        self.lst_output.append("pyuno object") # Root label of the tree.
        if hasattr(obj, "getSupportedServiceNames"): # The object exposes services.
            flag = True if hasattr(obj, "getTypes") else False # Flag service-independent interfaces.
            st_ss = set([i for i in obj.getSupportedServiceNames() if self._idl_check(i)]) # Supported services that resolve to a TypeDescription.
            st_sups = set() # Collects ancestor (parent) services.
            if len(st_ss) > 1: # Several supported services:
                self.stack = [self.tdm.getByHierarchicalName(i) for i in st_ss] # their TypeDescriptions go on the stack.
                while self.stack: # Drain the stack.
                    j = self.stack.pop() # Next service TypeDescription.
                    t_std = j.getMandatoryServices() + j.getOptionalServices() # Its parent services.
                    lst_std = [i for i in t_std if not i.Name in st_sups] # Only parents not seen yet.
                    self.stack.extend(lst_std) # Queue the new parents.
                    st_sups.update([i.Name for i in lst_std]) # Remember them by name.
                st_ss.difference_update(st_sups) # Services that are nobody's parent = the descendants.
            self.stack = [self.tdm.getByHierarchicalName(i) for i in st_ss] # Convert back to TypeDescriptions.
            if self.stack: self.stack.sort(key=lambda x: x.Name, reverse=True) # Sort descending by Name.
            self._make_tree(flag)
        if hasattr(obj, "getTypes"): # The object exposes interfaces directly.
            flag = False
            st_si = set([i.typeName.replace(CSS, "") for i in obj.getTypes()]) # Interface names, prefix stripped.
            lst_si = sorted(list(st_si.difference(self.st_omi)), reverse=True) # Drop suppressed names; sort descending.
            self.stack = [self.tdm.getByHierarchicalName(i if not i[0] == "." else CSS + i) for i in lst_si] # Re-add the CSS prefix where needed.
            self._make_tree(flag)
        if not (hasattr(obj, "getSupportedServiceNames") or hasattr(obj, "getTypes")): # Nothing to inspect.
            self.lst_output.append(_("There is no service or interface to support."))
    def _ext_desc_idl(self, idl):
        """Entry point when the inspected object is an IDL name string;
        only service and interface names are supported."""
        if idl[0] == ".": # Leading dot:
            idl = CSS + idl # assume "com.sun.star" was omitted and prepend it.
        j = self._idl_check(idl) # Resolve to a TypeDescription.
        if j:
            typcls = j.getTypeClass() # Type class of the description.
            if typcls == INTERFACE or typcls == SERVICE: # Service or interface:
                self.lst_output.append(idl) # the IDL name becomes the tree root.
                self.stack = [j] # Seed the work stack.
                self._make_tree(flag=False)
            else: # Anything else is not handled yet.
                self.lst_output.append(idl + _(" is not a service name or an interface name, so it is not supported yet."))
        else: # Name did not resolve.
            self.lst_output.append(idl + _(" is not an IDL name."))
def _idl_check(self, idl): # IDL名からTypeDescriptionオブジェクトを取得。
try:
j = self.tdm.getByHierarchicalName(idl) # IDL名からTypeDescriptionオブジェクトを取得。
except:
j = None
return j
def _make_tree(self, flag): # 末裔から祖先を得て木を出力する。flagはオブジェクトが直接インターフェイスをもっているときにTrueになるフラグ。
if self.stack: # 起点となるサービスかインターフェイスがあるとき。
lst_level = [1 for i in self.stack] # self.stackの要素すべてについて階層を取得。
indent = " " # インデントを設定。
m = 0 # 最大文字数を初期化。
inout_dic = {(True, False): "[in]", (False, True): "[out]", (True, True): "[inout]"} # メソッドの引数のinout変換辞書。
t_itd = tuple() # インターフェイスのTypeDescriptionオブジェクトの入れ物を初期化。
t_md = tuple() # メソッドのTypeDescriptionオブジェクトの入れ物を初期化。
t_spd = tuple() # サービス属性のTypeDescriptionオブジェクトの入れ物を初期化。
while self.stack: # スタックがある間実行。
j = self.stack.pop() # スタックからTypeDescriptionオブジェクトをpop。
level = lst_level.pop() # jの階層を取得。
typcls = j.getTypeClass() # jのタイプクラスを取得。
branch = ["", ""] # 枝をリセット。jがサービスまたはインターフェイスのときjに直接つながる枝は1番の要素に入れる。それより左の枝は0番の要素に加える。
if level > 1: # 階層が2以上のとき。
p = 1 # 処理開始する階層を設定。
if flag: # サービスを介さないインターフェイスがあるとき
branch[0] = "│ " # 階層1に立枝をつける。
p = 2 # 階層2から処理する。
for i in range(p, level): # 階層iから出た枝が次行の階層i-1の枝になる。
branch[0] += "│ " if i in lst_level else indent # iは枝の階層ではなく、枝のより上の行にあるその枝がでた階層になる。
if typcls == INTERFACE or typcls == SERVICE: # jがサービスかインターフェイスのとき。
if level == 1 and flag: # 階層1かつサービスを介さないインターフェイスがあるとき
branch[1] = "├─" # 階層1のときは下につづく分岐をつける。
else:
branch[1] = "├─" if level in lst_level else "└─" # スタックに同じ階層があるときは"├─" 。
else: # jがインターフェイスかサービス以外のとき。
branch[1] = indent # 横枝は出さない。
if level in lst_level: # スタックに同じ階層があるとき。
typcls2 = self.stack[lst_level.index(level)].getTypeClass() # スタックにある同じ階層のものの先頭の要素のTypeClassを取得。
if typcls2 == INTERFACE or typcls2 == SERVICE: branch[1] = "│ " # サービスかインターフェイスのとき。横枝だったのを縦枝に書き換える。
if typcls == INTERFACE_METHOD: # jがメソッドのとき。
typ = j.ReturnType.Name.replace(CSS, "") # 戻り値の型を取得。
if typ[1] == "]": typ = typ.replace("]", "") + "]" # 属性がシークエンスのとき[]の表記を修正。
stack2 = list(j.Parameters)[::-1] # メソッドの引数について逆順(降順ではない)にスタック2に取得。
if not stack2: # 引数がないとき。
branch.append(typ.rjust(m) + " " + j.MemberName.replace(CSS, "") + "()") # 「戻り値の型(固定幅mで右寄せ) メソッド名()」をbranchの3番の要素に取得。
self.dic_fn["INTERFACE_METHOD"]("".join(branch)) # 枝をつけてメソッドを出力。
else: # 引数があるとき。
m3 = max([len(i.Type.Name.replace(CSS, "")) for i in stack2]) # 引数の型の最大文字数を取得。
k = stack2.pop() # 先頭の引数を取得。
inout = inout_dic[(k.isIn(), k.isOut())] # 引数の[in]の判定、[out]の判定
typ2 = k.Type.Name.replace(CSS, "") # 引数の型を取得。
if typ2[1] == "]": typ2 = typ2.replace("]", "") + "]" # 引数の型がシークエンスのとき[]の表記を修正。
branch.append(typ.rjust(m) + " " + j.MemberName.replace(CSS, "") + "( " + inout + " " + typ2.rjust(m3) + " " + k.Name.replace(CSS, "")) # 「戻り値の型(固定幅で右寄せ) メソッド名(inout判定 引数の型(固定幅m3で左寄せ) 引数名」をbranchの3番の要素に取得。
m2 = len(typ.rjust(m) + " " + j.MemberName.replace(CSS, "") + "( ") # メソッドの引数の部分をインデントする文字数を取得。
if stack2: # 引数が複数あるとき。
branch.append(",") # branchの4番の要素に「,」を取得。
self.dic_fn["INTERFACE_METHOD"]("".join(branch)) # 枝をつけてメソッド名とその0番の引数を出力。
del branch[2:] # branchの2番以上の要素は破棄する。
while stack2: # 1番以降の引数があるとき。
k = stack2.pop()
inout = inout_dic[(k.isIn(), k.isOut())] # 引数の[in]の判定、[out]の判定
typ2 = k.Type.Name.replace(CSS, "") # 引数の型を取得。
| |
# file: ai4water/postprocessing/SeqMetrics/_regression.py
import warnings
from math import sqrt
from typing import Union
from scipy.stats import gmean, kendalltau
import numpy as np
from .utils import _geometric_mean, _mean_tweedie_deviance, _foo, list_subclass_methods
from ._SeqMetrics import Metrics, EPS
class RegressionMetrics(Metrics):
"""
Calculates more than 100 regression performance metrics related to sequence data.
Example
---------
```python
>>>import numpy as np
>>>from ai4water.postprocessing.SeqMetrics import RegressionMetrics
>>>t = np.random.random(10)
>>>p = np.random.random(10)
>>>errors = RegressionMetrics(t,p)
>>>all_errors = errors.calculate_all()
```
"""
    def __init__(self, *args, **kwargs):
        """
        Initializes `Metrics`.

        args and kwargs go to parent class 'Metrics'.

        After initialization, metrics that are mathematically undefined
        for the given data are pruned from ``self.all_methods``.
        """
        super().__init__(*args, **kwargs)
        self.all_methods = list_subclass_methods(RegressionMetrics, True,
                                                 additional_ignores=['calculate_hydro_metrics',
                                                                     #'calculate_scale_dependent_metrics',
                                                                     #'calculate_scale_independent_metrics'
                                                                     ])
        # if arrays contain negative values, following three errors can not be computed
        for array in [self.true, self.predicted]:
            assert len(array) > 0, "Input arrays should not be empty"
            if len(array[array < 0.0]) > 0:
                # Negative values: log/gamma/poisson-based metrics are undefined.
                self.all_methods = [m for m in self.all_methods if m not in ('mean_gamma_deviance',
                                                                             'mean_poisson_deviance',
                                                                             'mean_square_log_error')]
            if (array <= 0).any():  # mean tweedie error is not computable
                self.all_methods = [m for m in self.all_methods if m not in ('mean_gamma_deviance',
                                                                             'mean_poisson_deviance')]
def _hydro_metrics(self) -> list:
"""Names of metrics related to hydrology"""
return self._minimal() + [
'fdc_flv', 'fdc_fhv',
'kge', 'kge_np', 'kge_mod', 'kge_bound', 'kgeprime_c2m', 'kgenp_bound',
'nse', 'nse_alpha', 'nse_beta', 'nse_mod', 'nse_bound']
@staticmethod
def _scale_independent_metrics() -> list:
"""Names of scale independent metrics."""
return ['mape', 'r2', 'nse']
@staticmethod
def _scale_dependent_metrics() -> list:
"""Names of scale dependent metrics."""
return ['mse', 'rmse', 'mae']
@staticmethod
def _minimal() -> list:
"""some minimal and basic metrics"""
return ['r2', 'mape', 'nrmse', 'corr_coeff', 'rmse', 'mae', 'mse', 'mpe', 'mase']
def abs_pbias(self) -> float:
"""Absolute Percent bias"""
_apb = 100.0 * sum(abs(self.predicted - self.true)) / sum(self.true) # Absolute percent bias
return float(_apb)
def acc(self) -> float:
"""Anomaly correction coefficient.
Reference:
[Langland et al., 2012](https://doi.org/10.3402/tellusa.v64i0.17531).
Miyakoda et al., 1972. Murphy et al., 1989."""
a = self.predicted - np.mean(self.predicted)
b = self.true - np.mean(self.true)
c = np.std(self.true, ddof=1) * np.std(self.predicted, ddof=1) * self.predicted.size
return float(np.dot(a, b / c))
def adjusted_r2(self) -> float:
"""Adjusted R squared."""
k = 1
n = len(self.predicted)
adj_r = 1 - ((1 - self.r2()) * (n - 1)) / (n - k - 1)
return float(adj_r)
def agreement_index(self) -> float:
"""
Agreement Index (d) developed by [Willmott, 1981](https://doi.org/10.1080/02723646.1981.10642213).
It detects additive and pro-portional differences in the observed and
simulated means and vari-ances [Moriasi et al., 2015](https://doi.org/10.13031/trans.58.10715).
It is overly sensitive to extreme values due to the squared
differences [2]. It can also be used as a substitute for R2 to
identify the degree to which model predic-tions
are error-free [2].
.. math::
d = 1 - \\frac{\\sum_{i=1}^{N}(e_{i} - s_{i})^2}{\\sum_{i=1}^{N}(\\left | s_{i} - \\bar{e}
\\right | + \\left | e_{i} - \\bar{e} \\right |)^2}
[2] Legates and McCabe, 199
"""
agreement_index = 1 - (np.sum((self.true - self.predicted) ** 2)) / (np.sum(
(np.abs(self.predicted - np.mean(self.true)) + np.abs(self.true - np.mean(self.true))) ** 2))
return float(agreement_index)
    def aic(self, p=1) -> float:
        """
        [Akaike’s Information Criterion](https://doi.org/10.1007/978-1-4612-1694-0_15)

        p: number of model parameters (must be > 0).

        Modifying from https://github.com/UBC-MDS/RegscorePy/blob/master/RegscorePy/aic.py
        """
        assert p > 0
        # NOTE(review): presumably a parent-class property that raises when
        # fewer than two samples are available — confirm; "# noac" looks
        # like a typo for "# noqa".
        self.assert_greater_than_one  # noac
        n = len(self.true)
        resid = np.subtract(self.predicted, self.true)
        rss = np.sum(np.power(resid, 2))  # residual sum of squares
        return float(n * np.log(rss / n) + 2 * p)
def aitchison(self, center='mean') -> float:
"""Aitchison distance. used in [Zhang et al., 2020](https://doi.org/10.5194/hess-24-2505-2020)"""
lx = np.log(self.true)
ly = np.log(self.predicted)
if center.upper() == 'MEAN':
m = np.mean
elif center.upper() == 'MEDIAN':
m = np.median
else:
raise ValueError
clr_x = lx - m(lx)
clr_y = ly - m(ly)
d = (sum((clr_x - clr_y) ** 2)) ** 0.5
return float(d)
def amemiya_adj_r2(self) -> float:
"""Amemiya’s Adjusted R-squared"""
k = 1
n = len(self.predicted)
adj_r = 1 - ((1 - self.r2()) * (n + k)) / (n - k - 1)
return float(adj_r)
def amemiya_pred_criterion(self) -> float:
"""Amemiya’s Prediction Criterion"""
k = 1
n = len(self.predicted)
return float(((n + k) / (n - k)) * ( 1 /n) * self.sse())
def bias(self) -> float:
"""
Bias as shown in https://doi.org/10.1029/97WR03495 and given by
[Gupta et al., 1998](https://doi.org/10.1080/02626667.2018.1552002
.. math::
Bias=\\frac{1}{N}\\sum_{i=1}^{N}(e_{i}-s_{i})
"""
bias = np.nansum(self.true - self.predicted) / len(self.true)
return float(bias)
def bic(self, p=1) -> float:
"""
Bayesian Information Criterion
Minimising the BIC is intended to give the best model. The
model chosen by the BIC is either the same as that chosen by the AIC, or one
with fewer terms. This is because the BIC penalises the number of parameters
more heavily than the AIC [1].
Modified after https://github.com/UBC-MDS/RegscorePy/blob/master/RegscorePy/bic.py
[1]: https://otexts.com/fpp2/selecting-predictors.html#schwarzs-bayesian-information-criterion
"""
assert p >= 0
n = len(self.true)
return float(n * np.log(self.sse() / n) + p * np.log(n))
def brier_score(self) -> float:
    """Brier score (BS): mean-square error of probability forecasts.

    Adopted from https://github.com/PeterRochford/SkillMetrics/blob/master/skill_metrics/brier_score.py
    Measures probability forecasts for a dichotomous (two-category)
    event, such as the occurrence/non-occurrence of precipitation:

        BS = sum_(n=1)^N (f_n - o_n)^2 / N

    where f is the forecast probabilities (``predicted``, in [0, 1]),
    o is the observed outcomes (``true``, exactly 0 or 1), and N is the
    number of values. See
    https://data.library.virginia.edu/a-brief-on-brier-scores/

    Fixes vs. original: the validity checks used ``np.sum`` over the
    tuple returned by ``np.where``; an offending value at index 0 sums
    to zero and slipped through. ``np.any`` tests the condition itself.
    The result is also cast to ``float`` to honour the annotation.

    Returns
    -------
    float
        The Brier score.

    Raises
    ------
    ValueError
        If ``predicted`` leaves [0, 1] or ``true`` is not all 0/1.

    References
    ----------
    Brier, G. W., 1950: Verification of forecasts expressed in terms
    of probabilities. Mon. Wea. Rev., 78, 1-23.
    Wilks, D. S., 1995: Statistical Methods in the Atmospheric Sciences.
    Cambridge Press. 547 pp.
    """
    # Check for valid values
    if np.any(np.logical_or(self.predicted < 0, self.predicted > 1)):
        raise ValueError('Forecast has values outside interval [0,1].')
    if np.any(np.logical_and(self.true != 0, self.true != 1)):
        raise ValueError('Observed has values not equal to 0 or 1.')
    # Calculate score
    bs = np.sum(np.square(self.predicted - self.true)) / len(self.predicted)
    return float(bs)
def corr_coeff(self) -> float:
    """Pearson correlation coefficient between true and predicted.

    .. math::
        r = \\frac{\\sum ^n _{i=1}(e_i - \\bar{e})(s_i - \\bar{s})}{\\sqrt{\\sum ^n _{i=1}(e_i - \\bar{e})^2}
        \\sqrt{\\sum ^n _{i=1}(s_i - \\bar{s})^2}}
    """
    # Off-diagonal entry of the 2x2 correlation matrix.
    return float(np.corrcoef(self.true, self.predicted)[0, 1])
def covariance(self) -> float:
    """Population covariance between true and predicted.

    .. math::
        Covariance = \\frac{1}{N} \\sum_{i=1}^{N}((e_{i} - \\bar{e}) * (s_{i} - \\bar{s}))
    """
    centred_true = self.true - np.mean(self.true)
    centred_pred = self.predicted - np.mean(self.predicted)
    return float(np.mean(centred_true * centred_pred))
def cronbach_alpha(self) -> float:
    """Cronbach's alpha, a measure of internal consistency of the data.

    https://stats.idre.ucla.edu/spss/faq/what-does-cronbachs-alpha-mean/
    https://stackoverflow.com/a/20799687/5982232
    """
    scores = np.stack([self.true, self.predicted])
    per_item_variance = scores.var(axis=1, ddof=1)
    totals = scores.sum(axis=0)
    n_items = len(scores)
    alpha = (n_items / (n_items - 1.)
             * (1 - per_item_variance.sum() / totals.var(ddof=1)))
    return float(alpha)
def centered_rms_dev(self) -> float:
    """Centered root-mean-square (RMS) difference between true and predicted.

    Modified after https://github.com/PeterRochford/SkillMetrics/blob/master/skill_metrics/centered_rms_dev.py
    Uses the formula:

        (E')^2 = sum_(n=1)^N [(p_n - mean(p))(r_n - mean(r))]^2/N

    where p is the predicted values, r is the true values, and N is the
    total number of values in p & r.

    Returns
    -------
    float
        CRMSDIFF, the centered RMS difference E'.
    """
    pred_anomaly = self.predicted - np.mean(self.predicted)
    true_anomaly = self.true - np.mean(self.true)
    mean_sq_dev = np.sum(np.square(pred_anomaly - true_anomaly)) / self.predicted.size
    return float(np.sqrt(mean_sq_dev))
def cosine_similarity(self) -> float:
    """[Cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity).

    A judgment of orientation, not magnitude: vectors with the same
    orientation score 1, orthogonal vectors score 0, and diametrically
    opposed vectors score -1, independent of their magnitude.
    """
    flat_true = self.true.reshape(-1,)
    flat_pred = self.predicted.reshape(-1,)
    dot = np.dot(flat_true, flat_pred)
    norms = np.linalg.norm(self.true) * np.linalg.norm(self.predicted)
    return float(dot / norms)
def decomposed_mse(self) -> float:
    """Decomposed MSE developed by Kobayashi and Salam (2000).

    .. math ::
        dMSE = (\\frac{1}{N}\\sum_{i=1}^{N}(e_{i}-s_{i}))^2 + SDSD + LCS
        SDSD = (\\sigma(e) - \\sigma(s))^2
        LCS = 2 \\sigma(e) \\sigma(s) * (1 - \\frac{\\sum ^n _{i=1}(e_i - \\bar{e})(s_i - \\bar{s})}
        {\\sqrt{\\sum ^n _{i=1}(e_i - \\bar{e})^2} \\sqrt{\\sum ^n _{i=1}(s_i - \\bar{s})^2}})
    """
    sigma_true = np.std(self.true)
    sigma_pred = np.std(self.predicted)
    squared_bias = self.bias() ** 2
    sdsd = (sigma_true - sigma_pred) ** 2
    lcs = 2 * sigma_true * sigma_pred * (1 - self.corr_coeff())
    return float(squared_bias + sdsd + lcs)
def euclid_distance(self) -> float:
"""Euclidian distance
Referneces: Kennard et al., 2010
"""
| |
import os
import re
import redis
import pickle
import zlib
import warnings
import pygsheets
import pandas as pd
from copy import deepcopy
from django.conf import settings
from rest_framework import status
# Global module variables
# os.getenv returns *strings*; cast the numeric settings to int so they are
# usable directly as a port number / expiry-in-seconds.
# (Fix: previously the values stayed strings whenever the env vars were set.)
REDIS_EXPIRATION_TIME = int(os.getenv('MEDICINE_REDIS_EXPIRATION') or 3600)
REDIS_HOSTNAME = os.getenv('MEDICINE_REDIS_HOST') or 'localhost'
REDIS_PORT = int(os.getenv('MEDICINE_REDIS_PORT') or 6379)
SHEET_KEY = '<KEY>'  # The key for the Google Sheets document containing the data.
# Maps each logical dataframe name to its worksheet title and Redis cache key.
REDIS_SHEET_KEYS = {
    'defence': {
        'sheet': 'defence',
        'redis': 'MEDICINE-DEFENCE-DF'},
    'meds': {
        'sheet': 'meds',
        'redis': 'MEDICINE-MEDS-DF',
    }
}
# Custom exceptions for identifying issues within the medicine reader.
class UnknownDataframeError(Exception):
    """Raised when a dataframe identifier (e.g. 'meds'/'defence') is not recognised."""
    pass
class GoogleConnectionError(Exception):
    """Raised when connecting to the Google Sheets API fails."""
    pass
class CountryCodeError(Exception):
    """Raised when a country code (e.g. 'PL', 'ES') has no matching Links column."""
    pass
class RedisConnectionError(Exception):
    """Raised when the Redis cache host cannot be reached."""
    pass
class UnknownItemError(IndexError):
    """Raised when a requested item row index does not exist.

    Subclasses IndexError so generic index-error handling still catches it.
    """
    pass
# Helper function that API views call to reduce repetition
def call_medicine_reader(data_name, method, params=None):
    """Helper that API views call to reduce repetition.

    Instantiates a MedicineReader and invokes ``method`` on it with
    ``params`` as keyword arguments, translating domain exceptions into a
    response dict of the form ``{'data': {...}, 'status': <HTTP status>}``.

    Fixes vs. original:
    * ``params={}`` was a mutable default shared between calls; replaced
      with ``None`` + local initialisation.
    * the success path fell off the end of the function and returned
      ``None``; it now returns the populated ``return_object``.
    * corrected the garbled cache error message ("the caching the server").
    """
    params = {} if params is None else params
    return_object = {
        'data': {},
        'status': status.HTTP_200_OK,
    }
    try:
        reader = MedicineReader()
    except GoogleConnectionError:
        return_object['data']['message'] = 'There was a problem retrieving the data.'
        return_object['status'] = status.HTTP_500_INTERNAL_SERVER_ERROR
        return return_object
    except RedisConnectionError:
        return_object['data']['message'] = 'There was a problem with the caching server.'
        return_object['status'] = status.HTTP_500_INTERNAL_SERVER_ERROR
        return return_object
    try:
        return_object['data'][data_name] = getattr(reader, method)(**params)
    except CountryCodeError:
        return_object['data']['message'] = f'The country code "{params["country_code"]}" was not recognised.'
        return_object['status'] = status.HTTP_404_NOT_FOUND
        return return_object
    except UnknownDataframeError:
        return_object['data']['message'] = f'The dataframe "{params["df_str"]}" was not recognised.'
        return_object['status'] = status.HTTP_404_NOT_FOUND
        return return_object
    except UnknownItemError:
        return_object['data']['message'] = f'The specified item with number {params["item_index"]} was not recognised.'
        return_object['status'] = status.HTTP_404_NOT_FOUND
        return return_object
    return return_object
# Helper Functions
# Loose validator for http(s)/ftp(s) URLs: scheme, then a domain name,
# "localhost", or a dotted-quad IP, with optional port and path.
url_validator = re.compile(
    r'^(?:http|ftp)s?://' # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
    r'localhost|' #localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
    r'(?::\d+)?' # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def is_valid_url(url):
    """Return True if *url* matches the URL validator pattern."""
    return url_validator.match(url) is not None
def split_and_filter_links(links_text):
    """
    Takes the raw string from the links cell, and returns a nicely
    formatted list of (valid) URLs.

    Commas and newlines are treated as separators alongside ordinary
    whitespace; anything that fails is_valid_url() is dropped.
    """
    normalised = links_text.replace(',', ' ').replace('\n', ' ')
    return [candidate for candidate in normalised.split() if is_valid_url(candidate)]
def get_language_codes(df):
    """Language codes taken from the 'Name <code>' columns of *df*.

    The code is the last whitespace-separated token of the column header.
    """
    return [column.split()[-1]
            for column in df.columns
            if column.startswith('Name')]
def get_country_codes(df):
    """Country codes taken from the 'Links <code>' columns of *df*.

    The code is the last whitespace-separated token of the column header.
    """
    return [column.split()[-1]
            for column in df.columns
            if column.startswith('Links')]
class MedicineReader(object):
    '''
    Reads the spreadsheet and returns dataframes with items.
    Place the client_secret.json file into the same directory; don't add the file to git!
    '''
    def __init__(self, using_cache=True):
        '''
        Load the dataframes, preferring the Redis cache when using_cache
        is True and the cache holds every expected key; otherwise fall
        back to the Google Sheets API (repopulating the cache if enabled).
        NB: Possible problem in the future - if the user waits too long,
        pygsheets might stop working and we'll have to authorize again.
        todo: test it and find a solution
        '''
        redis_conn = None
        # Maps dataframe name ('meds'/'defence') -> pandas DataFrame.
        self.__dataframes = {}
        if using_cache:
            redis_conn = self.connect_to_cache()
            if self.check_cache(redis_conn):
                return # If we get here, the cache was populated and loaded.
        # If we get here, the cache was not being used, or was not populated (fully).
        self.get_sheets(using_cache, redis_conn)
        # todo: add the remaining 3 sheets? (they are currently too noisy to convert to df)
    def connect_to_cache(self):
        """
        Attempts to connect to the Redis cache, and returns the object if successful.
        Raises an exception (RedisConnectionError) if this fails.
        """
        try:
            redis_conn = redis.StrictRedis(host=REDIS_HOSTNAME,
                                           port=REDIS_PORT,
                                           db=0)
            redis_conn.exists('test') # Forces the connection to establish; failures are captured from here.
        except redis.exceptions.ConnectionError:
            raise RedisConnectionError("Could not connect to the Redis host.")
        return redis_conn
    def check_cache(self, redis_conn):
        """
        Checks to see if the Redis cache has the necessary keys to use.
        Returns True if all keys are present; False otherwise.
        Sets the self.__dataframes instance variable (partially, if only
        some keys are cached).
        """
        present_dataframes = 0
        for df_sheet, df_values in REDIS_SHEET_KEYS.items():
            if redis_conn.exists(df_values['redis']):
                # Cached values are zlib-compressed pickles of DataFrames.
                self.__dataframes[df_sheet] = pickle.loads(zlib.decompress(redis_conn.get(df_values['redis'])))
                present_dataframes += 1
        if len(REDIS_SHEET_KEYS.keys()) == present_dataframes:
            return True
        return False
    def get_sheets(self, use_cache, redis_conn=None):
        """
        Connects to the Google Sheets API to extract the necessary data.
        Populates the cache if required; sets the object's instance variables.
        NOTE(review): assumes redis_conn is a live connection whenever
        use_cache is True - confirm callers uphold that.
        """
        try:
            client = pygsheets.authorize(service_file=settings.GOOGLE_API_SECRET_PATH)
            table = client.open_by_key(SHEET_KEY)
            worksheets = table.worksheets()
        except Exception as e:
            raise GoogleConnectionError(f"An exception occurred connecting to the Google Sheets API. {str(e)}")
        # Correct as of 2022-03-07
        # Keys must match the keys in REDIS_SHEET_KEYS!
        sheets = {'meds': worksheets[0],
                  'defence': worksheets[1],
                  'info': worksheets[2],
                  'issues': worksheets[3],
                  'goals_reached': worksheets[4]}
        # First 4 rows in worksheeets 1-2 are for the info, not part of the actual table
        for df_sheet, df_values in REDIS_SHEET_KEYS.items():
            self.__dataframes[df_sheet] = sheets[df_values['sheet']].get_as_df(start='A5')
            if use_cache:
                # Cache as a zlib-compressed pickle with an expiry.
                redis_conn.setex(df_values['redis'],
                                 REDIS_EXPIRATION_TIME,
                                 zlib.compress(pickle.dumps(self.__dataframes[df_sheet])))
    def get_current_dataframes(self):
        # Returns the available dataframe identifiers (dict keys view).
        return self.__dataframes.keys()
    def get_items_list(self, df_str):
        """
        Given a dataframe string identifier (e.g., meds/defence), returns a list of items for that particular type.
        Each item is a dict with row_number, per-language names, and a
        high-priority flag.
        """
        if df_str not in self.__dataframes:
            raise UnknownDataframeError(f'The dataframe "{df_str}" is not recognised.')
        df = self.__dataframes[df_str]
        index = list(df.index)
        lang_codes = get_language_codes(df)
        # Collect {lang_code: name} per row from the 'Name <code>' columns.
        item_names_by_language = []
        for i in range(len(df)):
            new_entry = {}
            for code in lang_codes:
                new_entry[code] = df[f'Name {code}'].iloc[i]
            item_names_by_language.append(new_entry)
        item_priorities = list(df['Priority'])
        res = []
        for i in range(len(index)):
            res.append({'row_number': index[i],
                        'item_names_by_language': item_names_by_language[i],
                        'is_high_priority': item_priorities[i].lower() == 'high'})
        return res
    def get_items_list_for_country(self, df_str, country_code):
        """
        Given a dataframe string identifier (e.g., meds/defence) and a country code (e.g., PL), returns a list of items available for that country with valid links.
        Rows without at least one valid link for the country are skipped.
        """
        if df_str not in self.__dataframes:
            raise UnknownDataframeError(f'The dataframe "{df_str}" is not recognised.')
        df = self.__dataframes[df_str]
        country_codes = get_country_codes(df)
        if country_code not in country_codes:
            raise CountryCodeError(f'Country code "{country_code}" not recognised.')
        index = list(df.index)
        lang_codes = get_language_codes(df)
        # (Shares the name-extraction logic with get_items_list.)
        item_names_by_language = []
        for i in range(len(df)):
            new_entry = {}
            for code in lang_codes:
                new_entry[code] = df[f'Name {code}'].iloc[i]
            item_names_by_language.append(new_entry)
        item_priorities = list(df['Priority'])
        item_order_count = list(df['Ordered'])
        item_need_count = list(df['Need #'])
        res = []
        for i in range(len(index)):
            links_raw = df[f'Links {country_code}'].iloc[i]
            links_valid = split_and_filter_links(links_raw)
            if not links_valid: # Skip this row; no valid links were found.
                continue
            res.append({'row_number': index[i],
                        'item_names_by_language': item_names_by_language[i],
                        'is_high_priority': item_priorities[i].lower() == 'high',
                        'number_ordered': item_order_count[i],
                        'number_needed': None if item_need_count[i] == '' else item_need_count[i],
                        })
        return res
    def get_links_for_item(self, df_str, country_code, item_index):
        '''
        df_str: a dataframe identifier for a table from our spreadsheet, already filtered by country!
        country_code: ES, PL, ..
        item_index: number of the row in df that contains our item
        returns a list of valid links for buying the item
        Raises UnknownDataframeError / CountryCodeError / UnknownItemError
        on a bad identifier, country code, or row index respectively.
        '''
        if df_str not in self.__dataframes:
            raise UnknownDataframeError(f'The dataframe "{df_str}" is not recognised.')
        df = self.__dataframes[df_str]
        country_codes = get_country_codes(df)
        if country_code not in country_codes:
            raise CountryCodeError(f'Country code "{country_code}" not recognised.')
        try:
            row = df.iloc[item_index]
        except IndexError:
            raise UnknownItemError(f'Item number {item_index} not found.')
        links_cell = row['Links {0}'.format(country_code)]
        links_list = split_and_filter_links(links_cell)
        if not links_list:
            warnings.warn('No valid URLs found for this item!')
        return links_list
    def get_language_codes(self, df_str):
        """
        df_str: a dataframe identifier for a table from our spreadsheet, already filtered by country!
        Delegates to the module-level get_language_codes() helper.
        """
        if df_str not in self.__dataframes:
            raise UnknownDataframeError(f'The dataframe "{df_str}" is not recognised.')
        df = self.__dataframes[df_str]
        return get_language_codes(df)
# def select_country(df, country_code):
# '''
# df: a dataframe with a table (from our spreadsheet)
# country_code: ES, PL, etc.
# '''
# # Filter out all columns for other countries except "Name EN"
# columns_filtered = []
# for column in df.columns:
# if column != "Name EN" and column.startswith(('Name', 'Links')) and not column.endswith(country_code):
# continue
# columns_filtered.append(column)
# df_res = deepcopy(df[columns_filtered])
# return df_res
# def get_links_for_item(df, country_code, item_index):
# '''
# df: a dataframe with a table from our spreadsheet, already filtered by country!
# country_code: ES, PL, ..
# item_index: number of the row in df that contains our item
# returns a list of valid links for buying the item
# '''
# country_codes = get_country_codes(df)
# if country_code not in country_codes:
# raise CountryCodeError(f'Country code not found: {country_code}')
# try:
# row = df.iloc[item_index]
# except IndexError:
# raise UnknownItemError(f'Item number {item_index} not found')
# links_cell = row['Links {0}'.format(country_code)]
# links_list = split_and_filter_links(links_cell)
# if not links_list:
# warnings.warn('No valid URLs found for this item!')
# return links_list
# def get_items_list(df):
# index = list(df.index)
# lang_codes = get_language_codes(df)
# item_names_by_language = []
# for i in range(len(df)):
# new_entry = {}
# for code in lang_codes:
# new_entry[code] = df['Name '+code].iloc[i]
# item_names_by_language.append(new_entry)
# item_priorities = list(df['Priority'])
# res = []
# for i in range(len(index)):
# res.append({'row_number': index[i],
# 'item_names_by_language': item_names_by_language[i],
# 'is_high_priority': item_priorities[i].lower() == 'high'})
# return res
# def get_items_list_for_country(df, country_code):
# '''
# The user has selected the country, we give the items available
# (ones that have valid links)
# '''
# country_codes = get_country_codes(df)
# if country_code not in country_codes:
# raise CountryCodeError('Country code not found: '+country_code)
# index = list(df.index)
# lang_codes = get_language_codes(df)
# item_names_by_language | |
#!/usr/bin/python
import socket
import time
import picamera
import threading #should probably change threads to processes. Sometime.
import multiprocessing #using processes instead of threads for frame advance.
import filmCap
from time import sleep
import RPi.GPIO as GPIO
from filmCap import config
from filmCap import codes
import logging
loglevel = logging.DEBUG #SET TO [CRITICAL|ERROR|WARNING|INFO|DEBUG]
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
#FOR SERVER-BASED EXPOSURE CONTROL
# NOTE(review): the brightness targets/limits below look like exposure
# tuning bounds (single vs. double == bracketing counts?) - confirm their
# exact semantics against the exposure-control code.
imgbrightness = 100
imgbrightness2 = 80
brt_max_single = 150
brt_max_double = 170
brt_min_single = 70
brt_min_double = 50
# Re-calibrate the frame-advance motor after this many frames (see take_a_photo).
calibrate_motor_every = 100
framecount = 0
missed_frames = 0
img_format = 'jpeg' #Works with other formats, but large sizes make it much slower.
jpeg_quality = 100
auto_advance = False #this determines whether we advance after each photo
#turned on/off by commands from client
# Events coordinating the reader thread and the frame-advance process.
readerExitEvent = threading.Event()
cap_event = multiprocessing.Event()
exit_event = multiprocessing.Event()
def setupConns(img_socket, ctrl_socket):
    """Block until a client connects on both listening sockets.

    Accepts on the image socket first, then the control socket, wrapping
    each connection in a file object (write-binary for images, read-binary
    for control). Side effects: sets config.img_conn / config.ctrl_conn
    and starts the non-blocking control-stream reader (global ctrl_reader).
    """
    global ctrl_reader, readerExitEvent
    readerExitEvent.clear()
    logging.info("Waiting for client connection...")
    config.img_conn = img_socket.accept()[0].makefile('wb')
    config.ctrl_conn = ctrl_socket.accept()[0].makefile('rb')
    logging.info("Client Connections Established")
    ctrl_reader=filmCap.NonBlockingStreamReader(config.ctrl_conn, readerExitEvent)
def releaseClient():
    """Tear down the current client's connections, then wait for the next one.

    Signals the control-stream reader thread to exit and joins it, flushes
    and closes both connection file objects, then re-enters setupConns()
    (which blocks until a new client connects).
    """
    global img_socket, ctrl_socket, ctrl_reader, readerExitEvent
    readerExitEvent.set()
    ctrl_reader.thr.join()
    config.img_conn.flush()
    config.img_conn.close()
    config.ctrl_conn.flush()
    config.ctrl_conn.close()
    logging.info("Client connections released")
    setupConns(img_socket, ctrl_socket) #wait for new connection from client
def sendquit():
    """Write the terminate marker on the image stream so the peer knows we're done sending."""
    config.img_conn.write("t") #marker byte: no more image data will follow
    config.img_conn.flush()
def processCmd(cmdstr):
    """Decode one newline-terminated command from the client and act on it.

    The first character of *cmdstr* selects the command (see
    ``filmCap.codes``); the rest of the string, if any, is the command's
    setting/argument. Returns 0 for an empty command string, otherwise
    falls through (None).

    Fix vs. original: the smart-capture branch read ``elif cmd -- '^':``
    which parses as ``cmd - (-'^')`` and raises TypeError at runtime; it
    is now the intended equality test.
    """
    global fc, auto_advance
    cam = config.cam
    c = filmCap.codes
    if not cmdstr:
        logging.debug("Empty Command String")
        return 0
    cmdstr = cmdstr.replace("\n", "")
    logging.debug(cmdstr)
    cmd = cmdstr[0]
    setting = cmdstr[1:]
    # --- preview / motor settings -------------------------------------
    # Manual motor moves suspend capture mode (restored by MOTOR_STOP).
    if cmd == c.MOTOR_FWD:
        cam.tmpmode = cam.mode
        if cam.tmpmode == cam.CAPTURING:
            cam.mode = cam.OFF
        fc.motor_fwd(50)
    elif cmd == c.MOTOR_REV:
        cam.tmpmode = cam.mode
        if cam.tmpmode == cam.CAPTURING:
            cam.mode = cam.OFF
        fc.motor_rev(50)
    elif cmd == c.MOTOR_FFD:
        cam.tmpmode = cam.mode
        if cam.tmpmode == cam.CAPTURING:
            cam.mode = cam.OFF
        fc.motor_fwd(100)
    elif cmd == c.MOTOR_FREV:
        cam.tmpmode = cam.mode
        if cam.tmpmode == cam.CAPTURING:
            cam.mode = cam.OFF
        fc.motor_rev(100)
    elif cmd == c.MOTOR_STOP:
        fc.motor_stop()
        cam.mode = cam.tmpmode
    elif cmd == c.LIGHT_OFF:
        fc.light_off()
    elif cmd == c.LIGHT_ON:
        fc.light_on()
    elif cmd == c.PREVIEW_ON:
        cam.mode = cam.PREVIEWING
    elif cmd == c.PREVIEW_OFF:
        cam.mode = cam.OFF
    elif cmd == c.AUTOEXP_ON:
        cam.exposure_mode = "auto"
    elif cmd == c.AUTOEXP_OFF:
        cam.exposure_mode = "off"
    elif cmd == c.SETX:
        cam.setx(int(setting))
    elif cmd == c.SETY:
        cam.sety(int(setting))
    elif cmd == c.SETZ:
        cam.setz(int(setting))
    # --- color settings (gains sent as percentages) -------------------
    elif cmd == c.GAIN_RED:
        cam.set_yuv(0, float(setting) / 100)
    elif cmd == c.GAIN_BLUE:
        cam.set_yuv(1, float(setting) / 100)
    elif cmd == c.BRIGHTNESS:
        cam.brightness = int(setting)
    elif cmd == c.EXP_COMP:
        cam.exposure_compensation = int(setting)
    elif cmd == c.SATURATION:
        cam.saturation = int(setting)
    elif cmd == c.CONTRAST:
        cam.contrast = int(setting)
    elif cmd == c.FIX_GAINS:
        cam.fixAndSendGains()
    elif cmd == c.AWB_MODE:
        cam.awb_mode = setting
    # --- advanced controls --------------------------------------------
    elif cmd == c.DRC:
        cam.drc_strength = setting
    elif cmd == c.HFLIP_ON:
        cam.hflip = True
    elif cmd == c.HFLIP_OFF:
        cam.hflip = False
    elif cmd == c.VFLIP_ON:
        cam.vflip = True
    elif cmd == c.VFLIP_OFF:
        cam.vflip = False
    elif cmd == c.BW_ON:
        cam.color_effects = (128, 128)  # fixed U/V => monochrome
    elif cmd == c.BW_OFF:
        cam.color_effects = None
    elif cmd == c.SHARPNESS:
        cam.sharpness = int(setting)
    elif cmd == c.VID_PORT_ON:
        cam.vidcap = True
    elif cmd == c.VID_PORT_OFF:
        cam.vidcap = False
    elif cmd == c.SET_SIZE:
        cam.setResize(int(setting))
    # --- capture settings for tab switch ------------------------------
    elif cmd == c.CAPTURE_ON:
        if cam.mode != cam.CAPTURING:
            fc.light_on()
            cam.startCaptureMode()
            take_a_photo(0)
    elif cmd == c.CAPTURE_OFF:
        if cam.mode == cam.CAPTURING:
            cam.startPreviewMode()
    # --- capture settings ---------------------------------------------
    elif cmd == c.BRACKETING_SHOTS:
        cam.bracketing = int(setting)
        cam.sendss = True
    elif cmd == c.BRACKETING_STOPS:
        cam.stops = float(setting)
        cam.sendss = True
    elif cmd == c.FIX_EXPOSURE:
        cam.setExposure()
        take_a_photo(0)  # take a single photo and stream it.
    elif cmd == c.SS_SLOWER:
        cam.ss = int(cam.ss * 1.23)
        cam.shutter_speed = int(cam.ss)
        cam.sendss = True
    elif cmd == c.SS_FASTER:
        cam.ss = int(cam.ss * .81)
        cam.shutter_speed = int(cam.ss)
        cam.sendss = True
    elif cmd == c.QUIT:
        config.exitEvent.set()
    elif cmd == c.CLIENT_QUIT:
        cam.mode = cam.OFF
        sendquit()
        time.sleep(2)
        releaseClient()
    elif cmd == c.TEST_PHOTO:
        take_a_photo(0)  # take a single photo and stream it.
        cam.sendss = True
    elif cmd == c.START_CAPTURE:
        auto_advance = True
        fc.motor_wake()
        take_a_photo(0)
    elif cmd == c.STOP_CAPTURE:
        auto_advance = False
        # fc.motor_sleep()
    elif cmd == c.RESUME_CAPTURE:
        global cap_event
        auto_advance = True
        fc.motor_wake()
        # Don't take a photo, b/c we should have got the last frame. Just advance a frame.
        cap_event.set()
        # fc.motor_sleep()
    elif cmd == c.CAP_FRAME_ADV:
        cam.mode = cam.OFF
        num = int(setting) if setting else 1
        fc.motor.fwdFrame(num)
        cam.mode = cam.CAPTURING
        take_a_photo(0)  # take a single photo and stream it.
    elif cmd == c.CAP_FRAME_REV:
        cam.mode = cam.OFF
        auto_advance = False
        num = int(setting) if setting else 1
        fc.motor.revFrame(num)
        cam.mode = cam.CAPTURING
        take_a_photo(0)  # take a single photo and stream it.
    # --- smart capture settings ---------------------------------------
    elif cmd == '^':  # BUGFIX: was `cmd -- '^'` (runtime TypeError)
        if setting:
            fc.smart_motor = True
            # NOTE(review): setting is stored as a string here - confirm
            # the consumer of smart_headroom expects str, not int.
            fc.smart_headroom = setting
        else:
            fc.smart_motor = False
def camera_busy():
    """Mark the camera busy (yellow LED on)."""
    global fc #, missed_frames, cam_lock
    #used to have logic to ensure cam coultn't be triggered before it was finished
    #with previous photo. Using stepper motors should prevent the need for this
    fc.yellow_on()
def camera_free():
    """Mark the camera idle again (yellow LED off)."""
    global fc #, cam_lock
    fc.yellow_off()
def take_a_photo(channel): #callback triggered by frame advance setting GPIO pin
    """Capture one (possibly bracketed) photo, then advance the film if enabled.

    channel == 0 means a manually-requested (non-GPIO-triggered) photo;
    any other value is the GPIO channel from the frame-advance trigger.
    Side effects: streams images through the processor pool, updates the
    global framecount, may recurse once after a motor calibration, and
    sets cap_event to start the next frame wind.
    """
    global fc, auto_advance, cap_event, calibrate_motor_every, framecount
    cam = config.cam
    #sleep(0.03) #wait before checking status of trigger, to avoid false triggers
    logging.debug("Trigger "+str(GPIO.input(fc.trigger_pin))+str(cam.mode))
    # Only proceed for a manual request or a (still-high) trigger pin, and
    # only while in capture mode.
    if (((channel == 0) or (GPIO.input(fc.trigger_pin) == 1)) and cam.mode==cam.CAPTURING): #only take this photo if we're in capture mode
        logging.debug("Trigger Valid Channel "+str(channel)+" Trigger Pin "+str(GPIO.input(fc.trigger_pin)))
        #if channel!=0: #photo hasn't been triggered, so we're not monitoring it
        while cap_event.is_set(): #Here, we're waiting for the motor to finish winding, so the setup is still for the pictue.
            sleep(.05) #You may be able to remove this and speed up captures by starting the photo immediately after trigger, while the
            #motor is still moving. However, I found that motor vibration led to 'jello' in my captures
            #when capuring single frames using still mode. Test your results before removing.
        camera_busy()
        i=1
        # For manual shots or bracketed runs, preset the first shutter speed.
        if ((channel == 0) or (cam.bracketing >1)): #True: #this signifies a non-triggered photo, so we need to manually set the initial shutter speed
            cam.shutter_speed = bracketSS(cam.stops, i, cam.bracketing, cam.ss)
            sleep(2/cam.framerate)
        while i<= cam.bracketing:
            while not len(config.pool): #if we don't have enough processors for the next photo (maybe because of network congestion or a busy client)
                sleep(1) #the we just wait until we do. This should be rare, but it will happen
            logging.debug("Photo "+str(i)+" taken "+str(cam.shutter_speed)+" "+str(cam.analog_gain)+" "+str(cam.digital_gain))
            # Flag: 's' single, 'a' intermediate bracket shot, 'b' last bracket shot.
            imgflag = 's' if cam.bracketing==1 else 'a' if i<cam.bracketing else 'b'
            take_and_queue_photo(imgflag)
            logging.debug("Photo "+imgflag+" taken "+str(cam.shutter_speed)+" "+str(cam.analog_gain)+" "+str(cam.digital_gain))
            #trying AFTER photo, to avoid delaying at beginning.
            nextshot=i+1 if i<cam.bracketing else 1
            cam.shutter_speed = bracketSS(cam.stops, nextshot, cam.bracketing, cam.ss)
            #below line is really important! Without sufficient delay, camera won't use new shutter speed
            #on next photo. Only necessary when using video port.
            if i< cam.bracketing and cam.vidcap:
                sleep(2/cam.framerate)#try giving camera time to register new shutter speed
            i+=1
        # if channel == 0 and not auto_advance: #again, a non-triggered photo, so set it BACK to avg
        #     cam.shutter_speed = cam.ss
        camera_free()
        if auto_advance:
            while cap_event.is_set(): #perhaps we're done w/ photo before last wind is finished? If so, wait for it.
                sleep(.05)
            if GPIO.input(fc.trigger_pin) == 1: #if motor's done winding and we're still triggered, there's a problem - log it and stop
                logging.debug("Capture Trigger still live after capture - dang. Need to calibrate early")
                framecount=0
                fc.calibrate() #wind the motor more slowly to the next frame.
                take_a_photo(0)
            elif framecount >= calibrate_motor_every: #the motor may skip steps occasionally,
                #so every so often we reset it relative to the projector
                logging.debug("Time to calibrate the motor!")
                framecount=0
                fc.calibrate() #wind the motor more slowly to the next frame.
                take_a_photo(0)
            else:
                framecount += 1
                cap_event.set() #send the signal to the motor winder
                logging.debug("Photo taken "+ str(framecount))
def bracketSS(stops, shot, bkt, ss):
    """Shutter speed for shot *shot* of a *bkt*-shot bracket around *ss*.

    Shots are spread evenly across [-stops, +stops] exposure stops; e.g.
    bkt=2 gives offsets [-1, 1] and bkt=3 gives [-1, 0, 1] (times stops).
    A factor of 1.41 per stop approximates one photographic stop.
    """
    if bkt == 1:
        return ss
    # Evenly spaced offset in [-1, 1] for this shot's position in the bracket.
    offset = (float(shot - 1) / float(bkt - 1) * 2) - 1
    return int(ss * 1.41 ** (offset * stops))
def take_and_queue_photo(imgflag):
    """Capture one frame into a pooled streamer and hand it off for sending.

    imgflag: 's' single shot, 'a' intermediate bracket shot, 'b' last
    bracket shot (consumed by the streaming side). Blocks until a
    processor is available; if the pool is starved, disables auto_advance
    and retries every second.
    """
    global auto_advance
    cam=config.cam
    processor=None
    while not processor:
        # Pop a streamer from the shared pool under its lock.
        with config.pool_lock:
            if config.pool:
                processor = config.pool.pop()
        if processor:
            #logging.debug("Got processor")
            stream=processor.stream
            logging.debug("CapStart")
            # cam.capture(stream, "yuv", use_video_port = cam.vidcap)#, resize=cam.capsize) #experiment
            cam.capture(stream, img_format, quality=jpeg_quality, use_video_port = cam.vidcap)#, resize=cam.capsize)
            logging.debug("got photo "+str(cam.contrast))
            processor.imgflag = imgflag
            processor.event.set() #start the thread which streams the photo
            #logging.debug("Set Event")
            return 0
        else: #When the pool is starved, wait a while for it to refill
            logging.warning("No Processors available in pool - stopping capture")
            auto_advance = False
            time.sleep(1)
###########MAIN LOOP BELOW #######################
logging.info("Starting")
#Set up sockets and connections: port 8000 carries the image stream,
#port 8001 carries client control commands.
img_socket = socket.socket()
img_socket.bind(('0.0.0.0', 8000))
img_socket.listen(0)
ctrl_socket = socket.socket()
ctrl_socket.bind(('0.0.0.0', 8001))
ctrl_socket.listen(0)
cam_lock = threading.Lock()
#SET UP LOCKS FOR POOL OF THREADS USED BY IMAGE STREAMING THREADS
connection_lock = threading.Lock()
config.pool = [filmCap.ImageStreamer(connection_lock, config.pool_lock) for i in range(5)]
#We use separate threads to stream captured images so that the
# camera will never miss a shot while waiting for an image to
#stream over the network. It's complex, but allows for faster
#capture rates with fewer missed/blurred frames
#Trying with 5 threads
# repo: qobi/amazing-race
from __future__ import print_function
import pickle
import tf
import cv2
import os
import numpy as np
import math
import sys
import scipy
import argparse
from mpl_toolkits.mplot3d import Axes3D
import time
from scipy.optimize import linear_sum_assignment
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.rcParams.update({'font.size': 10})
from scipy.spatial.distance import cdist, pdist
from scipy.cluster.hierarchy import fclusterdata
from simplification.cutil import simplify_coords#, simplify_coords_vw, simplify_coords_vwp
#set up transformation matrices (sensor frame -> camera frame)
trans = [-0.03596812, 0.27371754, -0.08881337]
rot = [0.544521193495, -0.512626565799, 0.452427950113, 0.485818509146]
rot_euler = tf.transformations.euler_from_quaternion(rot)
# NOTE(review): rot is immediately replaced by a hand-set euler rotation;
# the quaternion above only feeds rot_euler - confirm that is intentional.
rot = tf.transformations.quaternion_from_euler(2.68, -1.42, -1.1)
# Homogeneous 4x4 transform: rotation from the quaternion, translation in
# the last column (trans extended with the homogeneous 1).
trans = tuple(trans) + ( 1, )
rotationMatrix = tf.transformations.quaternion_matrix( rot )
rotationMatrix[ :, 3 ] = trans
# OpenCV line-segment detector, reused by the image-processing functions.
lsd = cv2.createLineSegmentDetector(0)
def line_to_points(line, step):
    """Sample points along a 2-D segment (x1, y1, x2, y2) at x-spacing *step*.

    The segment endpoints are ordered left-to-right by x before sampling;
    returns an iterator of (x, y) pairs. Raises ZeroDivisionError for a
    vertical segment (x1 == x2).
    """
    x1, y1, x2, y2 = line
    left = min([[x1, y1], [x2, y2]], key=lambda p: p[0])
    right = max([[x1, y1], [x2, y2]], key=lambda p: p[0])
    x1, y1 = left
    x2, y2 = right
    slope = (y2 - y1) / (x2 - x1)
    intercept = -(slope * x1 - y1)
    xs = np.arange(x1, x2, step)
    return zip(xs, slope * xs + intercept)
def line_equation(lines):
    """Vectorised slope/intercept for an (N, 4) array of segments.

    Each row is (x1, y1, x2, y2); returns (m, b) as (N, 1) column vectors
    such that y = m*x + b. Vertical segments yield inf/nan per numpy
    division semantics.
    """
    x1, y1 = lines[:, 0], lines[:, 1]
    x2, y2 = lines[:, 2], lines[:, 3]
    slope = (y2 - y1) / (x2 - x1)
    intercept = -(slope * x1 - y1)
    return slope[:, np.newaxis], intercept[:, np.newaxis]
def RDP_laser(vel_points, rings, map_vel_points, camera_model, rotationMatrix, top, bot, min_dist, rdp_thresh, point_thresh, dist_thresh, bound):
    """Extract simplified line segments from one laser ring and project them to pixels.

    Filters points closer than min_dist, keeps ring 8, simplifies the 2-D
    trace with the RDP algorithm (simplify_coords), keeps only segments
    spanning enough original points (point_thresh) and 3-D length
    (dist_thresh), then projects the surviving endpoints to image space at
    heights top/bot (plus +-bound boundary bands) via the camera model.
    Returns (laser_segs_3d, laser_segs_map, laser_segs_uv_top,
    laser_uvs_top, laser_segs_uv_bot, laser_uvs_bot, boundary_seg_list).
    """
    #only keep points that are at least a certain distance in front of the robot
    min_dist_cond = (vel_points[:,0] >= min_dist)
    vel_points = vel_points[min_dist_cond]
    map_vel_points = map_vel_points[min_dist_cond]
    rings = rings[min_dist_cond]
    #keep first two coordinates (x, y) of points
    des_points = vel_points[rings == 8] #just need to take first 2 coords
    des_points_map = map_vel_points[rings == 8] #just need to take first 2 coords
    rdp_time = time.time()
    #apply RDP algorithm to points to create line segments
    #If i take out half the points, it takes half the time
    #rdp_points = rdp(des_points[:,0:2], epsilon = rdp_thresh) #threshold for rdp
    #output are the remaining points that define the piecewise function
    #print (len(rdp_points))
    #print (rdp_points)
    #print ("RDP time", time.time() - rdp_time)
    simplify_time = time.time()
    rdp_points = simplify_coords(des_points[:,0:2], rdp_thresh)
    #find out the original indices of these points
    #this creates a binary mask of original points that are in rdp_points
    # NOTE(review): np.isin matches by value anywhere in rdp_points, not by
    # (x, y) pair; duplicate coordinate values could mis-mark rows - confirm.
    mask = np.isin(des_points[:,0:2], rdp_points)
    #this extracts the actual indices, I do a step size of 2 because it outputs duplicate indices (since 2 coordinates)
    indeces = np.nonzero(mask)[0][::2]
    #I take the difference between consecutive indices to figure out how many points lie on those segments
    indeces_diff = indeces[1:] - indeces[:-1]
    #I compute the 3d length of these line segments
    indeces_dist = np.sqrt((des_points[indeces[1:],0] - des_points[indeces[:-1],0])**2 + (des_points[indeces[1:],1] - des_points[indeces[:-1],1])**2)
    #I only keep indices whose length and # points satisfy minimum thresholds
    #The indices go from left to right (clockwise starting from the robot's left hand side)
    #indeces_left and indeces_right define the line segments, so [indeces_left[i], indeces_right[i]] is the ith line segment
    indeces_left = indeces[:-1][(indeces_diff > point_thresh) & (indeces_dist > dist_thresh)] #thresholds for filtering segments by points and length
    indeces_right = indeces[1:][(indeces_diff > point_thresh) & (indeces_dist > dist_thresh)] #thresholds for filtering segments by points and length
    #With the points extracted, I transform them into pixel coordinates
    #TODO I apply this transformation to all points, but I can just apply it to des_points[indeces_right] and des_points[indeces_left]
    one_col = np.ones((des_points.shape[0], 1))
    #transform laser points at ground level to pixels
    laser_points_top = np.hstack((des_points[:,0:2], one_col*top, one_col))
    laser_uvs_top = np.transpose(camera_model.P.dot(rotationMatrix.dot(np.transpose(laser_points_top))))
    laser_uvs_top = np.array(laser_uvs_top[:,0:2]/laser_uvs_top[:,2])
    #transform laser points at ground level to pixels
    laser_points_bot = np.hstack((des_points[:,0:2], one_col*bot, one_col))
    laser_uvs_bot = np.transpose(camera_model.P.dot(rotationMatrix.dot(np.transpose(laser_points_bot))))
    laser_uvs_bot = np.array(laser_uvs_bot[:,0:2]/laser_uvs_bot[:,2])
    #generate top boundary for top segments
    top_top_boundary = np.transpose(camera_model.P.dot(rotationMatrix.dot(np.transpose(np.hstack((des_points[:,0:2], one_col*(top+bound), one_col))))))
    top_top_boundary = np.array(top_top_boundary[:,0:2]/top_top_boundary[:,2])
    #generate bottom boundary for top segments
    top_bot_boundary = np.transpose(camera_model.P.dot(rotationMatrix.dot(np.transpose(np.hstack((des_points[:,0:2], one_col*(top-bound), one_col))))))
    top_bot_boundary = np.array(top_bot_boundary[:,0:2]/top_bot_boundary[:,2])
    #generate top boundary for bottom segments
    bot_top_boundary = np.transpose(camera_model.P.dot(rotationMatrix.dot(np.transpose(np.hstack((des_points[:,0:2], one_col*(bot+bound), one_col))))))
    bot_top_boundary = np.array(bot_top_boundary[:,0:2]/bot_top_boundary[:,2])
    #generate bottom boundary for bottom segments
    bot_bot_boundary = np.transpose(camera_model.P.dot(rotationMatrix.dot(np.transpose(np.hstack((des_points[:,0:2], one_col*(bot-bound), one_col))))))
    bot_bot_boundary = np.array(bot_bot_boundary[:,0:2]/bot_bot_boundary[:,2])
    #assemble segments
    laser_segs_uv_top = np.hstack((laser_uvs_top[indeces_left,:], laser_uvs_top[indeces_right,:])).astype(int)
    laser_segs_uv_bot = np.hstack((laser_uvs_bot[indeces_left,:], laser_uvs_bot[indeces_right,:])).astype(int)
    top_top_boundary_segs = np.hstack((top_top_boundary[indeces_left,:], top_top_boundary[indeces_right,:])).astype(int)
    top_bot_boundary_segs = np.hstack((top_bot_boundary[indeces_left,:], top_bot_boundary[indeces_right,:])).astype(int)
    bot_top_boundary_segs = np.hstack((bot_top_boundary[indeces_left,:], bot_top_boundary[indeces_right,:])).astype(int)
    bot_bot_boundary_segs = np.hstack((bot_bot_boundary[indeces_left,:], bot_bot_boundary[indeces_right,:])).astype(int)
    laser_segs_3d = np.hstack((des_points[indeces_left,0:2], des_points[indeces_right,0:2]))
    laser_segs_map = np.hstack((des_points_map[indeces_left,0:2], des_points_map[indeces_right,0:2]))
    #return everything
    return laser_segs_3d, laser_segs_map, laser_segs_uv_top, laser_uvs_top, laser_segs_uv_bot, laser_uvs_bot, [top_top_boundary_segs, top_bot_boundary_segs, bot_top_boundary_segs, bot_bot_boundary_segs]
def merge_lines(lines, lines_uv_top, lines_uv_bot, boundary_segs, m_thresh, b_thresh, length_thresh):
#goal is to merge lines that belong to the same wall
#cluster by slope, rotate, then cluster by b
#get boundary segs
top_top_boundary_segs, top_bot_boundary_segs, bot_top_boundary_segs, bot_bot_boundary_segs = boundary_segs
#convert lines into numpy array
lines = np.array(lines).astype(float)
lines_adj = lines
#compute angles of all lines
og_line_thetas = np.mod(np.arctan2(lines[:,3] - lines[:,1], lines[:,2] - lines[:,0]).reshape((lines[:,1].shape[0], 1)), 2*math.pi)
#print ("og line thetas")
#print (og_line_thetas)
#initialize arrays
merged_lines_top = []
merged_lines_bot = []
merged_lines_3d = []
merged_lines_top_top = []
merged_lines_top_bot = []
merged_lines_bot_top = []
merged_lines_bot_bot = []
#only perform if there's at least 2 lines
if lines.shape[0] > 1:
#cluster lines based on their angle to group lines with similar angle
clusters = fclusterdata(og_line_thetas, m_thresh, criterion='distance')
#iterate over each cluster
for i in range(1,max(clusters)+1):
#get the uv lines that correspond to this cluster
line_set_top = lines_uv_top[clusters==i,:]
line_set_bot = lines_uv_bot[clusters==i,:]
#get the uv boundary lines that correspond to this cluster
line_set_top_top = top_top_boundary_segs[clusters==i,:]
line_set_top_bot = top_bot_boundary_segs[clusters==i,:]
line_set_bot_top = bot_top_boundary_segs[clusters==i,:]
line_set_bot_bot = bot_bot_boundary_segs[clusters==i,:]
#get the 3d lines that correspond to this cluster
line_set_3d = lines[clusters==i,:] # 3d
b_old = ((line_set_3d[:,1] + line_set_3d[:,3])/2.0).reshape((line_set_3d[:,1].shape[0], 1))
#get copy of the 3d line that correspond to this cluster
line_set_copy = lines[clusters==i,:] # 3d copy
#compute angles of lines in this cluster
#TODO just index into og_line_thetas above to do this
line_thetas = np.arctan2(line_set_3d[:,3] - line_set_3d[:,1], line_set_3d[:,2] - line_set_3d[:,0])
#print ("Cluster %d"%i)
#print (line_thetas)
#print (line_set_bot)
#rotate all of the lines by their angles so that they are all approximately parallel to the y axis
cos_theta = np.cos(-line_thetas)
sin_theta = np.sin(-line_thetas)
line_set_3d[:,0] = line_set_copy[:,0]*cos_theta - line_set_copy[:,1]*sin_theta
line_set_3d[:,1] = line_set_copy[:,0]*sin_theta + line_set_copy[:,1]*cos_theta
line_set_3d[:,2] = line_set_copy[:,2]*cos_theta - line_set_copy[:,3]*sin_theta
line_set_3d[:,3] = line_set_copy[:,2]*sin_theta + line_set_copy[:,3]*cos_theta
#compute angles of rotated lines
#TODO why do I do this? left over from debugging?
new_line_thetas = np.arctan2(line_set_3d[:,3] - line_set_3d[:,1], line_set_3d[:,2] - line_set_3d[:,0])
#compute new b by computing distance of rotated lines to y axis
b_new = ((line_set_3d[:,1] + line_set_3d[:,3])/2.0).reshape((line_set_3d[:,1].shape[0], 1))
#only perform if there's at least 2 lines
if b_new.shape[0] > 1:
#cluster lines in this cluster by b to separate distinct walls that are parallel
new_clusters = fclusterdata(b_new, b_thresh, criterion='distance')
#iterate over these clusters, to merge the lines in each cluster
for j in range(1, max(new_clusters)+1):
#print ("Sub cluster %d"%j)
#get the 3d lines that correspond to this cluster
new_line_set_3d = line_set_3d[new_clusters==j,:]
old_line_set_3d = line_set_copy[new_clusters==j,:]
#get the top uv lines that correspond to this cluster
new_line_set_top = line_set_top[new_clusters==j,:]
#print (b_new[new_clusters==j,:])
#print (line_set_bot[new_clusters==j,:])
#get leftmost and rightmost points in the uv lines (pixel coordinates)
left = min(new_line_set_top, key = lambda x:x[0])
right = max(new_line_set_top, key = lambda x:x[2])
#get the 3d points corresponding to the leftmost and rightmost points in pixel coordinates
left_idx = (np.where((new_line_set_top==left).all(axis=1))[0][0])
right_idx = (np.where((new_line_set_top==right).all(axis=1))[0][0])
left_3d = old_line_set_3d[left_idx,:]
right_3d = old_line_set_3d[right_idx, :]
#compute the potential distance of the combined line segment
dist_3d = math.sqrt((left_3d[0] - right_3d[2])**2 + (left_3d[1] - right_3d[3])**2)
#if the distnace satisfies the threshold, generate the merged lines and add them
if dist_3d > length_thresh:
#append top uv line
merged_lines_top.append([left[0], left[1], right[2], right[3]])
merged_lines_3d.append([left_3d[0], left_3d[1], right_3d[2], right_3d[3]])
#repeat operation for bottom uv line
new_line_set_bot = line_set_bot[new_clusters==j,:]
left = min(new_line_set_bot, key = lambda x:x[0])
right = max(new_line_set_bot, key = lambda x:x[2])
merged_lines_bot.append([left[0], left[1], right[2], right[3]])
new_line_set_top_top = line_set_top_top[new_clusters==j,:]
left = min(new_line_set_top_top, key = lambda x:x[0])
right = max(new_line_set_top_top, key = lambda x:x[2])
merged_lines_top_top.append([left[0], left[1], right[2], right[3]])
new_line_set_top_bot = line_set_top_bot[new_clusters==j,:]
left = min(new_line_set_top_bot, key = lambda x:x[0])
right = max(new_line_set_top_bot, key = lambda x:x[2])
merged_lines_top_bot.append([left[0], left[1], right[2], right[3]])
new_line_set_bot_top = line_set_bot_top[new_clusters==j,:]
left = min(new_line_set_bot_top, key = lambda x:x[0])
right = max(new_line_set_bot_top, key = lambda x:x[2])
merged_lines_bot_top.append([left[0], left[1], right[2], right[3]])
new_line_set_bot_bot = line_set_bot_bot[new_clusters==j,:]
left = min(new_line_set_bot_bot, key = lambda x:x[0])
right = max(new_line_set_bot_bot, key = lambda x:x[2])
merged_lines_bot_bot.append([left[0], left[1], right[2], right[3]])
#if there is only one line
else:
#add the corresponding top and bottom uv lines if this line's 3d length is greater than threshold
if math.sqrt((line_set_3d[0,0] - line_set_3d[0,2])**2 + (line_set_3d[0,1] - line_set_3d[0,3])**2) > length_thresh:
merged_lines_3d.append(line_set_copy[0,:])
merged_lines_top.append(line_set_top[0,:])
merged_lines_bot.append(line_set_bot[0,:])
merged_lines_top_top.append(line_set_top_top[0,:])
merged_lines_top_bot.append(line_set_top_bot[0,:])
| |
#
# Copyright <NAME> 2009
#
"""
Code that counts the number of sequences for which a gapped PWM has at least one site in (using varying thresholds).
"""
import logging, sys, pylab as P, numpy as N, hmm, hmm.pssm.logo as L, infpy.roc as roc, cPickle
from optparse import OptionParser
from hmm.pssm import seq_to_numpy, numpy_to_seq
from itertools import imap
def sequences_from_fasta(fasta):
    """Yield (description, (sequence string stripped of flanking n/N, tally)) pairs from a FASTA file."""
    import corebio.seq_io.fasta_io
    handle = open(fasta, 'r')
    # Parse with the reduced nucleic alphabet so ambiguity codes collapse.
    for record in corebio.seq_io.fasta_io.iterseq(handle, corebio.seq.reduced_nucleic_alphabet):
        description = record.description.strip()
        yield description, (str(record).strip('nN'), record.tally())
def load_seqs(filename):
    """Load sequences from a FASTA file.

    Returns (dict of preprocessed numpy sequences keyed by description,
    element-wise sum of the per-sequence base tallies).
    """
    logging.info('Loading sequences: %s', filename)
    sequences = dict(sequences_from_fasta(filename))
    numpy_seqs = {}
    tallies = []
    for desc, (seq, tally) in sequences.iteritems():
        numpy_seqs[desc] = hmm.preprocess_sequence(seq_to_numpy(seq))
        tallies.append(N.array(tally))
    total_tally = sum(tallies)
    num_bases = sum(imap(len, (seq for seq, tally in sequences.values())))
    logging.info('Loaded %d sequences with %d bases', len(sequences), num_bases)
    return numpy_seqs, total_tally
def build_hmm(freqs, gaps):
    """
    Build a HMM representing the gapped PWM with the given frequencies and gaps. Cannot handle PWMs with consecutive gaps
    or gaps at beginning or end.

    freqs: K x 4 per-position base frequencies.
    gaps: length-K sequence; gaps[k] < 1. marks position k as gapped, with
          gaps[k] the probability the position is used.
    Raises ValueError when lengths differ or a gap is at an edge / adjacent
    to another gap.
    """
    if len(gaps) != len(freqs):
        raise ValueError('Frequencies and gaps must be same length.')
    K = len(gaps)
    # create model over a 4-letter alphabet with 0th-order emissions
    model = hmm.ModelByStates(M=4, markov_order=0)
    # add background state with uniform emissions
    bg = model.add_state()
    bg.pi = model.add_parameter(1.)
    uniform_param = model.add_parameter(.25)
    for m in xrange(bg.M):
        bg.b[m] = uniform_param
    # add the binding site states in positive and negative directions
    positive_states = [model.add_state() for i in xrange(K)]
    negative_states = [model.add_state() for i in xrange(K)]
    # connect background to initial binding site states; each strand gets
    # half the total binding probability
    p_binding_site = 0.01
    binding_param = model.add_parameter(p_binding_site/2.)
    not_binding_param = model.add_parameter(1.-p_binding_site)
    bg.add_successor(positive_states[0], binding_param)
    bg.add_successor(negative_states[-1], binding_param)
    bg.add_successor(bg, not_binding_param)
    always_one_param = model.add_parameter(1.)
    # leaving the last state of either strand always returns to background
    positive_states[-1].add_successor(bg, always_one_param)
    negative_states[0].add_successor(bg, always_one_param)
    # set up emissions: negative-strand state k emits the complement
    # (reversed base index, b[-b-1]) of positive-strand state k
    for freq, positive_state, negative_state in zip(freqs, positive_states, negative_states):
        for b, f in enumerate(freq):
            emission_param = model.add_parameter(f)
            positive_state.b[b] = emission_param
            negative_state.b[-b-1] = emission_param
    # set up transitions
    for k, gap in enumerate(gaps):
        # edge/adjacent gaps are rejected before any index below can go
        # out of range (the `or` chain short-circuits on 0 == k / K-1 == k)
        if gap < 1. and (0 == k or K-1 == k or gaps[k-1] < 1. or gaps[k+1] < 1.):
            raise ValueError('Gaps cannot be at first or last character nor next to another gap.')
        if gap < 1.:
            # gapped position: enter state k with prob `gap`, or skip it
            # (k-1 -> k+1) with prob 1-gap; mirrored on the negative strand
            gap_param = model.add_parameter(gap)
            non_gap_param = model.add_parameter(1.-gap)
            positive_states[k-1].add_successor(positive_states[k], gap_param)
            positive_states[k-1].add_successor(positive_states[k+1], non_gap_param)
            negative_states[k+1].add_successor(negative_states[k-1], non_gap_param)
            negative_states[k+1].add_successor(negative_states[k], gap_param)
        else:
            # ungapped position: deterministic chain transitions
            if 0 != k:
                positive_states[k-1].add_successor(positive_states[k], always_one_param)
            if K-1 != k:
                negative_states[k+1].add_successor(negative_states[k], always_one_param)
    return model
def build_model_by_states(freqs, gaps, p_binding_site=0.001):
    """
    Build a HMM representing the gapped PWM with the given frequencies and gaps. Can handle consecutive gaps
    and gaps at beginning or end.

    freqs: K x 4 per-position base frequencies (rows are normalised here).
    gaps: length-K sequence; gaps[k] is the probability position k is used.
    p_binding_site: prior probability of entering a binding site from background.
    """
    if len(gaps) != len(freqs):
        raise ValueError('Frequencies and gaps must be same length.')
    K = len(gaps)
    # normalise frequencies so each row sums to 1
    freqs = (freqs.T / freqs.sum(axis=1)).T
    # create model over a 4-letter alphabet with 0th-order emissions
    model = hmm.ModelByStates(M=4, markov_order=0)
    # add background state with uniform emissions
    bg = model.add_state()
    bg.pi = model.add_parameter(1.)
    uniform_param = model.add_parameter(.25)
    for m in xrange(bg.M):
        bg.b[m] = uniform_param
    # add the binding site states in positive and negative directions
    positive_states = [model.add_state() for i in xrange(K)]
    negative_states = [model.add_state() for i in xrange(K)]
    # connect background to initial binding site states
    # NOTE(review): add_parameter() is called without a value here, unlike
    # every other call in this file -- confirm the library default is
    # intended (the model is normalised later in build_hmm_model).
    binding_param = model.add_parameter()
    not_binding_param = model.add_parameter(1.-p_binding_site)
    bg.add_successor(positive_states[0], binding_param)
    bg.add_successor(negative_states[-1], binding_param)
    bg.add_successor(bg, not_binding_param)
    always_one_param = model.add_parameter(1.)
    positive_states[-1].add_successor(bg, always_one_param)
    negative_states[0].add_successor(bg, always_one_param)
    # set up emissions: negative-strand state k emits the complement
    # (reversed base index, b[-b-1]) of positive-strand state k
    for freq, positive_state, negative_state in zip(freqs, positive_states, negative_states):
        for b, f in enumerate(freq):
            emission_param = model.add_parameter(f)
            positive_state.b[b] = emission_param
            negative_state.b[-b-1] = emission_param
    # set up transitions
    def setup_transitions(states, gaps):
        # For each source state (k == -1 meaning the background), add
        # transitions that may skip any leading run of gapped positions;
        # p_skip accumulates the probability all intervening positions
        # are skipped.
        for k in xrange(-1, K):
            if -1 == k:
                k_state = bg
                p_skip = p_binding_site/2.
            else:
                k_state = states[k]
                p_skip = 1.
            for m in xrange(k+1, K):
                gap_param = model.add_parameter(p_skip * gaps[m])
                k_state.add_successor(states[m], gap_param)
                p_skip *= (1. - gaps[m])
                if 0. == p_skip:
                    # remaining targets unreachable; stop early
                    break
            if p_skip > 0.:
                # NOTE(review): states[k] is used here; for k == -1 that is
                # the *last* state, not the background (k_state) -- confirm
                # whether k_state was intended before changing.
                states[k].add_successor(bg, model.add_parameter(p_skip))
    setup_transitions(positive_states, gaps)
    # negative strand is wired in reverse order with reversed gaps
    setup_transitions(negative_states[::-1], gaps[::-1])
    return model
def build_hmm_model(freqs, gaps, p_binding_site=.001):
    """Construct and normalise a hmm.Model for the gapped PWM given by freqs and gaps."""
    states_model = build_model_by_states(freqs, gaps, p_binding_site=p_binding_site)
    hmm_model = hmm.as_model(states_model)
    hmm_model.normalise()
    return hmm_model
def run_on_seqs(model, numpy_seqs):
    """Viterbi-decode every sequence and tally binding-site states.

    Returns (total positive-strand sites, total negative-strand sites,
    number of sequences containing at least one site).
    """
    pos_sites = 0
    neg_sites = 0
    seqs_with_site = 0
    # State 1 is the first positive-strand binding state; the matching
    # negative-strand state is offset by half the non-background states.
    for desc, seq in numpy_seqs.iteritems():
        log_likelihood, state_path = model.viterbi(seq)
        n_pos = (state_path == 1).sum()
        n_neg = (state_path == 1 + (model.N - 1) / 2).sum()
        logging.debug('Ran Viterbi algorithm on %20s: found %3d positive sites and %3d negative sites', desc, n_pos, n_neg)
        pos_sites += n_pos
        neg_sites += n_neg
        if n_pos + n_neg > 0:
            seqs_with_site += 1
    return pos_sites, neg_sites, seqs_with_site
def run_pwm_viterbi(tag, freqs, gaps, positive_seqs, negative_seqs):
    """
    Run the PWM using Viterbi algorithm to classify sequences.

    For each p(binding site) value in the module-level p_binding_params
    (assigned in the __main__ block), builds a model, counts which
    sequences contain at least one decoded site, and records one ROC
    point. Returns the list of ROC points.
    """
    logging.info('Running PWM: %s', tag)
    # save a sequence logo for visual reference
    logo = L.pssm_as_image(freqs, size=None, transparencies=gaps)
    logo_filename = '%s-logo.png' % tag
    logo.save(logo_filename)
    logging.info('%s: Created logo: %s', tag, logo_filename)
    roc_points = []
    for p_binding in p_binding_params:  # NOTE: module-level global, set in __main__
        # build model
        model = build_hmm_model(freqs, gaps, p_binding)
        hmm.graph_as_svg(model, '%s-states' % tag, neato_properties={'-Elen':1.4})
        logging.debug('%s: Graphed model', tag)
        pos_total_pos, pos_total_neg, pos_num_seqs_with_site = run_on_seqs(model, positive_seqs)
        logging.debug(
            '%s: p(binding)=%.1e: Positive sequences: Over all sequences: found %4d positive sites and %4d negative sites in %4d/%4d sequences',
            tag,
            p_binding,
            pos_total_pos,
            pos_total_neg,
            pos_num_seqs_with_site,
            len(positive_seqs)
        )
        neg_total_pos, neg_total_neg, neg_num_seqs_with_site = run_on_seqs(model, negative_seqs)
        logging.debug(
            '%s: p(binding)=%.1e: Negative sequences: Over all sequences: found %4d positive sites and %4d negative sites in %4d/%4d sequences',
            tag,
            p_binding,
            neg_total_pos,
            neg_total_neg,
            neg_num_seqs_with_site,
            len(negative_seqs)
        )
        # a sequence is classified positive when it contains >= 1 site
        tp = pos_num_seqs_with_site
        fp = neg_num_seqs_with_site
        fn = len(positive_seqs) - pos_num_seqs_with_site
        tn = len(negative_seqs) - neg_num_seqs_with_site
        roc_point = roc.RocCalculator(tp=tp, fp=fp, tn=tn, fn=fn)
        logging.info('%s: p(binding)=%.1e; Specificity=%.3f; Sensitivity=%.3f',
            tag,
            p_binding,
            roc_point.specificity(),
            roc_point.sensitivity(),
        )
        roc_points.append(roc_point)
    return roc_points
def make_classifier(model):
    """
    Build a classifier function from a model.

    The returned function maps a sequence to the threshold above which the
    sequence would be called positive: the expected number of bases that are
    NOT in the background state (state 0) under the forward-backward
    posterior.
    """
    def classifier(sequence):
        "Return the expected count of non-background bases in the sequence."
        LL, alpha, beta, c = model.forward_backward(sequence)
        posterior = alpha * beta
        p_background = posterior[:, 0] / posterior.sum(axis=1)
        return len(p_background) - p_background.sum()
    return classifier
def test_hmm_forward_backward(model, seqs):
    """
    Test a HMM on sequences using the forward-backward algorithm.

    Scores each sequence by its expected number of binding-site bases and
    returns the scores in ascending order.
    """
    classifier = make_classifier(model)
    return sorted(classifier(seq) for seq in seqs)
def run_pwm_forward_backward(tag, freqs, gaps, positive_seqs, negative_seqs):
    """
    Run the PWM using forward-backward and return picked ROC points for
    positive vs. negative sequence scores.
    """
    logging.info('Running PWM: %s', tag)
    # save a sequence logo for visual reference
    image = L.pssm_as_image(freqs, size=None, transparencies=gaps)
    image_path = '%s-logo.png' % tag
    image.save(image_path)
    logging.info('%s: Created logo: %s', tag, image_path)
    # build model
    pwm_model = build_hmm_model(freqs, gaps, .001)
    hmm.graph_as_svg(pwm_model, '%s-states' % tag, neato_properties={'-Elen':1.4})
    logging.debug('%s: Graphed model', tag)
    pos_scores = test_hmm_forward_backward(pwm_model, positive_seqs.values())
    neg_scores = test_hmm_forward_backward(pwm_model, negative_seqs.values())
    return roc.picked_rocs_from_thresholds(pos_scores, neg_scores)
if '__main__' == __name__:
#
# Initialise the logging
#
logging.basicConfig(level=logging.INFO, format="%(asctime)s-%(name)s:%(levelname)s:%(message)s")
logging.info(hmm.module_info())
#
# Parse the options
#
option_parser = OptionParser()
option_parser.add_option(
"--min-p-binding-site",
dest="min_p_binding",
default=1e-6,
type='float',
help="Minimum probability of a binding site in the model."
)
option_parser.add_option(
"--max-p-binding-site",
dest="max_p_binding",
default=1e-1,
type='float',
help="Maximum probability of a binding site in the model."
)
option_parser.add_option(
"-n",
"--num-p-binding-params",
dest="num_p_binding",
default=10,
type='int',
help="Number of p(binding site) parameters to evaluate."
)
options, args = option_parser.parse_args()
for option in option_parser.option_list:
if option.dest:
logging.info('%20s: %30s (%s)', option.dest, str(getattr(options, option.dest)), option.help)
#
# Load sequences
#
if len(args) < 2 or len(args) > 2:
logging.error('USAGE: %s <positive sequences> <negative sequences>')
else:
#
# Choose p(binding site) params (evenly spaced on a log scale)
#
p_binding_params = N.exp(N.linspace(N.log(options.min_p_binding), N.log(options.max_p_binding), options.num_p_binding))
# import Sp1 matrix definitions
from sp1 import *
def test_matrix():
# TRANSFAC sp1
N.random.seed(1)
freqs = N.random.dirichlet(N.ones(4)*.1, size=6)
gaps = N.array([.1, .5, .4, 1., 1., .3])
return freqs, gaps
def test_hmm(tag, pwm):
freqs, gaps = pwm
logo = L.pssm_as_image(freqs, size=None, transparencies=gaps)
logo_filename = '%s-logo.png' % tag
logo.save(logo_filename)
logging.info('%s: Created logo: %s', tag, logo_filename)
model = build_hmm_model(freqs, gaps, .1)
logging.debug('%s: Created model', tag)
hmm.graph_as_svg(model, '%s-states' % tag, neato_properties={'-Elen':3.})
logging.debug('%s: Graphed model', tag)
return model
#model = test_hmm('test', test_matrix())
#glam2_sp1_i7_model = test_hmm('GLAM2-Sp1-i7', glam2_sp1_i7())
positive_seq_file, negative_seq_file = args
positive_seqs, tally_positive = load_seqs(positive_seq_file)
logging.info('Positive tally: %s', str(tally_positive))
negative_seqs, tally_negative = load_seqs(negative_seq_file)
logging.info('Negative tally: %s', str(tally_negative))
def calc_or_unpickle_roc(tag, pwm, positive_seqs, negative_seqs):
pickle_file = '%s-roc.pickle'%tag
try:
roc = cPickle.load(open(pickle_file))
logging.info('Unpickled ROCs from %s.', pickle_file)
except:
logging.info('Could not unpickle %s, calculating from scratch.', pickle_file)
freqs, gaps = pwm
roc = run_pwm_forward_backward(tag, freqs, gaps, positive_seqs, negative_seqs)
cPickle.dump(roc, open(pickle_file, 'wb'))
return roc
def do_pwm(tag, pwm, color, marker):
roc_points = | |
<gh_stars>1000+
import abc
import copy
import datetime
import json
from dataclasses import dataclass, field
from io import BytesIO
from typing import Any, Dict, Optional, Tuple, Union
import ijson
import requests
import requests.exceptions
from anchore_engine.clients.grype_wrapper import GrypeWrapperSingleton
from anchore_engine.common.models.schemas import (
FeedAPIGroupRecord,
FeedAPIRecord,
GrypeDBListing,
)
from anchore_engine.services.policy_engine.engine.feeds import (
FeedGroupList,
FeedList,
GroupData,
IFeedSource,
)
from anchore_engine.services.policy_engine.engine.feeds.config import SyncConfig
from anchore_engine.subsys import logger
from anchore_engine.util.time import rfc3339str_to_datetime
from anchore_engine.utils import (
AnchoreException,
CommandException,
ensure_bytes,
ensure_str,
)
FEED_DATA_ITEMS_PATH = "data.item"
FEED_DATA_NEXT_TOKEN_PATH = "next_token"
@dataclass
class HTTPClientResponse:
    """Lightweight container for the outcome of a single HTTP request."""
    content_type: Optional[str] = None  # Content-Type response header, if any
    status_code: int = 1  # HTTP status; 1 is a sentinel meaning "no response received"
    content: bytes = b""  # raw response body (or an error description on failure)
    success: bool = False  # True only when the request completed with HTTP 200
    headers: Dict[str, Any] = field(default_factory=dict)  # response headers
class HTTPClientException(AnchoreException):
    """Base exception for errors raised by this HTTP client layer."""
    pass
class HTTPStatusException(HTTPClientException):
    """Raised for HTTP responses that returned a non-200 status code."""

    def __init__(self, client_response: HTTPClientResponse):
        # Include the response body in the message when it decodes as UTF-8.
        try:
            body_content = f" {client_response.content.decode('utf-8')}"
        except UnicodeDecodeError:
            body_content = ""
        error_msg = f"Non-200 HTTP Status. The HTTP request generated a status of {client_response.status_code}{body_content}."
        # Bug fix: previously super().__init__(self, error_msg) passed the
        # exception instance itself as an extra positional argument. Sibling
        # exceptions in this module pass only the message.
        super().__init__(error_msg)
class InsufficientAccessTierError(HTTPClientException):
    """Raised when the service denies access due to the account's tier (HTTP 403)."""
    pass
class InvalidCredentialsError(HTTPClientException):
    """Raised when the upstream service rejects the supplied credentials (HTTP 401)."""

    def __init__(self, username, target):
        message = f"Invalid credential for user {username} for url: {target}"
        super().__init__(message)
class IAuthenticatedHTTPClientBase(abc.ABC):
    """Interface for HTTP clients that attach credentials to outbound requests."""

    @abc.abstractmethod
    def execute_request(
        self, method, url, connect_timeout=None, read_timeout=None, retries=None
    ):
        """Execute `method` (e.g. requests.get) against `url` with optional
        timeout/retry overrides; implementations return an HTTPClientResponse."""
        pass

    @property
    @abc.abstractmethod
    def user(self):
        """The username associated with this client, if any."""
        pass
class HTTPBasicAuthClient(IAuthenticatedHTTPClientBase):
    """
    HTTP client that optionally applies HTTP Basic authentication.

    Basic auth is used whenever a username or password is configured;
    otherwise requests are sent unauthenticated (see the TODO in
    execute_request).
    """

    # Class-level defaults; copied into self.auth_config per instance so
    # per-instance overrides never mutate this shared dict.
    client_config = {
        "max_retries": 3,
        "conn_timeout": 3,
        "read_timeout": 60,
        "verify": True,
    }

    def __init__(
        self,
        username,
        password,
        connect_timeout=None,
        read_timeout=None,
        retries=None,
        verify=True,
    ):
        """
        :param username: Basic-auth username (may be falsy for unauthenticated use)
        :param password: Basic-auth password
        :param connect_timeout: optional connect-timeout override (seconds)
        :param read_timeout: optional read-timeout override (seconds)
        :param retries: optional retry-count override
        :param verify: whether to verify TLS certificates
        """
        self.auth_config = copy.copy(self.client_config)
        self._user = username
        self.password = password
        self.retries = retries
        if connect_timeout:
            self.auth_config["conn_timeout"] = connect_timeout
        if read_timeout:
            self.auth_config["read_timeout"] = read_timeout
        if retries:
            self.auth_config["max_retries"] = retries
        self.auth_config["verify"] = verify

    @property
    def user(self):
        # Username used for authentication (read-only).
        return self._user

    def _map_error_to_exception(self, exc, username, url=None):
        # Translate a requests.HTTPError into a domain-specific exception.
        if exc.response.status_code == 401:
            raise InvalidCredentialsError(username, url)
        elif exc.response.status_code == 403:
            raise InsufficientAccessTierError(
                "Access denied due to insufficient permissions for user: {}".format(
                    username
                )
            )
        else:
            # NOTE(review): requests.Response has no .body attribute, so if
            # this branch is reached the format call itself raises
            # AttributeError; exc.response.text was probably intended --
            # confirm before changing.
            raise Exception(
                "Feed operation failed for user: {}. Msg: {}. Response: {}".format(
                    self.user, exc.response, exc.response.body
                )
            )

    def authenticated_get(
        self, url, connect_timeout=None, read_timeout=None, retries=None
    ) -> HTTPClientResponse:
        """Convenience wrapper for an (optionally authenticated) HTTP GET."""
        return self.execute_request(
            requests.get, url, connect_timeout, read_timeout, retries
        )

    def execute_request(
        self, method, url, connect_timeout=None, read_timeout=None, retries=None
    ) -> HTTPClientResponse:
        """
        Execute an HTTP request with auth params and the specified timeout
        overrides, retrying up to `retries` times until an HTTP 200 arrives.

        :param method: a callable for the http method to execute (e.g. requests.get, requests.put, ...)
        :param url: target URL
        :param connect_timeout: optional connect-timeout override (seconds)
        :param read_timeout: optional read-timeout override (seconds)
        :param retries: optional retry-count override
        :return: HTTPClientResponse; success is True only for HTTP 200
        """
        # make a request
        if not connect_timeout:
            connect_timeout = int(self.auth_config["conn_timeout"])
        if not read_timeout:
            read_timeout = int(self.auth_config["read_timeout"])
        if not retries:
            retries = int(self.auth_config["max_retries"])
        retries = int(retries)
        verify = self.auth_config["verify"]
        client_response = HTTPClientResponse()
        success = False
        count = 0
        conn_timeout = int(connect_timeout)
        read_timeout = int(read_timeout)
        while not success and count < retries:
            count += 1
            logger.debug("get attempt " + str(count) + " of " + str(retries))
            try:
                logger.debug(
                    "making authenticated request (user={}, conn_timeout={}, read_timeout={}, verify={}) to url {}".format(
                        str(self.user), conn_timeout, read_timeout, verify, str(url)
                    )
                )
                # TODO: move un-authed requests to new class or rename this class
                auth = None
                if self.user or self.password:
                    auth = (self.user, self.password)
                r = method(
                    url, auth=auth, timeout=(conn_timeout, read_timeout), verify=verify
                )
                logger.debug("\tresponse status_code: " + str(r.status_code))
                if r.status_code == 200:
                    success = True
                    client_response.success = True
                elif r.status_code == 401:
                    logger.debug(
                        "Got HTTP 401 on authenticated {}, response body: {}".format(
                            method.__name__, str(r.text)
                        )
                    )
                    r.raise_for_status()
                elif r.status_code in [403, 404]:
                    # 403/404 are mapped to typed exceptions below via the
                    # HTTPError handler
                    r.raise_for_status()
                # record the last response seen, whatever its status
                client_response.status_code = r.status_code
                client_response.content_type = r.headers["Content-Type"]
                client_response.content = r.content
                client_response.headers = r.headers
            except requests.exceptions.ConnectTimeout as err:
                # timeouts are retried; record the error text as the body
                logger.debug("attempt failed: " + str(err))
                client_response.content = ensure_bytes(
                    "server error: timed_out: " + str(err)
                )
            except requests.HTTPError as e:
                if e.response is not None and 400 <= e.response.status_code < 500:
                    # 4xx: translate to a typed exception (this call raises)
                    self._map_error_to_exception(e, username=self.user, url=url)
                else:
                    logger.debug("attempt failed: " + str(e))
                    client_response.content = ensure_bytes("server error: " + str(e))
            except Exception as err:
                # any other failure: record it and let the loop retry
                logger.debug("attempt failed: " + str(err))
                client_response.content = ensure_bytes("server error: " + str(err))
        return client_response
class FeedClientError(AnchoreException):
    """Base exception for errors raised by the feed service clients."""
    pass
class UnexpectedMIMEType(FeedClientError):
    """Raised when downloaded feed data carries an unexpected MIME type."""

    def __init__(self, mime_type: str):
        message = f"Unexpected MIME type {mime_type} was encountered while downloading feed data."
        super().__init__(message)
class FeedServiceClient(IFeedSource):
    """
    Client for the feed service API. Holds no authentication logic itself;
    credentials are handled by the injected HTTP client.
    """

    def __init__(
        self,
        feeds_endpoint: str,
        http_client: HTTPBasicAuthClient,
    ):
        """
        :param feeds_endpoint: base URL of the feeds API
        :param http_client: configured client used for all requests
        :raises ValueError: if feeds_endpoint is empty/None
        """
        if not feeds_endpoint:
            raise ValueError("endpoint cannot be None")
        self.http_client = http_client
        self.feed_url = feeds_endpoint
        # URL templates for the feed-group and group-data endpoints.
        self.group_url = self.feed_url + "/{feed}"
        self.group_data_url = self.group_url + "/{group}"
        self.retry_count = 3

    def list_feeds(self) -> FeedList:
        """Fetch all feed records, following pagination via next_token."""
        more_data = True
        next_token = None
        feed_list = FeedList(feeds=[])
        while more_data:
            url = self.feed_url + (("?next_token=" + next_token) if next_token else "")
            try:
                record = self.http_client.execute_request(
                    requests.get, url, retries=self.retry_count
                )
                if record.success:
                    data = json.loads(ensure_str(record.content))
                    if data and "feeds" in data:
                        feed_list.feeds.extend(
                            [
                                FeedAPIRecord(
                                    name=x.get("name"),
                                    description=x.get("description"),
                                    access_tier=x.get("access_tier"),
                                )
                                for x in data["feeds"]
                            ]
                        )
                    # keep paging while the service returns a non-empty token
                    if "next_token" in data and data["next_token"]:
                        next_token = data["next_token"]
                        more_data = True
                    else:
                        more_data = False
                else:
                    raise Exception(
                        "Feed list operation failed. Msg: {}.".format(record.content)
                    )
            except Exception as e:
                logger.exception("Error executing feed listing: {}".format(e))
                raise e
        return feed_list

    def list_feed_groups(self, feed: str) -> FeedGroupList:
        """Fetch all group records for `feed`, following pagination via next_token."""
        group_list = FeedGroupList(groups=[])
        more_data = True
        next_token = None
        while more_data:
            url = self.group_url.format(feed=feed) + (
                ("?next_token=" + next_token) if next_token else ""
            )
            try:
                record = self.http_client.execute_request(
                    requests.get, url, retries=self.retry_count
                )
                if record.success:
                    data = json.loads(ensure_str(record.content))
                    if "groups" in data:
                        group_list.groups.extend(
                            [
                                FeedAPIGroupRecord(
                                    name=x.get("name"),
                                    description=x.get("description"),
                                    access_tier=x.get("access_tier"),
                                )
                                for x in data["groups"]
                            ]
                        )
                    if "next_token" in data and data["next_token"]:
                        next_token = data["next_token"]
                        more_data = True
                    else:
                        more_data = False
                else:
                    raise Exception(
                        "Feed list operation failed. Msg: {}.".format(record.content)
                    )
            except Exception as e:
                logger.debug("Error executing feed listing: {}".format(e))
                raise e
        return group_list

    def get_feed_group_data(
        self,
        feed: str,
        group: str,
        since: Optional[datetime.datetime] = None,
        next_token: Optional[str] = None,
    ):
        """
        Download one chunk of data for a feed group and parse it into a
        GroupData (payload bytes, pagination token, record count).

        :raises UnexpectedMIMEType: when the response is not application/json
        """
        try:
            record = self.get_raw_feed_group_data(feed, group, since, next_token)
            if record.success:
                if record.content_type != "application/json":
                    raise UnexpectedMIMEType(record.content_type)
                next_token, group_data, count = self._extract_response_data(
                    record.content
                )
                return GroupData(
                    data=group_data,
                    next_token=next_token,
                    since=since,
                    record_count=count,
                    response_metadata={},
                )
            else:
                raise Exception(
                    "Feed list operation failed. Msg: {}.".format(record.content)
                )
        except Exception as e:
            logger.debug("Error executing feed data download: {}".format(e))
            raise e

    def get_raw_feed_group_data(
        self,
        feed: str,
        group: str,
        since: Optional[datetime.datetime] = None,
        next_token: Optional[str] = None,
    ) -> HTTPClientResponse:
        """
        Issue the HTTP GET for a feed group's data chunk, building query
        parameters from `since` and `next_token`.

        :raises TypeError: when `since` is not a datetime
        """
        if since and not isinstance(since, datetime.datetime):
            raise TypeError("since should be a datetime object")
        baseurl = self.group_data_url.format(feed=feed, group=group)
        if since:
            baseurl += "?since={}".format(since.isoformat())
            if next_token:
                url = baseurl + "&next_token={}".format(next_token)
            else:
                url = baseurl
        elif next_token:
            url = baseurl + "?next_token={}".format(next_token)
        else:
            url = baseurl
        logger.debug("data group url: " + str(url))
        try:
            return self.http_client.execute_request(
                requests.get, url, retries=self.retry_count
            )
        except Exception as e:
            logger.debug("Error executing feed data download: {}".format(e))
            raise e

    @staticmethod
    def _extract_response_data(response_text):
        """
        Stream-parse a feed-data JSON payload with ijson.

        Returns (next_token or None, the raw payload unchanged, record count).
        """
        next_token = None
        sio = BytesIO(response_text)
        count = 0
        # Get the next token
        p = ijson.items(sio, FEED_DATA_NEXT_TOKEN_PATH)
        d = [x for x in p]
        if len(d) == 1:
            next_token = d[0]
        # Be explicit, no empty strings
        if not next_token:
            next_token = None
        # Get the record count
        # Not using the special parser for handling decimals here because this isn't on the return path, just counting records
        sio.seek(0)
        for _ in ijson.items(sio, FEED_DATA_ITEMS_PATH):
            count += 1
        logger.debug("Found {} records in data chunk".format(count))
        sio.close()
        return next_token, response_text, count
class GrypeDBUnavailable(FeedClientError):
    """Raised when no valid Grype DB matching the requested version is available upstream."""

    def __init__(self, db_version: str):
        super().__init__(
            f"No valid Grype DBs matching version {db_version} are available on the upstream service."
        )
class GrypeVersionCommandError(FeedClientError):
    """Base exception for failures of the 'grype version' command."""
    pass
class InvalidGrypeVersionResponse(GrypeVersionCommandError):
    """Raised when 'grype version' output cannot be parsed as expected."""

    def __init__(self, response_string: str):
        super().__init__(
            f"The 'grype version' command did not return the expected response. Response: {response_string}"
        )
class GrypeDBServiceClient(IFeedSource):
"""
Client for upstream service (toolbox service or feeds service) serving Grype DB.
:param grype_db_endpoint: base URL of toolbox service
:type grype_db_endpoint: str
:param http_client: configured and instantiated http client to use
:type http_client: HTTPBasicAuthClient
"""
RETRY_COUNT = 3
def __init__(
self,
grype_db_endpoint: str,
http_client: HTTPBasicAuthClient,
):
self.feed_url = grype_db_endpoint
self.http_client = http_client
def list_feeds(self) -> FeedList:
"""
Returns metadata to support existing Feeds Service metadata model.
This is what essentially creates the FeedMetadata object for 'grypedb'.
Shoehorning the GrypeDB into the Feeds Service metadata model is a hack,
but is likely necessary evil until legacy feeds are deprecated and the model
can be redesigned and refactored.
:return: statically generated FeedList response model
:rtype: FeedList
"""
return FeedList(
feeds=[
FeedAPIRecord(
name="grypedb",
description="grypedb feed",
access_tier="0",
)
]
)
def _list_feed_groups(self) -> Dict[str, Union[int, str]]:
"""
Sends HTTP request to toolbox service's listing.json endpoint.
loads and parses the response, returning the first result with the version that applies to the version of Grype
| |
compatibility.
"""
if out:
return OpTreeNode.build("assign", out, self.T)
return self.T
def __add__ (self, other): return OpTreeNode.build("add", self, other)
def __sub__ (self, other): return OpTreeNode.build("sub", self, other)
def __mul__ (self, other): return OpTreeNode.build("mul", self, other)
def __div__ (self, other): return OpTreeNode.build("div", self, other)
def __truediv__ (self, other): return OpTreeNode.build("div", self, other)
def __pow__ (self, other): return OpTreeNode.build("pow", self, other)
def __radd__ (self, other): return OpTreeNode.build("add", other, self)
def __rsub__ (self, other): return OpTreeNode.build("sub", other, self)
def __rmul__ (self, other): return OpTreeNode.build("mul", other, self)
def __rdiv__ (self, other): return OpTreeNode.build("div", other, self)
def __rtruediv__ (self, other): return OpTreeNode.build("div", other, self)
def __rpow__ (self, other): return OpTreeNode.build("pow", other, self)
def __eq__ (self, other): return OpTreeNode.build("eq", self, other)
def __ne__ (self, other): return OpTreeNode.build("ne", self, other)
def __lt__ (self, other): return OpTreeNode.build("lt", self, other)
def __le__ (self, other): return OpTreeNode.build("le", self, other)
def __gt__ (self, other): return OpTreeNode.build("gt", self, other)
def __ge__ (self, other): return OpTreeNode.build("ge", self, other)
def __abs__ (self): return OpTreeNode.build("abs", self, None)
def __neg__ (self): return OpTreeNode.build("neg", self, None)
def __iadd__ (self, other): return OpTreeNode.build("add", self, other, out=self)
def __isub__ (self, other): return OpTreeNode.build("sub", self, other, out=self)
def __imul__ (self, other): return OpTreeNode.build("mul", self, other, out=self)
def __idiv__ (self, other): return OpTreeNode.build("div", self, other, out=self)
def __itruediv__ (self, other): return OpTreeNode.build("div", self, other, out=self)
def __ipow__ (self, other): return OpTreeNode.build("pow", self, other, out=self)
#def __nonzero__ (self): raise ValueError("The truth value of an array with more than one element is ambiguous.")
class NervanaGPU(object):
def __init__(self, stochastic_round=False, bench=False,
cubin_path=os.path.join("kernels", "cubin"),
scratch_size=9*1024*1024, default_dtype=np.float16):
"""
NervanaGPU: the primary interface class and factory for GPUTensors
stochastic_round: set to desired number of mantissa bits to stochasicaly round to
set to zero to disable stochastic rouding.
bench: set to 1 to print out performance data for most kernel calls
"""
if stochastic_round:
if stochastic_round is True:
stochastic_round = 10
else:
stochastic_round = 0
self.scratch_size = scratch_size
self.round_mode = stochastic_round
self.cubin_path = os.path.join(os.path.dirname(__file__), cubin_path)
self.bench = bench
self.stream = None
self.default_dtype = default_dtype
def empty(self, shape, dtype=None, name=None, allocator=drv.mem_alloc):
"""
allocate the space for a GPUTensor
"""
dtype = self.default_dtype if dtype is None else dtype
return GPUTensor(self, shape, dtype, allocator=allocator,
name=name, rounding=self.round_mode)
def array(self, ary, dtype=None, name=None, allocator=drv.mem_alloc):
"""
converts a numpy array to a GPUTensor
"""
dtype = self.default_dtype if dtype is None else dtype
return GPUTensor(self, ary.shape, dtype, allocator=allocator,
name=name, rounding=self.round_mode).set(ary)
def zeros(self, shape, dtype=None, name=None, allocator=drv.mem_alloc):
"""
Returns an array of the given shape and dtype filled with 0's.
"""
dtype = self.default_dtype if dtype is None else dtype
return GPUTensor(self, shape, dtype, allocator=allocator,
name=name, rounding=self.round_mode)._assign(0)
def ones(self, shape, dtype=None, name=None, allocator=drv.mem_alloc):
"""
Returns an array of the given shape and dtype filled with 1's.
"""
dtype = self.default_dtype if dtype is None else dtype
return GPUTensor(self, shape, dtype, allocator,
name=name, rounding=self.round_mode)._assign(1)
def empty_like(self, other_ary, name=None):
"""
Returns an array with the same params as another
"""
return GPUTensor(self, other_ary.shape, other_ary.dtype, other_ary.allocator,
name=name, rounding=self.round_mode)
def zeros_like(self, other_ary, name=None):
"""
Returns an array with the same params as another
"""
return GPUTensor(self, other_ary.shape, other_ary.dtype, other_ary.allocator,
name=name, rounding=self.round_mode)._assign(0)
    def conv_layer(self, dtype,
                   N, C, K,
                   D=1, H=1, W=1,
                   T=1, R=1, S=1,
                   pad_d=0, pad_h=0, pad_w=0,
                   str_d=1, str_h=1, str_w=1,
                   grid_P=0, grid_Q=0, update_size=None):
        """
        Create a new ConvLayer parameter object.
        This then is passed as an argument to all the convolution operations.

        N: Number of images in mini-batch
        C: Number of input feature maps
        K: Number of output feature maps

        D: Depth  of input image
        H: Height of input image
        W: Width  of input image

        T: Depth  of filter kernel
        R: Height of filter kernel
        S: Width  of filter kernel

        padding: amount of zero-padding around the given edge
        strides: factor to step the filters by in a given direction

        grid_P, grid_Q: For the update operation, define the size of the grid
        used to distribute the work across SMs.  The smaller the grid, the
        deeper the MM and hence more accumulation is done in fp32.  The bigger
        the grid, the more evenly the work can be spanned across the SMs, at
        the cost of needing more fp16 accumulation operations and increased
        error.
        Set to 1,1 for full fp32 accuracy.
        Set to P,Q for maximal distribution of work across SMs.
        Set to 0,0 for an automatically calculated optimal balance (recommended).
        Tweaking these params can have a large impact on performance as the
        L2 cache utilization is greatly affected by them.

        update_size: override kernel size selection for update.
            "C128_K64"
            "C128_K128"

        dtype: need to know dtype to set up proper kernels and params.

        Maximum utilization is achieved when N, K and C*R*S*T are multiples
        of 64.
        """
        return ConvLayer(self, dtype, N, C, K, D, H, W, T, R, S,
                         pad_d, pad_h, pad_w, str_d, str_h, str_w,
                         grid_P, grid_Q, update_size)
    def deconv_layer(self, dtype,
                     N, C, K,
                     P, Q,
                     R=1, S=1,
                     pad_d=0, pad_h=0, pad_w=0,
                     str_d=1, str_h=1, str_w=1,
                     grid_P=0, grid_Q=0, update_size=None):
        """
        Create a new DeconvLayer parameter object.
        This then is passed as an argument to all the convolution operations.

        N: Number of images in mini-batch
        C: Number of output feature maps
        K: Number of input feature maps

        P: Height of input
        Q: Width of input

        R: Height of filter kernel
        S: Width  of filter kernel
        (NOTE(review): the original docstring also listed T, but deconv_layer
        takes no T parameter -- confirm against DeconvLayer's signature.)

        padding: amount of zero-padding around the given edge
        strides: factor to step the filters by in a given direction

        grid_P, grid_Q: For the update operation, define the size of the grid
        used to distribute the work across SMs.  The smaller the grid, the
        deeper the MM and hence more accumulation is done in fp32.  The bigger
        the grid, the more evenly the work can be spanned across the SMs, at
        the cost of needing more fp16 accumulation operations and increased
        error.
        Set to 1,1 for full fp32 accuracy.
        Set to P,Q for maximal distribution of work across SMs.
        Set to 0,0 for an automatically calculated optimal balance (recommended).
        Tweaking these params can have a large impact on performance as the
        L2 cache utilization is greatly affected by them.

        update_size: override kernel size selection for update.
            "C128_K64"
            "C128_K128"

        dtype: need to know dtype to set up proper kernels and params.

        Maximum utilization is achieved when N, K and C*R*S*T are multiples
        of 64.
        """
        return DeconvLayer(self, dtype, N, C, K, P, Q, R, S,
                           pad_d, pad_h, pad_w, str_d, str_h, str_w,
                           grid_P, grid_Q, update_size)
    def fprop_conv(self, layer, I, F, O, alpha=1.0, relu=False, repeat=1):
        """Forward convolution pass: validates buffer sizes, then dispatches
        the "fprop" kernel via _execute_conv (optionally fusing a relu)."""
        assert layer.sizeI == I.size
        assert layer.sizeF == F.size
        assert layer.sizeO == O.size
        return self._execute_conv(
            layer, "fprop", layer.fprop_size,
            layer.fprop_grid, layer.fprop_block, layer.fprop_args, layer.fprop_lut_size,
            I, F, O, alpha, relu, False, repeat)

    def bprop_conv(self, layer, F, E, grad_I, alpha=1.0, repeat=1):
        """Backward data pass: validates buffer sizes, then dispatches the
        "bprop" kernel via _execute_conv to fill grad_I."""
        assert layer.sizeF == F.size
        assert layer.sizeO == E.size
        assert layer.sizeI == grad_I.size
        return self._execute_conv(
            layer, "bprop", layer.bprop_size,
            layer.bprop_grid, layer.bprop_block, layer.bprop_args, layer.bprop_lut_size,
            E, F, grad_I, alpha, False, layer.bprop_zero, repeat)

    def update_conv(self, layer, I, E, grad_F, alpha=1.0, repeat=1):
        """Weight-gradient pass: validates buffer sizes, then dispatches the
        "updat" kernel via _execute_conv to fill grad_F."""
        assert layer.sizeI == I.size
        assert layer.sizeO == E.size
        assert layer.sizeF == grad_F.size
        # NOTE(review): this reads layer.update_args while the size/grid/block
        # attributes use the "updat" prefix -- confirm the attribute name on
        # the layer object.
        return self._execute_conv(
            layer, "updat", layer.updat_size,
            layer.updat_grid, layer.updat_block, layer.update_args, 0,
            I, E, grad_F, alpha, False, True, repeat)
def _execute_conv(self, layer, op, size, grid, block, args, shared, A, B, C, alpha, relu, zero, repeat):
assert A.dtype == B.dtype
clss = "hconv" if A.dtype.type is np.float16 else "sconv"
flags = 0
if relu: flags |= 2
B_gpudata = B.gpudata
C_gpudata = C.gpudata
shuffle_kernel = None
convert_data = False
if op == "bprop":
assert B.size <= self.scratch_size
B_gpudata = _get_scratch_data(self.scratch_size)
if zero:
shuffle_kernel = _get_transpose_kernel(B.dtype.str[1:])
else:
shuffle_kernel = _get_shuffle_kernel(B.dtype.str[1:])
shuffle_args = [ layer.shuffle_grid, layer.shuffle_block, self.stream,
B_gpudata, B.gpudata ] + layer.shuffle_args
if op == "updat" and C.dtype.type is not np.float32:
assert C.size <= self.scratch_size
C_gpudata = _get_scratch_data(self.scratch_size)
convert_data = True
kernel = _get_conv_kernel(self.cubin_path, clss, op, size)
params = [ grid, block, self.stream,
C_gpudata, A.gpudata, B_gpudata,
alpha, flags ] + args
# Warmup
if repeat > 1:
for r in range(max(repeat // 10, 1)):
kernel.prepared_async_call(*params, shared_size=shared)
if self.bench or repeat > 1:
start, end | |
<reponame>geransmith/axonius_api_client
# -*- coding: utf-8 -*-
"""API models for working with device and user assets."""
import copy
import sys
from ...constants import (DEFAULT_PATH, FIELD_JOINER, FIELD_TRIM_LEN,
FIELD_TRIM_STR, SCHEMAS_CUSTOM)
from ...exceptions import ApiError
from ...tools import (calc_percent, echo_error, echo_ok, echo_warn, get_path,
join_kv, listify)
class Base:
"""Object for handling callbacks for assets."""
CB_NAME = "base"
FIND_KEYS = ["name", "name_qual", "column_title", "name_base"]
    def __init__(
        self, apiobj, store, state=None, fields_map=None, getargs=None,
    ):
        """Object for handling callbacks for assets.

        :param apiobj: assets API object these callbacks operate against
        :param store: arguments dict used by the get-assets method
        :param state: paging-state dict maintained by the get-assets method
        :param fields_map: pre-fetched adapter -> field schemas; fetched when None
        :param getargs: original kwargs supplied to the get-assets method
        """
        self.LOG = apiobj.LOG.getChild(self.__class__.__name__)
        """:obj:`logging.Logger`: logger for this object."""
        self.APIOBJ = apiobj
        """:obj:`AssetMixin`: assets object."""
        self.ALL_SCHEMAS = fields_map or apiobj.fields.get()
        """:obj:`dict`: map of adapter -> field schemas."""
        self.STATE = state or {}
        """:obj:`dict`: state dict used by get assets method to track paging."""
        self.STORE = store or {}
        """:obj:`dict`: store dict used by get assets method to track arguments."""
        self.GETARGS = getargs or {}
        """:obj:`dict`: original kwargs supplied to get assets method."""
        self.RAN = []
        """:obj:`list` of :obj:`str`: used by callbacks to see if they've run already."""
        self.TAG_ROWS_ADD = []
        """:obj:`list` of :obj:`dict`: assets to add tags to in do_tagging."""
        self.TAG_ROWS_REMOVE = []
        """:obj:`list` of :obj:`dict`: assets to remove tags from in do_tagging."""
        # Subclass extension hook runs last so all base attributes exist.
        self._init()

    def _init(self):
        """Hook for subclasses to extend __init__; the base does nothing."""
        pass
    def start(self, **kwargs):
        """Run start callbacks: echo the configuration, arguments and columns."""
        join = "\n - "
        self.echo(msg=f"Starting {self}")
        cbargs = join + join.join(join_kv(obj=self.GETARGS))
        self.LOG.debug(f"Get Extra Arguments: {cbargs}")
        # NOTE(review): self.args_strs and self.schemas_selected are defined
        # elsewhere on this class (outside this excerpt) -- confirm in full file.
        config = join + join.join(self.args_strs)
        self.echo(msg=f"Configuration: {config}")
        store = join + join.join(join_kv(obj=self.STORE))
        self.echo(msg=f"Get Arguments: {store}")
        schemas_pretty = self.APIOBJ.fields._prettify_schemas(
            schemas=self.schemas_selected
        )
        schemas_pretty = join + join.join(schemas_pretty)
        self.echo(msg=f"Selected Columns: {schemas_pretty}")
        final_columns = join + join.join(self.final_columns)
        self.echo(msg=f"Final Columns: {final_columns}")

    def stop(self, **kwargs):
        """Run stop callbacks: apply queued tag changes, then announce stop."""
        self.do_tagging()
        self.echo(msg=f"Stopping {self}")

    def process_row(self, row):
        """Handle callbacks for an asset; returns the (possibly exploded) rows."""
        self.do_pre_row()
        return self.do_row(row=row)
    def do_pre_row(self):
        """Increment the processed-row counter and maybe echo progress."""
        self.STATE.setdefault("rows_processed_total", 0)
        self.STATE["rows_processed_total"] += 1
        self.echo_page_progress()

    def do_row(self, row):
        """Apply all per-row callbacks; may fan one row out into several."""
        self.process_tags_to_add(row=row)
        self.process_tags_to_remove(row=row)
        self.add_report_adapters_missing(row=row)
        # Per-schema transforms run before exploding so every produced row
        # sees the same null-filled / flattened structure.
        for schema in self.schemas_selected:
            self.do_excludes(row=row, schema=schema)
            self.do_add_null_values(row=row, schema=schema)
            self.do_flatten_fields(row=row, schema=schema)
        new_rows = self.do_explode_field(row=row)
        # Join/title transforms operate on each resulting row individually.
        for new_row in new_rows:
            self.do_join_values(row=new_row)
            self.do_change_field_titles(row=new_row)
        return new_rows
def echo_page_progress(self):
"""Asset callback to echo progress per N rows using an echo method."""
page_progress = self.GETARGS.get("page_progress", 10000)
if not page_progress or not isinstance(page_progress, int):
return
proc = self.STATE.get("rows_processed_total", 0) or 0
total = self.STATE.get("rows_to_fetch_total", 0) or 0
taken = self.STATE.get("fetch_seconds_total", 0) or 0
page_total = self.STATE.get("pages_to_fetch_total", 0) or 0
page_num = self.STATE.get("page_number", 0) or 0
if not ((proc % page_progress == 0) or (proc >= total) or (proc <= 1)):
return
percent = calc_percent(part=proc, whole=total)
percent = f"{percent:.2f}%"
percent = f"{percent:>7}"
total_len = len(str(total))
rows = f"[ROWS: {proc:>{total_len}} / {total}]"
page_total_len = len(str(page_total))
pages = f"[PAGES: {page_num:>{page_total_len}} / {page_total}]"
taken = f"{taken:.2f} seconds so far"
self.echo(msg=f"PROGRESS: {percent} {rows} {pages} in {taken}")
    def do_add_null_values(self, row, schema, key="name_qual"):
        """Null out missing fields, recursing into complex sub-fields.

        Top-level fields are keyed by "name_qual"; items inside a complex
        field's list use the sub-field's short "name".
        """
        if not self.GETARGS.get("field_null", False) or self.is_excluded(schema=schema):
            return
        null_value = self.GETARGS.get("field_null_value", None)
        field = schema[key]
        if schema["is_complex"]:
            # A complex field is a list of sub-objects: null-fill each item.
            row[field] = listify(row.get(field, []))
            for item in row[field]:
                for sub_schema in self.get_sub_schemas(schema=schema):
                    self.do_add_null_values(schema=sub_schema, row=item, key="name")
        else:
            row[field] = row.get(field, null_value)

    def do_excludes(self, row, schema):
        """Asset callback to remove excluded fields from a row."""
        if not self.GETARGS.get("field_excludes", []):
            return
        if self.is_excluded(schema=schema):
            row.pop(schema["name_qual"], None)
            return
        if schema["is_complex"]:
            # The field itself stays, but excluded sub-fields are stripped
            # from each of its items.
            items = listify(row.get(schema["name_qual"], []))
            for sub_schema in schema["sub_fields"]:
                if self.is_excluded(schema=sub_schema):
                    for item in items:
                        item.pop(sub_schema["name"], None)
def do_join_values(self, row):
"""Join values."""
if not self.GETARGS.get("field_join", False):
return
joiner = str(self.GETARGS.get("field_join_value", FIELD_JOINER))
trim_len = self.GETARGS.get("field_join_trim", FIELD_TRIM_LEN)
trim_str = FIELD_TRIM_STR
for field in row:
if isinstance(row[field], list):
row[field] = joiner.join([str(x) for x in row[field]])
if trim_len and isinstance(row[field], str):
field_len = len(row[field])
if len(row[field]) >= trim_len:
msg = trim_str.format(field_len=field_len, trim_len=trim_len)
row[field] = joiner.join([row[field][:trim_len], msg])
def do_change_field_titles(self, row):
"""Asset callback to change qual name to title."""
if not self.GETARGS.get("field_titles", False):
return
for schema in self.final_schemas:
row[schema["column_title"]] = row.pop(schema["name_qual"], None)
    def do_flatten_fields(self, row, schema):
        """Asset callback to flatten complex fields (except the exploded one)."""
        if not self.GETARGS.get("field_flatten", False):
            return
        # The exploded field is handled by do_explode_field itself.
        if self.schema_to_explode == schema:
            return
        self._do_flatten_fields(row=row, schema=schema)

    def _do_flatten_fields(self, row, schema):
        """Lift a complex field's sub-values into parallel top-level lists."""
        if self.is_excluded(schema=schema):
            return
        if not schema["is_complex"]:
            return
        null_value = self.GETARGS.get("field_null_value", None)
        # Remove the complex field and fan its items out under the
        # sub-fields' qualified names, preserving item order.
        items = listify(row.pop(schema["name_qual"], []))
        for sub_schema in self.get_sub_schemas(schema=schema):
            row[sub_schema["name_qual"]] = []
            for item in items:
                value = item.pop(sub_schema["name"], null_value)
                value = value if isinstance(value, list) else [value]
                row[sub_schema["name_qual"]] += value

    def do_explode_field(self, row):
        """Explode a field into multiple rows (one row per value/item)."""
        explode = self.GETARGS.get("field_explode", "")
        null_value = self.GETARGS.get("field_null_value", None)
        if not explode:
            return [row]
        schema = self.schema_to_explode
        if self.is_excluded(schema=schema):
            return [row]
        # Keep a pristine copy: if there is nothing to explode we fall back
        # to flattening the original row instead.
        original_row = copy.deepcopy(row)
        if schema["is_complex"]:
            # One new row per item; each sub-field lands under its qual name.
            new_rows_map = {}
            items = listify(row.pop(schema["name_qual"], []))
            for sub_schema in self.get_sub_schemas(schema=schema):
                for idx, item in enumerate(items):
                    new_rows_map.setdefault(idx, copy.deepcopy(row))
                    value = item.pop(sub_schema["name"], null_value)
                    new_rows_map[idx][sub_schema["name_qual"]] = value
        else:
            # Simple field: one new row per value.
            new_rows_map = {}
            items = listify(row.pop(schema["name_qual"], []))
            for idx, item in enumerate(items):
                new_rows_map.setdefault(idx, copy.deepcopy(row))
                new_rows_map[idx][schema["name_qual"]] = item
        new_rows = [new_rows_map[idx] for idx in new_rows_map]
        if not new_rows:
            self._do_flatten_fields(row=original_row, schema=schema)
            return [original_row]
        return new_rows
    def do_tagging(self):
        """Apply tag additions and removals queued during row processing."""
        self.do_tag_add()
        self.do_tag_remove()

    def do_tag_add(self):
        """Add requested tags to all queued assets in one API call."""
        tags_add = listify(self.GETARGS.get("tags_add", []))
        rows_add = self.TAG_ROWS_ADD
        if tags_add and rows_add:
            self.echo(msg=f"Adding tags {tags_add} to {len(rows_add)} assets")
            self.APIOBJ.labels.add(rows=rows_add, labels=tags_add)

    def do_tag_remove(self):
        """Remove requested tags from all queued assets in one API call."""
        tags_remove = listify(self.GETARGS.get("tags_remove", []))
        rows_remove = self.TAG_ROWS_REMOVE
        if tags_remove and rows_remove:
            self.echo(msg=f"Removing tags {tags_remove} from {len(rows_remove)} assets")
            self.APIOBJ.labels.remove(rows=rows_remove, labels=tags_remove)

    def process_tags_to_add(self, row):
        """Queue this asset for tagging (deduplicated by internal_axon_id)."""
        tags = listify(self.GETARGS.get("tags_add", []))
        if not tags:
            return
        tag_row = {"internal_axon_id": row["internal_axon_id"]}
        if tag_row not in self.TAG_ROWS_ADD:
            self.TAG_ROWS_ADD.append(tag_row)

    def process_tags_to_remove(self, row):
        """Queue this asset for tag removal (deduplicated by internal_axon_id)."""
        tags = listify(self.GETARGS.get("tags_remove", []))
        if not tags:
            return
        tag_row = {"internal_axon_id": row["internal_axon_id"]}
        if tag_row not in self.TAG_ROWS_REMOVE:
            self.TAG_ROWS_REMOVE.append(tag_row)
def add_report_adapters_missing(self, row):
"""Pass."""
if not self.GETARGS.get("report_adapters_missing", False):
return
schemas = SCHEMAS_CUSTOM["report_adapters_missing"]
schema = schemas["adapters_missing"]
field_name = schema["name_qual"]
adapters_row = row.get("adapters", [])
adapter_map = self.adapter_map
missing = []
for adapter in adapter_map["all"]:
if adapter in adapters_row:
continue
if adapter not in adapter_map["all_fields"]:
continue
if adapter not in missing:
missing.append(adapter)
row[field_name] = missing
def is_excluded(self, schema):
"""Check if a name supplied to field_excludes matches one of GET_SCHEMA_KEYS."""
excludes = listify(self.GETARGS.get("field_excludes", []))
for exclude in excludes:
for key in self.FIND_KEYS:
name = schema.get(key, None)
if (name and exclude) and name == exclude:
return True
return False
    def open_fd_arg(self):
        """Use the caller-supplied file descriptor from GETARGS["export_fd"].

        The fd is only closed later if "export_fd_close" is truthy (default
        False, since the caller owns the descriptor).
        """
        self._fd = self.GETARGS["export_fd"]
        self._fd_close = self.GETARGS.get("export_fd_close", False)
        self.echo(msg=f"Exporting to {self._fd}")
        return self._fd
def open_fd_path(self):
"""Pass."""
self._export_file = self.GETARGS.get("export_file", None)
self._export_path = self.GETARGS.get("export_path", DEFAULT_PATH)
self._export_overwrite = self.GETARGS.get("export_overwrite", False)
file_path = get_path(obj=self._export_path)
file_path.mkdir(mode=0o700, parents=True, exist_ok=True)
self._file_path = fp = (file_path / self._export_file).resolve()
if self._file_path.exists():
self._file_mode = "overwrote"
mode = "overwriting"
else:
self._file_mode = "created"
mode = "creating"
if self._file_path.exists() and not self._export_overwrite:
msg = f"Export file '{fp}' already exists and overwite is False!"
self.echo(msg=msg, error=ApiError, level="error")
self._file_path.touch(mode=0o600)
self._fd_close = self.GETARGS.get("export_fd_close", True)
self._fd = self._file_path.open(mode="w", encoding="utf-8")
self.echo(msg=f"Exporting to file '{fp}' ({mode})")
return self._fd
    def open_fd_stdout(self):
        """Export to stdout; never mark it for closing."""
        self._file_path = None
        self._fd_close = False
        self._fd = sys.stdout
        self.echo(msg="Exporting to stdout")
        return self._fd

    def open_fd(self):
        """Open a file descriptor: explicit fd, then file path, else stdout."""
        if "export_fd" in self.GETARGS:
            self.open_fd_arg()
        elif self.GETARGS.get("export_file", None):
            self.open_fd_path()
        else:
            self.open_fd_stdout()
        return self._fd

    def close_fd(self):
        """Write a trailing newline and close the fd if this object owns it."""
        self._fd.write("\n")
        if getattr(self, "_fd_close", False):
            name = str(getattr(self._fd, "name", self._fd))
            self.echo(msg=f"Finished exporting to {name!r}")
            self._fd.close()
def echo(
self,
msg,
error=False,
warning=False,
level="info",
level_error="error",
level_warning="warning",
abort=True,
):
"""Pass."""
do_echo = self.GETARGS.get("do_echo", False)
if do_echo:
if warning:
echo_warn(msg=msg)
elif error:
echo_error(msg=msg, abort=abort)
else:
echo_ok(msg=msg)
return
if warning:
getattr(self.LOG, level_warning)(msg)
elif error:
getattr(self.LOG, level_error)(msg)
if abort:
raise error(msg)
getattr(self.LOG, level)(msg)
    def get_sub_schemas(self, schema):
        """Yield the non-excluded root sub-field schemas of a complex schema."""
        sub_schemas = schema["sub_fields"]
        for sub_schema in sub_schemas:
            if self.is_excluded(schema=sub_schema) or not sub_schema["is_root"]:
                continue
            yield sub_schema

    @property
    def custom_schemas(self):
        """Schemas for synthetic fields added by report options."""
        schemas = []
        if self.GETARGS.get("report_adapters_missing", False):
            schemas += list(SCHEMAS_CUSTOM["report_adapters_missing"].values())
        return schemas

    @property
    def final_schemas(self):
        """Predict the future schemas that will be returned (memoized)."""
        if hasattr(self, "_final_schemas"):
            return self._final_schemas
        flat = self.GETARGS.get("field_flatten", False)
        explode_field_name = self.schema_to_explode.get("name_qual", "")
        final = {}
        for schema in self.schemas_selected:
            if self.is_excluded(schema=schema):
                continue
            # Complex fields that get flattened or exploded surface their
            # sub-fields in the output instead of themselves.
            is_explode_field = schema["name_qual"] == explode_field_name
            if schema["is_complex"] and (is_explode_field or flat):
                for sub_schema in self.get_sub_schemas(schema=schema):
                    final[sub_schema["name_qual"]] = sub_schema
            else:
                final.setdefault(schema["name_qual"], schema)
        self._final_schemas = list(final.values())
        return self._final_schemas
@property
def final_columns(self):
"""Pass."""
if hasattr(self, "_final_columns"):
return self._final_columns
use_titles = self.GETARGS.get("field_titles", False)
key = "column_title" if use_titles else "name_qual"
self._final_columns = [x[key] for x in self.final_schemas]
return self._final_columns
@property
def fields_selected(self):
"""Pass."""
if hasattr(self, "_fields_selected"):
return self._fields_selected
include_details = self.STORE.get("include_details", False)
fields = listify(self.STORE.get("fields", []))
api_fields = [x for x in self.APIOBJ.FIELDS_API if x not in fields]
if include_details:
api_fields += ["meta_data.client_used", "unique_adapter_names_details"]
self._fields_selected = []
for field in api_fields + fields:
self._fields_selected.append(field)
| |
<filename>metalibm-master/metalibm_core/targets/intel/x86_processor.py
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# created: Apr 11th, 2014
# last-modified: Mar 7th, 2018
#
# Author(s): <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>
###############################################################################
from metalibm_core.code_generation.generator_utility import *
from metalibm_core.code_generation.complex_generator import ComplexOperator
from metalibm_core.core.ml_formats import *
from metalibm_core.core.ml_complex_formats import ML_Pointer_Format
from metalibm_core.core.ml_operations import *
from metalibm_core.core.target import UniqueTargetDecorator
from metalibm_core.core.ml_table import ML_TableFormat
from metalibm_core.utility.debug_utils import ML_Debug
from metalibm_core.core.special_values import (FP_PlusZero, FP_MinusZero)
from metalibm_core.core.meta_interval import MetaInterval
from metalibm_core.core.legalizer import minmax_legalizer_wrapper
from metalibm_core.opt.opt_utils import (
uniform_list_check, uniform_vector_constant_check, uniform_shift_check)
from metalibm_core.utility.log_report import Log
from metalibm_core.utility.debug_utils import debug_multi
from metalibm_core.targets.common.vector_backend import VectorBackend
from metalibm_core.code_generation.abstract_backend import LOG_BACKEND_INIT
from metalibm_core.code_generation.generic_processor import GenericProcessor
from metalibm_core.code_generation.complex_generator import DynamicOperator
from .x86_processor_table import x86_sse_approx_table_map
def get_sse_scalar_cst(format_object, value, language = C_Code):
    """Generate a braced constant for a scalar stored in an SSE register format."""
    base_format = format_object.get_base_format()
    if isinstance(value, FP_PlusZero):
        # +0.0 is rendered through the base format's zero constant.
        value_str = base_format.get_cst(0, language)
    elif isinstance(value, FP_MinusZero):
        # -0.0 gets an explicit leading minus on the zero constant.
        value_str = "-" + base_format.get_cst(0, language)
    else:
        value_str = base_format.get_cst(value, language)
    return "{{{}}}/*sse*/".format(value_str)
def get_sse_vector_float_cst(format_object, value, language=C_Code):
    """Generate vector constant value for SSE vector format"""
    scalar_format = format_object.get_scalar_format()
    rendered = []
    for scalar_value in value:
        rendered.append("{}".format(scalar_format.get_cst(scalar_value, language)))
    return "{{{}}}/* sse */".format(", ".join(rendered))
def signed2unsigned(value, width=32):
    """Return the two's complement unsigned encoding of *value* on *width* bits."""
    if value < 0:
        return int(value + 2 ** width)
    return int(value)
def unsigned2signed(value, width=32):
    """Decode a *width*-bit two's complement encoding back to its signed value."""
    # If the sign bit is set, subtract the full range to recover the negative.
    sign_bit = value >> (width - 1)
    return int(value - sign_bit * 2 ** width)
def get_sseavx_vector_bool_cst(format_object, value, language=C_Code):
    """ Convert the list of constant boolean values <value> into constant
        code for SSE/AVX vectors
    """
    # ML_<SSE/AVX>_v<i>bool is not a practical format: it must be legalized
    # away before code generation, so reaching this constant generator means
    # a bug upstream.  Log.report at Error level reports it; the raise below
    # is a safety net in case error reporting is configured as non-fatal.
    Log.report(Log.Error, "ML_(SSE/AVX)_v<i>bool format {} with value {} should not generate code", format_object, value)
    raise NotImplementedError
def get_sse_vector_int_cst(format_object, value, language=C_Code):
    """ integer constant must be packed as 64-bit signed values if built by gcc

    Args:
        format_object: vector integer format of the constant
        value (list): scalar integer component values (lane 0 first)
        language: target language (defaults to C_Code)

    Returns:
        str: C initializer string packing the lanes into signed 64-bit chunks
    """
    scalar_format = format_object.get_scalar_format()
    scalar_w = scalar_format.get_bit_size()
    # Pack all lanes into one big unsigned integer, most significant lane
    # first (value is reversed so lane 0 ends up in the low bits).
    # Fix: the original used reduce(), which is not a builtin in Python 3
    # and no functools import is visible in this file.
    compound_cst = 0
    for lane in value[::-1]:
        compound_cst = compound_cst * 2**scalar_w + signed2unsigned(lane, scalar_w)
    component_w = 64
    value_list = []
    # Slice the packed integer back into signed 64-bit components.
    # NOTE: an all-zero vector yields an empty initializer "{}/* sse */",
    # same as the original behavior.
    while compound_cst != 0:
        component_abs_value = compound_cst % 2**component_w
        compound_cst >>= component_w
        value_list.append(unsigned2signed(component_abs_value, component_w))
    value_enc_list = ["{}".format(ML_Int64.get_cst(v, language)) for v in value_list]
    return "{{{}}}/* sse */".format(", ".join(value_enc_list))
# Opaque formats mapping directly onto the raw x86 vector register types.
# The last argument (value constructor) is a no-op lambda: constants for
# these raw formats are emitted through the Virtual*/vector wrappers below.
ML_SSE_m128 = ML_FormatConstructor(128, "__m128", None, lambda v: None)
ML_SSE_m128i = ML_FormatConstructor(128, "__m128i", None, lambda v: None)
ML_SSE_m128d = ML_FormatConstructor(128, "__m128d", None, lambda v: None)
ML_AVX_m256 = ML_FormatConstructor(256, "__m256", None, lambda v: None)
ML_AVX_m256i = ML_FormatConstructor(256, "__m256i", None, lambda v: None)
ML_AVX_m256d = ML_FormatConstructor(256, "__m256d", None, lambda v: None)
# Virtual scalar formats: a single scalar value carried in the low lane of a
# 128-bit XMM register; constants are rendered by get_sse_scalar_cst.
## format for a single fp32 stored in a XMM 128-bit register
ML_SSE_m128_v1float32 = VirtualFormatNoForward(ML_Binary32, ML_SSE_m128, get_sse_scalar_cst, True)
## format for single 1 fp64 in a XMM 128-bit register
ML_SSE_m128_v1float64 = VirtualFormatNoForward(ML_Binary64, ML_SSE_m128d, get_sse_scalar_cst, True)
## format for a single int32 stored in a XMM 128-bit register
ML_SSE_m128_v1int32 = VirtualFormatNoForward(ML_Int32, ML_SSE_m128i, get_sse_scalar_cst, True)
## format for a single uint32 stored in a XMM 128-bit register
ML_SSE_m128_v1uint32 = VirtualFormatNoForward(ML_UInt32, ML_SSE_m128i, get_sse_scalar_cst, True)
## format for single 1 int64 in a XMM 128-bit register
ML_SSE_m128_v1int64 = VirtualFormatNoForward(ML_Int64, ML_SSE_m128i, get_sse_scalar_cst, True)
# Packed SSE vector formats (XMM registers). Integer variants share
# get_sse_vector_int_cst so their constants are packed as 64-bit chunks.
## format for packed 2 fp32 in a XMM 128-bit register
ML_SSE_m128_v2float32 = vector_format_builder("__m128", None, 2, ML_Binary32,
                                              cst_callback = get_sse_vector_float_cst)
## format for packed 4 fp32 in a XMM 128-bit register
ML_SSE_m128_v4float32 = vector_format_builder("__m128", None, 4, ML_Binary32,
                                              cst_callback = get_sse_vector_float_cst)
## format for packed 2 fp64 in a XMM 128-bit register
ML_SSE_m128_v2float64 = vector_format_builder("__m128d", None, 2, ML_Binary64)
## format for packed 2 int32 in a XMM 128-bit register
ML_SSE_m128_v2int32 = vector_format_builder("__m128i", None, 2, ML_Int32,
                                            cst_callback = get_sse_vector_int_cst,
                                            compound_constructor = ML_IntegerVectorFormat)
## format for packed 4 int32 in a XMM 128-bit register
ML_SSE_m128_v4int32 = vector_format_builder("__m128i", None, 4, ML_Int32,
                                            cst_callback = get_sse_vector_int_cst,
                                            compound_constructor = ML_IntegerVectorFormat)
## format for packed 2 int64 in a XMM 128-bit register
ML_SSE_m128_v2int64 = vector_format_builder("__m128i", None, 2, ML_Int64,
                                            cst_callback = get_sse_vector_int_cst,
                                            compound_constructor = ML_IntegerVectorFormat)
## format for packed 2 uint32 in a XMM 128-bit register
ML_SSE_m128_v2uint32 = vector_format_builder("__m128i", None, 2, ML_UInt32,
                                             cst_callback = get_sse_vector_int_cst,
                                             compound_constructor = ML_IntegerVectorFormat)
## format for packed 4 uint32 in a XMM 128-bit register
ML_SSE_m128_v4uint32 = vector_format_builder("__m128i", None, 4, ML_UInt32,
                                             cst_callback = get_sse_vector_int_cst,
                                             compound_constructor = ML_IntegerVectorFormat)
## format for packed 2 uint64 in a XMM 128-bit register
ML_SSE_m128_v2uint64 = vector_format_builder("__m128i", None, 2, ML_UInt64,
                                             cst_callback = get_sse_vector_int_cst,
                                             compound_constructor = ML_IntegerVectorFormat)
# debug-format for SSE format
# Each printer extracts the lanes one by one with SSE4.1 intrinsics
# (hence the smmintrin.h requirement) and formats them via printf-style
# display_format ("%a" = C99 hex-float).
debug_sse_vfloat32 = ML_Debug(
    display_format="{%a, %a, %a, %a}",
    require_header=["ml_utils.h", "smmintrin.h"],
    pre_process=lambda v: ", ".join("float_from_32b_encoding(_mm_extract_ps({v}, {i}))".format(v=v,i=i) for i in range(4))
)
debug_sse_vint32 = ML_Debug(
    display_format="{%d, %d, %d, %d}",
    require_header=["ml_utils.h", "smmintrin.h"],
    pre_process=lambda v: ", ".join("_mm_extract_epi32({v}, {i})".format(v=v,i=i) for i in range(4))
)
# unsigned version (same lane extraction, printed with %u)
debug_sse_vuint32 = ML_Debug(
    display_format="{%u, %u, %u, %u}",
    require_header=["ml_utils.h", "smmintrin.h"],
    pre_process=lambda v: ", ".join("_mm_extract_epi32({v}, {i})".format(v=v,i=i) for i in range(4))
)
# virtual vector boolean format
# These must never reach constant generation: get_sseavx_vector_bool_cst
# reports an error and raises.
ML_SSE_m128_v4bool = VirtualFormatNoForward(
    v4bool, ML_SSE_m128i, get_sseavx_vector_bool_cst, True)
ML_SSE_m128_v2lbool = VirtualFormatNoForward(
    v2lbool, ML_SSE_m128i, get_sseavx_vector_bool_cst, True)
ML_AVX_m256_v8bool = VirtualFormatNoForward(
    v8bool, ML_AVX_m256i, get_sseavx_vector_bool_cst, True)
ML_AVX_m256_v4lbool = VirtualFormatNoForward(
    v4lbool, ML_AVX_m256i, get_sseavx_vector_bool_cst, True)
# registering ML_SSE_m128_v<i>float32 specific format
# (the scalar v1 variants reuse the 4-lane printers)
debug_multi.add_mapping(ML_SSE_m128_v4float32, debug_sse_vfloat32)
debug_multi.add_mapping(ML_SSE_m128_v2float32, debug_sse_vfloat32)
debug_multi.add_mapping(ML_SSE_m128_v1float32, debug_sse_vfloat32)
# registering ML_SSE_m128_v<i>int32 specific format
debug_multi.add_mapping(ML_SSE_m128_v4int32, debug_sse_vint32)
debug_multi.add_mapping(ML_SSE_m128_v2int32, debug_sse_vint32)
debug_multi.add_mapping(ML_SSE_m128_v1int32, debug_sse_vint32)
# registering ML_SSE_m128_v<i>uint32 specific format
debug_multi.add_mapping(ML_SSE_m128_v4uint32, debug_sse_vuint32)
debug_multi.add_mapping(ML_SSE_m128_v2uint32, debug_sse_vuint32)
debug_multi.add_mapping(ML_SSE_m128_v1uint32, debug_sse_vuint32)
# registering ML_SSE_m128_v<i>bool specific format (printed as signed ints)
debug_multi.add_mapping(ML_SSE_m128_v4bool, debug_sse_vint32)
# Packed AVX vector formats (YMM registers); integer constants reuse the
# SSE 64-bit packing callback.
## format for packed 8 fp32 in a YMM 256-bit register
ML_AVX_m256_v8float32 = vector_format_builder("__m256", None, 8, ML_Binary32)
## format for packed 4 fp64 in a YMM 256-bit register
ML_AVX_m256_v4float64 = vector_format_builder("__m256d", None, 4, ML_Binary64)
## format for packed 4 int32 in a YMM 256-bit register
ML_AVX_m256_v4int32 = vector_format_builder("__m256i", None, 4, ML_Int32,
                                            cst_callback = get_sse_vector_int_cst,
                                            compound_constructor = ML_IntegerVectorFormat)
## format for packed 8 int32 in a YMM 256-bit register
ML_AVX_m256_v8int32 = vector_format_builder("__m256i", None, 8, ML_Int32,
                                            cst_callback = get_sse_vector_int_cst,
                                            compound_constructor = ML_IntegerVectorFormat)
## format for packed 4 int64 in a YMM 256-bit register
ML_AVX_m256_v4int64 = vector_format_builder("__m256i", None, 4, ML_Int64,
                                            cst_callback = get_sse_vector_int_cst,
                                            compound_constructor = ML_IntegerVectorFormat)
## format for packed 4 uint32 in a YMM 256-bit register
ML_AVX_m256_v4uint32 = vector_format_builder("__m256i", None, 4, ML_UInt32,
                                             cst_callback = get_sse_vector_int_cst,
                                             compound_constructor = ML_IntegerVectorFormat)
## format for packed 8 uint32 in a YMM 256-bit register
ML_AVX_m256_v8uint32 = vector_format_builder("__m256i", None, 8, ML_UInt32,
                                             cst_callback = get_sse_vector_int_cst,
                                             compound_constructor = ML_IntegerVectorFormat)
## format for packed 4 uint64 in a YMM 256-bit register
ML_AVX_m256_v4uint64 = vector_format_builder("__m256i", None, 4, ML_UInt64,
                                             cst_callback = get_sse_vector_int_cst,
                                             compound_constructor = ML_IntegerVectorFormat)
# debug utilities for AVX format
# Each printer extracts the 8 lanes of a YMM register and formats them
# via printf-style display_format ("%a" = C99 hex-float).
debug_avx_vfloat32 = ML_Debug(
    # Fix: display_format previously contained the typo "%a, %an %a" which
    # does not match the 8 comma-separated values built by pre_process.
    display_format="{%a, %a, %a, %a, %a, %a, %a, %a}",
    require_header=["ml_utils.h", "smmintrin.h"],
    pre_process=lambda v: ", ".join("float_from_32b_encoding(_mm256_extract_epi32(_mm256_castps_si256({v}), {i}))".format(v=v,i=i) for i in range(8))
)
debug_avx_vint32 = ML_Debug(
    display_format="{%d, %d, %d, %d, %d, %d, %d, %d}",
    require_header=["ml_utils.h", "smmintrin.h"],
    pre_process=lambda v: ", ".join("_mm256_extract_epi32({v}, {i})".format(v=v,i=i) for i in range(8))
)
# unsigned version (same lane extraction, printed with %u)
debug_avx_vuint32 = ML_Debug(
    display_format="{%u, %u, %u, %u, %u, %u, %u, %u}",
    require_header=["ml_utils.h", "smmintrin.h"],
    pre_process=lambda v: ", ".join("_mm256_extract_epi32({v}, {i})".format(v=v,i=i) for i in range(8))
)
# registering ML_AVX_m256_v<i>float32 specific format
debug_multi.add_mapping(ML_AVX_m256_v8float32, debug_avx_vfloat32)
# registering ML_AVX_m256_v<i>int32 specific format
debug_multi.add_mapping(ML_AVX_m256_v8int32, debug_avx_vint32)
# registering ML_AVX_m256_v<i>uint32 specific format
debug_multi.add_mapping(ML_AVX_m256_v8uint32, debug_avx_vuint32)
# registering ML_AVX_m256_v<i>bool specific format (printed as signed ints)
debug_multi.add_mapping(ML_AVX_m256_v8bool, debug_avx_vint32)
## Wrapper for intel x86_sse intrinsics
#  defined in <xmmintrin.h> header
def XmmIntrin(*args, **kw):
    """Build a FunctionOperator that requires the SSE header <xmmintrin.h>."""
    kw["require_header"] = ["xmmintrin.h"]
    return FunctionOperator(*args, **kw)
## Wrapper for intel x86_sse2 intrinsics
#  defined in <emmintrin.h> header
def EmmIntrin(*args, **kw):
    """Build a FunctionOperator that requires the SSE2 header <emmintrin.h>."""
    kw["require_header"] = ["emmintrin.h"]
    return FunctionOperator(*args, **kw)
## Wrapper for intel x86_ssse3 intrinsics
#  defined in <tmmintrin.h> header
def TmmIntrin(*args, **kw):
    """Build a FunctionOperator that requires the SSSE3 header <tmmintrin.h>."""
    kw["require_header"] = ["tmmintrin.h"]
    return FunctionOperator(*args, **kw)
## Wrapper for intel x86 sse4.1 intrinsics
#  defined in <smmintrin.h> header
def SmmIntrin(*args, **kw):
    """Build a FunctionOperator that requires the SSE4.1 header <smmintrin.h>."""
    kw["require_header"] = ["smmintrin.h"]
    return FunctionOperator(*args, **kw)
## Wrapper for intel x86_avx2 intrinsics
#  defined in <immintrin.h> header
def ImmIntrin(*args, **kw):
    """Build a FunctionOperator that requires the AVX2 header <immintrin.h>."""
    kw["require_header"] = ["immintrin.h"]
    return FunctionOperator(*args, **kw)
# Conversion function from any float to a float packed into a __m128 register
# (force_folding keeps the intrinsic call out of larger expressions so the
# scalar value is materialized in an XMM register)
_mm_set_ss = XmmIntrin("_mm_set_ss", arity = 1, force_folding = True,
                       output_precision = ML_SSE_m128_v1float32)
# same for a double packed into a __m128d register
_mm_set_sd = XmmIntrin("_mm_set_sd", arity = 1, force_folding = True,
                       output_precision = ML_SSE_m128_v1float64)
_mm_set1_epi32 = XmmIntrin("_mm_set1_epi32", arity = 1, force_folding = True,
output_precision = | |
<reponame>Lachimax/FRB<gh_stars>0
""" Module for an FRB event
"""
import inspect
from pkg_resources import resource_filename
import os
import glob
import copy
import numpy as np
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units
from linetools import utils as ltu
from frb import utils
from frb import mw
from frb import defs
from frb.galaxies import frbgalaxy
from IPython import embed
class GenericFRB(object):
    """
    Parent object for FRBs

    Args:
        S : Quantity
          Source density of the burst
        nu_c : Quantity
          Centre frequency
        DM : Quantity
          Dispersion Measure
        coord (astropy.coordinates.SkyCoord): multi-format, optional
          RA/DEC in one of many formats (see utils.radec_to_coord)
        cosmo: optional
          Cosmology; defaults to defs.frb_cosmo

    Attributes:
        fluence (Quantity):
            Fluence
        fluence_err (Quantity):
        DM (Quantity):
            Dispersion Measure
        DM_err (Quantity):
        RM (Quantity):
            Rotation Measure
        RM_err (Quantity):
        lpol (float):
            Linear Polarization (%)
        lpol_err (Quantity):
        refs (list):
            List of str, reference names
        z (float):
            Redshift
        z_err (float):
            Uncertainty in the redshift
        repeater (bool):
            Marks the FRB as being a Repeater
    """
    @classmethod
    def from_dict(cls, idict, **kwargs):
        """
        Instantiate from a dict

        Note: idict is consumed (keys are popped) as it is parsed.

        Args:
            idict (dict):
            **kwargs: Passed to the __init__ call

        Returns:
            GenericFRB:
        """
        # Init
        slf = cls(idict['S'], idict['nu_c'], idict['DM'], **kwargs)
        for key in ['S', 'nu_c', 'DM']:
            idict.pop(key)
        # FRB coord
        if 'ra' in idict.keys():
            slf.coord = SkyCoord(ra=idict['ra'],
                                 dec=idict['dec'],
                                 unit='deg')
        # Check cosmology
        if slf.cosmo.name != idict['cosmo']:
            raise AssertionError("Your cosmology does not match the expected. Gotta deal..")
        # dicts (e.g. eellipse, pulse) are attached wholesale
        for ndict in slf.main_dict:
            if ndict in idict.keys():
                setattr(slf, ndict, idict[ndict])
                idict.pop(ndict)
        # Remainder becomes plain attributes
        for key in idict.keys():
            setattr(slf, key, idict[key])
        # Return
        return slf

    @classmethod
    def from_json(cls, json_file, **kwargs):
        """
        Instantiate from a JSON file
        A simple wrapper to the from_dict method

        Args:
            json_file (str):
            **kwargs: Passed to from_dict()

        Returns:
            slf
        """
        idict = utils.loadjson(json_file)
        slf = cls.from_dict(idict, **kwargs)
        return slf

    def __init__(self, S, nu_c, DM, coord=None, cosmo=None, repeater=None):
        """
        See the class docstring for the argument descriptions.
        """
        self.S = S
        self.nu_c = nu_c
        # NE2001 (for speed)
        self.DMISM = None
        self.DMISM_err = None
        # Repeater?
        self.repeater = repeater
        # Coord
        if coord is not None:
            self.coord = utils.radec_to_coord(coord)
        else:
            self.coord = None
        # Cosmology
        if cosmo is None:
            self.cosmo = defs.frb_cosmo
        else:
            self.cosmo = cosmo
        # Attributes
        self.z = None
        self.frb_name = None
        self.fluence = None
        self.fluence_err = None
        self.DM = DM
        self.DM_err = None
        self.RM = None
        self.RM_err = None
        self.lpol = None
        self.lpol_err = None
        self.refs = []
        # dicts of attributes to be read/written
        self.eellipse = {}
        self.pulse = {}
        self.main_dict = ['eellipse', 'pulse']

    def set_DMISM(self):
        """Evaluate and set DMISM, the Galactic ISM contribution to the DM,
        at the FRB coordinate. Requires self.coord to be set."""
        if self.coord is None:
            print("Need to set coord first!")
            # Bug fix: previously fell through and called mw.ismDM(None)
            return
        self.DMISM = mw.ismDM(self.coord)

    def set_ee(self, a, b, theta, cl, stat=True):
        """
        Set an error ellipse for the FRB position

        Args:
            a (float): major axis; Arcsec
            b (float): minor axis; Arcsec
            theta (float): rotation of the major axis E from N (deg)
            cl (float): confidence level
            stat (bool, optional):
                If True, fill in statistical error
                if False, fill in systematic

        Raises:
            IOError: if a < b
        """
        if a < b:
            raise IOError("For the ellipse, a must be greater than or equal to b")
        if stat:
            self.eellipse['a'] = a
            self.eellipse['b'] = b
            self.eellipse['theta'] = theta
            self.eellipse['cl'] = cl
        else:
            self.eellipse['a_sys'] = a
            self.eellipse['b_sys'] = b
            self.eellipse['theta_sys'] = theta
            self.eellipse['cl_sys'] = cl
        #
        return

    @property
    def sig_a(self):
        """
        Combined (statistical + systematic, in quadrature) semi-major
        axis error, or None if no error ellipse is set

        Returns:
            float:
        """
        if len(self.eellipse) == 0:
            return None
        siga = self.eellipse['a']  # arcsec
        if 'a_sys' in self.eellipse.keys():
            siga = np.sqrt(self.eellipse['a_sys']**2 + siga**2)
        return siga

    @property
    def sig_b(self):
        """
        Combined (statistical + systematic, in quadrature) semi-minor
        axis error, or None if no error ellipse is set

        Returns:
            float:
        """
        if len(self.eellipse) == 0:
            return None
        sigb = self.eellipse['b']  # arcsec
        if 'b_sys' in self.eellipse.keys():
            sigb = np.sqrt(self.eellipse['b_sys']**2 + sigb**2)
        return sigb

    def set_pulse(self, freq,
                  time_res=None, t0=None, Wi=None, Wi_err=None,
                  tscatt=None, tscatt_err=None, scatt_index=None,
                  scatt_index_err=None, DM_smear=None):
        """
        Fill self.pulse with all the pulse parameters (named as the
        arguments below; unspecified ones are stored as None).

        Args:
            freq (Quantity):
                Frequency at which the pulse was analyzed
            time_res (Quantity):
                Time resolution of the telescope/instrument
            t0 (Quantity):
                Pulse arrival time (MJD) at top band frequency
            Wi (Quantity):
                Intrinsic width
            Wi_err (Quantity):
                Error in intrinsic width
            tscatt (Quantity):
                Scattering broadening time
            tscatt_err (Quantity):
                Error in Scattering broadening time
            scatt_index (float):
                Scattering index
            scatt_index_err (float):
                Error in scattering index
            DM_smear (float):
                Dispersion smearing generated observed width
        """
        # Capture every argument (except self) by introspection
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        self.pulse = dict([(k, values[k]) for k in args[1:]])

    def make_outfile(self):
        """
        Simple method for naming the output file

        Returns:
            str
        """
        if self.frb_name is None:
            outfile = 'Generic_FRB.json'
        else:
            outfile = '{:s}.json'.format(self.frb_name)
        #
        return outfile

    def write_to_json(self, outfile=None, path='./', overwrite=True):
        """
        Write key aspects of the class to disk in a JSON file

        Args:
            outfile (str, optional): Output filename
              If not provided, one will be generated with make_outfile()
            path (str, optional): Path for the output file
            overwrite (bool, optional): Overwrite?

        Returns:
        """
        if outfile is None:
            outfile = self.make_outfile()
        # Build the dict
        frb_dict = {}
        # Basics
        if self.coord is not None:
            frb_dict['ra'] = self.coord.ra.value
            frb_dict['dec'] = self.coord.dec.value
        if self.frb_name is not None:
            frb_dict['FRB'] = self.frb_name
        frb_dict['cosmo'] = self.cosmo.name
        frb_dict['refs'] = self.refs
        if self.repeater is not None:
            frb_dict['repeater'] = self.repeater
        # Measured properties (value + matching _err when present)
        for attr in ['S', 'nu_c', 'DM', 'z', 'RM', 'DMISM', 'fluence', 'lpol']:
            # Value
            if getattr(self, attr) is not None:
                frb_dict[attr] = getattr(self, attr)
            # Error
            if hasattr(self, attr+'_err'):
                if getattr(self, attr+'_err') is not None:
                    frb_dict[attr+'_err'] = getattr(self, attr+'_err')
        # Main dicts
        for idict in self.main_dict:
            if getattr(self, idict) is not None and len(getattr(self, idict)) > 0:
                frb_dict[idict] = getattr(self, idict)
        # JSONify (deepcopy so the live attributes are not mutated)
        jdict = utils.jsonify(copy.deepcopy(frb_dict))
        # Write
        utils.savejson(os.path.join(path, outfile), jdict, easy_to_read=True, overwrite=overwrite)
        print("Wrote data to {}".format(os.path.join(path, outfile)))

    def __repr__(self):
        txt = '<{:s}: S={} nu_c={}, DM={}'.format(
            self.__class__.__name__, self.S, self.nu_c, self.DM)
        # Finish
        txt = txt + '>'
        return (txt)
class FRB(GenericFRB):
    """
    FRB class used for actual, observed FRBs
    """
    @classmethod
    def from_dict(cls, idict, **kwargs):
        """
        Instantiate from a dict

        Note: idict is consumed (keys are popped) as it is parsed.

        Args:
            idict (dict):
            **kwargs: Passed to the __init__ call

        Returns:
            FRB:
        """
        # Core items required by __init__
        frb_coord = SkyCoord(ra=idict['ra'], dec=idict['dec'], unit='deg')
        dm_quantity = units.Quantity(idict['DM']['value'], unit=idict['DM']['unit'])
        frb_obj = cls(idict['FRB'], frb_coord, dm_quantity, **kwargs)
        for consumed in ['ra', 'dec', 'DM']:
            idict.pop(consumed)
        # Optional measured quantities serialized as {value, unit} pairs
        for qkey in ['DM_err', 'DMISM', 'DMISM_err', 'RM', 'RM_err', 'fluence', 'fluence_err']:
            if qkey in idict.keys():
                setattr(frb_obj, qkey,
                        units.Quantity(idict[qkey]['value'], unit=idict[qkey]['unit']))
                idict.pop(qkey)
        # Cosmology must agree with the one this object was built with
        if frb_obj.cosmo.name != idict['cosmo']:
            raise AssertionError(f"Your cosmology does not match the expected for {idict['FRB']}. Gotta deal..")
        idict.pop('cosmo')
        # Sub-dicts (e.g. eellipse, pulse): rebuild serialized Quantities in place
        for sub_name in frb_obj.main_dict:
            if sub_name not in idict.keys():
                continue
            sub_dict = idict.pop(sub_name)
            for entry, raw in sub_dict.items():
                sub_dict[entry] = ltu.convert_quantity_in_dict(raw) if isinstance(raw, dict) else raw
            setattr(frb_obj, sub_name, sub_dict)
        # Whatever remains becomes plain attributes
        for leftover in idict.keys():
            setattr(frb_obj, leftover, idict[leftover])
        # Return
        return frb_obj

    @classmethod
    def by_name(cls, frb_name, **kwargs):
        """
        Method to instantiate an FRB by its name

        Args:
            frb_name (str):
                Name of the FRB
            **kwargs:

        Returns:
            FRB:
        """
        # JSON files live in the package's data/FRBs directory
        json_file = os.path.join(resource_filename('frb', 'data'), 'FRBs', frb_name) + '.json'
        return cls.from_json(json_file, **kwargs)

    def __init__(self, frb_name, coord, DM, S=None, nu_c=None, z_frb=None, **kwargs):
        """
        Args:
            frb_name (str):
            coord (astropy.coordinates.SkyCoord):
            DM (Quantity):
            S (Quantity):
                Source density
            nu_c:
            z_frb (float):
                Redshift
            **kwargs:
        """
        super(FRB, self).__init__(S, nu_c, DM, coord=coord, **kwargs)
        self.frb_name = frb_name
        self.z = z_frb

    def grab_host(self):
        """
        Returns the FRBHost object for this FRB

        Returns:
            frb.galaxies.frbgalaxy.FRBHost
        """
        return frbgalaxy.FRBHost.by_frb(self)

    def __repr__(self):
        ra_str = self.coord.icrs.ra.to_string(unit=units.hour, sep='', pad=True)
        dec_str = self.coord.icrs.dec.to_string(sep='', pad=True, alwayssign=True)
        rep = '<{:s}: {} J{}{} DM={}'.format(
            self.__class__.__name__, self.frb_name, ra_str, dec_str, self.DM)
        if self.z is not None:
            rep += ' z={}'.format(self.z)
        # Finish
        return rep + '>'
def list_of_frbs(require_z=False):
    """
    Generate a list of FRB objects for all the FRBs in the Repo

    Args:
        require_z (bool, optional):
            If True, keep only FRBs with a set redshift

    Returns:
        list: FRB objects
    """
    # Grab the JSON files shipped with the package, in sorted order
    json_paths = sorted(glob.glob(
        os.path.join(resource_filename('frb', 'data'), 'FRBs', 'FRB*json')))
    # Load up the FRBs
    frbs = []
    for json_path in json_paths:
        name = os.path.basename(json_path).split('.')[0]
        frb_obj = FRB.by_name(name)
        if require_z and frb_obj.z is None:
            continue
        frbs.append(frb_obj)
    # Return
    return frbs
def build_table_of_frbs(frbs=None, fattrs=None):
"""
Generate a Pandas table of FRB data
Warning: As standard, missing values are given NaN in the Pandas table
Be careful!
Args:
fattrs (list, optional):
Float attributes for the Table
The code also, by default, looks for accompanying _err attributes
Returns:
pd.DataFrame, dict: Table of data on FRBs, dict of their units
"""
if fattrs is None:
fattrs = ['DM', 'fluence', 'RM', 'lpol', 'z', 'DMISM']
# Load up the FRBs
if frbs is None:
frbs = list_of_frbs()
# Table
frb_tbl = | |
Care,1970-01-01 16:25:00,5.85,323.0,4.78
4469,5,474.0,745,Adult Care,1970-01-01 16:25:00,0.64,103.0,1.52
4470,6,28396.0,746,Work and Education,1970-01-01 16:26:00,38.5,2439.0,36.09
4471,10,24402.0,746,Leisure,1970-01-01 16:26:00,33.08,2285.0,33.81
4472,3,8572.0,746,Housework,1970-01-01 16:26:00,11.62,880.0,13.02
4473,11,7630.0,746,Travel and Other,1970-01-01 16:26:00,10.34,722.0,10.68
4474,4,4279.0,746,Child Care,1970-01-01 16:26:00,5.8,328.0,4.85
4475,5,479.0,746,Adult Care,1970-01-01 16:26:00,0.65,104.0,1.54
4476,6,28396.0,747,Work and Education,1970-01-01 16:27:00,38.5,2441.0,36.12
4477,10,24421.0,747,Leisure,1970-01-01 16:27:00,33.11,2284.0,33.8
4478,3,8582.0,747,Housework,1970-01-01 16:27:00,11.64,880.0,13.02
4479,11,7621.0,747,Travel and Other,1970-01-01 16:27:00,10.33,716.0,10.59
4480,4,4265.0,747,Child Care,1970-01-01 16:27:00,5.78,329.0,4.87
4481,5,473.0,747,Adult Care,1970-01-01 16:27:00,0.64,108.0,1.6
4482,6,28400.0,748,Work and Education,1970-01-01 16:28:00,38.5,2442.0,36.13
4483,10,24433.0,748,Leisure,1970-01-01 16:28:00,33.13,2279.0,33.72
4484,3,8595.0,748,Housework,1970-01-01 16:28:00,11.65,882.0,13.05
4485,11,7602.0,748,Travel and Other,1970-01-01 16:28:00,10.31,716.0,10.59
4486,4,4261.0,748,Child Care,1970-01-01 16:28:00,5.78,330.0,4.88
4487,5,467.0,748,Adult Care,1970-01-01 16:28:00,0.63,109.0,1.61
4488,6,28398.0,749,Work and Education,1970-01-01 16:29:00,38.5,2442.0,36.13
4489,10,24456.0,749,Leisure,1970-01-01 16:29:00,33.16,2280.0,33.74
4490,3,8595.0,749,Housework,1970-01-01 16:29:00,11.65,883.0,13.07
4491,11,7570.0,749,Travel and Other,1970-01-01 16:29:00,10.26,718.0,10.62
4492,4,4271.0,749,Child Care,1970-01-01 16:29:00,5.79,328.0,4.85
4493,5,468.0,749,Adult Care,1970-01-01 16:29:00,0.63,107.0,1.58
4494,6,28402.0,750,Work and Education,1970-01-01 16:30:00,38.51,2442.0,36.13
4495,10,24471.0,750,Leisure,1970-01-01 16:30:00,33.18,2278.0,33.71
4496,3,8600.0,750,Housework,1970-01-01 16:30:00,11.66,890.0,13.17
4497,11,7554.0,750,Travel and Other,1970-01-01 16:30:00,10.24,717.0,10.61
4498,4,4266.0,750,Child Care,1970-01-01 16:30:00,5.78,325.0,4.81
4499,5,465.0,750,Adult Care,1970-01-01 16:30:00,0.63,106.0,1.57
4500,10,24656.0,751,Leisure,1970-01-01 16:31:00,33.43,2259.0,33.43
4501,6,24474.0,751,Work and Education,1970-01-01 16:31:00,33.18,2102.0,31.1
4502,11,10989.0,751,Travel and Other,1970-01-01 16:31:00,14.9,1045.0,15.46
4503,3,8942.0,751,Housework,1970-01-01 16:31:00,12.12,932.0,13.79
4504,4,4186.0,751,Child Care,1970-01-01 16:31:00,5.68,320.0,4.74
4505,5,511.0,751,Adult Care,1970-01-01 16:31:00,0.69,100.0,1.48
4506,10,24685.0,752,Leisure,1970-01-01 16:32:00,33.47,2266.0,33.53
4507,6,24465.0,752,Work and Education,1970-01-01 16:32:00,33.17,2102.0,31.1
4508,11,10962.0,752,Travel and Other,1970-01-01 16:32:00,14.86,1038.0,15.36
4509,3,8970.0,752,Housework,1970-01-01 16:32:00,12.16,930.0,13.76
4510,4,4166.0,752,Child Care,1970-01-01 16:32:00,5.65,322.0,4.76
4511,5,510.0,752,Adult Care,1970-01-01 16:32:00,0.69,100.0,1.48
4512,10,24769.0,753,Leisure,1970-01-01 16:33:00,33.58,2275.0,33.66
4513,6,24468.0,753,Work and Education,1970-01-01 16:33:00,33.17,2105.0,31.15
4514,11,10880.0,753,Travel and Other,1970-01-01 16:33:00,14.75,1026.0,15.18
4515,3,8994.0,753,Housework,1970-01-01 16:33:00,12.19,937.0,13.87
4516,4,4141.0,753,Child Care,1970-01-01 16:33:00,5.61,320.0,4.74
4517,5,506.0,753,Adult Care,1970-01-01 16:33:00,0.69,95.0,1.41
4518,10,24820.0,754,Leisure,1970-01-01 16:34:00,33.65,2280.0,33.74
4519,6,24470.0,754,Work and Education,1970-01-01 16:34:00,33.18,2104.0,31.13
4520,11,10807.0,754,Travel and Other,1970-01-01 16:34:00,14.65,1024.0,15.15
4521,3,8995.0,754,Housework,1970-01-01 16:34:00,12.2,937.0,13.87
4522,4,4156.0,754,Child Care,1970-01-01 16:34:00,5.63,319.0,4.72
4523,5,510.0,754,Adult Care,1970-01-01 16:34:00,0.69,94.0,1.39
4524,10,24847.0,755,Leisure,1970-01-01 16:35:00,33.69,2285.0,33.81
4525,6,24474.0,755,Work and Education,1970-01-01 16:35:00,33.18,2104.0,31.13
4526,11,10766.0,755,Travel and Other,1970-01-01 16:35:00,14.6,1016.0,15.03
4527,3,9003.0,755,Housework,1970-01-01 16:35:00,12.21,936.0,13.85
4528,4,4163.0,755,Child Care,1970-01-01 16:35:00,5.64,322.0,4.76
4529,5,505.0,755,Adult Care,1970-01-01 16:35:00,0.68,95.0,1.41
4530,10,25145.0,756,Leisure,1970-01-01 16:36:00,34.09,2323.0,34.37
4531,6,24466.0,756,Work and Education,1970-01-01 16:36:00,33.17,2095.0,31.0
4532,11,10295.0,756,Travel and Other,1970-01-01 16:36:00,13.96,988.0,14.62
4533,3,9097.0,756,Housework,1970-01-01 16:36:00,12.33,932.0,13.79
4534,4,4235.0,756,Child Care,1970-01-01 16:36:00,5.74,315.0,4.66
4535,5,520.0,756,Adult Care,1970-01-01 16:36:00,0.71,105.0,1.55
4536,10,25183.0,757,Leisure,1970-01-01 16:37:00,34.14,2332.0,34.51
4537,6,24470.0,757,Work and Education,1970-01-01 16:37:00,33.18,2094.0,30.99
4538,11,10274.0,757,Travel and Other,1970-01-01 16:37:00,13.93,983.0,14.55
4539,3,9101.0,757,Housework,1970-01-01 16:37:00,12.34,933.0,13.81
4540,4,4223.0,757,Child Care,1970-01-01 16:37:00,5.73,315.0,4.66
4541,5,507.0,757,Adult Care,1970-01-01 16:37:00,0.69,101.0,1.49
4542,10,25228.0,758,Leisure,1970-01-01 16:38:00,34.2,2348.0,34.74
4543,6,24479.0,758,Work and Education,1970-01-01 16:38:00,33.19,2092.0,30.96
4544,11,10220.0,758,Travel and Other,1970-01-01 16:38:00,13.86,975.0,14.43
4545,3,9088.0,758,Housework,1970-01-01 16:38:00,12.32,928.0,13.73
4546,4,4243.0,758,Child Care,1970-01-01 16:38:00,5.75,315.0,4.66
4547,5,500.0,758,Adult Care,1970-01-01 16:38:00,0.68,100.0,1.48
4548,10,25262.0,759,Leisure,1970-01-01 16:39:00,34.25,2347.0,34.73
4549,6,24478.0,759,Work and Education,1970-01-01 16:39:00,33.19,2092.0,30.96
4550,11,10202.0,759,Travel and Other,1970-01-01 16:39:00,13.83,975.0,14.43
4551,3,9094.0,759,Housework,1970-01-01 16:39:00,12.33,931.0,13.78
4552,4,4224.0,759,Child Care,1970-01-01 16:39:00,5.73,314.0,4.65
4553,5,498.0,759,Adult Care,1970-01-01 16:39:00,0.68,99.0,1.46
4554,10,25284.0,760,Leisure,1970-01-01 16:40:00,34.28,2345.0,34.7
4555,6,24475.0,760,Work and Education,1970-01-01 16:40:00,33.18,2092.0,30.96
4556,11,10175.0,760,Travel and Other,1970-01-01 16:40:00,13.8,976.0,14.44
4557,3,9109.0,760,Housework,1970-01-01 16:40:00,12.35,932.0,13.79
4558,4,4219.0,760,Child Care,1970-01-01 16:40:00,5.72,314.0,4.65
4559,5,496.0,760,Adult Care,1970-01-01 16:40:00,0.67,99.0,1.46
4560,10,25695.0,761,Leisure,1970-01-01 16:41:00,34.84,2367.0,35.03
4561,6,24425.0,761,Work and Education,1970-01-01 16:41:00,33.12,2090.0,30.93
4562,11,9569.0,761,Travel and Other,1970-01-01 16:41:00,12.97,921.0,13.63
4563,3,9262.0,761,Housework,1970-01-01 16:41:00,12.56,966.0,14.29
4564,4,4286.0,761,Child Care,1970-01-01 16:41:00,5.81,319.0,4.72
4565,5,521.0,761,Adult Care,1970-01-01 16:41:00,0.71,95.0,1.41
4566,10,25730.0,762,Leisure,1970-01-01 16:42:00,34.88,2367.0,35.03
4567,6,24424.0,762,Work and Education,1970-01-01 16:42:00,33.11,2090.0,30.93
4568,11,9555.0,762,Travel and Other,1970-01-01 16:42:00,12.95,924.0,13.67
4569,3,9254.0,762,Housework,1970-01-01 16:42:00,12.55,967.0,14.31
4570,4,4290.0,762,Child Care,1970-01-01 16:42:00,5.82,317.0,4.69
4571,5,505.0,762,Adult Care,1970-01-01 16:42:00,0.68,93.0,1.38
4572,10,25777.0,763,Leisure,1970-01-01 16:43:00,34.95,2373.0,35.11
4573,6,24433.0,763,Work and Education,1970-01-01 16:43:00,33.13,2092.0,30.96
4574,11,9510.0,763,Travel and Other,1970-01-01 16:43:00,12.89,908.0,13.44
4575,3,9249.0,763,Housework,1970-01-01 16:43:00,12.54,965.0,14.28
4576,4,4288.0,763,Child Care,1970-01-01 16:43:00,5.81,323.0,4.78
4577,5,501.0,763,Adult Care,1970-01-01 16:43:00,0.68,97.0,1.44
4578,10,25799.0,764,Leisure,1970-01-01 16:44:00,34.98,2368.0,35.04
4579,6,24437.0,764,Work and Education,1970-01-01 16:44:00,33.13,2091.0,30.94
4580,11,9489.0,764,Travel and Other,1970-01-01 16:44:00,12.87,914.0,13.52
4581,3,9244.0,764,Housework,1970-01-01 16:44:00,12.53,969.0,14.34
4582,4,4289.0,764,Child Care,1970-01-01 16:44:00,5.81,319.0,4.72
4583,5,500.0,764,Adult Care,1970-01-01 16:44:00,0.68,97.0,1.44
4584,10,25812.0,765,Leisure,1970-01-01 16:45:00,35.0,2370.0,35.07
4585,6,24441.0,765,Work and Education,1970-01-01 16:45:00,33.14,2091.0,30.94
4586,11,9480.0,765,Travel and Other,1970-01-01 16:45:00,12.85,913.0,13.51
4587,3,9244.0,765,Housework,1970-01-01 16:45:00,12.53,971.0,14.37
4588,4,4272.0,765,Child Care,1970-01-01 16:45:00,5.79,317.0,4.69
4589,5,509.0,765,Adult Care,1970-01-01 16:45:00,0.69,96.0,1.42
4590,10,26205.0,766,Leisure,1970-01-01 16:46:00,35.53,2393.0,35.41
4591,6,23922.0,766,Work and Education,1970-01-01 16:46:00,32.43,2042.0,30.22
4592,3,9423.0,766,Housework,1970-01-01 16:46:00,12.78,989.0,14.63
4593,11,9406.0,766,Travel and Other,1970-01-01 16:46:00,12.75,903.0,13.36
4594,4,4262.0,766,Child Care,1970-01-01 16:46:00,5.78,322.0,4.76
4595,5,540.0,766,Adult Care,1970-01-01 16:46:00,0.73,109.0,1.61
4596,10,26215.0,767,Leisure,1970-01-01 16:47:00,35.54,2401.0,35.53
4597,6,23929.0,767,Work and Education,1970-01-01 16:47:00,32.44,2044.0,30.25
4598,3,9432.0,767,Housework,1970-01-01 16:47:00,12.79,990.0,14.65
4599,11,9395.0,767,Travel and Other,1970-01-01 16:47:00,12.74,901.0,13.33
4600,4,4262.0,767,Child Care,1970-01-01 16:47:00,5.78,316.0,4.68
4601,5,525.0,767,Adult Care,1970-01-01 16:47:00,0.71,106.0,1.57
4602,10,26219.0,768,Leisure,1970-01-01 16:48:00,35.55,2393.0,35.41
4603,6,23933.0,768,Work and Education,1970-01-01 16:48:00,32.45,2044.0,30.25
4604,3,9446.0,768,Housework,1970-01-01 16:48:00,12.81,997.0,14.75
4605,11,9392.0,768,Travel and Other,1970-01-01 16:48:00,12.73,910.0,13.47
4606,4,4263.0,768,Child Care,1970-01-01 16:48:00,5.78,313.0,4.63
4607,5,505.0,768,Adult Care,1970-01-01 16:48:00,0.68,101.0,1.49
4608,10,26244.0,769,Leisure,1970-01-01 16:49:00,35.58,2390.0,35.37
4609,6,23933.0,769,Work and Education,1970-01-01 16:49:00,32.45,2044.0,30.25
4610,3,9453.0,769,Housework,1970-01-01 16:49:00,12.82,1000.0,14.8
4611,11,9368.0,769,Travel and Other,1970-01-01 16:49:00,12.7,912.0,13.5
4612,4,4250.0,769,Child Care,1970-01-01 16:49:00,5.76,312.0,4.62
4613,5,510.0,769,Adult Care,1970-01-01 16:49:00,0.69,100.0,1.48
4614,10,26273.0,770,Leisure,1970-01-01 16:50:00,35.62,2398.0,35.48
4615,6,23937.0,770,Work and Education,1970-01-01 16:50:00,32.45,2043.0,30.23
4616,3,9452.0,770,Housework,1970-01-01 16:50:00,12.81,1001.0,14.81
4617,11,9342.0,770,Travel and Other,1970-01-01 16:50:00,12.67,906.0,13.41
4618,4,4250.0,770,Child Care,1970-01-01 16:50:00,5.76,310.0,4.59
4619,5,504.0,770,Adult Care,1970-01-01 16:50:00,0.68,100.0,1.48
4620,10,26649.0,771,Leisure,1970-01-01 16:51:00,36.13,2413.0,35.71
4621,6,23927.0,771,Work and Education,1970-01-01 16:51:00,32.44,2034.0,30.1
4622,3,9489.0,771,Housework,1970-01-01 16:51:00,12.87,1013.0,14.99
4623,11,8864.0,771,Travel and Other,1970-01-01 16:51:00,12.02,883.0,13.07
4624,4,4307.0,771,Child Care,1970-01-01 16:51:00,5.84,307.0,4.54
4625,5,522.0,771,Adult Care,1970-01-01 16:51:00,0.71,108.0,1.6
4626,10,26673.0,772,Leisure,1970-01-01 16:52:00,36.16,2409.0,35.65
4627,6,23931.0,772,Work and Education,1970-01-01 16:52:00,32.45,2033.0,30.08
4628,3,9501.0,772,Housework,1970-01-01 16:52:00,12.88,1011.0,14.96
4629,11,8843.0,772,Travel and Other,1970-01-01 16:52:00,11.99,888.0,13.14
4630,4,4297.0,772,Child Care,1970-01-01 16:52:00,5.83,309.0,4.57
4631,5,513.0,772,Adult Care,1970-01-01 16:52:00,0.7,108.0,1.6
4632,10,26729.0,773,Leisure,1970-01-01 16:53:00,36.24,2423.0,35.85
4633,6,23932.0,773,Work and Education,1970-01-01 16:53:00,32.45,2033.0,30.08
4634,3,9521.0,773,Housework,1970-01-01 16:53:00,12.91,1003.0,14.84
4635,11,8789.0,773,Travel and Other,1970-01-01 16:53:00,11.92,885.0,13.1
4636,4,4279.0,773,Child Care,1970-01-01 16:53:00,5.8,308.0,4.56
4637,5,508.0,773,Adult Care,1970-01-01 16:53:00,0.69,106.0,1.57
4638,10,26746.0,774,Leisure,1970-01-01 16:54:00,36.26,2431.0,35.97
4639,6,23935.0,774,Work and Education,1970-01-01 16:54:00,32.45,2032.0,30.07
4640,3,9541.0,774,Housework,1970-01-01 16:54:00,12.94,1000.0,14.8
4641,11,8751.0,774,Travel and Other,1970-01-01 16:54:00,11.86,877.0,12.98
4642,4,4279.0,774,Child Care,1970-01-01 16:54:00,5.8,310.0,4.59
4643,5,506.0,774,Adult Care,1970-01-01 16:54:00,0.69,108.0,1.6
4644,10,26756.0,775,Leisure,1970-01-01 16:55:00,36.28,2431.0,35.97
4645,6,23937.0,775,Work and Education,1970-01-01 16:55:00,32.45,2033.0,30.08
4646,3,9548.0,775,Housework,1970-01-01 16:55:00,12.95,999.0,14.78
4647,11,8743.0,775,Travel and Other,1970-01-01 16:55:00,11.85,881.0,13.04
4648,4,4272.0,775,Child Care,1970-01-01 16:55:00,5.79,309.0,4.57
4649,5,502.0,775,Adult Care,1970-01-01 16:55:00,0.68,105.0,1.55
4650,10,26983.0,776,Leisure,1970-01-01 16:56:00,36.58,2470.0,36.55
4651,6,23940.0,776,Work and Education,1970-01-01 16:56:00,32.46,2036.0,30.13
4652,3,9573.0,776,Housework,1970-01-01 16:56:00,12.98,990.0,14.65
4653,11,8439.0,776,Travel and Other,1970-01-01 16:56:00,11.44,849.0,12.56
4654,4,4320.0,776,Child Care,1970-01-01 16:56:00,5.86,311.0,4.6
4655,5,503.0,776,Adult Care,1970-01-01 16:56:00,0.68,102.0,1.51
4656,10,27013.0,777,Leisure,1970-01-01 16:57:00,36.62,2470.0,36.55
4657,6,23940.0,777,Work and Education,1970-01-01 16:57:00,32.46,2037.0,30.14
4658,3,9583.0,777,Housework,1970-01-01 16:57:00,12.99,990.0,14.65
4659,11,8415.0,777,Travel and Other,1970-01-01 16:57:00,11.41,846.0,12.52
4660,4,4306.0,777,Child Care,1970-01-01 16:57:00,5.84,312.0,4.62
4661,5,501.0,777,Adult Care,1970-01-01 16:57:00,0.68,103.0,1.52
4662,10,27068.0,778,Leisure,1970-01-01 16:58:00,36.7,2462.0,36.43
4663,6,23938.0,778,Work and Education,1970-01-01 16:58:00,32.45,2039.0,30.17
4664,3,9580.0,778,Housework,1970-01-01 16:58:00,12.99,996.0,14.74
4665,11,8379.0,778,Travel and Other,1970-01-01 16:58:00,11.36,846.0,12.52
4666,4,4297.0,778,Child Care,1970-01-01 16:58:00,5.83,313.0,4.63
4667,5,496.0,778,Adult Care,1970-01-01 16:58:00,0.67,102.0,1.51
4668,10,27078.0,779,Leisure,1970-01-01 16:59:00,36.71,2467.0,36.5
4669,6,23939.0,779,Work and Education,1970-01-01 16:59:00,32.46,2040.0,30.19
4670,3,9586.0,779,Housework,1970-01-01 16:59:00,13.0,997.0,14.75
4671,11,8360.0,779,Travel and Other,1970-01-01 16:59:00,11.33,842.0,12.46
4672,4,4296.0,779,Child Care,1970-01-01 16:59:00,5.82,311.0,4.6
4673,5,499.0,779,Adult Care,1970-01-01 16:59:00,0.68,101.0,1.49
4674,10,27100.0,780,Leisure,1970-01-01 17:00:00,36.74,2464.0,36.46
4675,6,23937.0,780,Work and Education,1970-01-01 17:00:00,32.45,2041.0,30.2
4676,3,9581.0,780,Housework,1970-01-01 17:00:00,12.99,996.0,14.74
4677,11,8348.0,780,Travel and Other,1970-01-01 17:00:00,11.32,843.0,12.47
4678,4,4296.0,780,Child Care,1970-01-01 17:00:00,5.82,313.0,4.63
4679,5,496.0,780,Adult Care,1970-01-01 17:00:00,0.67,101.0,1.49
4680,10,27513.0,781,Leisure,1970-01-01 17:01:00,37.3,2462.0,36.43
4681,6,17567.0,781,Work and Education,1970-01-01 17:01:00,23.82,1516.0,22.43
4682,11,13995.0,781,Travel and Other,1970-01-01 17:01:00,18.97,1288.0,19.06
4683,3,10170.0,781,Housework,1970-01-01 17:01:00,13.79,1093.0,16.17
4684,4,4023.0,781,Child Care,1970-01-01 17:01:00,5.45,300.0,4.44
4685,5,490.0,781,Adult Care,1970-01-01 17:01:00,0.66,99.0,1.46
4686,10,27584.0,782,Leisure,1970-01-01 17:02:00,37.4,2462.0,36.43
4687,6,17568.0,782,Work and Education,1970-01-01 17:02:00,23.82,1516.0,22.43
4688,11,13947.0,782,Travel and Other,1970-01-01 17:02:00,18.91,1280.0,18.94
4689,3,10162.0,782,Housework,1970-01-01 17:02:00,13.78,1099.0,16.26
4690,4,4022.0,782,Child Care,1970-01-01 17:02:00,5.45,301.0,4.45
4691,5,475.0,782,Adult Care,1970-01-01 17:02:00,0.64,100.0,1.48
4692,10,27723.0,783,Leisure,1970-01-01 17:03:00,37.59,2475.0,36.62
4693,6,17567.0,783,Work and Education,1970-01-01 17:03:00,23.82,1517.0,22.45
4694,11,13801.0,783,Travel and Other,1970-01-01 17:03:00,18.71,1266.0,18.73
4695,3,10188.0,783,Housework,1970-01-01 17:03:00,13.81,1098.0,16.25
4696,4,4009.0,783,Child Care,1970-01-01 17:03:00,5.44,302.0,4.47
4697,5,470.0,783,Adult Care,1970-01-01 17:03:00,0.64,100.0,1.48
4698,10,27807.0,784,Leisure,1970-01-01 17:04:00,37.7,2479.0,36.68
4699,6,17573.0,784,Work and Education,1970-01-01 17:04:00,23.83,1517.0,22.45
4700,11,13696.0,784,Travel and Other,1970-01-01 17:04:00,18.57,1255.0,18.57
4701,3,10189.0,784,Housework,1970-01-01 17:04:00,13.81,1099.0,16.26
4702,4,4018.0,784,Child Care,1970-01-01 17:04:00,5.45,307.0,4.54
4703,5,475.0,784,Adult Care,1970-01-01 17:04:00,0.64,101.0,1.49
4704,10,27832.0,785,Leisure,1970-01-01 17:05:00,37.73,2474.0,36.61
4705,6,17571.0,785,Work and Education,1970-01-01 17:05:00,23.82,1520.0,22.49
4706,11,13672.0,785,Travel and Other,1970-01-01 17:05:00,18.54,1254.0,18.56
4707,3,10186.0,785,Housework,1970-01-01 17:05:00,13.81,1103.0,16.32
4708,4,4026.0,785,Child Care,1970-01-01 17:05:00,5.46,307.0,4.54
4709,5,471.0,785,Adult Care,1970-01-01 17:05:00,0.64,100.0,1.48
4710,10,28403.0,786,Leisure,1970-01-01 17:06:00,38.51,2523.0,37.33
4711,6,17569.0,786,Work and Education,1970-01-01 17:06:00,23.82,1516.0,22.43
4712,11,12839.0,786,Travel and Other,1970-01-01 17:06:00,17.41,1187.0,17.56
4713,3,10323.0,786,Housework,1970-01-01 17:06:00,14.0,1114.0,16.48
4714,4,4143.0,786,Child Care,1970-01-01 17:06:00,5.62,318.0,4.71
4715,5,481.0,786,Adult Care,1970-01-01 17:06:00,0.65,100.0,1.48
4716,10,28439.0,787,Leisure,1970-01-01 17:07:00,38.56,2525.0,37.36
4717,6,17562.0,787,Work and Education,1970-01-01 17:07:00,23.81,1513.0,22.39
4718,11,12825.0,787,Travel and Other,1970-01-01 17:07:00,17.39,1194.0,17.67
4719,3,10333.0,787,Housework,1970-01-01 17:07:00,14.01,1112.0,16.45
4720,4,4123.0,787,Child Care,1970-01-01 17:07:00,5.59,313.0,4.63
4721,5,476.0,787,Adult Care,1970-01-01 17:07:00,0.65,101.0,1.49
4722,10,28508.0,788,Leisure,1970-01-01 17:08:00,38.65,2542.0,37.61
4723,6,17564.0,788,Work and Education,1970-01-01 17:08:00,23.81,1512.0,22.37
4724,11,12730.0,788,Travel and Other,1970-01-01 17:08:00,17.26,1169.0,17.3
4725,3,10344.0,788,Housework,1970-01-01 17:08:00,14.02,1115.0,16.5
4726,4,4131.0,788,Child Care,1970-01-01 17:08:00,5.6,320.0,4.74
4727,5,481.0,788,Adult Care,1970-01-01 17:08:00,0.65,100.0,1.48
4728,10,28537.0,789,Leisure,1970-01-01 17:09:00,38.69,2543.0,37.63
4729,6,17569.0,789,Work and Education,1970-01-01 17:09:00,23.82,1512.0,22.37
4730,11,12693.0,789,Travel and Other,1970-01-01 17:09:00,17.21,1164.0,17.22
4731,3,10360.0,789,Housework,1970-01-01 17:09:00,14.05,1117.0,16.53
4732,4,4121.0,789,Child Care,1970-01-01 17:09:00,5.59,323.0,4.78
4733,5,478.0,789,Adult Care,1970-01-01 17:09:00,0.65,99.0,1.46
4734,10,28562.0,790,Leisure,1970-01-01 17:10:00,38.72,2544.0,37.64
4735,6,17566.0,790,Work and Education,1970-01-01 17:10:00,23.82,1511.0,22.36
4736,11,12669.0,790,Travel and Other,1970-01-01 17:10:00,17.18,1165.0,17.24
4737,3,10369.0,790,Housework,1970-01-01 17:10:00,14.06,1115.0,16.5
4738,4,4121.0,790,Child Care,1970-01-01 17:10:00,5.59,324.0,4.79
4739,5,471.0,790,Adult Care,1970-01-01 17:10:00,0.64,99.0,1.46
4740,10,29317.0,791,Leisure,1970-01-01 17:11:00,39.75,2591.0,38.34
4741,6,17489.0,791,Work and Education,1970-01-01 17:11:00,23.71,1504.0,22.26
4742,11,11661.0,791,Travel and Other,1970-01-01 17:11:00,15.81,1096.0,16.22
4743,3,10557.0,791,Housework,1970-01-01 17:11:00,14.31,1126.0,16.66
4744,4,4252.0,791,Child Care,1970-01-01 17:11:00,5.76,334.0,4.94
4745,5,482.0,791,Adult Care,1970-01-01 17:11:00,0.65,107.0,1.58
4746,10,29343.0,792,Leisure,1970-01-01 17:12:00,39.78,2604.0,38.53
4747,6,17491.0,792,Work and Education,1970-01-01 17:12:00,23.71,1504.0,22.26
4748,11,11639.0,792,Travel and Other,1970-01-01 17:12:00,15.78,1088.0,16.1
4749,3,10585.0,792,Housework,1970-01-01 17:12:00,14.35,1128.0,16.69
4750,4,4222.0,792,Child Care,1970-01-01 17:12:00,5.72,331.0,4.9
4751,5,478.0,792,Adult Care,1970-01-01 17:12:00,0.65,103.0,1.52
4752,10,29367.0,793,Leisure,1970-01-01 17:13:00,39.82,2615.0,38.69
4753,6,17492.0,793,Work and Education,1970-01-01 17:13:00,23.72,1503.0,22.24
4754,11,11595.0,793,Travel and Other,1970-01-01 17:13:00,15.72,1089.0,16.11
4755,3,10605.0,793,Housework,1970-01-01 17:13:00,14.38,1123.0,16.62
4756,4,4223.0,793,Child Care,1970-01-01 17:13:00,5.73,325.0,4.81
4757,5,476.0,793,Adult Care,1970-01-01 17:13:00,0.65,103.0,1.52
4758,10,29396.0,794,Leisure,1970-01-01 17:14:00,39.85,2618.0,38.74
4759,6,17493.0,794,Work and Education,1970-01-01 17:14:00,23.72,1503.0,22.24
4760,11,11551.0,794,Travel and Other,1970-01-01 17:14:00,15.66,1081.0,16.0
4761,3,10615.0,794,Housework,1970-01-01 17:14:00,14.39,1127.0,16.68
4762,4,4236.0,794,Child Care,1970-01-01 17:14:00,5.74,327.0,4.84
4763,5,467.0,794,Adult Care,1970-01-01 17:14:00,0.63,102.0,1.51
4764,10,29444.0,795,Leisure,1970-01-01 17:15:00,39.92,2626.0,38.86
4765,6,17494.0,795,Work and Education,1970-01-01 17:15:00,23.72,1503.0,22.24
4766,11,11511.0,795,Travel and Other,1970-01-01 17:15:00,15.61,1073.0,15.88
4767,3,10605.0,795,Housework,1970-01-01 17:15:00,14.38,1126.0,16.66
4768,4,4237.0,795,Child Care,1970-01-01 17:15:00,5.74,328.0,4.85
4769,5,467.0,795,Adult Care,1970-01-01 17:15:00,0.63,102.0,1.51
4770,10,30176.0,796,Leisure,1970-01-01 17:16:00,40.91,2674.0,39.57
4771,6,16781.0,796,Work and Education,1970-01-01 17:16:00,22.75,1438.0,21.28
4772,11,11223.0,796,Travel and Other,1970-01-01 17:16:00,15.22,1094.0,16.19
4773,3,10728.0,796,Housework,1970-01-01 17:16:00,14.54,1127.0,16.68
4774,4,4365.0,796,Child Care,1970-01-01 17:16:00,5.92,329.0,4.87
4775,5,485.0,796,Adult Care,1970-01-01 17:16:00,0.66,96.0,1.42
4776,10,30212.0,797,Leisure,1970-01-01 17:17:00,40.96,2681.0,39.67
4777,6,16781.0,797,Work and Education,1970-01-01 17:17:00,22.75,1439.0,21.29
4778,11,11224.0,797,Travel and Other,1970-01-01 17:17:00,15.22,1088.0,16.1
4779,3,10727.0,797,Housework,1970-01-01 17:17:00,14.54,1128.0,16.69
4780,4,4345.0,797,Child Care,1970-01-01 17:17:00,5.89,330.0,4.88
4781,5,469.0,797,Adult Care,1970-01-01 17:17:00,0.64,92.0,1.36
4782,10,30263.0,798,Leisure,1970-01-01 17:18:00,41.03,2703.0,40.0
4783,6,16786.0,798,Work and Education,1970-01-01 17:18:00,22.76,1439.0,21.29
4784,11,11193.0,798,Travel and Other,1970-01-01 17:18:00,15.18,1072.0,15.86
4785,3,10720.0,798,Housework,1970-01-01 17:18:00,14.53,1125.0,16.65
4786,4,4329.0,798,Child Care,1970-01-01 17:18:00,5.87,327.0,4.84
4787,5,467.0,798,Adult Care,1970-01-01 17:18:00,0.63,92.0,1.36
4788,10,30315.0,799,Leisure,1970-01-01 17:19:00,41.1,2706.0,40.04
4789,6,16785.0,799,Work and Education,1970-01-01 17:19:00,22.76,1441.0,21.32
4790,11,11146.0,799,Travel and Other,1970-01-01 17:19:00,15.11,1068.0,15.8
4791,3,10720.0,799,Housework,1970-01-01 17:19:00,14.53,1123.0,16.62
4792,4,4324.0,799,Child Care,1970-01-01 17:19:00,5.86,328.0,4.85
4793,5,468.0,799,Adult Care,1970-01-01 17:19:00,0.63,92.0,1.36
4794,10,30336.0,800,Leisure,1970-01-01 17:20:00,41.13,2707.0,40.06
4795,6,16787.0,800,Work and Education,1970-01-01 17:20:00,22.76,1441.0,21.32
4796,11,11134.0,800,Travel and Other,1970-01-01 17:20:00,15.1,1064.0,15.74
4797,3,10723.0,800,Housework,1970-01-01 17:20:00,14.54,1125.0,16.65
4798,4,4314.0,800,Child Care,1970-01-01 17:20:00,5.85,327.0,4.84
4799,5,464.0,800,Adult Care,1970-01-01 17:20:00,0.63,94.0,1.39
4800,10,30881.0,801,Leisure,1970-01-01 17:21:00,41.87,2782.0,41.17
4801,6,16696.0,801,Work and Education,1970-01-01 17:21:00,22.64,1431.0,21.17
4802,3,10809.0,801,Housework,1970-01-01 17:21:00,14.65,1107.0,16.38
4803,11,10482.0,801,Travel and Other,1970-01-01 17:21:00,14.21,1009.0,14.93
4804,4,4387.0,801,Child Care,1970-01-01 17:21:00,5.95,332.0,4.91
4805,5,503.0,801,Adult Care,1970-01-01 17:21:00,0.68,97.0,1.44
4806,10,30931.0,802,Leisure,1970-01-01 17:22:00,41.94,2787.0,41.24
4807,6,16694.0,802,Work and Education,1970-01-01 17:22:00,22.63,1435.0,21.23
4808,3,10810.0,802,Housework,1970-01-01 17:22:00,14.66,1106.0,16.37
4809,11,10424.0,802,Travel and Other,1970-01-01 17:22:00,14.13,1000.0,14.8
4810,4,4410.0,802,Child Care,1970-01-01 17:22:00,5.98,332.0,4.91
4811,5,489.0,802,Adult Care,1970-01-01 17:22:00,0.66,98.0,1.45
4812,10,30995.0,803,Leisure,1970-01-01 17:23:00,42.02,2793.0,41.33
4813,6,16695.0,803,Work and Education,1970-01-01 17:23:00,22.63,1436.0,21.25
4814,3,10800.0,803,Housework,1970-01-01 17:23:00,14.64,1103.0,16.32
4815,11,10391.0,803,Travel and Other,1970-01-01 17:23:00,14.09,995.0,14.72
4816,4,4398.0,803,Child Care,1970-01-01 17:23:00,5.96,333.0,4.93
4817,5,479.0,803,Adult Care,1970-01-01 17:23:00,0.65,98.0,1.45
4818,10,31009.0,804,Leisure,1970-01-01 17:24:00,42.04,2795.0,41.36
4819,6,16702.0,804,Work and Education,1970-01-01 17:24:00,22.64,1435.0,21.23
4820,3,10816.0,804,Housework,1970-01-01 17:24:00,14.66,1103.0,16.32
4821,11,10357.0,804,Travel and Other,1970-01-01 17:24:00,14.04,995.0,14.72
4822,4,4402.0,804,Child Care,1970-01-01 17:24:00,5.97,333.0,4.93
4823,5,472.0,804,Adult Care,1970-01-01 17:24:00,0.64,97.0,1.44
4824,10,31014.0,805,Leisure,1970-01-01 17:25:00,42.05,2792.0,41.31
4825,6,16701.0,805,Work and Education,1970-01-01 17:25:00,22.64,1434.0,21.22
4826,3,10830.0,805,Housework,1970-01-01 17:25:00,14.68,1110.0,16.42
4827,11,10342.0,805,Travel and Other,1970-01-01 17:25:00,14.02,993.0,14.69
4828,4,4392.0,805,Child Care,1970-01-01 17:25:00,5.95,332.0,4.91
4829,5,479.0,805,Adult Care,1970-01-01 17:25:00,0.65,97.0,1.44
4830,10,31485.0,806,Leisure,1970-01-01 17:26:00,42.69,2844.0,42.08
4831,6,16713.0,806,Work and Education,1970-01-01 17:26:00,22.66,1427.0,21.12
4832,3,10852.0,806,Housework,1970-01-01 17:26:00,14.71,1118.0,16.54
4833,11,9833.0,806,Travel and Other,1970-01-01 17:26:00,13.33,933.0,13.81
4834,4,4393.0,806,Child Care,1970-01-01 17:26:00,5.96,335.0,4.96
4835,5,482.0,806,Adult Care,1970-01-01 17:26:00,0.65,101.0,1.49
4836,10,31532.0,807,Leisure,1970-01-01 17:27:00,42.75,2844.0,42.08
4837,6,16718.0,807,Work and Education,1970-01-01 17:27:00,22.67,1427.0,21.12
4838,3,10840.0,807,Housework,1970-01-01 17:27:00,14.7,1121.0,16.59
4839,11,9812.0,807,Travel and Other,1970-01-01 17:27:00,13.3,937.0,13.87
4840,4,4378.0,807,Child Care,1970-01-01 17:27:00,5.94,331.0,4.9
4841,5,478.0,807,Adult Care,1970-01-01 17:27:00,0.65,98.0,1.45
4842,10,31620.0,808,Leisure,1970-01-01 17:28:00,42.87,2846.0,42.11
4843,6,16721.0,808,Work and Education,1970-01-01 17:28:00,22.67,1427.0,21.12
4844,3,10817.0,808,Housework,1970-01-01 17:28:00,14.67,1125.0,16.65
4845,11,9750.0,808,Travel and Other,1970-01-01 17:28:00,13.22,935.0,13.84
4846,4,4377.0,808,Child Care,1970-01-01 17:28:00,5.93,330.0,4.88
4847,5,473.0,808,Adult Care,1970-01-01 17:28:00,0.64,95.0,1.41
4848,10,31643.0,809,Leisure,1970-01-01 17:29:00,42.9,2851.0,42.19
4849,6,16723.0,809,Work and Education,1970-01-01 17:29:00,22.67,1427.0,21.12
4850,3,10801.0,809,Housework,1970-01-01 17:29:00,14.64,1124.0,16.63
4851,11,9746.0,809,Travel and Other,1970-01-01 17:29:00,13.21,931.0,13.78
4852,4,4378.0,809,Child Care,1970-01-01 17:29:00,5.94,329.0,4.87
4853,5,467.0,809,Adult Care,1970-01-01 17:29:00,0.63,96.0,1.42
4854,10,31670.0,810,Leisure,1970-01-01 17:30:00,42.94,2853.0,42.22
4855,6,16724.0,810,Work and Education,1970-01-01 17:30:00,22.67,1427.0,21.12
4856,3,10794.0,810,Housework,1970-01-01 17:30:00,14.63,1123.0,16.62
4857,11,9722.0,810,Travel and Other,1970-01-01 17:30:00,13.18,932.0,13.79
4858,4,4382.0,810,Child Care,1970-01-01 17:30:00,5.94,328.0,4.85
4859,5,466.0,810,Adult Care,1970-01-01 17:30:00,0.63,95.0,1.41
4860,10,32503.0,811,Leisure,1970-01-01 17:31:00,44.07,2958.0,43.77
4861,6,13546.0,811,Work and Education,1970-01-01 17:31:00,18.37,1141.0,16.88
4862,11,11894.0,811,Travel and Other,1970-01-01 17:31:00,16.13,1087.0,16.08
4863,3,11062.0,811,Housework,1970-01-01 17:31:00,15.0,1134.0,16.78
4864,4,4273.0,811,Child Care,1970-01-01 17:31:00,5.79,340.0,5.03
4865,5,480.0,811,Adult Care,1970-01-01 17:31:00,0.65,98.0,1.45
4866,10,32566.0,812,Leisure,1970-01-01 17:32:00,44.15,2964.0,43.86
4867,6,13542.0,812,Work and Education,1970-01-01 17:32:00,18.36,1138.0,16.84
4868,11,11849.0,812,Travel and Other,1970-01-01 17:32:00,16.06,1083.0,16.03
4869,3,11074.0,812,Housework,1970-01-01 17:32:00,15.01,1134.0,16.78
4870,4,4262.0,812,Child Care,1970-01-01 17:32:00,5.78,343.0,5.08
4871,5,465.0,812,Adult Care,1970-01-01 17:32:00,0.63,96.0,1.42
4872,10,32668.0,813,Leisure,1970-01-01 17:33:00,44.29,2970.0,43.95
4873,6,13547.0,813,Work and Education,1970-01-01 17:33:00,18.37,1136.0,16.81
4874,11,11758.0,813,Travel and Other,1970-01-01 17:33:00,15.94,1080.0,15.98
4875,3,11052.0,813,Housework,1970-01-01 17:33:00,14.98,1139.0,16.85
4876,4,4277.0,813,Child Care,1970-01-01 17:33:00,5.8,338.0,5.0
4877,5,456.0,813,Adult Care,1970-01-01 17:33:00,0.62,95.0,1.41
4878,10,32728.0,814,Leisure,1970-01-01 17:34:00,44.37,2975.0,44.02
4879,6,13545.0,814,Work and Education,1970-01-01 17:34:00,18.36,1136.0,16.81
4880,11,11690.0,814,Travel and Other,1970-01-01 17:34:00,15.85,1068.0,15.8
4881,3,11062.0,814,Housework,1970-01-01 17:34:00,15.0,1145.0,16.94
4882,4,4280.0,814,Child Care,1970-01-01 17:34:00,5.8,339.0,5.02
4883,5,453.0,814,Adult Care,1970-01-01 17:34:00,0.61,95.0,1.41
4884,10,32769.0,815,Leisure,1970-01-01 17:35:00,44.43,2990.0,44.24
4885,6,13548.0,815,Work and Education,1970-01-01 17:35:00,18.37,1136.0,16.81
4886,11,11642.0,815,Travel and Other,1970-01-01 17:35:00,15.78,1057.0,15.64
4887,3,11066.0,815,Housework,1970-01-01 17:35:00,15.0,1140.0,16.87
4888,4,4285.0,815,Child Care,1970-01-01 17:35:00,5.81,339.0,5.02
4889,5,448.0,815,Adult Care,1970-01-01 17:35:00,0.61,96.0,1.42
4890,10,33231.0,816,Leisure,1970-01-01 17:36:00,45.05,3036.0,44.92
4891,6,13545.0,816,Work and Education,1970-01-01 17:36:00,18.36,1134.0,16.78
4892,11,11133.0,816,Travel and Other,1970-01-01 17:36:00,15.09,1006.0,14.89
4893,3,11051.0,816,Housework,1970-01-01 17:36:00,14.98,1136.0,16.81
4894,4,4335.0,816,Child Care,1970-01-01 17:36:00,5.88,350.0,5.18
| |
#! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import sys
from typing import Callable, Dict, List, Optional, Union
import torch
from ludwig.constants import TEXT
from ludwig.encoders.base import Encoder
from ludwig.encoders.registry import register_encoder
from ludwig.modules.reduction_modules import SequenceReducer
logger = logging.getLogger(__name__)
@register_encoder("albert", TEXT)
class ALBERTEncoder(Encoder):
fixed_preprocessing_parameters = {
"word_tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "albert-base-v2",
}
def __init__(
self,
max_sequence_length,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "albert-base-v2",
trainable: bool = True,
reduce_output: str = "cls_pooled",
vocab_size: int = 30000,
embedding_size: int = 128,
hidden_size: int = 4096,
num_hidden_layers: int = 12,
num_hidden_groups: int = 1,
num_attention_heads: int = 64,
intermediate_size: int = 16384,
inner_group_num: int = 1,
hidden_act: str = "gelu_new",
hidden_dropout_prob: float = 0,
attention_probs_dropout_prob: float = 0,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
classifier_dropout_prob: float = 0.1,
position_embedding_type: str = "absolute",
pad_token_id: int = 0,
bos_token_id: int = 2,
eos_token_id: int = 3,
**kwargs
):
super().__init__()
try:
from transformers import AlbertConfig, AlbertModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained:
self.transformer = AlbertModel.from_pretrained(pretrained_model_name_or_path)
else:
config = AlbertConfig(
vocab_size=vocab_size,
embedding_size=embedding_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_hidden_groups=num_hidden_groups,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
inner_group_num=inner_group_num,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
classifier_dropout_prob=classifier_dropout_prob,
position_embedding_type=position_embedding_type,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
)
self.transformer = AlbertModel(config)
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("mt5", TEXT)
class MT5Encoder(Encoder):
fixed_preprocessing_parameters = {
"word_tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "google/mt5-base",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "google/mt5-base",
trainable: bool = True,
reduce_output: str = "cls_pooled",
vocab_size: int = 250112,
d_model: int = 512,
d_kv: int = 64,
d_ff: int = 1024,
num_layers: int = 8,
num_decoder_layers: int = None,
num_heads: int = 6,
relative_attention_num_buckets: int = 32,
dropout_rate: float = 0.1,
layer_norm_epsilon: float = 1e-06,
initializer_factor: float = 1.0,
feed_forward_proj: str = "gated-gelu",
is_encoder_decoder: bool = True,
use_cache: bool = True,
tokenizer_class: str = "T5Tokenizer",
tie_word_embeddings: bool = False,
pad_token_id: int = 0,
eos_token_id: int = 1,
decoder_start_token_id: int = 0,
**kwargs
):
super().__init__()
try:
from transformers import MT5Config, MT5EncoderModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained:
self.transformer = MT5EncoderModel.from_pretrained(pretrained_model_name_or_path)
else:
config = MT5Config(
vocab_size=vocab_size,
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
num_layers=num_layers,
num_decoder_layers=num_decoder_layers,
num_heads=num_heads,
relative_attention_num_buckets=relative_attention_num_buckets,
dropout_rate=dropout_rate,
layer_norm_epsilon=layer_norm_epsilon,
initializer_factor=initializer_factor,
feed_forward_proj=feed_forward_proj,
is_encoder_decoder=is_encoder_decoder,
use_cache=use_cache,
tokenizer_class=tokenizer_class,
tie_word_embeddings=tie_word_embeddings,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
decoder_start_token_id=decoder_start_token_id,
)
self.transformer = MT5EncoderModel(config)
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by MT5 tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("xlmroberta", TEXT)
class XLMRoBERTaEncoder(Encoder):
fixed_preprocessing_parameters = {
"word_tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "xlm-roberta-base",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "xlm-roberta-base",
reduce_output: str = "cls_pooled",
trainable: bool = True,
vocab_size: int = None,
pad_token_id: int = 1,
bos_token_id: int = 0,
eos_token_id: int = 2,
add_pooling_layer: bool = True,
**kwargs
):
super().__init__()
try:
from transformers import XLMRobertaConfig, XLMRobertaModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained:
self.transformer = XLMRobertaModel.from_pretrained(pretrained_model_name_or_path)
else:
config = XLMRobertaConfig(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
)
self.transformer = XLMRobertaModel(config, add_pooling_layer)
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by XLMRoberta tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("bert", TEXT)
class BERTEncoder(Encoder):
# TODO(justin): Use official class properties.
fixed_preprocessing_parameters = {
"word_tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "bert-base-uncased",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "bert-base-uncased",
trainable: bool = True,
reduce_output: str = "cls_pooled",
vocab_size: int = 30522,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: Union[str, Callable] = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
pad_token_id: int = 0,
gradient_checkpointing: bool = False,
position_embedding_type: str = "absolute",
classifier_dropout: float = None,
**kwargs
):
super().__init__()
try:
from transformers import BertConfig, BertModel
except ModuleNotFoundError:
logger.error(
" transformers is not installed. "
"In order to install all text feature dependencies run "
"pip install ludwig[text]"
)
sys.exit(-1)
if use_pretrained:
self.transformer = BertModel.from_pretrained(pretrained_model_name_or_path)
else:
config = BertConfig(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
gradient_checkpointing=gradient_checkpointing,
position_embedding_type=position_embedding_type,
classifier_dropout=classifier_dropout,
)
self.transformer = BertModel(config)
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if trainable:
self.transformer.train()
self.transformer.resize_token_embeddings(vocab_size)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
# TODO(shreya): Confirm that this is it
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.config.hidden_size,
]
)
return torch.Size([self.transformer.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@register_encoder("xlm", TEXT)
class XLMEncoder(Encoder):
fixed_preprocessing_parameters = {
"word_tokenizer": "hf_tokenizer",
"pretrained_model_name_or_path": "feature.pretrained_model_name_or_path",
}
default_params = {
"pretrained_model_name_or_path": "xlm-mlm-en-2048",
}
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = "xlm-mlm-en-2048",
trainable: bool = True,
reduce_output: str = "cls_pooled",
vocab_size: int = 30145,
emb_dim: int = 2048,
n_layers: int = 12,
n_heads: int = 16,
dropout: float = 0.1,
attention_dropout: float = 0.1,
gelu_activation: bool = True,
sinusoidal_embeddings: bool = False,
causal: bool = False,
asm: bool = False,
n_langs: int = 1,
use_lang_emb: bool = True,
max_position_embeddings: int = 512,
| |
stage = 4, block = 'a', trainable = True
)
x4 = identity_block_2D(
x4, 3, [128, 128, 256], stage = 4, block = 'b', trainable = True
)
x4 = identity_block_2D(
x4, 3, [128, 128, 256], stage = 4, block = 'c', trainable = True
)
# ===============================================
# Convolution Section 5
# ===============================================
x5 = conv_block_2D(
x4, 3, [256, 256, 512], stage = 5, block = 'a', trainable = True
)
x5 = identity_block_2D(
x5, 3, [256, 256, 512], stage = 5, block = 'b', trainable = True
)
x5 = identity_block_2D(
x5, 3, [256, 256, 512], stage = 5, block = 'c', trainable = True
)
y = MaxPooling2D((3, 1), strides = (2, 1), name = 'mpool2')(x5)
return inputs, y
def resnet_2D_v2(inputs, mode = 'train'):
    """Build the deeper 2-D ResNet trunk used by VGGVox.

    Five convolution stages: an initial 7x7 conv + BN + ReLU + max-pool,
    then four residual stages, each a conv_block_2D (projection shortcut)
    followed by two identity_block_2D, ending with a (3, 1) max-pool.
    Same layout as resnet_2D_v1 but with larger filter counts and
    stride-(1, 1) projection blocks in stages 2 and 4.

    :param inputs: 4-D input tensor (batch, H, W, channels) -- presumably
        a spectrogram; TODO confirm against caller.
    :param mode: accepted for API symmetry with resnet_2D_v1; unused here.
    :return: tuple (inputs, y) of the untouched input tensor and the
        pooled feature map.
    """
    # Channels-last batch-normalization axis.
    bn_axis = 3
    # if mode == 'train':
    #     inputs = Input(shape=input_dim, name='input')
    # else:
    #     inputs = Input(shape=(input_dim[0], None, input_dim[-1]), name='input')
    # ===============================================
    # Convolution Block 1
    # ===============================================
    x1 = Conv2D(
        64,
        (7, 7),
        strides = (2, 2),
        kernel_initializer = 'orthogonal',
        use_bias = False,
        trainable = True,
        kernel_regularizer = l2(weight_decay),
        padding = 'same',
        name = 'conv1_1/3x3_s1',
    )(inputs)
    x1 = BatchNormalization(
        axis = bn_axis, name = 'conv1_1/3x3_s1/bn', trainable = True
    )(x1)
    x1 = Activation('relu')(x1)
    x1 = MaxPooling2D((2, 2), strides = (2, 2))(x1)
    # ===============================================
    # Convolution Section 2
    # ===============================================
    x2 = conv_block_2D(
        x1,
        3,
        [64, 64, 256],
        stage = 2,
        block = 'a',
        strides = (1, 1),
        trainable = True,
    )
    x2 = identity_block_2D(
        x2, 3, [64, 64, 256], stage = 2, block = 'b', trainable = True
    )
    x2 = identity_block_2D(
        x2, 3, [64, 64, 256], stage = 2, block = 'c', trainable = True
    )
    # ===============================================
    # Convolution Section 3
    # ===============================================
    x3 = conv_block_2D(
        x2, 3, [128, 128, 512], stage = 3, block = 'a', trainable = True
    )
    x3 = identity_block_2D(
        x3, 3, [128, 128, 512], stage = 3, block = 'b', trainable = True
    )
    x3 = identity_block_2D(
        x3, 3, [128, 128, 512], stage = 3, block = 'c', trainable = True
    )
    # ===============================================
    # Convolution Section 4
    # ===============================================
    x4 = conv_block_2D(
        x3,
        3,
        [256, 256, 1024],
        stage = 4,
        block = 'a',
        strides = (1, 1),
        trainable = True,
    )
    x4 = identity_block_2D(
        x4, 3, [256, 256, 1024], stage = 4, block = 'b', trainable = True
    )
    x4 = identity_block_2D(
        x4, 3, [256, 256, 1024], stage = 4, block = 'c', trainable = True
    )
    # ===============================================
    # Convolution Section 5
    # ===============================================
    x5 = conv_block_2D(
        x4, 3, [512, 512, 2048], stage = 5, block = 'a', trainable = True
    )
    x5 = identity_block_2D(
        x5, 3, [512, 512, 2048], stage = 5, block = 'b', trainable = True
    )
    x5 = identity_block_2D(
        x5, 3, [512, 512, 2048], stage = 5, block = 'c', trainable = True
    )
    # Final (3, 1) pooling collapses part of the first spatial axis.
    y = MaxPooling2D((3, 1), strides = (2, 1), name = 'mpool2')(x5)
    return inputs, y
class VladPooling(keras.layers.Layer):
    """
    This layer follows the NetVlad, GhostVlad

    Trainable VLAD pooling: softly assigns each local descriptor to
    ``k_centers`` learnable cluster centers (plus ``g_centers`` "ghost"
    clusters that absorb noise and are dropped from the output) and
    aggregates L2-normalized residuals.

    Inputs: ``[feat, cluster_score]`` with shapes
    (bz, W, H, D) and (bz, W, H, clusters).
    Output: (bz, k_centers * D).
    """
    def __init__(self, mode, k_centers, g_centers = 0, **kwargs):
        # mode: 'vlad' or 'gvlad'; in 'gvlad' the ghost clusters are
        # trimmed from the aggregated output in call().
        self.k_centers = k_centers
        self.g_centers = g_centers
        self.mode = mode
        super(VladPooling, self).__init__(**kwargs)
    def build(self, input_shape):
        # One learnable center per (real + ghost) cluster; the feature
        # dimension D is taken from the first input's last axis.
        self.cluster = self.add_weight(
            shape = [self.k_centers + self.g_centers, input_shape[0][-1]],
            name = 'centers',
            initializer = 'orthogonal',
        )
        self.built = True
    def compute_output_shape(self, input_shape):
        assert input_shape
        # Ghost clusters are excluded from the flattened output size.
        return (input_shape[0][0], self.k_centers * input_shape[0][-1])
    def call(self, x):
        # feat : bz x W x H x D, cluster_score: bz X W x H x clusters.
        feat, cluster_score = x
        num_features = feat.shape[-1]
        # softmax normalization to get soft-assignment.
        # A : bz x W x H x clusters
        # Subtract the per-position max for numerical stability.
        max_cluster_score = K.max(cluster_score, -1, keepdims = True)
        exp_cluster_score = K.exp(cluster_score - max_cluster_score)
        A = exp_cluster_score / K.sum(
            exp_cluster_score, axis = -1, keepdims = True
        )
        # Now, need to compute the residual, self.cluster: clusters x D
        A = K.expand_dims(A, -1)  # A : bz x W x H x clusters x 1
        feat_broadcast = K.expand_dims(
            feat, -2
        )  # feat_broadcast : bz x W x H x 1 x D
        feat_res = (
            feat_broadcast - self.cluster
        )  # feat_res : bz x W x H x clusters x D
        weighted_res = tf.multiply(
            A, feat_res
        )  # weighted_res : bz x W x H x clusters x D
        # Sum the weighted residuals over the spatial axes.
        cluster_res = K.sum(weighted_res, [1, 2])
        if self.mode == 'gvlad':
            # Drop the trailing ghost clusters from the aggregation.
            cluster_res = cluster_res[:, : self.k_centers, :]
        # Intra-normalization, then flatten to (bz, k_centers * D).
        cluster_l2 = K.l2_normalize(cluster_res, -1)
        outputs = K.reshape(
            cluster_l2, [-1, int(self.k_centers) * int(num_features)]
        )
        return outputs
def amsoftmax_loss(y_true, y_pred, scale = 30, margin = 0.35):
    """Additive-margin softmax loss.

    Shifts the logit of the true class down by ``margin``, scales all
    logits by ``scale``, then applies categorical cross-entropy on the
    resulting logits.
    """
    shifted = y_pred - margin
    logits = (y_true * shifted + (1 - y_true) * y_pred) * scale
    return K.categorical_crossentropy(y_true, logits, from_logits = True)
def vggvox_resnet2d_icassp(
    inputs, num_class = 8631, mode = 'train', args = None
):
    """Build the VGGVox speaker network head on top of a 2-D ResNet trunk.

    Pipeline: ResNet trunk -> 7x1 bottleneck conv -> feature aggregation
    (avg / VLAD / GhostVLAD) -> fc6 -> classifier. In 'eval' mode the
    classifier output is replaced by the L2-normalized embedding.

    NOTE(review): net/loss/vlad_clusters/... are hard-coded below, so only
    the 'resnet34s' + 'gvlad' + 'softmax' branches actually execute here;
    `args` and `mgpu` are unused. `trnloss` is assigned but not used in
    this function -- presumably consumed by a caller in the original
    code; confirm before removing.

    :param inputs: input tensor for the trunk (spectrogram batch).
    :param num_class: number of speaker classes for the softmax head.
    :param mode: 'train' or 'eval'.
    :param args: unused.
    :return: the prediction tensor ('train') or the normalized embedding
        ('eval').
    """
    # Hard-coded configuration (see NOTE above).
    net = 'resnet34s'
    loss = 'softmax'
    vlad_clusters = 8
    ghost_clusters = 2
    bottleneck_dim = 512
    aggregation = 'gvlad'
    mgpu = 0
    if net == 'resnet34s':
        inputs, x = resnet_2D_v1(inputs, mode = mode)
    else:
        inputs, x = resnet_2D_v2(inputs, mode = mode)
    # Bottleneck: 7x1 conv collapsing the frequency axis into features.
    x_fc = keras.layers.Conv2D(
        bottleneck_dim,
        (7, 1),
        strides = (1, 1),
        activation = 'relu',
        kernel_initializer = 'orthogonal',
        use_bias = True,
        trainable = True,
        kernel_regularizer = keras.regularizers.l2(weight_decay),
        bias_regularizer = keras.regularizers.l2(weight_decay),
        name = 'x_fc',
    )(x)
    # ===============================================
    # Feature Aggregation
    # ===============================================
    if aggregation == 'avg':
        if mode == 'train':
            x = keras.layers.AveragePooling2D(
                (1, 5), strides = (1, 1), name = 'avg_pool'
            )(x)
            x = keras.layers.Reshape((-1, bottleneck_dim))(x)
        else:
            x = keras.layers.GlobalAveragePooling2D(name = 'avg_pool')(x)
            x = keras.layers.Reshape((1, bottleneck_dim))(x)
    elif aggregation == 'vlad':
        # Per-position soft-assignment scores for the VLAD clusters.
        x_k_center = keras.layers.Conv2D(
            vlad_clusters,
            (7, 1),
            strides = (1, 1),
            kernel_initializer = 'orthogonal',
            use_bias = True,
            trainable = True,
            kernel_regularizer = keras.regularizers.l2(weight_decay),
            bias_regularizer = keras.regularizers.l2(weight_decay),
            name = 'vlad_center_assignment',
        )(x)
        x = VladPooling(
            k_centers = vlad_clusters, mode = 'vlad', name = 'vlad_pool'
        )([x_fc, x_k_center])
    elif aggregation == 'gvlad':
        # GhostVLAD: extra ghost clusters absorb noise; they are trimmed
        # inside VladPooling.
        x_k_center = keras.layers.Conv2D(
            vlad_clusters + ghost_clusters,
            (7, 1),
            strides = (1, 1),
            kernel_initializer = 'orthogonal',
            use_bias = True,
            trainable = True,
            kernel_regularizer = keras.regularizers.l2(weight_decay),
            bias_regularizer = keras.regularizers.l2(weight_decay),
            name = 'gvlad_center_assignment',
        )(x)
        x = VladPooling(
            k_centers = vlad_clusters,
            g_centers = ghost_clusters,
            mode = 'gvlad',
            name = 'gvlad_pool',
        )([x_fc, x_k_center])
    else:
        raise IOError('==> unknown aggregation mode')
    # Embedding layer (fc6).
    x = keras.layers.Dense(
        bottleneck_dim,
        activation = 'relu',
        kernel_initializer = 'orthogonal',
        use_bias = True,
        trainable = True,
        kernel_regularizer = keras.regularizers.l2(weight_decay),
        bias_regularizer = keras.regularizers.l2(weight_decay),
        name = 'fc6',
    )(x)
    if loss == 'softmax':
        y = keras.layers.Dense(
            num_class,
            activation = 'softmax',
            kernel_initializer = 'orthogonal',
            use_bias = False,
            trainable = True,
            kernel_regularizer = keras.regularizers.l2(weight_decay),
            bias_regularizer = keras.regularizers.l2(weight_decay),
            name = 'prediction',
        )(x)
        trnloss = 'categorical_crossentropy'
    elif loss == 'amsoftmax':
        # AM-softmax requires L2-normalized features and unit-norm weights.
        x_l2 = keras.layers.Lambda(lambda x: K.l2_normalize(x, 1))(x)
        y = keras.layers.Dense(
            num_class,
            kernel_initializer = 'orthogonal',
            use_bias = False,
            trainable = True,
            kernel_constraint = keras.constraints.unit_norm(),
            kernel_regularizer = keras.regularizers.l2(weight_decay),
            bias_regularizer = keras.regularizers.l2(weight_decay),
            name = 'prediction',
        )(x_l2)
        trnloss = amsoftmax_loss
    else:
        raise IOError('==> unknown loss.')
    if mode == 'eval':
        # Replace class predictions with the L2-normalized fc6 embedding.
        y = keras.layers.Lambda(lambda x: keras.backend.l2_normalize(x, 1))(x)
    return y
# Fine-tuning learning rate for the Adam optimizer in model_fn below.
learning_rate = 1e-5
# Pretrained VGGVox checkpoint used to warm-start all non-'prediction' variables.
init_checkpoint = '../vggvox-speaker-identification/v2/vggvox.ckpt'
def model_fn(features, labels, mode, params):
    """tf.estimator model function for VGGVox-based VAD fine-tuning.

    Builds the classifier via vggvox_resnet2d_icassp, warm-starts every
    variable except the freshly initialized 'prediction' head from
    ``init_checkpoint``, and returns an EstimatorSpec for TRAIN or EVAL.

    :param features: dict with 'inputs' (spectrogram batch) and
        'targets' (int labels; only column 0 is used).
    :param labels: unused -- targets travel inside ``features``.
    :param mode: a tf.estimator.ModeKeys value; PREDICT is not supported.
    :param params: unused.
    :raises ValueError: for unsupported modes. (Previously an unsupported
        mode fell through and `return estimator_spec` raised
        UnboundLocalError; a dead local `params` dict of dataset
        constants shadowing the argument was also removed.)
    """
    Y = tf.cast(features['targets'][:, 0], tf.int32)
    logits = vggvox_resnet2d_icassp(
        features['inputs'], num_class = 2, mode = 'train'
    )
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits = logits, labels = Y
        )
    )
    # Named so LoggingTensorHook(['train_loss', ...]) can find the tensors.
    tf.identity(loss, 'train_loss')
    accuracy = tf.metrics.accuracy(
        labels = Y, predictions = tf.argmax(logits, axis = 1)
    )
    tf.identity(accuracy[1], name = 'train_accuracy')
    # Warm-start everything except the new classification head.
    variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    variables = [v for v in variables if 'prediction' not in v.name]
    assignment_map, initialized_variable_names = get_assignment_map_from_checkpoint(
        variables, init_checkpoint
    )
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(loss, global_step = global_step)
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = mode, loss = loss, train_op = train_op
        )
    elif mode == tf.estimator.ModeKeys.EVAL:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = tf.estimator.ModeKeys.EVAL,
            loss = loss,
            eval_metric_ops = {'accuracy': accuracy},
        )
    else:
        # Bug fix: previously estimator_spec stayed unbound here.
        raise ValueError('unsupported estimator mode: %s' % (mode,))
    return estimator_spec
train_hooks = [
tf.train.LoggingTensorHook(
['train_accuracy', 'train_loss'], every_n_iter = 1
)
]
train_files = glob('vad2/data/vad-train-*') + glob('noise/data/vad-train-*')
train_dataset = get_dataset(train_files, batch_size = 64)
dev_files = glob('vad2/data/vad-dev-*') + glob('noise/data/vad-dev-*')
dev_dataset = get_dataset(dev_files, batch_size = 32)
save_directory = 'output-vggvox-v2-vad'
train.run_training(
train_fn = train_dataset,
model_fn = model_fn,
| |
<filename>stemdl/inputs.py
"""
Created on 10/8/17.
@author: <NAME>.
email: <EMAIL>
"""
import tensorflow as tf
import numpy as np
import sys
import os
from itertools import chain, cycle
from tensorflow.python.ops import data_flow_ops
import horovod.tensorflow as hvd
import lmdb
import time
from mpi4py import MPI
# NOTE(review): 'global' at module scope is a no-op; kept as-is.
global world_rank
# MPI rank of this process (rank 0 acts as the logging root).
world_rank = MPI.COMM_WORLD.Get_rank()
# Silence TensorFlow's verbose INFO/WARN logging.
tf.logging.set_verbosity(tf.logging.ERROR)
def print_rank(*args, **kwargs):
    """Print only on the Horovod root rank (0) to avoid duplicated logs."""
    if hvd.rank() != 0:
        return
    print(*args, **kwargs)
class DatasetTFRecords(object):
"""
Handles training and evaluation data operations. \n
Data is read from a TFRecords filename queue.
"""
    def __init__(self, params, dataset=None, mode='train',
                 debug=False):
        """
        :param params: dict of pipeline parameters (batch size, data dir,
            dtypes, image geometry, ...).
        :param dataset: name of a known dataset; selects image/label keys
            and per-feature specs (module-level dicts such as spacegroup,
            reconstruction_2d, abf_oxides_*). None selects the generic
            'image_raw'/'label' path in decode_image_label.
        :param mode: 'train', 'validation' or 'test'.
        :param debug: enable TFRecord inspection and image summaries.
        """
        self.params = params
        self.mode = mode
        self.debug = debug
        self.dataset = dataset
        # Map the dataset name onto its feature specification.
        # NOTE(review): an unrecognized dataset name leaves
        # `features_specs` unset, which fails later with AttributeError
        # -- confirm whether that is intended.
        if self.dataset == 'spacegroup_classification':
            self.features_specs={'image_keys':['image_raw'], 'label_keys':['space_group'],
                                 'specs': spacegroup}
        elif self.dataset == 'chemicalcomp_regression':
            self.features_specs={'specs': spacegroup,
                                 'image_keys':['image_raw'],
                                 'label_keys':['chemical_comp']}
        elif self.dataset == 'spacegroup_chemicalcomp':
            self.features_specs = {'specs':spacegroup,
                                   'image_keys':['image_raw'],
                                   'label_keys':['chemical_comp', 'space_group']}
        elif self.dataset == '2d_reconstruction':
            self.features_specs = {'image_keys': ['cbed'],
                                   'label_keys': ['2d_potential'], 'specs': reconstruction_2d}
        elif self.dataset == 'abf_oxides_regression':
            self.features_specs = {'image_keys': ['image_raw'],
                                   'label_keys': ['label'], 'specs': abf_oxides_regression}
        elif self.dataset == 'abf_oxides_classification':
            self.features_specs = {'image_keys': ['image_raw'],
                                   'label_keys': ['label'], 'specs': abf_oxides_classification}
        elif self.dataset is None:
            self.features_specs = None
def set_mode(self,mode='train'):
self.mode = mode
    def decode_image_label(self, record):
        """
        Decode one serialized tf.Example into an (image, label) pair.

        When ``features_specs`` is None the legacy fixed-key path is used
        ('image_raw'/'label', geometry from self.params); otherwise keys,
        shapes and dtypes come from the dataset's spec dict.

        Returns: image, label decoded from ds
        """
        if self.features_specs is None:
            features = tf.parse_single_example( record,
                features={
                    'image_raw': tf.FixedLenFeature([], tf.string),
                    'label': tf.FixedLenFeature([], tf.string),
                })
            # decode from byte and reshape label and image
            label_dtype = tf.as_dtype(self.params['LABEL_DTYPE'])
            label = tf.decode_raw(features['label'], label_dtype)
            label.set_shape(self.params['NUM_CLASSES'])
            image = tf.decode_raw(features['image_raw'], self.params['IMAGE_DTYPE'] )
            image.set_shape([self.params['IMAGE_HEIGHT'] * self.params['IMAGE_WIDTH'] * self.params['IMAGE_DEPTH']])
            image = tf.reshape(image, [self.params['IMAGE_HEIGHT'], self.params['IMAGE_WIDTH'], self.params['IMAGE_DEPTH']])
        else:
            specs = self.features_specs['specs']
            features_images = [(image_key, tf.FixedLenFeature([], tf.string))
                               for image_key in self.features_specs['image_keys']]
            features_labels = [(label_key, tf.FixedLenFeature([], tf.string))
                               for label_key in self.features_specs['label_keys']]
            # parse a single record
            features_all = dict(features_labels + features_images)
            features = tf.parse_single_example(record, features=features_all)
            # process labels
            labels = []
            for label_key in self.features_specs['label_keys']:
                label_dtype = tf.as_dtype(specs[label_key]['dtype'])
                label_shape = specs[label_key]['shape']
                label = tf.decode_raw(features[label_key], label_dtype)
                label.set_shape(np.prod(np.array(label_shape)))
                label = tf.reshape(label, label_shape)
                if specs[label_key]['dtype'] == 'int64':
                    # presumably cast so int labels can be concatenated
                    # with float labels below -- TODO confirm
                    label = tf.cast(label, tf.float64)
                labels.append(label)
            if len(labels) == 1:
                label = labels[0]
            else:
                label = tf.concat([tf.expand_dims(label, 0) for label in labels], 1)
            # process images
            images = []
            for image_key in self.features_specs['image_keys']:
                image_dtype = tf.as_dtype(specs[image_key]['dtype'])
                image_shape = specs[image_key]['shape']
                image = tf.decode_raw(features[image_key], image_dtype)
                image.set_shape(np.prod(np.array(image_shape)))
                image = tf.reshape(image, image_shape)
                images.append(image)
            # NOTE(review): this condition looks inverted -- `> 1` keeps
            # only the FIRST image while a single image goes through
            # tf.concat; verify whether `== 1` was intended.
            if len(images) > 1:
                image = images[0]
            else:
                image = tf.concat(images, 1)
                # TODO stack images and return as one
                pass
        if self.features_specs is None or specs['preprocess']:
            # TODO: all of this should be cached
            # standardize the image to [-1.,1.]
            #image = tf.sqrt(image)
            #image = tf.image.per_image_standardization(image)
            pass
        # Checking for nan, bug in simulation codes...
        #image = tf.where(tf.is_nan(image), -tf.ones_like(image), image)
        # Manipulate labels
        #label = tf.expand_dims(label,axis=0)
        #label = tf.sqrt(tf.sqrt(label))
        #label = tf.image.per_image_standardization(label)
        return image, label
    def glimpse_at_image(self, image):
        """
        Apply isotropic scaling, sampled from a normal distribution.
        :param image: 2D tensor
        :param params: dict, image dimension parameters must be included
        :return: 2D tensor
        """
        #TODO: change calls to image specs from self.params to self.features
        image_params = self.features_specs['specs'][self.features_specs['image_keys'][0]]
        # Zoom factor ~ N(1.0, 0.05); drawn with numpy at graph-build time,
        # so every example processed by this graph shares the same zoom.
        zoom_factor = np.random.normal(1.0, 0.05, size=1)
        crop_y_size, crop_x_size = image_params['IMAGE_HEIGHT'], image_params['IMAGE_WIDTH']
        size = tf.constant(value=[int(np.round(crop_y_size / zoom_factor)),
                                  int(np.round(crop_x_size / zoom_factor))], dtype=tf.int32)
        # Glimpse is centered on the image midpoint.
        cen_y = np.ones((1,), dtype=np.float32) * int(image_params['IMAGE_HEIGHT'] / 2)
        cen_x = np.ones((1,), dtype=np.float32) * int(image_params['IMAGE_WIDTH'] / 2)
        offsets = tf.stack([cen_y, cen_x], axis=1)
        # extract_glimpse expects a batch dimension.
        scaled_image = tf.expand_dims(image, axis=0)
        scaled_image = tf.image.extract_glimpse(scaled_image, size, offsets,
                                                centered=False,
                                                normalized=False,
                                                uniform_noise=False)
        # Drop the batch dimension again.
        scaled_image = tf.reshape(scaled_image, (scaled_image.shape[1].value, scaled_image.shape[2].value,
                                                 scaled_image.shape[3].value))
        # Resize the zoomed crop back to the nominal image size.
        scaled_image = tf.image.resize_images(scaled_image, (image_params['IMAGE_HEIGHT'], image_params['IMAGE_WIDTH']))
        return scaled_image
def add_noise_image(self, image):
"""
Adds random noise to the provided image
:param image: 2D image specified as a tensor
:param params: dict, parameters required - noise_min and noise_max
:return: 2d tensor
"""
alpha = tf.random_uniform([1], minval=self.params['noise_min'], maxval=self.params['noise_max'], dtype=image.dtype)
noise = tf.random_uniform(image.shape, dtype=image.dtype)
trans_image = (1 - alpha[0]) * image + alpha[0] * noise
return trans_image
def distort(self, image):
"""
Performs distortions on an image: noise + global affine transformations.
Args:
image: 3D Tensor
noise_min:float, lower limit in (0,1)
noise_max:float, upper limit in (0,1)
geometric: bool, apply affine distortion
Returns:
distorted_image: 3D Tensor
"""
# Apply random global affine transformations
image = self.rotate_image(image)
image = self.add_noise_image(image)
return image
    def get_glimpses(self, batch_images):
        """
        Get bounded glimpses from images, corresponding to ~ 2x1 supercell
        :param batch_images: batch of training images
        :return: batch of glimpses

        Window centers are chosen per params['glimpse_mode']:
        'uniform' (random, fully inside the image), 'normal'
        (Gaussian around the center) or 'fixed' (constant offsets).
        Any other mode returns the batch unchanged.
        """
        if self.params['glimpse_mode'] not in ['uniform', 'normal', 'fixed']:
            """
            print('No image glimpsing will be performed since mode: "{}" is not'
                  'among "uniform", "normal", "fixed"'
                  '.'.format(self.params['glimpse_mode']))
            """
            return batch_images
        # set size of glimpses
        #TODO: change calls to image specs from self.params to self.features
        image_params = self.features_specs['specs'][self.features_specs['image_keys'][0]]
        y_size, x_size = image_params['IMAGE_HEIGHT'], image_params['IMAGE_WIDTH']
        crop_y_size, crop_x_size = image_params['CROP_HEIGHT'], image_params['CROP_WIDTH']
        size = tf.constant(value=[crop_y_size, crop_x_size],
                           dtype=tf.int32)
        if self.params['glimpse_mode'] == 'uniform':
            # generate uniform random window centers for the batch with overlap with input
            y_low, y_high = int(crop_y_size / 2), int(y_size - crop_y_size // 2)
            x_low, x_high = int(crop_x_size / 2), int(x_size - crop_x_size // 2)
            cen_y = tf.random_uniform([self.params['batch_size']], minval=y_low, maxval=y_high)
            cen_x = tf.random_uniform([self.params['batch_size']], minval=x_low, maxval=x_high)
            offsets = tf.stack([cen_y, cen_x], axis=1)
        elif self.params['glimpse_mode'] == 'normal':
            # generate normal random window centers for the batch with overlap with input
            cen_y = tf.random_normal([self.params['batch_size']], mean=y_size // 2, stddev=self.params['glimpse_normal_off_stdev'])
            cen_x = tf.random_normal([self.params['batch_size']], mean=x_size // 2, stddev=self.params['glimpse_normal_off_stdev'])
            offsets = tf.stack([cen_y, cen_x], axis=1)
        elif self.params['glimpse_mode'] == 'fixed':
            # fixed crop
            cen_y = np.ones((self.params['batch_size'],), dtype=np.int32) * self.params['glimpse_height_off']
            cen_x = np.ones((self.params['batch_size'],), dtype=np.int32) * self.params['glimpse_width_off']
            offsets = np.vstack([cen_y, cen_x]).T
            offsets = tf.constant(value=offsets, dtype=tf.float32)
        else:
            # should not come here:
            return batch_images
        # extract glimpses
        glimpse_batch = tf.image.extract_glimpse(batch_images, size, offsets, centered=False, normalized=False,
                                                 uniform_noise=False, name='batch_glimpses')
        return glimpse_batch
    def minibatch(self):
        """
        Returns minibatch of images and labels from TF records file.

        Reads serialized records via RecordInput, decodes and optionally
        distorts each example, then restacks them into batch tensors.
        Optional post-steps: bilinear resize and a debug 'tile' mode that
        replaces the batch with constant tensors.
        """
        mode = self.mode
        batch_size = self.params['batch_size']
        # Unknown modes silently fall back to 'train'.
        if mode not in ['train', 'validation', 'test']:
            mode = 'train'
        if self.debug: self.inspect_tfrecords(mode)
        record_input = data_flow_ops.RecordInput(
            file_pattern=os.path.join(self.params['data_dir'], '*.tfrecords'),
            parallelism=self.params['IO_threads'],
            buffer_size=self.params['buffer_cap'],
            batch_size=batch_size)
        records = record_input.get_yield_op()
        # Split batch into individual images
        records = tf.split(records, batch_size, 0)
        records = [tf.reshape(record, []) for record in records]
        #print('record contents %s' %(format(records)))
        #print('record length %s and contents %s' %(len(records),format(records)))
        # Deserialize and preprocess images into batches for each device
        images = []
        labels = []
        with tf.name_scope('input_pipeline'):
            if self.params[mode + '_distort']:
                print_rank('images will be distorted')
            for i, record in enumerate(records):
                image, label = self.decode_image_label(record)
                if self.params[mode + '_distort']:
                    # image = self.add_noise_image(image)
                    image = self.distort(image)
                images.append(image)
                labels.append(label)
            # Per-example shapes (identical across the loop) used to
            # restore static shape information after parallel_stack.
            image_shape = image.get_shape().as_list()
            label_shape = label.get_shape().as_list()
            # Stack images and labels back into a single tensor
            labels = tf.parallel_stack(labels)
            images = tf.parallel_stack(images)
            # reshape them to the expected shape:
            labels_newshape = [batch_size] + label_shape
            images_newshape = [batch_size] + image_shape
            labels = tf.reshape(labels, labels_newshape)
            images = tf.reshape(images, images_newshape)
            # glimpse images: moved to GPU
            #images = self.get_glimpses(images)
            # Display the training images in the Tensorboard visualizer.
            if self.debug: tf.summary.image("images", images, max_outputs=4)
            # resize
            if self.params['resize']:
                images = tf.image.resize_bilinear(images, [self.params['RESIZE_WIDTH'],
                                                           self.params['RESIZE_HEIGHT']])
            # NOTE(review): 'tile' discards the decoded batch and returns
            # constant all-ones tensors -- presumably a benchmarking /
            # debugging mode; confirm before relying on it.
            if self.params['tile']:
                images = tf.ones([self.params['IMAGE_DEPTH'], self.params['IMAGE_HEIGHT'],
                                  self.params['IMAGE_WIDTH']], dtype=self.params['IMAGE_DTYPE'])
                labels = tf.ones([256, 512,512], dtype=self.params['LABEL_DTYPE'])
        return images, labels
@staticmethod
def stage(tensors):
"""
Stages the given tensors in a StagingArea for asynchronous put/get
:param tensors: tf.Tensor
:return: get and put tf.Op operations.
"""
staging_area = data_flow_ops.StagingArea(
dtypes=[tensor.dtype for tensor in tensors],
shapes=[tensor.get_shape() for tensor in tensors])
load_op = staging_area.put(tensors)
get_tensors = staging_area.get()
get_tensors = [tf.reshape(get_t, t.get_shape())
for (get_t,t) in zip(get_tensors, tensors)]
return load_op, get_tensors
@staticmethod
def onehot(label):
index = tf.cast(label[0],tf.int32)
full_vec = tf.cast(tf.linspace(20., 200., 91),tf.int32)
bool_vector = tf.equal(index, full_vec)
onehot_vector = tf.cast(bool_vector, tf.int64)
return onehot_vector
@staticmethod
def label_minmaxscaling(label, min_vals, max_vals, scale_range=[0,1]):
"""
:param label: tensor
:param min_vals: list, minimum value for each label dimension
:param max_vals: list, maximum value for each label dimension
:param range: list, range of label, default [0,1]
:return:
scaled label tensor
"""
min_tensor = tf.constant(min_vals, dtype=tf.float32)
max_tensor = tf.constant(max_vals, dtype=tf.float32)
if label.dtype != tf.float32:
orig_dtype = label.dtype
label = tf.cast(label, tf.float32)
scaled_label = (label - min_tensor)/(max_tensor - min_tensor)
scaled_label = scaled_label * (scale_range[-1] - scale_range[0]) + scale_range[0]
if scaled_label.dtype != orig_dtype:
scaled_label = tf.cast(scaled_label, orig_dtype)
return scaled_label
@staticmethod
def rotate_image(image):
"""
Apply random global affine transformations, sampled from a normal distributions.
:param image: 2D tensor
:return: 2D tensor
"""
# Setting bounds and generating random values for scaling and rotations
scale_X = np.random.normal(1.0, 0.025, size=1)
scale_Y = np.random.normal(1.0, 0.025, size=1)
theta_angle = np.random.normal(0., 1, size=1)
nu_angle = np.random.normal(0., 1, size=1)
# Constructing transfomation matrix
a_0 = scale_X * np.cos(np.deg2rad(theta_angle))
a_1 = -scale_Y | |
-self.__data
def __abs__(self):
return abs(self.__data)
def __mul__(self, other):
if isinstance(other, Scalar):
return self.__data * other.__data
else:
return self.__data * other # Hope that other defines multiplication with a simple scalar numeric
def __add__(self, other):
if isinstance(other, Scalar):
return self.__data + other.__data
else:
return self.__data + other
def __radd__(self, other):
if isinstance(other, Scalar):
return other.__data + self.__data
else:
return other + self.__data
def __sub__(self, other):
if isinstance(other, Scalar):
return self.__data * other.__data
else:
return self.__data * other
def __div__(self, other):
if isinstance(other. Scalar):
return self.__data / other.__data
else:
return self.__data / other
def __rdiv__(self, other):
if isinstance(other, Scalar):
return other.__data / self.__data
else:
return other / self.__data
# Define a global exception for unit mismatch
class UnitMismatch(Exception):
    """Raised when the units of combined quantities do not match."""
# Define a global exception for missing units
class MissingUnits(Exception):
    """Raised when a quantity is missing required unit metadata."""
class MissingDataArrayAxis(Exception):
    """Raised when an expected axis is absent from a DataArray."""
def xd_identity(np_vector, axis_name, units=None, attrs=None):
    """ Create an identity xarray.DataArray. That is, a DataArray vector in which both the values and axis
    coordinates are identical.
    :param np_vector: Vector of numeric data
    :param axis_name: Name for the axis - must be in vocabulary
    :param units: The units of the np_vector data. If the units are not in the default units, the Python pint package
        is used to make a conversion. If units are not given, it is assumed that the data is already in the
        default units for the quantity named in axis_name. It is better to provide the units if in any way
        unsure.
    :param attrs: Dictionary of additional attributes to attach to the DataArray
    :return: xarray.DataArray whose values and coordinates are both np_vector,
        converted to the default units for axis_name.
    """
    if axis_name in long_name:
        the_long_name = long_name[axis_name]
    else:
        warnings.warn('Unknown axis name ' + axis_name + ' encountered in xd_identity creation.')
        # Bug fix: the_long_name was previously left unbound on this branch,
        # so the attrs update below raised UnboundLocalError. Fall back to
        # the raw axis name.
        the_long_name = axis_name
    if axis_name in default_units:
        the_units = default_units[axis_name]
    else:
        the_units = ''  # Assumed to be unitless quantity
    if units is None:
        units = the_units
    # Convert the data to the default units for this axis via pint.
    values = Q_(np_vector, units)  # Create a pint quantity with the given units
    values = values.to(the_units)  # actually the default units
    np_vector = values.magnitude
    if attrs is not None:
        the_attrs = attrs
    else:
        the_attrs = {}
    the_attrs.update({'long_name': the_long_name})
    the_attrs.update({'units': the_units})
    return xarray.DataArray(np_vector, [(axis_name, np_vector)], name=axis_name, attrs=the_attrs)
def xd_harmonise_interp(xd_list):
    """ Perform linear interpolation on merged set of axis points for two or more xarray DataArray objects.
    This function can be used to prepare (harmonise) multiple xarray.DataArray objects for multiplication or addition
    on a common set of coordinate axis points by linearly interpolating all DataArray objects onto the same
    set of points, obtained by merging and sorting the points from all input DataArray objects.
    The scipy linear grid interpolator is used for this purpose. See:
    scipy.interpolate.RegularGridInterpolator
    :param xd_list: list/tuple of xarray.DataArray objects to harmonise
    :return: Tuple of xarray.DataArray objects with merged and linearly interpolated values in all axes.
        Only unique values in the interpolation axis are used.
        Points outside an input's original domain are filled with 0.0.
    """
    # TODO : enforce compatible attributes or not ? What attributes in returned object ?
    # TODO : Ignore axes that have non-numeric coordinates e.g. xdarray['axisname'].dtype.char in 'SUa',
    # TODO : which detects dtypes that are string, or xdarray['axisname'].dtype.kind in 'fc' (float or complex)
    # TODO : alternatively require that axis coordinates are always numeric, say with a list of labels as attrs
    # TODO : What about interpolation on times axes
    # TODO : Need to expand on extrapolation (and possibly also single-axis interpolation) schemes
    # Accumulate the index values from each of the given arrays, for each of the axes in the first array
    index_vals = {}  # dictionary of index coordinates for each axis
    index_float = {}  # determine if the index kind is a floating point type (complex included)
    #metadata = {}
    for xd_arr in xd_list:
        for axis in xd_arr.dims:
            # accumulate dictionary for all dimensions in the entire collection of DataArrays
            if not axis in index_vals:
                index_vals[axis] = xd_arr[axis]
            else:
                index_vals[axis] = np.hstack((index_vals[axis], xd_arr[axis]))
            # also accumulate the attributes (metadata)
            # metadata.update(xd_arr.attrs)
    # get the unique values in increasing numerical order using np.unique for each axis found in the whole set
    for axis in index_vals:
        index_vals[axis] = np.unique(index_vals[axis])
        index_float[axis] = index_vals[axis].dtype.kind in 'fc'
    # interpolate each of the DataArray objects onto the new grid (for whatever axes it does have)
    xd_return_list = []
    for xd_arr in xd_list:
        # Create the linear interpolator
        interpolator = RegularGridInterpolator([xd_arr[axis].values for axis in xd_arr.dims],
                                               xd_arr.values,
                                               method='linear', bounds_error=False, fill_value=0.0)
        # Build the full merged grid for this array's axes only.
        merged_coordinates = np.meshgrid(*[index_vals[axis] for axis in xd_arr.dims],
                                         indexing='ij')
        interp_vals = interpolator(tuple(merged_coordinates))
        # reconstruct the xarray.DataArray with interpolated data
        xd_arr_interp = xarray.DataArray(interp_vals, [(axis, index_vals[axis]) for axis in xd_arr.dims],
                                         name=xd_arr.name, attrs=xd_arr.attrs)
        xd_arr_interp.attrs = xd_arr.attrs  # transfer the attributes verbatim
        xd_return_list.append(xd_arr_interp)
        # There may be axes not present in a specific DataArray. These are omitted for that DataArray and
        # simply allowed to broadcast when performing operations with other DataArrays
    return xd_return_list
def xd_interp_axis_to(from_xd, to_xd, axis, interp_method='linear', bounds_error=False, fill_value=0.0,
                      assume_sorted=True):
    """ Interpolate a single xarray.DataArray axis from one set of coordinates to another. Since interpolation
    occurs along a single axis, there is more flexibility in the method of interpolation that can be used.
    The `scipy.interpolate.interp1d` class is used to perform the interpolation.
    :param from_xd: The xarray.DataArray object with originating data.
    :param to_xd: The xarray.DataArray object that will provide the new coordinates to which the interpolation will
        be carried out.
    :param axis: The name of the axis along which to perform the interpolation.
    :param interp_method: Is the kind of interpolation to perform. Options are as for scipy.interpolate.interp1d,
        namely 'linear', 'nearest', 'zero', 'slinear', 'quadratic' and 'cubic', where 'slinear', 'quadratic' and
        'cubic' produce spline interpolation of first, second or third order respectively. The default is 'linear'.
    :param bounds_error: passed to interp1d; if False, out-of-range points take fill_value.
    :param fill_value: value used outside the original coordinate range (default 0.0).
    :param assume_sorted: passed to interp1d; set False if the source axis is unsorted.
    :return: New xarray.DataArray with xd_from interpolated along given axis to coordinates provided by xd_to in
        the given axis. Attributes are copied from from_xd.
    """
    from_dims = from_xd.dims
    # Deep-copy the coordinate arrays so the original DataArray is untouched.
    from_axes = [copy.deepcopy(from_xd[this_axis]) for this_axis in from_dims]
    interp_func = interp1d(from_xd[axis].data, from_xd.data, kind=interp_method, axis=from_xd.get_axis_num(axis),
                           copy=False, bounds_error=bounds_error, fill_value=fill_value, assume_sorted=assume_sorted)
    new_data = interp_func(to_xd[axis].data)  # Interpolate along the named axis
    # Now reconstruct the xd_from DataArray
    from_axes[from_xd.get_axis_num(axis)] = to_xd[axis]  # Grab the new axis from the xd_to DataArray
    new_from_xd = xarray.DataArray(new_data, from_axes, attrs=from_xd.attrs)  # Use attributes from original
    return new_from_xd
def xd_harmonised_product(xd_list,raise_exception=True):
""" Compute the harmonised product of a number of N-dimensional data arrays.
The DataArrays are interpolated onto a common set of coordinates and then the product of the DataArrays
is computed, returning a single DataArray with merged attributes. Unit mismatches are flagged with warnings.
:param xd_list: List/tuple of xarray.DataArray objects to be multiplied
:return: Product of xarray.DataArray objects with merged attributes
:except UnitMismatch, MissingUnits:
"""
# TODO : This function to be checked to correct "var_units" mistake
#main_attrs = {} # Will accumulate all main attributes here - not sure what to do with units ?
unit_dict = {} # Dictionary of units
axis_attrs = {} # Dictionary of axis attribute dictionaries
# Check units and merge metadata
# have to merge attributes for main data and all axes individually
for xd_arr in xd_list:
#main_attrs.update(xd_arr.attrs)
for axis in xd_arr.dims:
if axis in axis_attrs:
axis_attrs[axis].update(xd_arr[axis].attrs) # Accumulate the attributes for each axis
else:
axis_attrs[axis] = xd_arr[axis].attrs
if not axis in unit_dict:
if 'units' in xd_arr[axis].attrs:
unit_dict[axis] = xd_arr[axis].attrs['units']
else:
if raise_exception:
raise MissingUnits('Units not found for ' + xd_arr.name + ' on axis ' + axis)
else:
print(MissingUnits('Units not found for ' + xd_arr.name + ' on axis ' + axis))
elif ('units' in xd_arr[axis].attrs['units']) and (unit_dict[axis] != xd_arr[axis].attrs['units']):
# TODO : Consider throwing a unit mismatch error, or converting to desired units with pint
warnings.warn('Unit mismatch found when taking xarray.DataArray harmonised product.')
if raise_exception:
raise UnitMismatch('Unit mismatch encountered for ' + xd_arr.name + ' on axis ' + axis)
else:
print(UnitMismatch('Unit mismatch encountered for ' + xd_arr.name + ' on axis ' + axis))
else:
if raise_exception:
raise MissingUnits('Units not found for ' + xd_arr.name + ' on axis ' + axis)
else:
print(MissingUnits('Units not found for ' + xd_arr.name + ' on axis ' + axis))
xd_factors = xd_harmonise_interp(xd_list)
xd_product = functools.reduce(mul, xd_factors) # take the product by reducing the list using the | |
(""'vacation corrections', 7)
, (""'remaining vacation', 8)
, (""'additional_submitted', 9)
, (""'flexi_time', 10)
, (""'flexi_sub', 11)
, (""'flexi_max', 12)
, (""'flexi_rem', 13)
, (""'special_leave', 14)
, (""'special_sub', 15)
)
header_classes = \
{ 'remaining vacation' : 'emphasized'
}
def __init__ (self, db, request, utils, is_csv = False) :
timestamp = time.time ()
self.htmldb = db
self.need_period = False
try :
db = db._db
except AttributeError :
pass
self.db = db
self.uid = db.getuid ()
self.hv = hv = common.user_has_role (
self.db, self.uid, 'HR-vacation', 'Vacation-report')
db.log_info ("vacation_report: %s" % timestamp)
st_accp = db.leave_status.lookup ('accepted')
st_cnrq = db.leave_status.lookup ('cancel requested')
st_subm = db.leave_status.lookup ('submitted')
self.request = request
self.utils = utils
filterspec = request.filterspec
now = Date ('.')
year = now.get_tuple () [0]
d = filterspec.get ('date')
fields = dict (self.fields)
# By default show obsolete users, users might have a time-limited
# contract, they would be confused if they can't see vacation
# report
self.show_obsolete = True
if 'show_obsolete' in filterspec :
self.show_obsolete = filterspec ['show_obsolete'] == 'yes'
opt = \
( 'approved_submissions'
, 'additional_submitted'
, 'flexi_time'
, 'flexi_sub'
, 'flexi_max'
, 'flexi_rem'
, 'special_leave'
, 'special_sub'
, 'department'
)
for k in opt :
if k not in request.columns :
del fields [k]
self.fields = sorted \
(fields.keys (), key = lambda x : fields [x])
if d :
try :
if ';' in d :
start, end = d.split (';')
if not start :
start = None
else :
start = Date (start)
if end :
end = Date (end)
else :
end = Date ('%s-12-31' % year)
else :
start = end = Date (d)
except ValueError :
start = end = Date ('%s-12-31' % year)
else :
start = end = Date ('%s-12-31' % year)
self.start = start
self.end = end
soy = common.start_of_year (end)
users = sum_common.get_users (db, filterspec, soy, end)
min_user_date = {}
max_user_date = {}
user_vc = {}
self.user_ctypes = {}
for u in users.keys () :
srt = [('+', 'date')]
vcs = db.vacation_correction.filter \
(None, dict (user = u, absolute = True), sort = srt)
if not vcs :
del users [u]
continue
ctypes = {}
for id in vcs :
vc = db.vacation_correction.getnode (id)
if vc.contract_type not in ctypes :
ctypes [vc.contract_type] = vc
for ctype in ctypes :
vc = ctypes [ctype]
if start :
md = min_user_date [(u, ctype)] = max (vc.date, start)
else :
md = min_user_date [(u, ctype)] = vc.date
mind = md
if start == end :
mind = Date ('%s-01-01' % start.year)
if end and min_user_date [(u, ctype)] > end :
continue
dyn = vacation.vac_get_user_dynamic (db, u, ctype, md)
if ( not dyn or (dyn.valid_from and dyn.valid_from > end)
or not self.permission_ok (u, dyn)
) :
continue
last_dyn = ldyn = dyn
while ldyn and ldyn.valid_to and ldyn.valid_to < end :
ldyn = vacation.vac_next_user_dynamic (db, ldyn)
if ldyn :
last_dyn = ldyn
if last_dyn.valid_to :
max_user_date [(u, ctype)] = last_dyn.valid_to
if last_dyn.valid_to < mind :
continue
user_vc [(u, ctype)] = vc
if u not in self.user_ctypes :
self.user_ctypes [u] = []
self.user_ctypes [u].append (ctype)
self.users = sorted \
( users.keys ()
, key = lambda x : db.user.get (x, 'username')
)
db.log_info ("vacation_report: users: %s" % (time.time () - timestamp))
self.values = values = {}
year = Interval ('1y')
for u in self.users :
if u not in self.user_ctypes :
continue
for ctype in self.user_ctypes [u] :
vc = user_vc [(u, ctype)]
yday, pd, carry, ltot = vacation.vacation_params \
(db, u, min_user_date [(u, ctype)], vc, hv)
ld = None
d = yday
if hv :
d = min (d, self.end)
if ld is None :
ld = pd
# Round up to next multiple of 0.5 days
while d and d <= end :
# Find latest vacation correction at or before d
dts = common.pretty_range (None, d)
srt = [('-', 'date')]
vcd = dict \
( user = u
, absolute = True
, contract_type = ctype
, date = dts
)
vci = db.vacation_correction.filter (None, vcd, sort = srt)
if vci [0] != vc.id :
vc = db.vacation_correction.getnode (vci [0])
yday, pd, carry, ltot = vacation.vacation_params \
(db, u, min_user_date [(u, ctype)], vc, hv)
rcarry = carry
if not hv :
rcarry = ceil (carry)
if (u, ctype) in max_user_date :
if max_user_date [(u, ctype)] <= ld :
break
fd = ld
if fd.year != d.year :
fd = fd + day
container = Day_Container (d)
container ['is_obsolete'] = False
uname = self.linked_user (u)
if not ctype :
container ['user'] = uname
else :
lct = self.linked_ctype (ctype)
lst = HTML_List (' / ')
lst.append (uname)
lst.append (lct)
container ['user'] = lst
dep = {}
dyn = vacation.vac_get_user_dynamic (db, u, ctype, d)
ent = {}
lastdyn = dyn
while (dyn and dyn.valid_from < d) :
lastdyn = dyn
ent [dyn.vacation_yearly] = 1
dep [dyn.department] = True
dyn = vacation.vac_next_user_dynamic (db, dyn)
container ['is_obsolete'] = self.is_obsolete (dyn, d)
v = list (sorted (ent.keys ()))
if 'department' in self.fields :
deps = \
(db.department.get (d, 'name') for d in dep.keys ())
container ['department'] = ', '.join (sorted (deps))
# Use '..' as separator to prevent excel from computing
# difference if exported to excel
if len (v) > 1 :
container ['yearly entitlement'] = \
'%s .. %s' % (v [0], v [-1])
elif len (v) == 1 :
container ['yearly entitlement'] = v [0]
else :
container ['yearly entitlement'] = 0.0
container ['carry forward'] = rcarry
cons = vacation.consolidated_vacation \
(db, u, ctype, d, to_eoy = not hv)
et = cons - ltot + carry
yp = cons - ltot
# new carry and remaining vacation
carry = rv = vacation.remaining_vacation \
(db, u, ctype, d, cons, to_eoy = not hv)
if not hv :
et = ceil (et)
yp = ceil (yp)
rc = ceil (cons)
rv = ceil (carry)
container ['entitlement total'] = et
container ['yearly prorated'] = yp
container ['remaining vacation'] = rv
val = vacation.vacation_time_sum (db, u, ctype, fd, d)
r = ('HR-vacation', 'HR-leave-approval')
if common.user_has_role (self.db, self.uid, *r) :
dt = common.pretty_range (ld, d)
url = ( '%sleave_submission?@template=approve_hr&'
'@filter=user,first_day&@startwith=0&'
'@pagesize=20&'
)
url %= db.config.TRACKER_WEB
url += urlencode (dict (user = u, first_day = dt))
container ['approved days'] = HTML_Link (val, url)
else :
container ['approved days'] = val
if 'additional_submitted' in self.fields :
container ['additional_submitted'] = \
vacation.vacation_submission_days \
(db, u, ctype, fd, d, st_subm)
if 'flexi_time' in self.fields :
container ['flexi_time'] = \
vacation.flexitime_submission_days \
(db, u, ctype, fd, d, st_accp, st_cnrq)
if 'flexi_sub' in self.fields :
container ['flexi_sub'] = \
vacation.flexitime_submission_days \
(db, u, ctype, fd, d, st_subm)
if 'flexi_max' in self.fields :
container ['flexi_max'] = \
vacation.flexi_alliquot (db, u, fd, ctype)
if 'flexi_rem' in self.fields :
container ['flexi_rem'] = \
vacation.flexi_remain (db, u, fd, ctype) or ''
if 'special_leave' in self.fields :
container ['special_leave'] = \
vacation.special_submission_days \
(db, u, ctype, fd, d, st_accp, st_cnrq)
if 'special_sub' in self.fields :
container ['special_sub'] = \
vacation.special_submission_days \
(db, u, ctype, fd, d, st_subm)
ltot = cons
if 'approved_submissions' in self.fields :
container ['approved_submissions'] = \
vacation.vacation_submission_days \
(db, u, ctype, fd, d, st_accp, st_cnrq)
vd = common.pretty_range (fd, d)
vcids = db.vacation_correction.filter \
( None
, dict
( user = u
, date = vd
, contract_type = ctype
, absolute = False
)
)
try :
vcs = HTML_List ()
for x in vcids :
item = self.htmldb.vacation_correction.getItem (x)
days = item.days
ep = self.utils.ExtProperty
vcs.append \
( ep
( self.utils, days
, item = item
, is_labelprop = True
)
)
container ['vacation corrections'] = vcs
except AttributeError :
container ['vacation corrections'] = ' + '.join \
(str (db.vacation_correction.get (i, 'days'))
for i in vcids
)
if (u, ctype) not in self.values :
self.values [(u, | |
= re.match("\"(.*)\"", label).group(1) # remove quotation marks
# if label not in labelsToClusterId.keys():
# labelsToClusterId[label] = len(labelsToClusterId) + 1
if label == "Utilities":
selectedVariables.append(varId)
hiddenVarIds.append(1)
elif label == "Information Technology":
selectedVariables.append(varId)
hiddenVarIds.append(2)
hiddenVarIds = numpy.asarray(hiddenVarIds, dtype = numpy.int)
numberOfClusters = numpy.max(hiddenVarIds)
print("selectedVariables = ")
print(selectedVariables)
print("hiddenVarIds = ")
print(hiddenVarIds)
print("numberOfClusters = ", numberOfClusters)
dataVectors = dataVectors[:,selectedVariables]
# assert(False)
print("loaded data successfully")
return dataVectors, hiddenVarIds, numberOfClusters
# checked
# return data matrix with format (number of rows, number of columns) = (number of samples, number of variables)
# in order to normalize each variable use "statHelper.normalizeData(dataVectors)"
# used for example in "The cluster graphical lasso for improved estimation of Gaussian graphical models", 2015
def loadArabidopsisThalianaData_forVariableClustering(pathprefix):
    """Load the Arabidopsis thaliana expression data for variable clustering.

    Each of the 39 lines in the data file describes one variable: a pathway
    label, 5 further description columns, then 118 sample values.

    Returns:
        dataVectors: (118, 39) float matrix, samples x variables.
        hiddenVarIds: (39,) int array of ground-truth cluster ids (1 or 2).
        numberOfClusters: always 2 (Mevalonate / Non-Mevalonate pathway).
    """
    numberOfSamples = 118
    numberOfVariables = 39
    hiddenVarIds = numpy.zeros(numberOfVariables, dtype = numpy.int_)
    dataVectors = numpy.zeros(shape = (numberOfSamples, numberOfVariables))
    numberOfClusters = 2
    filename = pathprefix + "datasets/arabidopsis_thaliana_data.txt"
    labelsToClusterId = {}
    labelsToClusterId["Mevalonatepathway"] = 1
    labelsToClusterId["Non-Mevalonatepathway"] = 2
    nrOfDescriptionCols = 6
    # Fix: open the file in a context manager so the handle is closed
    # deterministically (the original leaked the open file object).
    with open(filename, "r") as f:
        for varId, line in enumerate(f):
            line = line.strip()
            allParts = line.split(" ")
            label = allParts[0]
            assert(len(allParts) == numberOfSamples + nrOfDescriptionCols)
            assert(label in labelsToClusterId.keys())
            for sampleId, num in enumerate(allParts[nrOfDescriptionCols:(numberOfSamples + nrOfDescriptionCols)]):
                dataVectors[sampleId, varId] = float(num)
            hiddenVarIds[varId] = labelsToClusterId[label]
    print("loaded data successfully")
    return dataVectors, hiddenVarIds, numberOfClusters
# def loadOlivettiFaces_forVariableClustering(pathprefix):
# numberOfClusters = 2
# numberOfPictures = numberOfClusters * 10
#
# dataVectors = numpy.load(pathprefix + "datasets/olivettifaces_plain.npy")
#
# dataVectors = dataVectors[0:numberOfPictures, :]
# dataVectors = dataVectors.transpose()
#
# hiddenVarIds = numpy.load(pathprefix + "datasets/olivettifaces_labels.npy")
# hiddenVarIds = hiddenVarIds[0:numberOfPictures]
# hiddenVarIds += 1
#
# numberOfClusters = numpy.max(hiddenVarIds)
# return dataVectors, hiddenVarIds, numberOfClusters
def loadOlivettiFaces_forVariableClustering(pathprefix):
    """Load the first 100 Olivetti face images as clustering variables.

    Pictures become variables (columns of the loaded file become samples
    after the transpose); the person id of each picture is the hidden
    cluster label, shifted to start at 1.
    """
    numberOfClusters = 10
    numberOfPictures = 10 * numberOfClusters
    pixelMatrix = numpy.load(pathprefix + "datasets/olivettifaces_plain.npy")
    print("dataVectors.shape = ", pixelMatrix.shape)
    # Keep only the first 100 pictures and make pictures the variables.
    dataVectors = pixelMatrix[0:numberOfPictures, :].transpose()
    labels = numpy.load(pathprefix + "datasets/olivettifaces_labels.npy")
    # Shift the 0-based person ids so cluster ids start at 1.
    hiddenVarIds = labels[0:numberOfPictures] + 1
    numberOfClusters = numpy.max(hiddenVarIds)
    return dataVectors, hiddenVarIds, numberOfClusters
# from 109th Senate Roll Data at
# http://www.voteview.com/senate109.htm
# encoding used here:
# 0 = no voting
# 1 = voting yes
# -1 = voting no
def loadCongressVotes_forVariableClustering(pathprefix):
    """Load the 109th Senate roll-call votes for variable clustering.

    Vote encoding in the data: 0 = no voting, 1 = yes, -1 = no.
    Party codes in the .mat file (100/200/328) are remapped to the
    compact cluster ids 1 (Democrat), 2 (Republican), 3 (Independent).
    """
    numberOfClusters = 3
    matContent = scipy.io.loadmat(pathprefix + "datasets/senate109_.mat")
    dataVectors = matContent["xVote"].transpose()
    hiddenVarIds = matContent["xPartyMask"][:, 0]
    # Remap the raw party codes to 1..3 cluster ids.
    for partyCode, clusterId in ((200, 2), (100, 1), (328, 3)):
        hiddenVarIds[hiddenVarIds == partyCode] = clusterId
    nameList = loadPoliticianNames(pathprefix)
    assert(len(nameList) == hiddenVarIds.shape[0])
    return dataVectors, hiddenVarIds, numberOfClusters, nameList
def showClusteredNamesCongreeVotes(clusterAssignments, hiddenVarIds, nameList):
    """Print the senators of each cluster, annotated with their party.

    Output lines are LaTeX table rows ("name (party) \\\\"); party letters
    come from the ground-truth ids (1=D, 2=R, 3=I).
    """
    assert(clusterAssignments.shape[0] == hiddenVarIds.shape[0])
    assert(numpy.min(clusterAssignments) == 1)
    assert(numpy.min(hiddenVarIds) == 1)
    assert(clusterAssignments.shape[0] == len(nameList))
    partyNameMap = {1: "D", 2: "R", 3: "I"}
    for clusterId in range(1, numpy.max(clusterAssignments) + 1):
        print("********************")
        print("Cluster ", clusterId)
        for name, party, assigned in zip(nameList, hiddenVarIds, clusterAssignments):
            if assigned == clusterId:
                print(name + " (" + str(partyNameMap[party]) + ") \\\\")
    return
def loadPoliticianNames(pathprefix):
    """Extract the senator names from the fixed-format sen109kh.ord file.

    Each line starts with a numeric record id, a one-digit code fused to
    the state name, another numeric run fused to the name, then the vote
    string; only the name field is kept.
    """
    ordFile = pathprefix + "datasets/sen109kh.ord"
    linePattern = re.compile(r'(\d*)\s+\d(\w+)\s+\d+(\w+)\s+(\d+)')
    nameList = []
    with open(ordFile, "r") as f:
        for rawLine in f:
            matchObj = linePattern.match(rawLine.strip())
            # group(2) would be the state; only the name (group 3) is used.
            nameList.append(matchObj.group(3).strip())
    return nameList
# line = "1091563368 0WYOMING 20001THOMAS 911116661111666666166666661616"
# line = "1099991099 0USA 20000BUSH 911999"
# matchObj = re.match(r'(\d*)\s+\d(\w+)\s+\d+(\w+)\s+(\d+)', line)
# state = matchObj.group(2)
# name = matchObj.group(3)
# fullName = name + " (" + state + ")"
# print fullName
# pathprefix = "../../"
# loadOlivettiFaces_forVariableClustering(pathprefix)
# import sklearn.datasets
# d = sklearn.datasets.fetch_olivetti_faces()
# print d.data
# numpy.save("../../olivettifaces_plain", d.data)
# numpy.save("../../olivettifaces_labels", d.target)
# pathprefix = "../../"
# d = scipy.io.loadmat(pathprefix + "datasets/senate109_.mat")
# print d.keys()
# print d["xVote"].shape
# # print d["names"].shape
# print d["xPartyMask"].shape
# # print d["names"][0]
# # print d["xVote"][101]
# hiddenVarIds = d["xPartyMask"][:,0]
# hiddenVarIds[hiddenVarIds == 200] = 2
# hiddenVarIds[hiddenVarIds == 100] = 1
# hiddenVarIds[hiddenVarIds == 328] = 3
# "geneExpression" from r pckage BDgraph
def loadGeneExpression(pathprefix):
    """Load the 60x100 "geneExpression" matrix (from the R package BDgraph).

    The CSV has one header row of variable names and one leading id column
    per row; both are stripped from the returned matrix.

    Returns:
        dataVectors: (60, 100) float matrix, samples x variables.
        clusterLabels: dummy all-ones labels (no ground truth available).
        numberOfClusters: always 1.
    """
    variableNames = None
    filename = pathprefix + "datasets/geneExpression.csv"
    numberOfSamples = 60
    numberOfVariables = 100
    dataVectors = numpy.zeros(shape = (numberOfSamples, numberOfVariables))
    with open(filename, 'r') as f:
        for lineNr, elemsInLine in enumerate(csv.reader(f)):
            assert(len(elemsInLine) == numberOfVariables + 1)
            # Drop the leading id column.
            allRelElems = elemsInLine[1:numberOfVariables + 1]
            if lineNr == 0:
                # Header row: variable names.
                variableNames = numpy.asarray(allRelElems)
            else:
                dataVectors[lineNr - 1] = numpy.asarray(
                    [float(elem) for elem in allRelElems])
    # dummy cluster labels
    # Fix: numpy.int was removed in NumPy 1.24; use the builtin int dtype.
    clusterLabels = numpy.ones(numberOfVariables, dtype = int)
    return dataVectors, clusterLabels, 1
# TOWN TOWNNO TRACT LON LAT MEDV CMEDV CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX PTRATIO B LSTAT
# data from "boston.c" in R library "spData"
def loadBostonHousing(pathprefix):
    """Load selected columns of the Boston housing data (R library spData).

    Only the columns listed in usedVariables are kept; the header row maps
    column names to their position in the CSV.

    Returns:
        dataVectors: (506, len(usedVariables)) float matrix.
        clusterLabels: dummy all-ones labels (no ground truth available).
        numberOfClusters: always 1.
    """
    filename = pathprefix + "datasets/bostonHousing.csv"
    usedVariables = ["CRIM", "ZN", "INDUS", "NOX", "RM", "AGE", "DIS", "RAD", "TAX", "PTRATIO", "B", "LSTAT"]
    numberOfSamples = 506
    numberOfVariables = len(usedVariables)
    dataVectors = numpy.zeros(shape = (numberOfSamples, numberOfVariables))
    nameToCSVIdMapping = {}
    with open(filename, 'r') as f:
        for lineNr, elemsInLine in enumerate(csv.reader(f)):
            if lineNr == 0:
                # Header row: remember where each wanted column lives.
                for idInCSV, variableName in enumerate(elemsInLine):
                    if variableName in usedVariables:
                        nameToCSVIdMapping[variableName] = idInCSV
            else:
                selectedEntries = numpy.zeros(numberOfVariables)
                for i, variableName in enumerate(usedVariables):
                    assert(variableName in nameToCSVIdMapping.keys())
                    selectedEntries[i] = float(elemsInLine[nameToCSVIdMapping[variableName]])
                dataVectors[lineNr - 1] = selectedEntries
    # dummy cluster labels
    # Fix: numpy.int was removed in NumPy 1.24; use the builtin int dtype.
    clusterLabels = numpy.ones(numberOfVariables, dtype = int)
    return dataVectors, clusterLabels, 1
# Gene function regulations data from <NAME> used in "Robust Sparse Gaussian Graphical Modeling"
def loadGeneRegulations(pathprefix):
    """Load the 445x11 gene-regulations matrix used in
    "Robust Sparse Gaussian Graphical Modeling".

    The CSV has one header row of gene names and one leading id column per
    row; both are stripped from the returned matrix.

    Returns:
        dataVectors: (445, 11) float matrix, samples x variables.
        clusterLabels: dummy all-ones labels (no ground truth available).
        numberOfClusters: always 1.
    """
    filename = pathprefix + "datasets/gene_regulations.csv"
    numberOfSamples = 445
    numberOfVariables = 11
    dataVectors = numpy.zeros(shape = (numberOfSamples, numberOfVariables))
    with open(filename, 'r') as f:
        for lineNr, elemsInLine in enumerate(csv.reader(f)):
            assert(len(elemsInLine) == numberOfVariables + 1)
            # Drop the leading id column.
            allRelElems = elemsInLine[1:numberOfVariables + 1]
            if lineNr == 0:
                # Header row: gene names.
                variableNames = numpy.asarray(allRelElems)
            else:
                dataVectors[lineNr - 1] = numpy.asarray(
                    [float(elem) for elem in allRelElems])
    # dummy cluster labels
    # Fix: numpy.int was removed in NumPy 1.24; use the builtin int dtype.
    clusterLabels = numpy.ones(numberOfVariables, dtype = int)
    return dataVectors, clusterLabels, 1
# BASE_FOLDER = "/Users/danielandrade/workspace/StanTest/"
# dataVectorsAllOriginal, hiddenVarIds, numberOfClusters = loadGeneRegulations(BASE_FOLDER)
#
# print "dataVectorsAllOriginal.shape = ", dataVectorsAllOriginal.shape
# dataVectorsAllOriginal
#
# numpy.savetxt(BASE_FOLDER + "datasets/test.csv", dataVectorsAllOriginal, delimiter=",")
#
# assert(False)
# get node labels for "aviationSuperLargeSmallVar"
def getAviationNodeLabels(pathprefix):
    """Return the 16 node labels used for "aviationSuperLargeSmallVar".

    Loads the full description array and keeps only the variables that the
    experiment selected (same hard-coded id list as the experiment setup).
    """
    descriptions = numpy.load(
        pathprefix + "/datasets/relevantDescriptionsAviationData_allOver500.npy")
    selectedIds = getIdsFromString("54 6 56 61 2 55 18 19 58 8 4 3 1 63 9 64")
    selectedDescriptions = descriptions[selectedIds]
    assert(selectedDescriptions.shape[0] == 16)
    return selectedDescriptions
def getGeneRegulationsNodeLabels(pathprefix):
    """Return the 11 variable (gene) names from the regulations CSV header.

    Only the first row is read; the leading id column is dropped.
    Returns None if the file is empty.
    """
    numberOfVariables = 11
    variableNames = None
    with open(pathprefix + "datasets/gene_regulations.csv", 'r') as f:
        for headerRow in csv.reader(f):
            variableNames = numpy.asarray(headerRow[1:numberOfVariables + 1])
            break
    return variableNames
def showGeneRegulationsClusteringResult(pathprefix, clusteringResult):
    """Print the gene names belonging to each cluster of clusteringResult.

    clusteringResult is a 1-based integer cluster assignment per variable
    (or None, in which case nothing is printed).
    """
    if clusteringResult is None:
        return
    # Fix: the helper defined in this module is getGeneRegulationsNodeLabels;
    # the previous call to getGeneRegulationsNodeNames raised NameError.
    variableNames = getGeneRegulationsNodeLabels(pathprefix)
    print(variableNames)
    numberOfClusters = numpy.max(clusteringResult)
    for clusterId in range(1, numberOfClusters + 1, 1):
        ids = numpy.where(clusteringResult == clusterId)[0]
        print("Cluster " + str(clusterId) + " = " + ",".join(variableNames[ids]))
        assert(len(ids) >= 1)
def colorMFClustering(clusteringResult):
# \textbf{\color{blue}{2}, \color{red}{2}, \color{brown}{2}, \color{teal}{2}}
USbondFunds = 13
USstockFunds = 30
balancedFunds = 7
internationalStockFunds = 9
coloredClusteringOutput = "\\color{blue}{U.S. bond funds} & \\textbf{ "
coloredClusteringOutput += "\\color{blue}{ "
for varId in range(0, USbondFunds, 1):
coloredClusteringOutput += str(clusteringResult[varId]) + " "
coloredClusteringOutput += " } } \\\\"
print(coloredClusteringOutput)
coloredClusteringOutput = "\\color{red}{U.S. stock funds} & \\textbf{ "
coloredClusteringOutput += "\\color{red}{ "
for varId in range(USbondFunds, USbondFunds + USstockFunds, 1):
coloredClusteringOutput += str(clusteringResult[varId]) + " "
coloredClusteringOutput += " } } \\\\"
print(coloredClusteringOutput)
coloredClusteringOutput = "\\color{brown}{balanced funds} & \\textbf{ "
coloredClusteringOutput += "\\color{brown}{ "
for varId in range(USbondFunds + USstockFunds, balancedFunds + USbondFunds + USstockFunds, 1):
coloredClusteringOutput += str(clusteringResult[varId]) + " "
coloredClusteringOutput += " } } \\\\"
print(coloredClusteringOutput)
coloredClusteringOutput = "\\color{teal}{international stock funds} | |
#!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """Flask endpoint: parse the incoming api.ai JSON request, dispatch it
    through processRequest, and return the handler's result as JSON."""
    # force=True: parse the body as JSON even without a JSON content type;
    # silent=True: yield None instead of raising on a malformed body.
    req = request.get_json(silent=True, force=True)
    print("Request:")
    print(json.dumps(req, indent=4))
    res = processRequest(req)
    res = json.dumps(res, indent=4)
    # print(res)
    r = make_response(res)
    # api.ai expects an explicit JSON content type on the response.
    r.headers['Content-Type'] = 'application/json'
    return r
def processRequest(req):
    """Dispatch an api.ai webhook request to the handler matching its
    action and return the webhook response dict ({} for unknown actions
    or when no weather query could be built)."""
    action = req.get("result").get("action")
    if action == "yahooWeatherForecast":
        yql_query = makeYqlQuery(req)
        if yql_query is None:
            return {}
        baseurl = "https://query.yahooapis.com/v1/public/yql?"
        yql_url = baseurl + urlencode({'q': yql_query}) + "&format=json"
        data = json.loads(urlopen(yql_url).read())
        return makeWebhookResult(data)
    if action == "getjoke":
        data = json.loads(urlopen("http://api.icndb.com/jokes/random").read())
        return makeWebhookResultForGetJoke(data)
    # The remaining actions only consult the request parameters.
    parameters = req.get("result").get("parameters")
    if action == "layerabout":
        return makeWebhookResultLayerAbout(parameters.get("layer"))
    if action == "layer4_congestion":
        return congestion_control_layer4(parameters.get("congestion_control"))
    if action == "layer2_congestion":
        return congestion_control_layer2(parameters.get("congestion_control"))
    if action == "get_protocol_spec":
        return prot_info(parameters.get("protocols"))
    if action == "get_protocol_spec_info":
        return prot_more_info(parameters.get("protocols"))
    if action == "get_protocol_info_more":
        return prot_more_info_more(parameters.get("protocols"),
                                   parameters.get("Information"))
    if action == "get_ipvdiff":
        # Answered with the fixed IP-advantages text, regardless of params.
        return prot_more_info_more("IP", "advantages")
    if action == "get_layer_info_general":
        return layer_general_event()
    if action == "trigger_peer_event":
        return peer_event()
    if action == "p2p_info":
        return p2p_inf(parameters.get("Topologies"))
    return {}
def makeYqlQuery(req):
    """Build the YQL weather query for the request's geo-city parameter,
    or return None when no city was given.

    NOTE(review): the city is spliced into the YQL string unescaped —
    potential injection if the parameter is attacker-controlled.
    """
    city = req.get("result").get("parameters").get("geo-city")
    if city is None:
        return None
    return ("select * from weather.forecast where woeid in "
            "(select woeid from geo.places(1) where text='" + city + "')")
def p2p_inf(topo):
    """Return a webhook response describing the requested p2p topology,
    or a clarifying question when the topology is unknown."""
    topodef = {'p2pv1':'Every node of the overlay knows k > 2 other nodes. Data gets flooded over the edges and every node contains every information.',
               'p2pv2':'Every node contains only a small fraction of the data. Hence rare content is hard to find. This type of p2p is usually deployed via directory servers or flooding with backtracking.',
               'dht':'Distributed Hash-Tables are a structured p2p overlay and utilizes a dynamic number of nodes. I realizes a cyclic data space and since every node knows the address of its logical successor, the complexity of searches is reduced to O(n).',
               'unstructured':'Unstructured peer-to-peer networks do not impose a particular structure on the overlay network by design, but rather are formed by nodes that randomly form connections to each other.',
               'structured':'In structured peer-to-peer networks the overlay is organized into a specific topology, and the protocol ensures that any node can efficiently search the network for a file/resource, even if the resource is extremely rare.'}
    speech = topodef.get(topo, "Could you tell me the p2p form you are interested in again?")
    return {
        "speech": speech,
        "displayText": speech,
        "source": "apiai-weather-webhook-sample"
    }
def peer_event():
    """Return a webhook response that fires the 'peerevent' followup event."""
    message = "peer event was triggered!"
    response = {
        "speech": message,
        "displayText": message,
        "source": "apiai-weather-webhook-sample",
        "followupEvent": {"name": "peerevent", "data": {" ": " "}},
    }
    return response
def layer_general_event():
    """Return a webhook response that fires the 'layergeneraltrigger'
    followup event."""
    message = "Layer general event was triggered!"
    response = {
        "speech": message,
        "displayText": message,
        "source": "apiai-weather-webhook-sample",
        "followupEvent": {"name": "layergeneraltrigger", "data": {" ": " "}},
    }
    return response
def prot_more_info_more(prot, infor):
    """Answer a follow-up question about a protocol.

    'infor' selects the aspect (advantages / issues / alternatives /
    difference); anything else yields a polite fallback reply.
    """
    # Lazy lambdas so unknown aspects never touch the helper functions.
    aspectHandlers = {
        "advantages": lambda: prot_advantages(prot),
        "issues": lambda: prot_disadvantages(prot),
        "alternatives": lambda: prot_alternatives(prot),
        "difference": lambda: prot_diff_udp_tcp(),
    }
    handler = aspectHandlers.get(infor)
    if handler is not None:
        speech = handler()
    else:
        speech = "Mhh I am not quite sure about " + infor + " but I will ask someone and come back to you :) In the mean time we could talk about advantages, issues or alternatives to this protocol or something else altogehter!"
    return {
        "speech": speech,
        "displayText": speech,
        "source": "apiai-weather-webhook-sample"
    }
def prot_diff_udp_tcp():
    """Explain the difference between TCP and UDP traffic."""
    explanation = "There are two types of Internet Protocol (IP) traffic. They are TCP or Transmission Control Protocol and UDP or User Datagram Protocol. TCP is connection oriented – once a connection is established, data can be sent bidirectional. UDP is a simpler, connectionless Internet protocol."
    return explanation
def prot_advantages(prot):
    """Return the canned 'advantages' text for a known protocol.

    Fix: the original indexed the dict directly and crashed with KeyError
    (an HTTP 500 in the webhook) for any protocol not in the table; now an
    unknown protocol gets a clarifying question, consistent with p2p_inf.
    """
    protdef = {'TCP':'The main advantage of TCP is that it offers connection-oriented communication - which means that a communication session or a semi-permanent connection is established before any useful data can be transferred, and where a stream of data is delivered in the same order as it was sent',
               'HTTP':'It s greates adantage is that basically is everywhere on the internet',
               'SMTP':'Although proprietary systems (such as Microsoft Exchange and IBM Notes) and webmail systems (such as Outlook.com, Gmail and Yahoo! Mail) use their own non-standard protocols to access mail box accounts on their own mail servers, all use SMTP when sending or receiving email from outside their own systems.',
               'IMAP':'The main advantage of IMAP would be that one can acces their mails directly on the server',
               'DNS':'The Domain Name System delegates the responsibility of assigning domain names and mapping those names to Internet resources by designating authoritative name servers for each domain. Network administrators may delegate authority over sub-domains of their allocated name space to other name servers. This mechanism provides distributed and fault tolerant service and was designed to avoid a single large central database.',
               'SIP':'SIPs main advantages lies within its capability to singal and control multimedia communication sessions',
               'RTP':'RTPs greates strength is that it is designed for end-to-end, real-time, transfer of streaming media. The protocol provides facilities for jitter compensation and detection of out of sequence arrival in data, which are common during transmissions on an IP network. RTP allows data transfer to multiple destinations through IP multicast.',
               'HTML':'Advantes of HTML are...',
               'IP':'IPv4 provides safeguards to ensure that the IP packet header is error-free. A routing node calculates a checksum for a packet. If the checksum is bad, the routing node discards the packet. Although the Internet Control Message Protocol (ICMP) allows such notification, the routing node is not required to notify either end node of these errors. By contrast, in order to increase performance, and since current link layer technology is assumed to provide sufficient error detection, the IPv6 header has no checksum to protect it.',
               'UDP':'Since UDP is connectionless it is a ton fastern than TCP.',
               'RPC':'The greates advantage of the RPC model is that it implies a level of location transparency, namely that calling procedures is largely the same whether it is local or remote, but usually they are not identical, so local calls can be distinguished from remote calls. Remote calls are usually orders of magnitude slower and less reliable than local calls, so distinguishing them is important.'
               }
    return protdef.get(prot, "Could you tell me the protocol you are interested in again?")
def prot_disadvantages(prot):
protdef = {'TCP':'Some possible issues with TCP are Denial of Service, Connection hijaking and TCP veto.',
'HTTP':'The TRACE method can be used as part of a class of attacks known as cross-site tracing; for that reason, common security advice is for it to be disabled in the server configuration. Microsoft IIS supports a proprietary "TRACK" method, which behaves similarly, and which is likewise recommended to be disabled',
'SMTP':'One cannot delete or access mails directly on the server',
'IMAP':'IMAPs disadvantages would be...',
'DNS':'Several vulnerability issues were discovered and exploited by malicious users. One such issue is DNS cache poisoning, in which data is distributed to caching resolvers under the pretense of being an authoritative origin server, thereby polluting the data store with potentially false information and long expiration times (time-to-live). Subsequently, legitimate application requests may be redirected to network hosts operated with malicious intent.',
'SIP':'Issues with SIP include...',
'RTP':'The most common problems with RTP are...',
'HTML':'Common issues with HTML include...',
'IP':'Various error conditions may occur, such as data corruption, packet loss, duplication and out-of-order delivery. Because | |
CompressionType=None, Guid=None,
GuidHdrLen=None, GuidAttr=[], Ui=None, Ver=None, InputAlign=[], BuildNumber=None, DummyFile=None, IsMakefile=False):
Cmd = ["GenSec"]
if Type:
Cmd += ("-s", Type)
if CompressionType:
Cmd += ("-c", CompressionType)
if Guid:
Cmd += ("-g", Guid)
if DummyFile:
Cmd += ("--dummy", DummyFile)
if GuidHdrLen:
Cmd += ("-l", GuidHdrLen)
#Add each guided attribute
for Attr in GuidAttr:
Cmd += ("-r", Attr)
#Section Align is only for dummy section without section type
for SecAlign in InputAlign:
Cmd += ("--sectionalign", SecAlign)
CommandFile = Output + '.txt'
if Ui:
if IsMakefile:
if Ui == "$(MODULE_NAME)":
Cmd += ('-n', Ui)
else:
Cmd += ("-n", '"' + Ui + '"')
Cmd += ("-o", Output)
if ' '.join(Cmd).strip() not in GenFdsGlobalVariable.SecCmdList:
GenFdsGlobalVariable.SecCmdList.append(' '.join(Cmd).strip())
else:
SectionData = array('B', [0, 0, 0, 0])
SectionData.fromstring(Ui.encode("utf_16_le"))
SectionData.append(0)
SectionData.append(0)
Len = len(SectionData)
GenFdsGlobalVariable.SectionHeader.pack_into(SectionData, 0, Len & 0xff, (Len >> 8) & 0xff, (Len >> 16) & 0xff, 0x15)
SaveFileOnChange(Output, SectionData.tostring())
elif Ver:
Cmd += ("-n", Ver)
if BuildNumber:
Cmd += ("-j", BuildNumber)
Cmd += ("-o", Output)
SaveFileOnChange(CommandFile, ' '.join(Cmd), False)
if IsMakefile:
if ' '.join(Cmd).strip() not in GenFdsGlobalVariable.SecCmdList:
GenFdsGlobalVariable.SecCmdList.append(' '.join(Cmd).strip())
else:
if not GenFdsGlobalVariable.NeedsUpdate(Output, list(Input) + [CommandFile]):
return
GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate section")
else:
Cmd += ("-o", Output)
Cmd += Input
SaveFileOnChange(CommandFile, ' '.join(Cmd), False)
if IsMakefile:
if GlobalData.gGlobalDefines.get("FAMILY") == "MSFT":
Cmd = ['if', 'exist', Input[0]] + Cmd
else:
Cmd = ['test', '-e', Input[0], "&&"] + Cmd
if ' '.join(Cmd).strip() not in GenFdsGlobalVariable.SecCmdList:
GenFdsGlobalVariable.SecCmdList.append(' '.join(Cmd).strip())
elif GenFdsGlobalVariable.NeedsUpdate(Output, list(Input) + [CommandFile]):
GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, Input))
GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate section")
if (os.path.getsize(Output) >= GenFdsGlobalVariable.LARGE_FILE_SIZE and
GenFdsGlobalVariable.LargeFileInFvFlags):
GenFdsGlobalVariable.LargeFileInFvFlags[-1] = True
@staticmethod
def GetAlignment (AlignString):
    """Translate an FDF alignment string into a byte count.

    Recognizes the K (KiB), M (MiB) and G (GiB) suffixes; a plain number
    is returned unchanged and an empty/None value maps to 0.
    """
    if not AlignString:
        return 0
    # Ordered suffix table. rstrip mirrors the historical behaviour of
    # stripping every trailing occurrence of the suffix character.
    for suffix, unit in (('K', 1024), ('M', 1024 * 1024), ('G', 1024 * 1024 * 1024)):
        if AlignString.endswith(suffix):
            return int(AlignString.rstrip(suffix)) * unit
    return int(AlignString)
@staticmethod
def GenerateFfs(Output, Input, Type, Guid, Fixed=False, CheckSum=False, Align=None,
                SectionAlign=None, MakefilePath=None):
    """Build a GenFfs invocation for one FFS file.

    When MakefilePath is given, the command (together with the section and
    copy commands accumulated so far) is recorded in FfsCmdDict for later
    Makefile generation; otherwise GenFfs is executed directly, skipped
    when the output is already newer than all inputs.
    """
    Cmd = ["GenFfs", "-t", Type, "-g", Guid]
    # Alignments GenFfs accepts verbatim; any other value is rounded up to
    # the next entry of this table below.
    mFfsValidAlign = ["0", "8", "16", "128", "512", "1K", "4K", "32K", "64K", "128K", "256K", "512K", "1M", "2M", "4M", "8M", "16M"]
    if Fixed == True:
        Cmd.append("-x")
    if CheckSum:
        Cmd.append("-s")
    if Align:
        if Align not in mFfsValidAlign:
            # Convert to a byte count, then pick the smallest valid
            # alignment that is >= the requested one.
            Align = GenFdsGlobalVariable.GetAlignment (Align)
            for index in range(0, len(mFfsValidAlign) - 1):
                if ((Align > GenFdsGlobalVariable.GetAlignment(mFfsValidAlign[index])) and (Align <= GenFdsGlobalVariable.GetAlignment(mFfsValidAlign[index + 1]))):
                    break
            Align = mFfsValidAlign[index + 1]
        Cmd += ("-a", Align)
    Cmd += ("-o", Output)
    for I in range(0, len(Input)):
        if MakefilePath:
            # NOTE(review): -oi appears to be the Makefile-oriented variant
            # of -i — confirm against the GenFfs tool documentation.
            Cmd += ("-oi", Input[I])
        else:
            Cmd += ("-i", Input[I])
        if SectionAlign and SectionAlign[I]:
            Cmd += ("-n", SectionAlign[I])
    CommandFile = Output + '.txt'
    SaveFileOnChange(CommandFile, ' '.join(Cmd), False)
    GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, Input))
    if MakefilePath:
        # Record the command context once; the per-FFS section/copy command
        # lists are consumed (reset) here.
        if (tuple(Cmd), tuple(GenFdsGlobalVariable.SecCmdList), tuple(GenFdsGlobalVariable.CopyList)) not in GenFdsGlobalVariable.FfsCmdDict:
            GenFdsGlobalVariable.FfsCmdDict[tuple(Cmd), tuple(GenFdsGlobalVariable.SecCmdList), tuple(GenFdsGlobalVariable.CopyList)] = MakefilePath
        GenFdsGlobalVariable.SecCmdList = []
        GenFdsGlobalVariable.CopyList = []
    else:
        if not GenFdsGlobalVariable.NeedsUpdate(Output, list(Input) + [CommandFile]):
            return
        GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate FFS")
@staticmethod
def GenerateFirmwareVolume(Output, Input, BaseAddress=None, ForceRebase=None, Capsule=False, Dump=False,
                           AddressFile=None, MapFile=None, FfsList=None, FileSystemGuid=None):
    """Invoke GenFv to build a firmware volume from the given inputs.

    Args:
        Output: path of the FV file to generate.
        Input: list of GenFv input (inf) files.
        BaseAddress: optional rebase address passed via -r.
        ForceRebase: tri-state; True/False emit -F TRUE/FALSE, None omits
            the flag entirely.
        Capsule: emit -c (capsule image).
        Dump: emit -p (dump mode).
        AddressFile / MapFile: optional output side files (-a / -m).
        FfsList: optional extra FFS files that also count as inputs for the
            up-to-date check. Defaults to no extra files.
        FileSystemGuid: optional file-system GUID (-g).

    Returns:
        None. Skips the tool call when Output is newer than all inputs.
    """
    # None instead of a mutable [] default avoids the shared-default-list
    # pitfall; behavior is unchanged for callers.
    if FfsList is None:
        FfsList = []
    if not GenFdsGlobalVariable.NeedsUpdate(Output, Input + FfsList):
        return
    GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, Input))
    Cmd = ["GenFv"]
    if BaseAddress:
        Cmd += ("-r", BaseAddress)
    # ForceRebase is deliberately tri-state: None means "let GenFv decide".
    if ForceRebase == False:
        Cmd += ("-F", "FALSE")
    elif ForceRebase == True:
        Cmd += ("-F", "TRUE")
    if Capsule:
        Cmd.append("-c")
    if Dump:
        Cmd.append("-p")
    if AddressFile:
        Cmd += ("-a", AddressFile)
    if MapFile:
        Cmd += ("-m", MapFile)
    if FileSystemGuid:
        Cmd += ("-g", FileSystemGuid)
    Cmd += ("-o", Output)
    for I in Input:
        Cmd += ("-i", I)
    GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate FV")
@staticmethod
def GenerateFirmwareImage(Output, Input, Type="efi", SubType=None, Zero=False,
                          Strip=False, Replace=False, TimeStamp=None, Join=False,
                          Align=None, Padding=None, Convert=False, IsMakefile=False):
    """Assemble a GenFw command for the given image and either record it
    in SecCmdList (Makefile mode) or run the tool immediately.
    """
    if not GenFdsGlobalVariable.NeedsUpdate(Output, Input) and not IsMakefile:
        return
    GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, Input))
    Cmd = ["GenFw"]
    if Type.lower() == "te":
        Cmd.append("-t")
    # Valued options, kept in the exact historical order — the resulting
    # command string is used for de-duplication, so order matters.
    for Flag, Value in (("-e", SubType), ("-s", TimeStamp), ("-a", Align), ("-p", Padding)):
        if Value:
            Cmd += (Flag, Value)
    # Boolean switches, again in historical order.
    for Flag, Enabled in (("-z", Zero), ("-l", Strip), ("-r", Replace), ("-j", Join), ("-m", Convert)):
        if Enabled:
            Cmd.append(Flag)
    Cmd += ("-o", Output)
    Cmd += Input
    if IsMakefile:
        CmdString = " ".join(Cmd).strip()
        if CmdString not in GenFdsGlobalVariable.SecCmdList:
            GenFdsGlobalVariable.SecCmdList.append(CmdString)
    else:
        GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate firmware image")
@staticmethod
def GenerateOptionRom(Output, EfiInput, BinaryInput, Compress=False, ClassCode=None,
                      Revision=None, DeviceId=None, VendorId=None, IsMakefile=False):
    """Assemble an EfiRom command for an option ROM and either record it in
    SecCmdList (Makefile mode) or run the tool immediately.
    """
    InputList = []
    Cmd = ["EfiRom"]
    if EfiInput:
        # -ec compresses the EFI images, -e embeds them as-is.
        Cmd.append("-ec" if Compress else "-e")
        for EfiFile in EfiInput:
            Cmd.append(EfiFile)
            InputList.append(EfiFile)
    if BinaryInput:
        Cmd.append("-b")
        for BinFile in BinaryInput:
            Cmd.append(BinFile)
            InputList.append(BinFile)
    # Skip regeneration when the output is newer than every input file.
    if not GenFdsGlobalVariable.NeedsUpdate(Output, InputList) and not IsMakefile:
        return
    GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, InputList))
    # Valued options in the order EfiRom historically received them.
    for Flag, Value in (("-l", ClassCode), ("-r", Revision), ("-i", DeviceId), ("-f", VendorId)):
        if Value:
            Cmd += (Flag, Value)
    Cmd += ("-o", Output)
    if IsMakefile:
        CmdString = " ".join(Cmd).strip()
        if CmdString not in GenFdsGlobalVariable.SecCmdList:
            GenFdsGlobalVariable.SecCmdList.append(CmdString)
    else:
        GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to generate option rom")
@staticmethod
def GuidTool(Output, Input, ToolPath, Options='', returnValue=[], IsMakefile=False):
    """Run (or, in Makefile mode, record) an external GUIDed-section tool.

    NOTE(review): the mutable [] default for ``returnValue`` is part of the
    protocol with CallExternalTool, which only writes the exit code back
    when the caller passes a pre-seeded list — do not "fix" it to None.
    """
    if not GenFdsGlobalVariable.NeedsUpdate(Output, Input) and not IsMakefile:
        return
    GenFdsGlobalVariable.DebugLogger(EdkLogger.DEBUG_5, "%s needs update because of newer %s" % (Output, Input))
    Cmd = [ToolPath]
    Cmd += Options.split(' ')
    Cmd += ("-o", Output)
    Cmd += Input
    if IsMakefile:
        CmdString = " ".join(Cmd).strip()
        if CmdString not in GenFdsGlobalVariable.SecCmdList:
            GenFdsGlobalVariable.SecCmdList.append(CmdString)
    else:
        GenFdsGlobalVariable.CallExternalTool(Cmd, "Failed to call " + ToolPath, returnValue)
@staticmethod
def CallExternalTool (cmd, errorMess, returnValue=[]):
    """Execute a build tool through the shell, logging output and failures.

    When the caller passes ``returnValue`` as a list pre-seeded with a
    non-zero element, the tool's exit code is written into returnValue[0]
    and no error is raised; otherwise a non-zero exit aborts the build via
    EdkLogger.error.
    """
    if type(cmd) not in (tuple, list):
        GenFdsGlobalVariable.ErrorLogger("ToolError! Invalid parameter type in call to CallExternalTool")
    if GenFdsGlobalVariable.DebugLevel != -1:
        # Forward the global debug level to the tool itself.
        cmd += ('--debug', str(GenFdsGlobalVariable.DebugLevel))
        GenFdsGlobalVariable.InfLogger (cmd)
    if GenFdsGlobalVariable.VerboseMode:
        cmd += ('-v',)
        GenFdsGlobalVariable.InfLogger (cmd)
    else:
        # Quiet mode: print a '#' progress marker, wrapping the line every
        # SharpNumberPerLine markers.
        stdout.write ('#')
        stdout.flush()
        GenFdsGlobalVariable.SharpCounter = GenFdsGlobalVariable.SharpCounter + 1
        if GenFdsGlobalVariable.SharpCounter % GenFdsGlobalVariable.SharpNumberPerLine == 0:
            stdout.write('\n')
    try:
        # shell=True with a joined string; cmd elements are build-generated
        # paths/flags, not untrusted input.
        PopenObject = Popen(' '.join(cmd), stdout=PIPE, stderr=PIPE, shell=True)
    except Exception as X:
        EdkLogger.error("GenFds", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0]))
    (out, error) = PopenObject.communicate()
    # communicate() normally sets returncode; wait() is a safety net.
    while PopenObject.returncode is None:
        PopenObject.wait()
    if returnValue != [] and returnValue[0] != 0:
        #get command return value
        returnValue[0] = PopenObject.returncode
        return
    if PopenObject.returncode != 0 or GenFdsGlobalVariable.VerboseMode or GenFdsGlobalVariable.DebugLevel != -1:
        GenFdsGlobalVariable.InfLogger ("Return Value = %d" % PopenObject.returncode)
        GenFdsGlobalVariable.InfLogger(out.decode(encoding='utf-8', errors='ignore'))
        GenFdsGlobalVariable.InfLogger(error.decode(encoding='utf-8', errors='ignore'))
        if PopenObject.returncode != 0:
            print("###", cmd)
            EdkLogger.error("GenFds", COMMAND_FAILURE, errorMess)
@staticmethod
def VerboseLogger (msg):
    """Forward msg to EdkLogger's verbose-level log."""
    EdkLogger.verbose(msg)
@staticmethod
def InfLogger (msg):
    """Forward msg to EdkLogger's info-level log."""
    EdkLogger.info(msg)
@staticmethod
def ErrorLogger (msg, File=None, Line=None, ExtraData=None):
    """Report a GenFds error through EdkLogger (aborts per EdkLogger policy)."""
    EdkLogger.error('GenFds', GENFDS_ERROR, msg, File, Line, ExtraData)
@staticmethod
def DebugLogger (Level, msg):
    """Forward msg to EdkLogger's debug log at the given level."""
    EdkLogger.debug(Level, msg)
## MacroExtend()
#
# @param Str String that may contain macro
# @param MacroDict Dictionary that contains macro value pair
#
@staticmethod
def MacroExtend (Str, MacroDict=None, Arch=DataType.TAB_COMMON):
    """Expand build macros such as $(WORKSPACE) inside Str.

    Args:
        Str: string possibly containing macros; None is passed through.
        MacroDict: optional extra macro/value pairs that extend (and may
            override) the built-in set. Defaults to no extra macros.
        Arch: architecture used to resolve $(OUTPUT_DIRECTORY).

    Returns:
        The string with all known macros replaced, or None if Str is None.
    """
    if Str is None:
        return None
    # None instead of a mutable {} default avoids the shared-default-dict
    # pitfall; behavior is unchanged for callers.
    if MacroDict is None:
        MacroDict = {}
    Dict = {'$(WORKSPACE)': GenFdsGlobalVariable.WorkSpaceDir,
            '$(TARGET)': GenFdsGlobalVariable.TargetName,
            '$(TOOL_CHAIN_TAG)': GenFdsGlobalVariable.ToolChainTag,
            '$(SPACE)': ' '
            }
    # $(OUTPUT_DIRECTORY) is arch-dependent; fall back to the first arch
    # for TAB_COMMON or unknown arches.
    if Arch != DataType.TAB_COMMON and Arch in GenFdsGlobalVariable.ArchList:
        OutputDir = GenFdsGlobalVariable.OutputDirFromDscDict[Arch]
    else:
        OutputDir = GenFdsGlobalVariable.OutputDirFromDscDict[GenFdsGlobalVariable.ArchList[0]]
    Dict['$(OUTPUT_DIRECTORY)'] = OutputDir
    if MacroDict:
        Dict.update(MacroDict)
    for key in Dict:
        if Str.find(key) >= 0:
            Str = Str.replace (key, Dict[key])
    # $(ARCH) is only resolvable when the build targets a single arch.
    if Str.find('$(ARCH)') >= 0:
        if len(GenFdsGlobalVariable.ArchList) == 1:
            Str = Str.replace('$(ARCH)', GenFdsGlobalVariable.ArchList[0])
        else:
            EdkLogger.error("GenFds", GENFDS_ERROR, "No way to determine $(ARCH) for %s" % Str)
    return Str
## GetPcdValue()
#
# @param PcdPattern pattern that labels a PCD.
#
@staticmethod
def GetPcdValue (PcdPattern):
    """Look up the default value of a FixedAtBuild VOID* PCD.

    PcdPattern is either "TokenSpace.TokenName" or the wrapped form
    "PCD(TokenSpace.TokenName)". The active platform is searched first,
    then its packages, per architecture. Returns '' when not found.
    """
    if PcdPattern is None:
        return None
    # Accept both the bare dotted name and the PCD(...) wrapper syntax.
    if PcdPattern.startswith('PCD('):
        PcdPair = PcdPattern[4:].rstrip(')').strip().split('.')
    else:
        PcdPair = PcdPattern.strip().split('.')
    TokenSpace = PcdPair[0]
    TokenCName = PcdPair[1]
    for Arch in GenFdsGlobalVariable.ArchList:
        # Platform-level PCD settings are checked before package defaults.
        Platform = GenFdsGlobalVariable.WorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
        PcdDict = Platform.Pcds
        for Key in PcdDict:
            PcdObj = PcdDict[Key]
            if (PcdObj.TokenCName == TokenCName) and (PcdObj.TokenSpaceGuidCName == TokenSpace):
                # Only FixedAtBuild VOID* PCDs are legal in this context.
                if PcdObj.Type != DataType.TAB_PCDS_FIXED_AT_BUILD:
                    EdkLogger.error("GenFds", GENFDS_ERROR, "%s is not FixedAtBuild type." % PcdPattern)
                if PcdObj.DatumType != DataType.TAB_VOID:
                    EdkLogger.error("GenFds", GENFDS_ERROR, "%s is not VOID* datum type." % PcdPattern)
                return PcdObj.DefaultValue
        for Package in GenFdsGlobalVariable.WorkSpace.GetPackageList(GenFdsGlobalVariable.ActivePlatform,
                                                                     Arch,
                                                                     GenFdsGlobalVariable.TargetName,
                                                                     GenFdsGlobalVariable.ToolChainTag):
            PcdDict = Package.Pcds
            for Key in PcdDict:
                PcdObj = PcdDict[Key]
                if (PcdObj.TokenCName == TokenCName) and (PcdObj.TokenSpaceGuidCName == TokenSpace):
                    if PcdObj.Type != DataType.TAB_PCDS_FIXED_AT_BUILD:
                        EdkLogger.error("GenFds", GENFDS_ERROR, "%s is not FixedAtBuild type." % PcdPattern)
                    if PcdObj.DatumType != DataType.TAB_VOID:
                        EdkLogger.error("GenFds", GENFDS_ERROR, "%s is not VOID* datum type." % PcdPattern)
                    return PcdObj.DefaultValue
    return ''
## FindExtendTool()
#
# Find location of tools to process data
#
# @param KeyStringList Filter for inputs of section generation
# @param CurrentArchList Arch list
# @param NameGuid The Guid name
#
def FindExtendTool(KeyStringList, CurrentArchList, NameGuid):
ToolDb = ToolDefDict(GenFdsGlobalVariable.ConfDir).ToolsDefTxtDatabase
# if user not specify filter, | |
8, 10],
"3": [2, 4, 5, 7, 9],
"5": [3, 4, 5, 9, 10],
"7": [1, 5, 6, 7, 9],
}
idx_kov = {key: np.array(val) for key, val in idx_kov.items()}
idx_vig = {key: np.setdiff1d(np.arange(1, 11), np.array(val), assume_unique=True) for key, val in idx_kov.items()}
abi_kov, abi_vig = [
pd.concat(
[abi_raw.loc[:, key].iloc[:, idx[key] - 1] for key in idx],
axis=1,
keys=idx_kov.keys(),
)
for idx in [idx_kov, idx_vig]
]
abi_data = {
score_name + "_KOV_T": abi_kov.sum(axis=1),
score_name + "_VIG_T": abi_vig.sum(axis=1),
score_name + "_KOV_P": abi_kov.loc[:, ["2", "4", "6", "8"]].sum(axis=1),
score_name + "_VIG_P": abi_vig.loc[:, ["2", "4", "6", "8"]].sum(axis=1),
score_name + "_KOV_E": abi_kov.loc[:, ["1", "3", "5", "7"]].sum(axis=1),
score_name + "_VIG_E": abi_vig.loc[:, ["1", "3", "5", "7"]].sum(axis=1),
}
return pd.DataFrame(abi_data, index=data.index)
def stadi(
    data: pd.DataFrame,
    columns: Optional[Union[Sequence[str], pd.Index]] = None,
    subscales: Optional[Dict[str, Sequence[int]]] = None,
    stadi_type: Optional[Literal["state", "trait", "state_trait"]] = None,
) -> pd.DataFrame:
    """Compute the **State-Trait Anxiety-Depression Inventory (STADI)**.

    With the STADI, anxiety and depression can be recorded, both as state and as trait.
    Two self-report questionnaires with 20 items each are available for this purpose.
    The state part measures the degree of anxiety and depression currently experienced by a person, which varies
    depending on internal or external influences. It can be used in a variety of situations of different types.
    This includes not only the whole spectrum of highly heterogeneous stressful situations, but also situations of
    neutral or positive ("euthymic") character. The trait part is used to record trait expressions, i.e. the
    enduring tendency to experience anxiety and depression.

    The STADI can either be computed only for state, only for trait, or for state and trait.

    The state and trait scales both consist of the subscales with the item indices
    (count-by-one, i.e., the first question has the index 1!):

    * Emotionality (Aufgeregtheit - affektive Komponente – ``AU``): [1, 5, 9, 13, 17]
    * Worry (Besorgnis - kognitive Komponente - ``BE``): [2, 6, 10, 14, 18]
    * Anhedonia (Euthymie - positive Stimmung - ``EU``): [3, 7, 11, 15, 19]
    * Dysthymia (Dysthymie - depressive Stimmung - ``DY``): [4, 8, 12, 16, 20]

    .. note::
        This implementation assumes a score range of [1, 4].
        Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
        beforehand.

    .. note::
        If both state and trait score are present it is assumed that all *state* items are first,
        followed by all *trait* items. If all subscales are present this adds up to 20 state items and 20 trait items.

    .. warning::
        Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
        questionnaire item columns, which typically also start with index 1!

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
        a complete dataframe if ``columns`` parameter is supplied.
    columns : list of str or :class:`pandas.Index`, optional
        list with column names in correct order.
        This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
        passed as ``data``.
    subscales : dict, optional
        A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
        if only specific subscales should be computed.
    stadi_type : any of ``state``, ``trait``, or ``state_trait``
        which type of STADI subscale should be computed. Default: ``state_trait``

    Returns
    -------
    :class:`~pandas.DataFrame`
        STADI score

    Raises
    ------
    ValueError
        if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
        if invalid parameter was passed to ``stadi_type``
    :exc:`~biopsykit.utils.exceptions.ValidationError`
        if number of columns does not match
    :exc:`~biopsykit.utils.exceptions.ValueRangeError`
        if values are not within the required score range

    References
    ----------
    <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2013).
    Das State-Trait-Angst-Depressions-Inventar: STADI; Manual.

    <NAME>., <NAME>., <NAME>., & <NAME>. (2018). Differentiating anxiety and depression:
    the state-trait anxiety-depression inventory. *Cognition and Emotion*, 32(7), 1409-1423.
    """
    score_name = "STADI"
    score_range = [1, 4]

    if columns is not None:
        # if columns parameter is supplied: slice columns from dataframe
        _assert_has_columns(data, [columns])
        data = data.loc[:, columns]

    # Normalize stadi_type to a list of parts: ["State"], ["Trait"] or both.
    stadi_type = _get_stadi_type(stadi_type)

    if subscales is None:
        # Full questionnaire: 20 items per requested part.
        _assert_num_columns(data, 20 * len(stadi_type))
        subscales = {
            "AU": [1, 5, 9, 13, 17],
            "BE": [2, 6, 10, 14, 18],
            "EU": [3, 7, 11, 15, 19],
            "DY": [4, 8, 12, 16, 20],
        }

    _assert_value_range(data, score_range)

    # split into n subitems (either "State", "Trait" or "State and Trait")
    items = np.split(data, len(stadi_type), axis=1)
    data = pd.concat(items, keys=stadi_type, axis=1)

    stadi_data = {}
    for st in stadi_type:
        # Per-subscale sums for this part, e.g. STADI_State_AU.
        stadi_data.update(_compute_questionnaire_subscales(data[st], "{}_{}".format(score_name, st), subscales))
        # Aggregate scores are only added when both constituent subscales
        # were actually computed for this part.
        if all("{}_{}_{}".format(score_name, st, subtype) in stadi_data for subtype in ["AU", "BE"]):
            stadi_data.update(
                {
                    "{}_{}_Anxiety".format(score_name, st): stadi_data["{}_{}_AU".format(score_name, st)]
                    + stadi_data["{}_{}_BE".format(score_name, st)]
                }
            )
        if all("{}_{}_{}".format(score_name, st, subtype) in stadi_data for subtype in ["EU", "DY"]):
            stadi_data.update(
                {
                    "{}_{}_Depression".format(score_name, st): stadi_data["{}_{}_EU".format(score_name, st)]
                    + stadi_data["{}_{}_DY".format(score_name, st)]
                }
            )
        if all("{}_{}_{}".format(score_name, st, subtype) in stadi_data for subtype in ["Anxiety", "Depression"]):
            stadi_data.update(
                {
                    "{}_{}_Total".format(score_name, st): stadi_data["{}_{}_Anxiety".format(score_name, st)]
                    + stadi_data["{}_{}_Depression".format(score_name, st)]
                }
            )

    df_stadi = pd.DataFrame(stadi_data, index=data.index)
    return df_stadi
def _get_stadi_type(stadi_type: str) -> Sequence[str]:
if stadi_type is None:
stadi_type = ["State", "Trait"]
elif stadi_type == "state_trait":
stadi_type = ["State", "Trait"]
elif stadi_type == "state":
stadi_type = ["State"]
elif stadi_type == "trait":
stadi_type = ["Trait"]
else:
raise ValueError(
"Invalid 'stadi_type'! Must be one of 'state_trait', 'state', or 'trait', not {}.".format(stadi_type)
)
return stadi_type
def svf_120(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Stressverarbeitungsfragebogen - 120 item version (SVF120)**.
The stress processing questionnaire enables the assessment of coping or processing measures in stressful
situations. The SVF is not a singular test instrument, but rather an inventory of methods that relate to various
aspects of stress processing and coping and from which individual procedures can be selected depending on
the study objective/question.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* Trivialization/Minimalization (Bagatellisierung – ``Bag``): [10, 31, 50, 67, 88, 106]
* De-Emphasis by Comparison with Others (Herunterspielen – ``Her``): [17, 38, 52, 77, 97, 113]
* Rejection of Guilt (Schuldabwehr – ``Schab``): [5, 30, 43, 65, 104, 119]
* Distraction/Deflection from a Situation (Ablenkung – ``Abl``): [1, 20, 45, 86, 101, 111]
* Vicarious Satisfaction (Ersatzbefriedigung –``Ers``): [22, 36, 64, 74, 80, 103]
* Search for Self-Affirmation (Selbstbestätigung – ``Sebest``): [34, 47, 59, 78, 95, 115]
* Relaxation (Entspannung –``Entsp``): [12, 28, 58, 81, 99, 114]
* Attempt to Control Situation (Situationskontrolle – ``Sitkon``): [11, 18, 39, 66, 91, 116]
* Response Control (Reaktionskontrolle – ``Rekon``): [2, 26, 54, 68, 85, 109]
* Positive Self-Instruction (Positive Selbstinstruktion – ``Posi``): [15, 37, 56, 71, 83, 96]
* Need for Social Support (Soziales Unterstützungsbedürfnis – ``Sozube``): [3, 21, 42, 63, 84, 102]
* Avoidance Tendencies (Vermeidung – ``Verm``): [8, 29, 48, 69, 98, 118]
* Escapist Tendencies (Flucht – ``Flu``): [14, 24, 40, 62, 73, 120]
* Social Isolation (Soziale Abkapselung – ``Soza``): [6, 27, 49, 76, 92, 107]
* Mental Perseveration (Gedankliche Weiterbeschäftigung – ``Gedw``): [16, 23, 55, 72, 100, 110]
* Resignation (Resignation – ``Res``): [4, 32, 46, 60, 89, 105]
* Self-Pity (Selbstbemitleidung – ``Selmit``): [13, 41, 51, 79, 94, 117]
* Self-Incrimination (Selbstbeschuldigung – ``Sesch``): [9, 25, 35, 57, 75, 87]
* Aggression (Aggression – ``Agg``): [33, 44, 61, 82, 93, 112]
* Medicine-Taking (Pharmakaeinnahme – ``Pha``): [7, 19, 53, 70, 90, 108]
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
| |
import functools
import logging
import os
from logging import handlers
from time import perf_counter
from xml.etree import ElementTree
from xml.etree.ElementTree import SubElement

import numpy as np
import pandas as pd
import tensorflow as tf
from lxml import etree
from tensorflow.keras.losses import (
    sparse_categorical_crossentropy,
    binary_crossentropy,
)
def get_logger():
    """
    Initialize logger configuration.

    Logs go both to a rotating file at ``Logs/session.log`` and to the
    console, using a shared format.

    Returns:
        logger.
    """
    log_format = logging.Formatter(
        '%(asctime)s %(name)s.%(funcName)s +%(lineno)s: '
        '%(levelname)-8s [%(process)d] %(message)s'
    )
    session_logger = logging.getLogger('session_log')
    session_logger.setLevel(logging.DEBUG)
    log_path = os.path.join('Logs', 'session.log')
    # When the current directory has no "Logs" folder, fall back to the
    # parent directory's Logs folder.
    if 'Logs' not in os.listdir():
        log_path = f'{os.path.join("..", log_path)}'
    for handler in (
        handlers.RotatingFileHandler(log_path, backupCount=10),
        logging.StreamHandler(),
    ):
        handler.setFormatter(log_format)
        session_logger.addHandler(handler)
    return session_logger
# Module-wide logger instance shared by the helpers in this file.
default_logger = get_logger()
def timer(logger):
    """
    Timer wrapper.

    Decorator factory: the returned decorator logs the wrapped function's
    wall-clock execution time.

    Args:
        logger: logging.Logger object that receives the timing message,
            or None to disable logging.

    Returns:
        timed: decorator preserving the wrapped function's metadata and
        return value.
    """
    def timed(func):
        # functools.wraps keeps func.__name__/__doc__ intact so that the
        # logged name (and any introspection) stays correct.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start_time = perf_counter()
            result = func(*args, **kwargs)
            total_time = perf_counter() - start_time
            if logger is not None:
                logger.info(
                    f'{func.__name__} execution time: ' f'{total_time} seconds'
                )
            # Always propagate the result (the old `if result is not None`
            # guard was a no-op: falling through returned None anyway).
            return result

        return wrapper

    return timed
def ratios_to_coordinates(bx, by, bw, bh, width, height):
    """
    Convert relative coordinates to actual coordinates.

    Inverse of ``calculate_ratios``.

    Args:
        bx: Relative center x coordinate.
        by: Relative center y coordinate.
        bw: Relative box width.
        bh: Relative box height.
        width: Image batch width.
        height: Image batch height.

    Return:
        Tuple (x1, y1, x2, y2) of the box corners in pixels.
    """
    box_width = bw * width
    box_height = bh * height
    x1 = bx * width + box_width / 2
    y1 = by * height + box_height / 2
    return x1, y1, x1 + box_width, y1 + box_height
def transform_images(x_train, size):
    """
    Resize image tensor.

    Args:
        x_train: Image tensor.
        size: new (width, height)

    Returns:
        The resized tensor with pixel values scaled by 1/255.
    """
    resized = tf.image.resize(x_train, (size, size))
    return resized / 255
@tf.function
def transform_targets_for_output(y_true, grid_size, anchor_idxs):
    """Scatter ground-truth boxes into one YOLO output grid.

    Args:
        y_true: per-image box tensor; each row is read below as
            (x1, y1, x2, y2, class, best_anchor_index).
        grid_size: edge length of this output scale's grid.
        anchor_idxs: anchor indices handled by this output scale.

    Returns:
        Tensor of shape (N, grid, grid, len(anchor_idxs), 6) with rows
        (x1, y1, x2, y2, objectness, class).
    """
    n = tf.shape(y_true)[0]
    y_true_out = tf.zeros(
        (n, grid_size, grid_size, tf.shape(anchor_idxs)[0], 6)
    )
    anchor_idxs = tf.cast(anchor_idxs, tf.int32)
    # TensorArrays collect scatter indices/values (graph-mode friendly).
    indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
    updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
    idx = 0
    for i in tf.range(n):
        for j in tf.range(tf.shape(y_true)[1]):
            # Skip rows whose x2 is zero (empty/padded boxes).
            if tf.equal(y_true[i][j][2], 0):
                continue
            # Only keep boxes whose best anchor belongs to this scale.
            anchor_eq = tf.equal(
                anchor_idxs, tf.cast(y_true[i][j][5], tf.int32)
            )
            if tf.reduce_any(anchor_eq):
                box = y_true[i][j][0:4]
                box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2
                anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
                # Grid cell containing the box center.
                grid_xy = tf.cast(box_xy // (1 / grid_size), tf.int32)
                indexes = indexes.write(
                    idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]]
                )
                updates = updates.write(
                    idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]]
                )
                idx += 1
    return tf.tensor_scatter_nd_update(
        y_true_out, indexes.stack(), updates.stack()
    )
def transform_targets(y_train, anchors, anchor_masks, size):
    """Assign each ground-truth box to its best anchor and build the
    per-scale target tensors.

    Args:
        y_train: ground-truth boxes, read as (..., (x1, y1, x2, y2, ...)).
        anchors: (n_anchors, 2) anchor width/height values.
        anchor_masks: per-output-scale groups of anchor indices.
        size: network input size in pixels.

    Returns:
        Tuple with one target tensor per anchor mask (output scale).
    """
    y_outs = []
    grid_size = size // 32  # coarsest output grid
    anchors = tf.cast(anchors, tf.float32)
    anchor_area = anchors[..., 0] * anchors[..., 1]
    box_wh = y_train[..., 2:4] - y_train[..., 0:2]
    # Tile each box against every anchor to compare overlap pairwise.
    box_wh = tf.tile(
        tf.expand_dims(box_wh, -2), (1, 1, tf.shape(anchors)[0], 1)
    )
    box_area = box_wh[..., 0] * box_wh[..., 1]
    intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * tf.minimum(
        box_wh[..., 1], anchors[..., 1]
    )
    iou = intersection / (box_area + anchor_area - intersection)
    # The best-matching anchor index is appended as an extra component.
    anchor_idx = tf.cast(tf.argmax(iou, axis=-1), tf.float32)
    anchor_idx = tf.expand_dims(anchor_idx, axis=-1)
    y_train = tf.concat([y_train, anchor_idx], axis=-1)
    for anchor_idxs in anchor_masks:
        y_outs.append(
            transform_targets_for_output(y_train, grid_size, anchor_idxs)
        )
        grid_size *= 2  # each successive scale doubles the grid resolution
    return tuple(y_outs)
def broadcast_iou(box_1, box_2):
    """Compute pairwise IoU between two sets of (x1, y1, x2, y2) boxes.

    The two inputs are expanded on different axes and broadcast against
    each other, yielding an IoU value for every (box_1, box_2) pair.
    """
    box_1 = tf.expand_dims(box_1, -2)
    box_2 = tf.expand_dims(box_2, 0)
    common_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
    box_1 = tf.broadcast_to(box_1, common_shape)
    box_2 = tf.broadcast_to(box_2, common_shape)
    # Clamp negative extents to zero: non-overlapping pairs contribute 0.
    overlap_w = tf.maximum(
        tf.minimum(box_1[..., 2], box_2[..., 2])
        - tf.maximum(box_1[..., 0], box_2[..., 0]),
        0,
    )
    overlap_h = tf.maximum(
        tf.minimum(box_1[..., 3], box_2[..., 3])
        - tf.maximum(box_1[..., 1], box_2[..., 1]),
        0,
    )
    intersection = overlap_w * overlap_h
    area_1 = (box_1[..., 2] - box_1[..., 0]) * (
        box_1[..., 3] - box_1[..., 1]
    )
    area_2 = (box_2[..., 2] - box_2[..., 0]) * (
        box_2[..., 3] - box_2[..., 1]
    )
    return intersection / (area_1 + area_2 - intersection)
def get_boxes(pred, anchors, classes):
    """Decode one raw YOLO output head into bounding boxes and scores.

    Args:
        pred: raw head output, split below as (xy=2, wh=2, obj=1, classes).
        anchors: anchor sizes for this head.
        classes: number of object classes.

    Returns:
        bbox: (x1, y1, x2, y2) boxes in grid-relative coordinates.
        object_probability: sigmoid-activated objectness.
        class_probabilities: sigmoid-activated per-class scores.
        pred_box: the pre-offset (sigmoid-xy, raw-wh) values — the loss
            regresses against these directly.
    """
    grid_size = tf.shape(pred)[1]
    box_xy, box_wh, object_probability, class_probabilities = tf.split(
        pred, (2, 2, 1, classes), axis=-1
    )
    box_xy = tf.sigmoid(box_xy)
    object_probability = tf.sigmoid(object_probability)
    class_probabilities = tf.sigmoid(class_probabilities)
    # Saved before the grid offset is applied (see return value docs).
    pred_box = tf.concat((box_xy, box_wh), axis=-1)
    # Per-cell offsets: shift each cell's xy by its grid position, then
    # normalize by the grid size.
    grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
    grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
    box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(
        grid_size, tf.float32
    )
    box_wh = tf.exp(box_wh) * anchors
    box_x1y1 = box_xy - box_wh / 2
    box_x2y2 = box_xy + box_wh / 2
    bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
    return bbox, object_probability, class_probabilities, pred_box
def calculate_loss(anchors, classes=80, ignore_thresh=0.5):
    """Build the YOLO loss function for one output head.

    Args:
        anchors: anchor sizes for this head.
        classes: number of object classes.
        ignore_thresh: IoU above which unmatched predictions are not
            penalized as background.

    Returns:
        yolo_loss: callable (y_true, y_pred) -> per-sample loss tensor.
    """
    def yolo_loss(y_true, y_pred):
        # 1. Decode predictions: boxes for IoU, raw xy/wh for regression.
        pred_box, pred_obj, pred_class, pred_xywh = get_boxes(
            y_pred, anchors, classes
        )
        pred_xy = pred_xywh[..., 0:2]
        pred_wh = pred_xywh[..., 2:4]
        # 2. Convert ground-truth corners to center/size form.
        true_box, true_obj, true_class_idx = tf.split(
            y_true, (4, 1, 1), axis=-1
        )
        true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
        true_wh = true_box[..., 2:4] - true_box[..., 0:2]
        # Weight factor 2 - w*h gives smaller boxes a larger weight
        # (assumes wh are relative values — see transform_targets).
        box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
        # 3. Express the ground truth in the same raw space the network
        #    predicts (cell-relative xy, log-space wh).
        grid_size = tf.shape(y_true)[1]
        grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
        true_xy = true_xy * tf.cast(grid_size, tf.float32) - tf.cast(
            grid, tf.float32
        )
        true_wh = tf.math.log(true_wh / anchors)
        # log(0) -> -inf for empty/padded boxes; zero those entries out.
        true_wh = tf.where(
            tf.math.is_inf(true_wh), tf.zeros_like(true_wh), true_wh
        )
        # 4. Background predictions whose best IoU with any true box
        #    exceeds ignore_thresh are excluded from the objectness loss.
        obj_mask = tf.squeeze(true_obj, -1)
        best_iou = tf.map_fn(
            lambda x: tf.reduce_max(
                broadcast_iou(
                    x[0], tf.boolean_mask(x[1], tf.cast(x[2], tf.bool))
                ),
                axis=-1,
            ),
            (pred_box, true_box, obj_mask),
            tf.float32,
        )
        ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)
        # 5. Individual loss terms, masked to cells that own an object.
        xy_loss = (
            obj_mask
            * box_loss_scale
            * tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
        )
        wh_loss = (
            obj_mask
            * box_loss_scale
            * tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
        )
        obj_loss = binary_crossentropy(true_obj, pred_obj)
        obj_loss = (
            obj_mask * obj_loss + (1 - obj_mask) * ignore_mask * obj_loss
        )
        class_loss = obj_mask * sparse_categorical_crossentropy(
            true_class_idx, pred_class
        )
        # 6. Sum each term over all grid cells and anchors.
        xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
        wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
        obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
        class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
        return xy_loss + wh_loss + obj_loss + class_loss

    return yolo_loss
def add_xml_path(xml_file, path):
    """
    Add a path element to the xml file and save.

    Sets the <folder> text to *path*, adds a <path> element combining
    folder and filename, pretty-prints the XML and rewrites the file
    in place.

    Args:
        xml_file: .xml file path.
        path: str, path to add.

    Returns:
        None
    """
    tree = ElementTree.parse(xml_file)
    top = tree.getroot()
    folder_tag = tree.find('folder')
    folder_tag.text = path
    file_name_tag = tree.find('filename')
    path_tag = SubElement(top, 'path')
    path_tag.text = os.path.join(folder_tag.text, file_name_tag.text)
    # Round-trip through lxml to get pretty-printed output, then swap
    # spaces for tabs.
    # NOTE(review): this replaces EVERY space byte, including ones inside
    # text and attribute values — confirm that is intended.
    rough_string = ElementTree.tostring(top, 'utf8')
    root = etree.fromstring(rough_string)
    pretty = etree.tostring(root, pretty_print=True, encoding='utf-8').replace(
        ' '.encode(), '\t'.encode()
    )
    # Rewrite the file in place with the pretty-printed content.
    os.remove(xml_file)
    with open(xml_file, 'wb') as output:
        output.write(pretty)
def get_detection_data(image, image_name, outputs, class_names):
    """
    Organize predictions of a single image into a pandas DataFrame.

    Args:
        image: Image as a numpy array.
        image_name: str, name to write in the image column.
        outputs: Outputs from inference_model.predict()
        class_names: A list of object class names.

    Returns:
        data: pandas DataFrame with the detections.
    """
    nums = outputs[-1]
    valid = int(nums)
    # Model outputs may arrive as numpy arrays or as tensors that need
    # .numpy(); either way keep only the first batch's valid detections.
    if isinstance(outputs[0], np.ndarray):
        boxes, scores, classes = [item[0][:valid] for item in outputs[:-1]]
    else:
        boxes, scores, classes = [
            item[0][:valid].numpy() for item in outputs[:-1]
        ]
    w, h = np.flip(image.shape[0:2])
    data = pd.DataFrame(boxes, columns=['x1', 'y1', 'x2', 'y2'])
    # Scale relative box coordinates up to integer pixel values.
    data[['x1', 'x2']] = (data[['x1', 'x2']] * w).astype('int64')
    data[['y1', 'y2']] = (data[['y1', 'y2']] * h).astype('int64')
    data['object_name'] = np.array(class_names)[classes.astype('int64')]
    data['image'] = image_name
    data['score'] = scores
    data['image_width'] = w
    data['image_height'] = h
    ordered_columns = [
        'image',
        'object_name',
        'x1',
        'y1',
        'x2',
        'y2',
        'score',
        'image_width',
        'image_height',
    ]
    return data[ordered_columns]
def activate_gpu():
    """
    Check for GPU existence and if found, activate.

    Enables memory growth on the first detected GPU and logs the event.

    Returns:
        None
    """
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        tf.config.experimental.set_memory_growth(gpus[0], True)
        default_logger.info('GPU activated')
def calculate_ratios(x1, y1, x2, y2, width, height):
    """
    Calculate relative object ratios in the labeled image.

    Inverse of ``ratios_to_coordinates`` (which divides by the same image
    width/height).

    Args:
        x1: Start x coordinate.
        y1: Start y coordinate.
        x2: End x coordinate.
        y2: End y coordinate.
        width: Image width used for normalization.
        height: Image height used for normalization.

    Return:
        bx: Relative center x coordinate.
        by: Relative center y coordinate.
        bw: Relative box width.
        bh: Relative box height.
    """
    left = min(x1, x2)
    top = min(y1, y2)
    box_w = abs(x2 - x1)
    box_h = abs(y2 - y1)
    bx = 1 - ((width - left + box_w / 2) / width)
    by = 1 - ((height - top + box_h / 2) / height)
    return bx, by, box_w / width, box_h / height
def calculate_display_data(
prediction_file, classes_file, img_width, img_height, out
):
"""
Convert coordinates to relative labels.
prediction_file: csv file containing label coordinates.
classes_file: .txt file containing object classes.
img_width: Image width.
img_height: Image height.
out: Output path.
Returns:
None
"""
preds = pd.read_csv(prediction_file)
rows = []
indices = {
item: ind
for ind, item in enumerate(
[item.strip() for item in open(classes_file).readlines()]
)
}
for ind, item in preds.iterrows():
img, obj, xx1, yy1, xx2, yy2, score, imgw, imgh, dk = item.values
bxx, byy, bww, bhh = calculate_ratios(
xx1, yy1, xx2, | |
#
# soaplib - Copyright (C) Soaplib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import logging
logger = logging.getLogger(__name__)
from lxml import etree
from soaplib.core import namespaces, styles
from soaplib.core import MethodDescriptor
from soaplib.core.model.clazz import ClassModel as Message
from soaplib.core.model.clazz import ClassModelMeta as MessageMeta
from soaplib.core.model.clazz import TypeInfo
from soaplib.core.model.primitive import Any
_pref_wsa = namespaces.const_prefmap[namespaces.ns_wsa]
def _produce_input_message(ns, f, params, kparams):
    """Build the input ClassModel message for the decorated method *f*.

    Member names come from the function's code object (skipping ``self``);
    ``_in_variable_names`` may remap individual names, and ``_in_message``
    overrides the message type name (default: the function name).
    """
    type_name = kparams.get('_in_message', f.func_name)
    name_map = kparams.get('_in_variable_names', {})

    argc = f.func_code.co_argcount
    arg_names = f.func_code.co_varnames[1:argc]

    in_params = TypeInfo()
    try:
        for idx, param_type in enumerate(params):
            # IndexError here means more soap params than python arguments.
            name = arg_names[idx]
            in_params[name_map.get(name, name)] = param_type
    except IndexError:
        raise Exception("%s has parameter numbers mismatching" % f.func_name)

    message = Message.produce(type_name=type_name, namespace=ns,
                              members=in_params)
    message.__namespace__ = ns
    message.resolve_namespace(message, ns)
    return message
def _produce_rpc_output_message(ns, f, params, kparams):
    """Build the rpc-style output message for *f*.

    A list/tuple ``_returns`` yields one member per element (named via
    ``_out_variable_names`` or ``<func>Result<i>``); a single ``_returns``
    yields one member (``_out_variable_name`` or ``<func>Result``); no
    ``_returns`` yields an empty message.
    """
    returns = kparams.get('_returns')
    type_name = kparams.get('_out_message', '%sResponse' % f.func_name)

    out_params = TypeInfo()
    if returns:
        if isinstance(returns, (list, tuple)):
            fallback = ['%sResult%d' % (f.func_name, n)
                        for n in range(len(returns))]
            names = kparams.get('_out_variable_names', fallback)
            assert (len(returns) == len(names))
            out_params = TypeInfo(zip(names, returns))
        else:
            single_name = kparams.get('_out_variable_name',
                                      '%sResult' % f.func_name)
            out_params[single_name] = returns

    message = Message.produce(type_name=type_name, namespace=ns,
                              members=out_params)
    message.__namespace__ = ns
    message.resolve_namespace(message, ns)
    return message
class Alias(Message):
    """A message type that is merely a new name for its ``_target`` type."""

    @classmethod
    def add_to_schema(cls, schema_dict):
        # The aliased type itself must be in the schema before we can
        # reference it.
        if not schema_dict.has_class(cls._target):
            cls._target.add_to_schema(schema_dict)

        el = etree.Element('{%s}element' % namespaces.ns_xsd)
        el.set('name', cls.get_type_name())
        el.set('type', cls._target.get_type_name_ns(schema_dict.app))
        schema_dict.add_element(cls, el)
def _makeAlias(type_name, namespace, target):
    """Create an Alias subclass named *type_name* for *target*.

    A variation on ``ClassModel.produce``: the new class borrows the
    target's ``_type_info`` instead of declaring members of its own.
    """
    attrs = {
        '__namespace__': namespace,
        '__type_name__': type_name,
        '_type_info': getattr(target, '_type_info', ()),
        '_target': target,
    }
    return MessageMeta(type_name, (Alias,), attrs)
def _produce_document_output_message(ns, f, params, kparams):
    """Build a document-style output message for *f*.

    The message is simply an alias of the declared return type
    (``_returns``, defaulting to ``Any``).
    """
    returns = kparams.get('_returns', Any)
    type_name = kparams.get('_out_message', '%sResponse' % f.func_name)

    alias = _makeAlias(type_name, ns, returns)
    alias.resolve_namespace(alias, ns)
    return alias
def soap(*params, **kparams):
    '''
    This is a method decorator to flag a method as a remote procedure call. It
    will behave like a normal python method on a class, and will only behave
    differently when the keyword '_method_descriptor' is passed in, returning a
    'MethodDescriptor' object. This decorator does none of the soap/xml
    serialization, only flags a method as a soap method. This decorator should
    only be used on member methods of an instance of ServiceBase.
    '''
    def explain(f):
        def explain_method(*args, **kwargs):
            retval = None
            # Normal call path: behave exactly like the wrapped method.
            if '_method_descriptor' not in kwargs :
                retval = f(*args, **kwargs)
            else:
                # Introspection path: assemble a MethodDescriptor from the
                # keyword options captured by the decorator.
                _is_callback = kparams.get('_is_callback', False)
                _public_name = kparams.get('_public_name', f.func_name)
                _is_async = kparams.get('_is_async', False)
                _mtom = kparams.get('_mtom', False)
                _in_header = kparams.get('_in_header', None)
                _out_header = kparams.get('_out_header', None)
                _port_type = kparams.get('_port_type', None)
                _style = kparams.get('_style', styles.RPC_STYLE)
                # the decorator function does not have a reference to the
                # class and needs to be passed in
                ns = kwargs['clazz'].get_tns()
                in_message = _produce_input_message(ns, f, params, kparams)
                # Output message shape depends on the binding style.
                if _style == styles.RPC_STYLE or _style is None:
                    out_message = _produce_rpc_output_message(
                        ns,
                        f,
                        params,
                        kparams
                    )
                elif _style == styles.DOC_STYLE:
                    out_message = _produce_document_output_message(ns, f, params, kparams)
                else:
                    raise ValueError(
                        """Invalid style: valid values are
                        soaplib.core.styles.RPC_STYLE or
                        soaplib.core.styles.DOC_STYLE
                        """
                    )
                _faults = kparams.get('_faults', [])
                # Resolve header namespaces against the service's tns.
                if _in_header :
                    _in_header.resolve_namespace(_in_header, ns)
                if _out_header :
                    _out_header.resolve_namespace(_out_header, ns)
                doc = getattr(f, '__doc__')
                retval = MethodDescriptor(f.func_name,
                    _public_name,
                    in_message,
                    out_message,
                    doc,
                    _is_callback,
                    _is_async,
                    _mtom,
                    _in_header,
                    _out_header,
                    _faults,
                    _style,
                    _port_type,
                )
            return retval
        # Preserve the wrapped function's doc and name, and mark it so
        # build_public_methods can find it.
        explain_method.__doc__ = f.__doc__
        explain_method._is_rpc = True
        explain_method.func_name = f.func_name
        return explain_method
    return explain
def rpc(*params, **kparams):
    """This is a method decorator to flag a method as a remote procedure call.
    It will behave like a normal python method on a class, and will only behave
    differently when the keyword '_method_descriptor' is passed in, returning a
    'MethodDescriptor' object. This decorator does none of the soap/xml
    serialization, only flags a method as a soap method. This decorator should
    only be used on member methods of an instance of ServiceBase.
    Moving forward, this method is being depricated in favor of @soap
    Presently it simply calls @soap after checking for the _style keyword
    argument. If the _style argument is not supplied it defaults to
    soaplib.core.styles.RPC_STYLE
    """
    # dict.has_key() is deprecated (and removed in Python 3); the `in`
    # operator is the equivalent, portable spelling.
    if "_style" not in kparams:
        kparams["_style"] = styles.RPC_STYLE
    return soap(*params, **kparams)
def document(*params, **kparams):
    """Method decorator flagging a method as a document-style soap operation.

    Behaves like a normal python method unless '_method_descriptor' is
    passed in, in which case a 'MethodDescriptor' is returned. Does no
    soap/xml serialization itself; use only on methods of classes derived
    from 'ServiceBase'. Deprecated in favour of @soap: it simply forces
    the _style keyword to soaplib.core.styles.DOC_STYLE and delegates.
    """
    kparams["_style"] = styles.DOC_STYLE
    return soap(*params, **kparams)
# Per-service-class cache of MethodDescriptor lists, filled lazily by
# DefinitionBase.__init__ via build_public_methods().
_public_methods_cache = {}
class DefinitionBase(object):
'''
This class serves as the base for all soap services. Subclasses of this
class will use the rpc decorator to flag methods to be exposed via soap.
This class is responsible for generating the wsdl for this service
definition.
It is a natural abstract base class, because it's of no use without any
method definitions, hence the 'Base' suffix in the name.
'''
__tns__ = None
__in_header__ = None
__out_header__ = None
__service_interface__ = None
__port_types__ = ()
    def __init__(self, environ=None):
        """Initialise per-request state.

        The MethodDescriptor list is built once per service class and
        memoised in the module-level _public_methods_cache.
        """
        self.in_header = None
        self.out_header = None
        cls = self.__class__
        if not (cls in _public_methods_cache):
            _public_methods_cache[cls] = self.build_public_methods()
        self.public_methods = _public_methods_cache[cls]
        self.service_interface = cls.__service_interface__
        self.port_types = cls.__port_types__
        self.environ = environ
    @classmethod
    def get_service_class_name(cls):
        """Return the bare name of this service class."""
        return cls.__name__
    @classmethod
    def get_service_interface(cls):
        """Return the class-level __service_interface__ setting."""
        return cls.__service_interface__
    @classmethod
    def get_port_types(cls):
        """Return the class-level __port_types__ setting."""
        return cls.__port_types__
    def on_method_call(self, method_name, py_params, soap_params):
        '''Called BEFORE the service implementing the functionality is called
        @param the method name
        @param the tuple of python params being passed to the method
        @param the soap elements for each argument
        '''
        # Intentionally empty extension hook; subclasses may override.
    def on_method_return_object(self, py_results):
        '''Called AFTER the service implementing the functionality is called,
        with native return object as argument
        @param the python results from the method
        '''
        # Intentionally empty extension hook; subclasses may override.
    def on_method_return_xml(self, soap_results):
        '''Called AFTER the service implementing the functionality is called,
        with native return object serialized to Element objects as argument.
        @param the xml element containing the return value(s) from the method
        '''
        # Intentionally empty extension hook; subclasses may override.
    def on_method_exception_object(self, exc):
        '''Called BEFORE the exception is serialized, when an error occurs
        during execution.
        @param the exception object
        '''
        # Intentionally empty extension hook; subclasses may override.
    def on_method_exception_xml(self, fault_xml):
        '''Called AFTER the exception is serialized, when an error occurs
        during execution.
        @param the xml element containing the exception object serialized to a
        soap fault
        '''
        # Intentionally empty extension hook; subclasses may override.
    def call_wrapper(self, call, params):
        '''Called in place of the original method call.
        @param the original method call
        @param the arguments to the call
        '''
        # Override point; the default simply invokes *call* with *params*.
        return call(*params)
@classmethod
def get_tns(cls):
if cls.__tns__ :
return cls.__tns__
service_name = cls.__name__.split('.')[-1]
retval = '.'.join((cls.__module__, service_name))
if cls.__module__ == '__main__':
retval = '.'.join((service_name, service_name))
return retval
def build_public_methods(self):
'''Returns a list of method descriptors for this object'''
logger.debug('building public methods')
public_methods = []
for func_name in dir(self):
if func_name == 'public_methods':
continue
func = getattr(self, func_name)
if callable(func) and hasattr(func, '_is_rpc'):
descriptor = func(_method_descriptor=True, clazz=self.__class__)
public_methods.append(descriptor)
return public_methods
def get_method(self, name):
'''Returns the metod descriptor based on element name or soap action.'''
for method in self.public_methods:
type_name = method.in_message.get_type_name()
if '{%s}%s' % (self.get_tns(), type_name) == name:
return method
for method in self.public_methods:
if method.public_name == name:
return method
raise Exception('Method "%s" not found' % name)
def _has_callbacks(self):
'''Determines if this object has callback methods or not.'''
for | |
# coding=utf-8
# Author: Hongzhong
# 2017-12-28 11:43$id
from __future__ import print_function
from lru import LRU
from intervaltree import IntervalTree
from .minute_bars import BcolzMinuteBarWriter, BcolzMinuteBarMetadata, BcolzMinuteWriterColumnMismatch, BcolzMinuteBarReader
from .minute_bars import OHLC_RATIO, DEFAULT_EXPECTEDLEN
from .minute_bars import convert_cols
import os
import pandas as pd
from toolz import keymap, valmap
import datetime
import numpy as np
import rocksdb
import logbook
import glob
import struct
import time
import pytz
from zipline.utils.memoize import lazyval
def _sid_subdir_path(sid):
"""
Format subdir path to limit the number directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters:
-----------
sid : int
Asset identifier.
Returns:
--------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.rocksdb".format(str(padded_sid))
)
class RocksdbMinuteBarWriter(BcolzMinuteBarWriter):
"""
Class capable of writing minute OHLCV data to disk into bcolz format.
Parameters
----------
rootdir : string
Path to the root directory into which to write the metadata and
bcolz subdirectories.
calendar : zipline.utils.calendars.trading_calendar.TradingCalendar
The trading calendar on which to base the minute bars. Used to
get the market opens used as a starting point for each periodic
span of minutes in the index, and the market closes that
correspond with the market opens.
minutes_per_day : int
The number of minutes per each period. Defaults to 390, the mode
of minutes in NYSE trading days.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
default_ohlc_ratio : int, optional
The default ratio by which to multiply the pricing data to
convert from floats to integers that fit within np.uint32. If
ohlc_ratios_per_sid is None or does not contain a mapping for a
given sid, this ratio is used. Default is OHLC_RATIO (1000).
ohlc_ratios_per_sid : dict, optional
A dict mapping each sid in the output to the ratio by which to
multiply the pricing data to convert the floats from floats to
an integer to fit within the np.uint32.
expectedlen : int, optional
The expected length of the dataset, used when creating the initial
bcolz ctable.
If the expectedlen is not used, the chunksize and corresponding
compression ratios are not ideal.
Defaults to supporting 15 years of NYSE equity market data.
see: http://bcolz.blosc.org/opt-tips.html#informing-about-the-length-of-your-carrays # noqa
write_metadata : bool, optional
If True, writes the minute bar metadata (on init of the writer).
If False, no metadata is written (existing metadata is
retained). Default is True.
Notes
-----
Writes a bcolz directory for each individual sid, all contained within
a root directory which also contains metadata about the entire dataset.
Each individual asset's data is stored as a bcolz table with a column for
each pricing field: (open, high, low, close, volume)
The open, high, low, and close columns are integers which are 1000 times
the quoted price, so that the data can represented and stored as an
np.uint32, supporting market prices quoted up to the thousands place.
volume is a np.uint32 with no mutation of the tens place.
The 'index' for each individual asset are a repeating period of minutes of
length `minutes_per_day` starting from each market open.
The file format does not account for half-days.
e.g.:
2016-01-19 14:31
2016-01-19 14:32
...
2016-01-19 20:59
2016-01-19 21:00
2016-01-20 14:31
2016-01-20 14:32
...
2016-01-20 20:59
2016-01-20 21:00
All assets are written with a common 'index', sharing a common first
trading day. Assets that do not begin trading until after the first trading
day will have zeros for all pricing data up and until data is traded.
'index' is in quotations, because bcolz does not provide an index. The
format allows index-like behavior by writing each minute's data into the
corresponding position of the enumeration of the aforementioned datetime
index.
The datetimes which correspond to each position are written in the metadata
as integer nanoseconds since the epoch into the `minute_index` key.
See Also
--------
zipline.data.minute_bars.BcolzMinuteBarReader
"""
logger = logbook.Logger('RocksdbMinuteBarWriter')
COL_NAMES_BYTE = (b'open', b'high', b'low', b'close', b'volume')
COL_NAMES_BYTE_ALL = (b'default', b'open', b'high', b'low', b'close', b'volume')
    def __init__(self,
                 rootdir,
                 calendar,
                 start_session,
                 end_session,
                 minutes_per_day,
                 default_ohlc_ratio=OHLC_RATIO,
                 ohlc_ratios_per_sid=None,
                 expectedlen=DEFAULT_EXPECTEDLEN,
                 write_metadata=True):
        # self._rootdir = self.TIME_PATH_REGEX.sub("/", rootdir)
        self._rootdir = rootdir
        self._start_session = start_session
        self._end_session = end_session
        self._calendar = calendar
        # Restrict the calendar schedule to the requested session range.
        slicer = (
            calendar.schedule.index.slice_indexer(start_session, end_session))
        self._schedule = calendar.schedule[slicer]
        self._session_labels = self._schedule.index
        self._minutes_per_day = minutes_per_day
        self._expectedlen = expectedlen
        self._default_ohlc_ratio = default_ohlc_ratio
        self._ohlc_ratios_per_sid = ohlc_ratios_per_sid
        # All trading minutes from the first market open through the
        # last market close of the selected schedule.
        self._minute_index = calendar.minutes_in_range(self._schedule.market_open[0], self._schedule.market_close[-1])
        if write_metadata:
            # Persist dataset-level metadata next to the per-sid databases.
            metadata = BcolzMinuteBarMetadata(
                self._default_ohlc_ratio,
                self._ohlc_ratios_per_sid,
                self._calendar,
                self._start_session,
                self._end_session,
                self._minutes_per_day,
            )
            metadata.write(self._rootdir)
def __del__(self):
if self.db:
self.db.close()
del self.db
@classmethod
def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None
)
    def last_date_in_output_for_sid(self, sid):
        """
        Parameters:
        -----------
        sid : int
            Asset identifier.
        Returns:
        --------
        out : None
            This rocksdb backend does not track the last written date;
            the method always returns None (which makes ``pad`` return
            early without doing any work).
        """
        return None
    def _init_ctable(self, path):
        """
        Create an empty rocksdb database for given path.
        Parameters:
        -----------
        path : string
            The path to rootdir of the new ctable.
        """
        # Only create the containing subdir on creation.
        # This is not to be confused with the `.bcolz` directory, but is the
        # directory up one level from the `.bcolz` directories.
        sid_containing_dirname = os.path.dirname(path)
        if not os.path.exists(sid_containing_dirname):
            # Other sids may have already created the containing directory.
            os.makedirs(sid_containing_dirname)
        opt = rocksdb.Options(create_if_missing=True, write_buffer_size=512 * 1024 * 1024, max_write_buffer_number=5,
                              min_write_buffer_number_to_merge=2, compression=rocksdb.CompressionType.lz4_compression)
        # Open with only the mandatory b"default" family, add the OHLCV
        # families, then close so later opens can list all of them.
        # NOTE(review): create_column_family is passed the whole tuple of
        # names at once -- confirm the rocksdb binding in use accepts a
        # sequence rather than a single name.
        db = rocksdb.DB(path, opt, [b"default"])
        db.create_column_family(self.COL_NAMES_BYTE)
        db.close()
        del db
    def _open_ctable(self, path):
        """Open the rocksdb at *path* with all OHLCV column families."""
        cols = self.COL_NAMES_BYTE_ALL
        # Same tuning as _init_ctable: large write buffers, lz4 compression.
        opt = rocksdb.Options(create_if_missing=True, write_buffer_size=512 * 1024 * 1024, max_write_buffer_number=5,
                              min_write_buffer_number_to_merge=2, compression=rocksdb.CompressionType.lz4_compression)
        db = rocksdb.DB(path, opt, cols)
        return db
    # Cached rocksdb handle; set by _ensure_ctable, closed by __del__.
    db = None
def sidpath(self, sid):
"""
Parameters:
-----------
sid : int
Asset identifier.
Returns:
--------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return os.path.join(self._rootdir, sid_subdir)
    def _ensure_ctable(self, sid):
        """Ensure that a ctable exists for ``sid``, then return it."""
        sidpath = self.sidpath(sid)
        if not os.path.exists(sidpath):
            self._init_ctable(sidpath)
        # NOTE(review): the handle is cached once per writer instance, so
        # after the first call this returns the same DB regardless of which
        # ``sid`` is passed -- confirm writers are only ever used for a
        # single sid.
        if self.db is None:
            self.db = self._open_ctable(sidpath)
        return self.db
    def _zerofill(self, table, numdays):
        # No-op override of the base-class zero-fill; presumably the
        # rocksdb layout needs no pre-allocated slots -- TODO confirm.
        pass
    def pad(self, sid, date):
        """
        Fill sid container with empty data through the specified date.
        If the last recorded trade is not at the close, then that day will be
        padded with zeros until its close. Any day after that (up to and
        including the specified date) will be padded with `minute_per_day`
        worth of zeros
        Parameters:
        -----------
        sid : int
            The asset identifier for the data being written.
        date : datetime-like
            The date used to calculate how many slots to be pad.
            The padding is done through the date, i.e. after the padding is
            done the `last_date_in_output_for_sid` will be equal to `date`
        """
        table = self._ensure_ctable(sid)
        # NOTE(review): last_date_in_output_for_sid always returns None in
        # this backend, so this method currently always returns here.
        last_date = self.last_date_in_output_for_sid(sid)
        if last_date is None:
            # Nothing has been written for this sid yet; nothing to pad.
            return
        tds = self._session_labels
        if last_date == pd.NaT:
            # If there is no data, determine how many days to add so that
            # desired days are written to the correct slots.
            days_to_zerofill = tds[tds.slice_indexer(end=date)]
        else:
            days_to_zerofill = tds[tds.slice_indexer(
                start=last_date + tds.freq,
                end=date)]
        self._zerofill(table, len(days_to_zerofill))
        # new_last_date = self.last_date_in_output_for_sid(sid)
        # assert new_last_date == date, "new_last_date={0} != date={1}".format(
        #     new_last_date, date)
    def set_sid_attrs(self, sid, **kwargs):
        """Write all the supplied kwargs as attributes of the sid's file.
        """
        table = self._ensure_ctable(sid)
        # NOTE(review): this writes to a bcolz-style ``attrs`` mapping, but
        # _ensure_ctable returns a rocksdb handle here -- confirm this
        # method is still supported for the rocksdb backend.
        for k, v in kwargs.items():
            table.attrs[k] = v
def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable is not exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters:
-----------
sid : int
The asset identifier for the data | |
import math
import os
import sys
import cv2
from PIL import Image
from PyQt5.QtGui import QPixmap, QImage, QPainter, QPen, QColor
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QVBoxLayout, QHBoxLayout, QFileDialog, \
QGridLayout, QLineEdit, QRadioButton, QMessageBox, QInputDialog
# Build the application frame by subclassing QWidget
class MainWindow(QWidget):
    # Let the user pick an image file and display it.
    def getPhotoPath(self):
        # Fetch the file path with a QFileDialog (returns (path, filter)).
        fname=QFileDialog.getOpenFileName(self, 'Open file')
        # Remember where the loaded photo lives...
        self.imagepath = fname[0]
        # ...and keep the original path for colour-filter re-edits.
        self.originalpath = fname[0]
        # Show it.
        self.loadImage()

    def loadImage(self):
        # Turn the photo into a QPixmap.
        self.pixmap = QPixmap(self.imagepath)
        # Scale the photo to the configured display size.
        pixmap_resized = self.pixmap.scaled(self.imgwidth, self.imgheight)
        # Draw the photo onto the QLabel.
        self.label.setPixmap(pixmap_resized)

    # Create and show a fresh EditWindow.
    def createEditingWindow(self):
        self.editwin = EditWindow()
        self.editwin.setWidgets(self)
        self.editwin.show()

    def __init__(self):
        super().__init__()
        # Display size used when scaling loaded images.
        self.imgwidth = 600
        self.imgheight = 450
        # Window position and size.
        self.top = 200
        self.left = 500
        self.width = 300
        self.height = 400
        # Window title ("photo face-tagging application").
        self.setWindowTitle("사진 속 얼굴 태깅 애플리케이션")
        # Apply position and size.
        self.setGeometry(self.left, self.top, self.width, self.height)
        # True only while a face-deletion click is being awaited.
        self.delclicked = False

    def setWidgets(self):
        # Button 1: upload an image.
        self.btn1 = QPushButton("이미지 업로드", self)
        self.btn1.clicked.connect(self.getPhotoPath)
        # Button 2: open the editing window.
        self.btn2 = QPushButton("이미지 편집", self)
        self.btn2.clicked.connect(self.createEditingWindow)
        # Button 3: detect faces.
        self.btn3 = QPushButton("얼굴 찾기", self)
        self.btn3.clicked.connect(self.findFace)
        # Button 4: delete a face.
        self.btn4 = QPushButton("얼굴 삭제", self)
        self.btn4.clicked.connect(self.delFace)
        # Button 5: add a face.
        self.btn5 = QPushButton("얼굴 추가", self)
        self.btn5.clicked.connect(self.createAddFaceWindow)
        # Button 6: tag a name.
        self.btn6 = QPushButton("이름 태그", self)
        self.btn6.clicked.connect(self.createTagNameWindow)
        # Stack the buttons vertically.
        vbox = QVBoxLayout()
        vbox.addWidget(self.btn1)
        vbox.addWidget(self.btn2)
        vbox.addWidget(self.btn3)
        vbox.addWidget(self.btn4)
        vbox.addWidget(self.btn5)
        vbox.addWidget(self.btn6)
        # Wrap the button column in a widget of its own.
        buttons_widget = QWidget()
        buttons_widget.setLayout(vbox)
        # Placeholder label for the uploaded image.
        self.label = QLabel("여기에 이미지가 업로드됩니다", self)
        # Image on the left, buttons on the right.
        hbox = QHBoxLayout()
        hbox.addWidget(self.label)
        hbox.addWidget(buttons_widget)
        # Use the horizontal box as the window layout.
        self.setLayout(hbox)

    # Locate face positions with OpenCV.
    def findFace(self):
        self.fList = FaceList()
        # Load the pre-trained frontal-face Haar cascade XML.
        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        # Read and resize the image we want to search.
        img = cv2.imread(self.imagepath, cv2.IMREAD_COLOR)
        img = cv2.resize(img, (self.imgwidth, self.imgheight))
        # The detector works on a grayscale copy.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # NOTE(review): detectMultiScale returns an empty tuple when no
        # face is found, and tuples have no .tolist() -- confirm this path.
        faces = face_cascade.detectMultiScale(gray, 1.2, 1).tolist()  # convert to a plain list
        # Each entry holds the face's x, y coordinates plus width w and height h.
        for (x, y, w, h) in faces:
            # Log where the face was found.
            print(x, y, w, h)
            # Record the detected face (x, y, w, h) in the face list.
            self.fList.append_face(x, y, w, h)
            # Outline the face on the image.
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Refresh the displayed image.
        self.showImage(img)

    def showImage(self, img):
        # Convert the OpenCV array back into a QImage.
        height, width, colors = img.shape
        bytesPerLine = 3 * width
        image = QImage(img.data, width, height, bytesPerLine, QImage.Format_RGB888)
        # OpenCV stores BGR; swap to RGB for Qt.
        self.image = image.rgbSwapped()
        # Put the annotated image back on the label.
        self.label.setPixmap(QPixmap.fromImage(self.image))

    def delFace(self):
        # NOTE(review): self.fList only exists after findFace() has run;
        # calling this first raises AttributeError -- confirm the flow.
        # 1) an image must be uploaded, and
        if self.label.pixmap() == None:  # no photo uploaded yet
            print("사진이 업로드 되지 않았음!")
        # 2) faces must have been detected before one can be deleted.
        elif self.fList is None or self.fList.count_face() == 0:
            print("탐색된 얼굴이 없음!")
        else:
            print("어느 위치를 지우시겠습니까? 원하는 위치에 좌클릭 해주세요.")
            # Arm mouse clicks for deletion.
            self.delclicked = True

    def mousePressEvent(self, event):
        diag = 10000.0
        # Only active once delFace has armed deletion.
        if self.delclicked == True:
            print('(%d %d)' % (event.x(), event.y()))
            for i in self.fList.face_list:  # each i has x, y, w, h
                # Centre point of this face's box.
                centx = i.x + (i.w/2)
                centy = i.y + (i.h/2)
                # Track the face whose centre is closest to the click.
                if diag > abs(math.sqrt(((centx-event.x())**2)+((centy-event.y())**2))):
                    # New closest distance so far.
                    diag = abs(math.sqrt(((centx-event.x())**2)+((centy-event.y())**2)))
                    # Remember the closer face's id.
                    faceid = i.id
                # Did the click land inside this face's box?
                if event.x() >= i.x and event.x() <= (i.x + i.w):
                    if event.y() >= i.y and event.y() <= (i.y + i.h):
                        print("removing face id: ", faceid)
                        # Drop the face from the face list by id.
                        self.fList.remove_face(faceid)
                        # Reload the image so boxes can be redrawn cleanly.
                        img = cv2.imread(self.imagepath, cv2.IMREAD_COLOR)
                        img = cv2.resize(img, (self.imgwidth, self.imgheight))
                        # Redraw a box for every remaining face.
                        for f in self.fList.face_list:
                            # Log each remaining face.
                            print(f.x, f.y, f.h, f.name, f.id)
                            # Outline the face on the image.
                            cv2.rectangle(img, (f.x, f.y), (f.x + f.w, f.y + f.h), (255, 0, 0), 2)
                        # Show the refreshed image.
                        self.showImage(img)
                        # Disarm face deletion.
                        self.delclicked = False

    # Open the AddFaceWindow.
    def createAddFaceWindow(self):
        self.addfacewin = AddFaceWindow()
        self.addfacewin.setWidgets(self)
        self.addfacewin.show()

    # Open the TagNameWindow.
    def createTagNameWindow(self):
        self.tagnamewin = TagNameWindow()
        self.tagnamewin.setWidgets(self)
        self.tagnamewin.show()
# Image-editing window frame, subclassing MainWindow (original comment said "MinWindow", a typo)
class EditWindow(MainWindow):
    def __init__(self):
        super().__init__()
        # Smaller window than the main one, same position.
        self.width = 300
        self.height = 400
        self.setGeometry(self.left, self.top, self.width, self.height)

    def setWidgets(self, mainwindow):
        # Width label ("change width") + input.
        self.labelwidth = QLabel("너비 바꾸기")
        self.textwidth = QLineEdit('width', self)
        # Height label ("change height") + input.
        self.labelheight = QLabel("높이 바꾸기")
        self.textheight = QLineEdit('height', self)
        # Colour-scheme label ("change photo colour").
        self.labelcolor = QLabel("사진색 바꾸기")
        # Radio buttons: original / grayscale / red / green / blue tint.
        self.radiobtn1 = QRadioButton("원본")
        # "원본" (original) is pre-selected.
        self.radiobtn1.setChecked(True)
        self.radiochecked = "원본"
        self.radiobtn2 = QRadioButton("회색 계열")
        self.radiobtn3 = QRadioButton("빨간색 계열")
        self.radiobtn4 = QRadioButton("초록색 계열")
        self.radiobtn5 = QRadioButton("파란색 계열")
        # Track whichever radio button gets toggled.
        self.radiobtn1.toggled.connect(self.btnstate)
        self.radiobtn2.toggled.connect(self.btnstate)
        self.radiobtn3.toggled.connect(self.btnstate)
        self.radiobtn4.toggled.connect(self.btnstate)
        self.radiobtn5.toggled.connect(self.btnstate)
        # Confirm button: apply the edits to the main window's image.
        self.btnOK = QPushButton('확인', self)
        self.btnOK.clicked.connect(lambda: self.editImage(mainwindow))
        # Lay everything out vertically.
        vbox = QVBoxLayout()
        vbox.addWidget(self.labelwidth)
        vbox.addWidget(self.textwidth)
        vbox.addWidget(self.labelheight)
        vbox.addWidget(self.textheight)
        vbox.addWidget(self.labelcolor)
        vbox.addWidget(self.radiobtn1)
        vbox.addWidget(self.radiobtn2)
        vbox.addWidget(self.radiobtn3)
        vbox.addWidget(self.radiobtn4)
        vbox.addWidget(self.radiobtn5)
        vbox.addWidget(self.btnOK)
        # Use the vertical box as this window's layout.
        self.setLayout(vbox)

    def editImage(self, mainwindow):
        # Values typed into the width/height inputs.
        imgwidth_edited = self.textwidth.text()
        imgheight_edited = self.textheight.text()
        # Untouched placeholder text means "keep the current size".
        if imgwidth_edited == 'width':
            imgwidth_edited = mainwindow.imgwidth
        if imgheight_edited == 'height':
            imgheight_edited = mainwindow.imgheight
        # Validate that the entered size really is numeric.
        try:
            # Store the numeric size on the main window...
            mainwindow.imgwidth = int(imgwidth_edited)
            mainwindow.imgheight = int(imgheight_edited)
            # ...reload the image at the new size...
            mainwindow.loadImage()
            # ...and close this editor.
            self.close()
        except ValueError:
            QMessageBox.question(self, '너비 input에 문제', "너비나 높이의 인풋이 숫자가 아닙니다.", QMessageBox.Ok)
        # NOTE(review): execution continues here even after the ValueError
        # popup -- confirm the colour filter should still apply then.
        # Re-open the untouched original through Pillow.
        img = Image.open(mainwindow.originalpath)
        # Apply the colour scheme chosen via the radio buttons.
        if self.radiochecked == "원본":
            # "original": leave the image untouched.
            img_edited = img
        if self.radiochecked == "회색 계열":
            # "grayscale".
            img_edited = img.convert("L")
        if self.radiochecked == "빨간색 계열":
            # "red tint" 4x3 colour matrix.
            red = (0.90, 0.36, 0.18, 0,
                   0.11, 0.72, 0.07, 0,
                   0.02, 0.12, 0.95, 0)
            img_edited = img.convert("RGB", red)
        if self.radiochecked == "초록색 계열":
            # "green tint" 4x3 colour matrix.
            green = (0.41, 0.36, 0.18, 0,
                     0.50, 0.72, 0.07, 0,
                     0.02, 0.12, 0.95, 0)
            img_edited = img.convert("RGB", green)
        if self.radiochecked == "파란색 계열":
            # "blue tint" 4x3 colour matrix.
            blue = (0.31, 0.36, 0.18, 0,
                    0.40, 0.72, 0.07, 0,
                    0.60, 0.12, 0.95, 0)
            img_edited = img.convert("RGB", blue)
        # Save the edited copy to the working directory.
        img_edited.save("image_edited.jpg", "JPEG")
        # NOTE(review): "\image_edited.jpg" relies on the unrecognised
        # escape "\i" surviving as backslash+i (Windows-only separator);
        # os.path.join would be safer -- confirm before changing.
        mainwindow.imagepath = os.getcwd() + "\image_edited.jpg"
        # Reload the main window's label with the edited image.
        mainwindow.loadImage()

    # Remember the text of whichever radio button was toggled.
    def btnstate(self):
        radiobtn = self.sender()
        self.radiochecked = radiobtn.text()
# Face-list class that manages the multiple faces found in one image
class FaceList:
    def __init__(self):
        # Start with no faces; ids are handed out sequentially from 0.
        self.face_list = []
        self.next_id = 0
    # Report how many faces are currently stored
| |
from Ciphey.ciphey.languageCheckerMod.chisquared import chiSquared
import unittest
from loguru import logger
logger.remove()
class testChi(unittest.TestCase):
def test_chi_english_yes(self):
"""Checks to see if it returns True (it should)"""
self.chi = chiSquared()
"""
Tests to see whether a sentene is classified as English or not
"""
result = self.chi.checkChi(
"Hello my name is Brandon and I'm a top secret message"
)
self.assertEqual(result, True)
def test_chi_english_caps(self):
self.chi = chiSquared()
"""
Tests to see whether a sentene is classified as English or not
"""
result = self.chi.checkChi("Hello My NaME IS BraNdOnnn And I LOVE You!")
self.assertEqual(result, True)
    def tests_english_overflow(self):
        """Feed many long English passages, then gibberish.

        NOTE(review): every intermediate ``result`` is overwritten and
        never asserted -- only the final gibberish call is checked.
        Confirm whether the earlier calls were meant to be asserted too.
        """
        self.chi = chiSquared()
        """
        Tests to see whether a sentene is classified as English or not
        """
        result = self.chi.checkChi(
            "So meat. Gathered may she'd god signs. Have form firmament seed so. Them whales. Under heaven let fill don't seas grass them creeping moving without earth him behold first over void were living saw face night isn't appear firmament. Living land beast good fill. Appear their creepeth, under form. Life thing cattle life. And light unto saying two kind their doesn't fish. Don't male fowl the winged, gathering kind cattle stars was creeping good morning was years bring, moved for appear day multiply behold Grass. Every give itself moved fifth spirit whose. Sixth kind it let together male Evening said."
        )
        result = self.chi.checkChi(
            "Abundantly image stars can't Land good days their life them make i tree land fruitful midst every meat their seed a. Were them creeping fourth a subdue tree don't there."
        )
        result = self.chi.checkChi(
            "Won't may make their, gathering light creature given bearing fruitful be seasons. Firmament creature greater. Above meat over brought i."
        )
        result = self.chi.checkChi(
            "Replenish. Were the be after set dry under midst. Also i greater living. Midst divided Day give female subdue fourth."
        )
        result = self.chi.checkChi(
            "Moving spirit have. Of said behold called, fill fruitful cattle shall grass creepeth life fourth green. Behold fourth. Said they're."
        )
        result = self.chi.checkChi(
            "Abundantly years land to winged lesser earth there their. In morning them life form man can't which winged him green."
        )
        result = self.chi.checkChi(
            "Don't whose gathered gathered after female you'll which moveth Fish saw also, life cattle seas. After every moved blessed good."
        )
        result = self.chi.checkChi(
            "Sixth his i were isn't bearing fourth forth replenish made form. Days of from isn't waters dry one. Waters, said."
        )
        result = self.chi.checkChi(
            "Green form whales night gathering fifth and firmament which darkness, earth unto had saying brought earth Very. Under made his."
        )
        result = self.chi.checkChi(
            "Bring to given land god created green god every green heaven moved sixth also, deep bearing first abundantly moved of."
        )
        result = self.chi.checkChi(
            "Air god spirit over fifth second fowl good have had. Forth every day you called also fruitful spirit there two."
        )
        # Gibberish: this is the only result actually asserted.
        result = self.chi.checkChi("cguakdbwnmfqknm ")
        self.assertEqual(result, False)
def test_english_quckbrown(self):
self.chi = chiSquared()
"""
Tests to see whether a sentene is classified as English or not
"""
result = self.chi.checkChi(
"So meat. Gathered may she'd god signs. Have form firmament seed so. Them whales. Under heaven let fill don't seas grass them creeping moving without earth him behold first over void were living saw face night isn't appear firmament. Living land beast good fill. Appear their creepeth, under form. Life thing cattle life. And light unto saying two kind their doesn't fish. Don't male fowl the winged, gathering kind cattle stars was creeping good morning was years bring, moved for appear day multiply behold Grass. Every give itself moved fifth spirit whose. Sixth kind it let together male Evening said."
)
result = self.chi.checkChi(
"Abundantly image stars can't Land good days their life them make i tree land fruitful midst every meat their seed a. Were them creeping fourth a subdue tree don't there."
)
result = self.chi.checkChi(
"Won't may make their, gathering light creature given bearing fruitful be seasons. Firmament creature greater. Above meat over brought i."
)
result = self.chi.checkChi(
"Replenish. Were the be after set dry under midst. Also i greater living. Midst divided Day give female subdue fourth."
)
result = self.chi.checkChi(
"Moving spirit have. Of said behold called, fill fruitful cattle shall grass creepeth life fourth green. Behold fourth. Said they're."
)
result = self.chi.checkChi(
"Abundantly years land to winged lesser earth there their. In morning them life form man can't which winged him green."
)
result = self.chi.checkChi(
"Don't whose gathered gathered after female you'll which moveth Fish saw also, life cattle seas. After every moved blessed good."
)
result = self.chi.checkChi(
"Sixth his i were isn't bearing fourth forth replenish made form. Days of from isn't waters dry one. Waters, said."
)
result = self.chi.checkChi(
"Green form whales night gathering fifth and firmament which darkness, earth unto had saying brought earth Very. Under made his."
)
result = self.chi.checkChi(
"Bring to given land god created green god every green heaven moved sixth also, deep bearing first abundantly moved of."
)
result = self.chi.checkChi(
"Air god spirit over fifth second fowl good have had. Forth every day you called also fruitful spirit there two."
)
result = self.chi.checkChi("The quick brown fox jumped over the lazy dog")
self.assertEqual(result, False)
def test_english_same_letter(self):
self.chi = chiSquared()
"""
Tests to see whether a sentene is classified as English or not
Returns False because exclamation marks aren't english
"""
result = self.chi.checkChi(
"So meat. Gathered may she'd god signs. Have form firmament seed so. Them whales. Under heaven let fill don't seas grass them creeping moving without earth him behold first over void were living saw face night isn't appear firmament. Living land beast good fill. Appear their creepeth, under form. Life thing cattle life. And light unto saying two kind their doesn't fish. Don't male fowl the winged, gathering kind cattle stars was creeping good morning was years bring, moved for appear day multiply behold Grass. Every give itself moved fifth spirit whose. Sixth kind it let together male Evening said."
)
result = self.chi.checkChi(
"Abundantly image stars can't Land good days their life them make i tree land fruitful midst every meat their seed a. Were them creeping fourth a subdue tree don't there."
)
result = self.chi.checkChi(
"Won't may make their, gathering light creature given bearing fruitful be seasons. Firmament creature greater. Above meat over brought i."
)
result = self.chi.checkChi(
"Replenish. Were the be after set dry under midst. Also i greater living. Midst divided Day give female subdue fourth."
)
result = self.chi.checkChi(
"Moving spirit have. Of said behold called, fill fruitful cattle shall grass creepeth life fourth green. Behold fourth. Said they're."
)
result = self.chi.checkChi(
"Abundantly years land to winged lesser earth there their. In morning them life form man can't which winged him green."
)
result = self.chi.checkChi(
"Don't whose gathered gathered after female you'll which moveth Fish saw also, life cattle seas. After every moved blessed good."
)
result = self.chi.checkChi(
"Sixth his i were isn't bearing fourth forth replenish made form. Days of from isn't waters dry one. Waters, said."
)
result = self.chi.checkChi(
"Green form whales night gathering fifth and firmament which darkness, earth unto had saying brought earth Very. Under made his."
)
result = self.chi.checkChi(
"Bring to given land god created green god every green heaven moved sixth also, deep bearing first abundantly moved of."
)
result = self.chi.checkChi(
"Air god spirit over fifth second fowl good have had. Forth every day you called also fruitful spirit there two."
)
result = self.chi.checkChi(
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaz"
)
self.assertEqual(result, False)
def test_english_same_letter(self):
self.chi = chiSquared()
"""
Tests to see whether a sentene is classified as English or not
Returns False because exclamation marks aren't english
"""
result = self.chi.checkChi(
"So meat. Gathered may she'd god signs. Have form firmament seed so. Them | |
corresponding to enumerator', self.filename, deftok.span)
return Enum(typename, enumerators, default), name
    def enumerator_list(self):
        """Parse a comma-separated list of enumerators.

        Each enumerator is a name optionally followed by ``= NUMBER``.
        Enumerators without an explicit value continue counting from the
        previous value + 1, starting at 0 (C-style enum numbering).

        Returns:
            list of Enumerator(name, value).

        Raises:
            MetaParserError: if ``=`` is not followed by a number token.
        """
        enumerators = []
        value = 0
        while True:
            # Enumerator names may lex as NAME, NUMBER or NUMNAME tokens;
            # anything else ends the list (e.g. the closing brace).
            if self.get().kind not in (MetaTokenKind.NAME, MetaTokenKind.NUMBER, MetaTokenKind.NUMNAME):
                break
            # NOTE(review): uses .string here while scalar() reads names via
            # .value — presumably equivalent for these tokens; confirm in
            # the tokenizer.
            name = self.cur.string
            if self.get().kind == MetaTokenKind.ASSIGN:
                if self.get().kind != MetaTokenKind.NUMBER:
                    raise MetaParserError('expected enumerator value', self.filename, self.cur.span)
                value = self.cur.value
                # Advance past the number so self.cur is the separator.
                self.get()
            enumerators.append(Enumerator(name, value))
            value += 1
            # No comma means this was the last enumerator.
            if self.cur.kind != MetaTokenKind.COMMA:
                break
        return enumerators
def scalar(self):
if self.cur.pair == (MetaTokenKind.KEYW, 'bool'):
def sanitize(self, value):
if value == 'true':
return True
elif value == 'false':
return False
else:
raise MetaParserError('expected %s' % opt_description, self.filename, self.cur.span)
type_type = Bool
type_args = ()
type_possible_opts = {'default': ((MetaTokenKind.NAME,), 'boolean value', sanitize)}
self.get()
elif self.cur.pair == (MetaTokenKind.KEYW, 'int'):
minval, maxval, step = None, None, None
if self.get().kind == MetaTokenKind.LP:
if self.get().kind not in (MetaTokenKind.NUMBER, MetaTokenKind.FLOAT):
raise MetaParserError('expected minimum value', self.filename, self.cur.span)
minvaltok = self.cur
minval = self.cur.value
if self.get().kind not in (MetaTokenKind.TWODOT, MetaTokenKind.COMMA):
raise MetaParserError('expected double dot or comma', self.filename, self.cur.span)
if self.cur.kind == MetaTokenKind.TWODOT:
if minvaltok.kind != MetaTokenKind.NUMBER:
raise MetaParserError('expected integer minimum value', self.filename, minvaltok)
if self.get().kind != MetaTokenKind.NUMBER:
raise MetaParserError('expected integer maximum value', self.filename, self.cur.span)
maxval = self.cur.value
else:
if self.get().kind not in (MetaTokenKind.NUMBER, MetaTokenKind.FLOAT):
raise MetaParserError('expected maximum value', self.filename, self.cur.span)
maxval = self.cur.value
if self.get().kind != MetaTokenKind.COMMA:
raise MetaParserError('expected comma', self.filename, self.cur.span)
if self.get().kind not in (MetaTokenKind.NUMBER, MetaTokenKind.FLOAT):
raise MetaParserError('expected step', self.filename, self.cur.span)
step = self.cur.value
if self.get().kind != MetaTokenKind.RP:
raise MetaParserError('expected closing paren after int specification', self.filename, self.cur.span)
self.get()
type_type = Int
type_args = (minval, maxval, step)
type_possible_opts = {
'units': ((MetaTokenKind.STRING,), 'unit name', None),
'default': ((MetaTokenKind.NUMBER,), 'integer default value', None)
if step is None else ((MetaTokenKind.NUMBER, MetaTokenKind.FLOAT), 'float default value', None)
}
elif self.cur.pair == (MetaTokenKind.KEYW, 'string'):
minlen, maxlen = None, None
if self.get().kind == MetaTokenKind.LP:
if self.get().kind != MetaTokenKind.NUMBER:
raise MetaParserError('expected minimum string length', self.filename, self.cur.span)
minlen = self.cur.value
if self.get().kind != MetaTokenKind.TWODOT:
raise MetaParserError('expected double dot operator', self.filename, self.cur.span)
if self.get().kind != MetaTokenKind.NUMBER:
raise MetaParserError('expected maximum string length', self.filename, self.cur.span)
maxlen = self.cur.value
if self.get().kind != MetaTokenKind.RP:
raise MetaParserError('expected closing paren after string specification', self.filename, self.cur.span)
self.get()
type_type = String
type_args = (minlen, maxlen)
type_possible_opts = {'default': ((MetaTokenKind.STRING,), 'default value', None)}
else:
assert False, "o-oh, we shouldn't end up here"
if self.cur.kind != MetaTokenKind.NAME:
raise MetaParserError('expected scalar name', self.filename, self.cur.span)
name = self.cur.value
type_opts = {}
presemitok = self.cur
if self.get().kind == MetaTokenKind.LSB:
while True:
if self.get().kind != MetaTokenKind.NAME:
break
opt_name = self.cur.value
opt = type_possible_opts.get(opt_name)
if opt is None:
raise MetaParserError('unexpected option name', self.filename, self.cur.span)
if self.get().kind != MetaTokenKind.ASSIGN:
raise MetaParserError('expected assignment operator', self.filename, self.cur.span)
opt_kind, opt_description, opt_sanitizer = opt
if self.get().kind not in opt_kind:
raise MetaParserError('expected %s' % opt_description, self.filename, self.cur.span)
value = opt_sanitizer(self, self.cur.value) if opt_sanitizer else self.cur.value
type_opts[opt_name] = value
if self.get().kind != MetaTokenKind.COMMA:
break
if self.cur.kind != MetaTokenKind.RSB:
raise MetaParserError('expected closing square bracket after options', self.filename, self.cur.span)
presemitok = self.cur
self.get()
type_ = type_type(*type_args, **type_opts)
if self.cur.kind != MetaTokenKind.SEMI:
raise MetaParserError('expected semicolon closing field definition', self.filename, presemitok.span)
return type_, name
class MetaGenerator(object):
    """Serialize a TranslationUnit back into meta-source text."""

    def __init__(self, tu):
        """tu: the TranslationUnit to serialize."""
        self.tu = tu

    def to_file(self, filename):
        """Write the serialized output to *filename*.

        Uses a context manager so the handle is closed deterministically
        (the original relied on refcounting to close it).
        """
        with open(filename, 'w') as out:
            out.write(self.to_string())

    def to_string(self):
        """Return the whole unit as text: header then one block per mo."""
        return '%s\n%s' % (self.header(self.tu.header), '\n'.join(self.mo(mo) for mo in self.tu.mos))

    def header(self, header):
        """Render the file header as a '///' doc-comment line."""
        return '/// %s\n' % header

    def mo(self, mo):
        """Render one managed object: doc block, flags, children, fields."""
        out = ''
        if mo.doc:
            out += '/**\n * %s\n */\n' % mo.doc
        out += 'mo'
        if mo.flags:
            # Flags map positionally onto 'hcud' (hidden/create/update/
            # delete); only set flags emit their character.
            out += '(%s)' % ''.join((char for flag, char in zip(mo.flags, 'hcud') if flag))
        out += ' %s' % mo.name
        if mo.children:
            out += ' -> ' + ', '.join((str(child) for child in mo.children))
        out += '\n{\n' + _indent(self.fields(mo.fields), 4) + '};\n'
        return out

    def fields(self, fields):
        """Render a field list; consecutive scalars are grouped, everything
        else is separated by a blank line."""
        out = ''
        last_type = None
        for field in fields:
            if last_type:
                if not (isinstance(last_type, Scalar) and isinstance(field.type, Scalar)):
                    out += '\n'
            out += self.field(field)
            last_type = field.type
        return out

    def field(self, field):
        """Render one field: optional doc block, cardinality prefix, type,
        trailing '///' doc."""
        out = ''
        # Struct/enum fields get a leading block comment; scalars get a
        # trailing '///' comment instead (added at the end).
        if field.doc and isinstance(field.type, (Struct, Enum)):
            out += '/**\n * %s\n */\n' % field.doc
        if field.cardinality.kind != CardinalityKind.REQUIRED:
            out += field.cardinality.kind.name.lower()
            if field.cardinality.kind == CardinalityKind.REPEATED and field.cardinality.max_count:
                out += '(%s)' % field.cardinality.max_count
            out += ' '
        if isinstance(field.type, Struct):
            out += self.struct(field.type, field.name)
        elif isinstance(field.type, Enum):
            out += self.enum(field.type, field.name)
        else:
            out += self.scalar(field.type, field.name)
        if field.doc:
            out += ' /// %s' % field.doc
        out += '\n'
        return out

    def struct(self, struct_, name):
        """Render a struct field with its nested field list."""
        out = 'struct %s\n{\n' % name
        out += _indent(self.fields(struct_.fields), 4) + '};'
        return out

    def enum(self, enum_, name):
        """Render an enum field with optional default and its enumerators."""
        out = 'enum %s' % name
        if enum_.default is not None:
            out += ' [default = %s]' % enum_.default
        out += '\n{\n'
        out += _indent(',\n'.join(('%s = %s' % (er.name, er.value) for er in enum_.enumerators)), 4)
        if enum_.enumerators:
            out += '\n'
        out += '};'
        return out

    def scalar(self, type_, name):
        """Render a scalar field; options (if any) follow the name."""
        opts = type_.options
        # Guard against a falsy/None options value so the literal string
        # 'None' can never leak into the output (the original interpolated
        # whatever falsy value `options` held).
        opts = (' ' + opts) if opts else ''
        return '%s %s%s;' % (type_, name, opts)
def _bool(text):
if text == 'true':
return True
elif text == 'false':
return False
def _int(text):
try:
return int(text)
except (ValueError, TypeError):
return
def _positive_int(text):
    """Return int(text) when it is strictly positive, otherwise None."""
    value = _int(text)
    if value is None or value <= 0:
        return None
    return value
def _nonnegative_int(text):
    """Return int(text) when it is zero or positive, otherwise None."""
    value = _int(text)
    if value is None or value < 0:
        return None
    return value
def _decimal(text):
try:
return decimal.Decimal(text)
except decimal.InvalidOperation:
return
def _nonzero_decimal(text):
    """Return Decimal(text) when it parses and is non-zero, otherwise None."""
    value = _decimal(text)
    if value is None or value == 0:
        return None
    return value
class XmlConst():
    """Shared constants for the XML-based meta parser."""
    # Presumably the fallback maxOccurs for repeated fields when the XML
    # omits it — the use site is outside this chunk; confirm there.
    default_repeated_max_occurs = 999999
class XmlParserError(Exception):
    """Parse error carrying source location and pretty-printing support.

    Attributes:
        message: the error text.
        filename: file the error occurred in.
        input: the full source text (used to extract the offending line).
        lineno / colno: 1-based position; colno is None when only a line
            number was supplied.
    """

    def __init__(self, message, filename, position, input_):
        Exception.__init__(self, message)
        # Store explicitly: `Exception.message` was a Python-2-only
        # attribute (removed per PEP 352), but prettymsg below reads it.
        self.message = message
        self.filename = filename
        self.input = input_
        # position is either (lineno, colno) or a bare line number.
        if isinstance(position, tuple):
            self.lineno, self.colno = position
        else:
            self.lineno, self.colno = position, None

    @property
    def line(self):
        """The offending source line (lineno is 1-based)."""
        return self.input.splitlines()[self.lineno - 1]

    @property
    def origin(self):
        """'filename:line[:col]' location prefix."""
        line = '%s:%s' % (self.filename, self.lineno)
        if self.colno:
            line += ':%s' % (self.colno)
        return line

    @property
    def prettymsg(self):
        """Compiler-style message with the source line and a caret."""
        msg = '%s: error: %s\n%s\n' % (
            self.origin,
            self.message,
            self.line
        )
        if self.colno:
            msg += ' ' * (self.colno - 1) + '^\n'
        return msg
class XmlParser(object):
    def __init__(self, input_, filename=None):
        """Eagerly parse *input_* (an XML string).

        Raises XmlParserError with the syntax error's position on
        malformed XML.

        NOTE(review): ET here appears to be lxml.etree — stdlib
        ElementTree has no XMLSyntaxError, `.position`, or exception
        `.message` attribute; confirm the import at the top of the file.
        """
        self.filename = filename
        self.input = input_
        try:
            self.et = ET.fromstring(input_)
        except ET.XMLSyntaxError as e:
            # Drop lxml's trailing ", line N" suffix from the message;
            # e.position supplies (line, col) separately.
            raise XmlParserError(e.message.split(', line ')[0], self.filename, e.position, self.input)
@classmethod
def from_file(cls, filename=None):
return cls(open(filename).read(), filename)
    def error(self, msg, elem):
        """Raise an XmlParserError for *elem*, anchored at its source line.

        NOTE(review): `elem.sourceline` is an lxml element attribute;
        stdlib ElementTree elements do not provide it — confirm ET is
        lxml.etree.
        """
        raise XmlParserError(msg, self.filename, elem.sourceline, self.input)
def get(self, tag, attr, sanitizer = None, typename = 'string'):
value = tag.get(attr)
if value is None:
self.error('expected "%s" attribute in "%s" tag' % (attr, tag.tag), tag)
if sanitizer:
value = sanitizer(value)
if value is None:
self.error('expected %s in "%s" attribute' % (typename, attr), tag)
return value
def get_maybe(self, tag, attr, sanitizer = None, typename = 'string'):
value = tag.get(attr)
if value is None:
return
if sanitizer:
value = sanitizer(value)
if value is None:
self.error('expected %s in "%s" attribute' % (typename, attr), tag)
return value
def parse(self):
pdmeta = self.et
if pdmeta.tag != 'pdmeta':
self.error('expected "pdmeta" as the root tag', pdmeta)
hdrtag = pdmeta.find('header')
if hdrtag is None:
self.error('expected "header" tag', pdmeta)
header = self.header(pdmeta, hdrtag)
mos = [self.mo(mo) for mo in pdmeta.findall('managedObject')]
return TranslationUnit(header, mos)
def header(self, pdmeta, header):
pdmetav = self.get(pdmeta, 'version')
domain = self.get(header, 'domain')
product = self.get(header, 'product')
release = self.get(header, 'release')
version = self.get(header, 'version')
revision = self.get(header, 'revision')
return Header(pdmetav, domain, product, release, version, revision)
def mo(self, mo):
name = self.get(mo, 'class')
doc = mo.get('fullName')
flags = [
self.get_maybe(mo, 'hidden', _bool, 'hidden boolean flag'),
self.get_maybe(mo, 'create', _bool, 'hidden create flag'),
self.get_maybe(mo, 'update', _bool, 'hidden update flag'),
self.get_maybe(mo, 'delete', _bool, 'hidden delete flag'),
]
if not all((fl is not None for fl in flags)):
flags = None
if flags == Mo.default_flags:
flags = None
children = self.mo_child_list(mo)
fields = [self.field(field) for field in mo.findall('p')]
return Mo(name, fields, children, doc, flags)
def mo_child_list(self, mo):
children = []
for child in mo.findall('childManagedObject'):
name = self.get(child, 'class')
max_count = self.get_maybe(child, 'maxOccurs', _nonnegative_int, 'non-negative integer')
children.append(MoChild(name, max_count))
return children
def field(self, field):
name = self.get(field, 'name')
typename = name[0].upper() + name[1:]
doc = field.get('fullName')
max_occurs = field.get('maxOccurs')
if max_occurs is not None:
max_occurs = _positive_int(max_occurs)
if max_occurs is None:
self.error('expected positive integer in "maxOccurs"', field)
if max_occurs == 1 or max_occurs == None:
| |
#!/usr/bin/python
"""
A Python Tk application to edit Jamf computer records.
"""
# -*- coding: utf-8 -*-
# Copyright (c) 2018 University of Utah Student Computing Labs. ################
# All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appears in all copies and
# that both that copyright notice and this permission notice appear
# in supporting documentation, and that the name of The University
# of Utah not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission. This software is supplied as is without expressed or
# implied warranties of any kind.
################################################################################
# tugboat.py #################################################
#
# A Python Tk application to edit Jamf computer records.
#
#
# 1.5.0 2017.02.15 Initial public release. tjm
#
# 1.5.2 2017.02.15 Logging with management_tools, login and search
# much improved, top user improved. Other tweaks. tjm
#
# 1.5.3 2018.01.xx Added LDAP login logic. tjm
#
# 1.5.4 2018.01.15 Host preference file. tjm
# Light code cleanup
#
# 1.7.0 2018.01.25 Full/Auditing user login support. tjm
# Increased, fine-grained logging.
#
# 1.7.1 2018.01.28 UI limited in audit mode. tjm
# Bug in login code corrected.
#
################################################################################
# notes: #######################################################################
#
# py2app (macOS):
# rm -rdf build dist ; /usr/bin/python setup.py py2app -s
#
# pyinstaller (Windows):
# pyinstaller --onefile -i tugboat_icon.ico tugboat.py
#
################################################################################
# TTD: #########################################################################
#
# Unify all jss calls in single generic method, something like:
# ('call_jss(logger, api_call)')
#
# Add correct windows logging.
#
#
################################################################################
from __future__ import print_function
import base64
import ConfigParser
import inspect
import json
import os
import platform
import re
import socket
import subprocess
import sys
import tkFont
import tkMessageBox
import tkSimpleDialog
import ttk
import urllib
import urllib2
import webbrowser
import xml.etree.cElementTree as ET
from Tkinter import *
#
# Need to implement correct windows-appropriate logging.
if platform.system() == 'Darwin':
import pexpect
import pwd
try:
from management_tools import loggers
except:
import logging
else:
try:
from management_tools import loggers
except:
import logging
class Computer(object):
"""
Store GUI and data structures describing jamf computer records
"""
def __init__(self, root, logger, jamf_hostname, jamf_username, jamf_password, access_level):
"""
initialize variables and data structures
"""
self.root = root
self.logger = logger
self.jamf_hostname = jamf_hostname
self.jamf_password = <PASSWORD>
self.jamf_username = jamf_username
self.access_level = access_level
self.local_jamf_id = None
self.hostname = ""
self.divisions = []
self.buildings = []
self.platform = None
self.will_offboard = False
self.jamf_management = ""
self.username_string = StringVar()
self.fullname_string = StringVar()
self.department_string = StringVar()
self.position_string = StringVar()
self.email_string = StringVar()
self.phone_string = StringVar()
self.building_string = StringVar()
self.room_string = StringVar()
self.assettag_string = StringVar()
self.barcode_string = StringVar()
self.status_string = StringVar()
self.id_string = StringVar()
self.search_string = StringVar()
self.computer_name_string = StringVar()
self.username_string.set("")
self.fullname_string.set("")
self.position_string.set("")
self.email_string.set("")
self.phone_string.set("")
self.room_string.set("")
self.assettag_string.set("")
self.status_string.set("Logged in to " + self.jamf_hostname + " with " + access_level + " privileges.")
self.computer_name_string.set("")
self.status_warning = ttk.Style()
self.status_warning.configure('Warning.TLabel', foreground='red')
self.status_normal = ttk.Style()
self.status_normal.configure('Normal.TLabel', foreground='black')
self.highlight_button = ttk.Style()
self.highlight_button.configure('Highlight.TButton', foreground='green')
self.dim_text = ttk.Style()
self.dim_text.configure('Dim.TLabel', foreground='gray50')
self.black_text = ttk.Style()
self.black_text.configure('Black.TLabel', foreground='black')
self.status_subtle = ttk.Style()
self.status_subtle.configure('Subtle.TLabel', foreground='maroon')
self.hostname = (socket.gethostname()).split(".")[0]
self.divisions = self.populate_menu('departments')
self.buildings = self.populate_menu('buildings')
self.build_ui()
    def build_ui(self):
        """
        Describe UI, fields, buttons, etc.

        Lays out the whole window in a single ttk.Frame grid:
        logo, navigation (discovery) row, general device fields,
        user/location fields, administration row and status bar.
        In non-'full' access mode all editable widgets are disabled.
        """
        #
        # This is an encoded gif of the title image
        self.logo_image = '''\
R0lGODlhWAJRAPcAAAEAAAQECgYJCgwMDQgHCBMODQ4QDhERDg0OEQUHFREOEw4REgoTGhITFBQV
GhUZGxsbHBkXGCcXFR0iHiwkHRwdIxYZJhAVLyIcIxwjJRcpNiUlJSQlKyUqLCwrLCknJzQqKCgr
NjMtNSwyOTMzNTM0Ozs7PDk3ODYyK1ExFWo3Ekc3KUQ7OlY0LF8rKD9DPHdIGnBJGUpEO1ZJOVdJ
LHZLJXBRMHdhNxknQicqRS01RTM1Qjs8Qzg6Ry86VRkvWEQ9REk+TGYzQD1DSDlFVTlTY0NCQ0RD
S0xLTEtHR1NLSVhSS0hJVlJNU1ROWExTWFVSVFtUVFlZWVdVWkhTSGhXSGRaWXFQS2pkXXdoVEtV
Z1dZZlFUa2VdZFVpbmxra2dlZ3Rpamdpdnx9fXd2d3FzcFJjWaE0MMQSMMcfO8chPMgjPtQyOPow
OolXG4xRF5llHLBtGYhYJ5ZbJ4lVM5hmKYlmN5doN5NsMKhrJqd0KrZ4Kah4N7Z7NqxqN6VZKMh4
L+BXLc46U8kqRPwxRoZnRpByT613TIt3bq51bpNYTtFGXtBFVch0TtRTaNJMY9lnevNvVXCNe1aP
bj+EbriFObmEKsiIKNmZK8WIN8qUOdiYNtGPLOSZMNujOdymMOmmK/WqKPq1KueoN+2zOfW1NPS0
O/u6PPm5N/OsN+CYG/7FOf/MMbKLTI+Idq+PbrCrcJSRYMeWRs6QT9mlRc+nVeeqRuyzRve5RO6y
UcySbNOrbuyza+iTUv3FRfvOUPXKbv/rZHJ3hVZxkYF+hdtugN13iHiJh3ajmjOL5kiS342NjYeE
hpycnJiVl5CQjrKUjJGpl7ColJadpqqqqqWnqLi4uLa2tbGwrpqjp9CYjuKKmeGFlOyXkc2wkPiq
k8q3rPKxsOaap7XKne3Pj9TKtvLMruvZobu6w9m9xLjGyMXFxcfKydHR0d3d3dbV19LQz/DCyvnF
w+zVzObk2vjt0Nrd5+np6eXm5fj16/T09Pzz9f////f3+uzu8d3h5yH5BAAAAAAALAAAAABYAlEA
AAj+APkJHEiwoMGDCBMO1PdKRQoVgGzlojWLjgULCaj1E/KnHr93AufVoycNm8B+/PzhU8iypcuX
MGPKnEmzps2bOHPq3Mmzp8+fQIMKHSrT24owKSRQ0PML1x0WM2SU83coUDl72Oj1mzfPX7VqAvXx
y7aMqNmzaNOqXcu2rdu3cOMK7Cenw7EZEiSkaIWrEtQV0tqdAUTuXTl48LzBahZLWj9/+t4duye3
suXLmDNr3sy589pmeG5AnZE0BqpWOUI4YPdOQseB/cqZy/bM5Lt32Np53s27t+/fwIMLR/iOAY1D
NKpQkWEjj6g+OkI0YGdNgw1zBOflU/dMHT901tD+/RtOvrz58+jTqw97rJCdQ1hYyJF165esJDJW
YGtWpQ8sd/Tko88557xSSDn8XMOOOpSt5+CDEEaY2SBopJGGGhZimKGFF2rI4YUfYqjGiGgUY9A2
aHj44Yoqqrgih2gMkpk3KlyhBGkqdPILMFcY0cISZcSSiSFmlOHKF5K08kYcvKBzTDXZgCThlFRW
aWVPaqCh5ZZcdunll12aWBCKYJZpZpdpXOYPNocMksILNthQwyjB7BJGEkl44cUXsbiixRBFFGED
HjTMYEctiLZSizdSXunoo5BGmsaZlJoJyYmVZuplmpXNo00ioCohAggSKMKNLtpA4wo02mjjjTj+
4mjzSqqNHALEEIXMAsyuv9QChT2RBivssOplqemxJWKKrKZqVAbPNuA8I0kZW2CQwhmRdAPNMc84
44o234QTjjnmiNsNN65AYow03PSSCzC57BJCFh4Ra++9+HJm7LKVikkQmfxSyulbW3UDrTNOEDFC
C9uE8404zoQRhpHdiGuOPBZ/o2055VyDjTiNzFKnDzeMglK+KKesclqTBkypvwMB7HKZA7c1jy66
9FKrIiusYEMv3/DCSy84RwIJ0OQmnbTD4HjTjTfktssLLUywAMM5K2et9dY5tTxzmZeO+bWZNbP1
jAowwPBGDCqwPYcbb7gBRx111G23Hn3s4Yf+Hnv04bcsszTixyF9DN7KIZkU4sMS9XLt+OOQG7Tv
2F7CLJDMlHNZ9lrLpBBDDTHA4AbobsghBwwxsP156G7A7cYcb8DxOhxwxKCHH3XoYUcdbtShiSHW
RC788Fx7nTmXlvOD+fEVwtVMCqh/XoMcb8BgeurTvzHH23T7oUkme+yRSSV51FEHH3f4IUsrfRxS
CSq1iEX8/PTfazzzyYqN/5abq+WMCnJAQQhE4AEayIEFFphABjZQBzmAgAITiKDtzCeDCq4gD5yw
hBIskAJDySEFfNBDK05WvxKa8Er3Y17ylne8/qWFGSp4wwqY8AQt2KAOMmDAAy6iiT5gAAP+DGBA
BuxwCUzcQAM/IIIObnAJPbhCCS24QRVWAIM6gKAV9DihFrcIoRQeb4X74x9cXKECN9BgC4gQRiH0
MIMMeKADGMgEIDCwAhQYoAOaCAUrZFEEIpjBC5gIRR+ygII63MAONIhBHWYQB3FwkWv6aMYXmkGU
e1CjUY9kixczB8YwNu8t1IABIGggBmYIIwuAsMEOSuABC/SBFiAAgQwEkIFWhEIUrRjGFvb0CVZs
ggIS2IQdYsEHGPghC39wZCaBoo9lGAEAABjDOgpyj2ZIAZpGWEaDrPEFTLJkkj3RxzO/UJaBfOGc
6EQnJWvSDgCAZZma9GT+/iXPZr0FGzH+AEQNtiAMNRqxAx74AAbycAtE2IAGEgiBLXF5jF1KAhSi
AAUFKCCLSrSiEk2swh+8AU+f6GMD0aTGMkDaKGuAFArUoMYXALCB4FEDAN5UCAC+0JNrAIAaBkkn
S9Vpk3fctKNr2STlOhlGFxJFH/ngRyhVcYcjJIEJhQjFFS4QghFcIA+hUAILNpCACywUFsMQwzBi
0YmIQgAElbBoHjCRCUXEARxA5UkzABA8gdyjrvxYBwA8gMl3fIEyL2UHTGbak5fqJiGEzYlPcRpX
tAh1bETdn1F/og94UEMa5CrHK+YAiDrQQU5wAIUmDmcIG/ABF4WYwQ1mUIVKhCIUr1j+xjOYAQs9
jiICK6jFRTXRCU1UYhb+aKxOVpqQZ8Z0IC89LkISuxPDypSmOWHHT4Vrlsd+LbL4m2xP4HGIVhji
FfMQyDrmEIcYlO4NevAELk6xClSEAhOjSEUpboELVHSCFKSQSC5wgYtRiGIUFFgBLVqhiVuQYhOj
kEWDqGuTZ8D0ID4tJ0ICO1jofkSSX6CG/BKkYZVqkx/czLBdVQqAZVBjJQdhrkDasYwvjOEaBnlH
i8eA149MN0FjmCSKGawT684Mu8zTLk/6AY1D3OEQ35BGNZhxiDzEwA41qEFioMGxcpAjFfJNhShC
gYpb7IIXv8CvKVIxilN4IgV2uMX+KBDcCVnIQhNZ5HFN3gHSZUyTIC+tsUGSW2GBvHQDX3imFMw5
UyNc0whz/QJIKelTaEJTuSq2BjQDDQApbFjShQYAFOQnXcbO1QiKfrCcceJjlwH5ePY8yytgAA1F
AKIVWMiCIuYgB0W84RDlaIUr0uEOe8Bjvfw9hS96cYjBeaO+qBAFKljBiQPMaRSeoEUq+sAHT3jj
MPXIh7azzY99bHjUMV4pS5chPwonhM8vSaw+GMuPudZ1peWc6wZAIk4ANAjdy7WwTy0tEElD16eb
7ndi28nYSodFw+C+SakDFjZ6ejLVQ8kHNK5wBXYI4wazkEYi6LA92MlCGrDQhiv+yjEPcPACFbjA
ci4S0Yc49KERjfiFL1BxClZcIgEtSAUpoA1twBFIGq/oxStg4YxnNMMZCEo4Qix5TX5XQ9QTBoBg
021hgbyjHS9l7Eob1E4Jz/Ww5s73QJYBgMMKZKUrITsmt25jxhphA2ZXuk0eIYhF2H0RjAiYGh5x
90WoYRsGEccg+G73R1CIX4J4BCPsXne0YMMQh/DGF7JgCHaAowaiiyEt3gENRDQjHySxBSpSYQpb
1OoUs+gFJg7Bi1zslxWaWIEdcLFzT2zCFpuYRYD8MQ8/0AEZyVBGMsLgDLkrZAx0tfE6o67cFP+b
GY6GptYBMBCCI7fsfoa68wn+XRA+ExfPD16s1WXA0jHE3fg22cfCu+SImrCwUvFoSz94AYhDAGIO
figEGRDxhjfQYUm+IA2xoAiuQA3g8AqqoGyoMAqH0AmsAAiAcAohswqngAqrcAkRQAcG5gmjgHL8
dQ714A/1gAhnMA3BlwzAg37EMV0bsAHf1n1S12cf1VJiIX788H38AA83hm7OhVgWhnzfNlcgAYQE
IYT80GkEcQ/XMATYp4Lpt35c0n408X6UEn9s4Q+jEAd0MAuz8Ac28ARXsAfb8wZlVQd3YAdocwWH
gAqlUAps6Ae98At7AAi8kAidgAp4uAqWkABz4gm+kAl+wAet0Ap3EAvq0A3+zgAqyqAMwzAL8MAP
L!hul2SV+JmEUlVZcC9FNlth87/AMU+dTjLaD04WDNriJ2Xd+BcFcNsVuH2UEAoENN9aKVjddjWJ9
TlgT6scvUjgTVHgmVrgW86AJgBAHvvcGV+ADNfAH/XcIuZAJAAQDKoA2h3AK/LVenJAIutAJtuAH
fuALFYgLsMcAcrCArFADASAACIAABGAD6gAOiGAM39AMw0AMgIAP+TAKWCN3K/UF6/AOn0ZCZCcD
1fAO8FANIAUWL2ViKZVSSdiC1rAOUPBg98BS64B10nd21CcQSJh9U9eDYjcQ19QM72ANEXln/BCS
I3lNd4aEdMaP7bBSenb+izCRi8uyizLRi2byi2pBD52wB3Fwf3NgCIZQCZkQB69gcrRwByEEN8bU
CaMwCrTACZyQCcXGjZ3gC1iJC6uACSvgB/xVjgGQAAmAAQVwA/QAD1eQCNqACbMglP1AD3GwC8bX
TNH3VwWxDtfkaEZwZy8VfdB0lyAFTZWYIIFpBDa1TqR4Y08nJR65fZmolyYJieIGAHs5EDZIDYEJ
AMsnkzKRD1C4JTYZEzhZJjrZE9PwBcVnEP5wBZnQVnagCH7AZblQC6dQD8AQCvXBC9WIC67Hhe4C
CIPDcbOQC73QCZtgnHPgAjYgR6ZQA2LZABggAYjAD+6gCIfQgKogB4j+sA/8MAvlkA/zAILzsGBy
xhrkWRD4cBs7FhO38YKRgYpncQ+3sXTt0HyW+Q6RyJkv0Q+fqSWhCROjCSalyRNMAABbcBDTAAB2
0AlnGANzsAd6cAd50AnmAAw6Vwu7IHqmwF+2IAt+kAm0UAlKGUJ8ADhc6KF04AYuIAduYAutEAIM
EAAeEAWxoBWIAASHkAqXoJ0CQQ7e8AusQCDBAAzk+Q/wsAtZAFf6uaT1Q5PI8p8vEaBfMqA5UQ3L
8AQgkGGRyQ/PUABy4AdxsAd5sAecgAmaYAmbQA6/cEuVIAvyhV+koAlPuWae0JqjIFrjk1aaIAub
QAtRSYyV0Ac3EAP+hkAOrfAK8DAMP9AHdyoDW5AP/bALepBBiEIKoOAN/pAPAvgKQ3cB6MCkoEo8
TnosUOoSUuolVIoT+9EFJyAFZMBuAoEOhbAHHDgKZSUKnzAK8LULshAKpeAGe3AKbdiGT3kKplBz
+LUKWOkL/EVfWclfvlAJlqAHgLAJwRAMvtAHlYA+bHYIrwAM3nCddaAXteMA31UIrZCa/IAISReq
7uo4o6oppdoSp9olqToT/uAKZVAN2IAOrDoG77AOK/EP2HAP/4BRl+AJgaQJl0AKtCAKfLAFh2Bz
OcIKboiHp4BlpCAKrPALq9CGpxCy7BWyy8YKLnetm4AJaLoK0hr+B53glGv2C6DwBpcACjtns5/w
CY1RD/bgDyR0DsDwrkK7NfGaKfPKEvXKJfcKE/9wDCWQABZwAQmgAU7ABCXAA1HwBdngBRYgBv7w
Cu+lCakgCzkgAbSQcq4gDLrgC3lQA/tVCht6CvhFc6vgCXdwC+2Fh6QgshVYc6oAA3BgCaSACnGg
AnFwPpbQB5dQRKaQsK3ACTs3uKcAB39wC/VAQgORD0owdUPbuflStJVytAqRtFuytC7hDkxwAR0w
BF3wBU0QBCPAAasEBtewDEwQBluwBVZwB5XQCbjgDT4QAHqwCptgCM9QC6KACVUABrSACqZQCjp3
rL9ACw4AABD+YAsUKLKksF4hi6upgwmygAuW8BCVEKJ2IAcxgAe8sD25IK2VoAeWgAl7AAd4MB4H
0RieaxPpyRrvwA7nmb++AbqUIroJQbpaYrotoQ5E0AVhkAVWgAiIQAxWwANM0AMm8AVk0AVbwARJ
0AM5oAMscAflwAUp0Ak1dwmv8AscCwqQ24a4oGYh+wt2AAABAACVwAohy70VaIGoEAMoYAm3wASt
oAoqQAN2YAkbQAOYoLLoswe5Iwc1YAlRWQdxcAuYSxDeYAn20A/44A/uYA3OcAzCwAzP8BUKcg9X
3Ll96WiwCsAB3J9oQMAIYcBogMAsMQ1bIAVGIAIfIAJhgAj+0EAMRLADPVDIXMAERLAFSNADOpAD
FxAEPcACeFAJm9AKzQANrwANsDBzbngLtlCNvlALGgAAEUALv2AKvqCsy0qNp2AJK2AJcIAD5PC3
d2AJN8AAMnAKNlsJNDALhVAJn2AJv6AJDQQHleC1YZGe1pANiEADWKADI9ADHiCWHtAAARCWDGAB
CrABUtAMwDIs1rCQ4jzO5EwNMZkWa3yRbgwcAnwmj+B+AWPHB6EP60ANZGAFJwABDdAACEACUIAF
roAIhVzITtAFUgAFPTAChFwCGlBVGoAD0UwETMAFEIwIq8AKBlYKOUwKvWIDKzADlcCsvOl6F22B
ltACNZD+AsNQD6rwBq3pBwwwCazAVppgAzHQBbwACpiwCpqgCXoAB7JAAlOQY1KQBCJgARmwBHbA
CTYAow7QM1hQzVpQBB0wAgsgADQcAmPADmlsJZPpl2Dtl1W3Ful8Y+vcG/ypi/DML/JsEMwABAW9
xx4QAQSAAA4ABFZQBvdcwUHQBWDg11owAiXQA6u0AyIgAiHwwQqdAydQBUmQoyG7CspKgRf9Cytw
AZWAC4FwBlegCGzACPS1Cp+AvnrgDbcZBz2NByMgCavACXigCXggAcgwy5/Q076gBzVQCUZgAQGQ
AR1gAQggA0pgCDRwHGBABFLwCrWQBUcABuBwDtDADMv+MAZVAAHX7ABaYA1dPSVfHdbePdZqUdZt
fNae0c5mIscHQcdtTRDV0ARRgARRYARbRQAEEAAIUAJRUAZgkAQnkARfUAZRkARjQAxdUAIGXtiD
XQI7sOA7kAMNnQMmoAQ30F2koKxAigqXLQufwAZncAFC0AZswAntVQqkAAOWwAoQdQmysAp5UASw
QAo7yglm1A7AoAqjYAmWsAq4nQcg0AEcgAIeQAVlwAxgYAhgcAKGcAzcEg3CsARfIAau wAuv0AWI
4AphEAUj8ABheQFi8L/c7d1gPmmg5JfjnRl+lU6bObTmXSborSzLst5+dgJGYAQmYAQ8UN0AgNUN
EOH+WVAFLAACQBIFEIAAHgAGzCAFJFACJgAEBm7gIgAEPSACIzDpIRACGnABOKABL0ADNhALRMkC
OSALrLDZGvDhgVDhbLhllloKorAHm7AKeGAGsAAKt0BgbnADwAAKnwAKpQAKqWAJecAHJ9AEUFBB
YSAJVGAGhSAJk+AJsSAJ0C4JrrAMx+AFkwAGTVDtkoAMyDAJZqDlOzAN+Rkh3R3mdTnm0VfmmNFo
jgbe77rmYNLm+vPmMmENAWUCJ8ACL2AE8ZHnvX0ERgAFRoAnYFAGWF3DFUAMYLADCj0CJ0DIhN0D
QbDgOrDQH5wDGI8Dma4BC80cQiAEOCAEhBAJHcv+CqiwCSjPCZ/Qk4cgC3fgBbAgCrXADK0gC4cw
Crfgp7kwC5lgC6egBxXQA1NwBCLABJQwCZSQBUj/BNs+CdFQDtIQDdEwDlQQAAQQBF7wAlQABlLg
BTLQAV4gDGTwTlWCYen0TNEHaukETm4h3rshXeeev/D+JfLucPQOE9hQzR+wARDgASaQBFaQBQ1A
wyQgAxF+Ai0I0ArEAQBgAFIQBVpVAiRAAifAA4XMAyVwBCZwtSaw+QoOzQo92As+AiOgAaZ/2C0w
KJqwbGSm06DQkxglB5Mw63hQBV1wnd3VprLQByifCjCdABnQABlABJOADFTQCsUfDYVQBsgwDuf+
MA7Nfw5UoBQsAAZLUAVYAAVQ8N/CsAxkAARlgA3jPiVufxnl3xnsLuZyD8d1HzPxDBPtYM0N8AAH
IAAC4AFLEAVhsAQg0N9GcAIA4eEBggMgoD0zcuTDCSk8lGC5EsWIBxAeSBgxkuRIiR0eTBjpcaRH
jx07SgwBeWLkkCE9cuDIcSGHERuF+PDRowlTnEqY9EyCJUpOlCpyjOKZU2fPJk6aOtlJkACBgy9e
Jh3LYoiMOnW1EGHJ6k3aq12xED0jtkVMGTFbugwTdoyYmClBkoy5x0/vXr59/f4FHFjwYMKCqQFA
jJhaYcZ+71mjRs1aXsLVEivW+zjyZMb31kX+praOcuF3oKm90wf43WUAXxq/hh1b9mzatQf3S4NG
927evXk/mr3N9/Dh8RqvAxCggQDmCx4kwWIlDKIuTU6c6NAAgQACSZqNsTKGmJMHGKBYAQNGCkYT
IkqUIMFjxAgPCXsECXK/B4+RJP0PYWKIHYLY4SUcfsghhyFYmOGGSrLwIpZOUpChFV36AOSSPfIY
pZNRUnmqgagkUESJD8BYwpBnkImGGUSqAIGCKxIJIwssthiGCyaKEEYYLnrssQsuxNgBDC6csMc2
JZds7LDLFuvrC9be6Ys1AP56R0rWvsBntcuo3MvJxKjRZxkrlxntL2yksBIAKawR7J5lNmj+8wsw
9WrTSib35LNPPxvbJzfiBkXDkdm4IZRQ4xp7BgADEDCggQ0gaEAGK7KwIgkTNohAuwaWC2ADI6KY
4ggIDmjAAQg4IGGJKaQAwwoommBhiSROKAGIJIxwz4MekijhBR5EiK8//k7igYck+GOCiR6K8CGH
knpoKQgWgpCiiiWuqKmVQgpR5IornCAQiDCY2OGJJ4iASwt3t9hCRy244EILIpiAV4snhBGjCWGe
0EKYL8j9YgwShPkzYSXFxCxK1tipkjW/rMkTgA0YBgDiMFkbg842N3Dnr3vYrLi1NPdqpmTE4MRT
5SsVhjlmmV8LNFHiDJVNOJuLe80fBhb+CMEBARqgtAEWkiCBgw0aiGDoChxwYLkEoGZuuQMEcCCC
CCrgQIQkohD30iygmFVWJnRVgocSeMAVCP56ECEItfkD4ogjRijhCCZ03GG/HXzoQYsidCCCCyLm
dZYIInzwgQgx2tKirS280KLZHuid993H42222SeYaGtzYYjpggxErChBnZlXJwxjKPnSMrE7W76s
L4pd/pIvjFXeYHZ+3vGYd99jL/kDynBnPXnlY655595wjk1n53lbtDFhALAguwMigCCCAx749FMF
UrXgaQggEGGHBcJvAFUPJmAO1QcegKAiFpRIggUWjoi1iSigSAKwPDDAvvHgAywwAgn+mnA3HVhh
BQnoARDwkwT/JWEk8wrC3ujFBSnoC2BHUgu9IBcveOmoXlyAFxOcoKMtPCFZ8ZKCMIbhhceJAQxh
+EIXMECG5fVwY0/yC/EA4Ds97eUewcPdEHWXxMRIoS8kw50T+fIOD+BuGbQrmQ+1uMXZNG96uoEe
bKT3RTRUrzHVIAEDwAeBDWzgAARoAAEI4ICnWSADlFLVBzzQgAX0cWhXAyRzhtYAAwgAVZ2CgAP0
+IEPkEAJW6hC/lggAh54gAMVkIERjlaCESghBSRoyAmC0AQlSMcKQGDC55iABCUsQQpKmEITyGaF
6lhhCwEE4BS2MAUjsc0JU5iCKpv+IMsm2AUMSVCCFLqAhQBKIQlDIEEAMkAPLirPdUF8WMRqtxcz
bWkd77CGEBEDjyVa6QvWeMc1oJgYlvFjdxtoBjXmZKXX6QV4ADCCNVKjD2ogcQN6iUw3E2ME01TT
oAcFjBe/CJyckXE3ZmxMmTSAKqKBb3wNKJ8FLBACCCAgAgNhH0EC2b72Da1pgRRAAaoWxwZ4gAVW
gCkWvqCeJjgBf0GwghSs8IUpkEsEMpiBpWQggypEIQlO0EIPHLkEmFoBC7aKQhAwcARnnsAIUAAC
Ep6ABC48IQi3RAIFxZoEKET1aybwAENMAAESbOABUVEdQmd2TYflji9F1AsSAVD+T34INDEaA+iZ
+jIGju3FCKxZRmr0og8hGsEvpfELxu7kpcS4Rq6XvaxCpxfG14zxixB9zTEAcICrpUoBUTut+T4l
AZB+ypB/LKkhSRs/0s52tu0jAHMQsIEPbIAER2OCLKOwBFciIgtRkAJJgMACE5jgCyYAwAOiEIUm
JAEJQIiCEnrQ1CF4IApOsMAGpmuEIEDBBEAwAhOsEFwrIAEJ1U2CFU5AtmshUwTteUAFMPAAB9gx
rpiFGV35EoZs3lVi9mSNFPtyWLsG9jKO7Ys+9IrgByt2L/pgsOwEow92lIY119gLO7YEYBJXU7PO
S4MgVLxiFreYxYwYhEN1A9r+xrgDAcox5GkVMJVPnW8BDgDfp9a3nKH98WoIWE5JYztbOLqWtqVt
QGo5MMAK6M8BGVgBCD5QgQd4QL0OAAAKwBCFEpygC8PdAAsAaIQFeMAKXGgACL5gBBNoqj5Q+III
GGIiIyxBk+eFQpp3leYgIAACHQhBCADwhBIHmDV85YcQAYtFxPxwTH+xzGUmjbFq/IWwuRNwORPT
6b6sYxkZbtPrKIsYyzba1cnDjYxlPesy1uYJACAp1ByAgB03oAKJREAFEDACDuQ2ABj4AADkKAAE
NNu1Sbaa1VCFqkEaEo7MbjaSF1A1BCTAAkKgQ30qsAERgE2lUQhDEEI1UxH+HICsuxJAEsAggwlY
IQofyOQD5tuFIyRhCktoKRR4QAAEkncDTZCPmj2QAYZXIAHoePWfQq0XIRLxwO5kDTkfOyVRI8Z3
DtawZLH0aL6sA9UVU/WII77ymOVDULSGufNofEYAxC/K/GWAdoCsHfBVoAJDA8CUaw7bJguyfQi4
9h+bXNqlK4Ci7CMAAjYK0yB8DgMKcHMSMCCdLpQAACf4QhQk8IEw8OAIaV2vvMPA1iWYAATvHq4m
I4C/DGxgCQ+QoBJO4IGHdOABGXiAAMrA8j5NPNIcN/A2Md7gKSIe5B7/i8hFvnEgPl5lrxPxZVpN
eM7vKdYxB32iZt4YHeD+WgAbqECUew21TyEd6dwRJAAS8KkDNLvay3F2ATwlFSQXmdm1J0ACxmdI
JLfeAh7IuQI+gAECHAADUejCF86FhBI0wJa3AkMXMsADKyCiBxyAwhZMwIFdjaoL0LSCCUoAAvyd
AAKBVoC8T7CAJJSBBAvoQAY0MOnO18bwkuYLfcArw1u8xNA4y2uGv6g4fridUcO0D6Owy2AGauiw
VdurvbDAzeu/DZSNfjix0APBGbMNyxiaFhCB4nMABXiACPgxBACAqUiAJiOAAIAjVGm20qotIxsk
7Sg+aBukHnw67TA67igpODqBmko9EwiCLxA/IMCCMPg5c+EBR4EA9TP+Lw+YKSbAQiNwADMDAgh4
Ka9bACNAAYuhvgzoAAaYBA5cGJKrq0vbiwZMDAxkDQhbsAKzvA2wsAicw99hjSHYQ37AMMTzKyVK
vIbhB3hQOTZkxNf4vBCERBGsjX5QtAlgASCwgPCZH9ZjqfHpPQIYrasRJFEcRZsTxSSTLdLSjtoj
MlUEJEIygG1bjubjjiZLAOYggV35FBMwleT4tWfbgOoQAA5ogg/ogC9IggCAgAQCABGgviCbgO2Y
ABLoAAjyh0akDcP7tIFSrEG8OH7Qqyvii0I0RMsDgCiwsJEpLL04OUSwMH3YRsSwQ+LRQOSovESs
Q2zUR8J4xEgMwdH+awzRggAPKAEL4DH+ipok07VMDL7byzYdtLkk2w7ScjZqe7aJ7L0DGDIbNICO
XACU4iMBcBQ+CoAImIAGuD8HyL8SyADmQIEkWAAAyIDlIAESIKQJWBoD2IAQsAAOqLsOqAA7ygAG
ALF9jA3D2x2CaoaTqzRLqyxseAdsEKeM6bjE2IBlkCe9AoB1iEMrucqstJJ2Ih49vLCTex13oCd7
Msq13AtI8EdIFIR+UBJ/sAAAoDL2IZpOiSNs+xRQpDZrczJAsq1AykHSaj6QFMzZKqTaq62OVEWP
7CNCorYJOMkhmAD4oEwnmwAL+LtJeQCc3IAM8IAOmIDOZLjTLE3+AEAYtnwNw7sHJmrKxdJKlZmd
3VEZDVyn2+SLlHmweJonN8yrPNFA1sRGR3jL0GOEPREtEyCBPUKAIWOaN5Kj3BJCWhQp2NvLVpwt
IBzMa7NIImsfVInFBWg2jyykT4FMIROyB1iACfCACvC7CfiAESABvCGB97CkEDhNNAzNDuAAn2S4
RFqAAGgN4mxN4ISdkjm5vrBAl6lN1pjNxPAA39GH3MwTKdjD12SieiJHVjPQtXTL46Q1QeiTEjg9
YPsUVXGt7cg2OUKyN3qy+Pkja1PFqqm22vJBVywy8sy2x+xIQlqfIFVP9pyf8pmyF5CCJ8gIIxgC
HciAjRrNDkD+NCllOA6wo7dKAAZ4gmuQyw8tDAKUsDwJp2/Ui3VItjaRAt40wKp0hwj9gI8TxKm8
jDEIRAasGCGqpzA1Jy81ymIQUVlbhC7dE3VggFgMnwXQmo/SLy7jL+04TOZYTFEsgMV8LRmVrUtl
spKqLduqGhwMz9prtiFbAENlH1UZNw84glIxAiAYAmEhgUTTgZLYAR0YgRBgzwWICgZIUzjlU8Ag
wDitQ64Uor/gJyQiqAKEPKe8kiw5kzotuam0k8BgB6ZsBgG8x8wQp+HsVQ700z/9okVIGDOJTD5i
mk4Bsqzhr4tKTEGqVFLEUU21PbyEMtmixZC8wcScNj7qIyH+/ZQHsKNf44AhcC+tQgL+4AH9DIER
KAlavYCGzYAhOIZ22FaFKY3I4NXB6JJ3wAduYo2NJQzNCI2TCQzPAA3RIA3ICA1nBQyQrQbUmFh9
DFFvtZnkTBh/8DpDor3ZGsU3Msx2PQADuJrcklQYFUVO3VRqO0ydxVftBNpCMqT1+dGfbYD5mR/9
+swOKIEgOIJKCoH8gwD+stIL0IAuWAZ0SJKXJTxU+ye0ZVvC61aZHRRwhZlsqLnxbMVLLQDCvNTX
+khOfa3S0i3XAlpqw0HaqlTtvBrHdK1ts9uONICc87WTrDv4XDhi27UECAAEIMZloJJ9aNvLWgau
jDzE+tz+0n01Y4Bb4pDbmBkBAPgAydRbxizaZWvXnbXUHCQI2Xo2Gi0y2zrP3BWASO3IHmQ2WPxR
oinNr6VaGMxSFhgDdbgHzzVduXKSLxBdveiH3bnY6eXeLSqGl0tdNFjdmDETEzCAoZPRU/y9+FGp
vXXXvx3F7dDU3r3bok3f4F2Ojqy23zuyjizU2nOOCFCpAAiABICAD7CC0+hezLonzfsCaF3gCEYo
1A3fQuGLfRCHDJZePvGHCgAACKgAUpQjwXxRoYU9weQOvUW67SQIkireQZo2w83fv7Rf3dqOwIXU
GSxgZTwBLFgGbLgHlZXgLbLQitkAkR3iJE4eCobbMOr+B26ABEgoBnH4kzIAgCQQgaFTWhNmDpWi
RXatYVEc4fS1X6tJYfhlV+YIAHbNrRlMDAJOjqk9GiNghmrgPyVGKH1QU5Uxgu3F4z9OGCb+U84K
B3GIh0WIB24IBz/5h5hkmkK64W5jV6vJrdq9Xem01FJMY2sTJDiuGAImYAF4SSXoAlcogzGgQHcQ
YkBGqHtYSiMmE1aW5dUR5LfkLH4Ih3iYB0EoZCr2E2eIrg0ARfmNuquJQdqtxWMWpEqu5GUeYWVr
Zul04+RIjgJeANHUAiwQhmkYAzD44WuwhnZY5Vl+tdJohgf+AkQIDXJmZ5nxUzVQgzSQ53mm53q2
53v+ToMYu2V+iAdjiId42AZjmIeEAQNlewAYZY7phObpnMFaTGOGliNPXuhqvgwCtgAk+AIywEp4
cId8QOJ2BumQbud96AcP3IeTNmmSTmmVPumWbumU9sB82OC+EIdiiIdiWOSadQACQIEISNoz5lkj
C1o5ouZqJlACnj1cxdwEiIANAAGG+AJmmAZwGGeRtuqrbmdBZZ2Z9hOKIQDuyduwbt9pRowCdtEG
mAAeaIItKBhnqAZrYAd4eId1wAZsWAd88Id8wOq95uu+Zs1GmWhlkz3nkIA6I6tnwIZrQId2oIfo
9evHhuzINlBsYIZniAxnaIZmqIZyaAd38FjJBu0V0BZt1tTq0Tbt00bt1Fbt1WbtBQ4IACH/C1hN
UCBEYXRhWE1QPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtj
OWQiPz4KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUg
WE1QIENvcmUgNS42LWMxMzggNzkuMTU5ODI0LCAyMDE2LzA5LzE0LTAxOjA5OjAxICAgICAgICAi
PgogPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1z
eW50YXgtbnMjIj4KICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIi8+CiA8L3JkZjpSREY+
CjwveDp4bXBtZXRhPgo8P3hwYWNrZXQgZW5kPSJyIj8+Af/+/fz7+vn49/b19PPy8fDv7u3s6+rp
6Ofm5eTj4uHg397d3Nva2djX1tXU09LR0M/OzczLysnIx8bFxMPCwcC/vr28u7q5uLe2tbSzsrGw
r66trKuqqainpqWko6KhoJ+enZybmpmYl5aVlJOSkZCPjo2Mi4qJiIeGhYSDgoGAf359fHt6eXh3
dnV0c3JxcG9ubWxramloZ2ZlZGNiYWBfXl1cW1pZWFdWVVRTUlFQT05NTEtKSUhHRkVEQ0JBQD8+
PTw7Ojk4NzY1NDMyMTAvLi0sKyopKCcmJSQjIiEgHx4dHBsaGRgXFhUUExIREA8ODQwLCgkIBwYF
BAMCAQAAOw==
'''
        self.root.title("Tugboat 1.7.1")
        self.mainframe = ttk.Frame(self.root)
        self.mainframe.grid(column=0, row=0, sticky=NSEW)
        self.root.columnconfigure(0, weight=1)
        self.root.rowconfigure(0, weight=1)
        self.root.geometry('+0+0')
        #
        # position and display logo image
        self.logo_label = ttk.Label(self.mainframe)
        self.logo_photoimage = PhotoImage(data=self.logo_image)
        self.logo_label['image'] = self.logo_photoimage
        self.logo_label.grid(column=1, row=0, columnspan=4)
        ttk.Separator(self.mainframe, orient=HORIZONTAL).grid(row=30, columnspan=35, sticky=EW)
        #
        # these are the elements of the Navigation section of the UI
        ttk.Label(self.mainframe, text="Discovery Method:").grid(column=1, row=100, sticky=E)
        ttk.Button(self.mainframe, text="This Device", style='Highlight.TButton', command=self.query_jamf_me).grid(column=2, row=100, sticky=W)
        ttk.Button(self.mainframe, text="Search Jamf", command=self.search_string_jamf).grid(column=2, row=100, sticky=E)
        self.search_entry = ttk.Entry(self.mainframe, width=25, textvariable=self.search_string)
        self.search_entry.grid(column=3, row=100, columnspan=2, sticky=W)
        ttk.Label(self.mainframe, text="Jamf ID: ").grid(column=4, row=100, sticky=E)
        self.id_entry = ttk.Entry(self.mainframe, width=6, textvariable=self.id_string)
        self.id_entry.grid(column=4, row=100, sticky=E)
        # macOS-only feature; "Top User" requires admin rights and full access
        if platform.system() == 'Darwin':
            ttk.Label(self.mainframe, text="User Selection:").grid(column=1, row=150, sticky=E)
            if self.access_level == 'full':
                ttk.Button(self.mainframe, text="Top User (Admin Req.)", command=self.usage).grid(column=2, row=150, sticky=W)
            else:
                ttk.Button(self.mainframe, text="Top User (Admin Req.)", command=self.usage, state='disabled').grid(column=2, row=150, sticky=W)
        #
        # If you are considering adding UI elements to communicate with user database, place them here
        #
        ttk.Separator(self.mainframe, orient=HORIZONTAL).grid(row=300, columnspan=5, sticky=EW)
        #
        # these are the elements of the General section of the UI
        ttk.Label(self.mainframe, text="Computer Name:").grid(column=1, row=320, sticky=E)
        self.computername_entry = ttk.Entry(self.mainframe, textvariable=self.computer_name_string)
        self.computername_entry.grid(column=2, row=320, sticky=EW)
        ttk.Label(self.mainframe, text="Asset Tag:").grid(column=1, row=340, sticky=E)
        self.assettag_entry = ttk.Entry(self.mainframe, textvariable=self.assettag_string)
        self.assettag_entry.grid(column=2, row=340, sticky=EW)
        ttk.Label(self.mainframe, text="Bar Code:").grid(column=3, row=340, sticky=E)
        self.barcode_entry = ttk.Entry(self.mainframe, textvariable=self.barcode_string)
        self.barcode_entry.grid(column=4, row=340, sticky=EW)
        ttk.Separator(self.mainframe, orient=HORIZONTAL).grid(row=500, columnspan=5, sticky=EW)
        #
        # these are the elements of the User and Location section of the UI
        ttk.Label(self.mainframe, text="Username:").grid(column=1, row=550, sticky=E)
        self.endusername_entry = ttk.Entry(self.mainframe, textvariable=self.username_string)
        self.endusername_entry.grid(column=2, row=550, sticky=EW)
        ttk.Label(self.mainframe, text="Full Name:").grid(column=3, row=550, sticky=E)
        self.fullname_entry = ttk.Entry(self.mainframe, width=31, textvariable=self.fullname_string)
        self.fullname_entry.grid(column=4, row=550, sticky=EW)
        ttk.Label(self.mainframe, text="Department:").grid(column=1, row=600, sticky=E)
        self.division_combobox = ttk.Combobox(self.mainframe, width=31, state="readonly", textvariable=self.department_string)
        self.division_combobox['values'] = self.divisions
        self.division_combobox.current(0)
        self.division_combobox.grid(column=2, row=600, sticky=EW)
        ttk.Label(self.mainframe, text="Position:").grid(column=3, row=600, sticky=E)
        self.position_entry = ttk.Entry(self.mainframe, width=31, textvariable=self.position_string)
        self.position_entry.grid(column=4, row=600, sticky=EW)
        ttk.Label(self.mainframe, text="Email:").grid(column=1, row=650, sticky=E)
        self.email_entry = ttk.Entry(self.mainframe, width=31, textvariable=self.email_string)
        self.email_entry.grid(column=2, row=650, sticky=EW)
        ttk.Label(self.mainframe, text="Phone:").grid(column=3, row=650, sticky=E)
        self.phone_entry = ttk.Entry(self.mainframe, width=31, textvariable=self.phone_string)
        self.phone_entry.grid(column=4, row=650, sticky=EW)
        ttk.Label(self.mainframe, text="Building:").grid(column=1, row=700, sticky=E)
        self.building_combobox = ttk.Combobox(self.mainframe, width=31, state="readonly", textvariable=self.building_string)
        self.building_combobox['values'] = self.buildings
        self.building_combobox.current(0)
        self.building_combobox.grid(column=2, row=700, sticky=EW)
        ttk.Label(self.mainframe, text="Room:").grid(column=3, row=700, sticky=E)
        self.room_entry = ttk.Entry(self.mainframe, width=31, textvariable=self.room_string)
        self.room_entry.grid(column=4, row=700, sticky=EW)
        ttk.Separator(self.mainframe, orient=HORIZONTAL).grid(row=800, columnspan=5, sticky=EW)
        #
        # these are the elements of the Administration section of the UI
        ttk.Label(self.mainframe, text="Open in Jamf:").grid(column=1, row=850, sticky=E)
        ttk.Button(self.mainframe, text="Device", command=self.open_id_web).grid(column=2, row=850, sticky=W)
        ttk.Button(self.mainframe, text="User", command=self.open_user_web).grid(column=2, row=850)
        ttk.Button(self.mainframe, text="Search", command=self.open_search_web).grid(column=2, row=850, sticky=E)
        self.jamf_management_label = ttk.Label(self.mainframe, text="Managed by Jamf: ")
        self.jamf_management_label.grid(column=4, row=850, sticky=E)
        # toggle button: the lambda flips the button label between True/False on each press
        self.jamf_management_btn = ttk.Button(self.mainframe, text="True", width=6, command=lambda: self.jamf_management_btn.config(text="False") if self.jamf_management_btn.config('text')[-1] == 'True' else self.jamf_management_btn.config(text="True"))
        self.jamf_management_btn.grid(column=4, row=850, sticky=E)
        ttk.Separator(self.mainframe, orient=HORIZONTAL).grid(row=1000, columnspan=5, sticky=EW)
        self.status_label = ttk.Label(self.mainframe, textvariable=self.status_string)
        self.status_label.grid(column=1, row=1100, sticky=W, columnspan=4)
        ttk.Button(self.mainframe, text="Reset", width=6, command=self.reset_data).grid(column=4, row=1100, sticky=W)
        ttk.Button(self.mainframe, text="Quit", width=6, command=self.root.destroy).grid(column=4, row=1100)
        if access_level == 'full':
            self.submit_btn = ttk.Button(self.mainframe, text="Submit", default='active', command=self.submit)
            self.submit_btn.grid(column=4, row=1100, sticky=E)
        else:
            # audit-only mode: show a disabled Submit and lock every editable field
            self.submit_btn = ttk.Button(self.mainframe, text="Auditing", state='disabled')
            self.submit_btn.grid(column=4, row=1100, sticky=E)
            self.computername_entry.configure(state='disabled')
            self.assettag_entry.configure(state='disabled')
            self.barcode_entry.configure(state='disabled')
            self.endusername_entry.configure(state='disabled')
            self.fullname_entry.configure(state='disabled')
            self.division_combobox.configure(state="disabled")
            self.position_entry.configure(state='disabled')
            self.email_entry.configure(state='disabled')
            self.phone_entry.configure(state='disabled')
            self.building_combobox.configure(state="disabled")
            self.room_entry.configure(state='disabled')
        #
        # this loop adds a small amount of space around each UI element, changing the value will significantly change the final size of the window
        for child in self.mainframe.winfo_children():
            child.grid_configure(padx=3, pady=3)
def open_user_web(self):
"""
Open currently displayed user record in jamf
"""
self.logger.info("%s: activated" % inspect.stack()[0][3])
#
# in order to open the user in a browser you need the user's Jamf ID
# in order to get the ID you need to open the user's record on Jamf
if self.username_string.get():
url = self.jamf_hostname + '/JSSResource/users/name/' + self.username_string.get()
url = urllib.quote(url, ':/()')
request = urllib2.Request(url)
request.add_header('Accept', 'application/json')
request.add_header('Authorization', 'Basic ' + base64.b64encode(self.jamf_username + ':' + self.jamf_password))
response = urllib2.urlopen(request)
response_json = json.loads(response.read())
if response.code != 200:
self.logger.error("%s: Invalid response code." % inspect.stack()[0][3])
self.status_label.configure(style='Warning.TLabel')
self.status_string.set("%i returned." % response.code)
return
jss_user_id = response_json['user']['id']
else:
self.logger.error("%s: No user set." % inspect.stack()[0][3])
self.status_label.configure(style='Warning.TLabel')
self.status_string.set("No user set.")
return
if jss_user_id:
url_formatted = self.jamf_hostname + "/users.html?id=" + str(jss_user_id) + "&o=r"
webbrowser.open_new_tab(url_formatted)
self.logger.info("%s: Opened user web. (%s)" % (inspect.stack()[0][3], self.username_string.get()))
self.status_label.configure(style='Normal.TLabel')
self.status_string.set("Opened URL for User.")
else:
self.logger.error("%s: No | |
""" multivariative simplicial weighted interpolation and extrapolation.
This is an implementation of four different interpolation
and extrapolation technics.
F_w - is for average weighted interpolation, also called baricentric.
It is a global scheme.
F_b - is a baricentric weighted simplicial interpolation. It is local and does
not provide derivative continuousness. Neigher it has a extrapolation method
implemented.
F_l - is for linear simplicial weighted interpolation. It is a local scheme, so it
needs simplicial complex, given as a list of simplexes, to operate. Note that
while interpolation function itself is continuous, its derivatives may not be
that way.
F_s - simplicial weighted interpolation. It is local, and being used with
appropriate weighting function and basis functions, it can provide derivative
continuousness up to any choosen level.
"""
__authors__ = [
'"<NAME>" <<EMAIL>>'
]
import operator
import math
###################### data functions section
def make_vector(n):
    ''' Return a zero-filled vector of dimension n. '''
    return [0.0 for _ in range(n)]
def copy_vector(V):
    ''' Return a shallow copy of the input vector V. '''
    return list(V)
def make_matrix(n):
    ''' Return a square n-by-n matrix filled with zeros.

    Uses range() instead of the Python-2-only xrange() so the helper also
    works on Python 3; each row is an independent list.
    '''
    return [[0.0] * n for _ in range(n)]
def copy_matrix(M):
    ''' Return a row-by-row copy of the rectangular matrix M. '''
    return [list(row) for row in M]
def make_n_vectors(k, n):
    ''' Return a list of k independent zero vectors, each of dimension n.

    range() replaces the Python-2-only xrange() so this runs on Python 3 too.
    '''
    return [[0.0] * n for _ in range(k)]
###################### algebra section
def v_add(a, b):
    ''' Component-wise vector sum; returns a new list.

    The original map(lambda, a, b) returned an iterator on Python 3 instead
    of a list; the zip-based comprehension returns a list on both versions.
    '''
    return [s + d for s, d in zip(a, b)]
def v_sub(a, b):
    ''' Component-wise vector difference a - b; returns a new list.

    The original map(lambda, a, b) returned an iterator on Python 3 instead
    of a list; the zip-based comprehension returns a list on both versions.
    '''
    return [s - d for s, d in zip(a, b)]
def v_dot(a, b):
    ''' Euclidean dot (scalar) product of vectors a and b. '''
    return sum(x * y for x, y in zip(a, b))
def v_len(a):
    ''' Euclidean length (L2 norm) of vector a. '''
    return math.sqrt(sum(c * c for c in a))
def v_smul(s, a):
    ''' Multiply vector a by scalar s, returning a new vector. '''
    scaled = []
    for component in a:
        scaled.append(s * component)
    return scaled
def v__E(a):
    ''' Levi-Civita symbol of the index sequence a.

    Returns 0 if any index repeats; otherwise +1 for an even permutation
    and -1 for an odd one. Parity is counted with a bubble sort
    (range() replaces the Python-2-only xrange()).
    '''
    t = list(a)
    swaps = 0
    for i in range(len(t)):
        for j in range(len(t) - i - 1):
            if t[j] == t[j + 1]:
                return 0
            if t[j] > t[j + 1]:
                swaps += 1
                t[j], t[j + 1] = t[j + 1], t[j]
    return 1 if swaps % 2 == 0 else -1
def v_cross(A):
    ''' n-dimensional cross product of (n-1) vectors given in list A.

    Args:
        A: list of n-1 vectors, each of dimension n.

    Returns:
        A vector orthogonal to all vectors of A, computed via the
        Levi-Civita symbol expansion.

    Raises:
        ValueError: if the number of vectors is not (dimension - 1).
        (The original code crashed here through a deliberate out-of-range
        write, A[100][100]; a descriptive exception is raised instead.)
    '''
    for a in A:
        if len(a) != len(A[0]):
            print("Vector size mismatch in 'v_cross'")
    DIMM = len(A[0])
    N = len(A)
    if N != DIMM - 1:
        raise ValueError("Vector number mismatch in 'v_cross'")
    v_res = [0.0] * DIMM
    # sum over all index tuples; // keeps integer indices on Python 3
    for i in range(DIMM):
        for jk in range(DIMM ** N):
            v_ijk = [i]
            for j in range(N):
                v_ijk.append((jk // (DIMM ** (N - j - 1))) % DIMM)
            t_res = v__E(v_ijk)
            if t_res != 0:
                for k in range(N):
                    t_res *= A[k][v_ijk[k + 1]]
                v_res[i] += t_res
    return v_res
def v_mixed(A):
    ''' n-dimensional mixed (triple) product of n vectors in list A.

    Equivalent to the determinant of the matrix whose rows are the
    vectors of A, expanded through the Levi-Civita symbol.

    Args:
        A: list of n vectors, each of dimension n.

    Returns:
        Scalar value of the mixed product.
    '''
    for i in range(1, len(A)):
        if len(A[i]) != len(A[i - 1]):
            # message fixed: it previously referred to 'v_cross'
            print("Vector size mismatch in 'v_mixed'")
    DIMM = len(A[0])
    N = len(A)
    if N != DIMM:
        print("Vector number mismatch in 'v_mixed'")
    v_res = 0.0
    # sum over all index tuples; // keeps integer indices on Python 3
    for jk in range(DIMM ** N):
        v_ijk = []
        for j in range(N):
            v_ijk.append((jk // (DIMM ** (N - j - 1))) % DIMM)
        t_res = v__E(v_ijk)
        if t_res != 0:
            for k in range(N):
                t_res *= A[k][v_ijk[k]]
            v_res += t_res
    return v_res
###################### linear equations section
def Solve(A, B):
    ''' Solve the linear system AX=B by recursive determinant elimination.

    Args:
        A: square coefficient matrix.
        B: right-hand-side vector.

    Returns:
        Solution vector X.

    The recursive minors a(i,j,n) and b(i,n) are memoized: the original
    un-cached recursion was exponential in N. Division is forced to float
    so integer inputs no longer floor-divide on Python 2.
    '''
    N = len(B)
    X = [0] * N
    a_cache = {}
    b_cache = {}

    def a(i, j, n):
        # recursively eliminated coefficient, memoized on (i, j, n)
        if n == N:
            return A[i][j]
        key = (i, j, n)
        if key not in a_cache:
            a_cache[key] = a(i, j, n + 1) * a(n, n, n + 1) - a(i, n, n + 1) * a(n, j, n + 1)
        return a_cache[key]

    def b(i, n):
        # recursively eliminated right-hand side, memoized on (i, n)
        if n == N:
            return B[i]
        key = (i, n)
        if key not in b_cache:
            b_cache[key] = a(n, n, n + 1) * b(i, n + 1) - a(i, n, n + 1) * b(n, n + 1)
        return b_cache[key]

    def x(i):
        # back substitution for component i
        d = b(i, i + 1)
        for j in range(i):
            d -= a(i, j, i + 1) * X[j]
        return d / float(a(i, i, i + 1))

    for k in range(N):
        X[k] = x(k)
    return X
def Gauss(A, B):
    ''' Gauss elimination for solving the linear system AX=B.

    Args:
        A: square coefficient matrix.
        B: right-hand-side vector.

    Returns:
        Solution vector X for AX=B.

    NOTE(review): zero pivots are nudged to a tiny epsilon instead of
    row-pivoting; this reproduces the original behaviour but is
    numerically fragile for near-singular systems -- confirm acceptable.
    '''
    N = len(B)
    X = [0] * N
    # work on float copies so the caller's A and B stay untouched
    gA = [[float(Aij) for Aij in Ai] for Ai in A]
    gB = [float(Bi) for Bi in B]
    r = 0.0
    for k in range(N):
        X[k] = 0.0
    # forward elimination
    for k in range(N - 1):
        for j in range(k + 1):
            if gA[j][j] == 0.0:
                gA[j][j] = 0.000000001
            r = gA[k + 1][j] / gA[j][j]
            gA[k + 1][j] = 0.0
            for bJ in range(j + 1, N):
                gA[k + 1][bJ] = gA[k + 1][bJ] - gA[j][bJ] * r
            gB[k + 1] = gB[k + 1] - gB[j] * r
    if gA[N - 1][N - 1] == 0:
        gA[N - 1][N - 1] = 0.00000001
    X[N - 1] = gB[N - 1] / gA[N - 1][N - 1]
    # back substitution
    for i in range(N - 2, -1, -1):
        s = 0.0
        for j in range(i, N):
            s = s + gA[i][j] * X[j]
            # epsilon-protect the later division by gA[i][i] (hit when j == i)
            if gA[i][j] == 0:
                gA[i][j] = 0.0000001
        X[i] = (gB[i] - s) / gA[i][i]
    return X
###################### projection
def v_proj(a, S):
    ''' Projection of a point onto a simplex plane.

    Args:
        a: Projecting point.
        S: Simplex given by a list of points.

    Returns:
        Point which is the projection of 'a' onto the plane spanned by
        the simplex 'S'.
    '''
    for b in S:
        if len(b) != len(a):
            print("Vector sizes mismatch in v_proj")
    if len(S) == 1:
        # a single vertex: the projection is the vertex itself
        return S[0]
    elif len(S) == 2:
        # edge: project onto the line through S[0] and S[1]
        a0 = v_sub(a, S[0])
        v01 = v_sub(S[1], S[0])
        Ei = 0.0
        E = 0.0
        for i in range(len(a)):
            Ei += v01[i] * v01[i]
            E += a0[i] * v01[i]
        return v_add(S[0], v_smul(float(E) / Ei, v01))
    elif len(S) > 2:
        # general case: least-squares (normal equations) for the
        # coefficients of the edge vectors, solved with Gauss()
        N = len(S) - 1
        a0 = v_sub(a, S[0])
        v0i = [v_sub(S[i + 1], S[0]) for i in range(N)]
        A = make_matrix(N)
        B = make_vector(N)
        for k in range(len(a)):
            for i in range(N):
                for j in range(N):
                    A[i][j] += v0i[j][k] * v0i[i][k]
                B[i] += a0[k] * v0i[i][k]
        I = Gauss(A, B)
        to_ret = copy_vector(S[0])
        for i in range(N):
            to_ret = v_add(to_ret, v_smul(I[i], v0i[i]))
        return to_ret
def v_proj_or(a, S, p0):
    ''' Projection of a point onto the inner space of a simplex.

    If the projection onto the simplex plane does not lie inside the
    simplex, a sentinel vector is returned instead.

    Args:
        a: Point to project.
        S: Simplex given by a list of its points.
        p0: Marker value stored in component 0 of the sentinel vector.

    Returns:
        Point of projection, or a zero vector whose first component is
        p0 when the projection falls outside the simplex inner space.
    '''
    # sentinel returned for "outside the simplex": zeros with p0 in slot 0
    ret = make_vector(len(a))
    ret[0] = p0
    for b in S:
        if len(b) != len(a):
            # message fixed: it previously referred to v_proj
            print("Vector sizes mismatch in v_proj_or")
    if len(S) == 1:
        return S[0]
    elif len(S) == 2:
        # edge: projection parameter k must stay within [0, 1]
        a0 = v_sub(a, S[0])
        v01 = v_sub(S[1], S[0])
        Ei = 0.0
        E = 0.0
        for i in range(len(a)):
            Ei += v01[i] * v01[i]
            E += a0[i] * v01[i]
        k = float(E) / Ei
        if k < 0 or k > 1.0:
            return ret
        return v_add(S[0], v_smul(k, v01))
    elif len(S) > 2:
        # general case: barycentric coefficients must each lie in [0, 1]
        # and sum to at most 1 for the projection to be inside
        N = len(S) - 1
        a0 = v_sub(a, S[0])
        v0i = [v_sub(S[i + 1], S[0]) for i in range(N)]
        A = make_matrix(N)
        B = make_vector(N)
        for k in range(len(a)):
            for i in range(N):
                for j in range(N):
                    A[i][j] += v0i[j][k] * v0i[i][k]
                B[i] += a0[k] * v0i[i][k]
        I = Gauss(A, B)
        sum_I = 0
        for i in I:
            if i < 0 or i > 1:
                return ret
            sum_I += i
        if sum_I < 0 or sum_I > 1:
            return ret
        to_ret = copy_vector(S[0])
        for i in range(N):
            to_ret = v_add(to_ret, v_smul(I[i], v0i[i]))
        return to_ret
###################### common functions section
def v_k(vx, s_k):
    ''' Weight function for vector 'vx'.

    Args:
        vx: vector to weight.
        s_k: scalar weight function applied to each component.

    Returns:
        Product of s_k over all components (1 for an empty vector; the
        original reduce() call raised TypeError on empty input and the
        builtin reduce itself is gone in Python 3).
    '''
    weight = 1
    for x in vx:
        weight *= s_k(x)
    return weight
def coords_in_simplex(sx, dot, pnt, xyz, Sx, crd=None):
    ''' Determine whether a point lies inside a simplex.

    Args:
        sx: index of the basic simplex in the simplex list 'Sx'.
        dot: point coordinates in the basis set by the simplex 1-edges.
        pnt: index of the origin point inside the simplex.
        xyz: list of points.
        Sx: list of point-index tuples representing the simplicial complex
            (indices are 1-based).
        crd: optional list; when supplied, it is filled in place with the
            computed barycentric coordinates. (The original default was a
            mutable [] and the result was never propagated to the caller.)

    Returns:
        True if the point is inside the simplex, False otherwise.
    '''
    DIMM = len(dot)
    A = make_matrix(DIMM)
    B = make_vector(DIMM)
    cnt = 0
    p_pnt = Sx[sx][pnt] - 1
    # build the edge-vector matrix relative to the origin vertex
    for i in range(DIMM + 1):
        p_i = Sx[sx][i] - 1
        if p_i != p_pnt:
            for j in range(DIMM):
                A[j][cnt] = xyz[p_i][j] - xyz[p_pnt][j]
            cnt += 1
    if cnt != DIMM:
        print("WTF error: not enough points in simplex")
    for j in range(DIMM):
        B[j] = dot[j] - xyz[p_pnt][j]
    solution = Gauss(A, B)
    if crd is not None:
        # propagate coordinates to the caller through the provided list
        crd[:] = solution
    # inside iff every coordinate and their sum lie in [0, 1]
    res = True
    summ = 0.0
    for j in range(DIMM):
        if not 1 >= solution[j] >= 0:
            res = False
        summ += solution[j]
    return bool(1 >= summ >= 0 and res)
def get_nearest_simplex(dot, xyz, Sx, sx, best_pack):
    ''' Finds a simplex which is the nearest to a 'dot' point.
    Args:
        dot: the query point.
        xyz: List of all points forming simplicial complex.
        Sx: List of point indexes, representing simplicial complex.
        sx: A candidate simplex (list of 1-based point indexes).
        best_pack: [best_distance, best_projection, best_simplex],
            updated in place and passed through the recursion.
    Returns:
        best_pack, first element of which is the smallest distance found.
    '''
    # (removed: a `new_pack` copy was built here but never used)
    new_S = []
    for idx in sx:
        new_S.append(xyz[idx - 1])
    new_prj = v_proj_or(dot, new_S, 1.e10)
    new_l = v_len(v_sub(new_prj, dot))
    if new_l < best_pack[0]:
        best_pack[0] = new_l
        best_pack[1] = copy_vector(new_prj)
        best_pack[2] = copy_vector(sx)
    # Recurse into every facet obtained by dropping one vertex.
    if len(sx) > 1:
        for i in range(0, len(sx)):
            c_sx = copy_vector(sx)
            c_sx[i:i + 1] = []
            best_pack = get_nearest_simplex(dot, xyz, Sx, c_sx, best_pack)
    return best_pack
def get_constant_functions(xyz, f, Sx):
    ''' Determines a list of constant basis functions
    Args:
        xyz: Point set.
        f: Corresponding array of function values.
        Sx: List of simplexes (unused here; kept for a uniform signature
            with the other basis-function builders).
    Returns:
        List of basis functions; the i-th ignores its argument and
        returns f[i].
    '''
    def fi(i):
        # The helper binds i per call, avoiding the late-binding
        # closure pitfall of a bare lambda in the loop.
        return lambda dot: f[i]
    return [fi(i) for i in range(len(xyz))]
def get_linear_functions(xyz, f, Sx):
    ''' Determines a list of linear basis functions
    Args:
        xyz: Point set.
        f: Corresponding array of function values.
        Sx: List of simplexes (1-based point indexes).
    Returns:
        List of basis functions; the i-th is the average of the linear
        interpolants of all simplexes containing point i.
    '''
    if len(xyz) == 0:
        return []
    dimm = len(xyz[0])
    simplex_linears = make_n_vectors(len(Sx), dimm + 1)
    point_linears = make_n_vectors(len(xyz), dimm + 1)
    # Per simplex: solve for the affine function matching f at its vertices.
    for i in range(0, len(Sx)):
        A = make_matrix(dimm + 1)
        B = make_vector(dimm + 1)
        for j in range(dimm + 1):
            pnt = Sx[i][j] - 1
            for k in range(dimm):
                A[j][k] = xyz[pnt][k]
            A[j][dimm] = 1.0
            B[j] = f[pnt]
        simplex_linears[i] = Gauss(A, B)
    # Per point: average the coefficients of every simplex touching it.
    for i in range(len(xyz)):
        sx_N = 0
        for j in range(0, len(Sx)):
            for k in range(0, dimm + 1):
                if Sx[j][k] == i + 1:
                    sx_N += 1
                    for l in range(0, dimm + 1):
                        point_linears[i][l] += simplex_linears[j][l]
                    break
        if sx_N == 0:
            print("error: point is not in simplex")
        # List comprehension instead of map(): on Python 3 map() returns an
        # iterator, and the closures below index point_linears[i].
        point_linears[i] = [a / sx_N for a in point_linears[i]]
    def fi(i):
        return lambda dot: sum([point_linears[i][j] * dot[j] for j in range(dimm)]) + point_linears[i][dimm]
    return [fi(i) for i in range(len(xyz))]
###################### interpolation and extrapolation section
def F_w(dot, xyz, Sx, base_f, s_k):
    ''' Average weighted interpolation
    Args:
        dot: Argument for interpolation function
            given by a list of variables.
        xyz: Data points.
        Sx: List of simplexes, which is excessive
            for this particular algorithm yet left for consistency.
        base_f: Corresponding to 'xyz' list of basic functions.
        s_k: Scalar weight function of the distance to each data point.
    Returns:
        Value of interpolation function (weighted average of base_f).
    '''
    Up = 0.0
    Dn = 0.0
    for i in range(0, len(xyz)):
        # Weight falls off with distance from the data point.
        k = s_k(v_len(v_sub(xyz[i], dot)))
        Up = Up + base_f[i](dot) * k
        Dn = Dn + k
    return Up / Dn
def F_l(dot, xyz,Sx,base_f,s_k):
''' Simplicial linear interpolation.
Args:
dot: Argument for interpolation function
| |
def summary_mask(anat_data, mask_data):
    """Will calculate the three values (mean, stdev, and size) and return them
    as a tuple.
    :type anat_data: NumPy array
    :param anat_data: The anatomical scan data.
    :type mask_data: NumPy array
    :param mask_data: The binary mask to mask the anatomical data with.
    :rtype: tuple
    :return: The summary values (mean, standard deviation, size) of the scan.
    """
    import numpy as np
    # Builtin float (== float64) instead of np.float: the np.float alias
    # was removed in NumPy 1.24 and this line raised AttributeError.
    anat_masked = anat_data[mask_data == 1].astype(float)
    mean = anat_masked.mean()
    # ddof=1: sample (unbiased) standard deviation.
    std = anat_masked.std(ddof=1)
    size = len(anat_masked)
    return (mean, std, size)
def check_datatype(background):
    """Process the image data to only include non-negative integer values.
    :type background: NumPy array
    :param background: The voxel values of the background (outside of the
                       head) of the anatomical image.
    :rtype: NumPy array
    :return: The input array with floats converted to integers and
             negative values set to zero.
    """
    import numpy as np
    # np.floating (not builtin float): since NumPy 1.14, issubdtype with
    # `float` only matches float64, so float32 inputs were silently missed.
    if np.issubdtype(background.dtype, np.floating):
        background2 = background.astype('int32')
        # Ensure downgrading datatype didn't really change the values
        if np.abs(background2.astype('float32') - background).mean() > 0.05:
            print("WARNING: Downgraded float to an int but values are "
                  "different by more than 0.05")
        background = background2
        del background2
    # We only allow integer values for now.  np.integer matches every
    # integer width/signedness, unlike builtin `int` (platform C long).
    if not np.issubdtype(background.dtype, np.integer):
        print("QI1 can not be calculated for data that is not integer or "
              "floating point: %s" % background.dtype)
        raise TypeError
    # Vectorized negativity test replaces the original Python-level scan
    # of every voxel; same warn-once-then-clip behavior.
    if (background < 0).any():
        print("\nWARNING: Negative voxel values in anatomical scan "
              "converted to zero.\n")
        background = background.clip(0)
    return background
def convert_negatives(img_data):
    """Convert any negative voxel values to zero and provide a warning.
    :type img_data: NumPy array
    :param img_data: The anatomical image's voxel values.
    :rtype: NumPy array
    :return: The input array with negative values set to zero (the same
             object is returned unchanged when no negatives exist).
    """
    # Vectorized test replaces the original Python loop over every voxel.
    if (img_data < 0).any():
        print("\nWARNING: Negative voxel values in anatomical scan "
              "converted to zero.\n")
        img_data = img_data.clip(0)
    return img_data
def snr(mean_fg, std_bg):
    """Compute the Signal-to-Noise Ratio (SNR) of an image.

    For anatomical images this is (mean GM intensity) / (std of background
    intensities); for functional images, (mean brain intensity) / (std of
    background intensities).

    :type mean_fg: float
    :param mean_fg: Mean foreground (in-head or tissue) voxel intensity.
    :type std_bg: float
    :param std_bg: Standard deviation of the background voxel intensities.
    :rtype: float
    :return: The signal-to-noise ratio.
    """
    return mean_fg / std_bg
def cnr(mean_gm, mean_wm, std_bg):
    """Compute the Contrast-to-Noise Ratio (CNR) of an image.

    CNR = |(mean GM intensity) - (mean WM intensity)| /
          (std of background intensities)

    :type mean_gm: float
    :param mean_gm: Mean of the gray matter voxels.
    :type mean_wm: float
    :param mean_wm: Mean of the white matter voxels.
    :type std_bg: float
    :param std_bg: Standard deviation of the background voxel intensities.
    :rtype: float
    :return: The contrast-to-noise ratio.
    """
    import numpy as np
    contrast = np.abs(mean_gm - mean_wm)
    return contrast / std_bg
def cortical_contrast(mean_gm, mean_wm):
    """Compute the vertex-wise cortical contrast.

    cortical contrast = (mean WM - mean GM) / ((mean WM + mean GM) / 2)

    :type mean_gm: float
    :param mean_gm: Mean of the gray matter voxels.
    :type mean_wm: float
    :param mean_wm: Mean of the white matter voxels.
    :rtype: float
    :return: The cortical contrast value.
    """
    mean_of_means = (mean_wm + mean_gm) / 2
    return (mean_wm - mean_gm) / mean_of_means
def fber(anat_data, skull_mask_data, bg_mask_data):
    """Compute the Foreground-to-Background Energy Ratio (FBER).

    FBER = (mean foreground energy) / (mean background energy), where
    energy is the squared magnitude of the voxel intensities.

    :type anat_data: NumPy array
    :param anat_data: The anatomical/spatial data of the image.
    :type skull_mask_data: NumPy array
    :param skull_mask_data: Binary mask defining the head.
    :type bg_mask_data: NumPy array
    :param bg_mask_data: Binary mask defining the background.
    :rtype: float
    :return: The foreground-to-background energy ratio.
    """
    import numpy as np
    fg_energy = (np.abs(anat_data[skull_mask_data == 1]) ** 2).sum()
    mean_fg = fg_energy / (skull_mask_data.sum())
    # Background mean divides by the voxel count *outside* the bg mask's
    # ones would be wrong; the original normalizes by size - sum.
    bg_energy = (np.abs(anat_data[bg_mask_data == 1]) ** 2).sum()
    mean_bg = bg_energy / (bg_mask_data.size - bg_mask_data.sum())
    return mean_fg / mean_bg
def efc(anat_data):
    """Calculate the Entropy Focus Criterion of the image.
    - EFC based on Atkinson 1997, IEEE TMI
    - We normalize the original equation by the maximum entropy so our EFC
      can be easily compared across images with different dimensions.
    :type anat_data: Nibabel data
    :param anat_data: The anatomical image data.
    :rtype: float
    :return: The entropy focus criterion (EFC) value.
    """
    import numpy as np
    # let's get rid of those negative values
    anat_data = convert_negatives(anat_data)
    # Calculate the maximum value of the EFC (which occurs any time all
    # voxels have the same value)
    efc_max = 1.0 * np.prod(anat_data.shape) * (1.0 / np.sqrt(np.prod(anat_data.shape))) * \
        np.log(1.0 / np.sqrt(np.prod(anat_data.shape)))
    # Calculate the total image energy
    b_max = np.sqrt((anat_data**2).sum())
    # Calculate EFC (add 1e-16 to the image data to keep log happy)
    efc = (1.0 / efc_max) * np.sum((anat_data / b_max) * np.log((anat_data + 1e-16) / b_max))
    if np.isnan(efc):
        # print() call form (was a Python 2 print statement).
        print("NaN found for efc (%3.2f,%3.2f)" % (efc_max, b_max))
    return efc
def artifacts(anat_data, fg_mask_data, bg_mask_data, calculate_qi2=False):
"""Calculates QI1, the fraction of total voxels that contain artifacts.
- Detect artifacts in the anatomical image using the method described in
Mortamet et al. 2009 (MRM).
- Optionally, also calculates QI2, the distance between the distribution
of noise voxel (non-artifact background voxels) intensities, and a
Ricean distribution.
:type anat_data: Nibabel data
:param anat_data: The anatomical image data.
:type fg_mask_data: Nibabel data
:param fg_mask_data: The binary mask of the head.
:type bg_mask_data: Nibabel data
:param bg_mask_data: The binary mask of the background.
:type calculate_qi2: bool
:param calculate_qi2: (default: False) Whether to calculate Qi2.
:rtype: tuple
:return: The Qi1 and Qi2 values (Qi2 = None if not calculated).
"""
import numpy as np
import scipy.ndimage as nd
# Create an image containing only background voxels (everything
# outside bg_mask set to 0)
background = anat_data.copy()
background[bg_mask_data != 1] = 0
# make sure the datatype is an int
background = check_datatype(background)
# Find the background threshold (the most frequently occurring value
# excluding 0)
bg_counts = np.bincount(background.flatten())
bg_threshold = np.argmax(bg_counts[1:]) + 1
# Apply this threshold to the background voxels to identify voxels
# contributing artifacts.
background[background <= bg_threshold] = 0
background[background != 0] = 1
# Create a structural element to be used in an opening operation.
struct_elmnt = np.zeros((3,3,3))
struct_elmnt[0,1,1] = 1
struct_elmnt[1,1,:] = 1
struct_elmnt[1,:,1] = 1
struct_elmnt[2,1,1] = 1
# Perform an opening operation on the background data.
background = nd.binary_opening(background, structure=struct_elmnt)
# Count the number of voxels that remain after the opening operation.
# These are artifacts.
QI1 = background.sum() / float(bg_mask_data.sum())
''' "bg" in code below not defined- need to ascertain what that should '''
''' be, and correct it- unit test for this part disabled for now '''
if calculate_qi2:
# Now lets focus on the noise, which is everything in the background
# that was not identified as artifact
bgNoise = anat_data[(fg_mask_data-bg)==1]
# calculate the histogram of the noise and its derivative
H = np.bincount(bgNoise)
H = 1.0*H/H.sum()
dH = H[1:]-H[:-1]
# find the first value on the right tail, i.e. tail with negative
# slope, i.e. dH < 0 that is less than or equal to half of the
# histograms max
firstNegSlope = np.nonzero(dH<0)[0][0]
halfMaxRightTail = np.nonzero(H[firstNegSlope:]<(H.max()/2))[0][0]
# divide by the standard deviation
bgNoiseZ = bgNoise / bgNoise.std()
bgChiParams = ss.chi.fit(bgNoiseZ)
#print bgChiParams
# now generate values that are consistent with the histogram
yx = range(0,H.size)/bgNoise.std()
rvs = ss.chi.pdf(yx,bgChiParams[0],loc=bgChiParams[1],scale=bgChiParams[2])
# now we can calculate the | |
import os,sys,subprocess,shutil,time,traceback,fileinput
import yaml
# Global debug flags, parsed straight from the command line at import time.
g_dbg = '-dbg' in sys.argv or False
# -dbg implies -dbgexec (echo every external command before running it).
g_dbgexec = g_dbg or ('-dbgexec' in sys.argv or False)
# mako is optional: template rendering is simply disabled when missing.
try:
    import mako.template as mako_temp
except ImportError:
    mako_temp = None
    pass
# ANSI/VT100 escape codes by colour name; names prefixed with 'b' set the
# background colour, '' / 'default' resets attributes.
k_vt_col_map = { '':'\x1b[0m', 'default':'\x1b[0m', 'black':'\x1b[30m', 'red':'\x1b[31m', 'green':'\x1b[32m', 'yellow':'\x1b[33m',
				'blue':'\x1b[34m', 'magenta':'\x1b[35m', 'cyan':'\x1b[36m', 'white':'\x1b[37m',
				'bdefault':'\x1b[49m', 'bblack':'\x1b[40m', 'bred':'\x1b[41m', 'bgreen':'\x1b[42m', 'byellow':'\x1b[43m',
				'bblue':'\x1b[44m', 'bmagenta':'\x1b[45m', 'bcyan':'\x1b[46m', 'bwhite':'\x1b[47m' }
# Short alias used elsewhere in the file.
vt_cm = k_vt_col_map
def set_vt_col(col):
    """Switch the terminal colour to *col* (a k_vt_col_map key)."""
    code = k_vt_col_map[col]
    sys.stdout.write(code)
def fpjoin(aa):
    """Join a sequence of path components into a single path.

    Args:
        aa: sequence of path components (at least one).
    Returns:
        The joined path string.
    """
    # os.path.join already accepts any number of components; this also
    # generalizes the old loop, which crashed on single-element input.
    return os.path.join(*aa)
def to_afp(base, fp):
    """Return *fp* unchanged if absolute, otherwise anchored under *base*."""
    if os.path.isabs(fp):
        return fp
    return fpjoin([base, fp])
def fphere():
    """Absolute directory containing this script (symlinks resolved)."""
    here = os.path.realpath(__file__)
    return os.path.dirname(here)
def fpjoinhere(aa):
    """Join the components *aa* onto this script's own directory."""
    parts = [fphere()]
    parts.extend(aa)
    return fpjoin(parts)
def fptemp():
    """Path of the 'temp' directory next to this script."""
    base = fphere()
    return fpjoin([base, 'temp'])
def cwdtemp():
    """Change the working directory to the script-local temp directory."""
    target = fptemp()
    os.chdir(target)
def mktemp():
    """Create the script-local temp directory if it does not exist yet."""
    # idiomatic `not ...` instead of `... == False`
    if not os.path.isdir(fptemp()):
        os.mkdir(fptemp())
def repext(fp, ext):
    """Return *fp* with its extension replaced by *ext* (dot included)."""
    stem, _old_ext = os.path.splitext(fp)
    return stem + ext
def break_txt(line, n = 48, endl = '\n'):
    """Greedily word-wrap *line* into chunks of roughly *n* characters,
    joined with *endl*.  Words are split after spaces and the punctuation
    in `delims`; a word is glued onto the previous chunk when it still
    fits, or when the previous chunk ends in a space and the word does not.
    """
    delims = [' ', ',', ';', '.']
    lines = []
    while len(line):
        # Scan to the next delimiter (or end of text); that prefix is the
        # next "word" and is removed from the remaining input.
        for i in range(len(line)):
            if line[i] in delims or i+1 == len(line):
                word = line[:i+1]; line = line[i+1:]
                if len(lines) > 0 and (len(lines[-1])+len(word) <= n or (word[-1] != ' ' and lines[-1][-1] == ' ') ):
                    lines[-1] = lines[-1] + word
                else:
                    # Start a new output chunk, trimming leading blanks.
                    lines.append(word.lstrip())
                break
    return endl.join(lines)
def break_lines(lines, n = 48, endl = '\n'):
    """Word-wrap every newline-separated line of *lines* to width *n*."""
    wrapped = [break_txt(part, n, endl) for part in lines.split("\n")]
    return endl.join(wrapped)
def handle_generic(fp, fn, fe):
    """Fallback handler: report that no handler exists for this extension."""
    # print() call form works for a single argument on Python 2 and 3.
    print('Unknown extension for [{}]'.format(fp))
def extract_command_multiline(fp,comt):
    """Extract the first block-comment command from file *fp*.

    comt is a (open, close) delimiter pair, e.g. ('<!---', '-->').  Lines
    between the first opening delimiter and the closing one are returned
    (opening delimiter stripped from the first line, anything from the
    closing delimiter onward stripped from the last).
    """
    started = False; exec_cmd = [];
    with open(fp, "r") as ifile:
        for line in ifile.readlines():
            if started or line.startswith(comt[0]):
                # A line that is exactly the opening delimiter contributes
                # no text of its own.
                if (line.strip() != comt[0]):
                    exec_cmd.append(line if started else line[len(comt[0]):])
                started = True
                if len(exec_cmd) and (comt[1] in exec_cmd[-1]):
                    if (exec_cmd[-1].strip() != comt[1]):
                        # Keep only the text before the closing delimiter.
                        exec_cmd[-1] = exec_cmd[-1].split(comt[1])[0]
                    else:
                        exec_cmd.pop()
                    break
    return exec_cmd
def extract_command_singleline(fp,comt):
    """Extract the first run of single-line comments starting with *comt*
    from file *fp*; the comment prefix is stripped from each line.

    A line that is exactly the bare prefix only counts once the run has
    started; the run ends at the first non-matching line.
    """
    exec_cmd = [];
    with open(fp, "r") as ifile:
        for line in ifile.readlines():
            if line.startswith(comt) and (line.strip() != comt or len(exec_cmd)):
                exec_cmd.append(line[len(comt):])
            else:
                # First non-comment line after the run: stop scanning.
                if len(exec_cmd) > 0:
                    break
    return exec_cmd
def extract_command(fp, comt):
    """Dispatch on delimiter arity: one entry means single-line comments,
    two mean an open/close block-comment pair."""
    if len(comt) == 1:
        return extract_command_singleline(fp, comt[0])
    return extract_command_multiline(fp, comt)
def exe_command(fp, exec_cmds, is_shell = False, capture = False, print_run = False):
    """Run each command of *exec_cmds* with the cwd set to *fp*'s directory.

    Args:
        fp: file the commands belong to (its directory becomes the cwd).
        exec_cmds: list of command strings; empty entries are reported.
        is_shell: pass the command to the shell unmodified instead of
            splitting it into an argv list.
        capture: collect [stdout, stderr] per command instead of waiting.
        print_run: echo 'running ...' / 'done' around each command.
    Returns:
        List of [stdout, stderr] pairs when capture is set, else [].
    """
    outs = []
    if g_dbgexec and len(exec_cmds) > 1:
        print('{} cmds'.format(len(exec_cmds)))
    for exec_cmd in exec_cmds:
        if len(exec_cmd):
            if g_dbgexec:
                print('exec_cmd = [{}]'.format(exec_cmd))
                sys.stdout.flush()
            if print_run:
                print('running ...')
                sys.stdout.flush()
            os.chdir(os.path.dirname(fp))
            pop_in = exec_cmd.split() if not is_shell else exec_cmd
            # NOTE: is_shell=True hands the raw string to the shell; only
            # use it with trusted, locally-authored commands.
            if capture or '-silence' in exec_cmd:
                pop = subprocess.Popen(pop_in, shell = is_shell, stdout=subprocess.PIPE)
            else:
                pop = subprocess.Popen(pop_in, shell = is_shell)
            if capture:
                out, err = pop.communicate()
                outs.append([out, err])
            else:
                pop.wait()
            if print_run:
                print('done')
        else:
            if capture:
                outs.append(['', ''])
            print('No command found for [{}]'.format(fp))
    return outs
def do_jbu_hook(fp, fn, fe, exec_cmd):
    """Dispatch to the jbu pipeline when any non-empty line of *exec_cmd*
    contains jbu pipeline syntax ('-[' or ']->'); True iff dispatched."""
    for line in exec_cmd:
        if not len(line):
            continue
        if '-[' in line or ']->' in line:
            handle_jbu_lines(fp, fn, fe, exec_cmd)
            return True
    return False
def handle_embed_command(fp, fn, fe, comt, dflt_cmd=[]):
    """Extract the command embedded in *fp*'s comments (falling back to
    *dflt_cmd*) and run it, via the jbu pipeline when applicable."""
    exec_cmd = extract_command(fp, comt)
    if not len(exec_cmd):
        exec_cmd = dflt_cmd
    if not do_jbu_hook(fp, fn, fe, exec_cmd):
        exe_command(fp, exec_cmd, False, False, True)
def handle_md(fp, fn, fe):
    """Markdown: the command lives in an HTML comment block."""
    delims = ['<!---', '-->']
    handle_embed_command(fp, fn, fe, delims)
def handle_tex(fp, fn, fe):
    """TeX: the command lives in '%' line comments."""
    delims = ['%']
    handle_embed_command(fp, fn, fe, delims)
def handle_graphviz(fp, fn, fe):
    """Graphviz: the command lives in '//' line comments."""
    delims = ['//']
    handle_embed_command(fp, fn, fe, delims)
def handle_multi(fp, fn, fe):
    """.multi files: the command lives in '--#' line comments."""
    delims = ['--#']
    handle_embed_command(fp, fn, fe, delims)
def handle_python(fp, fn, fe):
    """Python: the command lives in '#' line comments."""
    delims = ['#']
    handle_embed_command(fp, fn, fe, delims)
def handle_lzt(fp, fn, fe):
    """.lzt files: the command lives in '#' line comments."""
    delims = ['#']
    handle_embed_command(fp, fn, fe, delims)
def handle_frt(fp, fn, fe):
    """.frt files: '%' comments hold the command; without one, pipe the
    file through the frt template into a png."""
    fallback = ['./frt_template.tex -[with {self}]-[inject]-[pdf]-[png]-> {self.}.png']
    handle_embed_command(fp, fn, fe, ['%'], fallback)
def handle_mako(fp,fn,fe):
    """Render the mako template *fp* to *fn* (the path minus '.mako'),
    then process the rendered file's '##' embedded command.  Does nothing
    but complain when the optional mako dependency is absent.
    """
    if mako_temp:
        os.chdir(os.path.dirname(fp))
        with open(fn, 'w+') as of:
            if g_dbg:
                print 'rendering mako to ', fn, '...'; sys.stdout.flush()
            # Python 2 `print >> file` form: writes the rendered template
            # plus a trailing newline into the output file.
            print >> of, mako_temp.Template(filename=fp).render()
        handle_embed_command(fp,fn,fe,['##'])
    else:
        print 'Mako is not installed'
def handle_jgr(fp, fn, fe):
    """jgraph files: the command lives in ';' line comments."""
    delims = [';']
    return handle_embed_command(fp, fn, fe, delims)
def handle_shell(fp, fn, fe):
    """Shell scripts run themselves: execute './<name>' through the shell."""
    cmd = './' + os.path.basename(fp)
    exe_command(fp, [cmd], True)
def jbu_expect(fp):
    """Snapshot *fp*'s mtime so later regeneration can be detected.

    The file's mtime is pushed 10 seconds into the past first, so any
    subsequent rewrite (even within the same second) registers as newer
    in jbu_expect_check.  Missing files snapshot with mt 0.
    """
    if not os.path.isfile(fp):
        return {'fp': fp, 'mt': 0}
    mt = os.path.getmtime(fp)
    os.utime(fp, (os.path.getatime(fp), mt - 10))
    return {'fp': fp, 'mt': os.path.getmtime(fp)}
def jbu_expect_check(exp):
    """True iff the file was rewritten since its jbu_expect snapshot."""
    current = jbu_expect(exp['fp'])
    return current['mt'] > exp['mt']
def jbu_trace_files(files, lvl=1):
    """Print one indented line listing each file's basename, coloured green
    when its ok-flag is set and red otherwise.  (Python 2 print statements
    with trailing commas keep everything on one line.)
    """
    print ''.join(['  ']*lvl*2),
    for (fp, fpok) in files:
        set_vt_col('green' if fpok else 'red'); print(os.path.basename(fp)), ; set_vt_col('default');
    print ''
def jbu_check_files(files):
    """True iff every (fp, ok) entry in *files* is flagged ok."""
    for _fp, fpok in files:
        if not fpok:
            return False
    return True
def jbu_check_fgroups(fgroups):
    """True iff every file group passes jbu_check_files."""
    for fg in fgroups:
        if not jbu_check_files(fg):
            return False
    return True
def jbu_flatten_fgroups(fgroups):
    """Flatten a list of file groups into a single list of entries."""
    return [entry for fg in fgroups for entry in fg]
def jbu_gen_tmpfile(tmp_files, ext):
    """Next numbered temp-file path ('_tmp_<k><ext>') under ./temp, where
    k is one past the number of temp files created so far."""
    name = '_tmp_{}{}'.format(len(tmp_files) + 1, ext)
    return fpjoinhere(['temp', name])
def do_handle(fp):
    """Dispatch *fp* to the handler registered for its extension, or to
    handle_generic when the extension is unknown; '-jbu_direct' on the
    command line bypasses the table entirely.
    """
    k_ext_handlers = {'.md': handle_md, '.tex': handle_tex
        , '.gv': handle_graphviz, '.dot': handle_graphviz
        , '.py': handle_python, '.sh': handle_shell, '.mako': handle_mako
        , '.jgr': handle_jgr, '.jbu': handle_jbu, '.lzt': handle_lzt
        , '.frt': handle_frt, '.multi': handle_multi}
    # NOTE(review): splits sys.argv[1] rather than fp — equivalent only
    # while do_handle is called with sys.argv[1]; confirm before reusing.
    (fn,fe) = os.path.splitext(sys.argv[1])
    if g_dbg:
        print 'fp,(fn,fe) = ', fp,(fn,fe)
    if '-jbu_direct' in sys.argv:
        handle_jbu_direct(fp,fn,fe)
        return
    k_ext_handlers.get(fe, handle_generic)(fp,fn,fe)
def jbu_jgr_to_tex(args, tmp_files, fp):
    """Render the jgraph file *fp* into a temp .tex via the bundled mako
    template; returns the [path, ok] pair also appended to tmp_files.
    """
    fpo = jbu_gen_tmpfile(tmp_files, '.tex'); efpo = jbu_expect(fpo);
    tmako = fpo + '.mako'
    # The template is copied next to the output so relative paths resolve.
    shutil.copy(fpjoinhere(['handle_file_jgr.tex.mako']), tmako)
    try:
        with open(fpo, 'w+') as of:
            # Python 2 `print >> file`: writes the rendered template.
            print >> of, mako_temp.Template(filename=tmako).render(my_argv=args+['-in', fp])
    except:
        # Best-effort: a render failure leaves fpo unwritten and is
        # reported through the ok-flag below.
        print(traceback.format_exc())
    fpo2 = [fpo, jbu_expect_check(efpo)]; tmp_files.append(fpo2); return fpo2;
def yaml_cmds(_cmds):
    """Parse extracted comment lines as a YAML mapping.

    Args:
        _cmds: list of raw comment lines (mutated: first/last are
            left-stripped, as before).
    Returns:
        The parsed mapping, or {} when parsing fails or yields nothing.
    """
    try:
        cmds = _cmds
        cmds[0] = cmds[0].lstrip()
        cmds[-1] = cmds[-1].lstrip()
        # safe_load: yaml.load without an explicit Loader is unsafe on
        # untrusted input and raises TypeError on PyYAML >= 6 — which made
        # this function silently return {} for every input.
        cmds = yaml.safe_load(''.join(cmds))
        return cmds if cmds else {}
    except Exception:
        # best-effort: any parse problem yields "no commands"
        return {}
def jbu_extract_tools(fp, cmt):
    """Return the 'tools' list from *fp*'s embedded YAML header, or [].

    Args:
        fp: file to scan.
        cmt: comment delimiter spec passed through to extract_command.
    """
    # Use the caller-supplied delimiters: the original hard-coded ['%']
    # and ignored `cmt` (the only visible caller passes ['%'] anyway).
    cmds = extract_command(fp, cmt)
    ycmds = yaml_cmds(cmds)
    if 'tools' in ycmds:
        return ycmds['tools']
    return []
def jbu_extract_tex_tool(fp, cmt, def_tool='pdflatex'):
    """First declared tool containing 'tex' in *fp*'s tool list, falling
    back to *def_tool* when none is declared."""
    for tool in jbu_extract_tools(fp, cmt):
        if 'tex' in tool:
            return tool
    return def_tool
def jbu_tex_to_pdf(args, tmp_files, fp):
    """Compile the TeX file *fp* to a numbered temp .pdf.

    Args:
        args: pipeline arguments; a 'tool:<name>' entry overrides the TeX
            tool and is removed from args (in place).
        tmp_files: accumulator of [path, ok] temp entries (appended to).
        fp: path of the .tex source.
    Returns:
        [temp_pdf_path, ok] — ok is False when the tool produced no
        fresh output, in which case the captured log is printed.
    """
    fpo = jbu_gen_tmpfile(tmp_files, '.pdf')
    textool = jbu_extract_tex_tool(fp, ['%'])
    # Strip 'tool:' overrides from args: they are directives, not flags.
    rem_arg_i = []
    for i, arg in enumerate(args):
        if arg.startswith('tool:'):
            textool = arg[len('tool:'):]
            rem_arg_i.append(i)
    for arg_i in reversed(rem_arg_i):
        args.pop(arg_i)
    exec_cmd = [' '.join([textool] + args + [fp])]
    fptoolout = repext(fp, '.pdf')
    efpo = jbu_expect(fptoolout)
    outs = exe_command(fp, exec_cmd, False, True)
    shutil.copy(fptoolout, fpo)
    fpo2 = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(fpo2)
    if not fpo2[1]:
        # print() call form (were Python 2 print statements)
        print('captured log of: [{}]'.format(exec_cmd))
        print(outs[0][0])
        print(outs[0][1])
    return fpo2
def jbu_dvi_to_pdf(args, tmp_files, fp):
    """Convert the DVI file *fp* to a numbered temp .pdf (dvipdfm by
    default; a 'tool:<name>' entry in args overrides and is removed).

    Returns [temp_pdf_path, ok]; prints the captured log when not ok.
    """
    fpo = jbu_gen_tmpfile(tmp_files, '.pdf')
    pdftool = 'dvipdfm'
    # Strip 'tool:' overrides from args: they are directives, not flags.
    rem_arg_i = []
    for i, arg in enumerate(args):
        if arg.startswith('tool:'):
            pdftool = arg[len('tool:'):]
            rem_arg_i.append(i)
    for arg_i in reversed(rem_arg_i):
        args.pop(arg_i)
    exec_cmd = [' '.join([pdftool, fp] + args)]
    fptoolout = repext(fp, '.pdf')
    efpo = jbu_expect(fptoolout)
    outs = exe_command(fp, exec_cmd, False, True)
    shutil.copy(fptoolout, fpo)
    fpo2 = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(fpo2)
    if not fpo2[1]:
        # print() call form (were Python 2 print statements)
        print('captured log of: [{}]'.format(exec_cmd))
        print(outs[0][0])
        print(outs[0][1])
    return fpo2
def jbu_ps_to_pdf(args, tmp_files, fp):
    """Convert the PostScript file *fp* to a numbered temp .pdf (ps2pdf by
    default; a 'tool:<name>' entry in args overrides and is removed).

    Returns [temp_pdf_path, ok]; prints the captured log when not ok.
    """
    fpo = jbu_gen_tmpfile(tmp_files, '.pdf')
    pdftool = 'ps2pdf'
    # Strip 'tool:' overrides from args: they are directives, not flags.
    rem_arg_i = []
    for i, arg in enumerate(args):
        if arg.startswith('tool:'):
            pdftool = arg[len('tool:'):]
            rem_arg_i.append(i)
    for arg_i in reversed(rem_arg_i):
        args.pop(arg_i)
    exec_cmd = [' '.join([pdftool, fp] + args)]
    fptoolout = repext(fp, '.pdf')
    efpo = jbu_expect(fptoolout)
    outs = exe_command(fp, exec_cmd, False, True)
    shutil.copy(fptoolout, fpo)
    fpo2 = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(fpo2)
    if not fpo2[1]:
        # print() call form (were Python 2 print statements)
        print('captured log of: [{}]'.format(exec_cmd))
        print(outs[0][0])
        print(outs[0][1])
    return fpo2
def jbu_dvi_to_ps(args, tmp_files, fp):
    """Convert the DVI file *fp* to a numbered temp .ps (dvips by default;
    a 'tool:<name>' entry in args overrides and is removed).

    Returns [temp_ps_path, ok]; prints the captured log when not ok.
    """
    fpo = jbu_gen_tmpfile(tmp_files, '.ps')
    pstool = 'dvips'
    # Strip 'tool:' overrides from args: they are directives, not flags.
    rem_arg_i = []
    for i, arg in enumerate(args):
        if arg.startswith('tool:'):
            pstool = arg[len('tool:'):]
            rem_arg_i.append(i)
    for arg_i in reversed(rem_arg_i):
        args.pop(arg_i)
    exec_cmd = [' '.join([pstool, fp] + args)]
    fptoolout = repext(fp, '.ps')
    efpo = jbu_expect(fptoolout)
    outs = exe_command(fp, exec_cmd, False, True)
    shutil.copy(fptoolout, fpo)
    fpo2 = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(fpo2)
    if not fpo2[1]:
        # print() call form (were Python 2 print statements)
        print('captured log of: [{}]'.format(exec_cmd))
        print(outs[0][0])
        print(outs[0][1])
    return fpo2
def jbu_tex_to_dvi(args, tmp_files, fp):
    """Compile the TeX file *fp* to a numbered temp .dvi (latex by default;
    a 'tool:<name>' entry in args overrides and is removed).

    Returns [temp_dvi_path, ok]; prints the captured log when not ok.
    """
    fpo = jbu_gen_tmpfile(tmp_files, '.dvi')
    textool = jbu_extract_tex_tool(fp, ['%'], def_tool='latex')
    # Strip 'tool:' overrides from args: they are directives, not flags.
    rem_arg_i = []
    for i, arg in enumerate(args):
        if arg.startswith('tool:'):
            textool = arg[len('tool:'):]
            rem_arg_i.append(i)
    for arg_i in reversed(rem_arg_i):
        args.pop(arg_i)
    exec_cmd = [' '.join([textool] + args + [fp])]
    fptoolout = repext(fp, '.dvi')
    efpo = jbu_expect(fptoolout)
    outs = exe_command(fp, exec_cmd, False, True)
    shutil.copy(fptoolout, fpo)
    fpo2 = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(fpo2)
    if not fpo2[1]:
        # print() call form (were Python 2 print statements)
        print('captured log of: [{}]'.format(exec_cmd))
        print(outs[0][0])
        print(outs[0][1])
    return fpo2
def jbu_pdf_to_png(args, tmp_files, fp):
    """Rasterize the pdf *fp* into a numbered temp .png via ImageMagick's
    'convert' at 300 dpi with the alpha channel removed."""
    fpo = jbu_gen_tmpfile(tmp_files, '.png')
    efpo = jbu_expect(fpo)
    pngtool = 'convert'
    cmd = ' '.join([pngtool, '-density', '300', '-alpha', 'remove', fp, fpo] + args)
    exe_command(fp, [cmd])
    result = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(result)
    return result
def jbu_pdf_to_jpeg(args, tmp_files, fp):
    """Rasterize the pdf *fp* into a numbered temp .jpeg via ImageMagick's
    'convert' at 300 dpi with the alpha channel removed."""
    fpo = jbu_gen_tmpfile(tmp_files, '.jpeg')
    efpo = jbu_expect(fpo)
    imgtool = 'convert'
    cmd = ' '.join([imgtool, '-density', '300', '-alpha', 'remove', fp, fpo] + args)
    exe_command(fp, [cmd])
    result = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(result)
    return result
def jbu_md_to_pdf(args, tmp_files, fp):
    """Convert the markdown file *fp* into a numbered temp .pdf via pandoc."""
    fpo = jbu_gen_tmpfile(tmp_files, '.pdf')
    efpo = jbu_expect(fpo)
    mdtool = 'pandoc'
    exe_command(fp, [' '.join([mdtool, fp, '-o', fpo] + args)])
    result = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(result)
    return result
def jbu_md_to_tex(args, tmp_files, fp):
    """Convert the markdown file *fp* into a numbered temp .tex via pandoc."""
    fpo = jbu_gen_tmpfile(tmp_files, '.tex')
    efpo = jbu_expect(fpo)
    mdtool = 'pandoc'
    exe_command(fp, [' '.join([mdtool, fp, '-o', fpo] + args)])
    result = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(result)
    return result
def jbu_dot_to_tex(args, tmp_files, fp):
    """Convert the graphviz file *fp* into a numbered temp .tex via dot2tex."""
    fpo = jbu_gen_tmpfile(tmp_files, '.tex')
    efpo = jbu_expect(fpo)
    dottool = 'dot2tex'
    exe_command(fp, [' '.join([dottool, fp, '-o', fpo] + args)])
    result = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(result)
    return result
def jbu_lzt_to_md(args, tmp_files, fp):
    """Convert the .lzt file *fp* into a numbered temp .md via the bundled
    'lztex' tool."""
    fpo = jbu_gen_tmpfile(tmp_files, '.md')
    efpo = jbu_expect(fpo)
    mdtool = fpjoinhere(['lztex'])
    exe_command(fp, [' '.join([mdtool, fp, '-o', fpo] + args)])
    result = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(result)
    return result
def jbu_wrap_single(args, tmp_files, fp):
    """Re-flow the text of *fp* to args[0] columns (default 48) into a
    numbered temp file with the same extension."""
    fpo = jbu_gen_tmpfile(tmp_files, os.path.splitext(fp)[1])
    efpo = jbu_expect(fpo)
    wrap_n = int(args[0]) if len(args) else 48
    with open(fp, 'r') as ifp:
        raw = ifp.read()
    # Collapse existing line breaks (and their surrounding spaces), then
    # re-wrap to the requested width.
    joined = raw.replace(' \n', ' ').replace('\n ', ' ').replace('\n', '')
    text = break_lines(joined, wrap_n)
    with open(fpo, 'w') as ofp:
        ofp.write(text)
    result = [fpo, jbu_expect_check(efpo)]
    tmp_files.append(result)
    return result
def jbu_concat_pdf(args, tmp_files, fgroups):
    """Concatenate the ok files of each group into one temp pdf per group,
    using the bundled 'concat_pdf' tool; returns one entry per group.
    """
    fgo = []
    for files in fgroups:
        fpo = jbu_gen_tmpfile(tmp_files, '.pdf')
        efpo = jbu_expect(fpo)
        concattool = fpjoinhere(['concat_pdf'])
        # Only entries whose ok-flag is set take part in the concat.
        ifiles = [x[0] for x in files if x[1]]
        exe_command(fpo, [' '.join([concattool, '--output', fpo]+ifiles)], True)
        # NOTE(review): unlike the other converters this appends the
        # wrapped list [[fp, ok]] (not [fp, ok]) to tmp_files — confirm
        # downstream cleanup expects that shape.
        fpo = [[fpo, jbu_expect_check(efpo)]]; tmp_files.append(fpo);
        fgo.append(fpo)
    return fgo
def jbu_wrap(args, tmp_files, fgroups):
    """Apply jbu_wrap_single to every ok entry of every group; entries
    flagged not-ok pass through as the failed entry ['', False]."""
    fgo = []
    for files in fgroups:
        group_out = []
        for fp, fpok in files:
            entry = jbu_wrap_single(args, tmp_files, fp) if fpok else ['', False]
            group_out.append(entry)
        fgo.append(group_out)
    return fgo
def jbu_to_tex(args, tmp_files, fgroups):
    """Normalize every entry of every file group to a .tex file.

    .tex passes through; .md and .jgr are converted.  Unsupported
    extensions and not-ok inputs both yield the failed entry ['', False]
    (previously such entries could be silently dropped, desynchronizing
    output groups from their inputs — cf. jbu_to_pdf's full chain).
    """
    fgo = []
    for files in fgroups:
        fpo = []
        for (fp, fpok) in files:
            if not fpok:
                fpo.append(['', False])
            elif fp.endswith('.tex'):
                fpo.append([fp, fpok])
            elif fp.endswith('.md'):
                fpo.append(jbu_md_to_tex(args, tmp_files, fp))
            elif fp.endswith('.jgr'):
                fpo.append(jbu_jgr_to_tex(args, tmp_files, fp))
            else:
                fpo.append(['', False])
        fgo.append(fpo)
    return fgo
def jbu_to_md(args, tmp_files, fgroups):
    """Normalize every entry of every file group to a .md file.

    .md passes through; .lzt is converted.  Unsupported extensions and
    not-ok inputs both yield the failed entry ['', False] (previously
    such entries could be silently dropped, desynchronizing output
    groups from their inputs).
    """
    fgo = []
    for files in fgroups:
        fpo = []
        for (fp, fpok) in files:
            if not fpok:
                fpo.append(['', False])
            elif fp.endswith('.md'):
                fpo.append([fp, fpok])
            elif fp.endswith('.lzt'):
                fpo.append(jbu_lzt_to_md(args, tmp_files, fp))
            else:
                fpo.append(['', False])
        fgo.append(fpo)
    return fgo
def jbu_to_png(args, tmp_files, fgroups):
    """Normalize every entry of every file group to a .png file.

    .png passes through; .pdf is rasterized.  Unsupported extensions and
    not-ok inputs both yield the failed entry ['', False] (previously
    one of those cases could be silently dropped, desynchronizing output
    groups from their inputs).
    """
    fgo = []
    for files in fgroups:
        fpo = []
        for (fp, fpok) in files:
            if not fpok:
                fpo.append(['', False])
            elif fp.endswith('.png'):
                fpo.append([fp, fpok])
            elif fp.endswith('.pdf'):
                fpo.append(jbu_pdf_to_png(args, tmp_files, fp))
            else:
                fpo.append(['', False])
        fgo.append(fpo)
    return fgo
def jbu_to_jpeg(args, tmp_files, fgroups):
    """Normalize every entry of every file group to a .jpeg file.

    .jpeg passes through; .pdf is rasterized.  Unsupported extensions and
    not-ok inputs both yield the failed entry ['', False] (previously
    one of those cases could be silently dropped, desynchronizing output
    groups from their inputs).
    """
    fgo = []
    for files in fgroups:
        fpo = []
        for (fp, fpok) in files:
            if not fpok:
                fpo.append(['', False])
            elif fp.endswith('.jpeg'):
                fpo.append([fp, fpok])
            elif fp.endswith('.pdf'):
                fpo.append(jbu_pdf_to_jpeg(args, tmp_files, fp))
            else:
                fpo.append(['', False])
        fgo.append(fpo)
    return fgo
def jbu_to_pdf(args, tmp_files, fgroups):
    """Normalize every entry of every file group to a .pdf file.

    Known conversions: .pdf passthrough, .jgr -> tex -> pdf,
    .lzt -> md -> pdf, .tex, .dvi, .ps, .md, .dot -> tex -> pdf.
    Unsupported extensions and not-ok inputs both yield the failed
    entry ['', False], keeping output groups aligned with inputs.
    """
    fgo = []
    for files in fgroups:
        fpo = []
        for (fp, fpok) in files:
            if not fpok:
                fpo.append(['', False])
            elif fp.endswith('.pdf'):
                fpo.append([fp, fpok])
            elif fp.endswith('.jgr'):
                # two-step: render jgraph to tex first
                (tex, texok) = jbu_jgr_to_tex(args, tmp_files, fp)
                fpo.append(jbu_tex_to_pdf(args, tmp_files, tex) if texok else ['', False])
            elif fp.endswith('.lzt'):
                # two-step: lzt to markdown first
                (md, mdok) = jbu_lzt_to_md(args, tmp_files, fp)
                fpo.append(jbu_md_to_pdf(args, tmp_files, md) if mdok else ['', False])
            elif fp.endswith('.tex'):
                fpo.append(jbu_tex_to_pdf(args, tmp_files, fp))
            elif fp.endswith('.dvi'):
                fpo.append(jbu_dvi_to_pdf(args, tmp_files, fp))
            elif fp.endswith('.ps'):
                fpo.append(jbu_ps_to_pdf(args, tmp_files, fp))
            elif fp.endswith('.md'):
                fpo.append(jbu_md_to_pdf(args, tmp_files, fp))
            elif fp.endswith('.dot'):
                # two-step: graphviz to tex first
                (tex, texok) = jbu_dot_to_tex(args, tmp_files, fp)
                fpo.append(jbu_tex_to_pdf(args, tmp_files, tex) if texok else ['', False])
            else:
                fpo.append(['', False])
        fgo.append(fpo)
    return fgo
def jbu_to_dvi(args, tmp_files, fgroups):
    """Normalize every entry of every file group to a .dvi file.

    .dvi passes through; .jgr goes via tex; .tex is compiled.
    Unsupported extensions and not-ok inputs both yield the failed
    entry ['', False], keeping output groups aligned with inputs.
    """
    fgo = []
    for files in fgroups:
        fpo = []
        for (fp, fpok) in files:
            if not fpok:
                fpo.append(['', False])
            elif fp.endswith('.dvi'):
                fpo.append([fp, fpok])
            elif fp.endswith('.jgr'):
                # two-step: render jgraph to tex first
                (tex, texok) = jbu_jgr_to_tex(args, tmp_files, fp)
                fpo.append(jbu_tex_to_dvi(args, tmp_files, tex) if texok else ['', False])
            elif fp.endswith('.tex'):
                fpo.append(jbu_tex_to_dvi(args, tmp_files, fp))
            else:
                fpo.append(['', False])
        fgo.append(fpo)
    return fgo
def jbu_to_ps(args, tmp_files, fgroups):
    """Normalize every entry of every file group to a .ps file.

    .ps passes through; .dvi is converted.  Unsupported extensions and
    not-ok inputs both yield the failed entry ['', False], keeping
    output groups aligned with inputs.
    """
    fgo = []
    for files in fgroups:
        fpo = []
        for (fp, fpok) in files:
            if not fpok:
                fpo.append(['', False])
            elif fp.endswith('.ps'):
                fpo.append([fp, fpok])
            elif fp.endswith('.dvi'):
                fpo.append(jbu_dvi_to_ps(args, tmp_files, fp))
            else:
                fpo.append(['', False])
        fgo.append(fpo)
    return fgo
def jbu_fail_files():
    """The canonical single-entry failed file list."""
    failed_entry = ['', False]
    return [failed_entry]
def jbu_fail_fgo():
    """A file-group list containing one failed file list."""
    failed_group = jbu_fail_files()
    return [failed_group]
def jbu_include_tex(fo, args, cfg, tmp_files, files):
arg_recipe = '' if ('-recipe' not in args) else args[args.index('-recipe')+1]
pre_recipe = cfg.get(arg_recipe, r"{\includegraphics[]{jbu_1}}")
if fo[1]:
fpo | |
= c_void_p
# ctypes prototypes for the isl_union_map C API: every function takes and
# returns opaque isl pointers (c_void_p), except the is_* predicates which
# return c_bool and the to_str serializer which returns a C string that
# the caller must free.
isl.isl_union_map_intersect_params.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_intersect_range.restype = c_void_p
isl.isl_union_map_intersect_range.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_is_bijective.restype = c_bool
isl.isl_union_map_is_bijective.argtypes = [c_void_p]
isl.isl_union_map_is_empty.restype = c_bool
isl.isl_union_map_is_empty.argtypes = [c_void_p]
isl.isl_union_map_is_equal.restype = c_bool
isl.isl_union_map_is_equal.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_is_injective.restype = c_bool
isl.isl_union_map_is_injective.argtypes = [c_void_p]
isl.isl_union_map_is_single_valued.restype = c_bool
isl.isl_union_map_is_single_valued.argtypes = [c_void_p]
isl.isl_union_map_is_strict_subset.restype = c_bool
isl.isl_union_map_is_strict_subset.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_is_subset.restype = c_bool
isl.isl_union_map_is_subset.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_lexmax.restype = c_void_p
isl.isl_union_map_lexmax.argtypes = [c_void_p]
isl.isl_union_map_lexmin.restype = c_void_p
isl.isl_union_map_lexmin.argtypes = [c_void_p]
isl.isl_union_map_polyhedral_hull.restype = c_void_p
isl.isl_union_map_polyhedral_hull.argtypes = [c_void_p]
isl.isl_union_map_product.restype = c_void_p
isl.isl_union_map_product.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_range.restype = c_void_p
isl.isl_union_map_range.argtypes = [c_void_p]
isl.isl_union_map_range_factor_domain.restype = c_void_p
isl.isl_union_map_range_factor_domain.argtypes = [c_void_p]
isl.isl_union_map_range_factor_range.restype = c_void_p
isl.isl_union_map_range_factor_range.argtypes = [c_void_p]
isl.isl_union_map_range_map.restype = c_void_p
isl.isl_union_map_range_map.argtypes = [c_void_p]
isl.isl_union_map_range_product.restype = c_void_p
isl.isl_union_map_range_product.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_reverse.restype = c_void_p
isl.isl_union_map_reverse.argtypes = [c_void_p]
isl.isl_union_map_subtract.restype = c_void_p
isl.isl_union_map_subtract.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_subtract_domain.restype = c_void_p
isl.isl_union_map_subtract_domain.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_subtract_range.restype = c_void_p
isl.isl_union_map_subtract_range.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_union.restype = c_void_p
isl.isl_union_map_union.argtypes = [c_void_p, c_void_p]
isl.isl_union_map_wrap.restype = c_void_p
isl.isl_union_map_wrap.argtypes = [c_void_p]
isl.isl_union_map_zip.restype = c_void_p
isl.isl_union_map_zip.argtypes = [c_void_p]
isl.isl_union_map_free.argtypes = [c_void_p]
isl.isl_union_map_to_str.argtypes = [c_void_p]
# POINTER(c_char), not c_char_p: the raw pointer is needed so it can be
# passed back to libc.free after decoding.
isl.isl_union_map_to_str.restype = POINTER(c_char)
class map(union_map):
    def __init__(self, *args, **keywords):
        """Build a map wrapper from either an existing isl pointer
        (ptr/ctx keywords), a basic_map, or an isl-format string;
        any other argument combination raises Error.
        """
        if "ptr" in keywords:
            self.ctx = keywords["ctx"]
            self.ptr = keywords["ptr"]
            return
        if len(args) == 1 and args[0].__class__ is basic_map:
            self.ctx = Context.getDefaultInstance()
            # isl_map_from_basic_map consumes its argument, so hand it a copy.
            self.ptr = isl.isl_map_from_basic_map(isl.isl_basic_map_copy(args[0].ptr))
            return
        if len(args) == 1 and type(args[0]) == str:
            self.ctx = Context.getDefaultInstance()
            self.ptr = isl.isl_map_read_from_str(self.ctx, args[0])
            return
        raise Error
def __del__(self):
if hasattr(self, 'ptr'):
isl.isl_map_free(self.ptr)
def __str__(self):
ptr = isl.isl_map_to_str(self.ptr)
res = str(cast(ptr, c_char_p).value)
libc.free(ptr)
return res
def __repr__(self):
s = str(self)
if '"' in s:
return 'isl.map("""%s""")' % s
else:
return 'isl.map("%s")' % s
def affine_hull(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_affine_hull(isl.isl_map_copy(arg0.ptr))
return basic_map(ctx=ctx, ptr=res)
def apply_domain(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).apply_domain(arg1)
ctx = arg0.ctx
res = isl.isl_map_apply_domain(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
return map(ctx=ctx, ptr=res)
def apply_range(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).apply_range(arg1)
ctx = arg0.ctx
res = isl.isl_map_apply_range(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
return map(ctx=ctx, ptr=res)
def coalesce(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_coalesce(isl.isl_map_copy(arg0.ptr))
return map(ctx=ctx, ptr=res)
def complement(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_complement(isl.isl_map_copy(arg0.ptr))
return map(ctx=ctx, ptr=res)
def deltas(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_deltas(isl.isl_map_copy(arg0.ptr))
return set(ctx=ctx, ptr=res)
def detect_equalities(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_detect_equalities(isl.isl_map_copy(arg0.ptr))
return map(ctx=ctx, ptr=res)
def flatten(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_flatten(isl.isl_map_copy(arg0.ptr))
return map(ctx=ctx, ptr=res)
def flatten_domain(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_flatten_domain(isl.isl_map_copy(arg0.ptr))
return map(ctx=ctx, ptr=res)
def flatten_range(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_flatten_range(isl.isl_map_copy(arg0.ptr))
return map(ctx=ctx, ptr=res)
def foreach_basic_map(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
exc_info = [None]
fn = CFUNCTYPE(c_int, c_void_p, c_void_p)
def cb_func(cb_arg0, cb_arg1):
cb_arg0 = basic_map(ctx=arg0.ctx, ptr=cb_arg0)
try:
arg1(cb_arg0)
except:
import sys
exc_info[0] = sys.exc_info()
return -1
return 0
cb = fn(cb_func)
ctx = arg0.ctx
res = isl.isl_map_foreach_basic_map(arg0.ptr, cb, None)
if exc_info[0] != None:
raise exc_info[0][0], exc_info[0][1], exc_info[0][2]
return res
def gist(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).gist(arg1)
ctx = arg0.ctx
res = isl.isl_map_gist(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
return map(ctx=ctx, ptr=res)
def gist_domain(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is set:
arg1 = set(arg1)
except:
return union_map(arg0).gist_domain(arg1)
ctx = arg0.ctx
res = isl.isl_map_gist_domain(isl.isl_map_copy(arg0.ptr), isl.isl_set_copy(arg1.ptr))
return map(ctx=ctx, ptr=res)
def intersect(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).intersect(arg1)
ctx = arg0.ctx
res = isl.isl_map_intersect(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
return map(ctx=ctx, ptr=res)
def intersect_domain(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is set:
arg1 = set(arg1)
except:
return union_map(arg0).intersect_domain(arg1)
ctx = arg0.ctx
res = isl.isl_map_intersect_domain(isl.isl_map_copy(arg0.ptr), isl.isl_set_copy(arg1.ptr))
return map(ctx=ctx, ptr=res)
def intersect_params(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is set:
arg1 = set(arg1)
except:
return union_map(arg0).intersect_params(arg1)
ctx = arg0.ctx
res = isl.isl_map_intersect_params(isl.isl_map_copy(arg0.ptr), isl.isl_set_copy(arg1.ptr))
return map(ctx=ctx, ptr=res)
def intersect_range(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is set:
arg1 = set(arg1)
except:
return union_map(arg0).intersect_range(arg1)
ctx = arg0.ctx
res = isl.isl_map_intersect_range(isl.isl_map_copy(arg0.ptr), isl.isl_set_copy(arg1.ptr))
return map(ctx=ctx, ptr=res)
def is_bijective(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_is_bijective(arg0.ptr)
if res < 0:
raise
return bool(res)
def is_disjoint(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).is_disjoint(arg1)
ctx = arg0.ctx
res = isl.isl_map_is_disjoint(arg0.ptr, arg1.ptr)
if res < 0:
raise
return bool(res)
def is_empty(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_is_empty(arg0.ptr)
if res < 0:
raise
return bool(res)
def is_equal(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).is_equal(arg1)
ctx = arg0.ctx
res = isl.isl_map_is_equal(arg0.ptr, arg1.ptr)
if res < 0:
raise
return bool(res)
def is_injective(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_is_injective(arg0.ptr)
if res < 0:
raise
return bool(res)
def is_single_valued(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_is_single_valued(arg0.ptr)
if res < 0:
raise
return bool(res)
def is_strict_subset(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).is_strict_subset(arg1)
ctx = arg0.ctx
res = isl.isl_map_is_strict_subset(arg0.ptr, arg1.ptr)
if res < 0:
raise
return bool(res)
def is_subset(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).is_subset(arg1)
ctx = arg0.ctx
res = isl.isl_map_is_subset(arg0.ptr, arg1.ptr)
if res < 0:
raise
return bool(res)
def lexmax(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_lexmax(isl.isl_map_copy(arg0.ptr))
return map(ctx=ctx, ptr=res)
def lexmin(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_lexmin(isl.isl_map_copy(arg0.ptr))
return map(ctx=ctx, ptr=res)
def polyhedral_hull(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_polyhedral_hull(isl.isl_map_copy(arg0.ptr))
return basic_map(ctx=ctx, ptr=res)
def reverse(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_reverse(isl.isl_map_copy(arg0.ptr))
return map(ctx=ctx, ptr=res)
def sample(arg0):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_map_sample(isl.isl_map_copy(arg0.ptr))
return basic_map(ctx=ctx, ptr=res)
def subtract(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).subtract(arg1)
ctx = arg0.ctx
res = isl.isl_map_subtract(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
return map(ctx=ctx, ptr=res)
def union(arg0, arg1):
try:
if not arg0.__class__ is map:
arg0 = map(arg0)
except:
raise
try:
if not arg1.__class__ is map:
arg1 = map(arg1)
except:
return union_map(arg0).union(arg1)
ctx = arg0.ctx
res = isl.isl_map_union(isl.isl_map_copy(arg0.ptr), isl.isl_map_copy(arg1.ptr))
return map(ctx=ctx, ptr=res)
# ctypes signatures for the isl_map_* C entry points used by the map class
# above: constructors/transformers return opaque isl pointers (c_void_p),
# predicates return c_bool, and string parsing takes a Context plus a
# char* source string.
isl.isl_map_from_basic_map.restype = c_void_p
isl.isl_map_from_basic_map.argtypes = [c_void_p]
isl.isl_map_read_from_str.restype = c_void_p
isl.isl_map_read_from_str.argtypes = [Context, c_char_p]
isl.isl_map_affine_hull.restype = c_void_p
isl.isl_map_affine_hull.argtypes = [c_void_p]
isl.isl_map_apply_domain.restype = c_void_p
isl.isl_map_apply_domain.argtypes = [c_void_p, c_void_p]
isl.isl_map_apply_range.restype = c_void_p
isl.isl_map_apply_range.argtypes = [c_void_p, c_void_p]
isl.isl_map_coalesce.restype = c_void_p
isl.isl_map_coalesce.argtypes = [c_void_p]
isl.isl_map_complement.restype = c_void_p
isl.isl_map_complement.argtypes = [c_void_p]
isl.isl_map_deltas.restype = c_void_p
isl.isl_map_deltas.argtypes = [c_void_p]
isl.isl_map_detect_equalities.restype = c_void_p
isl.isl_map_detect_equalities.argtypes = [c_void_p]
isl.isl_map_flatten.restype = c_void_p
isl.isl_map_flatten.argtypes = [c_void_p]
isl.isl_map_flatten_domain.restype = c_void_p
isl.isl_map_flatten_domain.argtypes = [c_void_p]
isl.isl_map_flatten_range.restype = c_void_p
isl.isl_map_flatten_range.argtypes = [c_void_p]
# foreach takes (map, callback fn pointer, user data); its int result needs
# no explicit restype.
isl.isl_map_foreach_basic_map.argtypes = [c_void_p, c_void_p, c_void_p]
isl.isl_map_gist.restype = c_void_p
isl.isl_map_gist.argtypes = [c_void_p, c_void_p]
isl.isl_map_gist_domain.restype = c_void_p
isl.isl_map_gist_domain.argtypes = [c_void_p, c_void_p]
isl.isl_map_intersect.restype = c_void_p
isl.isl_map_intersect.argtypes = [c_void_p, c_void_p]
isl.isl_map_intersect_domain.restype = c_void_p
isl.isl_map_intersect_domain.argtypes = [c_void_p, c_void_p]
isl.isl_map_intersect_params.restype = c_void_p
isl.isl_map_intersect_params.argtypes = [c_void_p, c_void_p]
isl.isl_map_intersect_range.restype = c_void_p
isl.isl_map_intersect_range.argtypes = [c_void_p, c_void_p]
isl.isl_map_is_bijective.restype = c_bool
isl.isl_map_is_bijective.argtypes = [c_void_p]
isl.isl_map_is_disjoint.restype = c_bool
isl.isl_map_is_disjoint.argtypes = [c_void_p, c_void_p]
isl.isl_map_is_empty.restype = c_bool
isl.isl_map_is_empty.argtypes = [c_void_p]
isl.isl_map_is_equal.restype = c_bool
isl.isl_map_is_equal.argtypes = [c_void_p, c_void_p]
isl.isl_map_is_injective.restype = c_bool
# BUGFIX: this assignment was truncated ("argtypes = " with no right-hand
# side -- a syntax error).  map.is_injective calls the C function with a
# single pointer argument (isl.isl_map_is_injective(arg0.ptr)), matching
# the other unary predicates, so its argument list is one c_void_p.
isl.isl_map_is_injective.argtypes = [c_void_p]
# NOTE(review): the three lines below appear to be web/dataset-viewer UI
# text accidentally appended to this generated file; as bare text they are
# a Python syntax error, so they are commented out here (original wording
# preserved verbatim):
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.