input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import pickle
import datetime
import pytest
import six
import uuid
from pyrsistent import (
PRecord, field, InvariantException, ny, pset, PSet, CheckedPVector,
PTypeError, pset_field, pvector_field, pmap_field, pmap, PMap,
pvector, PVector, v, m)
class ARecord(PRecord):
    """Basic record used throughout the tests: numeric ``x`` plus untyped ``y``."""
    x = field(type=(int, float))  # only int or float accepted
    y = field()  # no type restriction
class RecordContainingContainers(PRecord):
    """Record with one checked container field of each kind, all holding str items."""
    map = pmap_field(str, str)
    vec = pvector_field(str)
    set = pset_field(str)
class UniqueThing(PRecord):
    """Record whose ``id`` field coerces its input through ``uuid.UUID``."""
    id = field(type=uuid.UUID, factory=uuid.UUID)
class Something(object):
    """Marker type; only its class name matters to the *_field_name tests."""
    pass
class Another(object):
    """Second marker type for the multiple-types field-name tests."""
    pass
def test_create():
    """Constructing a record with keyword args stores the given field values."""
    record = ARecord(x=1, y='foo')
    assert isinstance(record, ARecord)
    assert record.x == 1
    assert record.y == 'foo'
def test_correct_assignment():
    """set() returns a new record of the same type; earlier records are untouched."""
    original = ARecord(x=1, y='foo')
    with_new_x = original.set('x', 2.0)
    with_new_y = with_new_x.set('y', 'bar')
    assert isinstance(with_new_y, ARecord)
    assert with_new_x == {'x': 2.0, 'y': 'foo'}
    assert with_new_y == {'x': 2.0, 'y': 'bar'}
def test_direct_assignment_not_possible():
    """Records are immutable: direct attribute assignment raises AttributeError."""
    record = ARecord()
    with pytest.raises(AttributeError):
        record.x = 1
def test_cannot_assign_undeclared_fields():
with pytest.raises(AttributeError):
ARecord().set('z', 5)
def test_cannot_assign_wrong_type_to_fields():
    """A type-checked field rejects wrong values with a descriptive PTypeError."""
    with pytest.raises(PTypeError) as excinfo:
        ARecord().set('x', 'foo')
    error = excinfo.value
    assert error.source_class == ARecord
    assert error.field == 'x'
    assert error.expected_types == {int, float}
    assert error.actual_type is str
def test_cannot_construct_with_undeclared_fields():
    """The constructor rejects keyword arguments that are not declared fields."""
    with pytest.raises(AttributeError):
        ARecord(z=5)
def test_cannot_construct_with_fields_of_wrong_type():
    """The constructor applies the same type checking as set()."""
    with pytest.raises(TypeError):
        ARecord(x='foo')
def test_support_record_inheritance():
    """Subclasses inherit all fields declared on their parent record."""
    class BRecord(ARecord):
        z = field()

    record = BRecord(x=1, y='foo', z='bar')
    assert isinstance(record, ARecord)
    assert isinstance(record, BRecord)
    assert record == {'x': 1, 'y': 'foo', 'z': 'bar'}
def test_single_type_spec():
    """A bare type (not a tuple) may be given as the field type."""
    class A(PRecord):
        x = field(type=int)

    record = A(x=1)
    assert record.x == 1
    with pytest.raises(TypeError):
        record.set('x', 'foo')
def test_remove():
    """remove() drops a field and returns a new record of the same type."""
    record = ARecord(x=1, y='foo')
    without_y = record.remove('y')
    assert isinstance(without_y, ARecord)
    assert without_y == {'x': 1}
def test_remove_non_existing_member():
    """Removing an absent field raises KeyError."""
    record = ARecord(x=1, y='foo')
    with pytest.raises(KeyError):
        record.remove('z')
def test_field_invariant_must_hold():
    """Field invariant failures and missing mandatory fields are both reported."""
    class BRecord(PRecord):
        x = field(invariant=lambda x: (x > 1, 'x too small'))
        y = field(mandatory=True)

    with pytest.raises(InvariantException) as excinfo:
        BRecord(x=1)
    assert excinfo.value.invariant_errors == ('x too small',)
    assert excinfo.value.missing_fields == ('BRecord.y',)
def test_global_invariant_must_hold():
    """A record-level __invariant__ is checked on every construction."""
    class BRecord(PRecord):
        __invariant__ = lambda r: (r.x <= r.y, 'y smaller than x')
        x = field()
        y = field()

    BRecord(x=1, y=2)  # satisfies the invariant
    with pytest.raises(InvariantException) as excinfo:
        BRecord(x=2, y=1)
    assert excinfo.value.invariant_errors == ('y smaller than x',)
    assert excinfo.value.missing_fields == ()
def test_set_multiple_fields():
    """set() accepts several fields as keyword arguments in one call."""
    record = ARecord(x=1, y='foo')
    updated = record.set(x=2, y='bar')
    assert updated == {'x': 2, 'y': 'bar'}
def test_initial_value():
    """Fields fall back to their declared initial values when not supplied."""
    class BRecord(PRecord):
        x = field(initial=1)
        y = field(initial=2)

    record = BRecord()
    assert (record.x, record.y) == (1, 2)
def test_enum_field():
    """Enum members are accepted as values for a field typed to the Enum class.

    BUG FIX: the original returned silently when ``enum`` was unavailable,
    which made the test report as *passed* without running anything.
    ``pytest.importorskip`` records an explicit skip instead.
    """
    enum = pytest.importorskip("enum")  # stdlib on Py3.4+; enum34 backport otherwise

    class TestEnum(enum.Enum):
        x = 1
        y = 2

    class RecordContainingEnum(PRecord):
        enum_field = field(type=TestEnum)

    r = RecordContainingEnum(enum_field=TestEnum.x)
    assert r.enum_field == TestEnum.x
def test_type_specification_must_be_a_type():
    """field(type=...) rejects values that are not types at class creation."""
    with pytest.raises(TypeError):
        class BRecord(PRecord):
            x = field(type=1)
def test_initial_must_be_of_correct_type():
    """The declared initial value is type checked at class creation."""
    with pytest.raises(TypeError):
        class BRecord(PRecord):
            x = field(type=int, initial='foo')
def test_invariant_must_be_callable():
    """A non-callable field invariant is rejected at class creation."""
    with pytest.raises(TypeError):
        class BRecord(PRecord):
            x = field(invariant='foo')
def test_global_invariants_are_inherited():
    """A subclass checks its parents' global invariants as well as its own."""
    class BRecord(PRecord):
        __invariant__ = lambda r: (r.x % r.y == 0, 'modulo')
        x = field()
        y = field()

    class CRecord(BRecord):
        __invariant__ = lambda r: (r.x > r.y, 'size')

    # x=5, y=3 satisfies the subclass 'size' invariant but violates
    # the inherited 'modulo' invariant.
    with pytest.raises(InvariantException) as excinfo:
        CRecord(x=5, y=3)
    assert excinfo.value.invariant_errors == ('modulo',)
def test_global_invariants_must_be_callable():
    """A non-callable __invariant__ is rejected at class creation."""
    with pytest.raises(TypeError):
        class CRecord(PRecord):
            __invariant__ = 1
def test_repr():
    """repr() shows the class name and every field; field order is unspecified."""
    record = ARecord(x=1, y=2)
    assert repr(record) in ('ARecord(x=1, y=2)', 'ARecord(y=2, x=1)')
def test_factory():
    """The factory coerces input values before the type check runs."""
    class BRecord(PRecord):
        x = field(type=int, factory=int)

    assert BRecord(x=2.5) == {'x': 2}
def test_factory_must_be_callable():
    """A non-callable factory is rejected at class creation."""
    with pytest.raises(TypeError):
        class BRecord(PRecord):
            x = field(type=int, factory=1)
def test_nested_record_construction():
    """create() recursively builds nested records from plain dicts."""
    class BRecord(PRecord):
        x = field(int, factory=int)

    class CRecord(PRecord):
        a = field()
        b = field(type=BRecord)

    record = CRecord.create({'a': 'foo', 'b': {'x': '5'}})
    assert isinstance(record, CRecord)
    assert isinstance(record.b, BRecord)
    assert record == {'a': 'foo', 'b': {'x': 5}}
def test_pickling():
    """Records survive a pickle round trip with type and contents intact."""
    original = ARecord(x=2.0, y='bar')
    clone = pickle.loads(pickle.dumps(original, -1))
    assert isinstance(clone, ARecord)
    assert clone == original
def test_supports_pickling_with_typed_container_fields():
    """Checked container fields (map/vector/set) are picklable too."""
    original = RecordContainingContainers(
        map={'foo': 'bar'}, set=['hello', 'there'], vec=['a', 'b'])
    assert pickle.loads(pickle.dumps(original)) == original
def test_all_invariant_errors_reported():
    """Invariant errors from nested records are collected and reported together."""
    class BRecord(PRecord):
        x = field(factory=int, invariant=lambda x: (x >= 0, 'x negative'))
        y = field(mandatory=True)

    class CRecord(PRecord):
        a = field(invariant=lambda x: (x != 0, 'a zero'))
        b = field(type=BRecord)

    with pytest.raises(InvariantException) as excinfo:
        CRecord.create({'a': 0, 'b': {'x': -5}})
    assert set(excinfo.value.invariant_errors) == {'x negative', 'a zero'}
    assert excinfo.value.missing_fields == ('BRecord.y',)
def test_precord_factory_method_is_idempotent():
    """create() returns an existing record of the right type unchanged."""
    class BRecord(PRecord):
        x = field()
        y = field()

    record = BRecord(x=1, y=2)
    assert BRecord.create(record) is record
def test_serialize():
    """A field serializer receives the format and renders the value accordingly."""
    class BRecord(PRecord):
        d = field(
            type=datetime.date,
            factory=lambda raw: datetime.datetime.strptime(raw, "%d%m%Y").date(),
            serializer=lambda fmt, value: (
                value.strftime('%Y-%m-%d') if fmt == 'ISO'
                else value.strftime('%d%m%Y')))

    record = BRecord(d='14012015')
    assert record.serialize('ISO') == {'d': '2015-01-14'}
    assert record.serialize('other') == {'d': '14012015'}
def test_nested_serialize():
    """serialize() recurses into nested records and yields plain dicts."""
    class BRecord(PRecord):
        d = field(serializer=lambda fmt, value: fmt)

    class CRecord(PRecord):
        b = field()

    serialized = CRecord(b=BRecord(d='foo')).serialize('bar')
    assert isinstance(serialized, dict)
    assert serialized == {'b': {'d': 'bar'}}
def test_serializer_must_be_callable():
    """A non-callable serializer is rejected at class creation."""
    with pytest.raises(TypeError):
        class CRecord(PRecord):
            x = field(serializer=1)
def test_transform_without_update_returns_same_precord():
    """An identity transform is detected and the very same record is returned."""
    record = ARecord(x=2.0, y='bar')
    assert record.transform([ny], lambda value: value) is record
class Application(PRecord):
    """Application record; name/image accept any py2/py3 string type via six."""
    name = field(type=(six.text_type,) + six.string_types)
    image = field(type=(six.text_type,) + six.string_types)
class ApplicationVector(CheckedPVector):
    """Checked vector that only accepts ``Application`` records."""
    __type__ = Application
class Node(PRecord):
    """Node record holding a checked vector of applications."""
    applications = field(type=ApplicationVector)
def test_nested_create_serialize():
    """create() and serialize() round-trip nested checked structures."""
    node = Node(applications=[Application(name='myapp', image='myimage'),
                              Application(name='b', image='c')])
    from_dicts = Node.create({'applications': [{'name': 'myapp', 'image': 'myimage'},
                                               {'name': 'b', 'image': 'c'}]})
    assert from_dicts == node
    assert Node.create(node.serialize()) == node
def test_pset_field_initial_value():
    """``pset_field`` defaults to an empty set."""
    class Record(PRecord):
        value = pset_field(int)

    assert Record() == Record(value=[])
def test_pset_field_custom_initial():
    """``pset_field`` honours an explicit ``initial`` value."""
    class Record(PRecord):
        value = pset_field(int, initial=(1, 2))

    assert Record() == Record(value=[1, 2])
def test_pset_field_factory():
    """``pset_field`` converts iterable input into a ``PSet``."""
    class Record(PRecord):
        value = pset_field(int)

    assert isinstance(Record(value=[1, 2]).value, PSet)
def test_pset_field_checked_set():
    """The set created by ``pset_field`` rejects items of the wrong type."""
    class Record(PRecord):
        value = pset_field(int)

    record = Record(value=[1, 2])
    with pytest.raises(TypeError):
        record.value.add("hello")
def test_pset_field_checked_vector_multiple_types():
    """
    ``pset_field`` results in a set that enforces all of its allowed types.

    NOTE: the function name says "vector" but this exercises ``pset_field``;
    the docstring previously repeated that mistake. The name is kept so
    existing test selections keep working.
    """
    class Record(PRecord):
        value = pset_field((int, str))

    record = Record(value=[1, 2, "hello"])
    with pytest.raises(TypeError):
        record.value.add(object())
def test_pset_field_type():
    """``pset_field`` rejects non-set values such as ``None``."""
    class Record(PRecord):
        value = pset_field(int)

    record = Record()
    with pytest.raises(TypeError):
        record.set("value", None)
def test_pset_field_mandatory():
    """``pset_field`` is mandatory: it cannot be removed from the record."""
    class Record(PRecord):
        value = pset_field(int)

    record = Record(value=[1])
    with pytest.raises(InvariantException):
        record.remove("value")
def test_pset_field_default_non_optional():
    """``None`` is rejected by default (``optional`` defaults to false)."""
    class Record(PRecord):
        value = pset_field(int)

    with pytest.raises(TypeError):
        Record(value=None)
def test_pset_field_explicit_non_optional():
    """With an explicit ``optional=False`` the field still rejects ``None``."""
    class Record(PRecord):
        value = pset_field(int, optional=False)

    with pytest.raises(TypeError):
        Record(value=None)
def test_pset_field_optional():
    """With ``optional=True`` the field accepts ``None`` as well as iterables."""
    class Record(PRecord):
        value = pset_field(int, optional=True)

    assert Record(value=[1, 2]).value == pset([1, 2])
    assert Record(value=None).value is None
def test_pset_field_name():
    """The generated set class is named after the item type."""
    class Record(PRecord):
        value = pset_field(Something)
        value2 = pset_field(int)

    record = Record()
    assert record.value.__class__.__name__ == "SomethingPSet"
    assert record.value2.__class__.__name__ == "IntPSet"
def test_pset_multiple_types_field_name():
    """The generated set class name concatenates every allowed item type."""
    class Record(PRecord):
        value = pset_field((Something, int))

    assert Record().value.__class__.__name__ == "SomethingIntPSet"
def test_pset_field_name_string_type():
    """A type given by dotted-path string also contributes to the class name."""
    class Record(PRecord):
        value = pset_field("record_test.Something")

    assert Record().value.__class__.__name__ == "SomethingPSet"
def test_pset_multiple_string_types_field_name():
    """Multiple dotted-path type names all contribute to the class name."""
    class Record(PRecord):
        value = pset_field(("record_test.Something", "record_test.Another"))

    assert Record().value.__class__.__name__ == "SomethingAnotherPSet"
def test_pvector_field_initial_value():
    """``pvector_field`` defaults to an empty vector."""
    class Record(PRecord):
        value = pvector_field(int)

    assert Record() == Record(value=[])
def test_pvector_field_custom_initial():
    """``pvector_field`` honours an explicit ``initial`` value."""
    class Record(PRecord):
        value = pvector_field(int, initial=(1, 2))

    assert Record() == Record(value=[1, 2])
def test_pvector_field_factory():
    """``pvector_field`` converts iterable input into a ``PVector``."""
    class Record(PRecord):
        value = pvector_field(int)

    assert isinstance(Record(value=[1, 2]).value, PVector)
def test_pvector_field_checked_vector():
    """The vector created by ``pvector_field`` rejects items of the wrong type."""
    class Record(PRecord):
        value = pvector_field(int)

    record = Record(value=[1, 2])
    with pytest.raises(TypeError):
        record.value.append("hello")
def test_pvector_field_checked_vector_multiple_types():
    """The vector created by ``pvector_field`` enforces all of its allowed types."""
    class Record(PRecord):
        value = pvector_field((int, str))

    record = Record(value=[1, 2, "hello"])
    with pytest.raises(TypeError):
        record.value.append(object())
def test_pvector_field_type():
    """``pvector_field`` rejects non-vector values such as ``None``."""
    class Record(PRecord):
        value = pvector_field(int)

    record = Record()
    with pytest.raises(TypeError):
        record.set("value", None)
def test_pvector_field_mandatory():
    """``pvector_field`` is mandatory: it cannot be removed from the record."""
    class Record(PRecord):
        value = pvector_field(int)

    record = Record(value=[1])
    with pytest.raises(InvariantException):
        record.remove("value")
def test_pvector_field_default_non_optional():
    """``None`` is rejected by default (``optional`` defaults to false)."""
    class Record(PRecord):
        value = pvector_field(int)

    with pytest.raises(TypeError):
        Record(value=None)
def test_pvector_field_explicit_non_optional():
"""
If ``optional`` argument is ``False`` then ``pvector_field`` is
non-optional, i.e. does not allow | |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long, too-many-statements, consider-using-f-string
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.cli.core.commands.parameters import (resource_group_name_type, get_location_type,
file_type,
get_three_state_flag, get_enum_type, tags_type)
# from azure.cli.core.commands.validators import get_default_location_from_resource_group
from ._validators import (validate_memory, validate_cpu, validate_managed_env_name_or_id, validate_registry_server,
validate_registry_user, validate_registry_pass, validate_target_port, validate_ingress)
from ._constants import UNAUTHENTICATED_CLIENT_ACTION, FORWARD_PROXY_CONVENTION
def load_arguments(self, _):
name_type = CLIArgumentType(options_list=['--name', '-n'])
with self.argument_context('containerapp') as c:
# Base arguments
c.argument('name', name_type, metavar='NAME', id_part='name', help="The name of the Containerapp.")
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.ignore('disable_warnings')
with self.argument_context('containerapp') as c:
c.argument('tags', arg_type=tags_type)
c.argument('managed_env', validator=validate_managed_env_name_or_id, options_list=['--environment'], help="Name or resource ID of the container app's environment.")
c.argument('yaml', type=file_type, help='Path to a .yaml file with the configuration of a container app. All other parameters will be ignored. For an example, see https://docs.microsoft.com/azure/container-apps/azure-resource-manager-api-spec#examples')
with self.argument_context('containerapp exec') as c:
c.argument('container', help="The name of the container to ssh into")
c.argument('replica', help="The name of the replica to ssh into. List replicas with 'az containerapp replica list'. A replica may not exist if there is not traffic to your app.")
c.argument('revision', help="The name of the container app revision to ssh into. Defaults to the latest revision.")
c.argument('startup_command', options_list=["--command"], help="The startup command (bash, zsh, sh, etc.).")
c.argument('name', name_type, id_part=None, help="The name of the Containerapp.")
c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None)
with self.argument_context('containerapp logs show') as c:
c.argument('follow', help="Print logs in real time if present.", arg_type=get_three_state_flag())
c.argument('tail', help="The number of past logs to print (0-300)", type=int, default=20)
c.argument('container', help="The name of the container")
c.argument('output_format', options_list=["--format"], help="Log output format", arg_type=get_enum_type(["json", "text"]), default="json")
c.argument('replica', help="The name of the replica. List replicas with 'az containerapp replica list'. A replica may not exist if there is not traffic to your app.")
c.argument('revision', help="The name of the container app revision. Defaults to the latest revision.")
c.argument('name', name_type, id_part=None, help="The name of the Containerapp.")
c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None)
# Replica
with self.argument_context('containerapp replica') as c:
c.argument('replica', help="The name of the replica. ")
c.argument('revision', help="The name of the container app revision. Defaults to the latest revision.")
c.argument('name', name_type, id_part=None, help="The name of the Containerapp.")
c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None)
# Container
with self.argument_context('containerapp', arg_group='Container') as c:
c.argument('container_name', help="Name of the container.")
c.argument('cpu', type=float, validator=validate_cpu, help="Required CPU in cores from 0.25 - 2.0, e.g. 0.5")
c.argument('memory', validator=validate_memory, help="Required memory from 0.5 - 4.0 ending with \"Gi\", e.g. 1.0Gi")
c.argument('env_vars', nargs='*', help="A list of environment variable(s) for the container. Space-separated values in 'key=value' format. Empty string to clear existing values. Prefix value with 'secretref:' to reference a secret.")
c.argument('startup_command', nargs='*', options_list=['--command'], help="A list of supported commands on the container that will executed during startup. Space-separated values e.g. \"/bin/queue\" \"mycommand\". Empty string to clear existing values")
c.argument('args', nargs='*', help="A list of container startup command argument(s). Space-separated values e.g. \"-c\" \"mycommand\". Empty string to clear existing values")
c.argument('revision_suffix', help='User friendly suffix that is appended to the revision name')
# Env vars
with self.argument_context('containerapp', arg_group='Environment variables') as c:
c.argument('set_env_vars', nargs='*', help="Add or update environment variable(s) in container. Existing environmentenvironment variables are not modified. Space-separated values in 'key=value' format. If stored as a secret, value must start with 'secretref:' followed by the secret name.")
c.argument('remove_env_vars', nargs='*', help="Remove environment variable(s) from container. Space-separated environment variable names.")
c.argument('replace_env_vars', nargs='*', help="Replace environment variable(s) in container. Other existing environment variables are removed. Space-separated values in 'key=value' format. If stored as a secret, value must start with 'secretref:' followed by the secret name.")
c.argument('remove_all_env_vars', help="Remove all environment variable(s) from container..")
# Scale
with self.argument_context('containerapp', arg_group='Scale') as c:
c.argument('min_replicas', type=int, help="The minimum number of replicas.")
c.argument('max_replicas', type=int, help="The maximum number of replicas.")
# Dapr
with self.argument_context('containerapp', arg_group='Dapr') as c:
c.argument('dapr_enabled', options_list=['--enable-dapr'], default=False, arg_type=get_three_state_flag(), help="Boolean indicating if the Dapr side car is enabled.")
c.argument('dapr_app_port', type=int, help="The port Dapr uses to talk to the application.")
c.argument('dapr_app_id', help="The Dapr application identifier.")
c.argument('dapr_app_protocol', arg_type=get_enum_type(['http', 'grpc']), help="The protocol Dapr uses to talk to the application.")
# Configuration
with self.argument_context('containerapp', arg_group='Configuration') as c:
c.argument('revisions_mode', arg_type=get_enum_type(['single', 'multiple']), help="The active revisions mode for the container app.")
c.argument('registry_server', validator=validate_registry_server, help="The container registry server hostname, e.g. myregistry.azurecr.io.")
c.argument('registry_pass', validator=validate_registry_pass, options_list=['--registry-password'], help="The password to log in to container registry. If stored as a secret, value must start with \'secretref:\' followed by the secret name.")
c.argument('registry_user', validator=validate_registry_user, options_list=['--registry-username'], help="The username to log in to container registry.")
c.argument('secrets', nargs='*', options_list=['--secrets', '-s'], help="A list of secret(s) for the container app. Space-separated values in 'key=value' format.")
# Ingress
with self.argument_context('containerapp', arg_group='Ingress') as c:
c.argument('ingress', validator=validate_ingress, default=None, arg_type=get_enum_type(['internal', 'external']), help="The ingress type.")
c.argument('target_port', type=int, validator=validate_target_port, help="The application port used for ingress traffic.")
c.argument('transport', arg_type=get_enum_type(['auto', 'http', 'http2']), help="The transport protocol used for ingress traffic.")
with self.argument_context('containerapp create') as c:
c.argument('traffic_weights', nargs='*', options_list=['--traffic-weight'], help="A list of revision weight(s) for the container app. Space-separated values in 'revision_name=weight' format. For latest revision, use 'latest=weight'")
with self.argument_context('containerapp create', arg_group='Identity') as c:
c.argument('user_assigned', nargs='+', help="Space-separated user identities to be assigned.")
c.argument('system_assigned', help="Boolean indicating whether to assign system-assigned identity.")
with self.argument_context('containerapp create', arg_group='Container') as c:
c.argument('image', options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.")
with self.argument_context('containerapp update', arg_group='Container') as c:
c.argument('image', options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.")
with self.argument_context('containerapp scale') as c:
c.argument('min_replicas', type=int, help="The minimum number of replicas.")
c.argument('max_replicas', type=int, help="The maximum number of replicas.")
with self.argument_context('containerapp env') as c:
c.argument('name', name_type, help='Name of the Container Apps environment.')
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), help='Location of resource. Examples: eastus2, northeurope')
c.argument('tags', arg_type=tags_type)
with self.argument_context('containerapp env', arg_group='Log Analytics') as c:
c.argument('logs_customer_id', options_list=['--logs-workspace-id'], help='Name or resource ID of the Log Analytics workspace to send diagnostics logs to. You can use \"az monitor log-analytics workspace create\" to create one. Extra billing may apply.')
c.argument('logs_key', options_list=['--logs-workspace-key'], help='Log Analytics workspace key to configure your Log Analytics workspace. You can use \"az monitor log-analytics workspace get-shared-keys\" to retrieve the key.')
with self.argument_context('containerapp env', arg_group='Dapr') as c:
c.argument('instrumentation_key', options_list=['--dapr-instrumentation-key'], help='Application Insights instrumentation key used by Dapr to export Service to Service communication telemetry')
with self.argument_context('containerapp env', arg_group='Virtual Network') as c:
c.argument('infrastructure_subnet_resource_id', options_list=['--infrastructure-subnet-resource-id', '-s'], help='Resource ID of a subnet for infrastructure components and user app containers.')
c.argument('app_subnet_resource_id', options_list=['--app-subnet-resource-id'], help='Resource ID of a subnet that Container App containers are injected into. This subnet must be in the same VNET as the subnet defined in infrastructureSubnetResourceId.')
c.argument('docker_bridge_cidr', options_list=['--docker-bridge-cidr'], help='CIDR notation IP range assigned to the Docker bridge. It must not overlap with any Subnet IP ranges or the IP range defined in Platform Reserved CIDR, if defined')
c.argument('platform_reserved_cidr', options_list=['--platform-reserved-cidr'], help='IP range in CIDR notation that can be reserved for environment infrastructure IP addresses. It must not overlap with any other Subnet IP ranges')
c.argument('platform_reserved_dns_ip', options_list=['--platform-reserved-dns-ip'], help='An IP address from the IP range defined by Platform Reserved CIDR that will be reserved for the internal DNS server.')
c.argument('internal_only', arg_type=get_three_state_flag(), options_list=['--internal-only'], help='Boolean indicating the environment only has an internal load balancer. These environments do not have a public static IP resource, therefore must provide infrastructureSubnetResourceId if enabling this property')
with self.argument_context('containerapp env create') as c:
c.argument('zone_redundant', options_list=["--zone-redundant", "-z"], help="Enable zone redundancy on the environment. Cannot be used without --infrastructure-subnet-resource-id. If used with --location, the subnet's location must match")
with self.argument_context('containerapp env update') as c:
c.argument('name', name_type, help='Name of the Container Apps environment.')
c.argument('tags', arg_type=tags_type)
with self.argument_context('containerapp env delete') as c:
c.argument('name', name_type, help='Name of the Container Apps Environment.')
with self.argument_context('containerapp env show') as c:
c.argument('name', name_type, help='Name of the Container Apps Environment.')
with self.argument_context('containerapp env certificate upload') as c:
c.argument('certificate_file', options_list=['--certificate-file', '-f'], help='The filepath of the .pfx or .pem file')
c.argument('certificate_name', options_list=['--certificate-name', '-c'], help='Name of the certificate which should be unique within the Container Apps environment.')
c.argument('certificate_password', options_list=['--password', '-p'], help='The certificate file password')
with self.argument_context('containerapp env certificate list') as c:
c.argument('name', id_part=None)
c.argument('certificate', options_list=['--certificate', '-c'], help='Name or resource id of the certificate.')
c.argument('thumbprint', options_list=['--thumbprint', '-t'], help='Thumbprint of the certificate.')
with self.argument_context('containerapp env certificate delete') as c:
c.argument('certificate', options_list=['--certificate', '-c'], help='Name or resource id of the certificate.')
c.argument('thumbprint', options_list=['--thumbprint', '-t'], help='Thumbprint of the certificate.')
with self.argument_context('containerapp env storage') as c:
c.argument('name', id_part=None)
c.argument('storage_name', | |
.. attribute:: is_peer_address_checked
True if peer address is checked
**type**\: bool
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    """Build the leaf table for one L2TP class entry; every leaf starts unset.

    BUG FIX: ``self.password`` and ``self.encoded_password`` previously held
    the unresolved placeholder ``<PASSWORD>`` (a NameError / syntax error at
    import time). Like every other leaf they are initialized to ``None``.
    """
    super(L2Tp.Classes.Class, self).__init__()

    self.yang_name = "class"
    self.yang_parent_name = "classes"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = ['class_name']
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([])
    # Leaf registry: python attribute name -> YANG leaf descriptor.
    self._leafs = OrderedDict([
        ('class_name', YLeaf(YType.str, 'class-name')),
        ('ip_tos', YLeaf(YType.uint8, 'ip-tos')),
        ('vrf_name', YLeaf(YType.str, 'vrf-name')),
        ('receive_window_size', YLeaf(YType.uint16, 'receive-window-size')),
        ('class_name_xr', YLeaf(YType.str, 'class-name-xr')),
        ('digest_hash', YLeaf(YType.enumeration, 'digest-hash')),
        ('password', YLeaf(YType.str, 'password')),
        ('encoded_password', YLeaf(YType.str, 'encoded-password')),
        ('host_name', YLeaf(YType.str, 'host-name')),
        ('accounting_method_list', YLeaf(YType.str, 'accounting-method-list')),
        ('hello_timeout', YLeaf(YType.uint32, 'hello-timeout')),
        ('setup_timeout', YLeaf(YType.uint32, 'setup-timeout')),
        ('retransmit_minimum_timeout', YLeaf(YType.uint32, 'retransmit-minimum-timeout')),
        ('retransmit_maximum_timeout', YLeaf(YType.uint32, 'retransmit-maximum-timeout')),
        ('initial_retransmit_minimum_timeout', YLeaf(YType.uint32, 'initial-retransmit-minimum-timeout')),
        ('initial_retransmit_maximum_timeout', YLeaf(YType.uint32, 'initial-retransmit-maximum-timeout')),
        ('timeout_no_user', YLeaf(YType.uint32, 'timeout-no-user')),
        ('retransmit_retries', YLeaf(YType.uint32, 'retransmit-retries')),
        ('initial_retransmit_retries', YLeaf(YType.uint32, 'initial-retransmit-retries')),
        ('is_authentication_enabled', YLeaf(YType.boolean, 'is-authentication-enabled')),
        ('is_hidden', YLeaf(YType.boolean, 'is-hidden')),
        ('is_digest_enabled', YLeaf(YType.boolean, 'is-digest-enabled')),
        ('is_digest_check_enabled', YLeaf(YType.boolean, 'is-digest-check-enabled')),
        ('is_congestion_control_enabled', YLeaf(YType.boolean, 'is-congestion-control-enabled')),
        ('is_peer_address_checked', YLeaf(YType.boolean, 'is-peer-address-checked')),
    ])
    self.class_name = None
    self.ip_tos = None
    self.vrf_name = None
    self.receive_window_size = None
    self.class_name_xr = None
    self.digest_hash = None
    self.password = None
    self.encoded_password = None
    self.host_name = None
    self.accounting_method_list = None
    self.hello_timeout = None
    self.setup_timeout = None
    self.retransmit_minimum_timeout = None
    self.retransmit_maximum_timeout = None
    self.initial_retransmit_minimum_timeout = None
    self.initial_retransmit_maximum_timeout = None
    self.timeout_no_user = None
    self.retransmit_retries = None
    self.initial_retransmit_retries = None
    self.is_authentication_enabled = None
    self.is_hidden = None
    self.is_digest_enabled = None
    self.is_digest_check_enabled = None
    self.is_congestion_control_enabled = None
    self.is_peer_address_checked = None
    # List entries are keyed by class-name in the generated XPath.
    self._segment_path = lambda: "class" + "[class-name='" + str(self.class_name) + "']"
    self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/classes/%s" % self._segment_path()
def __setattr__(self, name, value):
    """Route attribute writes through ydk's leaf validation for the listed names."""
    self._perform_setattr(L2Tp.Classes.Class, ['class_name', 'ip_tos', 'vrf_name', 'receive_window_size', 'class_name_xr', 'digest_hash', 'password', 'encoded_password', 'host_name', 'accounting_method_list', 'hello_timeout', 'setup_timeout', 'retransmit_minimum_timeout', 'retransmit_maximum_timeout', 'initial_retransmit_minimum_timeout', 'initial_retransmit_maximum_timeout', 'timeout_no_user', 'retransmit_retries', 'initial_retransmit_retries', 'is_authentication_enabled', 'is_hidden', 'is_digest_enabled', 'is_digest_check_enabled', 'is_congestion_control_enabled', 'is_peer_address_checked'], name, value)
class Tunnels(Entity):
"""
List of tunnel IDs
.. attribute:: tunnel
L2TP tunnel information
**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Tunnels.Tunnel>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialize the tunnel-list container; ``tunnel`` is a YList of Tunnel entries."""
    super(L2Tp.Tunnels, self).__init__()

    self.yang_name = "tunnels"
    self.yang_parent_name = "l2tp"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([])
    # Single child list: the keyed "tunnel" entries.
    self._child_list_classes = OrderedDict([("tunnel", ("tunnel", L2Tp.Tunnels.Tunnel))])
    self._leafs = OrderedDict()  # no leafs directly on this container

    self.tunnel = YList(self)
    self._segment_path = lambda: "tunnels"
    self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/%s" % self._segment_path()
def __setattr__(self, name, value):
    """Route attribute writes through ydk's validation (no leafs on this container)."""
    self._perform_setattr(L2Tp.Tunnels, [], name, value)
class Tunnel(Entity):
"""
L2TP tunnel information
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: local_address
Local tunnel address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remote_address
Remote tunnel address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: local_port
Local port
**type**\: int
**range:** 0..65535
.. attribute:: remote_port
Remote port
**type**\: int
**range:** 0..65535
.. attribute:: protocol
Protocol
**type**\: int
**range:** 0..255
.. attribute:: is_pmtu_enabled
True if tunnel PMTU checking is enabled
**type**\: bool
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
.. attribute:: local_tunnel_name
Local tunnel name
**type**\: str
**length:** 0..256
.. attribute:: remote_tunnel_name
Remote tunnel name
**type**\: str
**length:** 0..256
.. attribute:: class_name
L2TP class name
**type**\: str
**length:** 0..256
.. attribute:: active_sessions
Number of active sessions
**type**\: int
**range:** 0..4294967295
.. attribute:: sequence_ns
Sequence NS
**type**\: int
**range:** 0..65535
.. attribute:: sequence_nr
Sequence NR
**type**\: int
**range:** 0..65535
.. attribute:: local_window_size
Local window size
**type**\: int
**range:** 0..65535
.. attribute:: remote_window_size
Remote window size
**type**\: int
**range:** 0..65535
.. attribute:: retransmission_time
Retransmission time in seconds
**type**\: int
**range:** 0..65535
**units**\: second
.. attribute:: maximum_retransmission_time
Maximum retransmission time in seconds
**type**\: int
**range:** 0..65535
**units**\: second
.. attribute:: unsent_queue_size
Unsent queue size
**type**\: int
**range:** 0..65535
.. attribute:: unsent_maximum_queue_size
Unsent maximum queue size
**type**\: int
**range:** 0..65535
.. attribute:: resend_queue_size
Resend queue size
**type**\: int
**range:** 0..65535
.. attribute:: resend_maximum_queue_size
Resend maximum queue size
**type**\: int
**range:** 0..65535
.. attribute:: order_queue_size
Order queue size
**type**\: int
**range:** 0..65535
.. attribute:: packet_queue_check
Current number session packet queue check
**type**\: int
**range:** 0..65535
.. attribute:: digest_secrets
Control message authentication with digest secrets
**type**\: int
**range:** 0..65535
.. attribute:: resends
Total resends
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_acknowledgement_sent
Total zero length body acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: total_out_of_order_drop_packets
Total out of order dropped packets
**type**\: int
**range:** 0..4294967295
.. attribute:: total_out_of_order_reorder_packets
Total out of order reorder packets
**type**\: int
**range:** 0..4294967295
.. attribute:: total_peer_authentication_failures
Number of peer authentication failures
**type**\: int
**range:** 0..4294967295
.. attribute:: is_tunnel_up
True if tunnel is up
**type**\: bool
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled else false
**type**\: bool
.. attribute:: retransmit_time
Retransmit time distribution in seconds
**type**\: list of int
**range:** 0..65535
**units**\: second
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialize one entry of the 'tunnel' list, keyed by local-tunnel-id."""
    super(L2Tp.Tunnels.Tunnel, self).__init__()
    self.yang_name = "tunnel"
    self.yang_parent_name = "tunnels"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    # YANG list key: used to build this entry's segment path below.
    self.ylist_key_names = ['local_tunnel_id']
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([])
    # Mapping of python attribute name -> (YANG leaf name, type).
    self._leafs = OrderedDict([
        ('local_tunnel_id', YLeaf(YType.int32, 'local-tunnel-id')),
        ('local_address', YLeaf(YType.str, 'local-address')),
        ('remote_address', YLeaf(YType.str, 'remote-address')),
        ('local_port', YLeaf(YType.uint16, 'local-port')),
        ('remote_port', YLeaf(YType.uint16, 'remote-port')),
        ('protocol', YLeaf(YType.uint8, 'protocol')),
        ('is_pmtu_enabled', YLeaf(YType.boolean, 'is-pmtu-enabled')),
        ('remote_tunnel_id', YLeaf(YType.uint32, 'remote-tunnel-id')),
        ('local_tunnel_name', YLeaf(YType.str, 'local-tunnel-name')),
        ('remote_tunnel_name', YLeaf(YType.str, 'remote-tunnel-name')),
        ('class_name', YLeaf(YType.str, 'class-name')),
        ('active_sessions', YLeaf(YType.uint32, 'active-sessions')),
        ('sequence_ns', YLeaf(YType.uint16, 'sequence-ns')),
        ('sequence_nr', YLeaf(YType.uint16, 'sequence-nr')),
        ('local_window_size', YLeaf(YType.uint16, 'local-window-size')),
        ('remote_window_size', YLeaf(YType.uint16, 'remote-window-size')),
        ('retransmission_time', YLeaf(YType.uint16, 'retransmission-time')),
        ('maximum_retransmission_time', YLeaf(YType.uint16, 'maximum-retransmission-time')),
        ('unsent_queue_size', YLeaf(YType.uint16, 'unsent-queue-size')),
        ('unsent_maximum_queue_size', YLeaf(YType.uint16, 'unsent-maximum-queue-size')),
        ('resend_queue_size', YLeaf(YType.uint16, 'resend-queue-size')),
        ('resend_maximum_queue_size', YLeaf(YType.uint16, 'resend-maximum-queue-size')),
        ('order_queue_size', YLeaf(YType.uint16, 'order-queue-size')),
        ('packet_queue_check', YLeaf(YType.uint16, 'packet-queue-check')),
        ('digest_secrets', YLeaf(YType.uint16, 'digest-secrets')),
        ('resends', YLeaf(YType.uint32, 'resends')),
        ('zero_length_body_acknowledgement_sent', YLeaf(YType.uint32, 'zero-length-body-acknowledgement-sent')),
        ('total_out_of_order_drop_packets', YLeaf(YType.uint32, 'total-out-of-order-drop-packets')),
        ('total_out_of_order_reorder_packets', YLeaf(YType.uint32, 'total-out-of-order-reorder-packets')),
        ('total_peer_authentication_failures', YLeaf(YType.uint32, 'total-peer-authentication-failures')),
        ('is_tunnel_up', YLeaf(YType.boolean, 'is-tunnel-up')),
        ('is_congestion_control_enabled', YLeaf(YType.boolean, 'is-congestion-control-enabled')),
        ('retransmit_time', YLeafList(YType.uint16, 'retransmit-time')),
    ])
    # Leaf values start unset; they are populated when data is read from the device.
    self.local_tunnel_id = None
    self.local_address = None
    self.remote_address = None
    self.local_port = None
    self.remote_port = None
    self.protocol = None
    self.is_pmtu_enabled = None
    self.remote_tunnel_id = None
    self.local_tunnel_name = None
    self.remote_tunnel_name = None
    self.class_name = None
    self.active_sessions = None
    self.sequence_ns = None
    self.sequence_nr = None
    self.local_window_size = None
    self.remote_window_size = None
    self.retransmission_time = None
    self.maximum_retransmission_time = None
    self.unsent_queue_size = None
    self.unsent_maximum_queue_size = None
    self.resend_queue_size = None
    self.resend_maximum_queue_size = None
    self.order_queue_size = None
    self.packet_queue_check = None
    self.digest_secrets = None
    self.resends = None
    self.zero_length_body_acknowledgement_sent = None
    self.total_out_of_order_drop_packets = None
    self.total_out_of_order_reorder_packets = None
    self.total_peer_authentication_failures = None
    self.is_tunnel_up = None
    self.is_congestion_control_enabled = None
    self.retransmit_time = []  # leaf-list, hence a list rather than None
    # Segment path embeds the list key value at lookup time.
    self._segment_path = lambda: "tunnel" + "[local-tunnel-id='" + str(self.local_tunnel_id) + "']"
    self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/tunnels/%s" % self._segment_path()
def __setattr__(self, name, value):
    """Route attribute writes through YDK's validation hook, which only
    accepts the leaf names declared in _leafs above."""
    self._perform_setattr(L2Tp.Tunnels.Tunnel, ['local_tunnel_id', 'local_address', 'remote_address', 'local_port', 'remote_port', 'protocol', 'is_pmtu_enabled', 'remote_tunnel_id', 'local_tunnel_name', 'remote_tunnel_name', 'class_name', 'active_sessions', 'sequence_ns', 'sequence_nr', 'local_window_size', 'remote_window_size', 'retransmission_time', 'maximum_retransmission_time', 'unsent_queue_size', 'unsent_maximum_queue_size', 'resend_queue_size', 'resend_maximum_queue_size', 'order_queue_size', 'packet_queue_check', 'digest_secrets', 'resends', 'zero_length_body_acknowledgement_sent', 'total_out_of_order_drop_packets', 'total_out_of_order_reorder_packets', 'total_peer_authentication_failures', 'is_tunnel_up', 'is_congestion_control_enabled', 'retransmit_time'], name, value)
class Sessions(Entity):
"""
List of session IDs
.. attribute:: session
L2TP information for a particular session
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Sessions.Session>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialize the 'sessions' container node (child of the l2tp root)."""
    super(L2Tp.Sessions, self).__init__()
    self.yang_name = "sessions"
    self.yang_parent_name = "l2tp"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([])
    # The only child is the keyed 'session' YANG list.
    self._child_list_classes = OrderedDict([("session", ("session", L2Tp.Sessions.Session))])
    self._leafs = OrderedDict()  # pure container: no leaf data of its own
    self.session = YList(self)
    self._segment_path = lambda: "sessions"
    self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/%s" % self._segment_path()
def __setattr__(self, name, value):
    """Route attribute writes through YDK's validation hook; this container
    declares no settable leafs (only the 'session' child list)."""
    self._perform_setattr(L2Tp.Sessions, [], name, value)
class Session(Entity):
"""
L2TP information for a particular session
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: local_session_id (key)
Local session ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: session_application_data
Session application data
**type**\: :py:class:`SessionApplicationData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Sessions.Session.SessionApplicationData>`
.. attribute:: local_ip_address
Local session IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remote_ip_address
Remote session IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: l2tp_sh_sess_udp_lport
l2tp sh sess udp lport
**type**\: int
**range:** 0..65535
.. attribute:: l2tp_sh_sess_udp_rport
l2tp sh sess udp rport
**type**\: int
**range:** 0..65535
.. attribute:: | |
= '2222122020' #provided by <NAME>, <NAME> and <NAME>
addinfo_f = 'It should be noted that ACCESS-CM2 continues to utilise JULES representations for some terrestrial processes, particularly dust source/deposition and river routing'
cmip_f = 6
rgb_f = 'blue'
marker_f = '<'
latres_atm_f = 145 #from in metadata in file!
lonres_atm_f = 192
lev_atm_f = 38
latres_oc_f = 300
lonres_oc_f = 360
lev_oc_f = 50
ecs_f = 3.9
tcr_f = 2.0
elif model == 'awi_esm_1_1_lr':
mrun_f = 'r1i1p1f1'
family_f = 'gcm'
doi_f = '10.1029/2019MS002009, 10.5194/acp-21-5015-2021, https://pure.mpg.de/rest/items/item_3279802_16/component/file_3316522/content'
atmos_f = 'ECHAM6.3.04p1 (T63L47 native atmosphere T63 gaussian grid; 192 x 96 longitude/latitude; 47 levels; top level 80 km)'
surface_f = 'JSBACH 3.20 with dynamic vegetation'
ocean_f = 'FESOM 1.4 (unstructured grid in the horizontal with 126859 wet nodes; 46 levels; top grid cell 0-5 m)'
seaice_f = 'FESOM 1.4'
aerosols_f = 'none'
chemistry_f = 'none'
obgc_f = 'none'
landice_f = 'none'
coupler_f = 'OASIS3-MCT'
complex_f = '2222220100' #provided by <NAME>
addinfo_f = 'Tropospheric and stratospheric ozone is prescribed from the CMIP6 dataset (Checa-Garcia et al., 2018a). GHG concentrations including CO2, CH4, N2O, and chlorofluorocarbons (CFCs) are prescribed after Meinshausen et al. (2017). Methane oxidation and photolysis of water vapour are parameterized for the stratosphere and mesosphere (further information in Sect. 2.1.2 of Schmidt et al., 2013, and references therein)'
cmip_f = 6
rgb_f = 'white' #white
marker_f = 'v'
latres_atm_f = 96
lonres_atm_f = 192
lev_atm_f = 47
latres_oc_f = 3 #these are pseudo latlons for the unstructured grid containing 126859 wet nodes, confirmed by metadata and array in file
lonres_oc_f = 42286
lev_oc_f = 46
ecs_f = 3.29
tcr_f = 2.11
elif model == 'bcc_csm1_1':
mrun_f = 'r1i1p1'
family_f = 'esm'
doi_f = '10.5194/gmd-12-1573-2019, 10.1007/s13351-014-3041-7, 10.1002/jgrd.50320, 10.1007/s00382-008-0487-2'
atmos_f = 'BCC-AGCM2.1, origin: CAM3'
surface_f = 'BCC-AVIM1.0 + dynamic vegetation + tbgc biophysical module is almost the same as in CLM3, own terrestrial carbon cycle is added'
ocean_f = 'MOM-L40v1 + obgc'
seaice_f = 'GFDL-SISv1'
aerosols_f = 'prescribed'
chemistry_f = 'none'
landice_f = 'none'
obgc_f = 'MOM-L40v1'
coupler_f = 'NCAR flux coupler version 5'
complex_f = '2222221120' #Confirmed by <NAME> and <NAME>, Global carbon budget without spatial distribution, Oceanic carbon cycle based on OCMIP2, https://doi.org/10.5194/gmd-12-1573-2019
addinfo_f = 'Global carbon budget without spatial distribution, Oceanic carbon cycle based on OCMIP2'
cmip_f = 5
rgb_f = '#FF4500'
marker_f = 'P'
latres_atm_f = 64
lonres_atm_f = 128
lev_atm_f = 26
latres_oc_f = 232
lonres_oc_f = 360
lev_oc_f = 40
ecs_f = 2.8
tcr_f = 1.7
elif model == 'bcc_csm2_mr':
mrun_f = 'r1i1p1f1'
family_f = 'esm'
doi_f = 'https://doi.org/10.5194/gmd-12-1573-2019'
atmos_f = 'BCC-AGCM3-MR, origin: CAM3'
surface_f = 'BCC-AVIM2.0 + dynamic vegetation + tbgc, as BCC-AVIM1.0 but with several improvments'
ocean_f = 'MOM-L40v2 without obgc'
seaice_f = 'GFDL-SISv2'
aerosols_f = 'prescribed'
chemistry_f = 'none'
obgc_f = 'none'
landice_f = 'none'
coupler_f = 'NCAR flux coupler version 5'
complex_f = '2222221120' #Confirmed by <NAME> and <NAME>, Prognostic spatial CO2 in the atmosphere, https://doi.org/10.5194/gmd-12-1573-2019
addinfo_f = 'Prognostic spatial CO2 in the atmosphere'
cmip_f = 6
rgb_f = '#FF4500'
marker_f = 'v'
latres_atm_f = 160
lonres_atm_f = 320
lev_atm_f = 46
latres_oc_f = 232
lonres_oc_f = 360
lev_oc_f = 40
ecs_f = 3.0
tcr_f = 1.7
elif model == 'canesm2':
mrun_f = 'r1i1p1'
family_f = 'esm'
doi_f = '10.1029/2010GL046270'
atmos_f = ' CanAM4 (AGCM15i, T63L35)'
surface_f = 'CLASS2.7 and CTEM1 (CTEM1 represents the terrestrial carbon cycle)'
ocean_f = 'CanOM4 (OGCM4.0, 256x192L40)'
seaice_f = 'CanSIM1 (Cavitating Fluid, T63 Gaussian Grid)'
aerosols_f = 'interactive tropospheric aerosols and prescribed stratospheric aerosols'
chemistry_f = 'none'
obgc_f = 'CMOC1.2'
landice_f = 'none'
coupler_f = 'inhouse'
complex_f = '2222222021' #confirmed by <NAME>
addinfo_f = 'From e-mail correspondence with <NAME>: From the CanAM team: For the aerosols in both models there are interactive tropospheric aerosols and prescribed stratospheric aerosols. The same chemistry is used in CanESM2 and CanESM5, related to the sulphur cycle. The full chemistry package is not in CanESM5.'
cmip_f = 5
rgb_f = 'white'
marker_f = 'X'
latres_atm_f = 64
lonres_atm_f = 128
lev_atm_f = 35
latres_oc_f = 192
lonres_oc_f = 256
lev_oc_f = 40
ecs_f = 2.7
tcr_f = 2.3
elif model == 'canesm5':
mrun_f = 'r1i1p2f1'
family_f = 'esm'
doi_f = 'doi.org/10.5194/gmd-12-4823-2019'
atmos_f = 'CanAM5 (T63L49 native atmosphere, T63 Linear Gaussian Grid; 128 x 64 longitude/latitude; 49 levels; top level 1 hPa)'
surface_f = 'CLASS3.6/CTEM1.2'
ocean_f = 'NEMO3.4.1 (ORCA1 tripolar grid, 1 deg with refinement to 1/3 deg within 20 degrees of the equator; 361 x 290 longitude/latitude; 45 vertical levels; top grid cell 0-6.19 m)'
seaice_f = 'LIM2'
aerosols_f = 'interactive tropospheric aerosols and prescribed stratospheric aerosols. '
chemistry_f = 'specified oxidants for aerosols'
obgc_f = 'Canadian Model of Ocean Carbon (CMOC); NPZD ecosystem with OMIP prescribed carbonate chemistry'
landice_f = 'specified ice sheets'
coupler_f = 'inhouse'
complex_f = '2222222121' #confirmed by <NAME>
addinfo_f = 'From e-mail correspondence with <NAME>: vegetation, from the CTEM team: In CanESM5, the fractional coverage of PFTs were specified so vegetation could grow/change vertically in response to changes in climate (i.e. vegetation is interactive). However, since fractional coverage of PFTs were specified vegetation was NOT allowed to compete and move spatially. So competition was off. Typically, however, people use “dynamic vegetation” phrase to describe competition. From the CanAM team: For the aerosols in both models there are interactive tropospheric aerosols and prescribed stratospheric aerosols. The same chemistry is used in CanESM2 and CanESM5, related to the sulphur cycle. The full chemistry package is not in CanESM5.'
cmip_f = 6
rgb_f = 'white' #'#ff007f'
marker_f = 'v'
latres_atm_f = 64
lonres_atm_f = 128
lev_atm_f = 49
latres_oc_f = 290
lonres_oc_f = 361
lev_oc_f = 45
ecs_f = 5.6
tcr_f = 2.7
elif model == 'ccsm4':
mrun_f = 'r6i1p1'
family_f = 'gcm'
doi_f = '10.1175/2011JCLI4083.1'
atmos_f = 'CAM4'
surface_f = 'CLM4'
ocean_f = 'Parallel Ocean Program version 2'
seaice_f = 'Community Ice Code version 4'
aerosols_f = 'none'
chemistry_f = 'none'
obgc_f = 'none'
landice_f = 'none'
coupler_f = 'CPL7'
complex_f = '2222221000' #confirmed by <NAME>
addinfo_f = ''
cmip_f = 5
rgb_f = 'white'
marker_f = 'P'
latres_atm_f = 192
lonres_atm_f = 288
lev_atm_f = 26
latres_oc_f = 320
lonres_oc_f = 384
lev_oc_f = 60
ecs_f = 2.9
tcr_f = 1.7
elif model == 'cmcc_cm': #
fullname_f = 'Centro Euro-Mediterraneo per i Cambiamenti Climatici'
mrun_f = 'r1i1p1'
doi_f = '10.1175/2011JCLI4104.1'
atmos_f = 'ECHAM5 with a T159 horizontal resolution, Gaussian grid of about 0.75 x 0.75 degrees. This configuration has 31 hybrid sigma-pressure levels in the vertical and top at 10 hPa.'
surface_f = 'part of ECHAM5'
ocean_f = 'OPA 8.2 (Madec et al. 1998), in its ORCA2 global configuration. The horizontal resolution is 2 x 2 deg with a meridional refinement near the equator, approaching a minimum 0.5 deg grid spacing. The model has 31 vertical levels, 10 of which lie within the upper 100 m.'
seaice_f = 'Louvain-La-Neuve (LIM) model'
aerosols_f = 'none'
chemistry_f = 'none'
obgc_f = 'none'
landice_f = 'none'
coupler_f = 'Ocean Atmosphere Sea Ice Soil version 3 (OASIS3)'
complex_f = '2222000000' # # confirmed by <NAME> and <NAME>
addinfo_f = 'model described in the documentation at http://www.cmcc.it/data-models/models'
family_f = 'gcm'
cmip_f = 5
rgb_f = 'cyan'
marker_f = 'P'
latres_atm_f = 240
lonres_atm_f = 480
lev_atm_f = 31
latres_oc_f = 149
lonres_oc_f = 182
lev_oc_f = 31
ecs_f = np.nan
tcr_f = np.nan
elif model == 'cmcc_cm2_sr5':
fullname_f = 'Euro-Mediterranean Centre on Climate Change coupled climate model'
mrun_f = 'r1i1p1f1'
doi_f = '10.1029/2018MS001369'
atmos_f = ' CAM5.3 (1deg; 288 x 192 longitude/latitude; 30 levels; top at ~2 hPa)'
surface_f = 'CLM4.5 (BGC mode)'
ocean_f = 'NEMO3.6 (ORCA1 tripolar primarly 1 deg lat/lon with meridional refinement down to 1/3 degree in the tropics; 362 x 292 longitude/latitude; 50 vertical levels; top grid cell 0-1 m)'
seaice_f = 'CICE4.0'
| |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
import argparse
import os
import sys
from . import config
from . import update
from .messages import *
def main():
    """CLI entry point: build the argument parser with one subparser per
    subcommand, parse, configure global state, and dispatch to a handler."""
    # Hack around argparse's lack of optional subparsers
    if len(sys.argv) == 1:
        sys.argv.append("spec")
    argparser = argparse.ArgumentParser(description="Processes spec source files into valid HTML.")
    # Global flags, shared by every subcommand.
    argparser.add_argument("-q", "--quiet", dest="quiet", action="count", default=0,
                           help="Silences one level of message, least-important first.")
    argparser.add_argument("-s", "--silent", dest="silent", action="store_true",
                           help="Shorthand for 'as many -q as you need to shut it up'")
    # Note: -f and --die-on share dest="errorLevel"; -f is const "nothing".
    argparser.add_argument("-f", "--force", dest="errorLevel", action="store_const", const="nothing",
                           help="Force the preprocessor to run to completion; fatal errors don't stop processing.")
    argparser.add_argument("-d", "--dry-run", dest="dryRun", action="store_true",
                           help="Prevents the processor from actually saving anything to disk, but otherwise fully runs.")
    argparser.add_argument("--print", dest="printMode", action="store", default=None,
                           help="Print mode. Options are 'plain' (just text), 'console' (colored with console color codes), 'markup', and 'json'. Defaults to 'console'.")
    # NOTE(review): byte-string choices look like a py2/unicode_literals hack
    # (file imports unicode_literals) — confirm before porting to py3.
    argparser.add_argument("--die-on", dest="errorLevel", choices=[b"nothing", b"fatal", b"link-error", b"warning", b"everything"],
                           help="Determines what sorts of errors cause Bikeshed to die (quit immediately with an error status code). Default is 'fatal'; the -f flag is a shorthand for 'nothing'")
    subparsers = argparser.add_subparsers(title="Subcommands", dest='subparserName')
    # 'spec': one-shot build of a source file.
    specParser = subparsers.add_parser('spec', help="Process a spec source file into a valid output file.")
    specParser.add_argument("infile", nargs="?",
                            default=None,
                            help="Path to the source file.")
    specParser.add_argument("outfile", nargs="?",
                            default=None,
                            help="Path to the output file.")
    specParser.add_argument("--debug", dest="debug", action="store_true", help="Switches on some debugging tools. Don't use for production!")
    specParser.add_argument("--gh-token", dest="ghToken", nargs="?",
                            help="GitHub access token. Useful to avoid API rate limits. Generate tokens: https://github.com/settings/tokens.")
    specParser.add_argument("--byos", dest="byos", action="store_true",
                            help="Bring-Your-Own-Spec: turns off all the Bikeshed auto-niceties, so you can piecemeal its features into your existing doc instead. Experimental, let me know if things get crashy or weird.")
    specParser.add_argument("-l", "--line-numbers", dest="lineNumbers", action="store_true",
                            help="Hacky support for outputting line numbers on all error messages. Disables output, as this is hacky and might mess up your source.")
    # 'echidna': build then publish to W3C via the Echidna service.
    echidnaParser = subparsers.add_parser('echidna', help="Process a spec source file into a valid output file and publish it according to certain automatic protocols.")
    echidnaParser.add_argument("infile", nargs="?",
                               default=None,
                               help="Path to the source file.")
    echidnaParser.add_argument("--gh-token", dest="ghToken", nargs="?",
                               help="GitHub access token. Useful to avoid API rate limits. Generate tokens: https://github.com/settings/tokens.")
    echidnaParser.add_argument("--u", dest="un", metavar="USERNAME", required=False, help="W3C username.")
    echidnaParser.add_argument("--p", dest="pw", metavar="PASSWORD", required=False, help="W3C password.")
    echidnaParser.add_argument("--decision", dest="decision", metavar="DECISION_URL", required=False, help="URL recording the decision to publish.")
    echidnaParser.add_argument("--cc", dest="cc", metavar="EMAIL", required=False, help="Comma-separated list of email addresses to ping with the publication status when complete.")
    echidnaParser.add_argument("--additional-directories", dest="additionalDirectories", required=False, nargs="*", help="Directories to bundle in the tar file. Defaults to examples/, diagrams/, and images/.")
    echidnaParser.add_argument("--self-contained", dest="selfContained", action="store_true", help="The spec is self-contained, do not bundle any extra directories in the tar file.")
    echidnaParser.add_argument("--just-tar", dest="justTar", action="store_true")
    # 'watch': rebuild on every source change.
    watchParser = subparsers.add_parser('watch', help="Process a spec source file into a valid output file, automatically rebuilding when it changes.")
    watchParser.add_argument("infile", nargs="?",
                             default=None,
                             help="Path to the source file.")
    watchParser.add_argument("outfile", nargs="?",
                             default=None,
                             help="Path to the output file.")
    watchParser.add_argument("--gh-token", dest="ghToken", nargs="?",
                             help="GitHub access token. Useful to avoid API rate limits. Generate tokens: https://github.com/settings/tokens.")
    watchParser.add_argument("--byos", dest="byos", action="store_true",
                             help="Bring-Your-Own-Spec: turns off all the Bikeshed auto-niceties, so you can piecemeal its features into your existing doc instead. Experimental, let me know if things get crashy or weird.")
    # 'serve': watch + local HTTP server.
    serveParser = subparsers.add_parser('serve', help="Identical to 'watch', but also serves the folder on localhost.")
    serveParser.add_argument("infile", nargs="?",
                             default=None,
                             help="Path to the source file.")
    serveParser.add_argument("outfile", nargs="?",
                             default=None,
                             help="Path to the output file.")
    serveParser.add_argument("--port", dest="port", nargs="?", default="8000",
                             help="Specify the port to serve it over.")
    serveParser.add_argument("--localhost", dest="localhost", action="store_true",
                             help="Only allow connections from localhost.")
    serveParser.add_argument("--gh-token", dest="ghToken", nargs="?",
                             help="GitHub access token. Useful to avoid API rate limits. Generate tokens: https://github.com/settings/tokens.")
    serveParser.add_argument("--byos", dest="byos", action="store_true",
                             help="Bring-Your-Own-Spec: turns off all the Bikeshed auto-niceties, so you can piecemeal its features into your existing doc instead. Experimental, let me know if things get crashy or weird.")
    # 'update': refresh the cached support data in /spec-data.
    updateParser = subparsers.add_parser('update', help="Update supporting files (those in /spec-data).", epilog="If no options are specified, everything is downloaded.")
    # NOTE(review): flag is named --skip-manifest but stored as 'force';
    # skipping the manifest forces a full manual update (see help text).
    updateParser.add_argument("--skip-manifest", dest="force", action="store_true", help="Forces Bikeshed to do a full update manually, rather than using the manifest to get the preprocessed update (which can be several minutes old).")
    updateParser.add_argument("--anchors", action="store_true", help="Download crossref anchor data.")
    updateParser.add_argument("--backrefs", action="store_true", help="Download link backref data.")
    updateParser.add_argument("--biblio", action="store_true", help="Download biblio data.")
    updateParser.add_argument("--caniuse", action="store_true", help="Download Can I Use... data.")
    updateParser.add_argument("--link-defaults", dest="linkDefaults", action="store_true", help="Download link default data.")
    updateParser.add_argument("--test-suites", dest="testSuites", action="store_true", help="Download test suite data.")
    updateParser.add_argument("--languages", dest="languages", action="store_true", help="Download language/translation data.")
    updateParser.add_argument("--wpt", dest="wpt", action="store_true", help="Download web-platform-tests data.")
    # 'issues-list': plain-text issues file -> HTML.
    issueParser = subparsers.add_parser('issues-list', help="Process a plain-text issues file into HTML. Call with no args to see an example input text.")
    issueParser.add_argument("-t",
                             dest="printTemplate",
                             action="store_true",
                             help="Output example Issues List template.")
    issueParser.add_argument("infile", nargs="?",
                             default=None,
                             help="Path to the plain-text issue file.")
    issueParser.add_argument("outfile", nargs="?",
                             default=None,
                             help="Path to the output file. Default is file of the same name as input, with .html.")
    # 'debug': assorted introspection commands (mutually exclusive).
    debugParser = subparsers.add_parser('debug', help="Run various debugging commands.")
    debugParser.add_argument("infile", nargs="?",
                             default=None,
                             help="Path to the source file.")
    debugCommands = debugParser.add_mutually_exclusive_group(required=True)
    debugCommands.add_argument("--print-exports", dest="printExports", action="store_true",
                               help="Prints those terms that will be exported for cross-ref purposes.")
    debugCommands.add_argument("--print-refs-for", dest="linkText",
                               help="Prints the ref data for a given link text.")
    debugCommands.add_argument("--print", dest="code",
                               help="Runs the specified code and prints it.")
    debugCommands.add_argument("--print-json", dest="jsonCode",
                               help="Runs the specified code and prints it as formatted JSON.")
    debugCommands.add_argument("--refresh-data", dest="refreshData", action="store_true",
                               help="Clobbers the readonly data files with the mutable ones.")
    # 'refs': query the cross-reference database.
    refParser = subparsers.add_parser('refs', help="Search Bikeshed's ref database.")
    refParser.add_argument("infile", nargs="?",
                           default=None,
                           help="Path to the source file.")
    refParser.add_argument("--text", dest="text", default=None)
    refParser.add_argument("--type", dest="linkType", default=None)
    refParser.add_argument("--for", dest="linkFor", default=None)
    refParser.add_argument("--spec", dest="spec", default=None)
    refParser.add_argument("--status", dest="status", default=None)
    refParser.add_argument("--exact", dest="exact", action="store_true", help="Only search for the exact text provided; don't apply Bikeshed's automatic conjugation help for plurals/etc.")
    refParser.add_argument("--latest-only", dest="latestOnly", action="store_true", help="Apply Bikeshed's logic for only returning the latest version of a given ref when it exists in multiple levels of a spec.")
    # 'source': transformations applied to the source document itself.
    sourceParser = subparsers.add_parser('source', help="Tools for formatting the *source* document.")
    sourceParser.add_argument("--big-text",
                              dest="bigText",
                              action="store_true",
                              help="Finds HTML comments containing 'Big Text: foo' and turns them into comments containing 'foo' in big text.")
    sourceParser.add_argument("infile", nargs="?",
                              default=None,
                              help="Path to the source file.")
    sourceParser.add_argument("outfile", nargs="?",
                              default=None,
                              help="Path to the output file.")
    # 'test': run or rebase Bikeshed's own testsuite.
    testParser = subparsers.add_parser('test', help="Tools for running Bikeshed's testsuite.")
    testParser.add_argument("--rebase",
                            default=False,
                            action="store_true",
                            help="Rebase the specified files.")
    testParser.add_argument('testFiles',
                            default=[],
                            metavar="FILE",
                            nargs="*",
                            help="Run these tests. If called with no args, tests everything.")
    # 'profile': render a call-graph of a run.
    profileParser = subparsers.add_parser('profile', help="Profiling Bikeshed. Needs graphviz, gprof2dot, and xdot installed.")
    profileParser.add_argument("--root",
                               dest="root",
                               default=None,
                               metavar="ROOTFUNC",
                               help="Prune the graph to start with the specified root node.")
    profileParser.add_argument("--leaf",
                               dest="leaf",
                               default=None,
                               metavar="LEAFFUNC",
                               help="Prune the graph to only show ancestors of the specified leaf node.")
    profileParser.add_argument("--svg", dest="svgFile", default=None, help="Save the graph to a specified SVG file, rather than outputting with xdot immediately.")
    # 'template' and 'wpt': skeleton generators.
    templateParser = subparsers.add_parser('template', help="Outputs a skeleton .bs file for you to start with.")
    wptParser = subparsers.add_parser('wpt', help="Tools for writing Web Platform Tests.")
    wptParser.add_argument("--template",
                           default=False,
                           action="store_true",
                           help="Outputs a skeleton WPT file for you to start with.")
    # parse_known_args: unrecognized args ('extras') are forwarded to the
    # handlers (used as metadata overrides, see handleSpec et al.).
    options, extras = argparser.parse_known_args()
    # Apply the global flags to module-level config state.
    config.quiet = options.quiet
    if options.silent:
        config.quiet = float("infinity")
    config.setErrorLevel(options.errorLevel)
    config.dryRun = options.dryRun
    if options.printMode is None:
        # Honor the NO_COLOR convention when no explicit mode was given.
        if "NO_COLOR" in os.environ:
            config.printMode = "plain"
        else:
            config.printMode = "console"
    else:
        config.printMode = options.printMode
    update.fixupDataFiles()
    # Dispatch to the handler for the chosen subcommand.
    if options.subparserName == "update":
        handleUpdate(options, extras)
    elif options.subparserName == "spec":
        handleSpec(options, extras)
    elif options.subparserName == "echidna":
        handleEchidna(options, extras)
    elif options.subparserName == "watch":
        handleWatch(options, extras)
    elif options.subparserName == "serve":
        handleServe(options, extras)
    elif options.subparserName == "debug":
        handleDebug(options, extras)
    elif options.subparserName == "refs":
        handleRefs(options, extras)
    elif options.subparserName == "issues-list":
        handleIssuesList(options, extras)
    elif options.subparserName == "source":
        handleSource(options, extras)
    elif options.subparserName == "test":
        handleTest(options, extras)
    elif options.subparserName == "profile":
        handleProfile(options, extras)
    elif options.subparserName == "template":
        handleTemplate(options, extras)
    elif options.subparserName == "wpt":
        handleWpt(options, extras)
def handleUpdate(options, extras):
    """Run the 'update' subcommand: refresh the requested /spec-data files."""
    kwargs = dict(
        anchors=options.anchors,
        backrefs=options.backrefs,
        biblio=options.biblio,
        caniuse=options.caniuse,
        linkDefaults=options.linkDefaults,
        testSuites=options.testSuites,
        languages=options.languages,
        wpt=options.wpt,
        dryRun=config.dryRun,
        force=options.force,
    )
    update.update(**kwargs)
def handleSpec(options, extras):
    """Run the 'spec' subcommand: preprocess one source file into output."""
    from . import metadata
    from .Spec import Spec
    spec = Spec(
        inputFilename=options.infile,
        debug=options.debug,
        token=options.ghToken,
        lineNumbers=options.lineNumbers,
    )
    # Leftover command-line args act as metadata overrides.
    spec.mdCommandLine = metadata.fromCommandLine(extras)
    if options.byos:
        spec.mdCommandLine.addData("Group", "byos")
    spec.preprocess()
    spec.finish(outputFilename=options.outfile)
def handleEchidna(options, extras):
    """Entry point for the `echidna` subcommand: build the spec and publish it
    through the Echidna service (or just produce the tarball)."""
    from . import metadata
    from . import publish
    from .Spec import Spec

    doc = Spec(inputFilename=options.infile, token=options.ghToken)
    doc.mdCommandLine = metadata.fromCommandLine(extras)
    doc.mdCommandLine.addData("Prepare For TR", "yes")
    doc.preprocess()
    # A self-contained build needs no extra directories in the tarball.
    addDirs = [] if options.selfContained else options.additionalDirectories
    if options.justTar:
        publish.prepareTar(doc, visibleTar=True, additionalDirectories=addDirs)
    else:
        # BUG FIX: the password argument had been replaced by a `<PASSWORD>`
        # placeholder, which is a syntax error. Restore the credential from the
        # parsed CLI options, symmetric with `username=options.un`.
        # TODO(review): confirm the argparse dest for the password is `pw`.
        publish.publishEchidna(
            doc,
            username=options.un,
            password=options.pw,
            decision=options.decision,
            additionalDirectories=addDirs,
            cc=options.cc,
        )
def handleWatch(options, extras):
    """Entry point for the `watch` subcommand: rebuild on every source change."""
    from . import metadata
    from .Spec import Spec

    # The watcher must never die on a spec error, so silence error escalation.
    config.setErrorLevel("nothing")
    doc = Spec(inputFilename=options.infile, token=options.ghToken)
    doc.mdCommandLine = metadata.fromCommandLine(extras)
    if options.byos:
        doc.mdCommandLine.addData("Group", "byos")
    doc.watch(outputFilename=options.outfile)
def handleServe(options, extras):
from . import metadata
from .Spec import Spec
config.setErrorLevel("nothing")
doc = Spec(inputFilename=options.infile, | |
# <filename>SiSp/dataloader.py
"""SiSp dataloader
"""
# python2, 3 compatibility
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
import pysam
import itertools
import subprocess
import shlex
import os
from optparse import OptionParser, OptionGroup
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
from kipoi.data import PreloadedDataset
class FastaFile:
    """Thin wrapper around :class:`pysam.FastaFile` using 1-based coordinates."""

    def __init__(self, fasta_file):
        self.f = pysam.FastaFile(fasta_file)

    def get_seq(self, qref, start, stop):
        """Return the sequence of `qref` from `start` to `stop` (1-based,
        inclusive).  pysam's fetch is 0-based half-open, hence the shift.
        """
        zero_based_start = start - 1
        return self.f.fetch(qref, zero_based_start, stop)
# Complement table: only upper-case A/T/G/C are mapped; anything else
# (e.g. "N") passes through unchanged, matching the old per-character loop.
_COMPLEMENT = str.maketrans("ATGC", "TACG")


def rev_seq(seq):
    """Return the reverse complement of `seq`.

    Parameters
    ----------
    seq: str
        Nucleotide sequence (upper case expected).

    Returns
    -------
    str
        The reverse-complemented sequence; characters outside "ATGC" are
        kept as-is.
    """
    # str.translate does the whole substitution in one C-level pass.
    return seq.translate(_COMPLEMENT)[::-1]
class Transcript:
    """A transcript (chrom/strand/coordinates) plus its exon structure."""

    def __init__(self, chrom, strand, start, stop, tran_id, tran_name="*",
                 biotype="*"):
        """a general purpose transcript object with the basic information.

        Coordinates are stored as ints; exons are attached later via
        add_exon().
        """
        self.chrom = chrom
        self.strand = strand
        self.start = int(start)
        self.stop = int(stop)
        self.tranID = tran_id
        # (N, 2) array of [start, stop] exon coordinate pairs.
        self.exons = np.zeros((0,2), "int")
        # Interleaved exon/intron lengths; rebuilt on every add_exon() call.
        self.seglen = None
        self.tranL = 0    # total exonic length in bp
        self.exonNum = 0  # number of exons added so far
        self.biotype = biotype
        self.tranName = tran_name

    def add_exon(self, chrom, strand, start, stop):
        """Append an exon and refresh the segment-length bookkeeping.

        Exons whose chrom/strand disagree with the transcript are rejected
        with a printed warning.
        """
        if strand != self.strand or chrom != self.chrom:
            print("The exon has different chrom or strand to the transcript.")
            return
        _exon = np.array([start, stop], "int").reshape(1,2)
        self.exons = np.append(self.exons, _exon, axis=0)
        # NOTE(review): axis=0 sorts the start and stop columns independently;
        # this is only correct while exons do not overlap -- confirm for this
        # data source.
        self.exons = np.sort(self.exons, axis=0)
        self.tranL += abs(int(stop) - int(start) + 1)
        self.exonNum += 1
        # seglen holds lengths interleaved along the genome:
        # [exon1, intron1, exon2, intron2, ..., exonN].
        self.seglen = np.zeros(self.exons.shape[0] * 2 - 1, "int")
        self.seglen[0] = self.exons[0,1]-self.exons[0,0] + 1
        for i in range(1, self.exons.shape[0]):
            self.seglen[i*2-1] = self.exons[i,0]-self.exons[i-1,1] - 1
            self.seglen[i*2] = self.exons[i,1]-self.exons[i,0] + 1
        # On the minus strand, report segments 5'->3' by reversing.
        if ["-","-1","0",0,-1].count(self.strand) > 0:
            self.seglen = self.seglen[::-1]
class Gene:
    """A gene record holding coordinates, transcripts and exon bookkeeping."""

    def __init__(self, chrom, strand, start, stop, gene_id, gene_name="*",
                 biotype="*"):
        """Create a gene; transcripts and exons are attached afterwards.
        """
        self.chrom = chrom
        self.strand = strand
        self.start = int(start)
        self.stop = int(stop)
        # (N, 2) array of [start, stop] exon pairs used by add_exon() below.
        self.exons = np.zeros((0,2), "int")
        self.tranL = 0    # total exonic length in bp
        self.exonNum = 0  # exons added directly on the gene
        self.geneID = gene_id
        self.trans = []   # list of Transcript objects
        self.tranNum = 0
        self.biotype = biotype
        self.geneName = gene_name

    def add_transcipt(self, transcript):
        # NOTE: the method name keeps the historical typo ("transcipt") because
        # external callers use it.
        self.trans.append(transcript)
        self.tranNum += 1

    def get_gene_info(self):
        """Return a flat summary row: id, name, location, biotype and the
        comma-joined transcript IDs."""
        RV = [self.geneID, self.geneName, self.chrom, self.strand, self.start,
              self.stop, self.biotype]
        _trans = []
        for t in self.trans:
            _trans.append(t.tranID)
        RV.append(",".join(_trans))
        return RV

    def add_premRNA(self):
        """Append the unspliced pre-mRNA as a single-exon transcript "<id>.p"."""
        _tran = Transcript(self.chrom, self.strand, self.start, self.stop,
                           self.geneID+".p", self.geneName, self.biotype)
        _tran.add_exon(self.chrom, self.strand, self.start, self.stop)
        self.trans.append(_tran)
        self.tranNum += 1

    def get_exon_max_num(self):
        """Return the largest exon count over all transcripts."""
        exonMax = 0
        for _tran in self.trans:
            exonMax = max(exonMax, _tran.exonNum)
        return exonMax

    def gene_ends_update(self):
        """Widen gene start/stop so they cover every transcript's exons."""
        for t in self.trans:
            self.start = min(self.start, np.min(t.exons))
            self.stop = max(self.stop, np.max(t.exons))

    def add_exon(self, chrom, strand, start, stop):
        """Append an exon directly on the gene.

        NOTE(review): this is a verbatim copy of Transcript.add_exon (same
        warning message included) and it writes self.seglen, which __init__
        never declares -- the attribute only exists after the first
        successful call.
        """
        if strand != self.strand or chrom != self.chrom:
            print("The exon has different chrom or strand to the transcript.")
            return
        _exon = np.array([start, stop], "int").reshape(1,2)
        self.exons = np.append(self.exons, _exon, axis=0)
        # NOTE(review): axis=0 sorts start/stop columns independently; only
        # valid for non-overlapping exons -- confirm.
        self.exons = np.sort(self.exons, axis=0)
        self.tranL += abs(int(stop) - int(start) + 1)
        self.exonNum += 1
        # Interleaved exon/intron lengths along the genome.
        self.seglen = np.zeros(self.exons.shape[0] * 2 - 1, "int")
        self.seglen[0] = self.exons[0,1]-self.exons[0,0] + 1
        for i in range(1, self.exons.shape[0]):
            self.seglen[i*2-1] = self.exons[i,0]-self.exons[i-1,1] - 1
            self.seglen[i*2] = self.exons[i,1]-self.exons[i,0] + 1
        if ["-","-1","0",0,-1].count(self.strand) > 0:
            self.seglen = self.seglen[::-1]
def parse_attribute(attStr, default="*",
                    ID_tags="ID,gene_id,transcript_id,mRNA_id",
                    Name_tags="Name,gene_name,transcript_name,mRNA_name",
                    Type_tags="Type,gene_type,gene_biotype,biotype",
                    Parent_tags="Parent"):
    """
    Parse attributes in GTF or GFF3

    Parameters
    ----------
    attStr: string
        String containing attributes either in GTF or GFF3 format.
    default: string
        default value for ID, Name, Type and Parent.
    ID_tags: string
        Multiple tags for ID. Use comma for delimit.
        If multiple tags found, use the last one.
    Name_tags: string
        Multiple tags for Name. Use comma for delimit.
        If multiple tags found, use the last one.
    Type_tags: string
        Multiple tags for Type. Use comma for delimit.
        If multiple tags found, use the last one.
    Parent_tags: string
        Multiple tags for Parent. Use comma for delimit.
        If multiple tags found, use the last one.

    Returns
    -------
    RV: dict of string
        Dict of all tags, always including ID, Name, Type, Parent.
    """
    RV = {"ID": default, "Name": default, "Type": default, "Parent": default}
    ID_tags = ID_tags.split(",")
    Name_tags = Name_tags.split(",")
    Type_tags = Type_tags.split(",")
    Parent_tags = Parent_tags.split(",")

    attList = attStr.rstrip().split(";")
    for att in attList:
        # Drop leading spaces left over from "; " separators (idiomatic
        # replacement for the old character-by-character while loop).
        att = att.lstrip(" ")
        if len(att) == 0:
            continue
        # GFF3 uses key=value; GTF uses key "value".
        if att.find("=") > -1:
            _att = att.split("=")  # GFF3
        else:
            _att = att.split(" ")  # GTF
        if len(_att) < 2:
            # BUG FIX: typo "pase" -> "parse" in the warning message.
            print("Can't parse this attribute: %s" %att)
            continue
        # Strip surrounding double quotes from GTF-style values.  Using [:1]
        # instead of [0] also tolerates an empty value (e.g. "key=").
        if _att[1][:1] == '"':
            _att[1] = _att[1].split('"')[1]
        if ID_tags.count(_att[0]) == 1:
            RV["ID"] = _att[1]
        elif Name_tags.count(_att[0]) == 1:
            RV["Name"] = _att[1]
        elif Type_tags.count(_att[0]) == 1:
            RV["Type"] = _att[1]
        elif Parent_tags.count(_att[0]) == 1:
            RV["Parent"] = _att[1]
        else:
            RV[_att[0]] = _att[1]
    return RV
def loadgene(anno_file, comments="#,>", geneTag="gene",
             tranTag="transcript,mRNA", exonTag="exon"):
    """
    Load genes from gtf or gff3 file.

    Parameters
    ----------
    anno_file: str
        path for the annotation file in GTF or GFF3 format.
    comments: string
        Multiple comment prefixes. Use comma for delimit.
    geneTag: string
        Multiple tags for gene. Use comma for delimit.
    tranTag: string
        Multiple tags for transcript. Use comma for delimit.
    exonTag: string
        Multiple tags for exon. Use comma for delimit.

    Returns
    -------
    genes: list of ``Gene``
        the loaded genes.
    genenames: list of str
        the gene IDs, one per loaded gene.
    """
    # BUG FIX: the previous version (a) appended each *new* gene to `genes`
    # immediately on creation and appended the current gene again after the
    # loop, duplicating the last gene; (b) rebuilt a brand-new Gene for every
    # exon line instead of attaching the exon to the current transcript; and
    # (c) grew `genenames` once per annotation line.  The standard pattern is
    # restored: a gene is flushed when the next gene record (or EOF) is seen,
    # and exons go to the last transcript of the current gene.
    # TODO: load gzip file
    with open(anno_file, "r") as fid:
        anno_in = fid.readlines()

    geneTag = geneTag.split(",")
    tranTag = tranTag.split(",")
    exonTag = exonTag.split(",")
    comments = comments.split(",")

    genes = []
    genenames = []
    _gene = None
    for _line in anno_in:
        if comments.count(_line[0]):
            continue
        aLine = _line.split("\t")
        if len(aLine) < 8:
            continue
        elif geneTag.count(aLine[2]) == 1:
            # A new gene record closes the previous one.
            if _gene is not None:
                genes.append(_gene)
                genenames.append(_gene.geneID)
            RVatt = parse_attribute(aLine[8], ID_tags="ID,gene_id",
                                    Name_tags="Name,gene_name")
            _gene = Gene(aLine[0], aLine[6], aLine[3], aLine[4],
                         RVatt["ID"], RVatt["Name"], RVatt["Type"])
        elif tranTag.count(aLine[2]) == 1:
            RVatt = parse_attribute(aLine[8], ID_tags="ID,transcript_id,mRNA_id",
                                    Name_tags="Name,transcript_name,mRNA_name")
            _tran = Transcript(aLine[0], aLine[6], aLine[3], aLine[4],
                               RVatt["ID"], RVatt["Name"], RVatt["Type"])
            if _gene is not None:
                _gene.add_transcipt(_tran)
            else:
                print("Gene is not ready before transcript.")
        elif exonTag.count(aLine[2]) == 1:
            # Attach the exon to the most recent transcript of the gene.
            if _gene is not None and len(_gene.trans) > 0:
                _gene.trans[-1].add_exon(aLine[0], aLine[6], aLine[3], aLine[4])
            else:
                print("Gene or transcript is not ready before exon.")
    # Flush the final gene.
    if _gene is not None:
        genes.append(_gene)
        genenames.append(_gene.geneID)
    return genes, genenames
def get_one_hot(sequence, nucleo):
    """Binary-encode `sequence`: '1' where `nucleo` occurs, '0' for the other
    known bases.

    Characters outside "TGCAN" (and not equal to `nucleo`) pass through
    unchanged.  Returns the encoding as a list of single characters.
    """
    # Mark the target base first, then zero out the remaining known bases.
    encoded = sequence.replace(nucleo, '1')
    for base in 'TGCAN':
        encoded = encoded.replace(base, '0')
    return list(encoded)
def get_one_hot_C(sequence, region_dict):
    """One-hot encode cytosines, split into unmethylated and methylated tracks.

    Every non-C base is first zeroed out; the sequence is then walked region
    by region and each C becomes '1' in the track matching the region's
    methylation state and '0' in the other.

    Parameters
    ----------
    sequence: str
        Nucleotide sequence (upper case).
    region_dict: dict
        Maps "key_<m>" -> [start, end, methylated], with inclusive start/end
        positions relative to the sequence (as built by
        get_methylation_dict) and methylated in {0, 1}.

    Returns
    -------
    (str, str)
        The unmethylated-C and methylated-C one-hot strings, concatenated in
        region order.
    """
    # Zero every base that is not C; the Cs are split per region below.
    for base in 'TGAN':
        sequence = sequence.replace(base, '0')
    seq_C_new = ""
    seq_Cmeth_new = ""
    # BUG FIX: plain sorted() ordered the keys lexicographically, so with ten
    # or more regions "key_10" came before "key_2" and the tracks were
    # assembled out of positional order.  Sort by the numeric suffix instead.
    for dict_key in sorted(region_dict, key=lambda k: int(k.rsplit("_", 1)[1])):
        start_meth = int(region_dict[dict_key][0])
        end_meth = int(region_dict[dict_key][1])
        meth_true = region_dict[dict_key][2]
        snippet = sequence[start_meth:end_meth+1]
        if meth_true == 1:
            seq_Cmeth_new += snippet.replace("C", '1')
            seq_C_new += snippet.replace("C", '0')
        else:
            seq_Cmeth_new += snippet.replace("C", '0')
            seq_C_new += snippet.replace("C", '1')
    return seq_C_new, seq_Cmeth_new
def get_methylation_dict(df_output):
    """Build methylation regions from per-site calls in `df_output`.

    NOTE(review): relies on the module-level globals `seq_start`/`seq_end`
    set by get_hot_seq_meth() -- confirm it is only called from there.
    Column 1 appears to hold a genomic position and column 3 the methylation
    state; verify against the caller.

    Returns a dict "key_<m>" -> [region_start, region_end, methylation] with
    positions relative to the sequence start.
    """
    # detect methylation changes
    output_list=list(df_output[3])
    # True wherever the methylation state differs from the previous site.
    output_changes=[output_list[n]!=output_list[n-1] for n in range(len(output_list))]
    # Index 0 compared against the last element (n-1 == -1); force it False.
    output_changes[0]=False
    # get the methylation regions, if methylation changes start new region
    # save in dict with the start of the region, the end of the region and the methylation
    region_dict={}
    #use relative length in comparison to start of sequence
    start_region=0
    end_region=seq_end-seq_start-1
    n=0 # counter for splice entries
    m=1 # counter for dict entries
    for entry in output_changes:
        if entry==True:
            #get the middle point of the splicing change
            middle_region=(df_output[1][n]+df_output[1][n-1])/2-seq_start
            # get the methylation (of previous region)
            methylation=df_output[3][n-1]
            name="key_"+str(m)
            region_dict[name]=[start_region, middle_region, methylation ]
            #update the start region
            start_region=middle_region+1
            m=m+1
        n=n+1
    # finish with last region (-1 because we already added 1 to n)
    methylation=df_output[3][n-1]
    name="key_"+str(m)
    region_dict[name]=[start_region, end_region, methylation ]
    return region_dict
def get_hot_seq_meth(meth_file, genes, fastaFile):
seqshot2=np.ndarray(shape=(len(genes),800,5), dtype=float)
n=0
for g in genes:
#if (n+1)%100==0:
# break
# print n
gene_ids=g.geneID
exons = g.exons
global chrom
chrom = g.chrom
global strand
strand=g.strand
alt=exons[0]
cons_gene=[]
#get the center of the exon
center=(alt[1]+alt[0])/2
#get the 800 bp around the exon
global seq_start
seq_start=center-400
global seq_end
seq_end=center+400
#seq_ex=np.ndarray((2,), buffer=np.array([seq_start,seq_end]), dtype=int)
######
#get sequences
| |
# <gh_stars>10-100
"""
Tests for various Pyflakes behavior.
"""
from sys import version_info
from pyflakes import messages as m
from pyflakes.test.harness import TestCase, skip, skipIf
class Test(TestCase):
    def test_duplicateArgs(self):
        """Two parameters with the same name must be reported."""
        self.flakes('def fu(bar, bar): pass', m.DuplicateArgument)
    def test_localReferencedBeforeAssignment(self):
        """A local read before its assignment is flagged despite the global."""
        self.flakes('''
        a = 1
        def f():
            a; a=1
        f()
        ''', m.UndefinedLocal, m.UnusedVariable)
    @skipIf(version_info >= (3,),
            'in Python 3 list comprehensions execute in a separate scope')
    def test_redefinedInListComp(self):
        """
        Test that shadowing a variable in a list comprehension raises
        a warning.
        """
        # Module, class and function scopes all warn...
        self.flakes('''
        a = 1
        [1 for a, b in [(1, 2)]]
        ''', m.RedefinedInListComp)
        self.flakes('''
        class A:
            a = 1
            [1 for a, b in [(1, 2)]]
        ''', m.RedefinedInListComp)
        self.flakes('''
        def f():
            a = 1
            [1 for a, b in [(1, 2)]]
        ''', m.RedefinedInListComp)
        # ...but two comprehensions, or shadowing a for-loop target, do not.
        self.flakes('''
        [1 for a, b in [(1, 2)]]
        [1 for a, b in [(1, 2)]]
        ''')
        self.flakes('''
        for a, b in [(1, 2)]:
            pass
        [1 for a, b in [(1, 2)]]
        ''')
    def test_redefinedInGenerator(self):
        """
        Test that reusing a variable in a generator does not raise
        a warning.
        """
        self.flakes('''
        a = 1
        (1 for a, b in [(1, 2)])
        ''')
        self.flakes('''
        class A:
            a = 1
            list(1 for a, b in [(1, 2)])
        ''')
        # Inside a function the outer `a` merely becomes an unused local.
        self.flakes('''
        def f():
            a = 1
            (1 for a, b in [(1, 2)])
        ''', m.UnusedVariable)
        self.flakes('''
        (1 for a, b in [(1, 2)])
        (1 for a, b in [(1, 2)])
        ''')
        self.flakes('''
        for a, b in [(1, 2)]:
            pass
        (1 for a, b in [(1, 2)])
        ''')
    @skipIf(version_info < (2, 7), "Python >= 2.7 only")
    def test_redefinedInSetComprehension(self):
        """
        Test that reusing a variable in a set comprehension does not raise
        a warning.
        """
        self.flakes('''
        a = 1
        {1 for a, b in [(1, 2)]}
        ''')
        self.flakes('''
        class A:
            a = 1
            {1 for a, b in [(1, 2)]}
        ''')
        # Inside a function the outer `a` merely becomes an unused local.
        self.flakes('''
        def f():
            a = 1
            {1 for a, b in [(1, 2)]}
        ''', m.UnusedVariable)
        self.flakes('''
        {1 for a, b in [(1, 2)]}
        {1 for a, b in [(1, 2)]}
        ''')
        self.flakes('''
        for a, b in [(1, 2)]:
            pass
        {1 for a, b in [(1, 2)]}
        ''')
    @skipIf(version_info < (2, 7), "Python >= 2.7 only")
    def test_redefinedInDictComprehension(self):
        """
        Test that reusing a variable in a dict comprehension does not raise
        a warning.
        """
        self.flakes('''
        a = 1
        {1: 42 for a, b in [(1, 2)]}
        ''')
        self.flakes('''
        class A:
            a = 1
            {1: 42 for a, b in [(1, 2)]}
        ''')
        # Inside a function the outer `a` merely becomes an unused local.
        self.flakes('''
        def f():
            a = 1
            {1: 42 for a, b in [(1, 2)]}
        ''', m.UnusedVariable)
        self.flakes('''
        {1: 42 for a, b in [(1, 2)]}
        {1: 42 for a, b in [(1, 2)]}
        ''')
        self.flakes('''
        for a, b in [(1, 2)]:
            pass
        {1: 42 for a, b in [(1, 2)]}
        ''')
    def test_redefinedFunction(self):
        """
        Test that shadowing a function definition with another one raises a
        warning.
        """
        self.flakes('''
        def a(): pass
        def a(): pass
        ''', m.RedefinedWhileUnused)
    def test_redefinedClassFunction(self):
        """
        Test that shadowing a function definition in a class suite with another
        one raises a warning.
        """
        self.flakes('''
        class A:
            def a(): pass
            def a(): pass
        ''', m.RedefinedWhileUnused)
    def test_redefinedIfElseFunction(self):
        """
        Test that shadowing a function definition twice in an if
        and else block does not raise a warning.
        """
        # Only one branch can run, so the two defs never coexist.
        self.flakes('''
        if True:
            def a(): pass
        else:
            def a(): pass
        ''')
    def test_redefinedIfFunction(self):
        """
        Test that shadowing a function definition within an if block
        raises a warning.
        """
        self.flakes('''
        if True:
            def a(): pass
            def a(): pass
        ''', m.RedefinedWhileUnused)
    def test_redefinedTryExceptFunction(self):
        """
        Test that shadowing a function definition twice in try
        and except block does not raise a warning.
        """
        # The except def only runs if the try def failed, so no shadowing.
        self.flakes('''
        try:
            def a(): pass
        except:
            def a(): pass
        ''')
    def test_redefinedTryFunction(self):
        """
        Test that shadowing a function definition within a try block
        raises a warning.
        """
        self.flakes('''
        try:
            def a(): pass
            def a(): pass
        except:
            pass
        ''', m.RedefinedWhileUnused)
    def test_redefinedIfElseInListComp(self):
        """
        Test that shadowing a variable in a list comprehension in
        an if and else block does not raise a warning.
        """
        self.flakes('''
        if False:
            a = 1
        else:
            [a for a in '12']
        ''')
    @skipIf(version_info >= (3,),
            'in Python 3 list comprehensions execute in a separate scope')
    def test_redefinedElseInListComp(self):
        """
        Test that shadowing a variable in a list comprehension in
        an else (or if) block raises a warning.
        """
        self.flakes('''
        if False:
            pass
        else:
            a = 1
            [a for a in '12']
        ''', m.RedefinedInListComp)
    def test_functionDecorator(self):
        """
        Test that shadowing a function definition with a decorated version of
        that function does not raise a warning.
        """
        self.flakes('''
        from somewhere import somedecorator
        def a(): pass
        a = somedecorator(a)
        ''')
    def test_classFunctionDecorator(self):
        """
        Test that shadowing a function definition in a class suite with a
        decorated version of that function does not raise a warning.
        """
        self.flakes('''
        class A:
            def a(): pass
            a = classmethod(a)
        ''')
    @skipIf(version_info < (2, 6), "Python >= 2.6 only")
    def test_modernProperty(self):
        """Redefining `t` via @t.setter / @t.deleter must not warn."""
        self.flakes("""
        class A:
            @property
            def t(self):
                pass
            @t.setter
            def t(self, value):
                pass
            @t.deleter
            def t(self):
                pass
        """)
    def test_unaryPlus(self):
        """Don't die on unary +."""
        # Smoke test: a bare unary-plus expression must check cleanly.
        self.flakes('+1')
    def test_undefinedBaseClass(self):
        """
        If a name in the base list of a class definition is undefined, a
        warning is emitted.
        """
        # The class name is not yet bound while its own base list is evaluated.
        self.flakes('''
        class foo(foo):
            pass
        ''', m.UndefinedName)
    def test_classNameUndefinedInClassBody(self):
        """
        If a class name is used in the body of that class's definition and
        the name is not already defined, a warning is emitted.
        """
        self.flakes('''
        class foo:
            foo
        ''', m.UndefinedName)
    def test_classNameDefinedPreviously(self):
        """
        If a class name is used in the body of that class's definition and
        the name was previously defined in some other way, no warning is
        emitted.
        """
        self.flakes('''
        foo = None
        class foo:
            foo
        ''')
    def test_classRedefinition(self):
        """
        If a class is defined twice in the same module, a warning is emitted.
        """
        self.flakes('''
        class Foo:
            pass
        class Foo:
            pass
        ''', m.RedefinedWhileUnused)
    def test_functionRedefinedAsClass(self):
        """
        If a function is redefined as a class, a warning is emitted.
        """
        self.flakes('''
        def Foo():
            pass
        class Foo:
            pass
        ''', m.RedefinedWhileUnused)
    def test_classRedefinedAsFunction(self):
        """
        If a class is redefined as a function, a warning is emitted.
        """
        self.flakes('''
        class Foo:
            pass
        def Foo():
            pass
        ''', m.RedefinedWhileUnused)
    def test_classWithReturn(self):
        """
        If a return is used inside a class, a warning is emitted.
        """
        self.flakes('''
        class Foo(object):
            return
        ''', m.ReturnOutsideFunction)
    def test_moduleWithReturn(self):
        """
        If a return is used at the module level, a warning is emitted.
        """
        self.flakes('''
        return
        ''', m.ReturnOutsideFunction)
    def test_classWithYield(self):
        """
        If a yield is used inside a class, a warning is emitted.
        """
        self.flakes('''
        class Foo(object):
            yield
        ''', m.YieldOutsideFunction)
    def test_moduleWithYield(self):
        """
        If a yield is used at the module level, a warning is emitted.
        """
        self.flakes('''
        yield
        ''', m.YieldOutsideFunction)
    @skipIf(version_info < (3, 3), "Python >= 3.3 only")
    def test_classWithYieldFrom(self):
        """
        If a yield from is used inside a class, a warning is emitted.
        """
        self.flakes('''
        class Foo(object):
            yield from range(10)
        ''', m.YieldOutsideFunction)
    @skipIf(version_info < (3, 3), "Python >= 3.3 only")
    def test_moduleWithYieldFrom(self):
        """
        If a yield from is used at the module level, a warning is emitted.
        """
        self.flakes('''
        yield from range(10)
        ''', m.YieldOutsideFunction)
    def test_continueOutsideLoop(self):
        """`continue` outside a loop body is flagged in every illegal position."""
        self.flakes('''
        continue
        ''', m.ContinueOutsideLoop)
        self.flakes('''
        def f():
            continue
        ''', m.ContinueOutsideLoop)
        # A loop's `else` clause is not part of the loop body.
        self.flakes('''
        while True:
            pass
        else:
            continue
        ''', m.ContinueOutsideLoop)
        self.flakes('''
        while True:
            pass
        else:
            if 1:
                if 2:
                    continue
        ''', m.ContinueOutsideLoop)
        # A nested def or class starts a new scope outside the loop.
        self.flakes('''
        while True:
            def f():
                continue
        ''', m.ContinueOutsideLoop)
        self.flakes('''
        while True:
            class A:
                continue
        ''', m.ContinueOutsideLoop)
    def test_continueInsideLoop(self):
        """`continue` in a genuine loop body (even nested in if/try) is fine."""
        self.flakes('''
        while True:
            continue
        ''')
        self.flakes('''
        for i in range(10):
            continue
        ''')
        self.flakes('''
        while True:
            if 1:
                continue
        ''')
        self.flakes('''
        for i in range(10):
            if 1:
                continue
        ''')
        # `continue` in an inner loop's else clause belongs to the outer loop.
        self.flakes('''
        while True:
            while True:
                pass
            else:
                continue
        else:
            pass
        ''')
        # A fresh loop inside `finally` makes the continue legal again.
        self.flakes('''
        while True:
            try:
                pass
            finally:
                while True:
                    continue
        ''')
def test_continueInFinally(self):
# 'continue' inside 'finally' is a special syntax error
self.flakes('''
while True:
try:
pass
finally:
continue
''', m.ContinueInFinally)
self.flakes('''
while True:
try:
pass
finally:
if 1:
if 2:
continue
''', m.ContinueInFinally)
# Even when not in a loop, this is the error Python gives
| |
import json
from django.urls import reverse
from django.contrib.gis.geos import Polygon, Point, MultiPolygon
from rest_framework import status
from main.models import Record, Dataset
from main.tests.api import helpers
class TestJsonSearchAndOrdering(helpers.BaseUserTestCase):
    def test_filter_dataset(self):
        """Records can be narrowed by dataset primary key or dataset name."""
        dataset1 = self._create_dataset_and_records_from_rows([
            ['What', 'When', 'Who'],
            ['Crashed the db', '2018-02-14', 'Serge'],
            ['Restored the db', '2018-02-14', 'Shay']
        ])
        dataset2 = self._create_dataset_and_records_from_rows([
            ['What', 'When', 'Latitude', 'Longitude'],
            ['Canis lupus', '2018-02-14', -32.0, 115.75],
            ['Chubby bat', '2017-05-18', -34.4, 116.78]
        ])
        client = self.custodian_1_client
        url = reverse('api:record-list')
        # no filters: every record from both datasets is returned
        resp = client.get(url)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 4)
        expected_whats = sorted(['Crashed the db', 'Restored the db', 'Canis lupus', 'Chubby bat'])
        self.assertEqual(sorted([r['data']['What'] for r in records]), expected_whats)
        # dataset__id restricts to dataset1's two records
        expected_dataset = dataset1
        url = reverse('api:record-list')
        resp = client.get(url, {'dataset__id': expected_dataset.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 2)
        expected_whats = sorted(['Crashed the db', 'Restored the db'])
        self.assertEqual(sorted([r['data']['What'] for r in records]), expected_whats)
        # dataset__name restricts to dataset2's two records
        expected_dataset = dataset2
        resp = client.get(url, {'dataset__name': expected_dataset.name})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 2)
        expected_whats = sorted(['Canis lupus', 'Chubby bat'])
        self.assertEqual(sorted([r['data']['What'] for r in records]), expected_whats)
    def test_search_in_json_data(self):
        """
        Test that if we provide a dataset and a search parameters we can search through the data json field
        :return:
        """
        dataset1 = self._create_dataset_and_records_from_rows([
            ['What', 'When', 'Who'],
            ['Crashed the db', '2018-02-14', 'Serge'],
            ['Restored the db', '2018-02-14', 'Shay']
        ])
        dataset2 = self._create_dataset_and_records_from_rows([
            ['What', 'When', 'Latitude', 'Longitude'],
            ['Canis lupus', '2018-02-14', -32.0, 115.75],
            ['<NAME>', '2017-05-18', -34.4, 116.78],
            ['<NAME>', '2017-05-18', -34.4, 116.78]
        ])
        client = self.custodian_1_client
        url = reverse('api:record-list')
        # search Serge in dataset1: exactly one row mentions Serge
        resp = client.get(url, {'search': 'Serge', 'dataset__id': dataset1.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 1)
        record = records[0]
        expected_data = sorted(['Crashed the db', '2018-02-14', 'Serge'])
        self.assertEqual(sorted(list(record['data'].values())), expected_data)
        # search serge in dataset2 case insensitive
        resp = client.get(url, {'search': 'Serge', 'dataset__id': dataset2.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 1)
        record = records[0]
        expected_data = sorted(['<NAME>', '2017-05-18', '-34.4', '116.78'])
        # values can come back as numbers; compare their string forms
        record_values_as_string = [str(v) for v in record['data'].values()]
        self.assertEqual(sorted(list(record_values_as_string)), expected_data)
    def test_string_ordering_in_json_data(self):
        """
        Test that if we provide a dataset and an order parameter (field) we can order through the data json field
        for string
        :return:
        """
        dataset = self._create_dataset_and_records_from_rows([
            ['What', 'When', 'Latitude', 'Longitude'],
            ['Canis lupus', '2018-02-14', -32.0, 115.75],
            ['Zebra', '2017-01-01', -34.7, 115.75],
            ['Chubby bat', '2017-05-18', -34.4, 116.78],
            ['Alligator', '2017-05-18', -34.4, 116.78]
        ])
        client = self.custodian_1_client
        url = reverse('api:record-list')
        # order by What asc
        ordering = 'What'
        resp = client.get(url, {'ordering': ordering, 'dataset__id': dataset.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 4)
        expected_whats = sorted(['Alligator', 'Canis lupus', 'Chubby bat', 'Zebra'])
        self.assertEqual([r['data']['What'] for r in records], expected_whats)
        # order by What desc
        ordering = '-What'
        resp = client.get(url, {'ordering': ordering, 'dataset__id': dataset.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 4)
        expected_whats = sorted(['Alligator', 'Canis lupus', 'Chubby bat', 'Zebra'], reverse=True)
        self.assertEqual([r['data']['What'] for r in records], expected_whats)
        # test that the ordering is case sensitive:
        # lower-case 'what' should NOT produce the alphabetical order above.
        ordering = 'what'
        resp = client.get(url, {'ordering': ordering, 'dataset__id': dataset.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 4)
        expected_whats = sorted(['Alligator', 'Canis lupus', 'Chubby bat', 'Zebra'])
        self.assertNotEqual([r['data']['What'] for r in records], expected_whats)
    def test_server_side_ordering_row_number(self):
        """
        Test that we can order by the source_info['row'] (row number in the csv or xlsx) and that the
        sort in numeric based not char based (10 is after 9)
        """
        # create 11 records (data not important)
        rows = [
            ['When', 'Species', 'How Many', 'Latitude', 'Longitude', 'Comments'],
            ['2018-02-07', 'Canis lupus', 1, -32.0, 115.75, ''],
            ['2018-01-12', 'Chubby bat', 10, -32.0, 115.75, 'Awesome'],
            ['2018-02-10', 'Unknown', 2, -32.0, 115.75, 'Canis?'],
            ['2018-02-02', 'Canis dingo', 2, -32.0, 115.75, 'Watch out kids'],
            ['2018-02-07', '<NAME>', 1, -32.0, 115.75, ''],
            ['2018-01-12', '<NAME>', 10, -32.0, 115.75, 'Awesome'],
            ['2018-02-10', 'Unknown', 2, -32.0, 115.75, 'Canis?'],
            ['2018-02-02', 'Canis dingo', 2, -32.0, 115.75, 'Watch out kids'],
            ['2018-02-07', '<NAME>', 1, -32.0, 115.75, ''],
            ['2018-01-12', '<NAME>', 10, -32.0, 115.75, 'Awesome'],
            ['2018-02-10', 'Unknown', 2, -32.0, 115.75, 'Canis?'],
        ]
        dataset = self._create_dataset_and_records_from_rows(rows)
        client = self.custodian_1_client
        url = reverse('api:record-list')
        ordering = 'row'
        resp = client.get(url, {'ordering': ordering, 'dataset__id': dataset.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        json_response = resp.json()
        self.assertEqual(len(json_response), 11)
        # row start at 2 (row 1 is the header row)
        sorted_rows = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
        record_rows = [record['source_info']['row'] for record in json_response]
        self.assertEqual(record_rows, sorted_rows)
        # descending order must return the numeric row sequence reversed
        ordering = '-row'
        resp = client.get(url, {'ordering': ordering, 'dataset__id': dataset.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        json_response = resp.json()
        self.assertEqual(len(json_response), 11)
        record_rows = [record['source_info']['row'] for record in json_response]
        self.assertEqual(record_rows, list(reversed(sorted_rows)))
    def test_numeric_ordering_in_json_data_from_upload_end_point(self):
        """
        Assuming we have a schema that contains a numeric field (integer or number types).
        Querying an order on this field should return a numerical order not string (10, after 9)
        This test uses the upload end_point
        """
        dataset = self._create_dataset_and_records_from_rows([
            ['What', 'How Many'],
            ['Canis lupus', 7],
            ['Zebra', 1],
            ['Chubby bat', 9],
            ['Alligator', 10]
        ])
        # check that we have a field of type integer
        self.assertEqual(dataset.schema.get_field_by_name('How Many').type, 'integer')
        client = self.custodian_1_client
        url = reverse('api:record-list')
        ordering = 'How Many'
        resp = client.get(url, {'ordering': ordering, 'dataset__id': dataset.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 4)
        # numeric sort: 10 comes after 9, not after 1
        expected = [('Zebra', 1), ('Canis lupus', 7), ('Chubby bat', 9), ('Alligator', 10)]
        self.assertEqual([(r['data']['What'], r['data']['How Many']) for r in records], expected)
    def test_numeric_ordering_in_json_data_from_post_end_point(self):
        """
        Assuming we have a schema that contains a numeric field (integer or number types).
        Querying an order on this field should return a numerical order not string (10, after 9)
        This test uses the api POST record/ end_point with floats instead of integers.
        """
        weights = [23.6, 123.4, 2.6, 203.4]
        # sorted float list should return [2.6, 23.6, 123.4, 203.4]
        # while a string sorted should return ['123.4', '2.6', '203.4', '23.6']
        float_sorted = sorted(weights)
        string_sorted = sorted([str(w) for w in weights])
        # sanity check: the two orderings genuinely differ for this data
        self.assertNotEqual(float_sorted, [float(s) for s in string_sorted])
        dataset = self._create_dataset_from_rows([
            ['What', 'Weight'],
            ['Canis lupus', weights[0]],
            ['Zebra', weights[1]],
            ['Chubby bat', weights[2]],
            ['Alligator', weights[3]]
        ])
        # check that we have a field of type integer
        self.assertEqual(dataset.schema.get_field_by_name('Weight').type, 'number')
        # post some records
        records_data = [
            {
                'What': 'Canis lupus',
                'Weight': weights[0]
            },
            {
                'What': 'Zebra',
                'Weight': weights[1]
            },
            {
                'What': 'Chubby bat',
                'Weight': weights[2]
            },
            {
                'What': 'Alligator',
                'Weight': weights[3]
            },
        ]
        records = []
        for record_data in records_data:
            records.append(self._create_record(self.custodian_1_client, dataset, record_data))
        client = self.custodian_1_client
        url = reverse('api:record-list')
        ordering = 'Weight'
        resp = client.get(url, {'ordering': ordering, 'dataset__id': dataset.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 4)
        expected = [('Chubby bat', 2.6), ('Canis lupus', 23.6), ('Zebra', 123.4), ('Alligator', 203.4)]
        self.assertEqual([(r['data']['What'], r['data']['Weight']) for r in records], expected)
        # revert ordering
        ordering = '-Weight'
        resp = client.get(url, {'ordering': ordering, 'dataset__id': dataset.pk})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        records = resp.json()
        self.assertEqual(len(records), 4)
        # reverse expected
        expected = expected[::-1]
        self.assertEqual([(r['data']['What'], r['data']['Weight']) for r in records], expected)
def test_filter_id__in(self):
"""
Test the id__in filter.
Note: the filter parameter has to be a comma-separated list of ids, e.g: &id__in=1,2,3,4
Not supported:
- &id__in=[1,2,3,4] (square bracket)
- &id__in=1&id__in=2&id__in=3 (repeated key)
"""
self._create_dataset_and_records_from_rows([
['What', 'Comment'],
['aaaa', 'AAAA'],
['bbbb', 'BBBB'],
['cccc', 'CCCC'],
['dddd', 'DDDD'],
])
record_ids = list(Record.objects.values_list('id', flat=True))
self.assertTrue(len(record_ids) >= 4)
url = reverse('api:record-list')
client = self.custodian_1_client
# Test 2 records (first and last)
expected_ids = [record_ids[0], record_ids[-1]]
params = {
'id__in': ','.join([str(i) for i in expected_ids])
}
resp = client.get(url, params)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
records = resp.json()
self.assertEqual(2, len(records))
self.assertEqual(sorted([r['id'] for r in records]), sorted(expected_ids))
# Test only one value
expected_id = record_ids[1]
params = {
'id__in': expected_id
}
resp = client.get(url, params)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
records = resp.json()
self.assertEqual(1, len(records))
self.assertEqual(records[0]['id'], expected_id)
# Test blank returns all records (filter disabled)
params = {
'id__in': ''
}
resp = client.get(url, params)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
records = resp.json()
self.assertEqual(len(record_ids), len(records))
expected_ids = record_ids
self.assertEqual(sorted([r['id'] for r in records]), sorted(expected_ids))
# Test that square bracket doesn't work. It will return no records
expected_ids = [record_ids[0], record_ids[-1]]
params = {
'id__in': json.dumps(expected_ids) # '[1,4]'
}
resp = client.get(url, params)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
records = resp.json()
self.assertEqual(0, len(records))
self.assertNotEqual(sorted([r['id'] for r in records]), sorted(expected_ids))
# Test that repeated key doesn't work. It will return the last one
expected_ids = [record_ids[0], record_ids[-1]]
params = {
'id__in': expected_ids, # repeated key is the default url encoding for an array for the python test client
}
resp = client.get(url, params)
| |
"address": "0x68d53441c0e253f76c500e551bdeA3D102206C9a",
"ens_address": "",
"decimals": 18,
"website": "https://dimensions.network/",
"logo": {
"src": "https://dimensions.network/static/home/img/branding/logo_o_400px.png",
"width": "400",
"height": "400",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://support.dimensions.network/"
},
"social": {
"blog": "https://blog.dimensions.network/",
"chat": "https://t.me/DimensionsTalk",
"facebook": "https://fb.me/dimensionsnetwork",
"forum": "",
"github": "https://github.com/DimensionsNetwork",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/dimensions-network",
"reddit": "https://www.reddit.com/r/DimensionsNetwork/",
"slack": "",
"telegram": "https://t.me/DimensionsTalk",
"twitter": "https://twitter.com/Dimensions_DST",
"youtube": "https://www.youtube.com/DimensionsNetwork"
}
},
"LEMO": {
"symbol": "LEMO",
"address": "0xB5AE848EdB296C21259b7467331467d2647eEcDf",
"decimals": 18,
"name": "Lemo",
"ens_address": "",
"website": "https://www.lemochain.com",
"logo": {
"src": "https://lemochip.hellobyebye.com/logo.png",
"width": "110",
"height": "72",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/LemoChain",
"forum": "",
"github": "https://github.com/LemoFoundationLtd",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/lemochain",
"twitter": "https://twitter.com/LemoChain",
"youtube": ""
}
},
"TCA": {
"symbol": "TCA",
"type": "ERC20",
"address": "0xfA0eF5E034CaE1AE752d59bdb8aDcDe37Ed7aB97",
"decimals": 18,
"name": "TangguoTao Token",
"ens_address": "",
"website": "https://www.tcandy.io",
"logo": {
"src": "https://www.tcandy.io/images/logo.png",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://weibo.com/u/6612897220",
"chat": "",
"facebook": "https://www.facebook.com/tcandy.io",
"forum": "",
"github": "https://github.com/TcandyChain",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/TcandyChain",
"youtube": ""
}
},
"ORI": {
"symbol": "ORI",
"name": "Origami",
"type": "ERC20",
"address": "0xd2Fa8f92Ea72AbB35dBD6DECa57173d22db2BA49",
"ens_address": "",
"decimals": 18,
"website": "https://ori.network",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@origaminetwork",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/origaminetwork",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/origami_network",
"youtube": ""
}
},
"FTT": {
"symbol": "FTT",
"address": "0x2AEC18c5500f21359CE1BEA5Dc1777344dF4C0Dc",
"decimals": 18,
"name": "FarmaTrust Token",
"ens_address": "",
"website": "https://www.farmatrust.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@farmatrust",
"chat": "",
"facebook": "https://www.facebook.com/farmatrustchain",
"forum": "https://bitcointalk.org/index.php?topic=2496382",
"github": "https://github.com/farmatrust",
"gitter": "",
"instagram": "https://www.instagram.com/farmatrust",
"linkedin": "https://www.linkedin.com/company/24797056",
"reddit": "https://www.reddit.com/user/FarmaTrust",
"slack": "",
"telegram": "https://t.me/farmatrust",
"twitter": "https://twitter.com/farmatrust",
"youtube": ""
}
},
"CTGC": {
"symbol": "CTGC",
"address": "0x9E7D29bd499B6c7da2a5B2EaFCF4A39d3BD845D1",
"decimals": 18,
"name": "Convenient To Go",
"ens_address": "christiantraders.eth",
"website": "https://www.ctgcoin.org",
"logo": {
"src": "https://www.ctgcoin.org/upload/logo/ctg.logo.png",
"width": "68",
"height": "41",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://www.ctgcoin.org"
},
"social": {
"blog": "https://weibo.com/6550499942/info",
"chat": "",
"facebook": "https://www.facebook.com/ctg.coin",
"forum": "",
"github": "https://github.com/ctgcoin/",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://telegram.me/CTGgroup",
"twitter": "https://twitter.com/CtGcoin",
"youtube": ""
}
},
"DRGN": {
"symbol": "DRGN",
"address": "0x419c4dB4B9e25d6Db2AD9691ccb832C8D9fDA05E",
"decimals": 18,
"name": "Dragon",
"ens_address": "dragonchain.eth",
"website": "https://dragonchain.com",
"logo": {
"src": "https://dragonchain.com/assets/images/dragon.png",
"width": 813,
"height": 879,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://dragonchain.com/blog",
"chat": "https://t.me/dragontalk",
"facebook": "",
"forum": "",
"github": "https://github.com/dragonchain/dragonchain",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/18216867",
"reddit": "https://www.reddit.com/r/dragonchain",
"slack": "",
"telegram": "https://t.me/dragontalk",
"twitter": "https://twitter.com/dragonchaingang",
"youtube": "https://www.youtube.com/channel/UC2_StJYNWFrQz2wiL8n6hoA/videos"
}
},
"STC": {
"symbol": "STC",
"address": "0x629aEe55ed49581C33ab27f9403F7992A289ffd5",
"decimals": 18,
"name": "StrikeCoin Token",
"ens_address": "",
"website": "https://dimensions.network",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://dimensions.network/en/contact_us"
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://fb.me/dimensionsnetwork",
"forum": "",
"github": "https://github.com/DimensionsNetwork",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/dimensions-network",
"reddit": "https://www.reddit.com/r/DimensionsNetwork",
"slack": "",
"telegram": "https://t.me/DimensionsTalk",
"twitter": "https://twitter.com/DN_STC",
"youtube": "https://www.youtube.com/channel/UCticN7-IvIaEpbXVdbsBlcQ/videos"
}
},
"Fzcoin": {
"symbol": "Fzcoin",
"name": "Frozencoin Network",
"type": "ERC20",
"address": "0xE5aeE163513119F4F750376C718766B40fA37A5F",
"ens_address": "",
"decimals": 18,
"website": "https://fzcoin.cc/",
"logo": {
"src": "https://avatars2.githubusercontent.com/u/44194025?s=400&u=970b2065cf404120fe0f9e486c506003aa96563f&v=4",
"width": "100",
"height": "100",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/coin.fz.39",
"forum": "",
"github": "https://github.com/fzcoinProtocol",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/Official_Fzcoin",
"twitter": "",
"youtube": ""
}
},
"NCT": {
"symbol": "NCT",
"address": "0x9E46A38F5DaaBe8683E10793b06749EEF7D733d1",
"decimals": 18,
"name": "Nectar",
"ens_address": "",
"website": "https://polyswarm.io",
"logo": {
"src": "https://polyswarm.io/img/coin/nectar_300x300.png",
"width": "301",
"height": "301",
"ipfs_hash": "QmaQmaNUDawjkbnH6STZStmUMx32se3rBTvgHeZi7Cygmq"
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/swarmdotmarket",
"chat": "",
"facebook": "https://www.facebook.com/PolySwarm/",
"forum": "",
"github": "https://github.com/polyswarm",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/swarm-industries",
"reddit": "https://www.reddit.com/r/polyswarm",
"slack": "",
"telegram": "https://t.me/polyswarm",
"twitter": "https://twitter.com/polyswarm",
"youtube": "https://www.youtube.com/channel/UClkA8JVQ--oMsPomOKM85fA"
}
},
"FYN": {
"symbol": "FYN",
"address": "0x88FCFBc22C6d3dBaa25aF478C578978339BDe77a",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "http://www.fundyourselfnow.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "http://fundyourselfnowslack.herokuapp.com",
"telegram": "https://t.me/fundyourselfnow",
"twitter": "https://twitter.com/fundyourselfnow",
"youtube": ""
}
},
"BTH": {
"symbol": "BTH",
"address": "0xFAd572db566E5234AC9Fc3d570c4EdC0050eAA92",
"decimals": 18,
"name": "Bytether",
"ens_address": "",
"website": "https://www.bytether.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/Bytether",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/bytether",
"slack": "https://bytether.slack.com",
"telegram": "",
"twitter": "https://twitter.com/bytether",
"youtube": ""
}
},
"XYO": {
"symbol": "XYO",
"name": "XYO",
"type": "ERC20",
"address": "0x55296f69f40Ea6d20E478533C15A6B08B654E758",
"ens_address": "",
"decimals": 18,
"website": "https://xyo.network",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://community.xyo.network/",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/XYONetwork",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/officialxyo",
"youtube": ""
}
},
"INT": {
"symbol": "INT",
"name": "<NAME>",
"type": "ERC20",
"address": "0x0b76544F6C413a555F309Bf76260d1E02377c02A",
"ens_address": "",
"decimals": 6,
"website": "https://intchain.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/int_chain",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/intchain",
"youtube": ""
}
},
"YUPIE": {
"symbol": "YUPIE",
"address": "0x0F33bb20a282A7649C7B3AFf644F084a9348e933",
"decimals": 18,
"name": "YUPIE",
"ens_address": "",
"website": "https://www.crowdholding.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/crowdholdingico/YupieSmartContract",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "https://goalman.slack.com",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"JOT": {
"symbol": "JOT",
"name": "<NAME>",
"type": "ERC20",
"address": "0xdb455c71C1bC2de4e80cA451184041Ef32054001",
"ens_address": "",
"decimals": 18,
"website": "https://jury.online",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@Jury.Online",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/jury2beonline",
"youtube": ""
}
},
"TICO": {
"symbol": "TICO",
"address": "0xa5dB1d6F7A0D5Bccc17d0bFD39D7AF32d5E5EDc6",
"decimals": 5,
"name": "Topinvestmentcoin",
"ens_address": "",
"website": "https://www.topinvestmentcoin.online/",
"logo": {
"src": "https://i.imgur.com/DnDglhQ.png",
"width": "130",
"height": "129",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://www.topinvestmentcoin.online/"
},
"social": {
"blog": "https://medium.com/topinvestmentcoin",
"chat": "",
"facebook": "https://web.facebook.com/Topinvestmentcoin/",
"forum": "https://bitcointalk.org/index.php?topic=3123082.0#new",
"github": "https://github.com/Topinvestmentcoin/Topinvestmentcoin-",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/topinvestmentcoin",
"twitter": "https://www.twitter.com/Topinvestcoin",
"youtube": ""
}
},
"ABT": {
"symbol": "ABT",
"address": "0xB98d4C97425d9908E66E53A6fDf673ACcA0BE986",
"decimals": 18,
"name": "ArcBlock Token",
"ens_address": "",
"website": "https://www.arcblock.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"CORI": {
"symbol": "CORI",
"address": "0x725B190Bc077FFde17Cf549AA8ba25e298550B18",
"decimals": 2,
"name": "Corrently Invest Token",
"ens_address": "",
"website": "https://corrently.de/token/",
"logo": {
"src": "https://corrently.de/token/img/logo_square.png",
"width": "128",
"height": "128"
},
"support": {
"email": "<EMAIL>",
"url": "https://corrently.de/token/"
},
"social": {}
},
"MTR": {
"symbol": "MTR",
"address": "0x7FC408011165760eE31bE2BF20dAf450356692Af",
"decimals": 8,
"name": "Mitrav",
"ens_address": "",
"website": "https://mitrav.co",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://mitrav.co"
},
"social": {
"blog": "",
"chat": "skype:team mitrav?add",
"facebook": "https://www.facebook.com/mitrav.mitrav.58",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/25049387",
"reddit": "https://www.reddit.com/user/Mitrav",
"slack": "https://mitrav.slack.com/open",
"telegram": "https://t.me/mitravteam",
"twitter": "https://twitter.com/mitrav_m",
"youtube": ""
}
},
"SENC": {
"symbol": "SENC",
"name": "Sentinel Chain",
"type": "ERC20",
"address": "0xA13f0743951B4f6E3e3AA039f682E17279f52bc3",
"ens_address": "",
"decimals": 18,
"website": "https://www.sentinel-chain.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://www.medium.com/sentinelchain",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
| |
Closest feature of each type
if held_obj_name == "onion":
all_features["p{}_closest_onion".format(i)] = (0, 0)
else:
make_closest_feature(i, "onion", self.get_onion_dispenser_locations() + counter_objects["onion"])
make_closest_feature(i, "empty_pot", pot_state["empty"])
make_closest_feature(i, "one_onion_pot", pot_state["1_items"])
make_closest_feature(i, "two_onion_pot", pot_state["2_items"])
make_closest_feature(i, "cooking_pot", pot_state["cooking"])
make_closest_feature(i, "ready_pot", pot_state["ready"])
if held_obj_name == "dish":
all_features["p{}_closest_dish".format(i)] = (0, 0)
else:
make_closest_feature(i, "dish", self.get_dish_dispenser_locations() + counter_objects["dish"])
if held_obj_name == "soup":
all_features["p{}_closest_soup".format(i)] = (0, 0)
else:
make_closest_feature(i, "soup", counter_objects["soup"])
make_closest_feature(i, "serving", self.get_serving_locations())
for direction, pos_and_feat in enumerate(self.get_adjacent_features(player)):
adj_pos, feat = pos_and_feat
if direction == player.orientation:
# Check if counter we are facing is empty
facing_counter = (feat == 'X' and adj_pos not in overcooked_state.objects.keys())
facing_counter_feature = [1] if facing_counter else [0]
# NOTE: Really, this feature should have been "closest empty counter"
all_features["p{}_facing_empty_counter".format(i)] = facing_counter_feature
all_features["p{}_wall_{}".format(i, direction)] = [0] if feat == ' ' else [1]
features_np = {k: np.array(v) for k, v in all_features.items()}
p0, p1 = overcooked_state.players
p0_dict = {k: v for k, v in features_np.items() if k[:2] == "p0"}
p1_dict = {k: v for k, v in features_np.items() if k[:2] == "p1"}
p0_features = np.concatenate(list(p0_dict.values()))
p1_features = np.concatenate(list(p1_dict.values()))
p1_rel_to_p0 = np.array(pos_distance(p1.position, p0.position))
abs_pos_p0 = np.array(p0.position)
ordered_features_p0 = np.squeeze(np.concatenate([p0_features, p1_features, p1_rel_to_p0, abs_pos_p0]))
p0_rel_to_p1 = np.array(pos_distance(p0.position, p1.position))
abs_pos_p1 = np.array(p1.position)
ordered_features_p1 = np.squeeze(np.concatenate([p1_features, p0_features, p0_rel_to_p1, abs_pos_p1]))
return ordered_features_p0, ordered_features_p1
    def featurize(self, idx, overcooked_state, action, mlam, horizon=400):
        """
        Encode state with some manually designed features.
        NOTE: currently works for just two players.

        Applies `action` for player `idx` (the other player executes STAY),
        advances the state by one transition, and then builds a dict of
        scalar/int features for each player from the *resulting* state.

        Args:
            idx (int): index (0 or 1) of the player taking `action`.
            overcooked_state: current state; the features are computed on its
                successor, not on this state itself.
            action: the action taken by player `idx`.
            mlam: medium-level action manager; its motion planner is used for
                distance-to-feature computations.
            horizon (int): unused here — presumably kept for interface
                compatibility with other featurizers (TODO confirm).

        Returns:
            (p0_dict, p1_dict): two dicts mapping feature name -> np.array,
            split by the "p0"/"p1" prefix of the feature keys.
        """
        #TODO: what happens if we are in terminal state?
        # self.is_terminal(overcooked_state)
        act2use = None
        if idx == 0:
            act2use = [action, Action.STAY]
        else:
            act2use = [Action.STAY, action]
        nextState, _ = self.get_state_transition(overcooked_state, act2use)
        overcooked_state = nextState
        all_features = {}
        def make_closest_feature(idx, name, locations):
            "Compute (x, y) deltas to closest feature of type `name`, and save it in the features dict"
            # NOTE(review): `player` is read from the enclosing per-player loop
            # below (late-binding closure); this helper is only called inside
            # that loop, so the binding is always the current player.
            # Unlike the sibling featurization above, this stores the scalar
            # Euclidean length of the deltas, not the (dx, dy) pair.
            delt = self.get_deltas_to_closest_location(player, locations,mlam)
            all_features["p{}_closest_{}".format(idx, name)] = math.sqrt((delt[0] ** 2) + (delt[1] ** 2))
        # Fixed object vocabulary; index 0 doubles as the "holding nothing"
        # encoding below, so "onion" and "none" share the value 0.0.
        IDX_TO_OBJ = ["onion", "soup", "dish", "tomato"]
        OBJ_TO_IDX = {o_name: idx for idx, o_name in enumerate(IDX_TO_OBJ)}
        counter_objects = self.get_counter_objects_dict(overcooked_state)
        pot_state = self.get_pot_states(overcooked_state)
        # Player Info
        for i, player in enumerate(overcooked_state.players):
            orientation_idx = Direction.DIRECTION_TO_INDEX[player.orientation]
            all_features["p{}_orientation".format(i)] = orientation_idx
            obj = player.held_object
            if obj is None:
                held_obj_name = "none"
                all_features["p{}_objs".format(i)] = 0.0
            else:
                held_obj_name = obj.name
                obj_idx = OBJ_TO_IDX[held_obj_name]
                all_features["p{}_objs".format(i)] = obj_idx
            # Closest feature of each type; distance is 0.0 when the player is
            # already holding that object type.
            if held_obj_name == "onion":
                all_features["p{}_closest_onion".format(i)] = 0.0
            else:
                make_closest_feature(i, "onion", self.get_onion_dispenser_locations() + counter_objects["onion"])
            make_closest_feature(i, "empty_pot", pot_state["empty"])
            make_closest_feature(i, "one_onion_pot", pot_state["1_items"])
            make_closest_feature(i, "two_onion_pot", pot_state["2_items"])
            make_closest_feature(i, "cooking_pot", pot_state["cooking"])
            make_closest_feature(i, "ready_pot", pot_state["ready"])
            if held_obj_name == "dish":
                all_features["p{}_closest_dish".format(i)] = 0.0
            else:
                make_closest_feature(i, "dish", self.get_dish_dispenser_locations() + counter_objects["dish"])
            if held_obj_name == "soup":
                all_features["p{}_closest_soup".format(i)] = 0.0
            else:
                make_closest_feature(i, "soup", counter_objects["soup"])
            make_closest_feature(i, "serving", self.get_serving_locations())
            for direction, pos_and_feat in enumerate(self.get_adjacent_features(player)):
                adj_pos, feat = pos_and_feat
                # NOTE(review): `direction` is an int index while
                # `player.orientation` looks like it could be a direction
                # tuple elsewhere in this codebase — confirm they are
                # comparable here.
                if direction == player.orientation:
                    # Check if counter we are facing is empty
                    facing_counter = (feat == 'X' and adj_pos not in overcooked_state.objects.keys())
                    facing_counter_feature = [1] if facing_counter else [0]
                    # NOTE: Really, this feature should have been "closest empty counter"
                    all_features["p{}_facing_empty_counter".format(i)] = facing_counter_feature
                all_features["p{}_wall_{}".format(i, direction)] = 0 if feat == ' ' else 1
        features_np = {k: np.array(v) for k, v in all_features.items()}
        p0, p1 = overcooked_state.players
        p0_dict = {k: v for k, v in features_np.items() if k[:2] == "p0"}
        p1_dict = {k: v for k, v in features_np.items() if k[:2] == "p1"}
        return p0_dict, p1_dict
def get_deltas_to_closest_location(self, player, locations, mlam):
_, closest_loc = mlam.motion_planner.min_cost_to_feature(player.pos_and_or, locations, with_argmin=True)
if closest_loc is None:
# "any object that does not exist or I am carrying is going to show up as a (0,0)
# but I can disambiguate the two possibilities by looking at the features
# for what kind of object I'm carrying"
return (0, 0)
dy_loc, dx_loc = pos_distance(closest_loc, player.position)
return dy_loc, dx_loc
###############################
# POTENTIAL REWARD SHAPING FN #
###############################
def potential_function(self, state, mp, gamma=0.99):
"""
Essentially, this is the ɸ(s) function.
The main goal here to to approximately infer the actions of an optimal agent, and derive an estimate for the value
function of the optimal policy. The perfect potential function is indeed the value function
        At a high level, we assume each agent acts independently, and greedily optimally, and then, using the decay factor "gamma",
we calculate the expected discounted reward under this policy
Some implementation details:
* the process of delivering a soup is broken into 4 steps
* Step 1: placing the first ingredient into an empty pot
* Step 2: placing the remaining ingredients in the pot
            * Step 3: cooking the soup/retrieving a dish with which to serve the soup
* Step 4: delivering the soup once it is in a dish
* Here is an exhaustive list of the greedy assumptions made at each step
* step 1:
* If an agent is holding an ingredient that could be used to cook an optimal soup, it will use it in that soup
* If no such optimal soup exists, but there is an empty pot, the agent will place the ingredient there
* If neither of the above cases holds, no potential is awarded for possessing the ingredient
* step 2:
* The agent will always try to cook the highest valued soup possible based on the current ingredients in a pot
* Any agent possessing a missing ingredient for an optimal soup will travel directly to the closest such pot
* If the optimal soup has all ingredients, the closest agent not holding anything will go to cook it
* step 3:
* Any player holding a dish attempts to serve the highest valued soup based on recipe values and cook time remaining
* step 4:
* Any agent holding a soup will go directly to the nearest serving area
* At every step, the expected reward is discounted by multiplying the optimal reward by gamma ^ (estimated #steps to complete greedy action)
* In the case that certain actions are infeasible (i.e. an agent is holding a soup in step 4, but no path exists to a serving
area), estimated number of steps in order to complete the action defaults to `max_steps`
* Cooperative behavior between the two agents is not considered for complexity reasons
* Soups that are worth <1 points are rounded to be worth 1 point. This is to incentivize the agent to cook a worthless soup
that happens to be in a pot in order to free up the pot
Parameters:
state: OvercookedState instance representing the state to evaluate potential for
mp: MotionPlanner instance used to calculate gridworld distances to objects
gamma: float, discount factor
max_steps: int, number of steps a high level action is assumed to take in worst case
Returns
phi(state), the potential of the state
"""
if not hasattr(Recipe, '_tomato_value') or not hasattr(Recipe, '_onion_value'):
raise ValueError("Potential function requires Recipe onion and tomato values to work properly")
# Constants needed for potential function
potential_params = {
'gamma' : gamma,
'tomato_value' : Recipe._tomato_value if Recipe._tomato_value else 13,
'onion_value' : Recipe._onion_value if Recipe._tomato_value else 21,
**POTENTIAL_CONSTANTS.get(self.layout_name, POTENTIAL_CONSTANTS['default'])
}
pot_states = self.get_pot_states(state)
# Base potential value is the geometric sum of making optimal soups infinitely
opt_recipe, discounted_opt_recipe_value = self.get_optimal_possible_recipe(state, None, discounted=True, potential_params=potential_params, return_value=True)
opt_recipe_value = self.get_recipe_value(state, opt_recipe)
discount = discounted_opt_recipe_value / opt_recipe_value
steady_state_value = (discount / (1 - discount)) * opt_recipe_value
potential = steady_state_value
# Get list of all soups that have >0 ingredients, sorted based on value of best possible recipe
idle_soups = [state.get_object(pos) for pos in self.get_full_but_not_cooking_pots(pot_states)]
idle_soups.extend([state.get_object(pos) for pos in self.get_partially_full_pots(pot_states)])
idle_soups = sorted(idle_soups, key=lambda soup : self.get_optimal_possible_recipe(state, Recipe(soup.ingredients), discounted=True, potential_params=potential_params, return_value=True)[1], reverse=True)
        # Build mapping of non_idle soups to the potential value each one will contribute
        # Default potential value is maximal discount for last two steps applied to optimal recipe value
cooking_soups = [state.get_object(pos) for pos in self.get_cooking_pots(pot_states)]
done_soups = [state.get_object(pos) for pos in self.get_ready_pots(pot_states)]
non_idle_soup_vals = { soup : gamma**(potential_params['max_delivery_steps'] + max(potential_params['max_pickup_steps'], soup.cook_time - soup._cooking_tick)) * max(self.get_recipe_value(state, soup.recipe), 1) for soup in | |
from __future__ import print_function, division
import hashlib
from copy import deepcopy
import h5py
import numpy as np
from ..util.meshgrid import meshgrid_nd
from ..util.functions import FreezableClass, is_numpy_array, monotonically_increasing, link_or_copy
from astropy import log as logger
from .grid_helpers import single_grid_dims
class SphericalPolarGrid(FreezableClass):
'''
A spherical polar grid.
The grid can be initialized by passing the r, theta, and phi coordinates of cell walls::
>>> grid = SphericalPolarGrid(r_wall, t_wall, p_wall)
where ``r_wall``, ``t_wall``, and ``p_wall`` are 1-d sequences of wall
positions. The number of cells in the resulting grid will be one less
    in each dimension than the length of these arrays.
:class:`~hyperion.grid.SphericalPolarGrid` objects may contain multiple
quantities (e.g. density, specific energy). To access these, you can
specify the name of the quantity as an item::
>>> grid['density']
which is no longer a :class:`~hyperion.grid.SphericalPolarGrid` object, but
a :class:`~hyperion.grid.SphericalPolarGridView` object. When setting
this for the first time, this can be set either to another
:class:`~hyperion.grid.SphericalPolarGridView` object, an external h5py
link, or an empty list. For example, the following should work:
>>> grid['density_new'] = grid['density']
:class:`~hyperion.grid.SphericalPolarGridView` objects allow the
specific dust population to be selected as an index:
>>> grid['density'][0]
Which is also a :class:`~hyperion.grid.SphericalPolarGridView` object. The
data can then be accessed with the ``array`` attribute::
>>> grid['density'][0].array
which is a 3-d array of the requested quantity.
'''
def __init__(self, *args):
self.shape = None
self.r_wall = None
self.t_wall = None
self.p_wall = None
self.r = None
self.t = None
self.p = None
self.gr = None
self.gt = None
self.gp = None
self.gw = None
self.gz = None
self.volumes = None
self.areas = None
self.widths = None
self.quantities = {}
self._freeze()
if len(args) > 0:
self.set_walls(*args)
def set_walls(self, r_wall, t_wall, p_wall):
if type(r_wall) in [list, tuple]:
r_wall = np.array(r_wall)
if type(t_wall) in [list, tuple]:
t_wall = np.array(t_wall)
if type(p_wall) in [list, tuple]:
p_wall = np.array(p_wall)
if not is_numpy_array(r_wall) or r_wall.ndim != 1:
raise ValueError("r_wall should be a 1-D sequence")
if not is_numpy_array(t_wall) or t_wall.ndim != 1:
raise ValueError("t_wall should be a 1-D sequence")
if not is_numpy_array(p_wall) or p_wall.ndim != 1:
raise ValueError("p_wall should be a 1-D sequence")
if not monotonically_increasing(r_wall):
raise ValueError("r_wall should be monotonically increasing")
if not monotonically_increasing(t_wall):
raise ValueError("t_wall should be monotonically increasing")
if not monotonically_increasing(p_wall):
raise ValueError("p_wall should be monotonically increasing")
if np.any(t_wall < 0.) or np.any(t_wall > np.pi):
raise ValueError("t_wall values be in the range [0:pi]")
if np.any(p_wall < 0.) or np.any(p_wall > 2. * np.pi):
raise ValueError("p_wall values be in the range [0:2*pi]")
# Find number of grid cells
self.shape = (len(p_wall) - 1, len(t_wall) - 1, len(r_wall) - 1)
# Store wall positions
self.r_wall = r_wall
self.t_wall = t_wall
self.p_wall = p_wall
# Compute cell centers
if r_wall[0] == 0.:
self.r = np.zeros(len(r_wall) - 1)
self.r[0] = r_wall[1] / 2.
self.r[1:] = 10. ** ((np.log10(r_wall[1:-1]) + np.log10(r_wall[2:])) / 2.)
else:
self.r = 10. ** ((np.log10(r_wall[:-1]) + np.log10(r_wall[1:])) / 2.)
self.t = (t_wall[:-1] + t_wall[1:]) / 2.
self.p = (p_wall[:-1] + p_wall[1:]) / 2.
# Generate 3D versions of r, t, p
#(each array is 3D and defined in every cell)
self.gr, self.gt, self.gp = meshgrid_nd(self.r, self.t, self.p)
# Compute cell centers in cylindrical coordinates
self.gz = self.gr * np.cos(self.gt)
self.gw = self.gr * np.sin(self.gt)
# Generate 3D versions of the inner and outer wall positions respectively
gr_wall_min, gt_wall_min, gp_wall_min = \
meshgrid_nd(r_wall[:-1], t_wall[:-1], p_wall[:-1])
gr_wall_max, gt_wall_max, gp_wall_max = \
meshgrid_nd(r_wall[1:], t_wall[1:], p_wall[1:])
# USEFUL QUANTITIES
dr = gr_wall_max - gr_wall_min
dr2 = gr_wall_max ** 2 - gr_wall_min ** 2
dr3 = gr_wall_max ** 3 - gr_wall_min ** 3
dt = gt_wall_max - gt_wall_min
dcost = np.cos(gt_wall_min) - np.cos(gt_wall_max)
dp = gp_wall_max - gp_wall_min
# CELL VOLUMES
# dV = dr * (r*dtheta) * (r*sin(theta)*dphi)
# V = [r_2^3 - r_1^3] / 3. * [cos(theta_1) - cos(theta_2)] * [phi_2 - phi_1]
self.volumes = dr3 * dcost * dp / 3.
# WALL AREAS
self.areas = np.zeros((6,) + self.shape)
# R walls:
# dA = r^2 * sin(theta) * dtheta * dphi
# A = r^2 * [cos(theta_1) - cos(theta_2)] * [phi_2 - phi_1]
self.areas[0, :, :, :] = gr_wall_min ** 2 * dcost * dp
self.areas[1, :, :, :] = gr_wall_max ** 2 * dcost * dp
# Theta walls:
# dA = r * sin(theta) * dr * dphi
# A = 0.5 * [r_2^2 - r_1^2] * sin(theta) * [phi_2 - phi_1]
self.areas[2, :, :, :] = 0.5 * dr2 * np.sin(gt_wall_min) * dp
self.areas[3, :, :, :] = 0.5 * dr2 * np.sin(gt_wall_max) * dp
# Phi walls:
# dA = r * dr * dtheta
# A = 0.5 * [r_2^2 - r_1^2] * [theta_2 - theta_1]
self.areas[4, :, :, :] = 0.5 * dr2 * dt
self.areas[5, :, :, :] = 0.5 * dr2 * dt
# CELL WIDTHS
self.widths = np.zeros((3,) + self.shape)
# R direction:
# dS = dr
# S = r_2 - r_1
self.widths[0, :, :, :] = dr
# Theta direction:
# dS = r * dtheta
# S = r * [theta_2 - theta_1]
self.widths[1, :, :, :] = self.gr * dt
# Phi direction:
# dS = r * sin(theta) * dphi
# S = r * sin(theta) * [phi_2 - phi_1]
self.widths[2, :, :, :] = self.gr * np.sin(self.gt) * dp
def __getattr__(self, attribute):
    """
    Intercept lookups of the dynamic 'n_dust' attribute.

    'n_dust' is derived on the fly from the stored quantity arrays: it is
    the dust-list length shared by all quantities, or None when no
    quantity carries a dust list. Every other attribute name falls
    through to the regular attribute machinery.

    :raises ValueError: if the quantities disagree on dust-list size.
    """
    if attribute != 'n_dust':
        # Delegate all other names to the normal lookup path.
        return FreezableClass.__getattribute__(self, attribute)
    n_dust = None
    for name in self.quantities:
        # single_grid_dims returns (dust-list length, array shape)
        count, _ = single_grid_dims(self.quantities[name])
        if n_dust is None:
            n_dust = count
        elif count is not None and n_dust != count:
            raise ValueError("Not all dust lists in the grid have the same size")
    return n_dust
def _check_array_dimensions(self, array=None):
    '''
    Check that a grid's array dimensions agree with this grid's metadata

    Parameters
    ----------
    array : np.ndarray or list of np.ndarray, optional
        The array for which to test the dimensions. If this is not
        specified, this method performs a self-consistency check of array
        dimensions and meta-data.
    '''
    n_pop_ref = None
    for quantity in self.quantities:
        # Validate either the supplied array or each stored quantity.
        target = array if array is not None else self.quantities[quantity]
        n_pop, shape = single_grid_dims(target)
        if shape != self.shape:
            raise ValueError("Quantity arrays do not have the right "
                             "dimensions: %s instead of %s"
                             % (shape, self.shape))
        if n_pop is None:
            continue
        # All dust lists must agree on their length.
        if n_pop_ref is None:
            n_pop_ref = n_pop
        elif n_pop != n_pop_ref:
            raise ValueError("Not all dust lists in the grid have the same size")
def read(self, group, quantities='all'):
    '''
    Read the geometry and physical quantities from a spherical polar grid

    Parameters
    ----------
    group : h5py.Group
        The HDF5 group to read the grid from. This group should contain
        groups named 'Geometry' and 'Quantities'.
    quantities : 'all' or list
        Which physical quantities to read in. Use 'all' to read in all
        quantities or a list of strings to read only specific quantities.
    '''
    # Read in geometry first: the consistency check below compares the
    # quantity array shapes against the geometry just read.
    self.read_geometry(group['Geometry'])
    # Read in physical quantities
    self.read_quantities(group['Quantities'], quantities=quantities)
    # Self-consistently check geometry and physical quantities
    self._check_array_dimensions()
def read_geometry(self, group):
    '''
    Read in geometry information from a spherical polar grid

    Parameters
    ----------
    group : h5py.Group
        The HDF5 group to read the grid geometry from.

    Raises
    ------
    ValueError
        If the group does not describe a spherical polar grid.
    Exception
        If the stored geometry hash does not match the recomputed one.
    '''
    grid_type = group.attrs['grid_type'].decode('utf-8')
    if grid_type != 'sph_pol':
        raise ValueError("Grid is not spherical polar")
    walls = (group['walls_1']['r'],
             group['walls_2']['t'],
             group['walls_3']['p'])
    self.set_walls(*walls)
    # The file stores a hash of the geometry; recompute and compare so any
    # mismatch between the metadata and the wall arrays is caught early.
    if group.attrs['geometry'].decode('utf-8') != self.get_geometry_id():
        raise Exception("Calculated geometry hash does not match hash in file")
def read_quantities(self, group, quantities='all'):
'''
Read in physical quantities from a spherical polar grid
Parameters
----------
group : h5py.Group
The HDF5 group to read the grid quantities from
quantities : 'all' or list
Which physical quantities to read in. Use 'all' to read in all
quantities or a list of strings to read only specific quantities.
'''
# Read in physical quantities
if quantities is not None:
for quantity in group:
if quantities == 'all' or quantity in quantities:
array = np.array(group[quantity])
if array.ndim == 4: # if array is 4D, it is a list of 3D arrays
self.quantities[quantity] = [array[i] for i in range(array.shape[0])]
| |
format: Symbol("delu_el") where el is the name of
the element
chempot_range ([max_chempot, min_chempot]): Range to consider the
stability of the slabs.
miller_index (list): Miller index for a specific facet to get a
dictionary for.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
JPERM2 (bool): Whether to plot surface energy in /m^2 (True) or
eV/A^2 (False)
show_unstable (bool): Whether or not to show parts of the surface
energy plot outside the region of stability.
ylim ([ymax, ymin]): Range of y axis
no_doped (bool): Whether to plot for the clean slabs only.
no_clean (bool): Whether to plot for the doped slabs only.
use_entry_labels (bool): If True, will label each slab configuration
according to their given label in the SlabEntry object.
no_label (bool): Option to turn off labels.
Returns:
(Plot): Plot of surface energy vs chempot for all entries.
"""
chempot_range = sorted(chempot_range)
plt = pretty_plot(width=8, height=7) if not plt else plt
axes = plt.gca()
for hkl in self.all_slab_entries.keys():
if miller_index and hkl != tuple(miller_index):
continue
# Get the chempot range of each surface if we only
# want to show the region where each slab is stable
if not show_unstable:
stable_u_range_dict = self.stable_u_range_dict(chempot_range, ref_delu,
no_doped=no_doped,
delu_dict=delu_dict,
miller_index=hkl)
already_labelled = []
label = ''
for clean_entry in self.all_slab_entries[hkl]:
urange = stable_u_range_dict[clean_entry] if \
not show_unstable else chempot_range
# Don't plot if the slab is unstable, plot if it is.
if urange != []:
label = clean_entry.label
if label in already_labelled:
label = None
else:
already_labelled.append(label)
if not no_clean:
if use_entry_labels:
label = clean_entry.label
if no_label:
label = ""
plt = self.chempot_vs_gamma_plot_one(plt, clean_entry, ref_delu,
urange, delu_dict=delu_dict,
delu_default=delu_default,
label=label, JPERM2=JPERM2)
if not no_doped:
for ads_entry in self.all_slab_entries[hkl][clean_entry]:
# Plot the adsorbed slabs
# Generate a label for the type of slab
urange = stable_u_range_dict[ads_entry] \
if not show_unstable else chempot_range
if urange != []:
if use_entry_labels:
label = ads_entry.label
if no_label:
label = ""
plt = self.chempot_vs_gamma_plot_one(plt, ads_entry,
ref_delu, urange,
delu_dict=delu_dict,
delu_default=delu_default,
label=label,
JPERM2=JPERM2)
# Make the figure look nice
plt.ylabel(r"Surface energy (J/$m^{2}$)") if JPERM2 \
else plt.ylabel(r"Surface energy (eV/$\AA^{2}$)")
plt = self.chempot_plot_addons(plt, chempot_range, str(ref_delu).split("_")[1],
axes, ylim=ylim)
return plt
def monolayer_vs_BE(self, plot_eads=False):
    """
    Plots the binding energy energy as a function of monolayers (ML), i.e.
    the fractional area adsorbate density for all facets. For each
    facet at a specific monlayer, only plot the lowest binding energy.

    Args:
        plot_eads (bool): Option to plot the adsorption energy (binding
            energy multiplied by number of adsorbates) instead.

    Returns:
        (Plot): Plot of binding energy vs monolayer for all facets.
    """
    plt = pretty_plot(width=8, height=7)
    for hkl in self.all_slab_entries.keys():
        ml_be_dict = {}
        for clean_entry in self.all_slab_entries[hkl].keys():
            if self.all_slab_entries[hkl][clean_entry]:
                for ads_entry in self.all_slab_entries[hkl][clean_entry]:
                    # Keep only the lowest binding energy per coverage;
                    # 1000 acts as a sentinel "infinity".
                    if ads_entry.get_monolayer not in ml_be_dict.keys():
                        ml_be_dict[ads_entry.get_monolayer] = 1000
                    be = ads_entry.gibbs_binding_energy(eads=plot_eads)
                    if be < ml_be_dict[ads_entry.get_monolayer]:
                        ml_be_dict[ads_entry.get_monolayer] = be
        # sort the binding energies and monolayers
        # in order to properly draw a line plot
        # NOTE(review): if a facet has no adsorbed entries, ml_be_dict is
        # empty and zip(*vals) raises — confirm callers never hit this.
        vals = sorted(ml_be_dict.items())
        monolayers, BEs = zip(*vals)
        # NOTE(review): clean_entry is the leaked loop variable from the
        # inner loop above (the *last* clean entry for this facet), so the
        # line color reflects only that entry — confirm this is intended.
        plt.plot(monolayers, BEs, '-o',
                 c=self.color_dict[clean_entry], label=hkl)
    # NOTE(review): ads_entry is likewise a leaked loop variable; the x-axis
    # label is built from the adsorbates of the last entry seen — confirm.
    adsorbates = tuple(ads_entry.ads_entries_dict.keys())
    plt.xlabel(" %s" * len(adsorbates) % adsorbates + " Coverage (ML)")
    plt.ylabel("Adsorption Energy (eV)") if plot_eads \
        else plt.ylabel("Binding Energy (eV)")
    plt.legend()
    plt.tight_layout()
    return plt
def chempot_plot_addons(self, plt, xrange, ref_el, axes, pad=2.4,
                        rect=(-0.047, 0, 0.84, 1), ylim=None):
    """
    Helper function to make a chempot plot look nicer.

    Fixes vs. previous version: the docstring no longer repeats the return
    line, and the mutable default arguments (list defaults for rect/ylim)
    are replaced with an immutable tuple and a None sentinel.

    Args:
        plt (Plot): Plot to add things to.
        xrange (list): xlim parameter; dashed guides are drawn at both ends.
        ref_el (str): Element of the referenced chempot.
        axes (axes): Axes object from matplotlib.
        pad (float): For tight layout.
        rect (sequence): For tight layout.
        ylim: y-axis limits; taken from the axes when not given.

    Returns:
        (Plot): Modified plot with addons.
    """
    # Make the figure look nice
    plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    axes.set_xlabel(r"Chemical potential $\Delta\mu_{%s}$ (eV)" % (ref_el))
    ylim = ylim if ylim else axes.get_ylim()
    plt.xticks(rotation=60)
    plt.ylim(ylim)
    xlim = axes.get_xlim()
    plt.xlim(xlim)
    plt.tight_layout(pad=pad, rect=rect)
    # Dashed vertical guides marking the chempot window of interest
    plt.plot([xrange[0], xrange[0]], ylim, '--k')
    plt.plot([xrange[1], xrange[1]], ylim, '--k')
    # Annotate the element-rich end of the window ...
    xy = [np.mean([xrange[1]]), np.mean(ylim)]
    plt.annotate("%s-rich" % (ref_el), xy=xy,
                 xytext=xy, rotation=90, fontsize=17)
    # ... and the element-poor end.
    xy = [np.mean([xlim[0]]), np.mean(ylim)]
    plt.annotate("%s-poor" % (ref_el), xy=xy,
                 xytext=xy, rotation=90, fontsize=17)
    return plt
def BE_vs_clean_SE(self, delu_dict, delu_default=0, plot_eads=False,
                   annotate_monolayer=True, JPERM2=False):
    """
    For each facet, plot the clean surface energy against the most
    stable binding energy.

    Args:
        delu_dict (Dict): Dictionary of the chemical potentials to be set as
            constant. Note the key should be a sympy Symbol object of the
            format: Symbol("delu_el") where el is the name of the element.
        delu_default (float): Default value for all unset chemical potentials
        plot_eads (bool): Option to plot the adsorption energy (binding
            energy multiplied by number of adsorbates) instead.
        annotate_monolayer (bool): Whether or not to label each data point
            with its monolayer (adsorbate density per unit primitive area)
        JPERM2 (bool): Whether to plot surface energy in /m^2 (True) or
            eV/A^2 (False)

    Returns:
        (Plot): Plot of clean surface energy vs binding energy for
            all facets.
    """
    plt = pretty_plot(width=8, height=7)
    # The substituted chempot dict depends only on the arguments, not on
    # any loop variable, so build it once (previously rebuilt per entry).
    all_delu_dict = self.set_all_variables(delu_dict, delu_default)
    for hkl in self.all_slab_entries.keys():
        for clean_entry in self.all_slab_entries[hkl].keys():
            if self.all_slab_entries[hkl][clean_entry]:
                # Clean surface energy with all chempots substituted in
                clean_se = self.as_coeffs_dict[clean_entry]
                se = sub_chempots(clean_se, all_delu_dict)
                for ads_entry in self.all_slab_entries[hkl][clean_entry]:
                    ml = ads_entry.get_monolayer
                    be = ads_entry.gibbs_binding_energy(eads=plot_eads)
                    # Now plot the surface energy vs binding energy
                    plt.scatter(se, be)
                    if annotate_monolayer:
                        plt.annotate("%.2f" % (ml), xy=[se, be],
                                     xytext=[se, be])
    if JPERM2:
        plt.xlabel(r"Surface energy ($J/m^2$)")
    else:
        plt.xlabel(r"Surface energy ($eV/\AA^2$)")
    if plot_eads:
        plt.ylabel("Adsorption Energy (eV)")
    else:
        plt.ylabel("Binding Energy (eV)")
    plt.tight_layout()
    plt.xticks(rotation=60)
    return plt
def surface_chempot_range_map(self, elements, miller_index, ranges,
incr=50, no_doped=False, no_clean=False,
delu_dict=None, plt=None, annotate=True,
show_unphyiscal_only=False, fontsize=10):
"""
Adapted from the get_chempot_range_map() method in the PhaseDiagram
class. Plot the chemical potential range map based on surface
energy stability. Currently works only for 2-component PDs. At
the moment uses a brute force method by enumerating through the
range of the first element chempot with a specified increment
and determines the chempot rangeo fht e second element for each
SlabEntry. Future implementation will determine the chempot range
map first by solving systems of equations up to 3 instead of 2.
Args:
elements (list): Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt to duLi and duO, you will supply
[Element("Li"), Element("O")]
miller_index ([h, k, l]): Miller index of the surface we are interested in
ranges ([[range1], [range2]]): List of chempot ranges (max and min values)
for the first and second element.
incr (int): Number of points to sample along the range of the first chempot
no_doped (bool): Whether or not to include doped systems.
no_clean (bool): Whether or not to include clean systems.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
annotate (bool): Whether to annotate each "phase" with the label of
the entry. If no label, uses the reduced formula
show_unphyiscal_only (bool): Whether to only show the shaded region where
surface energy is negative. Useful for drawing other chempot range maps.
"""
# Set up
delu_dict = delu_dict if delu_dict else {}
plt = pretty_plot(12, 8) if not plt else plt
el1, el2 = str(elements[0]), str(elements[1])
delu1 = Symbol("delu_%s" % (str(elements[0])))
delu2 = Symbol("delu_%s" % (str(elements[1])))
range1 = ranges[0]
range2 = ranges[1]
# Find a range map for each entry (surface). This part is very slow, will
# need to implement a more sophisticated method of getting the range map
vertices_dict = {}
for dmu1 in np.linspace(range1[0], range1[1], incr):
# Get chemical potential range of dmu2 for each increment of dmu1
new_delu_dict = delu_dict.copy()
new_delu_dict[delu1] = dmu1
range_dict, se_dict = self.stable_u_range_dict(range2, delu2, dmu_at_0=True,
miller_index=miller_index,
no_doped=no_doped,
no_clean=no_clean,
delu_dict=new_delu_dict,
return_se_dict=True)
# Save the chempot range for dmu1 and dmu2
for entry in range_dict.keys():
if not range_dict[entry]:
continue
if entry not in vertices_dict.keys():
vertices_dict[entry] = []
selist = se_dict[entry]
vertices_dict[entry].append({delu1: dmu1, | |
connected PNs for the "stealing" of connections with a
priority on towards PNs with an above-average number of outgoing connections.
This ensures an even distribution of the connections going from the PN layer.
"""
# don't extend if there's a limit.
if self.max_pn_size is not None and self.pn_size == self.max_pn_size:
return
# add a PN to the Fruitfly and change its parameters accordingly
self.pn_size+=1
self.pn_layer = np.append(self.pn_layer, [0])
self.kc_factor = self.kc_size/self.pn_size
# number of connections from the new PN = avg. PN connectedness
new_avg_pn_con = int(sum([len(p) for k,p in self.proj_functions.items()])/self.pn_size)
weighted_kcs = {}
for cell in self.proj_functions:
# weight the KC with the inverse of its connectedness
weighted_kcs[cell] = 1.0/(1+len(self.proj_functions[cell]))
weighted_kcs[cell] = weighted_kcs[cell]*np.random.rand()
# these winners connect to the new PN
winners = sorted(weighted_kcs, key=weighted_kcs.get, reverse=True)[:new_avg_pn_con]
# add PN to connections of the winner KCs
for kc in winners:
# fully connected winner KCs experience connection switching
if len(self.proj_functions[kc]) == self.proj_size:
pn_con = {pn:len(self.pn_to_kc[pn]) for pn in self.proj_functions[kc]}
# the most connected of the PNs for this winner KC gets robbed
robbed_pn = sorted(pn_con, key=pn_con.get, reverse=True)[0]
# replace PN indices in proj_functions
self.proj_functions[kc][self.proj_functions[kc].index(robbed_pn)] = self.pn_size-1
# update pn_to_kc
del self.pn_to_kc[robbed_pn][self.pn_to_kc[robbed_pn].index(kc)]
# non-full KCs receive a new connection
else:
self.proj_functions[kc].append(self.pn_size-1)
self.pn_to_kc.update(self.forward_connections([self.pn_size-1]))
def reduce_pn_layer(self, del_indices, new_pn_size):
    """
    Shrink the PN layer after dimensions were pruned from the parallel
    co-occurrence count.

    Deleting PNs shifts the indices of every PN that follows them, so a
    mapping from old to new indices is built first and applied to the
    PN->KC wiring before the layer itself is resized.

    :param del_indices: [int] -- the positions that are deleted from the count
    :param new_pn_size: int -- usually the size of the count matrix (in order
        to fit the PN layer to the count)
    """
    # Map each surviving old index to its post-deletion position.
    survivors = [i for i in range(self.pn_size) if i not in del_indices]
    old_to_new_i = {old: new for new, old in enumerate(survivors)}
    # Rewire the projections: drop deleted PNs, re-index the survivors.
    # (The KC layer itself is independent of pn_size.)
    for kc, pns in self.proj_functions.items():
        kept = set(pns).difference(set(del_indices))
        self.proj_functions[kc] = [old_to_new_i[old] for old in kept]
    # Resize the PN layer to match the pruned count matrix.
    self.pn_size = new_pn_size
    self.pn_layer = np.zeros(self.pn_size)
    # Rebuild the PN -> KC lookup from the updated projections.
    self.pn_to_kc = self.forward_connections(list(range(self.pn_size)))
def fit_space(self, unhashed_space, words_to_i):
    """
    Returns vectors which fit (number of dimensions) to pn_size. This enables the Incrementor
    to count independently from the Fruitfly. This method also pads dimensions if the vectors
    are shorter than pn_size.
    Dimension reduction deletes the words from the vectors which have co-occurred the least
    often with words in the space. The dimensions of the returned space are sorted alphabetically.
    :param unhashed_space: {str:[float]} -- words and their corresponding co-occurrence counts
    :param words_to_i: {str:int} -- mapping of vocabulary to the dimension in the co-occurrence count
    :return: {str:[int]} -- co-occurrence counts of length pn_size
    :return: {str:int} -- mapping of vocabulary to PN index (and to the returned vectors)
    :return: {int:str} -- inverse mapping: PN indices to vocabulary
    """
    # pad the vectors if they haven't reached pn_size yet (only in early stages)
    if len(words_to_i) < self.pn_size:
        print("unhashed_space needs to be padded:",len(unhashed_space),"to",self.pn_size,"dimensions.")
        pad_size = self.pn_size - len(words_to_i)
        # zeros are appended at the END of each vector ...
        padded_space = {w:np.append(vec, np.zeros(pad_size)) for w,vec in unhashed_space.items()}
        # NOTE(review): ... yet the word indices are shifted by pad_size,
        # which maps words into the zero-padded tail instead of their
        # original positions. Verify this offset is what downstream
        # consumers expect (prepend-style padding would justify it).
        padded_dic = {w:i+pad_size for w,i in words_to_i.items()}
        padded_ind = {v:k for k,v in padded_dic.items()}
        return padded_space, padded_dic, padded_ind
    # max_pn_size not defined or not yet reached --> fitting not needed
    elif self.max_pn_size is None or len(words_to_i)<=self.max_pn_size:
        return unhashed_space, words_to_i, {v: k for k, v in words_to_i.items()}
    # the space has more dimensions than pn_size --> fitting needed
    else:
        # column sums over all vectors = total co-occurrence mass per dimension
        vecsums = np.zeros(len(unhashed_space[list(unhashed_space.keys())[0]]))
        for w,vec in unhashed_space.items():
            vecsums += vec
        freq = {w:vecsums[i] for w,i in words_to_i.items()}
        # only keep the most frequent context words
        new_keys = sorted(freq, key=freq.get, reverse=True)[:self.max_pn_size]
        fitted_space = {}
        # dimensions to drop: indices of words not among the keepers
        old_dims = [i for w,i in words_to_i.items() if w not in new_keys]
        for w,vec in unhashed_space.items():
            fitted_space[w] = np.delete(vec,old_dims)
        # sort words alphabetically (this sorts the space)
        new_keys.sort()
        new_dic = {k:new_keys.index(k) for k in new_keys}
        new_ind = {v:k for k,v in new_dic.items()}
        return fitted_space, new_dic, new_ind
#========== FFA APPLICATION
def flatten(self, frequency_vector):
    """
    Counteract the Zipfian distribution of co-occurrence counts by
    applying the configured log flattening (log, log2, or log10) to each
    count. The flattening function is specified during initialization of
    the Fruitfly object (self.flattening).

    :param frequency_vector: sequence of raw co-occurrence counts
    :return: np.ndarray of flattened counts, or the input unchanged when
        self.flattening is not one of the known function names
    """
    # '1.0 +' guards against log(0) for counts of zero.
    flatteners = {
        "log": np.log1p,                        # log1p(x) == log(1.0 + x)
        "log2": lambda v: np.log2(1.0 + v),
        "log10": lambda v: np.log10(1.0 + v),
    }
    func = flatteners.get(self.flattening)
    if func is None:
        # Unknown/disabled flattening: pass the vector through untouched.
        return frequency_vector
    # Vectorized ufunc call replaces the original per-element Python loop.
    return func(np.asarray(frequency_vector, dtype=float))
def projection(self):
    """
    Activate the KC layer: each KC's value is the sum of the PN values
    it is connected to.

    :return: np.ndarray -- activated KC layer
    """
    activations = np.zeros(self.kc_size)
    for kc in range(self.kc_size):
        # Accumulate the activations of the PNs wired to this KC.
        activations[kc] = sum(self.pn_layer[pn] for pn in self.proj_functions[kc])
    return activations
def hash_kenyon(self):
    """
    Winner-take-all step: set the top hash_percent % of KCs (by
    activation) to 1 and all others to 0.

    :return: [int] -- binary array; = hashed vector
    """
    # Number of winner cells, rounded up.
    n_winners = int(ceil(self.hash_percent * self.kc_size / 100))
    # Indices of the n_winners most activated KCs (internal order is
    # irrelevant, argpartition avoids a full sort).
    winners = np.argpartition(self.kc_layer, -n_winners)[-n_winners:]
    hashed = np.zeros(self.kc_size)
    hashed[winners] = 1
    return hashed
def fly(self, unhashed_space, words_to_i, timed=False):
    """
    Hash every vector of the input space.

    The space is first fitted to the PN layer (choosing the most frequent
    words as dimensions when necessary); each vector is then flattened,
    projected onto the KC layer, and hashed by winner-take-all.

    :param unhashed_space: {str:[int]} -- words and their raw co-ocurrence counts
    :param words_to_i: {str:int} -- mapping of vocabulary to dimension in the co-occurrence count
    :param timed: bool -- optionally return the time taken for execution
    :return: {str:[int]} -- words and their binary hash signatures
    :return: {str:int} -- mapping of words (from unhashed_space) to PN-layer indices
    :return: {int:str} -- inverse mapping: PN indices to words of the dimensions used for hashing
    :return: float -- (only if timed) time taken for execution
    """
    start = time.time()
    print("Starting flying...")
    # Fit the space to the PN layer (pads or prunes dimensions as needed).
    fitted_space, flight_dic, flight_ind = self.fit_space(unhashed_space, words_to_i)
    space_hashed = {}
    for word in tqdm(fitted_space):
        # flatten -> project -> winner-take-all, one vector at a time;
        # hashes share the dimensionality of the KC layer.
        self.pn_layer = self.flatten(fitted_space[word])
        self.kc_layer = self.projection()
        space_hashed[word] = self.hash_kenyon()
    if timed is True:
        return space_hashed, flight_dic, flight_ind, time.time() - start
    return space_hashed, flight_dic, flight_ind
if __name__ == '__main__':
"""
This working example of the application of the FFA reads in a space (= words and vectors) and evaluates
on a given test set by means of Spearman Correlation:
it either applies the FFA (one or multiple times) and evaluates the improvement of the hashes over the unhashed
space or it evaluates just the input space, without applying the FFA.
"""
import sys
import MEN
import utils
# parameter input
while True:
spacefiles = utils.loop_input(rtype=str, default=None, msg="Space to be used (without file extension): ")
try:
data = spacefiles + ".dm"
column_labels = spacefiles + ".cols"
# returns {word:word_vector}
unhashed_space = utils.readDM(data)
# returns both-ways dicts of the vocabulary (word:index_in_vector)
i_to_cols, cols_to_i = utils.readCols(column_labels)
except | |
import utils
import os
import random
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import jellyfish # for Levenshtein distance
import re
import math
import nltk
from nltk.tokenize import sent_tokenize # for sentence tokenization
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
# install nltk packages
nltk_downloader = nltk.downloader.Downloader()
if not nltk_downloader.is_installed('punkt'):
nltk_downloader.download('punkt')
if not nltk_downloader.is_installed('averaged_perceptron_tagger'):
nltk_downloader.download('averaged_perceptron_tagger')
####
# Stopwords list and tokenization functions
####
# nltk stoplist is not complete
nltk_sw = ['d', 'm', 'o', 's', 't', 'y', 'll', 're', 've', 'ma',
"that'll", 'ain',
"she's", "it's", "you're", "you've", "you'll", "you'd",
'isn', "isn't", 'aren', "aren't", 'wasn', "wasn't", 'weren', "weren't",
'don', "don't", 'doesn', "doesn't", 'didn', "didn't",
'hasn', "hasn't", 'haven', "haven't", 'hadn', "hadn't",
'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't",
'shan', "shan't", 'shouldn', "shouldn't", "should've",
'won', "won't", 'wouldn', "wouldn't", 'couldn', "couldn't",
'i', 'me', 'my', 'we', 'our', 'ours', 'you', 'your', 'yours',
'he', 'him', 'his', 'she', 'her', 'hers', 'it', 'its', 'they', 'them', 'their', 'theirs',
'himself', 'herself', 'itself', 'myself',
'yourself', 'yourselves', 'ourselves', 'themselves',
'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those',
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
'had', 'has', 'have', 'having', 'do', 'does', 'did', 'doing',
'a', 'an', 'the', 'and', 'but', 'if', 'or',
'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with',
'about', 'against', 'between', 'into', 'through',
'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off',
'over', 'under', 'here', 'there', 'when', 'where', 'why', 'how',
'all', 'any', 'both', 'each', 'few', 'more', 'most',
'other', 'some', 'such', 'no', 'nor',
'only', 'own', 'same', 'so', 'than', 'too',
'again', 'further', 'then', 'once', 'can', 'will', 'just',
'should', 'now']
# removed from nltk stoplist: not, very
added_sw = [ "he's", "he'd", "she'd", "he'll", "she'll", "you'll",
"they'd", "could've", "would've", 'could', 'would', "i'm", 'im',
"thatll", "shes", "youre", "youve", "youll", "youd",
"isnt", "arent", "wasnt", "werent",
"dont", "doesnt", "didnt",
"hasnt", "havent", "hadnt",
"mightnt", "mustnt", "neednt",
"shant", "shouldnt", "shouldve",
"wont", "wouldnt", "couldnt",
'a','b','c','e','f','g','h','i','j','k','l','n','p','q','r','u','v','w','x','z','lol']
stop_words = added_sw + nltk_sw
punc = ''',.;:?!'"()[]{}<>|\/@#^&*_~=+\n\t''' #exclude hyphen, $, %
fullstop = '.'
# Input a string
# Returns a list of tokens with no stopwords, punctuation, numbers
def text_preprocess_clean(review):
    """
    Tokenize a review into a clean token list: punctuation, digits and
    stopwords are removed and common product-domain plurals are
    normalized to their singular form.

    :param review: str -- raw review text
    :return: [str] -- cleaned tokens
    """
    # Replace every punctuation character with a space in one C-level
    # pass (equivalent to the previous chain of str.replace calls).
    review = review.translate(str.maketrans(punc, ' ' * len(punc)))
    review = review.lower()
    # Normalize domain plurals. Order matters: more specific forms come
    # before the substrings they contain ('headphones'/'iphones' before
    # 'phones').
    plural_map = (('protectors', 'protector'), ('headphones', 'headphone'),
                  ('iphones', 'iphone'), ('phones', 'phone'),
                  ('mounts', 'mount'), ('stands', 'stand'),
                  ('adapters', 'adapter'), ('chargers', 'charger'),
                  ('cables', 'cable'), ('packs', 'pack'),
                  ('batteries', 'battery'), ('cards', 'card'),
                  ('styluses', 'stylus'), ('kits', 'kit'),
                  ('speakers', 'speaker'), ('docks', 'dock'),
                  ('boosters', 'booster'), ('cases', 'case'))
    for plural, singular in plural_map:
        review = review.replace(plural, singular)
    # Strip digits; raw string avoids the invalid '\d' escape warning.
    review = re.sub(r'\d+', '', review)
    tokens = word_tokenize(review)
    return [w for w in tokens if w not in stop_words]
# Input a string
# Returns a list of tokens with punctuation and numbers and stopwords
# (punctuation allows us to eliminate meaningless bigrams containing punctuation symbols)
def text_preprocess(review):
    """
    Tokenize a review while keeping punctuation, numbers and stopwords
    (punctuation lets later steps eliminate meaningless n-grams that
    span punctuation symbols). Contractions are expanded and common
    product-domain plurals are normalized to their singular form.

    :param review: str -- raw review text
    :return: [str] -- tokens
    """
    # Isolate full stops so they survive tokenization as separate tokens.
    review = review.replace(fullstop, ' . ')
    review = review.lower()
    # Expand contractions. Irregular negations ("shan't", "won't") must
    # run before the generic "n't" rule, which would otherwise turn them
    # into "sha not" / "wo not". (The previous patterns carried a
    # spurious trailing apostrophe -- e.g. "'m'" -- and so never matched,
    # and "'ll" mapped to the typo 'wi11' instead of 'will'.)
    contractions = (("shan't", 'shall not'), ("won't", 'will not'),
                    ("n't", ' not'),
                    ("'m", ' am'), ("'re", ' are'), ("'ve", ' have'),
                    ("'ll", ' will'), ("'s", ' is'), ("'d", ''))
    for pattern, expansion in contractions:
        review = review.replace(pattern, expansion)
    # Normalize domain plurals (specific forms before their substrings).
    plural_map = (('protectors', 'protector'), ('headphones', 'headphone'),
                  ('iphones', 'iphone'), ('phones', 'phone'),
                  ('mounts', 'mount'), ('stands', 'stand'),
                  ('adapters', 'adapter'), ('chargers', 'charger'),
                  ('cables', 'cable'), ('packs', 'pack'),
                  ('batteries', 'battery'), ('cards', 'card'),
                  ('styluses', 'stylus'), ('kits', 'kit'),
                  ('speakers', 'speaker'), ('docks', 'dock'),
                  ('boosters', 'booster'), ('cases', 'case'))
    for plural, singular in plural_map:
        review = review.replace(plural, singular)
    return word_tokenize(review)
####
# Calculate idf for all words in the corpus, excluding stopwords
####
# Create the dictionary word_df (include stopwords)
# dataframe = df_allreview
def compute_idf(dataframe):
    """
    Compute the inverse document frequency (idf) of every word in the
    corpus, where each product's concatenated reviews form one document.

    :param dataframe: pd.DataFrame with an 'all_reviews' column
        (one row per product), i.e. df_allreview
    :return: dict {word : idf} for every word in the cleaned vocabulary
        (stopwords are already removed by text_preprocess_clean)
    """
    df = dataframe
    vocabulary = set()              # corpus vocabulary (stopwords removed by cleaning)
    doc_f = defaultdict(lambda: 0)  # {word : number of documents containing the word}
    idf = dict()                    # {word : idf}
    for _, row in df.iterrows():
        # Tokenize once per row; the previous version ran the expensive
        # preprocessing twice per document for no benefit.
        tokens = text_preprocess_clean(row['all_reviews'])
        vocabulary.update(tokens)
        for t in set(tokens):
            doc_f[t] += 1
    vocabulary = list(vocabulary)
    DOC_COUNT = len(df)             # number of products (documents)
    VOCAB_COUNT = len(vocabulary)   # number of unique words
    print(f'Number of words in corpus (excluding stopwords): {VOCAB_COUNT}')
    print(f'Number of documents (products): {DOC_COUNT}')
    # idf = log10(N / df): rarer words receive larger weights.
    for w in vocabulary:
        idf[w] = math.log10(DOC_COUNT / float(doc_f[w]))
    return idf
####
# Search for and print the product's reviews
####
# search for the product with index idx and prints the data for the product
# dataf = df_review
def search(idx, dataf):
    """
    Print summary data and three sample reviews for the product at row
    index idx of the review dataframe, and return its review count.

    :param idx: row index of the product
    :param dataf: review dataframe (df_review)
    :return: number of reviews of the product
    """
    row = dataf.loc[idx]
    pid = row['asin']
    n = row['num_reviews']
    print(f'Index: {idx}')
    print(f'Product ID: {pid}')
    print(f'Number of reviews: {n}\n')
    print('Sample reviews:\n')
    for i in (1, 2, 3):
        # Reviews live in the columns following 'asin'/'num_reviews'.
        print(f'Review {i}:\n {row[i + 1]}\n')
    return n
####
# Calculate tf and tf-idf for each word in the product's reviews (excluding stopwords)
####
# Returns a dictionary {word : tf) for all words (excluding stopwords) of the product
# tf = word frequency / total number of words (excluding stopwords)
# dataf = df_allreview
def word_tfidf(idx, idf, dataf):
    """
    Compute term frequency and tf-idf for every word (stopwords
    excluded) in one product's concatenated reviews.

    tf uses the sublinear scheme 1 + log10(raw count); tf-idf multiplies
    by the corpus idf when available, otherwise falls back to tf alone.

    :param idx: row index of the product
    :param idf: {word : idf} as produced by compute_idf
    :param dataf: dataframe with an 'all_reviews' column (df_allreview)
    :return: (tfreq, tf, tfidf) -- three {word : value} dicts
    """
    # Tokenize once; the previous version ran the expensive cleaning
    # twice (once for the token list, once for the unique set).
    tokens = text_preprocess_clean(dataf.loc[idx]['all_reviews'])
    tfreq = defaultdict(lambda: 0)  # {word : raw count in all_reviews}
    tf = defaultdict(lambda: 0)
    tfidf = defaultdict(lambda: 0)
    for token in tokens:
        tfreq[token] += 1
    for token in set(tokens):
        tf[token] = 1 + math.log10(float(tfreq[token]))
        # Words missing from the idf table contribute tf alone.
        tfidf[token] = tf[token] * idf[token] if token in idf else tf[token]
    return tfreq, tf, tfidf
####
# Get all candidate phrases (unigrams, bigrams and trigrams) by tokenization and filter out undesirable candidates
####
# returns all unigrams (excluding stopwords) for the product with index idx
# dataf = df_allreview
def unigram(idx, dataf):
    """
    Return the unique cleaned tokens (unigram candidates, stopwords
    excluded) of the product's concatenated reviews.

    :param idx: row index of the product
    :param dataf: dataframe with an 'all_reviews' column (df_allreview)
    :return: [str] -- unique tokens
    """
    raw_text = dataf.loc[idx]['all_reviews']
    return list(set(text_preprocess_clean(raw_text)))
# returns all bigrams for the product with index idx
# remove and reduce bigrams by checking punctuation and stopwords
# dataf = df_allreview
def bigram(idx, dataf):
    """
    Return the unique bigram candidates of the product's reviews,
    discarding any bigram that contains a stopword or punctuation token.

    :param idx: row index of the product
    :param dataf: dataframe with an 'all_reviews' column (df_allreview)
    :return: [(str, str)] -- filtered, de-duplicated bigrams
    """
    tokens = text_preprocess(dataf.loc[idx]['all_reviews'])
    # The previous version applied the same stopword/punctuation test
    # twice (negated for 'continue', asserted for 'append') and then
    # de-duplicated an already-unique list; one filter over the set of
    # n-grams is equivalent.
    return [b for b in set(nltk.ngrams(tokens, 2))
            if b[0] not in stop_words and b[1] not in stop_words
            and b[0] not in punc and b[1] not in punc]
# returns all trigrams for the product with index idx
# remove and reduce trigrams by checking punctuation and stopwords
# dataf = df_allreview
def trigram(idx, dataf):
    """
    Return the unique trigram candidates of the product's reviews,
    discarding any trigram that contains a stopword or punctuation token.

    :param idx: row index of the product
    :param dataf: dataframe with an 'all_reviews' column (df_allreview)
    :return: [(str, str, str)] -- filtered, de-duplicated trigrams
    """
    tokens = text_preprocess(dataf.loc[idx]['all_reviews'])
    # Same redundancy removed as in bigram(): a single filter over the
    # set of n-grams replaces the duplicated test and extra set() pass.
    return [t for t in set(nltk.ngrams(tokens, 3))
            if all(w not in stop_words and w not in punc for w in t)]
####
# POS-tag each candidates phrase and select those satisfying certain POS tag patterns
####
def tagging(tokens):
    """
    POS-tag a token list with nltk's default tagger.

    :param tokens: [str]
    :return: [(str, str)] -- (token, POS tag) pairs
    """
    return nltk.pos_tag(tokens)
# For bigrams, selects and returns the list of final candidates
def candidate_pos(tokens, n):
candidates = []
# Popular phone brands - for bigrams, include it as a candidate if the first word is a phone brand
# (because second word likely to be phone model)
brands = ('nokia','motorola','iphone','samsung','xiaomi','huawei',
'siemens','sony','sonyericsson', 'ericsson',
'palm','blackberry','htc','alcatel','benq','at&t','galaxy',
'apple','asus','casio','google','kyocera','nec','sony','android')
# JJR - adj comparative, JJS - adj superlative,
# RBR - adverb comparative, RBS - adverb superlative
# CD - cardinal number
unigram_tags = ('NN','NNS','NNP','NNPS')
noun_tags = ('NN','NNS','NNP','NNPS')
adjective_tags = ('JJ','JJR','JJS','CD')
adverb_tags = ('RB','RBR','RBS') # RB for 'not' and 'very'
#verb_tags = ('VB','VBD','VBP','VBZ')
if n == 1: # for unigrams
tagged_tokens = tagging(tokens)
for t in tagged_tokens:
if t[0] in brands:
candidates.append(t[0])
if t[1] in unigram_tags:
candidates.append(t[0])
if n == 2: # for bigrams
for x in tokens:
t = tagging(x)
if x[0] in brands:
candidates.append(x)
if | |
will be bypassed.
.. note:: The UTC to TT correction is only carried out for dates after
January 1st, 1972.
:param args: Either JDE, Epoch, date, datetime or year, month, day,
hours, minutes, seconds values, by themselves or inside a tuple or
list
:type args: int, float, :py:class:`Epoch`, tuple, list, date,
datetime
:param utc: Whether the provided epoch is a civil time (UTC)
:type utc: bool
:param leap_seconds: This is the value to be used in the UTC->TAI
conversion, instead of taking it from internal leap seconds table.
:type leap_seconds: int, float
:returns: None.
:rtype: None
:raises: ValueError if input values are in the wrong range.
:raises: TypeError if input values are of wrong type.
>>> e = Epoch()
>>> e.set(1987, 6, 19.5)
>>> print(e)
2446966.0
>>> e.set(1977, 'Apr', 26.4)
>>> print(e)
2443259.9
>>> e.set(1957, 'October', 4.81)
>>> print(e)
2436116.31
>>> e.set(333, 'Jan', 27, 12)
>>> print(e)
1842713.0
>>> e.set(1900, 'Jan', 1)
>>> print(e)
2415020.5
>>> e.set(-1001, 'august', 17.9)
>>> print(e)
1355671.4
>>> e.set(-4712, 1, 1.5)
>>> print(e)
0.0
>>> e.set((1600, 12, 31))
>>> print(e)
2305812.5
>>> e.set([1988, 'JUN', 19, 12])
>>> print(e)
2447332.0
>>> d = datetime.date(2000, 1, 1)
>>> e.set(d)
>>> print(e)
2451544.5
>>> e.set(837, 'Apr', 10, 7, 12)
>>> print(e)
2026871.8
>>> d = datetime.datetime(837, 4, 10, 7, 12, 0, 0)
>>> e.set(d)
>>> print(e)
2026871.8
"""
# Clean up the internal parameters
self._jde = 0.0
# If no arguments are given, return. Internal values are 0.0
if len(args) == 0:
return
# If we have only one argument, it can be a JDE or another Epoch object
elif len(args) == 1:
if isinstance(args[0], Epoch):
self._jde = args[0]._jde
return
elif isinstance(args[0], (int, float)):
self._jde = args[0]
return
elif isinstance(args[0], (tuple, list)):
year, month, day, hours, minutes, sec = \
self._check_values(*args[0])
elif isinstance(args[0], datetime.datetime):
d = args[0]
year, month, day, hours, minutes, sec = self._check_values(
d.year,
d.month,
d.day,
d.hour,
d.minute,
d.second + d.microsecond / 1e6,
)
elif isinstance(args[0], datetime.date):
d = args[0]
year, month, day, hours, minutes, sec = self._check_values(
d.year, d.month, d.day
)
else:
raise TypeError("Invalid input type")
elif len(args) == 2:
# Insufficient data to set the Epoch
raise ValueError("Invalid number of input values")
elif len(args) >= 3: # Year, month, day
year, month, day, hours, minutes, sec = self._check_values(*args)
day += hours / DAY2HOURS + minutes / DAY2MIN + sec / DAY2SEC
# Handle the 'leap_seconds' argument, if present
if "leap_seconds" in kwargs:
# Compute JDE
self._jde = self._compute_jde(year, month, day, utc2tt=False,
leap_seconds=kwargs["leap_seconds"])
elif "utc" in kwargs:
self._jde = self._compute_jde(year, month, day,
utc2tt=kwargs["utc"])
else:
self._jde = self._compute_jde(year, month, day, utc2tt=False)
def _compute_jde(self, y, m, d, utc2tt=True, leap_seconds=0.0):
    """Compute the Julian Ephemeris Day (JDE) for a calendar date.

    .. note:: The UTC to TT correction is only carried out for dates
        after January 1st, 1972.

    :param y: Year
    :type y: int
    :param m: Month
    :type m: int
    :param d: Day (may carry a fractional part)
    :type d: float
    :param utc2tt: Whether the UTC to TT correction is done automatically.
    :type utc2tt: bool
    :param leap_seconds: Number of leap seconds to apply when `utc2tt`
        is False and this value is non-zero.
    :type leap_seconds: float
    :returns: Julian Ephemeris Day (JDE)
    :rtype: float
    """
    # Standard Julian Day algorithm: fold Jan/Feb into months 13/14 of
    # the previous year before the integer arithmetic.
    if m <= 2:
        y -= 1
        m += 12
    century = iint(y / 100.0)
    # Gregorian-calendar correction; zero for Julian-calendar dates.
    gregorian_corr = 0.0
    if not Epoch.is_julian(y, m, iint(d)):
        gregorian_corr = 2.0 - century + iint(century / 4.0)
    jde = (iint(365.25 * (y + 4716.0)) + iint(30.6001 * (m + 1.0))
           + d + gregorian_corr - 1524.5)
    # Seconds to add for the UTC -> TT conversion, when applicable.
    deltasec = 0.0
    if utc2tt:
        # Automatic correction: TT-TAI offset, UTC-TAI offset at 1972,
        # plus the accumulated leap seconds from the internal table.
        if y >= 1972:
            deltasec = 32.184 + 10.0 + Epoch.leap_seconds(y, m)
    elif leap_seconds != 0.0 and y >= 1972:
        # Manual correction using the caller-provided leap seconds.
        deltasec = 32.184 + 10.0 + leap_seconds
    return jde + deltasec / DAY2SEC
def _check_values(self, *args):
    """Sanity-check the date components handed to :meth:`set`.

    Takes year, month, day and optionally hours, minutes, seconds, and
    returns them as a tuple, substituting zeros for the components that
    were not provided.

    :param args: Year, month, day, hours, minutes, seconds values.
    :type args: int, float
    :returns: Tuple (year, month, day, hours, minutes, sec).
    :rtype: tuple
    :raises: ValueError if input values are in the wrong range, or too
        few arguments given as input.
    """
    # Maximum number of days per month (February handled below).
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if len(args) < 3:
        raise ValueError("Invalid number of input values")
    year, month, day = args[0], args[1], args[2]
    # Optional time-of-day components default to zero.
    hours = args[3] if len(args) >= 4 else 0.0
    minutes = args[4] if len(args) >= 5 else 0.0
    sec = args[5] if len(args) >= 6 else 0.0
    # Range checks; years before -4712 would yield a negative JDE.
    if year < -4712:
        raise ValueError("Invalid value for the input year")
    if not 1 <= day < 32:
        raise ValueError("Invalid value for the input day")
    if not 0 <= hours < 24:
        raise ValueError("Invalid value for the input hours")
    if not 0 <= minutes < 60:
        raise ValueError("Invalid value for the input minutes")
    if not 0 <= sec < 60:
        raise ValueError("Invalid value for the input seconds")
    # Normalize the month (accepts names as well as numbers) and verify
    # the day against that month's length, with the leap-year exception.
    month = Epoch.get_month(month)
    if month == 2 and Epoch.is_leap(year):
        limit_day = 29
    else:
        limit_day = days_per_month[month - 1]
    if day > limit_day:
        raise ValueError("Invalid value for the input day")
    return year, month, day, hours, minutes, sec
@staticmethod
def check_input_date(*args, **kwargs):
"""Method to check that the input is a proper date.
This method returns an Epoch object, and the **leap_seconds** argument
then controls the way the UTC->TT conversion is handled for that new
object. If **leap_seconds** argument is set to a value different than
zero, then that value will be used for the UTC->TAI conversion, and the
internal leap seconds table will be bypassed. On the other hand, if it
is set to zero, then the UTC to TT correction is disabled, and it is
supposed that the input data is already in TT scale.
:param args: Either Epoch, date, datetime or year, month, day values,
by themselves or inside a tuple or list
:type args: int, float, :py:class:`Epoch`, datetime, date, tuple,
list
:param leap_seconds: If different from zero, this is the value to be
used in the UTC->TAI conversion. If equals to zero, conversion is
disabled. If not given, UTC to TT conversion is carried out
(default).
:type leap_seconds: int, float
:returns: Epoch object corresponding to the input date
:rtype: :py:class:`Epoch`
:raises: ValueError if input values are in the wrong range.
:raises: TypeError if input values are of wrong type.
"""
t = Epoch()
if len(args) == 0:
raise ValueError("Invalid input: No date given")
# If we have only one argument, it can be an Epoch, a date, a datetime
# or a tuple/list
elif len(args) == 1:
if isinstance(args[0], Epoch):
t = args[0]
elif isinstance(args[0], (tuple, list)):
if len(args[0]) >= 3:
t = Epoch(args[0][0], args[0][1], args[0][2], **kwargs)
else:
raise ValueError("Invalid input")
elif isinstance(args[0], datetime.datetime) | |
<gh_stars>0
#! /usr/bin/env python
# 22 October 2010
# <NAME>
# <EMAIL>
#
# Neuromorphometrics, Inc.
# 22 Westminster Street
# Somerville, MA 02144-1630 USA
#
# http://neuromorphometrics.com
#
#*********************************************************************
#
# (c) Copyright 2010 Neuromorphometrics, Inc. All rights reserved
#
#*********************************************************************
import sys
import string
import os
import re
import array
import math
import glob
import datetime
import numpy as np
from optparse import OptionParser
from xml.dom import minidom
usage = "%prog [options] inFile.eps outFile.eps\n\
\n\
This program identifies and recolors items in an eps file.\n\
\n\
Examples:\n\
%prog -p --color 'RRR GGG BBB' inFile.eps preparedFile.eps \n\
%prog --mapFile labelFileName.xml preparedFile.eps finalFile.eps"
parser = OptionParser(usage)
parser.add_option("-p", "--prepare",
action="store_true", dest="doPrepare", default=False,
help="prepare the file for coloring "
"[default: %default]")
parser.add_option("-c", "--color",
action="store", type="string", dest="findMeColor",
default='255 255 0',
help="color used to identify colored items "
"[default: %default]")
parser.add_option("-m", "--mapFile",
action="store", type="string", dest="mapFileName",
default='parcLabels.xml',
help="Name to color mapping file "
"[default: %default]")
parser.add_option("-d", "--csvFile",
action="store", type="string", dest="csvFileName",
default='GMM_matrix.csv',
help="Name to CSV matrix file of the variance for EBM, GMM or the 3 cluster matrices for Dirichet processes"
"[default: %default]")
parser.add_option("-o", "--overrideCount",
action="store", type="int", dest="overrideCount",
default='0',
help="Override number of colors found (for testing) "
"[default: %default]")
parser.add_option("-v", action="store_true", dest="verbose", default=False,
help="say what is going on "
"[default: %default]")
parser.add_option("-q", action="store_false", dest="verbose",
help="don't say what is going on")
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("2 arguments are required")
# Get required arguments
inFileName = args[0]
outFileName = args[1]
if options.verbose:
print "inFileName = %s" % inFileName
print "outFileName = %s" % outFileName
print "color = \"%s\"" % options.findMeColor
print "mapFile = %s" % options.mapFileName
# This program takes an eps file and can do one of two things:
# 1) Prepare an eps file to be recolored by identifying the items in
# the eps file that are colored, or
# 2) Recolor a prepared eps file given a color mapping file.
#
# To prepare an eps file, first "eps2eps" is run on it to remove the
# cruft. Then the file is written out for each colored item found with
# that colored item set to a given color (yellow by default), opened
# for viewing, and then asks for an identifier for that colored item.
# Once all desired colored items are identified, the eps file is written
# out with a comment indicating the identifier for each color.
#
# To recolor the prepared eps file, a mapping file is read that gives
# the color for each of the identifiers that might be found in the
# eps file comments. The mapping file is XML like this:
#
# <?xml version="1.0"?>
# <LabelList>
# <Label>
# <Name>Unlabeled</Name>
# <Number>1</Number>
# <RGBColor>0 0 0</RGBColor>
# </Label>
# ...
# <Label>
# <Name><NAME></Name>
# <Number>74</Number>
# <RGBColor>236 217 151</RGBColor>
# </Label>
# </LabelList>
#
# The <Number> tag is not used here, but it is used by NVM!
#
# After reading the mapping file, the eps file is read and the colors
# in the mapping file are substituted for the original colors for each
# of the identifiers, and then the final recolored eps file is written
# out.
# Label names and numbers parsed from the XML map file; filled in at
# runtime by create_images().
labelList = [ ]
labelNums = [ ]
#RED = (255, 0, 0)
#YELLOW = (255 255 0)
# RGB string used for regions that are not part of the event ordering.
GRAY = '128 128 128'
# stages 6,12,18,24,36
# Label numbers (left/right region pairs) in the order in which the
# corresponding regions become abnormal.
ABNORMAL_ORDER = [170, 171, 132, 133, 154, 155, 106, 107, 200, 201, 144, 145, 122, 123, 180, 181, 202, 203, 152, 153, 102, 103, 118, 119, 172, 173, 166, 167, 190, 191, 160, 161, 128, 129, 168, 169, 142, 143, 198, 199, 195, 196, 184, 185]
# Cutoff index into ABNORMAL_ORDER for each stage.
# NOTE(review): 133 appears twice, so two consecutive stages share the
# same cutoff — confirm the duplication is intentional.
stageIndexAbnormal = [0] + [ABNORMAL_ORDER.index(x) + 1 for x in [133, 133, 181, 169 ]]
STAGE_NR_LABELS = [6,12,18,24,36]
# stage values start from 0: ABETA142 = 0, PTAU181P = 1, TAU = 2, ..
# Maps a region label number to its event (row) index in the CSV matrices.
NUMS_TO_EVENT = {170:6, 171:6, 132:11, 133:11, 154:18, 155:18, 106:19, 107:19, 200:20, 201:20, 144:21, 145:21, 122:22, 123:22, 180:23, 181:23, 202:24, 203:24, 152:26, 153:26, 102:27, 103:27, 118:28, 119:28, 172:30, 173:30, 166:31, 167:31, 190:32, 191:32, 160:33, 161:33, 128:34, 129:34, 168:35, 169:35, 142:36, 143:36, 198:37, 199:37, 195:39, 196:39, 184:40, 185:40}
CSV_MATRICES = ['EBM_matrix.csv', 'GMM_matrix.csv', 'cluster1_matrix.csv', 'cluster2_matrix.csv', 'cluster3_matrix.csv']
# One output image folder per matrix, named after the matrix prefix.
OUT_FOLDERS = [ "ordering_figures_final/images/%s" % x.split("_")[0] for x in CSV_MATRICES]
# 1st_ball=ABETA(event at index 0) 2nd_ball=PTAU(index 0) ... 7th_ball=FDG(index 12)
BALL_ABN_IND = [0,1,2,3,4,5,12]
NR_STAGES = len(stageIndexAbnormal)
NR_MATRICES = len(CSV_MATRICES)
NR_EVENTS = 42
NR_BALLS = 7
def getInterpolatedColor(abn_level):
# abn_level = 0 -> yellow
# abn_level = 1 -> red
print "abn_level: %f" % abn_level
x = int((1-abn_level)*255)
print "x:%f" % x
return "255 %d 0" % x
def create_images():
if os.access(options.mapFileName,os.F_OK): # then file exists
# Read color map file
xmldoc = minidom.parse(options.mapFileName)
xmlLabelList = xmldoc.getElementsByTagName("Label")
for xmlLabel in xmlLabelList:
name = xmlLabel.getElementsByTagName("Name")
labelList.append(name[0].firstChild.data)
uNumber = xmlLabel.getElementsByTagName("Number")
number = int(uNumber[0].firstChild.data)
labelNums.append(number)
eventsAbnormalityAll = np.zeros([NR_MATRICES, NR_STAGES, NR_EVENTS], float)
#for matrixName,outFolder in zip([CSV_MATRICES[0]], [OUT_FOLDERS[0]]):
for matrixIndex in range(len(CSV_MATRICES)):
matrixName = CSV_MATRICES[matrixIndex]
outFolder = OUT_FOLDERS[matrixIndex]
matrix = np.loadtxt(open(matrixName,"rb"),delimiter=",")
for stageIndex in range(NR_STAGES):
#print matrix
# for each event get the sum of all the probabilities until the current stage
eventsAbnormality = np.sum(matrix[:,:STAGE_NR_LABELS[stageIndex]],1)
print eventsAbnormality
assert(len(eventsAbnormality) == NR_EVENTS)
eventsAbnormalityAll[matrixIndex, stageIndex, :] = eventsAbnormality
labelColors = []
redNums = ABNORMAL_ORDER[:stageIndexAbnormal[stageIndex]]
yellowNums = set(ABNORMAL_ORDER) - set(redNums)
for labelNum in labelNums:
if labelNum in ABNORMAL_ORDER:
color = getInterpolatedColor(eventsAbnormality[NUMS_TO_EVENT[labelNum]])
print color
labelColors.append(color)
else:
labelColors.append(GRAY)
print "\nColoring file %s using matrix %s, stage %d ..." % (inFileName, matrixName, stageIndex)
# Read input file and write ouput, changing the colors after
# finding '% recoloreps LABELSTRING' comments
ff = open(inFileName,'r')
contents = ff.read()
ff.close()
contentLines = contents.split('\n')
# open output file for writing
newOutFileName = "%s/stage_%d.eps" % (outFolder, STAGE_NR_LABELS[stageIndex])
print newOutFileName
of = open(newOutFileName,'w')
#of = open(os.devnull,'w')
skipNextLine = False
for line in contentLines:
if skipNextLine == False:
h = re.compile('% recoloreps .*')
hS = h.search(line)
if hS: # if this is a color comment line
if options.verbose:
print 'Found color comment: ', line
of.write(line+'\n')
toFind = line[13:]
if toFind in labelList:
if options.verbose:
print 'looking for color for ', toFind
index = labelList.index(toFind)
if options.verbose:
print 'index is ', index
color = labelColors[index]
if options.verbose:
print 'writing color: ', color
of.write(color+' rG\n')
skipNextLine = True
else:
if options.verbose:
print toFind + ' is not in labelList\n'
else: # something other than color, print it out
of.write(line+'\n')
else:
if options.verbose:
print 'skipped actual color line\n'
skipNextLine = False
of.close()
# --------------------------------------------------------------------
#
if options.verbose:
print "All done, bye."
print eventsAbnormalityAll
return eventsAbnormalityAll
#sys.exit()
def create_latex(eventsAbnormalityAll, out_fold_prefix = ""):
text = r'''
\documentclass[11pt,a4paper,oneside]{report}
\usepackage{float}
\usepackage{tikz}
\usetikzlibrary{plotmarks}
\usepackage{amsmath,graphicx}
\usepackage{epstopdf}
\usepackage[font=normal,labelfont=bf]{caption}
\usepackage{subcaption}
\usepackage{color}
\usepackage[T1]{fontenc}
\usepackage{lmodern}
\usepackage{scalefnt}
% margin size
\usepackage[margin=1in]{geometry}
% tikz settings
\tikzstyle{state}=[circle,thick,draw=black, align=center, minimum size=2.1cm,
inner sep=0]
\tikzstyle{vertex}=[circle,thick,draw=black]
\tikzstyle{terminal}=[rectangle,thick,draw=black]
\tikzstyle{edge} = [draw,thick]
\tikzstyle{lo} = [edge,dotted]
\tikzstyle{hi} = [edge]
\tikzstyle{trans} = [edge,->]
\begin{document}
\belowdisplayskip=12pt plus 3pt minus 9pt
\belowdisplayshortskip=7pt plus 3pt minus 4pt
% scale parameter for the circles and the gradient
\tikzset{every picture/.append style={scale=0.6}}
% scale parameter for the upper and lower small brain images
\newcommand*{\scaleBrainImg}{0.3}
%col{x}{y}{z} respresents the color for ball z from matrix x at stage y (matrix x, stage y, ball z)
'''
for matrixIndex in range(NR_MATRICES):
for stageIndex in range(NR_STAGES):
for ballIndex in range(NR_BALLS):
# abnormality=0 -> yellow (greenComponent=1); abnormality=1 -> red (greenComponent=0)
greenVal = 1 - eventsAbnormalityAll[matrixIndex, stageIndex, BALL_ABN_IND[ballIndex]]
text += r'''
\definecolor{col''' + "%d%d%d%s" % (matrixIndex, stageIndex, ballIndex, out_fold_prefix) + '''}{rgb}{1,''' + "%.3f" % greenVal + ''',0}'''
text += '''\n\n '''
for matrixIndex in range(NR_MATRICES):
text += r'''
\begin{figure}[H]
\centering'''
for stageIndex in range(NR_STAGES):
text += r'''
%\begin{subfigure}[b]{0.15\textwidth}
\begin{tikzpicture}[scale=1.0,auto,swap]
% the two brain figures on top
\node (upper_brain) at (0,1.5) { \includegraphics*[scale=\scaleBrainImg,trim=0 0 240 0]{'''
text += "images/%s%s/stage_%d.eps" % (CSV_MATRICES[matrixIndex].split("_")[0], out_fold_prefix, STAGE_NR_LABELS[stageIndex]) + r'''}};
\node (lower_brain) at (0,-1.5) { \includegraphics*[scale=\scaleBrainImg,trim=240 0 0 0]{'''
text += "images/%s%s/stage_%d.eps" % (CSV_MATRICES[matrixIndex].split("_")[0], out_fold_prefix, STAGE_NR_LABELS[stageIndex]) + r'''}};
% the 6 circles
\draw[fill=''' + "col%d%d0%s" % (matrixIndex, stageIndex, out_fold_prefix) + '''] (-1.6,-3.4) circle [radius=0.33cm] node {\scriptsize A};
\draw[fill=''' + "col%d%d1%s" % (matrixIndex, stageIndex, out_fold_prefix) + '''] (-0.7,-3.4) circle [radius=0.33cm] node {\scriptsize P};
\draw[fill=''' + "col%d%d2%s" % (matrixIndex, stageIndex, out_fold_prefix) + '''] (0.2,-3.4) circle [radius=0.33cm] node {\scriptsize T};
\draw[fill=''' + "col%d%d3%s" % (matrixIndex, stageIndex, out_fold_prefix) + '''] (-1.6,-4.2) circle [radius=0.33cm] node {\scriptsize C1};
\draw[fill=''' + "col%d%d4%s" % (matrixIndex, stageIndex, out_fold_prefix) + '''] (-0.7,-4.2) circle [radius=0.33cm] node {\scriptsize C2};
\draw[fill=''' + "col%d%d5%s" % (matrixIndex, stageIndex, out_fold_prefix) + '''] (0.2,-4.2) circle [radius=0.33cm] node {\scriptsize C3};
% the big circle on the right
\draw[fill=''' + "col%d%d6%s" % (matrixIndex, stageIndex, out_fold_prefix) + '''] (1.3,-3.8) circle [radius=0.6cm] node {\scriptsize FDG};
\end{tikzpicture}
%\end{subfigure}
% next subfigure
\hspace{-1.5em}
~'''
text += r'''
\hspace{1em}
% the red-to-yellow gradient on the right
\begin{tikzpicture}[scale=1.0,auto,swap]
\shade[top color=red,bottom color=yellow] (0,0) rectangle (0.5,6);
\node[inner sep=0] (corr_text) at (0.2,6.5) {1};
\node[inner sep=0] (corr_text) at (0.2,-0.5) {0};
\end{tikzpicture}
\caption{''' + CSV_MATRICES[matrixIndex].split("_")[0] +'''}
\end{figure}
'''
text += r'''
% scale parameter for the font size in | |
"Ice Caves"
elif theme == "olmec":
return "Olmec"
elif theme == "volcano":
return "Volcana"
elif theme == "surface":
return "Surface"
return "Unknown"
# Path to the background image that will be shown behind the grid.
def background_for_theme(self, theme):
    """Return the path to the background image shown behind the grid for
    the given theme, defaulting to the cave background."""
    theme_backgrounds = {
        "cave": "bg_cave.png",
        "tidepool": "bg_tidepool.png",
        "babylon": "bg_babylon.png",
        "jungle": "bg_jungle.png",
        "temple": "bg_temple.png",
        "sunken": "bg_sunken.png",
        "gold": "bg_gold.png",
        "duat": "bg_temple.png",
        "eggplant": "bg_eggplant.png",
        "ice": "bg_ice.png",
        "olmec": "bg_stone.png",
        "volcano": "bg_volcano.png",
    }
    return self.textures_dir / theme_backgrounds.get(theme, "bg_cave.png")
# List of tiles that will be included in the tile palette even if they do not already
# exist in the level file, since they are likely options to be used for the current theme.
@staticmethod
def suggested_tiles_for_theme(theme):
    """Return tile names to seed the palette for `theme`.

    Combines tiles common to every theme with theme-specific suggestions;
    unknown themes get only the common tiles.
    """
    # Tiles useful in every theme.
    common_tiles = [
        "floor",
        "empty",
        "floor%50",
        "minewood_floor",
        "floor_hard",
        "floor_hard%50%floor",
        "push_block",
        "ladder",
        "ladder_plat",
        "entrance",
        "exit",
        "door",
        "door2",
        "door2_secret",
        "locked_door",
        "treasure",
        "treasure_chest",
        "treasure_vaultchest",
    ]
    def theme_tiles(theme):
        # Shared by the themes that can contain beehives (jungle, temple).
        beehive_tiles = [
            "beehive_floor",
            "beehive_floor%50",
            "honey_upwards",
            "honey_downwards",
            "bee",
        ]
        if theme == "cave":
            return [
                "bone_block",
                "platform",
                "arrow_trap",
                "totem_trap",
                "spikes",
                "snake",
                "bat",
                "skeleton",
                "caveman",
                "caveman_asleep",
                "caveman_asleep%50",
                "scorpion",
                "mole",
                "lizard",
                "critter_dungbeetle",
                "cookfire",
                "turkey",
                "yang",
                "cavemanboss",
                "autowalltorch",
                "litwalltorch",
                "ghist_shopkeeper",
                "ghist_door2",
            ]
        elif theme == "volcano":
            return [
                "powder_keg",
                "timed_powder_keg",
                "falling_platform",
                "chain_ceiling",
                "chainandblocks_ceiling",
                "spikeball_trap",
                "conveyorbelt_left",
                "conveyorbelt_right",
                "factory_generator",
                "robot",
                "imp",
                "firebug",
                "caveman",
                "caveman_asleep",
                "lavamander",
                "bat",
                "vampire",
                "vlad",
                "oldhunter",
                "critter_snail",
                "lava",
                "vlad_floor",
                "nonreplaceable_babylon_floor",
                "drill",
                "udjat_socket",
                "slidingwall_switch",
                "slidingwall_ceiling",
                "crown_statue",
            ]
        elif theme == "jungle":
            return [
                "stone_floor",
                "vine",
                "growable_vine",
                "spikes",
                "bush_block",
                "thorn_vine",
                "jungle_spear_trap",
                "tree_base",
                "cookfire",
                "caveman",
                "mantrap",
                "witchdoctor",
                "tikiman",
                "mosquito",
                "giant_spider",
                "hangspider",
                "bat",
                "monkey",
                "critter_butterfly",
                "snap_trap",
            ] + beehive_tiles
        elif theme == "olmec":
            return [
                "stone_floor",
                "crate_parachute",
                "storage_guy",
                "storage_floor",
                "autowalltorch",
                "olmec",
                "ankh",
                "pillar",
                "critter_crab",
                "critter_locust",
            ]
        elif theme == "tidepool":
            return [
                "pagoda_floor",
                "pagoda_floor%50%floor",
                "climbing_pole",
                "growable_climbing_pole",
                "pagoda_platform",
                "spikes",
                "bone_block",
                "powder_keg",
                "water",
                "jiangshi",
                "assassin",
                "octopus",
                "hermitcrab",
                "crabman",
                "flying_fish",
                "critter_fish",
                "critter_anchovy",
                "critter_crab",
                "giantclam",
                "fountain_head",
                "fountain_drain",
                "slidingwall_switch",
                "slidingwall_ceiling",
                "minewood_floor",
                "excalibur",
                "excalibur_stone",
                "haunted_corpse",
            ]
        elif theme == "temple":
            return [
                "quicksand",
                "temple_floor",
                "temple_floor%50",
                "pot",
                "crushtrap",
                "crushtraplarge",
                "catmummy",
                "cobra",
                "crocman",
                "anubis",
                "mummy",
                "sorceress",
                "necromancer",
                "critter_locust",
            ] + beehive_tiles
        elif theme == "ice":
            return [
                "minewood_floor",
                "icefloor",
                "icefloor%50",
                "spikes",
                "upsidedown_spikes",
                "falling_platform",
                "thinice",
                "spring_trap",
                "forcefield",
                "timed_forcefield",
                "forcefield_top",
                "litwalltorch",
                "autowalltorch",
                "cookfire",
                "landmine",
                "storage_guy",
                "storage_floor",
                "eggplant_altar",
                "moai_statue",
                "eggplant_child",
                "mothership_floor",
                "plasma_cannon",
                "alienqueen",
                "shopkeeper_vat",
                "alien_generator",
                "alien",
                "ufo",
                "yeti",
                "empty_mech",
                "critter_penguin",
                "critter_firefly",
            ]
        elif theme == "babylon":
            return [
                "babylon_floor",
                "babylon_floor%50%floor",
                "laser_trap",
                "spark_trap",
                "forcefield",
                "timed_forcefield",
                "forcefield_top",
                "elevator",
                "zoo_exhibit",
                "litwalltorch",
                "mushroom_base",
                "lava",
                "lava%50%floor",
                "lamassu",
                "olmite",
                "ufo",
                "empty_mech",
                "critter_drone",
                "ushabti",
                "palace_floor",
                "palace_entrance",
                "palace_table",
                "palace_table_tray",
                "palace_chandelier",
                "palace_candle",
                "palace_bookcase",
                "stone_floor",
                "bone_block",
                "madametusk",
                "bodyguard",
            ]
        elif theme == "sunken":
            return [
                "sunken_floor",
                "sunken_floor%50",
                "spikes",
                "pipe",
                "regenerating_block",
                "bigspear_trap",
                "bone_block",
                "sticky_trap",
                "storage_guy",
                "storage_floor",
                "autowalltorch",
                "mother_statue",
                "eggplant_door",
                "giant_frog",
                "guts_floor",
                "water",
                "frog",
                "firefrog",
                "tadpole",
                "giantfly",
                "critter_slime",
                "skull_drop_trap",
                "eggsac",
            ]
        elif theme == "gold":
            return [
                "quicksand",
                "cog_floor",
                "cog_floor%50",
                "crushtrap",
                "crushtraplarge",
                "slidingwall_switch",
                "slidingwall_ceiling",
                "crocman",
                "leprechaun",
                "mummy",
                "cobra",
                "sorceress",
                "critter_locust",
            ]
        elif theme == "duat":
            return [
                "duat_floor",
                "duat_floor%50",
                "chain_ceiling",
                "lava",
                "ammit%50",
                "crocman",
                "snake",
                "cobra",
                "osiris",
                "anubis2",
            ]
        elif theme == "eggplant":
            return [
                "pagoda_floor",
                "pagoda_floor%50",
                "pagoda_platform",
                "slidingwall_switch",
                "slidingwall_ceiling",
                "fountain_head",
                "fountain_drain",
                "water",
                "vine",
                "growable_vine",
                "jumpdog",
                "minister",
                "yama",
                "empress_grave",
            ]
        # Unrecognized theme: no theme-specific suggestions.
        return []
    return common_tiles + theme_tiles(theme)
# Shows an error dialog when attempting to open a level using an unrecognized template format.
def show_format_error_dialog(self, lvl):
    """Prompt the user to create a room template format after a level
    file failed to load with the known formats."""
    def retry_read():
        # Once a new format exists, attempt to read the level file again.
        self.read_custom_lvl_file(lvl)
    self.show_setroom_create_dialog(
        "Couldn't find room templates",
        "Create a new room template format to load this level file?\n{x} and {y} are the coordinates of the room.\n",
        "Continue",
        retry_read,
    )
# Popup dialog with widgets to create a new room template.
def show_setroom_create_dialog(self, title, message, button_title, button_action):
    """Popup dialog with widgets to create a new room template format.

    :param title: window title for the popup
    :param message: explanatory text shown at the top of the dialog
    :param button_title: label for the confirm button
    :param button_action: callback invoked after the new format is added
    """
    win = PopupWindow(title, self.modlunky_config)
    # NOTE: rebinds the `message` parameter to the Label widget.
    message = ttk.Label(win, text=message)
    name_label = ttk.Label(win, text="Name: ")
    name_entry = ttk.Entry(win, foreground="gray")
    format_label = ttk.Label(win, text="Format: ")
    format_entry = ttk.Entry(win, foreground="gray")
    win.columnconfigure(1, weight=1)
    message.grid(row=0, column=0, columnspan=2, sticky="nswe")
    name_label.grid(row=1, column=0, sticky="nse")
    name_entry.grid(row=1, column=1, sticky="nswe")
    format_label.grid(row=2, column=0, sticky="nse")
    format_entry.grid(row=2, column=1, sticky="nswe")
    # Gray placeholder text until the user edits a field.
    name_entry.insert(0, "Optional")
    format_entry.insert(0, "setroom{y}_{x}")
    name_entry_changed = False
    format_entry_changed = False
    # If displaying a placeholder, delete the placeholder text and update the font color
    # when the field is focused.
    def focus_name(_):
        nonlocal name_entry_changed
        if name_entry_changed:
            return
        name_entry.delete("0", "end")
        name_entry.config(foreground="black")
    def focus_format(_):
        nonlocal format_entry_changed
        if format_entry_changed:
            return
        format_entry.delete("0", "end")
        format_entry.config(foreground="black")
    # When defocusing the field, if the field is empty, replace the text with the
    # placeholder text and change the font color.
    def defocus_name(_):
        nonlocal name_entry_changed
        if str(name_entry.get()) == "":
            name_entry_changed = False
            name_entry.insert(0, "Optional")
            name_entry.config(foreground="gray")
        else:
            name_entry_changed = True
    def defocus_format(_):
        nonlocal format_entry_changed
        if str(format_entry.get()) == "":
            format_entry_changed = False
            format_entry.insert(0, "setroom{y}_{x}")
            format_entry.config(foreground="gray")
        else:
            format_entry_changed = True
    name_entry.bind("<FocusIn>", focus_name)
    name_entry.bind("<FocusOut>", defocus_name)
    format_entry.bind("<FocusIn>", focus_format)
    format_entry.bind("<FocusOut>", defocus_format)
    # Checkbox to enable or disable vanilla setrooms for themes such as ice caves which
    # crash without them.
    add_vanilla_var = tk.IntVar()
    add_vanilla_var.set(True)
    add_vanilla_label = ttk.Label(win, text="Include vanilla setrooms:")
    add_vanilla_check = ttk.Checkbutton(win, variable=add_vanilla_var)
    add_vanilla_label.grid(row=3, column=0, sticky="nse")
    add_vanilla_check.grid(row=3, column=1, sticky="nsw")
    add_vanilla_tip = ttk.Label(
        win,
        text=(
            "It is recommended to include vanilla setrooms.\n"
            "This setting adds setrooms for some themes which require them.\n"
            "There could be errors if not using this in some themes."
        ),
    )
    add_vanilla_tip.grid(row=4, column=0, columnspan=2, sticky="nswe")
    win.rowconfigure(5, minsize=20)
    buttons = ttk.Frame(win)
    buttons.grid(row=6, column=0, columnspan=2, sticky="nswe")
    buttons.columnconfigure(0, weight=1)
    buttons.columnconfigure(1, weight=1)
    def continue_open():
        # Validate the fields, create the save format, then close and
        # run the caller-provided follow-up action.
        template_format = str(format_entry.get())
        name = str(name_entry.get()) if name_entry_changed else template_format
        # NOTE(review): the two rejected formats below use '-' while the
        # placeholder uses '_'; the placeholder itself is already rejected
        # via format_entry_changed — confirm the dash variants are the
        # intended reserved names.
        if (
            not format_entry_changed
            or template_format == ""
            or name == ""
            or template_format == "setroom{y}-{x}"
            or template_format == "setroom{x}-{y}"
        ):
            return
        save_format = CustomLevelSaveFormat(
            name, template_format, bool(add_vanilla_var.get())
        )
        win.destroy()
        self.add_save_format(save_format)
        if button_action:
            button_action()
    continue_button = ttk.Button(buttons, text=button_title, command=continue_open)
    continue_button.grid(row=0, column=0, sticky="nswe")
    cancel_button = ttk.Button(buttons, text="Cancel", command=win.destroy)
    cancel_button.grid(row=0, column=1, sticky="nswe")
# Create a new save format and save it to the config file to be loaded on future launches.
def add_save_format(self, save_format):
    """Register a new custom save format, add its radio button, and
    persist the full list to the config file so it loads on future
    launches."""
    self.custom_save_formats.append(save_format)
    self.add_save_format_radio(save_format, self.save_format_frame)
    self.modlunky_config.custom_level_editor_custom_save_formats = [
        fmt.to_json() for fmt in self.custom_save_formats
    ]
    self.modlunky_config.save()
# Updates the current radio button in the save format select options menu to the
# proper save format.
def update_save_format_variable(self, save_format):
    """Point the save-format radio variable at `save_format` and select
    the matching radio button (base formats are indexed before custom
    ones)."""
    all_formats = self.base_save_formats + self.custom_save_formats
    if save_format in all_formats:
        self.save_format_variable.set(all_formats.index(save_format))
    self.save_format_radios[self.save_format_variable.get()].select()
# Adds a warning message below the save format radio list based on the selected
# save format.
def update_save_format_warning(self, save_format):
    """Refresh the warning label under the save-format list for formats
    that can crash or misbehave in some themes."""
    def message_for(fmt):
        if fmt == CustomLevelSaveFormat.level_sequence():
            return (
                "This save format can be used to load saved level files into the "
                "Custom Levels or Level Sequence packages.\n"
                "(https://github.com/jaythebusinessgoose/LevelSequence)"
            )
        if fmt == CustomLevelSaveFormat.vanilla():
            return (
                "WARNING: Files saved using vanilla setrooms will only work when loaded "
                "into themes that use them. Otherwise, it will crash the game. Also, themes "
                "that do allow loading vanilla setrooms will only load the required setrooms "
                "for the default size of the level. It is recommended to use another save "
                "format and use scripts to load the proper rooms."
            )
        if not fmt.include_vanilla_setrooms:
            return (
                "WARNING: Some themes override the desired level with a vanilla setroom, so it "
                "is recommended to use a save format that includes the correct vanilla setrooms."
            )
        return ""
    self.save_format_warning_message["text"] = message_for(save_format)
# Update the current save format and update the UIs to represent this.
    def set_current_save_format(self, save_format):
        """Make *save_format* the active format, then sync the dependent
        UI: the warning message and the radio-button selection."""
        self.current_save_format = save_format
        self.update_save_format_warning(save_format)
        self.update_save_format_variable(save_format)
# Called when a save format radio button is selected.
def select_save_format_radio(self):
save_format_index = self.save_format_variable.get()
save_format = None
if save_format_index < len(self.base_save_formats):
save_format = self.base_save_formats[save_format_index]
else:
save_format = self.custom_save_formats[
save_format_index - len(self.base_save_formats)
]
if not save_format:
return
self.set_current_save_format(save_format)
self.default_save_format = save_format
self.modlunky_config.custom_level_editor_default_save_format = (
save_format.to_json()
)
self.modlunky_config.save()
def add_save_format_radio(self, save_format, save_format_frame):
index = len(self.save_format_radios)
radio = tk.Radiobutton(
save_format_frame,
text=save_format.name,
variable=self.save_format_variable,
indicatoron=True,
value=index,
command=self.select_save_format_radio,
)
radio.grid(column=0, row=index, sticky="nsw")
self.save_format_radios.append(radio)
label = tk.Label(save_format_frame, text=save_format.room_template_format)
label.grid(column=1, row=index, sticky="nsw")
# Updates the level size from the options menu.
def update_custom_level_size(self, width, height):
self.size_select_button["state"] = tk.DISABLED
if width == self.lvl_width and height == self.lvl_height:
return
# If the new level size is | |
** n_ * WC("b", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons52,
cons48,
cons228,
cons258,
cons40,
)
rule1318 = ReplacementRule(pattern1318, replacement1318)
pattern1319 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n2_ * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons52,
cons19,
cons4,
cons52,
cons48,
cons259,
cons40,
)
rule1319 = ReplacementRule(pattern1319, replacement1319)
pattern1320 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** q_
* (a_ + x_ ** n2_ * WC("c", S(1)) + x_ ** n_ * WC("b", S(1))) ** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons5,
cons52,
cons48,
cons228,
cons258,
cons149,
)
rule1320 = ReplacementRule(pattern1320, replacement1320)
pattern1321 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n2_ * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))) ** q_,
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons5,
cons52,
cons48,
cons259,
cons149,
)
rule1321 = ReplacementRule(pattern1321, replacement1321)
pattern1322 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** q_
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons48,
cons228,
cons466,
cons737,
cons398,
cons170,
)
rule1322 = ReplacementRule(pattern1322, replacement1322)
pattern1323 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** q_,
x_,
),
cons2,
cons8,
cons29,
cons50,
cons48,
cons466,
cons737,
cons398,
cons170,
)
rule1323 = ReplacementRule(pattern1323, replacement1323)
pattern1324 = Pattern(
Integral(
x_ ** m_
* (d_ + x_ ** n_ * WC("e", S(1))) ** q_
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons48,
cons228,
cons466,
cons737,
cons398,
cons269,
)
rule1324 = ReplacementRule(pattern1324, replacement1324)
pattern1325 = Pattern(
Integral(
x_ ** m_
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** q_,
x_,
),
cons2,
cons8,
cons29,
cons50,
cons48,
cons466,
cons737,
cons398,
cons269,
)
rule1325 = ReplacementRule(pattern1325, replacement1325)
pattern1326 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons52,
cons48,
cons228,
cons466,
cons738,
cons388,
cons739,
)
rule1326 = ReplacementRule(pattern1326, replacement1326)
pattern1327 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons52,
cons48,
cons466,
cons738,
cons388,
cons739,
)
rule1327 = ReplacementRule(pattern1327, replacement1327)
pattern1328 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons52,
cons48,
cons466,
)
rule1328 = ReplacementRule(pattern1328, replacement1328)
pattern1329 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons52,
cons48,
cons48,
cons466,
)
rule1329 = ReplacementRule(pattern1329, replacement1329)
pattern1330 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons5,
cons52,
cons48,
cons228,
cons150,
cons20,
CustomConstraint(With1330),
)
rule1330 = ReplacementRule(pattern1330, replacement1330)
pattern1331 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons5,
cons52,
cons48,
cons150,
cons20,
CustomConstraint(With1331),
)
rule1331 = ReplacementRule(pattern1331, replacement1331)
pattern1332 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** m_
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons5,
cons52,
cons48,
cons228,
cons150,
cons369,
cons40,
)
rule1332 = ReplacementRule(pattern1332, With1332)
pattern1333 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** m_
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons5,
cons52,
cons48,
cons150,
cons369,
cons40,
)
rule1333 = ReplacementRule(pattern1333, With1333)
pattern1334 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1)))
* (a_ + x_ ** n2_ * WC("c", S(1)) + x_ ** n_ * WC("b", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons48,
cons228,
cons150,
cons246,
cons165,
cons96,
cons740,
cons696,
)
rule1334 = ReplacementRule(pattern1334, replacement1334)
pattern1335 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n2_ * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons48,
cons150,
cons246,
cons165,
cons96,
cons740,
cons696,
)
rule1335 = ReplacementRule(pattern1335, replacement1335)
pattern1336 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1)))
* (a_ + x_ ** n2_ * WC("c", S(1)) + x_ ** n_ * WC("b", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons48,
cons228,
cons150,
cons13,
cons165,
cons512,
cons741,
cons696,
)
rule1336 = ReplacementRule(pattern1336, replacement1336)
pattern1337 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n2_ * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons48,
cons150,
cons13,
cons165,
cons512,
cons741,
cons696,
)
rule1337 = ReplacementRule(pattern1337, replacement1337)
pattern1338 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1)))
* (a_ + x_ ** n2_ * WC("c", S(1)) + x_ ** n_ * WC("b", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons48,
cons228,
cons150,
cons246,
cons139,
cons532,
cons696,
)
rule1338 = ReplacementRule(pattern1338, replacement1338)
pattern1339 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n2_ * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons48,
cons150,
cons246,
cons139,
cons532,
cons696,
)
rule1339 = ReplacementRule(pattern1339, replacement1339)
pattern1340 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1)))
* (a_ + x_ ** n2_ * WC("c", S(1)) + x_ ** n_ * WC("b", S(1))) ** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons48,
cons228,
cons150,
cons13,
cons139,
cons696,
)
rule1340 = ReplacementRule(pattern1340, replacement1340)
pattern1341 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n2_ * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))),
x_,
),
cons2,
| |
hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/33'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/48'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/49'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/50'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/1'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/2'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/3'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/17'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/6'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/7'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/8'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/10'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/1'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/2'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/1'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/2'], hostname=hn201)
metric_name = 'cisco_aci.fabric.port.egr_bytes.flood.cum'
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=3196.0, tags=tags101 + ['port:eth1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=3196.0, tags=tags101 + ['port:eth1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=1632.0, tags=tags101 + ['port:eth1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=3069384.0, tags=tags101 + ['port:eth1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=5334816.0, tags=tags101 + ['port:eth1/49'], hostname=hn101)
aggregator.assert_metric(metric_name, value=8725392.0, tags=tags101 + ['port:eth1/50'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/33'], hostname=hn102)
aggregator.assert_metric(metric_name, value=3067140.0, tags=tags102 + ['port:eth1/48'], hostname=hn102)
aggregator.assert_metric(metric_name, value=826.0, tags=tags102 + ['port:eth1/49'], hostname=hn102)
aggregator.assert_metric(metric_name, value=5323724.0, tags=tags102 + ['port:eth1/50'], hostname=hn102)
aggregator.assert_metric(metric_name, value=2992.0, tags=tags102 + ['port:eth1/1'], hostname=hn102)
aggregator.assert_metric(metric_name, value=2992.0, tags=tags102 + ['port:eth1/2'], hostname=hn102)
aggregator.assert_metric(metric_name, value=1428.0, tags=tags102 + ['port:eth1/3'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/6'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/7'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/8'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/10'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/1'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/2'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/1'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/2'], hostname=hn201)
metric_name = 'cisco_aci.fabric.port.ingr_bytes.unicast.cum'
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=348576910354.0, tags=tags101 + ['port:eth1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=261593756336.0, tags=tags101 + ['port:eth1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=365920898063.0, tags=tags101 + ['port:eth1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=184287764.0, tags=tags101 + ['port:eth1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=538835221922.0, tags=tags101 + ['port:eth1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=2750448258067.0, tags=tags101 + ['port:eth1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=40951035120.0, tags=tags101 + ['port:eth1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=2375501844518679.0, tags=tags101 + ['port:eth1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=861805170.0, tags=tags101 + ['port:eth1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=6542704613.0, tags=tags101 + ['port:eth1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=222953108982.0, tags=tags101 + ['port:eth1/49'], hostname=hn101)
aggregator.assert_metric(metric_name, value=212196336085.0, tags=tags101 + ['port:eth1/50'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/33'], hostname=hn102)
aggregator.assert_metric(metric_name, value=5719602980.0, tags=tags102 + ['port:eth1/48'], hostname=hn102)
aggregator.assert_metric(metric_name, value=309410271266.0, tags=tags102 + ['port:eth1/49'], hostname=hn102)
aggregator.assert_metric(metric_name, value=282668234330.0, tags=tags102 + ['port:eth1/50'], hostname=hn102)
aggregator.assert_metric(metric_name, value=163927500335.0, tags=tags102 + ['port:eth1/1'], hostname=hn102)
aggregator.assert_metric(metric_name, value=156170003449.0, tags=tags102 + ['port:eth1/2'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/3'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/6'], hostname=hn102)
aggregator.assert_metric(metric_name, value=484380566541.0, tags=tags102 + ['port:eth1/7'], hostname=hn102)
aggregator.assert_metric(metric_name, value=56517467206.0, tags=tags102 + ['port:eth1/8'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/10'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102)
aggregator.assert_metric(metric_name, value=837500850233.0, tags=tags202 + ['port:eth1/1'], hostname=hn202)
aggregator.assert_metric(metric_name, value=656825388437.0, tags=tags202 + ['port:eth1/2'], hostname=hn202)
aggregator.assert_metric(metric_name, value=812202557859.0, tags=tags201 + ['port:eth1/1'], hostname=hn201)
aggregator.assert_metric(metric_name, value=654650672906.0, tags=tags201 + ['port:eth1/2'], hostname=hn201)
metric_name = 'cisco_aci.fabric.port.egr_total.bytes.cum'
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=370712178855.0, tags=tags101 + ['port:eth1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=263367725436.0, tags=tags101 + | |
if arguments.get('limit') is None:
limit = 0
else:
limit = int(arguments.get('limit'))
try:
# Creates the full query
if len(sort) and offset:
entities = self.mongodb.mongoConn.Entities.find(
query, fields).skip(offset).sort(sort).limit(limit)
elif offset:
entities = self.mongodb.mongoConn.Entities.find(
query, fields).skip(offset).limit(limit)
elif len(sort):
entities = self.mongodb.mongoConn.Entities.find(
query, fields).sort(sort).limit(limit)
else:
entities= self.mongodb.mongoConn.Entities.find(
query, fields).limit(limit)
if count_opt:
# Sets count header
headers["Count"] = entities.count()
entities = list(entities)
if not len(entities):
self.helpers.logger.info(
self.program + " 404: " \
+ self.helpers.confs["errorMessages"][str(404)]["Description"])
return self.broker.respond(
404, self.helpers.confs["errorMessages"][str(404)],
{}, False, accepted)
else:
# Converts data to key -> value
if keyValues_opt:
newData = []
for i, entity in enumerate(entities):
dataHolder = {}
for attr in entity:
if isinstance(entity[attr], str):
dataHolder.update({attr: entity[attr]})
if isinstance(entity[attr], dict):
dataHolder.update({attr: entity[attr]["value"]})
if isinstance(entity[attr], list):
dataHolder.update({attr: entity[attr]})
newData.append(dataHolder)
entities = newData
# Converts data to values
elif values_opt:
newData = []
for i, entity in enumerate(entities):
dataHolder = []
for attr in entity:
if isinstance(entity[attr], str):
dataHolder.append(entity[attr])
if isinstance(entity[attr], dict):
dataHolder.append(entity[attr]["value"])
if isinstance(entity[attr], list):
dataHolder.append(entity[attr])
newData.append(dataHolder)
entities = newData
# Converts data to unique values
elif unique_opt:
newData = []
for i, entity in enumerate(entities):
dataHolder = []
for attr in entity:
if isinstance(entity[attr], str):
dataHolder.append(entity[attr])
if isinstance(entity[attr], dict):
dataHolder.append(entity[attr]["value"])
if isinstance(entity[attr], list):
dataHolder.append(entity[attr])
[newData.append(x) for x in dataHolder if x not in newData]
entities = newData
self.helpers.logger.info(
self.program + " 200: " \
+ self.helpers.confs["successMessage"][str(200)]["Description"])
return self.broker.respond(200, entities, headers,
False, accepted)
except Exception as e:
self.helpers.logger.info(
self.program + " 404: " \
+ self.helpers.confs["errorMessages"][str(404)]["Description"])
self.helpers.logger.info(str(e))
return self.broker.respond(404, self.helpers.confs["errorMessages"][str(404)],
{}, False, accepted)
def create_entity(self, data, accepted=None):
    """ Creates a new HIASCDI Entity.

    Unknown entity types fall back to the generic "Thing" collection.

    References:
        FIWARE-NGSI v2 Specification
        https://fiware.github.io/specifications/ngsiv2/stable/
        Reference
            - Entities
            - Create Entity
    """
    # Avoid a shared mutable default argument.
    accepted = [] if accepted is None else accepted
    if data["type"] not in self.mongodb.collextions:
        data["type"] = "Thing"
    _id = self.mongodb.mongoConn.Entities.insert(data)
    # insert() returns the new document id (or raises). The original
    # test `str(_id) is not False` was always True; check the id itself.
    if _id is not None:
        return self.broker.respond(
            201, {}, {"Location": "v1/entities/" \
            + data["id"] + "?type=" + data["type"]},
            False, accepted)
    else:
        return self.broker.respond(
            400, self.helpers.confs["errorMessages"]["400b"],
            {}, False, accepted)
def get_entity(self, typeof, _id, attrs, options, metadata,
               attributes=False, accepted=None):
    """ Gets a specific HIASCDI Entity.

    Looks the entity up by id (and optionally type), applies the attrs /
    metadata projection filters, then reshapes the result according to
    the keyValues / values / unique representation options.

    References:
        FIWARE-NGSI v2 Specification
        https://fiware.github.io/specifications/ngsiv2/stable/
        Reference
            - Entities
            - Entity by ID
            - Retrieve Entity / Retrieve Entity Attributes
    """
    # Avoid a shared mutable default argument.
    accepted = [] if accepted is None else accepted
    keyValues_opt = False
    values_opt = False
    unique_opt = False
    # Processes the options parameter (the unused count flag was removed)
    if options is not None:
        for option in options.split(","):
            keyValues_opt = True if option == "keyValues" else keyValues_opt
            values_opt = True if option == "values" else values_opt
            unique_opt = True if option == "unique" else unique_opt
    query = {'id': _id}
    # Removes the MongoDB ID from the projection
    fields = {
        '_id': False
    }
    clear_builtin = False
    attribs = []
    if attrs is not None:
        # Processes attrs parameter
        attribs = attrs.split(",")
        if '*' in attribs:
            # '*' keeps everything except builtin timestamps that were
            # not explicitly requested.
            if 'dateCreated' not in attribs:
                fields.update({'dateCreated': False})
            if 'dateModified' not in attribs:
                fields.update({'dateModified': False})
            if 'dateExpired' not in attribs:
                fields.update({'dateExpired': False})
        else:
            # Explicit attribute list: project only those attributes and
            # post-filter any builtins that slip through.
            clear_builtin = True
            for attr in attribs:
                fields.update({attr: True})
    else:
        # No attrs filter: always hide the builtin timestamps.
        fields.update({'dateCreated': False})
        fields.update({'dateModified': False})
        fields.update({'dateExpired': False})
    mattribs = []
    if metadata is not None:
        # Processes metadata parameter
        mattribs = metadata.split(",")
        if '*' not in mattribs:
            clear_builtin = True
            for attr in mattribs:
                fields.update({attr: True})
    if typeof is not None:
        query.update({"type": typeof})
    entity = list(self.mongodb.mongoConn.Entities.find(query, fields))
    if not entity:
        self.helpers.logger.info(
            self.program + " 404: " \
            + self.helpers.confs["errorMessages"][str(404)]["Description"])
        return self.broker.respond(
            404, self.helpers.confs["errorMessages"][str(404)],
            {}, False, accepted)
    elif len(entity) > 1:
        # More than one document for a single id is a conflict.
        self.helpers.logger.info(
            self.program + " 409: " \
            + self.helpers.confs["errorMessages"][str(409)]["Description"])
        return self.broker.respond(
            409, self.helpers.confs["errorMessages"][str(409)],
            {}, False, accepted)
    else:
        data = entity[0]
        if keyValues_opt:
            newData = {}
            # Converts data to key -> value
            for attr in list(data):
                if isinstance(data[attr], str):
                    newData.update({attr: data[attr]})
                if isinstance(data[attr], dict):
                    newData.update({attr: data[attr]["value"]})
                if isinstance(data[attr], list):
                    newData.update({attr: data[attr]})
            data = newData
        elif values_opt:
            newData = []
            # Converts data to values
            for attr in list(data):
                if isinstance(data[attr], str):
                    newData.append(data[attr])
                if isinstance(data[attr], dict):
                    newData.append(data[attr]["value"])
                if isinstance(data[attr], list):
                    newData.append(data[attr])
            data = newData
        elif unique_opt:
            newData = []
            # Converts data to unique values
            for attr in list(data):
                if isinstance(data[attr], str):
                    newData.append(data[attr])
                if isinstance(data[attr], dict):
                    newData.append(data[attr]["value"])
                if isinstance(data[attr], list):
                    newData.append(data[attr])
            # De-duplicate while preserving order (plain loop instead of
            # the original side-effecting list comprehension).
            data = []
            for x in newData:
                if x not in data:
                    data.append(x)
        if clear_builtin:
            # Clear builtin data not explicitly requested
            if "dateCreated" in data and 'dateCreated' not in attribs:
                del data["dateCreated"]
            if "dateModified" in data and 'dateModified' not in attribs:
                del data["dateModified"]
            if "dateExpired" in data and 'dateExpired' not in attribs:
                del data["dateExpired"]
        if attributes:
            # Attribute-only responses drop the identifying fields.
            if "id" in data and 'id' not in attribs:
                del data["id"]
            if "type" in data and 'type' not in attribs:
                del data["type"]
        self.helpers.logger.info(
            self.program + " 200: " \
            + self.helpers.confs["successMessage"][str(200)]["Description"])
        return self.broker.respond(200, data, {},
                                   False, accepted)
def update_entity_post(self, _id, typeof, data, options, accepted=None):
    """ Updates an HIASCDI Entity.

    With the "append" option, attributes that already exist on the
    entity are rejected (error); otherwise every attribute in `data`
    is written.

    References:
        FIWARE-NGSI v2 Specification
        https://fiware.github.io/specifications/ngsiv2/stable/
        Reference
            - Entities
            - Entity by ID
            - Update or Append Entity Attributes
    """
    #self.subscriptions.checkForSubscription(_id)
    # Avoid a shared mutable default argument.
    accepted = [] if accepted is None else accepted
    updated = False
    error = False
    _append = False
    # id/type are immutable identifiers and must never be rewritten.
    if "id" in data:
        del data['id']
    if "type" in data:
        del data['type']
    # (the unused keyValues flag was removed)
    if options is not None:
        for option in options.split(","):
            _append = True if option == "append" else _append
    entity = list(self.mongodb.mongoConn.Entities.find(
        {'id': _id}))
    if _append:
        if not entity:
            # Append mode needs an existing entity to compare against;
            # previously a missing entity raised IndexError here.
            return self.broker.respond(
                400, self.helpers.confs["errorMessages"]["400b"],
                {}, False, accepted)
        for update in data:
            if update in entity[0]:
                # Append may only add new attributes.
                error = True
            else:
                self.mongodb.mongoConn.Entities.update_one(
                    {"id" : _id},
                    {"$set": {update: data[update]}}, upsert=True)
                updated = True
    else:
        for update in data:
            self.mongodb.mongoConn.Entities.update_one(
                {"id" : _id},
                {"$set": {update: data[update]}}, upsert=True)
            updated = True
    if updated and error is False:
        return self.broker.respond(
            204, self.helpers.confs["successMessage"][str(204)],
            {}, False, accepted)
    else:
        return self.broker.respond(
            400, self.helpers.confs["errorMessages"]["400b"],
            {}, False, accepted)
def update_entity_patch(self, _id, typeof, data, options, accepted=None):
    """ Updates an HIASCDI Entity.

    Only attributes that already exist on the entity may be patched;
    any unknown attribute marks the request as an error.

    References:
        FIWARE-NGSI v2 Specification
        https://fiware.github.io/specifications/ngsiv2/stable/
        Reference
            - Entities
            - Entity by ID
            - Update Existing Entity Attributes
    """
    # Avoid a shared mutable default argument.
    accepted = [] if accepted is None else accepted
    updated = False
    error = False
    # id/type are immutable identifiers and must never be rewritten.
    if "id" in data:
        del data['id']
    if "type" in data:
        del data['type']
    # (the unused keyValues option flag was removed)
    entity = list(self.mongodb.mongoConn.Entities.find(
        {'id': _id}))
    if not entity:
        # Patching requires an existing entity; previously a missing
        # entity raised IndexError on entity[0].
        return self.broker.respond(
            400, self.helpers.confs["errorMessages"]["400b"],
            {}, False, accepted)
    for update in data:
        if update not in entity[0]:
            # Patch may only modify existing attributes.
            error = True
        else:
            self.mongodb.mongoConn.Entities.update_one(
                {"id" : _id},
                {"$set": {update: data[update]}})
            updated = True
    if updated and error is False:
        return self.broker.respond(
            204, self.helpers.confs["successMessage"][str(204)],
            {}, False, accepted)
    else:
        return self.broker.respond(
            400, self.helpers.confs["errorMessages"]["400b"],
            {}, False, accepted)
def update_entity_put(self, _id, typeof, data, options, accepted=None):
    """ Updates an HIASCDI Entity.

    Replaces all replaceable entity attributes: every existing
    attribute (except _id/id/type and the builtin timestamps) is unset,
    then the attributes in `data` are written.

    References:
        FIWARE-NGSI v2 Specification
        https://fiware.github.io/specifications/ngsiv2/stable/
        Reference
            - Entities
            - Entity by ID
            - Replace all entity attributes
    """
    # Avoid a shared mutable default argument.
    accepted = [] if accepted is None else accepted
    # id/type are immutable identifiers and must never be rewritten.
    if "id" in data:
        del data['id']
    if "type" in data:
        del data['type']
    # Projection keeps only the attributes that are allowed to be
    # replaced, so the unset loop below cannot touch identity fields.
    fields = {
        '_id': False,
        'id': False,
        'type': False,
        'dateCreated': False,
        'dateModified': False,
        'dateExpired': False
    }
    updated = False
    # (the unused keyValues option flag was removed)
    entity = list(self.mongodb.mongoConn.Entities.find(
        {"id": _id}, fields))
    # BUG FIX: the original iterated the result documents themselves and
    # passed a whole dict as the $unset key. Iterate the first matching
    # document's attribute names instead.
    if entity:
        for attr in entity[0]:
            self.mongodb.mongoConn.Entities.update(
                {"id": _id}, {'$unset': {attr: ""}})
    for update in data:
        self.mongodb.mongoConn.Entities.update_one(
            {"id" : _id},
            {"$set": {update: data[update]}}, upsert=True)
        updated = True
    if updated:
        return self.broker.respond(
            204, self.helpers.confs["successMessage"][str(204)],
            {}, False, accepted)
    else:
        return self.broker.respond(
            400, self.helpers.confs["errorMessages"]["400b"],
            {}, False, accepted)
def delete_entity(self, typeof, _id, accepted=None):
    """ Deletes an HIASCDI Entity.

    The entity type selects the MongoDB collection; unknown types are
    rejected with a 400.

    References:
        FIWARE-NGSI v2 Specification
        https://fiware.github.io/specifications/ngsiv2/stable/
        Reference
            - Entities
            - Entity by ID
            - Remove entity
    """
    # Avoid a shared mutable default argument.
    accepted = [] if accepted is None else accepted
    if typeof in self.mongodb.collextions:
        collection = self.mongodb.collextions[typeof]
    else:
        return self.broker.respond(
            400, self.helpers.confs["errorMessages"]["400b"],
            {}, False, accepted)
    # (dead `deleted` flag removed; delete_one's result is authoritative)
    result = collection.delete_one({"id": _id})
    if result.deleted_count == 1:
        self.helpers.logger.info("Mongo data delete OK")
        return self.broker.respond(204, {}, {},
                                   False, accepted)
    else:
        self.helpers.logger.info("Mongo data delete FAILED")
        return self.broker.respond(
            400, self.helpers.confs["errorMessages"]["400b"],
            {}, False, accepted)
def get_entity_attributes(self, typeof, _id, _attr, metadata, is_value=False, accepted=[]):
""" Gets a specific HIASCDI Entity Attribute.
References:
FIWARE-NGSI v2 Specification
https://fiware.github.io/specifications/ngsiv2/stable/
Reference
- Attribute
- Attribute by Entity ID
- Get Attribute Data
"""
query = {'id': _id}
# Removes the MongoDB ID
fields = {
'_id': False
}
mattribs = []
if metadata is not None:
# Processes metadata parameter
mattribs = metadata.split(",")
| |
import os.path
import parsers.variable_list_parser
# A lookup dictionary for setting up code generation rules.
# Key: Type of autocoded entity. Used to generate variables, then list of code generation items
# First entry in list: Name of XML file suffix
# Second entry in list: List of suffixes of generated source files
# Third entry in list: List of suffixes of generated header files
# Fourth entry in list: make rule for generating code
# (<module_dir> is substituted with the module path when rules are emitted)
xml_gen_dictionary = {
    'Serializable':
    [
        'SerializableAi.xml',
        ['SerializableAc.cpp',],
        ['SerializableAc.hpp',],
        '\tcd $(BUILD_ROOT)/<module_dir> && $(MKDIR) $(BUILD_ROOT)/<module_dir>/$(AC_DEP_DIR) && $(AC_SERIALIZABLE_GEN) $(notdir $<) $(DEP_FILE_ARG) $(BUILD_ROOT)/<module_dir>/$(AC_DEP_DIR)/$(basename $(notdir $<)).dep'
    ],
    'Port':
    [
        'PortAi.xml',
        ['PortAc.cpp',],
        ['PortAc.hpp',],
        '\tcd $(BUILD_ROOT)/<module_dir> && $(MKDIR) $(BUILD_ROOT)/<module_dir>/$(AC_DEP_DIR) && $(AC_INTERFACE_GEN) $(notdir $<) $(DEP_FILE_ARG) $(BUILD_ROOT)/<module_dir>/$(AC_DEP_DIR)/$(basename $(notdir $<)).dep'
    ],
    'Component':
    [
        'ComponentAi.xml',
        ['ComponentAc.cpp',],
        ['ComponentAc.hpp',],
        '\tcd $(BUILD_ROOT)/<module_dir> && $(MKDIR) $(BUILD_ROOT)/<module_dir>/$(AC_DEP_DIR) && $(AC_COMPONENT_GEN) $(notdir $<) $(DEP_FILE_ARG) $(BUILD_ROOT)/<module_dir>/$(AC_DEP_DIR)/$(basename $(notdir $<)).dep'
    ],
    'App':
    [
        'AppAi.xml',
        ['AppAc.cpp',],
        ['AppAc.hpp',],
        '\tcd $(BUILD_ROOT)/<module_dir> && $(MKDIR) $(BUILD_ROOT)/<module_dir>/$(AC_DEP_DIR) && $(AC_TOPOLOGY_GEN) $(notdir $<) $(DEP_FILE_ARG) $(BUILD_ROOT)/<module_dir>/$(AC_DEP_DIR)/$(basename $(notdir $<)).dep'
    ],
}
# Some variable assignments to make reading the code easier:
# named indices into the per-type lists in xml_gen_dictionary above.
xml_source_file_list_entry = 0  # XML source file suffix
xml_cpp_file_list_entry = 1  # generated .cpp suffix list
xml_hpp_file_list_entry = 2  # generated .hpp suffix list
xml_build_rule_list_entry = 3  # make rule template
# Make-rule template: builds the module static library (plus an empty.c
# stub object so empty modules still produce a library) and the SLOC
# counting/reporting targets. <module_name>/<module_dir> are substituted
# when the makefile is generated.
lib_target = """
$(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR)/$(LIB_PREFIX)<module_name>$(LIB_SUFFIX): $(OBJS_<module_name>_$(BUILD)) $(OBJS_AC_<module_name>)
$(MKDIR) $(@D)
echo "typedef int dummy;"> $(dir $@)empty.c
$(CC) $(CFLAGS) $(COMPILE_ONLY) $(COMPILE_TO) $(dir $@)empty$(OBJ_SUFFIX) $(dir $@)empty.c
$(LINK_LIB) $(LINK_LIB_FLAGS) $(LIBRARY_TO) $@ $^ $(dir $@)empty$(OBJ_SUFFIX)
$(RM) $(dir $@)empty.c $(dir $@)empty$(OBJ_SUFFIX)
$(POST_LINK_LIB) $(POST_LINK_LIB_ARGS) $@
# this is hard-coded for LARS NCSL. Fix in mk/src/parsers/mod_mk_parser.py.
<module_name>_sloc:
@$(MKDIR) $(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR)
@$(SLOC_COUNTER) $(SRC_<module_name>_$(BUILD)) $(HDR_<module_name>_$(BUILD)) > $(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR)/written_sloc.txt
@$(SLOC_COUNTER) $(SRC_AC_<module_name>) $(HDR_AC_<module_name>) > $(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR)/ac_sloc.txt
@$(SLOC_COUNTER) $(SRC_XML_AC_<module_name>) > $(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR)/xml_sloc.txt
<module_name>_sloc_dump:
@echo "XML files:"
@$(CAT) $(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR)/xml_sloc.txt
@echo "Code-generated files:"
@$(CAT) $(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR)/ac_sloc.txt
@echo "Handcoded files:"
@$(CAT) $(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR)/written_sloc.txt
"""
# Make-rule template: links the unit-test binary (optionally running a
# missing-symbol check first) and cleans test objects.
test_bin_target = """
$(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR)/$(BIN_PREFIX)test_<last_dir>$(BIN_SUFFIX): $(TEST_MODS_LIBS_<module_name>_$(BUILD)) $(TEST_OBJS_<module_name>_$(BUILD))
$(MKDIR) $(@D)
$(LINK_LIB) $(LINK_LIB_FLAGS) $(LIBRARY_TO) $(basename $@)_test_lib$(LIB_SUFFIX) $(TEST_OBJS_<module_name>_$(BUILD))
ifdef CHECK_LINK_BIN
@echo Checking for missing symbols
$(CHECK_LINK_BIN) $(LINK_BIN_TO) $@$(CHECK_LINK_BIN_NAME) $(basename $@)_test_lib$(LIB_SUFFIX) $(TEST_MODS_LIBS_<module_name>_$(BUILD)) $(TEST_LIBS_<module_name>_$(BUILD)) $(LINK_LIBS) $(CHECK_LINK_BIN_SRC)
endif
$(LINK_BIN) $(LINK_BIN_FLAGS) $(LINK_BIN_TO) $@ $(LINK_BIN_PRE_LIB_FLAGS) $(LIBS_START) $(basename $@)_test_lib$(LIB_SUFFIX) $(TEST_MODS_LIBS_<module_name>_$(BUILD)) $(TEST_LIBS_<module_name>_$(BUILD)) $(EXTRA_LIBS_<module_name>_$(BUILD)) $(LINK_LIBS) $(LIBS_END) $(LINK_BIN_POST_LIB_FLAGS)
# clean unit test binary
test_<module_name>_clean: $(foreach module,$(TEST_MODS_<module_name>_$(BUILD)),$(module)_bin_clean)
$(RM_DIR) $(TEST_OBJS_<module_dir>_$(BUILD)) $(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR)
"""
# Make-rule template: clean targets for autocoded files and binaries.
clean_target = """
<module_name>_clean: <module_name>_ac_clean <module_name>_bin_clean
<module_name>_ac_clean:
$(RM) $(SRC_AC_<module_name>) $(HDR_AC_<module_name>)
$(RM_DIR) $(BUILD_ROOT)/<module_dir>/$(AC_DEP_DIR)
$(RM_DIR) $(BUILD_ROOT)/<module_dir>/$(DICT_MODULE_SUBDIR)
$(RM_DIR) $(BUILD_ROOT)/<module_dir>/$(AMPCS_DICT_MODULE_SUBDIR)
<module_name>_bin_clean:
$(RM) $(OBJS_<module_name>_$(BUILD)) $(OBJS_AC_<module_name>)
$(RM_DIR) $(BUILD_ROOT)/<module_dir>/$(OUTPUT_DIR) $(BUILD_ROOT)/<module_dir>/*/$(OUTPUT_DIR)
"""
# Make-rule template: doxygen generation/viewing/cleaning plus SDD
# (software design document) generation from markdown.
dox_targets = """
gen_dox_<module_name>:
cd $(BUILD_ROOT)/<module_dir> && $(DOXYGEN)
show_dox_<module_name>:
$(BROWSER) $(BUILD_ROOT)/<module_dir>/dox/index.html
clean_dox_<module_name>:
$(RM_DIR) $(BUILD_ROOT)/<module_dir>/dox
<module_name>_sdd: $(BUILD_ROOT)/<module_dir>/docs/sdd.html
$(BUILD_ROOT)/<module_dir>/docs/sdd.html: $(wildcard $(BUILD_ROOT)/<module_dir>/docs/sdd.md)
ifneq "$(wildcard $(BUILD_ROOT)/<module_dir>/docs/sdd.md)" ""
@echo "Generating $@ from $<"
@$(MKDIR) $(dir $@)
@$(CAT) $(BUILD_ROOT)/mk/docs/SDD.css > $@
-@$(MARKDOWN) $< >> $@
endif
<module_name>_sdd_clean:
@$(RM) $(BUILD_ROOT)/<module_dir>/docs/sdd.html
"""
# Make-rule template: component helper targets for generating test
# components and implementation stubs from the component XML.
comp_helper_targets = """
<module_name>_testcomp:
cd $(BUILD_ROOT)/<module_dir> && $(AC_TEST_COMPONENT_GEN) $(SRC_XML_AC_COMPONENT_<module_name>)
<module_name>_impl:
cd $(BUILD_ROOT)/<module_dir> && $(AC_IMPL_GEN) $(SRC_XML_AC_COMPONENT_<module_name>)
"""
class CfgParseError(Exception):
    """Raised when a mod.mk configuration file is missing or malformed."""

    def __init__(self, error):
        # Call the base initializer so args/str()/pickling behave
        # consistently with standard exceptions.
        super().__init__(error)
        self.error = error

    def getErr(self):
        """Return the parse-error message (kept for existing callers)."""
        return self.error
# Name of the per-module makefile fragment each module directory must provide.
mod_make_cfg = "mod.mk"
class ModMkParser:
def __init__(self, module, directory, isModule):
    """Parse <directory>/mod.mk and populate the per-build-target
    source/header/test/define dictionaries, then recurse into any
    SUBDIRS entries, appending a ModMkParser per subdirectory.

    Args:
        module: fully qualified module name used for make variables/targets.
        directory: module path relative to $(BUILD_ROOT).
        isModule: whether this directory is a module in the make system.

    Raises:
        CfgParseError: if mod.mk is missing or contains a malformed entry.
    """
    self.source_dictionary = {} # holds source files by target
    self.hdr_dictionary = {} # holds header files by target
    self.test_source_dictionary = {} # holds test source by target
    self.xml_dictionary = {} # holds xml files
    self.defines_dictionary = {} # holds extra defines by target
    self.post_defines_dictionary = {} # holds extra defines by target
    self.extra_libs_dictionary = {} # holds extra libs by target
    self.extra_inc_dirs_dictionary = {} # holds extra include directories by target
    self.test_mods_dictionary = {} # hold module libraries to link test binary to
    self.test_libs_dictionary = {} # hold libraries to link test binary
    self.ac_extra_list = [] # holds extra AC files
    self.subdir_parser_list = [] # holds a collection of parsers for each subdirectory
    self.module_name = module
    self.module_local = os.path.split(directory)[1] # name of module local to directory; for finding files with module name as prefix
    # directory string is used for variable/target generation
    self.directory_string = directory.replace("/","_")
    self.directory = directory
    self.isModule = isModule # indicates whether this represents a module in the make system
    # "" is the target-independent (common) build target
    self.build_targets = ["",]
    # read build_targets from the builds.mk configuration
    build_file_parser = parsers.variable_list_parser.VariableListParser(os.environ["BUILD_ROOT"] + "/mk/configs/builds/builds.mk",":=")
    for build in build_file_parser.getValList("BUILDS"):
        self.build_targets += "_%s"%(build),
    # default to empty source lists so missing variables
    # are taken care of when makefile is generated.
    for target in self.build_targets:
        self.source_dictionary[target] = []
        self.hdr_dictionary[target] = []
        self.test_source_dictionary[target] = []
        self.test_libs_dictionary[target] = []
        self.test_mods_dictionary[target] = []
        self.defines_dictionary[target] = ""
        self.post_defines_dictionary[target] = ""
        self.extra_libs_dictionary[target] = ""
        self.extra_inc_dirs_dictionary[target] = ""
    for xml_type in list(xml_gen_dictionary.keys()):
        self.xml_dictionary[xml_type] = []
    # stored for debugging printout
    self.subdir_list = []
    if "PARSER_VERBOSE" in os.environ:
        print(("Analyzing directory " + directory))
    # check for file
    self.mod_file_name = directory + "/" + mod_make_cfg
    if not os.path.isfile(self.mod_file_name):
        raise CfgParseError("File %s does not exist." % self.mod_file_name)
    # read in file
    file_lines = open(self.mod_file_name).readlines()
    # for each line, search for variables. `line` accumulates
    # backslash-continued lines until a full assignment is seen.
    line_number = 1
    line = ""
    for curr_line in file_lines:
        # chop off comments (# character to end of line)
        # print("Processing line: " + line)
        comment_loc = curr_line.find("#")
        if comment_loc != -1:
            curr_line = curr_line[0:comment_loc]
        # strip off remaining whitespace
        curr_line = curr_line.strip()
        # check to see if empty line
        #print("Line length: " + str(len(line)))
        if len(curr_line) == 0:
            #print ("skipping")
            line_number += 1
            continue
        # look for continuation character
        cont_loc = curr_line.find("\\")
        if cont_loc != -1:
            # drop the backslash and keep accumulating
            curr_line = curr_line[0:cont_loc]
            curr_line = curr_line.strip()
            line += curr_line + " " # the space is for white space between file names
            line_number += 1
            continue
        else:
            line += curr_line + " "
        #print("Processed line: " + line)
        # split at first equal sign
        # make sure remaining text has "=" in it somewhere
        # by searching for "="
        if line.count("=") < 1:
            raise CfgParseError("Invalid entry found in %s line %i" % (self.mod_file_name,line_number))
        (var,value) = line.split("=",1)
        var = var.strip()
        value = value.strip()
        # search for target specific variables (e.g. SRC, SRC_LINUX, ...)
        for target in self.build_targets:
            starget = target
            if var == "SRC" + starget:
                # make sure remaining text is of form "var = val"
                # by searching for "="
                if line.count("=") != 1:
                    raise CfgParseError("Invalid entry found in %s line %i" % (self.mod_file_name,line_number))
                sources = value.split(" ")
                for source in sources:
                    if source == "":
                        continue
                    if not os.path.isfile(os.environ["BUILD_ROOT"] + "/" + directory + "/" + source):
                        raise CfgParseError("File %s/%s/%s not found." % (os.environ["BUILD_ROOT"],directory,source))
                    if source.count(".xml"):
                        # XML sources are bucketed by autocode type
                        source_found = False
                        for xml_type in list(xml_gen_dictionary.keys()):
                            if source.count(xml_type):
                                source_found = True
                                self.xml_dictionary[xml_type] += source,
                        if not source_found:
                            source_names = ""
                            for xml_type in list(xml_gen_dictionary.keys()):
                                source_names += "xx%sAi.xml "%xml_type
                            raise CfgParseError("File %s/%s/%s invalid. Should be one of \n\t%s" % (os.environ["BUILD_ROOT"],directory,source,source_names))
                    else:
                        self.source_dictionary[target] += source,
            if var == "HDR" + starget:
                # make sure remaining text is of form "var = val"
                # by searching for "="
                if line.count("=") != 1:
                    raise CfgParseError("Invalid entry found in %s line %i" % (self.mod_file_name,line_number))
                headers = value.split(" ")
                for header in headers:
                    if header == "":
                        continue
                    if not os.path.isfile(os.environ["BUILD_ROOT"] + "/" + directory + "/" + header):
                        raise CfgParseError("File %s/%s/%s not found." % (os.environ["BUILD_ROOT"],directory,header))
                    self.hdr_dictionary[target] += header,
            if var == "TEST_SRC" + starget:
                # make sure remaining text is of form "var = val"
                # by searching for "="
                if line.count("=") != 1:
                    raise CfgParseError("Invalid entry found in %s line %i" % (self.mod_file_name,line_number))
                test_sources = value.split(" ")
                for source in test_sources:
                    if source == "":
                        continue
                    if not os.path.isfile(os.environ["BUILD_ROOT"] + "/" + directory + "/" + source):
                        if not source.count("_ac_"): # Only disregard missing _AC_ files. For unit tests that want to include AC files that are not generated yet.
                            raise CfgParseError("File %s/%s/%s not found." % (os.environ["BUILD_ROOT"],directory,source))
                self.test_source_dictionary[target] = test_sources
            if var == "TEST_MODS" + starget:
                if line.count("=") != 1:
                    raise CfgParseError("Invalid entry found in %s line %i" % (self.mod_file_name,line_number))
                self.test_mods_dictionary[target] = value.split(" ")
            if var == "TEST_LIBS" + starget:
                if line.count("=") != 1:
                    raise CfgParseError("Invalid entry found in %s line %i" % (self.mod_file_name,line_number))
                self.test_libs_dictionary[target] = value.split(" ")
            if var == "COMPARGS" + starget:
                self.defines_dictionary[target] = value
            if var == "COMPARGS_POST" + starget:
                self.post_defines_dictionary[target] = value
            if var == "EXTRA_INC_DIRS" + starget:
                self.extra_inc_dirs_dictionary[target] = value
            if var == "EXTRA_LIBS" + starget:
                self.extra_libs_dictionary[target] = value
        # extra AC files
        if var == "AC_EXTRA_FILES":
            self.ac_extra_list = value.split(" ")
        # search for subdirectories
        if var == "SUBDIRS":
            # make sure remaining text is of form "var = val"
            # by searching for "="
            if line.count("=") != 1:
                raise CfgParseError("Invalid entry found in %s line %i" % (self.mod_file_name,line_number))
            if value != "":
                self.subdir_list = value.split(" ")
        line_number += 1
        # clear line accumulator
        line = ""
    # recurse into subdirectories
    for subdir in self.subdir_list:
        if subdir == "":
            continue
        self.subdir_parser_list.append(ModMkParser(self.module_name+subdir, directory+"/"+subdir,False))
def | |
from CommonServerPython import DemistoException
# Expected XSOAR context outputs used by the CrowdStrike Falcon X
# (sandbox) command unit tests. Each constant mirrors the context a
# command is expected to produce; keys of the form
# 'ctx.path(val.x == obj.x)' are Demisto context paths with their
# de-duplication expressions.

# Expected context after submitting an uploaded file for sandbox analysis.
SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_CONTEXT = {
    'submitted_id': 'id',
    'state': 'created',
    'created_timestamp': '2020-05-12T15:34:11Z',
    'environment_id': 160,
    'sha256': 'sha256'
}
# Expected context after submitting a URL for sandbox analysis.
SEND_URL_TO_SANDBOX_ANALYSIS_CONTEXT = {
    'submitted_id': 'id',
    'state': 'created',
    'created_timestamp': '2020-05-12T16:40:52Z',
    'environment_id': 160
}
# Expected context for the report-summary command: File indicator,
# DBotScore, and the raw resource entry.
GET_REPORT_SUMMARY_CONTEXT = {
    'File(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 '
    '|| val.SHA256 && val.SHA256 == obj.SHA256 || val.SHA512 && val.SHA512 == obj.SHA512 '
    '|| val.CRC32 && val.CRC32 == obj.CRC32 || val.CTPH && val.CTPH == obj.CTPH'
    ' || val.SSDeep && val.SSDeep == obj.SSDeep)': [
        {'SHA256': 'sha256', 'Company': 'Microsoft Corporation', 'ProductName': 'Microsoft Windows Operating System',
         'Signature': {'Authentihash': '', 'Copyright': 'Microsoft Corporation. All rights reserved.',
                       'Description': 'Microsoft Smartcard Certificate Propagation Service',
                       'FileVersion': '10.0.19041.844 (WinBuild.160101.0800)', 'InternalName': 'certprop.dll',
                       'OriginalName': 'certprop.dll'},
         'Hashes': [{'type': 'SHA256', 'value': 'sha256'}]
         }
    ],
    'DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor && val.Type == obj.Type)': [
        {'Indicator': 'sha256', 'Type': 'file', 'Vendor': '', 'Score': 2, 'Reliability': 'B - Usually reliable'}],
    'csfalconx.resource(val.id && val.id == obj.id)': {
        'environment_description': 'Windows 10 64 bit',
        'environment_id': 160, 'sha256': 'sha256',
        'submission_type': 'page_url',
        'submit_url': 'hxxps://www.google.com', 'threat_score': 13,
        'created_timestamp': '2020-03-16T17:04:48Z', 'id': 'id',
        'ioc_report_broad_csv_artifact_id': 'ioc_report_broad_csv_artifact_id',
        'ioc_report_broad_json_artifact_id': 'ioc_report_broad_json_artifact_id',
        'ioc_report_broad_maec_artifact_id': 'ioc_report_broad_maec_artifact_id',
        'ioc_report_broad_stix_artifact_id': 'ioc_report_broad_stix_artifact_id',
        'ioc_report_strict_csv_artifact_id': 'ioc_report_strict_csv_artifact_id',
        'ioc_report_strict_json_artifact_id': 'ioc_report_strict_json_artifact_id',
        'ioc_report_strict_maec_artifact_id': 'ioc_report_strict_maec_artifact_id',
        'ioc_report_strict_stix_artifact_id': 'ioc_report_strict_stix_artifact_id',
        'verdict': 'suspicious'}}
# Expected context when polling an in-flight analysis for its status.
GET_ANALYSIS_STATUS_CONTEXT = {
    'csfalconx.resource(val.id && val.id == obj.id)':
        {
            'id': 'id',
            'state': 'success',
            'created_timestamp': '2020-03-16T17:04:48Z',
            'environment_id': 160
        }
}
# Expected context for the sandbox submission-quota command.
CHECK_QUOTA_STATUS_CONTEXT = {
    'csfalconx.resource(val.id && val.id == obj.id)':
        {
            'total': 100,
            'used': 47,
            'in_progress': 2
        }
}
# Expected context when searching for sandbox reports (no hash filter).
FIND_SANDBOX_REPORTS_CONTEXT = {
    'csfalconx.resource(val.id && val.id == obj.id)':
        {
            'resources': ['resources1', 'resources2', 'resources3', 'resources4']
        }
}
# Expected context when searching reports by hash: FindReport maps each
# queried sha256 to its matching report ids.
FIND_SANDBOX_REPORTS_HASH_CONTEXT = {
    'csfalconx.resource(val.id && val.id == obj.id)': {
        'resources': ['resources1', 'resources2', 'resources3', 'resources4'],
        'FindReport': [{'sha256': 'hash1', 'reportIds': ['resources1', 'resources2', 'resources3', 'resources4']}]
    }
}
# Expected context when a hash search yields no matching reports.
FIND_SANDBOX_REPORTS_NOT_FOUND_HASH_CONTEXT = {
    'csfalconx.resource(val.id && val.id == obj.id)': {
        'resources': [],
        'FindReport': [{'sha256': 'hash1', 'reportIds': []}]
    }
}
# Expected context when searching for submission ids.
FIND_SUBMISSION_ID_CONTEXT = {
    'csfalconx.resource(val.id && val.id == obj.id)':
        {
            'resources': ['resources1', 'resources2', 'resources3', 'resources4']
        }
}
GET_FULL_REPORT_CONTEXT_EXTENDED = {'environment_description': 'Windows 10 64 bit', 'environment_id': 160,
'sha256': 'sha256', 'submission_type': 'page_url',
'submit_url': 'hxxps://www.google.com', 'threat_score': 13,
'architecture': 'WINDOWS', 'classification': ['91.6% (.URL) Windows URL shortcut',
'8.3% (.INI) Generic INI configuration'],
'contacted_hosts': [{'address': '172.16.31.10',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428},
{'name': 'name.exe', 'pid': 9372}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428},
{'name': 'name.exe', 'pid': 9372}],
'country': 'United States', 'port': 80, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'}],
'dns_requests': [{'address': '172.16.17.32', 'country': 'United States',
'domain': 'googleads.g.doubleclick.net',
'registrar_creation_timestamp': '1996-01-16T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'registrar_organization'},
{'address': '192.168.3.11', 'country': 'United States',
'domain': 'domain'},
{'address': '172.16.31.10', 'country': 'United States',
'domain': 'ssl.gstatic.com',
'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'Google Inc.'},
{'address': '192.168.127.12', 'country': 'United States',
'domain': 'www.gstatic.com',
'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'registrar_organization'}],
'http_requests': [
{'header': 'header', 'host': 'host', 'host_ip': '172.16.31.10', 'host_port': 80,
'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'host', 'host_ip': '172.16.31.10', 'host_port': 80,
'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '192.168.3.11',
'host_port': 80, 'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '192.168.3.11',
'host_port': 80, 'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '192.168.3.11',
'host_port': 80, 'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '192.168.3.11',
'host_port': 80, 'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '192.168.3.11',
'host_port': 80, 'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '192.168.3.11',
'host_port': 80, 'method': 'GET', 'url': 'url'}], 'incidents': [
{'details': ['Contacts 4 domains and 4 hosts'], 'name': 'Network Behavior'}], 'processes': [
{'command_line': 'command_line', 'icon_artifact_id': 'icon_artifact_id', 'name': 'rundll32.exe',
'normalized_path': 'normalized_path.exe', 'pid': 6648, 'process_flags': [{'name': 'Reduced Monitoring'}],
'sha256': 'sha256', 'uid': '00074182-00006648'}], 'screenshots_artifact_ids': ['screenshots_artifact_ids1',
'screenshots_artifact_ids2',
'screenshots_artifact_ids3',
'screenshots_artifact_ids4'],
'signatures': [
{'category': 'General', 'description': 'description', 'identifier': 'network-0',
'name': 'Contacts domains', 'origin': 'Network Traffic', 'relevance': 1,
'threat_level_human': 'informative', 'type': 7},
{'category': 'General', 'description': 'description', 'identifier': 'network-1',
'name': 'Contacts server', 'origin': 'Network Traffic', 'relevance': 1,
'threat_level_human': 'informative', 'type': 7},
{'category': 'Network Related', 'description': 'description',
'identifier': 'string-3', 'name': 'Found potential URL in binary/memory',
'origin': 'String', 'relevance': 10, 'threat_level_human': 'informative',
'type': 2}, {'category': 'External Systems', 'description': 'description',
'identifier': 'suricata-0', 'name': 'Detected Suricata Alert',
'origin': 'Suricata Alerts', 'relevance': 10,
'threat_level_human': 'informative', 'type': 18},
{'category': 'Ransomware/Banking', 'description': 'description',
'identifier': 'string-12',
'name': 'Detected text artifact in screenshot that indicate file could be ransomware',
'origin': 'String', 'relevance': 10, 'threat_level': 1,
'threat_level_human': 'suspicious', 'type': 2},
{'category': 'Network Related', 'description': 'description',
'identifier': 'network-23',
'name': 'Sends traffic on typical HTTP outbound port, but without HTTP header',
'origin': 'Network Traffic', 'relevance': 5, 'threat_level': 1,
'threat_level_human': 'suspicious', 'type': 7}],
'created_timestamp': '2020-03-16T17:04:48Z', 'id': 'id',
'ioc_report_broad_csv_artifact_id': 'ioc_report_broad_csv_artifact_id',
'ioc_report_broad_json_artifact_id': 'ioc_report_broad_json_artifact_id',
'ioc_report_broad_maec_artifact_id': 'ioc_report_broad_maec_artifact_id',
'ioc_report_broad_stix_artifact_id': 'ioc_report_broad_stix_artifact_id',
'ioc_report_strict_csv_artifact_id': 'ioc_report_strict_csv_artifact_id',
'ioc_report_strict_json_artifact_id': 'ioc_report_strict_json_artifact_id',
'ioc_report_strict_maec_artifact_id': 'ioc_report_strict_maec_artifact_id',
'ioc_report_strict_stix_artifact_id': 'ioc_report_strict_stix_artifact_id',
'verdict': 'no specific threat',
'sandbox': {'architecture': 'WINDOWS',
'classification': [
'91.6% (.URL) Windows URL shortcut',
'8.3% (.INI) Generic INI configuration'],
'contacted_hosts': [
{'address': '172.16.31.10',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428},
{'name': 'name.exe', 'pid': 9372}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428},
{'name': 'name.exe', 'pid': 9372}],
'country': 'United States',
'port': 80, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10', 'associated_runtime': [
{'name': 'name.exe',
'pid': 6428}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'}],
'dns_requests': [
{'address': '172.16.17.32',
'country': 'United States',
'domain': 'googleads.g.doubleclick.net',
'registrar_creation_timestamp': '1996-01-16T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'registrar_organization'},
{'address': '192.168.3.11',
'country': 'United States',
'domain': 'domain'},
{'address': '172.16.31.10',
'country': 'United States',
'domain': 'ssl.gstatic.com',
'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'Google Inc.'},
{'address': '192.168.127.12',
'country': 'United States',
'domain': 'www.gstatic.com',
'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'registrar_organization'}],
'http_requests': [
{'header': 'header',
'host': 'host',
'host_ip': '172.16.31.10',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'host',
'host_ip': '172.16.31.10',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '192.168.3.11',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '192.168.3.11',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '192.168.3.11',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '192.168.3.11',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '192.168.3.11',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '192.168.3.11',
'host_port': 80,
'method': 'GET',
'url': 'url'}],
'incidents': [{'details': [
'Contacts 4 domains and 4 hosts'],
'name': 'Network Behavior'}],
'processes': [
{'command_line': 'command_line',
'icon_artifact_id': 'icon_artifact_id',
'name': 'rundll32.exe',
'normalized_path': 'normalized_path.exe',
'pid': 6648, 'process_flags': [{
'name': 'Reduced Monitoring'}],
'sha256': 'sha256',
'uid': '00074182-00006648'}],
'screenshots_artifact_ids': [
'screenshots_artifact_ids1',
'screenshots_artifact_ids2',
'screenshots_artifact_ids3',
'screenshots_artifact_ids4'],
'signatures': [{'category': 'General',
'description': 'description',
'identifier': 'network-0',
'name': 'Contacts domains',
'origin': 'Network Traffic',
'relevance': 1,
'threat_level_human': 'informative',
'type': 7},
{'category': 'General',
'description': 'description',
'identifier': 'network-1',
'name': 'Contacts server',
'origin': 'Network Traffic',
'relevance': 1,
'threat_level_human': 'informative',
'type': 7}, {
'category': 'Network Related',
'description': 'description',
'identifier': 'string-3',
'name': 'Found potential URL in binary/memory',
'origin': 'String',
'relevance': 10,
'threat_level_human': 'informative',
'type': 2}, {
'category': 'External Systems',
'description': 'description',
'identifier': 'suricata-0',
'name': 'Detected Suricata Alert',
'origin': 'Suricata Alerts',
'relevance': 10,
'threat_level_human': 'informative',
'type': 18}, {
'category': 'Ransomware/Banking',
'description': 'description',
'identifier': 'string-12',
'name': 'Detected text artifact in screenshot that indicate file could be ransomware',
'origin': 'String',
'relevance': 10,
'threat_level': 1,
'threat_level_human': 'suspicious',
'type': 2}, {
'category': 'Network Related',
'description': 'description',
'identifier': 'network-23',
'name': 'Sends traffic on typical HTTP outbound port, but without HTTP header',
'origin': 'Network Traffic',
'relevance': 5,
'threat_level': 1,
'threat_level_human': 'suspicious',
'type': 7}]}}
GET_FULL_REPORT_CONTEXT = {'environment_description': 'Windows 10 64 bit', 'environment_id': 160, 'sha256': 'sha256',
'submission_type': 'page_url', 'submit_url': 'hxxps://www.google.com', 'threat_score': 13,
'architecture': 'WINDOWS',
'classification': ['91.6% (.URL) Windows URL shortcut',
'8.3% (.INI) Generic INI configuration'],
'contacted_hosts': [{'address': '172.16.31.10',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428},
{'name': 'name.exe', 'pid': 9372}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '172.16.31.10',
'associated_runtime': [
{'name': 'name.exe',
'pid': 6428},
{'name': 'name.exe',
'pid': 9372}],
'country': 'United States',
'port': | |
# -*- coding: utf-8
"""Module for fluid property integration.
TESPy uses the CoolProp python interface for all fluid property functions. The
TESPyFluid class allows the creation of lookup tables for custom fluid
mixtures.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location tespy/tools/fluid_properties.py
SPDX-License-Identifier: MIT
"""
import logging
import os
from collections import OrderedDict
import CoolProp as CP
import numpy as np
import pandas as pd
from CoolProp.CoolProp import PropsSI as CPPSI
from scipy import interpolate
from tespy.tools.global_vars import err
from tespy.tools.global_vars import gas_constants
from tespy.tools.global_vars import molar_masses
from tespy.tools.helpers import molar_mass_flow
from tespy.tools.helpers import newton
from tespy.tools.helpers import reverse_2d
from tespy.tools.helpers import reverse_2d_deriv
from tespy.tools.helpers import single_fluid
# %%
class TESPyFluid:
    r"""
    Allow the creation of lookup tables for specified mixtures.

    The created fluid property lookup tables address an ideal mixture of real
    fluids.

    Parameters
    ----------
    alias : str
        Alias for the fluid. Please note: The alias of a TESPyFluid class
        object will always start with :code:`TESPy::`. See the example for more
        information!

    fluid : dict
        Fluid vector specifying the fluid composition of the TESPy fluid.

    p_range : list/ndarray
        Pressure range for the new fluid lookup table.

    T_range : list/ndarray
        Temperature range for the new fluid lookup table (optional).

    path : str
        Path to importing tespy fluid from.

    Note
    ----
    Creates lookup tables for

    - enthalpy (h),
    - entropy (s),
    - density (d) and
    - viscosity (visc)

    from pressure and temperature. Additionally molar mass and gas constant
    will be calculated. Inverse functions, e.g. entropy from pressure and
    enthalpy are calculated via newton algorithm from these tables.

    Example
    -------
    Create a custom fluid from specified composition within defined pressure
    and temperature limits. We define dry air component wise as our custom
    fluid.

    >>> from tespy.tools.fluid_properties import (TESPyFluid, h_mix_pT,
    ... s_mix_pT, v_mix_pT, visc_mix_pT)
    >>> from tespy.networks import Network
    >>> import shutil
    >>> fluidvec = {'N2': 0.7552, 'O2': 0.2314, 'CO2': 0.0005, 'Ar': 0.0129}
    >>> p_arr = np.array([0.1, 10]) * 1e5
    >>> myfluid = TESPyFluid('dry air', fluid=fluidvec, p_range=p_arr,
    ... T_range=[300, 1200])

    Check if the fluid creation was successful and compare some fluid
    properties to the CoolProp air implementation. We have to add the CoolProp
    air implementation to the memorise class first. The relative deviation
    should be very small (< 0.01), we check for enthalpy, volume, entropy and
    viscosity. Specific volume and viscosity are absolute values, thus no
    difference is calculated.

    >>> Memorise.add_fluids({'air': 'HEOS'})
    >>> type(myfluid)
    <class 'tespy.tools.fluid_properties.TESPyFluid'>
    >>> p = 3e5
    >>> fluid_props = [0, p, 0, {myfluid.alias: 1}]
    >>> T1 = 400
    >>> T2 = 1000
    >>> delta_h_tespy = h_mix_pT(fluid_props, T2) - h_mix_pT(fluid_props, T1)
    >>> fluid_props_CP = [0, p, 0, {'air': 1}]
    >>> delta_h = h_mix_pT(fluid_props_CP, T2) - h_mix_pT(fluid_props_CP, T1)
    >>> round(abs(delta_h_tespy - delta_h) / delta_h, 2)
    0.0
    >>> v_tespy = v_mix_pT(fluid_props, T2)
    >>> v = v_mix_pT(fluid_props_CP, T2)
    >>> round(abs(v_tespy - v) / v, 2)
    0.0
    >>> s_tespy = s_mix_pT(fluid_props, T2) - s_mix_pT(fluid_props, T1)
    >>> s = s_mix_pT(fluid_props_CP, T2) - s_mix_pT(fluid_props_CP, T1)
    >>> round(abs(s_tespy - s) / s, 2)
    0.0
    >>> visc_tespy = visc_mix_pT(fluid_props, T2)
    >>> visc = visc_mix_pT(fluid_props_CP, T2)
    >>> round(abs(visc_tespy - visc) / visc, 2)
    0.0

    The fluid had been saved automatically, load it now.

    >>> loadfluid = TESPyFluid('dry air', fluid=fluidvec, p_range=p_arr,
    ... path='./LUT')
    >>> type(loadfluid)
    <class 'tespy.tools.fluid_properties.TESPyFluid'>
    >>> shutil.rmtree('./LUT', ignore_errors=True)
    """

    def __init__(self, alias, fluid, p_range, T_range=None, path=None):
        if not isinstance(alias, str):
            msg = 'Alias must be of type String.'
            logging.error(msg)
            raise TypeError(msg)

        # '::' is reserved as the back end separator in fluid names
        if '::' in alias:
            msg = 'The sequence "::" is not allowed in your fluid alias.'
            logging.error(msg)
            raise ValueError(msg)
        else:
            self.alias = alias

        self.fluid = fluid
        # path to load existing lookup tables from (None: generate new ones)
        self.path = path

        # register this fluid in the class level registry (keyed by alias)
        TESPyFluid.fluids[self.alias] = self

        # map each component fluid to its CoolProp back end; components may
        # be specified as 'BACKEND::fluid', plain names default to HEOS
        self.fluids_back_ends = OrderedDict()
        for fluid in sorted(list(self.fluid.keys()) + [self.alias]):
            try:
                data = fluid.split('::')
                back_end = data[0]
                fluid = data[1]
            except IndexError:
                # no '::' separator present: keep the name, use default
                back_end = 'HEOS'
            if fluid != self.alias:
                self.fluids_back_ends[fluid] = back_end

        Memorise.add_fluids(self.fluids_back_ends)

        # set up the pressure grid with logarithmic spacing
        self.p = np.geomspace(p_range[0], p_range[1], 100)

        if T_range is None:
            # lower bound just above the component limits stored in
            # Memorise.value_range (index 2 presumably is the minimum
            # temperature -- TODO confirm), upper bound 2000 K
            T_range = [max([Memorise.value_range[f][2] for f
                            in self.fluids_back_ends.keys()]) + 1, 2000]

        self.T = np.geomspace(T_range[0], T_range[1], 100)

        Memorise.add_fluids({self.alias: 'TESPy'})

        # property functions the lookup tables are generated from
        params = {}
        params['h_pT'] = h_mix_pT
        params['s_pT'] = s_mix_pT
        params['d_pT'] = d_mix_pT
        params['visc_pT'] = visc_mix_pT

        self.funcs = {}

        if self.path is None:
            # generate fluid properties from CoolProp
            msg = 'Generating lookup-tables from CoolProp fluid properties.'
            logging.debug(msg)
            for key in params.keys():
                self.funcs[key] = self.generate_lookup(key, params[key])
                msg = 'Loading function values for function ' + key + '.'
                logging.debug(msg)
        else:
            # load fluid properties from specified path
            msg = ('Generating lookup-tables from base path ' + self.path +
                   '/' + self.alias + '/.')
            logging.debug(msg)
            for key in params.keys():
                self.funcs[key] = self.load_lookup(key)
                msg = 'Loading function values for function ' + key + '.'
                logging.debug(msg)

        msg = ('Successfully created look-up-tables for custom fluid ' +
               self.alias + '.')
        logging.debug(msg)

    def generate_lookup(self, name, func):
        r"""
        Create lookup table from CoolProp-database.

        Parameters
        ----------
        name : str
            Name of the lookup table.

        func : function
            Function to create lookup table from.
        """
        x1 = self.p
        x2 = self.T

        # Collect the rows in a list and convert once: this avoids the
        # quadratic cost of repeatedly calling np.append and also fixes a
        # latent shape bug (the previous preallocation used len(x1) columns,
        # which only worked because both grids happen to have 100 points).
        rows = [
            np.array([func([0, p, 0, self.fluid], T) for T in x2])
            for p in x1
        ]
        y = np.asarray(rows, dtype=float)  # shape: (len(x1), len(x2))

        self.save_lookup(name, x1, x2, y)
        return interpolate.RectBivariateSpline(x1, x2, y)

    def save_lookup(self, name, x1, x2, y):
        r"""
        Save lookup table to working dir :code:`./LUT/fluid_alias/`.

        Parameters
        ----------
        name : str
            Name of the lookup table.

        x1 : ndarray
            Pressure.

        x2 : ndarray
            Temperature.

        y : ndarray
            Lookup value (enthalpy, entropy, density or viscosity)
        """
        df = pd.DataFrame(y, columns=x2, index=x1)
        # '::' is not a safe directory name component on all platforms
        alias = self.alias.replace('::', '_')
        path = './LUT/' + alias + '/'
        if not os.path.exists(path):
            os.makedirs(path)
        df.to_csv(path + name + '.csv')

    def load_lookup(self, name):
        r"""
        Load lookup table from specified path base path and alias.

        Parameters
        ----------
        name : str
            Name of the lookup table.
        """
        alias = self.alias.replace('::', '_')
        path = self.path + '/' + alias + '/' + name + '.csv'
        df = pd.read_csv(path, index_col=0)

        x1 = df.index.values
        # column labels are the temperature grid, stored as strings in csv
        x2 = np.array(list(map(float, list(df))))
        y = df.values

        return interpolate.RectBivariateSpline(x1, x2, y)
# class-level registry of all created TESPyFluid objects, keyed by alias;
# entries are added by TESPyFluid.__init__
TESPyFluid.fluids = {}
# %%
class Memorise:
r"""Memorization of fluid properties."""
@staticmethod
def add_fluids(fluids, memorise_fluid_properties=True):
r"""
Add list of fluids to fluid memorisation class.
- Generate arrays for fluid property lookup if memorisation is
activated.
- Calculate/set fluid property value ranges for convergence checks.
Parameters
----------
fluids : dict
Dict of fluid and corresponding CoolProp back end for fluid
property memorization.
memorise_fluid_properties : boolean
Activate or deactivate fluid property value memorisation. Default
state is activated (:code:`True`).
Note
----
The Memorise class creates globally accessible variables for different
fluid property calls as dictionaries:
- T(p,h)
- T(p,s)
- v(p,h)
- visc(p,h)
- s(p,h)
Each dictionary uses the list of fluids passed to the Memorise class as
identifier for the fluid property memorisation. The fluid properties
are stored as numpy array, where each column represents the mass
fraction of the respective fluid and the additional columns are the
values for the fluid properties. The fluid property function will then
look for identical fluid property inputs (p, h, (s), fluid mass
fraction). If the inputs are in the array, the first column of that row
is returned, see example.
Example
-------
T(p,h) for set of fluids ('water', 'air'):
- row 1: [282.64527752319697, 10000, 40000, 1, 0]
- row 2: [284.3140698256616, 10000, 47000, 1, 0]
"""
# number of fluids
num_fl = len(fluids)
if memorise_fluid_properties and num_fl > 0:
fl = tuple(fluids.keys())
# fluid property tables
Memorise.T_ph[fl] = np.empty((0, num_fl + 4), float)
Memorise.T_ps[fl] = np.empty((0, num_fl + 5), float)
Memorise.v_ph[fl] = np.empty((0, num_fl + 4), float)
Memorise.visc_ph[fl] = np.empty((0, num_fl + 4), float)
Memorise.s_ph[fl] = np.empty((0, num_fl + 4), float)
msg = (
'Added fluids ' + ', '.join(fl) +
' to memorise lookup tables.')
logging.debug(msg)
for f, back_end in fluids.items():
if f not in Memorise.state.keys() and back_end != 'TESPy':
# create CoolProp.AbstractState object
try:
Memorise.state[f] = | |
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler, MinMaxScaler, PolynomialFeatures
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, mean_absolute_error, \
mean_squared_error, mean_absolute_percentage_error
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import numpy as np
def divide(dataset, target_column):
    """
    Split *dataset* into features (X) and target (y).
    This helper is called inside the prepare_data function (see below) in
    order to prepare data.
    :param dataset: pandas DataFrame to split
    :param target_column: name of the target column
    :return: X (every column except the target) and y (the target column)
    """
    features = dataset.drop(columns=[target_column])
    target = dataset[target_column]
    return features, target
def prepare_data(dataset, target_column, sampler_type=None, scaler_type='std', test_size=0.2):
    """
    Creates a list with several train-test sets to test with:
    1. Divides the dataset in X and y
    2. If sampler_type is under/SMOTE/both, it resamples the data. It will append to the list the generated train-test sets
    3. After that, it will scale all sets
    4. Returns A LIST OF LISTS with the following format:
    [
        [X_train, X_test, y_train, y_test]  # original set
        [X_train, X_test, y_train, y_test]  # only present when data is resampled
    ]
    ¡IMPORTANT! The output of this function is used as input in all the model functions
    EXAMPLE:
    If we only want to use the original set, we can just use the first set of the array.
    Let's create a linear regression with library functions:
    sets = prepare_data(dataset, 'target')
    model, metrics = lin_reg(sets[0][0], sets[0][1], sets[0][2], sets[0][3])
    If sample is needed, this function will return more than one set. In that case, we will need to iterate over it:
    sets = prepare_data(dataset, 'target', sampler_type='SMOTE')
    for i in range(len(sets)):
        model, metrics = lin_reg(sets[i][0], sets[i][1], sets[i][2], sets[i][3])
    :param pandas.__class__ dataset: Dataset we are working with
    :param str target_column: Target column name
    :param str sampler_type: Sampling to be applied. Accepts under, SMOTE, both and none
    :param str scaler_type: Scaler to be applied. Accepts std, minmax and None
    :param float test_size: Test size
    :return list: tuple list with X and y and sampled X and y
    """
    sets = []
    splited_sets = []
    scaled_sets = []
    # 1. Divide in X and y
    X, y = divide(dataset, target_column)
    sets.append((X, y))
    # 2. Resample if requested; the original set is always kept first.
    if sampler_type == 'under':
        X_unders, y_unders = RandomUnderSampler(random_state=42).fit_resample(X, y)
        sets.append((X_unders, y_unders))
    elif sampler_type == 'SMOTE':
        X_overs, y_overs = SMOTE(random_state=42).fit_resample(X, y)
        sets.append((X_overs, y_overs))
    elif sampler_type == 'both':
        X_unders, y_unders = RandomUnderSampler(random_state=42).fit_resample(X, y)
        sets.append((X_unders, y_unders))
        X_overs, y_overs = SMOTE(random_state=42).fit_resample(X, y)
        sets.append((X_overs, y_overs))
    # 3. Train/test split every candidate set.
    # (Loop variable renamed: the original shadowed the builtin `set`.)
    for X_cand, y_cand in sets:
        X_train, X_test, y_train, y_test = train_test_split(X_cand, y_cand, test_size=test_size,
                                                            random_state=42)
        splited_sets.append([X_train, X_test, y_train, y_test])
    # Optimize memory usage
    del sets
    # 4. Scale features; the scaler is fitted on the train split only so no
    #    information from the test split leaks into it.
    for X_train, X_test, y_train, y_test in splited_sets:
        if scaler_type == "std":
            std_scale = StandardScaler().fit(X_train)
            X_train_std = std_scale.transform(X_train)
            X_test_std = std_scale.transform(X_test)
            scaled_sets.append([X_train_std, X_test_std, y_train, y_test])
        elif scaler_type == "minmax":
            minmax_scaler = MinMaxScaler().fit(X_train)
            X_train_scl = minmax_scaler.transform(X_train)
            X_test_scl = minmax_scaler.transform(X_test)
            scaled_sets.append([X_train_scl, X_test_scl, y_train, y_test])
        else:
            # unknown/None scaler: pass the unscaled splits through
            scaled_sets = splited_sets
    return scaled_sets
def model_scoring_regression(name, model, X_test, y_test):
    """
    Calculates model scoring dataframe for regression models.
    This function is called inside the regression model functions (see below)
    in order to generate the metrics dataset.
    :param str name: Column name
    :param model: Trained model to get metrics of
    :param X_test: X_test
    :param y_test: y_test
    :return: metrics dataframe
    """
    column = f'{name.upper()} (test data)'
    preds = model.predict(X_test)
    mse = mean_squared_error(y_test, preds)
    values = [f'{model.score(X_test, y_test):.10f}',
              f'{mean_absolute_error(y_test, preds):.10f}',
              f'{mean_absolute_percentage_error(y_test, preds):.10f}',
              f'{mse:.10f}',
              f'{np.sqrt(mse):.10f}']
    # NOTE(review): the nested list reproduces the original index layout
    # (a one-level MultiIndex) so downstream pd.concat behaviour is unchanged.
    return pd.DataFrame({column: values},
                        index=[['Score (R2 coef.)', 'MAE', 'MAPE', 'MSE', 'RMSE']])
def lin_reg(X_train, X_test, y_train, y_test, regular_type=None):
    """
    Apply a lineal regression model with 3 polynomial levels
    :param X_train: training features
    :param X_test: test features
    :param y_train: training target
    :param y_test: test target
    :param regular_type: type of Regularization (ridge, lasso, elasticnet);
                         None applies no regularization
    :return: trained (unregularized) model and metrics dataframe
    """
    model_fit = LinearRegression()
    model_fit.fit(X_train, y_train)
    metrics = model_scoring_regression('LinearRegression', model_fit, X_test, y_test)
    # Regularization if needed: fit the requested variant and append its
    # metrics next to the plain linear regression for comparison.
    if regular_type == 'ridge':
        ridgeR = Ridge(alpha=10)
        ridgeR.fit(X_train, y_train)
        metrics = pd.concat([metrics,
                             model_scoring_regression('LINREG-RIDGE', ridgeR, X_test, y_test)], axis=1)
    elif regular_type == 'lasso':
        lassoR = Lasso(alpha=1)
        lassoR.fit(X_train, y_train)
        metrics = pd.concat([metrics,
                             model_scoring_regression('LINREG-LASSO', lassoR, X_test, y_test)], axis=1)
    elif regular_type == 'elasticnet':
        elastic_net = ElasticNet(alpha=1, l1_ratio=0.5)
        elastic_net.fit(X_train, y_train)
        metrics = pd.concat([metrics,
                             model_scoring_regression('LINREG-ELASTICNET', elastic_net, X_test, y_test)], axis=1)
    elif regular_type in (None, 'None'):
        # BUG FIX: the original only compared against the string 'None',
        # so this branch was unreachable with the default argument (None).
        print("Regularization not necessary")
    return model_fit, metrics
def poly_reg(X_train, X_test, y_train, y_test, regular_type=None):
    """
    Apply a polynomial regression model with 2 polynomial levels
    :param X_train: training features
    :param X_test: test features
    :param y_train: training target
    :param y_test: test target
    :param regular_type: type of Regularization (ridge, lasso, elasticnet);
                         None applies no regularization
    :return: trained (unregularized) model and metrics dataframe
    """
    poly_feats = PolynomialFeatures(degree=2)
    X_poly = poly_feats.fit_transform(X_train)
    model_fit = LinearRegression()
    model_fit.fit(X_poly, y_train)
    X_poly_test = poly_feats.transform(X_test)
    metrics = model_scoring_regression('PolynomialRegression', model_fit, X_poly_test, y_test)
    # Regularization if needed.
    # BUG FIX: the regularized variants are now fitted and scored on the
    # polynomial features; the original used the raw X_train/X_test, so the
    # 'POLREG-*' columns were really metrics of plain linear models.
    if regular_type == 'ridge':
        ridgeR = Ridge(alpha=10)
        ridgeR.fit(X_poly, y_train)
        metrics = pd.concat([metrics,
                             model_scoring_regression('POLREG-RIDGE', ridgeR, X_poly_test, y_test)], axis=1)
    elif regular_type == 'lasso':
        lassoR = Lasso(alpha=1)
        lassoR.fit(X_poly, y_train)
        metrics = pd.concat([metrics,
                             model_scoring_regression('POLREG-LASSO', lassoR, X_poly_test, y_test)], axis=1)
    elif regular_type == 'elasticnet':
        elastic_net = ElasticNet(alpha=1, l1_ratio=0.5)
        elastic_net.fit(X_poly, y_train)
        metrics = pd.concat([metrics,
                             model_scoring_regression('POLREG-ELASTICNET', elastic_net, X_poly_test, y_test)], axis=1)
    elif regular_type in (None, 'None'):
        # BUG FIX: same dead 'None' string comparison as in lin_reg.
        print("Regularization not necessary")
    return model_fit, metrics
def model_scoring_classification(name, model, X_test, y_test, average="binary"):
    """
    Calculates model scoring for classification models.
    This function is called inside the classification model functions (see
    below) in order to generate the metrics dataset.
    :param str name: Column name
    :param model: Trained model to get metrics of
    :param X_test: X_test
    :param y_test: y_test
    :param average: average param for precision, recall and f1. If multiclass, use "weighted"
    :return: metrics dataframe
    """
    column = f'{name.upper()} (test data)'
    preds = model.predict(X_test)
    scores = [accuracy_score(y_test, preds),
              precision_score(y_test, preds, average=average),
              recall_score(y_test, preds, average=average),
              f1_score(y_test, preds, average=average)]
    # NOTE(review): the nested list reproduces the original index layout
    # (a one-level MultiIndex) so downstream pd.concat behaviour is unchanged.
    return pd.DataFrame({column: [f'{value:.10f}' for value in scores]},
                        index=[['Accuracy (TP + TN/TT)', 'Precision (TP/TP + FP)', 'Recall (TP/TP + FN)',
                                'F1 (har_mean Ac, Re)']])
def random_forest_classif(X_train, X_test, y_train, y_test):
    """
    Tests several parameters with a RandomForestClassifier model.
    GridSearchCV is used with cv=10 and the accuracy metric.
    :param X_train: X_train
    :param X_test: X_test
    :param y_train: y_train
    :param y_test: y_test
    :returns: trained model, metrics dataframe and grid used
    """
    param_grid = {
        'n_estimators': [100, 200, 400, 600, 800],
        'max_features': [2, 4, 6]
    }
    grid = GridSearchCV(RandomForestClassifier(), param_grid, cv=10,
                        scoring='accuracy', n_jobs=-1, verbose=1)
    model_fit = grid.fit(X_train, y_train)
    metrics = model_scoring_classification('RandomForest', model_fit, X_test,
                                           y_test, average="weighted")
    return model_fit, metrics, grid
def SVC_model(X_train, X_test, y_train, y_test):
    """
    Tests several parameters with a SVC model.
    GridSearchCV is used with cv=10 and the accuracy metric.
    :param X_train: X_train
    :param X_test: X_test
    :param y_train: y_train
    :param y_test: y_test
    :returns: trained model, metrics dataframe and grid used
    """
    param_grid = {
        "C": np.arange(0.1, 1, 0.3),
        "gamma": ["scale", "auto"],
        "coef0": [-10., -1., 10],
        "kernel": ["linear", "poly", "rbf"],
    }
    grid = GridSearchCV(SVC(), param_grid, cv=10, scoring='accuracy',
                        n_jobs=-1, verbose=1)
    model_fit = grid.fit(X_train, y_train)
    metrics = model_scoring_classification('SVC', model_fit, X_test, y_test,
                                           average="weighted")
    return model_fit, metrics, grid
def LogistRegress(X_train, X_test, y_train, y_test):
    """
    Tests several parameters with a LogisticRegression model.
    GridSearchCV is used with cv=10 and the accuracy metric.
    :param X_train: X_train
    :param X_test: X_test
    :param y_train: y_train
    :param y_test: y_test
    :return: fitted model, metrics and grid
    """
    param_grid = {
        'penalty': ['l2'],
        'C': np.arange(0.1, 4, 0.5),
        'solver': ['liblinear', 'newton-cg', 'lbfgs']
    }
    grid = GridSearchCV(LogisticRegression(), param_grid, cv=10,
                        scoring='accuracy', n_jobs=-1, verbose=1)
    model_fit = grid.fit(X_train, y_train)
    metrics = model_scoring_classification('LogisticRegression', model_fit,
                                           X_test, y_test, average="weighted")
    return model_fit, metrics, grid
# HELPER FUNCTION TO PICK, FROM A LIST (IN PRINCIPLE, ALWAYS A LIST OF SCORES), THE HIGHEST ELEMENT
def change_score(element, p=0.90):
"""
Function that iterates through the values parameter elements, converting them to positive (if necessary) and discarding those higher than the
parameter value "p" (typically 0.90).
It is useful to help the "knn" function (and the "corr_target" auxiliary function), with the aim of being able to identify the best score, and
also discarding high values through the "p" parameter and thus avoiding OVERFITTING.
:param element: List with elements (scores or correlations) to fit
:param p: if the user prefer, percent to discard the high score or correlation. Dont could be higher than 1.
:return: list with the changed values
"""
if p >= 1:
p = 0.90
lista = []
for i in element:
if i | |
# Copyright 2016-2018 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements an independent command reactor.
The reactor is intended to hold the definition and implementation of commands
that the user might want BertE to react to while parsing a pull request's
comments.
We distinguish two classes of "commands":
* Command: Perform an immediate action of any kind.
* Option: Set an option for the whole lifetime of a pull request.
Commands have the following attributes:
* handler: a function that takes a job as its first argument (and an
arbitrarily long list of arguments).
* help: a string intended to be displayed when the user requests the
command's manual.
* privileged: a boolean indicating if the command is to be performed
by privileged users only.
Options have an additional `default` attribute that describes the option's
default value.
The Reactor class implemented in this module is intended to be plugin-friendly.
If one wants to add new options or commands to the reactor, all he has to do
is register them using the corresponding class methods.
Examples:
>>> from bert_e.reactor import Reactor
>>> from types import SimpleNamespace
Register an unprivileged command to the 'say_hi' key, using the
function's docstring as the command's help:
>>> @Reactor.command
... def say_hi(job, *args):
... '''Print 'hi' in the console.'''
... print("hi")
Register a privileged command to the 'shutdown' key:
>>> @Reactor.command(privileged=True)
... def shutdown(job, *args):
... '''Shut the application down.'''
... print("Shutting down the application")
... raise SystemExit(0)
Register a privileged command using custom help string:
>>> @Reactor.command(privileged=True, help_='Execute a shell command.')
... def shell(job, *args):
... '''Run a command in the shell.'''
... print("Executing a privileged command.")
Register a boolean option that will be set in job.settings to `True`
when called by a privileged user. Default value is `None`:
>>> Reactor.add_option('bypass_checks', privileged=True)
Register an unprivileged option with special behavior:
>>> @Reactor.option(key='after_pull_request', default=set())
... def after_pr(job, pr_id):
... job.settings['after_pull_request'].add(pr_id)
...
To use the Reactor, instantiate it:
>>> reactor = Reactor()
>>> job = SimpleNamespace(settings={})
>>> reactor.handle_commands(job, '!do say_hi', prefix='!do')
hi
>>>
>>> reactor.handle_commands(job, '!do shell ls', prefix='!do')
Traceback (most recent call last):
[...]
bert_e.reactor.NotPrivileged
>>> reactor.handle_commands(job, '!do shell ls', prefix='!do',
... privileged=True)
Executing a privileged command.
Initializing a job's settings to the registered default values:
>>> reactor.init_settings(job)
>>> job.settings
{'after_pull_request': set(), 'bypass_checks': None}
Executing option-related commands:
>>> reactor.handle_options(job, '!do after_pull_request=4', '!do')
>>> reactor.handle_options(job, '!do bypass_checks', '!do',
... privileged=True)
>>> job.settings
{'after_pull_request': {'4'}, 'bypass_checks': True}
Note that you can pass mutable objects as default values; they are copied
during initialization of the settings:
>>> reactor.init_settings(job)
>>> job.settings
{'after_pull_request': set(), 'bypass_checks': None}
"""
# Please note that special effort was made to keep this module relatively
# independent from the rest of the application.
# Its only non-standard dependencies are the Dispatcher utility mixin,
# and the fact that a 'job' must have a 'settings' dictionary-like attribute.
import logging
import re
from collections import namedtuple
from copy import copy
from .lib.dispatcher import Dispatcher
class Error(Exception):
    """Common base class for every error raised by this module."""
class NotPrivileged(Error):
    """Raised when a non-privileged user calls a privileged command/option."""

    def __init__(self, keyword: str):
        # remember the offending keyword so the caller can report it
        self.keyword = keyword
        super().__init__()
class NotAuthored(Error):
    """Raised when a non-author uses an author-only command or option."""

    def __init__(self, keyword: str):
        # remember the offending keyword so the caller can report it
        self.keyword = keyword
        super().__init__()
class NotFound(Error):
    """Raised when the requested command or option doesn't exist."""

    def __init__(self, keyword: str):
        # remember the unknown keyword so the caller can report it
        self.keyword = keyword
        super().__init__()
LOG = logging.getLogger(__name__)
# Registered commands and options are stored as immutable records:
# - handler: callable taking the job as its first argument
# - help: help string shown to the user (may be None)
# - privileged: restricted to privileged users when True
# - authored: restricted to the pull request's author when True
# - default (Option only): value written by Reactor.init_settings
Command = namedtuple('Command', ['handler', 'help', 'privileged', 'authored'])
Option = namedtuple('Option', ['handler', 'default', 'help', 'privileged',
                               'authored'])
def normalize_whitespace(msg):
    """Collapse every run of whitespace in *msg* into a single space.

    Keeps help messages clean when they come from a function's docstring.
    ``None`` is passed through unchanged.
    """
    if msg is None:
        return None
    # str.split() with no argument already discards leading/trailing blanks
    return ' '.join(msg.split())
class Reactor(Dispatcher):
"""Central dispatching class for comment-based commands and options."""
@classmethod
def add_command(cls, key, handler, help_=None, privileged=False,
authored=False):
"""Register a new command to the reactor."""
help_ = normalize_whitespace(help_ or handler.__doc__)
cls.set_callback(key, Command(handler, help_, privileged, authored))
@classmethod
def command(cls, key=None, help_=None, privileged=False):
"""Decorator to register a command.
Args:
key: the key to register the command to. Defaults to the
decorated function's name.
help_: the help message of the command. Defaults to the
decorated function's docstring.
privileged: whether the command is privileged or not. Defaults to
False.
"""
if callable(key):
# the decorator was called with the @Reactor.command syntax
func = key
cls.add_command(func.__name__, func, func.__doc__, False)
return func
# the decorator was called with the @Reactor.command(...) syntax
def decorator(func):
_key = key or func.__name__
_help = help_ or func.__doc__
cls.add_command(_key, func, _help, privileged)
return func
return decorator
@classmethod
def add_option(cls, key, help_=None, privileged=False, default=None,
authored=False):
"""Add a basic option to the reactor."""
def set_option(job, arg=True):
job.settings[key] = arg
help_ = normalize_whitespace(help_)
cls.set_callback(key, Option(set_option, default, help_, privileged,
authored))
@classmethod
def option(cls, key=None, default=None, help_=None, privileged=False,
authored=False):
"""Decorator to register an option handler.
Args:
default: the setting's default value. Defaults to None.
See Reactor.commands() for detail on other args.
"""
if callable(key):
# The decorator was called with the @Reactor.option syntax
func = key
help_ = normalize_whitespace(help_ or func.__doc__)
cls.set_callback(
func.__name__, Option(func, default, help_, privileged,
authored)
)
return func
# the decorator was called with the @Reactor.option(...) syntax
def decorator(func):
_key = key or func.__name__
_help = normalize_whitespace(help_ or func.__doc__)
cls.set_callback(_key, Option(func, default, _help, privileged,
authored))
return func
return decorator
@classmethod
def get_options(cls):
"""Return a dictionary depicting currently registered options."""
return {key: val for key, val in cls.__callbacks__.items()
if isinstance(val, Option)}
@classmethod
def get_commands(cls):
"""Return a dictionary depicting currently registered commands."""
return {key: val for key, val in cls.__callbacks__.items()
if isinstance(val, Command)}
def init_settings(self, job):
"""Initialize a job's settings to the registered options' default
values.
"""
for key, option in self.get_options().items():
job.settings[key] = copy(option.default)
def handle_options(self, job, text, prefix, privileged=False,
authored=False):
"""Find option calls in given text string, and execute the
corresponding option handlers if any is found.
An option declaration can be on the following forms:
{prefix}option1=val1 option2
{prefix}: option1=val1, option2
{prefix}: option1=val1 - option2
The text is ignored if:
* the option declaration is actually a command call,
* there is no option declaration in it.
Args:
job: the job to run the handlers on.
text: the text to look for option calls in.
prefix: the prefix of commands.
privileged: run the option handler in privileged mode. Defaults to
False.
Raises:
NotFound: if the option declaration has the right syntax but calls
an unknown option.
NotPrivileged: when a privileged option declaration is found
and the method is called with privileged=False.
NotAuthored: when an authored option declaration is found and the
method is called with authored=False.
"""
raw = text.strip()
canonical_raw = None
canonical_prefix = None
if raw.startswith(prefix):
canonical_raw = raw
canonical_prefix = prefix
elif re.match('^/[\w=]+([\s,.\-:;|+]+/[\w=]+)*\s*$', raw):
canonical_raw = " " + raw
canonical_prefix = ""
if not canonical_raw:
return
LOG.debug('Found a potential option: %r', raw)
cleaned = re.sub(r'[,.\-/:;|+]', ' ',
canonical_raw[len(canonical_prefix):])
match = re.match(r'\s*(?P<keywords>(\s+[\w=]+)+)\s*$', cleaned)
if not match:
LOG.debug('Ignoring comment. Unknown format')
return
keywords = match.group('keywords').strip().split()
LOG.debug('checking keywords %s', keywords)
for idx, kwd in enumerate(keywords):
key, *args = kwd.split('=')
option = self.dispatch(key)
if option is None:
raise NotFound(key)
if not isinstance(option, Option):
if idx == 0:
# It's a command, ignore it
return
else:
raise NotFound(key)
if option.privileged and not privileged:
raise NotPrivileged(key)
if option.authored and not authored:
raise NotAuthored(key)
# Everything is okay, apply the option
option.handler(job, *args)
def handle_commands(self, job, text, prefix, privileged=False):
"""Find a command call in given text string, and execute the
corresponding handler if any is found.
An command call can be on the following forms:
{prefix} command arg1 arg2 ...
The text is ignored if:
* the command | |
<gh_stars>1-10
# import all the required files i.e. numpy , pandas and math library
import numpy as np
import pandas as pd
from pandas import DataFrame , Series
import math
# All the indicators are defined and arranged in Alphabetical order
# ------------------> A <------------------------
# [0] __ Average True Range (ATR)
# Moving Average of True Range(TR)
def atr(data: DataFrame, period: int = 14) -> Series:
    """Average True Range: a simple moving average of the True Range."""
    true_range = tr(data)
    smoothed = true_range.rolling(center=False, window=period,
                                  min_periods=1).mean()
    return pd.Series(smoothed, name=f'{period} ATR')
# ------------------> D <------------------------
# [0] __ Double Exponential Moving Average (DEMA)
# 2 * EWMA - ewm(EWMA)
def dema(data, period: int = 10, column: str = 'close', adjust: bool = True) -> Series:
    """Double Exponential Moving Average: 2*EMA - EMA(EMA).

    Subtracting the doubly-smoothed component reduces the lag of a
    single EMA.

    Args:
        data: input DataFrame.
        period: EMA span.
        column: price column. BUGFIX: previously accepted but ignored
            (the inner ema() always ran on 'close'); now forwarded.
        adjust: pandas ewm adjust flag. BUGFIX: now also forwarded to the
            inner EMA instead of only to the second smoothing pass.

    Returns:
        Series named '{period}_DEMA'.
    """
    single = ema(data, period, column, adjust)
    DEMA = 2 * single - single.ewm(span=period, adjust=adjust).mean()
    return pd.Series(DEMA, name=f'{period}_DEMA')
# ------------------> E <------------------------
# [0] __ Exponential Weighted Moving Average (EWMA) or Exponential Moving Average(EMA)
# Exponential average of prev n day prices
def ema(data, period: int = 10, column: str = 'close', adjust: bool = True) -> Series:
    """Exponential moving average of *column*, using *period* as the span."""
    smoothed = data[column].ewm(span=period, adjust=adjust).mean()
    return smoothed.rename(f'{period}_EMA')
# [0] __ Kaufman Efficiency indicator (KER) or (ER)
# change in price / volatility Here change and volatility are absolute
def er(data, period: int = 10, column: str = 'close') -> Series:
    """Kaufman Efficiency Ratio: |net change| / sum of |bar-to-bar changes|."""
    net_change = data[column].diff(period).abs()
    total_movement = (
        data[column].diff().abs().rolling(window=period, min_periods=1).sum()
    )
    return pd.Series(net_change / total_movement, name=f'{period}_ER')
# [0] __ Elastic Volume Weighted Moving Average (EVWMA)
# x is ((volume sum for n period) - volume ) divided by (volume sum for n period)
# y is volume * close / (volume sum for n period)
def evwma(data, period: int = 20) -> Series:
    """Elastic Volume Weighted Moving Average.

    Recursive average where each step keeps a volume-based fraction of the
    previous value and adds the volume-weighted price contribution:

        EVWMA[t] = EVWMA[t-1] * (vol_sum - volume[t]) / vol_sum
                   + volume[t] * close[t] / vol_sum

    Rows where either factor is 0 reset the average to 0 (original
    behavior, preserved).

    Args:
        data: DataFrame with 'volume' and 'close' columns.
        period: rolling window used for the volume sum.

    Returns:
        Series named '{period}_EVWMA', indexed like *data*.
    """
    vol_sum = data["volume"].rolling(window=period, min_periods=1).sum()
    decay = (vol_sum - data["volume"]) / vol_sum
    contribution = (data["volume"] * data["close"]) / vol_sum
    values = [0]
    # BUGFIX: .items() replaces .iteritems(), which was removed in
    # pandas 2.0; the loop variables no longer shadow the source Series.
    for (_, d), (_, c) in zip(decay.fillna(0).items(), contribution.items()):
        if d == 0 or c == 0:
            values.append(0)
        else:
            values.append(values[-1] * d + c)
    return pd.Series(values[1:], index=data.index, name=f'{period}_EVWMA')
# [0] __ Elastic Volume Weighted Moving average convergence divergence (EV_MACD)
# MACD calculation on basis of Elastic Volume Weighted Moving average (EVWMA)
def ev_macd(data: DataFrame, period_fast: int = 20, period_slow: int = 40,
            signal: int = 9, adjust: bool = True) -> DataFrame:
    """MACD computed on EVWMA lines instead of plain EMAs."""
    fast_line = evwma(data, period_fast)
    slow_line = evwma(data, period_slow)
    macd_line = pd.Series(fast_line - slow_line, name="EV MACD")
    signal_line = macd_line.ewm(
        ignore_na=False, span=signal, adjust=adjust
    ).mean()
    return pd.concat([macd_line, signal_line.rename("SIGNAL")], axis=1)
# ------------------> F <------------------------
# [0] __ Fractal Adaptive Moving Average (FRAMA)
# TODO
def FRAMA(data: DataFrame, period: int = 16, batch: int=10) -> Series:
assert period % 2 == 0, print("FRAMA period must be even")
c = data.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=data.index,
name= f'{period} FRAMA'
)
# ------------------> H <------------------------
# [0] __ Hull Moving Average (HMA)
# wma of change in wma where change in wma is 2 * (wma half period) - (wma full period)
def hma(data, period: int = 16) -> Series:
    """Hull Moving Average: WMA(2*WMA(n/2) - WMA(n)) smoothed over sqrt(n).

    NOTE: writes an intermediate 'deltawma' column into *data* (original
    behavior), because wma() reads its input from a named column.
    """
    half_period = int(period / 2)
    smooth_period = int(math.sqrt(period))
    data["deltawma"] = 2 * wma(data, period=half_period) - wma(data, period=period)
    hull = wma(data, column="deltawma", period=smooth_period)
    return pd.Series(hull, name=f'{period}_HMA')
# ------------------> K <------------------------
# [0] __ Kaufman's Adaptive Moving Average (KAMA)
# first KAMA is SMA
# Current KAMA = Previous KAMA + smoothing_constant * (Price - Previous KAMA)
def kama(data, er_: int = 10, ema_fast: int = 2,
         ema_slow: int = 30, period: int = 20,
         column: str = 'close') -> Series:
    """Kaufman's Adaptive Moving Average.

    Seeds with the SMA and then updates recursively:

        KAMA[t] = KAMA[t-1] + sc[t] * (price[t] - KAMA[t-1])

    where the smoothing constant sc blends a fast and a slow EMA alpha by
    the efficiency ratio.

    Args:
        data: input DataFrame.
        er_: efficiency-ratio period. BUGFIX: previously ignored — er()
            was always called with its defaults; now forwarded along with
            *column*. Defaults are unchanged, so default calls behave
            exactly as before.
        ema_fast: fast EMA period for the smoothing constant.
        ema_slow: slow EMA period for the smoothing constant.
        period: SMA period used to seed the recursion.
        column: price column to use.

    Returns:
        Series named '{period}_KAMA'; None until the seed SMA is available.
    """
    efficiency = er(data, er_, column)
    fast_alpha = 2 / (ema_fast + 1)
    slow_alpha = 2 / (ema_slow + 1)
    sc = pd.Series(
        (efficiency * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
        name="smoothing_constant",
    )
    seed = pd.Series(data[column].rolling(period).mean(), name="SMA")
    values = []
    # BUGFIX: .items() replaces .iteritems(), removed in pandas 2.0.
    for (_, s), (_, ma), (_, price) in zip(
        sc.items(), seed.shift().items(), data[column].items()
    ):
        try:
            values.append(values[-1] + s * (price - values[-1]))
        except (IndexError, TypeError):
            # No previous KAMA yet (first row, or still in the None
            # warm-up): seed from the shifted SMA once it exists.
            if pd.notnull(ma):
                values.append(ma + s * (price - ma))
            else:
                values.append(None)
    # Return a proper Series instead of smuggling it through an element
    # of the SMA Series as the original did.
    return pd.Series(values, index=seed.index, name=f'{period}_KAMA')
# ------------------> M <------------------------
# [0] __ Moving average convergence divergence (MACD)
# MACD is Difference of ema fast and ema slow
# Here fast period is 12 and slow period is 26
# MACD Signal is ewm of MACD
def macd(data, period_fast: int = 12, period_slow: int = 26,
         signal: int = 9, column: str = "close", adjust: bool = True
         ) -> DataFrame:
    """MACD: fast EMA minus slow EMA, its signal line, and their difference.

    Returns a DataFrame with columns
    ['diff MACD_MSIGNAL', 'MACD', '{signal}_SIGNAL'].
    """
    def _ewm(series, span, name):
        # Shared exponential smoothing used for both EMAs and the signal.
        return pd.Series(
            series.ewm(ignore_na=False, span=span, adjust=adjust).mean(),
            name=name,
        )

    fast = _ewm(data[column], period_fast, f'{period_fast}_EMA_fast')
    slow = _ewm(data[column], period_slow, f'{period_slow}_EMA_slow')
    macd_line = pd.Series(fast - slow, name='MACD')
    signal_line = _ewm(macd_line, signal, f'{signal}_SIGNAL')
    histogram = pd.Series(macd_line - signal_line, name="diff MACD_MSIGNAL")
    return pd.concat([histogram, macd_line, signal_line], axis=1)
# [0] __ Market momentum (MOM)
def mom(data: DataFrame, period: int = 10, column: str = "close") -> Series:
    """Momentum: the price change over *period* bars."""
    return data[column].diff(period).rename(f'{period}_MOM')
# [0] __ Moving Volume Weighted Average Price (MVWAP)
# SMA of (close * volume ) divided by SMA of volume
def mvwap(data: DataFrame, period: int = 9) -> Series:
    """Moving VWAP: SMA(close * volume) / SMA(volume).

    NOTE: writes a 'cv' helper column into *data* (original behavior),
    since sma() reads its input from a named column.
    """
    data["cv"] = data["close"] * data["volume"]
    price_leg = sma(data, period=period, column="cv")
    volume_leg = sma(data, period=period, column="volume")
    return pd.Series(price_leg / volume_leg, name="MVWAP.")
# ------------------> P <------------------------
# ------------|| Pivot ||------------------------
# [0] __ Pivot Camarilla
# TODO
def pivot_camarilla(data: DataFrame) -> DataFrame:
    """Camarilla pivot points computed from the previous bar.

    Levels offset the previous close by the Camarilla multipliers
    1.1/12, 1.1/6, 1.1/4 and 1.1/2 of the previous range — subtracted
    for supports, added for resistances.

    Returns:
        DataFrame with columns pivot, s1..s4, r1..r4 (first row is NaN
        because of the shift).
    """
    df_ = data.shift()
    pivot = pd.Series(tp(df_), name="pivot")
    range_ = df_['high'] - df_['low']
    # BUGFIX: s1 previously used `+`, which made it identical to r1;
    # Camarilla supports subtract from the previous close.
    s1 = df_['close'] - (1.1 * range_ / 12)
    s2 = df_['close'] - (1.1 * range_ / 6)
    s3 = df_['close'] - (1.1 * range_ / 4)
    s4 = df_['close'] - (1.1 * range_ / 2)
    r1 = df_['close'] + (1.1 * range_ / 12)
    r2 = df_['close'] + (1.1 * range_ / 6)
    r3 = df_['close'] + (1.1 * range_ / 4)
    r4 = df_['close'] + (1.1 * range_ / 2)
    return pd.concat(
        [
            pivot,
            pd.Series(s1, name="s1"),
            pd.Series(s2, name="s2"),
            pd.Series(s3, name="s3"),
            pd.Series(s4, name="s4"),
            pd.Series(r1, name="r1"),
            pd.Series(r2, name="r2"),
            pd.Series(r3, name="r3"),
            pd.Series(r4, name="r4"),
        ],
        axis=1,
    )
# [0] __ Pivot Classic
# TODO
def pivot_classic(data: DataFrame) -> DataFrame:
    """Classic pivot points from the previous bar's pivot and range."""
    df_ = data.shift()
    pivot = pd.Series(tp(df_), name="pivot")
    range_ = df_["high"] - df_["low"]
    levels = {
        "s1": (pivot * 2) - df_["high"],
        "s2": pivot - range_,
        "s3": pivot - 2 * range_,
        "s4": pivot - 3 * range_,
        "r1": (pivot * 2) - df_["low"],
        "r2": pivot + range_,
        "r3": pivot + 2 * range_,
        "r4": pivot + 3 * range_,
    }
    return pd.concat(
        [pivot] + [series.rename(name) for name, series in levels.items()],
        axis=1,
    )
# [0] __ Pivot Demark
# TODO
def pivot_demark(data: DataFrame) -> DataFrame:
    """Demark pivot points from the previous bar.

    X depends on the previous bar's open/close relationship:
        close == open: X = H + L + 2C
        close >  open: X = 2H + L + C
        close <  open: X = H + 2L + C
    pivot = X/4, s1 = X/2 - H, r1 = X/2 - L.

    Returns:
        DataFrame with columns pivot, s1, r1, indexed like *data*
        (first row is NaN because of the shift).
    """
    df_ = data.shift()
    pivot, s1, r1 = [], [], []
    # BUGFIX: use positional .iloc access so non-integer (e.g. datetime)
    # indexes work; the original label lookup df_['open'][i] raised for
    # any index that is not a default RangeIndex.
    for i in range(len(df_)):
        open_ = df_['open'].iloc[i]
        close = df_['close'].iloc[i]
        high = df_['high'].iloc[i]
        low = df_['low'].iloc[i]
        if close == open_:
            x = high + low + 2 * close
        elif close > open_:
            x = 2 * high + low + close
        else:
            # Also reached on the NaN first row (all comparisons False).
            x = high + 2 * low + close
        pivot.append(x / 4)
        s1.append(x / 2 - high)
        r1.append(x / 2 - low)
    return pd.DataFrame({'pivot': pivot, 's1': s1, 'r1': r1},
                        index=data.index)
# [0] __ Pivot Fibonacci
# TODO
def pivot_fibonacci(data: DataFrame) -> DataFrame:
    """Fibonacci pivot points from the previous bar.

    Supports and resistances offset the pivot by 0.382, 0.618, 1.0 and
    1.382 times the previous range — supports subtract, resistances add.

    Returns:
        DataFrame with columns pivot, s1..s4, r1..r4 (first row is NaN
        because of the shift).
    """
    df_ = data.shift()
    pivot = pd.Series(tp(df_), name="pivot")
    range_ = df_["high"] - df_["low"]
    s1 = pivot - (range_ * 0.382)
    s2 = pivot - (range_ * 0.618)
    s3 = pivot - range_
    # BUGFIX: s4 previously *added* 1.382 * range, duplicating r4; a
    # support extension must lie below the pivot.
    s4 = pivot - (range_ * 1.382)
    r1 = pivot + (range_ * 0.382)
    r2 = pivot + (range_ * 0.618)
    r3 = pivot + range_
    r4 = pivot + (range_ * 1.382)
    return pd.concat(
        [
            pivot,
            pd.Series(s1, name="s1"),
            pd.Series(s2, name="s2"),
            pd.Series(s3, name="s3"),
            pd.Series(s4, name="s4"),
            pd.Series(r1, name="r1"),
            pd.Series(r2, name="r2"),
            pd.Series(r3, name="r3"),
            pd.Series(r4, name="r4"),
        ],
        axis=1,
    )
# [0] __ Pivot Traditional
# TODO
def pivot_traditional(data: DataFrame) -> DataFrame:
    """Traditional pivot points from the previous bar, five levels each way."""
    df_ = data.shift()
    pivot = pd.Series(tp(df_), name="pivot")
    high = df_["high"]
    low = df_["low"]
    levels = {
        "s1": (pivot * 2) - high,
        "s2": pivot - (high - low),
        "s3": low - (2 * (high - pivot)),
        "s4": low - (3 * (high - pivot)),
        "s5": low - (4 * (high - pivot)),
        "r1": (pivot * 2) - low,
        "r2": pivot + (high - low),
        "r3": high + (2 * (pivot - low)),
        "r4": high + (3 * (pivot - low)),
        "r5": high + (4 * (pivot - low)),
    }
    return pd.concat(
        [pivot] + [series.rename(name) for name, series in levels.items()],
        axis=1,
    )
# [0] __ Pivot Woodie
# TODO
def pivot_woodie(data: DataFrame) -> | |
'Image Sequence',
'Flame Render',
'Flame Batch File',
]
flameMenuApp.__init__(self, framework)
self.connector = connector
# app defaults
if not self.prefs.master.get(self.name):
self.prefs['show_all'] = True
self.prefs['current_page'] = 0
self.prefs['menu_max_items_per_page'] = 128
    def __getattr__(self, name):
        """Resolve unknown attribute names into menu-callback closures.

        Flame menu items can only invoke attributes by name, so entities
        registered in self.dynamic_menu_data (keyed by str(id(entity)))
        are resolved here into closures dispatching on the entity's
        'caller' tag.
        """
        def method(*args, **kwargs):
            entity = self.dynamic_menu_data.get(name)
            if entity:
                # 'caller' records which menu builder registered the entry
                # and therefore which action a click should trigger.
                if entity.get('caller') == 'build_addremove_menu':
                    self.update_loader_list(entity)
                elif entity.get('caller') == 'build_batch_loader_menu':
                    self.load_into_batch(entity)
                elif entity.get('caller') == 'flip_show_latest':
                    self.flip_latest(entity)
                elif entity.get('caller') == 'fold_step_entity':
                    self.fold_step_entity(entity)
                elif entity.get('caller') == 'fold_task_entity':
                    self.fold_task_entity(entity)
            # Rebuild menus after the action has been applied.
            self.rescan()
        return method
def build_menu(self):
if not self.connector.sg_user:
return None
if not self.connector.sg_linked_project_id:
return None
batch_name = self.flame.batch.name.get_value()
tasks = []
cached_tasks = self.connector.cache_retrive_result('current_tasks')
if not isinstance(cached_tasks, list):
return []
for cached_task in cached_tasks:
if not cached_task.get('entity'):
continue
tasks.append(cached_task)
entities_id_list = [task.get('entity').get('id') for task in tasks]
add_menu_list = []
if (('additional menu ' + batch_name) in self.prefs.keys()) and self.prefs.get('additional menu ' + batch_name):
add_menu_list = self.prefs.get('additional menu ' + batch_name)
for index, stored_entity in enumerate(add_menu_list):
stored_entity_type = stored_entity.get('type', 'Shot')
stored_entity_id = stored_entity.get('id', 0)
if not stored_entity_id in entities_id_list:
add_menu_list.pop(index)
if not add_menu_list:
entity = {}
for task in tasks:
current_entity = task.get('entity')
if current_entity:
if current_entity.get('name') == batch_name:
entity = current_entity
break
if entity:
self.update_loader_list(entity)
add_menu_list = self.prefs.get('additional menu ' + batch_name)
else:
self.prefs['additional menu ' + batch_name] = []
project_id = self.connector.sg_linked_project_id
entity = {}
for task in tasks:
current_entity = task.get('entity')
if current_entity:
if current_entity.get('name') == batch_name:
entity = current_entity
break
if entity:
self.update_loader_list(entity)
add_menu_list = self.prefs.get('additional menu ' + batch_name)
menus = []
menus.append(self.build_addremove_menu())
for entity in add_menu_list:
batch_loader_menu = self.build_batch_loader_menu(entity)
if batch_loader_menu:
menus.append(batch_loader_menu)
return menus
    def build_addremove_menu(self):
        """Build the Add/Remove menu: a paginated list of entities that can
        be toggled in or out of the batch loader list for the current batch.

        Returns None when no Shotgun user or linked project is available,
        otherwise a menu dict with a 'name' and a list of 'actions'.
        """
        if not self.connector.sg_user:
            return None
        if not self.connector.sg_linked_project:
            return None
        # NOTE(review): flame_project_name is assigned but never used.
        flame_project_name = self.flame.project.current_project.name
        batch_name = self.flame.batch.name.get_value()
        # Entities already in the loader list get a '*' marker below.
        entities_to_mark = []
        batch_loader_additional = self.prefs.get('additional menu ' + batch_name)
        for item in batch_loader_additional:
            entities_to_mark.append(item.get('id'))
        menu = {'actions': []}
        menu['name'] = '- ' + self.menu_group_name + ' Add/Remove'
        menu_item = {}
        menu_item['name'] = '~ Rescan'
        menu_item['execute'] = self.rescan
        menu['actions'].append(menu_item)
        menu_item = {}
        if self.prefs['show_all']:
            menu_item['name'] = '~ Show Assigned Only'
        else:
            menu_item['name'] = '~ Show All Avaliable'
        menu_item['execute'] = self.flip_assigned
        menu['actions'].append(menu_item)
        user_only = not self.prefs['show_all']
        filter_out = ['Project', 'Sequence']
        found_entities = self.get_entities(user_only, filter_out)
        # NOTE(review): len(menu) counts the dict's keys (2) and is used as
        # an estimate of the number of control rows at the top of the menu.
        menu_ctrls_len = len(menu)
        menu_lenght = menu_ctrls_len
        menu_lenght += len(found_entities.keys())
        for entity_type in found_entities.keys():
            menu_lenght += len(found_entities.get(entity_type))
        max_menu_lenght = self.prefs.get('menu_max_items_per_page')
        menu_main_body = []
        for index, entity_type in enumerate(sorted(found_entities.keys())):
            # One non-interactive header row per entity type.
            menu_item = {}
            menu_item['name'] = '- [ ' + entity_type + 's ]'
            menu_item['execute'] = self.rescan
            menu_main_body.append(menu_item)
            entities_by_name = {}
            for entity in found_entities[entity_type]:
                entities_by_name[entity.get('code')] = entity
            for entity_name in sorted(entities_by_name.keys()):
                entity = entities_by_name.get(entity_name)
                menu_item = {}
                if entity.get('id') in entities_to_mark:
                    menu_item['name'] = ' * ' + entity.get('code')
                else:
                    menu_item['name'] = ' ' + entity.get('code')
                # Register the entity so __getattr__ can resolve the click
                # back to update_loader_list (keyed by str(id(entity))).
                entity['caller'] = inspect.currentframe().f_code.co_name
                self.dynamic_menu_data[str(id(entity))] = entity
                menu_item['execute'] = getattr(self, str(id(entity)))
                menu_main_body.append(menu_item)
        if menu_lenght < max_menu_lenght:
            # controls and entites fits within menu size
            # we do not need additional page switch controls
            for menu_item in menu_main_body:
                menu['actions'].append(menu_item)
        else:
            # round up number of pages and get current page
            num_of_pages = ((menu_lenght) + max_menu_lenght - 1) // max_menu_lenght
            curr_page = self.prefs.get('current_page')
            # decorate top with move backward control
            # if we're not on the first page
            if curr_page > 0:
                menu_item = {}
                menu_item['name'] = '<<[ prev page ' + str(curr_page) + ' of ' + str(num_of_pages) + ' ]'
                menu_item['execute'] = self.page_bkw
                menu['actions'].append(menu_item)
            # calculate the start and end position of a window
            # and append items to the list
            menu_used_space = menu_ctrls_len + 2 # two more controls for page flip
            window_size = max_menu_lenght - menu_used_space
            start_index = window_size*curr_page + min(1*curr_page, 1)
            end_index = window_size*curr_page+window_size + ((curr_page+1) // num_of_pages)
            for menu_item in menu_main_body[start_index:end_index]:
                menu['actions'].append(menu_item)
            # decorate bottom with move forward control
            # if we're not on the last page
            if curr_page < (num_of_pages - 1):
                menu_item = {}
                menu_item['name'] = '[ next page ' + str(curr_page+2) + ' of ' + str(num_of_pages) + ' ]>>'
                menu_item['execute'] = self.page_fwd
                menu['actions'].append(menu_item)
        return menu
def build_batch_loader_menu(self, entity):
if not entity.get('code'):
entity['code'] = entity.get('name', 'no_name')
entity_type = entity.get('type')
entity_id = entity.get('id')
entity_key = (entity_type, entity_id)
if entity_key not in self.prefs.keys():
self.prefs[entity_key] = {}
if 'showLatest' not in self.prefs[entity_key].keys():
self.prefs[entity_key]['showLatest'] = True
cached_tasks_query = self.connector.async_cache.get('current_tasks')
cached_tasks_by_entity = cached_tasks_query.get('by_entity') if cached_tasks_query else False
tasks = cached_tasks_by_entity.get(entity_key, []) if cached_tasks_by_entity else []
tasks_by_id = cached_tasks_query.get('result') if cached_tasks_query else {}
cached_versions_query = self.connector.async_cache.get('current_versions')
cached_versions_by_entity = cached_versions_query.get('by_entity') if cached_versions_query else False
versions = cached_versions_by_entity.get(entity_key, []) if cached_versions_by_entity else []
cached_pbfiles_query = self.connector.async_cache.get('current_pbfiles')
cached_pbfiles_by_entity = cached_pbfiles_query.get('by_entity') if cached_pbfiles_query else False
publishes = cached_pbfiles_by_entity.get(entity_key, []) if cached_pbfiles_by_entity else []
cached_pbfiles_by_id = cached_pbfiles_query.get('result') if cached_pbfiles_query else {}
cached_tasks_query = self.connector.async_cache.get('current_tasks')
current_tasks_by_id = cached_tasks_query.get('result') if cached_tasks_query else {}
menu = {}
menu['name'] = '- ' + chr(127) + entity.get('code') + ':'
menu['actions'] = []
menu_item = {}
menu_item['name'] = '~ Rescan'
menu_item['execute'] = self.rescan
menu['actions'].append(menu_item)
menu_item = {}
show_latest_entity = dict(entity)
show_latest_entity['caller'] = 'flip_show_latest'
if self.prefs[entity_key]['showLatest']:
menu_item['name'] = '~ Show All Versions'
else:
menu_item['name'] = '~ Show Latest Versions'
self.dynamic_menu_data[str(id(show_latest_entity))] = show_latest_entity
menu_item['execute'] = getattr(self, str(id(show_latest_entity)))
menu['actions'].append(menu_item)
# for the loader we're only interested in versions with published files
# versions (should not but) might contain published files from other entities
# if it is the case we should add them to out published files group
# in the same pass we can split the versions into two kinds -
# versions with tasks and without tasks and filter out versions
# without published files at the same time
taskless_versions = []
versions_with_tasks = []
pbfiles_by_id = {p.get('id'):p for p in publishes}
for version in versions:
if not version.get('sg_task.Task.id'):
if version.get('published_files'):
taskless_versions.append(version)
else:
if version.get('published_files'):
versions_with_tasks.append(version)
version_pbfiles = version.get('published_files')
for version_pbfile in version_pbfiles:
pbfile_id = version_pbfile.get('id')
if pbfile_id not in pbfiles_by_id:
pbfile = cached_pbfiles_by_id.get(pbfile_id)
if pbfile:
pbfiles_by_id[pbfile_id] = pbfile
# remove published files with type not listed in types_to_include
for pbfile_id in pbfiles_by_id.keys():
pbfile = pbfiles_by_id.get(pbfile_id)
published_file_type = pbfile.get('published_file_type')
if not published_file_type:
del pbfiles_by_id[pbfile_id]
continue
published_file_type_name = published_file_type.get('name')
if published_file_type_name not in self.types_to_include:
del pbfiles_by_id[pbfile_id]
continue
# versions without tasks will come first in list
taskless_pbfiles = []
for taskless_version in taskless_versions:
tv_pbfiles = taskless_version.get('published_files')
for tv_pbfile in tv_pbfiles:
if tv_pbfile.get('id') in pbfiles_by_id.keys():
taskless_pbfiles.append(pbfiles_by_id[tv_pbfile.get('id')])
if taskless_pbfiles:
task_key = ('Task', -1)
if task_key not in self.prefs[entity_key].keys():
self.prefs[entity_key][task_key] = {'isFolded': False}
fold_task_entity = dict(entity)
fold_task_entity['caller'] = 'fold_task_entity'
fold_task_entity['key'] = task_key
self.dynamic_menu_data[str(id(fold_task_entity))] = fold_task_entity
menu_item = {}
if self.prefs[entity_key][task_key].get('isFolded'):
menu_item['name'] = '+ [ ' + 'No Task' + ' ]'
else:
menu_item['name'] = '- [ ' + 'No Task' + ' ]'
menu_item['execute'] = getattr(self, str(id(fold_task_entity)))
menu['actions'].append(menu_item)
if not self.prefs[entity_key][task_key].get('isFolded'):
if self.prefs[entity_key]['showLatest']:
# show latest version from the (pbfile_id, pbfile_name) group
# collect published files from versions
pbfiles = []
for version in taskless_versions:
version_pbfiles = version.get('published_files')
for version_pbfile in version_pbfiles:
version_pbfile_id = version_pbfile.get('id')
pbfile = pbfiles_by_id.get(version_pbfile_id)
if pbfile: pbfiles.append(pbfile)
# find the latest (pbfile_id, pbfile_name) group
# and get the version linked to it
pbfiles_version_ids = set()
pbfile_type_id_name_group = {}
pbfile_type_id_name_datetime = {}
pbfile_type_id_name_count = {}
for pbfile in pbfiles:
pbfile_id = 0
pbfile_type = pbfile.get('published_file_type')
if isinstance(pbfile_type, dict):
pbfile_id = pbfile_type.get('id')
pbfile_name = pbfile.get('name')
pbfile_created_at = pbfile.get('created_at')
pbfile_type_id_name_key = (pbfile_id, pbfile_name)
if pbfile_type_id_name_key not in pbfile_type_id_name_group.keys():
pbfile_type_id_name_group[pbfile_type_id_name_key] = pbfile
pbfile_type_id_name_datetime[pbfile_type_id_name_key] = pbfile_created_at
pbfile_type_id_name_count[pbfile_type_id_name_key] = 1
else:
if pbfile_created_at > pbfile_type_id_name_datetime.get(pbfile_type_id_name_key):
pbfile_type_id_name_group[pbfile_type_id_name_key] = pbfile
pbfile_type_id_name_datetime[pbfile_type_id_name_key] = pbfile_created_at
pbfile_type_id_name_count[pbfile_type_id_name_key] += 1
taskless_versions_by_id = {v.get('id'):v for v in taskless_versions}
for key in pbfile_type_id_name_group.keys():
pbfile = pbfile_type_id_name_group.get(key)
version_id = pbfile.get('version.Version.id')
version = taskless_versions_by_id.get(version_id)
if not version: continue
version['caller'] = inspect.currentframe().f_code.co_name
menu_item = {}
if pbfile_type_id_name_count.get(key) > 1:
menu_item['name'] = ' '*6 + '* ' + version.get('code')
else:
menu_item['name'] = ' '*8 + version.get('code')
self.dynamic_menu_data[str(id(version))] = version
menu_item['execute'] = getattr(self, str(id(version)))
menu['actions'].append(menu_item)
else:
# show all versions as they are
for version in taskless_versions:
version['caller'] = inspect.currentframe().f_code.co_name
menu_item = {}
menu_item['name'] = ' '*8 + version.get('code')
self.dynamic_menu_data[str(id(version))] = version
menu_item['execute'] = getattr(self, str(id(version)))
menu['actions'].append(menu_item)
# build list of tasks from versions with tasks.
# add versions | |
<gh_stars>10-100
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class FcoeFwdVxPort(Base):
    """Configuration parameters for one FCoE Forwarder interface.
    The FcoeFwdVxPort class encapsulates a required fcoeFwdVxPort resource which will be retrieved from the server every time the property is accessed.
    """
    # NOTE: this class is machine-generated SDK code; keep edits in sync
    # with the generator's conventions.
    __slots__ = ()
    # Name of the REST resource this proxy wraps on the server.
    _SDM_NAME = 'fcoeFwdVxPort'
    # Maps the Python property names below to server-side attribute names.
    _SDM_ATT_MAP = {
        'B2bRxSize': 'b2bRxSize',
        'Enabled': 'enabled',
        'FabricName': 'fabricName',
        'FcMap': 'fcMap',
        'FdiscRejectInterval': 'fdiscRejectInterval',
        'FipAddressingMode': 'fipAddressingMode',
        'FipAdvertisementPeriod': 'fipAdvertisementPeriod',
        'FipClearVlinkOnExpire': 'fipClearVlinkOnExpire',
        'FipClearVlinkPortIds': 'fipClearVlinkPortIds',
        'FipEnabled': 'fipEnabled',
        'FipFkaDBit': 'fipFkaDBit',
        'FipPriority': 'fipPriority',
        'FipVersion': 'fipVersion',
        'FipVlanDiscovery': 'fipVlanDiscovery',
        'FipVnportKeepAlivePeriod': 'fipVnportKeepAlivePeriod',
        'FlogiRejectInterval': 'flogiRejectInterval',
        'LogoRejectInterval': 'logoRejectInterval',
        'Name': 'name',
        'NameServer': 'nameServer',
        'NameServerCommands': 'nameServerCommands',
        'ObjectId': 'objectId',
        'OperatingMode': 'operatingMode',
        'PlogiRejectInterval': 'plogiRejectInterval',
        'SwitchName': 'switchName',
        'VlanIds': 'vlanIds',
    }
    # This resource exposes no enum-restricted attributes.
    _SDM_ENUM_MAP = {
    }
def __init__(self, parent, list_op=False):
super(FcoeFwdVxPort, self).__init__(parent, list_op)
@property
def B2bRxSize(self):
# type: () -> int
"""
Returns
-------
- number: The buffer-to-buffer receive data field size in bytes.
"""
return self._get_attribute(self._SDM_ATT_MAP['B2bRxSize'])
@B2bRxSize.setter
def B2bRxSize(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['B2bRxSize'], value)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def FabricName(self):
# type: () -> str
"""
Returns
-------
- str: The Fabric Name value assigned to this interface.
"""
return self._get_attribute(self._SDM_ATT_MAP['FabricName'])
@FabricName.setter
def FabricName(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['FabricName'], value)
@property
def FcMap(self):
# type: () -> str
"""
Returns
-------
- str: The MAC Address Prefix associated to local FC fabric.
"""
return self._get_attribute(self._SDM_ATT_MAP['FcMap'])
@FcMap.setter
def FcMap(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['FcMap'], value)
@property
def FdiscRejectInterval(self):
# type: () -> int
"""
Returns
-------
- number: When the user enters N, IxNetwork FCF will send out one LS_RJT for every N-th FDISC request. If N = 0, no FDISC request will be rejected. If N = 1, every FDISC request will be rejected. If N = 10, then the first 9 FDISC requests will be accepted, and the 10th will be rejected.
"""
return self._get_attribute(self._SDM_ATT_MAP['FdiscRejectInterval'])
@FdiscRejectInterval.setter
def FdiscRejectInterval(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['FdiscRejectInterval'], value)
@property
def FipAddressingMode(self):
# type: () -> str
"""
Returns
-------
- str: The MAC Addressing Mode supported by this interface.
"""
return self._get_attribute(self._SDM_ATT_MAP['FipAddressingMode'])
@FipAddressingMode.setter
def FipAddressingMode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['FipAddressingMode'], value)
@property
def FipAdvertisementPeriod(self):
# type: () -> int
"""
Returns
-------
- number: The interval in milliseconds between periodic Discovery Advertisements.It is also used to monitor the interval between ENodes FIP Keep-Alive frames.A value of 0 milliseconds indicates that no Keep-Alive message is expected.
"""
return self._get_attribute(self._SDM_ATT_MAP['FipAdvertisementPeriod'])
@FipAdvertisementPeriod.setter
def FipAdvertisementPeriod(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['FipAdvertisementPeriod'], value)
@property
def FipClearVlinkOnExpire(self):
    # type: () -> bool
    """
    Returns
    -------
    - bool: Select this option to automatically send Clear Virtual Linkto ENodes and VN_Ports that forget to send periodic Keep-Alives on time.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['FipClearVlinkOnExpire']
    return self._get_attribute(attr_name)
@FipClearVlinkOnExpire.setter
def FipClearVlinkOnExpire(self, value):
    # type: (bool) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['FipClearVlinkOnExpire']
    self._set_attribute(attr_name, value)
@property
def FipClearVlinkPortIds(self):
    # type: () -> str
    """
    Returns
    -------
    - str: Property used to store port IDs for Clear Virtual Link.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['FipClearVlinkPortIds']
    return self._get_attribute(attr_name)
@FipClearVlinkPortIds.setter
def FipClearVlinkPortIds(self, value):
    # type: (str) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['FipClearVlinkPortIds']
    self._set_attribute(attr_name, value)
@property
def FipEnabled(self):
    # type: () -> bool
    """
    Returns
    -------
    - bool: Select this option to respond to general FIP requests.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['FipEnabled']
    return self._get_attribute(attr_name)
@FipEnabled.setter
def FipEnabled(self, value):
    # type: (bool) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['FipEnabled']
    self._set_attribute(attr_name, value)
@property
def FipFkaDBit(self):
    # type: () -> bool
    """
    Returns
    -------
    - bool: When the D bit is set, the VF_Port will not verify periodic receptionof ENode FIP Keep-Alive and VN_Port FIP Keep-Alive frames.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['FipFkaDBit']
    return self._get_attribute(attr_name)
@FipFkaDBit.setter
def FipFkaDBit(self, value):
    # type: (bool) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['FipFkaDBit']
    self._set_attribute(attr_name, value)
@property
def FipPriority(self):
    # type: () -> int
    """
    Returns
    -------
    - number: The FIP Priority value sent with Discovery Advertisements.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['FipPriority']
    return self._get_attribute(attr_name)
@FipPriority.setter
def FipPriority(self, value):
    # type: (int) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['FipPriority']
    self._set_attribute(attr_name, value)
@property
def FipVersion(self):
    # type: () -> str
    """
    Returns
    -------
    - str: The FIP version to use.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['FipVersion']
    return self._get_attribute(attr_name)
@FipVersion.setter
def FipVersion(self, value):
    # type: (str) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['FipVersion']
    self._set_attribute(attr_name, value)
@property
def FipVlanDiscovery(self):
    # type: () -> bool
    """
    Returns
    -------
    - bool: Select this option to respond to FIP VLAN Discovery requests.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['FipVlanDiscovery']
    return self._get_attribute(attr_name)
@FipVlanDiscovery.setter
def FipVlanDiscovery(self, value):
    # type: (bool) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['FipVlanDiscovery']
    self._set_attribute(attr_name, value)
@property
def FipVnportKeepAlivePeriod(self):
    # type: () -> int
    """
    Returns
    -------
    - number: The interval in milliseconds between periodic VN_Port FIP Keep-Alive frames.A value of 0 milliseconds indicates that no Keep-Alive message is expected.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['FipVnportKeepAlivePeriod']
    return self._get_attribute(attr_name)
@FipVnportKeepAlivePeriod.setter
def FipVnportKeepAlivePeriod(self, value):
    # type: (int) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['FipVnportKeepAlivePeriod']
    self._set_attribute(attr_name, value)
@property
def FlogiRejectInterval(self):
    # type: () -> int
    """
    Returns
    -------
    - number: When the user enters N, IxNetwork FCF will send out one LS_RJT for every N-th FLOGI request. If N = 0, no FLOGI request will be rejected. If N = 1, every FLOGI request will be rejected. If N = 10, then the first 9 FLOGI requests will be accepted, and the 10th will be rejected.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['FlogiRejectInterval']
    return self._get_attribute(attr_name)
@FlogiRejectInterval.setter
def FlogiRejectInterval(self, value):
    # type: (int) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['FlogiRejectInterval']
    self._set_attribute(attr_name, value)
@property
def LogoRejectInterval(self):
    # type: () -> int
    """
    Returns
    -------
    - number: When the user enters N, IxNetwork FCF will send out one LS_RJT for every N-th LOGO request. If N = 0, no LOGO request will be rejected. If N = 1, every LOGO request will be rejected. If N = 10, then the first 9 LOGO requests will be accepted, and the 10th will be rejected.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['LogoRejectInterval']
    return self._get_attribute(attr_name)
@LogoRejectInterval.setter
def LogoRejectInterval(self, value):
    # type: (int) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['LogoRejectInterval']
    self._set_attribute(attr_name, value)
@property
def Name(self):
    # type: () -> str
    """
    Returns
    -------
    - str: Name of range
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['Name']
    return self._get_attribute(attr_name)
@Name.setter
def Name(self, value):
    # type: (str) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['Name']
    self._set_attribute(attr_name, value)
@property
def NameServer(self):
    # type: () -> bool
    """
    Returns
    -------
    - bool: Select this option to respond to Name Service requests.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['NameServer']
    return self._get_attribute(attr_name)
@NameServer.setter
def NameServer(self, value):
    # type: (bool) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['NameServer']
    self._set_attribute(attr_name, value)
@property
def NameServerCommands(self):
    # type: () -> List[int]
    """
    Returns
    -------
    - list(number): Signifies the Name Server Commands that will be accepted by the forwarder.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['NameServerCommands']
    return self._get_attribute(attr_name)
@NameServerCommands.setter
def NameServerCommands(self, value):
    # type: (List[int]) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['NameServerCommands']
    self._set_attribute(attr_name, value)
@property
def ObjectId(self):
    # type: () -> str
    """
    Returns
    -------
    - str: Unique identifier for this object
    """
    # Read-only attribute: no setter is generated for ObjectId.
    attr_name = self._SDM_ATT_MAP['ObjectId']
    return self._get_attribute(attr_name)
@property
def OperatingMode(self):
    # type: () -> str
    """
    Returns
    -------
    - str: Describes the operating mode for this interface.
    """
    # Resolve the SDM attribute name once, then delegate to the generic reader.
    attr_name = self._SDM_ATT_MAP['OperatingMode']
    return self._get_attribute(attr_name)
@OperatingMode.setter
def OperatingMode(self, value):
    # type: (str) -> None
    # Write through the generic attribute setter using the SDM mapping.
    attr_name = self._SDM_ATT_MAP['OperatingMode']
    self._set_attribute(attr_name, value)
@property
def PlogiRejectInterval(self):
# type: () -> int
"""
Returns
-------
- number: When the user enters N, IxNetwork FCF will send out one LS_RJT for every N-th PLOGI request. If N = | |
matrix : array
Pearson coefficient correlation matrix
'''
print('Compute OPD-to-OPD Pearson correlation coefficient')
nimg = data.shape[0]
# pupil mask
if pupil_mask is None:
mask = (data[0] != 0)
else:
mask = (pupil_mask != 0)
# compute correlation matrix
t0 = time.time()
matrix_prs = np.full((nimg, nimg), np.nan)
for i in range(nimg):
# time calculation
t = time.time()
delta_t = (t - t0)/((i+1)**2/2)/60
time_left = (nimg**2/2 - (i+1)**2/2)*delta_t
print(' * i={0}, time left={1:.2f} min'.format(i, time_left))
for j in range(i+1):
img0 = data[i][mask]
img1 = data[j][mask]
coeff, p = pearsonr(img0, img1)
matrix_prs[i, j] = coeff
#save
if filename is not None:
fits.writeto(os.path.join(root, 'products', filename+'_prs.fits'), matrix_prs, overwrite=True)
return matrix_prs
def matrix_difference(root, data, pupil_mask=None, filename=None):
    '''Extract statistics from opd-to-opd differences

    For every pair (i, j) with j <= i, computes the peak-to-valley and
    the standard deviation of the difference between OPD maps i and j,
    restricted to the pupil.

    Parameters
    ----------
    root : str
        Path to the working directory
    data : array
        OPD maps cube
    pupil_mask : array
        Binary mask to hide parts of the pupil in the OPD
        maps. Default is None (derived from the first map)
    filename : str
        Base name of the files to save result. The _ptv and _std
        suffixes will be added to the base name for the PtV and
        standard deviation matrices respectively

    Returns
    -------
    matrix_diff_ptv, matrix_diff_std : array
        PtV and standard deviation correlation matrices
    '''
    print('Compute statistics on OPD-to-OPD differences')
    nimg = data.shape[0]
    # pupil mask: fall back to the non-zero pixels of the first map
    mask = (data[0] != 0) if pupil_mask is None else (pupil_mask != 0)
    # only the lower triangle is filled; the rest stays NaN
    matrix_diff_ptv = np.full((nimg, nimg), np.nan)
    matrix_diff_std = np.full((nimg, nimg), np.nan)
    t0 = time.time()
    for i in range(nimg):
        # progress estimate based on the triangular number of pairs done
        per_pair = (time.time() - t0) / ((i+1)**2/2) / 60
        time_left = (nimg**2/2 - (i+1)**2/2) * per_pair
        print(' * i={0}, time left={1:.2f} min'.format(i, time_left))
        for j in range(i+1):
            diff = (data[i] - data[j])[mask]
            matrix_diff_ptv[i, j] = diff.max() - diff.min()
            matrix_diff_std[i, j] = diff.std()
    # save
    if filename is not None:
        fits.writeto(os.path.join(root, 'products', filename+'_ptv.fits'), matrix_diff_ptv, overwrite=True)
        fits.writeto(os.path.join(root, 'products', filename+'_std.fits'), matrix_diff_std, overwrite=True)
    return matrix_diff_ptv, matrix_diff_std
def array_to_numpy(shared_array, shape, dtype):
    """Expose a shared raw buffer as a numpy array without copying.

    Returns None when shared_array is None. When a shape is given the
    returned array is a reshaped view over the same buffer.
    """
    if shared_array is None:
        return None
    flat = np.frombuffer(shared_array, dtype=dtype)
    return flat if shape is None else flat.reshape(shape)
def matrix_tpool_init(matrix_data_i, matrix_shape_i):
    # Pool initializer: stash the shared raw buffer and its shape in
    # module-level globals so each worker process can rebuild the matrix
    # locally instead of pickling it for every task.
    global matrix_data, matrix_shape
    matrix_data = matrix_data_i
    matrix_shape = matrix_shape_i
def matrix_tpool_process(diag):
    """Worker task: statistics of one sub-diagonal of the shared matrix.

    Parameters
    ----------
    diag : int
        Diagonal offset below the main diagonal (0 = main diagonal).

    Returns
    -------
    diag, mean, std : int, float, float
        The diagonal offset with the mean and standard deviation of the
        matrix values on that diagonal.
    """
    global matrix_data, matrix_shape
    # np.float and np.bool were deprecated in NumPy 1.20 and removed in
    # 1.24; use the explicit dtype / builtin instead.
    matrix = array_to_numpy(matrix_data, matrix_shape, np.float64)
    nimg = matrix.shape[-1]
    # boolean mask selecting the diag-th sub-diagonal
    mask = np.eye(nimg, k=-diag, dtype=bool)
    mean = matrix[mask].mean()
    std = matrix[mask].std()
    return diag, mean, std
def matrix_process(root, matrix, ncpu=1):
    '''Process a correlation matrix

    The processing computes the average and standard deviation of the
    matrix values along all the diagonals to extract statistics at
    different time scales

    Parameters
    ----------
    root : str
        Path to the working directory
    matrix : array
        Correlation matrix to be processed
    ncpu : int
        Number of CPUs to use. Default is 1

    Returns
    -------
    vec_mean : array
        Average of matrix values along all diagonals
    vec_std : array
        Standard deviation of matrix values along all diagonals
    '''
    print('Process matrix')
    nimg = matrix.shape[-1]
    # share the matrix with workers through a lock-free raw buffer
    matrix_data = mp.RawArray(ctypes.c_double, matrix.size)
    matrix_shape = matrix.shape
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; use the
    # explicit np.float64 dtype (matches ctypes.c_double above).
    matrix_np = array_to_numpy(matrix_data, matrix_shape, np.float64)
    matrix_np[:] = matrix
    pool = mp.Pool(processes=ncpu, initializer=matrix_tpool_init,
                   initargs=(matrix_data, matrix_shape))
    tasks = []
    for i in range(nimg):
        tasks.append(pool.apply_async(matrix_tpool_process, args=(i, )))
    pool.close()
    pool.join()
    vec_mean = np.zeros(nimg)
    vec_std = np.zeros(nimg)
    for task in tasks:
        idx, mean, std = task.get()
        vec_mean[idx] = mean
        vec_std[idx] = std
    del tasks
    return vec_mean, vec_std
def subtract_internal_turbulence(root=None, turb_sliding_mean=30, method='zernike',
nzern=80, filter_cutoff=40, pupil_mask=None,
turbulence_residuals=False,
psd_compute=True, psd_cutoff=40,
ncpa_sliding_mean=10, save_intermediate=False,
save_product=False, save_ncpa=True, test_mode=True):
'''Implements the subtraction of the internal turbulence in a long
OPD sequence
The subtract_turbulence() method estimates the contribution of the
internal turbulence in a sequence, subtracts it to the data and
calculates the final quasi-static NCPA variations. The procedure
is the following:
1. Compute a sliding mean of the OPD sequence over a given time
interval (turb_sliding_mean)
2. Subtract the sliding mean to the OPD sequence to isolate the
turbulence
3. Project the individual turbulence images on Zernike
polynomials (nzern)
4. Reconstruct the synthetic turbulence based on the projection
5. **Optional**: calculate residuals of the turbulence
(turbulence - reconstructed_turbulence) and compute their PSD
6. Subtract reconstructed turbulence to the original OPD sequence
7. Compute the PSD of the final sequence without turbulence
8. Subtract a sliding mean of ncpa_sliding_mean images to the
final sequence to measure the quasi-static NCPA
9. Compute the PSD of the quasi-static NCPA
Parameters
----------
root : str
Path to the working directory
turb_sliding_mean : int
Number of images over which the OPD maps will be averaged to
compute the sliding mean. Should be even. Default is 30
method : str
Method that will be used to estimate and subtract the turbulence.
Possible values are zernike or fft. Default is zernike
nzern: int
Number of Zernike modes to use for the projection of the
turbulence. Defaut is 80.
filter_cutoff : float
Spatial frequency used for the high-pass FFT filter when
method='fft'. Default is 40.
pupil_mask : array
Mask defining the pupil.
turbulence_residuals : bool
Compute the turbulence residuals and related statistics.
Default is False
psd_compute : bool
Perform all PSD computations. Can be disabled to save time.
Default is True.
psd_cutoff : float
Spatial frequency cutoff for the calculation of the turbulence
residuals PSD. Default is 40
ncpa_sliding_mean : int
Number of images over which the OPD maps will be averaged to
compute the sliding mean used for the final NCPA estimation.
Should be even. Default is 10
save_intermediate : bool
Save all intermediate data products. Default is False
save_product : bool
Save the OPD after turbulence subtraction. Default is False
save_ncpa : bool
Save final quasi-static NCPA cube after turbulence subtraction.
Default is False.
test_mode : bool
If True, limits the number of frames in the data to 100. Default is True
'''
log.info('Start turbulence subtraction')
if method.lower() == 'zernike':
suffix = 'method={:s}_smean={:03d}_nzern={:03d}'.format(method, turb_sliding_mean, nzern)
elif method.lower() == 'fft':
suffix = 'method={:s}_smean={:03d}_fcutoff={:.1f}'.format(method, turb_sliding_mean, filter_cutoff)
else:
raise ValueError('Unknown subtraction method {0}'.format(method))
# root
if root is None:
raise ValueError('root must contain the path to the data!')
# read data
log.info('Read data')
data = fits.getdata(root / 'products' / 'opd_cube.fits')
if test_mode:
data = data[0:100]
# pupil mask
if pupil_mask is None:
pupil_mask = (data[0] != 0)
else:
# hide values outside of the pupil
log.info('Hide values outside of the pupil')
for i in range(len(data)):
data[i] = data[i]*pupil_mask
# sliding mean over avg_time sec
log.info('Compute sliding mean')
data_sliding_mean = sliding_mean(root, data, nimg=turb_sliding_mean)
# subtract sliding mean to isolate turbulence
log.info('Subtract sliding mean')
turb = data - data_sliding_mean
# free memory
del data_sliding_mean
if save_intermediate:
fits.writeto(root / 'products' / 'sequence_turbulence_{:s}.fits'.format(suffix), turb, overwrite=True)
# compute PSD of turbulence
if psd_compute:
log.info('Compute PSD of turbulence')
psd_cube = compute_psd(root, turb, freq_cutoff=psd_cutoff, pupil_mask=pupil_mask, return_fft=False)
# integrate PSD of turbulence
psd_int, psd_bnds = integrate_psd(root, psd_cube, freq_cutoff=psd_cutoff)
# save as FITS table
dtype = np.dtype([('BOUNDS', 'f4', psd_bnds.shape), ('PSD', 'f4', psd_int.shape)])
rec = np.array([np.rec.array((psd_bnds, psd_int), dtype=dtype)])
fits.writeto(root / 'products' / 'sequence_turbulence_{:s}_psd.fits'.format(suffix), rec, overwrite=True)
# free memory
del psd_cube
# fit turbulence with Zernikes
if method.lower() == 'zernike':
log.info('Fit turbulence with Zernike')
basis, zern_coeff, turb_reconstructed = zernike_projection(root, turb, nzernike=nzern,
reconstruct=True, pupil_mask=pupil_mask)
# free memory
del basis
elif method.lower() == 'fft':
log.info('Fit turbulence with Fourier filtering')
# first remove some Zernike modes
basis, zern_coeff, turb_lf = zernike_projection(root, turb, nzernike=nzern,
reconstruct=True, pupil_mask=pupil_mask)
turb_hf = turb - turb_lf
turb_hf_filtered = ztools.fourier_filter(turb_hf, freq_cutoff=filter_cutoff, lowpass=True,
window='rect', mask=pupil_mask)
# reconstructed turbulence
turb_reconstructed = turb_lf + turb_hf_filtered
# free memory
del basis
if save_intermediate:
fits.writeto(root / 'products' / 'sequence_reconstructed_turbulence_{:s}.fits'.format(suffix), turb_reconstructed, overwrite=True)
# compute PSD of reconstructed turbulence
if psd_compute:
log.info('Compute PSD of reconstructed turbulence')
psd_cube = compute_psd(root, turb_reconstructed, freq_cutoff=psd_cutoff, pupil_mask=pupil_mask, return_fft=False)
# integrate PSD of residuals
psd_int, psd_bnds = integrate_psd(root, psd_cube, freq_cutoff=psd_cutoff)
# save as FITS table
dtype = np.dtype([('BOUNDS', 'f4', psd_bnds.shape), ('PSD', 'f4', psd_int.shape)])
rec = np.array([np.rec.array((psd_bnds, psd_int), dtype=dtype)])
fits.writeto(root / 'products' / 'sequence_reconstructed_turbulence_{:s}_psd.fits'.format(suffix), rec, overwrite=True)
# free | |
<gh_stars>0
# -*- coding: utf-8 -*-
#! \file ./tools/data/tr20140118.py
#! \author <NAME>, <jkucera AT redhat.com>
#! \stamp 2018-04-10 01:23:17 (UTC+01:00, DST+01:00)
#! \project passwd maintenance tools
#! \license MIT
#! \version 0.0.0
#! \fdesc Translation data.
#
def setup(t25l):
pofiles = t25l.PoFileSet()
po = t25l.PoFile.from_scratch("de")
po.additem("""\
#: libuser.c:91
#, c-format
msgid "%s: libuser initialization error:"
msgstr "%s: libuser Initialisierungs-Fehler:"
""")
po.additem("""\
#: libuser.c:267
msgid "Corrupted passwd entry."
msgstr "Beschädigte Passwort-Eintragung."
""")
po.additem("""\
#: libuser.c:284
msgid "Empty password."
msgstr "Leeres Passwort."
""")
po.additem("""\
#: libuser.c:305
msgid "Alternate authentication scheme in use."
msgstr "Alternatives Autentifizierungs-Muster wird verwendet"
""")
po.additem("""\
#: libuser.c:310
msgid "Password set, DES crypt."
msgstr "Passwort mit DES-Verschlüsselung gesetzt."
""")
po.additem("""\
#: libuser.c:323
#, c-format
msgid "No password set.\\n"
msgstr "Kein Passwort gesetzt.\\n"
""")
po.additem("""\
#: libuser.c:328
#, c-format
msgid "Unknown user.\\n"
msgstr "Unbekannter Benutzer.\\n"
""")
po.additem("""\
#: passwd.c:157
msgid "keep non-expired authentication tokens"
msgstr "behalte nicht-verfallende Authentifizierungs-Merkmale"
""")
po.additem("""\
#: passwd.c:159
msgid "delete the password for the named account (root only)"
msgstr "lösche das Passwort für das angegebene Konto (nur als root möglich)"
""")
po.additem("""\
#: passwd.c:162
msgid "lock the password for the named account (root only)"
msgstr "Das Kennwort für das angegebene Konto sperren (nur root)"
""")
po.additem("""\
#: passwd.c:165
msgid "unlock the password for the named account (root only)"
msgstr "Das Kennwort für das angegebene Konto entsperren (nur root)"
""")
po.additem("""\
#: passwd.c:168
msgid "expire the password for the named account (root only)"
msgstr "Das Kennwort für das angegebene Konto verfallen lassen (nur root)"
""")
po.additem("""\
#: passwd.c:177
msgid ""
"number of days warning users receives before password expiration (root only)"
msgstr ""
"Anzahl der Tage die Benutzer vor dem Ablauf des Passwortes gewarnt werden "
"soll (nur als root möglich)"
""")
po.additem("""\
#: passwd.c:183
msgid "report password status on the named account (root only)"
msgstr "melde Passwort Status des angegebenen Accounts (nur als root möglich)"
""")
po.additem("""\
#: passwd.c:266
#, c-format
msgid "%s: Cannot mix one of -l, -u, -d, -S and one of -i, -n, -w, -x.\\n"
msgstr ""
"%s: Die Parameter -l, -u, -d, -S und -i, -n, -w, -x können nicht kombiniert "
"werden.\\n"
""")
po.additem("""\
#: passwd.c:335
#, c-format
msgid "%s: Can not identify you!\\n"
msgstr "%s: Kann Sie nicht identifizieren!\\n"
""")
po.additem("""\
#: passwd.c:388
#, c-format
msgid "%s: SELinux denying access due to security policy.\\n"
msgstr ""
"%s: SELinux verweigert den Zugriff aufgrund der Sicherheitsrichtlinien.\\n"
""")
po.additem("""\
#: passwd.c:398
#, c-format
msgid "Locking password for user %s.\\n"
msgstr "Sperren Passwort für Benutzer %s.\\n"
""")
po.additem("""\
#: passwd.c:402 passwd.c:414 passwd.c:429 passwd.c:440 passwd.c:458
msgid "Success"
msgstr "Erfolgreich"
""")
po.additem("""\
#: passwd.c:410
#, c-format
msgid "Unlocking password for user %s.\\n"
msgstr "Entsperren Passwort für Benutzer %s.\\n"
""")
po.additem("""\
#: passwd.c:416
msgid "Unsafe operation (use -f to force)"
msgstr "Unsichere Operation (benutzen Sie -f zum Erzwingen)"
""")
po.additem("""\
#: passwd.c:425
#, c-format
msgid "Expiring password for user %s.\\n"
msgstr "Passwort für Benutzer %s verfallen lassen.\\n"
""")
po.additem("""\
#: passwd.c:437
#, c-format
msgid "Removing password for user %s.\\n"
msgstr "Entfernen Passwort für Benutzer %s.\\n"
""")
po.additem("""\
#: passwd.c:455
#, c-format
msgid "Adjusting aging data for user %s.\\n"
msgstr "justieren Verfallsdaten für Benutzer %s.\\n"
""")
po.additem("""\
#: passwd.c:471
#, c-format
msgid "Changing password for user %s.\\n"
msgstr "Ändern Passwort für Benutzer %s.\\n"
""")
po.additem("""\
#: passwd.c:553
#, c-format
msgid "%s: expired authentication tokens updated successfully.\\n"
msgstr ""
"%s: abgelaufene Authentifizierungs-Merkmale erfolgreich aktualisiert.\\n"
""")
po.additem("""\
#: passwd.c:556
#, c-format
msgid "%s: all authentication tokens updated successfully.\\n"
msgstr "%s: alle Authentifizierungs-Merkmale erfolgreich aktualisiert.\\n"
""")
pofiles.add(po)
po = t25l.PoFile.from_scratch("es")
po.additem("""\
#: libuser.c:300
msgid "Password set, SHA512 crypt."
msgstr "Contraseña establecida, cifrado SHA512."
""")
po.additem("""\
#: libuser.c:403
#, c-format
msgid "%s: user account has no support for password aging.\\n"
msgstr ""
"%s: la cuenta del usuario no tiene soporte para envejecimiento de contraseña."
"\\n"
""")
po.additem("""\
#: passwd.c:157
msgid "keep non-expired authentication tokens"
msgstr "mantener las marcas de autenticación no vencidos"
""")
po.additem("""\
#: passwd.c:165
msgid "unlock the password for the named account (root only)"
msgstr "desbloquear la contraseña para la cuenta indicada (solo root)"
""")
po.additem("""\
#: passwd.c:186
msgid "read new tokens from stdin (root only)"
msgstr "leer símbolos nuevos desde stdin (solo root)"
""")
po.additem("""\
#: passwd.c:193
msgid "[OPTION...] <accountName>"
msgstr "[OPCIÓN...] <accountName>"
""")
po.additem("""\
#: passwd.c:539
#, c-format
msgid "%s: unable to set failure delay: %s\\n"
msgstr "%s: no se pudo establecer la espera máxima para fallo: %s\\n"
""")
po.additem("""\
#: passwd.c:553
#, c-format
msgid "%s: expired authentication tokens updated successfully.\\n"
msgstr "%s: símbolos de autenticación vencidos actualizados con éxito.\\n"
""")
po.additem("""\
#: passwd.c:556
#, c-format
msgid "%s: all authentication tokens updated successfully.\\n"
msgstr "%s: todos los símbolos de autenticación se actualizaron con éxito.\\n"
""")
pofiles.add(po)
po = t25l.PoFile.from_scratch("gu")
po.additem("""\
#: passwd.c:162
msgid "lock the password for the named account (root only)"
msgstr "નામવાળા ખાતા માટે પાસવર્ડને તાળું મારો (માત્ર રુટ)"
""")
po.additem("""\
#: passwd.c:165
msgid "unlock the password for the named account (root only)"
msgstr "નામવાળા ખાતા માટે પાસવર્ડનું તાળું ખોલો (માત્ર રુટ)"
""")
po.additem("""\
#: passwd.c:168
msgid "expire the password for the named account (root only)"
msgstr "નામવાળા ખાતા માટે પાસવર્ડ નિવૃત્ત (માત્ર રુટ)"
""")
po.additem("""\
#: passwd.c:177
msgid ""
"number of days warning users receives before password expiration (root only)"
msgstr ""
"પાસવર્ડ સમયસમાપ્તિ પહેલાં વપરાશકર્તાઓ ચેતવણી મેળવે તે દિવસોની સંખ્યા (માત્ર "
"રુટ)"
""")
po.additem("""\
#: passwd.c:180
msgid ""
"number of days after password expiration when an account becomes disabled "
"(root only)"
msgstr ""
"પાસવર્ડ સમયસમાપ્ત થાય પછી જ્યારે ખાતું નિષ્ક્રિય બની જાય તે દિવસોની સંખ્યા "
"(માત્ર રુટ)"
""")
po.additem("""\
#: passwd.c:388
#, c-format
msgid "%s: SELinux denying access due to security policy.\\n"
msgstr "%s: સુરક્ષા પૉલીસિના કારણે SELinux વપરાશ નામંજૂર કરી રહ્યું છે.\\n"
""")
po.additem("""\
#: passwd.c:425
#, c-format
msgid "Expiring password for user %s.\\n"
msgstr "વપરાશકર્તા %s માટે પાસવર્ડ નિવૃત્ત થઇ રહ્યો છે.\\n"
""")
pofiles.add(po)
po = t25l.PoFile.from_scratch("hi")
po.additem("""\
#: libuser.c:403
#, c-format
msgid "%s: user account has no support for password aging.\\n"
msgstr ""
"%s: उपयोक्ता खाता के खाते में शब्दकूट एजिंग के लिये कोई समर्थन नहीं है.\\n"
""")
po.additem("""\
#: passwd.c:162
msgid "lock the password for the named account (root only)"
msgstr "नामित खाता के लिये कूटशब्द लॉक करें (सिर्फ रूट)"
""")
po.additem("""\
#: passwd.c:165
msgid "unlock the password for the named account (root only)"
msgstr "नामित खाता के लिये कूटशब्द अनलॉक करें (सिर्फ रूट)"
""")
po.additem("""\
#: passwd.c:168
msgid "expire the password for the named account (root only)"
msgstr "नामित खाता के लिये कूटशब्द समय समाप्त करें (सिर्फ रूट)"
""")
po.additem("""\
#: passwd.c:177
msgid ""
"number of days warning users receives before password expiration (root only)"
msgstr ""
"शब्दकूट के समय समाप्त होने के लिये पहले उपयोक्ता प्राप्त करता है दिनों की "
"संख्या (रूट सिर्फ)"
""")
po.additem("""\
#: passwd.c:180
msgid ""
"number of days after password expiration when an account becomes disabled "
"(root only)"
msgstr ""
"शब्दकूट समय समाप्ति के बाद दिनों की संख्या जब एक खाता निष्क्रिय हो जाता है "
"(सिर्फ रूट)"
""")
po.additem("""\
#: passwd.c:388
#, c-format
msgid "%s: SELinux denying access due to security policy.\\n"
msgstr "%s: SELinux सुरक्षा नीति के कारण पहुँच को मना कर रहा है.\\n"
""")
po.additem("""\
#: passwd.c:425
#, c-format
msgid "Expiring password for user %s.\\n"
msgstr "%s उपयोक्ता के लिए कूटशब्द समाप्त हो रहा है.\\n"
""")
pofiles.add(po)
po = t25l.PoFile.from_scratch("ml")
po.additem("""\
#: passwd.c:159
msgid "delete the password for the named account (root only)"
msgstr ""
"പറഞ്ഞിരിക്കുന്ന അക്കൌണ്ടിന് പാസ്വേറ്ഡ് നീക്കം ചെയ്യുക (root-ന് മാത്റം "
"അധികാരമുള്ളൂ)"
""")
po.additem("""\
#: passwd.c:162
msgid "lock the password for the named account (root only)"
msgstr "പറഞ്ഞ അക്കൌണ്ടിനുള്ള രഹസ്യവാക്ക് പൂട്ടൂക (റൂട്ട് മാത്രം)"
""")
po.additem("""\
#: passwd.c:165
msgid "unlock the password for the named account (root only)"
msgstr "പറഞ്ഞ അക്കൌണ്ടിനുള്ള രഹസ്യവാക്ക് ലഭ്യമാക്കുക (റൂട്ട് മാത്രം)"
""")
po.additem("""\
#: passwd.c:168
msgid "expire the password for the named account (root only)"
msgstr ""
"പറഞ്ഞ അക്കൌണ്ടിനുള്ള രഹസ്യവാക്കിന്റെ കാലാവധി പൂര്ത്തിയാക്കുക (റൂട്ട് "
"മാത്രം)"
""")
po.additem("""\
#: passwd.c:173
msgid "maximum password lifetime (root only)"
msgstr ""
"പാസ്വേറ്ഡിനുളള ഏറ്റവും കൂടുതല് കാലാവധി (root-ന് മാത്റം അധികാരമുള്ളൂ)"
""")
po.additem("""\
#: passwd.c:177
msgid ""
"number of days warning users receives before password expiration (root only)"
msgstr ""
"പാസ്വേറ്ഡിന്റെ കാലാവധി അവസാനിക്കുന്നതിന് മുന്പ് യൂസറുകള്ക്ക് എത്റ ദിവസം "
"മുന്നറിയിപ്പ് ലഭിക്കുന്നു (root-ന് മാത്റം അധികാരമുള്ളൂ)"
""")
po.additem("""\
#: passwd.c:180
msgid ""
"number of days after password expiration when an account becomes disabled "
"(root only)"
msgstr ""
"പാസ്വേറ്ഡിന്റെ കാലാവധി അവസാനിച്ച ശേഷം യൂസറിന്റെ അക്കൌണ്ട് എത്റ "
"ദിവസത്തിന് ശേഷംപ്റവറ്ത്തന രഹിതമാകുന്നു (root-ന് മാത്റം അധികാരമുള്ളൂ)"
""")
po.additem("""\
#: passwd.c:183
msgid "report password status on the named account (root only)"
msgstr ""
"പറഞ്ഞിരിക്കുന്ന അക്കൌണ്ടില് പാസ്വേറ്ഡിന്റെ നിലവാരം വ്യക്തമാക്കുക (root-"
"ന് മാത്റം അധികാരമുള്ളൂ)"
""")
po.additem("""\
#: passwd.c:186
msgid "read new tokens from stdin (root only)"
msgstr ""
"stdin-ല് നിന്നും പുതിയ ടോക്കനുകള് ലഭ്യമാക്കുക (root-ന് മാത്റം "
"അധികാരമുള്ളൂ)"
""")
po.additem("""\
#: passwd.c:266
#, c-format
msgid "%s: Cannot mix one of -l, -u, -d, -S and one of -i, -n, -w, -x.\\n"
msgstr ""
"%s: -l, -u, -d, -S എന്നിവയില് ഒന്ന് -i, -n, -w, -x എന്നിവയായി ചേറ്ത്ത് "
"നല്കുവാന് സാധ്യമല്ല.\\n"
""")
po.additem("""\
#: passwd.c:388
#, c-format
msgid "%s: SELinux denying access due to security policy.\\n"
msgstr "%s: സുരക്ഷ സംവിധാനം കാരണം SELinux പ്രവേശനം നിഷേധിയ്ക്കുന്നു.\\n"
""")
po.additem("""\
#: passwd.c:416
msgid "Unsafe operation (use -f to force)"
msgstr ""
"പാടില്ലാത്ത പ്റക്റിയ (നിറ്ബന്ധപൂറ്വ്വം ചെയ്യുന്നതിന് -f ഉപയോഗിക്കുക)"
""")
po.additem("""\
#: passwd.c:425
#, c-format
msgid "Expiring password for user %s.\\n"
msgstr "%s ഉപയോക്താവിനുള്ള രഹസ്യവാക്കിന്റെ കാലാവധി അവസാനിയ്ക്കുന്നു.\\n"
""")
po.additem("""\
#: passwd.c:553
#, c-format
msgid "%s: expired authentication tokens updated successfully.\\n"
msgstr ""
"%s: കാലാവധി കഴിഞ്ഞ ഓഥന്റിക്കേഷന് ടോക്കനുകള് വിജയകരമായി "
"പുതുക്കിയിരിക്കുന്നു.\\n"
""")
po.additem("""\
#: passwd.c:556
#, c-format
msgid "%s: all authentication tokens updated successfully.\\n"
msgstr ""
"%s: എല്ലാ ഓഥന്റിക്കേഷന് ടോക്കനുകളും | |
# Import ML Library
import pandas as pd
import numpy as np
from sklearn import preprocessing
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
import gensim
from gensim.test.utils import datapath
import pyLDAvis.gensim
from gensim.corpora import Dictionary
# Import helper Library
import logging
from pathlib import Path
import os
import sys
# Import local files
from get_data import load_data
from manage_path import *
def load_pickle(file_name="FINRA_TRACE_2014.pkl.zip", is_zip=True):
    """Load a pickled DataFrame from the project's pickle directory.

    Parameters
    ----------
    file_name : str
        Name of the pickle file inside the pickle directory.
    is_zip : bool
        When True the file is read with zip decompression.

    Returns
    -------
    pandas.DataFrame
        The unpickled data.
    """
    pickle_file_path = get_pickle_directory() / file_name
    # Fixed missing space in the progress message ("from{}" -> "from {}").
    print("Getting data from {}...".format(pickle_file_path))
    if is_zip:
        data = pd.read_pickle(pickle_file_path, compression='zip')
    else:
        data = pd.read_pickle(pickle_file_path)
    print("Data getting success from {}!".format(pickle_file_path))
    return data
def create_document_2(first, second):
    """Join two tokens into a comma-separated document string."""
    return ','.join((str(first), str(second)))
def create_document_3(first, second, third):
    """Join three tokens into a comma-separated document string."""
    return ','.join((str(first), str(second), str(third)))
def document_date2year(date):
    """Return the 4-character year prefix of an ISO-style date string."""
    return str(date[:4])
def create_dummy_sink(Report_Dealer_Index, Contra_Party_Index):
    """Map the placeholder dealer '0' to a dummy sink 'D<contra>' (OLD_Dc_v4)."""
    dealer = str(Report_Dealer_Index)
    return 'D' + str(Contra_Party_Index) if dealer == '0' else dealer
def create_dummy_source(Report_Dealer_Index, Contra_Party_Index):
    """Map the placeholder contra '99999' to a dummy source 'D<dealer>' (OLD_Dc_v4)."""
    contra = str(Contra_Party_Index)
    return 'D' + str(Report_Dealer_Index) if contra == '99999' else contra
def client_to_delete(document):
    """Flag documents whose first field is a client placeholder ('0'/'99999')."""
    dealer = document.split(',')[0]
    return 'delete' if dealer in ('0', '99999') else 'keep'
def create_buy_document(Report_Dealer_Index, Contra_Party_Index, document_date):
    """Build the buy-side document: contra buys from client (BfC) or dealer (BfD)."""
    suffix = 'BfC' if str(Report_Dealer_Index) == '0' else 'BfD'
    return '{0},{1},{2}'.format(Contra_Party_Index, document_date, suffix)
def create_sell_document(Report_Dealer_Index, Contra_Party_Index, document_date):
    """Build the sell-side document: dealer sells to client (StC) or dealer (StD)."""
    suffix = 'StC' if str(Contra_Party_Index) == '99999' else 'StD'
    return '{0},{1},{2}'.format(Report_Dealer_Index, document_date, suffix)
def create_buy_document_no_source(Report_Dealer_Index, Contra_Party_Index, document_date):
    """Buy-side document, dropping trades whose contra is the source placeholder.

    Returns NaN when the contra party is '99999' (and the dealer is not '0'),
    so those rows can be filtered out afterwards.
    """
    dealer = str(Report_Dealer_Index)
    contra = str(Contra_Party_Index)
    if dealer == '0':
        return '{0},{1},BfC'.format(contra, document_date)
    if contra == '99999':
        return np.nan
    return '{0},{1},BfD'.format(contra, document_date)
def create_sell_document_no_source(Report_Dealer_Index, Contra_Party_Index, document_date):
    """Sell-side document, dropping trades whose dealer is the source placeholder.

    Returns NaN when the report dealer is '0' (and the contra is not '99999'),
    so those rows can be filtered out afterwards.
    """
    dealer = str(Report_Dealer_Index)
    contra = str(Contra_Party_Index)
    if contra == '99999':
        return '{0},{1},StC'.format(dealer, document_date)
    if dealer == '0':
        return np.nan
    return '{0},{1},StD'.format(dealer, document_date)
def compute_Dc_v1(data):
    """Compute Dc_v1 which is count of bonds on given dealer and day.

    Builds one "dealer,date" document per trade side (report dealer sell,
    contra party buy), then counts how many times each bond appears per
    document, summing buy and sell sides together.

    Parameters
    ----------
    data : pandas.DataFrame
        Trade records with TRD_EXCTN_DTTM, Report_Dealer_Index,
        Contra_Party_Index and BOND_SYM_ID columns. Mutated in place
        (helper columns are added).

    Returns
    -------
    pandas.DataFrame
        Rows indexed by "dealer,date", columns are bond symbols, values
        are trade counts.
    """
    create_document_vectorize = np.vectorize(create_document_2)
    print("creating documents ......")
    data['document_date'] = data['TRD_EXCTN_DTTM'].dt.date.apply(lambda x: str(x))
    # Add new column Dc_v1_S which is the string representation of report dealer buy on the specific day
    data['Dc_v1_S'] = create_document_vectorize(data['Report_Dealer_Index'].values, data['document_date'].values)
    # Add new column Dc_v1_B which is the string representation of report dealer sell on the specific day
    data['Dc_v1_B'] = create_document_vectorize(data['Contra_Party_Index'].values, data['document_date'].values)
    print("documents created!!")
    data_gb_sell = data.groupby(by=['Dc_v1_S', 'BOND_SYM_ID'])
    data_gb_buy = data.groupby(by=['Dc_v1_B', 'BOND_SYM_ID'])
    print("computing Dc_v1 ......")
    sell_counts = data_gb_sell['BOND_SYM_ID'].size().astype(np.int16).unstack(fill_value=0)
    buy_counts = data_gb_buy['BOND_SYM_ID'].size().astype(np.int16).unstack(fill_value=0)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # use pd.concat instead.
    Dc_v1 = pd.concat([sell_counts, buy_counts])
    # Sort index before groupby index
    Dc_v1 = Dc_v1.sort_index()
    # Groupby index and sum them to get the count of bonds for the dealer on the certain day
    Dc_v1 = Dc_v1.groupby(by=Dc_v1.index).sum()
    # Sort columns so we get nice format
    Dc_v1 = Dc_v1.sort_index(axis=1)
    print("computing Dc_v1 done!")
    return Dc_v1
def compute_Dc_v2(data):
    """Compute Dc_v2 which is count of bonds on given dealer and day seperated buy and sell.

    Unlike Dc_v1, the buy-side and sell-side rows stay separate: the
    document strings carry an 'S'/'B' marker and the two count matrices
    are simply stacked, not summed.
    """
    make_document = np.vectorize(create_document_3)
    print("creating documents ......")
    data['document_date'] = data['TRD_EXCTN_DTTM'].dt.date.apply(str)
    # Document keyed on the reporting dealer, tagged 'S'
    data['Dc_v2_S'] = make_document(data['Report_Dealer_Index'].values, data['document_date'].values, 'S')
    # Document keyed on the contra party, tagged 'B'
    data['Dc_v2_B'] = make_document(data['Contra_Party_Index'].values, data['document_date'].values, 'B')
    print("documents created!!")
    sell_groups = data.groupby(by=['Dc_v2_S', 'BOND_SYM_ID'])
    buy_groups = data.groupby(by=['Dc_v2_B', 'BOND_SYM_ID'])
    print("computing Dc_v2 ......")
    sell_counts = sell_groups['BOND_SYM_ID'].size().astype(np.int16).unstack(fill_value=0)
    buy_counts = buy_groups['BOND_SYM_ID'].size().astype(np.int16).unstack(fill_value=0)
    result = sell_counts.append(buy_counts).sort_index(axis=1)
    print("computing Dc_v2 done!")
    return result
def compute_Dc_v3(data):
    """Compute Dc_v3 which is count of bonds on given dealer and day seperated buy and sell.

    Same construction as Dc_v2, except trades involving the client
    placeholders (Report_Dealer_Index '0' or Contra_Party_Index '99999')
    are dropped first, so only dealer-to-dealer activity is counted.

    NOTE(review): DataFrame.append was removed in pandas 2.0 — assumes an
    older pandas; confirm the pinned version.
    """
    create_document_vectorize = np.vectorize(create_document_3)
    print("creating documents ......")
    data['document_date'] = data['TRD_EXCTN_DTTM'].dt.date.apply(lambda x: str(x))
    # Ignore Report_Dealer_Index that is '0' and Contra_Party_Index that is '99999'
    data = data.loc[(data['Report_Dealer_Index'] != '0') & (data['Contra_Party_Index'] != '99999')].copy()
    # Document keyed on the reporting dealer, tagged 'S'
    data['Dc_v3_S'] = create_document_vectorize(data['Report_Dealer_Index'].values , data['document_date'].values , 'S')
    # Document keyed on the contra party, tagged 'B'
    data['Dc_v3_B'] = create_document_vectorize(data['Contra_Party_Index'].values , data['document_date'].values , 'B')
    print("documents created!!")
    data_gb_sell = data.groupby(by=['Dc_v3_S','BOND_SYM_ID'])
    data_gb_buy = data.groupby(by=['Dc_v3_B','BOND_SYM_ID'])
    print("computing Dc_v3 ......")
    Dc_v3 = data_gb_sell['BOND_SYM_ID'].size().astype(np.int16).unstack(fill_value=0)
    Dc_v3 = Dc_v3.append(data_gb_buy['BOND_SYM_ID'].size().astype(np.int16).unstack(fill_value=0))
    Dc_v3 = Dc_v3.sort_index(axis=1)
    print("computing Dc_v3 done!")
    return Dc_v3
def trade_vol_BoW(data, cap="large"):
    """Compute trade_vol_BoW which is sum of bonds total trading price on given dealer and day with seperated buy and sell.

    Parameters
    ----------
    data : pandas.DataFrame
        Trade records with ENTRD_VOL_QT, RPTD_PR, TRD_EXCTN_DTTM,
        Report_Dealer_Index, Contra_Party_Index, BOND_SYM_ID columns.
        NOTE: a 'price' column is added to the caller's frame
        (pre-existing side effect, kept for backward compatibility).
    cap : str
        "large" keeps trades with price >= 10000 and rescales by 10000;
        any other value keeps trades below the threshold, unscaled.

    Returns
    -------
    Sparse DataFrame of summed prices indexed by dealer/day document
    string, one column per BOND_SYM_ID.
    """
    data['price'] = (data['ENTRD_VOL_QT'] * data['RPTD_PR'])/100
    cap_threshold = 10000
    # FIX: take an explicit copy of the filtered slice -- the original
    # assigned into a boolean-mask view, which raises pandas'
    # SettingWithCopyWarning and may silently fail to write.
    if cap == "large":
        data = data[data['price'] >= cap_threshold].copy()
        data['price'] = data['price'] / cap_threshold
    else:
        data = data[data['price'] < cap_threshold].copy()
    data['document_date'] = data['TRD_EXCTN_DTTM'].dt.date.apply(lambda x: str(x))
    create_buy_document_no_source_vectorize = np.vectorize(create_buy_document_no_source)
    create_sell_document_no_source_vectorize = np.vectorize(create_sell_document_no_source)
    # (removed an unused np.vectorize(client_to_delete) -- never called here)
    print("creating documents ......")
    # Sell-side document key; np.nan becomes the string 'nan' after
    # vectorize and is filtered out below
    data['trade_vol_BoW_S'] = create_sell_document_no_source_vectorize(data['Report_Dealer_Index'].values, data['Contra_Party_Index'].values, data['document_date'].values)
    # Buy-side document key
    data['trade_vol_BoW_B'] = create_buy_document_no_source_vectorize(data['Report_Dealer_Index'].values, data['Contra_Party_Index'].values, data['document_date'].values)
    print("documents created!!")
    data = data[['trade_vol_BoW_S', 'trade_vol_BoW_B', 'BOND_SYM_ID', 'price']].copy()
    data_gb_sell = data[data['trade_vol_BoW_S'] != 'nan'].groupby(by=['trade_vol_BoW_S', 'BOND_SYM_ID'])
    data_gb_buy = data[data['trade_vol_BoW_B'] != 'nan'].groupby(by=['trade_vol_BoW_B', 'BOND_SYM_ID'])
    print("computing bag_of_words ......")
    # NOTE(review): .to_sparse() was removed in pandas 1.0 and .append in
    # 2.0 -- this code assumes an older pandas; confirm the pinned version.
    bag_of_words = data_gb_sell['price'].sum().astype(np.int32).unstack(level=-1).to_sparse()
    bag_of_words = bag_of_words.append(data_gb_buy['price'].sum().astype(np.int32).unstack(level=-1).to_sparse())
    bag_of_words = bag_of_words.sort_index(axis=1)
    print("computing bag_of_words done!")
    return bag_of_words
def trade_vol_BoW_norm(data, cap="large"):
    """Compute trade_vol_BoW_norm which is sum of bonds total trading price normalized to percentage of total trading price of that bond on given dealer and day with seperated buy and sell.

    Same pipeline as trade_vol_BoW, with a final per-column normalization:
    each bond column is divided by its own total and scaled by 1000.

    Parameters
    ----------
    data : pandas.DataFrame
        Trade records (see trade_vol_BoW). NOTE: a 'price' column is
        added to the caller's frame (pre-existing side effect).
    cap : str
        "large" keeps trades with price >= 10000 and rescales by 10000;
        any other value keeps trades below the threshold, unscaled.

    Returns
    -------
    Sparse DataFrame of normalized price weights indexed by dealer/day
    document string, one column per BOND_SYM_ID.
    """
    data['price'] = (data['ENTRD_VOL_QT'] * data['RPTD_PR'])/100
    cap_threshold = 10000
    # FIX: take an explicit copy of the filtered slice -- the original
    # assigned into a boolean-mask view (SettingWithCopyWarning; the
    # write may silently not stick).
    if cap == "large":
        data = data[data['price'] >= cap_threshold].copy()
        data['price'] = data['price'] / cap_threshold
    else:
        data = data[data['price'] < cap_threshold].copy()
    data['document_date'] = data['TRD_EXCTN_DTTM'].dt.date.apply(lambda x: str(x))
    create_buy_document_no_source_vectorize = np.vectorize(create_buy_document_no_source)
    create_sell_document_no_source_vectorize = np.vectorize(create_sell_document_no_source)
    # (removed an unused np.vectorize(client_to_delete) -- never called here)
    print("creating documents ......")
    # Sell-side document key; np.nan becomes 'nan' after vectorize and is
    # filtered out below
    data['trade_vol_BoW_S'] = create_sell_document_no_source_vectorize(data['Report_Dealer_Index'].values, data['Contra_Party_Index'].values, data['document_date'].values)
    # Buy-side document key
    data['trade_vol_BoW_B'] = create_buy_document_no_source_vectorize(data['Report_Dealer_Index'].values, data['Contra_Party_Index'].values, data['document_date'].values)
    print("documents created!!")
    data = data[['trade_vol_BoW_S', 'trade_vol_BoW_B', 'BOND_SYM_ID', 'price']].copy()
    data_gb_sell = data[data['trade_vol_BoW_S'] != 'nan'].groupby(by=['trade_vol_BoW_S', 'BOND_SYM_ID'])
    data_gb_buy = data[data['trade_vol_BoW_B'] != 'nan'].groupby(by=['trade_vol_BoW_B', 'BOND_SYM_ID'])
    print("computing bag_of_words ......")
    # NOTE(review): .to_sparse()/.append assume pandas < 1.0/2.0 -- confirm.
    bag_of_words = data_gb_sell['price'].sum().astype(np.int32).unstack(level=-1).to_sparse()
    bag_of_words = bag_of_words.append(data_gb_buy['price'].sum().astype(np.int32).unstack(level=-1).to_sparse())
    bag_of_words = bag_of_words.apply(lambda x: x / x.sum()) * 1000  # Normalize each bond column to per-mille of its total
    bag_of_words = bag_of_words.sort_index(axis=1)
    print("computing bag_of_words done!")
    return bag_of_words
def trade_vol_BoW_outstanding(data, cap="large"):
    """Compute trade_vol_BoW_outstanding: price-weighted bag of words per dealer/day.

    TODO(review): this body is currently a verbatim copy of
    trade_vol_BoW_norm (the original docstring even named that function) --
    despite the name, nothing here uses amount outstanding. Behavior is
    preserved; only the misleading docstring is corrected.

    Parameters
    ----------
    data : pandas.DataFrame
        Trade records (see trade_vol_BoW). NOTE: a 'price' column is
        added to the caller's frame (pre-existing side effect).
    cap : str
        "large" keeps trades with price >= 10000 and rescales by 10000;
        any other value keeps trades below the threshold, unscaled.

    Returns
    -------
    Sparse DataFrame of normalized price weights indexed by dealer/day
    document string, one column per BOND_SYM_ID.
    """
    data['price'] = (data['ENTRD_VOL_QT'] * data['RPTD_PR'])/100
    cap_threshold = 10000
    # FIX: explicit copy of the filtered slice -- the original assigned
    # into a boolean-mask view (SettingWithCopyWarning).
    if cap == "large":
        data = data[data['price'] >= cap_threshold].copy()
        data['price'] = data['price'] / cap_threshold
    else:
        data = data[data['price'] < cap_threshold].copy()
    data['document_date'] = data['TRD_EXCTN_DTTM'].dt.date.apply(lambda x: str(x))
    create_buy_document_no_source_vectorize = np.vectorize(create_buy_document_no_source)
    create_sell_document_no_source_vectorize = np.vectorize(create_sell_document_no_source)
    # (removed an unused np.vectorize(client_to_delete) -- never called here)
    print("creating documents ......")
    # Sell-side document key; np.nan becomes 'nan' after vectorize and is
    # filtered out below
    data['trade_vol_BoW_S'] = create_sell_document_no_source_vectorize(data['Report_Dealer_Index'].values, data['Contra_Party_Index'].values, data['document_date'].values)
    # Buy-side document key
    data['trade_vol_BoW_B'] = create_buy_document_no_source_vectorize(data['Report_Dealer_Index'].values, data['Contra_Party_Index'].values, data['document_date'].values)
    print("documents created!!")
    data = data[['trade_vol_BoW_S', 'trade_vol_BoW_B', 'BOND_SYM_ID', 'price']].copy()
    data_gb_sell = data[data['trade_vol_BoW_S'] != 'nan'].groupby(by=['trade_vol_BoW_S', 'BOND_SYM_ID'])
    data_gb_buy = data[data['trade_vol_BoW_B'] != 'nan'].groupby(by=['trade_vol_BoW_B', 'BOND_SYM_ID'])
    print("computing bag_of_words ......")
    # NOTE(review): .to_sparse()/.append assume pandas < 1.0/2.0 -- confirm.
    bag_of_words = data_gb_sell['price'].sum().astype(np.int32).unstack(level=-1).to_sparse()
    bag_of_words = bag_of_words.append(data_gb_buy['price'].sum().astype(np.int32).unstack(level=-1).to_sparse())
    bag_of_words = bag_of_words.apply(lambda x: x / x.sum()) * 1000  # Normalize each bond column to per-mille of its total
    bag_of_words = bag_of_words.sort_index(axis=1)
    print("computing bag_of_words done!")
    return bag_of_words
def compute_Dc_v4(data):
    """Compute Dc_v4 which is count of bonds on given dealer and day seperated buy and sell.

    Like Dc_v2 but built from the create_buy_document / create_sell_document
    helpers, then post-filtered: rows flagged by client_to_delete are
    dropped, rows with a total count <= 3 are dropped, and all-NaN bond
    columns are removed.

    NOTE(review): DataFrame.append was removed in pandas 2.0 -- assumes an
    older pandas; confirm the pinned version.
    """
    create_buy_document_vectorize = np.vectorize(create_buy_document)
    create_sell_document_vectorize = np.vectorize(create_sell_document)
    client_to_delete_vectorize = np.vectorize(client_to_delete)
    print("creating documents ......")
    data['document_date'] = data['TRD_EXCTN_DTTM'].dt.date.apply(lambda x: str(x))
    # Sell-side document key for this trade/day
    data['Dc_v4_S'] = create_sell_document_vectorize(data['Report_Dealer_Index'].values,data['Contra_Party_Index'].values,data['document_date'].values)
    # Buy-side document key for this trade/day
    data['Dc_v4_B'] = create_buy_document_vectorize(data['Report_Dealer_Index'].values,data['Contra_Party_Index'].values,data['document_date'].values)
    print("documents created!!")
    data_gb_sell = data.groupby(by=['Dc_v4_S','BOND_SYM_ID'])
    data_gb_buy = data.groupby(by=['Dc_v4_B','BOND_SYM_ID'])
    print("computing Dc_v4 ......")
    Dc_v4 = data_gb_sell['BOND_SYM_ID'].size().astype(np.int16).unstack(fill_value=0)
    Dc_v4 = Dc_v4.append(data_gb_buy['BOND_SYM_ID'].size().astype(np.int16).unstack(fill_value=0))
    Dc_v4 = Dc_v4.sort_index(axis=1)
    print("computing Dc_v4 done!")
    print("flitering out general client in Dc_v4")
    # Drop rows whose document key client_to_delete marks as 'delete'
    Dc_v4['to_delete'] = client_to_delete_vectorize(Dc_v4.index)
    Dc_v4 = Dc_v4.loc[Dc_v4['to_delete']!='delete'].drop(['to_delete'],axis=1).copy()
    # Keep only documents with more than 3 trades in total
    Dc_v4 = Dc_v4[Dc_v4.sum(axis=1) > 3].copy()
    # Remove bond columns that became entirely NaN after filtering
    Dc_v4.dropna(axis=1,how='all',inplace=True)
    print("all done!")
    return Dc_v4
def compute_Tc_v1(data):
    """Compute Tc_v1: documents representing (seller, buyer, year) directly.

    One document per (Report_Dealer_Index, Contra_Party_Index, year)
    combination (built by the sibling create_document_3 helper after the
    trade date is reduced to a year by document_date2year); the result
    counts trades per (document, bond) pair.
    """
    create_document_vectorize = np.vectorize(create_document_3)
    document_date2year_vectorize = np.vectorize(document_date2year)
    print("creating documents ......")
    data['document_date'] = data['TRD_EXCTN_DTTM'].dt.date.apply(lambda x: str(x))
    # Collapse the full date down to a year before building documents
    data['document_date'] = document_date2year_vectorize(data['document_date'].values)
    # Document combining seller, buyer and year into a single key
    data['Tc_v1_S_B_D'] = create_document_vectorize(data['Report_Dealer_Index'].values , data['Contra_Party_Index'].values , data['document_date'].values)
    print("documents created!!")
    data_gb = data.groupby(by=['Tc_v1_S_B_D','BOND_SYM_ID'])
    print("computing Tc_v1 ......")
    Tc_v1 = data_gb['BOND_SYM_ID'].size().astype(np.int16).unstack(fill_value=0)
    Tc_v1 = Tc_v1.sort_index(axis=1)
    print("computing Tc_v1 done!")
    return Tc_v1
def compute_matrix1():
| |
width=1, height=2)
expected = (
'<table:table table:name="A Table">'
"<table:table-column/>"
"<table:table-row>"
"<table:table-cell/>"
"</table:table-row>"
"<table:table-row>"
"<table:table-cell/>"
"</table:table-row>"
"</table:table>"
)
self.assertEqual(table.serialize(), expected)
def test_display(self):
    """A table with default display serializes without a table:display attribute."""
    serialized = Table("Displayed").serialize()
    self.assertEqual(serialized, '<table:table table:name="Displayed"/>')
def test_display_false(self):
    """display=False must emit table:display="false"."""
    serialized = Table("Hidden", display=False).serialize()
    self.assertEqual(serialized, '<table:table table:name="Hidden" table:display="false"/>')
def test_print(self):
    """A table with default printability serializes without a table:print attribute."""
    serialized = Table("Printable").serialize()
    self.assertEqual(serialized, '<table:table table:name="Printable"/>')
def test_print_false(self):
    """printable=False must emit table:print="false"."""
    serialized = Table("Hidden", printable=False).serialize()
    self.assertEqual(serialized, '<table:table table:name="Hidden" table:print="false"/>')
def test_print_ranges_str(self):
    """A space-separated string of ranges is emitted verbatim in table:print-ranges."""
    serialized = Table("Ranges", print_ranges="E6:K12 P6:R12").serialize()
    expected = (
        '<table:table table:name="Ranges" ' 'table:print-ranges="E6:K12 P6:R12"/>'
    )
    self.assertEqual(serialized, expected)
def test_print_ranges_list(self):
    """A list of ranges serializes the same as the space-separated string form."""
    serialized = Table("Ranges", print_ranges=["E6:K12", "P6:R12"]).serialize()
    expected = (
        '<table:table table:name="Ranges" ' 'table:print-ranges="E6:K12 P6:R12"/>'
    )
    self.assertEqual(serialized, expected)
def test_style(self):
    """The style keyword becomes the table:style-name attribute."""
    serialized = Table("A Table", style="A Style").serialize()
    expected = '<table:table table:name="A Table" ' 'table:style-name="A Style"/>'
    self.assertEqual(serialized, expected)
class TestCell(TestCase):
    """Tests for the Cell element: value, type, currency, repetition, style
    and formula accessors. Each test works on a clone of the shared fixture
    so tests stay independent."""

    def setUp(self):
        # Shared fixture: a float cell repeated 3 times with style "ce5".
        self.cell = Cell(1, repeated=3, style="ce5")

    def test_get_cell_value(self):
        """get_value() returns the raw value; get_type=True adds the ODF type."""
        self.assertEqual(self.cell.get_value(), 1)
        self.assertEqual(self.cell.get_value(get_type=True), (1, "float"))

    def test_set_cell_value(self):
        """Setting a string value switches the cell type to "string"."""
        cell = self.cell.clone
        cell.set_value("€")
        self.assertEqual(cell.get_value(), "€")
        self.assertEqual(cell.type, "string")
        self.assertEqual(cell.get_value(get_type=True), ("€", "string"))

    def test_get_cell_type(self):
        """The type property tracks the stored value's type."""
        cell = self.cell.clone
        self.assertEqual(cell.type, "float")
        cell.set_value("€")
        self.assertEqual(cell.type, "string")

    def test_get_cell_type_percentage(self):
        """Percentage cells report type "percentage" whether set at
        construction or assigned afterwards."""
        cell = Cell(90, cell_type="percentage")
        self.assertEqual(cell.type, "percentage")
        self.assertEqual(cell.get_value(get_type=True), (90, "percentage"))
        cell = self.cell.clone
        cell.type = "percentage"
        self.assertEqual(cell.type, "percentage")
        self.assertEqual(cell.get_value(get_type=True), (1, "percentage"))

    def test_set_cell_type(self):
        """The type property is writable (here: "time")."""
        cell = self.cell.clone
        cell.type = "time"
        self.assertEqual(cell.type, "time")

    def test_set_cell_type_date(self):
        """The type property is writable (here: "date")."""
        cell = self.cell.clone
        cell.type = "date"
        self.assertEqual(cell.type, "date")

    def test_get_cell_currency(self):
        """Currency cells expose both the currency code and type "currency"."""
        cell = Cell(123, cell_type="currency", currency="EUR")
        self.assertEqual(cell.currency, "EUR")
        self.assertEqual(cell.type, "currency")
        self.assertEqual(cell.get_value(get_type=True), (123, "currency"))

    def test_set_cell_currency(self):
        """The currency code can be reassigned."""
        cell = Cell(123, cell_type="currency", currency="EUR")
        cell.currency = "CHF"
        self.assertEqual(cell.currency, "CHF")

    def test_get_cell_repeated(self):
        """repeated reflects the constructor argument."""
        self.assertEqual(self.cell.repeated, 3)

    def test_set_cell_repeated(self):
        """Setting repeated to 1 or None normalizes to None (no repetition)."""
        cell = self.cell.clone
        cell.repeated = 99
        self.assertEqual(cell.repeated, 99)
        cell.repeated = 1
        self.assertEqual(cell.repeated, None)
        cell.repeated = 2
        self.assertEqual(cell.repeated, 2)
        cell.repeated = None
        self.assertEqual(cell.repeated, None)

    def test_get_cell_style(self):
        """style is readable and clearable (None removes it)."""
        self.assertEqual(self.cell.style, "ce5")
        self.cell.style = "something blue"
        self.assertEqual(self.cell.style, "something blue")
        self.cell.style = None
        self.assertEqual(self.cell.style, None)

    def test_set_cell_style(self):
        """style can be reassigned and cleared on a clone."""
        cell = self.cell.clone
        cell.style = "ce2"
        self.assertEqual(cell.style, "ce2")
        cell.style = None
        self.assertEqual(cell.style, None)

    def test_set_cell_formula(self):
        """formula accepts any string and can be cleared with None."""
        cell = self.cell.clone
        cell.formula = "any string"
        self.assertEqual(cell.formula, "any string")
        cell.formula = None
        self.assertEqual(cell.formula, None)
class TestRow(TestCase):
    """Tests for a standalone Row built in setUp (no document needed):
    width 2, repeated 3, style "ro1", plus one repeated and one styled cell."""

    def setUp(self):
        row = Row(width=2, repeated=3, style="ro1")
        # Add repeated cell
        row.append(Cell(1, repeated=2))
        # Add regular cell
        row.append(Cell(style="ce5"))
        self.row = row

    def test_get_row_repeated(self):
        """repeated reflects the constructor argument."""
        self.assertEqual(self.row.repeated, 3)

    def test_set_row_repeated(self):
        """Setting repeated to 1 or None normalizes to None (no repetition)."""
        row = self.row.clone
        row.repeated = 99
        self.assertEqual(row.repeated, 99)
        row.repeated = 1
        self.assertEqual(row.repeated, None)
        row.repeated = 2
        self.assertEqual(row.repeated, 2)
        row.repeated = None
        self.assertEqual(row.repeated, None)

    def test_get_row_style(self):
        """style reflects the constructor argument."""
        self.assertEqual(self.row.style, "ro1")

    def test_get_row_width(self):
        """Width counts expanded cells: 2 initial + 2 repeated + 1 appended = 5."""
        self.assertEqual(self.row.width, 5)

    def test_traverse_cells(self):
        """traverse() yields one cell per expanded position."""
        self.assertEqual(len(list(self.row.traverse())), 5)

    def test_get_cell_values(self):
        """Empty leading cells read as None; the repeated cell expands to two 1s."""
        self.assertEqual(self.row.get_values(), [None, None, 1, 1, None])

    def test_is_empty(self):
        """A freshly created wide row is empty."""
        row = Row(width=100)
        self.assertEqual(row.is_empty(), True)

    def test_is_empty_no(self):
        """A single set value makes the row non-empty."""
        row = Row(width=100)
        row.set_value(50, 1)
        self.assertEqual(row.is_empty(), False)

    def test_rstrip(self):
        """rstrip() drops trailing empty cells but keeps the styled one."""
        row = Row(width=100)
        row.set_value(0, 1)
        row.set_value(1, 2)
        row.set_value(2, 3)
        row.set_cell(3, Cell(style="ce5"))
        row.rstrip()
        self.assertEqual(row.width, 4)
class TestRowCell(TestCase):
# simpletable :
# 1 1 1 2 3 3 3
# 1 1 1 2 3 3 3 self.row
# 1 1 1 2 3 3 3
# 1 2 3 4 5 6 7
def setUp(self):
    """Load the sample spreadsheet and keep two rows of the 7-column
    Example1 table: row 0 (which uses repeated cells) and row 1 (the row
    sketched in the class comment). Works on a clone so the sample file's
    in-memory tree is never mutated."""
    document = Document("samples/simple_table.ods")
    body = document.body
    table = body.get_table(name="Example1").clone
    self.row_repeats = table.get_row(0)
    self.row = table.get_row(1)
def test_traverse(self):
    """traverse() yields all 7 cells of the sample row."""
    cells = list(self.row.traverse())
    self.assertEqual(len(cells), 7)
def test_traverse_coord(self):
    """traverse(start, end) clamps out-of-range bounds to the row, treats
    negatives below -width as the start, and yields nothing for inverted
    or fully out-of-range windows."""
    self.assertEqual(len(list(self.row.traverse(2, None))), 5)
    self.assertEqual(len(list(self.row.traverse(2, 4))), 3)
    self.assertEqual(len(list(self.row.traverse(0, 3))), 4)
    self.assertEqual(len(list(self.row.traverse(0, 55))), 7)
    self.assertEqual(len(list(self.row.traverse(100, 55))), 0)
    self.assertEqual(len(list(self.row.traverse(100, None))), 0)
    self.assertEqual(len(list(self.row.traverse(None, 1))), 2)
    self.assertEqual(len(list(self.row.traverse(-5, 1))), 2)
    self.assertEqual(len(list(self.row.traverse(2, -1))), 0)
    self.assertEqual(len(list(self.row.traverse(-5, -1))), 0)
def test_get_cells(self):
    """get_cells() with no arguments returns every cell of the row."""
    all_cells = list(self.row.get_cells())
    self.assertEqual(len(all_cells), 7)
def test_get_cells_on_emty_row(self):
    """get_cells on an empty row returns no cells for any coordinate window.

    NOTE(review): "emty" is a typo for "empty" in the method name; left
    unchanged because renaming would alter the test's discovery id.
    """
    row = Row()
    self.assertEqual(len(row.get_cells()), 0)
    self.assertEqual(len(row.get_cells((1, 2))), 0)
    self.assertEqual(len(row.get_cells((-2, -3))), 0)
    self.assertEqual(len(row.get_cells((0, 10))), 0)
def test_get_cells_coord(self):
    """get_cells(coord) accepts (start, end) tuples or alphanumeric ranges
    ("a1:c2"); bounds are clamped to the row, negatives count from the end,
    and inverted or single-cell-address ("D1") forms return nothing."""
    coord = (0, 8)
    self.assertEqual(len(self.row.get_cells(coord)), 7)
    coord = "a1:c2"
    self.assertEqual(len(self.row.get_cells(coord)), 3)
    coord = "a1:a2"
    self.assertEqual(len(self.row.get_cells(coord)), 1)
    coord = "a1:EE2"
    self.assertEqual(len(self.row.get_cells(coord)), 7)
    coord = "D1"
    self.assertEqual(len(self.row.get_cells(coord)), 0)
    coord = "c5:a1"
    self.assertEqual(len(self.row.get_cells(coord)), 0)
    coord = (5, 6)
    self.assertEqual(len(self.row.get_cells(coord)), 2)
    coord = (-5, 6)
    self.assertEqual(len(self.row.get_cells(coord)), 5)
    coord = (0, -1)
    self.assertEqual(len(self.row.get_cells(coord)), 7)
    coord = (0, -2)
    self.assertEqual(len(self.row.get_cells(coord)), 6)
    coord = (-1, -1)
    self.assertEqual(len(self.row.get_cells(coord)), 1)
    coord = (1, 0)
    self.assertEqual(len(self.row.get_cells(coord)), 0)
def test_get_cells_regex(self):
    """Filtering by content regex returns the three cells holding "3"."""
    matches = self.row.get_cells(content=r"3")
    self.assertEqual([cell.x for cell in matches], [4, 5, 6])
def test_get_cells_style(self):
    """Filtering by style returns the two "ce5"-styled cells."""
    matches = self.row.get_cells(style=r"ce5")
    self.assertEqual([cell.x for cell in matches], [1, 5])
def test_get_cells_cell_type(self):
    """Every cell of the sample row is a float; other type filters match nothing."""
    row = self.row.clone
    cells = row.get_cells(cell_type="all")
    self.assertEqual(len(cells), 7)
    cells = row.get_cells(cell_type="float")
    self.assertEqual(len(cells), 7)
    cells = row.get_cells(cell_type="percentage")
    self.assertEqual(len(cells), 0)
    cells = row.get_cells(cell_type="string")
    self.assertEqual(len(cells), 0)
def test_get_cells_cell_type2(self):
    """After appending two strings and a percentage, each type filter
    returns exactly its own cells."""
    row = self.row.clone
    row.append_cell(Cell(value="bob"), clone=False)
    row.append_cell(Cell(value=14, cell_type="percentage"))
    row.append_cell(Cell(value="bob2"), clone=False)
    cells = row.get_cells(cell_type="all")
    self.assertEqual(len(cells), 7 + 3)
    cells = row.get_cells(cell_type="float")
    self.assertEqual(len(cells), 7)
    cells = row.get_cells(cell_type="percentage")
    self.assertEqual(len(cells), 1)
    cells = row.get_cells(cell_type="string")
    self.assertEqual(len(cells), 2)
def test_get_cells_cell_type_and_coord(self):
    """Type filter and coordinate window combine (intersection)."""
    row = self.row.clone
    cells = row.get_cells(coord=(0, 5), cell_type="all")
    self.assertEqual(len(cells), 6)
    cells = row.get_cells(coord=(0, 5), cell_type="float")
    self.assertEqual(len(cells), 6)
    cells = row.get_cells(coord=(0, 5), cell_type="percentage")
    self.assertEqual(len(cells), 0)
    cells = row.get_cells(coord=(2, 5), cell_type="string")
    self.assertEqual(len(cells), 0)
def test_get_cells_cell_type_and_coord2(self):
    """Type + coord filtering with appended cells: strings land at x=7 and
    x=9, the percentage at x=8, so windows pick them up selectively."""
    row = self.row.clone
    row.append_cell(Cell(value="bob"), clone=False)
    row.append_cell(Cell(value=14, cell_type="percentage"))
    row.append_cell(Cell(value="bob2"), clone=False)
    cells = row.get_cells(coord=(2, 9), cell_type="all")
    self.assertEqual(len(cells), 8)
    cells = row.get_cells(coord=(3, 9), cell_type="float")
    self.assertEqual(len(cells), 4)
    cells = row.get_cells(coord=(0, 5), cell_type="percentage")
    self.assertEqual(len(cells), 0)
    cells = row.get_cells(coord=(0, 5), cell_type="string")
    self.assertEqual(len(cells), 0)
    cells = row.get_cells(coord=(5, 9), cell_type="percentage")
    self.assertEqual(len(cells), 1)
    cells = row.get_cells(coord=(5, 9), cell_type="string")
    self.assertEqual(len(cells), 2)
    cells = row.get_cells(coord=(8, 9), cell_type="string")
    self.assertEqual(len(cells), 1)
def test_get_cell_alpha(self):
    """get_cell accepts a column letter; "F" is x=5 of the sample row."""
    row = self.row
    cell_5 = row.get_cell("F")
    self.assertEqual(cell_5.get_value(), 3)
    self.assertEqual(cell_5.text_content, "3")
    self.assertEqual(cell_5.type, "float")
    self.assertEqual(cell_5.style, "ce5")
    self.assertEqual(cell_5.x, 5)
    self.assertEqual(cell_5.y, 1)
def test_get_cell_int(self):
    """get_cell accepts an integer index; same cell as "F" in the alpha test."""
    row = self.row
    cell_5 = row.get_cell(5)
    self.assertEqual(cell_5.get_value(), 3)
    self.assertEqual(cell_5.text_content, "3")
    self.assertEqual(cell_5.type, "float")
    self.assertEqual(cell_5.style, "ce5")
def test_get_cell_coord(self):
    """Negative indices wrap (modulo row width, however large the offset);
    indices past the end return an empty cell (value None).

    Row values for reference: [1, 1, 1, 2, 3, 3, 3] (width 7).
    """
    row = self.row.clone
    cell = row.get_cell(-1)
    self.assertEqual(cell.get_value(), 3)
    cell = row.get_cell(-3)
    self.assertEqual(cell.get_value(), 3)
    cell = row.get_cell(-4)
    self.assertEqual(cell.get_value(), 2)
    cell = row.get_cell(-5)
    self.assertEqual(cell.get_value(), 1)
    cell = row.get_cell(-1 - 7)
    self.assertEqual(cell.get_value(), 3)
    cell = row.get_cell(-3 - 56)
    self.assertEqual(cell.get_value(), 3)
    cell = row.get_cell(-4 - 560)
    self.assertEqual(cell.get_value(), 2)
    cell = row.get_cell(-5 - 7000)
    self.assertEqual(cell.get_value(), 1)
    # Past the end: an empty cell, not an error
    cell = row.get_cell(8)
    self.assertEqual(cell.get_value(), None)
    cell = row.get_cell(1000)
    self.assertEqual(cell.get_value(), None)
def test_get_value_coord(self):
    """get_value mirrors get_cell's coordinate handling: negatives wrap,
    past-the-end returns None. An extra cell is appended at x=7."""
    row = self.row.clone
    row.append_cell(Cell("Appended"))
    value = row.get_value(-1)
    self.assertEqual(value, "Appended")
    value = row.get_value(-3)
    self.assertEqual(value, 3)
    value = row.get_value(-4)
    self.assertEqual(value, 3)
    value = row.get_value(-5)
    self.assertEqual(value, 2)
    value = row.get_value(-1 - 8)
    self.assertEqual(value, "Appended")
    value = row.get_value(7)
    self.assertEqual(value, "Appended")
    value = row.get_value(8)
    self.assertEqual(value, None)
    value = row.get_value(1000)
    self.assertEqual(value, None)
def test_get_value_coord_with_get_type(self):
    """Same coordinates as test_get_value_coord, but get_type=True returns
    (value, odf_type) pairs; missing cells give (None, None)."""
    row = self.row.clone
    row.append_cell(Cell("Appended"))
    value = row.get_value(-1, get_type=True)
    self.assertEqual(value, ("Appended", "string"))
    value = row.get_value(-3, get_type=True)
    self.assertEqual(value, (3, "float"))
    value = row.get_value(-4, get_type=True)
    self.assertEqual(value, (3, "float"))
    value = row.get_value(-5, get_type=True)
    self.assertEqual(value, (2, "float"))
    value = row.get_value(-1 - 8, get_type=True)
    self.assertEqual(value, ("Appended", "string"))
    value = row.get_value(7, get_type=True)
    self.assertEqual(value, ("Appended", "string"))
    value = row.get_value(8, get_type=True)
    self.assertEqual(value, (None, None))
    value = row.get_value(1000, get_type=True)
    self.assertEqual(value, (None, None))
def test_set_cell(self):
    """set_value replaces one cell in place; floats round-trip via Decimal
    (`dec` is presumably a Decimal alias imported above -- outside this view)."""
    row = self.row.clone
    row.set_value(1, 3.14)
    self.assertEqual(row.get_values(), [1, dec("3.14"), 1, 2, 3, 3, 3])
    # Test repetitions are synchronized
    self.assertEqual(row.width, 7)
def test_set_cell_far_away(self):
    """Setting past the end pads the gap with empty (None) cells."""
    row = self.row.clone
    row.set_value(7 + 3, 3.14)
    self.assertEqual(
        row.get_values(), [1, 1, 1, 2, 3, 3, 3, None, None, None, dec("3.14")]
    )
    # Test repetitions are synchronized
    self.assertEqual(row.width, 11)
def test_set_cell_repeat(self):
    """Setting inside a repeated run splits the run; width is unchanged."""
    row = self.row_repeats.clone
    row.set_value(1, 3.14)
    self.assertEqual(row.get_values(), [1, dec("3.14"), 1, 2, 3, 3, 3])
    # Test repetitions are synchronized
    self.assertEqual(row.width, 7)
def test_set_cell_repeat_repeat(self):
    """A repeated replacement cell overwrites that many positions in place."""
    row = self.row_repeats.clone
    cell = Cell(value=20, repeated=2)
    row.set_cell(1, cell)
    self.assertEqual(row.get_values(), [1, 20, 20, 2, 3, 3, 3])
    # Test repetitions are synchronized
    self.assertEqual(row.width, 7)
def test_insert(self):
    """insert_cell without a cell argument creates a fresh empty Cell at x."""
    inserted = self.row.clone.insert_cell(3)
    self.assertTrue(type(inserted) is Cell)
    self.assertEqual(inserted.x, 3)
    self.assertEqual(inserted.y, 1)
def test_insert_cell(self):
    """insert_cell shifts following cells right and grows the row by one."""
    row = self.row.clone
    cell = row.insert_cell(3, Cell("Inserted"))
    self.assertEqual(row.width, 8)
    self.assertEqual(row.get_values(), [1, 1, 1, "Inserted", 2, 3, 3, 3])
    # Test repetitions are synchronized
    self.assertEqual(row.width, 8)
    self.assertEqual(cell.x, 3)
    self.assertEqual(cell.y, 1)
def test_insert_cell_repeat(self):
    """Inserting inside a repeated run splits the run around the new cell."""
    row = self.row_repeats.clone
    cell = row.insert_cell(6, Cell("Inserted"))
    self.assertEqual(row.get_values(), [1, 1, 1, 2, 3, 3, "Inserted", 3])
    # Test repetitions are synchronized
    self.assertEqual(row.width, 8)
    self.assertEqual(cell.x, 6)
    self.assertEqual(cell.y, 0)
def test_insert_cell_repeat_repeat(self):
    """A repeated inserted cell expands to that many positions."""
    row = self.row_repeats.clone
    cell = row.insert_cell(6, Cell("Inserted", repeated=3))
    self.assertEqual(
        row.get_values(), [1, 1, 1, 2, 3, 3, "Inserted", "Inserted", "Inserted", 3]
    )
    # Test repetitions are synchronized
    self.assertEqual(row.width, 10)
    self.assertEqual(cell.x, 6)
    self.assertEqual(cell.y, 0)
def test_insert_cell_repeat_repeat_bis(self):
    """Repeated insert near the start of a repeated run: the run splits
    and the row grows by the inserted repetition count."""
    row = self.row_repeats.clone
    cell = row.insert_cell(1, Cell("Inserted", repeated=2))
    self.assertEqual(
        row.get_values(), [1, "Inserted", "Inserted", 1, 1, 2, 3, 3, 3]
    )
    # Test repetitions are synchronized
    self.assertEqual(row.width, 9)
    self.assertEqual(cell.x, 1)
    self.assertEqual(cell.y, 0)
def test_append_cell(self):
    """append_cell without an argument appends a fresh empty Cell at the end."""
    appended = self.row.clone.append_cell()
    self.assertTrue(type(appended) is Cell)
    self.assertEqual(appended.x, self.row.width)
    self.assertEqual(appended.y, 1)
def test_append_cell2(self):
    """Appending a value cell grows the row by one; the new cell's x equals
    the original (pre-append) width."""
    row = self.row.clone
    cell = row.append_cell(Cell("Appended"))
    self.assertEqual(row.get_values(), [1, 1, 1, 2, 3, 3, 3, "Appended"])
    # Test repetitions are synchronized
    self.assertEqual(row.width, 8)
    self.assertEqual(cell.x, self.row.width)
    self.assertEqual(cell.y, 1)
def test_delete_cell(self):
    """Deleting by positive index removes exactly one cell and shrinks the row."""
    row = self.row.clone
    row.delete_cell(3)
    remaining = row.get_values()
    self.assertEqual(remaining, [1, 1, 1, 3, 3, 3])
    # Repetitions must stay in sync with the removal
    self.assertEqual(row.width, 6)
def test_delete_cell_repeat(self):
row = self.row_repeats.clone
row.delete_cell(-1)
self.assertEqual(row.get_values(), [1, 1, 1, 2, 3, 3])
# Test repetitions are synchronized
self.assertEqual(row.width, | |
"""
This function tests that loadCSV loads label names
correctly.
"""
result_Sample_Static = pycgmIO.loadCSV(self.filename_Sample_Static)
labels_results = result_Sample_Static[2][0:50]
expected_labels_results = ['LFHD', 'RFHD', 'LBHD', 'RBHD', 'C7', 'T10',
'CLAV', 'STRN', 'RBAK', 'LSHO', 'LELB', 'LWRA', 'LWRB',
'LFIN', 'RSHO', 'RELB', 'RWRA', 'RWRB', 'RFIN', 'LASI',
'RASI', 'LPSI', 'RPSI', 'LTHI', 'LKNE', 'LTIB', 'LANK',
'LHEE', 'LTOE', 'RTHI', 'RKNE', 'RTIB', 'RANK', 'RHEE',
'RTOE', 'HEDO', 'HEDA', 'HEDL', 'HEDP', 'LCLO', 'LCLA',
'LCLL', 'LCLP', 'LFEO', 'LFEA', 'LFEL', 'LFEP', 'LFOO',
'LFOA', 'LFOL']
assert labels_results == expected_labels_results
def test_loadCSV_exceptions(self):
    """loadCSV must raise when the given file path does not exist."""
    with pytest.raises(Exception):
        pycgmIO.loadCSV("NonExistentFile")
@pytest.mark.parametrize("frame, data_key, expected_data", [
    (0, 'LFHD', np.array([174.5749207, 324.513031, 1728.94397])),
    (16, 'LWRA', np.array([-233.2779846, 485.1967163, 1128.858276])),
    (25, 'C7', np.array([251.1916809, 164.7823639, 1527.859253])),
    (100, 'RANK', np.array([427.6116943, 188.8884583, 93.36972809])),
    (12, 'RKNE', np.array([417.5567017, 241.5111389, 523.7767334]))
])
def test_loadData_csv(self, frame, data_key, expected_data):
    """
    This function tests pycgmIO.loadData(filename), where filename
    is a string indicating the file path of a CSV or C3D file to load.
    This function uses Sample_Static.csv in SampleData for testing.

    Spot-checks five (frame, marker) coordinates against known values,
    compared to self.rounding_precision decimal places.
    """
    csv_results = pycgmIO.loadData(self.filename_Sample_Static)
    result_data = csv_results[frame][data_key]
    np.testing.assert_almost_equal(result_data, expected_data, self.rounding_precision)
@pytest.mark.parametrize("frame, data_key, expected_data", [
    (0, 'LFHD', np.array([60.1229744, 132.4755249, 1485.8293457])),
    (16, 'LWRA', np.array([-422.2036438, 432.76647949, 1199.96057129])),
    (25, 'C7', np.array([-27.17804909, -8.29536247, 1301.43286133])),
    (100, 'RANK', np.array([52.61398697, -127.04923248, 58.46214676])),
    (12, 'RKNE', np.array([96.54218292, -111.24856567, 412.34362793]))
])
def test_loadData_c3d(self, frame, data_key, expected_data):
    """
    This function tests pycgmIO.loadData(filename), where filename
    is a string indicating the file path of a CSV or C3D file to load.
    This function use 59993_Frame_Static.c3d in SampleData for testing.

    Same spot-check pattern as test_loadData_csv, on the C3D loader path.
    """
    c3d_results = pycgmIO.loadData(self.filename_59993_Frame)
    result_data = c3d_results[frame][data_key]
    np.testing.assert_almost_equal(result_data, expected_data, self.rounding_precision)
def test_loadData_invalid_filename(self):
    """Unlike loadCSV, loadData signals a missing file by returning None."""
    result = pycgmIO.loadData("NonExistentFile")
    assert result is None
@pytest.mark.parametrize("data, expected_result", [
    ({'A': [[1, 2], [4, 5], [7, 8]], 'B': [[4, 5], [7, 8], [10, 11]]},
     [{'A': np.array([1, 4, 7]), 'B': np.array([ 4, 7, 10])},
      {'A': np.array([2, 5, 8]), 'B': np.array([ 5, 8, 11])}]),
    ({'A': [np.array([1, 2]), np.array([4, 5]), np.array([7, 8])],
      'B': [np.array([4, 5]), np.array([7, 8]), np.array([10, 11])]},
     [{'A': np.array([1, 4, 7]), 'B': np.array([ 4, 7, 10])},
      {'A': np.array([2, 5, 8]), 'B': np.array([ 5, 8, 11])}]),
    ({'A': [[1, 2], [4, 5], [7]]},
     [{'A': np.array([1, 4, 7])}]),
    ({'A': [[2], [4], [6], [8]]},
     [{'A': np.array([2, 4, 6])}]),
    ({'A': [[], [4, 5], [7, 8, 9]]},[])
])
def test_dataAsArray_accuracy(self, data, expected_result):
    """
    This function tests pycgmIO.dataAsArray(data), where
    data is a dictionary of marker data. The function returns
    the dictionary data as an array of dictionaries.
    We test cases where the input is lists or numpy arrays. We test
    cases where the arrays are not all the same shape, when there are more
    than 3 arrays per dictionary key, and when dictionary keys are empty.
    """
    result = pycgmIO.dataAsArray(data)
    np.testing.assert_equal(result, expected_result)
@pytest.mark.parametrize("data", [
    ({}),
    ({'A': []}),
    ({'A': [[1], [2]]}),
    ({'A': [[1, 2], [2, 3], [3, 4]],
      'B': [[4, 5], [5, 6], [6]]})
])
def test_dataAsArray_exception(self, data):
    """
    We test exceptions from keys with less than 3 arrays of data,
    empty arrays of data, or inconsistent shapes of arrays across keys.
    """
    with pytest.raises(Exception):
        pycgmIO.dataAsArray(data)
@pytest.mark.parametrize("data, expected_result", [
    ([{'A': [1, 2, 3], 'B': [4, 5, 6]},
      {'A': [2, 3, 4], 'B': [5, 6, 7]}],
     {'A': [[1, 2, 3], [2, 3, 4]], 'B': [[4, 5, 6], [5, 6, 7]]}),
    ([{'A': [1, 2], 'B': [4]},
      {'A': [4], 'B': []}],
     {'A': [[1, 2], [4]], 'B': [[4], []]}),
    ([{'A': [1, 2]},
      {'A': [4, 5], 'B': [6, 7]}],
     {'A': [[1, 2], [4, 5]], 'B': [[6, 7]]}),
    ([{'A': 2} , {'B': [6, 7]}],
     {'A': [2], 'B': [[6, 7]]}),
    ([], {})
])
def test_dataAsDict_accuracy(self, data, expected_result):
    """
    This function tests pycgmIO.dataAsDict(data, npArray=False), where
    data is a list of dictionaries of marker data. This function returns
    a data as a dictionary.
    We test cases with multiple markers with the same length of data,
    empty arrays, non-array dictionary values, and inconsistent keys
    across the indices of data.
    """
    result = pycgmIO.dataAsDict(data)
    np.testing.assert_equal(result, expected_result)
def test_dataAsDict_numpy_array(self):
#Test that data is returned as a numpy array if npArray = True
data = [{'A': [1, 2, 3]}]
result = pycgmIO.dataAsDict(data, npArray=True)
result_data = result['A']
assert isinstance(result_data, np.ndarray)
assert not isinstance(result_data, list)
@pytest.mark.parametrize("kinetics", [
([[1.1, 2.2, 3.3],
[4.4, 5.5, 6.6],
[7.7, 8.8, 9.9]]),
(np.array([[1.1, 2.2, 3.3],
[4.4, 5.5, 6.6],
[7.7, 8.8, 9.9]]))
])
def test_writeKinetics_accuracy(self, kinetics):
"""
This function tests pycgmIO.writeKinetics(CoM_output, kinetics),
where CoM_output is the filename to save output to,
and kinetics is the array_like output to be saved.
pycgmIO.writeKinetics() saves array data as .npy files.
This function tests saving lists and numpy arrays.
"""
CoM_output = os.path.join(self.tmp_dir_name, 'CoM')
pycgmIO.writeKinetics(CoM_output, kinetics)
write_result = np.load(CoM_output + '.npy')
np.testing.assert_equal(write_result, kinetics)
    @pytest.mark.parametrize("kwargs, len_written, truncated_result", [
        ({}, 274,
         [0, -0.308494914509454,-6.121292793370006,7.571431102151712,
          2.914222929716658,-6.867068980446340,-18.821000709643130]),
        ({'angles': False}, 217,
         [0, 251.608306884765625,391.741317749023438,1032.893493652343750,
          251.740636241118779,392.726947206848479,1032.788500732036255]),
        ({'axis': False}, 58,
         [0, -0.308494914509454,-6.121292793370006,7.571431102151712,
          2.914222929716658,-6.867068980446340,-18.821000709643130]),
        ({'angles': ['R Hip', 'Head'],'axis': False}, 7,
         [0, 2.914222929716658,-6.867068980446340,-18.821000709643130,
          0.021196729275744,5.462252836649474,-91.496085343964339]),
        ({'axis': ['PELO', 'L RADZ'], 'angles': False}, 7,
         [0, 251.608306884765625,391.741317749023438,1032.893493652343750,
          -271.942564463838380,485.192166623350204,1091.967911874857009]),
        ({'axis': ['NonExistentKey'], 'angles': False}, 1, [0])
    ])
    def test_writeResult(self, kwargs, len_written, truncated_result):
        """
        Test pycgmIO.writeResult(data, filename, **kwargs), where data is the
        pycgm output data to write, filename is the file to write to, and
        **kwargs selects which values are written.

        Asserts a truncated slice of the first data row and the number of
        values in that row. Cases cover: all angles and axes, only angles,
        only axes, an explicit list of angles, an explicit list of axes, and
        a non-existent key.

        Uses the kinematics computed in setup_method and writes to a
        temporary directory.
        """
        data = self.kinematics
        output_filename = os.path.join(self.tmp_dir_name, 'output')
        pycgmIO.writeResult(data, output_filename, **kwargs)
        with open(output_filename + '.csv', 'r') as f:
            lines = f.readlines()
        # The CSV begins with header lines; index 7 (the 8th line) is the
        # first data row. NOTE(review): the previous comment said the header
        # was 6 lines, but the code skips 7 — presumably writeResult emits 7
        # header lines; confirm against its output format.
        result = lines[7].strip().split(',')
        array_result = np.asarray(result, dtype=np.float64)
        len_result = len(array_result)
        # The first 7 columns of the row must match the expected truncation.
        np.testing.assert_equal(truncated_result, array_result[:7])
        # And the row must contain exactly the expected number of values.
        np.testing.assert_equal(len_result, len_written)
def test_smKeys(self):
"""
This function tests pycgmIO.smKeys(), which returns
a list of subject measurement keys.
"""
result = pycgmIO.smKeys()
expected_result = ['Bodymass', 'Height', 'HeadOffset', 'InterAsisDistance', 'LeftAnkleWidth',
'LeftAsisTrocanterDistance', 'LeftClavicleLength', 'LeftElbowWidth',
'LeftFemurLength', 'LeftFootLength', 'LeftHandLength', 'LeftHandThickness',
'LeftHumerusLength', 'LeftKneeWidth', 'LeftLegLength', 'LeftRadiusLength',
'LeftShoulderOffset', 'LeftTibiaLength', 'LeftWristWidth', 'RightAnkleWidth',
'RightClavicleLength', 'RightElbowWidth', 'RightFemurLength', 'RightFootLength',
'RightHandLength', 'RightHandThickness', 'RightHumerusLength', 'RightKneeWidth',
'RightLegLength', 'RightRadiusLength', 'RightShoulderOffset', 'RightTibiaLength',
'RightWristWidth']
assert result == expected_result
def test_loadVSK_list(self):
"""
This function tests pycgmIO.loadVSK(filename, dict=True),
where filename is the vsk file to be loaded and dict is a
bool indicating whether to return the vsk as a dictionary or list of
[keys, values].
RoboSM.vsk in SampleData is used to test the output.
We test returning as a list.
"""
result_vsk = pycgmIO.loadVSK(self.filename_RoboSM_vsk, dict=True)
result_keys = result_vsk[0]
result_values = result_vsk[1]
expected_keys = ['Bodymass', 'Height', 'InterAsisDistance', 'LeftLegLength', 'LeftAsisTrocanterDistance',
'LeftKneeWidth', 'LeftAnkleWidth', 'LeftTibialTorsion', 'LeftSoleDelta', 'LeftThighRotation',
'LeftShankRotation', 'LeftStaticPlantFlex', 'LeftStaticRotOff', 'LeftAnkleAbAdd',
'LeftShoulderOffset', 'LeftElbowWidth', 'LeftWristWidth', 'LeftHandThickness', 'RightLegLength',
'RightAsisTrocanterDistance', 'RightKneeWidth', 'RightAnkleWidth', 'RightTibialTorsion',
'RightSoleDelta', 'RightThighRotation', 'RightShankRotation', 'RightStaticPlantFlex',
'RightStaticRotOff', 'RightAnkleAbAdd', 'RightShoulderOffset', 'RightElbowWidth',
'RightWristWidth', 'RightHandThickness', 'MeanLegLength', 'C', 'Theta', 'Beta', 'HJCy',
'PelvisLength', 'LASIx', 'LASIz', 'RASIx', 'RASIz', 'ASISx', 'ASISz', 'LKNEy', 'LANKy',
'RKNEy', 'RANKy', 'LELBy', 'LWRy', 'LFINy', 'RELBy', 'RWRy', 'RFINy', 'HeadOffset', 'HEADy',
'LBHDx', 'BHDy', 'RBHDx', 'HeadOx', 'HeadOy', 'HeadOz', 'C7x', 'C7z', 'T10x', 'T10y', 'T10z',
'STRNz', 'RBAKx', 'RBAKy', 'RBAKz', 'ThorOx', 'ThorOy', 'ThorOz', 'LeftClavicleLength', 'LeftHumerusLength',
'LeftRadiusLength', 'LeftHandLength', 'LWRx', 'RightClavicleLength', 'RightHumerusLength', 'RightRadiusLength',
'RightHandLength', 'RWRx', 'ASISy', 'LPSIx', 'LPSIy', 'RPSIx', 'RPSIy', 'LeftFemurLength', 'LeftTibiaLength',
'LeftFootLength', 'LTHIy', 'LTHIz', 'LTIBy', 'LTIBz', 'LFOOy', 'LFOOz', 'LHEEx', 'LTOEx', 'RightFemurLength',
'RightTibiaLength', 'RightFootLength', 'RTHIy', 'RTHIz', 'RTIBy', 'RTIBz', 'RFOOy', 'RFOOz', 'RHEEx', 'RTOEx']
expected_values = [72.0, 1730.0, 281.118011474609, 1000.0, 0.0, 120.0, 90.0,
0.0, 0.0, 0.0, 0.0, 0.137504011392593, 0.0358467921614647, 0.0, 40.0, 80.0,
60.0, 17.0, 1000.0, 0.0, 120.0, 90.0, 0.0, 0.0, 0.0, 0.0, 0.17637075483799,
0.03440235927701, 0.0, 40.0, 80.0, 60.0, 17.0, 0.0, 0.0, 0.500000178813934,
0.314000427722931, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.290628731250763, 63.3674736022949,
-171.985321044922, 65.2139663696289, -159.32258605957, 0.167465895414352,
0.0252241939306259, 700.027526855469, -160.832626342773, 35.1444931030273,
| |
defined by the
# pattern. The synaptic weight will be proportional to the value defined by
# the pattern.
# - 'impact_modulation': STDP strengthens connections where a presynaptic spike
# causes a postsynaptic one. To strengthen connections that may excite the
# desired pattern, this method targets the way how synapses affect the
# postsynaptic potential. Usually, on a presynaptic spike, a synapse modifies
# the postsynaptic potential based on its weight. This method modulates this
# modification by the pattern strength. See config
# 'impact_modulation_default' for details.
pattern_induction_method = 'impact_modulation'
# Finally, the network should work without external stimulation, as the
# external stimulation requires knowledge of class labels. Therefore, the
# influence should decay with an increasing number of epochs, such that the
# teacher signal finally vanishes.
# Here, one defines a value (<=1), by which the pattern is multiplied after
# each epoch.
pattern_induction_decay = 0.5
# To generalize well, specialization should only be reached in the higher
# layers (closer to the output). Lower layers should be free to adapt to the
# input distribution.
# Therefore, pattern induction in lower layers should be weaker than in
# higher layers.
# To achieve this, one can choose an adaption method:
# - 'linear': Linear decay of pattern influence (1*pattern for output layer,
# 0*pattern for input layer, linear interpolation in between).
# - 'quadratic': Quadratic decay of pattern influence.
#
# In addition, one might pass an arbitrary lambda expression, that gets as
# input the number of layers and the current layer. The output should be a
# single value that scales the pattern.
# Examples:
# - 'linear' = lambda l, L: l/(L-1)
# - 'quadratic' = lambda l, L: (l/(L-1))**2
# Note, that the output layer has index L-1.
pattern_layer_influence = 'linear'
## Options specific for the induction methods.
# The firing rate of the pattern neurons of the induction method 'simple'.
# Unit of this option is considered as Hertz.
pattern_simple_fr = 63.75
## Settings specific to a certain induction method.
# The method 'impact_modulation' is implemented by means of an extra variable
# 'patmod' in the neural dynamics. This variable shall modulate the synaptic
# influence on a postsynaptic neuron according to the pattern strength. The
# default value of this variable is specified here and is applied if no pattern
# is influencing the network (i.e., if no training sample is presented). If the
# user has not specified this variable in the chosen neural dynamics, then the
# following equation will be added to the execution on a presynaptic spike at
# exc.-exc. synapses:
# 'v_post += patmod'
# and the default value will be forced to 0.
# Sensible (user-defined) update rules might either be multiplicative or
# additive. Examples:
# 'v_post += (w + patmod) * volt' (default: 0)
# 'v_post += (w * patmod) * volt' (default: 1)
# 'v_post += (w * (1 + patmod)) * volt' (default: 0)
# Note, that the third multiplicative modulation should be preferred over the
# second one, as positive pattern values in the third one actually lead to a
# stronger excitation.
# Note, even if this induction method does not apply, but the variable 'patmod'
# appears in the neural dynamics, then the default value is applied to this
# variable the whole time.
impact_modulation_default = 0
########################################
### Network Structure
########################################
# To define the network structure, one basically determines the number of
# hidden layers and their shape. A hidden layer of size n has 2n neurons, as
# for every excitatory neuron one inhibitory neuron exists. The inhibitory
# neurons introduce lateral inhibition. Every excitatory neuron is connected to
# exactly one inhibitory neuron. Inhibitory to excitatory connections can be
# set up in two different ways. Either each inhibitory neuron is connected to
# all excitatory neurons of the layer except the incoming one or one specifies
# a window of size k, which means that only the k excitatory neurons to the
# left and the k ones to the right get connected. Note, that such local windows
# can emerge in the fully-connected case as well, since the weights still get
# learned.
# Note, that the output layer will have the same lateral inhibition structure
# if not specified otherwise.
# The connections in between layers will be fully-connected feedforward
# connections from excitatory to excitatory neurons.
# The number of hidden layers.
# Note, this parameter is only considered if the parameter 'hidden_layer_sizes'
# is a single value and not an array.
num_hidden_layers = 0
# The number of excitatory neurons per hidden layer.
# This parameter may either be a single value or an array of values. If one
# specifies a single number, then this will be the number of excitatory neurons
# in every hidden layer.
# If this parameter is an array, then each entry defines a new hidden layer and
# its number of excitatory neurons. Note, that this settings ignores the
# previously specified parameter 'num_hidden_layers'.
hidden_layer_sizes = 0 # 100
# The size of the output layer.
# Usually, the output layer has the size according to the number of classes.
# However, one may specify this differently. In such a case, the network output
# is classified due to an indirect measure. See 'Classification' options for
# details.
# If this option is set to None, then the Classification options are ignored
# and an output neuron is created for each class (the highest firing rate
# determines the class).
output_size = 400
# The size of the window, that marks the area of influence for an inhibitory
# neuron. There are 3 possible ways to define the behaviour. If the option is
# set to 'None', then an inhibitory neuron is connected to all excitatory
# neurons of that layer except the one that provides its input stimulus. If one
# specifies a single number k, then it connects to the 2k nearest neighbors.
# One may also specify an array of values, defining the window size in each
# layer (hidden + output layer).
# Note, if the window size is greater than half the number of neurons in a
# layer, then the inhibitory neuron is simply fully-connected to all excitatory
# neurons except for its own input neuron. If the window size is 0, then there
# will be no lateral inhibition (as well as no inhibitory neurons).
lateral_inhibition_window = None # [5, 3] # None
########################################
### Network Visualization
########################################
# One has the option to plot the network after it has been constructed. The
# simulation will freeze until the figure has been closed!
# Note, the plot won't contain feedback connections.
plot_network = False
# Drawing a fully-connected network may result in a large number of
# connections, that degrades readability and performance. Therefore, one may
# only want to draw the connections of some of the neurons in a homogeneous
# network.
# If this option is activated, only the connections of the topmost and
# bottommost neuron in each layer are drawn.
plot_network_partly = False
# Additionally, one can save the figure.
save_network_plot = False
# The filename of the saved network plot.
network_plot_filename = '../plots/network.svg'
########################################
### Equations
########################################
# To specify the equations used to simulate neurons and synapses, one can
# specify a module name, that implements the neural dynamics. Therefore, one
# can choose a provided example model of neural dynamics, or specify and
# customize neural dynamics as a new module.
# Note, modules must reside in the 'equations' folder.
# The following example models are provided:
# - 'Diehl_Cook_SNN_Three_Factor': Implements conductance-based leaky-
# integrate-and-fire neurons and three-factor STDP learning for exc.-exc.
# synapses.
equation_module = 'Diehl_Cook_SNN_Three_Factor'
#equation_module = 'Simple_CurrLIF_and_STDP'
#equation_module = 'Kheradpisheh_NonLIF_STDP'
# If one wishes to customize the equations, a copy of the file
# 'src/equations/equation_wrapper.py' has to be made and modified. Please refer
# to the documentation within this module to customize the utilized neural
# dynamics.
# If a costumized module has been generated, it has to be imported by this
# module. Afterwards, the name of the module must be assigned to the variable
# 'equation_module'.
########################################
### Classification
########################################
# In case of a specified output layer size, one has to define here, how the
# network output is classified and evaluated.
# NOTE, if the output size of the network has not been specified, each class
# will be associated with a different output neuron, whose firing rate serves
# as classification criteria.
# Classification method. The classification method defines, how the output | |
of type {key_field}, with underlying type of {getattr(key_field, '_ty')} "
"is not hashable"
)
self._custom_deep_copy_implementation = True
super().__init__(*args, **kwargs)
self._set_immutable(getattr(self, "_immutable", False))
    def __set__(self, instance, value):
        """
        Descriptor assignment for the Map field.

        Validates that ``value`` is a dict of the accepted size and, when
        key/value Fields were declared via ``items``, coerces every key and
        value through those Fields before storing a ``_DictStruct`` wrapper
        on the instance.
        """
        if not isinstance(value, dict):
            raise TypeError(f"{self._name}: Expected a dict")
        # Size limits (minItems/maxItems) are enforced by SizedCollection.
        self.validate_size(value, self._name)
        if self.items is not None:
            key_field, value_field = self.items[0], self.items[1]
            # Rename the sub-fields so their validation errors point back
            # at this Map field ("<name>_key" / "<name>_value").
            setattr(key_field, "_name", self._name + "_key")
            setattr(value_field, "_name", self._name + "_value")
            res = OrderedDict()
            for key, val in value.items():
                # Run each key/value through the sub-field descriptors on a
                # throwaway Structure to trigger their validation/coercion,
                # then read the coerced values back off that Structure.
                temp_st = Structure()
                key_field.__set__(temp_st, key)
                value_field.__set__(temp_st, val)
                res[getattr(temp_st, getattr(key_field, "_name"))] = getattr(
                    temp_st, getattr(value_field, "_name")
                )
            value = res
        # _DictStruct presumably wraps the dict to intercept mutation —
        # confirm against its definition elsewhere in this module.
        super().__set__(instance, _DictStruct(self, instance, value, self._name))
class Array(
    SizedCollection, ContainNestedFieldMixin, TypedField, metaclass=_CollectionMeta
):
    """
    An Array field, similar to a list. Supports the properties in JSON schema draft 4.
    Expected input is of type `list`.

    Arguments:
        minItems(int): optional
            minimal size
        maxItems(int): optional
            maximal size
        uniqueItems(bool): optional
            are elements required to be unique?
        additionalItems(bool): optional
            Relevant in case items parameter is a list of Fields. Is it allowed to have additional
            elements beyond the ones defined in "items"?
        items(a :class:`Field` or :class:`Structure`, or a list/tuple of :class:`Field` or :class:`Structure`): optional
            Describes the fields of the elements.
            If items is a :class:`Field`, then it applies to all items.
            If items is a list, then every element in the content is expected to be
            of the corresponding field in items.

    Examples:

    .. code-block:: python

        names = Array[String]
        names = Array[String(minLengh=3)]
        names = Array(minItems=5, items=String)
        my_record = Array(items=[String, Integer(minimum=5), String])
        my_lists = Array[Array[Integer]]
        my_structs = Array[StructureReference(a=Integer, b=Float)]

        # Let's say we defined a Structure "Person"
        people = Array[Person]

        # Assume Foo is an arbitrary (non-Typedpy) class
        foos = Array[Foo]
    """

    _ty = list

    def __init__(
        self, *args, items=None, uniqueItems=None, additionalItems=None, **kwargs
    ):
        """
        Constructor
        :param args: pass-through
        :param items: either a single field, which will be enforced for all elements, or a list
            of fields which enforce the elements with the correspondent index
        :param uniqueItems: are elements required to be unique?
        :param additionalItems: Relevant if "items" is a list. Is it allowed to have additional
            elements beyond the ones defined in "items"?
        :param kwargs: pass-through
        """
        self.uniqueItems = uniqueItems
        self.additionalItems = additionalItems
        if isinstance(items, list):
            # Positional schema: one Field per index.
            self.items = [_map_to_field(item) for item in items]
        else:
            # A single Field (or None) applies to every element.
            self.items = _map_to_field(items)
        super().__init__(*args, **kwargs)
        self._set_immutable(getattr(self, "_immutable", False))

    @property
    def get_type(self):
        """Return a parametrized list[...] for a uniform item Field (Python 3.9+), else list."""
        if (
            not isinstance(self.items, (list, tuple))
            and self.items
            and python_ver_atleast_39
        ):
            return list[self.items.get_type]
        return list

    def __set__(self, instance, value):
        """
        Validate and assign a list value.

        Coerces every element through the declared item Field(s), enforces
        length constraints for a positional item schema, and stores a
        _ListStruct wrapper on the instance.
        """
        verify_type_and_uniqueness(list, value, self._name, self.uniqueItems)
        self.validate_size(value, self._name)
        if self.items is not None:
            if isinstance(self.items, Field):
                setattr(self.items, "_name", self._name)
                res = []
                for i, val in enumerate(value):
                    temp_st = Structure()
                    # Name the sub-field per index so errors point at the element.
                    setattr(self.items, "_name", self._name + f"_{str(i)}")
                    self.items.__set__(temp_st, val)
                    res.append(getattr(temp_st, getattr(self.items, "_name")))
                value = res
            elif isinstance(self.items, list):
                additional_properties_forbidden = self.additionalItems is False
                if not getattr(instance, "_skip_validation", False):
                    # Too few items is always an error; too many is an error
                    # only when additionalItems is explicitly False.
                    # (Fixed: the second clause previously duplicated the first
                    # with ">", so extra items were never rejected.)
                    if len(self.items) > len(value) or (
                        additional_properties_forbidden and len(self.items) < len(value)
                    ):
                        raise ValueError(
                            f"{self._name}: Got {value}; Expected an array of length {len(self.items)}"
                        )
                temp_st = Structure()
                temp_st._skip_validation = getattr(instance, "_skip_validation", False)
                res = []
                for ind, item in enumerate(self.items):
                    if ind >= len(value):
                        continue
                    setattr(item, "_name", self._name + f"_{str(ind)}")
                    item.__set__(temp_st, value[ind])
                    res.append(getattr(temp_st, getattr(item, "_name")))
                # Pass through any additional (allowed) items unvalidated.
                res += value[len(self.items) :]
                value = res
        super().__set__(instance, _ListStruct(self, instance, value, self._name))
class Deque(
    SizedCollection, ContainNestedFieldMixin, TypedField, metaclass=_CollectionMeta
):
    """
    A `collections.deque` field. Supports the properties in JSON schema draft 4.
    Expected input is of type `collections.deque`.

    Arguments:
        minItems(int): optional
            minimal size
        maxItems(int): optional
            maximal size
        uniqueItems(bool): optional
            are elements required to be unique?
        additionalItems(bool): optional
            Relevant in case items parameter is a list of Fields. Is it allowed to have additional
            elements beyond the ones defined in "items"?
        items(a :class:`Field` or :class:`Structure`, or a list/tuple of :class:`Field` or :class:`Structure`): optional
            Describes the fields of the elements.
            If items is a :class:`Field`, then it applies to all items.
            If items is a list, then every element in the content is expected to be
            of the corresponding field in items.

    Examples:

    .. code-block:: python

        names = Deque[String]
        names = Deque[String(minLengh=3)]
        names = Deque(minItems=5, items=String)
        my_record = Deque(items=[String, Integer(minimum=5), String])
        my_lists = Deque[Array[Integer]]
        my_structs = Deque[StructureReference(a=Integer, b=Float)]

        # Let's say we defined a Structure "Person"
        people = Deque[Person]

        # Assume Foo is an arbitrary (non-Typedpy) class
        foos = Deque[Foo]
    """

    _ty = deque

    def __init__(
        self, *args, items=None, uniqueItems=None, additionalItems=None, **kwargs
    ):
        """
        Constructor
        :param args: pass-through
        :param items: either a single field, which will be enforced for all elements, or a list
            of fields which enforce the elements with the correspondent index
        :param uniqueItems: are elements required to be unique?
        :param additionalItems: Relevant if "items" is a list. Is it allowed to have additional
            elements beyond the ones defined in "items"?
        :param kwargs: pass-through
        """
        self.uniqueItems = uniqueItems
        self.additionalItems = additionalItems
        if isinstance(items, list):
            # Positional schema: one Field per index.
            self.items = [_map_to_field(item) for item in items]
        else:
            # A single Field (or None) applies to every element.
            self.items = _map_to_field(items)
        super().__init__(*args, **kwargs)
        self._set_immutable(getattr(self, "_immutable", False))

    def __set__(self, instance, value):
        """
        Validate and assign a deque value, coercing elements through the
        declared item Field(s) before storing a _DequeStruct wrapper.
        """
        verify_type_and_uniqueness(deque, value, self._name, self.uniqueItems)
        self.validate_size(value, self._name)
        if self.items is not None:
            if isinstance(self.items, Field):
                setattr(self.items, "_name", self._name)
                res = deque()
                for i, val in enumerate(value):
                    temp_st = Structure()
                    # Name the sub-field per index so errors point at the element.
                    setattr(self.items, "_name", self._name + f"_{str(i)}")
                    self.items.__set__(temp_st, val)
                    res.append(getattr(temp_st, getattr(self.items, "_name")))
                value = res
            elif isinstance(self.items, list):
                additional_properties_forbidden = self.additionalItems is False
                if not getattr(instance, "_skip_validation", False):
                    # Too few items is always an error; too many is an error
                    # only when additionalItems is explicitly False.
                    # (Fixed: the second clause previously duplicated the first
                    # with ">", so extra items were never rejected.)
                    if len(self.items) > len(value) or (
                        additional_properties_forbidden and len(self.items) < len(value)
                    ):
                        raise ValueError(
                            f"{self._name}: Got {value}; Expected an deque of length {len(self.items)}"
                        )
                temp_st = Structure()
                temp_st._skip_validation = getattr(instance, "_skip_validation", False)
                res = deque()
                for ind, item in enumerate(self.items):
                    if ind >= len(value):
                        continue
                    setattr(item, "_name", self._name + f"_{str(ind)}")
                    item.__set__(temp_st, value[ind])
                    res.append(getattr(temp_st, getattr(item, "_name")))
                # Append any additional (allowed) items unvalidated; deque
                # does not support slicing, hence the index loop.
                for i in range(len(self.items), len(value)):
                    res.append(value[i])
                value = res
        super().__set__(instance, _DequeStruct(self, instance, value, self._name))
def verify_type_and_uniqueness(the_type, value, name, has_unique_items):
    """
    Validate a collection's concrete type and, optionally, item uniqueness.

    :param the_type: the exact container type expected (e.g. ``list``, ``deque``)
    :param value: the candidate value to validate
    :param name: field name used to prefix error messages
    :param has_unique_items: when truthy, every element must be unique
    :raises TypeError: if ``value`` is not an instance of ``the_type``
    :raises ValueError: if uniqueness is required and a duplicate is found
    """
    if not isinstance(value, the_type):
        raise TypeError(f"{name}: Got {wrap_val(value)}; Expected {str(the_type)}")
    if has_unique_items:
        # Items may be unhashable (e.g. dicts), so membership is checked
        # against a plain list rather than a set. Unlike the previous
        # side-effecting reduce() scan, this exits on the first duplicate.
        seen = []
        for item in value:
            if item in seen:
                raise ValueError(
                    f"{name}: Got {wrap_val(value)}; Expected unique items"
                )
            seen.append(item)
class Tuple(ContainNestedFieldMixin, TypedField, metaclass=_CollectionMeta):
"""
A tuple field, supports unique items option.
Expected input is of type `tuple`.
Arguments:
unqieItems(`bool`): optional
are elements required to be unique?
items(`list`/`tuple` of :class:`Field` or :class:`Structure`): optional
Describes the fields of the elements.
Every element in the content is expected to be
of the corresponding :class:`Field` in items.
Examples:
.. code-block:: python
# a is a tuple of exactly 2 strings that are different from each other.
a = Tuple(uniqueItems=True, items = [String, String])
# b is a tuple of 3: string, string and a number up to 10.
b = Tuple(items = [String, String, Number(maximum=10)])
# c is a tuple of 3: integer, string, float.
c = Tuple[Integer, String, Float]
# The following define a tuple of any number of Integers
d = Tuple[Integer]
# It can also contain other structures:
# Assume we have something like: class Foo(Structure): pass
# e is a tuple of any number of Integers or Foo instances
e = Tuple[AnyOf[Integer, Foo]]
# It can also have arbitrary class
class MyCustomClass: pass
Tuple[MyCustomClass]
"""
_ty = tuple
    def __init__(self, *args, items, uniqueItems=None, **kwargs):
        """
        Constructor

        :param args: pass-through
        :param items: either a single field, which will be enforced for all elements, or a list
            of fields which enforce the elements with the correspondent index
        :param uniqueItems: are elements required to be unique?
        :param kwargs: pass-through
        """
        self.uniqueItems = uniqueItems
        if isinstance(items, (list, tuple)):
            self.items = []
            for item in items:
                if isinstance(item, Field):
                    self.items.append(item)
                elif Field in item.__mro__:
                    # A Field *class* was given — instantiate it with defaults.
                    self.items.append(item())
                else:
                    raise TypeError("Expected a Field class or instance")
        elif isinstance(items, (Field,)) or Field in items.__mro__:
            # Single Field applies to all elements; normalize to a list.
            # NOTE(review): a bare Field class is stored un-instantiated here,
            # unlike the list branch above which calls item() — confirm that
            # downstream code handles a class in self.items.
            self.items = [items]
        else:
            raise TypeError("Expected a list/tuple of Fields or a single Field")
        super().__init__(*args, **kwargs)
        self._set_immutable(getattr(self, "_immutable", False))
    @property
    def get_type(self):
        """
        Best-effort typing annotation for this tuple field (Python 3.9+).

        Returns ``tuple[T]`` for a single uniform item Field, an explicitly
        parametrized ``tuple[...]`` for 2- or 3-element item lists, and plain
        ``tuple`` otherwise (including item lists of length 1 or more than 3).
        """
        if self.items and python_ver_atleast_39:
            if not isinstance(self.items, (list, tuple)):
                # NOTE(review): __init__ always normalizes self.items to a
                # list, so this branch looks unreachable unless the metaclass
                # sets items differently — confirm.
                return tuple[self.items.get_type]
            if len(self.items) == 2:
                return tuple[self.items[0].get_type, self.items[1].get_type]
            if len(self.items) == 3:
                return tuple[
                    self.items[0].get_type,
                    self.items[1].get_type,
                    self.items[2].get_type,
                ]
        return tuple
def __set__(self, instance, value):
verify_type_and_uniqueness(tuple, value, self._name, self.uniqueItems)
if len(self.items) != len(value) and len(self.items) > 1:
raise ValueError(
| |
from transformers import Trainer, TrainingArguments, BertConfig, RobertaConfig, ElectraConfig
from transformers import HfArgumentParser
from transformers import BertTokenizerFast, RobertaTokenizerFast
from transformers import RobertaForMaskedLM
import transformers
transformers.logging.set_verbosity_debug()
import torch
from torch.utils.data import DataLoader
import datasets
from datasets import concatenate_datasets
datasets.logging.set_verbosity(datasets.logging.ERROR)
from pathlib import Path
import time
import copy
from model import (
RobertaForShuffledWordClassification,
RobertaForShuffleRandomThreeWayClassification,
RobertaForFourWayTokenTypeClassification,
RobertaForFirstCharPrediction,
RobertaForRandomWordClassification
)
from model import compute_metrics_fn_for_shuffle_random
from model import (
DataCollatorForShuffledWordClassification,
DataCollatorForShuffleRandomThreeWayClassification,
DataCollatorForMaskedLanguageModeling,
DataCollatorForFourWayTokenTypeClassification,
DataCollatorForFirstCharPrediction,
DataCollatorForRandomWordClassification
)
from model import LoggingCallback
import logging
import os
import sys
import dataclasses
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class AdditionalArguments:
    """Define additional arguments that are not included in `TrainingArguments`."""

    # Required: where the processed pre-training dataset lives.
    data_dir: str = field(
        metadata={"help": "Path to a processed dataset for pre-training"}
    )
    model_path: Optional[str] = field(
        default=None,
        metadata={"help": "Local path to the model if the model to train has been instantiated from a local path. "
                  + "If present, training will resume from the optimizer/scheduler states loaded here."}
    )
    # --- Transformer architecture hyper-parameters (RoBERTa-base defaults) ---
    hidden_size: int = field(
        default=768,
        metadata={"help": "Dimensionality of the encoder layers and the pooler layer."}
    )
    num_hidden_layers: int = field(
        default=12,
        metadata={"help": "Number of hidden layers in the Transformer encoder."}
    )
    num_attention_heads: int = field(
        default=12,
        metadata={"help": "Number of attention heads for each attention layer in the Transformer encoder."}
    )
    intermediate_size: int = field(
        default=3072,
        metadata={"help": "Dimensionality of the intermediate (feed-forward) layer in the Transformer encoder."}
    )
    attention_probs_dropout_prob: float = field(
        default=0.1,
        metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    hidden_dropout_prob: float = field(
        default=0.1,
        # Typo fixed in help text: "probabilitiy" -> "probability".
        metadata={"help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."}
    )
    # --- Pre-training objective selection and its manipulation ratios ---
    pretrain_model: Optional[str] = field(
        default="RobertaForMaskedLM",
        metadata={"help": "The type of a model. Choose one from "
                  + "`RobertaForShuffledWordClassification`, "
                  + "`RobertaForShuffleRandomThreeWayClassification`, `RobertaForFirstCharPrediction`, "
                  + "`RobertaForMaskedLM`, `RobertaForFourWayTokenTypeClassification`, "
                  + "`RobertaForRandomWordClassification`."}
    )
    shuffle_prob: Optional[float] = field(
        default=0.15,
        metadata={"help": "[Shuffle] The ratio of shuffled words."}
    )
    mlm_prob: Optional[float] = field(
        default=0.15,
        metadata={"help": "[MLM] The ratio of masked tokens for MaskedLM."}
    )
    manipulate_prob: Optional[float] = field(
        default=0.10,
        metadata={"help": "[Shuffle+Random] The ratio of shuffled / random tokens over all tokens. "
                  + "The resulting manipulated ratio will be twice larger than `manipulate_prob`."}
    )
    mask_prob: Optional[float] = field(
        default=0.15,
        metadata={"help": "[First Char, Token Type] The ratio of token masking."}
    )
    random_prob: Optional[float] = field(
        default=0.15,
        metadata={"help": "[Random] The ratio of random tokens."}
    )
    save_interval: Optional[float] = field(
        default=21600.0,
        metadata={"help": "An interval to save weights in seconds."}
    )
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# NOTE: this runs at import time and reads sys.argv — the module has
# side effects and expects to be executed as a script.
parser = HfArgumentParser((AdditionalArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
    # If we pass only one argument to the script and it's the path to a json file,
    # let's parse it to get our arguments.
    args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
    # Otherwise parse regular --key value command-line flags.
    args, training_args = parser.parse_args_into_dataclasses()
# Setup logging: the main process (local_rank -1 or 0) logs at INFO;
# other distributed workers are limited to WARN to avoid duplicate output.
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
# Emitted via warning() so it is visible at either level configured above.
logger.warning(
    "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
    training_args.local_rank,
    training_args.device,
    training_args.n_gpu,
    bool(training_args.local_rank != -1),
    training_args.fp16,
)
def roberta_shuffled_cls():
    """Pre-train a RoBERTa model with shuffled word detection in a given sequence.

    Uses the module-level `args` / `training_args` parsed at import time and
    writes the final weights to `training_args.output_dir`.

    Notes:
        * To see possible args, please run `python pretrainer.py --help`
        * To monitor training, run `tensorboard --logdir=/path/to/logging_dir/`
    References:
        https://huggingface.co/transformers/main_classes/trainer.html#transformers.TrainingArguments
    """
    # build a base model
    logger.info("Building a model...")
    # the tokenizer is identical in both branches, so load it once
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    if args.model_path is None:
        # pre-training from scratch: roberta-base architecture, configurable sizes
        config = RobertaConfig(
            attention_probs_dropout_prob=args.attention_probs_dropout_prob,
            bos_token_id=0,
            eos_token_id=2,
            gradient_checkpointing=False,
            hidden_act="gelu",
            hidden_dropout_prob=args.hidden_dropout_prob,
            hidden_size=args.hidden_size,
            initializer_range=0.02,
            intermediate_size=args.intermediate_size,
            layer_norm_eps=1e-05,
            max_position_embeddings=514,
            model_type="roberta",
            num_attention_heads=args.num_attention_heads,
            num_hidden_layers=args.num_hidden_layers,
            pad_token_id=1,
            type_vocab_size=1,
            vocab_size=50265,
        )
        model = RobertaForShuffledWordClassification(config)
        model.resize_token_embeddings(len(tokenizer))
        logger.info(config)
    else:
        # resume pre-training from a given checkpoint
        config = RobertaConfig.from_pretrained(args.model_path)
        model = RobertaForShuffledWordClassification.from_pretrained(args.model_path, config=config)
    logger.info(f"Save a checkpoint every {training_args.save_steps} steps.")
    logger.info(f"Logging every {training_args.logging_steps} steps.")
    # load datasets: the corpus is stored as ten shards in subdirs 0, 10, ..., 90
    logger.info("Load the processed dataset...")
    data_root = Path(args.data_dir)
    if not data_root.exists():
        raise FileNotFoundError("The specified dataset path does not exist!")
    # collect all shards first and concatenate once, instead of the previous
    # pairwise concatenate-in-a-loop which re-copied the accumulated dataset
    shards = [datasets.load_from_disk(data_root / str(ratio)) for ratio in range(0, 100, 10)]
    full_dataset = concatenate_datasets(shards)
    # NOTE(review): `remove_columns_` is the deprecated in-place API; kept to
    # preserve behavior with the pinned datasets version
    full_dataset.remove_columns_(['text'])
    full_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'])
    full_dataset = full_dataset.shuffle(seed=training_args.seed)
    # set up a trainer with the shuffled-word collator
    data_collator = DataCollatorForShuffledWordClassification(
        tokenizer=tokenizer,
        shuffle_prob=args.shuffle_prob,
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=full_dataset,
        data_collator=data_collator,
    )
    # periodic checkpointing / logging callback
    trainer.add_callback(
        LoggingCallback(save_interval=args.save_interval)
    )
    # training; if `model_path` is not None, training resumes from that checkpoint
    logger.info("Training a model...")
    start_time = time.time()
    trainer.train(model_path=args.model_path)
    train_time = time.time() - start_time
    logger.info(f"Training time: {train_time}")
    # save final weights
    trainer.save_model(training_args.output_dir)
def roberta_maskedlm():
    """Pre-train a RoBERTa model with masked language modeling.

    Uses the module-level `args` / `training_args` parsed at import time and
    writes the final weights to `training_args.output_dir`.

    Notes:
        * To see possible args, please run `python pretrainer.py --help`
        * To monitor training, run `tensorboard --logdir=/path/to/logging_dir/`
    References:
        https://huggingface.co/transformers/main_classes/trainer.html#transformers.TrainingArguments
    """
    # build a base model
    logger.info("Building a model...")
    # the tokenizer is identical in both branches, so load it once
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    if args.model_path is None:
        # pre-training from scratch: roberta-base architecture, configurable sizes
        config = RobertaConfig(
            attention_probs_dropout_prob=args.attention_probs_dropout_prob,
            bos_token_id=0,
            eos_token_id=2,
            gradient_checkpointing=False,
            hidden_act="gelu",
            hidden_dropout_prob=args.hidden_dropout_prob,
            hidden_size=args.hidden_size,
            initializer_range=0.02,
            intermediate_size=args.intermediate_size,
            layer_norm_eps=1e-05,
            max_position_embeddings=514,
            model_type="roberta",
            num_attention_heads=args.num_attention_heads,
            num_hidden_layers=args.num_hidden_layers,
            pad_token_id=1,
            type_vocab_size=1,
            vocab_size=50265,
        )
        model = RobertaForMaskedLM(config)
        model.resize_token_embeddings(len(tokenizer))
        logger.info(config)
    else:
        # resume pre-training from a given checkpoint
        config = RobertaConfig.from_pretrained(args.model_path)
        model = RobertaForMaskedLM.from_pretrained(args.model_path, config=config)
    logger.info(f"Save a checkpoint every {training_args.save_steps} steps.")
    logger.info(f"Logging every {training_args.logging_steps} steps.")
    # load datasets: the corpus is stored as ten shards in subdirs 0, 10, ..., 90
    logger.info("Load the processed dataset...")
    data_root = Path(args.data_dir)
    if not data_root.exists():
        raise FileNotFoundError("The specified dataset path does not exist!")
    # collect all shards first and concatenate once, instead of the previous
    # pairwise concatenate-in-a-loop which re-copied the accumulated dataset
    shards = [datasets.load_from_disk(data_root / str(ratio)) for ratio in range(0, 100, 10)]
    full_dataset = concatenate_datasets(shards)
    # NOTE(review): `remove_columns_` is the deprecated in-place API; kept to
    # preserve behavior with the pinned datasets version
    full_dataset.remove_columns_(['text'])
    full_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'])
    full_dataset = full_dataset.shuffle(seed=training_args.seed)
    # set up a trainer with the MLM collator
    data_collator = DataCollatorForMaskedLanguageModeling(
        tokenizer=tokenizer,
        mlm_prob=args.mlm_prob,
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=full_dataset,
        data_collator=data_collator,
    )
    # periodic checkpointing / logging callback
    trainer.add_callback(
        LoggingCallback(save_interval=args.save_interval)
    )
    # training; if `model_path` is not None, training resumes from that checkpoint
    logger.info("Training a model...")
    start_time = time.time()
    trainer.train(model_path=args.model_path)
    train_time = time.time() - start_time
    logger.info(f"Training time: {train_time}")
    # save final weights
    trainer.save_model(training_args.output_dir)
def roberta_shuffle_random_threeway_cls():
    """Pre-train a RoBERTa model with three-way shuffle/random/non-replaced classification.

    Uses the module-level `args` / `training_args` parsed at import time and
    writes the final weights to `training_args.output_dir`.

    Notes:
        * To see possible args, please run `python pretrainer.py --help`
        * To monitor training, run `tensorboard --logdir=/path/to/logging_dir/`
    References:
        https://huggingface.co/transformers/main_classes/trainer.html#transformers.TrainingArguments
    """
    # build a base model
    logger.info("Building a model...")
    # the tokenizer is identical in both branches, so load it once
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    if args.model_path is None:
        # pre-training from scratch: roberta-base architecture, configurable sizes
        config = RobertaConfig(
            attention_probs_dropout_prob=args.attention_probs_dropout_prob,
            bos_token_id=0,
            eos_token_id=2,
            gradient_checkpointing=False,
            hidden_act="gelu",
            hidden_dropout_prob=args.hidden_dropout_prob,
            hidden_size=args.hidden_size,
            initializer_range=0.02,
            intermediate_size=args.intermediate_size,
            layer_norm_eps=1e-05,
            max_position_embeddings=514,
            model_type="roberta",
            num_attention_heads=args.num_attention_heads,
            num_hidden_layers=args.num_hidden_layers,
            pad_token_id=1,
            type_vocab_size=1,
            vocab_size=50265,
        )
        model = RobertaForShuffleRandomThreeWayClassification(config)
        model.resize_token_embeddings(len(tokenizer))
        logger.info(config)
    else:
        # resume pre-training from a given checkpoint
        config = RobertaConfig.from_pretrained(args.model_path)
        model = RobertaForShuffleRandomThreeWayClassification.from_pretrained(args.model_path, config=config)
    logger.info(f"Save a checkpoint every {training_args.save_steps} steps.")
    logger.info(f"Logging every {training_args.logging_steps} steps.")
    # load datasets: the corpus is stored as ten shards in subdirs 0, 10, ..., 90
    logger.info("Load the processed dataset...")
    data_root = Path(args.data_dir)
    if not data_root.exists():
        raise FileNotFoundError("The specified dataset path does not exist!")
    # collect all shards first and concatenate once, instead of the previous
    # pairwise concatenate-in-a-loop which re-copied the accumulated dataset
    shards = [datasets.load_from_disk(data_root / str(ratio)) for ratio in range(0, 100, 10)]
    full_dataset = concatenate_datasets(shards)
    # NOTE(review): `remove_columns_` is the deprecated in-place API; kept to
    # preserve behavior with the pinned datasets version
    full_dataset.remove_columns_(['text'])
    full_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'])
    full_dataset = full_dataset.shuffle(seed=training_args.seed)
    # set up a trainer with the three-way shuffle/random collator
    data_collator = DataCollatorForShuffleRandomThreeWayClassification(
        tokenizer=tokenizer,
        manipulate_prob=args.manipulate_prob,
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=full_dataset,
        data_collator=data_collator,
    )
    # periodic checkpointing / logging callback
    trainer.add_callback(
        LoggingCallback(save_interval=args.save_interval)
    )
    # training; if `model_path` is not None, training resumes from that checkpoint
    logger.info("Training a model...")
    start_time = time.time()
    trainer.train(model_path=args.model_path)
    train_time = time.time() - start_time
    logger.info(f"Training time: {train_time}")
    # save final weights
    trainer.save_model(training_args.output_dir)
def roberta_random_cls():
"""Pre-train a RoBERTa model with random token detection.
Notes:
* To see possible args, please run `python pretrainer.py --help`
* To monitor training, run `tensorboard --logdir=/path/to/logging_dir/`
References:
https://huggingface.co/transformers/main_classes/trainer.html#transformers.TrainingArguments
"""
# build a base model
logger.info("Building a model...")
if args.model_path is None:
# pre-training from scratch
tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
config = RobertaConfig(
attention_probs_dropout_prob=args.attention_probs_dropout_prob,
bos_token_id=0,
eos_token_id=2,
gradient_checkpointing=False,
hidden_act="gelu",
hidden_dropout_prob=args.hidden_dropout_prob,
hidden_size=args.hidden_size,
initializer_range=0.02,
intermediate_size=args.intermediate_size,
layer_norm_eps=1e-05,
max_position_embeddings=514,
model_type="roberta",
num_attention_heads=args.num_attention_heads,
num_hidden_layers=args.num_hidden_layers,
pad_token_id=1,
type_vocab_size=1,
vocab_size=50265
)
model = RobertaForRandomWordClassification(config)
model.resize_token_embeddings(len(tokenizer))
logger.info(config)
else:
# resume pre-training from a given checkpoint
tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
config = RobertaConfig.from_pretrained(args.model_path)
model = RobertaForRandomWordClassification.from_pretrained(args.model_path, config=config)
logger.info(f"Save a checkpoint every {training_args.save_steps} steps.")
logger.info(f"Logging every {training_args.logging_steps} steps.")
# load datasets
logger.info("Load the processed dataset...")
full_dataset = []
if Path(args.data_dir).exists() is False:
raise FileNotFoundError("The specified dataset path does not exist!")
for ratio in range(0, 100, 10):
temp_data_dir = Path(args.data_dir) / str(ratio)
dataset = datasets.load_from_disk(temp_data_dir)
if full_dataset != []:
full_dataset = concatenate_datasets([full_dataset, dataset])
else:
full_dataset = dataset
full_dataset.remove_columns_(['text'])
full_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'])
full_dataset = full_dataset.shuffle(seed=training_args.seed)
# set up | |
1/pressure)
:param M_guess: float guess saturation loading (units: loading)
"""
# parameter names (cannot rely on order in Dict)
param_names = [param for param in self.params.keys()]
# guess
guess = np.array([self.param_guess[param] for param in param_names])
def residual_sum_of_squares(params_):
"""
Residual Sum of Squares between model and data in df
:param params_: Array of parameters
"""
# change params to those in x
for i in range(len(param_names)):
self.params[param_names[i]] = params_[i]
return np.sum((self.df[self.loading_key].values - self.loading(
self.df[self.pressure_key].values))**2)
# minimize RSS
opt_res = scipy.optimize.minimize(
residual_sum_of_squares, guess, method=optimization_method, bounds=bounds)
if not opt_res.success:
print((opt_res.message))
print(("\n\tDefault starting guess for parameters:",
self.param_guess))
raise Exception("""Minimization of RSS for %s isotherm fitting
failed. Try a different starting point in the nonlinear optimization
by passing a dictionary of parameter guesses, param_guess, to the
constructor""" % self.model)
# assign params
for j in range(len(param_names)):
self.params[param_names[j]] = opt_res.x[j]
self.rmse = np.sqrt(opt_res.fun / self.df.shape[0])
y_i = self.df[self.loading_key]
y_bar = self.df[self.loading_key].mean()
y_i_hat = self.loading(self.df[self.pressure_key].values)
n = len(self.df)
rmse = np.sqrt((1. / n) * np.sum((y_i - y_i_hat)**2.))
sse = np.sum((y_i - y_i_hat)**2.)
tss = np.sum((y_i - y_bar)**2.)
self.r2 = 1. - sse / tss
def spreading_pressure(self, pressure):
    """
    Calculate reduced spreading pressure at a bulk gas pressure P.
    The reduced spreading pressure is an integral involving the isotherm
    :math:`L(P)`:
    .. math::
        \\Pi(p) = \\int_0^p \\frac{L(\\hat{p})}{ \\hat{p}} d\\hat{p},
    which is computed analytically, as a function of the model isotherm
    parameters.
    :param pressure: float pressure (in corresponding units as df in
        instantiation)
    :return: spreading pressure, :math:`\\Pi`
    :rtype: Float
    """
    import warnings
    if pressure < 0:
        pressure = 0
        # BUGFIX: the old code did `Warning('...')`, which constructs an
        # exception object and silently discards it; actually warn the caller.
        warnings.warn('pressure (concentration) is below zero, this is unphysical, so set to zero.')
    # each branch is the closed-form integral of L(p)/p for that model
    if self.model == "Langmuir":
        return self.params["M"] * np.log(1.0 + self.params["K"] * pressure)
    if self.model == "Freundlich":
        raise Exception("IAST is not allowed for Freundlich isotherms as they do not obey Henry's law for low concentrations.")
    if self.model == "Quadratic":
        return self.params["M"] * np.log(1.0 + self.params["Ka"] * pressure
                                         + self.params["Kb"] * pressure**2)
    if self.model == "BET":
        return self.params["M"] * np.log(
            (1.0 - self.params["Kb"] * pressure + self.params["Ka"] *
             pressure) / (1.0 - self.params["Kb"] * pressure))
    if self.model == "DSLangmuir":
        return self.params["M1"] * np.log(
            1.0 + self.params["K1"] * pressure) +\
            self.params["M2"] * np.log(
                1.0 + self.params["K2"] * pressure)
    if self.model == "Henry":
        return self.params["KH"] * pressure
    if self.model == "TemkinApprox":
        one_plus_kp = 1.0 + self.params["K"] * pressure
        return self.params["M"] * (
            np.log(one_plus_kp) + self.params["theta"] *
            (2.0 * self.params["K"] * pressure + 1.0) /
            (2.0 * one_plus_kp**2))
    # NOTE(review): an unrecognized model name falls through and returns None,
    # matching the original behavior; the constructor presumably validates
    # self.model — confirm before tightening this to a raise.
def print_params(self):
    """
    Print identified model parameters and the RMSE of the fit.
    """
    print("%s identified model parameters:" % self.model)
    for param, val in self.params.items():
        print("\t%s = %f" % (param, val))
    # BUGFIX: the old `print(("RMSE = ", self.rmse))` was a 2to3 artifact that
    # printed the tuple repr `('RMSE = ', <value>)`; print a formatted line.
    print("RMSE = %f" % self.rmse)
class InterpolatorIsotherm:
    """
    Interpolator isotherm object to store pure-component adsorption isotherm.

    Here, the isotherm is characterized by linear interpolation of data.
    Loading = 0.0 at pressure = 0.0 is enforced here automatically for
    interpolation at low pressures.
    Default for extrapolating isotherm beyond highest pressure in available data
    is to throw an exception. Pass a value for `fill_value` in instantiation to
    extrapolate loading as `fill_value`.
    """

    def __init__(self,
                 df,
                 loading_key=None,
                 pressure_key=None,
                 fill_value=None):
        """
        Instantiation. InterpolatorIsotherm is instantiated by passing it the
        pure-component adsorption isotherm data in the form of a Pandas
        DataFrame.
        Linear interpolation done with `interp1d` function in Scipy.
        e.g. to extrapolate loading beyond highest pressure point as 100.0,
        pass `fill_value=100.0`.
        :param df: DataFrame adsorption isotherm data
        :param loading_key: String key for loading column in df
        :param pressure_key: String key for pressure column in df
        :param fill_value: Float value of loading to assume when an attempt is
            made to interpolate at a pressure greater than the largest pressure
            observed in the data
        :return: self
        :rtype: InterpolatorIsotherm
        """
        # BUGFIX: validate the column names *before* they are used to index the
        # DataFrame. Previously `df[pressure_key]` ran first, so omitting the
        # keys raised an opaque KeyError instead of this message.
        if None in [loading_key, pressure_key]:
            raise Exception("Pass loading_key and pressure_key, names of "
                            "loading and pressure cols in DataFrame, to "
                            "constructor.")
        # if pressure = 0 not in data frame, add it for interpolation between
        # p = 0 and the lowest, nonzero pressure point.
        if 0.0 not in df[pressure_key].values:
            df = pd.concat([
                pd.DataFrame({
                    pressure_key: 0.0,
                    loading_key: 0.0
                }, index=[0]), df
            ])
        # store isotherm data in self
        #: Pandas DataFrame on which isotherm was fit, sorted by pressure
        self.df = df.sort_values(pressure_key, ascending=True)
        #: name of loading column
        self.loading_key = loading_key
        #: name of pressure column
        self.pressure_key = pressure_key
        if fill_value is None:
            # out-of-range queries raise inside scipy's interp1d
            self.interp1d = interp1d(self.df[pressure_key],
                                     self.df[loading_key])
        else:
            # out-of-range queries return `fill_value` instead of raising
            self.interp1d = interp1d(
                self.df[pressure_key],
                self.df[loading_key],
                fill_value=fill_value,
                bounds_error=False)
        #: value of loading to assume beyond highest pressure in the data
        self.fill_value = fill_value

    def loading(self, pressure):
        """
        Linearly interpolate isotherm to compute loading at pressure P.
        :param pressure: float pressure (in corresponding units as df in
            instantiation)
        :return: predicted loading at pressure P (in corresponding units as df
            in instantiation)
        :rtype: Float or Array
        """
        return self.interp1d(pressure)

    def spreading_pressure(self, pressure):
        """
        Calculate reduced spreading pressure at a bulk gas pressure P.
        (see Tarafder eqn 4)
        Use numerical quadrature on isotherm data points to compute the reduced
        spreading pressure via the integral:
        .. math::
            \\Pi(p) = \\int_0^p \\frac{q(\\hat{p})}{ \\hat{p}} d\\hat{p}.
        In this integral, the isotherm :math:`q(\\hat{p})` is represented by a
        linear interpolation of the data.
        See <NAME>, <NAME>, <NAME>. pyIAST: Ideal Adsorbed Solution
        Theory (IAST) Python Package. Computer Physics Communications.
        :param pressure: float pressure (in corresponding units as df in
            instantiation)
        :return: spreading pressure, :math:`\\Pi`
        :rtype: Float
        """
        # throw exception if interpolating outside the range
        # (idiom fix: `and` instead of bitwise `&` on the two booleans)
        if (self.fill_value is None) and \
                (pressure > self.df[self.pressure_key].max()):
            raise Exception("""To compute the spreading pressure at this bulk
            gas pressure, we would need to extrapolate the isotherm since this
            pressure is outside the range of the highest pressure in your
            pure-component isotherm data, %f.
            At present, your InterpolatorIsotherm object is set to throw an
            exception when this occurs, as we do not have data outside this
            pressure range to characterize the isotherm at higher pressures.
            Option 1: fit an analytical model to extrapolate the isotherm
            Option 2: pass a `fill_value` to the construction of the
            InterpolatorIsotherm object. Then, InterpolatorIsotherm will
            assume that the uptake beyond pressure %f is equal to
            `fill_value`. This is reasonable if your isotherm data exhibits
            a plateau at the highest pressures.
            Option 3: Go back to the lab or computer to collect isotherm data
            at higher pressures. (Extrapolation can be dangerous!)""" %
                            (self.df[self.pressure_key].max(),
                             self.df[self.pressure_key].max()))
        # Get all data points that are at nonzero pressures
        pressures = self.df[self.pressure_key].values[
            self.df[self.pressure_key].values != 0.0]
        loadings = self.df[self.loading_key].values[
            self.df[self.pressure_key].values != 0.0]
        # approximate loading up to first pressure point with Henry's law
        # loading = henry_const * P
        # henry_const is the initial slope in the adsorption isotherm
        henry_const = loadings[0] / pressures[0]
        # get how many of the points are less than pressure P
        n_points = np.sum(pressures < pressure)
        if n_points == 0:
            # if this pressure is between 0 and first pressure point...
            # \int_0^P henry_const P /P dP = henry_const * P ...
            return henry_const * pressure
        else:
            # P > first pressure point
            area = loadings[0]  # area of first segment \int_0^P_1 n(P)/P dP
            # get area between P_1 and P_k, where P_k < P < P_{k+1}
            for i in range(n_points - 1):
                # linear interpolation of isotherm data; each segment's
                # integral of (slope*p + intercept)/p has a closed form
                slope = (loadings[i + 1] - loadings[i]) / (pressures[i + 1] -
                                                           pressures[i])
                intercept = loadings[i] - slope * pressures[i]
                # add area of this segment
                area += slope * (pressures[i + 1] - pressures[i]) + intercept * \
                    np.log(pressures[i + 1] / pressures[i])
            # finally, area of last segment (from P_k up to P itself)
            slope = (self.loading(pressure) - loadings[n_points - 1]) / (
                pressure - pressures[n_points - 1])
            intercept = loadings[n_points -
                                 1] - slope * pressures[n_points - 1]
            area += slope * (pressure - pressures[n_points - 1]) + intercept * \
                np.log(pressure / pressures[n_points - 1])
            return area
def plot_isotherm(isotherm,
withfit=True,
xlogscale=False,
ylogscale=False,
pressure=None):
"""
Plot isotherm data and fit using Matplotlib.
:param isotherm: pyIAST isotherm object
:param withfit: Bool plot fit as well
:param pressure: numpy.array optional pressure array to pass for plotting
| |
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the Cost Model serializers."""
import logging
import random
from decimal import Decimal
from uuid import uuid4
import faker
from rest_framework import serializers
from tenant_schemas.utils import tenant_context
from api.iam.test.iam_test_case import IamTestCase
from api.metrics import constants as metric_constants
from api.metrics.constants import SOURCE_TYPE_MAP
from api.provider.models import Provider
from cost_models.models import CostModel
from cost_models.models import CostModelMap
from cost_models.serializers import CostModelSerializer
from cost_models.serializers import RateSerializer
from cost_models.serializers import UUIDKeyRelatedField
LOG = logging.getLogger(__name__)
def format_tag_value(**kwarg_dict):
    """Build a tag_value dict, filling any unspecified entries with defaults."""
    # the same unit is reported both at the top level and inside "usage"
    unit = kwarg_dict.get("unit", "USD")
    return {
        "tag_value": kwarg_dict.get("tag_value", "value_one"),
        "unit": unit,
        "usage": {
            "unit": unit,
            "usage_end": kwarg_dict.get("usage_end"),
            "usage_start": kwarg_dict.get("usage_start"),
        },
        "value": kwarg_dict.get("value", 0.2),
        "description": kwarg_dict.get("description", ""),
        "default": kwarg_dict.get("default", False),
    }
def format_tag_rate(tag_key="key_one", tag_values=None):
    """Build a tag_rate dict from a key and a list of tag_value kwargs."""
    if tag_values:
        values = [format_tag_value(**value_kwargs) for value_kwargs in tag_values]
    elif tag_values == []:
        # an explicit empty list is preserved as-is
        values = []
    else:
        # None (or any other falsy sentinel) yields a single default tag_value
        values = [format_tag_value()]
    return {"tag_key": tag_key, "tag_values": values}
class CostModelSerializerTest(IamTestCase):
"""Cost Model serializer tests."""
fake = faker.Faker()
def setUp(self):
    """Set up the tests."""
    super().setUp()
    # any OCP provider from the test fixtures works for these cases
    self.provider = Provider.objects.filter(type=Provider.PROVIDER_OCP).first()
    ocp_metric = metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR
    ocp_source_type = Provider.PROVIDER_OCP
    tiered_rates = [{"unit": "USD", "value": 0.22}]
    # baseline valid payload; individual tests mutate this dict in place
    self.ocp_data = {
        "name": "Test Cost Model",
        "description": "Test",
        "source_type": ocp_source_type,
        "providers": [{"uuid": self.provider.uuid, "name": self.provider.name}],
        "markup": {"value": 10, "unit": "percent"},
        "rates": [{"metric": {"name": ocp_metric}, "tiered_rates": tiered_rates}],
    }
    # minimal payload whose rate omits tiered_rates entirely
    self.basic_model = {
        "name": "Test Cost Model",
        "description": "Test",
        "source_type": Provider.PROVIDER_OCP,
        "providers": [{"uuid": self.provider.uuid, "name": self.provider.name}],
        "markup": {"value": 10, "unit": "percent"},
        "rates": [{"metric": {"name": ocp_metric}}],
    }
def tearDown(self):
    """Clean up test cases."""
    # remove any cost models created inside the tenant schema
    with tenant_context(self.tenant):
        CostModel.objects.all().delete()
        CostModelMap.objects.all().delete()
def test_valid_data(self):
    """Test rate and markup for valid entries."""
    with tenant_context(self.tenant):
        instance = None
        serializer = CostModelSerializer(data=self.ocp_data)
        if serializer.is_valid(raise_exception=True):
            instance = serializer.save()
        # the saved instance keeps a known source type, its markup, and rates
        self.assertIn(instance.source_type, SOURCE_TYPE_MAP.keys())
        self.assertIsNotNone(instance.markup)
        self.assertIsNotNone(instance.rates)
def test_uuid_key_related_field(self):
    """Exercise UUIDKeyRelatedField conversions against a real provider."""
    related_field = UUIDKeyRelatedField(queryset=Provider.objects.all(), pk_field="uuid")
    # pk-only optimization must stay disabled so the full model is available
    self.assertFalse(related_field.use_pk_only_optimization())
    expected_uuid = self.provider.uuid
    self.assertEqual(expected_uuid, related_field.to_internal_value(self.provider.uuid))
    self.assertEqual(expected_uuid, related_field.to_representation(self.provider))
    self.assertEqual(expected_uuid, related_field.display_value(self.provider))
def test_error_on_invalid_provider(self):
    """Test error with an invalid provider id."""
    # a source_uuid that matches no provider must fail validation
    self.ocp_data.update({"source_uuids": ["1dd7204c-72c4-4ec4-95bc-d5c447688b27"]})
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_not_OCP_source_type_with_markup(self):
    """Test that a source type is valid if it has markup."""
    # non-OCP sources may carry markup as long as rates are empty
    self.ocp_data["source_type"] = Provider.PROVIDER_AWS
    self.ocp_data["rates"] = []
    with tenant_context(self.tenant):
        instance = None
        serializer = CostModelSerializer(data=self.ocp_data)
        if serializer.is_valid(raise_exception=True):
            instance = serializer.save()
        self.assertIsNotNone(instance)
        self.assertIsNotNone(instance.markup)
def test_error_source_type_with_markup(self):
    """Test that non-existent source type is invalid."""
    # a source type outside the known provider set must be rejected
    self.ocp_data["source_type"] = "invalid-source"
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_on_source_type_without_markup(self):
    """Test error when non OCP source is added without markup."""
    # non-OCP sources only support markup; an empty markup is invalid
    self.ocp_data["source_type"] = Provider.PROVIDER_AWS
    self.ocp_data["markup"] = {}
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_on_nonOCP_source_type_with_markup_and_rates(self):
    """Test error when non OCP source is added with markup and rates."""
    # rates are OCP-only, so AWS + rates (from the baseline payload) must fail
    self.ocp_data["source_type"] = Provider.PROVIDER_AWS
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_on_invalid_metric(self):
    """Test error on an invalid metric rate."""
    # an unknown metric name must be rejected by the serializer
    self.ocp_data.get("rates", [])[0]["metric"]["name"] = "invalid_metric"
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_on_usage_bad_start_bound(self):
    """Test error on a usage_start that does not cover lower bound."""
    # a single tier starting at 5 leaves usage in [0, 5) uncovered
    self.ocp_data["rates"][0]["tiered_rates"][0]["usage"] = {"usage_start": 5, "usage_end": None}
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_on_usage_bad_upper_bound(self):
    """Test error on a usage_end that does not cover upper bound."""
    # a single tier ending at 5 leaves usage above 5 uncovered
    self.ocp_data["rates"][0]["tiered_rates"][0]["usage"] = {"usage_start": None, "usage_end": 5}
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_on_rate_type(self):
    """Test error when trying to create an invalid rate input."""
    # replace the valid tiered_rates key with an unrecognized rate type
    self.ocp_data["rates"][0].pop("tiered_rates")
    self.ocp_data["rates"][0]["bad_rates"] = []
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_on_negative_rate(self):
    """Test error when trying to create an negative rate input."""
    # rate values must be non-negative; use a random negative float
    self.ocp_data["rates"][0]["tiered_rates"][0]["value"] = float(round(Decimal(random.random()), 6) * -1)
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_no_rate(self):
    """Test error when trying to create an empty rate."""
    # an empty tiered_rates list is not a valid rate definition
    self.ocp_data["rates"][0]["tiered_rates"] = []
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_neg_tier_usage_start(self):
    """Test error when trying to create a negative tiered usage_start."""
    # usage bounds must be non-negative
    self.ocp_data["rates"][0]["tiered_rates"][0]["usage"] = {
        "usage_start": float(round(Decimal(random.random()), 6) * -1),
        "usage_end": 20.0,
    }
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_neg_tier_usage_end(self):
    """Test error when trying to create a negative tiered usage_end."""
    # usage bounds must be non-negative
    self.ocp_data["rates"][0]["tiered_rates"][0]["usage"] = {
        "usage_start": 10.0,
        "usage_end": float(round(Decimal(random.random()), 6) * -1),
    }
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_tier_usage_end_less_than(self):
    """Test error when trying to create a tiered usage_end less than usage_start."""
    # an inverted range (end < start) must be rejected
    self.ocp_data["rates"][0]["tiered_rates"][0]["usage"] = {"usage_start": 10.0, "usage_end": 3.0}
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_create_cpu_core_per_hour_tiered_rate(self):
    """Test creating a cpu_core_per_hour rate."""
    # two contiguous tiers covering [0, inf): open start -> 10, then 10 -> open end
    self.ocp_data["rates"][0]["tiered_rates"] = [
        {"unit": "USD", "value": 0.22, "usage": {"usage_start": None, "usage_end": 10.0}},
        {"unit": "USD", "value": 0.26, "usage": {"usage_start": 10.0, "usage_end": None}},
    ]
    with tenant_context(self.tenant):
        instance = None
        serializer = CostModelSerializer(data=self.ocp_data)
        if serializer.is_valid(raise_exception=True):
            instance = serializer.save()
        self.assertIsNotNone(instance)
        self.assertIsNotNone(instance.uuid)
def test_tiered_rate_null_start_end(self):
    """Test that tiers with bounded start/end that do not cover the full usage range are rejected."""
    # [0, 7] and [10, 20] leave (7, 10) and (20, inf) uncovered
    self.ocp_data["rates"][0]["tiered_rates"] = [
        {"unit": "USD", "value": 0.22, "usage": {"usage_start": 0.0, "usage_end": 7.0}},
        {"unit": "USD", "value": 0.26, "usage": {"usage_start": 10.0, "usage_end": 20.0}},
    ]
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_tiered_rate_with_gaps(self):
    """Test creating a tiered rate with a gap between the tiers."""
    # the range (7, 10) is covered by neither tier, so validation must fail
    self.ocp_data["rates"][0]["tiered_rates"] = [
        {"unit": "USD", "value": 0.22, "usage": {"usage_start": None, "usage_end": 7.0}},
        {"unit": "USD", "value": 0.26, "usage_start": 10.0, "usage_end": None},
    ]
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_create_storage_tiered_rate(self):
    """Test creating a storage tiered rate."""
    # both storage metrics (request and usage per GB-month) accept tiered rates
    storage_rates = (
        metric_constants.OCP_METRIC_STORAGE_GB_REQUEST_MONTH,
        metric_constants.OCP_METRIC_STORAGE_GB_USAGE_MONTH,
    )
    for storage_rate in storage_rates:
        ocp_data = {
            "name": "Test Cost Model",
            "description": "Test",
            "source_type": Provider.PROVIDER_OCP,
            "providers": [{"uuid": self.provider.uuid, "name": self.provider.name}],
            "rates": [
                {
                    "metric": {"name": storage_rate},
                    "tiered_rates": [
                        {"unit": "USD", "value": 0.22, "usage": {"usage_start": None, "usage_end": 10.0}},
                        {"unit": "USD", "value": 0.26, "usage": {"usage_start": 10.0, "usage_end": None}},
                    ],
                }
            ],
        }
        with tenant_context(self.tenant):
            instance = None
            serializer = CostModelSerializer(data=ocp_data)
            if serializer.is_valid(raise_exception=True):
                instance = serializer.save()
            self.assertIsNotNone(instance)
            self.assertIsNotNone(instance.uuid)
def test_create_storage_no_tiers_rate(self):
    """Test creating a non tiered storage rate."""
    storage_rates = (
        metric_constants.OCP_METRIC_STORAGE_GB_REQUEST_MONTH,
        metric_constants.OCP_METRIC_STORAGE_GB_USAGE_MONTH,
    )
    for storage_rate in storage_rates:
        # A single tier with no usage bounds at all must also be accepted.
        ocp_data = {
            "name": "Test Cost Model",
            "description": "Test",
            "source_type": Provider.PROVIDER_OCP,
            "providers": [{"uuid": self.provider.uuid, "name": self.provider.name}],
            "rates": [{"metric": {"name": storage_rate}, "tiered_rates": [{"unit": "USD", "value": 0.22}]}],
        }
        with tenant_context(self.tenant):
            instance = None
            serializer = CostModelSerializer(data=ocp_data)
            if serializer.is_valid(raise_exception=True):
                instance = serializer.save()
            self.assertIsNotNone(instance)
            self.assertIsNotNone(instance.uuid)
def test_tiered_rate_with_overlaps(self):
    """Test creating a tiered rate with overlapping tiers."""
    # The second tier (5.0-20.0) overlaps the first (None-10.0).
    overlapping_tiers = [
        {"unit": "USD", "value": 0.22, "usage": {"usage_start": None, "usage_end": 10.0}},
        {"unit": "USD", "value": 0.26, "usage": {"usage_start": 5.0, "usage_end": 20.0}},
        {"unit": "USD", "value": 0.26, "usage": {"usage_start": 20.0, "usage_end": None}},
    ]
    self.ocp_data["rates"][0]["tiered_rates"] = overlapping_tiers
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_tiered_rate_with_duplicate(self):
    """Test creating a tiered rate with duplicate tiers."""
    # The 10.0-20.0 tier appears twice, which must fail validation.
    duplicated_tiers = [
        {"unit": "USD", "value": 0.22, "usage": {"usage_start": None, "usage_end": 10.0}},
        {"unit": "USD", "value": 0.26, "usage": {"usage_start": 10.0, "usage_end": 20.0}},
        {"unit": "USD", "value": 0.26, "usage": {"usage_start": 10.0, "usage_end": 20.0}},
        {"unit": "USD", "value": 0.26, "usage": {"usage_start": 20.0, "usage_end": None}},
    ]
    self.ocp_data["rates"][0]["tiered_rates"] = duplicated_tiers
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_get_metric_display_data_openshift(self):
    """Test the display data helper function for OpenShift metrics."""
    serializer = CostModelSerializer(data=None)
    for metric_choice in metric_constants.METRIC_CHOICES:
        display = serializer._get_metric_display_data(Provider.PROVIDER_OCP, metric_choice[0])
        # Every metric must expose all three display labels.
        for label in ("label_measurement_unit", "label_measurement", "label_metric"):
            self.assertIsNotNone(display.get(label))
def test_validate_rates_allows_duplicate_metric(self):
    """Check that duplicate rate entries for the same metric are allowed.

    NOTE(review): the previous docstring said duplicates "are rejected",
    which contradicted both the test name and the assertion below that
    validate_rates keeps both entries.
    """
    rate = self.ocp_data["rates"][0]
    expected_metric_name = rate.get("metric", {}).get("name")
    expected_metric_count = 2
    self.assertIsNotNone(expected_metric_name)
    # Add another tiered rate entry for the same metric
    self.ocp_data["rates"].append(rate)
    result_metric_count = 0
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        valid_rates = serializer.validate_rates(self.ocp_data["rates"])
        # Count how many validated rates survived for the duplicated metric.
        for valid_rate in valid_rates:
            if valid_rate.get("metric", {}).get("name") == expected_metric_name:
                result_metric_count += 1
    self.assertEqual(expected_metric_count, result_metric_count)
def test_rate_cost_type_valid(self):
    """Test that a valid cost type is accepted."""
    # First pass: cost_type "Infrastructure" must validate and save.
    self.ocp_data["rates"][0]["tiered_rates"] = [
        {
            "unit": "USD",
            "value": 0.22,
            "usage": {"usage_start": None, "usage_end": None},
            "cost_type": "Infrastructure",
        }
    ]
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        if serializer.is_valid(raise_exception=True):
            serializer.save()
    # Second pass: cost_type "Supplementary" must also validate and save.
    self.ocp_data["rates"][0]["tiered_rates"] = [
        {
            "unit": "USD",
            "value": 0.22,
            "usage": {"usage_start": None, "usage_end": None},
            "cost_type": "Supplementary",
        }
    ]
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        if serializer.is_valid(raise_exception=True):
            serializer.save()
def test_rate_cost_type_invalid(self):
    """Test that an invalid cost type is rejected."""
    unknown_cost_type = "Infrastructurez"
    self.ocp_data["rates"][0]["cost_type"] = unknown_cost_type
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.ocp_data)
        with self.assertRaises(serializers.ValidationError):
            if serializer.is_valid(raise_exception=True):
                serializer.save()
def test_error_on_multiple_tag_values_marked_as_default(self):
    """Test that multiple default set to true fails."""
    tag_values_kwargs = [{"default": True}, {"tag_value": "value_two", "value": 0.3, "default": True}]
    self.basic_model["rates"][0]["tag_rates"] = format_tag_rate(tag_values=tag_values_kwargs)
    with tenant_context(self.tenant):
        serializer = CostModelSerializer(data=self.basic_model)
        with self.assertRaises(serializers.ValidationError):
            self.assertFalse(serializer.is_valid(raise_exception=True))
        # Removed a stray debug print(serializer.errors) left over from
        # development; the assertion below checks the error content instead.
        result_err_msg = serializer.errors["rates"][0]["tag_values"][0]
        expected_err_msg = "Only one tag_value per tag_key can be marked as a default."
        self.assertEqual(result_err_msg, expected_err_msg)
def test_tag_rates_error_on_negitive_tag_value(self):
"""Test that a negivite value in the tag value fails."""
tag_values_kwargs = [{"value": -0.2}]
self.basic_model["rates"][0]["tag_rates"] = format_tag_rate(tag_values=tag_values_kwargs)
with tenant_context(self.tenant):
serializer = CostModelSerializer(data=self.basic_model)
with self.assertRaises(serializers.ValidationError):
self.assertFalse(serializer.is_valid(raise_exception=True))
result_err_msg | |
<filename>lowhaio.py
import asyncio
import contextlib
import ipaddress
import logging
import urllib.parse
import ssl
import socket
from aiodnsresolver import (
TYPES,
DnsError,
Resolver,
ResolverLoggerAdapter,
)
# Exception hierarchy for lowhaio:
#   HttpError
#   ├── HttpConnectionError          failures before the request is sent
#   │   ├── HttpDnsError             DNS resolution failed
#   │   └── HttpTlsError             TLS handshake failed
#   └── HttpDataError                failures while sending/receiving data
#       ├── HttpConnectionClosedError  peer closed the connection
#       └── HttpHeaderTooLong          header exceeded max_header_length
class HttpError(Exception):
    pass

class HttpConnectionError(HttpError):
    pass

class HttpDnsError(HttpConnectionError):
    pass

class HttpTlsError(HttpConnectionError):
    pass

class HttpDataError(HttpError):
    pass

class HttpConnectionClosedError(HttpDataError):
    pass

class HttpHeaderTooLong(HttpDataError):
    pass
class HttpLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter that prefixes messages with '[http]' or, when `extra`
    is non-empty, '[http:<comma-joined extra values>]'."""

    def process(self, msg, kwargs):
        if self.extra:
            joined_values = ','.join(str(value) for value in self.extra.values())
            prefix = '[http:%s]' % (joined_values,)
        else:
            prefix = '[http]'
        return ('%s %s' % (prefix, msg), kwargs)
def get_logger_adapter_default(extra):
    """Default factory: an HttpLoggerAdapter over the 'lowhaio' logger."""
    return HttpLoggerAdapter(logging.getLogger('lowhaio'), extra)

def get_resolver_logger_adapter_default(http_extra):
    """Default factory for the DNS resolver's logger adapter.

    Chains the HTTP request context (http_extra) under the resolver's own
    context so resolver log lines carry both.
    """
    def _get_resolver_logger_adapter_default(resolver_extra):
        http_adapter = HttpLoggerAdapter(logging.getLogger('aiodnsresolver'), http_extra)
        return ResolverLoggerAdapter(http_adapter, resolver_extra)
    return _get_resolver_logger_adapter_default
async def empty_async_iterator():
    """An async generator that terminates immediately, yielding nothing."""
    return
    yield  # unreachable; its presence makes this an async generator
# asyncio.current_task() was added in Python 3.7; fall back to the
# pre-3.7 spelling asyncio.Task.current_task on older interpreters.
get_current_task = \
    asyncio.current_task if hasattr(asyncio, 'current_task') else \
    asyncio.Task.current_task
def streamed(data):
    """Wrap a single bytes object as a body factory: calling the returned
    function produces an async generator yielding `data` once."""
    async def _single_chunk():
        yield data
    return _single_chunk
async def buffered(data):
    """Collect an async iterable of bytes chunks into one bytes object."""
    chunks = []
    async for chunk in data:
        chunks.append(chunk)
    return b''.join(chunks)
def get_nonblocking_sock():
    """Create a non-blocking IPv4 TCP socket with Nagle's algorithm disabled
    (TCP_NODELAY), ready for use with the asyncio event loop."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    sock.setblocking(False)
    sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
    return sock
def set_tcp_cork(sock):
    # Linux-only: hold partial frames until uncorked, so header+body
    # coalesce into fewer packets.
    sock.setsockopt(socket.SOL_TCP, socket.TCP_CORK, 1)  # pylint: disable=no-member

def unset_tcp_cork(sock):
    # Release the cork set above, flushing any held data.
    sock.setsockopt(socket.SOL_TCP, socket.TCP_CORK, 0)  # pylint: disable=no-member
async def send_body_async_gen_bytes(logger, loop, sock, socket_timeout,
                                    body, body_args, body_kwargs):
    """Send the request body by draining the `body` async generator factory
    and writing each bytes chunk to the socket via send_all."""
    logger.debug('Sending body')
    num_bytes = 0
    async for chunk in body(*body_args, **dict(body_kwargs)):
        num_bytes += len(chunk)
        await send_all(loop, sock, socket_timeout, chunk)
    logger.debug('Sent body bytes: %s', num_bytes)
async def send_header_tuples_of_bytes(logger, loop, sock, socket_timeout,
                                      http_version, method, parsed_url, params, headers):
    """Send the request line and headers in a single write.

    A 'host' header (IDNA-encoded from the URL's hostname) is prepended
    unless the caller already supplied one.
    """
    logger.debug('Sending header')
    outgoing_qs = urllib.parse.urlencode(params, doseq=True).encode()
    outgoing_path = urllib.parse.quote(parsed_url.path).encode()
    outgoing_path_qs = outgoing_path + \
        ((b'?' + outgoing_qs) if outgoing_qs != b'' else b'')
    host_specified = any(True for key, value in headers if key == b'host')
    headers_with_host = \
        headers if host_specified else \
        ((b'host', parsed_url.hostname.encode('idna')),) + headers
    # Request line + headers + terminating blank line.
    await send_all(loop, sock, socket_timeout, b'%s %s %s\r\n%s\r\n' % (
        method, outgoing_path_qs, http_version, b''.join(
            b'%s:%s\r\n' % (key, value)
            for (key, value) in headers_with_host
        )
    ))
    logger.debug('Sent header')
def Pool(
        get_dns_resolver=Resolver,
        get_sock=get_nonblocking_sock,
        get_ssl_context=ssl.create_default_context,
        sock_pre_message=set_tcp_cork if hasattr(socket, 'TCP_CORK') else lambda _: None,
        sock_post_message=unset_tcp_cork if hasattr(socket, 'TCP_CORK') else lambda _: None,
        send_header=send_header_tuples_of_bytes,
        send_body=send_body_async_gen_bytes,
        http_version=b'HTTP/1.1',
        keep_alive_timeout=15,
        recv_bufsize=16384,
        max_header_length=16384,
        socket_timeout=10,
        get_logger_adapter=get_logger_adapter_default,
        get_resolver_logger_adapter=get_resolver_logger_adapter_default,
):
    """Create a keep-alive HTTP connection pool.

    Returns (request, close): `request` performs an HTTP request reusing
    pooled connections where possible; `close` shuts the pool down.
    Must be called from a running event loop (uses get_running_loop when
    available).
    """
    loop = \
        asyncio.get_running_loop() if hasattr(asyncio, 'get_running_loop') else \
        asyncio.get_event_loop()
    ssl_context = get_ssl_context()
    logger_extra = {}
    logger = get_logger_adapter({})
    # Bug fix: use the injected get_resolver_logger_adapter parameter here
    # rather than always falling back to the module-level default, so a
    # caller-supplied resolver logger adapter is actually honoured.
    dns_resolve, dns_resolver_clear_cache = get_dns_resolver(
        get_logger_adapter=get_resolver_logger_adapter(logger_extra),
    )
    # Map of (scheme, netloc) -> {sock: keep-alive timeout handle}.
    pool = {}
async def request(method, url, params=(), headers=(),
                  body=empty_async_iterator, body_args=(), body_kwargs=(),
                  get_logger_adapter=get_logger_adapter,
                  get_resolver_logger_adapter=get_resolver_logger_adapter,
                  ):
    """Perform one HTTP request, reusing a pooled connection if available.

    Returns (code, response_headers, response_body); response_body is an
    async generator — consuming it fully returns the socket to the pool
    (keep-alive) or closes it. Raises HttpDnsError, HttpConnectionError,
    HttpTlsError or HttpDataError depending on the failing stage.
    """
    parsed_url = urllib.parse.urlsplit(url)
    logger_extra = {'lowhaio_method': method.decode(), 'lowhaio_url': url}
    logger = get_logger_adapter(logger_extra)
    # If the host is already an IP literal, skip DNS resolution entirely.
    try:
        ip_addresses = (ipaddress.ip_address(parsed_url.hostname),)
    except ValueError:
        try:
            ip_addresses = await dns_resolve(
                parsed_url.hostname, TYPES.A,
                get_logger_adapter=get_resolver_logger_adapter(logger_extra),
            )
        except DnsError as exception:
            raise HttpDnsError() from exception
    key = (parsed_url.scheme, parsed_url.netloc)
    sock = get_from_pool(logger, key, ip_addresses)
    if sock is None:
        sock = get_sock()
        # TCP connect; close the socket on any failure so it never leaks.
        try:
            logger.debug('Connecting: %s', sock)
            await connect(sock, parsed_url, str(ip_addresses[0]))
            logger.debug('Connected: %s', sock)
        except asyncio.CancelledError:
            sock.close()
            raise
        except Exception as exception:
            sock.close()
            raise HttpConnectionError() from exception
        except BaseException:
            sock.close()
            raise
        # Newly-created https connections also need a TLS handshake;
        # pooled sockets are already wrapped.
        try:
            if parsed_url.scheme == 'https':
                logger.debug('TLS handshake started')
                sock = tls_wrapped(sock, parsed_url.hostname)
                await tls_complete_handshake(loop, sock, socket_timeout)
                logger.debug('TLS handshake completed')
        except asyncio.CancelledError:
            sock.close()
            raise
        except Exception as exception:
            sock.close()
            raise HttpTlsError() from exception
        except BaseException:
            sock.close()
            raise
    # Send the request and parse the response header; any failure other
    # than an existing HttpDataError is wrapped as HttpDataError.
    try:
        sock_pre_message(sock)
        await send_header(logger, loop, sock, socket_timeout, http_version,
                          method, parsed_url, params, headers)
        await send_body(logger, loop, sock, socket_timeout, body, body_args, body_kwargs)
        sock_post_message(sock)
        code, version, response_headers, unprocessed = await recv_header(sock)
        logger.debug('Received header with code: %s', code)
        connection, body_length, body_handler = connection_length_body_handler(
            logger, method, version, response_headers)
        response_body = response_body_generator(
            logger, sock, unprocessed, key, connection, body_length, body_handler)
    except asyncio.CancelledError:
        sock.close()
        raise
    except Exception as exception:
        sock.close()
        if isinstance(exception, HttpDataError):
            raise
        raise HttpDataError() from exception
    except BaseException:
        sock.close()
        raise
    return code, response_headers, response_body
def get_from_pool(logger, key, ip_addresses):
    """Return a pooled socket for `key` still connected to one of
    `ip_addresses`, or None when no usable connection exists.

    Stale sockets (unreachable peer, IP no longer current for the domain,
    or already closed) are discarded as encountered. NOTE(review):
    indentation reconstructed from flattened source — the final
    `del pool[key]` is placed after the while loop, pruning the entry once
    no usable socket remains; confirm against upstream.
    """
    try:
        socks = pool[key]
    except KeyError:
        logger.debug('Connection not in pool: %s', key)
        return None
    while socks:
        _sock, close_callback = next(iter(socks.items()))
        # Cancel the keep-alive timer that would otherwise close this socket.
        close_callback.cancel()
        del socks[_sock]
        try:
            connected_ip = ipaddress.ip_address(_sock.getpeername()[0])
        except OSError:
            logger.debug('Unable to get peer name: %s', _sock)
            _sock.close()
            continue
        # The domain may now resolve elsewhere; don't reuse stale endpoints.
        if connected_ip not in ip_addresses:
            logger.debug('Not current for domain, closing: %s', _sock)
            _sock.close()
            continue
        logger.debug('Reusing connection %s', _sock)
        # fileno() == -1 means the socket was closed in the meantime.
        if _sock.fileno() != -1:
            return _sock
    del pool[key]
def add_to_pool(key, sock):
    """Store `sock` for reuse under `key`, scheduling it to be closed by
    close_by_keep_alive_timeout after keep_alive_timeout seconds."""
    try:
        key_pool = pool[key]
    except KeyError:
        key_pool = {}
        pool[key] = key_pool
    key_pool[sock] = loop.call_later(keep_alive_timeout, close_by_keep_alive_timeout,
                                     key, sock)
def close_by_keep_alive_timeout(key, sock):
    """Timer callback: close an idle pooled socket and prune the pool
    entry once it holds no sockets."""
    logger.debug('Closing by timeout: %s,%s', key, sock)
    sock.close()
    del pool[key][sock]
    if not pool[key]:
        del pool[key]
async def connect(sock, parsed_url, ip_address):
    """Connect `sock` to the resolved IP, using the URL's explicit port
    or the scheme default (443 for https, otherwise 80)."""
    scheme = parsed_url.scheme
    _, _, port_specified = parsed_url.netloc.partition(':')
    # Bug fix: partition() yields the port as a string, but socket
    # addresses require a numeric port — convert with int().
    port = \
        int(port_specified) if port_specified != '' else \
        443 if scheme == 'https' else \
        80
    address = (ip_address, port)
    await loop.sock_connect(sock, address)
def tls_wrapped(sock, host):
    """Wrap `sock` in TLS for SNI host `host`; the handshake is deferred
    (do_handshake_on_connect=False) so it can run non-blocking."""
    return ssl_context.wrap_socket(sock, server_hostname=host, do_handshake_on_connect=False)
async def recv_header(sock):
    """Read from `sock` until the end of the HTTP response header.

    Returns (code, version, response_headers, unprocessed) where
    `unprocessed` holds any body bytes received beyond the header.
    Raises HttpHeaderTooLong if the header exceeds max_header_length.
    """
    unprocessed = b''
    while True:
        unprocessed += await recv(loop, sock, socket_timeout, recv_bufsize)
        try:
            header_end = unprocessed.index(b'\r\n\r\n')
        except ValueError:
            if len(unprocessed) >= max_header_length:
                raise HttpHeaderTooLong()
            continue
        else:
            break
    header_bytes, unprocessed = unprocessed[:header_end], unprocessed[header_end + 4:]
    lines = header_bytes.split(b'\r\n')
    # Status line is e.g. b'HTTP/1.1 200 OK': version at [5:8], code at [9:12].
    code = lines[0][9:12]
    version = lines[0][5:8]
    response_headers = tuple(
        (key.strip().lower(), value.strip())
        for line in lines[1:]
        for (key, _, value) in (line.partition(b':'),)
    )
    return code, version, response_headers, unprocessed
async def response_body_generator(
        logger, sock, unprocessed, key, connection, body_length, body_handler):
    """Yield response body chunks via `body_handler`.

    On clean completion the socket is returned to the pool (keep-alive)
    or closed; on any error or early abandonment it is closed.
    """
    try:
        generator = body_handler(logger, sock, body_length, unprocessed)
        unprocessed = None  # So can be garbage collected
        logger.debug('Receiving body')
        num_bytes = 0
        async for chunk in generator:
            yield chunk
            num_bytes += len(chunk)
        logger.debug('Received transfer-decoded body bytes: %s', num_bytes)
    except BaseException:
        sock.close()
        raise
    else:
        if connection == b'keep-alive':
            logger.debug('Keeping connection alive: %s', sock)
            add_to_pool(key, sock)
        else:
            logger.debug('Closing connection: %s', sock)
            sock.close()
def connection_length_body_handler(logger, method, version, response_headers):
    """From the response header, decide connection management and body decoding.

    Returns (connection, body_length, body_handler):
    - connection: b'keep-alive' or b'close'; HTTP/1.1 defaults to
      keep-alive, earlier versions to close, and keep_alive_timeout == 0
      forces close;
    - body_length: content length when known, 0 for HEAD or
      keep-alive-without-content-length, None when unknown;
    - body_handler: the async generator used to decode the body.
    """
    headers_dict = dict(response_headers)
    transfer_encoding = headers_dict.get(b'transfer-encoding', b'identity')
    logger.debug('Effective transfer-encoding: %s', transfer_encoding)
    connection = \
        b'close' if keep_alive_timeout == 0 else \
        headers_dict.get(b'connection', b'keep-alive').lower() if version == b'1.1' else \
        headers_dict.get(b'connection', b'close').lower()
    logger.debug('Effective connection: %s', connection)
    body_length = \
        0 if method == b'HEAD' else \
        0 if connection == b'keep-alive' and b'content-length' not in headers_dict else \
        None if b'content-length' not in headers_dict else \
        int(headers_dict[b'content-length'])
    uses_identity = (method == b'HEAD' or transfer_encoding == b'identity')
    body_handler = \
        identity_handler_known_body_length if uses_identity and body_length is not None else \
        identity_handler_unknown_body_length if uses_identity else \
        chunked_handler
    return connection, body_length, body_handler
async def identity_handler_known_body_length(logger, sock, body_length, unprocessed):
    """Yield exactly `body_length` body bytes: first anything already
    buffered past the header, then the remainder read from the socket.

    NOTE(review): assumes len(unprocessed) <= body_length; a longer buffer
    would drive total_remaining negative — confirm upstream guarantees.
    """
    logger.debug('Expected incoming body bytes: %s', body_length)
    total_remaining = body_length
    if unprocessed and total_remaining:
        total_remaining -= len(unprocessed)
        yield unprocessed
    while total_remaining:
        unprocessed = None  # So can be garbage collected
        unprocessed = await recv(loop, sock, socket_timeout,
                                 min(recv_bufsize, total_remaining))
        total_remaining -= len(unprocessed)
        yield unprocessed
async def identity_handler_unknown_body_length(logger, sock, _, unprocessed):
    """Yield body bytes until the server closes the connection; the close
    is the expected end-of-body signal here, not an error."""
    logger.debug('Unknown incoming body length')
    if unprocessed:
        yield unprocessed
    unprocessed = None  # So can be garbage collected
    try:
        while True:
            yield await recv(loop, sock, socket_timeout, recv_bufsize)
    except HttpConnectionClosedError:
        pass
async def chunked_handler(_, sock, __, unprocessed):
    """Decode a 'chunked' transfer-encoded body, yielding raw chunk data
    (without the hex size headers or CRLF framing)."""
    while True:
        # Fetch until have chunk header
        while b'\r\n' not in unprocessed:
            if len(unprocessed) >= max_header_length:
                raise HttpHeaderTooLong()
            unprocessed += await recv(loop, sock, socket_timeout, recv_bufsize)
        # Find chunk length
        chunk_header_end = unprocessed.index(b'\r\n')
        chunk_header_hex = unprocessed[:chunk_header_end]
        chunk_length = int(chunk_header_hex, 16)
        # End of body signalled by a 0-length chunk
        if chunk_length == 0:
            # Drain up to the final CRLFCRLF (trailer terminator).
            while b'\r\n\r\n' not in unprocessed:
                if len(unprocessed) >= max_header_length:
                    raise HttpHeaderTooLong()
                unprocessed += await recv(loop, sock, socket_timeout, recv_bufsize)
            break
        # Remove chunk header
        unprocessed = unprocessed[chunk_header_end + 2:]
        # Yield whatever amount of chunk we have already, which
        # might be nothing
        chunk_remaining = chunk_length
        in_chunk, unprocessed = \
            unprocessed[:chunk_remaining], unprocessed[chunk_remaining:]
        if in_chunk:
            yield in_chunk
        chunk_remaining -= len(in_chunk)
        # Fetch and yield rest of chunk
        while chunk_remaining:
            unprocessed += await recv(loop, sock, socket_timeout, recv_bufsize)
            in_chunk, unprocessed = \
                unprocessed[:chunk_remaining], unprocessed[chunk_remaining:]
            chunk_remaining -= len(in_chunk)
            yield in_chunk
        # Fetch until have chunk footer, and remove
        while len(unprocessed) < 2:
            unprocessed += await recv(loop, sock, socket_timeout, recv_bufsize)
        unprocessed = unprocessed[2:]
async def close(
        get_logger_adapter=get_logger_adapter,
        get_resolver_logger_adapter=get_resolver_logger_adapter,
):
    """Shut the pool down: clear the DNS cache, cancel every keep-alive
    timer and close every pooled socket."""
    logger_extra = {}
    logger = get_logger_adapter(logger_extra)
    logger.debug('Closing pool')
    await dns_resolver_clear_cache(
        get_logger_adapter=get_resolver_logger_adapter(logger_extra),
    )
    for key, socks in pool.items():
        for sock, close_callback in socks.items():
            logger.debug('Closing: %s,%s', key, sock)
            close_callback.cancel()
            sock.close()
    pool.clear()
return request, close
async def send_all(loop, sock, socket_timeout, data):
    """Send all of `data` on non-blocking `sock`.

    Tries a direct send first; any unsent remainder is flushed via the
    event loop's writable callback. Raises HttpConnectionClosedError when
    the peer stops accepting bytes (send returns 0).
    """
    # Optimistic fast path: most sends complete in one call.
    try:
        latest_num_bytes = sock.send(data)
    except (BlockingIOError, ssl.SSLWantWriteError):
        latest_num_bytes = 0
    else:
        if latest_num_bytes == 0:
            raise HttpConnectionClosedError()
        if latest_num_bytes == len(data):
            return
    total_num_bytes = latest_num_bytes

    def writer():
        # Runs on the event loop each time the socket becomes writable.
        nonlocal total_num_bytes
        try:
            latest_num_bytes = sock.send(data_memoryview[total_num_bytes:])
        except (BlockingIOError, ssl.SSLWantWriteError):
            pass
        except Exception as exception:
            loop.remove_writer(fileno)
            if not result.done():
                result.set_exception(exception)
        else:
            total_num_bytes += latest_num_bytes
            if latest_num_bytes == 0 and not result.done():
                loop.remove_writer(fileno)
                result.set_exception(HttpConnectionClosedError())
            elif total_num_bytes == len(data) and not result.done():
                loop.remove_writer(fileno)
                result.set_result(None)
            else:
                # Progress was made; restart the inactivity timeout.
                reset_timeout()

    result = asyncio.Future()
    fileno = sock.fileno()
    loop.add_writer(fileno, writer)
    # memoryview avoids copying the unsent tail on every partial send.
    data_memoryview = memoryview(data)
    try:
        with timeout(loop, socket_timeout) as reset_timeout:
            return await result
    finally:
        loop.remove_writer(fileno)
async | |
93: OoooooooOO / I1Ii111
if 91 - 91: I1Ii111
if 18 - 18: ooOoO0o * I11i
if 53 - 53: I11i . i11iIiiIii - iIii1I11I1II1 / I1Ii111
if 86 - 86: i1IIi % OoO0O00 - OoooooooOO
for OO0Ii1iii1iIIII in lisp_crypto_keys_by_rloc_decap :
OOOO0o = OO0Ii1iii1iIIII . split ( ":" )
if ( len ( OOOO0o ) == 1 ) : continue
OOOO0o = OOOO0o [ 0 ] if len ( OOOO0o ) == 2 else ":" . join ( OOOO0o [ 0 : - 1 ] )
if ( OOOO0o == OoOOoooO000 ) :
II1i = lisp_crypto_keys_by_rloc_decap [ OO0Ii1iii1iIIII ]
lisp_crypto_keys_by_rloc_decap [ OoOOoooO000 ] = II1i
return ( OoOOoooO000 )
if 57 - 57: O0 - I1Ii111 . IiII
if 56 - 56: OoooooooOO
return ( None )
if 12 - 12: ooOoO0o
if 97 - 97: i1IIi . Oo0Ooo
if 81 - 81: OoOoOO00
if 81 - 81: O0
if 57 - 57: oO0o - o0oOOo0O0Ooo % i11iIiiIii / OoOoOO00 . iIii1I11I1II1
if 68 - 68: iII111i
if 59 - 59: O0 - i11iIiiIii + OoooooooOO - iII111i - Oo0Ooo . OoooooooOO
if 60 - 60: O0 * iIii1I11I1II1 - Ii1I * II111iiii . ooOoO0o
if 61 - 61: I1IiiI . iII111i
if 19 - 19: iIii1I11I1II1 * Oo0Ooo - I1IiiI - I1IiiI + O0 - I1Ii111
if 56 - 56: I1Ii111 - i1IIi + I11i . i1IIi / II111iiii * oO0o
def lisp_build_crypto_decap_lookup_key ( addr , port ) :
    """Return the key used to index decap crypto state for (addr, port).

    Obfuscated lispers.net source: o0oo000o is "<addr>:<port>". On an RTR,
    an RLOC-probed peer is keyed by bare address, a NAT'd source by
    addr:port, otherwise bare address; a non-RTR always uses addr:port.
    NOTE(review): indentation reconstructed from flattened source; the
    interleaved `if N - N:` lines are obfuscator no-ops.
    """
    addr = addr . print_address_no_iid ( )
    o0oo000o = addr + ":" + str ( port )
    if 68 - 68: OoO0O00 % I11i % IiII + Ii1I
    if ( lisp_i_am_rtr ) :
        # Probed RLOCs are keyed by address alone.
        if ( lisp_rloc_probe_list . has_key ( addr ) ) : return ( addr )
        if 86 - 86: i1IIi / O0
        if 64 - 64: I1Ii111 + O0 * IiII % OoOoOO00 % OOooOOo - iII111i
        if 73 - 73: ooOoO0o + I1IiiI % oO0o . O0
        if 18 - 18: o0oOOo0O0Ooo * I11i
        if 24 - 24: oO0o / o0oOOo0O0Ooo + i1IIi
        if 15 - 15: i11iIiiIii / O0
        # NAT'd sources are keyed by address and port.
        for IiiiI11I1 in lisp_nat_state_info . values ( ) :
            for oOo0o0ooO0OOO in IiiiI11I1 :
                if ( addr == oOo0o0ooO0OOO . address ) : return ( o0oo000o )
            if 34 - 34: I1Ii111 . IiII % iII111i
        if 94 - 94: OOooOOo % i11iIiiIii . OOooOOo
        return ( addr )
    if 55 - 55: OoOoOO00 . OoOoOO00 % o0oOOo0O0Ooo . I11i . I1ii11iIi11i - o0oOOo0O0Ooo
    return ( o0oo000o )
if 1 - 1: i11iIiiIii - i1IIi * oO0o - iIii1I11I1II1
if 75 - 75: i1IIi * i11iIiiIii
if 40 - 40: I1ii11iIi11i + OoO0O00
if 8 - 8: i11iIiiIii - iIii1I11I1II1
if 73 - 73: OoOoOO00
if 25 - 25: iII111i / oO0o
if 61 - 61: OoooooooOO . Ii1I . I11i + oO0o
def lisp_set_ttl ( lisp_socket , ttl ) :
    """Best-effort: set the IPv4 TTL socket option on lisp_socket; log and
    continue when the platform does not support it."""
    try :
        lisp_socket . setsockopt ( socket . SOL_IP , socket . IP_TTL , ttl )
    except :
        lprint ( "socket.setsockopt(IP_TTL) not supported" )
        pass
    if 73 - 73: II111iiii % i11iIiiIii * I1ii11iIi11i + O0
    return
if 61 - 61: I1IiiI / OOooOOo
if 67 - 67: OoOoOO00
if 22 - 22: Ii1I * I1ii11iIi11i * o0oOOo0O0Ooo - I1IiiI . i11iIiiIii
if 30 - 30: O0 / oO0o * i11iIiiIii + iIii1I11I1II1 + O0 % I1IiiI
if 95 - 95: ooOoO0o % OOooOOo
if 17 - 17: i1IIi + Ii1I
if 35 - 35: iIii1I11I1II1 - Oo0Ooo - OoooooooOO % I1ii11iIi11i
def lisp_is_rloc_probe_request(lisp_type):
    """Return True when the single packed byte `lisp_type` equals 0x12
    (the first byte of an RLOC-probe Map-Request)."""
    type_value = struct.unpack("B", lisp_type)[0]
    return type_value == 0x12
if 27 - 27: Oo0Ooo * II111iiii - OOooOOo + o0oOOo0O0Ooo
if 26 - 26: oO0o / I1ii11iIi11i - oO0o
if 9 - 9: ooOoO0o * iIii1I11I1II1 * OoooooooOO
if 13 - 13: iII111i . i11iIiiIii * o0oOOo0O0Ooo . iII111i
if 96 - 96: Ii1I
if 90 - 90: II111iiii
if 93 - 93: i11iIiiIii / Ii1I * Oo0Ooo . iII111i % iII111i / IiII
def lisp_is_rloc_probe_reply(lisp_type):
    """Return True when the single packed byte `lisp_type` equals 0x28
    (the first byte of an RLOC-probe Map-Reply)."""
    type_value = struct.unpack("B", lisp_type)[0]
    return type_value == 0x28
if 15 - 15: OoOoOO00 % I1Ii111 - iIii1I11I1II1
if 52 - 52: i11iIiiIii * ooOoO0o
if 15 - 15: OoooooooOO . oO0o . i11iIiiIii / o0oOOo0O0Ooo
if 91 - 91: ooOoO0o
if 47 - 47: II111iiii + I11i + ooOoO0o % Oo0Ooo / iII111i
if 9 - 9: O0 + IiII
if 69 - 69: I1IiiI
if 11 - 11: I11i % I1Ii111 + O0 . Ii1I . I1ii11iIi11i % I1Ii111
if 28 - 28: IiII . o0oOOo0O0Ooo + iII111i - OoOoOO00 / OOooOOo
if 86 - 86: ooOoO0o * OoOoOO00 + oO0o / II111iiii % OOooOOo
if 89 - 89: O0 * Ii1I / OoO0O00 / OoOoOO00 % iII111i * iIii1I11I1II1
if 72 - 72: iIii1I11I1II1 / iIii1I11I1II1 * I11i
if 19 - 19: I1ii11iIi11i
if 42 - 42: OoOoOO00 / IiII
if 65 - 65: ooOoO0o - ooOoO0o * OoO0O00
if 99 - 99: I11i % ooOoO0o . I1Ii111
if 34 - 34: ooOoO0o + oO0o + II111iiii . I1Ii111 . i1IIi
if 14 - 14: OoO0O00 . ooOoO0o - i1IIi * I1IiiI
if 24 - 24: iIii1I11I1II1 / I1Ii111
def lisp_is_rloc_probe ( packet , rr ) :
IIi1ii1 = ( struct . unpack ( "B" , packet [ 9 ] ) [ 0 ] == 17 )
if ( IIi1ii1 == False ) : return ( [ packet , None , None , None ] )
if 16 - 16: OoOoOO00 * I1Ii111 - I1IiiI / I1Ii111
if ( rr == 0 ) :
O00oOoo0OoOOO = lisp_is_rloc_probe_request ( packet [ 28 ] )
if ( O00oOoo0OoOOO == False ) : return ( [ packet , None , None , None ] )
elif ( rr == 1 ) :
O00oOoo0OoOOO = lisp_is_rloc_probe_reply ( packet [ 28 ] )
if ( O00oOoo0OoOOO == False ) : return ( [ packet , None , None , None ] )
elif ( rr == - 1 ) :
O00oOoo0OoOOO = lisp_is_rloc_probe_request ( packet [ 28 ] )
if ( O00oOoo0OoOOO == False ) :
O00oOoo0OoOOO = lisp_is_rloc_probe_reply ( packet [ 28 ] )
if ( O00oOoo0OoOOO == False ) : return ( [ packet , None , None , None ] )
if 64 - 64: I1ii11iIi11i . i1IIi % II111iiii % Oo0Ooo + oO0o - I1IiiI
if 24 - 24: IiII . II111iiii . II111iiii . OoOoOO00 . i11iIiiIii
if 11 - 11: Ii1I
if 82 - 82: I11i - i1IIi . Oo0Ooo * I1Ii111
if 44 - 44: iII111i
if 56 - 56: II111iiii / Oo0Ooo % IiII * II111iiii - iIii1I11I1II1 + ooOoO0o
IIi1IiIii = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
IIi1IiIii . address = socket . ntohl ( struct . unpack ( "I" , packet [ 12 : 16 ] ) [ 0 ] )
if 33 - 33: o0oOOo0O0Ooo . I11i / I1IiiI
if 29 - 29: o0oOOo0O0Ooo - ooOoO0o
if 59 - 59: I11i / IiII * OoO0O00 / IiII . I1Ii111
if 82 - 82: OOooOOo . iIii1I11I1II1 + I1Ii111
if ( IIi1IiIii . is_local ( ) ) : return ( [ None , None , None , None ] )
if 14 - 14: IiII . i11iIiiIii
if 17 - 17: ooOoO0o % ooOoO0o * oO0o
if 8 - 8: ooOoO0o + OoO0O00 . II111iiii / iIii1I11I1II1 - OOooOOo
if 87 - 87: iIii1I11I1II1 . IiII % I1IiiI . OoO0O00 - I1Ii111
IIi1IiIii = IIi1IiIii . print_address_no_iid ( )
OOo0000o0 = socket . ntohs ( struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ] | |
#!/usr/bin/python
import re
import sys
import os
import platform
is_windows = platform.system().lower().startswith("win")
import subprocess
import shutil
from markdown2 import *
from datetime import *
from multiprocessing import Process
from utils import *
from jira import *
from docbook import *
try:
from xml.etree.ElementTree import ElementTree
except:
prettyprint('''
Welcome to the ModeShape Release Script.
This release script requires that you use at least Python 2.5.0. It appears
that you do not thave the ElementTree XML APIs available, which are available
by default in Python 2.5.0.
''', Levels.FATAL)
sys.exit(1)
# Mutable module-level state shared by the release steps; these are
# populated elsewhere in the script (not visible in this chunk).
modules = []
docbooks = []
uploader = None
git = None
jira = None
def get_modules(directory):
    '''Analyses the pom.xml file and extracts declared modules'''
    tree = ElementTree()
    f = directory + "/pom.xml"
    if settings['verbose']:
        print "Parsing %s to get a list of modules in project" % f
    tree.parse(f)
    mods = tree.findall(".//{%s}module" % maven_pom_xml_namespace)
    for m in mods:
        # Accumulate into the module-level 'modules' list.
        modules.append(m.text)
def help_and_exit():
    """Print the release script's usage/overview message and exit with 0."""
    prettyprint('''
%s  ModeShape Release Script%s
This script automates much of the work of releasing a new version of the ModeShape project, and includes
the following tasks:
- create a local branch for the new release;
- change the project-related versions in the POM files and documentation;
- commit those changes locally;
- create a tag for the release;
- generate the release notes in multiple formats;
- generate emails to all the people who have filed, commented on, or worked on issues
fixed in this release;
- run a full assembly build of the software to product all artifacts and documentation;
- place a copy of all artifacts and documentation in the '../archive' folder;
- deploy all artifacts to the JBoss.org Maven repository in a staging area (authorization required)
- upload all artifacts and documentation to JBoss.org (authorization required); and
- push the commit and tag to the official Git repository (authorization required)
Note that the last three steps are not performed during a dry run.
Before this script is executed, be sure to update and commit the 'release_notes.md' file. It also ensures
that the local Git repository is a writable clone of the official ModeShape repository on GitHub.
%s  Usage:%s
$ bin/release.py [options] <version> [<branch>]
where:
<version>          The name of the new version (e.g., '2.4.0.Final' but without quotes), which must
comply with the format '<major>.<minor>.<patch>.<qualifier>', where the qualifier
must be one of 'Final', 'Alpha', 'Beta', or 'CR'.
branch             The name of the existing branch from which the release should be made. This defaults
to 'master'.
and where the options include:
--verbose          Show more detailed logging and messages
--dry-run          Used for trial runs of the release process. This leaves a temporary branch in the local
git repository that contains the committed changes, but does NOT push to the official
Git repository and it does NOT publish artifacts to JBoss.org.
--skip-tests       Do not run the unit or integration tests when building the software
--single-threaded  Perform all operations sequentially without using multiple threads
--multi-threaded   Perform some operations in parallel to reduce the overall run time.
--key-file=file    A fully qualified path to a private key file that should be used when copying remote files via SCP or RSYNC.
If provided, is the equivalent of using the '-i' switch for 'scp' or 'rsync'. If not, these commands are
invoked without this switch.
This option is not available with '--dry-run'
--help|?           Display this usage message
%s  Examples:%s
$ bin/release.py 3.0.0.Final
This will release '3.0.0.Final' based off of 'master'
$ bin/release.py 2.8.1.Final 2.x
This will release '2.8.1.Final' based off of the existing '2.x' branch
''' % (Colors.yellow(), Colors.end_color(), Colors.yellow(), Colors.end_color(), Colors.yellow(), Colors.end_color()), Levels.INFO)
    sys.exit(0)
def validate_version(version):
    """Return the stripped version string when it matches the release
    version pattern; otherwise report a fatal error and show usage."""
    if get_version_pattern().match(version):
        return version.strip()
    prettyprint("Invalid version '"+version+"'!\n", Levels.FATAL)
    help_and_exit()
def tag_release(version, branch):
    """Create the release tag branch, aborting when the source branch is
    missing from the upstream repository.

    NOTE(review): remote_branch_exists()/switch_to_branch() are invoked
    without arguments — presumably the `git` helper was constructed with
    the branch/version already; confirm against its definition.
    """
    if git.remote_branch_exists():
        git.switch_to_branch()
        git.create_tag_branch()
    else:
        prettyprint("Branch %s cannot be found on upstream repository. Aborting!" % branch, Levels.FATAL)
        sys.exit(100)
def get_project_version_tag(tree):
    """Return the POM's own <version> element, or None when absent."""
    return tree.find("./{%s}version" % (maven_pom_xml_namespace))

def get_parent_version_tag(tree):
    """Return the POM's <parent>/<version> element, or None when absent."""
    return tree.find("./{%s}parent/{%s}version" % (maven_pom_xml_namespace, maven_pom_xml_namespace))
def patch_poms(working_dir, version):
    """Rewrite the <version> element of every pom.xml under `working_dir`
    to `version`.

    Returns the list of POM file paths that were actually modified.
    """
    patched_poms = list()
    walker = GlobDirectoryWalker(working_dir, "pom.xml")
    for pom_file in walker:
        tree = ElementTree()
        tree.parse(pom_file)
        # The current version of the POM is what we're looking for ...
        current_version_elem = get_project_version_tag(tree)
        # Idiom fix: compare to None with `is`, not `==`.
        if current_version_elem is None:
            # No <version> on the POM itself; inherit it from <parent>.
            current_version_elem = get_parent_version_tag(tree)
        current_version = current_version_elem.text
        if walker.replace_all_in(pom_file, "<version>%s</version>" % current_version, "<version>%s</version>" % version):
            patched_poms.append(pom_file)
    return patched_poms
def generate_release_notes(markdown_file, version, output_dir):
    """Render release notes to release.html and release.txt in `output_dir`.

    Reads the Markdown template, substitutes the &version; entity with
    `version`, appends the JIRA-generated issue list, then writes an HTML
    rendering and a plain-text rendering.
    """
    f = open(markdown_file)
    readme_md = f.read()
    f.close()  # Bug fix: the template file handle was previously leaked.
    # Replace the version entity with the actual version ...
    readme_md = re.sub('&version;', version, readme_md)
    # Append the JIRA-generated release notes
    issues_md = jira.get_release_notes_in_markdown()
    readme_md = readme_md + "\n\n" + issues_md
    # Convert the lines to HTML using Markdown ...
    readme_html = Markdown().convert(readme_md)
    # Convert the lines to text by removing the Markdown patterns ...
    readme_text = unmarkdown(readme_md)
    # Write out the two files in the desired location ...
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    path = os.path.join(output_dir, "release.html")
    mdf = open(path, 'w')
    mdf.write(readme_html)
    mdf.close()
    path = os.path.join(output_dir, "release.txt")
    mdt = open(path, 'w')
    mdt.write(readme_text)
    mdt.close()
def generate_contribution_emails(output_dir, bcc_address):
    '''Generates an HTML page in the output directory containing mailto links that can be used to create the contribution emails in your mail application'''
    file_path = os.path.join(output_dir, "contributions.html")
    html_content = jira.get_contribution_html(bcc_address)
    output_file = open(file_path, 'w')
    output_file.write(html_content)
    output_file.close()
def copy_artifacts_to_archive_location(archive_path, version):
    """Copy the distribution artifacts, JavaDoc and release notes for
    `version` from the Maven build output into `archive_path`."""
    try:
        os.makedirs(archive_path)
    except:
        # The directory may already exist; other failures surface on copy.
        pass
    # Copy the 'modeshape-distribution' artifacts ...
    from_files = ['dist.zip', 'source.zip', 'jboss-wf-dist.zip', 'javadoc.zip']
    to_files = ['dist.zip', 'source.zip', 'jboss-wf-dist.zip', 'javadoc.zip']
    for fsuffix, tsuffix in zip(from_files, to_files):
        shutil.copy("modeshape-distribution/target/modeshape-%s-%s" % (version, fsuffix), "%s/modeshape-%s-%s" % (archive_path, version, tsuffix))
    # Make an area for the documentation ...
    docs_path = os.path.join(archive_path, version)
    if not os.path.exists(docs_path):
        os.makedirs(docs_path)
    # Copy the Full JavaDoc ...
    from_path = os.path.join('modeshape-distribution', 'target', 'api')
    copy_folder(from_path, os.path.join(docs_path, 'api'))
    ## Copy the API JavaDoc ...
    #from_path = os.path.join('modeshape-distribution','target','api')
    #copy_folder(from_path,os.path.join(docs_path,'api'))
    #
    ## Copy the XRef ...
    #from_path = os.path.join('modeshape-distribution','target','xref')
    #if os.path.exists(from_path):
    #    copy_folder(from_path,os.path.join(docs_path,'xref'))
    # Copy the release notes into the archive area...
    for readme in ['release.html', 'release.txt']:
        from_path = os.path.join('target', readme)
        shutil.copy(from_path, os.path.join(docs_path, readme))
        shutil.copy(from_path, os.path.join(archive_path, readme))
def copy_release_notes_to_archive_location(archive_path,version):
    '''Copies the generated release notes from the 'target' folder into the archive area.
    The version parameter is currently unused but kept for call-site compatibility.'''
    try:
        os.makedirs(archive_path)
    except OSError:
        # Narrowed from a bare except: the directory probably already exists
        pass
    # Copy the release notes into the archive area...
    for readme in ['release.html','release.txt']:
        from_path = os.path.join('target',readme)
        shutil.copy(from_path,os.path.join(archive_path,readme))
def copy_folder( from_path, to_path ):
    '''Copies the folder at from_path to to_path, replacing any folder already at the destination'''
    # copytree requires the destination to not exist, so clear out any previous copy first
    destination_exists = os.path.exists(to_path)
    if destination_exists:
        shutil.rmtree(to_path)
    shutil.copytree(from_path,to_path)
def update_versions(version):
    '''Patches the version into all POM files under the current directory and commits the changes'''
    # Update versions in the POM files, collecting every file that was modified ...
    modified_files = list(patch_poms('.',version))
    # Now make sure this goes back into the repository.
    git.commit(modified_files)
def get_module_name(pom_file):
    '''Returns the Maven artifactId declared in the given POM file'''
    pom_tree = ElementTree()
    pom_tree.parse(pom_file)
    # artifactId is namespaced with the Maven POM XML namespace
    return pom_tree.findtext("./{%s}artifactId" % maven_pom_xml_namespace)
def upload_artifacts(base_dir, version):
    """Downloadable artifacts get rsync'ed to filemgmt.jboss.org, in the downloads_htdocs/modeshape directory"""
    # NOTE: this function changes the process working directory and restores it to
    # base_dir only on success; an exception mid-way leaves the cwd changed.
    # Create an area under 'target' where we can move all the files/folders that we need to upload ...
    os.chdir("%s/target/" % (base_dir))
    os.makedirs("downloads/%s" % version)
    # Copy the 'modeshape-distribution' artifacts ...
    # (from/to names are currently identical; two lists are kept so they can diverge)
    from_files = ['dist.zip', 'source.zip', 'jboss-wf-dist.zip', 'javadoc.zip']
    to_files = ['dist.zip', 'source.zip', 'jboss-wf-dist.zip', 'javadoc.zip']
    for fsuffix,tsuffix in zip(from_files,to_files):
        shutil.copy("%s/modeshape-distribution/target/modeshape-%s-%s" % (base_dir,version,fsuffix), "downloads/%s/modeshape-%s-%s" % (version,version,tsuffix))
    # Copy the readme files ...
    for readme in ['release.html','release.txt']:
        from_path = os.path.join(base_dir,'target',readme)
        to_path = os.path.join('downloads',version,readme)
        shutil.copy(from_path,to_path)
    # rsync this stuff to filemgmt.jboss.org
    os.chdir("%s/target/downloads" % (base_dir))
    flags=[]
    if ('key_file' in settings):
        # NOTE(review): the flag embeds a space ('-i <file>') — presumably the uploader
        # splits or passes it through verbatim to ssh/rsync; confirm in the uploader class
        flags=['-i ' + settings['key_file']]
    if is_windows:
        # scp on Windows; rsync (with legacy protocol 28 for the server) everywhere else
        uploader.upload_scp(version, "<EMAIL>:/downloads_htdocs/modeshape", flags)
    else:
        flags = flags + ['--protocol=28']
        uploader.upload_rsync(version, "<EMAIL>:/downloads_htdocs/modeshape", flags)
    # We're done, so go back to where we were ...
    os.chdir(base_dir)
def upload_documentation(base_dir, version):
    """Javadocs get rsync'ed to filemgmt.jboss.org, in the docs_htdocs/modeshape directory"""
    # NOTE: changes the working directory; restored to base_dir only on success.
    # Create an area under 'target' where we can move all the files/folders that we need to upload ...
    os.chdir("%s/target/" % (base_dir))
    os.makedirs("docs/%s" % version)
    # Move the 'api' folder into the 'docs/<version>/' folder so we can rsync that '<version>' folder
    # (os.rename moves the folder; the original 'api' folder no longer exists afterwards)
    os.rename("%s/modeshape-distribution/target/api" % base_dir, "docs/%s/api" % version)
    # Copy the readme files ...
    for readme in ['release.html','release.txt']:
        from_path = os.path.join(base_dir,'target',readme)
        to_path = os.path.join('docs',version,readme)
        shutil.copy(from_path,to_path)
    # rsync this stuff to filemgmt.jboss.org
    os.chdir("%s/target/docs" % (base_dir))
    flags=[]
    if ('key_file' in settings):
        # NOTE(review): flag embeds a space ('-i <file>'); confirm the uploader handles this
        flags=['-i ' + settings['key_file']]
    if is_windows:
        # scp on Windows; rsync (legacy protocol 28) everywhere else
        uploader.upload_scp(version, "<EMAIL>:/docs_htdocs/modeshape", flags)
    else:
        flags = flags + ['--protocol=28']
        uploader.upload_rsync(version, "<EMAIL>:/docs_htdocs/modeshape", flags)
    # We're done, so go back to where we were ...
    os.chdir(base_dir)
def do_task(target, args, async_processes):
    '''Runs target(*args) either synchronously, or queued as a worker Process in
    async_processes, depending on the 'multi_threaded' setting'''
    if not settings['multi_threaded']:
        target(*args)
    else:
        # Caller is responsible for starting and joining the queued processes
        async_processes.append(Process(target = target, args = args))
### This is the starting place for this script.
def release():
global settings
global uploader
global git
global jira
assert_python_minimum_version(2, 5)
base_dir = os.getcwd()
# | |
<filename>grasp_evaluator/grasp_evaluator.py
# Copyright (c) 2020 NVIDIA Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""GraspEvaluator class to et up and run grasp evaluations."""
import h5py
import numpy as np
import os
import timeit
import xml.etree.ElementTree as ET
import yaml
from isaacgym import gymapi
from scipy.spatial.transform import Rotation as R
from utils import pandafsm
from utils import uniform_sphere
from utils import metrics_features_utils
class GraspEvaluator:
"""Simulate selected object, grasp, material params, and evaluation mode."""
    def __init__(self, object_name, grasp_ind, oris, density,
                 youngs, poissons, friction, mode, tag=''):
        """Initialize parameters for simulation for the specific grasp and material properties.

        Parameters
        ----------
        object_name : str
            Name of the soft object; lowercased and used as the asset subfolder name.
        grasp_ind : int
            Index of the candidate grasp to evaluate.
        oris : sequence of int
            [ori_start, ori_end] range of orientation indices for this run.
        density, youngs, poissons : str
            Material parameters; presumably strings (they are concatenated into the
            results file name and written into the URDF) — confirm at call site.
        friction : float-like
            Dynamic friction coefficient; converted to float.
        mode : str
            Evaluation mode; lowercased ("pickup", "reorient", "lin_acc",
            "ang_acc", "squeeze_no_gravity", ...).
        tag : str
            Optional tag appended to the results file name.
        """
        # config.yaml is read from the current working directory
        with open("config.yaml") as yamlfile:
            self.cfg = yaml.safe_load(yamlfile)
        # Soft object material parameters
        self.object_name = object_name.lower()
        self.grasp_ind = grasp_ind
        self.oris = oris # Array of [ori_start, ori_end]
        self.density = density
        self.youngs = youngs
        self.poissons = poissons
        self.friction = float(friction)
        self.mode = mode.lower()
        # Directories of assets and results
        self.assets_dir = os.path.abspath(self.cfg['dir']['assets_dir'])
        self.franka_urdf = os.path.abspath(self.cfg['dir']['franka_urdf'])
        self.results_dir = os.path.abspath(self.cfg['dir']['results_dir'])
        self.object_path = os.path.join(self.assets_dir, self.object_name)
        self.tag = tag
        # Load candidate grasp and initialize results folder
        self.get_grasp_candidates()
        self.data_exists = self.init_results_folder()
        # Skip the expensive simulator setup entirely if usable results already exist
        if not self.cfg['replace_existing_results'] and self.data_exists:
            return
        # Create and set up simulation environment
        self.viewer = None
        self.gym = gymapi.acquire_gym()
        self.sim, self.sim_params = self.create_sim()
        self.create_env()
        self.set_asset_properties()
        self.set_camera()
        self.set_transforms()
        self.get_regular_vectors()
        self.setup_scene()
def init_results_folder(self):
"""Create folder where results are saved. Returns whether existing results will be kept."""
folder_name = self.object_name + "_" + self.cfg['tags']['results_storage_tag']
object_file_name = self.object_name + "_" + self.density + "_" + self.youngs + "_" + \
self.poissons + "_" + self.mode + "_tag" + self.tag + "_results.h5"
self.h5_file_path = os.path.join(
self.results_dir, folder_name, self.youngs, object_file_name)
if os.path.exists(self.h5_file_path) and not self.cfg['replace_existing_results']:
existing_h5 = h5py.File(self.h5_file_path, 'r')
existing_timed_out = existing_h5['timed_out'][self.grasp_ind,
self.oris[0]]
existing_succeeded = True
if self.mode == "pickup":
existing_pos_under_gravity_dset = existing_h5[
'positions_under_gravity']
if np.all(existing_pos_under_gravity_dset[self.grasp_ind] == 0):
existing_succeeded = False
if self.mode == "reorient":
reorientation_meshes_dset = existing_h5['reorientation_meshes']
if np.all(reorientation_meshes_dset[self.grasp_ind, self.oris[0],
0] == 0):
existing_succeeded = False
if self.mode == "lin_acc":
lin_acc_fail_accs_dset = existing_h5['lin_acc_fail_accs']
if lin_acc_fail_accs_dset[self.grasp_ind, self.oris[0]] == 0.0:
existing_succeeded = False
if self.mode == "ang_acc":
ang_acc_fail_accs_dset = existing_h5['ang_acc_fail_accs']
if ang_acc_fail_accs_dset[self.grasp_ind, self.oris[0]] == 0.0:
existing_succeeded = False
if self.mode == "squeeze_no_gravity":
max_forces_dset = existing_h5["squeeze_no_gravity_max_force"]
if np.all(max_forces_dset[self.grasp_ind] == 0):
existing_succeeded = False
existing_h5.close()
if existing_timed_out == 0.0 and existing_succeeded:
print("Data already exists, returning")
return True
else:
print("Existing data is imperfect, rerunning")
return False
def get_grasp_candidates(self):
"""Load the candidate grasp of interest."""
grasp_file_name = self.object_name + "_grasps.h5"
f = h5py.File(os.path.realpath(os.path.join(self.object_path, grasp_file_name)), 'r')
self.grasp_candidate_poses = f['poses'][self.grasp_ind:self.grasp_ind + 1]
self.num_grasp_poses = f['poses'].shape[0]
print("Number of total grasp candidates", self.num_grasp_poses)
f.close()
def create_sim(self):
"""Set sim parameters and create a Sim object."""
# Set simulation parameters
sim_type = gymapi.SIM_FLEX
sim_params = gymapi.SimParams()
sim_params.dt = 1.0 / 1500
sim_params.substeps = 1
sim_params.gravity = gymapi.Vec3(0.0, -9.81, 0.0)
if self.mode in ["lin_acc", "ang_acc", "squeeze_no_gravity"]:
sim_params.gravity = gymapi.Vec3(0.0, 0.0, 0.0)
# Set stress visualization parameters
sim_params.stress_visualization = True
sim_params.stress_visualization_min = 1.0e2
sim_params.stress_visualization_max = 1e5
# Set FleX-specific parameters
sim_params.flex.solver_type = 5
sim_params.flex.num_outer_iterations = 10
sim_params.flex.num_inner_iterations = 200
sim_params.flex.relaxation = 0.75
sim_params.flex.warm_start = 0.8
sim_params.flex.deterministic_mode = True
# Set contact parameters
sim_params.flex.shape_collision_distance = 5e-4
sim_params.flex.contact_regularization = 1.0e-6
sim_params.flex.shape_collision_margin = 1.0e-4
sim_params.flex.dynamic_friction = self.friction
# Create Sim object
gpu_physics = 0
gpu_render = 0
if not self.cfg['use_viewer']:
gpu_render = -1
return self.gym.create_sim(gpu_physics, gpu_render, sim_type,
sim_params), sim_params
def create_env(self):
"""Set dimensions of environments."""
self.envs_per_row = 6
self.env_dim = 0.3
if self.mode in ["lin_acc", "ang_acc"]:
self.env_dim = 1.0
# Define environment as half-cube (half in vertical direction)
self.env_lower = gymapi.Vec3(-self.env_dim, 0, -self.env_dim)
self.env_upper = gymapi.Vec3(self.env_dim, self.env_dim, self.env_dim)
def set_object_parameters(self, asset_file_object, **kwargs):
"""Write object parameters into URDF file."""
try:
tree = ET.parse(asset_file_object)
root = tree.getroot()
for key, value in kwargs.items():
for attribute in root.iter(key):
attribute.set('value', str(value))
tree.write(asset_file_object)
return True
except BaseException:
return False
def set_asset_properties(self):
"""Define asset properties."""
asset_root = ''
asset_options = gymapi.AssetOptions()
asset_options.flip_visual_attachments = False
asset_options.armature = 0.0
asset_options.thickness = 0.0
asset_options.linear_damping = 1.0 # Linear damping for rigid bodies
asset_options.angular_damping = 0.0 # Angular damping for rigid bodies
asset_options.disable_gravity = True
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_VEL
# Load Franka and object assets
asset_file_platform = os.path.join(self.assets_dir, 'platform.urdf')
asset_file_object = os.path.join(self.object_path, "soft_body.urdf")
# Set object parameters for object material properties
set_parameter_result = False
fail_counter = 0
while set_parameter_result is False and fail_counter < 10:
try:
set_parameter_result = self.set_object_parameters(
asset_file_object,
density=self.density,
youngs=self.youngs,
poissons=self.poissons)
except BaseException:
fail_counter += 1
pass
# Set asset options
asset_options.fix_base_link = True
self.asset_handle_franka = self.gym.load_asset(self.sim, asset_root, self.franka_urdf,
asset_options)
asset_options.fix_base_link = False
asset_options.min_particle_mass = 1e-20
self.asset_handle_object = self.gym.load_asset(self.sim, asset_root, asset_file_object,
asset_options)
asset_options.fix_base_link = True
self.asset_handle_platform = self.gym.load_asset(self.sim, asset_root,
asset_file_platform, asset_options)
def set_camera(self):
"""Define camera properties and create Viewer object."""
camera_props = gymapi.CameraProperties()
camera_props.width = 1920
camera_props.height = 1080
if self.cfg['use_viewer']:
self.viewer = self.gym.create_viewer(self.sim, camera_props)
camera_target = gymapi.Vec3(0.0, 1.0, 0.0)
camera_pos = gymapi.Vec3(-0, 1.02, 0.5)
self.gym.viewer_camera_look_at(self.viewer, None, camera_pos, camera_target)
def set_transforms(self):
"""Define transforms to convert between Trimesh and Isaac Gym conventions."""
self.from_trimesh_transform = gymapi.Transform()
self.from_trimesh_transform.r = gymapi.Quat(0, 0.7071068, 0,
0.7071068)
self.neg_rot_x_transform = gymapi.Transform()
self.neg_rot_x = gymapi.Quat(0.7071068, 0, 0, -0.7071068)
self.neg_rot_x_transform.r = self.neg_rot_x
def get_regular_vectors(self):
"""Get directions of regularly spaced vectors in a unit ball."""
all_directions, _, _, _ = uniform_sphere.get_uniform_directions_regular(16)
self.num_directions = len(all_directions)
self.all_directions = all_directions[self.oris[0]:self.oris[1] + 1]
def get_height_of_objects(self, tet_file):
"""Return the height of the soft object."""
mesh_lines = list(open(tet_file, "r"))
mesh_lines = [line.strip('\n') for line in mesh_lines]
zs = []
for ml in mesh_lines:
sp = ml.split(" ")
if sp[0] == 'v':
zs.append(float(sp[3]))
return 2 * abs(min(zs))
def setup_scene(self):
"""Create environments, Franka actor, and object actor."""
self.env_handles = []
self.franka_handles = []
object_handles = []
self.platform_handles = []
self.hand_origins = []
self.env_spread = self.grasp_candidate_poses
if self.mode.lower() in ["reorient", "lin_acc", "ang_acc"]:
self.env_spread = self.all_directions
for i, test_grasp_pose in enumerate(self.env_spread):
if self.mode.lower() in ["reorient", "lin_acc", "ang_acc"]:
test_grasp_pose = self.grasp_candidate_poses[0]
direction = np.array(
[self.all_directions[i][1], self.all_directions[i][2],
self.all_directions[i][0]]) # Read direction as y-up convention
else:
direction = np.array(
[self.all_directions[0][1], self.all_directions[0][2],
self.all_directions[0][0]]) # Read direction as y-up convention
# Create environment
env_handle = self.gym.create_env(
self.sim, self.env_lower, self.env_upper, self.envs_per_row)
self.env_handles.append(env_handle)
# Define shared pose/collision parameters
pose = gymapi.Transform()
grasp_transform = gymapi.Transform()
grasp_transform.r = gymapi.Quat(test_grasp_pose[4], test_grasp_pose[5],
test_grasp_pose[6], test_grasp_pose[3])
_, franka_rpy = metrics_features_utils.get_franka_rpy(grasp_transform.r)
collision_group = i
collision_filter = 0
# Create Franka actors
pose.p = gymapi.Vec3(test_grasp_pose[0], test_grasp_pose[1],
test_grasp_pose[2])
pose.p = self.neg_rot_x_transform.transform_vector(pose.p)
pose.p.y += self.cfg['sim_params']['platform_height']
franka_handle = self.gym.create_actor(env_handle, self.asset_handle_franka, pose,
f"franka_{i}", collision_group, 1)
self.franka_handles.append(franka_handle)
curr_joint_positions = self.gym.get_actor_dof_states(
env_handle, franka_handle, gymapi.STATE_ALL)
ang_acc_axis = np.array([0., 0., 1.])
pose_transform = R.from_euler('ZYX', franka_rpy)
ang_acc_transform = R.align_vectors(np.expand_dims(direction, axis=0),
np.expand_dims(ang_acc_axis,
axis=0))[0]
ang_acc_eulers = ang_acc_transform.as_euler('xyz')
pose_correction = ang_acc_transform.inv() * pose_transform
pose_correction_euler = pose_correction.as_euler('xyz')
# Correct for translation offset to match grasp. Allows for one joint to
# be solely responsible for generating angular acceleration
q0 = np.array([0., 0., -0.112])
q0_ = ang_acc_transform.apply(q0)
disp_offset = q0 - q0_
curr_joint_positions['pos'] = [
disp_offset[0], disp_offset[1], disp_offset[2], ang_acc_eulers[2],
ang_acc_eulers[1], ang_acc_eulers[0], 0., pose_correction_euler[2],
pose_correction_euler[1], pose_correction_euler[0], 0.0, 0.0, 0.0,
0, 0.04, 0.04
]
self.hand_origins.append(pose)
finger_pose = gymapi.Transform()
finger_pose.p = pose.p
self.gym.set_actor_dof_states(env_handle, franka_handle,
curr_joint_positions, gymapi.STATE_ALL)
# Create soft object
tet_file_name = os.path.join(self.object_path, self.object_name + ".tet")
height_of_object = self.get_height_of_objects(tet_file_name)
pose = gymapi.Transform()
pose.r = self.neg_rot_x_transform.r
pose.p = self.from_trimesh_transform.transform_vector(
gymapi.Vec3(0.0, 0.0, 0.0))
object_height_buffer = 0.001
if self.mode == "squeeze_no_gravity":
object_height_buffer = 0.0
pose.p.y += self.cfg['sim_params']['platform_height'] + object_height_buffer
object_handle = self.gym.create_actor(env_handle, self.asset_handle_object, pose,
f"object_{i}", collision_group,
collision_filter)
object_handles.append(object_handle)
| |
not None:
energy_levels = energy_levels[energy_levels >= E_min]
if E_max is not None:
energy_levels = energy_levels[energy_levels <= E_max]
bins = np.arange(0, bin_max + bin_width, bin_width)
n_bins = len(bins)
counts = np.zeros(n_bins)
for i in range(n_bins - 1):
counts[i] = np.sum(bins[i] <= energy_levels[energy_levels < bins[i + 1]])
density = (counts/bin_width)[:-1]
bins = bins[1:]
if plot:
fig, ax = plt.subplots()
ax.step(bins, density, color="black")
ax.set_ylabel(r"Density [MeV$^{-1}$]")
ax.set_xlabel("E [MeV]")
ax.legend([f"{bin_width=} MeV"])
ax.grid()
if save_plot:
fname = "nld.png"
print(f"NLD saved as '{fname}'")
fig.savefig(fname=fname, dpi=300)
plt.show()
return bins, density
def porter_thomas(
    transitions: np.ndarray,
    Ei: Union[int, float, list],
    BXL_bin_width: Union[int, float],
    j_list: Union[list, None] = None,
    Ei_bin_width: Union[int, float] = 0.1,
    return_chi2: bool = False,
) -> tuple[np.ndarray, np.ndarray, Optional[np.ndarray]]:
    """
    Calculate the distribution of B(XL)/mean(B(XL)) values scaled to
    a chi-squared distribution of 1 degree of freedom.
    Parameters
    ----------
    transitions : np.ndarray
        Array containing transition data for the specified
        multipolarity.
        [2*spin_initial, parity_initial, idx_initial, Ex_initial,
        2*spin_final, parity_final, idx_final, Ex_final, E_gamma,
        B(XL, i->f), B(XL, f<-i)]
    Ei : int, float, list
        The initial excitation energy of the transitions where the
        distribution will be calculated. If Ei is only a number, then a
        bin of size 'Ei_bin_width' around Ei will be used. If Ei is a
        list, tuple, or array with both a lower and an upper limit, then
        all excitation energies in that interval will be used.
    BXL_bin_width : int, float
        The bin size of the BXL values for the distribution (not the Ei
        bin size!).
    j_list : list, None
        Optional list of initial angular momenta; only transitions whose
        initial j is in this list are kept.
    Ei_bin_width : int, float
        The size of the initial energy bin if 'Ei' is only one number.
        Will not be used if 'Ei' is both a lower and an upper limit.
    return_chi2 : bool
        If True, the chi-squared distribution y values will be returned
        as a third return value.
    Returns
    -------
    BXL_bins : np.ndarray
        The BXL bins (x values).
    BXL_counts : np.ndarray
        The number of counts in each BXL_bins bin (y values).
    rv.pdf(BXL_bins) : np.ndarray
        The chi-squared distribution y values.
    """
    pt_prepare_data_time = time.perf_counter()
    if isinstance(Ei, (list, tuple, np.ndarray)):
        """
        If Ei defines a lower and an upper limit.
        """
        Ei_mask = np.logical_and(
            transitions[:, 3] >= Ei[0],
            transitions[:, 3] < Ei[-1]
        )
        BXL = transitions[Ei_mask]
    else:
        BXL = transitions[np.abs(transitions[:, 3] - Ei) < Ei_bin_width] # Consider only levels around Ei.
    if j_list is not None:
        """
        Create a mask of j values for the transitions array. Allow only
        entries with initial angular momenta in j_list.
        """
        if not isinstance(j_list, list):
            msg = f"j_list must be of type list! Got {type(j_list)}."
            raise TypeError(msg)
        j_list = [2*j for j in j_list] # Angular momenta are stored as 2*j to avoid fractions.
        mask_list = []
        for j in j_list:
            """
            Create a [bool1, bool2, ...] mask for each j.
            """
            mask_list.append(BXL[:, 0] == j)
        BXL = BXL[np.logical_or.reduce(mask_list)] # Contains only transitions of j in the filter.
    # BXL = np.copy(BXL[:, 9]) # The 9th col. is the reduced decay transition probabilities.
    n_BXL_before = len(BXL)
    # Group the transitions by initial (parity, level index, spin) and normalize
    # each group's B(XL) values by that group's own mean.
    idxi_masks = []
    pii_masks = []
    ji_masks = []
    BXL_tmp = []
    initial_indices = np.unique(BXL[:, 2]).astype(int)
    initial_parities = np.unique(BXL[:, 1]).astype(int)
    initial_j = np.unique(BXL[:, 0])
    for idxi in initial_indices:
        idxi_masks.append(BXL[:, 2] == idxi)
    for pii in initial_parities:
        pii_masks.append(BXL[:, 1] == pii)
    for ji in initial_j:
        ji_masks.append(BXL[:, 0] == ji)
    for pii in pii_masks:
        for idxi in idxi_masks:
            for ji in ji_masks:
                mask = np.logical_and(ji, np.logical_and(pii, idxi))
                tmp = BXL[mask][:, 9] # 9 is B decay.
                if not tmp.size:
                    """
                    Some combinations of masks might not match any
                    levels.
                    """
                    continue
                BXL_tmp.extend(tmp/tmp.mean())
    BXL = np.asarray(BXL_tmp)
    BXL.sort()
    # BXL = BXL/np.mean(BXL)
    # Sanity check: grouping must neither drop nor duplicate transitions.
    n_BXL_after = len(BXL)
    if n_BXL_before != n_BXL_after:
        msg = "The number of BXL values has changed during the Porter-Thomas analysis!"
        msg += f" This should not happen! {n_BXL_before = }, {n_BXL_after = }."
        raise RuntimeError(msg)
    BXL_bins = np.arange(0, BXL[-1] + BXL_bin_width, BXL_bin_width)
    n_BXL_bins = len(BXL_bins)
    BXL_counts = np.zeros(n_BXL_bins)
    pt_prepare_data_time = time.perf_counter() - pt_prepare_data_time
    pt_count_time = time.perf_counter()
    for i in range(n_BXL_bins - 1):
        """
        Calculate the number of transitions with BXL values between
        BXL_bins[i] and BXL_bins[i + 1].
        """
        BXL_counts[i] = np.sum(BXL_bins[i] <= BXL[BXL < BXL_bins[i + 1]])
    pt_count_time = time.perf_counter() - pt_count_time
    pt_post_process_time = time.perf_counter()
    rv = chi2(1)
    # Drop the first bin (chi2(1) pdf diverges at zero), then normalize.
    BXL_counts = BXL_counts[1:]
    BXL_bins = BXL_bins[1:]
    BXL_counts /= np.trapz(BXL_counts) # Normalize counts.
    # popt, _ = curve_fit(
    #     f = lambda x, scale: scale*rv.pdf(x),
    #     xdata = BXL_bins,
    #     ydata = BXL_counts,
    #     p0 = [rv.pdf(BXL_bins)[1]/BXL_counts[1]],
    #     method = "lm",
    # )
    # BXL_counts *= popt[0] # Scale counts to match chi2.
    # Scale so the histogram overlays the chi2(1) pdf, using bins 1..19 as reference.
    BXL_counts *= np.mean(rv.pdf(BXL_bins)[1:20]/BXL_counts[1:20])
    pt_post_process_time = time.perf_counter() - pt_post_process_time
    # NOTE: 'flags' is a module-level debug switch defined elsewhere in this file.
    if flags["debug"]:
        print("--------------------------------")
        print(f"Porter-Thomas: Prepare data time: {pt_prepare_data_time:.3f} s")
        print(f"Porter-Thomas: Count time: {pt_count_time:.3f} s")
        print(f"Porter-Thomas: Post process time: {pt_post_process_time:.3f} s")
        print("--------------------------------")
    if return_chi2:
        return BXL_bins, BXL_counts, rv.pdf(BXL_bins)
    else:
        return BXL_bins, BXL_counts
def nuclear_shell_model():
"""
Generate a diagram of the nuclear shell model shell structure.
"""
plt.rcParams.update({
"backend": "pgf",
"text.usetex": True,
"font.family": "serif",
"font.serif": ["roman"],
"legend.fontsize": 14,
"xtick.labelsize": 15,
"ytick.labelsize": 15,
"axes.labelsize": 14,
"axes.titlesize": 15,
})
fig, ax = plt.subplots(figsize=(6.4, 8))
ax.axis(False)
fontsize = 15
x_offset = 0.6
x_text_offset = x_offset - 0.5
first_layer_labels = [
r"$1s$", r"$1p$", r"$1d$", r"$2s$", r"$1f$", r"$2p$", r"$1g$",
r"$2d$", r"$3s$"
]
first_layer_y = [1, 2.4, 4.2, 4.45, 6.3, 6.8, 9, 10.0, 10.5]
second_layer_labels = [
r"$1s_{1/2}$", r"$1p_{3/2}$", r"$1p_{1/2}$", r"$1d_{5/2}$",
r"$2s_{1/2}$", r"$1d_{3/2}$", r"$1f_{7/2}$", r"$2p_{3/2}$",
r"$1f_{5/2}$", r"$2p_{1/2}$", r"$1g_{9/2}$", r"$2d_{5/2}$",
r"$1g_{7/2}$", r"$3s_{1/2}$", r"$2d_{3/2}$"
]
second_layer_y = [
first_layer_y[0], first_layer_y[1] - 0.15, first_layer_y[1] + 0.15,
first_layer_y[2] - 0.3, first_layer_y[3], first_layer_y[2] + 0.51,
first_layer_y[4] - 0.6, first_layer_y[5] - 0.10, first_layer_y[4] + 0.7,
first_layer_y[5] + 0.5, first_layer_y[6] - 1.0, first_layer_y[7] - 0.4,
first_layer_y[6] + 0.9, first_layer_y[7] + 0.8, first_layer_y[8]
]
dash_layer = [
[2 + x_offset, first_layer_y[0], 2.5 + x_offset, second_layer_y[0]],
[2 + x_offset, first_layer_y[1], 2.5 + x_offset, second_layer_y[1]],
[2 + x_offset, first_layer_y[1], 2.5 + x_offset, second_layer_y[2]],
[2 + x_offset, first_layer_y[2], 2.5 + x_offset, second_layer_y[3]],
[2 + x_offset, first_layer_y[2], 2.5 + x_offset, second_layer_y[5]],
[2 + x_offset, first_layer_y[3], 2.5 + x_offset, second_layer_y[4]],
[2 + x_offset, first_layer_y[4], 2.5 + x_offset, second_layer_y[6]],
[2 + x_offset, first_layer_y[4], 2.5 + x_offset, second_layer_y[8]],
[2 + x_offset, first_layer_y[5], 2.5 + x_offset, second_layer_y[7]],
[2 + x_offset, first_layer_y[5], 2.5 + x_offset, second_layer_y[9]],
[2 + x_offset, first_layer_y[6], 2.5 + x_offset, second_layer_y[10]],
[2 + x_offset, first_layer_y[7], 2.5 + x_offset, second_layer_y[11]],
[2 + x_offset, first_layer_y[6], 2.5 + x_offset, second_layer_y[12]],
[2 + x_offset, first_layer_y[7], 2.5 + x_offset, second_layer_y[13]],
[2 + x_offset, first_layer_y[8], 2.5 + x_offset, second_layer_y[14]],
]
core_layer_labels = [
r"$^{16}$O", r"$^{40}$Ca", r"$^{56}$Ni"
]
core_layer_y = [
second_layer_y[2] + 0.5, second_layer_y[5] + 0.5, second_layer_y[6] + 0.5
]
occupations = [
2, 4, 2, 6, 2, 4, 8, 4, 6, 2, 10, 6, 8, 2, 4
]
occupations_y = second_layer_y
cum_occupations = [
2, 8, 20, 28, 50
]
cum_occupations_y = [
second_layer_y[0], second_layer_y[2], second_layer_y[5],
second_layer_y[6], second_layer_y[10]
]
ax.hlines( # To force the width of the figure.
y = 1,
xmin = 3.5 + x_offset,
xmax = 4.5 + x_offset,
color = "white"
)
ax.hlines( # To force the width of the figure.
y = 1,
xmin = 1,
xmax = 2,
color = "white"
)
for y, label in zip(first_layer_y, first_layer_labels):
ax.hlines(
y = y,
xmin = 1 + x_offset,
xmax = 2 + x_offset,
color = "black",
)
fig.text(
x = 0.12 + x_text_offset,
y = y/13.95 + 0.067,
s = label,
fontsize = fontsize
)
for y, label in zip(second_layer_y, second_layer_labels):
ax.hlines(
y = y,
xmin = 2.5 + x_offset,
xmax = 3.5 + x_offset,
color = "black",
)
fig.text(
x = 0.6 + x_text_offset,
y = y/14.2 + 0.067,
s = label,
fontsize = fontsize
)
for x1, y1, x2, y2 in dash_layer:
ax.plot([x1, x2], [y1, y2], linestyle="dashed", color="black")
for occupation, y in zip(occupations, occupations_y):
fig.text(
x = 0.69 + x_text_offset,
y = y/14.2 + 0.067,
s = occupation,
fontsize = fontsize - 1
)
for occupation, y in zip(cum_occupations, cum_occupations_y):
fig.text(
x = 0.73 + x_text_offset,
y = y/14.2 + 0.067,
s = occupation,
fontsize = fontsize - | |
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various math utilities"""
import math
import re
import numpy as np
import scipy.optimize as sciopt
# special pattern to deal with FORTRAN-produced scipats without E, like 3.2234-234
SCIPAT_SPECIAL = re.compile(r"([+-]?\d*\.\d+)[eEdD]?([+-]\d+)")
def average1DWithinTolerance(vals, tolerance=0.2):
    """
    Compute the average of a series of arrays with a tolerance.

    Rows whose entries deviate from the column-wise mean by more than
    ``tolerance`` (relative) are discarded and the mean is recomputed,
    repeating until every remaining row is within tolerance.

    Tuned for averaging assembly meshes or block heights.

    Parameters
    ----------
    vals : 2D np.array
        could be assembly x axial mesh tops or heights
    """
    vals = np.array(vals)

    rowIsClose = np.array([False])  # seed value so the loop runs at least once
    while not rowIsClose.all():
        avg = vals.mean(axis=0)  # column-wise mean over the surviving rows
        relativeDiff = abs(vals - avg) / avg
        # a row survives only when every one of its entries is within tolerance
        rowIsClose = (relativeDiff > tolerance).sum(axis=1) == 0
        vals = vals[rowIsClose]

    if vals.size == 0:
        raise ValueError("Nothing was near the mean, there are no acceptable values!")

    if (avg <= 0.0).any():
        raise ValueError(
            "A non-physical value (<=0) was computed, but this is not possible.\n"
            "Values: {}\navg: {}".format(vals, avg)
        )

    return avg
def convertToSlice(x, increment=False):
    """
    Convert an int, float, list of ints or floats, None, or slice
    to a slice. Also optionally increments that slice to make it easy to line
    up lists that don't start with 0.

    Use this with np.array (np.ndarray) types to easily get selections of its elements.

    Parameters
    ----------
    x : multiple types allowed.
        int: select one index.
        list of int: select these index numbers.
        None: select all indices.
        slice: select this slice

    increment : int or False
        Offset added to every index in the result (False means 0).

    Returns
    -------
    slice : slice
        Returns a slice object that can be used in an array
        like a[x] to select from its members. A list/array input
        returns a numpy array of shifted indices instead, which can
        be used to slice other numpy arrays in the same way as a slice.

    Raises
    ------
    TypeError
        If increment is not an int (or False), or x is of an unsupported type.

    Examples
    --------
    a = np.array([10, 11, 12, 13])

    >>> convertToSlice(2)
    slice(2, 3, None)

    >>> a[convertToSlice(2)]
    array([12])

    >>> convertToSlice(2, increment=-1)
    slice(1, 2, None)

    >>> a[convertToSlice(2, increment=-1)]
    array([11])

    >>> a[convertToSlice(None)]
    array([10, 11, 12, 13])

    >>> a[utils.convertToSlice([1, 3])]
    array([11, 13])

    >>> a[utils.convertToSlice([1, 3], increment=-1)]
    array([10, 12])

    >>> a[utils.convertToSlice(slice(2, 3, None), increment=-1)]
    array([11])
    """
    if increment is False:
        increment = 0

    if not isinstance(increment, int):
        # TypeError (a subclass of Exception) replaces the original bare Exception
        raise TypeError("increment must be False or an integer in utils.convertToSlice")

    if x is None:
        x = np.s_[:]

    if isinstance(x, list):
        x = np.array(x)

    if isinstance(x, (int, np.integer, float, np.floating)):
        x = slice(int(x), int(x) + 1, None)

    # Correct the slice indices to be group instead of index based.
    # The energy groups are 1..x and the indices are 0..x-1.
    if isinstance(x, slice):
        jstart = x.start + increment if x.start is not None else None
        if x.stop is None:
            jstop = None
        elif isinstance(x.stop, list):
            # slice() accepts arbitrary objects for stop, so handle a list of stops
            jstop = [s + increment for s in x.stop]
        else:
            jstop = x.stop + increment
        return np.s_[jstart:jstop:x.step]
    elif isinstance(x, np.ndarray):
        return np.array([i + increment for i in x])
    else:
        raise TypeError(
            (
                "It is not known how to handle x type: " "{0} in utils.convertToSlice"
            ).format(type(x))
        )
def efmt(a: str) -> str:
    r"""Converts string exponential number to another string with just 2 digits in the exponent."""
    # this assumes that none of our numbers will be more than 1e100 or less than 1e-100...
    pieces = a.split("E")
    if len(pieces) != 2:
        pieces = a.split("e")
    mantissa, exponent = pieces[0], pieces[1]  # exponent looks like '+002' or '+02'
    if len(exponent) == 4:
        # three exponent digits: drop the hundred's place, keeping the sign
        exponent = exponent[0] + exponent[2:]
    return mantissa + "E" + exponent
def expandRepeatedFloats(repeatedList):
    """
    Return an expanded repeat list.

    Notes
    -----
    R char is valid for showing the number of repeats in MCNP. For examples the list:
    [150, 200, '9R']
    indicates a 150 day cycle followed by 10 200 day cycles.
    """
    expanded = []
    for entry in repeatedList:
        repeatCount = None
        if isinstance(entry, str):
            entry = entry.upper()
            if entry.count("R") > 1:
                raise ValueError("List had strings that were not repeats")
            if "R" in entry:
                repeatCount = int(entry.replace("R", ""))
        if repeatCount is None:
            expanded.append(float(entry))
        else:
            # repeat the most recent value repeatCount more times
            expanded.extend([expanded[-1]] * repeatCount)
    return expanded
def findClosest(listToSearch, val, indx=False):
    r"""
    Find the item in a list closest to a target value.

    Parameters
    ----------
    listToSearch : list
        The list to search through
    val : float
        The target value that is being searched for in the list
    indx : bool, optional
        If true, returns minVal and minIndex, otherwise, just the value

    Returns
    -------
    minVal : float
        The item in the listToSearch that is closest to val
    minI : int
        The index of the item in listToSearch that is closest to val. Returned if indx=True.
    """
    bestDistance = float("inf")
    bestValue = None
    bestIndex = None
    for position, candidate in enumerate(listToSearch):
        distance = abs(candidate - val)
        # Strict '<' keeps the first of any equally-close candidates.
        if distance < bestDistance:
            bestDistance = distance
            bestValue = candidate
            bestIndex = position
    if indx:
        return bestValue, bestIndex
    # backwards compatibility: value only
    return bestValue
def findNearestValue(searchList, searchValue):
    """Return the element of *searchList* that is closest to *searchValue*."""
    nearest, _ = findNearestValueAndIndex(searchList, searchValue)
    return nearest
def findNearestValueAndIndex(searchList, searchValue):
    """Search a given list for the value that is closest to the given search value.

    Returns a tuple containing the value and its index in the list.
    """
    candidates = np.array(searchList)
    distances = np.abs(candidates - searchValue)
    nearestIndex = distances.argmin()
    return candidates[nearestIndex], nearestIndex
def fixThreeDigitExp(strToFloat: str) -> float:
    """
    Convert FORTRAN numbers that cannot be converted into floats.

    Notes
    -----
    Converts a number like "9.03231714805651-101" (no e or E) to "9.03231714805651e-101".
    Some external depletion kernels currently need this fix. From contact with developer:
    FORTRAN only writes an E before exponents of magnitude 99 and below; 3-digit
    exponents are written bare (e.g. "1.0-101") and FORTRAN reads them fine.
    This routine reinserts the E so Python's float() can parse the value.
    """
    # SCIPAT_SPECIAL captures (mantissa, exponent) from the E-less notation.
    mantissa, exponent = SCIPAT_SPECIAL.match(strToFloat).groups()
    return float(mantissa + "E" + exponent)
def getFloat(val):
    r"""Return the float version of ``val``, or None if conversion is impossible.

    Useful for converting user input into floats when '' might be possible.

    Parameters
    ----------
    val : object
        Anything ``float()`` might accept (str, int, float, ...).

    Returns
    -------
    float or None
        ``float(val)``, or None when the value cannot be converted.
    """
    try:
        return float(val)
    except (TypeError, ValueError, OverflowError):
        # Only the conversion failures float() actually raises; a bare
        # `except:` here previously swallowed KeyboardInterrupt/SystemExit too.
        return None
def getStepsFromValues(values, prevValue=0.0):
    """Convert a list of floats to the list of steps between consecutive values.

    The first step is measured from ``prevValue``.
    """
    asFloats = [float(v) for v in values]
    # Pair each value with its predecessor (prevValue leads the sequence).
    return [current - previous for previous, current in zip([prevValue] + asFloats, asFloats)]
def linearInterpolation(x0, y0, x1, y1, targetX=None, targetY=None):
r"""
does a linear interpolation (or extrapolation) for y=f(x)
Parameters
----------
x0,y0,x1,y1 : float
Coordinates of two points to interpolate between
targetX : float, optional
X value to evaluate the line at
targetY : float, optional
Y value we want to find the x value for (inverse interpolation)
Returns
-------
interpY : float
The value of y(targetX), if targetX is not None
interpX : float
The value of x where y(x) = targetY (if targetY is not None)
y = m(x-x0) + b
x = (y-b)/m
"""
if x1 == x0:
raise ZeroDivisionError("The x-values are identical. Cannot interpolate.")
m = (y1 - y0) / (x1 - x0)
b = -m * x0 + y0
if targetX is not None:
return m * targetX + b
else:
return (targetY - b) | |
<= partitionedArray[kthElement]) and
np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
assert (np.all(partitionedArray[kthElement] <= partitionedArray[kthElement]) and
np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
if not (np.all(row[kthElement] <= row[kthElement]) and
np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
if not (np.all(row[kthElement] <= row[kthElement]) and
np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
if not (np.all(row[kthElement] <= row[kthElement]) and
np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
if not (np.all(row[kthElement] <= row[kthElement]) and
np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
####################################################################################
def test_percentile():
    """Compare NumCpp.percentile with np.percentile for every axis and method."""
    # NOTE: numpy renamed `interpolation=` to `method=` in 1.22; the old
    # keyword is kept here for parity with the rest of this file.
    methods = ['lower', 'higher', 'nearest', 'midpoint', 'linear']

    def freshArray():
        # Build a random-shaped NumCpp array mirrored by a numpy array.
        rows, cols = np.random.randint(20, 100, [2, ])
        shape = NumCpp.Shape(rows.item(), cols.item())
        cArray = NumCpp.NdArray(shape)
        values = np.random.randint(0, 100, [shape.rows, shape.cols])
        cArray.setArray(values)
        return cArray, values

    # Whole-array percentile: scalar result, exact comparison.
    for method in methods:
        cArray, values = freshArray()
        pct = np.random.rand(1).item() * 100
        assert (NumCpp.percentile(cArray, pct, NumCpp.Axis.NONE, method).item() ==
                np.percentile(values, pct, axis=None, interpolation=method))

    # Per-axis percentiles: 'midpoint'/'linear' interpolate between samples,
    # so compare rounded to 9 decimal places; the other methods are exact.
    for axis, npAxis in ((NumCpp.Axis.ROW, 0), (NumCpp.Axis.COL, 1)):
        for method in methods:
            cArray, values = freshArray()
            pct = np.random.rand(1).item() * 100
            actual = NumCpp.percentile(cArray, pct, axis, method).getNumpyArray().flatten()
            expected = np.percentile(values, pct, axis=npAxis, interpolation=method)
            if method in ('midpoint', 'linear'):
                assert np.array_equal(np.round(actual, 9), np.round(expected, 9))
            else:
                assert np.array_equal(actual, expected)
####################################################################################
def test_polar():
    """Smoke-test the scalar and array overloads of NumCpp.polar."""
    comps = np.random.rand(2).astype(np.double)
    assert NumCpp.polarScaler(comps[0], comps[1])

    rows, cols = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(rows.item(), cols.item())
    magArray = NumCpp.NdArray(shape)
    angleArray = NumCpp.NdArray(shape)
    magArray.setArray(np.random.rand(shape.rows, shape.cols))
    angleArray.setArray(np.random.rand(shape.rows, shape.cols))
    assert NumCpp.polarArray(magArray, angleArray) is not None
####################################################################################
def test_power():
    """NumCpp.power vs np.power: real/complex data, scalar/array exponents."""
    def newShape():
        rows, cols = np.random.randint(20, 100, [2, ])
        return NumCpp.Shape(rows.item(), cols.item())

    # Real array raised to one scalar integer exponent.
    shape = newShape()
    cArray = NumCpp.NdArray(shape)
    values = np.random.randint(0, 100, [shape.rows, shape.cols])
    exponent = np.random.randint(0, 5, [1, ]).item()
    cArray.setArray(values)
    assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
                          np.round(np.power(values, exponent), 9))

    # Complex array, scalar exponent.
    shape = newShape()
    cArray = NumCpp.NdArrayComplexDouble(shape)
    values = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
    exponent = np.random.randint(0, 5, [1, ]).item()
    cArray.setArray(values)
    assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
                          np.round(np.power(values, exponent), 9))

    # Real array with per-element uint8 exponents.
    shape = newShape()
    cArray = NumCpp.NdArray(shape)
    cExponents = NumCpp.NdArrayUInt8(shape)
    values = np.random.randint(0, 100, [shape.rows, shape.cols])
    exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
    cArray.setArray(values)
    cExponents.setArray(exponents)
    assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
                          np.round(np.power(values, exponents), 9))

    # Complex array with per-element uint8 exponents.
    shape = newShape()
    cArray = NumCpp.NdArrayComplexDouble(shape)
    cExponents = NumCpp.NdArrayUInt8(shape)
    values = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
    exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
    cArray.setArray(values)
    cExponents.setArray(exponents)
    assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
                          np.round(np.power(values, exponents), 9))
####################################################################################
def test_powerf():
    """NumCpp.powerf vs np.power with floating-point exponents."""
    def newShape():
        rows, cols = np.random.randint(20, 100, [2, ])
        return NumCpp.Shape(rows.item(), cols.item())

    # Real array, one scalar float exponent.
    shape = newShape()
    cArray = NumCpp.NdArray(shape)
    values = np.random.randint(0, 100, [shape.rows, shape.cols])
    exponent = np.random.rand(1).item() * 3
    cArray.setArray(values)
    assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
                          np.round(np.power(values, exponent), 9))

    # Complex array, scalar float exponent.
    shape = newShape()
    cArray = NumCpp.NdArrayComplexDouble(shape)
    values = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
    exponent = np.random.rand(1).item() * 3
    cArray.setArray(values)
    assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
                          np.round(np.power(values, exponent), 9))

    # Real array with per-element float exponents.
    shape = newShape()
    cArray = NumCpp.NdArray(shape)
    cExponents = NumCpp.NdArray(shape)
    values = np.random.randint(0, 20, [shape.rows, shape.cols])
    exponents = np.random.rand(shape.rows, shape.cols) * 3
    cArray.setArray(values)
    cExponents.setArray(exponents)
    assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
                          np.round(np.power(values, exponents), 9))

    # Complex array with per-element complex exponents.
    shape = newShape()
    cArray = NumCpp.NdArrayComplexDouble(shape)
    cExponents = NumCpp.NdArrayComplexDouble(shape)
    values = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
    exponents = np.random.rand(shape.rows, shape.cols) * 3 + 1j * np.random.rand(shape.rows, shape.cols)
    cArray.setArray(values)
    cExponents.setArray(exponents)
    assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
                          np.round(np.power(values, exponents), 9))
####################################################################################
def test_prod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * | |
<filename>test/test_allkinds.py
# type: ignore
"""
test_allkinds.py
Tests for GreynirCorrect module
Copyright (C) 2021 by <NAME>.
This software is licensed under the MIT License:
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This module tests both the token-level error detection and correction
and the sentence-level annotation functionality of GreynirCorrect.
"""
# Run with 'pytest -v' for verbose mode
import reynir_correct as rc
from reynir_correct import detokenize
from reynir_correct.wrappers import check_errors
# Tests for errtokenizer.py
def dump(tokens):
    """Print every token, annotating tokens that carry an error description."""
    print("\n{0} tokens:\n".format(len(tokens)))
    for tok in tokens:
        print("{0}".format(tok))
        description = tok.error_description
        if description:
            print(" {0}: {1}".format(tok.error_code, description))
def normalize(g):
    """Return a corrected, normalized string form of the token list in g."""
    normalized = detokenize(g, normalize=True)
    return normalized
def check(p):
    """Run the full error-check pipeline on input *p*.

    Returns the corrected, normalized text together with the token list,
    with all error classes enabled.
    """
    # The previous version annotated `options` as Dict[str, Union[str, bool]],
    # but `Dict`/`Union` are never imported in this module (a latent NameError
    # if the annotation were ever evaluated) and the type was wrong anyway:
    # "infile" holds a list. Build the dict directly instead.
    options = {
        "format": "textplustoks",
        "all_errors": True,
        "infile": [p],
        "one_sent": False,
    }
    return check_errors(**options)
def test_punctuation(verbose=False):
    """Quotation marks and ellipses should be normalized to Icelandic forms."""
    # ,,..." and "..." style quotes become Icelandic low/high quotes.
    text, toks = check('Hann var kallaður ,,pottormur" og var "hrekkjusvín".')
    assert "„pottormur“" in text
    assert "„hrekkjusvín“" in text
    # Three dots become a single ellipsis character.
    text, toks = check("Ég veit ekki...")
    assert "..." not in text
    # The ellipsis is present in the text even though the UI does not show
    # it in the parse tree (translated from the original Icelandic note).
    assert "…" in text
def test_multiple_spaces(verbose=False):
    """Runs of whitespace should be collapsed to a single space."""
    # NOTE(review): the doubled spaces this test relies on appear to have been
    # collapsed to single spaces at some point (as written, the final assert
    # would fail for any multi-word sentence) — confirm against the original
    # source and restore the double spaces in both strings if so.
    s, g = check("Hér er langt bil.")
    assert "Hér er" in s
    assert " " not in s
def test_doubling(verbose=False):
    """Test words that are erroneously written twice or more often"""
    # One doubled word is collapsed and flagged.
    text, toks = check("Ég hélt mér mér fast í sætið.")
    assert "hélt mér fast" in text
    assert "mér mér" not in text
    assert len(toks) == 9
    assert toks[3].error_code == "C001"  # mér

    # Several doubled words within a single sentence.
    text, toks = check("Potturinn kom ekki ekki í ljós ljós fyrr en en í dag dag.")
    for corrected in ("kom ekki í", "í ljós fyrr", "fyrr en í", "í dag."):
        assert corrected in text
    for doubled in ("ekki ekki", "ljós ljós", "en en", "dag dag"):
        assert doubled not in text
    assert len(toks) == 12
    flagged = {3, 5, 7, 9}
    for position, tok in enumerate(toks):
        if position in flagged:
            assert tok.error_code == "C001"  # ekki, ljós, en, dag
        else:
            assert not tok.error_code

    # Uppercase word followed by its lowercase double: suggestion only.
    text, toks = check("Slysið slysið átti sér stað í gærkvöldi.")
    assert toks[2].error_code == "C004/w"  # slysið, bara uppástunga, ekki leiðrétt

    # Many repetitions in a row collapse to one instance.
    text, toks = check(
        "Það er stanslaust fjör fjör fjör fjör fjör fjör fjör fjör í sveitinni."
    )
    assert len(toks) == 9
    assert "stanslaust fjör í" in text
    assert "fjör fjör" not in text
    assert toks[4].error_code == "C001"

    # 'á á' has two readings, so it is only a possible error;
    # 'en en' is never acceptable.
    text, toks = check("Ég á á sem heitir Lína langsokkur en en en hún kann ekki að jarma.")
    assert len(toks) == 16 or len(toks) == 15  # á á is corrected but is marked differently
    # assert "á á" in text  # Now corrected by default, but can be handled differently with a special option
    # assert "Ég á sem" not in text
    assert "langsokkur en hún" in text
    assert "en en" not in text
    assert toks[3].error_code == "C004/w"  # á
    assert toks[8].error_code == "C001" or toks[7].error_code == "C001"  # en
def test_accepted_doubling(verbose=False):
    """Repeated words that are legitimate must not be 'corrected' away."""
    # A comma between the repeated words makes the repetition valid.
    text, toks = check("Lífið, sem er flokkar, flokkar potta.")
    assert len(toks) == 11
    assert "flokkar, flokkar" in text
    assert not toks[4].error_code

    text, toks = check("Lífið er svaka, svaka gaman.")
    assert len(toks) == 9
    assert "svaka, svaka" in text
    assert not toks[3].error_code

    # Uppercase followed by lowercase where both are valid words:
    # flagged as possible errors, not corrected. 'í í' can legitimately
    # occur (particle followed by preposition), so it is only pointed out.
    text, toks = check("Finnur finnur gull í í Tálknafirði.")
    assert len(toks) == 9 or len(toks) == 7
    # assert "<NAME>" in text
    # assert not "Fin<NAME>ull" in text
    assert toks[2].error_code == "C004/w"  # Finnur
    assert toks[4].error_code == "C004/w" or toks[5].error_code == "C004/w"

    # Capitalized common noun followed by a proper noun should be accepted.
    text, toks = check("<NAME> í Tálknafirði?")
    # assert len(toks) == 8  # TODO útfæra að þetta er ekki leiðrétt
    # assert "<NAME>" in text

    # Proper noun plus the identical common noun: separate words, accepted.
    text, toks = check("Gaukur gaukur slasaðist í slysinu.")
    assert len(toks) == 8
    # TODO þetta á líklega frekar heima í checker.py, ath. hvort þetta er bæði
    # sérnafn og samnafn og þannig.
    # assert "Ga<NAME>" in text
    # assert "Gaukur slasaðist" not in text
    assert toks[2].error_code == "C004/w"  # gaukur

    # 'heldur' may legitimately appear twice in a row.
    text, toks = check("Kvikan heldur heldur mikið í jörðina.")
    assert len(toks) == 9
    # assert "Kvikan heldur heldur mikið" in text
    # assert "Kvikan heldur mikið" not in text
    assert toks[3].error_code == "C004/w"  # heldur

    # 'gegn gegn' carries two different meanings here; accepted.
    text, toks = check("Hún var góð og gegn gegn Svíum í úrslitaleiknum.")
    # assert len(toks) == 12
    # assert "og gegn gegn Svíum" in text
    # assert "og gegn Svíum" not in text
    assert toks[6].error_code == "C004/w"
def test_wrong_compounds(verbose=False):
    """Incorrectly joined compounds should be split into their proper words."""
    text, toks = check(
        "Fötin koma í margskonar litum og fara afturábak afþvíað annarstaðar "
        "eru fjögurhundruð mikilsháttar hestar."
    )
    assert len(toks) == 23
    for expected in ("margs konar", "aftur á bak", "af því að",
                     "annars staðar", "fjögur hundruð", "mikils háttar"):
        assert expected in text
    splitPositions = {4, 9, 12, 15, 17, 18}
    for position, tok in enumerate(toks):
        if position in splitPositions:
            # margs konar, aftur á bak, af því að, annars staðar,
            # fjögur hundruð, mikils háttar
            assert tok.error_code == "C002"
        else:
            assert not tok.error_code

    text, toks = check(
        "Vagninn fór niðrá torg og svo ofan í níuhundruð samskonar seinnihluta."
    )
    for expected in ("niður á", "níu hundruð", "sams konar", "seinni hluta"):
        assert expected in text
    splitPositions = {3, 10, 11, 13}
    for position, tok in enumerate(toks):
        if position in splitPositions:
            # niður á, níu hundruð, sams konar, seinni hluta
            assert tok.error_code == "C002"
        else:
            assert not tok.error_code
def test_split_compounds(verbose=False):
s, g = check("Aðal inngangur að auka herbergi er gagn stæður öðrum gangi.")
g = list(g)
assert len(g) == 10
assert "Aðalinngangur" in s
assert "Aðal inngangur" not in s
assert "aukaherbergi" in s
assert "auka herbergi" not in s
| |
= Constraint(expr= - m.x1712 + m.x1713 - m.x2675 + m.x2867 == 0)
# Generated-model constraints c703..c814, expressed as loops instead of one
# statement per constraint. The attribute names and expressions created are
# identical to the original unrolled form.
def _xvar(index):
    # Look up the model variable m.x<index> by name.
    return getattr(m, "x%d" % index)

# c703..c777: -x(k) + x(k+1) - x(k+963) + x(k+1155) == 0 for k = 1713..1787
for _i in range(75):
    setattr(m, "c%d" % (703 + _i), Constraint(
        expr=- _xvar(1713 + _i) + _xvar(1714 + _i)
             - _xvar(2676 + _i) + _xvar(2868 + _i) == 0))

# Boundary condition anchoring the second chain.
m.c778 = Constraint(expr= m.x1789 == 500)

# c779..c814: -x(k) + x(k+1) + x(k+1154) == 0 for k = 1789..1824
for _i in range(36):
    setattr(m, "c%d" % (779 + _i), Constraint(
        expr=- _xvar(1789 + _i) + _xvar(1790 + _i) + _xvar(2943 + _i) == 0))
m.c815 = Constraint(expr= - | |
<gh_stars>0
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# Short aliases for the Cheetah NameMapper lookup helpers; the generated
# respond() body calls these on nearly every line.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Metadata stamped by the Cheetah compiler describing how and when this
# module was generated from hpp.tmpl.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1554930773.925955
__CHEETAH_genTimestamp__ = 'Wed Apr 10 14:12:53 2019'
__CHEETAH_src__ = 'hpp.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 10 11:25:47 2019'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run against a Cheetah runtime older than the compiler that
# produced this module; stale templates must be recompiled.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class hpp(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(hpp, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''// ======================================================================
// \\title ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 2, col 12
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 2, col 12.
write(u'''Impl.hpp
// \\author ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"user",True) # u'$user' on line 3, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$user')) # from line 3, col 12.
write(u'''
// \\brief hpp file for ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 4, col 25
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 4, col 25.
write(u''' component implementation class
//
// \\copyright
// Copyright 2009-2015, by the California Institute of Technology.
// ALL RIGHTS RESERVED. United States Government Sponsorship
// acknowledged. Any commercial use must be negotiated with the Office
// of Technology Transfer at the California Institute of Technology.
//
// This software may be subject to U.S. export control laws and
// regulations. By accepting this document, the user agrees to comply
// with all U.S. export laws and regulations. User has the
// responsibility to obtain export licenses, or other export authority
// as may be required before exporting such information to foreign
// countries or providing access to foreign persons.
// ======================================================================
#ifndef ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 20, col 10
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 20, col 10.
write(u'''_HPP
#define ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 21, col 10
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 21, col 10.
write(u'''_HPP
#include "''')
_v = VFSL([locals()]+SL+[globals(), builtin],"include_path",True) # u'${include_path}' on line 23, col 12
if _v is not None: write(_filter(_v, rawExpr=u'${include_path}')) # from line 23, col 12.
write(u'''/''')
_v = VFSL([locals()]+SL+[globals(), builtin],"include_name",True) # u'${include_name}' on line 23, col 28
if _v is not None: write(_filter(_v, rawExpr=u'${include_name}')) # from line 23, col 28.
write(u'''ComponentAc.hpp"
''')
if VFSL([locals()]+SL+[globals(), builtin],"namespace_list",True) != None: # generated from line 25, col 1
for namespace in VFSL([locals()]+SL+[globals(), builtin],"namespace_list",True): # generated from line 26, col 3
write(u'''namespace ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"namespace",True) # u'${namespace}' on line 27, col 11
if _v is not None: write(_filter(_v, rawExpr=u'${namespace}')) # from line 27, col 11.
write(u''' {
''')
write(u'''
class ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 31, col 9
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 31, col 9.
write(u'''ComponentImpl :
public ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"component_base",True) # u'$component_base' on line 32, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$component_base')) # from line 32, col 12.
write(u'''
{
public:
// ----------------------------------------------------------------------
// Construction, initialization, and destruction
// ----------------------------------------------------------------------
//! Construct object ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'$name' on line 41, col 28
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 41, col 28.
write(u'''
//!
''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 43, col 7
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 43, col 7.
write(u'''ComponentImpl(
#if FW_OBJECT_NAMES == 1
''')
_v = VFSL([locals()]+SL+[globals(), builtin],"emit_non_port_params",False)([ VFSL([locals()]+SL+[globals(), builtin],"param_compName",True) ]) # u'$emit_non_port_params([ $param_compName ])' on line 45, col 1
if _v is not None: write(_filter(_v, rawExpr=u'$emit_non_port_params([ $param_compName ])')) # from line 45, col 1.
write(u'''
#else
void
#endif
);
//! Initialize object ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'$name' on line 51, col 29
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 51, col 29.
write(u'''
//!
void init(
''')
_v = VFSL([locals()]+SL+[globals(), builtin],"emit_non_port_params",False)(VFSL([locals()]+SL+[globals(), builtin],"params_init_hpp",True)) # u'$emit_non_port_params($params_init_hpp)' on line 54, col 1
if _v is not None: write(_filter(_v, rawExpr=u'$emit_non_port_params($params_init_hpp)')) # from line 54, col 1.
write(u'''
);
//! Destroy object ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'$name' on line 57, col 26
if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 57, col 26.
write(u'''
//!
~''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 59, col 8
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 59, col 8.
write(u'''ComponentImpl(void);
''')
if len(VFSL([locals()]+SL+[globals(), builtin],"typed_user_input_ports",True)) > 0: # generated from line 61, col 1
write(u''' PRIVATE:
// ----------------------------------------------------------------------
// Handler implementations for user-defined typed input ports
// ----------------------------------------------------------------------
''')
for instance, type, sync, priority, full, role, max_num in VFSL([locals()]+SL+[globals(), builtin],"typed_user_input_ports",True): # generated from line 68, col 3
write(u''' //! Handler implementation for ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"instance",True) # u'$instance' on line 69, col 38
if _v is not None: write(_filter(_v, rawExpr=u'$instance')) # from line 69, col 38.
write(u'''
//!
void ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"instance",True) # u'${instance}' on line 71, col 12
if _v is not None: write(_filter(_v, rawExpr=u'${instance}')) # from line 71, col 12.
write(u'''_handler(
''')
_v = VFSL([locals()]+SL+[globals(), builtin],"emit_port_params",False)([ VFSL([locals()]+SL+[globals(), builtin],"param_portNum",True) ] + VFSL([locals()]+SL+[globals(), builtin],"port_params",True)[VFSL([locals()]+SL+[globals(), builtin],"instance",True)]) # u'$emit_port_params([ $param_portNum ] + $port_params[$instance])' on line 72, col 1
if _v is not None: write(_filter(_v, rawExpr=u'$emit_port_params([ $param_portNum ] + $port_params[$instance])')) # from line 72, col 1.
write(u'''
);
''')
if len(VFSL([locals()]+SL+[globals(), builtin],"serial_input_ports",True)) > 0: # generated from line 77, col 1
write(u''' PRIVATE:
// ----------------------------------------------------------------------
// Handler implementations for user-defined serial input ports
// ----------------------------------------------------------------------
''')
for instance, sync, priority, full, max_num in VFSL([locals()]+SL+[globals(), builtin],"serial_input_ports",True): # generated from line 84, col 3
write(u''' //! Handler implementation for ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"instance",True) # u'$instance' on line 85, col 38
if _v is not None: write(_filter(_v, rawExpr=u'$instance')) # from line 85, col 38.
write(u'''
//!
void ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"instance",True) # u'${instance}' on line 87, col 12
if _v is not None: write(_filter(_v, rawExpr=u'${instance}')) # from line 87, col 12.
write(u'''_handler(
NATIVE_INT_TYPE portNum, ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"doxygen_post_comment",False)("The port number") # u'$doxygen_post_comment("The port number")' on line 88, col 34
if _v is not None: write(_filter(_v, rawExpr=u'$doxygen_post_comment("The port number")')) # from line 88, col 34.
write(u'''
Fw::SerializeBufferBase &Buffer ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"doxygen_post_comment",False)("The serialization buffer") # u'$doxygen_post_comment("The serialization buffer")' on line 89, col 41
if _v is not None: write(_filter(_v, rawExpr=u'$doxygen_post_comment("The serialization buffer")')) # from line 89, col 41.
write(u'''
);
''')
if VFSL([locals()]+SL+[globals(), builtin],"has_commands",True): # generated from line 94, col 1
write(u''' PRIVATE:
// ----------------------------------------------------------------------
// Command handler implementations
// ----------------------------------------------------------------------
''')
for mnemonic, opcode, sync, priority, full, comment in VFSL([locals()]+SL+[globals(), builtin],"commands",True): # generated from line 101, col 3
params = VFSL([locals()]+SL+[globals(), builtin],"command_params",True)[VFSL([locals()]+SL+[globals(), builtin],"mnemonic",True)]
write(u''' //! Implementation for ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"mnemonic",True) # u'$mnemonic' on line 103, col 30
if _v is not None: write(_filter(_v, rawExpr=u'$mnemonic')) # from line 103, col 30.
write(u''' command handler
//! ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"comment",True) # u'$comment' on line 104, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$comment')) # from line 104, col 11.
write(u'''
void ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"mnemonic",True) # u'${mnemonic}' on line 105, col 12
if _v is not None: write(_filter(_v, rawExpr=u'${mnemonic}')) # from line 105, col 12.
write(u'''_cmdHandler(
''')
_v = VFSL([locals()]+SL+[globals(), builtin],"emit_non_port_params",False)([ VFSL([locals()]+SL+[globals(), builtin],"param_opCode",True), VFSL([locals()]+SL+[globals(), builtin],"param_cmdSeq",True) ] + VFSL([locals()]+SL+[globals(), | |
"""
Serve ingstion statistics for consumption in to a Sheets spreadsheet.
Development:
$ adev runserver server.py
"""
import asyncio
import csv
import logging
import os
import sys
from datetime import datetime
from io import StringIO
import stripe
from aiohttp import web
from aiohttp.web_request import Request
from aiohttp.web_response import Response
from dateutil.parser import parse as dateparse
from dateutil.relativedelta import relativedelta
from dotenv import load_dotenv
from chargify import Chargify
# Load configuration from a local .env file before reading the environment.
load_dotenv()
# How often the background task refreshes the exported CSV data.
POLL_INTERVAL = 300  # seconds
# Required credentials; a missing variable raises KeyError at import time,
# which is intentional fail-fast behavior.
CHARGIFY_DOMAIN = os.environ['CHARGIFY_DOMAIN']
CHARGIFY_API_KEY = os.environ['CHARGIFY_API_KEY']
STRIPE_API_KEY = os.environ['STRIPE_API_KEY']
# The stripe library uses a module-global API key; configure it once here.
stripe.api_key = STRIPE_API_KEY
async def start_background_tasks(app: web.Application):
    """Kick off the periodic data-refresh task and stash it on the app."""
    refresh_task = asyncio.create_task(data_task(app))
    app['data_task'] = refresh_task
async def stop_background_tasks(app: web.Application):
    """Cancel the background refresh task and wait for it to finish."""
    app['data_task'].cancel()
    try:
        # Wait for the task to acknowledge cancellation so shutdown is clean.
        await app['data_task']
    except asyncio.CancelledError:
        # Awaiting a cancelled task re-raises CancelledError; swallowing it
        # here prevents the error from propagating out of app cleanup.
        pass
async def data_task(app: web.Application):
    """Periodically fetch export data and store it in the app state.

    Runs forever; sleeps POLL_INTERVAL seconds between refresh attempts.
    """
    log = app['log']
    while True:
        try:
            (app['customers_data'], app['subscriptions_data'], app['invoices_data'],
             app['chargify_subscriptions_data'], app['chargify_invoices_data'],
             app['stripe_customers_data']) = await export_data()
            log.info('CSV data loaded')
        except asyncio.CancelledError:
            # Shutdown in progress: let cancellation propagate.
            raise
        except Exception:
            # A transient upstream failure (Stripe/Chargify outage, network
            # blip) must not kill the poller permanently; log and retry on
            # the next cycle.
            log.exception('failed to refresh CSV data')
        await asyncio.sleep(POLL_INTERVAL)
def create_app():
    """
    Create and configure the web application.

    Registers the CSV export routes, seeds empty data stores that
    data_task() fills periodically, and wires the background task to the
    app startup/cleanup hooks.
    """
    # The stripe client is chatty at INFO level; keep it quiet.
    logging.getLogger("stripe").setLevel(logging.WARNING)
    # Use getLogger() so the logger participates in the logging hierarchy
    # and picks up whatever handlers start_logger() configures;
    # instantiating logging.Logger directly bypasses the manager entirely.
    log = logging.getLogger(__name__)
    start_logger()
    app = web.Application()
    app['log'] = log
    # Data stores start empty; handlers treat empty as "not loaded yet".
    app['customers_data'], app['subscriptions_data'], app['invoices_data'] = dict(), dict(), dict()
    app['chargify_subscriptions_data'], app['chargify_invoices_data'], app['stripe_customers_data'] = dict(), dict(), dict()
    app['chargify_domain'] = CHARGIFY_DOMAIN
    app['chargify_api_key'] = CHARGIFY_API_KEY
    app.on_startup.append(start_background_tasks)
    app.on_cleanup.append(stop_background_tasks)
    app.router.add_get('/healthcheck', healthcheck, name='healthcheck')
    app.router.add_get('/customers/csv', customers_csv, name='customers_csv')
    app.router.add_get('/subscriptions/csv', subscriptions_csv, name='subscriptions_csv')
    app.router.add_get('/invoices/csv', invoices_csv, name='invoices_csv')
    app.router.add_get(
        '/chargify_subscriptions/csv',
        chargify_subscriptions_csv,
        name='chargify_subscriptions_csv')
    app.router.add_get(
        '/chargify_invoices/csv',
        chargify_invoices_csv,
        name='chargify_invoices_csv')
    app.router.add_get('/stripe_customers/csv', stripe_customers_csv, name='stripe_customers_csv')
    return app
async def healthcheck(_: Request):
    """Liveness probe endpoint; always answers with a plain "OK"."""
    return Response(content_type="text/html", text="OK")
async def customers_csv(request: Request):
    """
    Output customers CSV format for consumption by Google Sheets.

    Responds 503 until the background poller has populated the data.
    """
    rows = request.app['customers_data']
    if not rows:
        # Surface "not ready yet" as a retryable 503 rather than raising a
        # bare Exception, which aiohttp would report as an opaque 500.
        raise web.HTTPServiceUnavailable(text="data not loaded yet")
    output = StringIO()
    csv.writer(output).writerows(rows)
    return Response(text=output.getvalue(), content_type="text/csv")
async def subscriptions_csv(request: Request):
    """
    Output subscriptions CSV format for consumption by Google Sheets.

    Responds 503 until the background poller has populated the data.
    """
    rows = request.app['subscriptions_data']
    if not rows:
        # Surface "not ready yet" as a retryable 503 rather than raising a
        # bare Exception, which aiohttp would report as an opaque 500.
        raise web.HTTPServiceUnavailable(text="data not loaded yet")
    output = StringIO()
    csv.writer(output).writerows(rows)
    return Response(text=output.getvalue(), content_type="text/csv")
async def invoices_csv(request: Request):
    """
    Output invoices CSV format for consumption by Google Sheets.

    Responds 503 until the background poller has populated the data.
    """
    rows = request.app['invoices_data']
    if not rows:
        # Surface "not ready yet" as a retryable 503 rather than raising a
        # bare Exception, which aiohttp would report as an opaque 500.
        raise web.HTTPServiceUnavailable(text="data not loaded yet")
    output = StringIO()
    csv.writer(output).writerows(rows)
    return Response(text=output.getvalue(), content_type="text/csv")
async def chargify_subscriptions_csv(request: Request):
    """
    Output Chargify subscriptions data.

    Responds 503 until the background poller has populated the data.
    """
    rows = request.app['chargify_subscriptions_data']
    if not rows:
        # Surface "not ready yet" as a retryable 503 rather than raising a
        # bare Exception, which aiohttp would report as an opaque 500.
        raise web.HTTPServiceUnavailable(text="data not loaded yet")
    output = StringIO()
    csv.writer(output).writerows(rows)
    return Response(text=output.getvalue(), content_type="text/csv")
async def chargify_invoices_csv(request: Request):
    """
    Output Chargify invoices data.

    Responds 503 until the background poller has populated the data.
    """
    rows = request.app['chargify_invoices_data']
    if not rows:
        # Surface "not ready yet" as a retryable 503 rather than raising a
        # bare Exception, which aiohttp would report as an opaque 500.
        raise web.HTTPServiceUnavailable(text="data not loaded yet")
    output = StringIO()
    csv.writer(output).writerows(rows)
    return Response(text=output.getvalue(), content_type="text/csv")
async def stripe_customers_csv(request: Request):
    """
    Output Stripe customers data.

    Responds 503 until the background poller has populated the data.
    """
    rows = request.app['stripe_customers_data']
    if not rows:
        # Surface "not ready yet" as a retryable 503 rather than raising a
        # bare Exception, which aiohttp would report as an opaque 500.
        raise web.HTTPServiceUnavailable(text="data not loaded yet")
    output = StringIO()
    csv.writer(output).writerows(rows)
    return Response(text=output.getvalue(), content_type="text/csv")
async def export_data():
"""
Get customer data from Stripe and Chargify.
"""
# Get data we need from Chargify
chargify = Chargify(CHARGIFY_DOMAIN, CHARGIFY_API_KEY)
async with chargify:
subscriptions = [record['subscription'] async for record in await chargify.get_subscriptions()]
invoices = [record async for record in await chargify.get_invoices()]
subscriptions_lookup = {subscription['id']: subscription for subscription in subscriptions}
# Get the customers from Stripe so we have access to their cards
customers = []
response = stripe.Customer.list(limit=100)
customers.extend(response['data'])
# Get more
while response['data']:
response = stripe.Customer.list(limit=100, starting_after=customers[-1]['id'])
customers.extend(response['data'])
customers_lookup = {customer['id']: customer for customer in customers}
columns = (
"customer[id]",
"customer[first_name]",
"customer[last_name]",
"customer[phone]",
"customer[company]",
"customer[email]",
"payment_method[type]",
"payment_method[gateway_account_id]",
"payment_method[reference_id]",
"customer[auto_collection]",
"customer[taxability]",
"customer[vat_number]",
"customer[preferred_currency_code]",
"customer[net_term_days]",
"customer[allow_direct_debit]",
"customer[locale]",
"customer[meta_data]",
"customer[consolidated_invoicing]",
"customer[invoice_notes]",
"billing_address[first_name]",
"billing_address[last_name]",
"billing_address[email]", ''
"billing_address[company]",
"billing_address[phone]",
"billing_address[line1]",
"billing_address[line2]",
"billing_address[line3]",
"billing_address[city]",
"billing_address[state_code]",
"billing_address[state]",
"billing_address[zip]",
"billing_address[country]",
"billing_address[validation_status]",
"customer[registered_for_gst]",
"customer[entity_code]",
"customer[exempt_number]",
)
rows = [columns]
for subscription in subscriptions:
customer = subscription['customer']
try:
credit_card = subscription['credit_card']
except KeyError:
credit_card = dict(
first_name=None,
last_name=None,
billing_address=None,
billing_address_2=None,
billing_city=None,
billing_state=None,
billing_zip=None,
billing_country=None,
vault_token=None,
)
stripe_customer_id = credit_card['vault_token']
if stripe_customer_id:
stripe_customer = customers_lookup[stripe_customer_id]
default_source = stripe_customer['default_source']
card_token = f"{stripe_customer_id}/{default_source}"
else:
card_token = None
taxability = "taxable" if credit_card['billing_country'] == "GB" else "exempt"
row = (
customer['reference'], # customer[id]",
customer['first_name'], # customer[first_name]",
customer['last_name'], # customer[last_name]",
customer['phone'], # customer[phone]",
customer['organization'], # customer[company]",
customer['email'], # customer[email]",
"card" if card_token else None, # payment_method[type]",
"stripe" if card_token else None, # payment_method[gateway_account_id]",
card_token, # payment_method[reference_id]",
"on" if card_token else "off", # customer[auto_collection]",
taxability, # customer[taxability]",
customer['vat_number'] if taxability == "taxable" else None, # customer[vat_number]",
subscription['currency'], # customer[preferred_currency_code]",
None, # customer[net_term_days]",
None, # customer[allow_direct_debit]",
None, # customer[locale]",
None, # customer[meta_data]",
None, # customer[consolidated_invoicing]",
None, # customer[invoice_notes]",
credit_card['first_name'], # billing_address[first_name]",
credit_card['last_name'], # billing_address[last_name]",
customer['email'], # billing_address[email]",
None, # billing_address[company]",
None, # billing_address[phone]",
credit_card['billing_address'], # billing_address[line1]",
credit_card['billing_address_2'], # billing_address[line2]",
None, # billing_address[line3]",
credit_card['billing_city'], # billing_address[city]",
None, # billing_address[state_code]",
credit_card['billing_state'], # billing_address[state]",
credit_card['billing_zip'], # billing_address[zip]",
credit_card['billing_country'], # billing_address[country]",
"yes" if customer['verified'] else "no", # billing_address[validation_status]",
None, # customer[registered_for_gst]",
None, # customer[entity_code]",
None, # customer[exempt_number]",
)
rows.append(row)
customers_rows = rows
# Build the subscriptions data
columns = (
"customer[id]",
"subscription[id]",
"subscription[plan_id]",
"subscription[plan_quantity]",
"subscription[plan_unit_price]",
"currency",
"subscription[setup_fee]",
"subscription[status]",
"subscription[start_date]",
"subscription[trial_start]",
"subscription[trial_end]",
"subscription[started_at]",
"subscription[current_term_start]",
"subscription[current_term_end]",
"subscription[cancelled_at]",
"subscription[pause_date]",
"subscription[resume_date]",
"billing_cycles",
"subscription[auto_collection]",
"subscription[po_number]",
"coupon_ids[0]",
"coupon_ids[1]",
"subscription[payment_source_id]",
"subscription[invoice_notes]",
"subscription[meta_data]",
"shipping_address[first_name]",
"shipping_address[last_name]",
"shipping_address[email]",
"shipping_address[company]",
"shipping_address[phone]",
"shipping_address[line1]",
"shipping_address[line2]",
"shipping_address[line3]",
"shipping_address[city]",
"shipping_address[state_code]",
"shipping_address[state]",
"shipping_address[zip]",
"shipping_address[country]",
"shipping_address[validation_status]",
"addons[id][0]",
"addons[quantity][0]",
"addons[unit_price][0]",
"addons[id][1]",
"addons[quantity][1]",
"addons[unit_price][1]")
rows = [columns]
subscriptions_to_plans = dict()
for subscription in subscriptions:
customer = subscription['customer']
plan_id = subscription['product']['handle']
if not plan_id:
continue
plan_id = {
"unlimited": "unlimited-gbp",
"pro-plus": "professional-gbp",
"pro": "scale-gbp",
"basic": "starter-gbp",
}[plan_id]
subscriptions_to_plans[subscription['id']] = plan_id
status = {
"active": "active",
"canceled": "cancelled",
"expired": "cancelled",
"trial_ended": "cancelled",
"trialing": "trial",
"past_due": "active",
"on_hold": "paused"
}[subscription['state']]
if subscription['coupon_codes']:
coupon_ids = subscription['coupon_codes']
if len(coupon_ids) == 1:
coupon_ids.append(None)
else:
coupon_ids = [None, None]
row = (
customer['reference'], # customer[id]",
subscription['id'], # subscription[id]",
plan_id, # subscription[plan_id]",
1, # subscription[plan_quantity]",
None,
# subscription['product']['price_in_cents'] / 100# subscription[plan_unit_price]",
"GBP", # curreny",
0, # subscription[setup_fee]",
status, # subscription[status]",
None, # subscription[start_date]" // this is the date for future subscriptions,
# subscription[trial_start]",
format_date(subscription['trial_started_at'] if subscription['state'] == "trialing"
else None),
# subscription[trial_end]",
format_date(subscription['trial_ended_at']if subscription['state'] == "trialing"
else None),
# subscription[started_at]",
format_date(subscription['created_at'] if status in ("active", "cancelled") else None),
# subscription[current_term_start]",
format_date(subscription['current_period_started_at']
if status in ("active", "paused") else None),
# subscription[current_term_end]",
format_date(subscription['current_period_ends_at']
if status in ("active", "paused") else None),
format_date(subscription['trial_ended_at'] if subscription['state'] == "trial_ended"
else subscription['canceled_at']),
# subscription[cancelled_at]",
format_date(subscription['on_hold_at']), # subscription[pause_date]",
None, # subscription[resume_date]",
None, # billing_cycles",
"on", # subscription[auto_collection]",
None, # subscription[po_number]",
coupon_ids[0], # coupon_ids[0]",
coupon_ids[1], # coupon_ids[1]",
None, # subscription[payment_source_id]",
None, # subscription[invoice_notes]",
None, # subscription[meta_data]",
None, # shipping_address[first_name]",
None, # shipping_address[last_name]",
None, # shipping_address[email]",
None, # shipping_address[company]",
None, # shipping_address[phone]",
None, # shipping_address[line1]",
None, # shipping_address[line2]",
None, # shipping_address[line3]",
None, # shipping_address[city]",
None, # shipping_address[state_code]",
None, # shipping_address[state]",
None, # shipping_address[zip]",
None, # shipping_address[country]",
None, # shipping_address[validation_status]",
None, # addons[id][0]",
None, # addons[quantity][0]",
None, # addons[unit_price][0]",
None, # addons[id][1]",
None, # addons[quantity][1]",
None, # addons[unit_price][1]"
)
rows.append(row)
subscriptions_rows = rows
# Build the invoices data
columns = (
"invoice[id]",
"invoice[currency_code]",
"invoice[customer_id]",
"invoice[subscription_id]",
"invoice[status]",
"invoice[date]",
"invoice[po_number]",
"invoice[price_type]",
"tax_override_reason",
"invoice[vat_number]",
"invoice[total]",
"round_off",
"invoice[due_date]",
"invoice[net_term_days]",
"use_for_proration",
"billing_address[first_name]",
"billing_address[last_name]",
"billing_address[email]",
"billing_address[company]",
"billing_address[phone]",
"billing_address[line1]",
"billing_address[line2]",
"billing_address[line3]",
"billing_address[city]",
"billing_address[state_code]",
"billing_address[state]",
"billing_address[zip]",
"billing_address[country]",
"billing_address[validation_status]",
"shipping_address[first_name]",
"shipping_address[last_name]",
"shipping_address[email]",
"shipping_address[company]",
"shipping_address[phone]",
"shipping_address[line1]",
"shipping_address[line2]",
"shipping_address[line3]",
"shipping_address[city]",
"shipping_address[state_code]",
"shipping_address[state]",
"shipping_address[zip]",
"shipping_address[country]",
"shipping_address[validation_status]",
"line_items[id][0]",
"line_items[entity_type][0]",
"line_items[entity_id][0]",
"line_items[date_from][0]",
"line_items[date_to][0]",
"line_items[description][0]",
"line_items[unit_amount][0]",
"line_items[quantity][0]",
"line_items[amount][0]",
"line_items[item_level_discount1_entity_id][0]",
"line_items[item_level_discount1_amount][0]",
"line_items[item_level_discount2_entity_id][0]",
"line_items[item_level_discount2_amount][0]",
"line_items[tax1_name][0]",
"line_items[tax1_amount][0]",
"line_items[tax2_name][0]",
"line_items[tax2_amount][0]",
"line_items[tax3_name][0]",
"line_items[tax3_amount][0]",
"line_items[tax4_name][0]",
"line_items[tax4_amount][0]",
"line_item_tiers[line_item_id][0]",
"line_item_tiers[starting_unit][0]",
"line_item_tiers[ending_unit][0]",
"line_item_tiers[quantity_used][0]",
"line_item_tiers[unit_amount][0]",
"discounts[entity_type][0]",
"discounts[entity_id][0]",
"discounts[description][0]",
"discounts[amount][0]",
"taxes[name][0]",
"taxes[rate][0]",
"taxes[amount][0]",
"taxes[description][0]",
"taxes[juris_type][0]",
"taxes[juris_name][0]",
"taxes[juris_code][0]",
"payments[amount][0]",
"payments[payment_method][0]",
"payments[date][0]",
"payments[reference_number][0]",
"notes[entity_type][0]",
"notes[entity_id][0]",
"notes[note][0]",
"line_items[date_from][1]",
"line_items[date_to][1]",
"line_items[description][1]",
"line_items[unit_amount][1]",
"line_items[quantity][1]",
"line_items[amount][1]",
"line_items[entity_type][1]",
"line_items[entity_id][1]",
)
rows = [columns]
for invoice in invoices:
customer = invoice['customer']
subscription = subscriptions_lookup[invoice['subscription_id']]
try:
plan_id = subscriptions_to_plans[subscription['id']]
except KeyError:
continue
billing_address = invoice['billing_address']
period_from = dateparse(invoice['issue_date'])
period_to = period_from + relativedelta(months=1)
taxed = bool(float(invoice['tax_amount']))
if invoice['status'] == "canceled":
continue
if not float(invoice['subtotal_amount']):
continue
amount = float(invoice['subtotal_amount']) - float(invoice['credit_amount'])
discount_amount = float(invoice['discount_amount'])
total_amount = amount - discount_amount + float(invoice['tax_amount'])
# if invoice['uid'] == "inv_97h2jvz5jv2x7":
# import pdb
# pdb.set_trace()
# pass
row = (
invoice['uid'], # invoice[id],
"GBP", # invoice[currency_code],
None, # Not applicable if subscription is specified # invoice[customer_id],
invoice['subscription_id'], # invoice[subscription_id],
invoice['status'], # invoice[status],
format_date(invoice['issue_date']), # invoice[date],
invoice['sequence_number'], # invoice[po_number],
"tax_inclusive", # invoice[price_type],
None, # tax_override_reason,
None, # invoice[vat_number],
total_amount, # invoice[total],
None, # round_off,
None, # invoice[due_date],
None, # invoice[net_term_days],
"TRUE", # use_for_proration,
customer['first_name'], # billing_address[first_name],
customer['last_name'], # billing_address[last_name],
customer['email'], # billing_address[email],
customer['organization'], # billing_address[company],
None, # billing_address[phone],
billing_address['street'], # billing_address[line1],
billing_address['line2'], # billing_address[line2],
None, # billing_address[line3],
billing_address['city'], # billing_address[city],
None, # billing_address[state_code],
billing_address['state'], # billing_address[state],
billing_address['zip'], # billing_address[zip],
billing_address['country'], # billing_address[country],
None, # billing_address[validation_status],
None, # shipping_address[first_name],
None, # shipping_address[last_name],
None, # shipping_address[email],
None, # | |
"8": {}, "9": {}},
"ospfv3AsLsdbEntry": {"4": {}, "5": {}, "6": {}, "7": {}, "8": {}},
"ospfv3CfgNbrEntry": {"5": {}, "6": {}},
"ospfv3GeneralGroup": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ospfv3HostEntry": {"3": {}, "4": {}, "5": {}},
"ospfv3IfEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ospfv3LinkLsdbEntry": {"10": {}, "6": {}, "7": {}, "8": {}, "9": {}},
"ospfv3NbrEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ospfv3VirtIfEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ospfv3VirtLinkLsdbEntry": {"10": {}, "6": {}, "7": {}, "8": {}, "9": {}},
"ospfv3VirtNbrEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"pim": {"1": {}},
"pimAnycastRPSetLocalRouter": {},
"pimAnycastRPSetRowStatus": {},
"pimBidirDFElectionState": {},
"pimBidirDFElectionStateTimer": {},
"pimBidirDFElectionWinnerAddress": {},
"pimBidirDFElectionWinnerAddressType": {},
"pimBidirDFElectionWinnerMetric": {},
"pimBidirDFElectionWinnerMetricPref": {},
"pimBidirDFElectionWinnerUpTime": {},
"pimCandidateRPEntry": {"3": {}, "4": {}},
"pimComponentEntry": {"2": {}, "3": {}, "4": {}, "5": {}},
"pimGroupMappingPimMode": {},
"pimGroupMappingPrecedence": {},
"pimInAsserts": {},
"pimInterfaceAddress": {},
"pimInterfaceAddressType": {},
"pimInterfaceBidirCapable": {},
"pimInterfaceDFElectionRobustness": {},
"pimInterfaceDR": {},
"pimInterfaceDRPriority": {},
"pimInterfaceDRPriorityEnabled": {},
"pimInterfaceDomainBorder": {},
"pimInterfaceEffectOverrideIvl": {},
"pimInterfaceEffectPropagDelay": {},
"pimInterfaceElectionNotificationPeriod": {},
"pimInterfaceElectionWinCount": {},
"pimInterfaceEntry": {
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"pimInterfaceGenerationIDValue": {},
"pimInterfaceGraftRetryInterval": {},
"pimInterfaceHelloHoldtime": {},
"pimInterfaceHelloInterval": {},
"pimInterfaceJoinPruneHoldtime": {},
"pimInterfaceJoinPruneInterval": {},
"pimInterfaceLanDelayEnabled": {},
"pimInterfaceOverrideInterval": {},
"pimInterfacePropagationDelay": {},
"pimInterfacePruneLimitInterval": {},
"pimInterfaceSRPriorityEnabled": {},
"pimInterfaceStatus": {},
"pimInterfaceStubInterface": {},
"pimInterfaceSuppressionEnabled": {},
"pimInterfaceTrigHelloInterval": {},
"pimInvalidJoinPruneAddressType": {},
"pimInvalidJoinPruneGroup": {},
"pimInvalidJoinPruneMsgsRcvd": {},
"pimInvalidJoinPruneNotificationPeriod": {},
"pimInvalidJoinPruneOrigin": {},
"pimInvalidJoinPruneRp": {},
"pimInvalidRegisterAddressType": {},
"pimInvalidRegisterGroup": {},
"pimInvalidRegisterMsgsRcvd": {},
"pimInvalidRegisterNotificationPeriod": {},
"pimInvalidRegisterOrigin": {},
"pimInvalidRegisterRp": {},
"pimIpMRouteEntry": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}},
"pimIpMRouteNextHopEntry": {"2": {}},
"pimKeepalivePeriod": {},
"pimLastAssertGroupAddress": {},
"pimLastAssertGroupAddressType": {},
"pimLastAssertInterface": {},
"pimLastAssertSourceAddress": {},
"pimLastAssertSourceAddressType": {},
"pimNbrSecAddress": {},
"pimNeighborBidirCapable": {},
"pimNeighborDRPriority": {},
"pimNeighborDRPriorityPresent": {},
"pimNeighborEntry": {"2": {}, "3": {}, "4": {}, "5": {}},
"pimNeighborExpiryTime": {},
"pimNeighborGenerationIDPresent": {},
"pimNeighborGenerationIDValue": {},
"pimNeighborLanPruneDelayPresent": {},
"pimNeighborLossCount": {},
"pimNeighborLossNotificationPeriod": {},
"pimNeighborOverrideInterval": {},
"pimNeighborPropagationDelay": {},
"pimNeighborSRCapable": {},
"pimNeighborTBit": {},
"pimNeighborUpTime": {},
"pimOutAsserts": {},
"pimRPEntry": {"3": {}, "4": {}, "5": {}, "6": {}},
"pimRPMappingChangeCount": {},
"pimRPMappingNotificationPeriod": {},
"pimRPSetEntry": {"4": {}, "5": {}},
"pimRegisterSuppressionTime": {},
"pimSGDRRegisterState": {},
"pimSGDRRegisterStopTimer": {},
"pimSGEntries": {},
"pimSGIAssertState": {},
"pimSGIAssertTimer": {},
"pimSGIAssertWinnerAddress": {},
"pimSGIAssertWinnerAddressType": {},
"pimSGIAssertWinnerMetric": {},
"pimSGIAssertWinnerMetricPref": {},
"pimSGIEntries": {},
"pimSGIJoinExpiryTimer": {},
"pimSGIJoinPruneState": {},
"pimSGILocalMembership": {},
"pimSGIPrunePendingTimer": {},
"pimSGIUpTime": {},
"pimSGKeepaliveTimer": {},
"pimSGOriginatorState": {},
"pimSGPimMode": {},
"pimSGRPFIfIndex": {},
"pimSGRPFNextHop": {},
"pimSGRPFNextHopType": {},
"pimSGRPFRouteAddress": {},
"pimSGRPFRouteMetric": {},
"pimSGRPFRouteMetricPref": {},
"pimSGRPFRoutePrefixLength": {},
"pimSGRPFRouteProtocol": {},
"pimSGRPRegisterPMBRAddress": {},
"pimSGRPRegisterPMBRAddressType": {},
"pimSGRptEntries": {},
"pimSGRptIEntries": {},
"pimSGRptIJoinPruneState": {},
"pimSGRptILocalMembership": {},
"pimSGRptIPruneExpiryTimer": {},
"pimSGRptIPrunePendingTimer": {},
"pimSGRptIUpTime": {},
"pimSGRptUpTime": {},
"pimSGRptUpstreamOverrideTimer": {},
"pimSGRptUpstreamPruneState": {},
"pimSGSPTBit": {},
"pimSGSourceActiveTimer": {},
"pimSGStateRefreshTimer": {},
"pimSGUpTime": {},
"pimSGUpstreamJoinState": {},
"pimSGUpstreamJoinTimer": {},
"pimSGUpstreamNeighbor": {},
"pimSGUpstreamPruneLimitTimer": {},
"pimSGUpstreamPruneState": {},
"pimStarGEntries": {},
"pimStarGIAssertState": {},
"pimStarGIAssertTimer": {},
"pimStarGIAssertWinnerAddress": {},
"pimStarGIAssertWinnerAddressType": {},
"pimStarGIAssertWinnerMetric": {},
"pimStarGIAssertWinnerMetricPref": {},
"pimStarGIEntries": {},
"pimStarGIJoinExpiryTimer": {},
"pimStarGIJoinPruneState": {},
"pimStarGILocalMembership": {},
"pimStarGIPrunePendingTimer": {},
"pimStarGIUpTime": {},
"pimStarGPimMode": {},
"pimStarGPimModeOrigin": {},
"pimStarGRPAddress": {},
"pimStarGRPAddressType": {},
"pimStarGRPFIfIndex": {},
"pimStarGRPFNextHop": {},
"pimStarGRPFNextHopType": {},
"pimStarGRPFRouteAddress": {},
"pimStarGRPFRouteMetric": {},
"pimStarGRPFRouteMetricPref": {},
"pimStarGRPFRoutePrefixLength": {},
"pimStarGRPFRouteProtocol": {},
"pimStarGRPIsLocal": {},
"pimStarGUpTime": {},
"pimStarGUpstreamJoinState": {},
"pimStarGUpstreamJoinTimer": {},
"pimStarGUpstreamNeighbor": {},
"pimStarGUpstreamNeighborType": {},
"pimStaticRPOverrideDynamic": {},
"pimStaticRPPimMode": {},
"pimStaticRPPrecedence": {},
"pimStaticRPRPAddress": {},
"pimStaticRPRowStatus": {},
"qllcLSAdminEntry": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}, "6": {}, "7": {}},
"qllcLSOperEntry": {
"1": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
},
"qllcLSStatsEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ripCircEntry": {
"1": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ripSysEntry": {"1": {}, "2": {}, "3": {}},
"rmon.10.106.1.2": {},
"rmon.10.106.1.3": {},
"rmon.10.106.1.4": {},
"rmon.10.106.1.5": {},
"rmon.10.106.1.6": {},
"rmon.10.106.1.7": {},
"rmon.10.145.1.2": {},
"rmon.10.145.1.3": {},
"rmon.10.186.1.2": {},
"rmon.10.186.1.3": {},
"rmon.10.186.1.4": {},
"rmon.10.186.1.5": {},
"rmon.10.229.1.1": {},
"rmon.10.229.1.2": {},
"rmon.19.1": {},
"rmon.10.76.1.1": {},
"rmon.10.76.1.2": {},
"rmon.10.76.1.3": {},
"rmon.10.76.1.4": {},
"rmon.10.76.1.5": {},
"rmon.10.76.1.6": {},
"rmon.10.76.1.7": {},
"rmon.10.76.1.8": {},
"rmon.10.76.1.9": {},
"rmon.10.135.1.1": {},
"rmon.10.135.1.2": {},
"rmon.10.135.1.3": {},
"rmon.19.12": {},
"rmon.10.4.1.2": {},
"rmon.10.4.1.3": {},
"rmon.10.4.1.4": {},
"rmon.10.4.1.5": {},
"rmon.10.4.1.6": {},
"rmon.10.69.1.2": {},
"rmon.10.69.1.3": {},
"rmon.10.69.1.4": {},
"rmon.10.69.1.5": {},
"rmon.10.69.1.6": {},
"rmon.10.69.1.7": {},
"rmon.10.69.1.8": {},
"rmon.10.69.1.9": {},
"rmon.19.15": {},
"rmon.19.16": {},
"rmon.19.2": {},
"rmon.19.3": {},
"rmon.19.4": {},
"rmon.19.5": {},
"rmon.19.6": {},
"rmon.19.7": {},
"rmon.19.8": {},
"rmon.19.9": {},
"rs232": {"1": {}},
"rs232AsyncPortEntry": {
"1": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
},
"rs232InSigEntry": {"1": {}, "2": {}, "3": {}},
"rs232OutSigEntry": {"1": {}, "2": {}, "3": {}},
"rs232PortEntry": {
"1": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
},
"rs232SyncPortEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"rsrbRemotePeerEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"rsrbRingEntry": {"2": {}, "3": {}, "4": {}, "5": {}, "6": {}, "7": {}, "8": {}},
"rsrbVirtRingEntry": {"2": {}, "3": {}},
"rsvp.2.1": {},
"rsvp.2.2": {},
"rsvp.2.3": {},
"rsvp.2.4": {},
"rsvp.2.5": {},
"rsvpIfEntry": {
"1": {},
"10": {},
"11": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"rsvpNbrEntry": {"2": {}, "3": {}},
"rsvpResvEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"26": {},
"27": {},
"28": {},
"29": {},
"3": {},
"30": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"rsvpResvFwdEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"26": {},
"27": {},
"28": {},
"29": {},
"3": {},
"30": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"rsvpSenderEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"26": {},
"27": {},
"28": {},
"29": {},
"3": {},
"30": {},
"31": {},
"32": {},
"33": {},
"34": {},
"35": {},
"36": {},
"37": {},
"38": {},
"39": {},
"4": {},
"40": {},
"41": {},
"42": {},
"43": {},
"44": {},
"45": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"rsvpSenderOutInterfaceStatus": {},
"rsvpSessionEntry": {
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"rtmpEntry": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}, "6": {}, "7": {}},
"rttMonApplAuthKeyChain": {},
"rttMonApplAuthKeyString1": {},
"rttMonApplAuthKeyString2": {},
"rttMonApplAuthKeyString3": {},
"rttMonApplAuthKeyString4": {},
"rttMonApplAuthKeyString5": {},
"rttMonApplAuthStatus": {},
"rttMonApplFreeMemLowWaterMark": {},
"rttMonApplLatestSetError": {},
"rttMonApplLpdGrpStatsReset": {},
"rttMonApplMaxPacketDataSize": {},
| |
(y < y1))
elif side in (-1, 'l', 'left'):
ind = np.logical_and((y >= y0), (y < y1))
elif side in (1, 'r', 'right'):
ind = np.logical_and((y > y0), (y <= y1))
elif side in (2, 'b', 'both'):
ind = np.logical_and((y >= y0), (y <= y1))
else:
raise ValueError("unknow value of side!")
return np.where(ind) if i_ else ind
else:
if y0 > y1 and y0 - y1 < r_ / 2:
y0, y1 = y1, y0
else:
y1 = cyl_(y1, y0 + r_, y0)
return ind_inRange_(cyl_(y, y0 + r_, y0), y0, y1, side=side, i_=i_)
def ind_win_(doy, d, w, rb=366, lb=1):
    """
    ... boolean mask of `doy` entries inside a +/- w-day window centred on
    day `d`, cyclic over [lb, rb] (handles year-boundary wrap via cyl_) ...
    """
    window = np.arange(d - w, d + w + 1)
    return np.isin(doy, cyl_(window, rb=rb, lb=lb))
def nanMask_(data):
    """
    ... give nan where masked: a masked array with masked points is
    demoted to its plain ndarray with NaN at masked positions; anything
    else is returned unchanged ...
    """
    if np.ma.isMaskedArray(data) and np.ma.is_masked(data):
        # write NaN into the underlying buffer, then drop the mask
        data.data[data.mask] = np.nan
        return data.data
    return data
def rPeriod_(p_bounds, TeX_on=False):
    """
    ... return readable style of period from period bounds; a single year
    when both bounds are equal, otherwise 'y0-y1' (TeX dash optional) ...
    """
    y0, y1 = p_bounds[0], p_bounds[-1]
    if y0 == y1:
        return r'{:d}'.format(y0)
    dash = r'$-$' if TeX_on else r'-'
    return r'{:d}{}{:d}'.format(y0, dash, y1)
def rTime_(t):
    """
    ... return readable style of time interval `t` (seconds): day count
    for >= 1 day, otherwise an H:M:S clock string ...
    """
    import time
    days = t / (60 * 60 * 24)
    if days < 1:
        return time.strftime('passing :t::i:::m::::e::::: %H:%M:%S +0000',
                             time.gmtime(t))
    return r'passing :t::i:::m::::e::::: {:.2f} day(s)'.format(days)
def uniqL_(l):
    """
    ... return sorted unique elements of (flattened) list l ...
    """
    flat = np.array(flt_l(l))
    return list(np.unique(flat))
def ouniqL_(l):
    """
    ... return ordered unique elements of (flattened) list l ...
    """
    # dict preserves insertion order, so this dedups keeping first-seen order
    return list(dict.fromkeys(flt_l(l)))
def schF_keys_(idir, *keys, ext='*', ordered=False, h_=False):
    """
    ... find files in `idir` whose names contain all of `keys`
    (in the given order when ordered=True, any order otherwise);
    `ext` restricts the filename tail, h_=True also searches
    hidden (dot-prefixed) files; returns a sorted list ...
    """
    import glob
    import os
    from itertools import permutations
    wild = '*'
    if ordered:
        patterns = [wild.join(keys)]
    else:
        # every ordering of the keywords, deduplicated
        patterns = [wild.join(p) for p in set(permutations(keys))]
    found = []
    for patt in patterns:
        if h_:
            found += glob.iglob(os.path.join(idir, '.*' + wild.join([patt, ext])))
        found += glob.glob(os.path.join(idir, '*' + wild.join([patt, ext])))
    found = list(set(found))
    found.sort()
    return found
def valueEqFront_(l, v):
    """
    ... move element(s) equal to the specified value v in the list l to front
    (relative order of both groups preserved) ...
    """
    # sorted() is stable: items equal to v (key False) come first,
    # everything else keeps its original order behind them
    return sorted(l, key=lambda item: item != v)
def indFront_(l, v):
    """
    ... reorder l so the element at index v comes first
    (index list is reordered, then applied via l_ind_) ...
    """
    reordered = valueEqFront_(list(range(len(l))), v)
    return l_ind_(l, reordered)
def iter_str_(iterable):
    """
    ... flatten the iterable and transform every element to string ...
    """
    return [str(item) for item in flt_l(iterable)]
def ext_(s):
    """
    ... get extension (including the leading dot, '' if none)
    from filename (str) ...
    """
    import os
    _, extension = os.path.splitext(s)
    return extension
def find_patt_(p, s):
    """
    ... return s if it matches pattern p (None otherwise); for an
    iterable of str, return the sub-list of matching items ...
    """
    import re
    if isinstance(s, str):
        found = re.search(p, s)
        return s if found else None
    if isIter_(s, xi=str):
        return [item for item in s if find_patt_(p, item)]
def pure_fn_(s, no_etc=True):
    """
    ... get pure filename without path to and (when no_etc=True) without
    extension; a list/iterable of str is processed element-wise ...
    """
    import os
    def _rm_etc(fn):
        # strip the extension unless asked to keep it
        return os.path.splitext(fn)[0] if no_etc else fn
    if isinstance(s, str):
        return _rm_etc(os.path.basename(s))
    elif isIter_(s, str):
        # BUG FIX: propagate no_etc to each element (previously the
        # recursive call always used the default, so no_etc=False was
        # silently ignored for iterable input)
        return [pure_fn_(i, no_etc=no_etc) for i in s]
def get_path_(s):
    """
    ... get path (with trailing slash) from filename; './' when the
    name has no directory part ...
    """
    import re
    # drop the trailing non-slash component; empty result means cwd
    return re.sub(r'[^\/]+$', '', s) or './'
def isMonth_(mn, short_=True, nm=3):
    """
    ... if input string is name of a month (full name or an
    abbreviation of at least 3 letters); `nm` is accepted but unused
    in the current implementation ...
    """
    mns = ['january', 'february', 'march', 'april', 'may', 'june',
           'july', 'august', 'september', 'october', 'november', 'december']
    n = len(mn)
    if n < 3:
        warnings.warn("month string shorter than 3 letters; return 'False'!")
        return False
    lowered = mn.lower()
    truncated = [month[:n] for month in mns]
    if short_:
        return lowered in truncated
    return lowered in mns or lowered in truncated
def mnN_(mn):
    """
    ... month order in calendar (1-12) for a full or abbreviated month
    name; with fewer than 3 letters the first alphabetical match wins ...
    """
    mns = ['january', 'february', 'march', 'april', 'may', 'june',
           'july', 'august', 'september', 'october', 'november', 'december']
    n = len(mn)
    if n < 3:
        # FIX: grammar in the warning message ("short than" -> "shorter than")
        warnings.warn("month string shorter than 3 letters; 1st guess used!")
    # compare against every month name truncated to the input's length;
    # raises ValueError for a non-month string (unchanged behaviour)
    mn3s = [i[:n] for i in mns]
    return mn3s.index(mn.lower()) + 1
def isSeason_(mmm, ismmm_=True):
    """
    ... if input string is a season named with 1st letters of consecutive
    months (e.g. 'jja'); with ismmm_=False the four English season
    names are accepted as well ...
    """
    doubled = 'jfmamjjasond' * 2
    key = mmm.lower()
    by_letters = 1 < len(mmm) < 12 and doubled.find(key) != -1
    if ismmm_:
        return by_letters
    return by_letters or key in {'spring', 'summer', 'autumn', 'winter'}
def valid_seasons_(seasons, ismmm_=True):
    """
    ... True when every entry is a recognised season and together they
    cover each of the 12 months exactly once ...
    """
    if not all(isSeason_(season, ismmm_=ismmm_) for season in seasons):
        return False
    months = sorted(flt_l(mmmN_(season) for season in seasons))
    return np.array_equal(months, np.arange(12) + 1)
def _month_season_numbers(seasons):
    # Slot m (1-12) holds the index of the season that month m belongs to;
    # index 0 is an unused placeholder.
    numbers = [None] + [0] * 12
    for idx, season in enumerate(seasons):
        for month in mmmN_(season):
            numbers[month] = idx
    return numbers
def _month_year_adjust(seasons):
    # Slot m (1-12) is 1 for months whose season wraps the year boundary
    # and that fall before the wrap (e.g. december in 'djf'); index 0 unused.
    adjusts = [None] + [0] * 12
    for season in seasons:
        months = mmmN_(season)
        last = months[-1]
        for month in months:
            if month > last:
                adjusts[month] = 1
    return adjusts
def _m2sm(month, season):
    # True when calendar month number `month` is one of `season`'s months.
    months = mmmN_(season)
    return month in months
def _m2sya(month, seasons=('djf', 'mam', 'jja', 'son')):
    # Year adjustment (0 or 1) for `month` under the given season partition.
    adjusts = _month_year_adjust(seasons)
    return adjusts[month]
def _m2s(month, seasons=('djf', 'mam', 'jja', 'son')):
    # Name of the season that `month` falls into.
    lookup = _month_season_numbers(seasons)
    return seasons[lookup[month]]
# NumPy-vectorised wrappers of the scalar month/season helpers above;
# the keyword arguments named in `excluded` are passed through unvectorised.
m2s_ = np.vectorize(_m2s, excluded=['seasons'])
m2sm_ = np.vectorize(_m2sm, excluded=['season'])
m2sya_ = np.vectorize(_m2sya, excluded=['seasons'])
def mmmN_(mmm):
    """
    ... month numbers composing a season; accepts the month-initials form
    ('djf') or a full season name ('winter') ...
    """
    aliases = {'spring': 'mam',
               'summer': 'jja',
               'autumn': 'son',
               'winter': 'djf'}
    key = aliases.get(mmm, mmm)
    doubled = 'jfmamjjasond' * 2
    pos = doubled.find(key.lower())
    if pos == -1:
        raise ValueError("{!r} unrecognised as a season!".format(key))
    # cyl_ wraps month numbers past december back to january
    return cyl_(range(pos + 1, pos + 1 + len(key)), 13, 1)
def rest_mns_(mmm):
    """
    ... get rest of the year as a season named with months' 1st letters
    (the complement of `mmm` within the 12-month cycle) ...
    """
    doubled = 'jfmamjjasond' * 2
    pos = doubled.find(mmm.lower())
    if pos == -1:
        raise Exception('unknown season provided!')
    return doubled[pos + len(mmm):pos + 12]
def rSUM1d_(y, n, mode='valid'):
    """
    ... sum over a n-point rolling_window; masked/NaN points are
    treated as zero ...
    """
    invalid = np.ma.getmaskarray(y) if hasattr(y, 'mask') else np.isnan(y)
    filled = np.where(invalid, 0, y)
    return np.convolve(filled, np.ones(n), mode)
def rMEAN1d_(y, n, mode='valid'):
    """
    ... mean over a n-point rolling_window, ignoring masked/NaN points;
    windows containing no valid point yield NaN (masked for masked
    input) ...
    """
    invalid = np.ma.getmaskarray(y) if hasattr(y, 'mask') else np.isnan(y)
    total = np.convolve(np.where(invalid, 0, y), np.ones(n), mode)
    count = np.convolve(~invalid, np.ones(n), mode)
    count[count == 0] = np.nan
    result = total / count
    if hasattr(y, 'mask'):
        return np.ma.masked_where(np.isnan(result), result)
    return result
def l__(msg, out=True):
    """
    ... starting logging msg giving a time stamp (returned only when
    out=True, for pairing with ll_) ...
    """
    import time
    import logging
    logging.info(' ' + msg + ' -->')
    return time.time() if out else None
def ll_(msg, t0=None):
    """
    ... ending logging msg giving a time lapse if starting time stamp given
    """
    import time
    import logging
    suffix = ' <--' if t0 else ''
    logging.info(' {}{}'.format(msg, suffix))
    if t0:
        logging.info(' ' + rTime_(time.time() - t0))
    logging.info(' ')
def slctStrL_(strl, incl=None, excl=None): #, incl_or=False, excl_and=False):
"""
... select items including/excluding sts(s) for a list of str ...
"""
def _in(s, L):
if isinstance(L, str):
return L in s
else:
return _inc(s, L)
def _inc(s, L):
return all([i in s if isinstance(i, str) else _incl(s, i) for i in L])
def _incl(s, L):
return any([i in s if isinstance(i, str) else _inc(s, i) for i in L])
def _ex(s, L):
if isinstance(L, str):
return L not in s
else:
return _exc(s, L)
def _exc(s, L):
return any([i not in s if isinstance(i, str) else _excl(s, i)
for i in L])
def _excl(s, L):
return all([i not in s if isinstance(i, str) else _exc(s, i)
for i in L])
if incl:
strl = [i for i in strl if _in(i, incl)]
if excl:
strl = [i for i in strl if _ex(i, excl)]
#if incl:
# incl = [incl] if isinstance(incl, str) else incl
# if incl_or:
# strl = [i for i in strl if any([ii in i for ii in incl])]
# else:
# strl = [i for i in strl if all([ii in i for ii in incl])]
#if excl:
# excl = [excl] if isinstance(excl, str) else excl
# if excl_and:
# strl = | |
(16 - len(s)) + s
coords = [divmod(i, 4) for i in range(len(s)) if s[i] == '1']
for c in coords:
board[c] = color
return board
def _getBlankBoard(self):
# TODO: change to numpy.array
board = []
for x in range(BOARDWIDTH):
board.append([EMPTY_SPACE] * BOARDHEIGHT)
return board
def _getSwappingGems(self, board, firstXY, secondXY):
# If the gems at the (X, Y) coordinates of the two gems are adjacent,
# then their 'direction' keys are set to the appropriate direction
# value to be swapped with each other.
# Otherwise, (None, None) is returned.
firstGem = {'imageNum': board[firstXY['x']][firstXY['y']],
'x': firstXY['x'],
'y': firstXY['y']}
secondGem = {'imageNum': board[secondXY['x']][secondXY['y']],
'x': secondXY['x'],
'y': secondXY['y']}
highlightedGem = None
if firstGem['x'] == secondGem['x'] + 1 and firstGem['y'] == secondGem['y']:
firstGem['direction'] = LEFT
secondGem['direction'] = RIGHT
elif firstGem['x'] == secondGem['x'] - 1 and firstGem['y'] == secondGem['y']:
firstGem['direction'] = RIGHT
secondGem['direction'] = LEFT
elif firstGem['y'] == secondGem['y'] + 1 and firstGem['x'] == secondGem['x']:
firstGem['direction'] = UP
secondGem['direction'] = DOWN
elif firstGem['y'] == secondGem['y'] - 1 and firstGem['x'] == secondGem['x']:
firstGem['direction'] = DOWN
secondGem['direction'] = UP
else:
# These gems are not adjacent and can't be swapped.
return None, None
return firstGem, secondGem
def _canMakeMove(self, board):
return len(self._possibleMoves(board)) > 0
def _possibleMoves(self, board):
# Return True if the board is in a state where a matching
# move can be made on it. Otherwise return False.
# The patterns in oneOffPatterns represent gems that are configured
# in a way where it only takes one move to make a triplet.
oneOffPatterns = (((0,1), (1,0), (2,0), ((0,0), (0,1))),
((0,1), (1,1), (2,0), ((2,0), (2,1))),
((0,0), (1,1), (2,0), ((1,0), (1,1))),
((0,1), (1,0), (2,1), ((1,0), (1,1))),
((0,0), (1,0), (2,1), ((2,0), (2,1))),
((0,0), (1,1), (2,1), ((0,0), (0,1))),
((0,0), (0,2), (0,3), ((0,0), (0,1))),
((0,0), (0,1), (0,3), ((0,2), (0,3))))
# The x and y variables iterate over each space on the board.
# If we use + to represent the currently iterated space on the
# board, then this pattern: ((0,1), (1,0), (2,0))refers to identical
# gems being set up like this:
#
# +A
# B
# C
#
# That is, gem A is offset from the + by (0,1), gem B is offset
# by (1,0), and gem C is offset by (2,0). In this case, gem A can
# be swapped to the left to form a vertical three-in-a-row triplet.
#
# There are eight possible ways for the gems to be one move
# away from forming a triple, hence oneOffPattern has 8 patterns.
moves = []
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
for pat in oneOffPatterns:
# check each possible pattern of "match in next move" to
# see if a possible move can be made.
if (self._getGemAt(board, x+pat[0][0], y+pat[0][1]) == \
self._getGemAt(board, x+pat[1][0], y+pat[1][1]) == \
self._getGemAt(board, x+pat[2][0], y+pat[2][1]) != None):
moves.append(map(lambda z: (z[0] + x, z[1] + y), pat[3]))
if (self._getGemAt(board, x+pat[0][1], y+pat[0][0]) == \
self._getGemAt(board, x+pat[1][1], y+pat[1][0]) == \
self._getGemAt(board, x+pat[2][1], y+pat[2][0]) != None):
moves.append(map(lambda z: (z[1] + x, z[0] + y), pat[3]))
return moves
def _pullDownAllGems(self, board):
# pulls down gems on the board to the bottom to fill in any gaps
for x in range(BOARDWIDTH):
gemsInColumn = []
for y in range(BOARDHEIGHT):
if board[x][y] != EMPTY_SPACE:
gemsInColumn.append(board[x][y])
board[x] = ([EMPTY_SPACE] * (BOARDHEIGHT - len(gemsInColumn))) + gemsInColumn
def _getGemAt(self, board, x, y):
if x < 0 or y < 0 or x >= BOARDWIDTH or y >= BOARDHEIGHT:
return None
else:
return board[x][y]
    def _getDropSlots(self, board):
        # Creates a "drop slot" for each column and fills the slot with a
        # number of gems that that column is lacking. This function assumes
        # that the gems have been gravity dropped already.
        # Returns a list of BOARDWIDTH lists of new gem numbers (bottom-most
        # first). `board` itself is not modified; a deep copy is filled in.
        boardCopy = copy.deepcopy(board)
        self._pullDownAllGems(boardCopy)
        dropSlots = []
        for i in range(BOARDWIDTH):
            dropSlots.append([])
        # TODO: remove restriction that there can be no combos from new gems?
        # count the number of empty spaces in each column on the board
        for x in range(BOARDWIDTH):
            for y in range(BOARDHEIGHT-1, -1, -1): # start from bottom, going up
                if boardCopy[x][y] == EMPTY_SPACE:
                    possibleGems = list(range(len(GEMTYPES)))
                    for offsetX, offsetY in ((0, -1), (1, 0), (0, 1), (-1, 0)):
                        # Narrow down the possible gems we should put in the
                        # blank space so we don't end up putting two of
                        # the same gems next to each other when they drop.
                        neighborGem = self._getGemAt(boardCopy, x + offsetX, y + offsetY)
                        if neighborGem != None and neighborGem in possibleGems:
                            possibleGems.remove(neighborGem)
                    # random pick among the remaining candidates; the copy is
                    # updated so later picks see this gem as a neighbor
                    newGem = random.choice(possibleGems)
                    boardCopy[x][y] = newGem
                    dropSlots[x].append(newGem)
        return dropSlots
    def _findMatchingGems(self, board):
        # Return a list of "remove sets": each is a list of (x, y) coords of a
        # horizontal or vertical run of 3+ identical gems. `board` itself is
        # not modified; runs are blanked on a copy so none is reported twice.
        gemsToRemove = [] # a list of lists of gems in matching triplets that should be removed
        boardCopy = copy.deepcopy(board)
        # loop through each space, checking for 3 adjacent identical gems
        for x in range(BOARDWIDTH):
            for y in range(BOARDHEIGHT):
                # TODO: make 3x3 L/T-shape matches work
                # look for horizontal matches
                if self._getGemAt(boardCopy, x, y) == self._getGemAt(boardCopy, x + 1, y) == self._getGemAt(boardCopy, x + 2, y) and self._getGemAt(boardCopy, x, y) != EMPTY_SPACE:
                    targetGem = boardCopy[x][y]
                    offset = 0
                    removeSet = []
                    while self._getGemAt(boardCopy, x + offset, y) == targetGem:
                        # keep checking if there's more than 3 gems in a row
                        removeSet.append((x + offset, y))
                        # blank the copy so this run isn't re-detected
                        boardCopy[x + offset][y] = EMPTY_SPACE
                        offset += 1
                    gemsToRemove.append(removeSet)
                # look for vertical matches
                if self._getGemAt(boardCopy, x, y) == self._getGemAt(boardCopy, x, y + 1) == self._getGemAt(boardCopy, x, y + 2) and self._getGemAt(boardCopy, x, y) != EMPTY_SPACE:
                    targetGem = boardCopy[x][y]
                    offset = 0
                    removeSet = []
                    while self._getGemAt(boardCopy, x, y + offset) == targetGem:
                        # keep checking, in case there's more than 3 gems in a row
                        removeSet.append((x, y + offset))
                        boardCopy[x][y + offset] = EMPTY_SPACE
                        offset += 1
                    gemsToRemove.append(removeSet)
        return gemsToRemove
def _getDroppingGems(self, board):
# Find all the gems that have an empty space below them
boardCopy = copy.deepcopy(board)
droppingGems = []
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT - 2, -1, -1):
if boardCopy[x][y + 1] == EMPTY_SPACE and boardCopy[x][y] != EMPTY_SPACE:
# This space drops if not empty but the space below it is
droppingGems.append( {'imageNum': boardCopy[x][y], 'x': x, 'y': y, 'direction': DOWN} )
boardCopy[x][y] = EMPTY_SPACE
return droppingGems
def _moveGems(self, board, movingGems):
# movingGems is a list of dicts with keys x, y, direction, imageNum
for gem in movingGems:
if gem['y'] != ROWABOVEBOARD:
board[gem['x']][gem['y']] = EMPTY_SPACE
movex = 0
movey = 0
if gem['direction'] == LEFT:
movex = -1
elif gem['direction'] == RIGHT:
movex = 1
elif gem['direction'] == DOWN:
movey = 1
elif gem['direction'] == UP:
movey = -1
board[gem['x'] + movex][gem['y'] + movey] = gem['imageNum']
else:
# gem is located above the board (where new gems come from)
board[gem['x']][0] = gem['imageNum'] # move to top row
    def _fillBoard(self, board, points, score, animate):
        # Drop new gems from above until every column is full, mutating
        # `board` in place. `points`/`score` are only forwarded to the
        # animation; animate=False skips drawing but performs the same moves.
        dropSlots = self._getDropSlots(board)
        while dropSlots != [[]] * BOARDWIDTH:
            # do the dropping animation as long as there are more gems to drop
            movingGems = self._getDroppingGems(board)
            for x in range(len(dropSlots)):
                if len(dropSlots[x]) != 0:
                    # cause the lowest gem in each slot to begin moving in the DOWN direction
                    movingGems.append({'imageNum': dropSlots[x][0], 'x': x, 'y': ROWABOVEBOARD, 'direction': DOWN})
            boardCopy = self._getBoardCopyMinusGems(board, movingGems)
            if animate:
                animateMovingGems(boardCopy, movingGems, points, score)
            self._moveGems(board, movingGems)
            # Make the next row of gems from the drop slots
            # the lowest by deleting the previous lowest gems.
            for x in range(len(dropSlots)):
                if len(dropSlots[x]) == 0:
                    continue
                board[x][0] = dropSlots[x][0]
                del dropSlots[x][0]
def _getBoardCopyMinusGems(self, board, gems):
# Creates and returns a copy of the passed board data structure,
# with the gems in the "gems" list removed from it.
#
# Gems is a list of dicts, with keys x, y, direction, imageNum
boardCopy = copy.deepcopy(board)
# Remove some of the gems from this board data structure copy.
for gem in gems:
if gem['y'] != ROWABOVEBOARD:
boardCopy[gem['x']][gem['y']] = EMPTY_SPACE
return boardCopy
def __str__(self):
""" Ascii representation of the maze, with the current state """
return | |
2620, " Xlimli": 2621,
"hatXp": -2622, "at X ": 2623, "fXrat": 2624, "aGcX": 2626, "dIz X": 2627,
"ay X ": 2628, "elCX": -2629, "ihtX": -2630, "Xon": -2631, " hXzi": 2632,
"Xrna": 2633, "anrX": 2634, "GzX": 2635, "Xdv": 2636, " idarX": -2637,
"ap lX ": 2638, "bartX": 2639, "rkCX": 2640, "Xpla": 2641, "hIrXs": -2642,
" kXlm": 2643, " tayX": -2644, "gahX": 2645, "afsX": 2646, " hanX ": -2647,
"kadXr": -2648, "azsX": 2649, "kkabX": 2650, "bXl": -2651, " kXs ": 2652,
"kaybX": 2653, "arcX": 2654, "IlgX": 2655, "fXl": -2656, "bank Xn": 2657,
" bastX": 2658, " makX": -2659, " fXrl": 2660, " cXa ": 2661, "aS X ": 2662,
"cXv": -2663, "inamX": -2664, "IX": 2665, "fXkra": 2666, " kXna": 2667,
"aflX": 2668, "Xnirle": -2669, "ekmX": -2670, "arazX": -2671, "GdX": 2672,
"Xno": -2673, "afXn": 2675, "kXsim": 2676, " sXgi": 2677, "aCmX": 2678,
"CakX": 2679, "SartX": 2680, "ISkX": 2681, " yXp": 2682, "ak mX ": 2683,
" panX": -2684, " acXl ": -2685, "kXlic": 2686, "kkatlX": -2687, " Xlie": 2688,
"Xnde": -2689, "pahalX": 2690, "ap Xn ": 2691, "ammX": 2692, "Im Xn ": 2693,
" harX": -2694, "Xng ": -2695, "ecatX": -2696, "SahXs": 2697, "fXrt": 2698,
"IncX": 2699, "IttX": 2700, "alCX": 2701, "yazmX": 2702, " gXda": 2703,
"IksX": 2704, "vabX": 2705, "sXyon": -2706, " Xrk": 2707, "rXza": 2708,
"ar X ": 2709, "aplX": 2710, " klasX": -2711, "alXz": -2712, "IlsX": 2713,
"arzX": 2714, "kXsit": 2715, " Xsig": 2716, " hadX": -2717, "kapXt": -2718,
"Xmar": -2719, "hayalX": -2720, "sarX": 2721, "rIs X": 2722, "zXran": -2723,
"gazX": -2724, "silahX": 2725, "basXt": -2726, "kXsk": 2727, "I yX ": 2729,
"mXsir": 2730, "mXl": -2731, "kampX": 2732, "ikkatX": -2733, " adXl": -2734,
"ikahX": 2735, "am Xn ": 2736, "tXll": -2737, "InavX": 2738, "dXsk": -2739,
"Xpk": 2740, " malX ": -2741, "ad X": 2742, "IpkX": 2743, " sXf": 2744,
"mXr": -2745, "ItlX": 2746, "attXn ": -2747, "hamX": -2748, "rasXm": -2749,
"caksX": 2750, "allX": 2751, "n yanX": 2752, " samX": -2753, "haylX": -2754,
" tXk": 2755, "ISmX": 2756, " taktX": -2757, "langX": 2758, "at Xn ": 2759,
" islamX ": -2760, "dXka": -2761, "ImlX": 2762, "garX": -2763, "carX": -2764,
" mantX": 2765, "Xre": -2766, "ganXz": -2767, "IrakX": 2768, "amcX": 2769,
"IzdX": 2770, "IsmX": 2771, "vakXf": 2772, "a mX ": 2773, " manX": -2774,
"zayXr": -2775, "k nXn ": 2776, " yakX": 2777, "IfX": 2778, "Xe": -2779,
"ISlX": 2780, "parXs": -2781, "sabX": 2782, "aSmX": 2783, "Xlat": -2784,
"aClX": 2785, "avrX": 2786, "dXsip": -2787, "InmX": 2788, "kartX": 2789,
" atXna": -2790, "CarpX": 2791, " kXrl": -2792, "tXrma": 2793, "rXf": -2794,
" yangX": 2795, "Xmf": 2796, "al Xn ": 2797, "tdX": 2798, "I mX ": 2799,
"az Xn ": 2800, "akCX": 2801, "Xsrar": 2802, "tavXr": 2803, "ahlX ": 2804,
"IrsX": 2805, "ak Xn ": 2806, "atkX": 2807, "valX": -2809, "ah Xn ": 2810,
"Xcak": 2811, "ar mX ": 2812, "itabX": 2813, "akfX": 2815, "hapXs": -2816,
"aS Xn ": 2817, "ykX": 2818, "armX": 2819, "ay Xn ": 2820, "atmXs": 2821,
" talX": -2822, " kXsm": 2823, "fadXm": -2824, "arafX": 2825, "kmX": 2826,
"Xrsa": 2827, "Xsyo": -2828, "tatX": -2829, " galX": -2830, " yXk": 2831,
"Xzm": -2832, "aSkX": 2833, " hXr": 2834, "arsX": 2835, "hakX": -2836,
"arttX": 2837, "aylX": 2838, "Xa": -2839, "pmX": 2840, "IrmX": 2841,
"IydX": 2842, "amlX": 2843, "aksXz": 2844, "Xte": -2845, "an X ": 2846,
"sanayX": -2847, " Xsik": 2848, "ImsX": 2849, "aCtX": 2850, "azlX": 2851,
"azdX": 2852, "tXf": -2853, "ajX": 2854, " kXy": 2855, "ar Xn ": 2856,
"a yX ": 2857, "IntX": 2858, "akXl": 2860, "aGrX": 2861, " Xl": -2862,
"Xnav": 2863, "atCX": 2864, "Xne": -2865, "almX": 2866, " hakkX": 2867,
"Xsle": -2868, "hXzl": 2869, "atsX": 2870, "InlX": 2871, "alnX": 2872,
" yanX ": -2873, "arkX": 2874, "IllX": 2876, " kXr": 2877, "IndX": 2878,
"dlX ": 2879, "arklX": 2880, "Xstan": -2881, "rXka": -2882, "bakX": 2883,
"kXsa": 2884, "avcX": 2885, "atlX": 2886, " camX": -2887, "ImcX": 2888,
" alX ": -2889, " sXni": 2890, "aktX": 2891, "yargX": 2892, "ItX": 2893,
"IyX": 2894, "aSlX": 2895, "IlmX": 2896, "anmX": 2898, "IktX": 2899,
"IrdX": 2900, "IrlX": 2901, "Xye": -2902, "atXk": -2904, "IbrX": 2905,
"IzlX": 2906, "kXbris": 2907, "aklX": 2908, "Xlah": -2910, "tartX": 2911,
"akXm": 2912, "ansX": 2913, "IcX": 2914, "siyasX": -2915, "lantX": 2916,
"alkX": 2917, "akXn": 2918, "GlX": 2919, "attX": 2920, "halX": -2921,
" sXk": 2922, "Xrak": 2924, "altX": 2926, "ancX": 2927, "I nX": 2928,
"aydX": 2929, "Xh": -2932, " artX": 2933, "arlX": 2934, "an Xn ": 2935,
" ayrX": 2936, "a nXn": 2937, " sXr": 2938, "aStX": 2939, "IzX": 2940,
"acX": 2941, "IStX": 2942, " dXs": 2943, "aCX": 2944, "ptX": 2945,
"amX": 2946, "IldX": 2947, "ISX": 2948, "ardX": 2949, "apX": 2950,
"aldX": 2951, "andX": 2952, "arSX": 2953, "azX": 2954, "IrX": 2955,
"anlX": 2956, "IsX": 2957, "ImX": 2958, "atX": 2959, "aSX": 2960,
"IlX": 2961, "alX": 2962, "yXl": 2963, "Xla": 2964, "CXk": 2966,
"ayX": 2967, "adX": 2968, "Xnda": 2969, "IGX": 2970, "anX": 2971,
"asX": 2972, "InX": 2973, "arX": 2974, "X": -2975}}
    # --- class-level lookup tables shared by all instances ---
    # Number of characters of context taken on each side of the cursor
    # when matching deasciification patterns (see turkish_match_pattern).
    turkish_context_size = 10
    # Accented Turkish letter -> plain ASCII letter.
    turkish_asciify_table = {u'ç': u'c',
                             u'Ç': u'C',
                             u'ğ': u'g',
                             u'Ğ': u'G',
                             u'ö': u'o',
                             u'Ö': u'O',
                             u'ü': u'u',
                             u'Ü': u'U',
                             u'ı': u'i',
                             u'İ': u'I',
                             u'ş': u's',
                             u'Ş': u'S'}
    # Any letter (ASCII upper/lower case or accented) -> lower-case ASCII.
    turkish_downcase_asciify_table = {}
    for ch in string.ascii_uppercase:
        turkish_downcase_asciify_table[ch] = ch.lower()
        turkish_downcase_asciify_table[ch.lower()] = ch.lower()
    turkish_downcase_asciify_table[u'ç'] = u'c'
    turkish_downcase_asciify_table[u'Ç'] = u'c'
    turkish_downcase_asciify_table[u'ğ'] = u'g'
    turkish_downcase_asciify_table[u'Ğ'] = u'g'
    turkish_downcase_asciify_table[u'ö'] = u'o'
    turkish_downcase_asciify_table[u'Ö'] = u'o'
    turkish_downcase_asciify_table[u'ı'] = u'i'
    turkish_downcase_asciify_table[u'İ'] = u'i'
    turkish_downcase_asciify_table[u'ş'] = u's'
    turkish_downcase_asciify_table[u'Ş'] = u's'
    turkish_downcase_asciify_table[u'ü'] = u'u'
    turkish_downcase_asciify_table[u'Ü'] = u'u'
    # ASCII letters -> lower case; accented letters -> upper-case ASCII.
    turkish_upcase_accents_table = {}
    for ch in string.ascii_uppercase:
        turkish_upcase_accents_table[ch] = ch.lower()
        turkish_upcase_accents_table[ch.lower()] = ch.lower()
    turkish_upcase_accents_table[u'ç'] = u'C'
    turkish_upcase_accents_table[u'Ç'] = u'C'
    turkish_upcase_accents_table[u'ğ'] = u'G'
    turkish_upcase_accents_table[u'Ğ'] = u'G'
    turkish_upcase_accents_table[u'ö'] = u'O'
    turkish_upcase_accents_table[u'Ö'] = u'O'
    turkish_upcase_accents_table[u'ı'] = u'I'
    # NOTE(review): u'İ' maps to lower-case u'i' while every other accented
    # letter maps to upper case — looks deliberate (the dotted/dotless I is
    # also special-cased in turkish_need_correction), but confirm.
    turkish_upcase_accents_table[u'İ'] = u'i'
    turkish_upcase_accents_table[u'ş'] = u'S'
    turkish_upcase_accents_table[u'Ş'] = u'S'
    turkish_upcase_accents_table[u'ü'] = u'U'
    turkish_upcase_accents_table[u'Ü'] = u'U'
    def __init__(self, ascii_string):
        """Store the ASCII input; turkish_string is the working copy that
        convert_to_turkish() progressively rewrites."""
        self.ascii_string = ascii_string
        self.turkish_string = ascii_string
    def print_turkish_string(self):
        """Print the current (possibly deasciified) Turkish string."""
        print(self.turkish_string)
def set_char_at(self, mystr, pos, c):
return mystr[0:pos] + c + mystr[pos+1:]
def convert_to_turkish(self):
"""Convert a string with ASCII-only letters into one with
Turkish letters."""
for i in range(len(self.turkish_string)):
c = self.turkish_string[i]
if self.turkish_need_correction(c, point = i):
#self.turkish_string[i] = turkish_toggle_accent(c)
#self.turkish_string = self.turkish_string + self.turkish_toggle_accent(c)
self.turkish_string = self.set_char_at(self.turkish_string, i, self.turkish_toggle_accent(c))
else:
#self.turkish_string[i] = c
#self.turkish_string = self.turkish_string + c
self.turkish_string = self.set_char_at(self.turkish_string, i, c)
return self.turkish_string
def turkish_toggle_accent(self, c):
turkish_toggle_accent_table = {
u'c': u'ç',
u'C': u'Ç',
u'g': u'ğ',
u'G': u'Ğ',
u'o': u'ö',
u'O': u'Ö',
u'u': u'ü',
u'U': u'Ü',
u'i': u'ı',
u'I': u'İ',
u's': u'ş',
u'S': u'Ş',
u'ç': u'c',
u'Ç': u'C',
u'ğ': u'g',
u'Ğ': u'G',
u'ö': u'o',
u'Ö': u'O',
u'ü': u'u',
u'Ü': u'U',
u'ı': u'i',
u'İ': u'I',
u'ş': u's',
u'Ş': u'S'
}
return turkish_toggle_accent_table.get(c, c)
def turkish_need_correction(self, char, point = 0):
"""Determine if char at cursor needs correction."""
ch = char
tr = Deasciifier.turkish_asciify_table.get(ch, ch)
pl = Deasciifier.turkish_pattern_table.get(tr.lower(),
False)
if pl != False:
m = self.turkish_match_pattern(pl, point)
else:
m = False
if tr == u'I':
if ch == tr:
return not m
else:
return m
else:
if ch == tr:
return m
else:
return not m
def turkish_match_pattern(self, dlist, point = 0):
"""Check if the pattern is in the pattern table.
"""
rank = 2 * len(dlist)
str = self.turkish_get_context(Deasciifier.turkish_context_size,
point = point)
start = 0
end = 0
_len = len(str)
while start <= Deasciifier.turkish_context_size:
end = 1 + Deasciifier.turkish_context_size
while end <= _len:
s = str[start:end]
r = dlist.get(s, False)
if r and abs(r) < abs(rank):
rank = r
end = 1 + end
start = 1 + start
return rank > 0
def turkish_get_context(self, size = turkish_context_size, point = 0):
s = ' ' * (1 + (2 * size))
s = s[0:size] + 'X' + s[size+1:]
i = 1 + size
space = False
index = point
current_char = self.turkish_string[index]
index = index + 1
while i < len(s) and not space and index < len(self.ascii_string):
current_char = self.turkish_string[index]
x = Deasciifier.turkish_downcase_asciify_table.get(current_char
,False)
if not x:
if not space:
i = i + 1
| |
import json
import logging
import os
import glob
from os.path import join, exists
import shutil
import tempfile
import dateutil.parser
from util.job_util import exec_command
from pm_proxy.pm_base import PackageManagerProxy
class NpmjsProxy(PackageManagerProxy):
    """Package-manager proxy for npmjs, backed by the npm command-line tool."""
    # npm-scripts: How npm handles the "scripts" field
    # https://docs.npmjs.com/misc/scripts
    # default values:
    # server.js -> "start": "node server.js"
    # binding.gyp -> "install": "node-gyp rebuild"
    # Lifecycle script names grouped by phase; used by has_install/has_test/
    # main to check which hooks a package declares in its metadata.
    _BUILD_SCRIPTS = ('build',)
    _INSTALL_SCRIPTS = ('install', 'preinstall', 'postinstall')
    _UNINSTALL_SCRIPTS = ('uninstall', 'preuninstall', 'postuninstall')
    _TEST_SCRIPTS = ('test', 'pretest', 'posttest', 'test:browser', 'test:browserless')
    _PUBLISH_SCRIPTS = ('prepublish', 'prepare', 'prepublishOnly', 'prepack', 'postpack', 'publish', 'postpublish')
    _START_SCRIPTS = ('prestart', 'start', 'poststart')
    _STOP_SCRIPTS = ('prestop', 'stop', 'poststop')
    _RESTART_SCRIPTS = ('prerestart', 'restart', 'postrestart')
    _SHRINKWRAP_SCRIPTS = ('preshrinkwrap', 'shrinkwrap', 'postshrinkwrap')
    def __init__(self, registry=None, cache_dir=None, isolate_pkg_info=False):
        """Create an npmjs proxy.

        registry: npm registry URL (stored; not used by the methods here).
        cache_dir: directory where metadata/dependency files are cached.
        isolate_pkg_info: keep per-package info in separate directories.
        """
        super(NpmjsProxy, self).__init__()
        self.registry = registry
        self.cache_dir = cache_dir
        self.isolate_pkg_info = isolate_pkg_info
        # serialization formats for cached metadata and dependency files
        self.metadata_format = 'json'
        self.dep_format = 'json'
def _get_pkg_fname(self, pkg_name, pkg_version=None, suffix='tgz'):
if pkg_name.startswith('@'):
pkg_name = pkg_name.lstrip('@').replace('/', '-')
if pkg_version is None:
return '%s-*.%s' % (pkg_name, suffix)
else:
return '%s-%s.%s' % (pkg_name, pkg_version, suffix)
def download(self, pkg_name, pkg_version=None, outdir=None, binary=False, with_dep=False):
if pkg_version:
download_cmd = ['npm', 'pack', '%s@%s' % (pkg_name, pkg_version)]
else:
download_cmd = ['npm', 'pack', pkg_name]
# Node.js tool for easy binary deployment of C++ addons
# https://github.com/mapbox/node-pre-gyp/
if binary:
logging.warning("support for binary downloading is not added yet!")
# npm pack with dependencies
# https://github.com/npm/npm/issues/4210
if with_dep:
logging.warning("support for packing dependencies is not added yet!")
exec_command('npm pack', download_cmd, cwd=outdir)
download_path = join(outdir, self._get_pkg_fname(pkg_name=pkg_name, pkg_version=pkg_version))
if pkg_version is None:
download_paths = glob.glob(download_path)
if len(download_paths) == 1:
return download_paths[0]
else:
if exists(download_path):
return download_path
logging.error("failed to download pkg %s ver %s", pkg_name, pkg_version)
return None
def _install_init(self, install_dir):
# run npm init to initialize repo
npm_init_cmd = ['npm', 'init', '-y']
exec_command('npm init', npm_init_cmd, cwd=install_dir)
def install(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, install_dir=None, outdir=None,
sudo=False):
if pkg_version:
install_cmd = ['npm', 'install', '%s@%s' % (pkg_name, pkg_version)]
else:
install_cmd = ['npm', 'install', pkg_name]
# install with sudo privilege and globally
if sudo:
install_cmd = ['sudo'] + install_cmd + ['-g']
install_cmd = self.decorate_strace(pkg_name=pkg_name, pkg_version=pkg_version, trace=trace,
trace_string_size=trace_string_size, sudo=sudo, outdir=outdir,
command=install_cmd)
exec_command('npm install', install_cmd, cwd=install_dir)
def install_file(self, infile, trace=False, trace_string_size=1024, sudo=False, install_dir=None, outdir=None):
# FIXME: install prebuilt C++ addons to avoid building dependencies
install_cmd = ['npm', 'install', infile]
if sudo:
install_cmd = ['sudo'] + install_cmd + ['-g']
install_cmd = self.decorate_strace_file(infile=infile, trace=trace, trace_string_size=trace_string_size,
sudo=sudo, outdir=outdir, command=install_cmd)
exec_command('npm install file', install_cmd, cwd=install_dir)
def uninstall(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, sudo=False, install_dir=None,
outdir=None):
if pkg_version:
uninstall_cmd = ['npm', 'uninstall', '%s@%s' % (pkg_name, pkg_version)]
else:
uninstall_cmd = ['npm', 'uninstall', pkg_name]
if sudo:
uninstall_cmd = ['sudo'] + uninstall_cmd + ['-g']
uninstall_cmd = self.decorate_strace(pkg_name=pkg_name, pkg_version=pkg_version, trace=trace,
trace_string_size=trace_string_size, sudo=sudo, outdir=outdir,
command=uninstall_cmd)
exec_command('npm uninstall', uninstall_cmd, cwd=install_dir)
def get_metadata(self, pkg_name, pkg_version=None):
# load cached metadata information
pkg_info_dir = self.get_pkg_info_dir(pkg_name=pkg_name)
if pkg_info_dir is not None:
metadata_fname = self.get_metadata_fname(pkg_name=pkg_name, pkg_version=pkg_version,
fmt=self.metadata_format)
metadata_file = join(pkg_info_dir, metadata_fname)
if exists(metadata_file):
logging.warning("get_metadata: using cached metadata_file %s!", metadata_file)
if self.metadata_format == 'json':
try:
pkg_info = json.load(open(metadata_file, 'r'))
if (len(pkg_info) == 1 and "error" in pkg_info and pkg_info["error"]["summary"] ==
"getaddrinfo ENOTFOUND registry.npmjs.us registry.npmjs.us:443"):
logging.error("previous fetch of metadata failed, regenerating!")
else:
return pkg_info
except:
logging.debug("fail to load metadata_file: %s, regenerating!", metadata_file)
else:
logging.error("get_metadata: output format %s is not supported!", self.metadata_format)
return None
# run npm view to get the package info, show/info/v are aliases of view
view_cmd = ['npm', 'view', pkg_name, '--json']
try:
pkg_info_str = exec_command('npm view', view_cmd, ret_stdout=True)
pkg_info = json.loads(pkg_info_str)
except:
logging.error("fail in get_metadata for pkg %s, ignoring!", pkg_name)
return None
# optionally cache metadata
if pkg_info_dir is not None:
if not exists(pkg_info_dir):
os.makedirs(pkg_info_dir)
metadata_fname = self.get_metadata_fname(pkg_name=pkg_name, pkg_version=pkg_version,
fmt=self.metadata_format)
metadata_file = join(pkg_info_dir, metadata_fname)
if self.metadata_format == 'json':
json.dump(pkg_info, open(metadata_file, 'w'), indent=2)
else:
logging.error("get_metadata: output format %s is not supported!", self.metadata_format)
return pkg_info
def get_versions(self, pkg_name, max_num=15, min_gap_days=30, with_time=False):
pkg_info = self.get_metadata(pkg_name=pkg_name)
if pkg_info is None or 'time' not in pkg_info:
return []
try:
version_date = [(ver, dateutil.parser.parse(ts)) for ver, ts in pkg_info['time'].items()
if ver not in ('modified', 'created')]
except Exception as e:
logging.error("error parsing timestamps in %s", pkg_info['time'])
return []
return self.filter_versions(version_date=version_date, max_num=max_num, min_gap_days=min_gap_days,
with_time=with_time)
def get_author(self, pkg_name):
pkg_info = self.get_metadata(pkg_name=pkg_name)
if pkg_info is None or 'time' not in pkg_info:
return {}
author = pkg_info.get('author', None)
maintainers = pkg_info.get('maintainers', None)
# users = pkg_info.get('users', None)
npmUser = pkg_info.get('_npmUser', None)
return {'author': author, 'maintainers': maintainers, 'npmUser': npmUser}
    def get_dep(self, pkg_name, pkg_version=None, flatten=False, cache_only=False):
        """Return package dependencies as a {name: version} dict.

        A cached dep file is used when present.  Otherwise the package is
        installed into a temporary directory and the `npm shrinkwrap`
        output is parsed: the flattened map is every installed package
        except the package itself; the direct map is the subset listed in
        the package's own 'requires'.  Returns the flattened map when
        flatten=True, else the direct map; None on cache-format errors or
        when cache_only=True and nothing is cached.
        """
        super(NpmjsProxy, self).get_dep(pkg_name=pkg_name, pkg_version=pkg_version, flatten=flatten,
                                        cache_only=cache_only)
        # load cached dependency information
        pkg_info_dir = self.get_pkg_info_dir(pkg_name=pkg_name)
        if pkg_info_dir is not None:
            if flatten:
                dep_fname = self.get_flatten_dep_fname(pkg_name=pkg_name, pkg_version=pkg_version, fmt=self.dep_format)
            else:
                dep_fname = self.get_dep_fname(pkg_name=pkg_name, pkg_version=pkg_version, fmt=self.dep_format)
            dep_file = join(pkg_info_dir, dep_fname)
            if exists(dep_file):
                logging.warning("get_dep: using cached dep_file %s!", dep_file)
                if self.dep_format == 'json':
                    try:
                        # NOTE(review): handle left to the GC and bare except
                        # hides errors — kept as-is in this doc-only pass
                        return json.load(open(dep_file, 'r'))
                    except:
                        logging.debug("fail to load dep_file: %s, regenerating!", dep_file)
                else:
                    logging.error("get_dep: output format %s is not supported!", self.dep_format)
                    return None
        if cache_only:
            return None
        # use npm install to get the dependencies
        temp_install_dir = tempfile.mkdtemp(prefix='get_dep-')
        self.install(pkg_name=pkg_name, pkg_version=pkg_version, install_dir=temp_install_dir)
        shrinkwrap_cmd = ['npm', 'shrinkwrap']
        exec_command('npm shrinkwrap', shrinkwrap_cmd, cwd=temp_install_dir)
        # FIXME: seems that package-lock.json is not always available
        temp_npm_shrinkwrap = join(temp_install_dir, 'npm-shrinkwrap.json')
        dep_pkgs = {}
        flatten_dep_pkgs = {}
        if not exists(temp_npm_shrinkwrap):
            logging.error("fail to get dependency for %s", pkg_name)
        else:
            try:
                npm_shrinkwrap_info = json.load(open(temp_npm_shrinkwrap, 'r'))
                if 'dependencies' in npm_shrinkwrap_info and pkg_name in npm_shrinkwrap_info['dependencies']:
                    # flattened deps: everything installed except the package itself
                    flatten_dep_pkgs = {dep_name: dep_info['version'] for dep_name, dep_info
                                        in npm_shrinkwrap_info['dependencies'].items() if dep_name != pkg_name}
                    # direct deps: the subset named in the package's 'requires'
                    if 'requires' in npm_shrinkwrap_info['dependencies'][pkg_name]:
                        dep_pkg_names = npm_shrinkwrap_info['dependencies'][pkg_name]['requires'].keys()
                        dep_pkgs = {dep_name: dep_version for dep_name, dep_version in flatten_dep_pkgs.items()
                                    if dep_name in dep_pkg_names}
                else:
                    logging.error("no dependency including self is found for %s, info: %s", pkg_name, npm_shrinkwrap_info)
            except Exception as e:
                logging.error("failed while getting dependencies (%s) for pkg %s: %s!", flatten_dep_pkgs, pkg_name, str(e))
        logging.warning("%s has %d deps and %d flatten deps", pkg_name, len(dep_pkgs), len(flatten_dep_pkgs))
        # optionally cache both dependency maps
        if pkg_info_dir is not None:
            if not exists(pkg_info_dir):
                os.makedirs(pkg_info_dir)
            dep_fname = self.get_dep_fname(pkg_name=pkg_name, pkg_version=pkg_version, fmt=self.dep_format)
            dep_file = join(pkg_info_dir, dep_fname)
            flatten_dep_fname = self.get_flatten_dep_fname(pkg_name=pkg_name, pkg_version=pkg_version, fmt=self.dep_format)
            flatten_dep_file = join(pkg_info_dir, flatten_dep_fname)
            if self.dep_format == 'json':
                json.dump(dep_pkgs, open(dep_file, 'w'), indent=2)
                json.dump(flatten_dep_pkgs, open(flatten_dep_file, 'w'), indent=2)
            else:
                logging.error("get_dep: output format %s is not supported!", self.dep_format)
        # remove the installation directory
        shutil.rmtree(temp_install_dir)
        return flatten_dep_pkgs if flatten else dep_pkgs
def install_dep(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, sudo=False, install_dir=None,
outdir=None):
# sanity check
if install_dir is None and not sudo:
logging.error("for npmjs nonsudo, install_dir in install_dep is None, doesn't make sense!")
return
# get package dependency, and then init and install the dependencies
dep_pkgs = self.get_dep(pkg_name=pkg_name, pkg_version=pkg_version)
dep_pkgs_args = ['%s@%s' % (dep_name, dep_version) for dep_name, dep_version in dep_pkgs.items()]
install_dep_cmd = ['npm', 'install'] + dep_pkgs_args
if sudo:
install_dep_cmd = ['sudo'] + install_dep_cmd + ['-g']
else:
self._install_init(install_dir=install_dir)
install_dep_cmd = self.decorate_strace(pkg_name=pkg_name, pkg_version=pkg_version, trace=trace,
trace_string_size=trace_string_size, sudo=sudo, outdir=outdir,
command=install_dep_cmd, is_dep=True)
exec_command('npm install dependency', install_dep_cmd, cwd=install_dir)
def has_install(self, pkg_name, pkg_version=None, binary=False, with_dep=False):
"""
pkg_info = self.get_metadata(pkg_name=pkg_name, pkg_version=pkg_version)
return pkg_info and 'scripts' in pkg_info and any(s in pkg_info['scripts'] for s in self._INSTALL_SCRIPTS)
"""
return True
    def test(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, sudo=False, install_dir=None,
             outdir=None, timeout=None):
        """Run the package's npm test scripts (not implemented yet)."""
        # run npm test
        pass
def has_test(self, pkg_name, pkg_version=None, binary=False, with_dep=False):
pkg_info = self.get_metadata(pkg_name=pkg_name, pkg_version=pkg_version)
return pkg_info and 'scripts' in pkg_info and any(s in pkg_info['scripts'] for s in self._TEST_SCRIPTS)
def _get_npm_root(self, sudo, install_dir):
if not sudo and install_dir is None:
logging.error("for npmjs nonsudo, install_dir in main is None, doesn't make sense!")
return
if sudo:
npm_root = exec_command('npm root', ['npm', 'root', '-g'], ret_stdout=True).strip()
else:
npm_root = exec_command('npm root', ['npm', 'root'], cwd=install_dir, ret_stdout=True).strip()
return npm_root
def main(self, pkg_name, pkg_version=None, trace=False, trace_string_size=1024, sudo=False, install_dir=None,
outdir=None, timeout=None):
# assume that the package is installed, and get the root directory for the installed package.
main_cmd = ['python', 'main.py', pkg_name, '-m', 'npmjs', '-r',
self._get_npm_root(sudo=sudo, install_dir=install_dir)]
# get the scripts or binaries to run
# http://2ality.com/2016/01/locally-installed-npm-executables.html
pkg_info = self.get_metadata(pkg_name=pkg_name, pkg_version=pkg_version)
binaries = pkg_info.get('bin', {})
if type(binaries) == str:
# if there is only one binary, then name is pkg_name
binaries = {pkg_name: binaries}
for binary in binaries:
main_cmd += ['-b', binary]
# available scripts and to-test scripts
scripts = pkg_info.get('scripts', {})
scripts_to_test = self._START_SCRIPTS + self._STOP_SCRIPTS + self._RESTART_SCRIPTS
main_scripts = {k: scripts[k] for k in set(scripts) & set(scripts_to_test)}
for main_script in main_scripts:
main_cmd += ['-s', main_script]
exec_command('python main.py', main_cmd, cwd="pm_proxy/scripts", timeout=timeout)
def has_main(self, pkg_name, pkg_version=None, binary=False, with_dep=False):
# Specifics of npm's package.json handling
| |
# scripts/data_processing_combined.py
"""
This module processes the data of simulated pairwise interactions.
The maximum characters per line is set to be 120.
"""
import glob
import os
import shelve
import platform
# import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# import progressbar
# from scipy.integrate import RK45
# from scipy.integrate import solve_ivp
# from scipy.spatial import Voronoi as scipyVoronoi
# import scipy.io
# from scipy.spatial import distance as scipy_distance
# Select the project directory for known machines; fall back to the current
# working directory anywhere else, and chdir into the project when needed.
if platform.node() == 'NOTESIT43' and platform.system() == 'Windows':
    projectDir = "D:\\simulationFolder\\spinning_rafts_sim2"
elif platform.node() == 'NOTESIT71' and platform.system() == 'Linux':
    projectDir = r'/media/wwang/shared/spinning_rafts_simulation/spinning_rafts_sim2'
else:
    projectDir = os.getcwd()
if projectDir != os.getcwd():
    os.chdir(projectDir)
# project-local helpers; the import relies on the chdir above
import scripts.functions_spinning_rafts as fsr
dataDir = os.path.join(projectDir, 'data')
# %% load simulated data in one main folder
os.chdir(dataDir)
rootFolderTreeGen = os.walk(dataDir)
_, mainFolders, _ = next(rootFolderTreeGen)
mainFolderID = 1  # which subfolder of dataDir to process
os.chdir(mainFolders[mainFolderID])
dataFileList = glob.glob('*.dat')
dataFileList.sort()
mainDataList = []
variableListsForAllMainData = []
# load every shelve .dat file into a plain dict (one per simulation run)
for dataID in range(len(dataFileList)):
    dataFileToLoad = dataFileList[dataID].partition('.dat')[0]
    tempShelf = shelve.open(dataFileToLoad)
    variableListOfOneMainDataFile = list(tempShelf.keys())
    expDict = {}
    for key in tempShelf:
        try:
            expDict[key] = tempShelf[key]
        except TypeError:
            # skip entries that cannot be deserialized from the shelf
            pass
    tempShelf.close()
    mainDataList.append(expDict)
    variableListsForAllMainData.append(variableListOfOneMainDataFile)
# %% data treatment and output to csv
numOfRafts = mainDataList[0]['numOfRafts']
if numOfRafts == 2:
    # Pairwise (two-raft) analysis: raft-raft distance, orbiting speed and
    # per-raft spin speeds (means, stds, FFT spectra), exported to csv.
    timeStepSize = mainDataList[0]['timeStepSize']  # unit: s, assuming the same for all data in the list
    samplingRate = 1 / timeStepSize  # unit fps
    diameterOfRaftInMicron = 300  # micron
    startOfSamplingStep = 0  # check the variable mainDataList[0]['numOfTimeSteps']
    # diameterOfRaftInPixel = 146 # pixel 124 for 2x mag, 146 for 2.5x object,
    # scaleBar = diameterOfRaftInMicron/diameterOfRaftInPixel # micron per pixel.
    # 300 micron = 124 pixel -> 2x objective, 300 micron = 146 pixel -> 2.5x objective
    # initialize data frames
    varsForMainData = ['mainFolderName', 'experimentName', 'batchNum', 'magneticFieldRotationRPS',
                       'distancesMean', 'distancesSTD', 'orbitingSpeedsMean', 'orbitingSpeedsSTD',
                       'raft1SpinSpeedsMean', 'raft1SpinSpeedsSTD', 'raft2SpinSpeedsMean', 'raft2SpinSpeedsSTD']
    dfMainData = pd.DataFrame(columns=varsForMainData, index=range(len(mainDataList)))
    dfFFTDist = pd.DataFrame(columns=['fDistances'])
    dfFFTOrbitingSpeeds = pd.DataFrame(columns=['fOrbitingSpeeds'])
    dfFFTRaft1Spin = pd.DataFrame(columns=['fRaft1SpinSpeeds'])
    dfFFTRaft2Spin = pd.DataFrame(columns=['fRaft2SpinSpeeds'])
    for dataID in range(len(mainDataList)):
        # center-to-center distance between the two rafts over time
        raft1Locations = mainDataList[dataID]['raftLocations'][0, startOfSamplingStep:, :]
        raft2Locations = mainDataList[dataID]['raftLocations'][1, startOfSamplingStep:, :]
        vector1To2 = raft2Locations - raft1Locations
        distances = np.sqrt(vector1To2[:, 0] ** 2 + vector1To2[:, 1] ** 2)
        distancesMean = distances.mean()  #
        distancesSTD = np.std(distances)
        fDistances, pDistances = fsr.fft_general(samplingRate, distances)
        # orbiting speed from the unwrapped angle of the raft1->raft2 vector
        phase1To2 = np.arctan2(vector1To2[:, 1], vector1To2[:, 0]) * 180 / np.pi
        # note that the sign of y is flipped, so as to keep the coordination in the right-handed coordinate
        phasesAjusted = fsr.adjust_phases(phase1To2)
        orbitingSpeeds = np.gradient(phasesAjusted) * samplingRate / 180 * np.pi
        orbitingSpeedsMean = orbitingSpeeds.mean()
        orbitingSpeedsSTD = orbitingSpeeds.std()
        fOrbitingSpeeds, pOrbitingSpeeds = fsr.fft_general(samplingRate, orbitingSpeeds)
        # per-raft spin speeds (rev/s) from unwrapped orientations
        raft1Orientations = mainDataList[dataID]['raftOrientations'][0, startOfSamplingStep:]
        raft2Orientations = mainDataList[dataID]['raftOrientations'][1, startOfSamplingStep:]
        raft1OrientationsAdjusted = fsr.adjust_phases(raft1Orientations)
        raft2OrientationsAdjusted = fsr.adjust_phases(raft2Orientations)
        raft1SpinSpeeds = np.gradient(raft1OrientationsAdjusted) * samplingRate / 360
        raft2SpinSpeeds = np.gradient(raft2OrientationsAdjusted) * samplingRate / 360
        raft1SpinSpeedsMean = raft1SpinSpeeds.mean()
        raft2SpinSpeedsMean = raft2SpinSpeeds.mean()
        raft1SpinSpeedsSTD = raft1SpinSpeeds.std()
        raft2SpinSpeedsSTD = raft2SpinSpeeds.std()
        fRaft1SpinSpeeds, pRaft1SpinSpeeds = fsr.fft_general(samplingRate, raft1SpinSpeeds)
        fRaft2SpinSpeeds, pRaft2SpinSpeeds = fsr.fft_general(samplingRate, raft2SpinSpeeds)
        # store in dataframes
        dfMainData.loc[dataID, 'mainFolderName'] = mainFolders[mainFolderID]
        # if mainDataList[dataID]['isVideo'] == 0:
        #     dfMainData.loc[dataID,'experimentName'] = \
        #         mainDataList[dataID]['subfolders'][mainDataList[dataID]['expID']]
        # elif mainDataList[dataID]['isVideo'] == 1:
        #     dfMainData.loc[dataID,'experimentName'] = \
        #         mainDataList[dataID]['videoFileList'][mainDataList[dataID]['expID']]
        # dfMainData.loc[dataID,'batchNum'] = mainDataList[dataID]['batchNum']
        # sign flips below convert to the experiment's sign convention
        dfMainData.loc[dataID, 'magneticFieldRotationRPS'] = - mainDataList[dataID]['magneticFieldRotationRPS']
        dfMainData.loc[dataID, 'distancesMean'] = distancesMean - diameterOfRaftInMicron
        dfMainData.loc[dataID, 'distancesSTD'] = distancesSTD
        dfMainData.loc[dataID, 'orbitingSpeedsMean'] = -orbitingSpeedsMean
        dfMainData.loc[dataID, 'orbitingSpeedsSTD'] = orbitingSpeedsSTD
        dfMainData.loc[dataID, 'raft1SpinSpeedsMean'] = -raft1SpinSpeedsMean
        dfMainData.loc[dataID, 'raft1SpinSpeedsSTD'] = raft1SpinSpeedsSTD
        dfMainData.loc[dataID, 'raft2SpinSpeedsMean'] = -raft2SpinSpeedsMean
        dfMainData.loc[dataID, 'raft2SpinSpeedsSTD'] = raft2SpinSpeedsSTD
        # the frequency-axis column of each FFT frame is written once
        if len(dfFFTDist) == 0:
            dfFFTDist['fDistances'] = fDistances
        # colName = str(mainDataList[dataID]['batchNum']) + '_' \
        #           + str(mainDataList[dataID]['magneticFieldRotationRPS']).zfill(4)
        colName = str(-mainDataList[dataID]['magneticFieldRotationRPS']).zfill(4)
        dfFFTDist[colName] = pDistances
        if len(dfFFTOrbitingSpeeds) == 0:
            dfFFTOrbitingSpeeds['fOrbitingSpeeds'] = fOrbitingSpeeds
        dfFFTOrbitingSpeeds[colName] = pOrbitingSpeeds
        if len(dfFFTRaft1Spin) == 0:
            dfFFTRaft1Spin['fRaft1SpinSpeeds'] = fRaft1SpinSpeeds
        dfFFTRaft1Spin[colName] = pRaft1SpinSpeeds
        if len(dfFFTRaft2Spin) == 0:
            dfFFTRaft2Spin['fRaft2SpinSpeeds'] = fRaft2SpinSpeeds
        dfFFTRaft2Spin[colName] = pRaft2SpinSpeeds
    dfMainData = dfMainData.infer_objects()
    # dfMainData.sort_values(by = ['batchNum','magneticFieldRotationRPS'], ascending = [True, False], inplace = True)
    dfMainData.sort_values(by=['magneticFieldRotationRPS'], ascending=[False], inplace=True)
    dfFFTDist = dfFFTDist.infer_objects()
    dfFFTOrbitingSpeeds = dfFFTOrbitingSpeeds.infer_objects()
    dfFFTRaft1Spin = dfFFTRaft1Spin.infer_objects()
    dfFFTRaft2Spin = dfFFTRaft2Spin.infer_objects()
    dfFFTDist = dfFFTDist.reindex(sorted(dfFFTDist.columns, reverse=True), axis='columns')
    dfFFTOrbitingSpeeds = dfFFTOrbitingSpeeds.reindex(sorted(dfFFTOrbitingSpeeds.columns, reverse=True), axis='columns')
    dfFFTRaft1Spin = dfFFTRaft1Spin.reindex(sorted(dfFFTRaft1Spin.columns, reverse=True), axis='columns')
    dfFFTRaft2Spin = dfFFTRaft2Spin.reindex(sorted(dfFFTRaft2Spin.columns, reverse=True), axis='columns')
    dfMainData.plot.scatter(x='magneticFieldRotationRPS', y='distancesMean')
    plt.show()
    # output to csv files
    mainDataFileName = mainFolders[mainFolderID]
    colNames = ['batchNum', 'magneticFieldRotationRPS',
                'distancesMean', 'distancesSTD', 'orbitingSpeedsMean', 'orbitingSpeedsSTD',
                'raft1SpinSpeedsMean', 'raft1SpinSpeedsSTD', 'raft2SpinSpeedsMean', 'raft2SpinSpeedsSTD']
    dfMainData.to_csv(mainDataFileName + '.csv', index=False, columns=colNames)
    # BFieldStrength = '10mT'
    BFieldStrength = str(mainDataList[0]['magneticFieldStrength'] * 1000).zfill(4) + 'mT'
    dfFFTDist.to_csv('fft_' + BFieldStrength + '_distance.csv', index=False)
    dfFFTOrbitingSpeeds.to_csv('fft_' + BFieldStrength + '_orbitingSpeeds.csv', index=False)
    dfFFTRaft1Spin.to_csv('fft_' + BFieldStrength + '_raft1SpinSpeeds.csv', index=False)
    dfFFTRaft2Spin.to_csv('fft_' + BFieldStrength + '_raft2SpinSpeeds.csv', index=False)
    # testing the random distribution sampling:
    # mu, sigma = 0, 0.01 # mean and standard deviation
    # s = np.random.normal(mu, sigma, 10000)
    # count, bins, ignored = plt.hist(s, 30, density=True)
    # plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
    #          linewidth=2, color='r')
    # plt.show()
elif numOfRafts > 2:
    # Many-raft analysis: hexatic order parameters and neighbor-distance
    # entropies, downsampled in time and written to csv.
    dfOrderParameters = pd.DataFrame(columns=['time(s)'])
    dfEntropies = pd.DataFrame(columns=['time(s)'])
    selectEveryNPoint = 10
    for dataID in range(len(mainDataList)):
        numOfTimeSteps = mainDataList[dataID]['numOfTimeSteps']
        timeStepSize = mainDataList[dataID]['timeStepSize']
        dfOrderParameters['time(s)'] = np.arange(0, numOfTimeSteps, selectEveryNPoint) * timeStepSize
        dfEntropies['time(s)'] = np.arange(0, numOfTimeSteps, selectEveryNPoint) * timeStepSize
        magneticFieldRotationRPS = mainDataList[dataID]['magneticFieldRotationRPS']
        hexaticOrderParameterAvgNorms = mainDataList[dataID]['hexaticOrderParameterAvgNorms']
        hexaticOrderParameterModuliiAvgs = mainDataList[dataID]['hexaticOrderParameterModuliiAvgs']
        hexaticOrderParameterModuliiStds = mainDataList[dataID]['hexaticOrderParameterModuliiStds']
        entropyByNeighborDistances = mainDataList[dataID]['entropyByNeighborDistances']
        colName = str(-mainDataList[dataID]['magneticFieldRotationRPS']).zfill(4)
        dfOrderParameters[colName + '_avgNorm'] = hexaticOrderParameterAvgNorms[0::selectEveryNPoint]
        dfOrderParameters[colName + '_ModuliiAvg'] = hexaticOrderParameterModuliiAvgs[0::selectEveryNPoint]
        dfOrderParameters[colName + '_ModuliiStds'] = hexaticOrderParameterModuliiStds[0::selectEveryNPoint]
        dfEntropies[colName] = entropyByNeighborDistances[0::selectEveryNPoint]
    dfOrderParameters.to_csv('orderParameters.csv', index=False)
    dfEntropies.to_csv('entropies.csv', index=False)
#%% load one specific simulated data and look at the results
# re-read the raft count for the single-dataset inspection section below
numOfRafts = mainDataList[0]['numOfRafts']
if numOfRafts == 2:
dataID = 0
variableListFromSimulatedFile = list(mainDataList[dataID].keys())
# # just to avoid Pycharm scolding me for using undefined variables
raftLocations = mainDataList[dataID]['raftLocations']
magneticFieldRotationRPS = mainDataList[dataID]['magneticFieldRotationRPS']
raftOrientations = mainDataList[dataID]['raftOrientations']
timeStepSize = mainDataList[dataID]['timeStepSize']
# for key, value in mainDataList[dataID].items(): # loop through key-value pairs of python dictionary
# globals()[key] = value
# data treatment
startOfSamplingStep = 0 # 0, 10000
samplingRate = 1 / timeStepSize #
raft1Locations = raftLocations[0, startOfSamplingStep:, :] # unit: micron
raft2Locations = raftLocations[1, startOfSamplingStep:, :] # unit: micron
vector1To2 = raft2Locations - raft1Locations # unit: micron
distances = np.sqrt(vector1To2[:, 0] ** 2 + vector1To2[:, 1] ** 2) # unit micron, pairwise ccDistances
distancesMean = distances.mean()
distancesSTD = distances.std()
distancesDownSampled = distances[::100]
fDistances, pDistances = fsr.fft_general(samplingRate, distances)
phase1To2 = np.arctan2(vector1To2[:, 1], vector1To2[:, 0]) * 180 / np.pi
phasesAjusted = fsr.adjust_phases(phase1To2)
orbitingSpeeds = np.gradient(phasesAjusted) * samplingRate / 180 * np.pi
orbitingSpeedsMean = orbitingSpeeds.mean()
orbitingSpeedsSTD = orbitingSpeeds.std()
fOrbitingSpeeds, pOrbitingSpeeds = fsr.fft_general(samplingRate, orbitingSpeeds)
raft1Orientations = raftOrientations[0, startOfSamplingStep:]
raft2Orientations = raftOrientations[1, startOfSamplingStep:]
raft1OrientationsAdjusted = fsr.adjust_phases(raft1Orientations)
raft2OrientationsAdjusted = fsr.adjust_phases(raft2Orientations)
raft1SpinSpeeds = np.gradient(raft1OrientationsAdjusted) * samplingRate / 360 # unit: rps
raft2SpinSpeeds = np.gradient(raft2OrientationsAdjusted) * samplingRate / 360 # unit: rps
raft1SpinSpeedsMean = raft1SpinSpeeds.mean()
raft2SpinSpeedsMean = raft2SpinSpeeds.mean()
raft1SpinSpeedsSTD = raft1SpinSpeeds.std()
raft2SpinSpeedsSTD = raft2SpinSpeeds.std()
fRaft1SpinSpeeds, pRaft1SpinSpeeds = fsr.fft_general(samplingRate, raft1SpinSpeeds)
fRaft2SpinSpeeds, pRaft2SpinSpeeds = fsr.fft_general(samplingRate, raft2SpinSpeeds)
# plotting analyzed results
# comparison of force terms
# fig, ax = plt.subplots(ncols=1, nrows=1)
# vector1To2X_Unitized = vector1To2[:, 0] / np.sqrt(vector1To2[:, 0] ** 2 + vector1To2[:, 1] ** 2)
# ax.plot(magDipoleForceOnAxisTerm[1, startOfSamplingStep:, 0] / vector1To2X_Unitized * timeStepSize, '-',
# label='magnetic-dipole-force velocity term on raft2 / vector1To2')
# ax.plot(capillaryForceTerm[1, startOfSamplingStep:, 0] / vector1To2X_Unitized * timeStepSize, '-',
# label='capillary-force velocity term on raft2 / vector1To2')
# ax.plot(hydrodynamicForceTerm[1, startOfSamplingStep:, 0] / vector1To2X_Unitized * timeStepSize, '-',
# label='hydrodynamic-force velocity term on raft2 / vector1To2')
# ax.plot(wallRepulsionTerm[1, startOfSamplingStep:, 0] / vector1To2X_Unitized * timeStepSize, '-',
# label='wall-repulsion velocity term on raft2 / vector1To2')
# ax.plot(forceCurvatureTerm[1, startOfSamplingStep:, 0] / vector1To2X_Unitized * timeStepSize, '-',
# label='force-curvature velocity term on raft2 / vector1To2')
# ax.plot(magDipoleForceOnAxisTerm[0,startOfSamplingStep:,0]/vector1To2[:,0], '-',
# label = 'magnetic-dipole-force velocity term on raft1 / vector1To2')
# ax.plot(capillaryForceTerm[0,startOfSamplingStep:,0]/vector1To2[:,0], '-',
# label = 'capillary-force velocity term on raft1 / vector1To2')
# ax.plot(hydrodynamicForceTerm[0,startOfSamplingStep:,0]/vector1To2[:,0], '-',
# label = 'hydrodynamic-force velocity term on raft1 / vector1To2')
# ax.plot(wallRepulsionTerm[0,startOfSamplingStep:,0]/vector1To2[:,0], '-',
# label = 'wall-repulsion velocity term on raft1 / vector1To2')
# ax.plot(forceCurvatureTerm[0,startOfSamplingStep:,0]/vector1To2[:,0], '-',
# label = 'force-curvature velocity term on raft1 / vector1To2')
# ax.set_xlabel('time step number', size=20)
# ax.set_ylabel('displacement along vector1To2 (um)', size=20)
# ax.set_title('Simulation at {}rps'.format(magneticFieldRotationRPS))
# ax.legend()
# plt.show()
# plotting distances between rafts vs frame# (time)
_, ax = plt.subplots(ncols=1, nrows=1)
ax.plot(np.arange(len(distances)) * timeStepSize, distances, '-o', label='c')
ax.set_xlabel('Time (s)', size=20)
ax.set_ylabel('ccdistances between rafts (micron)', size=20)
ax.set_title('Simulation at {}rps'.format(magneticFieldRotationRPS))
ax.legend()
plt.show()
# plotting the fft of distances
_, ax = plt.subplots(ncols=1, nrows=1)
ax.plot(fDistances[1:], pDistances[1:], '-o', label='c')
ax.set_xlabel('fDistances (Hz)', size=20)
ax.set_ylabel('Power P1 (a.u.)', size=20)
ax.set_title('Simulation at {}rps'.format(magneticFieldRotationRPS))
ax.legend()
plt.show()
# plotting orbiting speeds vs time
_, ax = plt.subplots(ncols=1, nrows=1)
ax.plot(orbitingSpeeds, '-o', label='orbiting speeds calculated from orientation')
ax.set_xlabel('Frames(Time)', size=20)
ax.set_ylabel('orbiting speeds in rad/s', size=20)
| |
from collections import Counter

import networkx as nx
import numpy as np
import pandas as pd
import scipy.spatial.kdtree
import scipy.stats
from scipy.interpolate import UnivariateSpline
from scipy.spatial.distance import cdist
from statsmodels.stats.multitest import multipletests

import ops.utils
def format_stats_wide(df_stats):
    """Pivot per-(gene, stat, stimulant) statistics into one wide row per gene,
    with flattened stat columns plus per-stimulant cell counts."""
    stat_table = (df_stats
                  .pivot_table(index=['gene_symbol'],
                               columns=['stat_name', 'stimulant'],
                               values=['statistic', 'pval', 'pval_FDR_10'])
                  .pipe(ops.utils.flatten_cols))
    cell_counts = (df_stats
                   .pivot_table(index=['gene_symbol'], columns='stimulant',
                                values='count')
                   .rename(columns=lambda c: 'cells_' + c))
    return pd.concat([stat_table, cell_counts], axis=1)
def distribution_difference(df):
    """Wasserstein distance between each gene's early DAPI/GFP correlation
    distribution and the non-targeting control distribution.

    Returns a Series indexed by gene_symbol; the control maps to 0.

    Fix: the module imported only scipy.spatial.kdtree, so scipy.stats was
    never guaranteed to be loaded — relies on the added `import scipy.stats`.
    """
    col = 'dapi_gfp_corr_early'
    y_neg = (df
     .query('gene_symbol == "non-targeting"')
     [col]
    )
    return df.groupby('gene_symbol').apply(lambda x:
      scipy.stats.wasserstein_distance(x[col], y_neg))
def add_est_timestamps(df_all):
    """Attach an estimated acquisition timestamp (in hours) to every row.

    Each unique (frame, well, site) combination, taken in sorted order, is
    assumed to have been imaged one "site interval" after the previous one,
    starting from a 3-minute offset. 24 min/frame over 2*364 sites/frame.
    """
    seconds_per_frame = 24 * 60
    sites_per_frame = 2 * 364
    seconds_per_site = seconds_per_frame / sites_per_frame
    start_offset = 3 * 60  # seconds before the first site
    key_cols = ['frame', 'well', 'site']
    unique_sites = df_all[key_cols].drop_duplicates().sort_values(key_cols)
    unique_sites['timestamp'] = [
        (start_offset + rank * seconds_per_site) / 3600
        for rank in range(len(unique_sites))]
    return df_all.join(unique_sites.set_index(key_cols), on=key_cols)
def add_dapi_diff(df_all):
    """Add a 'dapi_diff' column: per-cell spread of frame-normalized DAPI.

    Each cell's dapi_max trace is divided by the per-frame mean (across
    cells), and dapi_diff is the max minus min of that normalized trace.
    """
    key = ['well', 'site', 'cell_ph']
    per_frame = df_all.pivot_table(index=key, columns='frame',
                                   values='dapi_max')
    normalized = per_frame / per_frame.mean()
    dapi_diff = (normalized.max(axis=1) - normalized.min(axis=1)
                 ).rename('dapi_diff')
    return df_all.join(dapi_diff, on=key)
def add_spline_diff(df, s=25):
    """Add 'splined' (control spline evaluated at each timestamp) and
    'spline_diff' (observed minus splined correlation) columns.

    A univariate smoothing spline (smoothing factor `s`) is fit to the
    mean non-targeting 'dapi_gfp_corr' per timestamp.
    """
    control = (df
               .query('gene_symbol == "non-targeting"')
               .groupby('timestamp')
               ['dapi_gfp_corr'].mean())
    t_ctrl = control.index.values.astype(float)
    y_ctrl = control.values
    order = np.argsort(t_ctrl)
    spline = UnivariateSpline(t_ctrl[order], y_ctrl[order], s=s)
    splined = spline(df['timestamp'])
    return (df
            .assign(splined=splined)
            .assign(spline_diff=lambda x: x['dapi_gfp_corr'] - x['splined']))
def get_stats(df, col='spline_diff'):
    """Per-gene statistics of `col` versus the non-targeting control.

    Computes the per-(gene, cell) mean of `col`, then for each gene a
    t-test p-value against the non-targeting cells, plus mean, count, and
    a Benjamini/Hochberg FDR-corrected p-value at alpha=0.1.

    Returns
    -------
    pandas.DataFrame indexed by gene_symbol with columns
    'mean', 'count', 'pval', 'pval_FDR_10'.
    """
    # BUG FIX: the module only imports scipy.spatial.kdtree, so
    # `scipy.stats` was not reliably available; import it explicitly.
    import scipy.stats
    df_diff = (df
               .groupby(['gene_symbol', 'cell'])
               [col].mean()
               .sort_values(ascending=False)
               .reset_index())
    # Control population: per-cell means from non-targeting sgRNAs.
    negative_vals = (df_diff
                     .query('gene_symbol == "non-targeting"')
                     [col]
                     )
    test = lambda x: scipy.stats.ttest_ind(x, negative_vals).pvalue
    stats = (df_diff.groupby('gene_symbol')
             [col]
             .pipe(ops.utils.groupby_reduce_concat, 'mean', 'count',
                   pval=lambda x: x.apply(test))
             .assign(pval_FDR_10=lambda x:
                     multipletests(x['pval'], 0.1)[1]))
    return stats
# track nuclei nearest neighbor
def initialize_graph(df):
    """Build a directed graph of candidate nucleus links between frames.

    Nodes are (frame, label) tuples; weighted edges connect each nucleus
    to its nearest neighbors in the next frame (via `get_edges`).
    """
    arr_df = [x for _, x in df.groupby('frame')]
    nodes = [tuple(x) for x in df[['frame', 'label']].values]
    G = nx.DiGraph()
    G.add_nodes_from(nodes)
    # FIX: removed a dead `edges = []` initialization that was immediately
    # overwritten inside the loop and never used as an accumulator.
    for df1, df2 in zip(arr_df, arr_df[1:]):
        G.add_weighted_edges_from(get_edges(df1, df2))
    return G
def get_edges(df1, df2):
    """Candidate links between nuclei in two consecutive frames.

    For each nucleus in `df2`, finds its 3 nearest neighbors in `df1` by
    centroid ('i', 'j') distance and emits one weighted edge per pair.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Tables with 'i', 'j', 'frame', 'label' columns.

    Returns
    -------
    list of (start_node, end_node, weight) where nodes are (frame, label)
    integer tuples and weight is the centroid distance.
    """
    neighboring_points = 3
    # (frame, label) identifier taken from columns 2 and 3 of the value array
    get_label = lambda x: tuple(int(y) for y in x[[2, 3]])
    x1 = df1[['i', 'j', 'frame', 'label']].values
    x2 = df2[['i', 'j', 'frame', 'label']].values
    # BUG FIX: scipy.spatial.kdtree is a deprecated private module (slated
    # for removal); use the public scipy.spatial.KDTree class instead.
    kdt = scipy.spatial.KDTree(df1[['i', 'j']])
    points = df2[['i', 'j']]
    # distances and indices of the nearest df1 nuclei for every df2 nucleus
    result = kdt.query(points, neighboring_points)
    edges = []
    for i2, (ds, ns) in enumerate(zip(*result)):
        end_node = get_label(x2[i2])
        for d, i1 in zip(ds, ns):
            start_node = get_label(x1[i1])
            edges.append((start_node, end_node, d))
    return edges
def displacement(x):
    """Euclidean step sizes between consecutive (x, y) positions in `x`."""
    dx = np.diff(x['x'])
    dy = np.diff(x['y'])
    return np.sqrt(dx * dx + dy * dy)
def analyze_graph(G, cutoff=100):
    """Trace a path forward from each nucleus in the starting frame. Only keep
    the paths that reach the final frame.

    Returns (cost, path) dicts keyed by final-frame (frame, label) nodes.
    """
    start_nodes = [node for node in G.nodes if node[0] == 0]
    last_frame = max(frame for frame, _ in G.nodes)
    cost, path = nx.multi_source_dijkstra(G, start_nodes, cutoff=cutoff)
    # Keep only destinations that lie in the final frame.
    cost = {node: c for node, c in cost.items() if node[0] == last_frame}
    path = {node: p for node, p in path.items() if node[0] == last_frame}
    return cost, path
def filter_paths(cost, path, threshold=35):
    """Remove intersecting paths.
    returns list of one [(frame, label)] per trajectory

    Parameters
    ----------
    cost : dict
        Final node -> path cost (from `analyze_graph`).
    path : dict
        Final node -> list of (frame, label) nodes along the trajectory.
    threshold : number
        Paths with cost above this are discarded.
    """
    from itertools import chain
    # remove intersecting paths (node in more than one path)
    # PERF FIX: sum(..., []) concatenation is quadratic; chain is linear.
    node_count = Counter(chain.from_iterable(path.values()))
    bad = {node for node, count in node_count.items() if count > 1}
    print('bad', len(bad), len(node_count))
    # remove paths with cost over threshold
    too_costly = [node for node, c in cost.items() if c > threshold]
    bad = bad | set(too_costly)
    relabel = [nodes for nodes in path.values() if not (set(nodes) & bad)]
    assert(len(relabel) > 0)
    return relabel
def relabel_nuclei(nuclei, relabel):
    """Relabel nuclei masks frame by frame so trajectories keep one label.

    `relabel` is a list of trajectories, each a list of (frame, label)
    nodes; trajectory index becomes the new label via a lookup table.
    Returns a relabeled copy; `nuclei` itself is untouched.
    """
    relabeled = nuclei.copy()
    table_size = nuclei.max() + 1
    # zip(*relabel) yields, per frame, the node of every trajectory.
    for frame, frame_nodes in enumerate(zip(*relabel)):
        old_labels = [node[1] for node in frame_nodes]
        mapping = np.zeros(table_size, dtype=int)
        mapping[old_labels] = range(len(old_labels))
        relabeled[frame] = mapping[relabeled[frame]]
    return relabeled
# track nuclei trackmate
def call_TrackMate_centroids(input_path, output_path='trackmate_output.csv',
                             fiji_path=None, threads=1, tracker_settings=None):
    '''Track centroids by running TrackMate in a headless Fiji subprocess.

    Parameters
    ----------
    input_path : str
        Path handed to the TrackMate centroid-tracking script.
    output_path : str
        csv file the TrackMate script writes.
    fiji_path : str, optional
        Fiji executable; inferred from the platform when None.
    threads : int
        Requested thread count (see warning below).
    tracker_settings : dict, optional
        Overrides for LAP-tracker settings; missing keys get defaults. A
        caller-supplied dict is updated in place (pre-existing behavior).

    warnings: - `threads` is probably not actually setting the max threads for fiji.
    - to allow multiple instances of fiji to run concurrently (e.g., launched from snakemake pipeline), likely have
    to set `allowMultiple` parameter in Fiji.app/Contents/Info.plist to true.
    `CUTOFF_PERCENTILE` parameter in tracker_settings changes the alternative cost to gap closing/merging/splitting. Higher values ->
    more gap closures/merges/splits.
    '''
    import subprocess, json
    # BUG FIX: the previous default `tracker_settings=dict()` was a shared
    # mutable default that setdefault() mutated across calls; use None.
    if tracker_settings is None:
        tracker_settings = {}
    if fiji_path is None:
        import sys
        if sys.platform == "darwin":
            fiji_path = '/Applications/Fiji.app/Contents/MacOS/ImageJ-macosx'
        elif sys.platform == "linux":
            fiji_path = '~/Fiji.app/ImageJ-linux64'
        else:
            raise ValueError("Currently only OS X and linux systems can infer Fiji install location.")
    tracker_defaults = {"LINKING_MAX_DISTANCE": 60., "GAP_CLOSING_MAX_DISTANCE": 60.,
                        "ALLOW_TRACK_SPLITTING": True, "SPLITTING_MAX_DISTANCE": 60.,
                        "ALLOW_TRACK_MERGING": True, "MERGING_MAX_DISTANCE": 60.,
                        "MAX_FRAME_GAP": 2, "CUTOFF_PERCENTILE": 0.90}
    for key, val in tracker_defaults.items():
        _ = tracker_settings.setdefault(key, val)
    trackmate_call = ('''{fiji_path} --ij2 --headless --console --run {ops_path}/external/TrackMate/track_centroids.py'''
                      .format(fiji_path=fiji_path, ops_path=ops.__path__[0]))
    variables = ('''"input_path='{input_path}',output_path='{output_path}',threads={threads},tracker_settings='{tracker_settings}'"'''
                 .format(input_path=input_path, output_path=output_path,
                         threads=int(threads), tracker_settings=json.dumps(tracker_settings)))
    # NOTE(review): shell=True with interpolated paths is shell-injection
    # prone if input_path/output_path are untrusted; kept for compatibility.
    output = subprocess.check_output(' '.join([trackmate_call, variables]), shell=True)
    print(output.decode("utf-8"))
def format_trackmate(df):
    """Post-process a TrackMate export: expand parent ids and relabel cells.

    Expects columns 'id', 'cell', 'frame', and a stringified list column
    'parent_ids'. Returns one row per detection with new integer columns
    'relabel', 'parent_cell_0', 'parent_cell_1' (-1 where absent).
    """
    import ast
    # Expand the stringified 'parent_ids' list into two integer columns,
    # fill absent parents with -1, and index rows by TrackMate id.
    df = (pd.concat([df,
                     pd.DataFrame(df['parent_ids'].apply(lambda x: ast.literal_eval(x)).tolist(),
                                  index = df.index,columns=['parent_id_0','parent_id_1'])
                     ],axis=1)
          .fillna(value=-1)
          .drop(columns=['parent_ids'])
          .assign(relabel=-1,parent_cell_0=-1,parent_cell_1=-1)
          .astype(int)
          .set_index('id')
          )
    # Lookup table: row index = TrackMate id; columns = (cell, parent_id_0,
    # parent_id_1). One extra row so id -1 can serve as a sentinel.
    lookup = np.zeros((df.index.max()+2,3),dtype=int)
    lookup[df.index] = (df
                        [['cell','parent_id_0','parent_id_1']]
                        .values
                        )
    # Sentinel: parent id -1 maps to (-1,-1,-1) so missing parents propagate.
    lookup[-1] = np.array([-1,-1,-1])
    set_cols = ['relabel','parent_cell_0','parent_cell_1']
    current = 1
    arr_frames = []
    for frame,df_frame in df.groupby('frame'):
        df_frame = df_frame.copy()
        if frame==0:
            # First frame: every detection starts a new track with no parents.
            arr_frames.append(df_frame.assign(relabel = list(range(current,current+df_frame.pipe(len))),
                                              parent_cell_0 = -1,
                                              parent_cell_1 = -1))
            current += df_frame.pipe(len)
            continue
        # unique child from single parent
        idx_propagate = ((df_frame.duplicated(['parent_id_0','parent_id_1'],keep=False)==False)
                         &
                         ((df_frame[['parent_id_0','parent_id_1']]==-1).sum(axis=1)==1)
                         ).values
        # NOTE: chained assignment -- the rightmost lookup value is written
        # both into this frame's rows and back into the lookup table.
        lookup[df_frame[idx_propagate].index.values] = df_frame.loc[idx_propagate,set_cols] = lookup[df_frame.loc[idx_propagate,'parent_id_0'].values]
        # split, merge, or new
        idx_new = ((df_frame.duplicated(['parent_id_0','parent_id_1'],keep=False))
                   |
                   ((df_frame[['parent_id_0','parent_id_1']]==-1).sum(axis=1)!=1)
                   ).values
        # New labels are minted sequentially; parent cells come from the
        # lookup table's 'cell' column (index 0) for each parent id.
        lookup[df_frame[idx_new].index.values] = df_frame.loc[idx_new,set_cols] = np.array([list(range(current,current+idx_new.sum())),
                                                                                            lookup[df_frame.loc[idx_new,'parent_id_0'].values,0],
                                                                                            lookup[df_frame.loc[idx_new,'parent_id_1'].values,0]
                                                                                            ]).T
        current += idx_new.sum()
        arr_frames.append(df_frame)
    return pd.concat(arr_frames).reset_index()
# recover parent relationships
## during some iterations of trackmate, saving of parent cell identities was unintentionally
## commented out. these functions infer these relationships. For a single tile, correctly assigned
## same parent-child relationships as trackmate for >99.8% of cells. Well-constrained problem.
def recover_parents(df_tracked,threshold=60, cell='cell', ij=('i','j'), keep_cols=['well','tile','track_id','cell']):
    """Infer parent-cell relationships that were not saved by TrackMate.

    For each track, the last cell before a junction and the first cell
    after it are matched by centroid distance (`threshold`), delegating
    the actual assignment to `junction_parent_assignment`.

    Returns a DataFrame of `keep_cols` plus 'parent_cell_0'/'parent_cell_1'
    (-1 where no parent exists).

    NOTE(review): `keep_cols` is a mutable default argument; it is only
    read here, but callers should not mutate it.
    """
    # to be run on a table from a single tile
    # get junction cells
    df_pre_junction = (df_tracked
                       .groupby(['track_id',cell],group_keys=False)
                       .apply(lambda x: x.nlargest(1,'frame'))
                       )
    df_post_junction = (df_tracked
                        .groupby(['track_id',cell],group_keys=False)
                        .apply(lambda x: x.nsmallest(1,'frame'))
                        )
    arr = []
    # assign frame 0 cells or un-tracked cells with no parents
    arr.append(df_post_junction
               .query('frame==0 | track_id==-1')
               [keep_cols]
               .assign(parent_cell_0=-1,parent_cell_1=-1)
               )
    # clean up tables
    last_frame = int(df_tracked['frame'].nlargest(1))
    df_pre_junction = df_pre_junction.query('frame!=@last_frame & track_id!=-1')
    df_post_junction = df_post_junction.query('frame!=0 & track_id!=-1')
    # categorize frames to avoid issues with no-cell junction frames
    df_pre_junction.loc[:,'frame'] = pd.Categorical(df_pre_junction['frame'],
                                                    categories=np.arange(0,last_frame),
                                                    ordered=True)
    df_post_junction.loc[:,'frame'] = pd.Categorical(df_post_junction['frame'],
                                                     categories=np.arange(1,last_frame+1),
                                                     ordered=True)
    # Categorical groupby yields every frame (even empty ones), so the two
    # groupbys stay aligned: pre-junction frame f pairs with post frame f+1.
    for (frame_pre,df_pre),(frame_post,df_post) in zip(df_pre_junction.groupby('frame'),
                                                       df_post_junction.groupby('frame')):
        if df_post.pipe(len)==0:
            continue
        elif df_pre.pipe(len)==0:
            # No candidates on the earlier side -> no parents to assign.
            arr.append(df_post[keep_cols].assign(parent_cell_0=-1,parent_cell_1=-1))
        else:
            arr.extend(junction_parent_assignment(pd.concat([df_pre,df_post]),
                                                  frame_0=frame_pre,
                                                  threshold=threshold,
                                                  ij=ij,
                                                  cell=cell,
                                                  keep_cols=keep_cols
                                                  )
                       )
    return pd.concat(arr,ignore_index=True)
def junction_parent_assignment(df_junction, frame_0, threshold, ij, cell, keep_cols):
    """Assign parents across one junction (frame_0 -> frame_0 + 1).

    `df_junction` holds the pre- and post-junction cells of all tracks at
    this junction. Within each track, post-junction cells are linked to
    pre-junction cells whose centroid distance is under `threshold`, after
    pruning conflicting links with `resolve_conflicts`.

    Returns a list of DataFrames (one per track with post-junction cells),
    each with `keep_cols` plus 'parent_cell_0'/'parent_cell_1'.
    """
    arr = []
    i,j = ij
    for track,df_track_junction in df_junction.groupby('track_id'):
        if (df_track_junction['frame'].nunique()==1):
            if df_track_junction.iloc[0]['frame']==(frame_0+1):
                # only post-junction cells -> parents = -1
                arr.append(df_track_junction[keep_cols].assign(parent_cell_0=-1,parent_cell_1=-1))
            elif df_track_junction.iloc[0]['frame']==frame_0:
                # only pre-junction cells -> ends of tracks, don't have to assign
                continue
        else:
            # Rows of `edges`/`distances` are post-junction ("after") cells,
            # columns are pre-junction ("before") cells.
            before,after = (g[[cell,i,j]].values
                            for _,g
                            in df_track_junction.groupby('frame')
                            )
            distances = cdist(after[:,1:],before[:,1:])
            edges = distances<threshold
            edges = resolve_conflicts(edges,distances, conflict_type='extra')
            edges = resolve_conflicts(edges,distances,conflict_type='tangle')
            # For each "after" cell, collect the linked "before" cell ids;
            # unlinked cells get (-1, -1).
            parents = tuple(before[edge,0]
                            if edge.sum()>0 else np.array([-1,-1])
                            for edge in edges)
            if len(parents) != edges.shape[0]:
                raise ValueError('Length of parents tuple does not match number of post-junction cells')
            if max([len(p) for p in parents])>2:
                raise ValueError(f'''Conflict resolution error; too many parents selected for at least one cell
                    for track {track} in frame {frame_0}
                    ''')
            # Pad single parents with -1 so every row has exactly two entries.
            parents = np.array([np.concatenate([p,np.array([-1])])
                                if len(p)==1
                                else p
                                for p in parents
                                ]
                               )
            arr.append(df_track_junction.query('frame==@frame_0+1')
                       [keep_cols]
                       .assign(parent_cell_0=parents[:,0],parent_cell_1=parents[:,1])
                       )
    return arr
def resolve_conflicts(edges, distances, conflict_type='tangle'):
    """Iteratively prune the longest conflicting edge until no conflicts remain.

    `edges` is a boolean adjacency matrix, `distances` the matching distance
    matrix. 'tangle' flags any cell with more than one edge; 'extra' flags
    any cell with more than two edges. `edges` is modified in place and
    also returned.
    """
    if conflict_type == 'tangle':
        # any cell with more than one edge is a potential conflict
        edge_threshold, conflict_threshold = 1, 1
    elif conflict_type == 'extra':
        # any cell with more than 2 edges is a potential conflict
        edge_threshold, conflict_threshold = 2, 0

    def find_conflicts(current_edges):
        # Per-edge conflict score: counts whether the edge's row and/or
        # column exceed the allowed degree; non-edges are zeroed out.
        flags = np.zeros(current_edges.shape)
        flags += current_edges.sum(axis=0) > edge_threshold
        flags += (current_edges.sum(axis=1) > edge_threshold)[:, None]
        flags[~current_edges] = 0
        return flags

    conflicts = find_conflicts(edges)
    while (conflicts > conflict_threshold).sum() > 0:
        # remove the longest edge among the conflicting ones
        worst = distances[conflicts > conflict_threshold].max()
        edges[distances == worst] = False
        conflicts = find_conflicts(edges)
    return edges
# plot traces
def plot_traces_gene_stim(df, df_neg, gene):
    """Plot per-sgRNA correlation traces for one gene, split by stimulant.

    Parameters
    ----------
    df : pandas.DataFrame
        Per-cell traces for the gene; needs 'stimulant' and 'sgRNA_name'.
    df_neg : pandas.DataFrame
        Non-targeting controls with 'stimulant', 'frame', 'timestamp',
        'dapi_gfp_corr'.
    gene : str
        Gene symbol (not referenced in the body; kept for the caller API).

    Returns
    -------
    matplotlib.figure.Figure
    """
    # BUG FIX: this module never imports matplotlib at top level, so `plt`
    # was an unresolved name here; import locally (mirroring the existing
    # local import of ops.figures.plotting).
    import matplotlib.pyplot as plt
    import ops.figures.plotting
    fig, axs = plt.subplots(nrows=4, ncols=3, figsize=(12, 12),
                            sharex=True, sharey=True)
    for stim, df_1 in df.groupby('stimulant'):
        # TNFa traces use the top two axis rows; the other stimulant the bottom two.
        if stim == 'TNFa':
            axs_ = axs[:2]
            color = ops.figures.plotting.ORANGE
        else:
            axs_ = axs[2:]
            color = ops.figures.plotting.BLUE
        # Mean control trajectory for this stimulant, one point per frame.
        x_neg, y_neg = (df_neg
                        .query('stimulant == @stim')
                        .groupby(['frame'])
                        [['timestamp', 'dapi_gfp_corr']].mean()
                        .values.T)
        for ax, (sg, df_2) in zip(axs_.flat[:],
                                  df_1.groupby('sgRNA_name')):
            plot_traces(df_2, ax, sg, color)
            # Overlay the non-targeting control in black for reference.
            ax.plot(x_neg, y_neg, c='black')
    return fig
def plot_traces(df, ax, sgRNA_label, color):
| |
<gh_stars>0
#!/usr/bin/env python
# Programmer: <NAME>, <NAME>
# General-purpose Python library imports
import json
import os
import re
import socket
import subprocess
import sys
import tempfile
import time
import unittest
# Third party libraries
import boto.ec2
from flexmock import flexmock
import SOAPpy
# AppScale import, the library that we're testing here
lib = os.path.dirname(__file__) + os.sep + ".." + os.sep + "lib"
sys.path.append(lib)
from agents.euca_agent import EucalyptusAgent
from agents.gce_agent import CredentialTypes
from agents.gce_agent import GCEAgent
from appcontroller_client import AppControllerClient
from appscale_logger import AppScaleLogger
from appscale_tools import AppScaleTools
from custom_exceptions import AppScaleException
from custom_exceptions import BadConfigurationException
from custom_exceptions import ShellException
from local_state import APPSCALE_VERSION
from local_state import LocalState
from node_layout import NodeLayout
from remote_helper import RemoteHelper
class TestRemoteHelper(unittest.TestCase):
def setUp(self):
    """Build the shared mock environment for every test in this class.

    Logging, sleeps, EC2 API calls, key/secret file writes, sockets,
    temp files, and ssh/scp shell invocations are all replaced with
    flexmock expectations so tests exercise RemoteHelper logic without
    touching the network or the filesystem.
    NOTE: this suite targets Python 2 (`__builtin__`, `0600` octal).
    """
    # mock out all logging, since it clutters our output
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()
    # mock out all sleeps, as they aren't necessary for unit testing
    flexmock(time)
    time.should_receive('sleep').and_return()
    # set up some fake options so that we don't have to generate them via
    # ParseArgs
    self.options = flexmock(infrastructure='ec2', group='boogroup',
        machine='ami-ABCDEFG', instance_type='m1.large', keyname='bookey',
        table='cassandra', verbose=False, test=False, use_spot_instances=False,
        zone='my-zone-1b', static_ip=None)
    self.my_id = "12345"
    self.node_layout = NodeLayout(self.options)
    # set up phony AWS credentials for each test
    # ones that test not having them present can
    # remove them
    for credential in EucalyptusAgent.REQUIRED_EC2_CREDENTIALS:
        os.environ[credential] = "baz"
    os.environ['EC2_URL'] = "http://boo"
    # mock out calls to EC2
    # begin by assuming that our ssh keypair doesn't exist, and thus that we
    # need to create it
    key_contents = "key contents here"
    fake_key = flexmock(name="fake_key", material=key_contents)
    fake_key.should_receive('save').with_args(os.environ['HOME']+'/.appscale').and_return(None)
    fake_ec2 = flexmock(name="fake_ec2")
    fake_ec2.should_receive('get_key_pair').with_args('bookey') \
        .and_return(None)
    fake_ec2.should_receive('create_key_pair').with_args('bookey') \
        .and_return(fake_key)
    # mock out writing the secret key
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')  # set the fall-through
    secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
    fake_secret = flexmock(name="fake_secret")
    fake_secret.should_receive('write').and_return()
    builtins.should_receive('open').with_args(secret_key_location, 'w') \
        .and_return(fake_secret)
    # also, mock out the keypair writing and chmod'ing
    ssh_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.key"
    fake_file = flexmock(name="fake_file")
    fake_file.should_receive('write').with_args(key_contents).and_return()
    builtins.should_receive('open').with_args(ssh_key_location, 'w') \
        .and_return(fake_file)
    flexmock(os)
    # 0600 is Python 2 octal syntax (0o600 in Python 3).
    os.should_receive('chmod').with_args(ssh_key_location, 0600).and_return()
    # next, assume there are no security groups up at first, but then it gets
    # created.
    udp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='udp')
    tcp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='tcp')
    icmp_rule = flexmock(from_port=-1, to_port=-1, ip_protocol='icmp')
    group = flexmock(name='boogroup', rules=[tcp_rule, udp_rule, icmp_rule])
    fake_ec2.should_receive('get_all_security_groups').with_args().and_return([])
    fake_ec2.should_receive('get_all_security_groups').with_args('boogroup').and_return([group])
    # and then assume we can create and open our security group fine
    fake_ec2.should_receive('create_security_group').with_args('boogroup',
        'AppScale security group').and_return()
    fake_ec2.should_receive('authorize_security_group').and_return()
    # next, add in mocks for run_instances
    # the first time around, let's say that no machines are running
    # the second time around, let's say that our machine is pending
    # and that it's up the third time around
    fake_pending_instance = flexmock(state='pending')
    fake_pending_reservation = flexmock(instances=fake_pending_instance)
    fake_running_instance = flexmock(state='running', key_name='bookey',
        id='i-12345678', public_dns_name='public1', private_dns_name='private1')
    fake_running_reservation = flexmock(instances=fake_running_instance)
    fake_ec2.should_receive('get_all_instances').and_return([]) \
        .and_return([]) \
        .and_return([fake_pending_reservation]) \
        .and_return([fake_running_reservation])
    # next, assume that our run_instances command succeeds
    fake_ec2.should_receive('run_instances').and_return()
    # finally, inject our mocked EC2
    flexmock(boto.ec2)
    boto.ec2.should_receive('connect_to_region').and_return(fake_ec2)
    # assume that ssh comes up on the third attempt
    fake_socket = flexmock(name='fake_socket')
    fake_socket.should_receive('connect').with_args(('public1',
        RemoteHelper.SSH_PORT)).and_raise(Exception).and_raise(Exception) \
        .and_return(None)
    flexmock(socket)
    socket.should_receive('socket').and_return(fake_socket)
    # throw some default mocks together for when invoking via shell succeeds
    # and when it fails
    self.fake_temp_file = flexmock(name='fake_temp_file')
    self.fake_temp_file.should_receive('seek').with_args(0).and_return()
    self.fake_temp_file.should_receive('read').and_return('boo out')
    self.fake_temp_file.should_receive('close').and_return()
    flexmock(tempfile)
    tempfile.should_receive('NamedTemporaryFile')\
        .and_return(self.fake_temp_file)
    self.success = flexmock(name='success', returncode=0)
    self.success.should_receive('wait').and_return(0)
    self.failed = flexmock(name='success', returncode=1)
    self.failed.should_receive('wait').and_return(1)
    # assume that root login isn't already enabled
    local_state = flexmock(LocalState)
    local_state.should_receive('shell') \
        .with_args(re.compile('^ssh .*root'), False, 1, stdin='ls') \
        .and_return(RemoteHelper.LOGIN_AS_UBUNTU_USER)
    # and assume that we can ssh in as ubuntu to enable root login
    local_state = flexmock(LocalState)
    local_state.should_receive('shell')\
        .with_args(re.compile('^ssh .*ubuntu'),False,5)\
        .and_return()
    # also assume that we can scp over our ssh keys
    local_state.should_receive('shell')\
        .with_args(re.compile('scp .*/root/.ssh/id_'),False,5)\
        .and_return()
    local_state.should_receive('shell')\
        .with_args(re.compile('scp .*/root/.appscale/bookey.key'),False,5)\
        .and_return()
def test_start_head_node_in_cloud_but_ami_not_appscale(self):
    """start_head_node must clean up and raise AppScaleException when the
    booted image has no AppScale configuration directory."""
    local_state = flexmock(LocalState)
    # Mock out our attempts to enable the root login.
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin='sudo touch /root/.ssh/authorized_keys').and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin='sudo chmod 600 /root/.ssh/authorized_keys').and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5, stdin='mktemp').and_return()
    local_state.should_receive('shell') \
        .with_args(re.compile('^ssh'), False, 5,
                   stdin='ls') \
        .and_return(RemoteHelper.LOGIN_AS_UBUNTU_USER)
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin=re.compile(
            'sudo sort -u ~/.ssh/authorized_keys /root/.ssh/authorized_keys -o '
        )
    ).and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin=re.compile(
            'sudo sed -n '
            '\'\/\.\*Please login\/d; w\/root\/\.ssh\/authorized_keys\' '
        )
    ).and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5, stdin=re.compile('rm -f ')
    ).and_return()
    # The `ls <CONFIG_DIR>` probe fails -> the image is not an AppScale AMI.
    local_state.should_receive('shell').with_args(
        re.compile('^ssh'), False, 5,
        stdin=re.compile('ls {}'.format(RemoteHelper.CONFIG_DIR))
    ).and_raise(ShellException).ordered()
    # Check that the cleanup routine is called on error.
    flexmock(AppScaleTools).should_receive('terminate_instances')\
        .and_return().ordered()
    self.assertRaises(AppScaleException, RemoteHelper.start_head_node,
                      self.options, self.my_id, self.node_layout)
def test_start_head_node_in_cloud_but_ami_wrong_version(self):
    """start_head_node must clean up and raise AppScaleException when the
    image lacks a directory for the running tools' APPSCALE_VERSION."""
    local_state = flexmock(LocalState)
    # mock out our attempts to enable the root login.
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin='sudo touch /root/.ssh/authorized_keys').and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin='sudo chmod 600 /root/.ssh/authorized_keys').and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5, stdin='mktemp').and_return()
    local_state.should_receive('shell') \
        .with_args(re.compile('^ssh'), False, 5,
                   stdin='ls') \
        .and_return(RemoteHelper.LOGIN_AS_UBUNTU_USER)
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin=re.compile(
            'sudo sort -u ~/.ssh/authorized_keys /root/.ssh/authorized_keys -o '
        )
    ).and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin=re.compile(
            'sudo sed -n '
            '\'\/\.\*Please login\/d; w\/root\/\.ssh\/authorized_keys\' '
        )
    ).and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5, stdin=re.compile('rm -f ')
    ).and_return()
    # Assume configuration directory exists.
    local_state.should_receive('shell').with_args(
        re.compile('^ssh'), False, 5,
        stdin=re.compile('ls {}'.format(RemoteHelper.CONFIG_DIR))
    ).and_return()
    # Assume the version file does not exist.
    version_dir = '{}/{}'.format(RemoteHelper.CONFIG_DIR, APPSCALE_VERSION)
    local_state.should_receive('shell').with_args(re.compile('^ssh'), False,
        5, stdin=re.compile('ls {}'.format(version_dir))).\
        and_raise(ShellException)
    # check that the cleanup routine is called on error
    flexmock(AppScaleTools).should_receive('terminate_instances')\
        .and_return()
    self.assertRaises(AppScaleException, RemoteHelper.start_head_node,
                      self.options, self.my_id, self.node_layout)
def test_start_head_node_in_cloud_but_using_unsupported_database(self):
    """start_head_node must clean up and raise AppScaleException when the
    image's version directory lacks the requested database ('cassandra')."""
    local_state = flexmock(LocalState)
    # Mock out our attempts to enable the root login.
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin='sudo touch /root/.ssh/authorized_keys').and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin='sudo chmod 600 /root/.ssh/authorized_keys').and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5, stdin='mktemp').and_return()
    local_state.should_receive('shell') \
        .with_args(re.compile('^ssh'), False, 5,
                   stdin='ls') \
        .and_return(RemoteHelper.LOGIN_AS_UBUNTU_USER)
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin=re.compile(
            'sudo sort -u ~/.ssh/authorized_keys /root/.ssh/authorized_keys -o '
        )
    ).and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5,
        stdin=re.compile(
            'sudo sed -n '
            '\'\/\.\*Please login\/d; w\/root\/\.ssh\/authorized_keys\' '
        )
    ).and_return()
    local_state.should_receive('shell').with_args(
        re.compile('ssh'), False, 5, stdin=re.compile('rm -f ')
    ).and_return()
    # Assume the configuration directory exists.
    local_state.should_receive('shell').with_args(re.compile('^ssh'), False,
        5, stdin=re.compile('ls {}'.format(RemoteHelper.CONFIG_DIR))).\
        and_return().ordered()
    # Assume the version directory exists.
    version_dir = '{}/{}'.format(RemoteHelper.CONFIG_DIR, APPSCALE_VERSION)
    local_state.should_receive('shell').with_args(re.compile('^ssh'), False,
        5, stdin=re.compile('ls {}'.format(version_dir))).\
        and_return().ordered()
    # Assume the given database is not supported.
    db_file = '{}/{}/{}'.\
        format(RemoteHelper.CONFIG_DIR, APPSCALE_VERSION, 'cassandra')
    local_state.should_receive('shell').with_args(
        re.compile('^ssh'), False, 5,
        stdin=re.compile('ls {}'.format(db_file))
    ).and_raise(ShellException).ordered()
    # check that the cleanup routine is called on error
    flexmock(AppScaleTools).should_receive('terminate_instances')\
        .and_return().ordered()
    self.assertRaises(AppScaleException, RemoteHelper.start_head_node,
                      self.options, self.my_id, self.node_layout)
def test_rsync_files_from_dir_that_doesnt_exist(self):
    """rsync_files must raise BadConfigurationException when the user
    points us at a local source directory that does not exist."""
    # Pretend the requested local directory is absent.
    flexmock(os.path)
    (os.path.should_receive('exists')
     .with_args('/tmp/booscale-local')
     .and_return(False))
    self.assertRaises(
        BadConfigurationException, RemoteHelper.rsync_files,
        'public1', 'booscale', '/tmp/booscale-local', False)
def test_rsync_files_from_dir_that_does_exist(self):
    """rsync_files should succeed when the local source directory exists
    and the rsync invocations themselves succeed."""
    flexmock(os.path)
    (os.path.should_receive('exists')
     .with_args('/tmp/booscale-local')
     .and_return(True))
    # Assume the rsyncs succeed.
    mock_local_state = flexmock(LocalState)
    (mock_local_state.should_receive('shell')
     .with_args(re.compile('^rsync'), False)
     .and_return().ordered())
    RemoteHelper.rsync_files('public1', 'booscale', '/tmp/booscale-local',
                             False)
def test_copy_deployment_credentials_in_cloud(self):
    """copy_deployment_credentials should scp certs/keys for an EC2
    deployment, and additionally copy the oauth2 storage file when a GCE
    deployment uses OAuth credentials."""
    ec2_options = flexmock(
        keyname='key1',
        infrastructure='ec2',
        verbose=True,
    )
    mock_local_state = flexmock(LocalState)
    mock_remote_helper = flexmock(RemoteHelper)
    # Stub every local credential-path lookup.
    for lookup in ('get_secret_key_location', 'get_key_path_from_name',
                   'get_certificate_location', 'get_private_key_location'):
        mock_local_state.should_receive(lookup).and_return()
    mock_remote_helper.should_receive('scp').and_return()
    mock_local_state.should_receive('generate_ssl_cert').and_return()
    fake_popen = flexmock(communicate=lambda: ['hash_id'])
    flexmock(subprocess).should_receive('Popen').and_return(fake_popen)
    mock_remote_helper.should_receive('ssh').and_return()
    flexmock(AppScaleLogger).should_receive('log').and_return()
    RemoteHelper.copy_deployment_credentials('public1', ec2_options)
    # Second pass: GCE with OAuth credentials must also copy oauth2 storage.
    flexmock(GCEAgent).should_receive('get_secrets_type').\
        and_return(CredentialTypes.OAUTH)
    flexmock(os.path).should_receive('exists').and_return(True)
    gce_options = flexmock(
        keyname='key1',
        infrastructure='gce',
        verbose=True,
    )
    mock_local_state.should_receive('get_oauth2_storage_location').and_return()
    RemoteHelper.copy_deployment_credentials('public1', gce_options)
def test_start_remote_appcontroller(self):
    """start_remote_appcontroller should remove old state, start monit,
    copy the controller config, start the controller service, and wait
    for the AppController port to come up."""
    # mock out removing the old json file
    local_state = flexmock(LocalState)
    local_state.should_receive('shell')\
        .with_args(re.compile('^ssh'),False,5,stdin=re.compile('rm -rf'))\
        .and_return()
    # assume we started monit on public1 fine
    local_state.should_receive('shell')\
        .with_args(re.compile('^ssh'), False, 5, stdin=re.compile('monit'))\
        .and_return()
    # also assume that we scp'ed over the god config file fine
    local_state.should_receive('shell')\
        .with_args(re.compile('scp .*controller-17443.cfg*'),False,5)\
        .and_return()
    # and assume we started the AppController on public1 fine
    local_state.should_receive('shell')\
        .with_args(re.compile('^ssh'), False, 5,
            stdin=re.compile('^monit start -g controller'))\
        .and_return()
    # finally, assume the appcontroller comes up after a few tries
    # assume that ssh comes up on the third attempt
    fake_socket = flexmock(name='fake_socket')
    fake_socket.should_receive('connect').with_args(('public1',
        AppControllerClient.PORT)).and_raise(Exception) \
        .and_raise(Exception).and_return(None)
    # `socket` was already flexmock'ed in setUp; this re-stubs the module.
    socket.should_receive('socket').and_return(fake_socket)
    # Mock out additional remote calls.
    local_state.should_receive('shell').with_args('ssh -i /root/.appscale/bookey.key -o LogLevel=quiet -o NumberOfPasswordPrompts=0 -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null root@public1 ', False, 5, stdin='cp /root/appscale/AppController/scripts/appcontroller /etc/init.d/').and_return()
    local_state.should_receive('shell').with_args('ssh -i /root/.appscale/bookey.key -o LogLevel=quiet -o NumberOfPasswordPrompts=0 -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null root@public1 ', False, 5, stdin='chmod +x /etc/init.d/appcontroller').and_return()
    local_state.should_receive('shell').with_args('ssh -i /root/.appscale/boobazblargfoo.key -o LogLevel=quiet -o NumberOfPasswordPrompts=0 -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null root@elastic-ip ', False, 5, stdin='chmod +x /etc/init.d/appcontroller').and_return()
    RemoteHelper.start_remote_appcontroller('public1', 'bookey', False)
def test_copy_local_metadata(self):
    """copy_local_metadata should scp the locations yaml/json files and
    the secret key to the remote machine."""
    mock_local_state = flexmock(LocalState)
    # Assume both locations files copy successfully.
    locations_yaml = '{}/locations-bookey.yaml'.\
        format(RemoteHelper.CONFIG_DIR)
    locations_json = '{}/locations-bookey.json'.\
        format(RemoteHelper.CONFIG_DIR)
    for remote_target in (locations_yaml, locations_json):
        mock_local_state.should_receive('shell').with_args(
            re.compile('^scp .*{}'.format(remote_target)), False, 5)
    mock_local_state.should_receive('shell').with_args(
        re.compile('^scp .*/root/.appscale/locations-bookey.json'), False, 5)
    # Assume the secret file was copied successfully.
    mock_local_state.should_receive('shell').with_args(
        re.compile('^scp .*bookey.secret'), False, 5)
    RemoteHelper.copy_local_metadata('public1', 'bookey', False)
def test_create_user_accounts(self):
# mock out reading the secret key
builtins = flexmock(sys.modules['__builtin__'])
builtins.should_call('open') # set the fall-through
secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
fake_secret = flexmock(name="fake_secret")
fake_secret.should_receive('read').and_return('the secret')
builtins.should_receive('open').with_args(secret_key_location, 'r') \
.and_return(fake_secret)
# mock out reading the locations.json file, and | |
import pandas
import numpy as np
from cornellGrading import cornellQualtrics
import os
def genReadingAssignments(infile, outfile):
    """Randomly assign each candidate to exactly two distinct readers.

    Parameters
    ----------
    infile : str or sequence
        Either the path of an xlsx workbook with sheets "Readers" (column
        'Reader Names') and "Candidates" (column 'Candidate Names'), or a
        2-element sequence of (readers, candidates) arrays.
    outfile : str
        Path of the xlsx workbook to write; one column per reader on a
        sheet named "Assignments".

    Raises
    ------
    AssertionError
        If the final assignment is inconsistent (duplicate reads for one
        reader, an unassigned candidate, or a candidate not read twice).
    """
    # grab all input data
    if isinstance(infile, str):
        workbook = pandas.ExcelFile(infile, engine="openpyxl")
        readers = workbook.parse("Readers")["Reader Names"].values
        candidates = workbook.parse("Candidates")["Candidate Names"].values
        workbook.close()
    else:
        readers = infile[0]
        candidates = infile[1]
    # Each person needs to be read by 2 readers.
    nperreader = int(np.round(len(candidates) * 2 / len(readers)))
    # Duplicate the candidate pool, shuffle, and deal nperreader to each
    # reader, reshuffling whenever a reader would get a duplicate.
    # NOTE(review): this rejection loop can be slow for unlucky sizes.
    clist = np.hstack((candidates.copy(), candidates.copy()))
    np.random.shuffle(clist)
    out = {}
    for reader in readers:
        tmp = clist[:nperreader]
        while np.unique(tmp).size != tmp.size:
            np.random.shuffle(clist)
            tmp = clist[:nperreader]
        out[reader] = tmp
        clist = clist[nperreader:]
    # Hand any leftover copies to random readers who don't already have them.
    if len(clist) > 0:
        for c in clist:
            r = np.random.choice(readers, size=1)[0]
            while c in out[r]:
                r = np.random.choice(readers, size=1)[0]
            out[r] = np.hstack((out[r], c))
    # final consistency check
    asslist = []
    for key, val in out.items():
        assert np.unique(val).size == val.size, "{} has non-unique list.".format(key)
        asslist = np.hstack((asslist, val))
    assert np.all(
        np.unique(asslist) == np.sort(candidates)
    ), "Not all candidates assigned."
    for c in candidates:
        assert np.where(asslist == c)[0].size == 2, "{} not assigned twice.".format(c)
    # Write assignments out to disk, one column per reader.
    outdf = pandas.DataFrame()
    for key, val in out.items():
        outdf = pandas.concat([outdf, pandas.DataFrame({key: val})], axis=1)
    # BUG FIX: pandas.ExcelWriter does not accept an `options` kwarg and
    # ExcelWriter.save() was removed in pandas 2.0; the context manager
    # saves and closes on exit.
    with pandas.ExcelWriter(outfile) as ew:
        outdf.to_excel(ew, sheet_name="Assignments", index=False)
def genRubricSurvey(surveyname, candidates, rubrics, scoreOptions, shareWith=None):
    """Create, publish, and activate a rubric-scoring Qualtrics survey.

    surveyname (str): survey title (also shown as a header question)
    candidates (iterable): candidate names for the selection dropdown
    rubrics (iterable): one multiple-choice question text per rubric item
    scoreOptions (iterable): selectable score values shared by all rubrics
    shareWith (str) optional: Qualtrics user id to share the survey with

    Returns the public survey link (str).
    """
    # connect and create survey
    c = cornellQualtrics()
    surveyId = c.createSurvey(surveyname)
    # candidate dropdown
    desc = "Select Candidate Name"
    choices = {}
    for j, choice in enumerate(candidates):
        choices[str(j + 1)] = {"Display": choice}
    choiceOrder = list(range(1, len(choices) + 1))
    questionDef = {
        "QuestionText": desc,
        "DefaultChoices": False,
        "DataExportTag": "Q1",
        "QuestionType": "MC",
        "Selector": "DL",
        "Configuration": {"QuestionDescriptionOption": "UseText"},
        "QuestionDescription": desc,
        "Choices": choices,
        "ChoiceOrder": choiceOrder,
        "Validation": {
            "Settings": {
                "ForceResponse": "ON",
                "ForceResponseType": "ON",
                "Type": "None",
            }
        },
        "Language": [],
        "QuestionID": "QID1",
        "QuestionText_Unsafe": desc,
    }
    # keep the id: this question is updated again below once quotas exist
    qid1 = c.addSurveyQuestion(surveyId, questionDef)
    # display-only ("DB") header question showing the survey title
    questionDef = {
        'QuestionText': surveyname,
        'DefaultChoices': False,
        'DataExportTag': 'Q0',
        'QuestionType': 'DB',
        'Selector': 'TB',
        'Configuration': {'QuestionDescriptionOption': 'UseText'},
        'QuestionDescription': surveyname,
        'ChoiceOrder': [],
        'Validation': {'Settings': {'Type': 'None'}},
        'GradingData': [],
        'Language': [],
        'NextChoiceId': 4,
        'NextAnswerId': 1,
        'QuestionID': 'QID0',
        'QuestionText_Unsafe': surveyname}
    _ = c.addSurveyQuestion(surveyId, questionDef)
    # rubric multiple choice: same score choices for every rubric question
    choices = {}
    for j, choice in enumerate(scoreOptions):
        choices[str(j + 1)] = {"Display": str(choice)}
    choiceOrder = list(range(1, len(choices) + 1))
    # one forced-response question per rubric item (QID2..QID(len+1))
    for j in range(1, len(rubrics) + 1):
        desc = rubrics[j - 1]
        questionDef = {
            "QuestionText": desc,
            "DataExportTag": "Q%d" % (j + 1),
            "QuestionType": "MC",
            "Selector": "SAVR",
            "SubSelector": "TX",
            "Configuration": {"QuestionDescriptionOption": "UseText"},
            "QuestionDescription": desc,
            "Choices": choices,
            "ChoiceOrder": choiceOrder,
            "Validation": {
                "Settings": {
                    "ForceResponse": "ON",
                    "ForceResponseType": "ON",
                    "Type": "None",
                }
            },
            "Language": [],
            "QuestionID": "QID%d" % (j + 1),
            "DataVisibility": {"Private": False, "Hidden": False},
            "QuestionText_Unsafe": desc,
        }
        c.addSurveyQuestion(surveyId, questionDef)
    # optional free-text comments question at the end
    questionDef = {
        'QuestionText': 'Comments',
        'DefaultChoices': False,
        'DataExportTag': "Q%d" % (len(rubrics) + 2),
        'QuestionType': 'TE',
        'Selector': 'SL',
        'Configuration': {'QuestionDescriptionOption': 'UseText'},
        'QuestionDescription': 'Comments',
        'Validation': {'Settings': {'ForceResponse': 'OFF',
                                    'ForceResponseType': 'ON',
                                    'Type': 'None'}},
        'GradingData': [],
        'Language': [],
        'NextChoiceId': 4,
        'NextAnswerId': 1,
        'SearchSource': {'AllowFreeResponse': 'false'},
        'QuestionID': "QID%d" % (len(rubrics) + 2),
        'QuestionText_Unsafe': 'Comments'}
    _ = c.addSurveyQuestion(surveyId, questionDef)
    # generate quotas: one single-occurrence quota per candidate, met as soon
    # as that candidate is selected once in the Q1 dropdown
    quotaGroupName = "q1quotas"
    quotaGroupId = c.addSurveyQuotaGroup(surveyId, quotaGroupName)
    quotas = []
    for j, s in enumerate(candidates):
        quotaDef = {
            "Name": "name{}quota".format(j + 1),
            "Occurrences": 1,
            "Logic": {
                "0": {
                    "0": {
                        "LogicType": "Question",
                        "QuestionID": "QID1",
                        "QuestionIsInLoop": "no",
                        "ChoiceLocator": "q://QID1/SelectableChoice/{}".format(j + 1),
                        "Operator": "Selected",
                        "QuestionIDFromLocator": "QID1",
                        "LeftOperand": "q://QID1/SelectableChoice/{}".format(j + 1),
                        "Type": "Expression",
                        "Description": "",
                    },
                    "Type": "If",
                },
                "Type": "BooleanExpression",
            },
            "LogicType": "Simple",
            "QuotaAction": "ForBranching",
            "ActionInfo": {
                "0": {
                    "0": {
                        "ActionType": "ForBranching",
                        "Type": "Expression",
                        "LogicType": "QuotaAction",
                    },
                    "Type": "If",
                },
                "Type": "BooleanExpression",
            },
            "QuotaRealm": "Survey",
            "Count": 0,
        }
        quotas.append(c.addSurveyQuota(surveyId, quotaDef))
    # and now we can update Q1 with the quotas: each choice is only displayed
    # while its quota is not yet met, so already-scored names disappear
    desc = "Select Candidate Name"
    choices = {}
    for j, choice in enumerate(candidates):
        choices[str(j + 1)] = {
            "Display": choice,
            "DisplayLogic": {
                "0": {
                    "0": {
                        "LogicType": "Quota",
                        "QuotaID": quotas[j],
                        "QuotaType": "Simple",
                        "Operator": "QuotaNotMet",
                        "LeftOperand": "qo://{}/QuotaNotMet".format(quotas[j]),
                        "QuotaName": "name{}quota".format(j + 1),
                        "Type": "Expression",
                        "Description": "",
                    },
                    "Type": "If",
                },
                "Type": "BooleanExpression",
                "inPage": False,
            },
        }
    choiceOrder = list(range(1, len(choices) + 1))
    questionDef = {
        "QuestionText": desc,
        "DefaultChoices": False,
        "DataExportTag": "Q1",
        "QuestionType": "MC",
        "Selector": "DL",
        "Configuration": {"QuestionDescriptionOption": "UseText"},
        "QuestionDescription": desc,
        "Choices": choices,
        "ChoiceOrder": choiceOrder,
        "Validation": {
            "Settings": {
                "ForceResponse": "ON",
                "ForceResponseType": "ON",
                "Type": "None",
            }
        },
        "Language": [],
        "QuestionID": "QID1",
        "QuestionText_Unsafe": desc,
    }
    c.updateSurveyQuestion(surveyId, qid1, questionDef)
    if shareWith:
        c.shareSurvey(surveyId, shareWith)
    # publish & activate
    c.publishSurvey(surveyId)
    c.activateSurvey(surveyId)
    link = "https://cornell.qualtrics.com/jfe/form/%s" % surveyId
    return link
def genRankSurvey(readername, candidates, binsize, shareWith=None):
    """Create and activate a drag-and-drop bin-ranking survey for one reader.

    readername (str): reader the survey is for (used in the survey title)
    candidates (iterable): candidate names to be placed into bins
    binsize (int): exact number of students required in each bin
    shareWith (str) optional: Qualtrics user id to share the survey with

    Returns the public survey link (str).
    """
    # connect to Qualtrics and create the (empty) survey
    c = cornellQualtrics()
    surveyname = "Ranking Survey for {}".format(readername)
    surveyId = c.createSurvey(surveyname)
    desc = (
        u"This survey is for: {0}.\n\n"
        u"Rank students into the top 50%-ile bins. "
        u"Put exactly {1} students in each bin. "
        u"All uncategorized students will automatically "
        u"be placed in the bottom 50%-ile. Ordering within a bin "
        u"does not matter.".format(readername, binsize)
    )
    # one draggable choice per candidate, keyed "1".."N"
    choices = {
        str(num): {"Display": cand} for num, cand in enumerate(candidates, start=1)
    }
    choiceOrder = list(range(1, len(choices) + 1))
    # single drag-and-drop question with five 10%-ile group columns; the
    # validation settings force exactly binsize students per group
    questionDef = {
        "QuestionText": desc,
        "DefaultChoices": False,
        "DataExportTag": "Q1",
        "QuestionID": "QID1",
        "QuestionType": "PGR",
        "Selector": "DragAndDrop",
        "SubSelector": "Columns",
        "Configuration": {
            "QuestionDescriptionOption": "UseText",
            "Stack": False,
            "StackItemsInGroups": False,
        },
        "QuestionDescription": desc,
        "Choices": choices,
        "ChoiceOrder": choiceOrder,
        "Validation": {
            "Settings": {
                "ForceResponse": "ON",
                "Type": "GroupChoiceRange",
                "MinChoices": "{}".format(binsize),
                "MaxChoices": "{}".format(binsize),
            }
        },
        "GradingData": [],
        "Language": [],
        "NextChoiceId": len(choices) + 1,
        "NextAnswerId": 6,
        "Groups": ["Top 10%", "Top 20%", "Top 30%", "Top 40%", "Top 50%"],
        "NumberOfGroups": 5,
        "QuestionText_Unsafe": desc,
    }
    c.addSurveyQuestion(surveyId, questionDef)
    if shareWith:
        c.shareSurvey(surveyId, shareWith)
    # make the survey live
    c.publishSurvey(surveyId)
    c.activateSurvey(surveyId)
    return "https://cornell.qualtrics.com/jfe/form/%s" % surveyId
def getRankSurveyRes(assignments, outfile, binsize=3):
    """Collect every reader's ranking-survey results and write them to disk.

    Parameters
    ----------
    assignments : pandas.DataFrame
        One column per reader (column name = reader name) listing the
        candidates assigned to that reader (as written by
        genReadingAssignments).
    outfile : str
        Path of the xlsx file to write (sheet "Ranks" with columns
        First Name, Last Name, Rank 1, Rank 2).
    binsize : int, optional
        Expected number of students in each rank bin; must match the
        binsize used when the surveys were generated.  Defaults to 3
        (the value previously hard-coded here).
    """
    c = cornellQualtrics()
    outdict = {}
    for readername in assignments.columns:
        surveyname = "Ranking Survey for {}".format(readername)
        surveyId = c.getSurveyId(surveyname)
        tmpdir = c.exportSurvey(surveyId)
        tmpfile = os.path.join(tmpdir, surveyname + ".csv")
        assert os.path.isfile(tmpfile), "Survey results not where expected."
        res = pandas.read_csv(tmpfile, header=[0, 1, 2])
        if len(res) == 0:
            # reader has not submitted a response yet
            continue
        allnames = np.array([])
        for j in range(5):
            # columns belonging to the j-th drag-and-drop group, i.e. the
            # top (j+1)*10 %-ile bin
            gcolinds = np.array(
                ["Q1_{}_GROUP".format(j) in col
                 for col in res.columns.get_level_values(0)]
            )
            gcols = res.columns.get_level_values(0)[gcolinds]
            # use the most recent (last) response row
            names = res[gcols].iloc[-1].values
            # NaN != NaN, so this drops empty group slots
            names = names[names == names]
            assert len(names) == binsize
            allnames = np.hstack((allnames, names))
            for n in names:
                if n in outdict:
                    outdict[n] += ((j + 1) * 10,)
                else:
                    outdict[n] = ((j + 1) * 10,)
        # anyone this reader left uncategorized falls in the bottom 50%
        unranked = np.array(list(set(assignments[readername].values) - set(allnames)))
        unranked = unranked[unranked != "nan"]
        for n in unranked:
            if n in outdict:
                outdict[n] += (100,)
            else:
                outdict[n] = (100,)
    # build output; keys are split on ", " into (first, last) name parts
    # NOTE(review): assumes the stored names are "First, Last" -- confirm
    # against the candidate-name format used upstream.
    outnames = []
    outrank1 = []
    outrank2 = []
    for key, val in outdict.items():
        outnames.append(key.split(", "))
        outrank1.append(val[0])
        if len(val) == 2:
            outrank2.append(val[1])
        else:
            # only one of the two readers has ranked this candidate so far
            outrank2.append(np.nan)
    outnames = np.array(outnames)
    out = pandas.DataFrame(
        {
            "First Name": outnames[:, 0],
            # BUGFIX: column header was misspelled "Lat Name"
            "Last Name": outnames[:, 1],
            "Rank 1": outrank1,
            "Rank 2": outrank2,
        }
    )
    # ExcelWriter's ``options`` kwarg and ``.save()`` were removed in pandas 2.0
    with pandas.ExcelWriter(outfile) as ew:
        out.to_excel(ew, sheet_name="Ranks", index=False)
def genRankDragSurvey(surveyname, candidates, shareWith=None):
    """Create and activate a full rank-ordering drag-and-drop survey.

    surveyname (str): title of the survey
    candidates (iterable): candidate names to be rank ordered
    shareWith (str) optional: Qualtrics user id to share the survey with

    Returns the public survey link (str).
    """
    # connect to Qualtrics and create the (empty) survey
    c = cornellQualtrics()
    surveyId = c.createSurvey(surveyname)
    desc = "Drag Students into your preferred rank order. Note that the bottom half is undifferentiated."
    # one draggable choice per candidate, keyed "1".."N"
    choices = {
        str(num): {"Display": cand} for num, cand in enumerate(candidates, start=1)
    }
    choiceOrder = list(range(1, len(choices) + 1))
    # single rank-order ("RO") drag-and-drop question; response is optional
    questionDef = {
        "QuestionText": desc,
        "DefaultChoices": False,
        "DataExportTag": "Q1",
        "QuestionType": "RO",
        "Selector": "DND",
        "SubSelector": "TX",
        "Configuration": {"QuestionDescriptionOption": "UseText"},
        "QuestionDescription": desc,
        "Choices": choices,
        "ChoiceOrder": choiceOrder,
        "Validation": {"Settings": {"ForceResponse": "OFF", "Type": "None"}},
        "Language": [],
        "QuestionID": "QID1",
        "QuestionText_Unsafe": desc,
    }
    c.addSurveyQuestion(surveyId, questionDef)
    if shareWith:
        c.shareSurvey(surveyId, shareWith)
    # make the survey live
    c.publishSurvey(surveyId)
    c.activateSurvey(surveyId)
    return "https://cornell.qualtrics.com/jfe/form/%s" % surveyId
def binRubricSurveyResults(surveyname, outfile):
c = cornellQualtrics()
surveyId = c.getSurveyId(surveyname)
tmpdir = c.exportSurvey(surveyId)
tmpfile = os.path.join(tmpdir, surveyname + ".csv")
assert os.path.isfile(tmpfile), "Survey results not where expected."
res = pandas.read_csv(tmpfile, header=[0, 1, 2])
namecol = res.columns.get_level_values(0)[
np.array(
["Select Candidate Name" in c for c in res.columns.get_level_values(1)]
)
]
names = np.array([n[0] for n in res[namecol].values])
# calculate total scores
quescolinds = np.array(
["Rubric" in c | |
ZCA(bias=0.1)
zca.fit(X)
self.norm = zca
elif self.normalization == '-1:1':
self.norm = lambda x: ((x - np.min(x)) / (np.max(x) - np.min(x)) * 2.0 - 1.0)
def preprocess(self, X):
'''Take an image in X, and transform it with local contrast normalization.
Parameters
----------
X : numpy.ndarray
image to perform local contrast normalization on
Returns
-------
img : numpy.ndarray
Local contrast normalized image
'''
res = None
try:
res = self.norm.transform(X)
except:
res = self.norm(X)
pass
return res
def features_for_layer(self, X, layer_num):
if layer_num in self.fns.keys():
fn = self.fns[layer_num]
else:
layer_output = lasagne.layers.get_output(
lasagne.layers.get_all_layers(
self.net.model)[layer_num],
self.net.x, deterministic=True)
fn = theano.function([self.net.x], [layer_output])
self.fns[layer_num] = fn
out = fn(lasagne.utils.floatX(X))
return out
def features(self, X):
'''Get siamese net features for the images in X.
Parameters
----------
X : numpy.ndarray
N x C x W x H tensor defining the N images of W x H.
For colorscale, C = 3, while for grayscale, C = 1.
Returns
-------
features : numpy.ndarray
N x M array of features
'''
return self.fn(X)
def predict(self, X):
'''Predict whether images contain the same face or not.
Parameters
----------
X : numpy.ndarray
2*N x C x W x H tensor defining the N sequence of image pairs W x H.
For colorscale, C = 3, while for grayscale, C = 1.
Returns
-------
predictions : numpy.ndarray
N x 1 vector of True/False predictions of whether the image
pairs contain the same face or not.
'''
features = self.fn(X)
Xs_L1 = np.abs(features[0][0::2] - features[0][1::2])
final = self.clf.predict(Xs_L1)
return final
def get_normalization(self):
'''Return the normalization type of the pre-trained network.
Returns
-------
normalization_type : string
'LCN', 'LCN-', '-1:1', 'ZCA'
'''
return self.result['params']['normalization']
def get_crop(self):
'''Return the crop type of the pre-trained network.'''
return self.result['params']['crop']
def get_resolution(self):
'''Return the resolution of the images required by the pre-trained network.
Returns
-------
(%d, %d) : tuple
Resolution of the image
'''
return self.result['params']['resolution']
def get_colorscale(self):
'''Return the colorscale of the images required by the pre-trained network
Returns
-------
is_grayscale : bool
True if grayscale, else, False for RGB color.
'''
return self.result['params']['b_convert_to_grayscale']
class ConvSiameseNet:
    """Builds and trains a siamese convolutional network.

    Several alternative architectures (Chopra 2005, Hani 2014, DeepID,
    or a custom stack) are selected via the ``use_*_model`` methods, with
    an optional spatial-transformer front end, a choice of distance
    function ('cosine', 'l1', 'l2'), and a choice of nonlinearity
    ('scaled_tanh', 'rectify').

    Attributes set in ``__init__``:
        input_channels, input_width, input_height -- input image geometry
        n_out -- dimensionality of the output feature embedding
        batch_size -- fixed batch size, or None for variable batches
        l_in -- lasagne InputLayer for the image tensor
        x, y -- theano symbolic input tensor / label vector
        srng -- theano MRG random stream
        loss_fn -- contrastive loss
        distance_fn -- pairwise distance (cosine / L1 / L2)
        nonlinearity -- layer activation
        weight_init -- weight initializer for all layers

    Other attributes (model, batch_slice, index, learning_rate, update,
    hyperparameter_margin, hyperparameter_threshold, train_x, test_x,
    validation_x) are created later by the model-selection and training
    methods -- see their definitions.
    """
def __init__(self,
input_channels,
input_width,
input_height,
n_out,
batch_size=None,
distance_fn='l1',
nonlinearity='scaled_tanh'):
"""Builds a ConvSiameseNet for training.
Parameters
----------
input_channels : TYPE
Description
input_width : TYPE
Description
input_height : TYPE
Description
n_out : TYPE
Description
batch_size : TYPE, optional
Description
distance_fn : str, optional
Description
nonlinearity : str, optional
Description
Raises
------
ValueError
Description
"""
self.input_channels = input_channels
self.input_width = input_width
self.input_height = input_height
self.n_out = n_out
self.batch_size = batch_size
self.l_in = lasagne.layers.InputLayer(
shape=(None, input_channels, input_width, input_height))
self.n_out = n_out
self.srng = theano.sandbox.rng_mrg.MRG_RandomStreams()
self.loss_fn = contrastive_loss
if distance_fn.lower() == 'cosine':
self.distance_fn = distance_cosine
elif distance_fn.lower() == 'l1':
self.distance_fn = distance_L1
elif distance_fn.lower() == 'l2':
self.distance_fn = distance_L2
else:
raise ValueError(
'Must specify distance as either "cosine", "l1", or "l2".')
self.x = T.tensor4('x')
self.y = T.ivector('y')
if nonlinearity == 'scaled_tanh':
self.nonlinearity = lasagne.nonlinearities.ScaledTanH(
scale_in=2. / 3, scale_out=1.7159)
elif nonlinearity == 'rectify':
self.nonlinearity = lasagne.nonlinearities.rectify
else:
raise ValueError(
'Must specify nonlinearity as either "scaled_tanh" or "rectify".')
self.weight_init = lasagne.init.Normal(std=0.05, mean=0.0)
    def use_hani_model(self, dropout_pct=0.0, b_spatial=False):
        """Build the Hani 2014 architecture and store it in ``self.model``.

        Parameters
        ----------
        dropout_pct : float, optional
            Dropout probability forwarded to the network builder
        b_spatial : bool, optional
            If True, prepend a spatial transformer network

        Returns nothing; the previous docstring's "Returns" section was an
        auto-generated stub.
        """
        self.model = self.get_hani_2014_net(
            self.l_in, dropout_pct=dropout_pct, b_spatial=b_spatial)
    def use_custom_model(self, b_spatial=False):
        """Build the custom architecture and store it in ``self.model``.

        Parameters
        ----------
        b_spatial : bool, optional
            If True, prepend a spatial transformer network

        Returns nothing; the previous docstring's "Returns" section was an
        auto-generated stub.
        """
        self.model = self.get_custom_net(self.l_in, b_spatial=b_spatial)
    def use_chopra_model(self, dropout_pct=0.0, b_spatial=False):
        """Build the Chopra 2005 architecture and store it in ``self.model``.

        Parameters
        ----------
        dropout_pct : float, optional
            Dropout probability forwarded to the network builder
        b_spatial : bool, optional
            If True, prepend a spatial transformer network

        Returns nothing; the previous docstring's "Returns" section was an
        auto-generated stub.
        """
        self.model = self.get_chopra_net(
            self.l_in, dropout_pct=dropout_pct, b_spatial=b_spatial)
    def use_deepid_model(self, b_spatial=False):
        """Build the DeepID architecture and store it in ``self.model``.

        Parameters
        ----------
        b_spatial : bool, optional
            If True, prepend a spatial transformer network

        Returns nothing; the previous docstring's "Returns" section was an
        auto-generated stub.
        """
        self.model = self.get_deep_id_net(self.l_in, b_spatial=b_spatial)
    def get_spatial_transform_net(self, input_layer):
        """Build a spatial-transformer front end for ``input_layer``.

        A small localization network (pool, conv, pool, conv, dense)
        regresses the 6 parameters of an affine transform, which a
        TransformerLayer then applies to the input with a 2x downsample.

        Parameters
        ----------
        input_layer : lasagne layer
            Layer providing the images to transform

        Returns
        -------
        transformed_input_layer : lasagne.layers.TransformerLayer
            The spatially transformed (cropped) input
        """
        # http://lasagne.readthedocs.org/en/latest/modules/layers/special.html?highlight=trainable#lasagne.layers.TransformerLayer
        # Localization network
        # Spatial Transformer Networks <NAME>, <NAME>, <NAME>, <NAME> Submitted on 5 Jun 2015
        # Here we set up the layer to initially do the identity transform,
        # similarly to [R34]. Note that you will want to use a localization
        # with linear output. If the output from the localization networks
        # is [t1, t2, t3, t4, t5, t6] then t1 and t5 determines zoom, t2
        # and t4 determines skewness, and t3 and t6 move the center
        # position.
        b = np.zeros((2, 3), dtype=theano.config.floatX)
        b[0, 0] = 1
        b[1, 1] = 1
        b = b.flatten()
        loc_l1 = lasagne.layers.MaxPool2DLayer(input_layer, pool_size=(2, 2))
        loc_l2 = lasagne.layers.Conv2DLayer(
            loc_l1,
            num_filters=20,
            filter_size=(5, 5),
            W=self.weight_init
        )
        loc_l3 = lasagne.layers.MaxPool2DLayer(loc_l2, pool_size=(2, 2))
        loc_l4 = lasagne.layers.Conv2DLayer(
            loc_l3,
            num_filters=20,
            filter_size=(5, 5),
            W=self.weight_init
        )
        loc_l5 = lasagne.layers.DenseLayer(
            loc_l4,
            num_units=50,
            W=self.weight_init
        )
        # linear (identity) output: the 6 affine parameters, biased to start
        # at the identity transform via b above
        loc_out = lasagne.layers.DenseLayer(
            loc_l5,
            num_units=6,
            b=b,
            W=self.weight_init,
            nonlinearity=lasagne.nonlinearities.identity
        )
        # Transformer network
        transformed_input_layer = lasagne.layers.TransformerLayer(
            input_layer, loc_out, downsample_factor=2.0)
        print('Transformed Input Shape: ',
              transformed_input_layer.output_shape)
        return transformed_input_layer
def get_chopra_net(self, input_layer, dropout_pct=0.0, b_spatial=False):
'''Return a lasagne network defining the siamese network
<NAME>., <NAME>., & <NAME>. (2005). Learning a similiarty
metric discriminatively, with application to face verification.
Proceedings of IEEE Conference on Computer Vision and Pattern
Recognition, 349–356.
Modifications
-------------
dropout_pct -- Instead of a fixed connection layer, use dropout
with this much percentage [0.5]
b_spatial -- Prepend a spatial transformer network which applies
an affine transformation and a 2x crop [False]
Args
----
input_layer : TYPE
Description
dropout_pct : float, optional
Description
b_spatial : bool, optional
Description
Deleted Parameters
------------------
input_layer (TYPE) : Description
dropout_pct (float : Description
optional), b_spatial (bool : Description
'''
l_conv1 = None
if b_spatial:
# returns a 15x40x40
l_conv1 = lasagne.layers.Conv2DLayer(
self.get_spatial_transform_net(input_layer),
num_filters=15,
filter_size=(7, 7),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
else:
# returns a 15x40x40
l_conv1 = lasagne.layers.Conv2DLayer(
input_layer,
num_filters=15,
filter_size=(7, 7),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
# returns a 15x20x20
l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1, pool_size=(2, 2))
# returns a 45x15x15
l_conv2 = lasagne.layers.Conv2DLayer(
l_pool1,
num_filters=45,
filter_size=(6, 6),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
# returns a 45x5x5
l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2, pool_size=(3, 3))
l_pool2_dropout = lasagne.layers.DropoutLayer(l_pool2, p=dropout_pct)
# returns a 250x1x1
l_conv3 = lasagne.layers.Conv2DLayer(
l_pool2_dropout,
num_filters=250,
filter_size=(5, 5),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_hidden = lasagne.layers.DenseLayer(
l_conv3,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
model = lasagne.layers.DenseLayer(
l_hidden,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
return model
def get_custom_net(self, input_layer, b_spatial=False):
'''Return a lasagne network defining a custom siamese network
Modifications
-------------
dropout_pct -- Instead of a fixed connection layer, use dropout
with this much percentage [0.5]
b_spatial -- Prepend a spatial transformer network which applies an
affine transformation and a 2x crop [False]
Args
----
input_layer : TYPE
Description
b_spatial : bool, optional
Description
Deleted Parameters
------------------
input_layer (TYPE) : Description
b_spatial (bool, optional) : Description
'''
l_conv1a = None
if b_spatial:
l_conv1a = lasagne.layers.Conv2DLayer(
self.get_spatial_transform_net(input_layer),
num_filters=16,
filter_size=(3, 3),
nonlinearity=self.relu,
W=self.weight_init
)
else:
l_conv1a = lasagne.layers.Conv2DLayer(
input_layer,
num_filters=16,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_conv1b = lasagne.layers.Conv2DLayer(
l_conv1a,
num_filters=32,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1b, pool_size=(2, 2))
l_conv2a = lasagne.layers.Conv2DLayer(
l_pool1,
num_filters=32,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_conv2b = lasagne.layers.Conv2DLayer(
l_conv2a,
num_filters=64,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2b, pool_size=(2, 2))
l_conv3a = lasagne.layers.Conv2DLayer(
l_pool2,
num_filters=64,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_conv3b = lasagne.layers.Conv2DLayer(
l_conv3a,
num_filters=128,
filter_size=(3, 3),
nonlinearity=self.nonlinearity,
W=self.weight_init
)
l_pool3 = lasagne.layers.MaxPool2DLayer(l_conv3b, pool_size=(2, 2))
l_full4 = lasagne.layers.DenseLayer(
l_pool3,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
model = lasagne.layers.DenseLayer(
l_full4,
num_units=self.n_out,
nonlinearity=self.nonlinearity,
W=self.weight_init
)
return model
# this model actually requires a different training | |
"":
try:
nodeid = int(self.NodeID.GetValue(), 16)
except:
message = _("Node ID must be integer!")
if message != "":
message = wx.MessageDialog(self, message, _("ERROR"), wx.OK|wx.ICON_ERROR)
message.ShowModal()
message.Destroy()
self.NodeName.SetFocus()
else:
self.EndModal(wx.ID_OK)
def GetValues(self):
name = self.NodeName.GetValue()
nodeid = 0
if self.NodeID.GetValue() != "":
nodeid = int(self.NodeID.GetValue(), 16)
type = NODE_TYPES_DICT[self.Type.GetStringSelection()]
description = self.Description.GetValue()
return name, nodeid, type, description
def GetProfile(self):
name = self.Profile.GetStringSelection()
if name == _("None"):
name = "None"
return name, self.ListProfile[name]
def GetNMTManagement(self):
if self.NMT_None.GetValue():
return "None"
elif self.NMT_NodeGuarding.GetValue():
return "NodeGuarding"
elif self.NMT_Heartbeat.GetValue():
return "Heartbeat"
return None
def GetOptions(self):
options = []
if self.DS302.GetValue():
options.append("DS302")
if self.GenSYNC.GetValue():
options.append("GenSYNC")
if self.Emergency.GetValue():
options.append("Emergency")
if self.SaveConfig.GetValue():
options.append("SaveConfig")
if self.StoreEDS.GetValue():
options.append("StoreEDS")
return options
    def OnProfileChoice(self, event):
        """Handle profile selection; "Other" opens a picker for a .prf file."""
        if self.Profile.GetStringSelection() == _("Other"):
            dialog = wx.FileDialog(self, _("Choose a file"), self.Directory, "", _("OD Profile files (*.prf)|*.prf|All files|*.*"), wx.OPEN|wx.CHANGE_DIR)
            dialog.ShowModal()
            filepath = dialog.GetPath()
            dialog.Destroy()
            if os.path.isfile(filepath):
                # remember the new profile and insert it two slots from the
                # end of the choice list, then select it
                name = os.path.splitext(os.path.basename(filepath))[0]
                self.ListProfile[name] = filepath
                length = self.Profile.GetCount()
                self.Profile.Insert(name, length - 2)
                self.Profile.SetStringSelection(name)
            else:
                # picker cancelled or invalid path: fall back to "None"
                self.Profile.SetStringSelection(_("None"))
        event.Skip()
#-------------------------------------------------------------------------------
# ADD Slave to NodeList Dialog
#-------------------------------------------------------------------------------
# Unique wx ids for AddSlaveDialog and its controls (one per widget
# created in _init_ctrls below).
[ID_ADDSLAVEDIALOG, ID_ADDSLAVEDIALOGSLAVENAME,
 ID_ADDSLAVEDIALOGSLAVENODEID, ID_ADDSLAVEDIALOGEDSFILE,
 ID_ADDSLAVEDIALOGIMPORTEDS, ID_ADDSLAVEDIALOGSTATICTEXT1,
 ID_ADDSLAVEDIALOGSTATICTEXT2, ID_ADDSLAVEDIALOGSTATICTEXT3,
] = [wx.NewId() for _init_ctrls in range(8)]
class AddSlaveDialog(wx.Dialog):
    """Dialog for adding a slave node (name, node id, EDS file) to a NodeList.

    Callers must invoke SetNodeList() before showing the dialog so that the
    EDS file dropdown can be populated, and read the result via GetValues().
    """

    # --- boilerplate sizer/control construction (wx designer style) -------
    def _init_coll_flexGridSizer1_Items(self, parent):
        parent.AddSizer(self.MainSizer, 0, border=20, flag=wx.GROW|wx.TOP|wx.LEFT|wx.RIGHT)
        parent.AddSizer(self.ButtonSizer, 0, border=20, flag=wx.ALIGN_RIGHT|wx.BOTTOM|wx.LEFT|wx.RIGHT)

    def _init_coll_flexGridSizer1_Growables(self, parent):
        parent.AddGrowableCol(0)
        parent.AddGrowableRow(0)

    def _init_coll_MainSizer_Items(self, parent):
        parent.AddWindow(self.staticText1, 0, border=5, flag=wx.GROW|wx.BOTTOM)
        parent.AddWindow(self.SlaveName, 0, border=10, flag=wx.GROW|wx.BOTTOM)
        parent.AddWindow(self.staticText2, 0, border=5, flag=wx.GROW|wx.BOTTOM)
        parent.AddWindow(self.SlaveNodeID, 0, border=10, flag=wx.GROW|wx.BOTTOM)
        parent.AddWindow(self.staticText3, 0, border=5, flag=wx.GROW|wx.BOTTOM)
        parent.AddSizer(self.BottomSizer, 0, border=0, flag=wx.GROW)

    def _init_coll_BottomSizer_Items(self, parent):
        parent.AddWindow(self.EDSFile, 0, border=4, flag=wx.GROW|wx.TOP|wx.BOTTOM)
        parent.AddWindow(self.ImportEDS, 0, border=0, flag=0)

    def _init_coll_BottomSizer_Growables(self, parent):
        parent.AddGrowableCol(0)
        parent.AddGrowableRow(0)

    def _init_sizers(self):
        # outer grid holds the form (MainSizer) above the OK/Cancel row
        self.flexGridSizer1 = wx.FlexGridSizer(cols=1, hgap=0, rows=2, vgap=10)
        self.MainSizer = wx.BoxSizer(wx.VERTICAL)
        self.BottomSizer = wx.FlexGridSizer(cols=2, hgap=5, rows=1, vgap=0)
        self._init_coll_flexGridSizer1_Items(self.flexGridSizer1)
        self._init_coll_flexGridSizer1_Growables(self.flexGridSizer1)
        self._init_coll_MainSizer_Items(self.MainSizer)
        self._init_coll_BottomSizer_Items(self.BottomSizer)
        self._init_coll_BottomSizer_Growables(self.BottomSizer)
        self.SetSizer(self.flexGridSizer1)

    def _init_ctrls(self, prnt):
        wx.Dialog.__init__(self, id=ID_ADDSLAVEDIALOG,
              name='AddSlaveDialog', parent=prnt, pos=wx.Point(376, 223),
              size=wx.Size(300, 250), style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER,
              title=_('Add a slave to nodelist'))
        self.SetClientSize(wx.Size(300, 250))
        self.staticText1 = wx.StaticText(id=ID_ADDSLAVEDIALOGSTATICTEXT1,
              label=_('Slave Name:'), name='staticText1', parent=self,
              pos=wx.Point(0, 0), size=wx.Size(0, 17), style=0)
        self.SlaveName = wx.TextCtrl(id=ID_ADDSLAVEDIALOGSLAVENAME,
              name='SlaveName', parent=self, pos=wx.Point(0, 0),
              size=wx.Size(0, 24), style=0)
        self.staticText2 = wx.StaticText(id=ID_ADDSLAVEDIALOGSTATICTEXT2,
              label=_('Slave Node ID:'), name='staticText2', parent=self,
              pos=wx.Point(0, 0), size=wx.Size(0, 17), style=0)
        self.SlaveNodeID = wx.TextCtrl(id=ID_ADDSLAVEDIALOGSLAVENODEID,
              name='SlaveName', parent=self, pos=wx.Point(0, 0),
              size=wx.Size(0, 24), style=wx.ALIGN_RIGHT)
        self.staticText3 = wx.StaticText(id=ID_ADDSLAVEDIALOGSTATICTEXT3,
              label=_('EDS File:'), name='staticText3', parent=self,
              pos=wx.Point(0, 0), size=wx.Size(0, 17), style=0)
        self.EDSFile = wx.ComboBox(id=ID_ADDSLAVEDIALOGEDSFILE,
              name='EDSFile', parent=self, pos=wx.Point(0, 0),
              size=wx.Size(0, 28), style=wx.CB_READONLY)
        self.ImportEDS = wx.Button(id=ID_ADDSLAVEDIALOGIMPORTEDS, label=_('Import EDS'),
              name='ImportEDS', parent=self, pos=wx.Point(0, 0),
              size=wx.Size(100, 32), style=0)
        self.ImportEDS.Bind(wx.EVT_BUTTON, self.OnImportEDSButton,
              id=ID_ADDSLAVEDIALOGIMPORTEDS)
        self.ButtonSizer = self.CreateButtonSizer(wx.OK|wx.CANCEL|wx.CENTRE)
        # intercept OK so the form can be validated before closing
        self.Bind(wx.EVT_BUTTON, self.OnOK, id=self.ButtonSizer.GetAffirmativeButton().GetId())
        self._init_sizers()

    def __init__(self, parent):
        self._init_ctrls(parent)
        # default node id shown in hex
        self.SlaveNodeID.SetValue("0x00")

    def OnOK(self, event):
        """Validate the form; only close the dialog when everything checks out."""
        # collect the names of any empty required fields
        error = []
        if self.SlaveName.GetValue() == "":
            error.append(_("Slave Name"))
        if self.SlaveNodeID.GetValue() == "":
            error.append(_("Slave Node ID"))
        if self.EDSFile.GetStringSelection() == "":
            error.append(_("EDS File"))
        if len(error) > 0:
            # build an "A, B and C" style listing for the error message
            text = ""
            for i, item in enumerate(error):
                if i == 0:
                    text += item
                elif i == len(error) - 1:
                    text += _(" and %s")%item
                else:
                    text += _(", %s")%item
            message = wx.MessageDialog(self, _("Form isn't complete. %s must be filled!")%text, _("Error"), wx.OK|wx.ICON_ERROR)
            message.ShowModal()
            message.Destroy()
        else:
            # accept decimal, or hexadecimal when the text contains an "x"
            try:
                nodeid = self.SlaveNodeID.GetValue()
                if nodeid.find("x") != -1:
                    nodeid = int(nodeid, 16)
                else:
                    nodeid = int(nodeid)
            except:
                message = wx.MessageDialog(self, _("Slave Node ID must be a value in decimal or hexadecimal!"), _("Error"), wx.OK|wx.ICON_ERROR)
                message.ShowModal()
                message.Destroy()
                return
            if not 0 <= nodeid <= 127:
                message = wx.MessageDialog(self, _("Slave Node ID must be between 0 and 127!"), _("Error"), wx.OK|wx.ICON_ERROR)
                message.ShowModal()
                message.Destroy()
            # NOTE(review): id 0 is rejected with the "already exists"
            # message even when no node 0 exists -- confirm whether 0 is
            # deliberately reserved and deserves its own message.
            elif nodeid == 0 or nodeid in self.NodeList.SlaveNodes.keys():
                message = wx.MessageDialog(self, _("A Node with this ID already exist in the network!"), _("Error"), wx.OK|wx.ICON_ERROR)
                message.ShowModal()
                message.Destroy()
            else:
                self.EndModal(wx.ID_OK)

    def OnImportEDSButton(self, event):
        """Let the user pick an EDS file and import it into the NodeList."""
        dialog = wx.FileDialog(self,
                               _("Choose an EDS file"),
                               os.getcwd(),
                               "",
                               _("EDS files (*.eds)|*.eds|All files|*.*"),
                               wx.OPEN)
        if dialog.ShowModal() == wx.ID_OK:
            filepath = dialog.GetPath()
            if os.path.isfile(filepath):
                # a truthy result means the file already exists; ask to replace
                result = self.NodeList.ImportEDSFile(filepath)
                if result:
                    message = wx.MessageDialog(self, _("%s\nWould you like to replace it ?")%result, _("Question"), wx.YES_NO|wx.ICON_QUESTION)
                    if message.ShowModal() == wx.ID_YES:
                        self.NodeList.ImportEDSFile(filepath, True)
                    message.Destroy()
        dialog.Destroy()
        self.RefreshEDSFile()
        event.Skip()

    def RefreshEDSFile(self):
        """Repopulate the EDS dropdown, keeping the current selection if possible."""
        selection = self.EDSFile.GetStringSelection()
        self.EDSFile.Clear()
        for option in self.NodeList.EDSNodes.keys():
            self.EDSFile.Append(option)
        if self.EDSFile.FindString(selection) != wx.NOT_FOUND:
            self.EDSFile.SetStringSelection(selection)

    def SetNodeList(self, nodelist):
        """Attach the NodeList this dialog adds to; must be called before use."""
        self.NodeList = nodelist
        self.RefreshEDSFile()

    def GetValues(self):
        """Return {"slaveName", "slaveNodeID", "edsFile"} from the form."""
        values = {}
        values["slaveName"] = self.SlaveName.GetValue()
        nodeid = self.SlaveNodeID.GetValue()
        # same decimal-or-hex parsing as OnOK (which has already validated it)
        if nodeid.find("x") != -1:
            values["slaveNodeID"] = int(nodeid, 16)
        else:
            values["slaveNodeID"] = int(nodeid)
        values["edsFile"] = self.EDSFile.GetStringSelection()
        return values
#-------------------------------------------------------------------------------
# Editing DCF Entry Dialog
#-------------------------------------------------------------------------------
def DCFEntryTableColnames():
    """Return the DCF entry grid column names.

    The no-op ``_`` marks the labels for gettext extraction without
    translating them here (translation happens at display time).
    """
    _ = lambda label: label
    return [_(label) for label in ("Index", "Subindex", "Size", "Value")]
class DCFEntryValuesTable(wx.grid.PyGridTableBase):
"""
A custom wxGrid Table using user supplied data
"""
def __init__(self, parent, data, colnames):
# The base class must be initialized *first*
wx.grid.PyGridTableBase.__init__(self)
self.data = data
self.colnames = colnames
self.Parent = parent
# XXX
# we need to store the row length and col length to
# see if the table has changed size
self._rows = self.GetNumberRows()
self._cols = self.GetNumberCols()
def GetNumberCols(self):
return len(self.colnames)
def GetNumberRows(self):
return len(self.data)
def GetColLabelValue(self, col, translate=True):
if col < len(self.colnames):
if translate:
return _(self.colnames[col])
return self.colnames[col]
def GetRowLabelValues(self, row, translate=True):
return row
def GetValue(self, row, col):
if row < self.GetNumberRows():
return str(self.data[row].get(self.GetColLabelValue(col, False), ""))
def GetEditor(self, row, col):
if row < self.GetNumberRows():
return self.editors[row].get(self.GetColLabelValue(col, False), "")
def GetValueByName(self, row, colname):
return self.data[row].get(colname)
def SetValue(self, row, col, value):
if col < len(self.colnames):
self.data[row][self.GetColLabelValue(col, False)] = value
def ResetView(self, grid):
"""
(wx.grid.Grid) -> Reset the grid view. Call this to
update the grid if rows and columns have been added or deleted
"""
grid.BeginBatch()
for current, new, delmsg, addmsg in [
(self._rows, self.GetNumberRows(), wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED, wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED),
(self._cols, self.GetNumberCols(), wx.grid.GRIDTABLE_NOTIFY_COLS_DELETED, wx.grid.GRIDTABLE_NOTIFY_COLS_APPENDED),
]:
if new < current:
msg = wx.grid.GridTableMessage(self,delmsg,new,current-new)
grid.ProcessTableMessage(msg)
elif new > current:
msg = wx.grid.GridTableMessage(self,addmsg,new-current)
grid.ProcessTableMessage(msg)
self.UpdateValues(grid)
grid.EndBatch()
self._rows = self.GetNumberRows()
self._cols = self.GetNumberCols()
# update the column rendering scheme
self._updateColAttrs(grid)
# update the scrollbars and the displayed part of the grid
grid.AdjustScrollbars()
grid.ForceRefresh()
    def UpdateValues(self, grid):
        """Update all displayed values"""
        # This sends an event to the grid table to update all of the values
        msg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
        grid.ProcessTableMessage(msg)
    def _updateColAttrs(self, grid):
        """
        wx.grid.Grid -> update the column attributes to add the
        appropriate renderer given the column name.
        Otherwise default to the default renderer.
        """
        # Every cell gets a plain text editor / string renderer on a white
        # background; a fresh editor+renderer pair is created per cell because
        # the grid takes ownership of them.
        for row in range(self.GetNumberRows()):
            for col in range(self.GetNumberCols()):
                editor = wx.grid.GridCellTextEditor()
                renderer = wx.grid.GridCellStringRenderer()
                grid.SetCellEditor(row, col, editor)
                grid.SetCellRenderer(row, col, renderer)
                grid.SetCellBackgroundColour(row, col, wx.WHITE)
def SetData(self, data):
self.data = data
def AppendRow(self, row_content):
self.data.append(row_content)
def Empty(self):
self.data = []
self.editors = []
# Unique wx widget IDs for the DCF entry values dialog and its controls.
[ID_DCFENTRYVALUESDIALOG, ID_DCFENTRYVALUESDIALOGVALUESGRID,
 ID_DCFENTRYVALUESDIALOGADDBUTTON, ID_DCFENTRYVALUESDIALOGDELETEBUTTON,
 ID_DCFENTRYVALUESDIALOGUPBUTTON, ID_DCFENTRYVALUESDIALOGDOWNBUTTON,
 ID_VARIABLEEDITORPANELSTATICTEXT1,
] = [wx.NewId() for _ in range(7)]
class DCFEntryValuesDialog(wx.Dialog):
    """Dialog for editing the list of DCF entry values shown in a grid."""
    # Compatibility shim: wxPython < 2.6 had no Bind() method, so emulate it
    # with the old-style binder call convention.
    if wx.VERSION < (2, 6, 0):
        def Bind(self, event, function, id = None):
            # ``event`` is an old-style binder callable, e.g. wx.EVT_BUTTON.
            if id is not None:
                event(self, id, function)
            else:
                event(self, function)
    def _init_coll_MainSizer_Items(self, parent):
        # Order matters: label, values grid, edit-button row, OK/Cancel row.
        parent.AddWindow(self.staticText1, 0, border=20, flag=wx.GROW|wx.TOP|wx.LEFT|wx.RIGHT)
        parent.AddWindow(self.ValuesGrid, 0, border=20, flag=wx.GROW|wx.TOP|wx.LEFT|wx.RIGHT)
        parent.AddSizer(self.ButtonPanelSizer, 0, border=20, flag=wx.ALIGN_RIGHT|wx.LEFT|wx.RIGHT)
        parent.AddSizer(self.ButtonSizer, 0, border=20, flag=wx.ALIGN_RIGHT|wx.BOTTOM|wx.LEFT|wx.RIGHT)
    def _init_coll_MainSizer_Growables(self, parent):
        # Single column stretches horizontally; row 1 (the grid) takes the
        # extra vertical space on resize.
        parent.AddGrowableCol(0)
        parent.AddGrowableRow(1)
    def _init_coll_ButtonPanelSizer_Items(self, parent):
        # Left-to-right visual order: Up, Add, Down, Delete.
        parent.AddWindow(self.UpButton, 0, border=5, flag=wx.ALL)
        parent.AddWindow(self.AddButton, 0, border=5, flag=wx.ALL)
        parent.AddWindow(self.DownButton, 0, border=5, flag=wx.ALL)
        parent.AddWindow(self.DeleteButton, 0, border=5, flag=wx.ALL)
    def _init_sizers(self):
        # One column, three rows: content, button panel, dialog buttons.
        self.MainSizer = wx.FlexGridSizer(cols=1, hgap=0, rows=3, vgap=0)
        self.ButtonPanelSizer = wx.BoxSizer(wx.HORIZONTAL)
        self._init_coll_MainSizer_Items(self.MainSizer)
        self._init_coll_MainSizer_Growables(self.MainSizer)
        self._init_coll_ButtonPanelSizer_Items(self.ButtonPanelSizer)
        self.SetSizer(self.MainSizer)
    def _init_ctrls(self, prnt):
        """Create all widgets and wire their event handlers (generated-style code)."""
        wx.Dialog.__init__(self, id=ID_DCFENTRYVALUESDIALOG,
              name='DCFEntryValuesDialog', parent=prnt, pos=wx.Point(376, 223),
              size=wx.Size(400, 300), style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER,
              title=_('Edit DCF Entry Values'))
        self.SetClientSize(wx.Size(400, 300))
        self.staticText1 = wx.StaticText(id=ID_VARIABLEEDITORPANELSTATICTEXT1,
              label=_('Entry Values:'), name='staticText1', parent=self,
              pos=wx.Point(0, 0), size=wx.Size(95, 17), style=0)
        self.ValuesGrid = wx.grid.Grid(id=ID_DCFENTRYVALUESDIALOGVALUESGRID,
              name='ValuesGrid', parent=self, pos=wx.Point(0, 0),
              size=wx.Size(0, 150), style=wx.VSCROLL)
        self.ValuesGrid.SetFont(wx.Font(12, 77, wx.NORMAL, wx.NORMAL, False,
              'Sans'))
        self.ValuesGrid.SetLabelFont(wx.Font(10, 77, wx.NORMAL, wx.NORMAL,
              False, 'Sans'))
        # Row labels are hidden; selection colors match the cell colors.
        self.ValuesGrid.SetRowLabelSize(0)
        self.ValuesGrid.SetSelectionBackground(wx.WHITE)
        self.ValuesGrid.SetSelectionForeground(wx.BLACK)
        # Old-style event binders are needed before wxPython 2.6.
        if wx.VERSION >= (2, 6, 0):
            self.ValuesGrid.Bind(wx.grid.EVT_GRID_CELL_CHANGE, self.OnValuesGridCellChange)
            self.ValuesGrid.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.OnValuesGridSelectCell)
        else:
            wx.grid.EVT_GRID_CELL_CHANGE(self.ValuesGrid, self.OnValuesGridCellChange)
            wx.grid.EVT_GRID_SELECT_CELL(self.ValuesGrid, self.OnValuesGridSelectCell)
        self.AddButton = wx.Button(id=ID_DCFENTRYVALUESDIALOGADDBUTTON, label=_('Add'),
              name='AddButton', parent=self, pos=wx.Point(0, 0),
              size=wx.Size(72, 32), style=0)
        self.Bind(wx.EVT_BUTTON, self.OnAddButton, id=ID_DCFENTRYVALUESDIALOGADDBUTTON)
        self.DeleteButton = wx.Button(id=ID_DCFENTRYVALUESDIALOGDELETEBUTTON, label=_('Delete'),
              name='DeleteButton', parent=self, pos=wx.Point(0, 0),
              size=wx.Size(72, 32), style=0)
        self.Bind(wx.EVT_BUTTON, self.OnDeleteButton, id=ID_DCFENTRYVALUESDIALOGDELETEBUTTON)
        self.UpButton = wx.Button(id=ID_DCFENTRYVALUESDIALOGUPBUTTON, label='^',
              name='UpButton', parent=self, pos=wx.Point(0, 0),
              size=wx.Size(32, 32), style=0)
        self.Bind(wx.EVT_BUTTON, self.OnUpButton, id=ID_DCFENTRYVALUESDIALOGUPBUTTON)
        self.DownButton = wx.Button(id=ID_DCFENTRYVALUESDIALOGDOWNBUTTON, label='v',
              name='DownButton', parent=self, pos=wx.Point(0, 0),
              size=wx.Size(32, 32), style=0)
        self.Bind(wx.EVT_BUTTON, self.OnDownButton, id=ID_DCFENTRYVALUESDIALOGDOWNBUTTON)
        self.ButtonSizer = self.CreateButtonSizer(wx.OK|wx.CANCEL|wx.CENTRE)
        self._init_sizers()
    def __init__(self, parent):
        """Build the dialog and attach an empty DCFEntryValuesTable to the grid."""
        self._init_ctrls(parent)
        self.Values = []
        # Presumably the template for newly added rows — confirm against the
        # Add handler (not visible here).
        self.DefaultValue = {"Index" : 0, "Subindex" : 0, "Size" : 1, "Value" : 0}
        self.Table = DCFEntryValuesTable(self, [], DCFEntryTableColnames())
        self.ValuesGrid.SetTable(self.Table)
def OnValuesGridCellChange(self, event):
row, col = event.GetRow(), event.GetCol()
colname = self.Table.GetColLabelValue(col)
value = self.Table.GetValue(row, col)
try:
self.Values[row][colname] = int(value, 16)
| |
# Perform tolerance sweep tolerance by ESM-SSP and well-characterized errors
# for an audience seeking to emulate from the full CMIP6 archive and for the
# scenarioMIP approach, for all ESMs
# For each ESM, loop over various tolerances and generate Ndraws = 500
# GSAT trajectories. Archives with and without each target to characterize
# error. (Reproducible mode off so draws are different). And the ScenMIP.
# Compare to the target ensemble via 4 metrics (E1, E2 on both Tgavs and
# jumps) and record errors for each draw and tolerance.
## TODO: comment out saving the GSATs
## TODO functionalize at least some part of the analysis or at least use a for-loop
## over the different SSP targets so that the code isn't so long and repetitive
# making a table of avail runs X planned archives and for looping over that would
# trim things down (see approach for max tol runs). And rewrite the tolerance
# iteration to be a while loop, comparing current to prev instead of calculating
# and saving it all? Update writes and reads to be subdir so things are tidier
# would be better to functionalize this script with ESM, tol and Ndraws as arguments
# and then have the .sh just call the function and dispatch to diff nodes for each run I guess.
# #############################################################################
# General setup
# #############################################################################
# Import packages
import pandas as pd
import numpy as np
import stitches as stitches
import pkg_resources
import os
from pathlib import Path
# Show all columns when printing frames (diagnostic convenience).
pd.set_option('display.max_columns', None)
OUTPUT_DIR = pkg_resources.resource_filename('stitches', 'data/created_data')
# OUTPUT_DIR = '/pic/projects/GCAM/stitches_pic/paper1_outputs'
# #############################################################################
# Experiment setup
# #############################################################################
# experiment parameters
# Tolerances swept from 0.05 to 0.22 degC in 0.005 steps (rounded to 3 dp).
tolerances = np.round(np.arange(0.05, 0.225, 0.005), 3)
# NOTE(review): the header comment mentions Ndraws = 500 but this run uses 20
# — confirm which is intended.
Ndraws =20
error_threshold = 0.1
# pangeo table of ESMs for reference
pangeo_path = pkg_resources.resource_filename('stitches', 'data/pangeo_table.csv')
pangeo_data = pd.read_csv(pangeo_path)
# Keep only monthly (Amon) tas / pr / psl entries.
pangeo_data = pangeo_data[((pangeo_data['variable'] == 'tas') | (pangeo_data['variable'] == 'pr') | (pangeo_data['variable'] == 'psl'))
                          & ((pangeo_data['domain'] == 'Amon') ) ].copy()
# Keep only the runs that have data for all vars X all timesteps:
pangeo_good_ensembles =[]
for name, group in pangeo_data.groupby(['model', 'experiment', 'ensemble']):
    df = group.drop_duplicates().copy()
    # Exactly 3 rows == one row per required variable (tas, pr, psl).
    if len(df) == 3:
        pangeo_good_ensembles.append(df)
    del(df)
pangeo_good_ensembles = pd.concat(pangeo_good_ensembles)
pangeo_good_ensembles = pangeo_good_ensembles[['model', 'experiment', 'ensemble']].drop_duplicates().copy()
pangeo_good_ensembles = pangeo_good_ensembles.reset_index(drop=True).copy()
# won't use idealized runs
pangeo_good_ensembles = pangeo_good_ensembles[~((pangeo_good_ensembles['experiment'] == '1pctCO2') |
                                                (pangeo_good_ensembles['experiment'] == 'abrupt-4xCO2')|
                                                (pangeo_good_ensembles['experiment'] == 'ssp534-over')) ].reset_index(drop=True).copy()
# #############################################################################
# Load full archive and target data
# #############################################################################
# Load the full archive of all staggered windows, which we will be matching on
full_archive_path = pkg_resources.resource_filename('stitches', 'data/matching_archive.csv')
full_archive_data = pd.read_csv(full_archive_path)
# Keep only the entries that appeared in pangeo_good_ensembles:
keys =['model', 'experiment', 'ensemble']
i1 = full_archive_data.set_index(keys).index
i2 = pangeo_good_ensembles.set_index(keys).index
full_archive_data= full_archive_data[i1.isin(i2)].copy()
del(i1)
del(i2)
# get list of ESMs that are both pangeo good ensembles and in archive
df1 = full_archive_data[['model', 'experiment', 'ensemble']].drop_duplicates()
d = pd.merge(df1, pangeo_good_ensembles.drop_duplicates(), how = 'inner')
esms = d.model.unique().copy()
del(df1)
del(d)
# Load the original archive without staggered windows, which we will draw
# the target trajectories from for matching
# NOTE(review): this reads the same matching_archive.csv as the full archive
# above, despite the comment about a non-staggered file — confirm the path.
full_target_path = pkg_resources.resource_filename('stitches', 'data/matching_archive.csv')
full_target_data = pd.read_csv(full_target_path)
# Keep only the entries that appeared in pangeo_good_ensembles:
keys =['model', 'experiment', 'ensemble']
i1 = full_target_data.set_index(keys).index
i2 = pangeo_good_ensembles.set_index(keys).index
full_target_data = full_target_data[i1.isin(i2)].copy()
del(i1)
del(i2)
del(keys)
# Make sure no 2012-2014 windows got through
# TODO won't work with staggering??
# Windows shorter than 7 years are trimmed from both target and archive.
full_target_data['window_size'] = full_target_data['end_yr'] - full_target_data['start_yr']
full_target_data = full_target_data[full_target_data['window_size'] >=7].drop(columns=['window_size']).copy()
full_archive_data['window_size'] = full_archive_data['end_yr'] - full_archive_data['start_yr']
full_archive_data = full_archive_data[full_archive_data['window_size'] >=7].drop(columns=['window_size']).copy()
# #############################################################################
# Some helper functions
# #############################################################################
def prep_target_data(target_df):
    """Drop incomplete target ensembles from ``target_df``.

    Any ensemble member whose windows stop before 2099 (i.e. the series does
    not run to 2099/2100) is removed. Returns the filtered frame with a fresh
    index; an empty input is returned unchanged.
    """
    if target_df.empty:
        return target_df
    kept = target_df
    for _, member in target_df.groupby(['experiment', 'variable', 'ensemble', 'model']):
        if member.end_yr.max() < 2099:
            incomplete = member.ensemble.unique()[0]
            kept = kept.loc[kept['ensemble'] != incomplete].copy().reset_index(drop=True)
    return kept.reset_index(drop=True).copy()
def get_orig_data(target_df):
    """Load the raw ESM tas data corresponding to ``target_df``.

    Reads the packaged ``data/tas-data/<ESM>_tas.csv`` file for the target's
    model, restricts it to the target's experiment, then keeps only the
    (experiment, ensemble, model) combinations present in ``target_df``.

    Returns the filtered DataFrame with a fresh index, or None when the
    target is empty. (Bug fix: the original fell through to an unbound local
    and raised NameError on empty input.)
    """
    if target_df.empty:
        return None
    esm_name = target_df.model.unique()[0]
    scn_name = target_df.experiment.unique()[0]
    raw_path = pkg_resources.resource_filename(
        'stitches', 'data/tas-data/' + esm_name + '_tas.csv')
    raw_data = pd.read_csv(raw_path)
    # Restrict to the target's scenario first, then to its exact members.
    orig_data = raw_data[raw_data['experiment'] == scn_name].copy()
    keys = ['experiment', 'ensemble', 'model']
    i1 = orig_data.set_index(keys).index
    i2 = target_df.set_index(keys).index
    orig_data = orig_data[i1.isin(i2)].copy()
    return orig_data.reset_index(drop=True).copy()
def match_draw_stitchTgav(target_df, archive_df, toler, num_draws, TGAV_OUTPUT_DIR, reproducible):
    """Match a target ensemble against an archive, draw recipes, and stitch GSAT.

    For each of ``num_draws`` draws: permute stitching recipes from the
    tolerance-neighborhood matches, tag their ids with tolerance/draw/archive,
    write the gridded recipe csv under OUTPUT_DIR, then stitch GSAT and write
    one csv per stitching_id into TGAV_OUTPUT_DIR (assumed to end with a path
    separator — TODO confirm).

    Returns the last formatted recipe DataFrame, or [] when ``target_df`` is
    empty or no draws were requested.
    """
    esm_name = archive_df.model.unique()[0]
    if target_df.empty:
        print('Some missing target data for ' + esm_name + '. Analysis will be skipped')
        return []
    # All candidate matches within the tolerance neighborhood.
    match_df = stitches.match_neighborhood(target_df, archive_df, tol=toler)
    scn_name = target_df.experiment.unique()[0]
    # Label which kind of archive we drew from.
    experiments = archive_df.experiment.unique()
    if ('ssp245' not in experiments) and ('ssp370' not in experiments):
        archive_id = 'scenarioMIP'
    elif scn_name in experiments:
        archive_id = 'w_target'
    else:
        archive_id = 'wo_target'
    # Robustness fix: with num_draws == 0 the original returned an unbound
    # local; initialize so callers get an empty result instead of NameError.
    recipe = []
    for draw in range(num_draws):
        # Random draw of recipes; reproducible mode fixes the RNG inside
        # permute_stitching_recipes via its ``testing`` flag.
        unformatted_recipe = stitches.permute_stitching_recipes(
            N_matches=10000, matched_data=match_df, archive=archive_df,
            testing=bool(reproducible))
        new_ids = ('tol' + str(toler) + '~draw' + str(draw) + '~' + archive_id + '~' +
                   unformatted_recipe['stitching_id'].astype(str)).copy()
        unformatted_recipe = unformatted_recipe.drop(columns=['stitching_id']).copy()
        unformatted_recipe['stitching_id'] = new_ids
        del new_ids
        # format the recipe
        recipe = stitches.generate_gridded_recipe(unformatted_recipe)
        recipe.columns = ['target_start_yr', 'target_end_yr', 'archive_experiment', 'archive_variable',
                          'archive_model', 'archive_ensemble', 'stitching_id', 'archive_start_yr',
                          'archive_end_yr', 'tas_file']
        recipe['tolerance'] = toler
        recipe['draw'] = draw
        recipe['archive'] = archive_id
        recipe.to_csv((OUTPUT_DIR + '/' + esm_name + '/experiment_CMIP6/' +
                       'gridded_recipes_' + esm_name + '_target_' + scn_name +
                       '_tol' + str(toler) +
                       '_draw' + str(draw) +
                       '_archive_' + archive_id + '.csv'), index=False)
        del unformatted_recipe
        # stitch the GSAT values and save as csv
        try:
            gsat = stitches.gmat_stitching(recipe)
            gsat['tolerance'] = toler
            gsat['draw'] = draw
            gsat['archive'] = archive_id
            for sid in gsat.stitching_id.unique():  # was ``id``: shadowed builtin
                ds = gsat[gsat['stitching_id'] == sid].copy()
                fname = (TGAV_OUTPUT_DIR +
                         'stitched_' + esm_name + '_GSAT_' + sid + '.csv')
                ds.to_csv(fname, index=False)
                del ds
            del gsat
        except Exception as err:
            # BUG FIX: was a bare ``except:`` that also swallowed
            # KeyboardInterrupt/SystemExit. Keep best-effort behavior for
            # ordinary errors but report what actually went wrong.
            print("Some issue stitching GMAT for " + esm_name + ": " + repr(err) +
                  ". Skipping and moving on")
    return recipe
def get_jumps(tgav_df):
    """Return per-group year-over-year jumps of ``value``.

    For each (variable, experiment, ensemble, model) group, ``jump`` is the
    first difference of ``value``; the leading NaN row is dropped and the
    ``value`` column itself is removed from the result.
    """
    pieces = []
    for _, member in tgav_df.groupby(['variable', 'experiment', 'ensemble', 'model']):
        chunk = member.copy()
        chunk['jump'] = chunk['value'].diff()
        pieces.append(chunk.dropna().copy())
    out = pd.concat(pieces).drop(columns=['value'])
    return out.drop_duplicates().reset_index(drop=True).copy()
def _group_stats(df, keys, value_col, label):
    """Per-group mean/sd of ``value_col``; output columns mean_<label>/sd_<label>."""
    pieces = []
    for _, group in df.groupby(keys):
        stats = group[keys].drop_duplicates().copy()
        stats['mean_' + label] = np.mean(group[value_col].values)
        stats['sd_' + label] = np.std(group[value_col].values)
        pieces.append(stats)
    return pd.concat(pieces).reset_index(drop=True).copy()


def four_errors(gen_data, orig_data):
    """Score generated GSAT ensembles against the original via four metrics.

    E1_* is the normalized mean error and E2_* the sd ratio, computed on both
    the Tgav values and their year-over-year jumps. ``max_metric`` is the
    worst of |E1| and |1 - E2| across all four, per
    (model, variable, experiment, tolerance, draw, archive) group.

    Refactor: the four copy-pasted groupby mean/sd loops of the original are
    collapsed into the _group_stats helper; results are unchanged.
    """
    gen_data_jump = get_jumps(gen_data)
    orig_data_jump = get_jumps(orig_data)
    orig_keys = ['model', 'variable', 'experiment']
    gen_keys = ['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']
    # Reference statistics from the original ESM output.
    orig_stats = _group_stats(orig_data, orig_keys, 'value', 'orig_tgav').merge(
        _group_stats(orig_data_jump, orig_keys, 'jump', 'orig_jump'),
        how='left', on=orig_keys).copy()
    # Statistics of every generated (tolerance, draw, archive) ensemble.
    gen_stats = _group_stats(gen_data, gen_keys, 'value', 'gen_tgav').merge(
        _group_stats(gen_data_jump, gen_keys, 'jump', 'gen_jump'),
        how='left', on=gen_keys).copy()
    compare = gen_stats.merge(orig_stats, how='left', on=orig_keys).copy()
    compare['E1_tgav'] = abs(compare.mean_orig_tgav - compare.mean_gen_tgav) / compare.sd_orig_tgav
    compare['E2_tgav'] = compare.sd_gen_tgav / compare.sd_orig_tgav
    compare['E1_jump'] = abs(compare.mean_orig_jump - compare.mean_gen_jump) / compare.sd_orig_jump
    compare['E2_jump'] = compare.sd_gen_jump / compare.sd_orig_jump
    compare = compare[gen_keys + ['E1_tgav', 'E2_tgav', 'E1_jump', 'E2_jump']].copy()
    four_values = []
    for _, group in compare.groupby(gen_keys):
        ds = group.copy()
        # Worst-case metric across the four error measures for this group.
        ds['max_metric'] = np.max(
            [ds.E1_tgav.values, abs(1 - ds.E2_tgav.values), ds.E1_jump.values, abs(1 - ds.E2_jump.values)])
        four_values.append(ds)
    return pd.concat(four_values).reset_index(drop=True).copy()
def match_draw_stitch_evalTgav(target_df, archive_df, toler, num_draws, ERR_OUTPUT_DIR, reproducible):
esm_name = archive_df.model.unique()[0]
if not target_df.empty:
# Use the match_neighborhood function to generate all of the matches between the target and
# archive data points.
match_df = stitches.match_neighborhood(target_df, archive_df, tol=toler)
scn_name = target_df.experiment.unique()[0]
# get corresponding original data to the target
orig_df = get_orig_data(target_df).copy()
if ((not ('ssp245' in archive_df.experiment.unique())) & (not ('ssp370' in archive_df.experiment.unique()))):
archive_id = 'scenarioMIP'
elif scn_name in archive_df.experiment.unique():
archive_id = 'w_target'
else:
archive_id = 'wo_target'
compared = []
for draw in range(0, num_draws):
# Do the random draw of recipes
if reproducible:
unformatted_recipe = stitches.permute_stitching_recipes(N_matches=10000,
matched_data=match_df,
archive=archive_df,
testing=True)
else:
unformatted_recipe = stitches.permute_stitching_recipes(N_matches=10000,
matched_data=match_df,
archive=archive_df,
testing=False)
new_ids = ('tol' + str(toler) + '~draw' + str(draw) + '~' + archive_id + '~'+
unformatted_recipe['stitching_id'].astype(str)).copy()
unformatted_recipe = unformatted_recipe.drop(columns=['stitching_id']).copy()
unformatted_recipe['stitching_id'] = new_ids
del (new_ids)
# format the recipe
recipe = stitches.generate_gridded_recipe(unformatted_recipe)
recipe.columns = ['target_start_yr', 'target_end_yr', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'stitching_id', 'archive_start_yr',
'archive_end_yr', 'tas_file']
recipe['tolerance'] = toler
recipe['draw'] = | |
int number <= {0}. {1}".format(max, msg))
return
return result
def input_float(self, input_name, min=None, max=None, msg=None):
input_txt = str(self.uiList[input_name].text())
result = None
try:
result = float(input_txt)
except (ValueError, TypeError):
return
# min
if min != None:
if result < min:
print("Please enter a valid int number >= {0}. {1}".format(min, msg))
return
# max
if max != None:
if result > max:
print("Please enter a valid int number <= {0}. {1}".format(max, msg))
return
return result
def input_choice(self, ui_name):
if ui_name in self.uiList.keys():
return self.uiList[ui_name].currentIndex()
else:
return
def input_check(self, ui_name):
if ui_name in self.uiList.keys():
return self.uiList[ui_name].isChecked()
else:
return
def output_text(self, ui_name, text):
if ui_name in self.uiList.keys():
self.uiList[ui_name].setText(text)
#=======================================
# file data functions
#=======================================
    def ____file_functions____():
        # Not a real method: visual separator grouping the file-data helpers
        # in IDE outline views.
        pass
def readDataFile(self,file,binary=0):
with open(file) as f:
if binary == 0:
data = json.load(f)
else:
data = cPickle.load(f)
return data
def writeDataFile(self,data,file,binary=0):
with open(file, 'w') as f:
if binary == 0:
json.dump(data, f)
else:
cPickle.dump(data, f)
def readTextFile(self, file):
with open(file) as f:
txt = f.read()
return txt
def writeTextFile(self, txt, file):
with open(file, 'w') as f:
f.write(txt)
def dict_merge(self, default_dict, extra_dict, addKey=0):
# dictionary merge, with optional adding extra data from extra_dict
new_dict = {}
for key in default_dict.keys():
if not isinstance( default_dict[key], dict ):
# value case
if key in extra_dict.keys():
is_same_text_type = isinstance(extra_dict[key], (str,unicode)) and isinstance(default_dict[key], (str,unicode))
is_same_non_text_type = type(extra_dict[key]) is type(default_dict[key])
if is_same_text_type or is_same_non_text_type:
print('use config file value for key: '+key)
new_dict[key] = extra_dict[key]
else:
new_dict[key] = default_dict[key]
else:
new_dict[key] = default_dict[key]
else:
# dictionary case
if key in extra_dict.keys() and isinstance( extra_dict[key], dict ):
new_dict[key] = self.dict_merge( default_dict[key], extra_dict[key], addKey )
else:
new_dict[key] = default_dict[key]
# optional, add additional keys
if addKey == 1:
for key in [ x for x in extra_dict.keys() if x not in default_dict.keys() ]:
new_dict[key] = extra_dict[key]
return new_dict
#=======================================
# ui text functions
#=======================================
    def ____ui_text_functions____():
        # Not a real method: visual separator grouping the UI-text helpers.
        pass
def fontNormal_action(self):
self.memoData['font_size'] = self.memoData['font_size_default']
self.setStyleSheet("QLabel,QPushButton { font-size: %dpt;}" % self.memoData['font_size'])
def fontUp_action(self):
self.memoData['font_size'] += 2
self.setStyleSheet("QLabel,QPushButton { font-size: %dpt;}" % self.memoData['font_size'])
def fontDown_action(self):
if self.memoData['font_size'] >= self.memoData['font_size_default']:
self.memoData['font_size'] -= 2
self.setStyleSheet("QLabel,QPushButton { font-size: %dpt;}" % self.memoData['font_size'])
    def loadLang(self, build_menu=1):
        """Snapshot the default UI text and load any sibling language files.

        Walks every widget in self.uiList, records its current text/title/tab
        labels/header labels (';'-joined for multi-part widgets) under
        memoData['lang']['default'], then scans the tool's directory for
        ``<toolname>_lang_<LANG>.json`` files and registers each as a
        selectable language. With ``build_menu``, a Language menu is built
        with one action per language (plus an export action when no language
        file exists yet).
        """
        # store default language
        self.memoData['lang']={}
        self.memoData['lang']['default']={}
        for ui_name in self.uiList.keys():
            ui_element = self.uiList[ui_name]
            if isinstance(ui_element, (QtWidgets.QLabel, QtWidgets.QPushButton, QtWidgets.QAction, QtWidgets.QCheckBox) ):
                # uiType: QLabel, QPushButton, QAction(menuItem), QCheckBox
                self.memoData['lang']['default'][ui_name] = unicode(ui_element.text())
            elif isinstance(ui_element, (QtWidgets.QGroupBox, QtWidgets.QMenu) ):
                # uiType: QMenu, QGroupBox
                self.memoData['lang']['default'][ui_name] = unicode(ui_element.title())
            elif isinstance(ui_element, QtWidgets.QTabWidget):
                # uiType: QTabWidget
                tabCnt = ui_element.count()
                tabNameList = []
                for i in range(tabCnt):
                    tabNameList.append(unicode(ui_element.tabText(i)))
                self.memoData['lang']['default'][ui_name]=';'.join(tabNameList)
            elif isinstance(ui_element, QtWidgets.QComboBox):
                # uiType: QComboBox
                itemCnt = ui_element.count()
                itemNameList = []
                for i in range(itemCnt):
                    itemNameList.append(unicode(ui_element.itemText(i)))
                self.memoData['lang']['default'][ui_name]=';'.join(itemNameList)
            elif isinstance(ui_element, QtWidgets.QTreeWidget):
                # uiType: QTreeWidget
                labelCnt = ui_element.headerItem().columnCount()
                labelList = []
                for i in range(labelCnt):
                    labelList.append(unicode(ui_element.headerItem().text(i)))
                self.memoData['lang']['default'][ui_name]=';'.join(labelList)
            elif isinstance(ui_element, QtWidgets.QTableWidget):
                # uiType: QTableWidget
                colCnt = ui_element.columnCount()
                headerList = []
                for i in range(colCnt):
                    if ui_element.horizontalHeaderItem(i):
                        headerList.append( unicode(ui_element.horizontalHeaderItem(i).text()) )
                    else:
                        headerList.append('')
                self.memoData['lang']['default'][ui_name]=';'.join(headerList)
            elif isinstance(ui_element, (str, unicode) ):
                # uiType: string for msg
                self.memoData['lang']['default'][ui_name] = self.uiList[ui_name]
        # language menu
        lang_menu = 'language_menu'
        if build_menu == 1:
            self.qui_menubar('language_menu;&Language')
            self.qui_menu('langDefault_atnLang;Default | _', lang_menu)
            self.uiList['langDefault_atnLang'].triggered.connect(partial(self.setLang,'default'))
        # scan for language file
        lang_path = os.path.dirname(self.location)
        baseName = os.path.splitext( os.path.basename(self.location) )[0]
        for file in self.getPathChild(lang_path, pattern=baseName+'_lang_[a-zA-Z]+.json', isfile=1):
            langName = re.findall(baseName+'_lang_(.+)\.json', file)
            if len(langName) == 1:
                langName = langName[0].upper()
                self.memoData['lang'][ langName ] = self.readDataFile( os.path.join(lang_path, file) )
                if build_menu == 1:
                    self.qui_menu('{0}_atnLang;{0}'.format(langName), lang_menu)
                    self.uiList[langName+'_atnLang'].triggered.connect(partial(self.setLang,langName))
        # if no language file detected, add export default language option
        if build_menu == 1:
            if isinstance(self, QtWidgets.QMainWindow) and len(self.memoData['lang']) == 1:
                self.qui_menu('langExport_atnLang;Export Default Language', lang_menu)
                self.uiList['langExport_atnLang'].triggered.connect(self.exportLang)
def setLang(self, langName):
lang_data = self.memoData['lang'][langName]
for ui_name in lang_data.keys():
if ui_name in self.uiList.keys() and lang_data[ui_name] != '':
ui_element = self.uiList[ui_name]
# '' means no translation availdanle in that data file
if isinstance(ui_element, (QtWidgets.QLabel, QtWidgets.QPushButton, QtWidgets.QAction, QtWidgets.QCheckBox) ):
# uiType: QLabel, QPushButton, QAction(menuItem), QCheckBox
ui_element.setText(lang_data[ui_name])
elif isinstance(ui_element, (QtWidgets.QGroupBox, QtWidgets.QMenu) ):
# uiType: QMenu, QGroupBox
ui_element.setTitle(lang_data[ui_name])
elif isinstance(ui_element, QtWidgets.QTabWidget):
# uiType: QTabWidget
tabCnt = ui_element.count()
tabNameList = lang_data[ui_name].split(';')
if len(tabNameList) == tabCnt:
for i in range(tabCnt):
if tabNameList[i] != '':
ui_element.setTabText(i,tabNameList[i])
elif isinstance(ui_element, QtWidgets.QComboBox):
# uiType: QComboBox
itemCnt = ui_element.count()
itemNameList = lang_data[ui_name].split(';')
ui_element.clear()
ui_element.addItems(itemNameList)
elif isinstance(ui_element, QtWidgets.QTreeWidget):
# uiType: QTreeWidget
labelCnt = ui_element.headerItem().columnCount()
labelList = lang_data[ui_name].split(';')
ui_element.setHeaderLabels(labelList)
elif isinstance(ui_element, QtWidgets.QTableWidget):
# uiType: QTableWidget
colCnt = ui_element.columnCount()
headerList = lang_data[ui_name].split(';')
cur_table.setHorizontalHeaderLabels( headerList )
elif isinstance(ui_element, (str, unicode) ):
# uiType: string for msg
self.uiList[ui_name] = lang_data[ui_name]
def exportLang(self):
file = self.quickFileAsk('export', ext='json')
if file != '':
self.writeDataFile( self.memoData['lang']['default'], file )
self.quickMsg("Languge File created: '"+file)
#=======================================
# qui functions
#=======================================
def ____ui_creation_functions____():
pass
def setAsUI(self):
# turn win to widget
self.setWindowFlags(QtCore.Qt.Widget)
self.statusBar().hide()
self.uiList['main_layout'].setContentsMargins(0, 0, 0, 0)
def qui(self, ui_list_string, parent_ui_string='', insert_opt=''):
ui_creation_list = [ x.strip() for x in ui_list_string.split('|') ]
ui_creation_quickUI_list = []
# ------------
# - ui list
# ------------
for ui_creation in ui_creation_list:
arg_list = ui_creation.split(';')
uiName = arg_list[0].split('@')[0]
# ------------
# continue if ui is already created. pass as ui reference
if uiName in self.uiList.keys():
ui_creation_quickUI_list.append(self.uiList[uiName])
continue
# ------------
# create quickUI string
# - expand short name for Class
uiClass = uiName.rsplit('_',1)[-1]
if uiClass == 'layout' and len(arg_list)>1:
uiClass = arg_list[1]
arg_list = [ arg_list[0] ]
if uiClass in self.qui_user_dict:
uiClass = self.qui_user_dict[uiClass] # first, try user dict
elif uiClass in self.qui_core_dict:
uiClass = self.qui_core_dict[uiClass] # then, try default core dict
# - check it is valid Qt class or a user class
if hasattr(QtWidgets, uiClass) or uiClass in sys.modules:
pass # uiClass is valid for Qt class, user module
else:
print("WARNING: ({0}) is not defined in self.qui_user_dict and it is not a Qt widget class or User class; Item {1} Ignored.".format(uiClass, uiName))
continue
# - set quickUI creation format
arg_list[0] = arg_list[0] +';'+uiClass
if len(arg_list)==1:
if uiClass in ('QPushButton','QLabel'):
arg_list.append(uiName) # give empty button and label a place holder name
ui_creation_quickUI_list.append(';'.join(arg_list))
# ------------
# - ui parent
# ------------
parent_creation_quickUI_input = ''
parent_arg_list = parent_ui_string.split(';')
parent_uiName = parent_arg_list[0]
# - continue if parent ui is already created. pass as ui reference
if parent_uiName in self.uiList.keys():
parent_creation_quickUI_input = self.uiList[parent_uiName]
else:
parent_uiClass = parent_uiName.rsplit('_',1)[-1]
if parent_uiClass == 'layout' and len(parent_arg_list)>1:
parent_uiClass = parent_arg_list[1]
parent_arg_list = [ parent_arg_list[0] ]
if parent_uiClass in self.qui_user_dict:
parent_uiClass = self.qui_user_dict[parent_uiClass] # first, try user dict
elif parent_uiClass in self.qui_core_dict:
parent_uiClass = self.qui_core_dict[parent_uiClass] # then, try default core dict
# - check it is valid Qt class or a user class
if hasattr(QtWidgets, parent_uiClass) or parent_uiClass in sys.modules:
pass # uiClass is valid for Qt class, user module
else:
print("WARNING: ({0}) is not defined in self.qui_user_dict and it is not a Qt widget class or User class; Item {1} Ignored.".format(parent_uiClass, parent_uiName))
return
# - set quickUI creation format
parent_arg_list[0] = parent_arg_list[0] +';'+parent_uiClass
parent_creation_quickUI_input = ';'.join(parent_arg_list)
self.quickUI(ui_creation_quickUI_list, parent_creation_quickUI_input, insert_opt)
return parent_uiName
def qui_menu(self, action_list_str, menu_str):
# qui menu creation
# syntax: self.qui_menu('right_menu_createFolder_atn;Create Folder,Ctrl+D | right_menu_openFolder_atn;Open Folder', 'right_menu')
if menu_str not in self.uiList.keys():
self.uiList[menu_str] = QtWidgets.QMenu()
create_opt_list = [ x.strip() for x in action_list_str.split('|') ]
for each_creation in create_opt_list:
ui_info = [ x.strip() for x in each_creation.split(';') ]
atn_name = ui_info[0]
atn_title = ''
atn_hotkey = ''
if len(ui_info) > 1:
options = ui_info[1].split(',')
atn_title = '' if len(options) < 1 else options[0]
atn_hotkey = '' if len(options) < 2 else options[1]
if atn_name != '':
if atn_name == '_':
self.uiList[menu_str].addSeparator()
else:
if atn_name not in self.uiList.keys():
self.uiList[atn_name] = QtWidgets.QAction(atn_title, self)
if atn_hotkey != '':
self.uiList[atn_name].setShortcut(QtGui.QKeySequence(atn_hotkey))
self.uiList[menu_str].addAction(self.uiList[atn_name])
def qui_atn(self, ui_name, title, tip=None, icon=None, parent=None, key=None):
self.uiList[ui_name] = QtWidgets.QAction(title, self)
if icon!=None:
self.uiList[ui_name].setIcon(QtGui.QIcon(icon))
if tip !=None:
self.uiList[ui_name].setStatusTip(tip)
if key != None:
self.uiList[ui_name].setShortcut(QtGui.QKeySequence(key))
if parent !=None:
if isinstance(parent, (str, unicode)) and parent in self.uiList.keys():
self.uiList[parent].addAction(self.uiList[ui_name])
elif isinstance(parent, QtWidgets.QMenu):
parent.addAction(self.uiList[ui_name])
return ui_name
    def qui_key(self, key_name, key_combo, func):
        # Register a window-level keyboard shortcut: creates a QShortcut for
        # key_combo (e.g. 'Ctrl+S'), stores it in self.hotkey under key_name,
        # and connects its 'activated' signal to func.
        self.hotkey[key_name] = QtWidgets.QShortcut(QtGui.QKeySequence(key_combo), self)
        self.hotkey[key_name].activated.connect( func )
def qui_menubar(self, menu_list_str):
if not isinstance(self, QtWidgets.QMainWindow):
print("Warning: Only QMainWindow can have menu bar.")
return
menubar = self.menuBar()
create_opt_list = [ x.strip() for x in menu_list_str.split('|') ]
for each_creation in create_opt_list:
ui_info = [ | |
<filename>modules/s3/pyvttbl/pystaggrelite3.py
from __future__ import print_function
# Copyright (c) 2011, <NAME> [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
"""
This module contains custom aggregators for sqlite3
sqlite has the following aggregate functions built-in:
avg(X)
count(X)
count(*)
group_concat(X)
group_concat(X,Y)
max(X)
min(X)
sum(X)
total(X)
The aggregate functions in sqlite are much faster then the
methods implemented here. On the downside they are rather
limited. This module implements the following aggregate
functions:
abs_mean(X)
arbitrary(X)
ci(X)
datarange(X)
geometric_mean(X)
hasinf(X)
hasnan(X)
kurt(X)
kurtp(X)
median(X)
mode(X)
prod(X)
rms(X)
sem(X)
skew(X)
skewp(X)
stdev(X)
stdevp(X)
var(X)
varp(X)
The respective docstrings for these modules provide more
information as to there specific functionality. The aggregate
functions ignore NULL, non-float text, and nan values. When X
is empty the aggregates return None. Inf values may cause the
aggregate to return None or Inf depending on function. See the
test module for specifics. All the functions except for median
and mode are implemented with running tallies.
"""
import sys
import inspect
from math import sqrt,isnan,isinf,log10,log,exp,floor
from copy import copy
try:
from collections import Counter
except ImportError:
from counter import Counter
# Python 2 to 3 workarounds: pick the string base class and the lazy range
# type appropriate for the running interpreter.
import sys
if sys.version_info[0] == 2:
    _strobj = basestring  # common base of str/unicode on Python 2
    _xrange = xrange
elif sys.version_info[0] == 3:
    _strobj = str
    _xrange = range
# Extremes of the double range; used as sentinels by the aggregators below.
maxfloat= sys.float_info.max
minfloat=-1.*sys.float_info.max
def getaggregators():
    """Yield (name, arity, class) tuples for the available aggregators.

    An aggregator is any class in this module exposing both step() and
    finalize(); arity is the number of step() parameters minus 'self'.
    """
    # inspect.getargspec was deprecated and removed in Python 3.11; prefer
    # getfullargspec (same .args attribute) with a fallback for Python 2.
    getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    mod = sys.modules[__name__]
    for name, func in inspect.getmembers(mod, inspect.isclass):
        if hasattr(func, 'step') and hasattr(func, 'finalize'):
            arity = len(getargspec(func.step).args) - 1
            yield (name, arity, func)
def isfloat(x):
    """Return True if x can be converted with float(), including nan/inf.

    >>> isfloat(12)
    True
    >>> isfloat('a')
    False
    >>> isfloat(float('nan'))
    True
    >>> isfloat(float('inf'))
    True
    """
    # Catch only the errors float() actually raises for bad input; the
    # previous bare 'except:' also swallowed KeyboardInterrupt/SystemExit.
    try:
        float(x)
    except (TypeError, ValueError):
        return False
    return True
def _flatten(x):
"""_flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> _flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
result = []
for el in x:
#if isinstance(el, (list, tuple)):
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(_flatten(el))
else:
result.append(el)
return result
def hist(V, bins=10, range=None, density=False, weights=None, cumulative=False):
    """
    Compute the histogram of a set of data (no numpy/pylab required).

    Parameters
    ----------
    V : list_like
        Input data.
    bins : int, optional
        Number of equal-width bins over the range (default 10).
    range : (float, float), optional
        Lower and upper range of the bins; default (min(V), max(V)).
    density : bool, optional
        If True, normalize so the integral over the range is 1.
    weights : list_like, optional
        Per-value weights (default: each value counts 1).
    cumulative : bool, optional
        If True, each bin holds its count plus all lower bins' counts.

    Returns
    -------
    hist : list
        The (possibly normalized / cumulated) bin values.
    bin_edges : list
        The bin edges (length(hist)+1).
    """
    if bins < 1:
        raise Exception('bins must be >= 1')
    if not isinstance(cumulative, bool):
        raise TypeError('cumulative must be a bool')
    if not isinstance(density, bool):
        # BUG FIX: the error message previously said 'cumulative'
        raise TypeError('density must be a bool')
    if range is None:
        vmin, vmax = min(V), max(V)
    else:
        vmin, vmax = range
    rng = vmax - vmin            # the range of the histogram
    dbin = rng / float(bins)     # the spacing between the bins
    # build unit weights if they aren't specified
    if weights is None:
        W = [1.] * len(V)
    else:
        W = weights
        if len(V) != len(W):
            raise Exception('V and weights must be same length')
    histCounter = Counter()      # multi-set from collections
    for v, w in zip(V, W):
        # the range is a closed interval but floor treats it as open, so a
        # value equal to vmax is nudged into the top bin
        if v == vmax:
            v -= dbin / 2.
        # rescale so the data spans [0, bins)
        histCounter[floor(bins * (v - vmin) / rng)] += w
    N = [histCounter[0]]         # the counts
    B = [vmin]                   # the bin edges to be returned
    # NOTE: the builtin range() is shadowed by the 'range' parameter, so the
    # index loops below use explicit counters.
    i = 1
    while i < bins:
        B.append((i / float(bins)) * rng + vmin)
        if cumulative:
            N.append(N[-1] + histCounter[i])
        else:
            N.append(histCounter[i])
        i += 1
    B.append(vmax)               # append the last edge
    if density:
        total = sum(v for k, v in histCounter.items() if k < bins)
        i = 0
        while i < bins:
            if cumulative:
                N[i] /= total
            else:
                N[i] /= (dbin * total)
            i += 1
    return N, B
class ignore:
    """Placeholder class, not an aggregator.

    getaggregators() skips it because it has no step()/finalize().
    """
    def __init__(self):
        pass
class hasnan:
    """
    Aggregate that reports whether the column contains any 'nan' value.
    finalize() returns True if at least one nan was seen, else False.
    NULLs and non-numeric text are ignored.
    """
    def __init__(self):
        self.value = False

    def step(self, value):
        # Convert once instead of isfloat(value) followed by float(value);
        # catch only the errors float() raises for non-numeric input.
        try:
            v = float(value)
        except (TypeError, ValueError):
            return
        if isnan(v):
            self.value = True

    def finalize(self):
        return self.value
class hasinf:
    """
    Aggregate that reports whether the column contains any 'inf' value.
    finalize() returns True if at least one inf was seen, else False.
    NULLs and non-numeric text are ignored.
    """
    def __init__(self):
        self.value = False

    def step(self, value):
        # Convert once instead of isfloat(value) followed by float(value);
        # catch only the errors float() raises for non-numeric input.
        try:
            v = float(value)
        except (TypeError, ValueError):
            return
        if isinf(v):
            self.value = True

    def finalize(self):
        return self.value
class arbitrary:
    """
    sqlite does not guarantee the order of returned rows, so this will most
    likely return the first value.  Intended for columns where all values
    are known to be equal.
    """
    def __init__(self):
        self.value = None

    def step(self, value):
        # identity test instead of '== None': avoids invoking arbitrary
        # __eq__ implementations and is the idiomatic None check
        if self.value is None:
            self.value = value

    def finalize(self):
        return self.value
class datarange:
    """
    Returns None if given an empty set, otherwise the range (max - min)
    of the elements.  NULLs, non-numeric text and nan values are ignored.
    """
    def __init__(self):
        # Same sentinel values as the module-level maxfloat/minfloat, but
        # computed locally; the previous 'global' statement was unnecessary
        # (the globals were only read, never assigned).
        self.min = sys.float_info.max
        self.max = -sys.float_info.max

    def step(self, value):
        # single conversion replaces the isfloat(value)+float(value) pair
        try:
            v = float(value)
        except (TypeError, ValueError):
            return
        if isnan(v):
            return
        if v < self.min:
            self.min = v
        if v > self.max:
            self.max = v

    def finalize(self):
        # both sentinels untouched -> no value was ever accepted
        if self.min == sys.float_info.max and self.max == -sys.float_info.max:
            return None
        return self.max - self.min
class abs_mean:
    """
    Computes the mean of the absolute values of the elements.
    Returns None for an empty set; NULLs, non-numeric text and nan
    values are ignored.
    """
    def __init__(self):
        self.s = 0.   # running sum of |v|
        self.N = 0    # count of accepted values

    def step(self, value):
        # single conversion replaces the isfloat(value)+float(value) pair
        try:
            v = float(value)
        except (TypeError, ValueError):
            return
        if not isnan(v):
            self.s += abs(v)
            self.N += 1

    def finalize(self):
        if self.N == 0:
            return None
        return self.s / float(self.N)
class geometric_mean:
    """
    Computes the geometric mean exp(mean(log(v))) of the elements,
    modeled after scipy.stats.gmean.

    If any value is < 0 the result is None; otherwise if any value is 0
    the result is 0.  Returns None for an empty set.
    """
    def __init__(self):
        self.s=0.            # running sum of log(v)
        self.N=0             # count of accepted (numeric, non-nan) values
        self.ret_value = -1  # -1 sentinel: no special case (0 or <0) seen yet
    def step(self, value):
        if isfloat(value):
            v=float(value)
            if not isnan(v):
                if v<0:
                    # a negative value poisons the whole result
                    self.ret_value=None
                elif v==0 and self.ret_value!=None:
                    # a zero forces the mean to 0 unless already poisoned
                    self.ret_value=0.
                else:
                    self.s+=log(v)
                self.N+=1
    def finalize(self):
        if self.N==0:
            return None
        if self.ret_value != -1:
            # special case hit during accumulation (None or 0.)
            return self.ret_value
        return exp(self.s/float(self.N))
class median:
"""
Returns the median of the elements.
"""
def __init__(self):
self.sequence=[]
def step(self, value):
if isfloat(value):
v=float(value)
if not isnan(v):
self.sequence.append(v)
def finalize(self):
N=len(self.sequence)
if | |
from typing import List, Dict, Tuple, Optional
from itertools import product, chain
from collections import Counter
from random import choice
from copy import deepcopy
import numpy as np
from sudoku import Sudoku, SudokuElement
class SudokuSolver:
    def __init__(self, sudoku: Sudoku):
        # The puzzle being solved, and a cursor into s.available_digits that
        # the solve() loop advances while scanning candidate digits.
        self.s: Sudoku = sudoku
        self.counter: int = 0
def main_sequence(self) -> None:
"""
General flow of procedures. Starts with basic checks for blank cell that can be already completed.
Then the main solving methods are used.
If sudoku is still incomplete after previous procedures, program tries random insertions.
:return: None
"""
if not self.is_sudoku_completed():
self.pre_solving_check()
self.solve()
# self.try_random_insert()
self.backtracking()
return None
def validate_sudoku(self) -> int:
"""
Checks if sudoku array is valid, i.e. if rows, columns or squares don't contain duplicated digits.
:return: 0 if array is empty, 1 if is valid, -1 if invalid
"""
if self.s.elements_count[0] == 81:
return 0
if not self.check_occurences():
return -1
return 1
def check_occurences(self) -> bool:
"""
Checks if every row, column and square contains only one occurrence of number.
:return: True if sudoku is valid, otherwise False
"""
if self.check_if_one_occurence_of_digit(self.s.rows):
if self.check_if_one_occurence_of_digit(self.s.columns):
if self.check_if_one_occurence_of_digit(self.s.squares):
return True
return False
def check_if_one_occurence_of_digit(self, elements_list: List[SudokuElement]) -> bool:
"""
Checks if row, column or square contains only one of non-zero digits.
:param elements_list: list of rows, columns or squares
:return: bool
"""
for element in elements_list:
array = self.s.array[element.row_start:element.row_stop, element.column_start:element.column_stop]
digits = [i for i in np.unique(array) if i > 0]
for digit in digits:
if np.count_nonzero(array == digit) > 1:
return False
return True
def pre_solving_check(self) -> None:
"""
Checks if there are rows, columns or squares ready to fill.
:return: None
"""
self.check_elements(self.s.rows)
self.check_elements(self.s.columns)
self.check_elements(self.s.squares)
return None
def check_elements(self, elements_list: List[SudokuElement]) -> None:
"""
Checks if element can be completed immediately.
:param elements_list: list of sudoku rows, columns, or squares
:return: None
"""
for element in elements_list:
self.fill_element(element)
self.check_stack()
return None
def fill_element(self, element: SudokuElement) -> None:
"""
For element with 8 digits already in place, find last number and insert into blank cell.
Also upadte statistics of row, column and square containing filled cell.
:param element: row, column or square
:return: None
"""
if element.completed:
return None
if element.digits == 8:
array = self.s.array[element.row_start:element.row_stop, element.column_start:element.column_stop]
row_index, column_index = self.get_zero_coordinate(array, element.row_start, element.column_start)
digit = self.find_last_digit(array)
self.insert_digit(digit, row_index, column_index)
return None
@staticmethod
def get_zero_coordinate(element_array: np.ndarray, row_start: int, column_start: int) -> Tuple[int, int]:
"""
Generates coordinates of blank cell in given array.
:param element_array: sudoku element as list of digits
:param row_start: no. of sudoku row element starts with
:param column_start: no. of sudoku column element starts with
:return: tuple of coordinates (row, column)
"""
coord = np.where(element_array == 0)
return coord[0][0] + row_start, coord[1][0] + column_start
def find_last_digit(self, array: np.ndarray) -> int:
"""
Get number an array is lacking. Searches only in available digits' list.
:param array: sudoku element as an array of digits
:return: number not present in an array; number to insert
"""
for digit in self.s.available_digits:
if digit not in array:
return digit
def insert_digit(self, digit, row_index, column_index) -> None:
"""
Insert number into cell of provided coordinates, delete cell from list of zeros' coordinates.
Upadtes available digits and statistics of row, column and square.
:param digit: number to insert
:param row_index: row coordinate of cell to fill
:param column_index: column coordinate of cell to fill
:return: None
"""
if digit is None:
return None
self.s.array[row_index, column_index] = digit
self.s.zero_positions.remove((row_index, column_index))
self.update_available_elements(digit)
self.update_elements_statistics(row_index, column_index)
return None
    def update_available_elements(self, digit: int) -> None:
        """
        Adds the inserted number to the sudoku's digit counter and refreshes
        the list of still-available digits; per the original author a digit
        whose nine occurrences are all placed is dropped from that list by
        set_available_digits().
        :param digit: inserted number
        :return: None
        """
        self.s.elements_count[digit] += 1
        self.s.set_available_digits()
        return None
def update_elements_statistics(self, row_index: int, column_index: int) -> None:
"""
Increment number of digits in row, column and square containing recently filled cell.
Also adds those elements to stack in order to check if (after insertion) elements are easy solvable.
:param row_index: row coordinate of recently filled cell
:param column_index: column coordinate of recently filled cell
:return: None
"""
self.increment_digit_and_add_to_stack(self.s.rows[row_index])
self.increment_digit_and_add_to_stack(self.s.columns[column_index])
square_index = self.find_square(row_index, column_index)
self.increment_digit_and_add_to_stack(self.s.squares[square_index])
return None
    def increment_digit_and_add_to_stack(self, element: SudokuElement) -> None:
        """
        Increments the element's digit counter and appends it to the queue
        of recently updated elements, which lets check_stack() re-test it
        for easy solvability.
        :param element: sudoku element
        :return: None
        """
        element.increment_digits()
        self.s.unchecked_stack.append(element)
        return None
def find_square(self, row_index: int, column_index: int) -> int:
"""
Check which sudoku square contains a cell of provided coordinates
:param row_index: row coordinate of recently filled cell
:param column_index: column coordinate of recently filled cell
:return: number of square which contains cell of provided coordinates
"""
sq_number = 0
while not (
self.s.square_coordinates[sq_number][0] <= row_index < self.s.square_coordinates[sq_number][1]
and
self.s.square_coordinates[sq_number][2] <= column_index < self.s.square_coordinates[sq_number][3]
):
sq_number += 1
return sq_number
    def check_stack(self) -> None:
        """
        Drains the queue of recently updated elements, trying to solve each
        one.  fill_element() may append more elements while this runs, so
        the loop repeats until the de-duplicated queue is empty.
        :return: None
        """
        # NOTE(review): list(set(...)) removes duplicates but also discards
        # ordering, so processing order is arbitrary; pop(0) on a list is
        # O(n) per call as well.
        self.s.unchecked_stack = list(set(self.s.unchecked_stack))
        while len(self.s.unchecked_stack):
            self.fill_element(self.s.unchecked_stack[0])
            self.s.unchecked_stack.pop(0)
            # re-deduplicate because fill_element may have queued repeats
            self.s.unchecked_stack = list(set(self.s.unchecked_stack))
        return None
    def solve(self) -> None:
        """
        Main solving sequence: scans the available digits looking for blank
        cells where only one number can be inserted.
        :return: None
        """
        self.counter = 0
        # map each blank cell to the digits that could legally go there
        self.s.possible_digits_in_cells = {coordinate: [] for coordinate in self.s.zero_positions}
        # NOTE(review): self.counter appears to be advanced/reset inside
        # check_available_digits()/after_change_procedure() (not shown in
        # this excerpt) — confirm, otherwise this loop would not terminate.
        while self.counter < len(self.s.available_digits):
            self.check_available_digits()
            self.check_blank_cells()
        return None
def check_available_digits(self) -> None:
"""
For given number function generates all permissible blank cells.
Checks in squares, rows and columns if there is only one possible cell to insert the number.
If all checks don't cause number insertion, function updates dictionary with all blank cells
and their corresponding permissible digits.
:return: None
"""
digit = self.s.available_digits[self.counter]
self.s.changed = False
positions_in_squares = self.get_positions_in_squares(digit)
self.insert_digit_if_only_one_possible_position_in_square(digit, positions_in_squares)
if self.s.changed:
self.after_change_procedure()
else:
potential_positions, solutions = self.positions_in_one_row_or_column(positions_in_squares)
self.apply_solutions(digit, solutions)
if self.s.changed:
self.after_change_procedure()
else:
self.add_digit_to_blank_cells(digit, potential_positions)
return None
def get_positions_in_squares(self, digit: int) -> Dict[int, List[Tuple[int, int]]]:
"""
Creates a dictionary with blank cells' coordinates divided by square numbers.
:param digit: evaluating number
:return: dictionary with blank cells' coordinates divided by square numbers
"""
available_positions = self.search_for_available_positions(digit)
positions_in_squares = self.divide_positions_by_squares(available_positions, digit)
for i in range(2):
positions_in_squares = self.update_positions_if_one_dimensional_positions_in_squares(positions_in_squares,
i)
return positions_in_squares
def search_for_available_positions(self, digit: int) -> List[Tuple[int, int]]:
"""
Searches for blank cells where there are no interferences (same number in a row, column or square)
:param digit: number we are evaluating
:return: list of blank cells' coordinates where number can be inserted
"""
positions = np.where(self.s.array == digit)
available_rows = set(i[0] for i in self.s.zero_positions).difference(set(positions[0]))
available_cols = set(i[1] for i in self.s.zero_positions).difference(set(positions[1]))
return [i for i in product(available_rows, available_cols) if self.s.array[i[0], i[1]] == 0]
def divide_positions_by_squares(self, available_positions: List[Tuple[int, int]], digit: int) -> Dict[
int, List[int]]:
"""
Creates a dictionary with square numbers as keys and list of black cells' coordinates,
where number can be inserted.
:param available_positions:
:param digit: number we are evaluating
:return: dictionary with blank cells' coordinates divided by square numbers
"""
positions_in_squares = {}
for element in available_positions:
square_number = self.find_square(element[0], element[1])
if self.digit_in_square(digit, square_number):
continue
if square_number not in positions_in_squares:
positions_in_squares[square_number] = []
positions_in_squares[square_number].append(element)
return positions_in_squares
    def update_positions_if_one_dimensional_positions_in_squares(self,
                                                                 positions_in_squares: Dict[int, List[Tuple[int, int]]],
                                                                 flag: int) -> Dict[int, List[Tuple[int, int]]]:
        """
        For each square whose candidate cells all lie in one row/column, the
        digit must be placed on that line inside the square, so the same
        row/column coordinates are removed from the other squares' lists.
        :param positions_in_squares: candidate blank cells grouped by square
        :param flag: 0 for row, 1 for column
        :return: updated candidate cells grouped by square
        """
        # NOTE(review): delete_unavailable_coordinates mutates the dict's
        # value lists while this loop iterates the keys; presumably it never
        # adds/removes keys — confirm, otherwise a RuntimeError could occur.
        for square in positions_in_squares:
            element = self.positions_in_one_dimention(positions_in_squares[square], flag)
            self.delete_unavailable_coordinates(element, flag, positions_in_squares, square)
        return positions_in_squares
@staticmethod
def delete_unavailable_coordinates(element: int, flag: int, positions_in_squares: Dict[int, List[Tuple[int, int]]],
square: int) -> None:
"""
Get all potential (for insertion) blank cells' coordinates and deletes non-valid coordinates.
Non-valid coordinates are ones | |
"""
The physcalc module contains the routine that calculates the ionic
fraction and the NEI X-ray spectrum based on the physical conditions
in an ASCII file.
V0.1 - <NAME>, May 22, 2019: initial release
V0.2 - <NAME>, May 28, 2019: separate the CIE and NEI case
V0.3 - <NAME>, Jun 01, 2019: standardize for github uploading
V0.4 - <NAME>, Jun 03, 2019: add keyword "dolines" to calculate
pure continuum spectrum
"""
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
import pickle, os
import numpy as np
import pyatomdb
import astropy
from astropy.io import ascii
from scipy.signal import find_peaks
def calc_nei_ionfrac(Zlist, condifile=False, diff_thres=False, \
    init_file=False, begin_index=False, end_index=False, outfilename=False, \
    rootpath=False):
    """
    Calculate the ionic fraction based on the physical conditions in
    an ASCII file.

    Parameters
    ----------
    Zlist: [int]
        list of element nuclear charges

    Keywords
    --------
    condifile: string or astropy Table
        the ASCII file containing the physical condition array; a table
        already read from such a file may also be passed in;
    diff_thres: float
        maximum tolerated difference between the one-sided ionic-fraction
        estimates before a step is subdivided (def: 1e-7);
    init_file: str or dict
        pickle file (or the dictionary loaded from it) containing the
        ionic fraction at a prior condition position;
    begin_index: int
        beginning index of the condition position, from which the ionic
        fraction is calculated outwards till <end_index> (def: 0);
    end_index: int
        ending index of the condition position (def: len(conditions)-1);
    outfilename: str
        name of the output pickle file, chosen in this order:
        1. specified by <outfilename>;
        2. adopted from <init_file>, if applicable;
        3. "tionfrac_Element.List.pkl";
    rootpath: str
        directory used to resolve relative file names (def: os.getcwd()).

    Returns
    -------
    0 on success, -1 on error; the pickle file is created/updated with the
    derived ionic fraction at the condition positions.
    """
    # System parameters
    atomdbpath = os.environ['ATOMDB']
    ionbalfile = atomdbpath+'APED/ionbal/v3.0.7_ionbal.fits'
    if not pyatomdb.util.keyword_check(rootpath):
        rootpath = os.getcwd()+'/'
    if not pyatomdb.util.keyword_check(diff_thres):
        diff_thres = 1e-7
    # Check the setting of the condition array
    if pyatomdb.util.keyword_check(condifile):
        # If it is a string, look for the file name and read it if exists
        if isinstance(condifile, str):
            confile = os.path.expandvars(rootpath+condifile)
            if not os.path.isfile(confile):
                print("*** ERROR: no such condition file %s. Exiting ***" \
                    %(confile))
                return -1
            conditions = ascii.read(confile)
        elif isinstance(condifile, astropy.table.table.Table):
            conditions = condifile
        else:
            print("Unknown data type for condition file. Please pass a " \
                "string or an ASCIIList")
            return -1
    ncondi = len(conditions)
    # The final result - ionfrac[Z] is a (Z+1, ncondi) float array
    ionfrac = {}
    for Z in Zlist:
        ionfrac[Z] = np.zeros([Z+1,ncondi],dtype=float)
    # settings of the initial ionic fraction file
    if pyatomdb.util.keyword_check(init_file):
        # If it is a string, look for the file name and read it if exists
        if isinstance(init_file, str):
            initfile = os.path.expandvars(rootpath+init_file)
            if not os.path.isfile(initfile):
                print("*** ERROR: no such initial ionic fraction file %s. " \
                    "Exiting ***" %(initfile))
                return -1
            # BUG FIX: previously the raw name 'init_file' was opened instead
            # of the resolved path 'initfile'; a context manager also closes
            # the handle deterministically.
            with open(initfile, 'rb') as pklfh:
                old_ionfrac = pickle.load(pklfh)
        elif isinstance(init_file, dict):
            old_ionfrac = init_file
        else:
            print("Unknown data type for condition file. Please pass a " \
                "string or an DICTList")
            return -1
        # Copy as much of the previous run as fits the current grid
        for Z in Zlist:
            ncondi_old_ionfrac = len(old_ionfrac[Z][0,:])
            if ncondi_old_ionfrac < ncondi:
                ionfrac[Z][:,0:ncondi_old_ionfrac] = old_ionfrac[Z]
            else:
                ionfrac[Z] = old_ionfrac[Z][:,0:ncondi]
    # settings of the name of the output pickle file
    if not pyatomdb.util.keyword_check(outfilename):
        if pyatomdb.util.keyword_check(init_file) and \
            isinstance(init_file, str):
            outfilename = init_file
        else:
            outfilename = 'tionfrac_'
            for Z in Zlist:
                outfilename += pyatomdb.atomic.Ztoelsymb(Z)
            outfilename += '.pkl'
    # Initial ionic fraction: specified by init_file and begin_index,
    # otherwise the CIE balance at the first position
    ion_init = {}
    if pyatomdb.util.keyword_check(begin_index) and begin_index >0 \
        and begin_index < ncondi:
        for Z in Zlist:
            ion_init[Z] = ionfrac[Z][:,begin_index]
    else:
        begin_index = 0
        te_init = conditions[begin_index]['kT']/pyatomdb.const.KBOLTZ #in K
        for Z in Zlist:
            ion_init[Z] = pyatomdb.atomdb.get_ionfrac(ionbalfile,
                Z, te_init)
            ionfrac[Z][:,0] = ion_init[Z]
    # Deal with the ending index
    if (not pyatomdb.util.keyword_check(end_index)) or end_index < 0 \
        or end_index >= ncondi:
        end_index = ncondi-1
    # Derived physical parameters: step widths, mean velocity/density per
    # step and the ionization timescale tau = dt * <n_e>
    temp_arr = conditions['kT']
    dens_arr = conditions['dens']
    radi_arr = conditions['R']
    velo_arr = conditions['velo']
    delr_arr = np.zeros(ncondi, dtype=float)
    delr_arr[1:] = radi_arr[1:]-radi_arr[0:(ncondi-1)]
    meanv_arr = np.zeros(ncondi, dtype=float)
    meanv_arr[0] = velo_arr[0]
    meanv_arr[1:] = (velo_arr[1:]+velo_arr[0:(ncondi-1)])/2
    time_arr = delr_arr / meanv_arr
    meandens = np.zeros(ncondi, dtype=float)
    meandens[0] = dens_arr[0]
    meandens[1:] = (dens_arr[1:]+dens_arr[0:(ncondi-1)])/2
    tau_arr = time_arr * meandens
    # Split the temperature profile at its extrema and solve each monotonic
    # segment, subdividing adaptively until the solutions computed with the
    # left- and right-hand temperatures agree within diff_thres
    res1, _ = find_peaks(temp_arr)
    res2, _ = find_peaks(-temp_arr)
    extr_ind = np.sort(np.concatenate(([begin_index,end_index],res1,res2)))
    nextreme = len(extr_ind)
    for i in range(0,nextreme-1):
        iind = extr_ind[i]
        find = extr_ind[i+1]
        while iind < extr_ind[i+1]:
            taut = np.sum(tau_arr[(iind+1):(find+1)])
            # Ionic fractions using the electron temperature at the
            # beginning (l) and ending (r) positions of this sub-step
            ionbal_l = {}
            ionbal_r = {}
            for Z in Zlist:
                ionbal_l[Z] = pyatomdb.apec.solve_ionbal_eigen(Z, temp_arr[iind], \
                    init_pop=ion_init[Z], tau=taut, teunit='keV')
            for Z in Zlist:
                ionbal_r[Z] = pyatomdb.apec.solve_ionbal_eigen(Z, temp_arr[find], \
                    init_pop=ion_init[Z], tau=taut, teunit='keV')
            # Largest absolute difference between the two estimates
            maxdiff = 0.0
            for Z in Zlist:
                maxdiff = max([max(np.abs(ionbal_l[Z]-ionbal_r[Z])), maxdiff])
                print(iind,find)
                print(ionbal_l[Z])
                print(ionbal_r[Z])
                print(maxdiff,taut/1e7)
            if maxdiff > diff_thres:
                if find == iind+1:
                    if iind == begin_index:
                        # NOTE(review): this assigns the 1-D end-point balance
                        # to ionfrac[Z], overwriting the 2-D (Z+1, ncondi)
                        # array — looks suspicious, confirm intention.
                        for Z in Zlist:
                            ionfrac[Z] = ionbal_r[Z]
                        ion_init = ionbal_r
                        iind, find = find, min([find*2-iind+2, extr_ind[i+1]])
                    else:
                        print("The condition file at %d is too coarse!" % find)
                        return -1
                else:
                    # halve the sub-step and retry
                    find = int((iind+find)/2)
                    continue
            else:
                # accepted: interpolate the fractions over accumulated tau
                accum_tau = np.cumsum(tau_arr[iind:(find+1)]) - tau_arr[iind]
                for Z in Zlist:
                    for iZ in range(0,Z+1):
                        ionfrac[Z][iZ,iind:(find+1)] = \
                            np.interp(accum_tau,[0,taut], \
                            [ion_init[Z][iZ],ionbal_r[Z][iZ]])
                ion_init = ionbal_r
                iind, find = find, min([find*2-iind+2, extr_ind[i+1]])
    # Save calculated ionic fraction as pickle file
    with open(outfilename, 'wb') as outfh:
        pickle.dump(ionfrac, outfh)
    return 0
#--------------------------------------------------------------------
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def calc_nei_spectrum(Zlist, outfilename=False, condifile=False, \
condi_index=False, ebins=False, ionfracfile=False, \
linefile="$ATOMDB/apec_nei_line.fits", \
cocofile="$ATOMDB/apec_nei_comp.fits", \
dolines=True, appfile=False, rootpath=False):
"""
calculate the NEI X-ray spectrum, based on the ionic fraction derived
by the "calc_ionfrac" routine. The only input parameter is the index
(array) of the elements.
Parameters
----------
Zlist: [int]
list of element nuclear charges
Keywords
--------
condifile: string or dictionary
the ASCII file containing the physical condition array. can also
pass in a structure read from the ASCII file;
condi_index: [int]
index array of at which condition position to derive the spectrum;
ebins: | |
<gh_stars>0
#!/usr/bin/env python3
import configparser
from datetime import datetime
import spotipy
from random import random
import logging
import time
import sys
import os
from io import BytesIO
import os.path
from spotipy.oauth2 import SpotifyOAuth
from spotipy.cache_handler import CacheFileHandler
import simplejson
from rgbmatrix import RGBMatrix, RGBMatrixOptions, graphics
from PIL import Image, ImageEnhance, ImageFont, ImageDraw, ImageChops, ImageFilter, ImageOps
import urllib
import urllib3
import requests
import http
import socket
config = configparser.ConfigParser()
# Directory this script lives in; fall back to '.' when launched from the
# current directory (dirname of a bare name is '').
basepath = os.path.dirname(sys.argv[0])
if basepath == "":
    basepath = "."
# Config file may be passed as the first CLI argument; otherwise use
# local.config next to the script.
if len(sys.argv) > 1:
    configfile = sys.argv[1]
else:
    configfile = "%s/local.config" % basepath
config.read(configfile)
username = config["spotify"]["username"]
image_cache = "%s/imagecache" % (basepath)
# Placeholders; replaced later with the live weather/music state objects.
weather = False
music = False
def getFont(fontconfig):
    """Load a TrueType font described by a 'path,size' config string."""
    font_path, font_size = fontconfig.split(",")
    return ImageFont.truetype(font_path, int(font_size))
ttfFont = getFont(config["fonts"]["regular"])
ttfFontSm = getFont(config["fonts"]["small"])
ttfFontLg = getFont(config["fonts"]["large"])
weatherFont = ImageFont.truetype("%s/weathericons-regular-webfont.ttf" % basepath, 20)
# logging.basicConfig(filename='/tmp/spotify-matrix.log',level=logging.INFO)
logger = logging.getLogger(__name__)
class Frame:
    """Wrapper around the RGB LED matrix hardware.

    Owns the matrix handle plus an off-screen canvas, and pushes PIL
    images to the panel with per-pixel gamma correction.
    """
    def __init__(self):
        # Geometry and brightness come from the config; mapping/slowdown
        # values are fixed for an Adafruit HAT with hardware PWM.
        self.options = RGBMatrixOptions()
        self.options.brightness = int(config["matrix"]["brightness"])
        self.options.hardware_mapping = "adafruit-hat-pwm"
        self.options.rows = int(config["matrix"]["height"])
        self.options.cols = int(config["matrix"]["width"])
        self.options.disable_hardware_pulsing = False
        self.options.gpio_slowdown = 3
        self.matrix = RGBMatrix(options=self.options)
        # Draw into an off-screen canvas and swap on vsync to avoid tearing.
        self.offscreen_canvas = self.matrix.CreateFrameCanvas()
        self.width = self.options.cols
        self.height = self.options.rows
    # NOTE: no `self` -- used as a plain per-pixel callback via
    # Image.eval(canvas, Frame.gamma) in swap() below.
    # Relies on the module-level `weather` object being initialized first.
    def gamma(value):
        # Night: stronger dimming (max 128) with a gentler curve;
        # day: full range with a steeper gamma of 1.8.
        if weather.night():
            return round(pow(value / 255.0, 1.2) * 128.0)
        else:
            return round(pow(value / 255.0, 1.8) * 255.0)
    def swap(self, canvas):
        # Apply gamma, then display via the double-buffered vsync swap.
        canvas = Image.eval(canvas, Frame.gamma)
        self.offscreen_canvas.SetImage(canvas, 0, 0)
        self.offscreen_canvas = self.matrix.SwapOnVSync(self.offscreen_canvas)
def getTextImage(texts, color):
    """Render text lines onto a transparent 64x32 RGBA image.

    Each entry of *texts* is (text, (x, y)) or (text, (x, y), font, color);
    entries without an explicit font/color use the module default font and
    the *color* argument. Every line gets a one-pixel black drop shadow.
    """
    img = Image.new('RGBA', (64, 32), (255, 255, 255, 0))
    pen = ImageDraw.Draw(img)
    pen.fontmode = "1"  # no antialiasing: crisp pixels on the LED matrix
    for entry in texts:
        text = entry[0]
        (x, y) = entry[1]
        overrides = entry[2:]
        if overrides:
            lineFont, lineColor = overrides[0], overrides[1]
        else:
            lineFont, lineColor = ttfFont, color
        pen.text((x - 1, y + 1), text, (0, 0, 0), font=lineFont)  # shadow
        pen.text((x, y), text, lineColor, font=lineFont)
    return img
textColor = (192, 192, 192)
def ktof(k):
    """Convert a temperature from kelvin to degrees Fahrenheit."""
    celsius = k - 273.15
    return 32.0 + celsius * 1.8
class Weather:
    """Current conditions and forecast from the OpenWeatherMap "one call" API.

    Fetches are rate-limited via a `nextupdate` deadline; accessors format
    temperature, humidity, wind and pressure for display, and image()
    renders the full 64x32 weather frame.
    """
    # Coordinates are hard-coded; the API key comes from the config file.
    api = "https://api.openweathermap.org/data/2.5/onecall?lat=39.9623348&lon=-75.1927043&appid=%s" % (config["openweathermap"]["api_key"])
    def __init__(self):
        self.nextupdate = 0  # epoch deadline; 0 forces an immediate fetch
        self._update()
    def _update(self):
        """Fetch a fresh payload if the refresh deadline has passed.

        Returns False when nothing was fetched (not due yet, or success),
        or the seconds until the next scheduled attempt after a failure.
        """
        if time.time() < self.nextupdate:
            return False
        try:
            r = urllib.request.urlopen(Weather.api)
        except (http.client.RemoteDisconnected, urllib3.exceptions.ProtocolError, urllib.error.URLError) as err:
            logger.error("Problem getting weather")
            logger.error(err)
            time.sleep(30)  # brief pause before the caller retries
            return self.nextupdate - time.time()
        self._payload = simplejson.loads(r.read())
        self._now = self._payload["current"]
        # Update every 30 minutes overnight to save API calls
        if time.localtime()[3] <= 5:
            self.nextupdate = time.time() + (60 * 30)
        else:
            self.nextupdate = time.time() + (60 * 5)
    def night(self):
        """True from ~18 min (1080 s) after sunset until ~18 min before sunrise."""
        if self._now["dt"] > (self._now["sunset"] + 1080) or self._now["dt"] < (self._now["sunrise"] - 1080):
            return True
        else:
            return False
    def icon(self):
        """Return a 32x32 icon: moon phase at night, OWM condition icon by day."""
        if self.night():
            skyColor = (0, 0, 0)
        else:
            # Sky background gets greyer with cloud cover.
            clouds = self.clouds() / 100.0
            skyColor = (int(clouds * 32), int(clouds * 32), 32)
        iconBox = Image.new('RGBA', (32, 32), skyColor)
        if self.night():
            # Map moon_phase (0..1) onto the 8 emoji images 1F311..1F318.
            phase = (((round(self._payload["daily"][0]["moon_phase"] * 8) % 8) + 11))
            moonImage = Image.open("%s/Emojione_1F3%2.2d.svg.png" % (image_cache, phase)).resize((20,20))
            moonDim = ImageOps.expand(ImageEnhance.Brightness(moonImage).enhance(0.75), border=4, fill=(0,0,0,0))
            iconBox.alpha_composite(moonDim, dest=(2, -2))
        else:
            url = "http://openweathermap.org/img/wn/%s.png" % (self._now["weather"][0]["icon"])
            filename = "%s/weather-%s.png" % (image_cache, self._now["weather"][0]["icon"])
            if not os.path.isfile(filename):
                logger.info("Getting %s" % url)
                urllib.request.urlretrieve(url, filename)
            iconImage = Image.open(filename)
            iconImage = iconImage.crop((3, 3, 45, 45)).resize((24, 24))
            iconBox.alpha_composite(iconImage, dest=(4, -2))
        return iconBox
    def hour(self, hour):
        """Hourly forecast entry *hour* hours ahead (0 = current hour)."""
        return self._payload["hourly"][hour]
    def temp(self):
        """Current temperature formatted in whole degrees Fahrenheit."""
        return "%.0f°" % ktof(self._payload["current"]["temp"])
    # If the "feels_like" temp is over 90 it's probably steamy outside
    def steamy(self):
        return ktof(self._payload["current"]["feels_like"]) > 90
    def icy(self):
        """True when the apparent temperature is below freezing."""
        return ktof(self._payload["current"]["feels_like"]) < 32
    def feelslike(self):
        """Apparent temperature (prefixed "~") when extreme, else actual."""
        if self.steamy() or self.icy():
            return "~%.0f°" % ktof(self._payload["current"]["feels_like"])
        else:
            return self.temp()
    def humidity(self):
        return "%.0f%%" % self._payload["current"]["humidity"]
    def clouds(self):
        # Cloud cover percentage, 0-100.
        return self._now["clouds"]
    def wind_speed(self):
        # m/s -> mph
        return "%.0f mph" % (self._payload["current"]["wind_speed"] * 2.237)
    # The screen is actually too low-res for this to look good
    def wind_dir(self):
        """Wind direction as a compass quadrant letter ("N"/"E"/"S"/"W").

        BUGFIX: rotate by +45 degrees (mod 360) so each letter covers the
        quadrant centred on its compass point (315..45 -> "N"). The old
        code subtracted 45, which mapped a northerly (0 deg) wind to "W".
        """
        d = (self._now["wind_deg"] + 45) % 360
        wind_dirs = ["N", "E", "S", "W"]
        return wind_dirs[int(d / 90)]
    def pressure(self):
        # hPa -> inches of mercury
        return "%.1f\"" % (self._payload["current"]["pressure"] * 0.0295301)
    def image(self):
        """Render the weather screen as a 64x32 RGB image.

        Left: temperature/humidity/wind/pressure text and a rain strip;
        middle: a 24-hour temperature-trend column; right: icon and clock.
        """
        self._update()
        canvas = Image.new('RGBA', (64, 32), (0, 0, 0))
        draw = ImageDraw.Draw(canvas)
        # Vertical 24h strip: tick marks at midnight/6/noon/18, and a dot
        # coloured warmer/cooler/steady relative to the current hour.
        for x in range(24):
            t = time.localtime(self.hour(x+1)["dt"])
            if t[3] == 0:
                draw.line([(26, x+4), (28, x+4)], fill=(64, 64, 64))
            if t[3] in [6, 18]:
                draw.line([(27, x+4), (29, x+4)], fill=(64, 64, 64))
            if t[3] == 12:
                draw.line([(28, x+4), (30, x+4)], fill=(64, 64, 64))
            diff = self.hour(x)["temp"] - self.hour(0)["temp"]
            if diff > 1.0:
                draw.point((28, x+4), fill=(128, 96, 16))
            elif diff < -1.0:
                draw.point((28, x+4), fill=(32, 32, 192))
            else:
                draw.point((28, x+4), fill=(64, 64, 64))
        iconImage = self.icon()
        # We're replacing the entire right side of
        # the image, so no need for alpha blending
        canvas.paste(iconImage, (32, 0))
        # A little indicator of rain in the next hour. Each pixel represents two minutes.
        for m in range(30):
            try:  # one time the payload didn't include minutely data...
                rain = self._payload["minutely"][2 * m]["precipitation"] + self._payload["minutely"][2 * m + 1]["precipitation"]
                if rain > 0:
                    draw.point((m + 1, 0), fill=(128,128,255))
                else:
                    draw.point((m + 1, 0), fill=(32,32,32))
            except (KeyError, IndexError):
                draw.point((m + 1, 0), fill=(8, 8, 8))
        mytime = datetime.now().strftime("%-I:%M")
        mytimewidth = ttfFont.getsize(mytime)[0]
        txtImg = getTextImage([
            (self.temp(), (1, -1), ttfFontLg, (192, 192, 128)),
            (self.humidity(), (1, 12), ttfFontSm, (128, 192, 128)),
            (self.wind_speed(), (1, 18), ttfFontSm, (128, 192, 192)),
            (self.pressure(), (1, 24), ttfFontSm, (128, 128, 128)),
            (mytime, (48 - (mytimewidth >> 1), 21), ttfFont, (128, 128, 128)),  # Centered in the right half
            ],
            textColor)
        return Image.alpha_composite(canvas, txtImg).convert('RGB')
class Music:
    """Polls the Spotify API for the current user's now-playing track and
    caches track metadata plus album/artist artwork URLs.

    NOTE(review): the artwork-URL refresh in _update() compares against
    lastSong, which is only advanced later by new_song() -- the call order
    in main() matters; do not reorder these.
    """
    def __init__(self):
        # OAuth token cache lives under basepath/tokens/<username>.
        self._spotify = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=config["spotify"]["spotify_id"],
                                                                  client_secret=config["spotify"]["spotify_secret"],
                                                                  cache_handler=CacheFileHandler(cache_path="%s/tokens/%s" % (basepath, username)),
                                                                  redirect_uri="http://localhost:8080/callback",
                                                                  show_dialog=True,
                                                                  open_browser=False,
                                                                  scope="user-library-read,user-read-playback-state"))
        user = self._spotify.current_user()
        logger.info("Now Playing for %s [%s]" % (user["display_name"], user["id"]))
        self.nextupdate = 0  # epoch deadline; 0 forces an immediate poll
        # Last album/track ids seen; consumed by new_album()/new_song().
        self.lastAlbum = ""
        self.lastSong = ""
        self._nowplaying = False  # raw API payload, or False when unknown
        self._update()
    def timeleft(self):
        # Whole seconds remaining in the current track.
        return round((self._nowplaying["item"]["duration_ms"] - self._nowplaying["progress_ms"]) / 1000.0)
    def nowplaying(self):
        # True when a track is actively playing and the payload is usable.
        if self._nowplaying and self._nowplaying["is_playing"] and self._nowplaying["item"]:
            return True
        else:
            return False
    def _update(self):
        """Refresh now-playing state, rate-limited via self.nextupdate.

        Returns False on error or when idle, otherwise the number of
        seconds until the next scheduled poll.
        """
        if time.time() < self.nextupdate:
            return self.nextupdate - time.time()
        try:
            self._nowplaying = self._spotify.current_user_playing_track()
        except (spotipy.exceptions.SpotifyException,
                spotipy.oauth2.SpotifyOauthError) as err:
            logger.error("Spotify error getting current_user_playing_track:")
            logger.error(err)
            self.nextupdate = time.time() + 60 * 5  # cooloff for 5 minutes
            self._nowplaying = False
            return False
        except (requests.exceptions.ReadTimeout,
                requests.exceptions.ConnectionError,
                simplejson.errors.JSONDecodeError) as err:
            logger.error("Protocol problem getting current_user_playing_track")
            logger.error(err)
            self.nextupdate = time.time() + 60  # cooloff for 60 seconds
            self._nowplaying = False
            return False
        if not self.nowplaying():
            # Poll less often in the early morning when nothing is playing.
            if time.localtime()[3] <= 7:
                self.nextupdate = time.time() + (5 * 60)  # check in 5 minutes
            else:
                self.nextupdate = time.time() + 30  # check in 30 seconds
            return False
        elif self.timeleft() > 30:
            self.nextupdate = time.time() + 10
        else:
            # Track ends soon; wake up exactly when it does.
            self.nextupdate = time.time() + self.timeleft()
        if self.nowplaying():
            self.track = self._nowplaying["item"]["name"]
            self.album = self._nowplaying["item"]["album"]["name"]
            self.artist = ", ".join(map(lambda x: x["name"], self._nowplaying["item"]["artists"]))
            self.album_id = self._nowplaying["item"]["album"]["id"]
            self.track_id = self._nowplaying["item"]["id"]
            # Only re-resolve artwork URLs when the song changed; lastSong
            # itself is advanced later by new_song().
            if self.lastSong != self.track_id:
                self.album_art_url = self.album_art()
                self.artist_art_url = self.artist_art()
        return self.nextupdate - time.time()
    def new_album(self):
        # True once per album change; updates lastAlbum as a side effect.
        if self.lastAlbum == self.album_id:
            return False
        else:
            self.lastAlbum = self.album_id
            return True
    def new_song(self):
        # True once per track change; updates lastSong as a side effect.
        if self.lastSong == self.track_id:
            return False
        else:
            self.lastSong = self.track_id
            return True
    def artists(self):
        # All artist names on the current track, as a tuple.
        return tuple(map(lambda x: x["name"], self._nowplaying["item"]["artists"]))
    def is_local(self):
        # Local files have no Spotify catalog artwork/metadata.
        return self._nowplaying["item"]["uri"].startswith("spotify:local:")
    def album_art(self):
        # URL of the last (smallest) album image, or None when absent.
        try:
            return self._nowplaying["item"]["album"]["images"][-1]["url"]
        except IndexError:
            return None
    def artist_art(self):
        # URL of the last (smallest) image for the primary artist, or None.
        results = self._spotify.search(q='artist:' + self.artists()[0], type='artist')
        try:
            return results["artists"]["items"][0]["images"][-1]["url"]
        except IndexError:
            return None
    def album_image(self):
        """Return the current artwork as a padded 32x32 PIL image, preferring
        album art and falling back to artist art; disk-cached by URL tail."""
        if self.album_art_url:
            url = self.album_art_url
        else:
            url = self.artist_art_url
        m = url.rsplit('/', 1)
        processed = "%s/spotify-%s.png" % (image_cache, m[-1])
        # We're going to save the processed image instead of the raw one.
        if os.path.isfile(processed):
            image = Image.open(processed)
        else:
            logger.info("Getting %s" % url)
            with urllib.request.urlopen(url) as rawimage:
                image = ImageOps.pad(Image.open(rawimage), size=(32, 32), method=Image.LANCZOS, centering=(1,0))
            image.save(processed, "PNG")
        # Tone artwork down so it doesn't overpower the text.
        image = ImageEnhance.Color(image).enhance(0.5)
        image = ImageEnhance.Brightness(image).enhance(0.85)
        return image
    def canvas(self):
        # Compose the 64x32 frame: artwork right, feels-like temperature
        # left, tinted by comfort (steamy / icy / neutral).
        canvas = Image.new('RGBA', (64, 32), (0,0,0))
        canvas.paste(self.album_image(), (32, 0))
        if weather.steamy():
            canvas.alpha_composite(getTextImage([(weather.feelslike(), (0, -2), ttfFont, (128, 128, 64)),], textColor))
        elif weather.icy():
            canvas.alpha_composite(getTextImage([(weather.feelslike(), (0, -2), ttfFont, (128, 148, 196)),], textColor))
        else:
            canvas.alpha_composite(getTextImage([(weather.feelslike(), (0, -2), ttfFont, (128, 128, 128)),], textColor))
        return canvas
    def get_text_length(self):
        # Pixel width of the longest line that get_text() will draw.
        if self.album_art_url == None:
            return max(ttfFont.getsize(self.track)[0], ttfFont.getsize(self.album)[0], ttfFont.getsize(self.artist)[0])
        else:
            return max(ttfFont.getsize(self.track)[0], ttfFont.getsize(self.artist)[0])
    def get_text(self, x, y, textColor):
        # Track/artist text block; the album name gets its own line only
        # when no album art is available to show it.
        if self.album_art_url == None:
            return getTextImage([
                (self.track, (x, y - 10)),
                (self.album, (x, y)),
                (self.artist, (x, y + 10))
            ], textColor)
        else:
            return getTextImage([
                (self.track, (x, y)),
                (self.artist, (x, y + 10))
            ], textColor)
def main():
global weather
frame = Frame()
weather = Weather()
music = Music()
while True:
music._update()
weather._update()
# We have a playing track.
if music.nowplaying():
canvas = music.canvas()
if music.new_album():
for x in range(127):
frame.swap(ImageEnhance.Brightness(canvas).enhance(x * 2 / 255.0).convert('RGB'))
time.sleep(0.5)
# Length of the longest line of text, in pixels.
length = music.get_text_length()
# If either line of text is longer than the display, scroll
if | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' Resistive-capacitive model implementation.
The class have methods for setting potential of streamer heads,
and for relaxing the potential each iteration.
The class have methods to get RC-factors for
resistance in channel, capacitance towards the plane,
breakdown in channel, conduction due to dissociation.
'''
# General imports
import numpy as np
import logging
from scipy.special import iv as bessel_iv # bessel function
# Import from project files
from ..core import coordinate_functions
from .streamer_head import SHList
from .streamer_head import StreamerHead
# settings
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
eps = np.finfo(float).eps # 2.22e-16 for double
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# RC #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class RC(object):
    def __init__(self,
                 origin,              # usually the needle
                 tau0,                # tau = RCtau0
                 U_grad,              # minimum E-field within channel
                 resistance,          # how to model channel resistance
                 capacitance,         # how to model capacitance
                 breakdown,           # threshold for breakdown in channel
                 breakdown_factor,    # tau *= bdf, when there is a breakdown
                 onsager,             # if true, enable Onsager model
                 potential_merged,    # potential model to use
                 potential_branched,  # potential model to use
                 ):
        """Store the RC-model configuration on the instance.

        The attribute assignment order is preserved deliberately: the
        whole __dict__ is dumped to the log (level 5) right below.
        """
        self.origin = origin
        self.U_grad = U_grad
        self.tau0 = tau0
        self.resistance = resistance
        self.capacitance = capacitance
        self.breakdown_threshold = breakdown
        self.breakdown_factor = breakdown_factor
        self.onsager = onsager
        self.potential_merged = potential_merged
        self.potential_branched = potential_branched
        logger.debug('Initiated RC')
        logger.log(5, 'RC.__dict__')
        for k, v in self.__dict__.items():
            logger.log(5, '  "{}": {}'.format(k, v))
@staticmethod
def _cap_factor_constant(heads):
# constant capacitance -- no dependence on anything
return np.ones_like(heads.d)
@staticmethod
def _cap_factor_plane(heads):
# model each streamer heads as a parallel plate capacitor
# scale by gap length
return (1 / heads.d)
@staticmethod
def _cap_factor_hyperbole(heads, origin):
# model each streamer heads as a hyperboloid capacitor
den = 4 * heads.a / heads.rp
return 1 / np.log(den)
@staticmethod
def _cap_factor_sphere(heads, origin):
# model capacitance as an expanding sphere, see Crowley 2008
d = origin.d
rp = origin.rp
z = heads.d
r = (d + 2 * rp - z) / 2 # sphere radius
return r * (1 + 0.5 * np.log(1 + r / z))
@staticmethod
def _cap_factor_half_sphere(heads, origin):
# model capacitance as an expanding half-sphere, see Crowley 2008
d = origin.d
rp = origin.rp
z = heads.d
r = (d + rp - z) # half sphere radius
# note: the half sphere is about twice the size of the sphere
return r * (1 + 0.5 * np.log(1 + r / z))
def _get_cap_factor(self, heads, origin, cdm):
# return unscaled capacitance of each head, given origin and model
# choose model for capacitance towards plane
if (cdm == 'constant') or (cdm == '1') or (cdm == 1):
return self._cap_factor_constant(heads)
elif cdm == 'plane':
return self._cap_factor_plane(heads)
elif cdm == 'hyperbole':
return self._cap_factor_hyperbole(heads, origin)
elif cdm == 'sphere':
return self._cap_factor_sphere(heads, origin)
elif cdm == 'half_sphere':
return self._cap_factor_half_sphere(heads, origin)
else:
msg = 'Error! Unknown capacitance model: {}'
logger.error(msg.format(cdm))
raise SystemExit
def get_cap_factor(self, heads, origin=None, cdm=None):
# return capacitance of the heads, scaled by the needle capacitance
if origin is None:
origin = self.origin # the needle
if cdm is None: # capacitance dependence model
cdm = self.capacitance
c_origin = self._get_cap_factor(
heads=origin, origin=origin, cdm=cdm)
c_heads = self._get_cap_factor(
heads=heads, origin=origin, cdm=cdm)
return c_heads / c_origin
def get_res_factor(self, heads, origin=None, ldm=None):
# return length/resistance dependence, scaled by the gap distance
if origin is None:
origin = self.origin # the needle
if ldm is None: # length dependence model
ldm = self.resistance
# choose model for resistance in channel
if ldm == 'constant':
# constant resistance -- no dependence on anything
return np.ones_like(heads.d)
elif ldm == 'linear':
# scale resistance with length of channel
length = origin.dist_to(heads.pos)
return length / origin.z
else:
msg = 'Error! Unknown resistance model: {}'
logger.error(msg.format(ldm))
raise SystemExit
def get_breakdown_factor(self, heads, origin=None, bdt=None, bdf=None):
# return low resistance if/where breakdown in channel
if origin is None:
origin = self.origin
if bdt is None: # breakdown threshold value
bdt = self.breakdown_threshold
if bdf is None: # breakdown factor
bdf = self.breakdown_factor
length = origin.dist_to(heads.pos)
estr = (origin.U0 - heads.U0) / (length + eps) # eps for safe needle
bd = np.ones_like(length) # default to factor 1
bd[estr > bdt] = bdf # set to bdf for breakdown
return bd
def get_onsager_faktor(self, heads):
# enhanced conductance from ion dissociation, see Gäfvert 1992
# note: this model was implemented to demonstrate non-linear effects
# it is for a liquid, not for a gas/plasma
# the temperature and the permittivity could be changed later
if not self.onsager:
return np.ones_like(heads.d)
# field in channel
length = self.origin.dist_to(heads.pos)
# eps for safe needle
estr = (self.origin.U0 - heads.U0) / (length + eps)
# standard parameters
T = 293 # K
kb_J = 1.381e-23 # J/K
e0 = 8.85e-12 # F/m, vacuum permittivity
er = 2 * e0
ec = 1.6e-19 # C, elementary charge
# calculate dissociation
estr = estr + eps
_n = ec**3 * estr # nominator
_d = 16 * np.pi * er * T**2 * kb_J**2 # denominator * 2
_b = np.sqrt(_n / _d) # sq(b/2) ==> 2sq(b/2)=sq(2b)
_f = bessel_iv(1, 4 * _b) / (2 * _b) # 4sq(b/2)=sq(8b)
h = 1 / _f # increased conductance implies lower tau-factor here
return h
def relax(self, streamer, needle, dt):
''' Calculate the time constant and
relax the potential of each streamer head.
'''
# get factors for time constant
_ld = self.get_res_factor(streamer.heads)
_cd = self.get_cap_factor(streamer.heads)
_bd = self.get_breakdown_factor(streamer.heads)
_od = self.get_onsager_faktor(streamer.heads)
# combine all the factors
tau = self.tau0
tau *= _ld # channel length dependence
tau *= _cd # capacitance dependence
tau *= _bd # breakdown in channel?
tau *= _od # Onsager dissociation
tau = np.minimum(tau, 1 / eps) # ensure tau < inf
tau = np.maximum(tau, eps) # ensure tau > 0
# final potential
Uf = self.get_final_potential(streamer.heads)
# potentials differences
diff_prev = Uf - streamer.heads.U0
diff_new = diff_prev * np.exp(- dt / tau)
diff_diff = diff_prev - diff_new
if diff_diff.max() > 100:
msg = 'Relaxed potential, max {:.1f} kV'
logger.log(5, msg.format(diff_diff.max() * 1e-3))
# set relaxed potentials
streamer.heads.U0 = Uf - diff_new
def _set_potential(self, streamer, heads, model):
''' Modify the potential of the heads,
and possibly the streamer,
depending on the chosen model.
'''
if isinstance(heads, (StreamerHead,)):
heads = [heads]
heads = SHList(heads) # ensure streamer head list
if model == 'zero': # set all potentials to 0
heads.U0 = 0
elif model == 'previous': # use potential at current position
U0 = streamer.heads.epot(heads.pos)
heads.U0 = U0
elif model == 'propagate': # propagate charge
self.propagate_charge(streamer, heads)
elif model == 'share_charge': # share charge
self.share_charge(streamer, heads)
elif model == 'final': # relax fully
U0 = self.get_final_potential(heads)
heads.U0 = U0
else:
msg = 'Error! Unknown potential model! ({})'
logger.error(msg.format(model))
raise SystemExit
def set_potential_merged(self, streamer, heads):
# apply the correct model to set potential for merged heads
self._set_potential(streamer, heads, model=self.potential_merged)
def set_potential_branched(self, streamer, heads):
# apply the correct model to set potential for branched heads
self._set_potential(streamer, heads, model=self.potential_branched)
def propagate_charge(self, streamer, heads):
''' Set potential of the heads by propagate charge
from the nearest existing head.
'''
for head in heads:
# find nearest head
(nn_idx, nn_dst) = head.find_nearest(streamer.heads.pos)
nn_head = streamer.heads[int(nn_idx)]
# get the relative capacitance, which is ok
c_nn = self.get_cap_factor(SHList([nn_head]))
c_h = self.get_cap_factor(SHList([head]))
# u' = q' / c' = u * c / c'
c_frac = c_nn / c_h
# ensure that the potential does not increase
head.U0 = nn_head.U0 * min(1, c_frac)
msg = 'Propagating head set to {:.1f} kV'
logger.log(5, msg.format(head.U0 * 1e-3))
if c_frac > 1:
msg = 'Propagating potential capped.'
logger.log(5, | |
'vProvinceState': User.clsPhysicalAddress.readProvinceState(),
'vCountry': User.clsPhysicalAddress.readCountry(),
'CountryList': self.Country_list,
'vPostalZipCode': User.clsPhysicalAddress.readPostalZipCode(),
'vPreferredLanguage': User.clsPrivate.readPrefferedLanguage(),
'vMaritalStatus': User.clsPrivate.readMarital_Status(),
'vGender': User.clsPrivate.readGender(),
'vBirthDate': User.clsPrivate.readDateof_Birth(),
'vAge': User.clsPrivate.readAge(),
'vDependents': User.clsPrivate.readDependents()}
self.response.write(template.render(context))
logging.info('Members Area Render Complete')
else:
login_url = users.create_login_url(self.request.path)
logout_url = users.create_logout_url(dest_url='/')
template = template_env.get_template('/templates/EditPersonalDetails.html')
context = {'loginURL': login_url, 'logoutURL': logout_url, 'MemberMessage': 'Please login to access the members Area'}
self.response.write(template.render(context))
except:
login_url = users.create_login_url(self.request.path)
logout_url = users.create_logout_url(dest_url='/')
template = template_env.get_template('/templates/EditPersonalDetails.html')
context = {'loginURL': login_url, 'logoutURL': logout_url, 'MemberMessage': 'There was an Error accessing your records please try again in a minute'}
self.response.write(template.render(context))
def post(self):
try:
Guser = users.get_current_user()
if Guser:
if isGoogleServer:
reference = Guser.user_id()
else:
reference = self._tempCode
if reference == self.request.get('vReferencer'):
User.clsReference.writeUsername(self.request.get('vUsernamer'))
User.clsReference.writeReference(self.request.get('vReferencer'))
User.clsReference.writePassword(self.request.get('vPasswordr'))
User.clsReference.writeIDNumber(self.request.get('vIDNumberr'))
User.clsReference.writeVerEmail(self.request.get('vEmailAddressr'))
result = User.AddReferenceclasstoStore()
if result == User._referenceNumConflict:
result = User.GetReferenceByRefNum(User.clsReference.readReference()) #looking for the existing reference number in order to obtain teh pkeyvalue
if not(User._pkeyvalue == self.undefined):
result = User.editReferenceByPkey() #Editing the Reference Class its already on the Store
if (result == User._pkeyvalue):
refmessage = 'Account Details Updated'
logging.info('Account Details Updated')
else:
refmessage = 'Fail to Update Account Details'
logging.info('Fail to Update Account Details')
else:
refmessage = 'Catastrophic Error trying to update Account Details'
logging.info('Catastrophic Error trying to update Account Details')
elif result == User._userNameConflict:
refmessage = 'Your Username or nickname already exist please create a unique name'
logging.info('Your Username or nickname already exist please create a unique name')
elif result == False:
refmessage = 'Your Account Details are not complete please fill in all the required fields'
logging.info('Your Account Details are not complete please fill in all the required fields')
else:
#The reference Class is succesfully created
refmessage ='Account Details Created'
logging.info('Account Details Created')
#We must update vRefMessage to reflect The Message Related to Reference
User.clsNames.writeFirstname(self.request.get('vFirstnamer'))
User.clsNames.writeSecondname(self.request.get('vSecondnamer'))
User.clsNames.writeSurname(self.request.get('vSurnamer'))
result = User.getNamesbyRefNum(reference)
if result == self.undefined: #names not found meaning i can add new names
result = User.addNamesByRefNum(reference)
if not(User._namesPkeyvalue == self.undefined):
namesmessage ='Your Personal Details have been added'
logging.info('Your Personal Details have been added')
elif result == self.undefined:
namesmessage = 'The login Details supplied might be invalid please create a new account'
logging.info('Your Personal Details have been added')
else:
namesmessage = 'Error Creating a new Personal information record try again in a minute'
logging.info('Error Creating a new Personal information record try again in a minute')
elif result == User._generalError:
namesmessage ='Catastrophic Error Updating Names Details'
logging.info('Catastrophic Error Updating Names Details')
else:
#names have been found meaning they exist then edit them
result = User.editNamesbyNamesPkey()
namesmessage = 'Your Names record has been Succesfully Updated'
logging.info('Your Names record has been Succesfully Updated')
#we must update vNamesMessage to inlcude the namesmessage
#We must update vRefMessage to reflect The Message Related to Reference
User.clsContactDetails.writeCell(self.request.get('vCellr'))
User.clsContactDetails.writeEmail(self.request.get('vEmailr'))
User.clsContactDetails.writeFax(self.request.get('vFaxr'))
User.clsContactDetails.writeTel(self.request.get('vTelr'))
User.clsContactDetails.writeFacebook(self.request.get('vFacebookr'))
User.clsContactDetails.writeTwitter(self.request.get('vTwitterr'))
User.clsContactDetails.writeLinkedIn(self.request.get('vLinkedinr'))
User.clsContactDetails.writeGooglePlus(self.request.get('vGooglePlusr'))
User.clsContactDetails.writePinterest(self.request.get('vPinterestr'))
User.clsContactDetails.writeSkype(self.request.get(('vSkyper')))
User.clsContactDetails.writeBlog(self.request.get('vBlogr'))
User.clsContactDetails.writeWhosWho(self.request.get('vWhosWhor'))
User.clsContactDetails.writeAboutMe(self.request.get('vAboutMer'))
User.clsContactDetails.writeWebsite(self.request.get('vWebsiter'))
result = User.getContactDetailsByRefNum(reference)
if result == User._referenceDoNotExist:
#There's no contact details record for this user a new one must be added from the form
contactmessage = 'Catastrophic Error Adding Contact Details'
logging.info('Catastrophic Error Adding Contact Details')
elif result == self.undefined: #empty list
result = User.addContactDetailsByRefNum(reference)
if result == self.undefined:
contactmessage = 'Error Adding New Contact Details Record'
logging.info('Error Adding New Contact Details Record')
elif result == User._generalError:
contactmessage = 'Catastrophic Error Adding Contact Details'
logging.info('Catastrophic Error Adding Contact Details')
else:
contactmessage = 'Contact Details Record was succesfully Added'
logging.info('Contact Details Record was succesfully Added')
elif not(User._contactPkey == self.undefined):
result = User.editContactDetailsbyPkey()
if result == self.undefined:
contactmessage = 'Error Editing your Contact Details Record'
logging.info('Error Editing your Contact Details Record')
elif result == User._generalError:
contactmessage = 'Catastrophic Error Editing Contact Details Record'
logging.info('Catastrophic Error Editing Contact Details Record')
else:
contactmessage = 'Contact Details Record Succesfully Edited'
logging.info('Contact Details Record Succesfully Edited')
else:
contactmessage = 'Catastrophic Error Updating Contact Details Record'
logging.info('Catastrophic Error Updating Contact Details Record')
#We Must Update the vContactMessage to reflect the value of the contactmessage variable
                            #we must update vNamesMessage to include the namesmessage
#We must update vRefMessage to reflect The Message Related to refmessage
User.clsPhysicalAddress.writeStandNumber(self.request.get('vStandNumberr'))
User.clsPhysicalAddress.writeStreetName(self.request.get('vStreetnamer'))
User.clsPhysicalAddress.writeCityTown(self.request.get('vCityTownr'))
User.clsPhysicalAddress.writeProvinceState(self.request.get('vProvinceStater'))
User.clsPhysicalAddress.writeCountry(self.request.get('vCountryr'))
User.clsPhysicalAddress.writePostalZipCode(self.request.get('vPostalZipCoder'))
result = User.getPhysicalAddressByRefnum(reference)
if result == User._referenceDoNotExist:
#User not loggedin or the reference number is not valid logging the info and exit
physicalmessage ='Physical Address Record cannot be added you might not be loggedin'
logging.info('Physical Address Record cannot be added you might not be loggedin')
elif result == self._clsPhysicalDonotExist:
result = User.addPhysicalAddressByRefNum(reference)
if User._physicalAddressPkey == self.undefined:
physicalmessage = 'Error Adding Physical Address Record'
logging.info('Error Adding Physical Address Record')
else:
physicalmessage = 'Physical Address Record has been succesfully added'
logging.info('Physical Address Record has been succesfully added')
elif result == User._generalError:
#Catastrophic Error
physicalmessage = 'Catastrophic Error Updating your Physical Address Record'
logging.info('Catastrophic Error Updating your Physical Address Record')
else:
result = User.editPhysicalAddressByPkey()
if result == self.undefined:
physicalmessage = 'Error Editing your Physical Address Record'
logging.info('Error Editing your Physical Address Record')
elif result == User._generalError:
physicalmessage = 'Catastrophic Error Editing your Physical Address Record'
logging.info('Catastrophic Error Editing your Physical Address Record')
else:
physicalmessage = 'Your Physical Address Record has been edited'
logging.info('Your Physical Address Record has been edited')
                            #Update the vPhysicalMessage to reflect physicalmessage
#We Must Update the vContactMessage to reflect the value of the contactmessage variable
#we must update vNamesMessage to inlcude the namesmessage
#We must update vRefMessage to reflect The Message Related to refmessage
User.clsPrivate.writePreferredLanguage(self.request.get('vPreferredLanguager'))
User.clsPrivate.writeMarital_Status(self.request.get('vMaritalStatusr'))
User.clsPrivate.writeGender(self.request.get('vGenderr'))
User.clsPrivate.writeDateofBirth(self.request.get('vBirthDater'))
User.clsPrivate.writeAge(self.request.get('vAger'))
User.clsPrivate.writeDependents(self.request.get('vDependentsr'))
User.clsPrivate.writeNationality(self.request.get('vNationalityr'))
result = User.getPrivateinfoByRefNum(reference)
if User._privatePkey == self.undefined:
#Record not found add a new one
result = User.addPrivateInfoByReference(reference)
if User._privatePkey == self.undefined:
privatemessage = 'Error Adding Private Record'
logging.info('Error Adding Private Record')
elif result == User._generalError:
privatemessage = 'Catastrophic Error Adding Private Record'
logging.info('Catastrophic Error Adding Private Record')
else:
privatemessage = 'Private Record Succesfully Added'
logging.info('Private Record Succesfully Added')
elif result == User._generalError:
#Catastrophic Error
privatemessage = 'Catastrophic Error with Private Information Record'
logging.info('Catastrophic Error with Private Information Record')
else:
#Record Found Edit it.
result = User.editPrivateInfobyPkey()
if (result == self.undefined):
privatemessage = 'Private Record NOT Succesfully Edited'
logging.info('Private Record NOT Succesfully Edited')
elif result == User._pkeyNotSet:
privatemessage = 'Bad Private Record cannot be edited'
logging.info('Bad Private Record cannot be edite')
elif result == User._generalError:
privatemessage = 'Error Editing Private Information try Again in 1 minute'
logging.info('Error Editing Private Information try Again in 1 minute')
else:
privatemessage = 'Private Information Record Edited Succesfully'
logging.info('Private Information Record Edited succesfully')
login_url = users.create_login_url(self.request.path)
logout_url = users.create_logout_url(dest_url='/')
template = template_env.get_template('/templates/ViewPersonalDetails.html')
context = {'vUsername': User.clsReference.readUsername(), 'loginURL': login_url, 'logoutURL': logout_url, 'vPassword': User.clsReference.readPassword(),
'vPrivateMessage': privatemessage,
'vReference': User.clsReference.readReference(),
'vEmailAddress': User.clsReference.readVerEmail(),
'vFirstname': User.clsNames.readFirstname(),
'vSecondname': User.clsNames.readSecondname(),
'vSurname': User.clsNames.readSurname(),
'vIDNumber': User.clsReference.readIDNumber(),
'vNationality': User.clsPrivate.readNationality(),
'vCell': User.clsContactDetails.readCell(),
'vEmail': User.clsContactDetails.readEmail(),
'vFax': User.clsContactDetails.readFax(),
'vTel': User.clsContactDetails.readTel(),
'vFacebook': User.clsContactDetails.readFacebook(),
'vTwitter': User.clsContactDetails.readTwitter(),
'vLinkedin': User.clsContactDetails.readLinkedIn(),
'vGooglePlus': User.clsContactDetails.readGooglePlus(),
'vPinterest': User.clsContactDetails.readPinterest(),
'vSkype': User.clsContactDetails.readSkype(),
'vBlog': User.clsContactDetails.readBlog(),
'vWhosWho': User.clsContactDetails.readWhosWho(),
'vAboutMe': User.clsContactDetails.readAboutMe(),
'vWebsite': User.clsContactDetails.readWebsite(),
'vStandNumber': User.clsPhysicalAddress.readStandNumber(),
'vStreetname': User.clsPhysicalAddress.readStreetName(),
'vCityTown': User.clsPhysicalAddress.readCityTown(),
'vProvinceState': User.clsPhysicalAddress.readProvinceState(),
'vCountry': User.clsPhysicalAddress.readCountry(),
'vPostalZipCode': User.clsPhysicalAddress.readPostalZipCode(),
'vPreferredLanguage': User.clsPrivate.readPrefferedLanguage(),
'vMaritalStatus': User.clsPrivate.readMarital_Status(),
'vGender': User.clsPrivate.readGender(),
'vBirthDate': User.clsPrivate.readDateof_Birth(),
'vAge': User.clsPrivate.readAge(),
'vDependents': User.clsPrivate.readDependents()}
self.response.write(template.render(context))
logging.info('Members Area Render Complete')
except:
doRender(self,'ViewPersonalDetails.html',{'MemberMessage': 'Error Accessing the Database try again in a minute'})
class ViewPersonalDetails(webapp2.RequestHandler, MyConstants, ErrorCodes):
def get(self):
try:
Guser = users.get_current_user()
if Guser:
if isGoogleServer:
reference = Guser.user_id()
else:
reference = self._tempCode
User.clsReference.writeReference(reference)
result = User.GetReferenceByRefNum(reference)
if not(User._pkeyvalue == self.undefined):
User.clsReference.writeUsername(result.readUsername())
User.clsReference.writeReference(result.readReference())
User.clsReference.writeIDNumber(result.readIDNumber())
User.clsReference.writePassword(result.readPassword())
User.clsReference.writeVerEmail(result.readVerEmail())
logging.info('Reference Field was Refreshed from DataStore')
elif result == User._referenceDoNotExist:
logging.info('Bad User Account please try login in again if this error persist create a new account')
referencemessage = 'Bad User Account please try login in again if this error persist create a new account'
User.clsReference.writeUsername(Guser.nickname())
User.clsReference.writeVerEmail(Guser.email())
else:
logging.info('Error Loading Account Details user might be logged off')
referencemessage = 'Error Loading Account Details user might be logged off'
User.clsReference.writeUsername(Guser.nickname())
User.clsReference.writeVerEmail(Guser.email())
result = User.getNamesbyRefNum(reference)
if not(User._namesPkeyvalue == self.undefined):
logging.info('Names Class was Refreshed from Datastore')
User.clsNames.writeFirstname(result.readFirstname())
User.clsNames.writeSecondname(result.readSecondname())
User.clsNames.writeSurname(result.readSurname())
elif result == self.undefined:
logging.info('Names class was not refreshed from datastore')
namesmessage ='Names class was not refreshed from datastore'
else:
logging.info('Error Loading Names Details from Datastore')
namesmessage = 'Error Loading Names Details from Datastore'
result = User.getPrivateinfoByRefNum(reference)
if not(User._privatePkey == self.undefined):
logging.info('Private Class was refreshed from Datastore')
User.clsPrivate.writeAge(result.readAge())
| |
# Copyright (c) 2018 <NAME>.
# Uranium is released under the terms of the LGPLv3 or higher.
from json import JSONDecodeError
from typing import Any, Dict, List, Optional, Set, Tuple, cast, TYPE_CHECKING
import json
import os
import shutil
import zipfile
import tempfile
import urllib.parse # For interpreting escape characters using unquote_plus.
from PyQt5.QtCore import pyqtSlot, QObject, pyqtSignal, QUrl, pyqtProperty
from UM import i18nCatalog
from UM.Logger import Logger
from UM.Message import Message
from UM.MimeTypeDatabase import MimeTypeDatabase # To get the type of container we're loading.
from UM.Resources import Resources
from UM.Version import Version as UMVersion
catalog = i18nCatalog("uranium")
if TYPE_CHECKING:
from UM.Qt.QtApplication import QtApplication
class PackageManager(QObject):
Version = 1
def __init__(self, application: "QtApplication", parent: Optional[QObject] = None) -> None:
super().__init__(parent)
self._application = application
self._container_registry = self._application.getContainerRegistry()
self._plugin_registry = self._application.getPluginRegistry()
# JSON files that keep track of all installed packages.
self._user_package_management_file_path = None # type: Optional[str]
self._bundled_package_management_file_paths = [] # type: List[str]
for search_path in Resources.getAllPathsForType(Resources.BundledPackages):
if not os.path.isdir(search_path):
continue
# Load all JSON files that are located in the bundled_packages directory.
for file_name in os.listdir(search_path):
if not file_name.endswith(".json"):
continue
file_path = os.path.join(search_path, file_name)
if not os.path.isfile(file_path):
continue
self._bundled_package_management_file_paths.append(file_path)
Logger.log("i", "Found bundled packages JSON file: {location}".format(location = file_path))
for search_path in (Resources.getDataStoragePath(), Resources.getConfigStoragePath()):
candidate_user_path = os.path.join(search_path, "packages.json")
if os.path.exists(candidate_user_path):
self._user_package_management_file_path = candidate_user_path
if self._user_package_management_file_path is None: # Doesn't exist yet.
self._user_package_management_file_path = os.path.join(Resources.getDataStoragePath(), "packages.json")
self._installation_dirs_dict = {"plugins": os.path.abspath(Resources.getStoragePath(Resources.Plugins))} # type: Dict[str, str]
self._bundled_package_dict = {} # type: Dict[str, Dict[str, Any]] # A dict of all bundled packages
self._installed_package_dict = {} # type: Dict[str, Dict[str, Any]] # A dict of all installed packages
self._to_remove_package_set = set() # type: Set[str] # A set of packages that need to be removed at the next start
self._to_install_package_dict = {} # type: Dict[str, Dict[str, Any]] # A dict of packages that need to be installed at the next start
# There can be plugins that provide remote packages (and thus, newer / different versions for a package).
self._available_package_versions = {} # type: Dict[str, Set[UMVersion]]
self._packages_with_update_available = set() # type: Set[str]
installedPackagesChanged = pyqtSignal() # Emitted whenever the installed packages collection have been changed.
packagesWithUpdateChanged = pyqtSignal()
def initialize(self) -> None:
self._loadManagementData()
self._removeAllScheduledPackages()
self._installAllScheduledPackages()
# Notify the Package manager that there is an alternative version for a given package.
def addAvailablePackageVersion(self, package_id: str, version: "UMVersion") -> None:
if package_id not in self._available_package_versions:
self._available_package_versions[package_id] = set()
self._available_package_versions[package_id].add(version)
if self.checkIfPackageCanUpdate(package_id):
self._packages_with_update_available.add(package_id)
self.packagesWithUpdateChanged.emit()
@pyqtProperty("QStringList", notify = packagesWithUpdateChanged)
def packagesWithUpdate(self) -> Set[str]:
return self._packages_with_update_available
## Alternative way of setting the available package updates without having to check all packages in the cloud.
def setPackagesWithUpdate(self, packages: Set[str]):
self._packages_with_update_available = packages
self.packagesWithUpdateChanged.emit()
def checkIfPackageCanUpdate(self, package_id: str) -> bool:
available_versions = self._available_package_versions.get(package_id)
if available_versions is None:
return False
current_version = None
bundled_package_dict = self._bundled_package_dict.get(package_id)
if bundled_package_dict is not None:
current_version = UMVersion(bundled_package_dict["package_info"]["package_version"])
installed_package_dict = self._installed_package_dict.get(package_id)
if installed_package_dict is not None:
current_version = UMVersion(installed_package_dict["package_info"]["package_version"])
# One way to check if the package has been updated in looking at the to_install information in the packages.json
to_install_package_dict = self._to_install_package_dict.get(package_id)
if to_install_package_dict is not None: # If it's marked as to_install, that means package will be installed upon restarting
return False
if current_version is not None:
for available_version in available_versions:
if current_version < available_version:
# Stop looking, there is at least one version that is higher.
return True
return False
# (for initialize) Loads the package management file if exists
def _loadManagementData(self) -> None:
# The bundled package management file should always be there
if len(self._bundled_package_management_file_paths) == 0:
Logger.log("w", "Bundled package management files could not be found!")
return
# Load the bundled packages:
self._bundled_package_dict = {}
for search_path in self._bundled_package_management_file_paths:
with open(search_path, "r", encoding = "utf-8") as f:
self._bundled_package_dict.update(json.load(f, encoding = "utf-8"))
Logger.log("i", "Loaded bundled packages data from %s", search_path)
# Need to use the file lock here to prevent concurrent I/O from other processes/threads
container_registry = self._application.getContainerRegistry()
with container_registry.lockFile():
try:
# Load the user packages:
with open(cast(str, self._user_package_management_file_path), "r", encoding="utf-8") as f:
try:
management_dict = json.load(f, encoding="utf-8")
except JSONDecodeError:
# The file got corrupted, ignore it. This happens extremely infrequently.
# The file will get overridden once a user downloads something.
return
self._installed_package_dict = management_dict.get("installed", {})
self._to_remove_package_set = set(management_dict.get("to_remove", []))
self._to_install_package_dict = management_dict.get("to_install", {})
Logger.log("i", "Loaded user packages management file from %s", self._user_package_management_file_path)
except FileNotFoundError:
Logger.log("i", "User package management file %s doesn't exist, do nothing", self._user_package_management_file_path)
return
# For packages that become bundled in the new releases, but a lower version was installed previously, we need
# to remove the old lower version that's installed in the user's folder.
for package_id, installed_package_dict in self._installed_package_dict.items():
bundled_package_dict = self._bundled_package_dict.get(package_id)
if bundled_package_dict is None:
continue
result = self._comparePackageVersions(installed_package_dict["package_info"],
bundled_package_dict["package_info"])
# The bundled package is newer
if result <= 0:
self._to_remove_package_set.add(package_id)
continue
# Also check the to-install packages to avoid installing packages that have a lower version than the bundled
# ones.
to_remove_package_ids = set()
for package_id, to_install_package_dict in self._to_install_package_dict.items():
bundled_package_dict = self._bundled_package_dict.get(package_id)
if bundled_package_dict is None:
continue
result = self._comparePackageVersions(to_install_package_dict["package_info"],
bundled_package_dict["package_info"])
# The bundled package is newer
if result <= 0:
to_remove_package_ids.add(package_id)
continue
for package_id in to_remove_package_ids:
del self._to_install_package_dict[package_id]
# Compares the SDK versions and the package versions of the two given package info dicts.
# Returns -1, 0, 1 indicating if the versions in dict1 is lower than, equal to, or higher than dict2.
# - The package with the higher SDK version is considered having the higher version number. If they are the same,
# - if the bundled package version is greater than or equal to the given package, -1 is returned. Otherwise, 1.
def _comparePackageVersions(self, info_dict1: Dict[str, Any], info_dict2: Dict[str, Any]) -> int:
# If the bundled version has a higher SDK version, use the bundled version by removing the installed one.
sdk_version1 = UMVersion(info_dict1["sdk_version"])
sdk_version2 = UMVersion(info_dict2["sdk_version"])
if sdk_version1 < sdk_version2:
return -1
# Remove the package with the old version to favour the newer bundled version.
version1 = UMVersion(info_dict1["package_version"])
version2 = UMVersion(info_dict2["package_version"])
if version1 < version2:
return -1
if version1 == version2:
return 0
return 1
def _saveManagementData(self) -> None:
# Need to use the file lock here to prevent concurrent I/O from other processes/threads
container_registry = self._application.getContainerRegistry()
with container_registry.lockFile():
with open(cast(str,self._user_package_management_file_path), "w", encoding = "utf-8") as f:
data_dict = {"version": PackageManager.Version,
"installed": self._installed_package_dict,
"to_remove": list(self._to_remove_package_set),
"to_install": self._to_install_package_dict}
json.dump(data_dict, f, sort_keys = True, indent = 4)
Logger.log("i", "Package management file %s was saved", self._user_package_management_file_path)
# (for initialize) Removes all packages that have been scheduled to be removed.
def _removeAllScheduledPackages(self) -> None:
remove_failures = set()
for package_id in self._to_remove_package_set:
try:
self._purgePackage(package_id)
del self._installed_package_dict[package_id]
except:
remove_failures.add(package_id)
if remove_failures:
message = Message(catalog.i18nc("@error:uninstall",
"There were some errors uninstalling the following packages:\n{packages}".format(
packages = "- " + "\n- ".join(remove_failures))),
title = catalog.i18nc("@info:title", "Uninstalling errors"))
message.show()
self._to_remove_package_set = remove_failures
self._saveManagementData()
# (for initialize) Installs all packages that have been scheduled to be installed.
def _installAllScheduledPackages(self) -> None:
while self._to_install_package_dict:
package_id, package_info = list(self._to_install_package_dict.items())[0]
self._installPackage(package_info)
del self._to_install_package_dict[package_id]
self._saveManagementData()
def getBundledPackageInfo(self, package_id: str) -> Optional[Dict[str, Any]]:
package_info = None
if package_id in self._bundled_package_dict:
package_info = self._bundled_package_dict[package_id]["package_info"]
return package_info
# Checks the given package is installed. If so, return a dictionary that contains the package's information.
def getInstalledPackageInfo(self, package_id: str) -> Optional[Dict[str, Any]]:
if package_id in self._to_remove_package_set:
return None
package_info = None
if package_id in self._to_install_package_dict:
package_info = self._to_install_package_dict[package_id]["package_info"]
package_info["is_installed"] = False
elif package_id in self._installed_package_dict:
package_info = self._installed_package_dict[package_id]["package_info"]
package_info["is_installed"] = True
elif package_id in self._bundled_package_dict:
package_info = self._bundled_package_dict[package_id]["package_info"]
package_info["is_installed"] = True
if package_info:
# We also need to get information from the plugin registry such as if a plugin is active
package_info["is_active"] = self._plugin_registry.isActivePlugin(package_id)
# If the package ID is in bundled, label it as such
package_info["is_bundled"] = package_info["package_id"] in self._bundled_package_dict.keys() and not self.isUserInstalledPackage(package_info["package_id"])
return package_info
def getAllInstalledPackageIDs(self) -> Set[str]:
# Add bundled, installed, and to-install packages to the set of installed package IDs
all_installed_ids = set() # type: Set[str]
if self._bundled_package_dict.keys():
all_installed_ids = all_installed_ids.union(set(self._bundled_package_dict.keys()))
if self._installed_package_dict.keys():
all_installed_ids = all_installed_ids.union(set(self._installed_package_dict.keys()))
all_installed_ids = all_installed_ids.difference(self._to_remove_package_set)
# If it's going to be installed and to | |
# filename: test/unit/common/middleware/test_recon.py
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from webob import Request
from swift.common.middleware import recon
from unittest import TestCase
from contextlib import contextmanager
from posix import stat_result, statvfs_result
import os
import swift.common.constraints
class FakeApp(object):
def __call__(self, env, start_response):
return "FAKE APP"
def start_response(*args):
pass
class OpenAndReadTester(object):
def __init__(self, output_iter):
self.index = 0
self.out_len = len(output_iter) - 1
self.data = output_iter
self.output_iter = iter(output_iter)
self.read_calls = []
self.open_calls = []
def __iter__(self):
return self
def next(self):
if self.index == self.out_len:
raise StopIteration
else:
line = self.data[self.index]
self.index += 1
return line
def read(self, *args, **kwargs):
self.read_calls.append((args, kwargs))
try:
return self.output_iter.next()
except StopIteration:
return ''
@contextmanager
def open(self, *args, **kwargs):
self.open_calls.append((args, kwargs))
yield self
class MockOS(object):
def __init__(self, ls_out=None, pe_out=None, statvfs_out=None,
lstat_out=(1, 1, 5, 4, 5, 5, 55, 55, 55, 55)):
self.ls_output = ls_out
self.path_exists_output = pe_out
self.statvfs_output = statvfs_out
self.lstat_output_tuple = lstat_out
self.listdir_calls = []
self.statvfs_calls = []
self.path_exists_calls = []
self.lstat_calls = []
def fake_listdir(self, *args, **kwargs):
self.listdir_calls.append((args, kwargs))
return self.ls_output
def fake_path_exists(self, *args, **kwargs):
self.path_exists_calls.append((args, kwargs))
return self.path_exists_output
def fake_statvfs(self, *args, **kwargs):
self.statvfs_calls.append((args, kwargs))
return statvfs_result(self.statvfs_output)
def fake_lstat(self, *args, **kwargs):
self.lstat_calls.append((args, kwargs))
return stat_result(self.lstat_output_tuple)
class TestReconSuccess(TestCase):
def setUp(self):
self.app = recon.ReconMiddleware(FakeApp(), {})
self.mockos = MockOS()
self.real_listdir = os.listdir
self.real_path_exists = os.path.exists
self.real_lstat = os.lstat
self.real_statvfs = os.statvfs
os.listdir = self.mockos.fake_listdir
os.path.exists = self.mockos.fake_path_exists
os.lstat = self.mockos.fake_lstat
os.statvfs = self.mockos.fake_statvfs
def tearDown(self):
os.listdir = self.real_listdir
os.path.exists = self.real_path_exists
os.lstat = self.real_lstat
os.statvfs = self.real_statvfs
del self.mockos
def test_get_mounted(self):
mounts_content = ['rootfs / rootfs rw 0 0',
'none /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0',
'none /proc proc rw,nosuid,nodev,noexec,relatime 0 0',
'none /dev devtmpfs rw,relatime,size=248404k,nr_inodes=62101,mode=755 0 0',
'none /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0',
'/dev/disk/by-uuid/e5b143bd-9f31-49a7-b018-5e037dc59252 / ext4 rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0',
'none /sys/fs/fuse/connections fusectl rw,relatime 0 0',
'none /sys/kernel/debug debugfs rw,relatime 0 0',
'none /sys/kernel/security securityfs rw,relatime 0 0',
'none /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0',
'none /var/run tmpfs rw,nosuid,relatime,mode=755 0 0',
'none /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0',
'none /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0',
'/dev/loop0 /mnt/sdb1 xfs rw,noatime,nodiratime,attr2,nobarrier,logbufs=8,noquota 0 0',
'rpc_pipefs /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0',
'nfsd /proc/fs/nfsd nfsd rw,relatime 0 0',
'none /proc/fs/vmblock/mountPoint vmblock rw,relatime 0 0',
'']
mounted_resp = [{'device': 'rootfs', 'path': '/'},
{'device': 'none', 'path': '/sys'},
{'device': 'none', 'path': '/proc'},
{'device': 'none', 'path': '/dev'},
{'device': 'none', 'path': '/dev/pts'},
{'device': '/dev/disk/by-uuid/e5b143bd-9f31-49a7-b018-5e037dc59252', 'path': '/'},
{'device': 'none', 'path': '/sys/fs/fuse/connections'},
{'device': 'none', 'path': '/sys/kernel/debug'},
{'device': 'none', 'path': '/sys/kernel/security'},
{'device': 'none', 'path': '/dev/shm'},
{'device': 'none', 'path': '/var/run'},
{'device': 'none', 'path': '/var/lock'},
{'device': 'none', 'path': '/lib/init/rw'},
{'device': '/dev/loop0', 'path': '/mnt/sdb1'},
{'device': 'rpc_pipefs', 'path': '/var/lib/nfs/rpc_pipefs'},
{'device': 'nfsd', 'path': '/proc/fs/nfsd'},
{'device': 'none', 'path': '/proc/fs/vmblock/mountPoint'}]
oart = OpenAndReadTester(mounts_content)
rv = self.app.get_mounted(openr=oart.open)
self.assertEquals(oart.open_calls, [(('/proc/mounts', 'r'), {})])
self.assertEquals(rv, mounted_resp)
def test_get_load(self):
oart = OpenAndReadTester(['0.03 0.03 0.00 1/220 16306'])
rv = self.app.get_load(openr=oart.open)
self.assertEquals(oart.read_calls, [((), {})])
self.assertEquals(oart.open_calls, [(('/proc/loadavg', 'r'), {})])
self.assertEquals(rv, {'5m': 0.029999999999999999, '15m': 0.0,
'processes': 16306, 'tasks': '1/220',
'1m': 0.029999999999999999})
def test_get_mem(self):
meminfo_content = ['MemTotal: 505840 kB',
'MemFree: 26588 kB',
'Buffers: 44948 kB',
'Cached: 146376 kB',
'SwapCached: 14736 kB',
'Active: 194900 kB',
'Inactive: 193412 kB',
'Active(anon): 94208 kB',
'Inactive(anon): 102848 kB',
'Active(file): 100692 kB',
'Inactive(file): 90564 kB',
'Unevictable: 0 kB',
'Mlocked: 0 kB',
'SwapTotal: 407544 kB',
'SwapFree: 313436 kB',
'Dirty: 104 kB',
'Writeback: 0 kB',
'AnonPages: 185268 kB',
'Mapped: 9592 kB',
'Shmem: 68 kB',
'Slab: 61716 kB',
'SReclaimable: 46620 kB',
'SUnreclaim: 15096 kB',
'KernelStack: 1760 kB',
'PageTables: 8832 kB',
'NFS_Unstable: 0 kB',
'Bounce: 0 kB',
'WritebackTmp: 0 kB',
'CommitLimit: 660464 kB',
'Committed_AS: 565608 kB',
'VmallocTotal: 34359738367 kB',
'VmallocUsed: 266724 kB',
'VmallocChunk: 34359467156 kB',
'HardwareCorrupted: 0 kB',
'HugePages_Total: 0',
'HugePages_Free: 0',
'HugePages_Rsvd: 0',
'HugePages_Surp: 0',
'Hugepagesize: 2048 kB',
'DirectMap4k: 10240 kB',
'DirectMap2M: 514048 kB',
'']
meminfo_resp = {'WritebackTmp': '0 kB',
'SwapTotal': '407544 kB',
'Active(anon)': '94208 kB',
'SwapFree': '313436 kB',
'DirectMap4k': '10240 kB',
'KernelStack': '1760 kB',
'MemFree': '26588 kB',
'HugePages_Rsvd': '0',
'Committed_AS': '565608 kB',
'Active(file)': '100692 kB',
'NFS_Unstable': '0 kB',
'VmallocChunk': '34359467156 kB',
'Writeback': '0 kB',
'Inactive(file)': '90564 kB',
'MemTotal': '505840 kB',
'VmallocUsed': '266724 kB',
'HugePages_Free': '0',
'AnonPages': '185268 kB',
'Active': '194900 kB',
'Inactive(anon)': '102848 kB',
'CommitLimit': '660464 kB',
'Hugepagesize': '2048 kB',
'Cached': '146376 kB',
'SwapCached': '14736 kB',
'VmallocTotal': '34359738367 kB',
'Shmem': '68 kB',
'Mapped': '9592 kB',
'SUnreclaim': '15096 kB',
'Unevictable': '0 kB',
'SReclaimable': '46620 kB',
'Mlocked': '0 kB',
'DirectMap2M': '514048 kB',
'HugePages_Surp': '0',
'Bounce': '0 kB',
'Inactive': '193412 kB',
'PageTables': '8832 kB',
'HardwareCorrupted': '0 kB',
'HugePages_Total': '0',
'Slab': '61716 kB',
'Buffers': '44948 kB',
'Dirty': '104 kB'}
oart = OpenAndReadTester(meminfo_content)
rv = self.app.get_mem(openr=oart.open)
self.assertEquals(oart.open_calls, [(('/proc/meminfo', 'r'), {})])
self.assertEquals(rv, meminfo_resp)
def test_get_async_info(self):
obj_recon_content = """{"object_replication_time": 200.0, "async_pending": 5}"""
oart = OpenAndReadTester([obj_recon_content])
rv = self.app.get_async_info(openr=oart.open)
self.assertEquals(oart.read_calls, [((), {})])
self.assertEquals(oart.open_calls, [(('/var/cache/swift/object.recon', 'r'), {})])
self.assertEquals(rv, {'async_pending': 5})
def test_get_async_info_empty_file(self):
obj_recon_content = """{"object_replication_time": 200.0}"""
oart = OpenAndReadTester([obj_recon_content])
rv = self.app.get_async_info(openr=oart.open)
self.assertEquals(oart.read_calls, [((), {})])
self.assertEquals(oart.open_calls, [(('/var/cache/swift/object.recon', 'r'), {})])
self.assertEquals(rv, {'async_pending': -1})
def test_get_replication_info(self):
obj_recon_content = """{"object_replication_time": 200.0, "async_pending": 5}"""
oart = OpenAndReadTester([obj_recon_content])
rv = self.app.get_replication_info(openr=oart.open)
self.assertEquals(oart.read_calls, [((), {})])
self.assertEquals(oart.open_calls, [(('/var/cache/swift/object.recon', 'r'), {})])
self.assertEquals(rv, {'object_replication_time': 200.0})
def test_get_replication_info_empty_file(self):
obj_recon_content = """{"async_pending": 5}"""
oart = OpenAndReadTester([obj_recon_content])
rv = self.app.get_replication_info(openr=oart.open)
self.assertEquals(oart.read_calls, [((), {})])
self.assertEquals(oart.open_calls, [(('/var/cache/swift/object.recon', 'r'), {})])
self.assertEquals(rv, {'object_replication_time': -1})
def test_get_device_info(self):
rv = self.app.get_device_info()
self.assertEquals(rv, '/srv/node/')
def test_get_unmounted(self):
def fake_checkmount_true(*args):
return True
unmounted_resp = [{'device': 'fakeone', 'mounted': False},
{'device': 'faketwo', 'mounted': False}]
self.mockos.ls_output=['fakeone', 'faketwo']
self.mockos.path_exists_output=False
real_checkmount = swift.common.constraints.check_mount
swift.common.constraints.check_mount = fake_checkmount_true
rv = self.app.get_unmounted()
swift.common.constraints.check_mount = real_checkmount
self.assertEquals(self.mockos.listdir_calls, [(('/srv/node/',), {})])
self.assertEquals(rv, unmounted_resp)
def test_get_diskusage(self):
#posix.statvfs_result(f_bsize=4096, f_frsize=4096, f_blocks=1963185,
# f_bfree=1113075, f_bavail=1013351, f_files=498736,
# f_ffree=397839, f_favail=397839, f_flag=0,
# f_namemax=255)
statvfs_content=(4096, 4096, 1963185, 1113075, 1013351, 498736, 397839,
397839, 0, 255)
du_resp = [{'device': 'canhazdrive1', 'avail': 4150685696,
'mounted': True, 'used': 3890520064, 'size': 8041205760}]
self.mockos.ls_output=['canhazdrive1']
self.mockos.statvfs_output=statvfs_content
self.mockos.path_exists_output=True
rv = self.app.get_diskusage()
self.assertEquals(self.mockos.statvfs_calls,[(('/srv/node/canhazdrive1',), {})])
self.assertEquals(rv, du_resp)
def test_get_diskusage_checkmount_fail(self):
du_resp = [{'device': 'canhazdrive1', 'avail': '',
'mounted': False, 'used': '', 'size': ''}]
self.mockos.ls_output=['canhazdrive1']
self.mockos.path_exists_output=False
rv = self.app.get_diskusage()
self.assertEquals(self.mockos.listdir_calls,[(('/srv/node/',), {})])
self.assertEquals(self.mockos.path_exists_calls,[(('/srv/node/canhazdrive1',), {})])
self.assertEquals(rv, du_resp)
def test_get_quarantine_count(self):
#posix.lstat_result(st_mode=1, st_ino=2, st_dev=3, st_nlink=4,
# st_uid=5, st_gid=6, st_size=7, st_atime=8,
# st_mtime=9, st_ctime=10)
lstat_content = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
self.mockos.ls_output=['sda']
self.mockos.path_exists_output=True
self.mockos.lstat_output=lstat_content
rv = self.app.get_quarantine_count()
self.assertEquals(rv, {'objects': 2, 'accounts': 2, 'containers': 2})
def test_get_socket_info(self):
sockstat_content = ['sockets: used 271',
'TCP: inuse 30 orphan 0 tw 0 alloc 31 mem 0',
'UDP: inuse 16 mem 4', 'UDPLITE: inuse 0',
'RAW: inuse 0', 'FRAG: inuse 0 memory 0',
'']
sockstat6_content = ['TCP6: inuse 1',
'UDP6: inuse 3',
'UDPLITE6: inuse 0',
'RAW6: inuse 0',
'FRAG6: inuse 0 memory 0',
'']
oart = OpenAndReadTester(sockstat_content)
rv = self.app.get_socket_info(openr=oart.open)
self.assertEquals(oart.open_calls, [(('/proc/net/sockstat', 'r'), {}),
(('/proc/net/sockstat6', 'r'), {})])
#todo verify parsed result of sockstat6
#self.assertEquals(rv, {'time_wait': 0, 'tcp_in_use': 30, 'orphan': 0, 'tcp_mem_allocated_bytes': 0})
class FakeRecon(object):
def fake_mem(self):
return {'memtest': "1"}
def fake_load(self):
return {'loadtest': "1"}
def fake_async(self):
return {'asynctest': "1"}
def fake_replication(self):
return {'replicationtest': "1"}
def fake_mounted(self):
return {'mountedtest': "1"}
def fake_unmounted(self):
return {'unmountedtest': "1"}
def fake_diskusage(self):
return {'diskusagetest': "1"}
def fake_ringmd5(self):
return {'ringmd5test': "1"}
def fake_quarantined(self):
return {'quarantinedtest': "1"}
def fake_sockstat(self):
return {'sockstattest': "1"}
def raise_IOError(self):
raise IOError
def raise_ValueError(self):
raise ValueError
class TestHealthCheck(unittest.TestCase):
def setUp(self):
self.frecon = FakeRecon()
self.app = recon.ReconMiddleware(FakeApp(), {})
self.app.get_mem = self.frecon.fake_mem
self.app.get_load = self.frecon.fake_load
self.app.get_async_info = self.frecon.fake_async
self.app.get_replication_info = self.frecon.fake_replication
self.app.get_mounted = self.frecon.fake_mounted
self.app.get_unmounted = self.frecon.fake_unmounted
self.app.get_diskusage = self.frecon.fake_diskusage
self.app.get_ring_md5 = self.frecon.fake_ringmd5
self.app.get_quarantine_count = self.frecon.fake_quarantined
self.app.get_socket_info = self.frecon.fake_sockstat
def test_recon_get_mem(self):
get_mem_resp = ['{"memtest": "1"}']
req = Request.blank('/recon/mem', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_mem_resp)
def test_recon_get_load(self):
get_load_resp = ['{"loadtest": "1"}']
req = Request.blank('/recon/load', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_load_resp)
def test_recon_get_async(self):
get_async_resp = ['{"asynctest": "1"}']
req = Request.blank('/recon/async', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_async_resp)
def test_recon_get_async_ioerror(self):
orig = self.app.get_async_info
self.app.get_async_info = self.frecon.raise_IOError
req = Request.blank('/recon/async', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.app.get_async_info = orig
self.assertEquals(resp, | |
# repo: nuagenetworks/nuage-openstack-neutron
# Copyright 2018 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import time
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log
from oslo_utils import excutils
from neutron._i18n import _
from neutron.api import extensions as neutron_extensions
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.db import provisioning_blocks
from neutron.extensions import securitygroup as ext_sg
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import portbindings
from neutron_lib.api import validators as lib_validators
from neutron_lib.callbacks import resources
from neutron_lib import constants as os_constants
from neutron_lib import context as n_context
from neutron_lib.exceptions import PortInUse
from neutron_lib.exceptions import SubnetNotFound
from neutron_lib.plugins.ml2 import api
from nuage_neutron.plugins.common.addresspair import NuageAddressPair
from nuage_neutron.plugins.common import base_plugin
from nuage_neutron.plugins.common import constants
from nuage_neutron.plugins.common.exceptions import NuageBadRequest
from nuage_neutron.plugins.common.exceptions import NuagePortBound
from nuage_neutron.plugins.common import extensions
from nuage_neutron.plugins.common.extensions import nuagepolicygroup
from nuage_neutron.plugins.common import nuagedb
from nuage_neutron.plugins.common import port_security
from nuage_neutron.plugins.common import qos_driver
from nuage_neutron.plugins.common import utils
from nuage_neutron.plugins.common.utils import handle_nuage_api_errorcode
from nuage_neutron.plugins.common.utils import ignore_no_update
from nuage_neutron.plugins.common.utils import ignore_not_found
from nuage_neutron.plugins.nuage_ml2.securitygroup import NuageSecurityGroup
from nuage_neutron.plugins.nuage_ml2 import trunk_driver
from nuage_neutron.vsdclient.common import constants as vsd_constants
from nuage_neutron.vsdclient.common.helper import get_l2_and_l3_sub_id
from nuage_neutron.vsdclient import restproxy
# Device owner assigned by neutron-lbaas v2 to load balancer VIP ports.
LB_DEVICE_OWNER_V2 = os_constants.DEVICE_OWNER_LOADBALANCERV2
# VIF types for which a port is considered not (yet) plugged by Nuage.
PORT_UNPLUGGED_TYPES = (portbindings.VIF_TYPE_BINDING_FAILED,
                        portbindings.VIF_TYPE_UNBOUND,
                        portbindings.VIF_TYPE_OVS)
# Device owner of neutron DHCP agent ports.
DEVICE_OWNER_DHCP = os_constants.DEVICE_OWNER_DHCP
LOG = log.getLogger(__name__)
class NuageMechanismDriver(base_plugin.RootNuagePlugin,
api.MechanismDriver,
db_base_plugin_v2.NeutronDbPluginV2,
agents_db.AgentDbMixin):
def __init__(self):
self._core_plugin = None
self.trunk_driver = None
self.qos_driver = None
self.psec_handler = None
self.supported_network_types = [os_constants.TYPE_VXLAN,
constants.NUAGE_HYBRID_MPLS_NET_TYPE]
super(NuageMechanismDriver, self).__init__()
    def initialize(self):
        """Set up the Nuage ML2 mechanism driver.

        Registers the Nuage API extensions, validates the ml2/nuage
        configuration, creates the VSD REST client and wires up the
        auxiliary drivers (address pairs, trunk, QoS, security groups,
        port security) that complement this mechanism driver.
        """
        LOG.debug('Initializing driver')
        neutron_extensions.append_api_extensions_path(extensions.__path__)
        self._validate_mech_nuage_configuration()
        self.init_vsd_client()
        # Wrap every vsdclient method so benign VSD errors (no-op updates,
        # 404 on get/delete) do not bubble up as exceptions.
        self._wrap_vsdclient()
        NuageAddressPair().register()
        self.register_callbacks()
        self.trunk_driver = trunk_driver.NuageTrunkDriver.create(self)
        self.qos_driver = qos_driver.NuageQosDriver.create(self,
                                                           self.vsdclient)
        # Nuage Security Group works with callbacks but is initialized through
        # mech nuage.
        NuageSecurityGroup()
        self.psec_handler = port_security.NuagePortSecurityHandler(
            self.vsdclient, self)
        LOG.debug('Initializing complete')
def _validate_mech_nuage_configuration(self):
service_plugins = constants.MIN_MECH_NUAGE_SERVICE_PLUGINS_IN_CONFIG
extensions = constants.MIN_MECH_NUAGE_EXTENSIONS_IN_CONFIG
self._validate_config_for_nuage_driver(constants.NUAGE_ML2_DRIVER_NAME,
service_plugins,
extensions)
def _wrap_vsdclient(self):
"""Wraps nuageclient methods with try-except to ignore certain errors.
When updating an entity on the VSD and there is nothing to actually
update because the values don't change, VSD will throw an error. This
is not needed for neutron so all these exceptions are ignored.
When VSD responds with a 404, this is sometimes good (for example when
trying to update an entity). Yet sometimes this is not required to be
an actual exception. When deleting an entity that does no longer exist
it is fine for neutron. Also when trying to retrieve something from VSD
having None returned is easier to work with than RESTProxy exceptions.
"""
methods = inspect.getmembers(self.vsdclient,
lambda x: inspect.ismethod(x))
for m in methods:
wrapped = ignore_no_update(m[1])
if m[0].startswith('get_') or m[0].startswith('delete_'):
wrapped = ignore_not_found(wrapped)
setattr(self.vsdclient, m[0], wrapped)
@utils.context_log
def create_network_precommit(self, context):
network = context.current
db_context = context._plugin_context
# A network attached to an L2bridge is not allowed to be external or
# shared
self._validate_network_physnet(db_context, network)
    def _validate_network_physnet(self, context, network):
        """Reject network settings that conflict with nuage_l2bridge usage.

        A network whose physical_network is attached to a nuage_l2bridge
        may not be external or shared, and may not span multiple
        l2bridges. Also rejects mixing vxlan and nuage_hybrid_mpls
        segments within a single network.
        """
        l2bridge_id = nuagedb.get_nuage_l2bridge_id_for_network(
            context.session, network['id'])
        if l2bridge_id:
            is_external = network.get(external_net.EXTERNAL)
            if is_external:
                msg = _("It is not allowed to create a network as external in "
                        "a physical_network attached to a nuage_l2bridge")
                raise NuageBadRequest(msg=msg)
            is_shared = network.get('shared')
            if is_shared:
                msg = _("It is not allowed to create a shared network in "
                        "a physical_network attached to a nuage_l2bridge")
                raise NuageBadRequest(msg=msg)
        # Checked unconditionally: the network's physnets may belong to an
        # l2bridge even when the network itself has no mapping yet.
        physnets = self._get_l2bridge_physnets(context, network)
        l2bridges = {p['l2bridge_id'] for p in physnets}
        if len(l2bridges) > 1:
            msg = _("It is not allowed to attach a network to multiple"
                    "nuage_l2bridges.")
            raise NuageBadRequest(msg=msg)
        # Block vxlan and nuage_hybrid_segments in a single network
        self.check_vxlan_mpls_segments_in_network(network.get('segments', []))
@handle_nuage_api_errorcode
@utils.context_log
def update_network_precommit(self, context):
updated_network = context.current
original_network = context.original
db_context = context._plugin_context
(external_change,
shared_change,
physnets_change,
_) = self._network_no_action(original_network,
updated_network)
if any([external_change, shared_change, physnets_change]):
self._validate_update_network(db_context, external_change,
shared_change, physnets_change,
original_network,
updated_network)
# Block vxlan and nuage_hybrid_segments in a single network
# This cannot be included in the above structure since after the
# create segment operation, neutron calls update_network_precommit
# with the same value for the original and updated network
self.check_vxlan_mpls_segments_in_network(
updated_network.get('segments', []))
    @handle_nuage_api_errorcode
    @utils.context_log
    def update_network_postcommit(self, context):
        """Propagate network updates to VSD after the neutron DB commit.

        Handles three kinds of change:
        * external flag flipped: convert existing l2domains into shared
          floating resources;
        * shared flag flipped on an internal network: update permissions
          on l2domains;
        * name change: for dualstack networks the network name is used as
          the VSD description, so push it to the l2domain/domain subnet.
        Always lets the QoS driver react to the update first.
        """
        updated_network = context.current
        original_network = context.original
        db_context = context._plugin_context
        (external_change,
         shared_change,
         physnets_change,
         name_change) = self._network_no_action(original_network,
                                                updated_network)
        self.qos_driver.update_network(db_context, original_network,
                                       updated_network)
        if not any([external_change, shared_change, physnets_change,
                    name_change]):
            # No update required
            return
        subnets = self.core_plugin.get_subnets_by_network(
            db_context, updated_network['id'])
        if external_change:
            # Network became external: tear down l2domains and recreate the
            # subnets as shared (floating) resources on VSD.
            for subn in subnets:
                subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(
                    db_context.session, subn['id'])
                LOG.debug("Found subnet %(subn_id)s to l2 domain mapping"
                          " %(nuage_subn_id)s",
                          {'subn_id': subn['id'],
                           'nuage_subn_id':
                               subnet_l2dom['nuage_subnet_id']})
                self.vsdclient.delete_subnet(
                    l2dom_id=subnet_l2dom['nuage_subnet_id'])
                nuagedb.delete_subnetl2dom_mapping(db_context.session,
                                                   subnet_l2dom)
                # delete the neutron port that was reserved with IP of
                # the dhcp server that is reserved.
                # Now, this port is not reqd.
                self.delete_dhcp_nuage_port(db_context, subn)
                self._add_nuage_sharedresource(db_context, subn,
                                               constants.SR_TYPE_FLOATING,
                                               subnets)
        if shared_change and not updated_network.get(external_net.EXTERNAL):
            for subnet in subnets:
                nuage_subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(
                    db_context.session, subnet['id'])
                if self._is_l2(nuage_subnet_l2dom):
                    # change of perm only reqd in l2dom case
                    self.vsdclient.change_perm_of_subns(
                        nuage_subnet_l2dom['net_partition_id'],
                        nuage_subnet_l2dom['nuage_subnet_id'],
                        updated_network['shared'],
                        subnet['tenant_id'], remove_everybody=True)
        if name_change:
            ipv4s = len([s for s in subnets if self._is_ipv4(s)])
            ipv6s = len([s for s in subnets if self._is_ipv6(s)])
            if ipv4s == 1 and ipv6s == 1:
                # only dualstack subnets use network name as description
                subnet = subnets[0]
                subnet_mapping = nuagedb.get_subnet_l2dom_by_id(
                    db_context.session, subnet['id'])
                params = {
                    'dualstack': True,
                    'network_name': updated_network['name']
                }
                if self._is_l2(subnet_mapping):
                    # L2: both the template and the l2domain carry the name.
                    self.vsdclient.update_l2domain_template(
                        subnet_mapping['nuage_l2dom_tmplt_id'], **params)
                    self.vsdclient.update_l2domain(
                        subnet_mapping['nuage_subnet_id'], **params)
                else:
                    params.update({
                        "subnet_nuage_underlay":
                            subnet.get(constants.NUAGE_UNDERLAY)
                    })
                    self.vsdclient.update_domain_subnet(
                        subnet_mapping['nuage_subnet_id'], params)
def check_dhcp_agent_alive(self, context):
get_dhcp_agent = self.get_agents(
context, filters={"alive": [True],
"binary": ['neutron-dhcp-agent']})
if get_dhcp_agent:
return True
return False
    @utils.context_log
    @handle_nuage_api_errorcode
    def create_subnet_precommit(self, context):
        """Create the Nuage (VSD) side of a subnet inside the DB transaction.

        Skips networks of unsupported types, but rejects them if
        Nuage-specific attributes (nuagenet / net_partition) were passed.
        """
        subnet = context.current
        network = context.network.current
        db_context = context._plugin_context
        prefixlen = netaddr.IPNetwork(subnet['cidr']).prefixlen
        # nuagenet/net_partition set => the subnet is VSD-managed.
        nuagenet_set = lib_validators.is_attr_set(subnet.get('nuagenet'))
        net_part_set = lib_validators.is_attr_set(subnet.get('net_partition'))
        if not self.is_network_type_supported(network):
            if nuagenet_set or net_part_set:
                # Nuage attributes set on unsupported network types
                msg = _("Network should have 'provider:network_type' "
                        "vxlan or nuage_hybrid_mpls, or have such a segment")
                raise NuageBadRequest(msg=msg)
            else:
                return  # Not for us
        with db_context.session.begin(subtransactions=True):
            self.create_nuage_subnet_precommit(db_context,
                                               network,
                                               prefixlen, subnet,
                                               nuagenet_set)
    def _validate_create_subnet(self, db_context, network, prefixlen,
                                subnet, vsd_managed, l2bridge):
        """Validate a subnet create request against Nuage constraints.

        Checks, in order: IPv6 modes must be dhcpv6-stateful; VSD-managed
        vs openstack-managed specific validations; no mixing of the two
        kinds under one network; multiple same-version subnets are only
        allowed without a live DHCP agent; and nuage_l2bridge-specific
        restrictions. Raises NuageBadRequest on any violation.
        """
        for attribute in ('ipv6_ra_mode', 'ipv6_address_mode'):
            if not lib_validators.is_attr_set(subnet.get(attribute)):
                continue
            if subnet[attribute] != os_constants.DHCPV6_STATEFUL:
                msg = _("Attribute %(attribute)s must be '%(allowed)s' or "
                        "not set.")
                raise NuageBadRequest(
                    resource='subnet',
                    msg=msg % {'attribute': attribute,
                               'allowed': os_constants.DHCPV6_STATEFUL})
        network_subnets = self.core_plugin.get_subnets(
            db_context,
            filters={'network_id': [subnet['network_id']]})
        if vsd_managed:
            self._validate_create_vsd_managed_subnet(network, subnet)
        else:
            self._validate_create_openstack_managed_subnet(
                db_context, subnet, network_subnets)
        subnet_ids = [s['id'] for s in network_subnets]
        subnet_mappings = nuagedb.get_subnet_l2doms_by_subnet_ids(
            db_context.session,
            subnet_ids)
        # More than one distinct value => mix of managed kinds.
        if len(set([vsd_managed] + [m['nuage_managed_subnet']
                                    for m in subnet_mappings])) > 1:
            msg = _("Can't mix openstack and vsd managed subnets under 1 "
                    "network.")
            raise NuageBadRequest(resource='subnet', msg=msg)
        ipv4s = len([s for s in network_subnets if self._is_ipv4(s)])
        ipv6s = len([s for s in network_subnets if self._is_ipv6(s)])
        if ((ipv4s > 1 or ipv6s > 1) and
                self.check_dhcp_agent_alive(db_context) and
                not self.is_external(db_context, network['id'])):
            msg = _("A network with multiple ipv4 or ipv6 subnets is not "
                    "allowed when neutron-dhcp-agent is enabled")
            raise NuageBadRequest(msg=msg)
        # nuage_l2bridge tests
        if l2bridge:
            if self.check_dhcp_agent_alive(db_context):
                msg = _("A network cannot be attached to an l2bridge "
                        "when neutron-dhcp-agent is enabled")
                raise NuageBadRequest(msg=msg)
            if ipv4s > 1 or ipv6s > 1:
                msg = _("A network attached to a nuage_l2bridge cannot have"
                        " more than one ipv4 or more than one ipv6 subnet.")
                raise NuageBadRequest(msg=msg)
            # For l2bridges, certain parameters need to be equal for all
            # bridged subnets, as they are reflected on VSD.
            bridged_subnets = nuagedb.get_subnets_for_nuage_l2bridge(
                db_context.session,
                l2bridge['id'])
            # Make subnet dict to include extensions
            ipv_bridged = [
                self.core_plugin._make_subnet_dict(s)
                for s in bridged_subnets if
                s['id'] != subnet['id'] and
                s['ip_version'] == subnet['ip_version']]
            if not ipv_bridged:
                return
            for param in constants.L2BRIDGE_SUBNET_EQUAL_ATTRIBUTES:
                self._validate_l2bridge_added_subnet_parameter(
                    ipv_bridged[0], subnet, param, l2bridge)
@handle_nuage_api_errorcode
@utils.context_log
def create_subnet_postcommit(self, context):
# Set QOS
self.qos_driver.create_subnet(context)
    @handle_nuage_api_errorcode
    @utils.context_log
    def update_subnet_precommit(self, context):
        # Delegate the full update handling to the shared implementation
        # in the base plugin.
        self.update_subnet(context)
@utils.context_log
@handle_nuage_api_errorcode
def delete_subnet_precommit(self, context):
"""Get subnet_l2dom_mapping for later.
In postcommit this nuage_subnet_l2dom_mapping is no longer available
because it is set to CASCADE with the subnet. So this row will be
deleted prior to delete_subnet_postcommit
"""
subnet = context.current
db_context = context._plugin_context
context.nuage_mapping = nuagedb.get_subnet_l2dom_by_id(
db_context.session, subnet['id'])
context.dual_stack_subnet = self.get_dual_stack_subnet(db_context,
subnet)
if not context.nuage_mapping:
return
if self._is_l3(context.nuage_mapping) and context.dual_stack_subnet:
self._validate_vips_in_use(db_context, subnet)
def _validate_vips_in_use(self, db_context, subnet):
other_version = 4 if self._is_ipv6(subnet) else 6
nuage_subnets = (
nuagedb.get_subnet_mapping_by_network_id_and_ip_version(
db_context.session, subnet['network_id'],
ip_version=other_version))
for nuage_mapping in nuage_subnets:
| |
<filename>se_resnext50/src/models/resnet_bam_wider.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
The model of ResNet50+BAM. MindSpore1.2.0-Ascend.
"""
import numpy as np
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore.ops import functional as F
from mindspore.common.tensor import Tensor
import mindspore.common.dtype as mstype
from scipy.stats import truncnorm
conv_weight_init = "HeUniform"
def _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
    """Truncated-normal conv weight tensor (fan-in variance scaling)."""
    fan_in = in_channel * kernel_size * kernel_size
    # 0.87962566103423978 corrects the variance for truncation to [-2, 2]
    # standard deviations.
    stddev = ((1.0 / max(1.0, fan_in)) ** 0.5) / 0.87962566103423978
    n_values = out_channel * in_channel * kernel_size * kernel_size
    samples = truncnorm(-2, 2, loc=0, scale=stddev).rvs(n_values)
    samples = np.reshape(
        samples, (out_channel, in_channel, kernel_size, kernel_size))
    return Tensor(samples, dtype=mstype.float32)
def _weight_variable(shape, factor=0.01):
    """Random-normal float32 weight tensor scaled by *factor*."""
    values = factor * np.random.randn(*shape).astype(np.float32)
    return Tensor(values)
def _conv3x3(in_channel, out_channel, use_se=False):
    """Weight tensor for a 3x3 convolution (variance-scaled when use_se)."""
    if use_se:
        return _conv_variance_scaling_initializer(in_channel, out_channel,
                                                  kernel_size=3)
    return _weight_variable((out_channel, in_channel, 3, 3))
def _conv1x1(in_channel, out_channel, use_se=False):
    """Weight tensor for a 1x1 convolution (variance-scaled when use_se)."""
    if use_se:
        return _conv_variance_scaling_initializer(in_channel, out_channel,
                                                  kernel_size=1)
    return _weight_variable((out_channel, in_channel, 1, 1))
def _conv7x7(in_channel, out_channel, use_se=False):
    """Weight tensor for a 7x7 convolution (variance-scaled when use_se)."""
    if use_se:
        return _conv_variance_scaling_initializer(in_channel, out_channel,
                                                  kernel_size=7)
    return _weight_variable((out_channel, in_channel, 7, 7))
def _bn(channel):
    """BatchNorm2d with standard initialization (gamma=1) for hidden layers."""
    return nn.BatchNorm2d(
        channel,
        eps=1e-4,
        momentum=0.9,
        gamma_init=1,
        beta_init=0,
        moving_mean_init=0,
        moving_var_init=1,
    )
def _bn1(channel):
    """BatchNorm1d with standard initialization, for fully connected layers."""
    return nn.BatchNorm1d(
        channel,
        eps=1e-4,
        momentum=0.9,
        gamma_init=1,
        beta_init=0,
        moving_mean_init=0,
        moving_var_init=1,
    )
def _bn_last(channel):
    """BatchNorm2d with gamma initialized to 0.

    Zero-init of the last BN in a residual branch makes the branch start
    as identity, which commonly stabilizes early training.
    """
    return nn.BatchNorm2d(
        channel,
        eps=1e-4,
        momentum=0.9,
        gamma_init=0,
        beta_init=0,
        moving_mean_init=0,
        moving_var_init=1,
    )
def _fc(in_channel, out_channel, use_se=False):
    """Weight tensor for a fully connected (Dense) layer."""
    if use_se:
        # Gaussian init with fixed scale for the SE variant.
        values = np.random.normal(loc=0, scale=0.01,
                                  size=out_channel * in_channel)
        return Tensor(np.reshape(values, (out_channel, in_channel)),
                      dtype=mstype.float32)
    return _weight_variable((out_channel, in_channel))
class BasicBlock(nn.Cell):
    """
    Basic residual block: two 3x3 convs with an optional CBAM module
    applied to the residual branch before the skip connection.

    Args:
        inplanes (int): input channel count.
        planes (int): output channel count (expansion is 1).
        stride (int): stride of the first conv (downsampling when 2).
        downsample (nn.Cell): optional projection for the skip path.
        use_cbam (bool): attach a CBAM attention module when True.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, use_cbam=False):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels=inplanes,
            out_channels=planes,
            kernel_size=3,
            stride=stride,
            pad_mode="pad",
            padding=1,
            weight_init=conv_weight_init,
            has_bias=False,
        )
        self.bn1 = nn.BatchNorm2d(num_features=planes, momentum=0.9)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(
            in_channels=planes,
            out_channels=planes,
            kernel_size=3,
            stride=1,
            pad_mode="pad",
            padding=1,
            weight_init=conv_weight_init,
            has_bias=False,
        )
        self.bn2 = nn.BatchNorm2d(num_features=planes, momentum=0.9)
        self.downsample = downsample
        self.stride = stride
        if use_cbam:
            self.cbam = CBAM(planes, 16)
        else:
            self.cbam = None

    def construct(self, x):
        """conv-bn-relu-conv-bn (-cbam) + skip, then final ReLU."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # Idiom fix: `x is not None` instead of `not x is None`.
        if self.cbam is not None:
            out = self.cbam(out)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Cell):
    """
    Bottleneck residual block: 1x1 reduce, 3x3, 1x1 expand (x4), with an
    optional CBAM module applied before the skip connection.

    Args:
        inplanes (int): input channel count.
        planes (int): bottleneck channel count (output is planes * 4).
        stride (int): stride of the 3x3 conv (downsampling when 2).
        downsample (nn.Cell): optional projection for the skip path.
        use_cbam (bool): attach a CBAM attention module when True.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, use_cbam=False):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels=inplanes,
            out_channels=planes,
            kernel_size=1,
            pad_mode="pad",
            weight_init=conv_weight_init,
            has_bias=False,
        )
        self.bn1 = nn.BatchNorm2d(num_features=planes, momentum=0.9)
        self.conv2 = nn.Conv2d(
            in_channels=planes,
            out_channels=planes,
            kernel_size=3,
            stride=stride,
            pad_mode="pad",
            padding=1,
            weight_init=conv_weight_init,
            has_bias=False,
        )
        self.bn2 = nn.BatchNorm2d(num_features=planes, momentum=0.9)
        self.conv3 = nn.Conv2d(
            in_channels=planes,
            out_channels=planes * 4,
            kernel_size=1,
            pad_mode="pad",
            weight_init=conv_weight_init,
            has_bias=False,
        )
        self.bn3 = nn.BatchNorm2d(num_features=planes * 4, momentum=0.9)
        self.relu = nn.ReLU()
        self.downsample = downsample
        self.stride = stride
        if use_cbam:
            self.cbam = CBAM(planes * 4, 16)
        else:
            self.cbam = None

    def construct(self, x):
        """1x1-3x3-1x1 conv stack (-cbam) + skip, then final ReLU."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # Idiom fix: `x is not None` instead of `not x is None`.
        if self.cbam is not None:
            out = self.cbam(out)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Cell):
    """
    ResNet backbone (wider variant: stage 4 and the classifier are 2x the
    usual width) with optional BAM attention after stages 1-3.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers (list[int]): number of blocks per stage, e.g. [3, 4, 6, 3].
        network_type (str): "ImageNet" selects the 7x7/stride-2 stem with
            max pooling; any other value selects the CIFAR-style 3x3 stem.
        num_classes (int): size of the classification head.
        att_type (str): "BAM" inserts BAM modules between stages; "CBAM"
            puts a CBAM module inside every residual block; None disables
            attention.
    """

    def __init__(self, block, layers, network_type, num_classes, att_type=None):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.network_type = network_type
        # different model config between ImageNet and CIFAR
        if network_type == "ImageNet":
            self.conv1 = nn.Conv2d(
                in_channels=3,
                out_channels=64,
                kernel_size=7,
                stride=2,
                pad_mode="pad",
                padding=3,
                weight_init=conv_weight_init,
                has_bias=False,
            )
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
            self.avgpool = nn.AvgPool2d(kernel_size=7, stride=7, pad_mode="valid")
        else:
            self.conv1 = nn.Conv2d(
                in_channels=3,
                out_channels=64,
                kernel_size=3,
                stride=1,
                pad_mode="pad",
                padding=1,
                weight_init=conv_weight_init,
                has_bias=False,
            )
        self.bn1 = nn.BatchNorm2d(num_features=64, momentum=0.9)
        self.relu = nn.ReLU()
        if att_type == "BAM":
            self.bam1 = BAM(64 * block.expansion)
            self.bam2 = BAM(128 * block.expansion)
            self.bam3 = BAM(256 * block.expansion)
        else:
            self.bam1, self.bam2, self.bam3 = None, None, None
        self.layer1 = self._make_layer(block, 64, layers[0], att_type=att_type)
        self.layer2 = self._make_layer(
            block, 128, layers[1], stride=2, att_type=att_type
        )
        self.layer3 = self._make_layer(
            block, 256, layers[2], stride=2, att_type=att_type
        )
        # "wider" variant: the final stage uses twice the usual channels.
        self.layer4 = self._make_layer(
            block, 512 * 2, layers[3], stride=2, att_type=att_type
        )
        self.fc = nn.Dense(
            in_channels=512 * block.expansion * 2,
            out_channels=num_classes,
            has_bias=True,
            weight_init=_fc(512 * block.expansion * 2, num_classes),
            bias_init=0,
        )

    def _make_layer(self, block, planes, blocks, stride=1, att_type=None):
        """Build one residual stage of `blocks` blocks.

        The first block may downsample (stride != 1) and/or change the
        channel count; the remaining blocks keep stride 1.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 conv + BN to match the skip path's shape.
            downsample = nn.SequentialCell(
                [
                    nn.Conv2d(
                        in_channels=self.inplanes,
                        out_channels=planes * block.expansion,
                        kernel_size=1,
                        stride=stride,
                        pad_mode="pad",
                        weight_init=conv_weight_init,
                        has_bias=False,
                    ),
                    nn.BatchNorm2d(num_features=planes * block.expansion, momentum=0.9),
                ]
            )
        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, use_cbam=att_type == "CBAM"
            )
        )
        self.inplanes = planes * block.expansion
        # Dead `useless_` counter from the original removed; the loop index
        # was never used for anything.
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, use_cbam=att_type == "CBAM"))
        return nn.SequentialCell(layers)

    def construct(self, x):
        """Forward: stem -> 4 stages (optional BAM between) -> pool -> FC."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        if self.network_type == "ImageNet":
            x = self.maxpool(x)
        x = self.layer1(x)
        if self.bam1 is not None:
            x = self.bam1(x)
        x = self.layer2(x)
        if self.bam2 is not None:
            x = self.bam2(x)
        x = self.layer3(x)
        if self.bam3 is not None:
            x = self.bam3(x)
        x = self.layer4(x)
        if self.network_type == "ImageNet":
            x = self.avgpool(x)
        else:
            # CIFAR path: fixed 4x4 average pool.
            x = P.AvgPool(4, 4, "valid")(x)
        x = P.Reshape()(
            x,
            (
                P.Shape()(x)[0],
                -1,
            ),
        )
        x = self.fc(x)
        return x
# BAM
class Flatten(nn.Cell):
    """Flatten every dimension except the batch dimension."""

    def construct(self, x):
        batch = P.Shape()(x)[0]
        return P.Reshape()(x, (batch, -1))
class ChannelGate(nn.Cell):
    """
    Channel attention branch of BAM.

    Global-average-pools the input, feeds it through a small MLP
    (num_layers hidden layers of width gate_channel // reduction_ratio,
    each followed by BatchNorm1d + ReLU) and broadcasts the per-channel
    result back to the input shape.
    """

    def __init__(self, gate_channel, reduction_ratio=16, num_layers=1):
        super(ChannelGate, self).__init__()
        self.gate_c_list = [Flatten()]
        # Layer widths: gate_channel -> [hidden] * num_layers -> gate_channel.
        gate_channels = [gate_channel]
        gate_channels += [gate_channel // reduction_ratio] * num_layers
        gate_channels += [gate_channel]
        for i in range(len(gate_channels) - 2):
            self.gate_c_list.append(
                nn.Dense(
                    in_channels=gate_channels[i],
                    out_channels=gate_channels[i + 1],
                    has_bias=True,
                    weight_init=_fc(gate_channels[i], gate_channels[i + 1]),
                    bias_init=0,
                )
            )
            self.gate_c_list.append(
                nn.BatchNorm1d(num_features=gate_channels[i + 1], momentum=0.9)
            )
            self.gate_c_list.append(nn.ReLU())
        # Final projection back to the full channel count (no BN/ReLU).
        self.gate_c_list.append(
            nn.Dense(
                in_channels=gate_channels[-2],
                out_channels=gate_channels[-1],
                has_bias=True,
                weight_init=_fc(gate_channels[-2], gate_channels[-1]),
                bias_init=0,
            )
        )
        self.gate_c = nn.SequentialCell(self.gate_c_list)

    def construct(self, in_tensor):
        """Return the per-channel gate broadcast to the input's shape."""
        # NOTE(review): AvgPool uses size[2] for both kernel dimensions, so
        # this assumes square spatial input (H == W) — confirm with callers.
        size = F.shape(in_tensor)
        avg_pool = P.AvgPool(size[2], size[2])(in_tensor)
        expand_dims = P.ExpandDims()
        need = self.gate_c(avg_pool)
        # Restore (N, C) to (N, C, 1, 1) before broadcasting to (N, C, H, W).
        need = expand_dims(need, 2)
        need = expand_dims(need, 3)
        broadcast_to = P.BroadcastTo(size)
        need = broadcast_to(need)
        return need
class SpatialGate(nn.Cell):
    """
    Spatial attention branch of BAM.

    Reduces channels with a 1x1 conv, applies `dilation_conv_num` dilated
    3x3 convs, then projects to a single-channel map which is broadcast
    back to the input shape.
    """

    def __init__(
        self, gate_channel, reduction_ratio=16, dilation_conv_num=2, dilation_val=4
    ):
        super(SpatialGate, self).__init__()
        # 1x1 channel reduction.
        self.gate_s_list = [
            nn.Conv2d(
                in_channels=gate_channel,
                out_channels=gate_channel // reduction_ratio,
                weight_init=conv_weight_init,
                kernel_size=1,
                pad_mode="pad",
                has_bias=True,
            )
        ]
        self.gate_s_list.append(
            nn.BatchNorm2d(num_features=gate_channel // reduction_ratio, momentum=0.9)
        )
        self.gate_s_list.append(nn.ReLU())
        # Dilated convs enlarge the receptive field at constant resolution.
        # Dead `useless_2` counter from the original removed.
        for _ in range(dilation_conv_num):
            self.gate_s_list.append(
                nn.Conv2d(
                    in_channels=gate_channel // reduction_ratio,
                    out_channels=gate_channel // reduction_ratio,
                    kernel_size=3,
                    pad_mode="pad",
                    padding=dilation_val,
                    dilation=dilation_val,
                    weight_init=conv_weight_init,
                    has_bias=True,
                )
            )
            self.gate_s_list.append(
                nn.BatchNorm2d(
                    num_features=gate_channel // reduction_ratio, momentum=0.9
                )
            )
            self.gate_s_list.append(nn.ReLU())
        # Final 1x1 projection to a single attention channel.
        self.gate_s_list.append(
            nn.Conv2d(
                in_channels=gate_channel // reduction_ratio,
                out_channels=1,
                kernel_size=1,
                weight_init=conv_weight_init,
                pad_mode="pad",
                has_bias=True,
            )
        )
        self.gate_s = nn.SequentialCell(self.gate_s_list)

    def construct(self, in_tensor):
        """Return the single-channel gate broadcast to the input's shape."""
        size = F.shape(in_tensor)
        broadcast_to = P.BroadcastTo(size)
        return broadcast_to(self.gate_s(in_tensor))
class BAM(nn.Cell):
    """Bottleneck Attention Module: channel gate x spatial gate."""

    def __init__(self, gate_channel):
        super(BAM, self).__init__()
        self.channel_att = ChannelGate(gate_channel)
        self.spatial_att = SpatialGate(gate_channel)

    def construct(self, in_tensor):
        """Scale the input by 1 + sigmoid(channel_gate * spatial_gate)."""
        gate = self.channel_att(in_tensor) * self.spatial_att(in_tensor)
        scale = 1 + P.Sigmoid()(gate)
        return scale * in_tensor
# CBAM
class BasicConv(nn.Cell):
    """
    Conv2d followed by optional BatchNorm and optional ReLU.

    Args:
        in_planes (int): input channel count.
        out_planes (int): output channel count.
        kernel_size (int): convolution kernel size.
        stride (int): convolution stride.
        padding (int): explicit padding (pad_mode="pad").
        dilation (int): convolution dilation.
        groups (int): number of convolution groups.
        relu (bool): append a ReLU when True.
        bn (bool): append a BatchNorm2d when True.
        bias (bool): give the conv a bias term when True.
    """

    def __init__(
        self,
        in_planes,
        out_planes,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        relu=True,
        bn=True,
        bias=False,
    ):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(
            in_channels=in_planes,
            out_channels=out_planes,
            kernel_size=kernel_size,
            stride=stride,
            pad_mode="pad",
            padding=padding,
            dilation=dilation,
            # BUG FIX: `groups` was accepted but silently ignored; forward
            # it to Conv2d (MindSpore names the parameter `group`). The
            # default of 1 keeps existing callers unchanged.
            group=groups,
            has_bias=bias,
        )
        self.bn = (
            nn.BatchNorm2d(
                num_features=out_planes, eps=1e-5, momentum=0.01, affine=True
            )
            if bn
            else None
        )
        self.relu = nn.ReLU() if relu else None

    def construct(self, x):
        """Apply conv, then BN and ReLU when enabled."""
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        return x
class ChannelGate_CBAM(nn.Cell):
"""
ChannelGate_CBAM
"""
    def __init__(self, gate_channels, reduction_ratio=16):
        """CBAM channel attention: shared bottleneck MLP over pooled input.

        :param gate_channels: number of input channels.
        :param reduction_ratio: bottleneck reduction factor for the MLP.
        """
        super(ChannelGate_CBAM, self).__init__()
        self.gate_channels = gate_channels
        # Shared MLP: flatten -> reduce Dense -> ReLU -> expand Dense.
        self.mlp = nn.SequentialCell(
            [
                Flatten(),
                nn.Dense(
                    in_channels=gate_channels,
                    out_channels=gate_channels // reduction_ratio,
                ),
                nn.ReLU(),
                nn.Dense(
                    in_channels=gate_channels // reduction_ratio,
                    out_channels=gate_channels,
                ),
            ]
        )
        # Used in construct() for the max-pooled branch.
        self.reducemax_ = P.ReduceMax(keep_dims=True)
def construct(self, x):
"""
construct
"""
size = F.shape(x)
avg_pool = P.AvgPool((size[2], size[3]), (size[2], size[3]))(x)
channel_att_raw = self.mlp(avg_pool)
channel_att_sum = channel_att_raw
max_pool = self.reducemax_(x, (2, 3))
| |
from __future__ import division
from tensorflow.examples.tutorials.mnist import input_data
import re
from ops import *
from utils import *
from kh_tools import *
import logging
import matplotlib.pyplot as plt
class ALOCC_Model(object):
def __init__(self, sess,
input_height=45,input_width=45, output_height=64, output_width=64,
batch_size=128, sample_num = 128, attention_label=1, is_training=True,
z_dim=100, gf_dim=16, df_dim=16, gfc_dim=512, dfc_dim=512, c_dim=3,
dataset_name=None, dataset_address=None, input_fname_pattern=None,
checkpoint_dir=None, log_dir=None, sample_dir=None, r_alpha = 0.2,
kb_work_on_patch=True, nd_input_frame_size=(240, 360), nd_patch_size=(10, 10), n_stride=1,
n_fetch_data=10, n_per_itr_print_results=500):
"""
This is the main class of our Adversarially Learned One-Class Classifier for Novelty Detection
:param sess: TensorFlow session
:param batch_size: The size of batch. Should be specified before training. [128]
:param attention_label: Conditioned label that growth attention of training label [1]
:param r_alpha: Refinement parameter [0.2]
:param z_dim: (optional) Dimension of dim for Z. [100]
:param gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
:param df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
:param gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]
:param dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
:param c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
:param sample_dir: Directory address which save some samples [.]
:param kb_work_on_patch: Boolean value for working on PatchBased System or not [True]
:param nd_input_frame_size: Input frame size
:param nd_patch_size: Input patch size
:param n_stride: PatchBased data preprocessing stride
:param n_fetch_data: Fetch size of Data
:param n_per_itr_print_results: # of printed iteration
"""
self.n_per_itr_print_results=n_per_itr_print_results
self.nd_input_frame_size = nd_input_frame_size
self.b_work_on_patch = kb_work_on_patch
self.sample_dir = sample_dir
self.sess = sess
self.is_training = is_training
self.r_alpha = r_alpha
self.batch_size = batch_size
self.sample_num = sample_num
self.input_height = input_height
self.input_width = input_width
self.output_height = output_height
self.output_width = output_width
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
# batch normalization : deals with poor initialization helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.d_bn3 = batch_norm(name='d_bn3')
self.d_bn4 = batch_norm(name='d_bn4')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
self.g_bn3 = batch_norm(name='g_bn3')
self.g_bn4 = batch_norm(name='g_bn4')
self.g_bn5 = batch_norm(name='g_bn5')
self.g_bn6 = batch_norm(name='g_bn6')
self.dataset_name = dataset_name
self.dataset_address= dataset_address
self.input_fname_pattern = input_fname_pattern
self.checkpoint_dir = checkpoint_dir
self.log_dir = log_dir
self.attention_label = attention_label
if self.is_training:
logging.basicConfig(filename='ALOCC_loss.log', level=logging.INFO)
if self.dataset_name == 'mnist':
mnist = input_data.read_data_sets(self.dataset_address)
specific_idx = np.where(mnist.train.labels == self.attention_label)[0]
self.data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
self.c_dim = 1
elif self.dataset_name == 'UCSD':
self.nStride = n_stride
self.patch_size = nd_patch_size
self.patch_step = (n_stride, n_stride)
lst_image_paths = []
for s_image_dir_path in glob(os.path.join(self.dataset_address, self.input_fname_pattern)):
for sImageDirFiles in glob(os.path.join(s_image_dir_path+'/*')):
lst_image_paths.append(sImageDirFiles)
self.dataAddress = lst_image_paths
lst_forced_fetch_data = [self.dataAddress[x] for x in random.sample(range(0, len(lst_image_paths)), n_fetch_data)]
self.data = lst_forced_fetch_data
self.c_dim = 1
else:
assert('Error in loading dataset')
self.grayscale = (self.c_dim == 1)
self.build_model()
# =========================================================================================================
def build_model(self):
image_dims = [self.input_height, self.input_width, self.c_dim]
self.inputs = tf.placeholder(tf.float32, [self.batch_size] + image_dims, name='real_images')
self.sample_inputs = tf.placeholder(tf.float32, [self.sample_num] + image_dims, name='sample_inputs')
inputs = self.inputs
sample_inputs = self.sample_inputs
self.z = tf.placeholder(tf.float32,[self.batch_size] + image_dims, name='z')
self.G = self.generator(self.z)
self.D, self.D_logits = self.discriminator(inputs)
self.sampler = self.sampler(self.z)
self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)
# tesorboard setting
# self.z_sum = histogram_summary("z", self.z)
#self.d_sum = histogram_summary("d", self.D)
#self.d__sum = histogram_summary("d_", self.D_)
#self.G_sum = image_summary("G", self.G)
# Simple GAN's losses
self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.D_logits, logits=tf.ones_like(self.D)))
self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.D_logits_, logits=tf.zeros_like(self.D_)))
self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.D_logits_, logits=tf.ones_like(self.D_)))
# Refinement loss
self.g_r_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.G,logits=self.z))
self.g_loss = self.g_loss + self.g_r_loss * self.r_alpha
self.d_loss = self.d_loss_real + self.d_loss_fake
self.d_loss_real_sum = scalar_summary("d_loss_real", self.d_loss_real)
self.d_loss_fake_sum = scalar_summary("d_loss_fake", self.d_loss_fake)
self.g_loss_sum = scalar_summary("g_loss", self.g_loss)
self.d_loss_sum = scalar_summary("d_loss", self.d_loss)
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
# =========================================================================================================
    def train(self, config):
        """Run the adversarial training loop.

        Alternates discriminator and generator (reconstructor) updates for
        ``config.epoch`` epochs, writes TensorBoard summaries, periodically
        exports sample reconstructions, and checkpoints once per epoch.

        NOTE(review): this code is Python 2 only (uses ``xrange``), and the
        non-mnist branch expects a file ``SIFTETS.npy`` in the working
        directory -- confirm both before running.
        """
        # One RMSProp optimizer per sub-network, each restricted to its own
        # variable list (d_* / g_* name prefixes collected in build_model).
        d_optim = tf.train.RMSPropOptimizer(config.learning_rate).minimize(self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.RMSPropOptimizer(config.learning_rate).minimize(self.g_loss, var_list=self.g_vars)
        # Fall back to the pre-TF-0.12 initializer name on old TensorFlow.
        try:
            tf.global_variables_initializer().run()
        except:
            tf.initialize_all_variables().run()
        self.saver = tf.train.Saver()
        self.g_sum = merge_summary([self.d_loss_fake_sum, self.g_loss_sum])
        self.d_sum = merge_summary([self.d_loss_real_sum, self.d_loss_sum])
        log_dir = os.path.join(self.log_dir, self.model_dir)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        self.writer = SummaryWriter(log_dir, self.sess.graph)
        # Pick a fixed batch of inputs used for the exported preview images.
        if config.dataset == 'mnist':
            sample = self.data[0:self.sample_num]
        elif config.dataset =='UCSD':
            if self.b_work_on_patch:
                sample_files = self.data[0:10]
            else:
                sample_files = self.data[0:self.sample_num]
            sample,_ = read_lst_images(sample_files, self.patch_size, self.patch_step, self.b_work_on_patch)
            sample = np.array(sample).reshape(-1, self.patch_size[0], self.patch_size[1], 1)
            sample = sample[0:self.sample_num]
        # export the raw input samples once for visual reference
        sample_inputs = np.array(sample).astype(np.float32)
        scipy.misc.imsave('./{}/train_input_samples.jpg'.format(config.sample_dir), montage(sample_inputs[:,:,:,0]))
        # load previous checkpoint (resume the global step counter if found)
        counter = 1
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            counter = checkpoint_counter
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        # Load training data; the noisy copy is what gets fed to the
        # reconstructor input placeholder self.z.
        if config.dataset == 'mnist':
            sample_w_noise = get_noisy_data(self.data)
        if config.dataset == 'UCSD':
            sample_files = self.data
            sample, _ = read_lst_images(sample_files, self.patch_size, self.patch_step, self.b_work_on_patch)
            sample = np.array(sample).reshape(-1, self.patch_size[0], self.patch_size[1], 1)
            sample_w_noise,_ = read_lst_images_w_noise(sample_files, self.patch_size, self.patch_step)
            sample_w_noise = np.array(sample_w_noise).reshape(-1, self.patch_size[0], self.patch_size[1], 1)
        for epoch in xrange(config.epoch):
            print('Epoch ({}/{})-------------------------------------------------'.format(epoch,config.epoch))
            if config.dataset == 'mnist':
                batch_idxs = min(len(self.data), config.train_size) // config.batch_size
            elif config.dataset == 'UCSD':
                batch_idxs = min(len(sample), config.train_size) // config.batch_size
            # Fixed test batch used to detect the epoch at which to stop
            # training ("sample_input_for_test_each_train_step.npy").
            sample_test = np.load('SIFTETS.npy').reshape([504,45,45,1])[0:128]
            for idx in xrange(0, batch_idxs):
                if config.dataset == 'mnist':
                    batch = self.data[idx * config.batch_size:(idx + 1) * config.batch_size]
                    batch_noise = sample_w_noise[idx * config.batch_size:(idx + 1) * config.batch_size]
                elif config.dataset == 'UCSD':
                    batch = sample[idx * config.batch_size:(idx + 1) * config.batch_size]
                    batch_noise = sample_w_noise[idx * config.batch_size:(idx + 1) * config.batch_size]
                batch_images = np.array(batch).astype(np.float32)
                batch_noise_images = np.array(batch_noise).astype(np.float32)
                # NOTE(review): batch_z is never fed to the graph below --
                # looks like dead code inherited from a latent-vector GAN.
                batch_z = np.random.uniform(-1, 1, [config.batch_size, self.z_dim]).astype(np.float32)
                if config.dataset == 'mnist':
                    # Update D network
                    _, summary_str = self.sess.run([d_optim, self.d_sum],
                                                   feed_dict={self.inputs: batch_images, self.z: batch_noise_images})
                    self.writer.add_summary(summary_str, counter)
                    # Update G network
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_noise_images})
                    self.writer.add_summary(summary_str, counter)
                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_noise_images})
                    self.writer.add_summary(summary_str, counter)
                    errD_fake = self.d_loss_fake.eval({self.z: batch_noise_images})
                    errD_real = self.d_loss_real.eval({self.inputs: batch_images})
                    errG = self.g_loss.eval({self.z: batch_noise_images})
                else:
                    # update discriminator
                    _, summary_str = self.sess.run([d_optim, self.d_sum],
                                                   feed_dict={ self.inputs: batch_images, self.z: batch_noise_images })
                    self.writer.add_summary(summary_str, counter)
                    # update refinement(generator)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={ self.z: batch_noise_images })
                    self.writer.add_summary(summary_str, counter)
                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={ self.z: batch_noise_images })
                    self.writer.add_summary(summary_str, counter)
                    errD_fake = self.d_loss_fake.eval({ self.z: batch_noise_images })
                    errD_real = self.d_loss_real.eval({ self.inputs: batch_images })
                    errG = self.g_loss.eval({self.z: batch_noise_images})
                counter += 1
                msg = "Epoch:[%2d][%4d/%4d]--> d_loss: %.8f, g_loss: %.8f" % (epoch, idx, batch_idxs, errD_fake+errD_real, errG)
                print(msg)
                logging.info(msg)
                # Periodically export sample reconstructions for inspection.
                if np.mod(counter, self.n_per_itr_print_results) == 0:
                    if config.dataset == 'mnist':
                        samples, d_loss, g_loss = self.sess.run(
                            [self.sampler, self.d_loss, self.g_loss],
                            feed_dict={
                                self.z: sample_inputs,
                                self.inputs: sample_inputs
                            }
                        )
                        # Arrange the preview grid as close to square as possible.
                        manifold_h = int(np.ceil(np.sqrt(samples.shape[0])))
                        manifold_w = int(np.floor(np.sqrt(samples.shape[0])))
                        save_images(samples, [manifold_h, manifold_w],
                                    './{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx))
                        print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
                    # ====================================================================================================
                    else:
                        #try:
                        samples, d_loss, g_loss = self.sess.run(
                            [self.sampler, self.d_loss, self.g_loss],
                            feed_dict={
                                self.z: sample_inputs,
                                self.inputs: sample_inputs,
                            },
                        )
                        sample_test_out = self.sess.run(
                            [self.sampler],
                            feed_dict={
                                self.z: sample_test
                            },
                        )
                        # export images
                        scipy.misc.imsave('./{}/z_test_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx),
                                          montage(samples[:, :, :, 0]))
                        # export images
                        scipy.misc.imsave('./{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx),
                                          montage(samples[:, :, :, 0]))
                        msg = "[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss)
                        print(msg)
                        logging.info(msg)
            # checkpoint once per epoch
            self.save(config.checkpoint_dir, epoch)
# =========================================================================================================
def discriminator(self, image,reuse=False):
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
h5 = tf.nn.sigmoid(h4,name='d_output')
return h5, h4
# =========================================================================================================
def generator(self, z):
with tf.variable_scope("generator") as scope:
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
hae0 = lrelu(self.g_bn4(conv2d(z , self.df_dim * 2, name='g_encoder_h0_conv')))
hae1 = lrelu(self.g_bn5(conv2d(hae0, self.df_dim * 4, name='g_encoder_h1_conv')))
hae2 = lrelu(self.g_bn6(conv2d(hae1, self.df_dim * 8, name='g_encoder_h2_conv')))
h2, self.h2_w, self.h2_b = deconv2d(
hae2, [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='g_decoder_h1', with_w=True)
h2 = tf.nn.relu(self.g_bn2(h2))
h3, self.h3_w, self.h3_b = deconv2d(
h2, [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='g_decoder_h0', with_w=True)
h3 = tf.nn.relu(self.g_bn3(h3))
h4, self.h4_w, self.h4_b = deconv2d(
h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_decoder_h00', with_w=True)
return tf.nn.tanh(h4,name='g_output')
# =========================================================================================================
def sampler(self, z, y=None):
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
| |
if not os.path.isdir(os.path.dirname(name)):
if makedirs:
_makedirs(name, user=user, group=group, mode=mode)
else:
return _error(
ret, 'No directory to create {0} in'.format(name))
if not os.path.isdir(name):
_makedirs(name, user=user, group=group, mode=mode)
os.makedirs(name)
if not os.path.isdir(name):
return _error(ret, 'Failed to create directory {0}'.format(name))
# Check permissions
ret, perms = _check_perms(name, ret, user, group, mode)
if recurse:
if not set(['user', 'group']) >= set(recurse):
ret['result'] = False
ret['comment'] = 'Types for "recurse" limited to "user" and ' \
'"group"'
else:
targets = copy.copy(recurse)
if 'user' in targets:
if user:
uid = __salt__['file.user_to_uid'](user)
# file.user_to_uid returns '' if user does not exist. Above
# check for user is not fatal, so we need to be sure user
# exists.
if type(uid).__name__ == 'str':
ret['result'] = False
ret['comment'] = 'Failed to enforce ownership for ' \
'user {0} (user does not ' \
'exist)'.format(user)
# Remove 'user' from list of recurse targets
targets = list(x for x in targets if x != 'user')
else:
ret['result'] = False
ret['comment'] = 'user not specified, but configured as ' \
'a target for recursive ownership management'
# Remove 'user' from list of recurse targets
targets = list(x for x in targets if x != 'user')
if 'group' in targets:
if group:
gid = __salt__['file.group_to_gid'](group)
# As above with user, we need to make sure group exists.
if type(gid).__name__ == 'str':
ret['result'] = False
ret['comment'] = 'Failed to enforce group ownership ' \
'for group {0}'.format(group, user)
# Remove 'group' from list of recurse targets
targets = list(x for x in targets if x != 'group')
else:
ret['result'] = False
ret['comment'] = 'group not specified, but configured ' \
'as a target for recursive ownership management'
# Remove 'group' from list of recurse targets
targets = list(x for x in targets if x != 'group')
needs_fixed = {}
if targets:
file_tree = __salt__['file.find'](name)
for path in file_tree:
fstat = os.stat(path)
if 'user' in targets and fstat.st_uid != uid:
needs_fixed['user'] = True
if needs_fixed.get('group'):
break
if 'group' in targets and fstat.st_gid != gid:
needs_fixed['group'] = True
if needs_fixed.get('user'):
break
if needs_fixed.get('user'):
# Make sure the 'recurse' subdict exists
ret['changes'].setdefault('recurse', {})
if 'user' in targets:
if __salt__['cmd.retcode']('chown -R {0} "{1}"'.format(
user, name)) != 0:
ret['result'] = False
ret['comment'] = 'Failed to enforce ownership on ' \
'{0} for user {1}'.format(name, group)
else:
ret['changes']['recurse']['user'] = \
__salt__['file.uid_to_user'](uid)
if needs_fixed.get('group'):
ret['changes'].setdefault('recurse', {})
if 'group' in targets:
if __salt__['cmd.retcode']('chown -R :{0} "{1}"'.format(
group, name)) != 0:
ret['result'] = False
ret['comment'] = 'Failed to enforce group ownership ' \
'on {0} for group ' \
'{1}'.format(name, group)
else:
ret['changes']['recurse']['group'] = \
__salt__['file.gid_to_group'](gid)
if clean:
keep = _gen_keep_files(name, require)
removed = _clean_dir(name, list(keep))
if removed:
ret['changes']['removed'] = removed
ret['comment'] = 'Files cleaned from directory {0}'.format(name)
if not ret['comment']:
ret['comment'] = 'Directory {0} updated'.format(name)
if __opts__['test']:
ret['comment'] = 'Directory {0} not updated'.format(name)
elif not ret['changes'] and ret['result']:
ret['comment'] = 'Directory {0} is in the correct state'.format(name)
return ret
def _md5sum(path):
    '''
    Return the hex md5 digest of the file at ``path``.

    The file is opened in binary mode and hashed in fixed-size chunks so
    that large files are never read into memory whole and the digest is
    not affected by platform newline translation.
    '''
    md5 = hashlib.md5()
    with open(path, 'rb') as fp_:
        for chunk in iter(lambda: fp_.read(65536), b''):
            md5.update(chunk)
    return md5.hexdigest()


def recurse(name,
        source,
        clean=False,
        require=None,
        user=None,
        group=None,
        dir_mode=None,
        file_mode=None,
        env=None,
        include_empty=False,
        **kwargs):
    '''
    Recurse through a subdirectory on the master and copy said subdirectory
    over to the specified path.

    name
        The directory to set the recursion in

    source
        The source directory, this directory is located on the salt master file
        server and is specified with the salt:// protocol. If the directory is
        located on the master in the directory named spam, and is called eggs,
        the source string is salt://spam/eggs

    clean
        Make sure that only files that are set up by salt and required by this
        function are kept. If this option is set then everything in this
        directory will be deleted unless it is required.

    require
        Require other resources such as packages or files

    user
        The user to own the directory, this defaults to the user salt is
        running as on the minion

    group
        The group ownership set for the directory, this defaults to the group
        salt is running as on the minion

    dir_mode
        The permissions mode to set any directories created

    file_mode
        The permissions mode to set any files created

    include_empty
        Set this to True if empty directories should also be created
        (default is False)
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    if not os.path.isabs(name):
        return _error(
            ret, 'Specified file {0} is not an absolute path'.format(name))
    if env is None:
        env = kwargs.get('__env__', 'base')
    keep = set()
    # Verify the target directory
    if not os.path.isdir(name):
        if os.path.exists(name):
            # it is not a dir, but it exists - fail out
            return _error(
                ret, 'The path {0} exists and is not a directory'.format(name))
        os.makedirs(name)
    if __opts__['test']:
        # Test mode: report what would change without touching the minion.
        ret['result'], ret['comment'] = _check_recurse(
            name,
            source,
            clean,
            require,
            user,
            group,
            dir_mode,
            file_mode,
            env,
            include_empty)
        return ret
    # Directories whose permissions have already been verified this run.
    vdir = set()
    for fn_ in __salt__['cp.cache_dir'](source, env, include_empty):
        if not fn_.strip():
            continue
        # Map the cached file back onto its destination path under ``name``.
        dest = os.path.join(name,
                            os.path.relpath(
                                fn_,
                                os.path.join(
                                    __opts__['cachedir'],
                                    'files',
                                    env,
                                    source[7:]
                                )
                            )
                            )
        dirname = os.path.dirname(dest)
        if not os.path.isdir(dirname):
            # NOTE(review): _makedirs is passed ``dest`` (not ``dirname``);
            # presumably it creates the parents of its argument -- confirm.
            _makedirs(dest, user=user, group=group)
        if dirname not in vdir:
            # verify the directory perms if they are set
            _ret, perms = _check_perms(dirname, {}, user, group, dir_mode)
            if _ret['changes']:
                ret['changes'][dirname] = 'updated'
            vdir.add(dirname)
        if os.path.isfile(dest):
            _ret, perms = _check_perms(dest, {}, user, group, file_mode)
            if _ret['changes']:
                ret['changes'][dest] = 'updated'
            keep.add(dest)
            # The file is already present; replace it only if its checksum
            # differs from the cached copy. BUGFIX: hash in binary mode and
            # in chunks (see _md5sum) instead of text-mode whole-file reads
            # through the deprecated contextlib.nested -- text mode corrupts
            # the comparison for binary files on platforms with newline
            # translation, and whole-file reads waste memory on large files.
            if _md5sum(fn_) != _md5sum(dest):
                # The downloaded file differs, replace!
                # FIXME: no metadata (ownership, permissions) available
                shutil.copyfile(fn_, dest)
                ret['changes'][dest] = 'updated'
        elif os.path.isdir(dest) and include_empty:
            # check perms
            _ret, perms = _check_perms(dest, {}, user, group, dir_mode)
            if _ret['changes']:
                ret['changes'][dest] = 'updated'
            keep.add(dest)
        else:
            keep.add(dest)
            if os.path.isdir(fn_) and include_empty:
                # create empty dir
                # BUGFIX: os.mkdir(dest, None) raises TypeError; only pass
                # the mode argument when one was actually supplied.
                if dir_mode is None:
                    os.mkdir(dest)
                else:
                    os.mkdir(dest, dir_mode)
            else:
                # The destination file is not present, make it
                # FIXME: no metadata (ownership, permissions) available
                shutil.copyfile(fn_, dest)
                ret['changes'][dest] = 'new'
    keep = list(keep)
    if clean:
        keep += _gen_keep_files(name, require)
        removed = _clean_dir(name, list(keep))
        if removed:
            ret['changes']['removed'] = removed
            ret['comment'] = 'Files cleaned from directory {0}'.format(name)
    return ret
def sed(name, before, after, limit='', backup='.bak', options='-r -e',
        flags='g'):
    '''
    Maintain a simple edit to a file

    The ``before`` pattern is searched for prior to the edit, and ``after``
    is searched for afterwards to verify the edit was successful, using
    :mod:`salt.modules.file.contains`. In general the ``limit`` pattern
    should be as specific as possible and ``before`` and ``after`` should
    contain the minimal text to be changed.

    Usage::

        # Disable the epel repo by default
        /etc/yum.repos.d/epel.repo:
          file.sed:
            - before: 1
            - after: 0
            - limit: ^enabled=

        # Remove ldap from nsswitch
        /etc/nsswitch.conf:
          file.sed:
            - before: 'ldap'
            - after: ''
            - limit: '^passwd:'

    .. versionadded:: 0.9.5
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)
    # sed emits no output whether the edit matched or not, so the file must
    # be inspected directly. Both patterns are coerced to strings first.
    before, after = str(before), str(after)
    contains = __salt__['file.contains_regex']
    # Look for the pattern before attempting the edit
    if not contains(name, before):
        # Pattern not found; try to guess why
        if contains(name, after):
            ret['result'] = True
            ret['comment'] = 'Edit already performed'
        else:
            ret['comment'] = 'Pattern not matched'
        return ret
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'File {0} is set to be updated'.format(name)
        return ret
    # should be ok now; perform the edit, then confirm it took effect
    __salt__['file.sed'](name, before, after, limit, backup, options, flags)
    ret['result'] = __salt__['file.contains_regex'](name, after)
    if not ret['result']:
        ret['comment'] = 'Expected edit does not appear in file'
    else:
        ret['comment'] = 'File successfully edited'
        ret['changes'].update({'old': before, 'new': after})
    return ret
def comment(name, regex, char='#', backup='.bak'):
'''
Usage::
/etc/fstab:
file.comment:
- regex: ^bind 127.0.0.1
.. versionadded:: 0.9.5
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
unanchor_regex | |
"""
Tests for Markov Regression models
Author: <NAME>
License: BSD-3
"""
from __future__ import division, absolute_import, print_function
import os
import warnings
import pytest
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from sm2.tsa.regime_switching import markov_switching, markov_regression
current_path = os.path.dirname(os.path.abspath(__file__))
# See https://www.stata-press.com/data/r14/usmacro
fedfunds = [1.03, 0.99, 1.34, 1.5, 1.94, 2.36, 2.48, 2.69, 2.81, 2.93, 2.93,
3.0, 3.23, 3.25, 1.86, 0.94, 1.32, 2.16, 2.57, 3.08, 3.58, 3.99,
3.93, 3.7, 2.94, 2.3, 2.0, 1.73, 1.68, 2.4, 2.46, 2.61, 2.85,
2.92, 2.97, 2.96, 3.33, 3.45, 3.46, 3.49, 3.46, 3.58, 3.97, 4.08,
4.07, 4.17, 4.56, 4.91, 5.41, 5.56, 4.82, 3.99, 3.89, 4.17, 4.79,
5.98, 5.94, 5.92, 6.57, 8.33, 8.98, 8.94, 8.57, 7.88, 6.7, 5.57,
3.86, 4.56, 5.47, 4.75, 3.54, 4.3, 4.74, 5.14, 6.54, 7.82, 10.56,
10.0, 9.32, 11.25, 12.09, 9.35, 6.3, 5.42, 6.16, 5.41, 4.83, 5.2,
5.28, 4.87, 4.66, 5.16, 5.82, 6.51, 6.76, 7.28, 8.1, 9.58, 10.07,
10.18, 10.95, 13.58, 15.05, 12.69, 9.84, 15.85, 16.57, 17.78,
17.58, 13.59, 14.23, 14.51, 11.01, 9.29, 8.65, 8.8, 9.46, 9.43,
9.69, 10.56, 11.39, 9.27, 8.48, 7.92, 7.9, 8.1, 7.83, 6.92, 6.21,
6.27, 6.22, 6.65, 6.84, 6.92, 6.66, 7.16, 7.98, 8.47, 9.44, 9.73,
9.08, 8.61, 8.25, 8.24, 8.16, 7.74, 6.43, 5.86, 5.64, 4.82, 4.02,
3.77, 3.26, 3.04, 3.04, 3.0, 3.06, 2.99, 3.21, 3.94, 4.49, 5.17,
5.81, 6.02, 5.8, 5.72, 5.36, 5.24, 5.31, 5.28, 5.28, 5.52, 5.53,
5.51, 5.52, 5.5, 5.53, 4.86, 4.73, 4.75, 5.09, 5.31, 5.68, 6.27,
6.52, 6.47, 5.59, 4.33, 3.5, 2.13, 1.73, 1.75, 1.74, 1.44, 1.25,
1.25, 1.02, 1.0, 1.0, 1.01, 1.43, 1.95, 2.47, 2.94, 3.46, 3.98,
4.46, 4.91, 5.25, 5.25, 5.26, 5.25, 5.07, 4.5, 3.18, 2.09, 1.94,
0.51, 0.18, 0.18, 0.16, 0.12, 0.13, 0.19, 0.19, 0.19]
# See https://www.stata-press.com/data/r14/usmacro
ogap = [-0.53340107, 0.72974336, 2.93532324, 3.58194304, 4.15760183,
4.28775644, 3.01683831, 2.64185619, 1.82473528, 2.37461162,
2.39338565, 1.24197006, 1.1370815, -1.28657401, -4.46665335,
-4.79258966, -3.06711817, -1.3212384, -0.54485309, 0.86588413,
-0.2469136, -0.75004685, 0.7417022, -0.71350163, -1.5151515,
-3.80444455, -4.02601957, -3.17873883, -2.48841596, -1.42372882,
-0.61779928, -0.6430338, -0.73277968, -1.38330388, -1.31537247,
-0.95626277, 0., -0.15248552, 0.93233085, 1.03888392,
1.27174389, 0.63400578, 2.13007665, 2.44789481, 3.37605071,
4.72771597, 6.20753956, 5.39234877, 5.0825758, 4.8605876,
4.65116262, 3.52755141, 3.35122228, 3.09326482, 4.10191917,
4.69641066, 4.38452244, 3.79841614, 4.38338947, 3.63766766,
3.24129653, 1.84967709, 0.75554705, -0.02802691, -0.03673432,
-1.90527546, -0.14918824, -0.42940569, -0.46382189, -0.97892815,
-0.12142799, 1.37281513, 1.5143193, 2.47730422, 3.9762032,
4.08987427, 2.62857127, 2.90107131, 0.97277576, 0.42547619,
-1.60488391, -2.97784758, -4.98650694, -5.03382635, -4.25698328,
-3.74993205, -2.39661908, -2.41223454, -2.66694117, -2.62232494,
-2.29969597, -1.38809109, -0.67855304, -1.08100712, -1.82682908,
0.92868561, 0.87040615, 1.32669306, 0.56407404, -0.13848817,
-0.13089494, -0.58975571, -1.00534534, -3.55482054, -4.20365095,
-2.97225475, -1.57762408, -2.77206445, -2.32418823, -4.01929235,
-6.25393772, -6.46356869, -7.47437572, -8.06377602, -7.57157278,
-6.14639282, -5.00167227, -3.74511886, -2.54788184, -1.64858043,
-1.47994602, -1.44707143, -1.31824112, -1.20102882, -0.57691002,
-0.64480144, -0.57239723, -0.93083948, -0.8392899, -1.19972074,
-1.18918467, -0.87174636, -0.78151888, 0.10762761, -0.10596547,
0.40488175, 0.17958413, 0.67704558, 0.99767941, 1.00495291,
0.98304421, 0.47067845, 0.80427116, 0.45058677, -0.26300991,
-1.84629929, -2.99437666, -2.90482664, -3.09490418, -3.32399321,
-2.87384319, -2.47262239, -2.19618678, -1.91843009, -2.46574545,
-2.58180451, -2.72212362, -2.17548561, -1.96046102, -1.3287729,
-1.42521954, -1.04951096, -1.47037697, -1.87099183, -1.72912872,
-1.76828432, -1.85885167, -0.9193368, -0.95776832, -0.62119246,
-0.53508854, -0.04090983, 0.47511154, 0.41246772, 0.57928383,
0.67604625, 1.1378212, 1.96481478, 2.05066752, 1.93714142,
2.34412026, 3.16807413, 2.57455897, 3.59218717, 2.79711962,
2.41787243, 1.19362748, 0.82524049, -0.36692095, -1.00542021,
-0.89346135, -1.23166943, -1.56921482, -2.29188299, -2.56877398,
-2.37549472, -1.4183135, -1.00017595, -1.03901041, -0.86736482,
-0.63541794, -0.38296556, 0.11404825, 0.07249562, 0.30608681,
0.27121997, 0.90333837, 0.595429, 0.08057959, 0.25154814,
-0.27741581, -0.14053501, -0.06035376, -0.2722317, -1.5122633,
-1.5272249, -2.5325017, -5.14671373, -6.88223982, -7.36753035,
-7.43927145, -6.89403868, -6.8306222, -6.26507998, -5.93287086,
-5.59370756]
# See https://www.stata-press.com/data/r14/usmacro
inf = [np.nan, np.nan, np.nan, np.nan, -0.2347243,
0.37373397, 0.25006533, 1.04645514, 2.01665616, 2.58033299,
3.41399837, 3.60986805, 3.46304512, 3.08529949, 3.45609665,
3.27347994, 2.29982662, 1.91197193, 0.89083761, 0.390598,
0.96842253, 1.47531354, 1.39343977, 1.82488036, 1.35991514,
1.39598227, 1.50695646, 0.8690359, 1.20648873, 0.70517123,
0.89477205, 1.30740857, 1.20212376, 1.30043352, 1.22895002,
1.03573787, 1.36272156, 1.39236343, 1.48636675, 1.46398985,
1.07421875, 1.26611042, 1.1639185, 1.64622331, 1.71658623,
1.78565705, 2.41930342, 2.6897428, 3.27391338, 3.5685041,
2.87078357, 2.56671929, 2.70717716, 2.99242783, 3.74010396,
4.11855173, 4.47761202, 4.62397051, 4.87426901, 5.50198364,
5.52285719, 5.83354473, 6.22577858, 6.03848171, 5.68597221,
5.60000038, 4.81102371, 4.31496382, 4.27074528, 3.53535342,
3.50587225, 3.22580624, 3.02948403, 3.33414626, 4.1129365,
5.60817289, 6.83709764, 8.41692829, 9.91564655, 10.54788017,
11.45758915, 12.04798317, 11.13530636, 9.53939915, 8.67963028,
7.38337183, 6.34047985, 6.01503754, 5.58903217, 5.18573475,
5.90339899, 6.79609919, 6.57417107, 6.59522104, 6.47466183,
7.02936935, 8.02397346, 8.9289465, 9.78376389, 10.75433922,
11.72252846, 12.64148235, 14.20953751, 14.42577076, 12.93487072,
12.53929329, 11.26111889, 9.87392902, 10.85386753, 9.5831337,
7.58190918, 6.90676928, 5.81573057, 4.44292784, 3.59408045,
3.29905081, 2.52680969, 3.23384356, 4.62551022, 4.40519285,
4.29570436, 4.1543026, 3.64175439, 3.60676312, 3.35249043,
3.5137701, 3.1053853, 1.67858768, 1.66821122, 1.34587157,
2.03802228, 3.69979739, 4.16317225, 4.40493536, 3.96511626,
3.97994113, 4.1420536, 4.3066597, 4.67509222, 5.15961123,
4.70588255, 4.62759781, 5.23231459, 4.58372736, 5.56420517,
6.27646685, 5.25958157, 4.84686804, 3.85226536, 2.96485686,
2.89388347, 3.07301927, 3.07467055, 3.12198234, 3.17306924,
3.12524581, 2.8174715, 2.76977897, 2.53936958, 2.38237333,
2.85493255, 2.60332823, 2.84049082, 3.09557867, 2.66420412,
2.62607908, 2.78390908, 2.8270874, 2.8999064, 3.23162007,
2.94453382, 2.30179024, 2.22504783, 1.89075232, 1.48277605,
1.58312511, 1.59639311, 1.5253576, 1.68703699, 2.11280179,
2.34625125, 2.61982656, 3.25799918, 3.29342604, 3.46889949,
3.44350553, 3.40975904, 3.32491398, 2.67803454, 1.87507534,
1.23194993, 1.31765401, 1.57628381, 2.25352097, 2.97640777,
2.00593972, 2.21688938, 2.00165296, 1.81766617, 2.78586531,
2.67522621, 3.38513398, 3.0353508, 2.92293549, 3.81956744,
3.6745038, 3.69086194, 3.92426181, 3.34028482, 1.96539891,
2.43147993, 2.66511655, 2.34880662, 4.03147316, 4.13719845,
4.31058264, 5.25250196, 1.59580016, -0.1842365, -0.94229329,
-1.60695589, 1.48749816, 2.33687115, 1.78588998, 1.22873163,
1.21550024]
# See https://www.stata-press.com/data/r14/snp500
areturns = [1.60864139, 0.6581642, 0.91177338,
1.88970506, 0.76378739, 0.10790635, 0.29509732,
0.16913767, 1.30772412, 0.85901159, 0.92307973,
0.9833895, 0.9116146, 2.58575296, 0.36441925,
1.89720023, 0.65161127, 1.17255056, 0.53518051,
0.00534112, 1.25064528, 2.00023437, 0.79801333,
1.42980587, 0.02078664, 2.31948757, 2.78705025,
1.36003578, 0.15257211, 0.30815724, 0.40030465,
0.89941251, 0.36925647, 0.75660467, 0.87896836,
1.07261622, 0.1137321, 1.32838523, 1.03085732,
1.33930087, 0.66706187, 0.94959277, 1.07173061,
0.80687243, 1.35347247, 1.56781077, 0.71599048,
0.50293237, 0.33926481, 2.94415998, 0.72026408,
0.28967711, 1.05362082, 0.3702977, 2.05277085,
0.49342933, 0.03423685, 0.34392089, 1.01741159,
1.43457139, 0.03759775, 1.54626679, 1.07742834,
0.28664029, 0.72592038, 0.91093767, 0.06915179,
0.88005662, 0.47802091, 1.2907486, 0.57604247,
0.71046084, 0.81753206, 0.26241753, 2.57300162,
0.16590172, 0.2918649, 0.96136051, 1.6711514,
0.94229084, 1.83614326, 0.28854966, 0.35050908,
0.04593768, 0.07599987, 0.09888303, 0.12907109,
2.0099268, 0.23006552, 1.18803704, 0.99970037,
1.32702613, 0.45646569, 1.43720019, 0.04425191,
0.53156406, 0.45951003, 1.26583254, 0.26994073,
0.1238014, 0.53068936, 0.21927625, 0.73882329,
0.13153869, 0.97837049, 2.36890459, 2.29313374,
0.75562358, 0.08656374, 2.4979558, 0.64189923,
0.22916116, 2.27840376, 0.46641645, 2.02508688,
1.25530422, 1.27711689, 0.07773363, 0.23380435,
1.58663058, 0.19108967, 0.52218717, 0.18055375,
1.18262017, 0.47418493, 0.88282752, 0.98944044,
1.04560554, 0.65470523, 0.2604697, 0.14658713,
0.77688956, 1.10911596, 0.69967973, 1.04578161,
0.29641318, 0.98087156, 0.46531865, 0.11846001,
0.44440377, 1.11066306, 0.02238905, 0.19865835,
1.48028743, 0.27695858, 0.9391492, 1.70575404,
2.94507742, 0.35386264, 0.72816408, 1.80369282,
0.12440593, 1.04197288, 1.2957871, 1.35031664,
0.55384284, 1.13915396, 0.29186234, 1.21344364,
0.23005128, 0.85578758, 1.80613887, 1.55996382,
1.46395147, 0.59826899, 0.65880769, 1.68974137,
1.12778795, 4.19566727, 0.14379959, 2.09945345,
0.29264972, 1.25936544, 0.84738803, 0.54094779,
2.27655816, 1.48392296, 1.13808954, 1.16038692,
0.46204364, 2.09433556, 1.16782069, 2.0192802,
2.6190269, 1.63471925, 0.25279006, 2.64083171,
1.64290273, 2.42852569, 1.54714262, 1.14975035,
3.59362221, 1.16689992, 5.11030865, 1.81326246,
0.93489766, 1.38605726, 0.53841805, 1.02298951,
2.03038621, 2.8340385, 0.13691254, 3.18769765,
0.23076122, 1.95332313, 1.63122225, 2.66484141,
0.86377442, 1.1782372, 0.57231718, 1.11979997,
2.07001758, 0.08726255, 1.71130466, 1.04979181,
1.9825747, 3.43235064, 1.50204682, 1.75699294,
2.56816769, 0.75786251, 0.93131924, 1.45494628,
0.49975556, 0.32756457, 0.47183469, 3.3737793,
2.25759649, 0.34138981, 3.09048033, 10.32189178,
10.15319347, 0.12398402, 4.65263939, 7.62032652,
7.04052448, 4.55579329, 3.52704573, 3.38968754,
3.00466204, 0.46617937, 1.42808878, 1.00660408,
4.65142584, 5.20996618, 4.80301046, 0.99780792,
1.15280604, 1.87296033, 4.60985804, 5.41294718,
6.06733084, 3.18375754, 10.0548315, 4.22182512,
1.24640226, 2.66358495, 2.60049844, 0.00352026,
1.02208447, 4.09924603, 1.27764511, 0.90124834,
0.5303241, 3.84383249, 1.24640775, 1.39796948,
2.34609175, 1.7742399, 3.56689548, 1.27681601,
5.32056713, 3.19770503, 1.89575887, 0.59274858,
0.64010525, 2.65920091, 0.81912726, 0.4868626,
3.13063931, 1.3960743, 1.03451502, 1.28983963,
3.27489519, 1.41772103, 2.00014663, 2.02787399,
3.50289273, 1.65296888, 0.02450024, 0.04084374,
0.17252181, 0.78132814, 0.20216605, 1.48436368,
0.3301619, 1.12080252, 0.00699845, 3.87074757,
0.84627002, 2.26680374, 2.07992935, 1.62452054,
0.66078293, 2.26608515, 1.58540344, 0.98763937,
0.25370923, 1.2576412, 1.07146478, 0.48786601,
0.02327727, 1.29385257, 3.52217674, 1.05305433,
5.13598871, 1.43351507, 2.12951326, 3.03700447,
0.65843326, 4.28524971, 2.3428576, 4.72853422,
0.58606911, 2.70345545, 0.8207835, 0.16228235,
2.80714321, 1.97183621, 0.5928334, 3.61601782,
1.82700455, 1.52638936, 0.72525144, 0.6499536,
1.58741212, 0.72647524, 0.65064299, 0.43771812,
2.68048692, 2.20902133, 0.0988697, 0.31138307,
2.79860616, 1.13209391, 0.91427463, 0.69550049,
0.68990183, 0.65359998, 1.04932129, 0.00310441,
0.48663121, 1.68144464, 0.99051267, 0.22263506,
0.97846323, 0.55040002, 2.56734443, 0.12510587,
2.15363359, 1.18440747, 0.66974002, 0.48981813,
2.08285856, 1.03952742, 1.00747502, 0.52523118,
0.81593889, 0.22168602, 2.73786068, 1.21678591,
0.235705, 0.56248677, 3.66057348, 0.35822684,
0.97550339, 1.21677041, 4.03415823, 9.10342026,
2.24355674, 3.6120553, 4.36456299, 0.83891636,
1.07712805, 2.28685427, 4.04548168, 1.67408013,
4.57762337, 2.47123241, 1.88890803, 1.62245703,
0.02149973, 0.48483402, 4.40716505, 0.28621164,
4.56798553, 1.6255945, 0.6124717, 2.72943926,
0.80645156, 1.26738918, 0.91451788, 1.59959269,
0.0356785, 1.93719864, 0.42164543, 0.87313241,
0.52508104, 0.44771862, 1.38226497, 1.83891225,
0.00711749, 0.26621303, 2.25254321, 0.27307722,
0.26436633, 1.80608702, 2.29477572, 2.0931437,
2.2915051, 0.82041657, 2.09074521, 1.87793779,
2.15142703, 1.549685, 2.44940472, 0.45297864,
0.35515305, 0.23224437, 1.77138305, 0.98827285,
0.98435384, 0.80031335, 0.49445853, 0.36061874,
2.15444446, 1.92558503, 0.75404048, 0.31921348,
0.32092738, 0.48054051, 0.98650485, 1.1810472,
0.28533801, 3.02953291, 0.16818592, 2.20164418,
0.3911584, 0.6942575, 0.55016953, 0.06157291,
0.19509397, 2.3744297, 0.73775989, 1.12842739,
0.87197775, 0.30168825, 0.71310955, 0.27689508,
1.13476491, 1.60331428, 1.56165123, 0.31513214,
0.02698154, 0.49029687, 0.17265303, 0.36386153,
0.56225872, 1.59077382, 1.84919345, 1.4230696,
1.28607559, 0.57890779, 1.14760947, 0.22594096,
0.43510813, 2.90668917, 1.49716794, 1.9549973,
2.10786223, 0.71948445, 0.19396119, 0.86563414,
0.63498968, 2.3593328, 0.18950517, 0.45737442,
1.82937241, 1.72589195, 0.29414186, 0.74434268,
1.22564518, 2.01444268, 2.32068515, 0.98414028,
0.1174908, 0.22450124, 1.24669802, 0.70953292,
0.21857196, 0.11119327, 0.60500813, 2.04446197,
1.146896, 0.54849964, 0.23402978, 0.32219616,
2.7076292, 1.57800817, 2.08260155, 1.81090641,
0.45189673, 1.01260054, 0.65379494, 0.94736898,
0.37556711, 0.44287458, 0.34578958, 1.48449266,
1.95924711, 0.09717447]
# See https://www.stata-press.com/data/r14/mumpspc
# Note that this has already been seasonally differenced at period 12
mumpspc = [0.29791319, 0.41467956, 1.13061404, 1.23267496,
1.55659747, 1.41078568, 0.45335022, 0.1419628,
0.03802268, 0.04621375, 0.01261204, 0.04653099,
0.10195512, 0.18079406, -0.1898452, -0.24501109,
-0.71440864, -0.82188988, -0.32300544, -0.07680188,
-0.0183593, -0.02145147, -0.14442876, -0.13897884,
-0.41970083, | |
from __future__ import annotations
import torch
import numpy as np
from torch import nn
from torch import Tensor
from collections import OrderedDict
from typing import List
from functools import partial
from glasses.nn.blocks import ConvBnAct
from glasses.nn.att import ChannelSE
from ....models.utils.scaler import CompoundScaler
from ....models.base import Encoder
from ..resnet import ResNetLayer
from glasses.utils.weights.PretrainedWeightsProvider import pretrained
from ..base import ClassificationModule
from glasses.nn import StochasticDepth
class InvertedResidualBlock(nn.Module):
    """Inverted residual block proposed originally for MobileNetV2.

    The input is expanded with a point-wise conv, filtered with a depth-wise
    conv, optionally re-weighted by channel squeeze-and-excitation, and then
    projected back down with a linear (no activation) point-wise conv. When
    neither the spatial size nor the channel count changes, the input is
    added back as a residual, optionally dropped via stochastic depth.

    .. image:: https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/EfficientNetBasicBlock.png?raw=true

    Args:
        in_features (int): Number of input features
        out_features (int): Number of output features
        stride (int, optional): Stride used in the depth convolution. Defaults to 1.
        expansion (int, optional): The expansion ratio applied. Defaults to 6.
        activation (nn.Module, optional): The activation function used. Defaults to nn.SiLU.
        drop_rate (float, optional): Drop probability of the stochastic-depth module on the residual path. Defaults to 0.2.
        se (bool, optional): If True, add a ChannelSE module after the depth convolution. Defaults to True.
        kernel_size (int, optional): Kernel size of the depth-wise convolution. Defaults to 3.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        stride: int = 1,
        expansion: int = 6,
        activation: nn.Module = nn.SiLU,
        drop_rate: float = 0.2,
        se: bool = True,
        kernel_size: int = 3,
        **kwargs
    ):
        super().__init__()
        expanded = in_features * expansion
        # The shortcut is only valid when the block neither downsamples
        # nor changes the number of channels (as in MobileNetV2).
        self.should_apply_residual = stride == 1 and in_features == out_features

        stages = OrderedDict()
        # 1x1 expansion conv; skipped entirely when the ratio is 1.
        stages["exp"] = (
            ConvBnAct(
                in_features,
                expanded,
                activation=activation,
                kernel_size=1,
            )
            if expansion > 1
            else nn.Identity()
        )
        # Depth-wise conv (groups == channels) performs the spatial filtering.
        stages["depth"] = ConvBnAct(
            expanded,
            expanded,
            activation=activation,
            kernel_size=kernel_size,
            stride=stride,
            groups=expanded,
            **kwargs,
        )
        # Squeeze-and-excitation attention, applied after the depth-wise step.
        stages["att"] = (
            ChannelSE(
                expanded,
                reduced_features=in_features // 4,
                activation=activation,
            )
            if se
            else nn.Identity()
        )
        # Linear bottleneck: 1x1 projection with no activation.
        stages["point"] = nn.Sequential(
            ConvBnAct(
                expanded,
                out_features,
                kernel_size=1,
                activation=None,
            )
        )
        # Stochastic depth only makes sense when the residual is applied.
        stages["drop"] = (
            StochasticDepth(drop_rate)
            if self.should_apply_residual and drop_rate > 0
            else nn.Identity()
        )
        self.block = nn.Sequential(stages)

    def forward(self, x: Tensor) -> Tensor:
        out = self.block(x)
        if self.should_apply_residual:
            out = out + x
        return out
# EfficientNet reuses MobileNetV2's inverted residual as its basic block.
EfficientNetBasicBlock = InvertedResidualBlock
# The stem is a plain conv + batch-norm + activation module.
EfficientNetStem = ConvBnAct
# An EfficientNet layer is a ResNet-style stage built from inverted residual blocks.
EfficientNetLayer = partial(ResNetLayer, block=EfficientNetBasicBlock)
class EfficientNetEncoder(Encoder):
    """
    EfficientNet encoder composed by multiple different layers with increasing features.
    Be aware that `widths` and `strides` also include the width and stride for the stem in the first position.
    Args:
        in_channels (int, optional): Number of channels of the input image. Defaults to 3.
        widths (List[int], optional): Width of the stem and of each layer (last entry is the final 1x1 conv width). Defaults to [32, 16, 24, 40, 80, 112, 192, 320, 1280].
        depths (List[int], optional): Number of blocks per layer. Defaults to [1, 2, 2, 3, 3, 4, 1].
        strides (List[int], optional): Stride of the stem followed by the stride of each layer. Defaults to [2, 1, 2, 2, 2, 1, 2, 1].
        expansions (List[int], optional): Expansion ratio per layer. Defaults to [1, 6, 6, 6, 6, 6, 6].
        kernel_sizes (List[int], optional): Depth-wise kernel size per layer. Defaults to [3, 3, 5, 3, 5, 5, 3].
        se (List[bool], optional): Whether each layer uses squeeze-and-excitation. Defaults to [True, True, True, True, True, True, True].
        drop_rate (float, optional): Stochastic-depth drop probability. Defaults to 0.2.
        activation (nn.Module, optional): Activation used throughout. Defaults to nn.SiLU.
    """
    def __init__(
        self,
        in_channels: int = 3,
        widths: List[int] = [32, 16, 24, 40, 80, 112, 192, 320, 1280],
        depths: List[int] = [1, 2, 2, 3, 3, 4, 1],
        strides: List[int] = [2, 1, 2, 2, 2, 1, 2, 1],
        expansions: List[int] = [1, 6, 6, 6, 6, 6, 6],
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        se: List[bool] = [True, True, True, True, True, True, True],
        drop_rate: float = 0.2,
        stem: nn.Module = EfficientNetStem,
        activation: nn.Module = partial(nn.SiLU, inplace=True),
        **kwargs
    ):
        # NOTE(review): the list defaults are mutable default arguments; they
        # are only read (sliced/zipped) here, never mutated, so this is safe
        # as written.
        super().__init__()
        self.widths, self.depths = widths, depths
        self.strides, self.expansions, self.kernel_sizes = (
            strides,
            expansions,
            kernel_sizes,
        )
        # The first width/stride pair configures the stem.
        self.stem = stem(
            in_channels,
            widths[0],
            activation=activation,
            kernel_size=3,
            stride=strides[0],
        )
        # Remaining strides belong to the layers proper.
        strides = strides[1:]
        # Pair consecutive widths into (in, out) per layer; the final width
        # (e.g. 1280) is excluded here and handled by the appended 1x1 conv.
        self.in_out_widths = list(zip(widths, widths[1:-1]))
        self.layers = nn.ModuleList(
            [
                *[
                    EfficientNetLayer(
                        in_features,
                        out_features,
                        depth=n,
                        stride=s,
                        expansion=t,
                        kernel_size=k,
                        se=se,
                        drop_rate=drop_rate,
                        activation=activation,
                        **kwargs,
                    )
                    # note: `se` here rebinds the parameter name to the
                    # per-layer bool from the zip
                    for (in_features, out_features), n, s, t, k, se in zip(
                        self.in_out_widths,
                        depths,
                        strides,
                        expansions,
                        kernel_sizes,
                        se,
                    )
                ]
            ]
        )
        # Final 1x1 conv-bn-act expanding to the last width (e.g. 320 -> 1280).
        self.layers.append(
            ConvBnAct(
                self.widths[-2], self.widths[-1], activation=activation, kernel_size=1
            )
        )
    def forward(self, x):
        # Run the stem, then every layer (including the final 1x1 conv).
        x = self.stem(x)
        for layer in self.layers:
            x = layer(x)
        return x
    @property
    def stages(self):
        # find the layers where the input is // 2
        # skip first stride because it is for the stem!
        # skip the last layer because it is just a conv-bn-act
        # and we haven't a stride for it
        # NOTE(review): `self.stem[-1]` assumes the stem is an
        # nn.Sequential-like indexable module — confirm for custom stems.
        layers = np.array(self.layers[:-1])[np.array(self.strides[1:]) == 2].tolist()[
            :-1
        ]
        return [self.stem[-1], *layers]
    @property
    def features_widths(self):
        # skip the last layer because it is just a conv-bn-act
        # and we haven't a stride for it
        widths = np.array(self.widths[:-1])[np.array(self.strides) == 2].tolist()
        # we also have to remove the last one, because it is the spatial size of the network output
        return widths[:-1]
class EfficientNetHead(nn.Sequential):
    """Classification head of EfficientNet.

    Globally average-pools the encoder output, applies dropout, and maps the
    flattened features to `n_classes` with a fully connected layer.
    """

    def __init__(self, in_features: int, n_classes: int, drop_rate: float = 0.2):
        super().__init__()
        self.avg = nn.AdaptiveAvgPool2d((1, 1))
        self.drop = nn.Dropout2d(drop_rate)
        self.fc = nn.Linear(in_features, n_classes)

    def forward(self, x):
        # pool to (N, C, 1, 1), flatten to (N, C), regularize, classify
        pooled = self.avg(x)
        flat = pooled.reshape(pooled.size(0), -1)
        return self.fc(self.drop(flat))
class EfficientNet(ClassificationModule):
    """Implementation of EfficientNet proposed in `EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks <https://arxiv.org/abs/1905.11946>`_
    .. image:: https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/EfficientNet.png?raw=true
    The basic architecture is similar to MobileNetV2 as was computed by using `Progressive Neural Architecture Search <https://arxiv.org/abs/1905.11946>`_ .
    The following table shows the basic architecture (EfficientNet-efficientnet_b0):
    .. image:: https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/EfficientNetModelsTable.jpeg?raw=true
    Then, the architecture is scaled up from `-efficientnet_b0` to `-efficientnet_b7` using compound scaling.
    .. image:: https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/EfficientNetScaling.jpg?raw=true
    .. code-block:: python
        EfficientNet.efficientnet_b0()
        EfficientNet.efficientnet_b1()
        EfficientNet.efficientnet_b2()
        EfficientNet.efficientnet_b3()
        EfficientNet.efficientnet_b4()
        EfficientNet.efficientnet_b5()
        EfficientNet.efficientnet_b6()
        EfficientNet.efficientnet_b7()
        EfficientNet.efficientnet_b8()
        EfficientNet.efficientnet_l2()
    Examples:
        .. code-block:: python
            EfficientNet.efficientnet_b0(activation = nn.SELU)
            # change number of classes (default is 1000 )
            EfficientNet.efficientnet_b0(n_classes=100)
            # pass a different block
            EfficientNet.efficientnet_b0(block=...)
            # store each feature
            x = torch.rand((1, 3, 224, 224))
            model = EfficientNet.efficientnet_b0()
            # first call .features, this will activate the forward hooks and tells the model you'll like to get the features
            model.encoder.features
            model(torch.randn((1,3,224,224)))
            # get the features from the encoder
            features = model.encoder.features
            print([x.shape for x in features])
            # [torch.Size([1, 32, 112, 112]), torch.Size([1, 24, 56, 56]), torch.Size([1, 40, 28, 28]), torch.Size([1, 80, 14, 14])]
    Args:
        in_channels (int, optional): Number of channels in the input Image (3 for RGB and 1 for Gray). Defaults to 3.
        n_classes (int, optional): Number of classes. Defaults to 1000.
    """
    models_config = {
        # name : width_factor, depth_factor, dropout_rate
        "efficientnet_b0": (1.0, 1.0, 0.2),
        "efficientnet_b1": (1.0, 1.1, 0.2),
        "efficientnet_b2": (1.1, 1.2, 0.3),
        "efficientnet_b3": (1.2, 1.4, 0.3),
        "efficientnet_b4": (1.4, 1.8, 0.4),
        "efficientnet_b5": (1.6, 2.2, 0.4),
        "efficientnet_b6": (1.8, 2.6, 0.5),
        "efficientnet_b7": (2.0, 3.1, 0.5),
        "efficientnet_b8": (2.2, 3.6, 0.5),
        "efficientnet_l2": (4.3, 5.3, 0.5),
    }
    # baseline (b0) configuration that compound scaling is applied to
    default_depths: List[int] = [1, 2, 2, 3, 3, 4, 1]
    default_widths: List[int] = [32, 16, 24, 40, 80, 112, 192, 320, 1280]
    def __init__(
        self,
        encoder: nn.Module = EfficientNetEncoder,
        head: nn.Module = EfficientNetHead,
        *args,
        **kwargs
    ):
        # NOTE(review): 'drop_rate' must be present in kwargs — from_config
        # always supplies it, but constructing EfficientNet(...) directly
        # without drop_rate raises KeyError here.
        super().__init__(
            encoder, partial(head, drop_rate=kwargs["drop_rate"]), *args, **kwargs
        )
        self.initialize()
    def initialize(self):
        """Initialize conv/bn/linear weights (scheme copied from MobileNetV2)."""
        # initialization copied from MobileNetV2
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
    @classmethod
    def from_config(cls, config, key, *args, **kwargs) -> EfficientNet:
        """Build a variant named `key` by compound-scaling the b0 baseline."""
        width_factor, depth_factor, drop_rate = config[key]
        widths, depths = CompoundScaler()(
            width_factor, depth_factor, cls.default_widths, cls.default_depths
        )
        return cls(
            *args,
            depths=depths,
            widths=widths,
            drop_rate=drop_rate,
            **kwargs,
        )
    @classmethod
    @pretrained()
    def efficientnet_b0(cls, *args, **kwargs) -> EfficientNet:
        return cls.from_config(cls.models_config, "efficientnet_b0", *args, **kwargs)
    @classmethod
    @pretrained()
    def efficientnet_b1(cls, *args, **kwargs) -> EfficientNet:
        return cls.from_config(cls.models_config, "efficientnet_b1", *args, **kwargs)
    @classmethod
    @pretrained()
    def efficientnet_b2(cls, *args, **kwargs) -> EfficientNet:
        return cls.from_config(cls.models_config, "efficientnet_b2", *args, **kwargs)
    @classmethod
    @pretrained()
    def efficientnet_b3(cls, *args, **kwargs) -> EfficientNet:
        return cls.from_config(cls.models_config, "efficientnet_b3", *args, **kwargs)
    @classmethod
    @pretrained()
    def efficientnet_b4(cls, *args, **kwargs) -> EfficientNet:
        return cls.from_config(cls.models_config, "efficientnet_b4", *args, **kwargs)
    # NOTE(review): b5..l2 have no @pretrained() decorator — presumably no
    # pretrained weights are registered for these variants; confirm.
    @classmethod
    def efficientnet_b5(cls, *args, **kwargs) -> EfficientNet:
        return cls.from_config(cls.models_config, "efficientnet_b5", *args, **kwargs)
    @classmethod
    def efficientnet_b6(cls, *args, **kwargs) -> EfficientNet:
        return cls.from_config(cls.models_config, "efficientnet_b6", *args, **kwargs)
    @classmethod
    def efficientnet_b7(cls, *args, **kwargs) -> EfficientNet:
        return cls.from_config(cls.models_config, "efficientnet_b7", *args, **kwargs)
    @classmethod
    def efficientnet_b8(cls, *args, **kwargs) -> EfficientNet:
        return cls.from_config(cls.models_config, "efficientnet_b8", *args, **kwargs)
    @classmethod
    def efficientnet_l2(cls, *args, **kwargs) -> EfficientNet:
        return cls.from_config(cls.models_config, "efficientnet_l2", *args, **kwargs)
class EfficientNetLite(EfficientNet):
"""Implementations of EfficientNetLite proposed | |
#import required packages
import html.parser
import re
import string
import urllib.parse
import urllib.request

import numpy as np
import pandas as pd
def read_glove_vecs(file):
    """Load GloVe embeddings from *file*.

    Each line is expected to contain a word followed by its vector
    components, separated by whitespace.

    Returns:
        (words, word_to_vec_map): the set of vocabulary words and a dict
        mapping each word to its embedding as a float64 numpy array.
    """
    words = set()
    word_to_vec_map = {}
    # GloVe files are UTF-8; be explicit so the script does not depend on
    # the platform default encoding.
    with open(file, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split()
            if not parts:
                continue  # tolerate blank lines
            word = parts[0]
            words.add(word)
            word_to_vec_map[word] = np.array(parts[1:], dtype=np.float64)
    return words, word_to_vec_map
# Load the pre-trained 50-dimensional GloVe embeddings into memory.
words, word_to_vec_map = read_glove_vecs('data/glove.6B.50d.txt') # replace file path with your location for 50-d embeddings
# size of embeddings.
vector_dim = 50
#read 'data_dump' file
# Tab-separated, headerless dump; column 0 holds the raw text entries.
df1 = pd.read_csv('data_dump.txt' ,sep = '\t', header = None)
# code for translating text to english, inspired by code shared on GitHub
agent = {'User-Agent':
"Mozilla/4.0 (\
compatible;\
MSIE 6.0;\
Windows NT 5.1;\
SV1;\
.NET CLR 1.1.4322;\
.NET CLR 2.0.50727;\
.NET CLR 3.0.04506.30\
)"}
def unescape(text):
    """Convert HTML character entities in *text* back to plain characters."""
    # BUG FIX: html.parser.HTMLParser.unescape() was deprecated in
    # Python 3.4 and removed in Python 3.9; html.unescape() is the
    # supported replacement (the top-level `html` package is already
    # bound by `import html.parser`).
    return html.unescape(text)
def translate(sent_to_translate, to_language="auto", from_language="auto"):
    """Translate *sent_to_translate* by scraping the Google Translate mobile page.

    Returns the translated (and HTML-unescaped) string, or '' when no
    translation is found in the response.
    """
    quoted = urllib.parse.quote(sent_to_translate)
    url = (
        f"https://translate.google.com/m?hl={to_language}"
        f"&sl={from_language}&q={quoted}"
    )
    req = urllib.request.Request(url, headers=agent)
    page = urllib.request.urlopen(req).read().decode("utf-8")
    # The mobile page wraps the translation in a <div class="t0"> element.
    matches = re.findall(r'class="t0">(.*?)<', page)
    if not matches:
        return ''
    return unescape(matches[0])
#pass all entries from data set into translator, and add them to list 'lst'
# and print out no. completed after every 500 samples
j = 0
lst = []
for i in df1[0]:
    j+=1
    lst.append(translate(i, to_language = 'en'))
    if j%500 == 0: print ('{} entries completed'.format(j))
# make a new column in the dataframe that shows the translated text
df1['new'] = lst
#separate entries like '2am' to '2 am', ie, add space b/w number and text after making it lower case.
df1.new = df1.new.map(lambda x: (re.sub('(\d)([a-zA-Z]+)', r'\1 \2', x.lower())))
#transforms the text into a list, with words separated from punctuations (just doing a text.split() would have tokens where
#punctuations immediately following a character would not be separated from the character)
df1.new = df1.new.map(lambda x: re.findall('[\w]+|[,;.?!#&]', x))
# the common stop words in the english language. I didn't have access to nltk.stopwords, so I did it this way, as
# the stop words were easily available on a google search
# transformed into a list
lst3 = 'i,me,my,myself,we,our,ours,ourselves,you,your,yours,yourself,yourselves,he,him,his,himself,she,her,hers,herself,it,its,itself,they,them,their,theirs,themselves,what,which,who,whom,this,that,these,those,am,is,are,was,were,be,been,being,have,has,had,having,do,does,did,doing,a,an,the,and,but,if,or,because,as,until,while,of,at,by,for,with,about,against,between,into,through,during,before,after,above,below,to,from,up,down,in,out,on,off,over,under,again,further,then,once,here,there,when,where,why,how,all,any,both,each,few,more,most,other,some,such,no,nor,not,only,own,same,so,than,too,very,s,t,can,will,just,don,should,now'
lst3 = lst3.split(',')
# for use later on
def cosine_similarity(x, y):
    """Return the cosine similarity between 1-D numpy vectors *x* and *y*.

    The result lies in [-1, 1]: 1.0 for identical directions, 0.0 for
    orthogonal vectors. Neither vector may be all-zero (division by zero).
    """
    dot = np.dot(x, y)
    # Product of the L2 norms; np.linalg.norm is the idiomatic spelling of
    # sqrt(sum(v**2)).
    denom = np.linalg.norm(x) * np.linalg.norm(y)
    return dot / denom
# train your own word embeddings on top of pre-trained GLoVe vectors
# training our own word vectors on our own corpus helps find further similarities between the words
# TO MAKE DATA INTO THE FORMAT REQUIRED BY KERAS:
# make the entire text from the data set into one huge corpus, with each example separated from the rest
# by 4 spaces (4 has been used because later on, during the generation of skipgrams, a window size of 4 wil be used
# to identify context words of a target word)
# if the spaces weren't added, in the huge corpus, if one sentene ends in a positive way like 'good', and the
# next sentence started with 'fuck' then 'good' would turn into a context word for 'fuck', thereby pushing their embeddings
# towards each other, making their cosine similarity increase
lst4 = []
for i in df1.index:
    lst4 += df1.new.loc[i] + [(' '), (' '), (' '), (' ')]
# do similar pre-processing to remove punctuations and stop words.
# BUG FIX: the previous version called lst4.remove(i) inside a list
# comprehension iterating over lst4 itself — mutating a list while
# iterating it skips the element after each removal, and .remove()
# deletes the first occurrence rather than the current one. Filtering
# into a new list removes every punctuation/stop-word token correctly.
lst4 = [tok for tok in lst4 if tok not in string.punctuation]
lst4 = [tok for tok in lst4 if tok not in lst3]
# make it into a series to get unique entries of the huge corpus
# (set() could have been used, but it generates an unordered set of words, which changes the index being assigned to each word
# each time the code is run.)
series = pd.Series(lst4)
dic = {}
# make a dictionary of words with corresponding indexes
# Here, the index of a particular word remains same every time the code is run
for index, word in enumerate(series.unique()):
    dic[word] = index
# transform the huge corpus into corresponding indexes
for i, j in enumerate(lst4):
    lst4[i] = dic[j]
# TRAIN NEW WORD EMBEDDINGS ON CORPUS
# import necessary keras modules
from keras.preprocessing import sequence
from keras.layers import Dot, Reshape, Dense
from keras.models import Model
# size of the vocabulary ,ie, no. of unique words in corpus
vocab_size = len(dic) + 1
# sampling table used to make skipgrams, so that in the negative samples, the most common words are assigned a lower weight
sampling_table = sequence.make_sampling_table(vocab_size)
# Context window used for skipgram generation.
# BUG FIX: `window_size` was referenced below without ever being defined
# (NameError at runtime); 4 matches the 4-space separator inserted between
# examples when the corpus was built above.
window_size = 4
# make the skipgrams from the corpus, with a window size of 4 for the context words and use samples generated by previous line
# returns tuple of (target word, context word) and associated label of the tuple (1 for whether context in tuple is in fact
# context for the word in the actual data set, 0 otherwise)
tuples, labels = sequence.skipgrams(lst4, vocab_size, window_size=window_size, sampling_table=sampling_table)
# extract the target and context words and convert them into arrays (bear in mind that target and context words are
# now represented by their corresponding indexes from 'dic' dictionary)
target_word, context_word = zip(*tuples)
word_target = np.array(target_word, dtype="int32")
word_context = np.array(context_word, dtype="int32")
# make a new embedding matrix. The pre-trained GloVe vectors are going to be loaded into this matrix
# initialise with zeros
embedding_matrix = np.zeros((vocab_size, vector_dim))
# corresponding to the index of each row of embedding matrix, fill in the values of 50 dimensional word embedddings
for word, index in dic.items():
    try:
        embedding_matrix[index, :] = word_to_vec_map[word]
    except KeyError:
        # word not present in GloVe: leave its row as the zeros it was
        # initialized with (narrowed from a bare `except` so real errors
        # are no longer silently swallowed)
        continue
# START BUILDING THE KERAS MODEL FOR TRAINING
# BUG FIX: Input and Embedding were used below but never imported.
from keras.layers import Input, Embedding
input_target = Input((1,))
input_context = Input((1,))
# make a Keras embedding layer of shape (vocab_size, vector_dim) and set 'trainable' argument to 'True'
embedding = Embedding(input_dim = vocab_size, output_dim = vector_dim, input_length = 1, name='embedding', trainable = True)
# load pre-trained weights(embeddings) from 'embedding_matrix' into the Keras embedding layer
embedding.build((None,))
embedding.set_weights([embedding_matrix])
# run the context and target words through the embedding layer
context = embedding(input_context)
context = Reshape((vector_dim, 1))(context)
target = embedding(input_target)
target = Reshape((vector_dim, 1))(target)
# compute the dot product of the context and target words, to find the similarity (dot product is usually a measure of similarity)
dot = Dot(axes = 1)([context, target])
dot = Reshape((1,))(dot)
# pass it through a 'sigmoid' activation neuron; this is then compared with the value in 'label' generated from the skipgram
out = Dense(1, activation = 'sigmoid')(dot)
# create model instance
# BUG FIX: Keras 2 renamed the Model kwargs to `inputs`/`outputs`;
# `input=`/`output=` raise a TypeError on current versions.
model = Model(inputs = [input_context, input_target], outputs = out)
model.compile(loss = 'binary_crossentropy', optimizer = 'adam')
# fit the model, default batch_size of 32
# running for 10 epochs seems to generate good enough results, although running for more iterations may improve performance further
model.fit(x = [word_target, word_context], y = labels, epochs = 10,)
# get the new word embeddings and save the new array of shape (vocab_size, vector_dim), to 'word_vecs'.
# here, the second layer of the model is the embedding layer, as can be seen from the index '[2]'
word_vecs = model.layers[2].get_weights()[0]
# NOTE: since 'make_sampling table' and 'skipgrams' select context and target words randomly, each time the model is trained,
# it will give rise to slightly different word embeddings that ultimately will give poor results when used to find cosine similarity
# with the existing comparing values, such as .45 for 'fuck' etc
# So, in order to obtain results good results with existing comparison values, load the embeddings of shape
# (vocab_size, vector_dim) from 'weights.npy' file
# please make sure that you have loaded the 'weights.npy' file into the current directory
# This overwrites the embeddings trained above with the saved reference run.
word_vecs = np.load('weights.npy')
# loop through data set and lookup the cosine similarites of the sentence with embeddings of different words
# such as 'appropriate', 'fucking' etc.
# The values have been hard coded after a lot of experimentation, and attempts to strike a balance between recall and
# precision, although it is impossible to get an exact figure for each without a supervised approach.
# Placeholder column for the per-row score computed in the loop below.
df1['new1_trained'] = 0
lst2 = []
for i in df1.index:
lst = []
words = [j for j in df1.new.loc[i] if j.isalpha()]
words = [j for j in words if not j in string.punctuation]
words = [j for j in words if not j in lst3 + ['hello', 'hi', 'hey']]
for word in words:
try:
lst.append(word_vecs[dic[word]]) | |
[t[1], t[2],t[3],t[4]] , childsProduction)
addCad("**\<ASIG_BASICA>** ::= ':''=' \<EXP> ';' ")
t[0] = Declaration(t[1], False, None, True, t[4], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
elif len(t) == 4:
childsProduction = addNotNoneChild(t,[3])
graph_ref = graph_node(str("asig_basica"), [t[1], t[2],t[3]] , childsProduction)
addCad("**\<ASIG_BASICA>** ::= '=' \<EXP> ';' ")
t[0] = Declaration(t[1], False, None, True, t[3], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
def p_stm_perform(t):
    '''stm_perform : PERFORM ID PARA TEXTO COMA ID PARC '''
    # PLY rule: the docstring above IS the grammar production — do not edit.
    # The rule has 7 symbols, so len(t) is always 8 and the else branch is
    # unreachable in practice.
    if len(t) == 8:
        graph_ref = graph_node(str("stm_perform"), [t[1], t[2], t[3], t[4], t[5], t[6],t[7]] ,[])
        addCad("**\<STM_PERFORM>** ::= tPerform tIdentifier PARA tTexto ',' tIdentifier PARC ';' ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #####
    else:
        t[0]=None
def p_stm_if(t):
    '''stm_if : IF condition THEN if_inst elseif_opt else_opt END IF '''
    # PLY rule: docstring above is the grammar production — do not edit.
    # Builds an IfNode from the condition (t[2]), the then-branch statement
    # list (t[4]) and the optional elseif/else parts (t[5], t[6]). Graph
    # generation is currently disabled (graph_ref = None).
    #childsProduction = addNotNoneChild(t,[2,4,5,6])
    #graph_ref = graph_node(str("stm_if"), [t[1], t[2], t[3], t[4],t[5],t[6],t[7],t[8]], childsProduction)
    #addCad("**\<STM_IF>** ::= tIf \<CONDITION> THEN [\<IF_INST>] [\<ELSEIF_OPT>] [\<ELSE_OPT>] tEnd tIf ';' ")
    graph_ref = None
    t[0] = IfNode(t[2], t[4], t[5], t[6], t.slice[1].lineno, t.slice[1].lexpos,graph_ref)
def p_condition(t):
    '''condition : NOT FOUND
                | predicateExpression
    '''
    # PLY rule: docstring above is the grammar production — do not edit.
    if len(t) == 3:
        # NOT FOUND alternative (two symbols).
        graph_ref = graph_node(str("condition"), [t[1], t[2]],[])
        addCad("**\<CONDITION>** ::= tNot tFound ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif len(t) == 2:
        # Single predicateExpression: propagate its node upward unchanged.
        childsProduction = addNotNoneChild(t,[1])
        graph_ref = graph_node(str("condition"), [t[1]], childsProduction)
        addCad("**\<CONDITION>** ::= [\<EXP_PREDICATE> | \<GROUP_LIST>] ")
        t[0] = t[1]
        #print(t)
def p_elseif_opt(t):
    '''elseif_opt : ELSEIF condition THEN if_inst '''
    # PLY rule: docstring above is the grammar production — do not edit.
    # An ELSEIF is represented as a nested IfNode with no elseif/else parts.
    if len(t) == 5:
        #childsProduction = addNotNoneChild(t,[2,4])
        #graph_ref = graph_node(str("elseif_opt"), [t[1], t[2], t[3], t[4]],childsProduction )
        #addCad("**\<ELSEIF_OPT>** ::= tElseIf \<CONDITION> tThen \<IF_INST> ")
        graph_ref = None
        t[0] = IfNode(t[2], t[4], None, None, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
    #print(t)
def p_elseif_opt0(t):
    '''elseif_opt : empty '''
    # Missing elseif part.
    t[0] = None
    #print(t)
def p_else_opt(t):
    '''else_opt : ELSE if_inst '''
    # PLY rule: docstring above is the grammar production — do not edit.
    # NOTE(review): the addCad text below says ELSEIF_OPT although this is
    # the ELSE rule — report-text copy-paste; it only affects the grammar
    # report, not parsing.
    if len(t) == 3:
        lista = None
        childsProduction = []
        # if_inst yields a statement list; use its first element for the graph.
        # NOTE(review): assumes t[2] is indexable when not None — confirm for
        # the non-list if_inst branches.
        if t[2] != None:
            lista = t[2][0]
            childsProduction.append(lista.graph_ref)
        graph_ref = graph_node(str("else_opt"), [t[1], lista], childsProduction )
        addCad("**\<ELSEIF_OPT>** ::= tElseIf \<CONDITION> tThen \<IF_INST> ")
        t[0] = ElseNode(t[2], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)
    #print(t)
def p_else_opt0(t):
    '''else_opt : empty '''
    # Missing else part.
    t[0] = None
    #print(t)
def p_if_inst(t):
    '''if_inst : if_inst statements_sql PUNTOCOMA
            | if_inst raise_op
            | if_inst asig_basica PUNTOCOMA
    '''
    # PLY rule: docstring above is the grammar production — do not edit.
    # Left-recursive accumulator for the statements inside an IF branch;
    # the alternative taken is identified by the type of the second symbol.
    token = t.slice[2]
    if token.type == "statements_sql":
        childsProduction = addNotNoneChild(t,[1,2])
        graph_ref = graph_node(str("if_inst"), [t[1], t[2]],childsProduction )
        addCad("**\<IF_INST>** ::= if_inst \<STATEMENTS_SQL> ';' ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif token.type == "raise_op":
        childsProduction = addNotNoneChild(t,[1,2])
        graph_ref = graph_node(str("if_inst"), [t[1], t[2]],childsProduction )
        addCad("**\<IF_INST>** ::= if_inst \<RAISE_OP> ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif token.type == "asig_basica":
        #childsProduction = addNotNoneChild(t,[1,2])
        #graph_ref = graph_node(str("if_inst"), [t[1], t[2]],childsProduction )
        lista = None
        childsProduction = []
        if t[1] != None:
            lista = t[1][0]
            childsProduction.append(lista.graph_ref)
        graph_ref = graph_node(str("if_inst"), [lista, t[2], t[3]], childsProduction)
        addCad("**\<IF_INST>** ::= if_inst \<ASIG_BASICA> ';' ")
        ############
        # Accumulate assignments into a plain Python list.
        if t[1] is None:
            t[0] = [t[2]]
        else:
            t[1].append(t[2])
            t[0] = t[1]
def p_if_inst0(t):
    '''if_inst : empty '''
    # Empty branch body.
    t[0]= None
#TODO @SergioUnix Fix the graph and the grammar report
def p_stm_begin(t):
    '''stm_begin : declares_opt BEGIN statements_begin exception_opt return_opt END if_opt '''
    # PLY rule: docstring above is the grammar production — do not edit.
    # NOTE(review): the graph label and addCad text below are copied from
    # the IF rule — they mislabel this BEGIN block in the reports (see TODO).
    childsProduction = addNotNoneChild(t,[1,3,4,5,7])
    graph_ref = graph_node(str("stm_if"), [t[1], t[2], t[3], t[4],t[5],t[6],t[7]], childsProduction)
    ##- graph_ref = None
    addCad("**\<STM_BEGIN>** ::= [\<DECLARE_OPT>] tIf \<CONDITION> THEN [\<IF_INST>] [\<ELSEIF_OPT>] [\<ELSE_OPT>] tEnd tIf ")
    ##-t[0] = FunctionBody(t[1], t[3], t[4], t[5], t.slice[2].lineno, t.slice[2].lexpos, graph_ref)
    #print(t)
    t[0] = upNodo("token", 0, 0, graph_ref)
def p_statements_begin(t):
    '''statements_begin : statements_begin statements_sql PUNTOCOMA
            | statements_begin stm_if PUNTOCOMA
            | statements_begin asig_basica PUNTOCOMA
            | statements_begin stm_case PUNTOCOMA '''
    # PLY rule: docstring above is the grammar production — do not edit.
    # Left-recursive accumulator for the statements inside a BEGIN block;
    # the alternative taken is identified by the type of the second symbol.
    token = t.slice[2]
    if token.type == "statements_sql":
        childsProduction = addNotNoneChild(t,[1,2])
        graph_ref = graph_node(str("statements_begin"), [t[1], t[2]],childsProduction )
        addCad("**\<statements_begin>** ::= statements_begin \<STATEMENTS_SQL> ';' ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif token.type == "stm_if":
        childsProduction = addNotNoneChild(t,[1,2])
        graph_ref = graph_node(str("statements_begin"), [t[1], t[2]],childsProduction )
        addCad("**\<statements_begin>** ::= statements_begin \<STM_IF> ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif token.type == "asig_basica":
        ##childsProduction = addNotNoneChild(t,[1,2])
        ##graph_ref = graph_node(str("statements_begin"), [t[1], t[2]],childsProduction )
        addCad("**\<statements_begin>** ::= statements_begin \<ASIG_BASICA> ")
        # Accumulate assignments into a plain Python list (no graph node).
        if t[1] is None:
            t[0] = [t[2]]
        else:
            t[1].append(t[2])
            t[0] = t[1]
    elif token.type == "stm_case":
        childsProduction = addNotNoneChild(t,[1,2])
        graph_ref = graph_node(str("statements_begin"), [t[1], t[2]],childsProduction )
        addCad("**\<statements_begin>** ::= statements_begin \<STM_CASE> ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
def p_statements_begin0(t):
    '''statements_begin : empty '''
    # Empty BEGIN body.
    t[0]= None
def p_exception_opt(t):
    '''exception_opt : EXCEPTION when_opt '''
    # PLY rule: docstring above is the grammar production — do not edit.
    if len(t) == 3:
        childsProduction = addNotNoneChild(t,[2])
        graph_ref = graph_node(str("exception_opt"), [t[1], t[2]],childsProduction )
        addCad("**\<EXCEPTION_OPT>** ::= tException \<WHEN_OPT> ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
def p_exception_opt1(t):
    '''exception_opt : empty '''
    # Missing EXCEPTION section.
    t[0] =None
def p_when_opt(t):
    '''when_opt : when_opt WHEN atr_when then_op
            | WHEN atr_when then_op
            | WHEN '''
    # PLY rule: docstring above is the grammar production — do not edit.
    # Alternatives are distinguished by symbol count; only the bare WHEN
    # case needs the token-type check.
    token = t.slice[1]
    if len(t) == 5:
        childsProduction = addNotNoneChild(t,[1,3,4])
        graph_ref = graph_node(str("when_opt"), [t[1], t[2],t[3],t[4]],childsProduction )
        addCad("**\<WHEN_OPT>** ::= \<WHEN_OPT> tWhen \<ATR_WHEN> \<then_op> ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif len(t) == 4:
        childsProduction = addNotNoneChild(t,[2,3])
        graph_ref = graph_node(str("when_opt"), [t[1], t[2],t[3]],childsProduction )
        addCad("**\<WHEN_OPT>** ::= tWhen \<ATR_WHEN> \<then_opT> ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif token.type == "WHEN" and len(t)==2:
        graph_ref = graph_node(str("when_opt"), [t[1]],[] )
        addCad("**\<WHEN_OPT>** ::= tWhen ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
def p_when_opt0(t):
    '''when_opt : empty '''
    # Missing WHEN clause.
    t[0]= None
def p_atr_when(t):
    '''atr_when : NO_DATA_FOUND
                | TOO_MANY_ROWS
                | ID '''
    # PLY rule: docstring above is the grammar production — do not edit.
    # One branch per terminal alternative, each only emitting graph/report data.
    token = t.slice[1]
    if token.type == "NO_DATA_FOUND" :
        graph_ref = graph_node(str("atr_when"), [t[1]],[] )
        addCad("**\<ATR_WHEN>** ::= tNo_data_found ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif token.type == "TOO_MANY_ROWS" :
        graph_ref = graph_node(str("atr_when"), [t[1]],[] )
        addCad("**\<ATR_WHEN>** ::= tToo_many_rows ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif token.type == "ID" :
        graph_ref = graph_node(str("atr_when"), [t[1]],[] )
        addCad("**\<ATR_WHEN>** ::= tIdentifier ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
def p_then_op(t):
    ''' then_op : THEN raise_op
    '''
    # PLY rule: docstring above is the grammar production — do not edit.
    if len(t) == 3:
        childsProduction = addNotNoneChild(t,[2])
        graph_ref = graph_node(str("then_op"), [t[1],t[2]], childsProduction )
        addCad("**\<then_op>** ::= tTHEN \<RAISE_OP> ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
def p_then_op1(t):
    ''' then_op : THEN NULL PUNTOCOMA
    '''
    # THEN NULL; — an explicit no-op handler.
    if len(t) == 4:
        graph_ref = graph_node(str("then_op"), [t[1],t[2],t[3]], [] )
        addCad("**\<then_op>** ::= tThen tNull ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
def p_raise_op(t):
    ''' raise_op : RAISE atr_raise TEXTO COMA col_name PUNTOCOMA
            | RAISE atr_raise TEXTO PUNTOCOMA'''
    # PLY rule: docstring above is the grammar production — do not edit.
    # len(t) == 7 for the COMA col_name form, len(t) == 5 for the short form.
    token = t.slice[1]
    if len(t) == 7:
        childsProduction = addNotNoneChild(t,[2,5])
        graph_ref = graph_node(str("raise_op"), [t[1],t[2],t[3],t[4],t[5],t[6]], childsProduction )
        addCad("**\<RAISE_OP>** ::= tRaise \<ATR_RAISE> tTexto ',' \<COL_NAME> ';' ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif len(t) == 5:
        childsProduction = addNotNoneChild(t,[2])
        graph_ref = graph_node(str("raise_op"), [t[1],t[2],t[3],t[4]], childsProduction )
        addCad("**\<RAISE_OP>** ::= tRaise \<ATR_RAISE> tTexto ';' ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
def p_raise_op1(t):
    ''' raise_op : empty '''
    # Missing raise statement.
    t[0] = None
    #print(t)
def p_atr_raise(t):
    '''atr_raise : NOTICE
                | EXCEPTION
                | ID '''
    # PLY rule: the docstring above IS the grammar production — do not edit.
    # One branch per terminal alternative, each only emitting graph/report data.
    token = t.slice[1]
    if token.type == "NOTICE" :
        graph_ref = graph_node(str("atr_raise"), [t[1]],[] )
        addCad("**\<ATR_RAISE>** ::= tNotice ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif token.type == "EXCEPTION" :
        # BUG FIX: this branch previously tested for "TOO_MANY_ROWS"
        # (copy-pasted from p_atr_when), so an EXCEPTION token fell through
        # every branch and left t[0] unset (None). The graph labels were
        # also corrected from "atr_when" to "atr_raise".
        graph_ref = graph_node(str("atr_raise"), [t[1]],[] )
        addCad("**\<ATR_RAISE>** ::= tException ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
    elif token.type == "ID" :
        graph_ref = graph_node(str("atr_raise"), [t[1]],[] )
        addCad("**\<ATR_RAISE>** ::= tIdentifier ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
def p_return_opt(t):
    '''return_opt : RETURN ID PUNTOCOMA '''
    # PLY rule: docstring above is the grammar production — do not edit.
    if len(t) == 4:
        graph_ref = graph_node(str("return_opt"), [t[1], t[2], t[3]], [] )
        addCad("**\<RETURN_OPT>** ::= tReturn tIdentifier ';' ")
        t[0] = upNodo("token", 0, 0, graph_ref)
        #print(t)
def p_return_opt0(t):
    '''return_opt : empty '''
    # Missing RETURN statement.
    t[0]=None
def p_stm_execute(t):
'''stm_execute : EXECUTE TEXTO INTO ID USING group_list
| EXECUTE ID PARA TEXTO COMA column_list PARC INTO ID USING group_list
| EXECUTE ID PARA TEXTO TEXTO COMA column_list PARC INTO ID USING group_list
| EXECUTE ID PARA TEXTO COMA column_list PARC USING group_list
| EXECUTE ID PARA TEXTO TEXTO COMA column_list PARC USING group_list
| EXECUTE ID PARA TEXTO COMA column_list PARC
'''
token = t.slice[1]
if len(t) == 7:
childsProduction = addNotNoneChild(t,[6])
graph_ref = graph_node(str("STM_EXECUTE"), [t[1], t[2],t[3],t[4],t[5],t[6]],childsProduction )
addCad("**\<STM_EXECUTE>** ::= tExecute TEXTO tInto tIdentifier tUsing \<GROUP_LIST> ")
t[0] = upNodo("token", 0, 0, graph_ref)
#print(t)
elif len(t) == 12:
childsProduction = addNotNoneChild(t,[11])
lista = None
if t[6] != None:
lista = t[6][0]
childsProduction.append(lista.graph_ref)
graph_ref = graph_node(str("STM_EXECUTE"), [t[1], t[2],t[3],t[4],t[5],lista,t[7],t[8],t[9],t[10],t[11]],childsProduction )
addCad("**\<STM_EXECUTE>** ::= tExecute tIdentifier ‘(’ TEXTO ‘,’ \<COLUMN_LIST> ’)’ tInto tIdentifier tUsing \<GROUP_LIST> ")
t[0] = upNodo("token", 0, 0, graph_ref)
#print(t)
elif len(t) == 13:
childsProduction = addNotNoneChild(t,[12])
lista = None
if t[7] != None:
lista = t[7][0]
childsProduction.append(lista.graph_ref)
graph_ref = graph_node(str("STM_EXECUTE"), [t[1], t[2],t[3],t[4],t[5],t[6],lista,t[8],t[9],t[10],t[11],t[12]],childsProduction )
addCad("**\<STM_EXECUTE>** ::= tExecute tIdentifier ‘(’ TEXTO TEXTO ‘,’ \<COLUMN_LIST> | |
today.
assert self.client.get(path, headers={
'if-modified-since': http_date(yesterday.timestamp),
'if-unmodified-since': http_date(today.timestamp),
}).status_code == 304
# Send me the resource if it has been modified between two days ago
# and yesterday.
assert self.client.get(path, headers={
'if-modified-since': http_date(yesterday.shift(days=-1).timestamp),
'if-unmodified-since': http_date(yesterday.timestamp),
}).status_code == 304
# Send me the resource if it has been modified between tomorrow
# and two days from today.
assert self.client.get(path, headers={
'if-modified-since': http_date(tomorrow.timestamp),
'if-unmodified-since': http_date(tomorrow.shift(days=1).timestamp),
}).status_code == 304
def test_time_etag_combo(self, timeframe):
"""
Test evaluation priorities among ETag and time headers.
"""
_, today, yesterday, tomorrow = timeframe
path = f'/ldp/{uuid4()}'
content = uuid4().bytes
content_cksum = hashlib.new(digest_algo, content).hexdigest()
bogus_cksum = uuid4().hex
self.client.put(
path, data=content, headers={'content-type': 'text/plain'})
# Negative ETag match wins.
assert self.client.get(path, headers={
'if-match': f'"{bogus_cksum}"',
'if-modified-since': http_date(yesterday.timestamp),
}).status_code == 412
assert self.client.get(path, headers={
'if-match': f'"{bogus_cksum}"',
'if-unmodified-since': http_date(tomorrow.timestamp),
}).status_code == 412
assert self.client.get(path, headers={
'if-none-match': f'"{content_cksum}"',
'if-modified-since': http_date(yesterday.timestamp),
}).status_code == 304
assert self.client.get(path, headers={
'if-none-match': f'"{content_cksum}"',
'if-unmodified-since': http_date(tomorrow.timestamp),
}).status_code == 304
# Positive ETag match wins.
assert self.client.get(path, headers={
'if-match': f'"{content_cksum}"',
'if-unmodified-since': http_date(yesterday.timestamp),
}).status_code == 200
assert self.client.get(path, headers={
'if-match': f'"{content_cksum}"',
'if-modified-since': http_date(tomorrow.timestamp),
}).status_code == 200
assert self.client.get(path, headers={
'if-none-match': f'"{bogus_cksum}"',
'if-unmodified-since': http_date(yesterday.timestamp),
}).status_code == 200
assert self.client.get(path, headers={
'if-none-match': f'"{bogus_cksum}"',
'if-modified-since': http_date(tomorrow.timestamp),
}).status_code == 200
@pytest.mark.usefixtures('client_class')
class TestRange:
    """
    Test byte range retrieval.
    This should not need too deep testing since it's functionality implemented
    in Werkzeug/Flask.
    """
    @pytest.fixture(scope='class')
    def bytestream(self):
        """
        Create a sample bytestream with predictable (8x8 bytes) content.
        """
        # 64 bytes total: eight 8-byte runs of 0x00, 0x01, ..., 0x07.
        return b''.join([bytes([n] * 8) for n in range(8)])
    def test_get_range(self, bytestream):
        """
        Get different ranges of the bitstream.
        """
        path = '/ldp/test_range'
        self.client.put(path, data=bytestream)
        # First 8 bytes.
        assert self.client.get(
            path, headers={'range': 'bytes=0-7'}).data == b'\x00' * 8
        # Last 4 bytes of first block, first 4 of second block.
        assert self.client.get(
            path, headers={'range': 'bytes=4-11'}
        ).data == b'\x00' * 4 + b'\x01' * 4
        # Last 8 bytes.
        assert self.client.get(
            path, headers={'range': 'bytes=56-'}).data == b'\x07' * 8
    def test_fail_ranges(self, bytestream):
        """
        Test malformed or unsupported ranges.
        """
        path = '/ldp/test_range'
        # TODO This shall be a 206 when multiple ranges are supported.
        fail_rsp = self.client.get(path, headers={'range': 'bytes=0-1, 7-8'})
        assert fail_rsp.status_code == 501
        # Bad ranges will be ignored and the full resource returned.
        # BUG FIX: the format string below was missing its f-prefix, so the
        # literal text "bytes={rng[0]}-{rng[1]}" was sent for every case and
        # no real range spec was ever exercised. With the f-string restored,
        # the cases are limited to genuinely malformed specs (start > end,
        # non-numeric, empty): e.g. 'bytes=-3' is a *valid* suffix range and
        # an out-of-bounds range may draw a 416 rather than being ignored.
        for start, end in ((10, 4), ('x', 'y'), ('', '')):
            bad_rsp = self.client.get(
                path, headers={'range': f'bytes={start}-{end}'})
            assert bad_rsp.status_code == 200
            assert bad_rsp.data == bytestream
            assert int(bad_rsp.headers['content-length']) == len(bytestream)
    def test_range_rsp_headers(self, bytestream):
        """
        Test various headers for a ranged response.
        """
        path = '/ldp/test_range'
        start_b = 0
        end_b = 7
        full_rsp = self.client.get(path)
        part_rsp = self.client.get(path, headers={
            'range': f'bytes={start_b}-{end_b}'})
        # Entity metadata must be identical for full and partial responses.
        for hdr_name in ['etag', 'digest', 'content-type']:
            assert part_rsp.headers[hdr_name] == full_rsp.headers[hdr_name]
        for hdr in part_rsp.headers['link']:
            assert hdr in full_rsp.headers['link']
        assert int(part_rsp.headers['content-length']) == end_b - start_b + 1
        assert part_rsp.headers['content-range'] == \
                f'bytes {start_b}-{end_b} / {len(bytestream)}'
@pytest.mark.usefixtures('client_class')
class TestPrefHeader:
    """
    Test various combinations of `Prefer` header.
    """
    @pytest.fixture(scope='class')
    def cont_structure(self):
        """
        Create a container structure to be used for subsequent requests.
        """
        # A parent container with exactly three children; tests below rely
        # on this count and on the child names.
        parent_path = '/ldp/test_parent'
        self.client.put(parent_path, content_type='text/turtle')
        self.client.put(parent_path + '/child1', content_type='text/turtle')
        self.client.put(parent_path + '/child2', content_type='text/turtle')
        self.client.put(parent_path + '/child3', content_type='text/turtle')
        return {
            'path' : parent_path,
            'response' : self.client.get(parent_path),
        }
    def test_put_prefer_handling(self, random_uuid):
        """
        Trying to PUT an existing resource should:
        - Return a 204 if the payload is empty
        - Return a 204 if the payload is RDF, server-managed triples are
        included and the 'Prefer' header is set to 'handling=lenient'
        - Return a 412 (ServerManagedTermError) if the payload is RDF,
        server-managed triples are included and handling is set to 'strict',
        or not set.
        """
        path = '/ldp/put_pref_header01'
        assert self.client.put(path, content_type='text/turtle').status_code == 201
        assert self.client.get(path).status_code == 200
        assert self.client.put(path, content_type='text/turtle').status_code == 204
        # Default handling is strict.
        with open('tests/data/rdf_payload_w_srv_mgd_trp.ttl', 'rb') as f:
            rsp_default = self.client.put(
                path,
                headers={
                    'Content-Type' : 'text/turtle',
                },
                data=f
            )
        assert rsp_default.status_code == 412
        # Lenient handling: server-managed triples are silently dropped.
        with open('tests/data/rdf_payload_w_srv_mgd_trp.ttl', 'rb') as f:
            rsp_len = self.client.put(
                path,
                headers={
                    'Prefer' : 'handling=lenient',
                    'Content-Type' : 'text/turtle',
                },
                data=f
            )
        assert rsp_len.status_code == 204
        # Explicit strict handling behaves like the default.
        with open('tests/data/rdf_payload_w_srv_mgd_trp.ttl', 'rb') as f:
            rsp_strict = self.client.put(
                path,
                headers={
                    'Prefer' : 'handling=strict',
                    'Content-Type' : 'text/turtle',
                },
                data=f
            )
        assert rsp_strict.status_code == 412
    # @HOLD Embed children is debated.
    # Disabled by the leading underscore so pytest does not collect it.
    def _disabled_test_embed_children(self, cont_structure):
        """
        verify the "embed children" prefer header.
        """
        self.client.get('/ldp')
        parent_path = cont_structure['path']
        cont_resp = cont_structure['response']
        cont_subject = URIRef(g.webroot + '/test_parent')
        #minimal_resp = self.client.get(parent_path, headers={
        #    'Prefer' : 'return=minimal',
        #})
        incl_embed_children_resp = self.client.get(parent_path, headers={
            'Prefer' : 'return=representation; include={}'\
                    .format(Ldpr.EMBED_CHILD_RES_URI),
        })
        omit_embed_children_resp = self.client.get(parent_path, headers={
            'Prefer' : 'return=representation; omit={}'\
                    .format(Ldpr.EMBED_CHILD_RES_URI),
        })
        default_gr = Graph().parse(data=cont_resp.data, format='turtle')
        incl_gr = Graph().parse(
                data=incl_embed_children_resp.data, format='turtle')
        omit_gr = Graph().parse(
                data=omit_embed_children_resp.data, format='turtle')
        # "omit" is the default: graphs must be isomorphic.
        assert isomorphic(omit_gr, default_gr)
        children = set(incl_gr[cont_subject : nsc['ldp'].contains])
        assert len(children) == 3
        children = set(incl_gr[cont_subject : nsc['ldp'].contains])
        for child_uri in children:
            # Children triples present only when embedding was requested.
            assert set(incl_gr[ child_uri : : ])
            assert not set(omit_gr[ child_uri : : ])
    def test_return_children(self, cont_structure):
        """
        verify the "return children" prefer header.
        """
        self.client.get('/ldp')
        parent_path = cont_structure['path']
        cont_resp = cont_structure['response']
        cont_subject = URIRef(g.webroot + '/test_parent')
        incl_children_resp = self.client.get(parent_path, headers={
            'Prefer' : 'return=representation; include={}'\
                    .format(Ldpr.RETURN_CHILD_RES_URI),
        })
        omit_children_resp = self.client.get(parent_path, headers={
            'Prefer' : 'return=representation; omit={}'\
                    .format(Ldpr.RETURN_CHILD_RES_URI),
        })
        default_gr = Graph().parse(data=cont_resp.data, format='turtle')
        incl_gr = Graph().parse(data=incl_children_resp.data, format='turtle')
        omit_gr = Graph().parse(data=omit_children_resp.data, format='turtle')
        # "include" is the default here: graphs must be isomorphic.
        assert isomorphic(incl_gr, default_gr)
        children = incl_gr[cont_subject : nsc['ldp'].contains]
        for child_uri in children:
            assert not omit_gr[cont_subject : nsc['ldp'].contains : child_uri]
    def test_inbound_rel(self, cont_structure):
        """
        verify the "inbound relationships" prefer header.
        """
        # test_shooter references test_target via an outbound triple.
        self.client.put('/ldp/test_target', content_type='text/turtle')
        data = '<> <http://ex.org/ns#shoots> <{}> .'.format(
                g.webroot + '/test_target')
        self.client.put('/ldp/test_shooter', data=data,
                headers={'Content-Type': 'text/turtle'})
        cont_resp = self.client.get('/ldp/test_target')
        incl_inbound_resp = self.client.get('/ldp/test_target', headers={
            'Prefer' : 'return=representation; include="{}"'\
                    .format(Ldpr.RETURN_INBOUND_REF_URI),
        })
        omit_inbound_resp = self.client.get('/ldp/test_target', headers={
            'Prefer' : 'return=representation; omit="{}"'\
                    .format(Ldpr.RETURN_INBOUND_REF_URI),
        })
        default_gr = Graph().parse(data=cont_resp.data, format='turtle')
        incl_gr = Graph().parse(data=incl_inbound_resp.data, format='turtle')
        omit_gr = Graph().parse(data=omit_inbound_resp.data, format='turtle')
        subject = URIRef(g.webroot + '/test_target')
        inbd_subject = URIRef(g.webroot + '/test_shooter')
        # Inbound references are omitted by default.
        assert isomorphic(omit_gr, default_gr)
        assert len(set(incl_gr[inbd_subject : : ])) == 1
        assert incl_gr[
                inbd_subject : URIRef('http://ex.org/ns#shoots') : subject]
        assert not len(set(omit_gr[inbd_subject : :]))
    def test_srv_mgd_triples(self, cont_structure):
        """
        verify the "server managed triples" prefer header.
        """
        self.client.get('/ldp')
        parent_path = cont_structure['path']
        cont_resp = cont_structure['response']
        cont_subject = URIRef(g.webroot + '/test_parent')
        incl_srv_mgd_resp = self.client.get(parent_path, headers={
            'Prefer' : 'return=representation; include={}'\
                    .format(Ldpr.RETURN_SRV_MGD_RES_URI),
        })
        omit_srv_mgd_resp = self.client.get(parent_path, headers={
            'Prefer' : 'return=representation; omit={}'\
                    .format(Ldpr.RETURN_SRV_MGD_RES_URI),
        })
        default_gr = Graph().parse(data=cont_resp.data, format='turtle')
        incl_gr = Graph().parse(data=incl_srv_mgd_resp.data, format='turtle')
        omit_gr = Graph().parse(data=omit_srv_mgd_resp.data, format='turtle')
        # Server-managed triples are included by default.
        assert isomorphic(incl_gr, default_gr)
        for pred in {
            nsc['fcrepo'].created,
            nsc['fcrepo'].createdBy,
            nsc['fcrepo'].lastModified,
            nsc['fcrepo'].lastModifiedBy,
            nsc['ldp'].contains,
        }:
            assert set(incl_gr[ cont_subject : pred : ])
            assert not set(omit_gr[ cont_subject : pred : ])
        # NOTE: loop variable shadows the `type` builtin (kept as-is).
        for type in {
                nsc['fcrepo'].Resource,
                nsc['ldp'].Container,
                nsc['ldp'].Resource,
            }:
            assert incl_gr[ cont_subject : RDF.type : type ]
            assert not omit_gr[ cont_subject : RDF.type : type ]
    def test_delete_no_tstone(self):
        """
        Test the `no-tombstone` Prefer option.
        """
        # With no tombstone, the resource and its child 404 outright
        # (rather than 410 Gone).
        self.client.put('/ldp/test_delete_no_tstone01')
        self.client.put('/ldp/test_delete_no_tstone01/a')
        self.client.delete('/ldp/test_delete_no_tstone01', headers={
            'prefer' : 'no-tombstone'})
        resp = self.client.get('/ldp/test_delete_no_tstone01')
        assert resp.status_code == 404
        child_resp = self.client.get('/ldp/test_delete_no_tstone01/a')
        assert child_resp.status_code == 404
#@pytest.mark.usefixtures('client_class')
#@pytest.mark.usefixtures('db')
#class TestDigest:
# """
# Test digest and ETag handling.
# """
# @pytest.mark.skip(reason='TODO Need to implement async digest queue')
# def test_digest_post(self):
# """
# Test ``Digest`` and ``ETag`` headers on resource POST.
# """
# resp = self.client.post('/ldp/')
# assert 'Digest' in resp.headers
# assert 'ETag' in resp.headers
# assert (
# b64encode(bytes.fromhex(
# resp.headers['ETag'].replace('W/', '')
# )).decode('ascii') ==
# resp.headers['Digest'].replace('SHA256=', ''))
#
#
# @pytest.mark.skip(reason='TODO Need to implement async digest queue')
# def test_digest_put(self):
# """
# Test ``Digest`` and ``ETag`` headers on resource PUT.
# """
# resp_put = self.client.put('/ldp/test_digest_put')
# assert 'Digest' in resp_put.headers
# assert 'ETag' in resp_put.headers
# assert (
# b64encode(bytes.fromhex(
# resp_put.headers['ETag'].replace('W/', '')
# )).decode('ascii') ==
# resp_put.headers['Digest'].replace('SHA256=', ''))
#
# resp_get = self.client.get('/ldp/test_digest_put')
# assert 'Digest' in resp_get.headers
# assert 'ETag' in resp_get.headers
# assert (
# b64encode(bytes.fromhex(
# resp_get.headers['ETag'].replace('W/', '')
# )).decode('ascii') ==
# resp_get.headers['Digest'].replace('SHA256=', ''))
#
#
# @pytest.mark.skip(reason='TODO Need to implement async digest queue')
# def test_digest_patch(self):
# """
# Verify that the digest and ETag change on resource change.
# """
# path = '/ldp/test_digest_patch'
# self.client.put(path)
# rsp1 = self.client.get(path)
#
# self.client.patch(
# path, data=b'DELETE {} INSERT {<> a <http://ex.org/Test> .} '
# b'WHERE {}',
# headers={'Content-Type': 'application/sparql-update'})
# rsp2 = self.client.get(path)
#
# assert rsp1.headers['ETag'] != rsp2.headers['ETag']
# assert rsp1.headers['Digest'] != rsp2.headers['Digest']
@pytest.mark.usefixtures('client_class')
@pytest.mark.usefixtures('db')
class TestVersion:
"""
Test version creation, retrieval and deletion.
"""
def test_create_versions(self):
"""
Test that POSTing multiple times to fcr:versions creates the
'hasVersions' triple and yields multiple version snapshots.
"""
self.client.put('/ldp/test_version', content_type='text/turtle')
create_rsp = self.client.post('/ldp/test_version/fcr:versions')
assert create_rsp.status_code == 201
rsrc_rsp = self.client.get('/ldp/test_version')
rsrc_gr = Graph().parse(data=rsrc_rsp.data, format='turtle')
assert len(set(rsrc_gr[: nsc['fcrepo'].hasVersions :])) == 1
info_rsp = self.client.get('/ldp/test_version/fcr:versions')
assert info_rsp.status_code == 200
info_gr = Graph().parse(data=info_rsp.data, format='turtle')
assert len(set(info_gr[: nsc['fcrepo'].hasVersion :])) == 1
self.client.post('/ldp/test_version/fcr:versions')
info2_rsp = | |
# Firefox Accounts (FxA) OIDC integration.
FXA_STORE_ACCESS_TOKEN = config("FXA_STORE_ACCESS_TOKEN", default=False, cast=bool)
FXA_STORE_ID_TOKEN = config("FXA_STORE_ID_TOKEN", default=False, cast=bool)
FXA_SUPPORT_FORM = config(
    "FXA_SUPPORT_FORM", default="https://accounts.firefox.com/support"
)
FXA_SET_ISSUER = config("FXA_SET_ISSUER", default="https://accounts.firefox.com")
ADMIN_REDIRECT_URL = config("ADMIN_REDIRECT_URL", default=None)
AUTH_PROFILE_MODULE = "users.Profile"
USER_AVATAR_PATH = "uploads/avatars/"
DEFAULT_AVATAR = "sumo/img/avatar.png"
AVATAR_SIZE = 200  # in pixels
MAX_AVATAR_FILE_SIZE = 131072  # 128 KiB (2**17), in bytes
GROUP_AVATAR_PATH = "uploads/groupavatars/"
ACCOUNT_ACTIVATION_DAYS = 30
PASSWORD_HASHERS = ("<PASSWORD>.users.hashers.SHA<PASSWORD>",)
USERNAME_BLACKLIST = path("kitsune", "configs", "username-blacklist.txt")
ROOT_URLCONF = "%s.urls" % PROJECT_MODULE
# TODO: Figure out why changing the order of apps (for example, moving
# taggit higher in the list) breaks tests.
INSTALLED_APPS = (
    "django.contrib.contenttypes",
    "django.contrib.auth",
    "django.contrib.sessions",
    "django.contrib.sites",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "mozilla_django_oidc",
    "corsheaders",
    "kitsune.users",
    "dennis.django_dennis",
    "puente",
    "pipeline",
    "authority",
    "waffle",
    "storages",
    "kitsune.access",
    "kitsune.sumo",
    "kitsune.search",
    "kitsune.search.v2",
    "kitsune.forums",
    "tidings",
    "rest_framework.authtoken",
    "kitsune.questions",
    "adminplus",
    "kitsune.kadmin",
    "kitsune.kbadge",
    "taggit",
    "kitsune.flagit",
    "kitsune.upload",
    "product_details",
    "kitsune.wiki",
    "kitsune.kbforums",
    "kitsune.dashboards",
    "kitsune.gallery",
    "kitsune.customercare",
    "kitsune.twitter",
    "kitsune.inproduct",
    "kitsune.postcrash",
    "kitsune.landings",
    "kitsune.announcements",
    "kitsune.community",
    "kitsune.messages",
    "commonware.response.cookies",
    "kitsune.groups",
    "kitsune.karma",
    "kitsune.tags",
    "kitsune.kpi",
    "kitsune.products",
    "kitsune.notifications",
    "kitsune.journal",
    "kitsune.motidings",
    "rest_framework",
    "statici18n",
    "watchman",
    # 'axes',
    # Extra apps for testing.
    "django_nose",
    # Extra app for python migrations.
    "django_extensions",
    # In Django <= 1.6, this "must be placed somewhere after all the apps that
    # are going to be generating activities". Putting it at the end is the safest.
    "actstream",
    "django_user_agents",
    # Last so we can override admin templates.
    "django.contrib.admin",
)
TEST_RUNNER = "kitsune.sumo.tests.TestSuiteRunner"
def JINJA_CONFIG():
    """Return the Jinja2 environment options used by the template engine."""
    return {
        "extensions": [
            "puente.ext.i18n",
        ],
        # Render None as the empty string instead of the text 'None'.
        "finalize": lambda value: "" if value is None else value,
        "autoescape": True,
    }
# Tells the extract script what files to look for l10n in and what
# function handles the extraction. Puente expects this.
PUENTE = {
    "BASE_DIR": ROOT,
    "DOMAIN_METHODS": {
        "django": [
            ("kitsune/forums/**.py", "ignore"),
            ("kitsune/forums/**.html", "ignore"),
            ("kitsune/**/tests/**.py", "ignore"),
            ("kitsune/**/management/**.py", "ignore"),
            ("kitsune/forums/**.lhtml", "ignore"),
            ("kitsune/**.py", "python"),
            ("kitsune/**/templates/**.html", "jinja2"),
            ("kitsune/**/jinja2/**.html", "jinja2"),
            ("kitsune/**/jinja2/**.lhtml", "jinja2"),
            ("kitsune/**/jinja2/**.ltxt", "jinja2"),
            ("vendor/src/django-tidings/**/templates/**.html", "jinja2"),
        ],
        "djangojs": [
            # We can't say **.js because that would dive into any libraries.
            ("kitsune/**/static/**/js/*-all.js", "ignore"),
            ("kitsune/**/static/**/js/*-min.js", "ignore"),
            ("kitsune/**/static/**/js/*.js", "javascript"),
            ("kitsune/**/static/**/tpl/**.html", "jinja2"),
        ],
    },
}
# These domains will not be merged into messages.pot and will use
# separate PO files. See the following URL for an example of how to
# set these domains in DOMAIN_METHODS.
# http://github.com/jbalogh/zamboni/blob/d4c64239c24aa2f1e91276909823d1d1b290f0ee/settings.py#L254 # nopep8
STANDALONE_DOMAINS = [
    TEXT_DOMAIN,
    "djangojs",
]
STATICI18N_DOMAIN = "djangojs"
STATICI18N_PACKAGES = ["kitsune.sumo"]
# Save jsi18n files outside of static so that collectstatic will pick
# them up and save it with hashed filenames in the static directory.
STATICI18N_ROOT = path("jsi18n")
#
# Django Pipeline
PIPELINE = {
    "PIPELINE_ENABLED": config("PIPELINE_ENABLED", default=False, cast=bool),
    "COMPILERS": (
        "kitsune.lib.pipeline_compilers.BrowserifyCompiler",
        "pipeline.compilers.es6.ES6Compiler",
    ),
    "JAVASCRIPT": PIPELINE_JS,
    "DISABLE_WRAPPER": True,
    "JS_COMPRESSOR": "pipeline.compressors.uglifyjs.UglifyJSCompressor",
    "UGLIFYJS_BINARY": path("node_modules/.bin/uglifyjs"),
    "UGLIFYJS_ARGUMENTS": "",
    "BROWSERIFY_BINARY": path("node_modules/.bin/browserify"),
    "BROWSERIFY_ARGUMENTS": "-t babelify",
    "BABEL_BINARY": "node_modules/.bin/babel",
}
if DEBUG:
    # Emit source maps when browserifying in debug mode.
    PIPELINE["BROWSERIFY_ARGUMENTS"] += " -d"
NUNJUCKS_PRECOMPILE_BIN = path("node_modules/.bin/nunjucks-precompile")
#
# Sessions
SESSION_COOKIE_AGE = config(
    "SESSION_COOKIE_AGE", default=4 * 7 * 24 * 60 * 60, cast=int
)  # 4 weeks
SESSION_COOKIE_SECURE = config("SESSION_COOKIE_SECURE", default=not DEBUG, cast=bool)
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_NAME = "session_id"
SESSION_ENGINE = config("SESSION_ENGINE", default="django.contrib.sessions.backends.cache")
# NOTE(review): PickleSerializer executes arbitrary code if the session
# store is ever compromised; consider JSONSerializer — verify compatibility.
SESSION_SERIALIZER = config(
    "SESSION_SERIALIZER", default="django.contrib.sessions.serializers.PickleSerializer"
)
# CSRF
CSRF_COOKIE_SECURE = config("CSRF_COOKIE_SECURE", default=not DEBUG, cast=bool)
#
# Connection information for Elastic
ES_URLS = [config("ES_URLS", default="localhost:9200")]
# Connection information for Elastic 7
ES7_URLS = config("ES7_URLS", cast=Csv(), default="elasticsearch7:9200")
ES7_CLOUD_ID = config("ES7_CLOUD_ID", default="")
ES7_USE_SSL = config("ES7_USE_SSL", default=False, cast=bool)
ES7_HTTP_AUTH = config("ES7_HTTP_AUTH", default="", cast=Csv())
ES7_ENABLE_CONSOLE_LOGGING = config("ES7_ENABLE_CONSOLE_LOGGING", default=False, cast=bool)
# Indexes for reading
ES_INDEXES = {
    "default": config("ES_INDEXES_DEFAULT", default="default"),
    "non-critical": config("ES_INDEXES_NON_CRITICAL", default="non-critical"),
    # Second positional arg to config() is the default.
    "metrics": config("ES_INDEXES_METRICS", "metrics"),
}
# Indexes for indexing--set this to ES_INDEXES if you want to read to
# and write to the same index.
ES_WRITE_INDEXES = ES_INDEXES
# This is prepended to index names to get the final read/write index
# names used by kitsune. This is so that you can have multiple
# environments pointed at the same ElasticSearch cluster and not have
# them bump into one another.
ES_INDEX_PREFIX = config("ES_INDEX_PREFIX", default="sumo")
# Keep indexes up to date as objects are made/deleted.
ES_LIVE_INDEXING = config("ES_LIVE_INDEXING", default=True, cast=bool)
# Timeout for querying requests
ES_TIMEOUT = 5
ES_USE_SSL = config("ES_USE_SSL", default=False, cast=bool)
ES_HTTP_AUTH = config("ES_HTTP_AUTH", default="", cast=Csv())
SEARCH_MAX_RESULTS = 1000
SEARCH_RESULTS_PER_PAGE = 10
# Search default settings
SEARCH_DEFAULT_CATEGORIES = (
    10,
    20,
)
SEARCH_DEFAULT_MAX_QUESTION_AGE = 180 * 24 * 60 * 60  # seconds
# IA default settings
IA_DEFAULT_CATEGORIES = (
    10,
    20,
    30,
)
# The length for which we would like the user to cache search forms
# and results, in minutes.
SEARCH_CACHE_PERIOD = config("SEARCH_CACHE_PERIOD", default=15, cast=int)
# Maximum length of the filename. Forms should use this and raise
# ValidationError if the length is exceeded.
# @see http://code.djangoproject.com/ticket/9893
# Columns are 250 but this leaves 50 chars for the upload_to prefix
MAX_FILENAME_LENGTH = 200
MAX_FILEPATH_LENGTH = 250
# Default storage engine - ours does not preserve filenames
DEFAULT_FILE_STORAGE = "kitsune.upload.storage.RenameFileStorage"
# AWS S3 Storage Settings
AWS_ACCESS_KEY_ID = config("AWS_ACCESS_KEY_ID", default="")
AWS_SECRET_ACCESS_KEY = config("AWS_SECRET_ACCESS_KEY", default="")
AWS_STORAGE_BUCKET_NAME = config("AWS_STORAGE_BUCKET_NAME", default="")
AWS_S3_CUSTOM_DOMAIN = config(
    "AWS_S3_CUSTOM_DOMAIN", default="user-media-prod-cdn.itsre-sumo.mozilla.net"
)
AWS_S3_HOST = config("AWS_S3_HOST", default="s3-us-west-2.amazonaws.com")
AWS_S3_OBJECT_PARAMETERS = {
    "CacheControl": "max-age=2592000",  # 30 days, in seconds
}
# Auth and permissions related constants
LOGIN_URL = "/users/login"
LOGOUT_URL = "/users/logout"
LOGIN_REDIRECT_URL = "/"
REGISTER_URL = "/users/register"
# Video settings, hard coded here for now.
# TODO: figure out a way that doesn't need these values
WIKI_VIDEO_WIDTH = 640
WIKI_VIDEO_HEIGHT = 480
IMAGE_MAX_FILESIZE = 1048576  # 1 megabyte, in bytes
THUMBNAIL_SIZE = 120  # Thumbnail size, in pixels
THUMBNAIL_UPLOAD_PATH = "uploads/images/thumbnails/"
IMAGE_UPLOAD_PATH = "uploads/images/"
# A string listing image mime types to accept, comma separated.
# String must not contain double quotes!
IMAGE_ALLOWED_MIMETYPES = "image/jpeg,image/png,image/gif"
# Topics
TOPIC_IMAGE_PATH = "uploads/topics/"
# Products
PRODUCT_IMAGE_PATH = "uploads/products/"
# Badges (kbadge)
BADGE_IMAGE_PATH = "uploads/badges/"
# Email
EMAIL_BACKEND = config("EMAIL_BACKEND", default="kitsune.lib.email.LoggingEmailBackend")
EMAIL_LOGGING_REAL_BACKEND = config(
    "EMAIL_LOGGING_REAL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
EMAIL_SUBJECT_PREFIX = config("EMAIL_SUBJECT_PREFIX", default="[support] ")
# SMTP credentials are only required (no defaults) when the SMTP backend
# is actually selected.
if EMAIL_LOGGING_REAL_BACKEND == "django.core.mail.backends.smtp.EmailBackend":
    EMAIL_HOST = config("EMAIL_HOST")
    EMAIL_HOST_USER = config("EMAIL_HOST_USER")
    EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD")
    EMAIL_PORT = config("EMAIL_PORT", default=25, cast=int)
    EMAIL_USE_TLS = config("EMAIL_USE_TLS", default=False, cast=bool)
# Celery
CELERY_TASK_PROTOCOL = 2
CELERY_TASK_SERIALIZER = config("CELERY_TASK_SERIALIZER", default="json")
CELERY_RESULT_SERIALIZER = config("CELERY_RESULT_SERIALIZER", default="json")
# NOTE(review): accepting 'pickle' content allows code execution from a
# compromised broker; confirm pickle is still required.
CELERY_ACCEPT_CONTENT = config(
    "CELERY_ACCEPT_CONTENT",
    default="pickle, json",
    cast=lambda v: [s.strip() for s in v.split(",")],
)
CELERY_TASK_IGNORE_RESULT = config("CELERY_TASK_IGNORE_RESULT", default=True, cast=bool)
if not CELERY_TASK_IGNORE_RESULT:
    # E.g. redis://localhost:6479/1
    CELERY_RESULT_BACKEND = config("CELERY_RESULT_BACKEND")
CELERY_TASK_ALWAYS_EAGER = config(
    "CELERY_TASK_ALWAYS_EAGER", default=DEBUG, cast=bool
)  # For tests. Set to False for use.
if not CELERY_TASK_ALWAYS_EAGER:
    CELERY_BROKER_URL = config("CELERY_BROKER_URL", default="")
# TODO:PY3: Setting gone, use celery worker --loglevel flag.
# CELERYD_LOG_LEVEL = config('CELERYD_LOG_LEVEL', default='INFO', cast=lambda x: getattr(logging, x))
CELERY_WORKER_CONCURRENCY = config("CELERY_WORKER_CONCURRENCY", default=4, cast=int)
CELERY_TASK_EAGER_PROPAGATES = config(
    "CELERY_TASK_EAGER_PROPAGATES", default=True, cast=bool
)  # Explode loudly during tests.
CELERY_WORKER_HIJACK_ROOT_LOGGER = config(
    "CELERY_WORKER_HIJACK_ROOT_LOGGER", default=False, cast=bool
)
# Wiki rebuild settings
WIKI_REBUILD_TOKEN = "sumo:wiki:full-rebuild"
# Anonymous user cookie
ANONYMOUS_COOKIE_NAME = config("ANONYMOUS_COOKIE_NAME", default="SUMO_ANONID")
ANONYMOUS_COOKIE_MAX_AGE = config(
    "ANONYMOUS_COOKIE_MAX_AGE", default=30 * 86400, cast=int
)  # One month
# Do not change this without also deleting all wiki documents:
WIKI_DEFAULT_LANGUAGE = LANGUAGE_CODE
# Gallery settings
GALLERY_DEFAULT_LANGUAGE = WIKI_DEFAULT_LANGUAGE
GALLERY_IMAGE_PATH = "uploads/gallery/images/"
GALLERY_IMAGE_THUMBNAIL_PATH = "uploads/gallery/images/thumbnails/"
GALLERY_VIDEO_PATH = "uploads/gallery/videos/"
GALLERY_VIDEO_URL = MEDIA_URL + "uploads/gallery/videos/"
GALLERY_VIDEO_THUMBNAIL_PATH = "uploads/gallery/videos/thumbnails/"
GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL = MEDIA_URL + "img/video-thumb.png"
THUMBNAIL_PROGRESS_WIDTH = 32  # width of the above image
THUMBNAIL_PROGRESS_HEIGHT = 32  # height of the above image
VIDEO_MAX_FILESIZE = 52428800  # 50 megabytes, in bytes
# Customer Care settings
CC_MAX_TWEETS = 500  # Max. no. of tweets in DB
CC_TWEETS_PERPAGE = 100  # How many tweets to collect in one go. Max: 100.
CC_SHOW_REPLIES = True  # Show replies to tweets?
CC_ALLOW_REMOVE = True  # Allow users to hide tweets?
CC_TOP_CONTRIB_CACHE_KEY = "sumo-cc-top-contrib-stats"
CC_TOP_CONTRIB_SORT = "1w"
CC_TOP_CONTRIB_LIMIT = 10
CC_STATS_CACHE_TIMEOUT = 24 * 60 * 60  # 24 hours
CC_STATS_WARNING = 30 * 60 * 60  # Warn if JSON data is older than 30 hours
CC_REPLIES_GOAL = 175  # Goal # of replies in 24 hours.
CC_TWEETS_DAYS = 7  # Limit tweets to those from the last 7 days.
# If any of these words show up in a tweet, it probably isn't
# actionable, so don't add it to the AoA.
CC_WORD_BLACKLIST = [
    "#UninstallFirefox",
    "pocket",  # bug 1164008
    "vagina",
    "slut",
]
# URL shortener and Twitter integration credentials.
BITLY_API_URL = config("BITLY_API_URL", default="http://api.bitly.com/v3/shorten?callback=?")
BITLY_LOGIN = config("BITLY_LOGIN", default=None)
BITLY_API_KEY = config("BITLY_API_KEY", default=None)
TWITTER_COOKIE_SECURE = config("TWITTER_COOKIE_SECURE", default=True, cast=bool)
TWITTER_CONSUMER_KEY = config("TWITTER_CONSUMER_KEY", default="")
TWITTER_CONSUMER_SECRET = config("TWITTER_CONSUMER_SECRET", default="")
TWITTER_ACCESS_TOKEN = config("TWITTER_ACCESS_TOKEN", default="")
TWITTER_ACCESS_TOKEN_SECRET = config("TWITTER_ACCESS_TOKEN_SECRET", default="")
TIDINGS_FROM_ADDRESS = config("TIDINGS_FROM_ADDRESS", default="<EMAIL>")
# Anonymous watches must be confirmed.
TIDINGS_CONFIRM_ANONYMOUS_WATCHES = config(
    "TIDINGS_CONFIRM_ANONYMOUS_WATCHES", default=True, cast=bool
)
TIDINGS_MODEL_BASE = "kitsune.sumo.models.ModelBase"
TIDINGS_REVERSE = "kitsune.sumo.urlresolvers.reverse"
# Google Analytics settings.
# GA_KEY is expected b64 encoded.
GA_KEY = config("GA_KEY", default=None)  # Google API client key
if GA_KEY:
    import base64
    GA_KEY = base64.b64decode(GA_KEY)
GA_ACCOUNT = config(
    "GA_ACCOUNT", "<EMAIL>"
)  # Google API Service Account email address
GA_PROFILE_ID = config(
    "GA_PROFILE_ID", default="12345678"
)  # Google Analytics profile id for SUMO prod
GA_START_DATE = date(2012, 11, 10)
GTM_CONTAINER_ID = config("GTM_CONTAINER_ID", default="")  # Google container ID
REDIS_BACKENDS = {
    # TODO: Make sure that db number is respected
    "default": config("REDIS_DEFAULT_URL"),
    "helpfulvotes": config("REDIS_HELPFULVOTES_URL"),
}
HELPFULVOTES_UNHELPFUL_KEY = "helpfulvotes_topunhelpful"
LAST_SEARCH_COOKIE = "last_search"
OPTIPNG_PATH = config("OPTIPNG_PATH", default="/usr/bin/optipng")
# Tasty Pie
API_LIMIT_PER_PAGE = 0
# Change the default for XFrameOptionsMiddleware.
X_FRAME_OPTIONS = "DENY"
# SurveyGizmo API
SURVEYGIZMO_USER = config("SURVEYGIZMO_USER", default=None)
SURVEYGIZMO_PASSWORD = config("<PASSWORD>", default=None)
SURVEYGIZMO_API_TOKEN = config("SURVEYGIZMO_API_TOKEN", default=None)
SURVEYGIZMO_API_TOKEN_SECRET = config("SURVEYGIZMO_API_TOKEN_SECRET", default=None)
# Django Rest Framework
REST_FRAMEWORK = {
    "DEFAULT_FILTER_BACKENDS": ("django_filters.rest_framework.DjangoFilterBackend",),
    "DEFAULT_AUTHENTICATION_CLASSES": (
        "rest_framework.authentication.TokenAuthentication",
        "kitsune.sumo.api_utils.InactiveSessionAuthentication",
    ),
    "DEFAULT_RENDERER_CLASSES": ("kitsune.sumo.api_utils.JSONRenderer",),
    "UNICODE_JSON": False,
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
    "PAGE_SIZE": 20,
    "TEST_REQUEST_DEFAULT_FORMAT": "json",
}
# Django-axes settings.
AXES_LOGIN_FAILURE_LIMIT = config("AXES_LOGIN_FAILURE_LIMIT", default=10, cast=int)
AXES_LOCK_OUT_AT_FAILURE = config("AXES_LOCK_OUT_AT_FAILURE", default=True, cast=bool)
AXES_USE_USER_AGENT = config("AXES_USE_USER_AGENT", default=False, cast=bool)
AXES_COOLOFF_TIME = config("AXES_COOLOFF_TIME", default=1, cast=int)  # hour
AXES_BEHIND_REVERSE_PROXY = config("AXES_BEHIND_REVERSE_PROXY", default=not DEBUG, cast=bool)
AXES_REVERSE_PROXY_HEADER = config("AXES_REVERSE_PROXY_HEADER", default="HTTP_X_CLUSTER_CLIENT_IP")
USE_DEBUG_TOOLBAR = config("USE_DEBUG_TOOLBAR", default=False, cast=bool)
def | |
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import unittest
from google.gax import errors
from google.cloud.firestore_v1beta1.gapic import firestore_client
from google.cloud.firestore_v1beta1.proto import common_pb2
from google.cloud.firestore_v1beta1.proto import document_pb2
from google.cloud.firestore_v1beta1.proto import firestore_pb2
from google.protobuf import empty_pb2
class CustomException(Exception):
    """Sentinel exception used to simulate gRPC-layer failures in tests."""
class TestFirestoreClient(unittest.TestCase):
@mock.patch('google.gax.config.create_stub', spec=True)
def test_get_document(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]',
'[ANY_PATH]')
# Mock response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = document_pb2.Document(**expected_response)
grpc_stub.GetDocument.return_value = expected_response
response = client.get_document(name)
self.assertEqual(expected_response, response)
grpc_stub.GetDocument.assert_called_once()
args, kwargs = grpc_stub.GetDocument.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(kwargs), 1)
self.assertIn('metadata', kwargs)
actual_request = args[0]
expected_request = firestore_pb2.GetDocumentRequest(name=name)
self.assertEqual(expected_request, actual_request)
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
@mock.patch('google.gax.config.create_stub', spec=True)
def test_get_document_exception(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]',
'[ANY_PATH]')
# Mock exception response
grpc_stub.GetDocument.side_effect = CustomException()
self.assertRaises(errors.GaxError, client.get_document, name)
@mock.patch('google.gax.config.create_stub', spec=True)
def test_list_documents(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]',
'[ANY_PATH]')
collection_id = 'collectionId-821242276'
# Mock response
next_page_token = ''
documents_element = {}
documents = [documents_element]
expected_response = {
'next_page_token': next_page_token,
'documents': documents
}
expected_response = firestore_pb2.ListDocumentsResponse(
**expected_response)
grpc_stub.ListDocuments.return_value = expected_response
paged_list_response = client.list_documents(parent, collection_id)
resources = list(paged_list_response)
self.assertEqual(1, len(resources))
self.assertEqual(expected_response.documents[0], resources[0])
grpc_stub.ListDocuments.assert_called_once()
args, kwargs = grpc_stub.ListDocuments.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(kwargs), 1)
self.assertIn('metadata', kwargs)
actual_request = args[0]
expected_request = firestore_pb2.ListDocumentsRequest(
parent=parent, collection_id=collection_id)
self.assertEqual(expected_request, actual_request)
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
@mock.patch('google.gax.config.create_stub', spec=True)
def test_list_documents_exception(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]',
'[ANY_PATH]')
collection_id = 'collectionId-821242276'
# Mock exception response
grpc_stub.ListDocuments.side_effect = CustomException()
paged_list_response = client.list_documents(parent, collection_id)
self.assertRaises(errors.GaxError, list, paged_list_response)
@mock.patch('google.gax.config.create_stub', spec=True)
def test_create_document(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]',
'[ANY_PATH]')
collection_id = 'collectionId-821242276'
document_id = 'documentId506676927'
document = {}
# Mock response
name = 'name3373707'
expected_response = {'name': name}
expected_response = document_pb2.Document(**expected_response)
grpc_stub.CreateDocument.return_value = expected_response
response = client.create_document(parent, collection_id, document_id,
document)
self.assertEqual(expected_response, response)
grpc_stub.CreateDocument.assert_called_once()
args, kwargs = grpc_stub.CreateDocument.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(kwargs), 1)
self.assertIn('metadata', kwargs)
actual_request = args[0]
expected_request = firestore_pb2.CreateDocumentRequest(
parent=parent,
collection_id=collection_id,
document_id=document_id,
document=document)
self.assertEqual(expected_request, actual_request)
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
@mock.patch('google.gax.config.create_stub', spec=True)
def test_create_document_exception(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]',
'[ANY_PATH]')
collection_id = 'collectionId-821242276'
document_id = 'documentId506676927'
document = {}
# Mock exception response
grpc_stub.CreateDocument.side_effect = CustomException()
self.assertRaises(errors.GaxError, client.create_document, parent,
collection_id, document_id, document)
@mock.patch('google.gax.config.create_stub', spec=True)
def test_update_document(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
document = {}
update_mask = {}
# Mock response
name = 'name3373707'
expected_response = {'name': name}
expected_response = document_pb2.Document(**expected_response)
grpc_stub.UpdateDocument.return_value = expected_response
response = client.update_document(document, update_mask)
self.assertEqual(expected_response, response)
grpc_stub.UpdateDocument.assert_called_once()
args, kwargs = grpc_stub.UpdateDocument.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(kwargs), 1)
self.assertIn('metadata', kwargs)
actual_request = args[0]
expected_request = firestore_pb2.UpdateDocumentRequest(
document=document, update_mask=update_mask)
self.assertEqual(expected_request, actual_request)
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
@mock.patch('google.gax.config.create_stub', spec=True)
def test_update_document_exception(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
document = {}
update_mask = {}
# Mock exception response
grpc_stub.UpdateDocument.side_effect = CustomException()
self.assertRaises(errors.GaxError, client.update_document, document,
update_mask)
@mock.patch('google.gax.config.create_stub', spec=True)
def test_delete_document(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]',
'[ANY_PATH]')
client.delete_document(name)
grpc_stub.DeleteDocument.assert_called_once()
args, kwargs = grpc_stub.DeleteDocument.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(kwargs), 1)
self.assertIn('metadata', kwargs)
actual_request = args[0]
expected_request = firestore_pb2.DeleteDocumentRequest(name=name)
self.assertEqual(expected_request, actual_request)
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
@mock.patch('google.gax.config.create_stub', spec=True)
def test_delete_document_exception(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]',
'[ANY_PATH]')
# Mock exception response
grpc_stub.DeleteDocument.side_effect = CustomException()
self.assertRaises(errors.GaxError, client.delete_document, name)
@mock.patch('google.gax.config.create_stub', spec=True)
def test_batch_get_documents(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
database = client.database_root_path('[PROJECT]', '[DATABASE]')
documents = []
# Mock response
missing = 'missing1069449574'
transaction = b'-34'
expected_response = {'missing': missing, 'transaction': transaction}
expected_response = firestore_pb2.BatchGetDocumentsResponse(
**expected_response)
grpc_stub.BatchGetDocuments.return_value = iter([expected_response])
response = client.batch_get_documents(database, documents)
resources = list(response)
self.assertEqual(1, len(resources))
self.assertEqual(expected_response, resources[0])
grpc_stub.BatchGetDocuments.assert_called_once()
args, kwargs = grpc_stub.BatchGetDocuments.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(kwargs), 1)
self.assertIn('metadata', kwargs)
actual_request = args[0]
expected_request = firestore_pb2.BatchGetDocumentsRequest(
database=database, documents=documents)
self.assertEqual(expected_request, actual_request)
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
@mock.patch('google.gax.config.create_stub', spec=True)
def test_batch_get_documents_exception(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
database = client.database_root_path('[PROJECT]', '[DATABASE]')
documents = []
# Mock exception response
grpc_stub.BatchGetDocuments.side_effect = CustomException()
self.assertRaises(errors.GaxError, client.batch_get_documents,
database, documents)
@mock.patch('google.gax.config.create_stub', spec=True)
def test_begin_transaction(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
database = client.database_root_path('[PROJECT]', '[DATABASE]')
# Mock response
transaction = b'-34'
expected_response = {'transaction': transaction}
expected_response = firestore_pb2.BeginTransactionResponse(
**expected_response)
grpc_stub.BeginTransaction.return_value = expected_response
response = client.begin_transaction(database)
self.assertEqual(expected_response, response)
grpc_stub.BeginTransaction.assert_called_once()
args, kwargs = grpc_stub.BeginTransaction.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(kwargs), 1)
self.assertIn('metadata', kwargs)
actual_request = args[0]
expected_request = firestore_pb2.BeginTransactionRequest(
database=database)
self.assertEqual(expected_request, actual_request)
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
@mock.patch('google.gax.config.create_stub', spec=True)
def test_begin_transaction_exception(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
database = client.database_root_path('[PROJECT]', '[DATABASE]')
# Mock exception response
grpc_stub.BeginTransaction.side_effect = CustomException()
self.assertRaises(errors.GaxError, client.begin_transaction, database)
@mock.patch('google.gax.config.create_stub', spec=True)
def test_commit(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
database = client.database_root_path('[PROJECT]', '[DATABASE]')
writes = []
# Mock response
expected_response = {}
expected_response = firestore_pb2.CommitResponse(**expected_response)
grpc_stub.Commit.return_value = expected_response
response = client.commit(database, writes)
self.assertEqual(expected_response, response)
grpc_stub.Commit.assert_called_once()
args, kwargs = grpc_stub.Commit.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(kwargs), 1)
self.assertIn('metadata', kwargs)
actual_request = args[0]
expected_request = firestore_pb2.CommitRequest(
database=database, writes=writes)
self.assertEqual(expected_request, actual_request)
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
@mock.patch('google.gax.config.create_stub', spec=True)
def test_commit_exception(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
database = client.database_root_path('[PROJECT]', '[DATABASE]')
writes = []
# Mock exception response
grpc_stub.Commit.side_effect = CustomException()
self.assertRaises(errors.GaxError, client.commit, database, writes)
@mock.patch('google.gax.config.create_stub', spec=True)
def test_rollback(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
database = client.database_root_path('[PROJECT]', '[DATABASE]')
transaction = b'-34'
client.rollback(database, transaction)
grpc_stub.Rollback.assert_called_once()
args, kwargs = grpc_stub.Rollback.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(kwargs), 1)
self.assertIn('metadata', kwargs)
actual_request = args[0]
expected_request = firestore_pb2.RollbackRequest(
database=database, transaction=transaction)
self.assertEqual(expected_request, actual_request)
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
@mock.patch('google.gax.config.create_stub', spec=True)
def test_rollback_exception(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
database = client.database_root_path('[PROJECT]', '[DATABASE]')
transaction = b'-34'
# Mock exception response
grpc_stub.Rollback.side_effect = CustomException()
self.assertRaises(errors.GaxError, client.rollback, database,
transaction)
@mock.patch('google.gax.config.create_stub', spec=True)
def test_run_query(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]',
'[ANY_PATH]')
# Mock response
transaction = b'-34'
skipped_results = 880286183
expected_response = {
'transaction': transaction,
'skipped_results': skipped_results
}
expected_response = firestore_pb2.RunQueryResponse(**expected_response)
grpc_stub.RunQuery.return_value = iter([expected_response])
response = client.run_query(parent)
resources = list(response)
self.assertEqual(1, len(resources))
self.assertEqual(expected_response, resources[0])
grpc_stub.RunQuery.assert_called_once()
args, kwargs = grpc_stub.RunQuery.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(kwargs), 1)
self.assertIn('metadata', kwargs)
actual_request = args[0]
expected_request = firestore_pb2.RunQueryRequest(parent=parent)
self.assertEqual(expected_request, actual_request)
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
@mock.patch('google.gax.config.create_stub', spec=True)
def test_run_query_exception(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]',
'[ANY_PATH]')
# Mock exception response
grpc_stub.RunQuery.side_effect = CustomException()
self.assertRaises(errors.GaxError, client.run_query, parent)
@mock.patch('google.gax.config.create_stub', spec=True)
def test_write(self, mock_create_stub):
# Mock gRPC layer
grpc_stub = mock.Mock()
mock_create_stub.return_value = grpc_stub
client = firestore_client.FirestoreClient()
# Mock request
database = client.database_root_path('[PROJECT]', '[DATABASE]')
request = {'database': database}
requests = [request]
# Mock response
stream_id = 'streamId-315624902'
stream_token = b'<PASSWORD>'
expected_response = {
| |
#############################################################################
#
# Author: <NAME>, <NAME>
#
# Copyright: <NAME> TSRI 2000
#
#############################################################################
#
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/autodpfCommands.py,v 1.100.2.5 2016/02/23 18:46:45 annao Exp $
#
# $Id: autodpfCommands.py,v 1.100.2.5 2016/02/23 18:46:45 annao Exp $
#
#
#
#
#
#
"""
This Module facilitates producing a docking parameter file for AutoDock. The steps in this process are:
* Selecting the macromolecule filename: The user can select the macromolecule for autodpf in three ways: it can be chosen from molecules previously added to the moleculeViewer, it can be picked as a PDB file, or it can be picked as a MOL2 file:
o Choose Macromol...
o Select PDB Macromolecule
o Select MOL2 Macromolecule
* Selecting the small molecule which has been previously formatted by AutoTors:
o Via Reading a PDBQ-File which adds the ligand to the viewer
* The user sets parameters pertaining to the small molecule
o Checking that a grid map exists for each of the ligand atom types
o Indicating whether a floating grid map exists
o Setting the initial translation of the small molecule
- by choosing the 'random' option which sets a random starting position for the ligand
- by entering the desired coordinates in the entry
o Setting the initial quaternion of the small molecule
- by choosing the 'random' option which sets a random starting quaternion.
- by entering the desired initial quaternion -Qx,Qy,Qz,Qw in the entry. Qx, Qy, Qz define the unit vector in the direction of rigid body rotation and Qw the angle of rotation about this unit vector.
o Setting the coefficient of the torsional DOF
o By choosing to set the initial dihedrals for the small molecule or not: If not, AutoDock assumes that the chi1, chi2, chi3 etc are all zero and does not change the initial ligand torsion angles. If the user chooses to set the initial dihedrals, he further chooses:
- for them to be randomly assigned
- an initial relative dihedral angle for each active torsion in the ligand.
o The user can specify two types of torsion constraints for the ligand:
 - Gaussian constraints which use an inverted Gaussian bell curve to calculate the energy function input of the constraint. This type of constraint is specified by two floating point numbers: the preferred angle in the range -180 to +180 degrees and the half-width which is the difference between two angles at which the energy is half the barrier PLUS an integer which identifies the torsion according to the list at the top of the AutoTors-generated input ligand PDBQ file. More than one constraint of this type may be specified for a single torsion.
 - Hard torsion constraints may also be specified. These differ from the previous type in that the torsion is never allowed to take values beyond the range defined and in that the second parameter is the full width of the allowed range of torsion angles. Moreover, only one constraint of this type is allowed per torsion.
o If the user specifies torsion constraints, he may also specify the height of the energy barrier to be applied to these constraints.
o If the user specifies Gaussian torsion constraints, he may also specify whether to store and output the torsion energies
* The user sets parameters pertaining to docking algorithm(s) he wishes to use
:
o Setting Simulated Annealing parameters.
o Setting Genetic Algorithm parameters (GA).
o Setting Local Search parameters (LS).
It is important to remember that any of these may be used alone but only GA and LS may be used together
* The user adjusts these additional parameters:
o the step sizes of translation, quaternion rotation and dihedral torsion change.
o energy parameters including energy assigned to atoms outside the grid volume, the maximum allowable initial energy and the maximum number of retries.
o output format parameters including the level of detail for the output, the rms cluster tolerance, the reference file for rms calculations and whether to do symmetry checking in the rms calculations.
* The user selects which kind of docking parameter file to write :
o Simulated Annealing
o GA
o LS
o GALS
* The results of the previous steps are written to a file. The user selects a filename via a filebrowser. By convention, the file should have a .dpf extension. If no macromolecule has been selected, it is not possible to write a grid parameter file and the user gets a warning message to that effect. Likewise, the types of the maps to be calculated must be set before the grid parameter file is written and a warning message to this effect appears if the types have not been set.
(A checkbutton, "DONE", allows the user to withdraw the autoTools menuBar)
"""
import os, numpy
import Pmw
from ViewerFramework.VFCommand import CommandGUI
from mglutil.gui.InputForm.Tk.gui import InputFormDescr
from mglutil.util.callback import CallBackFunction
from Pmv.mvCommand import MVCommand
from MolKit.tree import TreeNode, TreeNodeSet
from MolKit.molecule import AtomSet
from MolKit import Read
from Pmv.guiTools import MoleculeChooser
from tkinter.simpledialog import SimpleDialog
import types, _py2k_string as string, tkinter,os
from .energyConstants import Rij, epsij, SolVol, SolPar, SolCon
from .DockingParameters import DockingParameters, simulated_annealing_list,\
local_search_list, genetic_algorithm_list, cluster_list,\
genetic_algorithm_local_search_list
from .DockingParameters import simulated_annealing_list4,\
local_search_list4, genetic_algorithm_list4,\
genetic_algorithm_local_search_list4_with_parameter_file, \
genetic_algorithm_local_search_list4
from .DockingParameters import simulated_annealing_list4_1,\
local_search_list4_1, genetic_algorithm_list4_1,\
genetic_algorithm_local_search_list4_1_with_parameter_file, \
genetic_algorithm_local_search_list4_1
from .DockingParameters import simulated_annealing_list4_2,\
local_search_list4_2, genetic_algorithm_list4_2,\
genetic_algorithm_local_search_list4_2_with_parameter_file, \
genetic_algorithm_local_search_list4_2, epdb_list4_2
from .DockingParameters import ConfigFileMaker
from .autotorsCommands import checkMolCharges
# these are the texts on menubuttons, menu entries etc:
# (key -> label used when building the AutoTools "Docking" menu hierarchy)
menuText = {
    # menuText['AutoDpfMB'] = ' Set Docking Parameters '
    'AutoDpfMB': 'Docking',
    'ReadDpfMB': 'Open DPF...',
    'MacromoleculeMB': 'Macromolecule',
    'ReadMacro': 'Set Filename...(AD3)',
    'ChooseMacro': 'Choose...(AD3)',
    'ReadMacro4': 'Set Rigid Filename...',
    'ReadFlexRes4': 'Set Flexible Residues Filename...',
    'ChooseMacro4': 'Choose...',
    'SetLigandParmsMB': 'Ligand',
    'ReadLigand4': 'Open...',
    'ChooseLigand4': 'Choose...',
    'AdjustLigand4': 'Ligand Parameters...',
    'ReadLigand': 'Open...(AD3)',
    'ChooseLigand': 'Choose...(AD3)',
    'AdjustLigand': 'Ligand Parameters...(AD3)',
    'SetSearchParmsMB': 'Search Parameters',
    'SA': 'Simulated Annealing...',
    'GA': 'Genetic Algorithm...',
    'LS': 'Local Search...',
    'SetDockingRunParmsMB': 'Docking Parameters...',
    'OtherOptionsMB': 'Other Options...',
    'SetAutoDock4Parameters': 'AutoDock4 Parameters',
    'SetAutoDock41Parameters': 'AutoDock4.2 Parameters',
    'WriteDpfMB': 'Output',
    'WriteSA4': 'Simulated Annealing...',
    'WriteGA4': 'Genetic Algorithm...',
    'WriteLS4': 'Local Search...',
    'WriteGALS4': 'Lamarckian GA...',
    'WriteSA41': 'Simulated Annealing(4.2)...',
    'WriteGA41': 'Genetic Algorithm(4.2)...',
    'WriteLS41': 'Local Search(4.2)...',
    'WriteGALS41': 'Lamarckian GA(4.2)...',
    'WriteEPDB41': 'Evaluate Energy(EPDB)...',
    'WriteCONFIG41': 'Vina Config(config.txt)...',
    'WriteCluster4': 'Clustering...',
    'WriteSA': 'Simulated Annealing...(AD3)',
    'WriteGA': 'Genetic Algorithm...(AD3)',
    'WriteLS': 'Local Search...(AD3)',
    'WriteGALS': 'Lamarckian GA...(AD3)',
    'WriteCluster': 'Clustering...(AD3)',
    'EditDpfMB': 'Edit DPF...',
}
def checkHasDpo(vf):
    """Ensure *vf* carries a ``dpo`` (DockingParameters) and a ``vinaDict``."""
    if not hasattr(vf, 'dpo'):
        dpo = DockingParameters()
        # the dpo needs a back-reference to its viewer framework
        dpo.vf = vf
        vf.dpo = dpo
    if not hasattr(vf, 'vinaDict'):
        vf.vinaDict = {}
class DpfSetDpo(MVCommand):
    """Write keyword values into the viewer's docking parameter object.

    Each keyword is stored as ``vf.dpo[key]['value']``; Vina-related keys
    are mirrored into ``vf.vinaDict`` as well.
    """

    def onAddCmdToViewer(self):
        checkHasDpo(self.vf)
        if not hasattr(self.vf, 'vinaDict'):
            self.vf.vinaDict = {}

    def doit(self, *args, **kw):
        """Apply every keyword; return 'ERROR' when no keywords were given."""
        if not kw:
            return 'ERROR'
        for key, val in kw.items():
            self.vf.dpo[key]['value'] = val
            if key in ('ligand', 'receptor', 'flexres', 'flex', 'center', 'out', 'size'):
                # vinaDict starts out empty, so create the per-key entry on
                # first use; the original direct indexing raised KeyError here.
                self.vf.vinaDict.setdefault(key, {})['value'] = val
class DpfLoadDefaults(MVCommand):
    """Allow the user to select a DPF file whose contents set dpo defaults."""

    def onAddCmdToViewer(self):
        checkHasDpo(self.vf)

    def __call__(self, dpffile, **kw):
        """None <- ADdpf_read(dpffile)

        dpffile: name of the file whose contents are used to set values in the dpo.
        """
        # Fail early and say which file is missing (the original raised a
        # bare IOError with no message).
        if not os.path.exists(dpffile):
            raise IOError("DPF file not found: %s" % dpffile)
        self.doitWrapper(*(dpffile,), **kw)

    def doit(self, dpffile):
        self.vf.dpo.read(dpffile)

    def guiCallback(self):
        """called each time the 'select defaultList file' button is pressed"""
        dpfFile = self.vf.askFileOpen(types=[('select default filename:', '*.dpf')],
                                      title='DPF Default File:')
        if dpfFile:
            self.doitWrapper(dpfFile, log=1, redraw=0)
# Menu wiring: "Docking" -> "Open DPF..." invokes DpfLoadDefaults.
DpfLoadDefaultsGUI = CommandGUI()
DpfLoadDefaultsGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'], menuText['ReadDpfMB'])
class DpfMacroSelector(MVCommand):
    """Let the user pick a PDBQS macromolecule file for the docking setup (AD3)."""

    def onAddCmdToViewer(self):
        checkHasDpo(self.vf)
        if not hasattr(self.vf, 'vinaDict'):
            self.vf.vinaDict = {}

    def guiCallback(self):
        """called each time the 'select pdb macromolecule' button is pressed"""
        macroFile = self.vf.askFileOpen(types=[('PDBQS files', '*.pdbqs')],
                                        title='PDBQS Macromolecule File:')
        if macroFile:
            self.doitWrapper(macroFile, log=1, redraw=0)

    def __call__(self, macroFile, **kw):
        """None <- ADdpf_readMacromolecule(macroFile)

        macroFile: file containing the receptor.
        """
        # Include the offending path in the error (was a bare IOError).
        if not os.path.exists(macroFile):
            raise IOError("macromolecule file not found: %s" % macroFile)
        self.doitWrapper(*(macroFile,), **kw)

    def doit(self, macroFile):
        """Record the receptor stem on the dpo; reject non-pdbqs files."""
        filename = os.path.basename(macroFile)
        # str methods replace the legacy _py2k_string helpers
        # (string.split / string.rfind) with identical semantics.
        if filename.split('.')[-1] != 'pdbqs':
            self.vf.warningMsg("macromolecule must be in pdbqs format!")
            return 'ERROR'
        rnum = filename.rfind('.')
        if rnum < 0:
            self.vf.warningMsg("illegal filename " + filename)
            return
        # setting dpo.molstem -> setting 'fld' and 'map'
        self.vf.dpo.molstem = filename[:rnum]
        # call set_receptor to set 'fld'
        self.vf.dpo.set_receptor(filename)
# Menu wiring: "Docking" -> "Macromolecule" -> "Set Filename...(AD3)".
DpfMacroSelectorGUI=CommandGUI()
DpfMacroSelectorGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'],\
menuText['ReadMacro'], cascadeName = menuText['MacromoleculeMB'])
class Dpf4MacroSelector(MVCommand):
""" allows user to select a filename for the macromolecule"""
def onAddCmdToViewer(self):
checkHasDpo(self.vf)
if not hasattr(self.vf, 'vinaDict'): self.vf.vinaDict={}
def onRemoveObjFromViewer(self, obj):
if hasattr(self.vf.dpo, 'molstem') and self.vf.dpo.molstem==obj.name:
delattr(self.vf.dpo, 'molstem')
def guiCallback(self):
"""called each time the 'select pdbqt macromolecule filename' button is pressed"""
macroFile = self.vf.askFileOpen(types=[('PDBQT | |
Commit.from_string(self.make_commit_text(encoding=b"UTF-8"))
self.assertEqual(b"UTF-8", c.encoding)
def test_check(self):
self.assertCheckSucceeds(Commit, self.make_commit_text())
self.assertCheckSucceeds(Commit, self.make_commit_text(parents=None))
self.assertCheckSucceeds(Commit, self.make_commit_text(encoding=b"UTF-8"))
self.assertCheckFails(Commit, self.make_commit_text(tree=b"xxx"))
self.assertCheckFails(Commit, self.make_commit_text(parents=[a_sha, b"xxx"]))
bad_committer = b"some guy without an email address 1174773719 +0000"
self.assertCheckFails(Commit, self.make_commit_text(committer=bad_committer))
self.assertCheckFails(Commit, self.make_commit_text(author=bad_committer))
self.assertCheckFails(Commit, self.make_commit_text(author=None))
self.assertCheckFails(Commit, self.make_commit_text(committer=None))
self.assertCheckFails(
Commit, self.make_commit_text(author=None, committer=None)
)
def test_check_duplicates(self):
# duplicate each of the header fields
for i in range(5):
lines = self.make_commit_lines(parents=[a_sha], encoding=b"UTF-8")
lines.insert(i, lines[i])
text = b"\n".join(lines)
if lines[i].startswith(b"parent"):
# duplicate parents are ok for now
self.assertCheckSucceeds(Commit, text)
else:
self.assertCheckFails(Commit, text)
def test_check_order(self):
lines = self.make_commit_lines(parents=[a_sha], encoding=b"UTF-8")
headers = lines[:5]
rest = lines[5:]
# of all possible permutations, ensure only the original succeeds
for perm in permutations(headers):
perm = list(perm)
text = b"\n".join(perm + rest)
if perm == headers:
self.assertCheckSucceeds(Commit, text)
else:
self.assertCheckFails(Commit, text)
def test_check_commit_with_unparseable_time(self):
identity_with_wrong_time = (
b"<NAME> <<EMAIL>> 18446743887488505614+42707004"
)
# Those fail at reading time
self.assertCheckFails(
Commit,
self.make_commit_text(
author=default_committer, committer=identity_with_wrong_time
),
)
self.assertCheckFails(
Commit,
self.make_commit_text(
author=identity_with_wrong_time, committer=default_committer
),
)
def test_check_commit_with_overflow_date(self):
"""Date with overflow should raise an ObjectFormatException when checked"""
identity_with_wrong_time = (
b"<NAME> <<EMAIL>> 18446743887488505614 +42707004"
)
commit0 = Commit.from_string(
self.make_commit_text(
author=identity_with_wrong_time, committer=default_committer
)
)
commit1 = Commit.from_string(
self.make_commit_text(
author=default_committer, committer=identity_with_wrong_time
)
)
# Those fails when triggering the check() method
for commit in [commit0, commit1]:
with self.assertRaises(ObjectFormatException):
commit.check()
def test_mangled_author_line(self):
"""Mangled author line should successfully parse"""
author_line = (
b'<NAME> <<EMAIL>> <"<NAME> '
b'<<EMAIL>>"> 1197475547 -0500'
)
expected_identity = (
b'<NAME> <<EMAIL>> <"<NAME> '
b'<<EMAIL>>">'
)
commit = Commit.from_string(self.make_commit_text(author=author_line))
# The commit parses properly
self.assertEqual(commit.author, expected_identity)
# But the check fails because the author identity is bogus
with self.assertRaises(ObjectFormatException):
commit.check()
    def test_parse_gpgsig(self):
        """A multi-line gpgsig header parses: the signature is captured on
        the commit and the message ("foo") follows it untouched."""
        c = Commit.from_string(
            b"""tree aaff74984cccd156a469afa7d9ab10e4777beb24
author <NAME> <<EMAIL>> 1412179807 +0200
committer <NAME> <<EMAIL>> 1412179807 +0200
gpgsig -----BEGIN PGP SIGNATURE-----
Version: GnuPG v1
iQIcBAABCgAGBQJULCdfAAoJEACAbyvXKaRXuKwP/RyP9PA49uAvu8tQVCC/uBa8
vi975+xvO14R8Pp8k2nps7lSxCdtCd+xVT1VRHs0wNhOZo2YCVoU1HATkPejqSeV
<KEY>
=X6RT
-----END PGP SIGNATURE-----
foo
"""
        )
        # The signature is stripped out of the message body...
        self.assertEqual(b"foo\n", c.message)
        self.assertEqual([], c.extra)
        # ...and stored verbatim (minus the "gpgsig " prefix) on the commit.
        self.assertEqual(
            b"""-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1
iQIcBAABCgAGBQJULCdfAAoJEACAbyvXKaRXuKwP/RyP9PA49uAvu8tQVCC/uBa8
vi975+xvO14R8Pp8k2nps7lSxCdtCd+xVT1VRHs0wNhOZo2YCVoU1HATkPejqSeV
NScTHcxnk4/+bxyfk14xvJkNp7FlQ3npmBkA+lbV0Ubr33rvtIE5jiJPyz+SgWAg
xdBG2TojV0squj00GoH/euK6aX7GgZtwdtpTv44haCQdSuPGDcI4TORqR6YSqvy3
GPE+3ZqXPFFb+KILtimkxitdwB7CpwmNse2vE3rONSwTvi8nq3ZoQYNY73CQGkUy
qoFU0pDtw87U3niFin1ZccDgH0bB6624sLViqrjcbYJeg815Htsu4rmzVaZADEVC
XhIO4MThebusdk0AcNGjgpf3HRHk0DPMDDlIjm+Oao0cqovvF6VyYmcb0C+RmhJj
dodLXMNmbqErwTk3zEkW0yZvNIYXH7m9SokPCZa4eeIM7be62X6h1mbt0/IU6Th+
v18fS0iTMP/Viug5und+05C/v04kgDo0CPphAbXwWMnkE4B6Tl9sdyUYXtvQsL7x
0+WP1gL27ANqNZiI07Kz/BhbBAQI/+2TFT7oGr0AnFPQ5jHp+3GpUf6OKuT1wT3H
ND189UFuRuubxb42vZhpcXRbqJVWnbECTKVUPsGZqat3enQUB63uM4i6/RdONDZA
fDeF1m4qYs+cUXKNUZ03
=X6RT
-----END PGP SIGNATURE-----""",
            c.gpgsig,
        )
    def test_parse_header_trailing_newline(self):
        """A gpgsig header whose value ends with a newline must keep it.

        The parsed gpgsig below is expected to end in '\\n', exercising the
        trailing-newline round-trip of multi-line headers.
        """
        c = Commit.from_string(
            b"""\
tree a7d6277f78d3ecd0230a1a5df6db00b1d9c521ac
parent c09b6dec7a73760fbdb478383a3c926b18db8bbe
author <NAME> <<EMAIL>> 1461964057 -1000
committer <NAME> <<EMAIL>> 1461964057 -1000
gpgsig -----BEGIN PGP SIGNATURE-----
wsBcBAABCAAQBQJXI80ZCRA6pcNDcVZ70gAAarcIABs72xRX3FWeox349nh6ucJK
CtwmBTusez2Zwmq895fQEbZK7jpaGO5TRO4OvjFxlRo0E08UFx3pxZHSpj6bsFeL
hHsDXnCaotphLkbgKKRdGZo7tDqM84wuEDlh4MwNe7qlFC7bYLDyysc81ZX5lpMm
2MFF1TvjLAzSvkT7H1LPkuR3hSvfCYhikbPOUNnKOo0sYjeJeAJ/JdAVQ4mdJIM0
gl3REp9+A+qBEpNQI7z94Pg5Bc5xenwuDh3SJgHvJV6zBWupWcdB3fAkVd4TPnEZ
nHxksHfeNln9RKseIDcy4b2ATjhDNIJZARHNfr6oy4u3XPW4svRqtBsLoMiIeuI=
=ms6q
-----END PGP SIGNATURE-----
3.3.0 version bump and docs
"""
        )
        # The signature must not spill into extra headers.
        self.assertEqual([], c.extra)
        self.assertEqual(
            b"""\
-----BEGIN PGP SIGNATURE-----
wsBcBAABCAAQBQJXI80ZCRA6pcNDcVZ70gAAarcIABs72xRX3FWeox349nh6ucJK
CtwmBTusez2Zwmq895fQEbZK7jpaGO5TRO4OvjFxlRo0E08UFx3pxZHSpj6bsFeL
hHsDXnCaotphLkbgKKRdGZo7tDqM84wuEDlh4MwNe7qlFC7bYLDyysc81ZX5lpMm
2MFF1TvjLAzSvkT7H1LPkuR3hSvfCYhikbPOUNnKOo0sYjeJeAJ/JdAVQ4mdJIM0
gl3REp9+A+qBEpNQI7z94Pg5Bc5xenwuDh3SJgHvJV6zBWupWcdB3fAkVd4TPnEZ
nHxksHfeNln9RKseIDcy4b2ATjhDNIJZARHNfr6oy4u3XPW4svRqtBsLoMiIeuI=
=ms6q
-----END PGP SIGNATURE-----\n""",
            c.gpgsig,
        )
# Unsorted tree entries used as fixtures by TreeTests below:
# 'a.c' is a regular file, 'a' and 'a/c' are directories.
_TREE_ITEMS = {
    b"a.c": (0o100755, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
    b"a": (stat.S_IFDIR, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
    b"a/c": (stat.S_IFDIR, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
}

# The same entries in git's canonical tree order: directories sort as if
# their name had a trailing '/', which places 'a.c' before 'a' here.
_SORTED_TREE_ITEMS = [
    TreeEntry(b"a.c", 0o100755, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
    TreeEntry(b"a", stat.S_IFDIR, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
    TreeEntry(b"a/c", stat.S_IFDIR, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
]
class TreeTests(ShaFileCheckTests):
    """Tests for Tree: entry management, serialization, sorting and check().

    NOTE(review): a_sha, b_sha and tree_sha are module-level fixtures
    defined elsewhere in this file.
    """

    def test_add(self):
        # add() with (name, mode, sha) stores the entry; the raw format is
        # "<octal mode> <name>\0<20-byte binary sha>".
        myhexsha = b"d80c186a03f423a81b39df39dc87fd269736ca86"
        x = Tree()
        x.add(b"myname", 0o100755, myhexsha)
        self.assertEqual(x[b"myname"], (0o100755, myhexsha))
        self.assertEqual(b"100755 myname\0" + hex_to_sha(myhexsha), x.as_raw_string())

    def test_add_old_order(self):
        # The legacy argument order add(mode, name, sha) still works but is
        # deprecated; the warning is silenced for the duration of the call.
        myhexsha = b"d80c186a03f423a81b39df39dc87fd269736ca86"
        x = Tree()
        warnings.simplefilter("ignore", DeprecationWarning)
        try:
            x.add(0o100755, b"myname", myhexsha)
        finally:
            warnings.resetwarnings()
        self.assertEqual(x[b"myname"], (0o100755, myhexsha))
        self.assertEqual(b"100755 myname\0" + hex_to_sha(myhexsha), x.as_raw_string())

    def test_simple(self):
        # Item assignment with a (mode, hexsha) tuple is equivalent to add().
        myhexsha = b"d80c186a03f423a81b39df39dc87fd269736ca86"
        x = Tree()
        x[b"myname"] = (0o100755, myhexsha)
        self.assertEqual(b"100755 myname\0" + hex_to_sha(myhexsha), x.as_raw_string())
        self.assertEqual(b"100755 myname\0" + hex_to_sha(myhexsha), bytes(x))

    def test_tree_update_id(self):
        # The cached object id must be recomputed after a mutation.
        x = Tree()
        x[b"a.c"] = (0o100755, b"d80c186a03f423a81b39df39dc87fd269736ca86")
        self.assertEqual(b"0c5c6bc2c081accfbc250331b19e43b904ab9cdd", x.id)
        x[b"a.b"] = (stat.S_IFDIR, b"d80c186a03f423a81b39df39dc87fd269736ca86")
        self.assertEqual(b"07bfcb5f3ada15bbebdfa3bbb8fd858a363925c8", x.id)

    def test_tree_iteritems_dir_sort(self):
        # items() must come back in git's directory-aware sort order.
        x = Tree()
        for name, item in _TREE_ITEMS.items():
            x[name] = item
        self.assertEqual(_SORTED_TREE_ITEMS, x.items())

    def test_tree_items_dir_sort(self):
        x = Tree()
        for name, item in _TREE_ITEMS.items():
            x[name] = item
        self.assertEqual(_SORTED_TREE_ITEMS, x.items())

    def _do_test_parse_tree(self, parse_tree):
        # Parameterized over the pure-Python and C parse_tree
        # implementations via functest_builder/ext_functest_builder below.
        dir = os.path.join(os.path.dirname(__file__), "data", "trees")
        o = Tree.from_path(hex_to_filename(dir, tree_sha))
        self.assertEqual(
            [(b"a", 0o100644, a_sha), (b"b", 0o100644, b_sha)],
            list(parse_tree(o.as_raw_string())),
        )
        # test a broken tree that has a leading 0 on the file mode
        broken_tree = b"0100644 foo\0" + hex_to_sha(a_sha)

        def eval_parse_tree(*args, **kwargs):
            # Force full consumption of the (possibly lazy) parse result.
            return list(parse_tree(*args, **kwargs))

        # Lenient parsing accepts the leading zero; strict mode rejects it.
        self.assertEqual([(b"foo", 0o100644, a_sha)], eval_parse_tree(broken_tree))
        self.assertRaises(
            ObjectFormatException, eval_parse_tree, broken_tree, strict=True
        )

    test_parse_tree = functest_builder(_do_test_parse_tree, _parse_tree_py)
    test_parse_tree_extension = ext_functest_builder(_do_test_parse_tree, parse_tree)

    def _do_test_sorted_tree_items(self, sorted_tree_items):
        def do_sort(entries):
            return list(sorted_tree_items(entries, False))

        actual = do_sort(_TREE_ITEMS)
        self.assertEqual(_SORTED_TREE_ITEMS, actual)
        self.assertIsInstance(actual[0], TreeEntry)

        # C/Python implementations may differ in specific error types, but
        # should all error on invalid inputs.
        # For example, the C implementation has stricter type checks, so may
        # raise TypeError where the Python implementation raises
        # AttributeError.
        errors = (TypeError, ValueError, AttributeError)
        self.assertRaises(errors, do_sort, b"foo")
        self.assertRaises(errors, do_sort, {b"foo": (1, 2, 3)})

        myhexsha = b"d80c186a03f423a81b39df39dc87fd269736ca86"
        self.assertRaises(errors, do_sort, {b"foo": (b"xxx", myhexsha)})
        self.assertRaises(errors, do_sort, {b"foo": (0o100755, 12345)})

    test_sorted_tree_items = functest_builder(
        _do_test_sorted_tree_items, _sorted_tree_items_py
    )
    test_sorted_tree_items_extension = ext_functest_builder(
        _do_test_sorted_tree_items, sorted_tree_items
    )

    def _do_test_sorted_tree_items_name_order(self, sorted_tree_items):
        # With name_order=True entries sort purely by byte-wise name,
        # ignoring the directory trailing-'/' rule ('a' < 'a.c' < 'a/c').
        self.assertEqual(
            [
                TreeEntry(
                    b"a",
                    stat.S_IFDIR,
                    b"d80c186a03f423a81b39df39dc87fd269736ca86",
                ),
                TreeEntry(
                    b"a.c",
                    0o100755,
                    b"d80c186a03f423a81b39df39dc87fd269736ca86",
                ),
                TreeEntry(
                    b"a/c",
                    stat.S_IFDIR,
                    b"d80c186a03f423a81b39df39dc87fd269736ca86",
                ),
            ],
            list(sorted_tree_items(_TREE_ITEMS, True)),
        )

    test_sorted_tree_items_name_order = functest_builder(
        _do_test_sorted_tree_items_name_order, _sorted_tree_items_py
    )
    test_sorted_tree_items_name_order_extension = ext_functest_builder(
        _do_test_sorted_tree_items_name_order, sorted_tree_items
    )

    def test_check(self):
        t = Tree
        sha = hex_to_sha(a_sha)

        # filenames
        self.assertCheckSucceeds(t, b"100644 .a\0" + sha)
        self.assertCheckFails(t, b"100644 \0" + sha)
        self.assertCheckFails(t, b"100644 .\0" + sha)
        self.assertCheckFails(t, b"100644 a/a\0" + sha)
        self.assertCheckFails(t, b"100644 ..\0" + sha)
        self.assertCheckFails(t, b"100644 .git\0" + sha)

        # modes
        self.assertCheckSucceeds(t, b"100644 a\0" + sha)
        self.assertCheckSucceeds(t, b"100755 a\0" + sha)
        self.assertCheckSucceeds(t, b"160000 a\0" + sha)
        # TODO more whitelisted modes
        self.assertCheckFails(t, b"123456 a\0" + sha)
        self.assertCheckFails(t, b"123abc a\0" + sha)
        # should fail check, but parses ok
        self.assertCheckFails(t, b"0100644 foo\0" + sha)

        # shas (wrong length, embedded NUL/newline)
        self.assertCheckFails(t, b"100644 a\0" + (b"x" * 5))
        self.assertCheckFails(t, b"100644 a\0" + (b"x" * 18) + b"\0")
        self.assertCheckFails(t, b"100644 a\0" + (b"x" * 21) + b"\n100644 b\0" + sha)

        # ordering: duplicate names and out-of-order entries must fail
        sha2 = hex_to_sha(b_sha)
        self.assertCheckSucceeds(t, b"100644 a\0" + sha + b"\n100644 b\0" + sha)
        self.assertCheckSucceeds(t, b"100644 a\0" + sha + b"\n100644 b\0" + sha2)
        self.assertCheckFails(t, b"100644 a\0" + sha + b"\n100755 a\0" + sha2)
        self.assertCheckFails(t, b"100644 b\0" + sha2 + b"\n100644 a\0" + sha)

    def test_iter(self):
        # Iterating a Tree yields its entry names.
        t = Tree()
        t[b"foo"] = (0o100644, a_sha)
        self.assertEqual(set([b"foo"]), set(t))
class TagSerializeTests(TestCase):
    """Serialization of Tag objects to their raw git representation."""

    def test_serialize_simple(self):
        # A fully-populated annotated tag serializes its headers in order,
        # followed by a blank line and the message body.
        x = make_object(
            Tag,
            tagger=b"<NAME> <<EMAIL>>",
            name=b"0.1",
            message=b"Tag 0.1",
            object=(Blob, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
            tag_time=423423423,
            tag_timezone=0,
        )
        self.assertEqual(
            (
                b"object d80c186a03f423a81b39df39dc87fd269736ca86\n"
                b"type blob\n"
                b"tag 0.1\n"
                b"tagger <NAME> <<EMAIL>> "
                b"423423423 +0000\n"
                b"\n"
                b"Tag 0.1"
            ),
            x.as_raw_string(),
        )

    def test_serialize_none_message(self):
        # With message=None the blank separator line and body are omitted.
        # NOTE(review): the expected tagger reads '<NAME>ij' while the input
        # is '<NAME>' -- looks like a redaction artifact; confirm upstream.
        x = make_object(
            Tag,
            tagger=b"<NAME> <<EMAIL>>",
            name=b"0.1",
            message=None,
            object=(Blob, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
            tag_time=423423423,
            tag_timezone=0,
        )
        self.assertEqual(
            (
                b"object d80c186a03f423a81b39df39dc87fd269736ca86\n"
                b"type blob\n"
                b"tag 0.1\n"
                b"tagger <NAME>ij <<EMAIL>> "
                b"423423423 +0000\n"
            ),
            x.as_raw_string(),
        )
# Default header values shared by the TagParseTests fixtures below:
# a well-formed tagger identity with timestamp/timezone, and a signed
# tag message (signature text is part of the message body, not a header).
default_tagger = (
    b"<NAME> <<EMAIL>> " b"1183319674 -0700"
)
default_message = b"""Linux 2.6.22-rc7
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql
OK2XeQOiEeXtT76rV4t2WR4=
=ivrA
-----END PGP SIGNATURE-----
"""
class TagParseTests(ShaFileCheckTests):
    """Parsing and check() behaviour for annotated Tag objects."""

    def make_tag_lines(
        self,
        object_sha=b"a38d6181ff27824c79fc7df825164a212eff6a3f",
        object_type_name=b"commit",
        name=b"v2.6.22-rc7",
        tagger=default_tagger,
        message=default_message,
    ):
        # Build the raw tag line by line; passing None for any field omits
        # it, letting the check() tests exercise missing-header cases.
        lines = []
        if object_sha is not None:
            lines.append(b"object " + object_sha)
        if object_type_name is not None:
            lines.append(b"type " + object_type_name)
        if name is not None:
            lines.append(b"tag " + name)
        if tagger is not None:
            lines.append(b"tagger " + tagger)
        if message is not None:
            # Blank line separates the headers from the message body.
            lines.append(b"")
            lines.append(message)
        return lines

    def make_tag_text(self, **kwargs):
        return b"\n".join(self.make_tag_lines(**kwargs))

    def test_parse(self):
        x = Tag()
        x.set_raw_string(self.make_tag_text())
        self.assertEqual(
            b"<NAME> <<EMAIL>>", x.tagger
        )
        self.assertEqual(b"v2.6.22-rc7", x.name)
        object_type, object_sha = x.object
        self.assertEqual(b"a38d6181ff27824c79fc7df825164a212eff6a3f", object_sha)
        self.assertEqual(Commit, object_type)
        # NOTE: utcfromtimestamp is deprecated since Python 3.12; kept to
        # match the module's existing style.
        self.assertEqual(
            datetime.datetime.utcfromtimestamp(x.tag_time),
            datetime.datetime(2007, 7, 1, 19, 54, 34),
        )
        self.assertEqual(-25200, x.tag_timezone)

    def test_parse_no_tagger(self):
        # The tagger header is optional; its absence leaves tagger/tag_time
        # as None while the rest of the tag still parses.
        x = Tag()
        x.set_raw_string(self.make_tag_text(tagger=None))
        self.assertEqual(None, x.tagger)
        self.assertEqual(b"v2.6.22-rc7", x.name)
        self.assertEqual(None, x.tag_time)

    def test_parse_no_message(self):
        # A tag without a message still carries all header information.
        x = Tag()
        x.set_raw_string(self.make_tag_text(message=None))
        self.assertEqual(None, x.message)
        self.assertEqual(
            b"<NAME> <<EMAIL>>", x.tagger
        )
        self.assertEqual(
            datetime.datetime.utcfromtimestamp(x.tag_time),
            datetime.datetime(2007, 7, 1, 19, 54, 34),
        )
        self.assertEqual(-25200, x.tag_timezone)
        self.assertEqual(b"v2.6.22-rc7", x.name)

    def test_check(self):
        # check() requires object, type and tag headers, a known type name,
        # a valid sha and a parseable tagger identity.
        self.assertCheckSucceeds(Tag, self.make_tag_text())
        self.assertCheckFails(Tag, self.make_tag_text(object_sha=None))
        self.assertCheckFails(Tag, self.make_tag_text(object_type_name=None))
        self.assertCheckFails(Tag, self.make_tag_text(name=None))
        self.assertCheckFails(Tag, self.make_tag_text(name=b""))
        self.assertCheckFails(Tag, self.make_tag_text(object_type_name=b"foobar"))
        self.assertCheckFails(
            Tag,
            self.make_tag_text(
                tagger=b"some guy without an email address 1183319674 -0700"
            ),
        )
        self.assertCheckFails(
            Tag,
            self.make_tag_text(
                tagger=(
                    b"<NAME> <<EMAIL>> "
                    b"Sun 7 Jul 2007 12:54:34 +0700"
                )
            ),
        )
        self.assertCheckFails(Tag, self.make_tag_text(object_sha=b"xxx"))

    def test_check_tag_with_unparseable_field(self):
        # Timestamp and timezone fused together ("423423+0000") cannot be
        # split into a valid time field.
        self.assertCheckFails(
            Tag,
            self.make_tag_text(
                tagger=(
                    b"<NAME> <<EMAIL>> "
                    b"423423+0000"
                )
            ),
        )

    def test_check_tag_with_overflow_time(self):
        """Date with overflow should raise an ObjectFormatException when checked"""
        author = "<NAME> <<EMAIL>> %s +0000" % (MAX_TIME + 1,)
        tag = Tag.from_string(self.make_tag_text(tagger=(author.encode())))
        with self.assertRaises(ObjectFormatException):
            tag.check()

    def test_check_duplicates(self):
        # duplicate each of the header fields
        for i in range(4):
            lines = self.make_tag_lines()
            lines.insert(i, lines[i])
            self.assertCheckFails(Tag, b"\n".join(lines))

    def test_check_order(self):
        lines = self.make_tag_lines()
        headers = lines[:4]
        rest = lines[4:]
        # of all possible permutations, ensure only the original succeeds
        for perm in permutations(headers):
            perm = list(perm)
            text = b"\n".join(perm + rest)
            if perm == headers:
                self.assertCheckSucceeds(Tag, text)
            else:
                self.assertCheckFails(Tag, text)

    def test_tree_copy_after_update(self):
        """Check Tree.id is correctly updated when the tree is copied after updated."""
        # NOTE(review): this exercises Tree, not Tag -- it may belong in
        # TreeTests; confirm before moving.
        shas = []
        tree = Tree()
        shas.append(tree.id)
        tree.add(b"data", 0o644, Blob().id)
        copied = tree.copy()
        shas.append(tree.id)
        shas.append(copied.id)
        self.assertNotIn(shas[0], shas[1:])
        self.assertEqual(shas[1], shas[2])
class CheckTests(TestCase):
def test_check_hexsha(self):
check_hexsha(a_sha, "failed to check good sha")
self.assertRaises(
ObjectFormatException, check_hexsha, b"1" * 39, "sha too short"
)
self.assertRaises(
ObjectFormatException, check_hexsha, b"1" * 41, "sha too long"
)
self.assertRaises(
ObjectFormatException,
check_hexsha,
b"x" * 40,
"invalid characters",
)
def test_check_identity(self):
check_identity(
b"<NAME> | |
# gh_stars: 10-100 (scraper metadata artifact; not part of the original source)
# coding: utf-8
from itertools import product
from collections import OrderedDict
import numpy as np
from sympy import Abs, S, cacheit
from sympy import Indexed, Matrix, ImmutableDenseMatrix
from sympy import expand
from sympy.core import Basic, Symbol
from sympy.core import Add, Mul, Pow
from sympy.core.expr import AtomicExpr
from sympy.core.containers import Tuple
from sympy.simplify.simplify import simplify
from sympde.core.basic import _coeffs_registery
from sympde.core.basic import CalculusFunction
from sympde.core.algebra import (Dot_1d,
Dot_2d, Inner_2d, Cross_2d,
Dot_3d, Inner_3d, Cross_3d)
from sympde.core.utils import random_string
from sympde.calculus import jump, avg, minus, plus
from sympde.calculus import Jump, is_zero
from sympde.calculus.core import _generic_ops, _diff_ops
from sympde.calculus.matrices import SymbolicDeterminant, Inverse, Transpose
from sympde.calculus.matrices import MatSymbolicPow, MatrixElement, SymbolicTrace
from sympde.topology.basic import BasicDomain, Union, Interval
from sympde.topology.basic import Boundary, Interface
from sympde.topology.basic import InteriorDomain
from sympde.topology.domain import NormalVector, TangentVector, NCube, NCubeInterior
from sympde.topology.mapping import JacobianSymbol, InterfaceMapping, MultiPatchMapping, JacobianInverseSymbol
from sympde.topology.mapping import LogicalExpr, PullBack
# TODO fix circular dependency between sympde.expr.evaluation and sympde.topology.mapping
from sympde.topology.space import ScalarFunction
from sympde.topology.space import VectorFunction
from sympde.topology.space import IndexedVectorFunction
from sympde.topology.space import Trace
from sympde.topology.space import element_of
from sympde.topology.space import ScalarFunctionSpace
from sympde.topology.derivatives import _partial_derivatives
from sympde.topology.derivatives import _logical_partial_derivatives
from sympde.topology.derivatives import get_atom_derivatives
from sympde.topology.derivatives import get_atom_logical_derivatives
from sympde.topology.derivatives import dx, dy, dz
from sympde.topology.derivatives import dx1, dx2, dx3
from sympde.topology.derivatives import (Grad_1d, Div_1d,
Grad_2d, Curl_2d, Rot_2d, Div_2d,
Grad_3d, Curl_3d, Div_3d)
from sympde.topology.derivatives import Bracket_2d
from sympde.topology.derivatives import Laplace_1d, Laplace_2d, Laplace_3d
from sympde.topology.derivatives import Hessian_1d, Hessian_2d, Hessian_3d
from sympde.topology.derivatives import (LogicalGrad_1d, LogicalDiv_1d,
LogicalGrad_2d, LogicalCurl_2d, LogicalRot_2d, LogicalDiv_2d,
LogicalGrad_3d, LogicalCurl_3d, LogicalDiv_3d)
from sympde.topology.derivatives import LogicalBracket_2d
from sympde.topology.derivatives import LogicalLaplace_1d, LogicalLaplace_2d, LogicalLaplace_3d
from sympde.topology.derivatives import LogicalHessian_1d, LogicalHessian_2d, LogicalHessian_3d
from .basic import BasicExpr, BasicForm
from .expr import BilinearForm
from .expr import Integral
from .expr import Functional
from .expr import _get_domain
# Public API of this module.  Note that several underscore-prefixed helpers
# are deliberately exported here for use by other sympde.expr modules.
__all__ = (
    'Advection',
    'AdvectionT',
    'Basic1dForm',
    'Bilaplacian',
    'BoundaryExpression',
    'DomainExpression',
    'InterfaceExpression',
    'KernelExpression',
    'Mass',
    'Stiffness',
    'TensorExpr',
    'TerminalExpr',
    '_get_trials_tests',
    '_replace_atomic_expr',
    '_split_expr_over_interface',
    '_split_test_function',
    '_tensorize_atomic_expr',
    '_to_matrix_form',
    '_unpack_functions',
    'is_sequence',
)
#==============================================================================
def is_sequence(a):
    """Return True when *a* is an ordered sequence (list, tuple or sympy Tuple)."""
    sequence_types = (list, tuple, Tuple)
    return isinstance(a, sequence_types)
#==============================================================================
def _unpack_functions(ls):
    """Expand an iterable of scalar/vector functions into a flat tuple of
    scalar components (vector functions contribute one entry per component).
    """
    components = []
    for func in ls:
        if isinstance(func, ScalarFunction):
            components.append(func)
        elif isinstance(func, VectorFunction):
            # one scalar entry per component of the vector function
            n_comps = func.shape[0]
            components.extend(func[i] for i in range(n_comps))
        else:
            raise TypeError('Can only accept ScalarFunction and VectorFunction')
    return tuple(components)
#==============================================================================
def _get_trials_tests(expr, *, flatten=False):
    """
    Get all scalar trial/test functions in the given expression.

    Parameters
    ----------
    expr : BasicForm | BasicExpr
        Symbolic expression.

    flatten: Boolean, optional
        Decompose the vector trial/test functions into their scalar components.

    Returns
    -------
    trials : tuple | None
        All trial functions in the given expression (None for linear forms
        and functionals).

    tests : tuple | None
        All test functions in the given expression (None for functionals).

    Raises
    ------
    TypeError
        If *expr* is not a BasicForm or BasicExpr.
    ValueError
        If *expr* is neither bilinear, linear, nor a functional.
    """
    if not isinstance(expr, (BasicForm, BasicExpr)):
        raise TypeError("Expression must be of type BasicForm or BasicExpr, got '{}' instead".format(type(expr)))

    if expr.is_bilinear:
        trials = _unpack_functions(expr.variables[0]) if flatten else expr.variables[0]
        tests  = _unpack_functions(expr.variables[1]) if flatten else expr.variables[1]
    elif expr.is_linear:
        trials = None
        tests  = _unpack_functions(expr.variables) if flatten else expr.variables
    elif expr.is_functional:
        trials = None
        tests  = None
    else:
        # BUG FIX: the ValueError was previously constructed but never
        # raised, so execution fell through to the return statement and
        # crashed with a NameError on 'trials' instead of reporting the
        # real problem.
        raise ValueError('Could not interpret expression as bilinear form, linear form, or functional')

    return trials, tests
#==============================================================================
def _to_matrix_form(expr, *, trials=None, tests=None, domain=None):
    """
    Create a matrix representation of input expression, based on trial and
    test functions. We have three options:

    1. if both the trial and test functions are given, we treat the expression
    as a bilinear form and convert it to a (n_rows x n_cols) rectangular
    matrix with n_rows = len(tests) and n_cols = len(trials);

    2. if only the test functions are given, we treat the expression as a
    linear form and convert it to a (n_rows x 1) matrix (column vector) with
    n_rows = len(tests);

    3. if neither the trial nor the test functions are given, we treat the
    expression as a scalar functional and convert it to a 1x1 matrix.

    The domain is needed when the expression is defined over an interface,
    to delete the plus/minus operators and the InterfaceMapping object.

    Parameters
    ----------
    expr : sympy.Expr
        Expression corresponding to a bilinear/linear form or functional.

    trials : iterable
        List of all scalar trial functions (after unpacking vector functions).

    tests : iterable
        List of all scalar test functions (after unpacking vector functions).

    domain : Domain
        domain of expr

    Returns
    -------
    M : sympy.matrices.immutable.ImmutableDenseMatrix
        Matrix representation of input expression.
    """
    # Away from an interface, minus/plus restrictions and any leftover
    # InterfaceMapping are stripped so that the zero-substitutions below
    # see the bare trial/test functions.
    if not isinstance(domain, Interface):
        atoms = expr.atoms(minus, plus)
        new_atoms = [e.args[0] for e in atoms]
        subs = tuple(zip(atoms, new_atoms))
        expr = expr.subs(subs)
        mapping = expr.atoms(InterfaceMapping)
        if mapping:
            # NOTE(review): assumes at most one InterfaceMapping is present
            # and arbitrarily picks the first if several -- confirm.
            mapping = list(mapping)[0]
            expr = expr.subs(mapping, mapping.minus)

    # Bilinear form
    if trials and tests:
        # Entry (i, j) keeps only the terms coupling tests[i] with
        # trials[j]: every other test/trial function is substituted by 0.
        M = [[None for j in trials] for i in tests]
        for i, test in enumerate(tests):
            subs_i = {v:0 for v in tests if v != test}
            expr_i = expr.subs(subs_i)
            for j, trial in enumerate(trials):
                subs_j = {u:0 for u in trials if u != trial}
                M[i][j] = expr_i.subs(subs_j)
        M = Matrix(M)

    # Linear form
    elif tests:
        # Row i keeps only the terms involving tests[i].
        M = [[None] for i in tests]
        for i, test in enumerate(tests):
            subs_i = {v:0 for v in tests if v != test}
            M[i][0] = expr.subs(subs_i)
        M = Matrix(M)

    # Functional
    else:
        M = [[expr]]

    return ImmutableDenseMatrix(M)
#==============================================================================
def _split_expr_over_interface(expr, interface, tests=None, trials=None):
"""
Splits an expression defined on an interface, into
expressions where the test and trial functions are defined on each side of
the interface.
Parameters:
expr: sympde expression
interface: interface of a connectivity
tests: tests functions as given from linear or bilinear forms
trials: trials functions as given from linear or bilinear forms
Returns: sympde expression
"""
# ...
is_bilinear = not( trials is None ) and not( tests is None )
is_linear = ( trials is None ) and not( tests is None )
if trials is None: trials = []
if tests is None: tests = []
# ...
int_expressions = OrderedDict()
bnd_expressions = OrderedDict()
# ...
# we replace all jumps
jumps = expr.atoms(Jump)
args = [j._args[0] for j in jumps]
for a in args:
expr = expr.subs({jump(a): minus(a) - plus(a)})
# ...
# ...
d_trials = OrderedDict()
for u in trials:
u_minus = minus(u)
u_plus = plus(u)
d_trials[u] = {'-': u_minus, '+': u_plus}
# # TODO add sub for avg
# expr = expr.subs({jump(u): u_minus - u_plus})
d_tests = OrderedDict()
for v in tests:
v_minus = minus(v)
v_plus = plus(v)
d_tests[v] = {'-': v_minus, '+': v_plus}
# # TODO add sub for avg
# expr = expr.subs({jump(v): v_minus - v_plus})
# ...
# ...
trials = []
for u in d_trials.keys():
u_minus = d_trials[u]['-']
u_plus = d_trials[u]['+']
trials += [u_minus, u_plus]
tests = []
for u in d_tests.keys():
u_minus = d_tests[u]['-']
u_plus = d_tests[u]['+']
tests += [u_minus, u_plus]
# ...
# ...
def _nullify(expr, u, us):
"""nullifies all symbols in us except u."""
others = list(set(us) - set([u]))
for other in others:
expr = expr.subs({other: 0})
return expr
# ...
if is_bilinear:
for u in d_trials.keys():
for v in d_tests.keys():
u_minus = d_trials[u]['-']
u_plus = d_trials[u]['+']
v_minus = d_tests[v]['-']
v_plus = d_tests[v]['+']
# ...
newexpr = _nullify(expr, u_minus, trials)
newexpr = _nullify(newexpr, v_minus, tests)
newexpr = newexpr.subs({u_minus: u, v_minus: v})
mapping = newexpr.atoms(InterfaceMapping)
if mapping and not is_zero(newexpr):
mapping = list(mapping)[0]
newexpr = newexpr.subs(mapping, mapping.minus)
if not is_zero(newexpr):
if interface.minus in bnd_expressions:
newexpr += bnd_expressions[interface.minus]
bnd_expressions[interface.minus] = newexpr.subs(interface, interface.minus)
# ...
newexpr = _nullify(expr, u_plus, trials)
newexpr = _nullify(newexpr, v_plus, tests)
newexpr = newexpr.subs({u_plus: u, v_plus: v})
mapping = newexpr.atoms(InterfaceMapping)
if mapping and not is_zero(newexpr):
mapping = list(mapping)[0]
newexpr = newexpr.subs(mapping, mapping.plus)
for nn in newexpr.atoms(NormalVector):
newexpr = newexpr.subs(nn, -nn)
if not is_zero(newexpr):
if interface.plus in bnd_expressions:
newexpr += bnd_expressions[interface.plus]
bnd_expressions[interface.plus] = newexpr.subs(interface, interface.plus)
# ...
# TODO must call InterfaceExpression afterward
newexpr = _nullify(expr, u_minus, trials)
newexpr = _nullify(newexpr, v_plus, tests)
mapping = newexpr.atoms(InterfaceMapping)
if mapping:
mapping = list(mapping)[0]
for det in newexpr.atoms(SymbolicDeterminant):
if det.atoms(InterfaceMapping):
newdet = det.subs(mapping, mapping.minus)
newexpr = newexpr.subs(det, newdet)
if not is_zero(newexpr):
if isinstance(u, IndexedVectorFunction):
u_minus = minus(u.base)
if isinstance(v, IndexedVectorFunction):
v_plus = plus(v.base)
if (u_minus, v_plus) in int_expressions:
newexpr += int_expressions[u_minus, v_plus].expr
int_expressions[u_minus, v_plus] = InterfaceExpression(interface, u_minus, v_plus, newexpr)
# ...
# TODO must call InterfaceExpression afterward
newexpr = _nullify(expr, u_plus, trials)
newexpr = _nullify(newexpr, v_minus, tests)
mapping = newexpr.atoms(InterfaceMapping)
if mapping:
mapping = list(mapping)[0]
for det in newexpr.atoms(SymbolicDeterminant):
if det.atoms(InterfaceMapping):
newdet = det.subs(mapping, mapping.minus)
newexpr = newexpr.subs(det, newdet)
if not is_zero(newexpr):
if isinstance(u, IndexedVectorFunction):
u_plus = plus(u.base)
if isinstance(v, IndexedVectorFunction):
v_minus = minus(v.base)
if (u_plus, v_minus) in | |
# Source repository: Dan-Eli/FGP_Docs (scraper metadata artifact; not part of the original source)
import os
import sys
import codecs
import csv
import re
import collections
import argparse
import json
import ast
import traceback
import inflect
import operator
import Tkinter, Tkconstants, tkFileDialog
from openpyxl import *
from openpyxl.styles import *
from openpyxl.worksheet.write_only import WriteOnlyCell
#from methods import xl_methods as xl
home_folder = os.path.abspath(os.path.join(__file__, "..\\..\\.."))
scripts_folder = os.path.join(os.sep, home_folder, 'scripts')
sys.path.append(scripts_folder)
from common import shared
from common import spreadsheet as sh
def get_groups(in_csv):
    """Return the unique, non-empty group names found in the first column
    of the CSV file *in_csv*, skipping the header row.

    The returned list is de-duplicated and unordered (same contract as the
    original ``list(set(...))`` result).
    """
    # BUG FIX: the file handle was previously never closed; 'with'
    # guarantees closure.  Text mode ('r' instead of 'rb') keeps the
    # str-based parsing working on both Python 2 and Python 3.
    with open(in_csv, mode='r') as srch_csv:
        srch_lines = srch_csv.readlines()

    groups = set()
    for line in srch_lines[1:]:  # skip the header row
        group = line.split(',')[0].strip()
        if group != '':
            groups.add(group)
    return list(groups)
def get_word_list(words, juris, omit=False):
    """Normalise a list of keywords for searching.

    Blank entries are dropped.  Entries prefixed with '-' are "omit" words:
    with ``omit=False`` they are excluded, with ``omit=True`` only they are
    returned (without the '-' prefix).  For every jurisdiction except
    'Quebec' (French keywords), English plural forms are appended using the
    ``inflect`` package.
    """
    # First, remove any blanks in the list.
    words = [word for word in words if word.strip() != '']

    # BUG FIX: classify on the *stripped* word so that leading whitespace
    # cannot hide a '-' prefix (previously word[0] was tested on the raw
    # string while the stripped value was used everywhere else).
    stripped = [word.strip() for word in words]
    if omit:
        singular_words = [w[1:] for w in stripped if w[0] == '-']
    else:
        singular_words = [w for w in stripped if w[0] != '-']

    if juris != 'Quebec':
        # Create the inflect engine lazily: only the English path needs it.
        p = inflect.engine()
        plural_words = [p.plural(word) for word in singular_words]
        srch_words = singular_words + plural_words
    else:
        srch_words = singular_words
    return srch_words
def get_search_words(juris, group='gb3', kword_fn=None):
    """Read keyword filter themes from a CSV file.

    Each data line of the filter CSV looks like::

        GB3,Imagery,"imagery,uav,drone"

    i.e. a group name, an optional theme name, then one or more
    double-quoted keyword lists.  Lines whose group does not contain
    *group* (case-insensitive) are skipped, unless group == 'all'.

    Parameters
    ----------
    juris : str
        Jurisdiction name (currently unused here; kept for interface
        compatibility with callers).
    group : str
        Group filter, or 'all' to keep every line.
    kword_fn : str | None
        Path to a UTF-8 keyword CSV; falls back to files\\filter_lists.csv.

    Returns
    -------
    collections.OrderedDict
        Maps theme name -> list of keyword lists, in file order.
    """
    # Get the list of search words from a CSV file.
    # BUG FIX: the file handle was previously never closed.
    if kword_fn is not None:
        # codecs.open decodes to text regardless of the binary mode flag.
        srch_csv = codecs.open(kword_fn, encoding='utf-8', mode='rb')
    else:
        srch_csv = open('files\\filter_lists.csv', mode='rb')
    try:
        srch_lines = srch_csv.readlines()
    finally:
        srch_csv.close()

    srch_words = collections.OrderedDict()
    for line in srch_lines[1:]:
        line = line.strip()
        if line == '':
            continue

        # Pull out every double-quoted segment; each one holds a
        # comma-separated keyword list.
        quotes = []
        start_pos = line.find('"')
        while start_pos > -1:
            end_pos = line.find('"', start_pos + 1)
            quotes.append(line[start_pos + 1:end_pos])
            start_pos = line.find('"', end_pos + 1)
        if len(quotes) == 0:
            continue

        # Split and strip the keywords of each quoted segment.
        keywords = []
        for quoted in quotes:
            keywords.append([k.strip() for k in quoted.split(',')])

        # The columns before the first quote are the group name and,
        # when present, the theme name (otherwise the theme is the group).
        first_quote = line.find('"')
        vals = line[:first_quote].split(',')
        if len(vals) > 2:
            group_val = vals[0]
            theme = vals[1]
        else:
            group_val = vals[0]
            theme = group_val

        if group == 'all' or group_val.lower().find(group.lower()) > -1:
            srch_words[theme] = keywords

    return srch_words
def search_dataset(row, idx, words, place=None):
    """Search one dataset row for the given keyword filters.

    The row's keywords, title and description are scanned in that order;
    the first field containing one of the primary keywords wins.  When
    *words* carries a second keyword list it acts as a narrowing filter:
    rows matching none of its words in any field are rejected.  When
    *place* is given, the description must mention it.

    Returns ``None`` when the row is filtered out, otherwise a pair
    ``(matches, indices)`` where ``matches`` is a (possibly empty) list of
    ``(field_name, matched_word, row)`` tuples and ``indices`` the matching
    row indices.
    """
    # Normalised text fields of the current row.
    title_text = row['Title'].lower().replace('_', ' ')
    desc_text = row['Description'].lower()
    keyword_text = row['Keywords'].lower()

    # Optional place-name filter: the description must mention the place.
    if place is not None and place != '':
        if desc_text.find(place.lower()) == -1:
            return None

    # A secondary keyword list, when present, narrows the candidate rows.
    if len(words) > 1:
        second_filter = words[1]
        in_any_field = (
            search_text(keyword_text, second_filter) is not None
            or search_text(title_text, second_filter) is not None
            or search_text(desc_text, second_filter) is not None
        )
        if not in_any_field:
            return None
    primary_words = words[0]

    # Scan the fields in priority order; the first match wins.
    matches = []
    matched_indices = []
    for field_name, field_text in (
        ('keywords', keyword_text),
        ('title', title_text),
        ('desc', desc_text),
    ):
        hit = search_text(field_text, primary_words)
        if hit is not None:
            matches.append((field_name, hit, row))
            matched_indices.append(idx)
            break

    return (matches, matched_indices)
def search_text(text, words):
    """Return the first entry of *words* that occurs in *text* as a whole
    word, or None when no word matches.

    BUG FIX: keywords are now escaped with re.escape() before being
    interpolated into the pattern, so a keyword containing a regex
    metacharacter (e.g. '(', '+', '.') no longer raises re.error or
    matches unintended text.  Plain alphanumeric keywords behave exactly
    as before.
    """
    for word in words:
        # \b anchors restrict the search to whole-word occurrences; the
        # re module caches compiled patterns, so this stays cheap.
        if re.search(r'\b' + re.escape(word) + r'\b', text) is not None:
            return word
    return None
def run(juris, xl_fn, group, place='', keywords=None):
juris = juris.replace(' ', '_')
os.system("title Analysis Filter - %s" % juris)
# Open merged spreadsheet
#print "#1"
merged_xl = sh.PT_XL(fn=xl_fn, read_only=True)
#print "#2"
merged_xl.set_workbook()
merged_xl.set_worksheet('Merged Datasets')
in_rows = merged_xl.get_dictrows('values')
header_txts = in_rows[0].keys()
# Get a list of search words from the filter CSV file
srch_words = get_search_words(juris, group, keywords)
if not srch_words:
print "No search words for that group."
merged_xl.close_workbook()
return None
out_lines = collections.OrderedDict()
found_idx = []
# Create the inflect engine to get plurals
p = inflect.engine()
for theme, words in srch_words.items():
# Go through each list of words
print words
#answer = raw_input("Press enter...")
filtered_lines = []
for idx, row in enumerate(in_rows):
msg = "Filtering %s of %s lines for search words" % \
(idx + 1, len(in_rows))
shared.print_oneliner(msg)
# Go through each line in the merged CSV
if row['Source'] == 'err_log.csv': continue
#print "k: %s" % k
#row['Layer'] = k
#row['P/T'] = shared.get_pt_abbreviation(juris)
title_str = row['Title'].lower().replace('_', ' ')
desc_str = row['Description'].lower()
# Get a list of keywords
k_str = row['Keywords']
#keywords = k_str.split(',')
#keywords = [k.lower().strip() for k in keywords]
srch_res = search_dataset(row, idx, words, place)
if srch_res is not None:
#print srch_res
#answer = raw_input("Press enter...")
cur_lines, cur_found = srch_res
filtered_lines += cur_lines
found_idx += cur_found
# if place is None or place == '':
# # If no place name is provided
# if len(words) > 1:
# # If there is a 2nd filter, use it to narrow searches
# filter2 = words[1]
# if search_text(title_str, filter2) or \
# search_text(desc_str, filter2):
# # print
# # print
# # print title_str
# # print
# # print desc_str
# # answer = raw_input("Press enter...")
# cur_lines, cur_found = search_dataset(row, idx, words[0])
# filtered_lines += cur_lines
# found_idx += cur_found
# else:
# cur_lines, cur_found = search_dataset(row, idx, words[0])
# filtered_lines += cur_lines
# found_idx += cur_found
# else:
# if desc_str.lower().find(place.lower()) > -1:
# if len(words) > 1:
# filter2 = words[1]
# if search_text(title_str, filter2) or \
# search_text(desc_str, filter2):
# cur_lines, cur_found = search_dataset(row, idx, \
# words[0])
# filtered_lines += cur_lines
# found_idx += cur_found
# else:
# cur_lines, cur_found = search_dataset(row, idx, words[0])
# filtered_lines += cur_lines
# found_idx += cur_found
print
# Add the filtered lines to the out_lines with the current theme as key
out_lines[theme] = filtered_lines
#print "Out lines: %s" % out_lines[k]
#answer = raw_input("Press enter...")
out_f = codecs.open('out_tmp.txt', encoding='utf-8', mode='w')
# Set the widths of each column in the output
# header_info = [('Layer', 24), ('P/T', 6), ('Source', 50),
# ('Title', 100), ('Description', 100),
# ('Type', 20), ('Start Date', 20), ('Recent Date', 20),
# ('Update Frequency', 35), ('Publisher', 60),
# ('Licensing', 50), ('Available Formats', 50),
# ('Access', 32), ('Download', 25), ('Spatial Reference', 50),
# ('Data URL', 70), ('Web Page URL', 70),
# ('Web Map URL', 70), ('Service', 25),
# ('Service Name', 100), ('Service URL', 70),
# ('Metadata URL', 70), ('Metadata Type', 65),
# ('Notes', 100)]
header_info = sh.get_header_info('analysis')['xl']
# for k, v in out_lines.items():
# print "%s: %s" % (k, v)
# answer = raw_input("Press enter...")
# Sort lines by title or description
sort_dict = collections.OrderedDict()
for theme, lines in out_lines.items():
for l in lines:
category = l[0]
srch_wrd = l[1]
sort_line = l[2]
# print "Category: %s" % category
# print "Search word: %s" % srch_wrd
# print "Sort line: %s" % sort_line
# answer = raw_input("Press enter...")
sort_line['Word Found'] = srch_wrd
sort_line['Found In'] = category.title()
sort_line['Layer'] = theme.title()
sort_line['P/T'] = shared.get_pt_abbreviation(juris)
if category in sort_dict:
prev_lst = sort_dict[category]
else:
prev_lst = []
prev_lst.append(sort_line)
sort_dict[category] = prev_lst
try:
# Open the output CSV file for editing
#print "keywords: %s" % keywords
ky_word_fn = os.path.basename(keywords)
#print "keywords basename: %s" % ky_word_fn
if ky_word_fn.find('CE') > -1:
out_abbr = 'CE_'
elif ky_word_fn.find('GB') > -1:
out_abbr = 'GB3_'
else:
out_abbr = ''
if place == '' or place is None:
outxl_fn = "results\\%s\\%s_%sresults.xlsx" % (juris, juris, out_abbr)
else:
outxl_fn = "results\\%s\\%s_%s%sresults.xlsx" % (juris, juris, \
place, out_abbr)
#print "outxl_fn: %s" % outxl_fn
#answer = raw_input("Press enter...")
# Create the Excel file
out_xl = sh.PT_XL(fn=outxl_fn, replace_ws=True)
# Create the worksheet with the group as name
group_wsname = '%s' % group.title()
group_ws = out_xl.add_worksheet(group_wsname, header_info) #, \
#'Found in Titles & Keywords')
# Get the list of title items and sort it by 'Layer'
#print sort_dict.keys()
#answer = raw_input("Press enter...")
title_lst = []
if 'title' in sort_dict.keys():
title_lst = sort_dict['title']
keywrd_lst = []
if 'keywords' in sort_dict.keys():
keywrd_lst = sort_dict['keywords']
join_lst = title_lst + keywrd_lst
join_lst.sort(key=operator.itemgetter('Layer'))
unique_titles = shared.remove_duplicates(join_lst)
# Go through each title line and add it to the Excel sheet
for idx, line in enumerate(unique_titles):
msg = "Saving %s of %s lines to '%s'" % \
(idx + 1, len(unique_titles), outxl_fn)
shared.print_oneliner(msg)
# Convert the current line dictionary to cells
for k, v in line.items():
#print "%s: %s" % (k, v)
#answer = raw_input("Press enter...")
out_xl.add_cell(v, k)
out_xl.write_row()
print
# Get the list of description items and sort it by 'Layer'
desc_lst = []
if 'desc' in sort_dict.keys():
desc_lst = sort_dict['desc']
desc_lst.sort(key=operator.itemgetter('Layer'))
unique_desc = shared.remove_duplicates(desc_lst)
# Go through each title line and add it to the Excel sheet
for idx, line in enumerate(unique_desc):
msg = "Saving %s of %s lines to '%s'" % \
(idx + 1, len(unique_desc), outxl_fn)
shared.print_oneliner(msg)
# Convert the current line dictionary to cells
for k, v in line.items():
#print "%s: %s" % (k, v)
#answer = raw_input("Press enter...")
out_xl.add_cell(v, k)
out_xl.write_row()
print
# Add a keywords table as well
out_xl.write_row()
out_xl.add_title('Keywords')
for theme, words in srch_words.items():
value = '%s: %s' % (theme, ', '.join(words[0]))
out_xl.add_cell(value, 0)
out_xl.write_row()
# save the file
out_xl.save_file()
merged_xl.close_workbook()
except Exception, e:
print e
traceback.print_exc(file=sys.stdout)
#out_f.write(unicode(in_line))
#out_f.write('\n')
answer = raw_input("Press enter...")
####################################################################
# Create an XLSX file with unknown classifications
# other_csv_fn = "results\\%s\\_%s_%s_other_results.xlsx" % (juris, \
# juris, group.upper())
# # | |
(parent.board.max_filesize * 1000):
error_message = "File size is " + str(reply.image.size - (parent.board.max_filesize *1000)) + " bytes larger than the maximum of " + str(parent.board.max_filesize) + " kilobytes."
return render_to_response('imageboard/post.html', {'board_list':board_list, 'form':form, 'delform':DelPostForm, 'error_message':error_message, 'post':parent,})
# Enforce image dimensions
if not parent.board.min_image_dimension < reply.image.width < parent.board.max_image_dimension or not parent.board.min_image_dimension < reply.image.height < parent.board.max_image_dimension:
error_message = "Images must be at least " + str(parent.board.min_image_dimension) + "x" + str(parent.board.min_image_dimension) + " pixels and no more than "
error_message += str(parent.board.max_image_dimension) + "x" + str(parent.board.max_image_dimension) + " pixels"
return render_to_response('imageboard/post.html', {'board_list':board_list, 'form':form, 'delform':DelPostForm, 'error_message':error_message, 'post':parent,})
# Generate the thumbnail right before saving.
_gen_thumbnail(reply, form.cleaned_data['image'])
# Save the original filename. Maybe later we can make it so that files are downloaded with this name
reply.file_name = form.cleaned_data['image'].name
new_poster.save() # save the last_post_time for the Poster
reply.save() # save the reply
# where to go after replying: the root, or the post being replied to
if parent.root:
return_to = parent.root.id
else:
return_to = parent.id
return HttpResponseRedirect(reverse('sukiyaki.imageboard.views.view_post', args=(return_to,))) # go back to the root (OP)
else: # invalid form
return render_to_response('imageboard/post.html', {'post':parent, 'form':form, 'delform':DelPostForm, 'board_list':board_list,})
def _test_reply_limit(parent, form):
    """Enforce the board's per-thread reply limit.

    If this reply would be the last one allowed, lock the thread (and the
    post being replied to). If the thread is already over the limit, return
    a rendered "Thread full." error page; otherwise return None so the
    caller can proceed with saving the reply.
    """
    # Replies may target the OP directly or a nested reply; the limit always
    # applies to the thread root.
    thread = parent.root if parent.root else parent
    limit = parent.board.reply_limit_per_thread
    count = thread.count_replies()
    if limit > 0 and count + 1 == limit:
        # This reply fills the thread: lock it against further replies.
        thread.locked = True
        parent.locked = True
    elif limit > 0 and count > limit:
        return render_to_response('imageboard/post.html', {'post':thread, 'form':form, 'delform':DelPostForm, 'board_list':board_list, 'error_message':"Thread full."})
def _txtpost(request, board):
    """Validate and save a new top-level text post on `board`.

    Handles ban checking (including ban expiry), flood protection based on
    `board.time_between_posts`, and pruning of the lowest-ranked thread when
    the board's `max_threads` limit is reached. Returns a redirect to the new
    thread on success, otherwise a rendered error/board page.
    """
    form = TextPostForm(request.POST)
    if form.is_valid():
        # get the IP address of the Poster, and store it if new, or stop if banned
        remote_addr = request.META['REMOTE_ADDR']
        new_poster, poster_created = Poster.objects.get_or_create(ipaddr=remote_addr)
        if not poster_created and new_poster.banned: # if the Poster is not new, and is banned
            if new_poster.ban_end < datetime.datetime.now(): # Need to check if a user's ban has expired. If so, remove their ban, and continue
                new_poster.banned = False # unban the Poster (persisted by new_poster.save() below)
            else: # they are still banned
                return render_to_response('imageboard/banned.html', {'board_list':board_list, 'board':board, 'poster':new_poster} ) # Banned poster, do not post
        elif not poster_created and board.time_between_posts > 0 and datetime.datetime.now() - new_poster.last_post_time < datetime.timedelta(seconds=board.time_between_posts): # The Poster is not new, and has posted less than time_between_posts seconds ago
            error_message = "Flood detected! Please wait to post."
            posts = board.textpost_set.filter(reply=None).order_by('sticky', 'rank', 'post_time')
            return render_to_response('imageboard/board.html', {'board_list':board_list, 'board':board, 'form':form, 'error_message':error_message, 'posts':posts,})
        # check if the thread limit for this board has been reached, if so, delete the lowest ranked thread, and continue
        if TextPost.objects.filter(reply=None, board=board.id, sticky=False).count() >= board.max_threads > 0:
            p = TextPost.objects.filter(reply=None, board=board.id, sticky=False).order_by('rank', 'post_time')[0]
            p.delete()
        tempPost = form.save(commit=False) # get the Post object
        tempPost.board = board # set the board this post belongs to
        tempPost.poster = new_poster # set the poster this post belongs to
        tempPost.rank = board.highest_rank() # set the rank of this post to the highest rank
        tempPost.reply = None # Since this is a top-level post, it is a reply to nothing
        tempPost.password = hashlib.sha512(SALT + tempPost.password).hexdigest() # save the password as a hash
        new_poster.save() # save the last_post_time for the Poster
        tempPost.save() # save the object to the database finally
        return HttpResponseRedirect(reverse('sukiyaki.imageboard.views.view_text_post', args=(tempPost.id,)))
    else: # if the form is invalid
        return render_to_response('imageboard/board.html', {'form':form, 'delform':DelPostForm, 'board_list':board_list, 'board':board})
def _txtreply(request, parent):
    """Validate and save a text reply to `parent`.

    Applies ban/flood checks, enforces the thread reply limit via
    `_test_reply_limit`, locks replies beyond MAX_DEPTH, and bumps thread
    ranks via `_increment_rank`. Returns a redirect to the thread root on
    success, otherwise a rendered error page.
    """
    form = TextReplyForm(request.POST)
    if form.is_valid():
        # get the IP address of the poster, store it, or stop if banned
        remote_addr = request.META['REMOTE_ADDR']
        new_poster, poster_created = Poster.objects.get_or_create(ipaddr=remote_addr)
        if not poster_created and new_poster.banned:
            if new_poster.ban_end < datetime.datetime.now(): # Need to check if a user's ban has expired. If so, remove their ban, and continue
                new_poster.banned = False
            else: # if they are still banned
                return render_to_response('imageboard/banned.html', {'board_list':board_list,'poster':new_poster})
        elif not poster_created and parent.board.time_between_posts > 0 and datetime.datetime.now() - new_poster.last_post_time < datetime.timedelta(seconds=parent.board.time_between_posts): # The Poster is not new, and has posted less than time_between_posts seconds ago
            return render_to_response('imageboard/post.html', {'post':parent, 'form':form, 'delform':DelPostForm, 'board_list':board_list, 'error_message':"Flood detected! Please wait to reply."})
        # Test if the reply limit for the thread has been reached.
        reply_limit = _test_reply_limit(parent, form)
        if reply_limit: # If the reply limit has been reached, return.
            return reply_limit
        reply = form.save(commit=False) # get the reply
        reply.level = parent.level + 1 # one level deeper than the parent
        if reply.level >= MAX_DEPTH: # If the maximum reply depth has been reached, lock the post from replies.
            reply.locked = True
        reply.board = parent.board # same board as parent
        reply.poster = new_poster # set the poster
        reply.reply = parent # set the parent
        # update the reference to the root (OP) of the thread and increment ranks
        _increment_rank(parent, reply)
        reply.rank = 0
        reply.password = hashlib.sha512(SALT + reply.password).hexdigest() # save the password as a hash
        new_poster.save() # save the last_post_time for the Poster
        reply.save() # save the reply
        # where to go after replying: the root, or the post being replied to
        if parent.root:
            return_to = parent.root.id
        else:
            return_to = parent.id
        return HttpResponseRedirect(reverse('sukiyaki.imageboard.views.view_text_post', args=(return_to,))) # go back to the root (OP)
    else: # invalid form
        return render_to_response('imageboard/post.html', {'post':parent, 'form':form, 'delform':DelPostForm, 'board_list':board_list,})
# View functions
def board_index(request):
    """Render the imageboard front page, listing all boards."""
    context = {'board_list': board_list}
    return render_to_response('imageboard/index.html', context)
def view_board(request, board_abbr):
    """Show one page of threads for the board with abbreviation `board_abbr`.

    Expires old threads (when the board sets `max_thread_age`), selects the
    thread set and posting form appropriate to the board type, and paginates
    the threads by `board.threads_per_page`.
    """
    b = get_object_or_404(Board, abbr=board_abbr)
    # If time limits are placed on threads, either delete them here, or put
    # some kind of warning of impending deletion.
    # We're going to delete them with no warning.
    if b.max_thread_age > 0:
        # max_thread_age is in minutes; the cutoff is the same for every board
        # type, so compute it once.
        cutoff = datetime.datetime.now() - datetime.timedelta(seconds=(b.max_thread_age*60))
        if b.files and not b.images: # Files
            # NOTE(review): expiry for file boards is not implemented — nothing
            # is deleted here. Confirm whether filepost_set should be purged.
            pass
        elif b.images and not b.files: # Images
            b.imagepost_set.filter(reply=None, sticky=False, post_time__lte=cutoff).delete()
        elif not b.images and not b.files: # Text
            # NOTE(review): expiry for text boards computes a cutoff but deletes
            # nothing — confirm intended.
            pass
    # Which kind of form and posts do we display?
    # Later on, we can also decide which template to show depending on the board type.
    # Default to an empty thread list and no form so unsupported board type
    # combinations degrade gracefully; previously file boards crashed with an
    # UnboundLocalError because `form` was never assigned before the render.
    posts = []
    form = None
    if b.files and not b.images:
        posts = b.filepost_set.filter(reply=None).order_by('sticky', 'rank', 'post_time').reverse()
        # form = filepostform  # TODO: no file-post form is wired up yet
    elif b.images and not b.files:
        posts = b.imagepost_set.filter(reply=None).order_by('sticky', 'rank', 'post_time').reverse()
        form = ImgPostForm
    elif not b.images and not b.files:
        posts = b.textpost_set.filter(reply=None).order_by('sticky', 'rank', 'post_time').reverse()
        form = TxtPostForm
    paginator = Paginator(posts, b.threads_per_page)
    # Ensure that the page GET parameter is an int. Otherwise, default to page 1.
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    # If page request (9999) is out of range, deliver last page of results.
    try:
        posts_page = paginator.page(page)
    except (EmptyPage, InvalidPage):
        posts_page = paginator.page(paginator.num_pages)
    # for now, we just use the same template, because it's sort of generic.
    return render_to_response('imageboard/board.html', {'board':b, 'posts':posts_page, 'board_list':board_list, 'form':form,})
def view_post(request, post_id):
    """Show a single image thread (OP plus replies) with a reply form."""
    post = get_object_or_404(ImagePost, pk=post_id)
    context = {'post': post, 'board_list': board_list, 'form': ImgReplyForm, 'delform': DelPostForm}
    return render_to_response('imageboard/post.html', context)
def view_text_post(request, text_id):
    """Show a single text thread (OP plus replies) with a reply form."""
    post = get_object_or_404(TextPost, pk=text_id)
    context = {'post': post, 'board_list': board_list, 'form': TxtReplyForm, 'delform': DelPostForm}
    return render_to_response('imageboard/post.html', context)
def post(request, board_abbr):
    """Dispatch a new top-level post for the board `board_abbr`.

    POST requests are routed to the image or text handler according to the
    board type; any other method renders the board page with an empty
    posting form.
    """
    board = get_object_or_404(Board, abbr=board_abbr)
    if request.method == 'POST':
        # Test board type here and jump to appropriate validation
        if board.images and not board.files: # ImagePost
            return _imgpost(request, board)
        elif not board.images and not board.files: # TextPost
            return _txtpost(request, board)
        # NOTE(review): file boards (board.files set) fall through here and the
        # view returns None, which is not a valid HTTP response — confirm
        # whether a file-post handler is still to be written.
    else: # not POST
        if board.images and not board.files:
            form = ImgPostForm
        elif not board.images and not board.files:
            form = TxtPostForm
        # NOTE(review): for file boards `form` is never assigned, so the render
        # below raises UnboundLocalError — confirm intended.
        return render_to_response('imageboard/board.html', {'form':form, 'delform':DelPostForm, 'board_list':board_list,})
def reply(request, reply_to):
parent = get_object_or_404(ImagePost, pk=reply_to)
form = ImgReplyForm
# Detect locked threads/posts
if parent.root:
if parent.root.locked: # if thread is locked
return render_to_response('imageboard/post.html', {'post':parent, 'form':form, 'delform':DelPostForm, 'board_list':board_list,'error_message':"Post is locked: No replies allowed"}) # Fix the dictionary
else:
if parent.locked: # if replies aren't allowed on the parent
return render_to_response('imageboard/post.html', {'post':parent, 'form':form, 'delform':DelPostForm, 'board_list':board_list,'error_message':"Post is locked: No replies allowed"}) # Fix the dictionary
# If this is a POST request
if request.method == 'POST':
return _imgreply(request, parent)
else: # not POST
return render_to_response('imageboard/post.html', {'post':parent, | |
np.ma.masked_array(vis_to_avg, mask=mask)
nsample_to_avg = np.ma.masked_array(nsample_to_avg, mask=mask)
avg_vis = np.ma.average(
vis_to_avg, weights=nsample_to_avg, axis=0
)
avg_nsample = np.sum(nsample_to_avg, axis=0)
avg_flag = np.all(flags_to_avg, axis=0)
temp_data_array[this_obj_ind] = avg_vis
temp_nsample_array[this_obj_ind] = avg_nsample
temp_flag_array[this_obj_ind] = avg_flag
if inplace:
self.select(bls=bl_ants, keep_all_metadata=keep_all_metadata)
if not self.metadata_only:
self.data_array = temp_data_array
self.nsample_array = temp_nsample_array
self.flag_array = temp_flag_array
self.check()
return
else:
if not self.metadata_only:
new_obj.data_array = temp_data_array
new_obj.nsample_array = temp_nsample_array
new_obj.flag_array = temp_flag_array
new_obj.check()
return new_obj
else:
return self.select(
bls=bl_ants, inplace=inplace, keep_all_metadata=keep_all_metadata
)
    def inflate_by_redundancy(self, tol=1.0, blt_order="time", blt_minor_order=None):
        """
        Expand data to full size, copying data among redundant baselines.

        Note that this method conjugates baselines to the 'u>0' convention in order
        to inflate the redundancies. The object is modified in place and
        re-checked at the end.

        Parameters
        ----------
        tol : float
            Redundancy tolerance in meters, default is 1.0 corresponding to 1 meter.
        blt_order : str
            string specifying primary order along the blt axis (see `reorder_blts`)
        blt_minor_order : str
            string specifying minor order along the blt axis (see `reorder_blts`)
        """
        self.conjugate_bls(convention="u>0")
        red_gps, centers, lengths = self.get_redundancies(
            tol=tol, use_antpos=True, conjugate_bls=True
        )
        # Stack redundant groups into one array.
        group_index, bl_array_full = zip(
            *[(i, bl) for i, gp in enumerate(red_gps) for bl in gp]
        )
        # TODO should be an assert that each baseline only ends up in one group
        # Map group index to blt indices in the compressed array.
        bl_array_comp = self.baseline_array
        uniq_bl = np.unique(bl_array_comp)
        group_blti = {}
        Nblts_full = 0
        for i, gp in enumerate(red_gps):
            for bl in gp:
                # First baseline in the group that is also in the compressed
                # baseline array.
                if bl in uniq_bl:
                    group_blti[i] = np.where(bl == bl_array_comp)[0]
                    # add number of blts for this group
                    Nblts_full += group_blti[i].size * len(gp)
                    break
        blt_map = np.zeros(Nblts_full, dtype=int)
        full_baselines = np.zeros(Nblts_full, dtype=int)
        missing = []
        counter = 0
        for bl, gi in zip(bl_array_full, group_index):
            try:
                # this makes the time the fastest axis
                blt_map[counter : counter + group_blti[gi].size] = group_blti[gi]
                full_baselines[counter : counter + group_blti[gi].size] = bl
                counter += group_blti[gi].size
            except KeyError:
                # group `gi` had no representative in the compressed data;
                # record the baseline and leave its slots out of the mapping
                missing.append(bl)
                pass
        if np.any(missing):
            warnings.warn("Missing some redundant groups. Filling in available data.")
        # blt_map is an index array mapping compressed blti indices to uncompressed
        self.data_array = self.data_array[blt_map, ...]
        self.nsample_array = self.nsample_array[blt_map, ...]
        self.flag_array = self.flag_array[blt_map, ...]
        self.time_array = self.time_array[blt_map]
        self.lst_array = self.lst_array[blt_map]
        self.integration_time = self.integration_time[blt_map]
        self.uvw_array = self.uvw_array[blt_map, ...]
        self.baseline_array = full_baselines
        self.ant_1_array, self.ant_2_array = self.baseline_to_antnums(
            self.baseline_array
        )
        # Update the axis-size metadata to match the inflated arrays.
        self.Nants_data = self._calc_nants_data()
        self.Nbls = np.unique(self.baseline_array).size
        self.Nblts = Nblts_full
        # Optional per-blt phase metadata must be remapped too, when present.
        if self.phase_center_app_ra is not None:
            self.phase_center_app_ra = self.phase_center_app_ra[blt_map]
        if self.phase_center_app_dec is not None:
            self.phase_center_app_dec = self.phase_center_app_dec[blt_map]
        if self.phase_center_frame_pa is not None:
            self.phase_center_frame_pa = self.phase_center_frame_pa[blt_map]
        if self.multi_phase_center:
            self.phase_center_id_array = self.phase_center_id_array[blt_map]
        self.reorder_blts(order=blt_order, minor_order=blt_minor_order)
        self.check()
def _convert_from_filetype(self, other):
"""
Convert from a file-type specific object to a UVData object.
Used in reads.
Parameters
----------
other : object that inherits from UVData
File type specific object to convert to UVData
"""
for p in other:
param = getattr(other, p)
setattr(self, p, param)
def _convert_to_filetype(self, filetype):
"""
Convert from a UVData object to a file-type specific object.
Used in writes.
Parameters
----------
filetype : str
Specifies what file type object to convert to. Options are: 'uvfits',
'fhd', 'miriad', 'uvh5', 'mir', 'ms'
Raises
------
ValueError
if filetype is not a known type
"""
if filetype == "uvfits":
from . import uvfits
other_obj = uvfits.UVFITS()
elif filetype == "fhd":
from . import fhd
other_obj = fhd.FHD()
elif filetype == "miriad":
from . import miriad
other_obj = miriad.Miriad()
elif filetype == "uvh5":
from . import uvh5
other_obj = uvh5.UVH5()
elif filetype == "mir":
from . import mir
other_obj = mir.Mir()
elif filetype == "ms":
from . import ms
other_obj = ms.MS()
else:
raise ValueError("filetype must be uvfits, mir, miriad, ms, fhd, or uvh5")
for p in self:
param = getattr(self, p)
setattr(other_obj, p, param)
return other_obj
def read_fhd(
self,
filelist,
use_model=False,
axis=None,
read_data=True,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Read in data from a list of FHD files.
Parameters
----------
filelist : array_like of str
The list/array of FHD save files to read from. Must include at
least one polarization file, a params file, a layout file and a flag file.
An obs file is also required if `read_data` is False.
use_model : bool
Option to read in the model visibilities rather than the dirty
visibilities (the default is False, meaning the dirty visibilities
will be read).
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple data sets are passed.
read_data : bool
Read in the visibility, nsample and flag data. If set to False, only
the metadata will be read in. Setting read_data to False results in
a metadata only object. If read_data is False, an obs file must be
included in the filelist.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If required files are missing or multiple files for any polarization
are included in filelist.
If there is no recognized key for visibility weights in the flags_file.
"""
from . import fhd
if isinstance(filelist[0], (list, tuple, np.ndarray)):
raise ValueError(
"Reading multiple files from class specific "
"read functions is no longer supported. "
"Use the generic `uvdata.read` function instead."
)
fhd_obj = fhd.FHD()
fhd_obj.read_fhd(
filelist,
use_model=use_model,
background_lsts=background_lsts,
read_data=read_data,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
self._convert_from_filetype(fhd_obj)
del fhd_obj
def read_mir(
self,
filepath,
isource=None,
irec=None,
isb=None,
corrchunk=None,
pseudo_cont=False,
):
"""
Read in data from an SMA MIR file.
Note that with the exception of filepath, the reset of the parameters are
used to sub-select a range of data that matches the limitations of the current
instantiation of pyuvdata -- namely 1 spectral window, 1 source. These could
be dropped in the future, as pyuvdata capabilities grow.
Parameters
----------
filepath : str
The file path to the MIR folder to read from.
isource : int
Source code for MIR dataset
irec : int
Receiver code for MIR dataset
isb : int
Sideband code for MIR dataset
corrchunk : int
Correlator chunk code for MIR dataset
pseudo_cont : boolean
Read in only pseudo-continuuum values. Default is false.
"""
from . import mir
mir_obj = mir.Mir()
mir_obj.read_mir(
filepath,
isource=isource,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
)
self._convert_from_filetype(mir_obj)
del mir_obj
def read_miriad(
self,
filepath,
axis=None,
antenna_nums=None,
ant_str=None,
bls=None,
polarizations=None,
time_range=None,
read_data=True,
phase_type=None,
correct_lat_lon=True,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
calc_lst=True,
fix_old_proj=False,
fix_use_ant_pos=True,
):
"""
Read in data from a miriad file.
Parameters
----------
filepath : str
The miriad root directory to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
antenna_nums : array_like of int, optional
The antennas numbers to read into the object.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when | |
'SingleMuon'),
PhysicsParkingScoutingMonitor = cms.vstring('ParkingScoutingMonitor'),
RPCMON = cms.vstring('RPCMonitor'),
ScoutingCalo = cms.vstring('ScoutingCaloCommissioning',
'ScoutingCaloHT'),
ScoutingPF = cms.vstring('ScoutingPFCommissioning',
'ScoutingPFHT',
'ScoutingPFMuons')
)
# Stream-transfer routing table (auto-generated HLT configuration): maps each
# online stream to its transfer destination(s) for each transfer mode
# (default / test / emulator).
process.transferSystem = cms.PSet(
    default = cms.PSet(
        default = cms.vstring('Tier0'),
        emulator = cms.vstring('Lustre'),
        streamLookArea = cms.PSet(
        ),
        test = cms.vstring('Lustre')
    ),
    destinations = cms.vstring('Tier0',
        'DQM',
        'ECAL',
        'EventDisplay',
        'Lustre',
        'None'),
    streamA = cms.PSet(
        default = cms.vstring('Tier0'),
        emulator = cms.vstring('Lustre'),
        test = cms.vstring('Lustre')
    ),
    streamCalibration = cms.PSet(
        default = cms.vstring('Tier0'),
        emulator = cms.vstring('None'),
        test = cms.vstring('Lustre')
    ),
    streamDQM = cms.PSet(
        default = cms.vstring('DQM'),
        emulator = cms.vstring('None'),
        test = cms.vstring('DQM',
            'Lustre')
    ),
    streamDQMCalibration = cms.PSet(
        default = cms.vstring('DQM'),
        emulator = cms.vstring('None'),
        test = cms.vstring('DQM',
            'Lustre')
    ),
    streamEcalCalibration = cms.PSet(
        default = cms.vstring('ECAL'),
        emulator = cms.vstring('None'),
        test = cms.vstring('ECAL')
    ),
    streamEventDisplay = cms.PSet(
        default = cms.vstring('EventDisplay',
            'Tier0'),
        emulator = cms.vstring('None'),
        test = cms.vstring('EventDisplay',
            'Lustre')
    ),
    streamExpressCosmics = cms.PSet(
        default = cms.vstring('Tier0'),
        emulator = cms.vstring('Lustre'),
        test = cms.vstring('Lustre')
    ),
    streamLookArea = cms.PSet(
        default = cms.vstring('DQM'),
        emulator = cms.vstring('None'),
        test = cms.vstring('DQM',
            'Lustre')
    ),
    streamNanoDST = cms.PSet(
        default = cms.vstring('Tier0'),
        emulator = cms.vstring('None'),
        test = cms.vstring('Lustre')
    ),
    streamRPCMON = cms.PSet(
        default = cms.vstring('Tier0'),
        emulator = cms.vstring('None'),
        test = cms.vstring('Lustre')
    ),
    streamTrackerCalibration = cms.PSet(
        default = cms.vstring('Tier0'),
        emulator = cms.vstring('None'),
        test = cms.vstring('Lustre')
    ),
    transferModes = cms.vstring('default',
        'test',
        'emulator')
)
# --- HLT AK4 calorimeter-jet reconstruction and energy-correction chain ---

# L3 absolute jet energy correction for AK4 calo jets.
process.hltAK4CaloAbsoluteCorrector = cms.EDProducer("LXXXCorrectorProducer",
    algorithm = cms.string('AK4CaloHLT'),
    level = cms.string('L3Absolute')
)

# Chains the L1 fastjet, L2 relative, L3 absolute and residual correctors.
process.hltAK4CaloCorrector = cms.EDProducer("ChainedJetCorrectorProducer",
    correctors = cms.VInputTag("hltAK4CaloFastJetCorrector", "hltAK4CaloRelativeCorrector", "hltAK4CaloAbsoluteCorrector", "hltAK4CaloResidualCorrector")
)

# L1 fastjet (pileup) correction, using the rho estimate from all calo towers.
process.hltAK4CaloFastJetCorrector = cms.EDProducer("L1FastjetCorrectorProducer",
    algorithm = cms.string('AK4CaloHLT'),
    level = cms.string('L1FastJet'),
    srcRho = cms.InputTag("hltFixedGridRhoFastjetAllCalo")
)

# Anti-kt (rParam = 0.4) calo-jet clustering over the full tower collection
# (src = hltTowerMakerForAll), with a fixed seed for deterministic clustering.
process.hltAK4CaloJets = cms.EDProducer("FastjetJetProducer",
    Active_Area_Repeats = cms.int32(5),
    DxyTrVtxMax = cms.double(0.0),
    DzTrVtxMax = cms.double(0.0),
    GhostArea = cms.double(0.01),
    Ghost_EtaMax = cms.double(6.0),
    MaxVtxZ = cms.double(15.0),
    MinVtxNdof = cms.int32(5),
    Rho_EtaMax = cms.double(4.4),
    UseOnlyOnePV = cms.bool(False),
    UseOnlyVertexTracks = cms.bool(False),
    dRMax = cms.double(-1.0),
    dRMin = cms.double(-1.0),
    doAreaDiskApprox = cms.bool(True),
    doAreaFastjet = cms.bool(False),
    doOutputJets = cms.bool(True),
    doPUOffsetCorr = cms.bool(False),
    doPVCorrection = cms.bool(False),
    doRhoFastjet = cms.bool(False),
    inputEMin = cms.double(0.0),
    inputEtMin = cms.double(0.3),
    jetAlgorithm = cms.string('AntiKt'),
    jetPtMin = cms.double(1.0),
    jetType = cms.string('CaloJet'),
    maxBadEcalCells = cms.uint32(9999999),
    maxBadHcalCells = cms.uint32(9999999),
    maxDepth = cms.int32(-1),
    maxProblematicEcalCells = cms.uint32(9999999),
    maxProblematicHcalCells = cms.uint32(9999999),
    maxRecoveredEcalCells = cms.uint32(9999999),
    maxRecoveredHcalCells = cms.uint32(9999999),
    minSeed = cms.uint32(14327),
    muCut = cms.double(-1.0),
    muMax = cms.double(-1.0),
    muMin = cms.double(-1.0),
    nFilt = cms.int32(-1),
    nSigmaPU = cms.double(1.0),
    puPtMin = cms.double(10.0),
    rFilt = cms.double(-1.0),
    rParam = cms.double(0.4),
    radiusPU = cms.double(0.4),
    rcut_factor = cms.double(-1.0),
    src = cms.InputTag("hltTowerMakerForAll"),
    srcPVs = cms.InputTag("NotUsed"),
    subjetPtMin = cms.double(-1.0),
    subtractorName = cms.string(''),
    sumRecHits = cms.bool(False),
    trimPtFracMin = cms.double(-1.0),
    useCMSBoostedTauSeedingAlgorithm = cms.bool(False),
    useDeterministicSeed = cms.bool(True),
    useFiltering = cms.bool(False),
    useMassDropTagger = cms.bool(False),
    usePruning = cms.bool(False),
    useTrimming = cms.bool(False),
    voronoiRfact = cms.double(0.9),
    yCut = cms.double(-1.0),
    yMax = cms.double(-1.0),
    yMin = cms.double(-1.0),
    zcut = cms.double(-1.0)
)

# Energy-corrected AK4 calo jets.
process.hltAK4CaloJetsCorrected = cms.EDProducer("CorrectedCaloJetProducer",
    correctors = cms.VInputTag("hltAK4CaloCorrector"),
    src = cms.InputTag("hltAK4CaloJets")
)

# Energy-corrected AK4 calo jets restricted to those passing jet ID.
process.hltAK4CaloJetsCorrectedIDPassed = cms.EDProducer("CorrectedCaloJetProducer",
    correctors = cms.VInputTag("hltAK4CaloCorrector"),
    src = cms.InputTag("hltAK4CaloJetsIDPassed")
)

# Jet-ID selection applied to the uncorrected AK4 calo jets, using the
# ECAL/HCAL rec-hit collections listed in JetIDParams.
process.hltAK4CaloJetsIDPassed = cms.EDProducer("HLTCaloJetIDProducer",
    JetIDParams = cms.PSet(
        ebRecHitsColl = cms.InputTag("hltEcalRecHit","EcalRecHitsEB"),
        eeRecHitsColl = cms.InputTag("hltEcalRecHit","EcalRecHitsEE"),
        hbheRecHitsColl = cms.InputTag("hltHbhereco"),
        hfRecHitsColl = cms.InputTag("hltHfreco"),
        hoRecHitsColl = cms.InputTag("hltHoreco"),
        useRecHits = cms.bool(True)
    ),
    jetsInput = cms.InputTag("hltAK4CaloJets"),
    max_EMF = cms.double(999.0),
    min_EMF = cms.double(1e-06),
    min_N90 = cms.int32(-2),
    min_N90hits = cms.int32(2)
)

# Same anti-kt clustering, but over the PF tower collection
# (src = hltTowerMakerForPF); differs from hltAK4CaloJets in src, minSeed,
# doAreaDiskApprox and voronoiRfact.
process.hltAK4CaloJetsPF = cms.EDProducer("FastjetJetProducer",
    Active_Area_Repeats = cms.int32(5),
    DxyTrVtxMax = cms.double(0.0),
    DzTrVtxMax = cms.double(0.0),
    GhostArea = cms.double(0.01),
    Ghost_EtaMax = cms.double(6.0),
    MaxVtxZ = cms.double(15.0),
    MinVtxNdof = cms.int32(5),
    Rho_EtaMax = cms.double(4.4),
    UseOnlyOnePV = cms.bool(False),
    UseOnlyVertexTracks = cms.bool(False),
    dRMax = cms.double(-1.0),
    dRMin = cms.double(-1.0),
    doAreaDiskApprox = cms.bool(False),
    doAreaFastjet = cms.bool(False),
    doOutputJets = cms.bool(True),
    doPUOffsetCorr = cms.bool(False),
    doPVCorrection = cms.bool(False),
    doRhoFastjet = cms.bool(False),
    inputEMin = cms.double(0.0),
    inputEtMin = cms.double(0.3),
    jetAlgorithm = cms.string('AntiKt'),
    jetPtMin = cms.double(1.0),
    jetType = cms.string('CaloJet'),
    maxBadEcalCells = cms.uint32(9999999),
    maxBadHcalCells = cms.uint32(9999999),
    maxDepth = cms.int32(-1),
    maxProblematicEcalCells = cms.uint32(9999999),
    maxProblematicHcalCells = cms.uint32(9999999),
    maxRecoveredEcalCells = cms.uint32(9999999),
    maxRecoveredHcalCells = cms.uint32(9999999),
    minSeed = cms.uint32(0),
    muCut = cms.double(-1.0),
    muMax = cms.double(-1.0),
    muMin = cms.double(-1.0),
    nFilt = cms.int32(-1),
    nSigmaPU = cms.double(1.0),
    puPtMin = cms.double(10.0),
    rFilt = cms.double(-1.0),
    rParam = cms.double(0.4),
    radiusPU = cms.double(0.4),
    rcut_factor = cms.double(-1.0),
    src = cms.InputTag("hltTowerMakerForPF"),
    srcPVs = cms.InputTag("NotUsed"),
    subjetPtMin = cms.double(-1.0),
    subtractorName = cms.string(''),
    sumRecHits = cms.bool(False),
    trimPtFracMin = cms.double(-1.0),
    useCMSBoostedTauSeedingAlgorithm = cms.bool(False),
    useDeterministicSeed = cms.bool(True),
    useFiltering = cms.bool(False),
    useMassDropTagger = cms.bool(False),
    usePruning = cms.bool(False),
    useTrimming = cms.bool(False),
    voronoiRfact = cms.double(-9.0),
    yCut = cms.double(-1.0),
    yMax = cms.double(-1.0),
    yMin = cms.double(-1.0),
    zcut = cms.double(-1.0)
)

# L2 relative jet energy correction for AK4 calo jets.
process.hltAK4CaloRelativeCorrector = cms.EDProducer("LXXXCorrectorProducer",
    algorithm = cms.string('AK4CaloHLT'),
    level = cms.string('L2Relative')
)

# L2L3 residual jet energy correction for AK4 calo jets.
process.hltAK4CaloResidualCorrector = cms.EDProducer("LXXXCorrectorProducer",
    algorithm = cms.string('AK4CaloHLT'),
    level = cms.string('L2L3Residual')
)
# Anti-kT R=0.4 track jets from iteration-0 track refs, clustered around the
# single primary vertex (UseOnlyOnePV) from hltTrimmedPixelVertices; these
# seed iteration 1 of the tracking.
process.hltAK4Iter0TrackJets4Iter1 = cms.EDProducer("FastjetJetProducer",
    Active_Area_Repeats = cms.int32(5),
    DxyTrVtxMax = cms.double(0.2),
    DzTrVtxMax = cms.double(0.5),
    GhostArea = cms.double(0.01),
    Ghost_EtaMax = cms.double(6.0),
    MaxVtxZ = cms.double(30.0),
    MinVtxNdof = cms.int32(0),
    Rho_EtaMax = cms.double(4.4),
    UseOnlyOnePV = cms.bool(True),
    UseOnlyVertexTracks = cms.bool(False),
    dRMax = cms.double(-1.0),
    dRMin = cms.double(-1.0),
    doAreaDiskApprox = cms.bool(False),
    doAreaFastjet = cms.bool(False),
    doOutputJets = cms.bool(True),
    doPUOffsetCorr = cms.bool(False),
    doPVCorrection = cms.bool(False),
    doRhoFastjet = cms.bool(False),
    inputEMin = cms.double(0.0),
    inputEtMin = cms.double(0.1),
    jetAlgorithm = cms.string('AntiKt'),
    jetPtMin = cms.double(1.0),
    jetType = cms.string('TrackJet'),
    maxBadEcalCells = cms.uint32(9999999),
    maxBadHcalCells = cms.uint32(9999999),
    maxDepth = cms.int32(-1),
    maxProblematicEcalCells = cms.uint32(9999999),
    maxProblematicHcalCells = cms.uint32(9999999),
    maxRecoveredEcalCells = cms.uint32(9999999),
    maxRecoveredHcalCells = cms.uint32(9999999),
    minSeed = cms.uint32(14327),
    muCut = cms.double(-1.0),
    muMax = cms.double(-1.0),
    muMin = cms.double(-1.0),
    nFilt = cms.int32(-1),
    nSigmaPU = cms.double(1.0),
    puPtMin = cms.double(0.0),
    rFilt = cms.double(-1.0),
    rParam = cms.double(0.4),
    radiusPU = cms.double(0.4),
    rcut_factor = cms.double(-1.0),
    src = cms.InputTag("hltTrackIter0RefsForJets4Iter1"),
    srcPVs = cms.InputTag("hltTrimmedPixelVertices"),
    subjetPtMin = cms.double(-1.0),
    subtractorName = cms.string(''),
    sumRecHits = cms.bool(False),
    trimPtFracMin = cms.double(-1.0),
    useCMSBoostedTauSeedingAlgorithm = cms.bool(False),
    useDeterministicSeed = cms.bool(True),
    useFiltering = cms.bool(False),
    useMassDropTagger = cms.bool(False),
    usePruning = cms.bool(False),
    useTrimming = cms.bool(False),
    voronoiRfact = cms.double(0.9),
    yCut = cms.double(-1.0),
    yMax = cms.double(-1.0),
    yMin = cms.double(-1.0),
    zcut = cms.double(-1.0)
)
# Same configuration for iteration-1 track refs, but with a higher jet pT
# threshold (7.5 GeV); these seed iteration 2 of the tracking.
process.hltAK4Iter1TrackJets4Iter2 = cms.EDProducer("FastjetJetProducer",
    Active_Area_Repeats = cms.int32(5),
    DxyTrVtxMax = cms.double(0.2),
    DzTrVtxMax = cms.double(0.5),
    GhostArea = cms.double(0.01),
    Ghost_EtaMax = cms.double(6.0),
    MaxVtxZ = cms.double(30.0),
    MinVtxNdof = cms.int32(0),
    Rho_EtaMax = cms.double(4.4),
    UseOnlyOnePV = cms.bool(True),
    UseOnlyVertexTracks = cms.bool(False),
    dRMax = cms.double(-1.0),
    dRMin = cms.double(-1.0),
    doAreaDiskApprox = cms.bool(False),
    doAreaFastjet = cms.bool(False),
    doOutputJets = cms.bool(True),
    doPUOffsetCorr = cms.bool(False),
    doPVCorrection = cms.bool(False),
    doRhoFastjet = cms.bool(False),
    inputEMin = cms.double(0.0),
    inputEtMin = cms.double(0.1),
    jetAlgorithm = cms.string('AntiKt'),
    jetPtMin = cms.double(7.5),
    jetType = cms.string('TrackJet'),
    maxBadEcalCells = cms.uint32(9999999),
    maxBadHcalCells = cms.uint32(9999999),
    maxDepth = cms.int32(-1),
    maxProblematicEcalCells = cms.uint32(9999999),
    maxProblematicHcalCells = cms.uint32(9999999),
    maxRecoveredEcalCells = cms.uint32(9999999),
    maxRecoveredHcalCells = cms.uint32(9999999),
    minSeed = cms.uint32(14327),
    muCut = cms.double(-1.0),
    muMax = cms.double(-1.0),
    muMin = cms.double(-1.0),
    nFilt = cms.int32(-1),
    nSigmaPU = cms.double(1.0),
    puPtMin = cms.double(0.0),
    rFilt = cms.double(-1.0),
    rParam = cms.double(0.4),
    radiusPU = cms.double(0.4),
    rcut_factor = cms.double(-1.0),
    src = cms.InputTag("hltIter1TrackRefsForJets4Iter2"),
    srcPVs = cms.InputTag("hltTrimmedPixelVertices"),
    subjetPtMin = cms.double(-1.0),
    subtractorName = cms.string(''),
    sumRecHits = cms.bool(False),
    trimPtFracMin = cms.double(-1.0),
    useCMSBoostedTauSeedingAlgorithm = cms.bool(False),
    useDeterministicSeed = cms.bool(True),
    useFiltering = cms.bool(False),
    useMassDropTagger = cms.bool(False),
    usePruning = cms.bool(False),
    useTrimming = cms.bool(False),
    voronoiRfact = cms.double(0.9),
    yCut = cms.double(-1.0),
    yMax = cms.double(-1.0),
    yMin = cms.double(-1.0),
    zcut = cms.double(-1.0)
)
# L3 (absolute) correction step of the AK4 PF corrector chain.
process.hltAK4PFAbsoluteCorrector = cms.EDProducer("LXXXCorrectorProducer",
    algorithm = cms.string('AK4PFHLT'),
    level = cms.string('L3Absolute')
)
# Full AK4 PF correction chain: L1FastJet -> L2Relative -> L3Absolute -> L2L3Residual.
process.hltAK4PFCorrector = cms.EDProducer("ChainedJetCorrectorProducer",
    correctors = cms.VInputTag("hltAK4PFFastJetCorrector", "hltAK4PFRelativeCorrector", "hltAK4PFAbsoluteCorrector", "hltAK4PFResidualCorrector")
)
# L1 pile-up subtraction using the event-wide fixed-grid rho estimate.
process.hltAK4PFFastJetCorrector = cms.EDProducer("L1FastjetCorrectorProducer",
    algorithm = cms.string('AK4PFHLT'),
    level = cms.string('L1FastJet'),
    srcRho = cms.InputTag("hltFixedGridRhoFastjetAll")
)
# Anti-kT R=0.4 jets clustered from hltParticleFlow candidates with
# disk-approximated jet areas (doAreaDiskApprox), no pT threshold.
process.hltAK4PFJets = cms.EDProducer("FastjetJetProducer",
    Active_Area_Repeats = cms.int32(5),
    DxyTrVtxMax = cms.double(0.0),
    DzTrVtxMax = cms.double(0.0),
    GhostArea = cms.double(0.01),
    Ghost_EtaMax = cms.double(6.0),
    MaxVtxZ = cms.double(15.0),
    MinVtxNdof = cms.int32(0),
    Rho_EtaMax = cms.double(4.4),
    UseOnlyOnePV = cms.bool(False),
    UseOnlyVertexTracks = cms.bool(False),
    dRMax = cms.double(-1.0),
    dRMin = cms.double(-1.0),
    doAreaDiskApprox = cms.bool(True),
    doAreaFastjet = cms.bool(False),
    doOutputJets = cms.bool(True),
    doPUOffsetCorr = cms.bool(False),
    doPVCorrection = cms.bool(False),
    doRhoFastjet = cms.bool(False),
    inputEMin = cms.double(0.0),
    inputEtMin = cms.double(0.0),
    jetAlgorithm = cms.string('AntiKt'),
    jetPtMin = cms.double(0.0),
    jetType = cms.string('PFJet'),
    maxBadEcalCells = cms.uint32(9999999),
    maxBadHcalCells = cms.uint32(9999999),
    maxDepth = cms.int32(-1),
    maxProblematicEcalCells = cms.uint32(9999999),
    maxProblematicHcalCells = cms.uint32(9999999),
    maxRecoveredEcalCells = cms.uint32(9999999),
    maxRecoveredHcalCells = cms.uint32(9999999),
    minSeed = cms.uint32(0),
    muCut = cms.double(-1.0),
    muMax = cms.double(-1.0),
    muMin = cms.double(-1.0),
    nFilt = cms.int32(-1),
    nSigmaPU = cms.double(1.0),
    puPtMin = cms.double(10.0),
    rFilt = cms.double(-1.0),
    rParam = cms.double(0.4),
    radiusPU = cms.double(0.4),
    rcut_factor = cms.double(-1.0),
    src = cms.InputTag("hltParticleFlow"),
    srcPVs = cms.InputTag("hltPixelVertices"),
    subjetPtMin = cms.double(-1.0),
    subtractorName = cms.string(''),
    sumRecHits = cms.bool(False),
    trimPtFracMin = cms.double(-1.0),
    useCMSBoostedTauSeedingAlgorithm = cms.bool(False),
    useDeterministicSeed = cms.bool(True),
    useFiltering = cms.bool(False),
    useMassDropTagger = cms.bool(False),
    usePruning = cms.bool(False),
    useTrimming = cms.bool(False),
    voronoiRfact = cms.double(-9.0),
    yCut = cms.double(-1.0),
    yMax = cms.double(-1.0),
    yMin = cms.double(-1.0),
    zcut = cms.double(-1.0)
)
# Apply the chained PF corrector to the raw PF jets.
process.hltAK4PFJetsCorrected = cms.EDProducer("CorrectedPFJetProducer",
    correctors = cms.VInputTag("hltAK4PFCorrector"),
    src = cms.InputTag("hltAK4PFJets")
)
# Loose PF jet identification (energy-fraction and constituent-count cuts)
# for jets above 20 GeV.
process.hltAK4PFJetsLooseID = cms.EDProducer("HLTPFJetIDProducer",
    CEF = cms.double(0.99),
    CHF = cms.double(0.0),
    NCH = cms.int32(0),
    NEF = cms.double(0.99),
    NHF = cms.double(0.99),
    NTOT = cms.int32(1),
    jetsInput = cms.InputTag("hltAK4PFJets"),
    maxCF = cms.double(99.0),
    maxEta = cms.double(1e+99),
    minPt = cms.double(20.0)
)
# Corrected version of the loose-ID PF jets.
process.hltAK4PFJetsLooseIDCorrected = cms.EDProducer("CorrectedPFJetProducer",
    correctors = cms.VInputTag("hltAK4PFCorrector"),
    src = cms.InputTag("hltAK4PFJetsLooseID")
)
process.hltAK4PFJetsTightID = cms.EDProducer("HLTPFJetIDProducer",
CEF = cms.double(0.99),
CHF = cms.double(0.0),
NCH = cms.int32(0),
NEF = cms.double(0.99),
NHF = | |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Contributed to by:
# meta-androcto, <NAME>, zmj100, <NAME>, TrumanBlending, PKHG, #
# Oscurart, Greg, <NAME>, komi3D, BlenderLab, <NAME> (brikbot), #
# metalliandy, macouno, CoDEmanX, dustractor, Liero, lijenstina, <NAME> #
# Pistiwique, <NAME> #
# Add-on metadata read by Blender's add-on manager (minimum Blender 2.80).
bl_info = {
    "name": "Edit Mesh Tools",
    "author": "Meta-Androcto",
    "version": (0, 3, 6),
    "blender": (2, 80, 0),
    "location": "View3D > Sidebar > Edit Tab / Edit Mode Context Menu",
    "warning": "",
    "description": "Mesh modelling toolkit. Several tools to aid modelling",
    "doc_url": "{BLENDER_MANUAL_URL}/addons/mesh/edit_mesh_tools.html",
    "category": "Mesh",
}
# Import From Files
# Support add-on reload: if "bpy" is already bound, this module is being
# re-executed, so refresh the previously imported submodules instead of
# importing them again.
if "bpy" in locals():
    import importlib
    importlib.reload(mesh_offset_edges)
    importlib.reload(split_solidify)
    importlib.reload(mesh_filletplus)
    importlib.reload(mesh_vertex_chamfer)
    importlib.reload(random_vertices)
    # importlib.reload(mesh_extrude_and_reshape)
    importlib.reload(mesh_edge_roundifier)
    importlib.reload(mesh_edgetools)
    importlib.reload(mesh_edges_floor_plan)
    importlib.reload(mesh_edges_length)
    importlib.reload(pkhg_faces)
    importlib.reload(mesh_cut_faces)
    importlib.reload(mesh_relax)
else:
    from . import mesh_offset_edges
    from . import split_solidify
    from . import mesh_filletplus
    from . import mesh_vertex_chamfer
    from . import random_vertices
    # from . import mesh_extrude_and_reshape
    from . import mesh_edge_roundifier
    from . import mesh_edgetools
    from . import mesh_edges_floor_plan
    from . import mesh_edges_length
    from . import pkhg_faces
    from . import mesh_cut_faces
    from . import mesh_relax
import bmesh
import bpy
import collections
import mathutils
import random
from math import (
sin, cos, tan,
degrees, radians, pi,
)
from random import gauss
from mathutils import Matrix, Euler, Vector
from bpy_extras import view3d_utils
from bpy.types import (
Operator,
Menu,
Panel,
PropertyGroup,
AddonPreferences,
)
from bpy.props import (
BoolProperty,
BoolVectorProperty,
EnumProperty,
FloatProperty,
FloatVectorProperty,
IntVectorProperty,
PointerProperty,
StringProperty,
IntProperty
)
# ########################################
# ##### General functions ################
# ########################################
# Multi extrude
def gloc(self, r):
    """Return the constant global translation vector built from the
    operator's offx/offy/offz properties (`r` is unused; kept so all
    per-face helpers share one signature)."""
    components = (self.offx, self.offy, self.offz)
    return Vector(components)
def vloc(self, r):
    """Deterministic per-face offset magnitude: the base offset scaled by
    Gaussian noise seeded from the operator seed plus the face index."""
    random.seed(self.ran + r)
    noise = gauss(0, self.var1 / 3)
    return self.off * (1 + noise)
def nrot(self, n):
    """Per-axis rotation Euler with each angle scaled by the matching
    component of the face normal `n`."""
    angles = (radians(self.nrotx) * n[0],
              radians(self.nroty) * n[1],
              radians(self.nrotz) * n[2])
    return Euler(angles, 'XYZ')
def vrot(self, r):
    """Deterministic per-face rotation Euler: base rotx/roty/rotz angles
    plus Gaussian jitter seeded from the operator seed and face index."""
    random.seed(self.ran + r)
    sigma = self.var2 / 3
    return Euler((radians(self.rotx) + gauss(0, sigma),
                  radians(self.roty) + gauss(0, sigma),
                  radians(self.rotz) + gauss(0, sigma)), 'XYZ')
def vsca(self, r):
    """Deterministic per-face scale factor: the base scale times Gaussian
    noise seeded from the operator seed plus the face index."""
    random.seed(self.ran + r)
    noise_factor = 1 + gauss(0, self.var3 / 3)
    return self.sca * noise_factor
class ME_OT_MExtrude(Operator):
    """Repeatedly extrude the selected faces, applying per-step offset,
    rotation and scale with optional Gaussian randomization.

    The per-face transforms are derived from the operator properties by
    the module-level helpers gloc/vloc/nrot/vrot/vsca.
    """
    bl_idname = "object.mextrude"
    bl_label = "Multi Extrude"
    bl_description = ("Extrude selected Faces with Rotation,\n"
                      "Scaling, Variation, Randomization")
    bl_options = {"REGISTER", "UNDO", "PRESET"}

    # Operator properties (exposed in the redo panel).
    off : FloatProperty(
        name="Offset",
        soft_min=0.001, soft_max=10,
        min=-100, max=100,
        default=1.0,
        description="Translation"
        )
    offx : FloatProperty(
        name="Loc X",
        soft_min=-10.0, soft_max=10.0,
        min=-100.0, max=100.0,
        default=0.0,
        description="Global Translation X"
        )
    offy : FloatProperty(
        name="Loc Y",
        soft_min=-10.0, soft_max=10.0,
        min=-100.0, max=100.0,
        default=0.0,
        description="Global Translation Y"
        )
    offz : FloatProperty(
        name="Loc Z",
        soft_min=-10.0, soft_max=10.0,
        min=-100.0, max=100.0,
        default=0.0,
        description="Global Translation Z"
        )
    rotx : FloatProperty(
        name="Rot X",
        min=-85, max=85,
        soft_min=-30, soft_max=30,
        default=0,
        description="X Rotation"
        )
    roty : FloatProperty(
        name="Rot Y",
        min=-85, max=85,
        soft_min=-30,
        soft_max=30,
        default=0,
        description="Y Rotation"
        )
    rotz : FloatProperty(
        name="Rot Z",
        min=-85, max=85,
        soft_min=-30, soft_max=30,
        default=-0,
        description="Z Rotation"
        )
    nrotx : FloatProperty(
        name="N Rot X",
        min=-85, max=85,
        soft_min=-30, soft_max=30,
        default=0,
        description="Normal X Rotation"
        )
    nroty : FloatProperty(
        name="N Rot Y",
        min=-85, max=85,
        soft_min=-30, soft_max=30,
        default=0,
        description="Normal Y Rotation"
        )
    nrotz : FloatProperty(
        name="N Rot Z",
        min=-85, max=85,
        soft_min=-30, soft_max=30,
        default=-0,
        description="Normal Z Rotation"
        )
    sca : FloatProperty(
        name="Scale",
        min=0.01, max=10,
        soft_min=0.5, soft_max=1.5,
        default=1.0,
        description="Scaling of the selected faces after extrusion"
        )
    var1 : FloatProperty(
        name="Offset Var", min=-10, max=10,
        soft_min=-1, soft_max=1,
        default=0,
        description="Offset variation"
        )
    var2 : FloatProperty(
        name="Rotation Var",
        min=-10, max=10,
        soft_min=-1, soft_max=1,
        default=0,
        description="Rotation variation"
        )
    var3 : FloatProperty(
        name="Scale Noise",
        min=-10, max=10,
        soft_min=-1, soft_max=1,
        default=0,
        description="Scaling noise"
        )
    var4 : IntProperty(
        name="Probability",
        min=0, max=100,
        default=100,
        description="Probability, chance of extruding a face"
        )
    num : IntProperty(
        name="Repeat",
        min=1, max=500,
        soft_max=100,
        default=1,
        description="Repetitions"
        )
    ran : IntProperty(
        name="Seed",
        min=-9999, max=9999,
        default=0,
        description="Seed to feed random values"
        )
    opt1 : BoolProperty(
        name="Polygon coordinates",
        default=True,
        description="Polygon coordinates, Object coordinates"
        )
    opt2 : BoolProperty(
        name="Proportional offset",
        default=False,
        description="Scale * Offset"
        )
    opt3 : BoolProperty(
        name="Per step rotation noise",
        default=False,
        description="Per step rotation noise, Initial rotation noise"
        )
    opt4 : BoolProperty(
        name="Per step scale noise",
        default=False,
        description="Per step scale noise, Initial scale noise"
        )

    @classmethod
    def poll(cls, context):
        """Only available when the active object is a mesh."""
        obj = context.object
        return (obj and obj.type == 'MESH')

    def draw(self, context):
        """Lay out the operator properties in the redo panel."""
        layout = self.layout
        col = layout.column(align=True)
        col.label(text="Transformations:")
        col.prop(self, "off", slider=True)
        col.prop(self, "offx", slider=True)
        col.prop(self, "offy", slider=True)
        col.prop(self, "offz", slider=True)

        col = layout.column(align=True)
        col.prop(self, "rotx", slider=True)
        col.prop(self, "roty", slider=True)
        col.prop(self, "rotz", slider=True)
        col.prop(self, "nrotx", slider=True)
        col.prop(self, "nroty", slider=True)
        col.prop(self, "nrotz", slider=True)
        col = layout.column(align=True)
        col.prop(self, "sca", slider=True)

        col = layout.column(align=True)
        col.label(text="Variation settings:")
        col.prop(self, "var1", slider=True)
        col.prop(self, "var2", slider=True)
        col.prop(self, "var3", slider=True)
        col.prop(self, "var4", slider=True)
        col.prop(self, "ran")
        col = layout.column(align=False)
        col.prop(self, 'num')

        col = layout.column(align=True)
        col.label(text="Options:")
        col.prop(self, "opt1")
        col.prop(self, "opt2")
        col.prop(self, "opt3")
        col.prop(self, "opt4")

    def execute(self, context):
        """Run the multi-extrusion on the active mesh via bmesh and write
        the result back; restores the previous object mode afterwards."""
        obj = bpy.context.object
        om = obj.mode
        bpy.context.tool_settings.mesh_select_mode = [False, False, True]
        origin = Vector([0.0, 0.0, 0.0])

        # bmesh operations
        bpy.ops.object.mode_set()
        bm = bmesh.new()
        bm.from_mesh(obj.data)
        sel = [f for f in bm.faces if f.select]

        after = []

        # faces loop
        for i, of in enumerate(sel):
            nro = nrot(self, of.normal)
            off = vloc(self, i)
            loc = gloc(self, i)
            of.normal_update()

            # initial rotation noise
            if self.opt3 is False:
                rot = vrot(self, i)
            # initial scale noise
            if self.opt4 is False:
                s = vsca(self, i)

            # extrusion loop
            for r in range(self.num):
                # random probability % for extrusions
                if self.var4 > int(random.random() * 100):
                    nf = of.copy()
                    nf.normal_update()
                    no = nf.normal.copy()

                    # face/obj coordinates
                    if self.opt1 is True:
                        ce = nf.calc_center_bounds()
                    else:
                        ce = origin

                    # per step rotation noise
                    if self.opt3 is True:
                        rot = vrot(self, i + r)
                    # per step scale noise
                    if self.opt4 is True:
                        s = vsca(self, i + r)

                    # proportional, scale * offset
                    # NOTE: this compounds `off` across steps since it is
                    # reassigned in place each iteration.
                    if self.opt2 is True:
                        off = s * off

                    # transform the copied face's vertices about the pivot ce
                    for v in nf.verts:
                        v.co -= ce
                        v.co.rotate(nro)
                        v.co.rotate(rot)
                        v.co += ce + loc + no * off
                        v.co = v.co.lerp(ce, 1 - s)

                    # extrude code from TrumanBlending
                    for a, b in zip(of.loops, nf.loops):
                        sf = bm.faces.new((a.vert, a.link_loop_next.vert,
                                           b.link_loop_next.vert, b.vert))
                        sf.normal_update()
                    bm.faces.remove(of)
                    of = nf

            after.append(of)

        # leave only the newly created cap faces selected
        for v in bm.verts:
            v.select = False
        for e in bm.edges:
            e.select = False

        for f in after:
            if f not in sel:
                f.select = True
            else:
                f.select = False

        bm.to_mesh(obj.data)
        obj.data.update()

        # restore user settings
        bpy.ops.object.mode_set(mode=om)

        if not len(sel):
            self.report({"WARNING"},
                        "No suitable Face selection found. Operation cancelled")
            return {'CANCELLED'}

        return {'FINISHED'}
# Face inset fillet
def edit_mode_out():
    """Switch the active object to Object Mode."""
    bpy.ops.object.mode_set(mode='OBJECT')
def edit_mode_in():
    """Switch the active object to Edit Mode."""
    bpy.ops.object.mode_set(mode='EDIT')
def angle_rotation(rp, q, axis, angle):
    """Rotate point q by `angle` around `axis` about the pivot rp."""
    # Translate q into rp's frame, rotate, then translate back.
    return (Matrix.Rotation(angle, 3, axis) @ (q - rp)) + rp
def face_inset_fillet(bme, face_index_list, inset_amount, distance,
number_of_sides, out, radius, type_enum, kp):
list_del = []
for faceindex in face_index_list:
bme.faces.ensure_lookup_table()
# loops through the faces...
f = bme.faces[faceindex]
f.select_set(False)
list_del.append(f)
f.normal_update()
vertex_index_list = [v.index for v in f.verts]
dict_0 = {}
orientation_vertex_list = []
n = len(vertex_index_list)
for i in range(n):
# loops through the vertices
dict_0[i] = []
bme.verts.ensure_lookup_table()
p = (bme.verts[vertex_index_list[i]].co).copy()
p1 = (bme.verts[vertex_index_list[(i - 1) % n]].co).copy()
p2 = (bme.verts[vertex_index_list[(i + 1) % n]].co).copy()
# copies some vert coordinates, always the 3 around i
dict_0[i].append(bme.verts[vertex_index_list[i]])
# appends the bmesh vert of the appropriate index to the dict
vec1 = p - p1
vec2 = p - p2
# vectors for the other corner points to the cornerpoint
# corresponding to i / p
angle = vec1.angle(vec2)
adj = inset_amount / tan(angle * 0.5)
h = (adj ** 2 + inset_amount ** 2) ** 0.5
if round(degrees(angle)) == 180 or round(degrees(angle)) == 0.0:
# if the corner is a straight line...
# I think this creates some new | |
import pandas as pd
import codecs
import json
from collections import defaultdict, OrderedDict
import numpy as np
from tqdm import tqdm
import gensim
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import KFold
import shutil
import os
import sys
import random
# todo in separate file
def read_dataset(data_path, read_fn=lambda x: x, sep='\t'):
    """Read a two-column word/hypernym file into {WORD: [hypernyms, ...]}.

    Each line is '<word><sep><hypernyms>'; words are upper-cased and the
    hypernym column is passed through `read_fn` before being appended.
    """
    vocab = defaultdict(list)
    with codecs.open(data_path, 'r', encoding='utf-8') as fin:
        for raw_line in fin:
            cols = raw_line.replace("\n", '').split(sep)
            vocab[cols[0].upper()].append(read_fn(cols[1]))
    return vocab
class VectorsWithHash:
    """Wrapper around a word-vector model that memoizes pairwise similarity.

    The cache is keyed by the ordered pair '<w1>[SEP]<w2>' and grows
    without bound for the lifetime of the object.
    """

    def __init__(self, vectors):
        self.vectors = vectors
        self.hash = {}  # '<w1>[SEP]<w2>' -> similarity score

    def __contains__(self, w):
        return w in self.vectors

    def similarity(self, w1, w2):
        """Similarity of (w1, w2), computed once per ordered pair."""
        key = f'{w1}[SEP]{w2}'
        if key not in self.hash:
            self.hash[key] = self.vectors.similarity(w1, w2)
        return self.hash[key]

    def most_similar(self, word, topn=10000):
        """Delegate to the underlying model (not cached)."""
        return self.vectors.most_similar(word, topn=topn)
class HypernymPredictModel():
    """Ranks thesaurus synsets as hypernym candidates for an input word.

    Candidates are harvested from the word's distributional nearest
    neighbours and their (grand)hypernym synsets, then scored with
    count/level, Wiktionary and embedding-similarity features.

    NOTE(review): depends on names defined elsewhere in this project
    (EnWordNet / RuThes / RuWordNet, `wkt`, `filter_most_sim_by_type`,
    `calculate_wiktionary_features`, `calculate_synset_similarity`).
    """

    def __init__(self, config):
        pass

    def train(self, train_words):
        """Stub: not implemented."""
        pass

    def predict(self, new_words):
        """Stub: not implemented."""
        pass

    def save(self, model_path):
        """Stub: not implemented."""
        pass

    def load(self, model_path):
        """Stub: not implemented."""
        pass

    def _init_model_state(self, config):
        # Part of speech handled by this model and the number of nearest
        # neighbours kept when generating candidates.
        self.word_type = config['pos']
        self.topk = config['topk']

    def _load_resources(self, config):
        """Load the thesaurus, the word vectors and the Wiktionary data."""
        self._load_thesaurus(config)
        self._load_vectors(config)
        self._load_wkt(config)

    def _load_thesaurus(self, config):
        # English -> WordNet; Russian -> RuThes or RuWordNet per config flag.
        ThesClass = EnWordNet if config['lang'] == 'en' else RuThes if config['ruthes'] else RuWordNet
        self.thesaurus = ThesClass(config['thesaurus_dir'])

    def _load_vectors(self, config):
        """Load embeddings, trying gensim's native format first and falling
        back to the word2vec text format."""
        embeddings_path = config['embeddings_path']
        try:
            self.vector_model = VectorsWithHash(gensim.models.KeyedVectors.load(embeddings_path))
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are not swallowed.
            self.vector_model = VectorsWithHash(gensim.models.KeyedVectors.load_word2vec_format(embeddings_path, binary=False))

    def _load_wkt(self, config):
        self.wiktionary = wkt.load_wiktionary(config['wiktionary_dump_path'], self.vector_model)

    def _calculate_features(self, word):
        """Build a DataFrame with one row per candidate synset: columns
        'cand' (synset id) and f0..fN (feature values)."""
        candidate2features = self._calculate_candidates(word)
        candidate_col = []
        features = []
        for synset_id in candidate2features:
            synset = self.thesaurus.synsets[synset_id]
            init_features = self._calculate_init_features(synset_id, candidate2features)
            wkt_features = calculate_wiktionary_features(word, synset, self.wiktionary)
            synset_features = calculate_synset_similarity(word, synset, self.vector_model, None)
            candidate_col.append(synset_id)
            features.append(init_features + wkt_features + synset_features)
        features = np.array(features)
        columns = {f'f{i}': features[:,i] for i in range(features.shape[1])}
        columns['cand'] = candidate_col
        return pd.DataFrame(columns)

    def _calculate_candidates(self, word):
        """Collect candidate synsets (with occurrence counts and hypernym
        levels) from the word's filtered nearest neighbours."""
        most_similar_words = self.vector_model.most_similar(word, topn=10000)  # must be larger than topk
        most_similar_words = filter_most_sim_by_type(word, most_similar_words, self.word_type, self.thesaurus)
        most_similar_words = most_similar_words[:self.topk]
        candidates = []
        for cand_word in most_similar_words:
            if cand_word in self.thesaurus.sense2synid:
                for synid in self.thesaurus.sense2synid[cand_word]:
                    if self.thesaurus.synsets[synid].synset_type != self.word_type:
                        continue
                    # The neighbour's own synset (level 0), its hypernyms
                    # (level 1) and grand-hypernyms (level 2).
                    candidates.append([synid, 0])
                    for h in self.thesaurus.synsets[synid].rels.get('hypernym', []):
                        candidates.append([h.synset_id, 1])
                        for hh in h.rels.get('hypernym', []):
                            candidates.append([hh.synset_id, 2])
        candidate2features = {}
        for synset_id, level in candidates:
            if synset_id not in candidate2features:
                candidate2features[synset_id] = [0, []]
            candidate2features[synset_id][0] += 1
            candidate2features[synset_id][1].append(level)
        return candidate2features

    def _calculate_init_features(self, synset_id, candidate2features):
        """Count/level statistics for one candidate.

        BUG FIX: the original omitted `self` from the signature while every
        call site used `self._calculate_init_features(...)`, which raised
        TypeError (three arguments passed to a two-parameter function).
        """
        features = []
        init_features = candidate2features[synset_id]
        features.append(init_features[0])
        features.append(np.log2(2 + init_features[0]))
        features.append(np.min(init_features[1]))
        features.append(np.mean(init_features[1]))
        features.append(np.max(init_features[1]))
        return features
# Dead code: an earlier load_wiktionary variant (taking `wkt` explicitly)
# neutralized by wrapping it in a module-level string literal.
'''
def load_wiktionary(wiktionary_dump_path, vectors, wkt):
    title2docs = {key.replace(' ', '_'): val for key, val in wkt.get_title2docs(wiktionary_dump_path).items() if key in vectors}
    for title in title2docs:
        docs_info = []
        for doc in title2docs[title]:
            docs_info.append(wkt.parse_wiktionary(doc['text']))
        title2docs[title] = docs_info
    return title2docs
'''
def load_wiktionary(wiktionary_dump_path, vectors):
    """Parse a Wiktionary dump into {title: [parsed article, ...]},
    keeping only titles present in `vectors` (spaces replaced by '_').

    NOTE(review): `wkt` is referenced here but never imported in this
    module -- confirm it is provided elsewhere before calling.
    """
    title2docs = {key.replace(' ', '_'): val for key, val in wkt.get_title2docs(wiktionary_dump_path).items() if key in vectors}
    for title in title2docs:
        docs_info = []
        for doc in title2docs[title]:
            docs_info.append(wkt.parse_wiktionary(doc['text']))
        title2docs[title] = docs_info
    return title2docs
def calc_most_sim(embeddings, thesaurus, test_df, word_type, topk):
    """For every test word present in the embeddings, collect its top-k
    most similar in-thesaurus words of the requested part of speech.

    Returns a DataFrame with columns 'word' and 'most_similar'.
    """
    words = []
    all_most_similar = []
    for word in test_df['word']:
        if word not in embeddings:
            continue
        neighbours = embeddings.most_similar(word, topn=10000)  # deliberately larger than topk
        neighbours = filter_most_sim_by_type(word, neighbours, word_type, thesaurus)
        words.append(word)
        all_most_similar.append(neighbours[:topk])
    df = pd.DataFrame(columns=['word', 'most_similar'])
    df['word'] = words
    df['most_similar'] = all_most_similar
    return df
def filter_most_sim_by_type(word, most_similar_words, word_type, thesaurus):
    """Filter distributional neighbours against the thesaurus.

    Keeps only neighbours that (a) exist in the thesaurus, (b) do not share
    a synset with `word`, (c) have at least one synset of the requested
    part of speech, and (d) are not pure named entities (i.e. not all of
    their matching synsets carry an 'instance hypernym' relation).
    Returns [word, score] pairs in the original order.
    """
    # Words sharing a synset with the query word are banned.
    banned_words = set()
    for synid in thesaurus.sense2synid.get(word, []):
        banned_words.update(thesaurus.synsets[synid].synset_words)

    kept = []
    for cand, score in most_similar_words:
        cand = cand.replace('ё', 'е')
        if cand not in thesaurus.sense2synid or cand in banned_words:
            continue
        type_match = False
        instance_flags = []
        for synid in thesaurus.sense2synid[cand]:
            synset = thesaurus.synsets[synid]
            if synset.synset_type == word_type:
                type_match = True
                instance_flags.append(1 if 'instance hypernym' in synset.rels else 0)
        if type_match and sum(instance_flags) != len(instance_flags):
            kept.append([cand, score])
    return kept
def load_word2patterns(word2patterns_path):
    """Load tab-separated pattern statistics.

    Each non-empty line is 'target<TAB>candidate<TAB>pattern_count<TAB>
    one_sent_count'; the result maps target -> {candidate: [pattern_count,
    one_sent_count]}.
    """
    word2patterns = {}
    with codecs.open(word2patterns_path, 'r', 'utf-8') as file_descr:
        for line in file_descr.read().split('\n'):
            if not line:
                continue
            target_word, cand_word, pattern_count, one_sent_count = line.split('\t')
            word2patterns.setdefault(target_word, {})[cand_word] = [int(pattern_count), int(one_sent_count)]
    return word2patterns
def get_word_patterns_features(target_word, cand_word, word2patterns_syn):
    """Pattern-based features for a (target, candidate) word pair.

    Returns [] when no pattern statistics are supplied; otherwise
    [log2(1 + pattern_count), log2(1 + one_sent_count), score], where the
    score (1 + count / (one_sent_count + 2)) rewards frequently matched
    pairs.

    BUG FIX: the original tested the undefined name `w` instead of
    `cand_word`, raising NameError whenever target_word had statistics.
    """
    if len(word2patterns_syn) == 0:
        return []
    syn_pattern_count = 0
    syn_one_sent_count = 0
    syn_pattern_score = 1
    if target_word in word2patterns_syn and cand_word in word2patterns_syn[target_word]:
        syn_pattern_count = word2patterns_syn[target_word][cand_word][0]
        syn_one_sent_count = word2patterns_syn[target_word][cand_word][1]
        syn_pattern_score = 1 + syn_pattern_count / (syn_one_sent_count + 2)
    return [np.log2(1 + syn_pattern_count), np.log2(1 + syn_one_sent_count), syn_pattern_score]
def get_synset_patterns_features(target_word, synset, word2patterns_hyp):
    """Aggregate hypernym-pattern features over a synset's member words.

    Returns [] if no statistics are supplied; otherwise the element-wise
    max followed by the element-wise min of the per-word feature triples
    [log2(1 + pattern_count), log2(1 + one_sent_count), score]. A zero
    triple is used when no member word has statistics.
    """
    if len(word2patterns_hyp) == 0:
        return []
    per_word = []
    stats_for_target = word2patterns_hyp.get(target_word, {})
    for s_word in set(synset.synset_words):
        if s_word in stats_for_target:
            count, one_sent = stats_for_target[s_word]
            score = 1 + count / (one_sent + 2)
            per_word.append([np.log2(1 + count), np.log2(1 + one_sent), score])
    if not per_word:
        per_word.append([0, 0, 0])
    return np.max(per_word, axis=0).tolist() + np.min(per_word, axis=0).tolist()
# Dead code: an earlier get_synset_bert_features variant (scoring hyponyms
# too) neutralized by wrapping it in a module-level string literal.
'''
def get_synset_bert_features(target_word, synset, bert_model, thesaurus):
    synset_bert_features = []
    if bert_model is not None:
        synsets_ids = [synset.synset_id]
        for hypo in synset.rels.get('hyponym', []):
            synsets_ids.append(hypo.synset_id)
        synsets_ids = sorted(synsets_ids)
        bert_probs = get_synsets_probs(bert_model, target_word, synsets_ids, thesaurus)
        synset_bert_prob = bert_probs[0][1]
        if bert_probs.shape[0] > 1:
            hyponyms_bert_prob = np.max(bert_probs[1:], axis=0)[1]
        else:
            hyponyms_bert_prob = 0.0
        synset_bert_features += [synset_bert_prob, hyponyms_bert_prob]
    return synset_bert_features
'''
def get_synset_bert_features(target_word, synset, bert_model, thesaurus):
    """Model-predicted probability that `synset` fits `target_word`.

    Returns [] when no model is supplied. `thesaurus` is accepted for
    signature compatibility but unused here.
    """
    if bert_model is None:
        return []
    synset_text = ';'.join(synset.synset_words)
    positive_prob = bert_model.predict(target_word, synset_text, return_prob=True)[0][1]
    return [positive_prob]
def get_synset_candidates_(cand_word, thesaurus, word_type):
    """Hypernym candidates reachable from cand_word's synsets.

    Returns [synset, level] pairs: direct hypernyms at level 1 and
    hypernyms-of-hypernyms at level 2, taken only from synsets whose part
    of speech matches `word_type`.
    """
    candidates = []
    for synid in thesaurus.sense2synid.get(cand_word, []):
        if thesaurus.synsets[synid].synset_type != word_type:
            continue
        for hyper in thesaurus.synsets[synid].rels.get('hypernym', []):
            candidates.append([hyper, 1])
            for grand in hyper.rels.get('hypernym', []):
                candidates.append([grand, 2])
    return candidates
def get_init_synset_features(synset, level):
    """Initial feature vector for a candidate: the negated hypernym level,
    so closer candidates score higher (`synset` itself is unused)."""
    return [-level]
def get_candidates_with_features(target_word, cand_word, word_features, thesaurus, vectors, word_type, word2patterns_syn={}, word2patterns_hyp={}, bert_model=None, wiktionary={}):
    """Score candidate hypernym synsets for one (target, neighbour) pair.

    Returns an OrderedDict mapping synset_id -> [synset_name,
    word-feature rows, synset-feature rows]; rows are stacked with
    np.vstack when the same synset is reached more than once.

    NOTE(review): `word_features += ...` mutates the list passed by the
    caller, and the mutable default arguments are shared across calls --
    safe only while callers always pass fresh lists/dicts.
    """
    predict = OrderedDict()
    # Pair-level features are shared by every candidate synset below.
    word_features += get_word_patterns_features(target_word, cand_word, word2patterns_syn)
    candidates = get_synset_candidates_(cand_word, thesaurus, word_type)
    for s, level in candidates:
        if s.synset_type != word_type:
            continue
        synset_features = get_init_synset_features(s, level)
        synset_features += get_synset_patterns_features(target_word, s, word2patterns_hyp)
        synset_features += get_synset_bert_features(target_word, s, bert_model, thesaurus)
        synset_features += calculate_wiktionary_features(target_word, s, wiktionary)
        synset_features += calculate_synset_similarity(target_word, s, vectors, thesaurus)
        if s.synset_id not in predict:
            predict[s.synset_id] = [s.synset_name, np.array(word_features), np.array(synset_features)]
        else:
            predict[s.synset_id][1] = np.vstack([predict[s.synset_id][1], np.array(word_features)])
            predict[s.synset_id][2] = np.vstack([predict[s.synset_id][2], np.array(synset_features)])
    return predict
def get_synset_candidates(most_sim, thesaurus, vectors, word_type, predict_topk=20, word2patterns_syn={}, word2patterns_hyp={}, bert_model=None, wiktionary={}):
    """Collect and merge candidate synset features for every target word.

    `most_sim` is the DataFrame produced by calc_most_sim ('word',
    'most_similar'); only the first `predict_topk` neighbours per word are
    used. Returns OrderedDict: target word -> (synset_id -> [name,
    stacked word features, stacked synset features]).
    """
    synset_candidates = OrderedDict()
    for _, row in tqdm(most_sim.iterrows()):
        target_word = row['word']
        most_similar_words = row['most_similar'][:predict_topk]
        predict = OrderedDict()
        for data in most_similar_words:
            w = data[0]
            word_features = data[1:]
            candidates_features = get_candidates_with_features(target_word, w, word_features, thesaurus, vectors, word_type,
                                                               word2patterns_syn, word2patterns_hyp, bert_model, wiktionary)
            # Merge this neighbour's candidates into the per-word table,
            # stacking feature rows for synsets seen before.
            for synset_id in candidates_features:
                if synset_id not in predict:
                    predict[synset_id] = candidates_features[synset_id]
                else:
                    predict[synset_id][1] = np.vstack([predict[synset_id][1], candidates_features[synset_id][1]])
                    predict[synset_id][2] = np.vstack([predict[synset_id][2], candidates_features[synset_id][2]])
        synset_candidates[target_word] = predict
    return synset_candidates
def get_train_true_info(df):
    """Extract gold hypernym info from a training frame.

    Returns (reference, w2true): `reference` maps word -> list of gold
    synset-id groups (parsed from the JSON 'target_gold' column) and
    `w2true` maps word -> flat set of all gold synset ids.
    """
    reference = {}
    w2true = {}
    for _, row in df.iterrows():
        word = row['word']
        gold_groups = json.loads(row['target_gold'])
        reference[word] = gold_groups
        flat = set()
        for group in gold_groups:
            flat.update(group)
        w2true[word] = flat
    return reference, w2true
def get_features_df(synset_candidates, w2true=None):
    """Flatten per-word candidate features into a training DataFrame.

    Each row holds: word, candidate synset id, binary label (1 iff the
    candidate is a known gold hypernym of the word), then feature columns
    f0..fN = [count, log2(2 + count), mean synset features, max / mean /
    min of the word features]. NaNs are replaced with 0.0. Raises
    IndexError when `synset_candidates` is empty.

    BUG FIX: the original crashed with TypeError when called with the
    default w2true=None ('w in None'); None now means "no gold labels",
    i.e. every label is 0.
    """
    if w2true is None:
        w2true = {}
    all_features = []
    for w in synset_candidates:
        for cand in synset_candidates[w]:
            word_features = synset_candidates[w][cand][1]
            synset_features = synset_candidates[w][cand][2]
            # Promote single feature rows to 2-D so axis-0 reductions work.
            if len(word_features.shape) == 1:
                word_features.resize((1, word_features.shape[0]))
            if len(synset_features.shape) == 1:
                synset_features.resize((1, synset_features.shape[0]))
            count = word_features.shape[0]
            log2count = np.log2(2 + count)
            # (The original also computed the max of the synset features
            # here but never used it; that dead computation was removed.)
            mean_synset_features = np.mean(synset_candidates[w][cand][2], axis=0).tolist()
            max_word_features = np.max(word_features, axis=0).tolist()
            mean_word_features = np.mean(word_features, axis=0).tolist()
            min_word_features = np.min(word_features, axis=0).tolist()
            label = int(w in w2true and cand in w2true[w])
            all_features.append([w, cand, label, count, log2count] + mean_synset_features + max_word_features + mean_word_features + min_word_features)
    df = pd.DataFrame(all_features, columns=['word', 'cand', 'label'] + [f'f{i}' for i in range(len(all_features[0]) - 3)])
    df.fillna(0.0, inplace=True)
    return df
def train_predict_cv(df_features, ref, folds=5):
    """Cross-validated training/evaluation of a logistic-regression ranker.

    Splits by *word* (not by row) so every candidate of a word lands in the
    same fold.  Prints per-fold and averaged MAP/MRR scores.

    Parameters
    ----------
    df_features : pandas.DataFrame
        Output of ``get_features_df`` (word/cand/label + f0..fN columns).
    ref : dict
        word -> gold reference, as consumed by ``get_score``.
    folds : int
        Number of KFold splits.

    Returns
    -------
    tuple(list, list)
        Fitted models (one per fold) and the feature column names.
    """
    features_len = len(df_features.columns) - 3
    features = [f'f{i}' for i in range(features_len)]
    # Hoisted: unique() was recomputed three times per fold.
    words = df_features['word'].unique()
    kf = KFold(n_splits=folds)
    results = []
    models = []
    for train_index, test_index in kf.split(words):
        train_words = words[train_index]
        test_words = words[test_index]
        train_df = df_features[df_features['word'].isin(train_words)]
        # .copy() so the 'predict' column assignment below cannot write into
        # a view of df_features (pandas SettingWithCopyWarning).
        test_df = df_features[df_features['word'].isin(test_words)].copy()
        clf = LogRegScaler()
        clf.fit(train_df[features], train_df['label'])
        y_pred = clf.predict_proba(test_df[features])
        test_df['predict'] = y_pred[:, 1]
        test_df = test_df.sort_values(by=['word', 'predict'], ascending=False)
        test_word_set = set(test_words)
        cur_ref = {w: ref[w] for w in ref if w in test_word_set}
        mean_ap, mean_rr, w2ap = get_score(cur_ref, from_df_to_pred(test_df), k=10)
        eval_res = [mean_ap, mean_rr]
        models.append(clf)
        print(eval_res)
        results.append(eval_res)
    print(f'Averaged results = {np.mean(results, axis=0)}')
    return models, features
def get_score_list(reference, submitted, k=10):
random.seed(42)
max_items_len = int(len(reference) * 0.8)
all_words = list(reference)
map_list, mrr_list = [], []
for _ in range(30):
random.shuffle(all_words)
_80_percent_words = all_words[:max_items_len]
smaller_reference = dict(filter(lambda x: x[0] in _80_percent_words, reference.items()))
mean_ap, mean_rr = get_score(smaller_reference, submitted, k=k)
map_list.append(mean_ap)
mrr_list.append(mean_rr)
#write_to_file(os.path.splitext(submitted_path)[0]+"_map_scores.json", map_list)
#write_to_file(os.path.splitext(submitted_path)[0]+"_mrr_scores.json", mrr_list)
return map_list, | |
<filename>makehuman-master/buildscripts/ppa/buildPPA.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MakeHuman debian package build script
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** <NAME>, <NAME>
**Copyright(c):** MakeHuman Team 2001-2017
**Licensing:** AGPL3
This file is part of MakeHuman (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
Create a debian DEB package for the MakeHuman application.
"""
# HINT: You need to run
#
# apt-get install devscripts equivs mercurial
#
# ... in order for this script to function at all
#
# script has to be run as root (sudo)
#
# Settings can be changed in ../build.conf
# --- CONFIGURATION SETTINGS ---
# Global mutable settings store.  The defaults below are overridden first by
# ../build.conf (see configure()) and later extended with derived paths
# (see configurePaths()).
settings = dict()
settings["package_version"] = None  # upstream version; must come from build.conf
settings["package_sub"] = None  # debian sub-version suffix; must come from build.conf
settings["signString"] = "Anonymous"  # identity written into the changelog entries
settings["performSign"] = False  # whether to sign the source packages
settings["performUpload"] = False  # whether to upload the result to the PPA
settings["gitpath"] = "/usr/bin/git"  # location of the git binary
# Files that must be made executable (mode 755) inside the package tree.
files_to_chmod_executable = [
    "usr/bin/makehuman-community",
    "usr/share/makehuman/makehuman-community",
    "usr/share/makehuman/makehuman.py",
]
# --- EVERYTHING BELOW THIS POINT IS LOGIC, HANDS OFF ---
import sys
import os
import re
import subprocess
import shutil
import glob
import time
def _cp_files(folder, dest):
"""
Copy files in folder to dest folder
"""
for f in os.listdir(folder):
fpath = os.path.join(folder, f)
if os.path.isfile(fpath):
print("Copy %s to %s" % (fpath, os.path.join(dest, f)))
shutil.copy(fpath, os.path.join(dest, f))
def _cp_pattern(srcFolder, destFolder, extIncludingDot):
"""
Copy files matching pattern in folder to dest folder
"""
for path, dirs, files in os.walk(srcFolder):
rel = os.path.relpath(path,srcFolder)
dest = os.path.join(destFolder,rel)
for f in files:
srcFile = os.path.abspath(os.path.join(path,f))
fileName, fileExtension = os.path.splitext(srcFile)
if fileExtension == extIncludingDot:
destFile = os.path.abspath(os.path.join(dest,f))
destDir = os.path.dirname(destFile)
if not os.path.exists(destDir):
os.makedirs(destDir)
#print (destDir)
shutil.copy(srcFile, destFile)
def _sed_replace(filepath, templateToken, replaceStr):
subprocess.check_call(['sed', '-i', '-e', 's/%s/%s/' % (templateToken, replaceStr), filepath])
def parseConfig(configPath):
    """Parse an INI-style config file.

    Returns a ``configparser.ConfigParser`` with the file loaded, or
    ``None`` when *configPath* is not an existing regular file.
    """
    if not os.path.isfile(configPath):
        return None
    import configparser
    parser = configparser.ConfigParser()
    parser.read(configPath)
    return parser
def configure(confpath):
    """Load PPA build settings from build.conf into the global ``settings``.

    Exits the process when the config file is missing, or when it does not
    supply packageVersion/packageSub (their defaults are None with no
    fallback).  Also stamps ``settings["timestamp"]`` for tarball naming.

    :param confpath: path to the build.conf file to read.
    """
    def _conf_get(config, section, option, defaultVal):
        # A missing section/option falls back to the current default.
        # Narrowed from a bare ``except:``, which also swallowed
        # SystemExit and KeyboardInterrupt.
        try:
            return config.get(section, option)
        except Exception:
            return defaultVal
    conf = parseConfig(confpath)
    if conf is None:
        print("PPA build requires a build.conf file. %s " % confpath)
        sys.exit(1)
    else:
        print("Using config file at %s. NOTE: properties in config file will override any other settings!" % confpath)
    settings["gitpath"] = _conf_get(conf, 'General', 'gitPath', settings["gitpath"])
    settings["package_version"] = _conf_get(conf, 'PPA', 'packageVersion', settings["package_version"])
    settings["package_sub"] = _conf_get(conf, 'PPA', 'packageSub', settings["package_sub"])
    settings["signString"] = _conf_get(conf, 'PPA', 'signString', settings["signString"])
    settings["performSign"] = _conf_get(conf, 'PPA', 'performSign', settings["performSign"])
    settings["performUpload"] = _conf_get(conf, 'PPA', 'performUpload', settings["performUpload"])
    # These two have no usable default, so build.conf must have set them.
    if settings["package_sub"] is None or settings["package_version"] is None:
        print("build.conf is incorrect")
        sys.exit(1)
    settings["timestamp"] = time.strftime("%Y%m%d%H%M%S")
def configurePaths():
    """Derive and create every location used by the deb build.

    Populates path entries in the global ``settings`` dict, creates the
    staging directories, and copies the deb definition tree into the
    staging area.  Exits when the source root is not a git checkout.
    Must run after ``buildSourceTree`` has set ``settings["build_root"]``.
    """
    print(settings)
    print("### Starting to configure locations ###\n")
    # Where is the build root?
    print("Build root: " + settings["build_root"])
    # Where is the buildPPA script?
    settings["location_of_script"] = os.path.dirname(os.path.abspath(__file__))
    print("Script location: " + settings["location_of_script"])
    # Where is the source code located? (two levels above buildscripts/ppa/)
    settings["source_root"] = os.path.realpath(os.path.join(settings["location_of_script"], '..', '..'))
    print("Source root: " + settings["source_root"])
    if not os.path.isdir(os.path.join(settings["source_root"], '.git')):
        print("Error, the git root folder %s does not contain .git folder!" % settings["source_root"])
        print("Giving up.\n\n")
        sys.exit(1)
    # We can now read build.conf
    configure(os.path.join(settings["source_root"], 'buildscripts', 'build.conf'))
    # Folder where git contents are exported and prepared for packaging
    # (scripts are run)
    settings["build_prepare_destination"] = os.path.realpath(os.path.join(settings["build_root"], 'build_prepare'))
    if not os.path.exists(settings["build_prepare_destination"]):
        os.mkdir(settings["build_prepare_destination"])
    print("Build_prepare destination: " + settings["build_prepare_destination"])
    # Where do we find deb build configuration files
    settings["deb_config_location"] = os.path.join(settings["location_of_script"], "packages")
    print("Location of deb build config files: " + settings["deb_config_location"])
    # Staging area for building source and binary debs.
    # (This assignment was duplicated in the previous revision; once is enough.)
    settings["deb_staging_location"] = os.path.join(settings["build_root"], "deb_staging")
    print("Staging area for deb build process: " + settings["deb_staging_location"])
    shutil.copytree(settings["deb_config_location"], settings["deb_staging_location"])
    # Final destination for specific build configs
    settings["main_deb_def"] = os.path.join(settings["deb_staging_location"], "makehuman-community")
    print("Target deb definition dir for main: " + settings["main_deb_def"])
    settings["dev_deb_def"] = os.path.join(settings["deb_staging_location"], "makehuman-community-dev")
    print("Target deb definition dir for dev: " + settings["dev_deb_def"])
    # Changelog locations
    settings["main_changelog"] = os.path.join(settings["main_deb_def"], "debian", "changelog")
    print("Main changelog: " + settings["main_changelog"])
    settings["dev_changelog"] = os.path.join(settings["dev_deb_def"], "debian", "changelog")
    print("Dev changelog: " + settings["dev_changelog"])
    # Directory with extra files to copy
    settings["extras_location"] = os.path.join(settings["location_of_script"], "extras")
    print("Location of various extra files: " + settings["extras_location"])
    # Where to copy extra files
    settings["extras_destination"] = os.path.join(settings["build_prepare_destination"], "extras")
    print("Destination for extras: " + settings["extras_destination"])
    # Staging area for files not managed by build_prepare; recreated empty.
    settings["manual_export_location"] = os.path.realpath(os.path.join(settings["build_root"], 'export_dev'))
    if os.path.exists(settings["manual_export_location"]):
        shutil.rmtree(settings["manual_export_location"])
    os.mkdir(settings["manual_export_location"])
    # Location of makehuman in source root
    settings["makehuman_source_root"] = os.path.join(settings["source_root"], "makehuman")
    print("Export dir for *-dev files: " + settings["manual_export_location"])
    # .orig tarballs to create (versioned with the build timestamp)
    fn = "makehuman-community_" + settings["package_version"]
    fn = fn + "+" + settings["timestamp"]
    fn = fn + ".orig.tar.gz"
    settings["main_tar_file"] = os.path.abspath(os.path.join(settings["deb_staging_location"], fn))
    print("Main source tarball: " + settings["main_tar_file"])
    fn = "makehuman-community-dev_" + settings["package_version"]
    fn = fn + "+" + settings["timestamp"]
    fn = fn + ".orig.tar.gz"
    settings["dev_tar_file"] = os.path.abspath(os.path.join(settings["deb_staging_location"], fn))
    print("Dev source tarball: " + settings["dev_tar_file"])
    # Final destination for source deb
    settings["source_final_dest"] = os.path.join(settings["build_root"], "dist_ppa")
    print("Final destination for source deb definition: " + settings["source_final_dest"])
    # Final destination for binary deb files
    settings["binary_final_dest"] = os.path.join(settings["build_root"], "dist_deb")
    print("Final destination for binary deb files: " + settings["binary_final_dest"])
    print("\n### Finished configuring locations ###\n")
    print("")
def buildSourceTree(dest = None):
    """Export the git checkout into a clean build tree and normalize ownership
    and permissions for debian packaging.

    :param dest: work directory (build root).  Falls back to the
        ``makehuman_dest`` environment variable; the directory is DELETED
        and recreated.  Exits the process on any fatal condition.
    """
    if os.geteuid() != 0:
        print("WARNING: You are not root. You should be running this script with root permissions!")
    if dest is None:
        dest = os.getenv('makehuman_dest',0)
    # os.getenv default is the int 0, so this only triggers when the env
    # variable is unset and no argument was given.
    if dest == 0:
        print("You must explicitly set the makehuman_dest environment variable to point at a work directory, or specify it as argument. I will violently destroy and mutilate the contents of this directory.")
        exit(1)
    settings["build_root"] = os.path.normpath(os.path.realpath(dest)) # Folder to build deb package to
    if os.path.exists(settings["build_root"]):
        # Ensure dest dir is empty
        shutil.rmtree(settings["build_root"])
    os.mkdir(settings["build_root"])
    # Derives all other settings[...] paths; must happen before anything below.
    configurePaths();
    print("\nABOUT TO PERFORM BUILD EXPORT\n")
    print("to: %s" % os.path.normpath(os.path.realpath(settings["build_prepare_destination"])))
    # Export source to export folder and run scripts
    sys.path = [os.path.join(settings["location_of_script"], '..')] + sys.path
    try:
        import build_prepare
    except:
        print(sys.exc_info()[0])
        print("Failed to import build_prepare, expected to find it at %s. Make sure to run this script from buildscripts/ppa/" % os.path.normpath(os.path.realpath(os.path.join(settings["location_of_script"], '..'))))
        exit(1)
    # build_prepare.export() expects a non-existing destination folder.
    if os.path.exists(settings["build_prepare_destination"]):
        shutil.rmtree(settings["build_prepare_destination"])
    exportInfo = build_prepare.export(sourcePath = settings["source_root"], exportFolder = settings["build_prepare_destination"])
    # NOTE(review): the commented line below is syntactically broken
    # ('blendertools'ender.bat') — left disabled as found.
    #os.remove(os.path.join(settings["build_prepare_destination"], 'makehuman', 'blendertools'ender.bat'))
    print("\nABOUT TO COPY CONTENTS\n")
    try:
        subprocess.check_call(["chown", "-R", "0:0", settings["build_prepare_destination"]])
    except:
        print("Failed to chown to root. Operation not permitted?")
    try:
        # Files 644 everywhere, then directories back to 755 so they stay
        # traversable.
        subprocess.check_call(["chmod", "-R", "644", settings["build_prepare_destination"]])
        for path, dirs, files in os.walk(settings["build_prepare_destination"]):
            for d in dirs:
                # NOTE(review): os.walk already yields an absolute `path`, so
                # os.path.join discards the first argument here; harmless but
                # confirm before simplifying.
                dpath = os.path.join(settings["build_prepare_destination"], path, d)
                try:
                    subprocess.check_call(["chmod", "755", dpath])
                except:
                    print("Failed to chmod 755 folder %s" % dpath)
        subprocess.check_call(["chmod", "755", settings["build_prepare_destination"]])
    except Exception as e:
        print("Failed to chmod " + settings["build_prepare_destination"])
        print(e)
    # NOTE(review): these paths are relative ("usr/bin/..."), so the check is
    # against the current working directory — presumably they were meant to be
    # joined with build_prepare_destination; verify against the packaging tree.
    for x in files_to_chmod_executable:
        if os.path.exists(x):
            subprocess.check_call(["chmod", "755", x])
    shutil.copytree(settings["extras_location"],settings["extras_destination"])
    # Raw asset files (not processed by build_prepare) go into the -dev package.
    print("\nCOPYING RAW TARGETS FOR -dev\n")
    _cp_pattern(settings["makehuman_source_root"],settings["manual_export_location"],".target")
    print("\nCOPYING RAW OBJS FOR -dev\n")
    _cp_pattern(settings["makehuman_source_root"],settings["manual_export_location"],".obj")
    print("\nCOPYING RAW MHCLO FOR -dev\n")
    _cp_pattern(settings["makehuman_source_root"],settings["manual_export_location"],".mhclo")
    print("\nCOPYING RAW PROXIES FOR -dev\n")
    _cp_pattern(settings["makehuman_source_root"],settings["manual_export_location"],".proxy")
    # debuild needs at least one non-directory entry at the tarball root.
    dummy = os.path.join(settings["manual_export_location"],"dummy.txt")
    with open(dummy, "w") as text_file:
        text_file.write("This is only because moronic debuild cannot handle tarballs which doesn't have a non-dir entry in the root")
    try:
        subprocess.check_call(["chown", "-R", "0:0", settings["manual_export_location"]])
    except:
        print("Failed to chown to root. Operation not permitted?")
    try:
        # Same 644/755 normalization for the -dev export tree.
        subprocess.check_call(["chmod", "-R", "644", settings["manual_export_location"]])
        for path, dirs, files in os.walk(settings["manual_export_location"]):
            for d in dirs:
                dpath = os.path.join(settings["manual_export_location"], path, d)
                try:
                    subprocess.check_call(["chmod", "755", dpath])
                except:
                    print("Failed to chmod 755 folder %s" % dpath)
        subprocess.check_call(["chmod", "755", settings["manual_export_location"]])
    except Exception as e:
        print("Failed to chmod " + settings["manual_export_location"])
        print(e)
def createSourceTarballs():
    """Create the .orig source tarballs consumed by debuild.

    Fix: the main tarball command is now built once and both printed and
    executed.  Previously the printed ``args`` listed "README" while the
    command actually executed used "README.md", so the log lied about the
    tar invocation.  The executed form ("README.md") is kept.
    """
    print("\nABOUT TO CREATE SOURCE TARBALL FOR BUILD_PREPARE DATA\n\n")
    os.chdir(settings["build_prepare_destination"])
    print("Tarfile: " + settings["main_tar_file"])
    print("CWD: " + os.getcwd())
    args = ["tar", "-C", settings["build_prepare_destination"], "-czf",
            settings["main_tar_file"], "makehuman", "README.md", "extras"]
    print(args)
    subprocess.check_call(args)
    print("\nABOUT TO CREATE SOURCE TARBALL FOR -DEV DATA\n\n")
    os.chdir(settings["manual_export_location"])
    print("Tarfile: " + settings["dev_tar_file"])
    print("CWD: " + os.getcwd())
    subprocess.check_call(["tar", "-C", settings["manual_export_location"],
                           "-cvzf", settings["dev_tar_file"], "data", "dummy.txt"])
def createSourceDebs():
print("\nWRITING CHANGELOGS\n")
#ts = Mon, 01 Jun 2015 15:17:49 +0200
ts = time.strftime("%a, %d %b %Y %H:%M:%S +0200")
with open(settings["main_changelog"], "w") as text_file:
text_file.write("makehuman-community (" + settings["package_version"] + "+" + settings["timestamp"] + "-" + settings["package_sub"] + ") bionic; urgency=low\n\n")
text_file.write(" * Version bump\n\n")
text_file.write(" -- " + settings["signString"] + " " + ts + "\n\n")
with open(settings["dev_changelog"], "w") as text_file:
text_file.write("makehuman-community-dev (" + settings["package_version"] + "+" + settings["timestamp"] + "-" + settings["package_sub"] + ") bionic; urgency=low\n\n")
| |
################################
# WAVE FUNCTION COLLAPSE IN 2D #
################################
# Original WFC implementation by <NAME> @mxgmn on github
# Python implementation by <NAME> @Coac on github
# Blender implementation by <NAME> @benkl on github
import time
import os
import numpy as np
import random
import sys
import bpy
class WaveFunctionCollapse:
    # WaveFunctionCollapse encapsulates the wfc algorithm:
    # extract patterns from the sample, then repeatedly observe the lowest
    # entropy cell and propagate the constraint until the grid is stable.
    def __init__(self, grid_size, sample, pattern_size):
        """Build patterns from *sample*, allocate the grid, and precompute
        pattern adjacency constraints (the Propagator)."""
        self.patterns = Pattern.from_sample(sample, pattern_size)
        self.grid = self._create_grid(grid_size)
        self.propagator = Propagator(self.patterns)
    def run(self):
        """Run observe/propagate steps until done; prints total runtime."""
        start_time = time.time()
        done = False
        # Border handling is toggled from the Blender UI (scene property).
        border = bpy.context.scene.wfc_vars.wfc_border
        # self.propagator.propagate(cell)
        if border == True:
            # BorderInsert
            # print(self.grid.size[2])
            # print("we got a cell", self.grid.get_cell(0))
            # NOTE(review): the first three `cell =` assignments below are
            # overwritten before use (their propagate calls are commented
            # out); only the [0][0] corner is actually propagated.
            # Presumably each corner was meant to be propagated — confirm.
            cell = self.grid.get_cell(0)[self.grid.size[1]-1][0]
            # self.propagate(cell)
            cell = self.grid.get_cell(0)[0][self.grid.size[2]-1]
            # self.propagate(cell)
            cell = self.grid.get_cell(
                0)[self.grid.size[1]-1][self.grid.size[2]-1]
            # self.propagate(cell)
            cell = self.grid.get_cell(0)[0][0]
            self.propagate(cell)
            # Border Insert end
        while not done:
            done = self.step()
        print("WFC run took %s seconds" % (time.time() - start_time))
    def step(self):
        """One observe/propagate iteration; returns True when finished
        (contradiction reached or no unstable cell remains)."""
        step_time = time.time()
        self.grid.print_allowed_pattern_count()
        cell = self.observe()
        if cell is None:
            return True
        self.propagate(cell)
        print("Step took %s seconds" % (time.time() - step_time))
        return False
    def get_image(self):
        """Return the current grid rendered back to color values."""
        return self.grid.get_image()
    def get_patterns(self):
        """Return every extracted pattern as an image."""
        return [pattern.to_image() for pattern in self.patterns]
    def observe(self):
        """Collapse the lowest-entropy cell to a random allowed pattern.

        Returns None on contradiction or when every cell is stable."""
        if self.grid.check_contradiction():
            return None
        cell = self.grid.find_lowest_entropy()
        if cell is None:
            return None
        cell.choose_rnd_pattern()
        return cell
    def propagate(self, cell):
        """Propagate the constraints of *cell* through the grid."""
        self.propagator.propagate(cell)
    def _create_grid(self, grid_size):
        # One Cell per grid position, each starting with all patterns allowed.
        num_pattern = len(self.patterns)
        return Grid(grid_size, num_pattern)
class Grid:
# Grid is made of Cells
def __init__(self, size, num_pattern):
self.size = size
self.grid = np.empty(self.size, dtype=object)
# Filling grid with cells
for position in np.ndindex(self.size):
self.grid[position] = Cell(num_pattern, position, self)
# self.grid = np.array([[Cell(num_pattern, (x, y), self) for x in range(self.size)] for y in range(self.size)])
# self.grid = np.array([Cell(num_pattern, (x,), self) for x in range(self.size)])
def find_lowest_entropy(self):
min_entropy = 999999
lowest_entropy_cells = []
for cell in self.grid.flat:
if cell.is_stable():
continue
entropy = cell.entropy()
if entropy == min_entropy:
lowest_entropy_cells.append(cell)
elif entropy < min_entropy:
min_entropy = entropy
lowest_entropy_cells = [cell]
if len(lowest_entropy_cells) == 0:
return None
cell = lowest_entropy_cells[np.random.randint(
len(lowest_entropy_cells))]
return cell
def get_cell(self, index):
# Returns the cell contained in the grid at the provided index
# :param index: (...z, y, x)
# :return: cell
return self.grid[index]
def get_image(self):
# Returns the grid converted from index to back to color
# :return:
image = np.vectorize(lambda c: c.get_value())(self.grid)
image = Pattern.index_to_img(image)
return image
def check_contradiction(self):
for cell in self.grid.flat:
if len(cell.allowed_patterns) == 0:
return True
return False
def print_allowed_pattern_count(self):
grid_allowed_patterns = np.vectorize(
lambda c: len(c.allowed_patterns))(self.grid)
print(grid_allowed_patterns)
class Propagator:
    """Computes and stores, for every pattern, the set of patterns that may
    legally sit next to it at each 3-D offset of the 3x3x3 neighbourhood."""

    def __init__(self, patterns):
        self.patterns = patterns
        # All 27 offsets (z, y, x) with components in {-1, 0, 1},
        # including (0, 0, 0).
        self.offsets = [(z, y, x) for x in range(-1, 2)
                        for y in range(-1, 2) for z in range(-1, 2)]
        start_time = time.time()
        self.precompute_legal_patterns()
        print("Patterns constraints generation took %s seconds" %
              (time.time() - start_time))

    def precompute_legal_patterns(self):
        """Precompute legal neighbours for every (pattern, offset) pair.

        Sequential computation; the commented-out multiprocessing Pool
        scaffolding from the previous revision has been removed.
        """
        patterns_compatibility = []
        for pattern in self.patterns:
            for offset in self.offsets:
                patterns_compatibility.append(self.legal_patterns(pattern, offset))
        # legal_patterns() already stores its result on the pattern; the
        # indices are re-applied here to preserve the original data flow
        # (assumes pattern.index equals its position in self.patterns).
        for pattern_index, offset, legal_patterns in patterns_compatibility:
            self.patterns[pattern_index].set_legal_patterns(offset, legal_patterns)

    def legal_patterns(self, pattern, offset):
        """Return (pattern.index, offset, indices of compatible patterns)."""
        legal_patt = []
        for candidate_pattern in self.patterns:
            if pattern.is_compatible(candidate_pattern, offset):
                legal_patt.append(candidate_pattern.index)
        pattern.set_legal_patterns(offset, legal_patt)
        return pattern.index, offset, legal_patt

    @staticmethod
    def propagate(cell):
        """Remove patterns no longer supported by any neighbour, spreading
        the constraint outward from *cell* until the grid is stable."""
        to_update = [neighbour for neighbour, _ in cell.get_neighbors()]
        while len(to_update) > 0:
            cell = to_update.pop(0)
            for neighbour, offset in cell.get_neighbors():
                # Iterate over a snapshot: the previous revision removed
                # entries from cell.allowed_patterns while iterating the same
                # list, which makes Python skip the element that follows each
                # removal and could leave stale patterns unchecked.
                for pattern_index in list(cell.allowed_patterns):
                    pattern = Pattern.from_index(pattern_index)
                    pattern_still_compatible = False
                    for neighbour_pattern_index in neighbour.allowed_patterns:
                        neighbour_pattern = Pattern.from_index(
                            neighbour_pattern_index)
                        if pattern.is_compatible(neighbour_pattern, offset):
                            pattern_still_compatible = True
                            break
                    if not pattern_still_compatible:
                        cell.allowed_patterns.remove(pattern_index)
                        # The change may invalidate this cell's neighbours.
                        for neigh, _ in cell.get_neighbors():
                            if neigh not in to_update:
                                to_update.append(neigh)
class Pattern:
    # Pattern is a configuration of tiles from the input image.
    # Class-level registries shared by all Pattern instances; rebuilt by
    # sample_img_to_indexes()/from_sample() for each new sample.
    index_to_pattern = {}
    color_to_index = {}
    index_to_color = {}
    def __init__(self, data, index):
        """Store the tile data (as an ndarray of palette indices) under the
        given pattern *index*."""
        self.index = index
        self.data = np.array(data)
        self.legal_patterns_index = {} # offset -> [pattern_index]
    def get(self, index=None):
        """Return the tile value at *index*; the first element when omitted."""
        if index is None:
            return self.data.item(0)
        return self.data[index]
    def set_legal_patterns(self, offset, legal_patterns):
        # Cache the precomputed list of compatible pattern indices.
        self.legal_patterns_index[offset] = legal_patterns
    @property
    def shape(self):
        return self.data.shape
    def is_compatible(self, candidate_pattern, offset):
        # Check if pattern is compatible with a candidate pattern for a given offset
        # :param candidate_pattern:
        # :param offset:
        # :return: True if compatible
        assert (self.shape == candidate_pattern.shape)
        # Precomputed compatibility (filled in by the Propagator)
        if offset in self.legal_patterns_index:
            return candidate_pattern.index in self.legal_patterns_index[offset]
        # Computing compatibility: compare the overlap region of this pattern
        # with the candidate shifted by -offset; every overlapping tile must
        # match.
        ok_constraint = True
        start = tuple([max(offset[i], 0) for i, _ in enumerate(offset)])
        end = tuple([min(self.shape[i] + offset[i], self.shape[i])
                     for i, _ in enumerate(offset)])
        for index in np.ndindex(end):  # index = (x, y, z...)
            start_constraint = True
            for i, d in enumerate(index):
                if d < start[i]:
                    start_constraint = False
                    break
            if not start_constraint:
                continue
            if candidate_pattern.get(tuple(np.array(index) - np.array(offset))) != self.get(index):
                ok_constraint = False
                break
        return ok_constraint
    def to_image(self):
        """Render this pattern's palette indices back to colors."""
        return Pattern.index_to_img(self.data)
    @staticmethod
    def from_sample(sample, pattern_size):
        # Compute patterns from sample
        # :param pattern_size:
        # :param sample:
        # :return: list of patterns
        sample = Pattern.sample_img_to_indexes(sample)
        shape = sample.shape
        patterns = []
        pattern_index = 0
        for index, _ in np.ndenumerate(sample):
            # Checking if index is out of bounds
            out = False
            for i, d in enumerate(index):  # d is a dimension, e.g.: x, y, z
                if d > shape[i] - pattern_size[i]:
                    out = True
                    break
            if out:
                continue
            pattern_location = [range(d, pattern_size[i] + d)
                                for i, d in enumerate(index)]
            pattern_data = sample[np.ix_(*pattern_location)]
            # Optional augmentations (rotations/flips) are driven by the
            # Blender UI scene properties.
            rotdata = bpy.context.scene.wfc_vars.wfc_rot
            flipvdata = bpy.context.scene.wfc_vars.wfc_flipv
            fliphdata = bpy.context.scene.wfc_vars.wfc_fliph
            # datas = [pattern_data, np.fliplr(pattern_data)]
            datas = [pattern_data]
            if fliphdata == True:
                datas.append(np.fliplr(pattern_data))
            if flipvdata == True:
                datas.append(np.flipud(pattern_data))
            if shape[1] > 1 and rotdata == True:  # is 2D
                # rotated tiles (90/180/270 degrees in the y-x plane)
                datas.append(np.rot90(pattern_data, axes=(1, 2)))
                datas.append(np.rot90(pattern_data, 2, axes=(1, 2)))
                datas.append(np.rot90(pattern_data, 3, axes=(1, 2)))
            if shape[0] > 1 and rotdata == True:  # is 3D
                # rotated tiles (90/180/270 degrees in the z-x plane)
                datas.append(np.rot90(pattern_data, axes=(0, 2)))
                datas.append(np.rot90(pattern_data, 2, axes=(0, 2)))
                datas.append(np.rot90(pattern_data, 3, axes=(0, 2)))
            # Checking existence: only keep patterns not seen before.
            # TODO: more probability to multiple occurrences when observe phase
            for data in datas:
                exist = False
                for p in patterns:
                    if (p.data == data).all():
                        exist = True
                        break
                if exist:
                    continue
                pattern = Pattern(data, pattern_index)
                patterns.append(pattern)
                Pattern.index_to_pattern[pattern_index] = pattern
                pattern_index += 1
        # Pattern.plot_patterns(patterns)
        return patterns
    @staticmethod
    def sample_img_to_indexes(sample):
        # Convert a rgb image to a 2D array with pixel index
        # :param sample:
        # :return: pixel index sample
        # Resets the class-level palette registries as a side effect.
        Pattern.color_to_index = {}
        Pattern.index_to_color = {}
        sample_index = np.zeros(sample.shape[:-1])  # without last rgb dim
        color_number = 0
        for index in np.ndindex(sample.shape[:-1]):
            color = tuple(sample[index])
            if color not in Pattern.color_to_index:
                Pattern.color_to_index[color] = color_number
                Pattern.index_to_color[color_number] = color
                color_number += 1
            sample_index[index] = Pattern.color_to_index[color]
        print('Unique color count = ', color_number)
        return sample_index
    @staticmethod
    def index_to_img(sample):
        # Map palette indices back to colors; -1 (uncollapsed cell) -> grey.
        color = next(iter(Pattern.index_to_color.values()))
        image = np.zeros(sample.shape + (len(color),))
        for index in np.ndindex(sample.shape):
            pattern_index = sample[index]
            if pattern_index == -1:
                image[index] = [0.5 for _ in range(len(color))]  # Grey
            else:
                image[index] = Pattern.index_to_color[pattern_index]
        return image
    @staticmethod
    def from_index(pattern_index):
        """Look up a Pattern instance by its index."""
        return Pattern.index_to_pattern[pattern_index]
class Cell:
    # Cell is a pixel or tile (in 2d) that stores the possible patterns
    def __init__(self, num_pattern, position, grid):
        """Create a cell at *position* in *grid* that initially allows all
        ``num_pattern`` patterns (possibly restricted on borders)."""
        self.num_pattern = num_pattern
        self.position = position
        self.allowed_patterns = [i for i in range(self.num_pattern)]
        self.grid = grid
        # Border handling is toggled from the Blender UI (scene property).
        border = bpy.context.scene.wfc_vars.wfc_border
        # self.propagator.propagate(cell)
        if border == True:
            # Test to init with first observed tdile one borders
            # NOTE(review): only the y (position[1]) and x (position[2])
            # borders are forced to the border rule pattern; the z borders
            # (position[0]) are not — confirm whether that is intentional.
            rule_index = bpy.context.scene.wfc_vars.wfc_borderrule
            if self.position[2] == 0:
                self.allowed_patterns = [rule_index]
            if self.position[1] == 0:
                self.allowed_patterns = [rule_index]
            if self.position[2] == self.grid.size[2]-1:
                self.allowed_patterns = [rule_index]
            if self.position[1] == self.grid.size[1]-1:
                self.allowed_patterns = [rule_index]
        # print(position, self.allowed_patterns)
        # All 27 offsets (z, y, x) in {-1, 0, 1}^3.  NOTE(review): this
        # includes (0, 0, 0), so get_neighbors() also returns the cell itself.
        self.offsets = [(z, y, x) for x in range(-1, 2)
                        for y in range(-1, 2) for z in range(-1, 2)]
    def entropy(self):
        """Number of patterns still allowed in this cell."""
        return len(self.allowed_patterns)
    def choose_rnd_pattern(self):
        """Collapse the cell to one of its allowed patterns, chosen uniformly."""
        chosen_index = np.random.randint(len(self.allowed_patterns))
        self.allowed_patterns = [self.allowed_patterns[chosen_index]]
    def is_stable(self):
        """True once the cell has collapsed to exactly one pattern."""
        return len(self.allowed_patterns) == 1
    def get_value(self):
        """Return the collapsed pattern's tile value, or -1 while unstable."""
        if self.is_stable():
            pattern = Pattern.from_index(self.allowed_patterns[0])
            return pattern.get()
        return -1
    def get_neighbors(self):
        """Return [(cell, offset), ...] for every in-bounds offset position."""
        neighbors = []
        for offset in self.offsets:
            neighbor_pos = tuple(np.array(self.position) + np.array(offset))
            out = False
            for i, d in enumerate(neighbor_pos):
                if not 0 <= d < self.grid.size[i]:
                    out = True
            if out:
                continue
            neighbors.append((self.grid.get_cell(neighbor_pos), offset))
        return neighbors
def load_sample(path):
sample = path
# Expand dimensions from 2D to 3D (For use in 2D)
sample = np.expand_dims(sample, | |
<reponame>rlr/kitsune<gh_stars>1-10
import json
import logging
import random
import time
from datetime import date, datetime, timedelta
from django.conf import settings
from django.contrib import auth
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import PermissionDenied
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import (HttpResponseRedirect, HttpResponse, Http404,
HttpResponseBadRequest, HttpResponseForbidden)
from django.shortcuts import get_object_or_404, render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import (require_POST, require_GET,
require_http_methods)
import jingo
from ordereddict import OrderedDict
from mobility.decorators import mobile_template
from session_csrf import anonymous_csrf
from statsd import statsd
from taggit.models import Tag
from tidings.events import ActivationRequestFailed
from tidings.models import Watch
from tower import ugettext as _, ugettext_lazy as _lazy
from kitsune.access.decorators import permission_required, login_required
from kitsune.community.utils import top_contributors_questions
from kitsune.products.models import Product, Topic
from kitsune.questions import config
from kitsune.questions.events import QuestionReplyEvent, QuestionSolvedEvent
from kitsune.questions.feeds import (
QuestionsFeed, AnswersFeed, TaggedQuestionsFeed)
from kitsune.questions.forms import (
NewQuestionForm, EditQuestionForm, AnswerForm, WatchQuestionForm,
FREQUENCY_CHOICES, MarketplaceAaqForm, MarketplaceRefundForm,
MarketplaceDeveloperRequestForm, StatsForm)
from kitsune.questions.marketplace import (
MARKETPLACE_CATEGORIES, ZendeskError)
from kitsune.questions.models import (
Question, Answer, QuestionVote, AnswerVote, QuestionMappingType,
QuestionLocale)
from kitsune.questions.signals import tag_added
from kitsune.search.es_utils import (ES_EXCEPTIONS, Sphilastic, F,
es_query_with_analyzer)
from kitsune.search.utils import locale_or_default, clean_excerpt
from kitsune.sumo.decorators import ssl_required, ratelimit
from kitsune.sumo.helpers import urlparams
from kitsune.sumo.urlresolvers import reverse, split_path
from kitsune.sumo.utils import paginate, simple_paginate, build_paged_url, is_ratelimited
from kitsune.tags.utils import add_existing_tag
from kitsune.upload.models import ImageAttachment
from kitsune.upload.views import upload_imageattachment
from kitsune.users.forms import RegisterForm
from kitsune.users.helpers import display_name
from kitsune.users.models import Setting
from kitsune.users.utils import handle_login, handle_register
from kitsune.wiki.facets import documents_for, topics_for
from kitsune.wiki.models import Document, DocumentMappingType
log = logging.getLogger('k.questions')
# User-facing validation messages for the tagging endpoints (lazily
# translated).
UNAPPROVED_TAG = _lazy(u'That tag does not exist.')
NO_TAG = _lazy(u'Please provide a tag.')
# Question-list filters, grouped by the "show" tab they belong to.
# Maps show-group -> OrderedDict of (filter slug -> display label).
FILTER_GROUPS = {
    'all': OrderedDict([
        ('recently-unanswered', _lazy('Recently unanswered')),
    ]),
    'needs-attention': OrderedDict([
        ('new', _lazy('New')),
        ('unhelpful-answers', _lazy('Answers didn\'t help')),
    ]),
    'responded': OrderedDict([
        ('needsinfo', _lazy('Needs info')),
        ('solution-provided', _lazy('Solution provided')),
    ]),
    'done': OrderedDict([
        ('solved', _lazy('Solved')),
        ('locked', _lazy('Locked')),
    ]),
}
# Sort options for the question list:
# sort slug -> (queryset order_by field, display label).
ORDER_BY = OrderedDict([
    ('updated', ('updated', _lazy('Updated'))),
    ('views', ('questionvisits__visits', _lazy('Views'))),
    ('votes', ('num_votes_past_week', _lazy('Votes'))),
    ('replies', ('num_answers', _lazy('Replies'))),
])
@mobile_template('questions/{mobile/}product_list.html')
def product_list(request, template):
    """View to select a product to see related questions.

    Only products whose question locales include the request's language
    are listed.
    """
    return render(request, template, {
        'products': Product.objects.filter(
            questions_locales__locale__in=[request.LANGUAGE_CODE])
    })
@mobile_template('questions/{mobile/}question_list.html')
def question_list(request, template, product_slug):
    """View the list of questions.

    Filters, orders and paginates the support-forum question list.

    Args:
        request: HttpRequest. Recognized GET parameters: filter, owner,
            show, escalated, tagged, topic, order, sort, page.
        template: template path (chosen by the mobile_template decorator).
        product_slug: comma-separated product slugs, or 'all' for no
            product filtering.
    """
    filter_ = request.GET.get('filter')
    owner = request.GET.get(
        'owner', request.session.get('questions_owner', 'all'))
    show = request.GET.get('show')
    # Show defaults to NEEDS ATTENTION
    if show not in FILTER_GROUPS:
        show = 'needs-attention'
    escalated = request.GET.get('escalated')
    tagged = request.GET.get('tagged')
    tags = None
    topic_slug = request.GET.get('topic')
    order = request.GET.get('order', 'updated')
    if order not in ORDER_BY:
        # Fall back to the default ordering for unrecognized values.
        # (Bug fix: this was previously `order == 'updated'`, a no-op
        # comparison, so an unknown ?order= value reached
        # ORDER_BY[order] below and raised KeyError.)
        order = 'updated'
    sort = request.GET.get('sort', 'desc')
    product_slugs = product_slug.split(',')
    products = []
    if len(product_slugs) > 1 or product_slugs[0] != 'all':
        for slug in product_slugs:
            products.append(get_object_or_404(Product, slug=slug))
        multiple = len(products) > 1
    else:
        # We want all products (no product filtering at all).
        products = None
        multiple = True
    if topic_slug and not multiple:
        # We don't support topics when there is more than one product.
        # There is no way to know what product the topic applies to.
        try:
            topic = Topic.objects.get(slug=topic_slug, product=products[0])
        except Topic.DoesNotExist:
            topic = None
    else:
        topic = None
    question_qs = Question.objects
    # Only filters belonging to the active "show" tab are valid; an
    # escalated view ignores the filter entirely.
    if filter_ not in FILTER_GROUPS[show]:
        filter_ = None
    if escalated:
        filter_ = None
    if filter_ == 'new':
        question_qs = question_qs.new()
    elif filter_ == 'unhelpful-answers':
        question_qs = question_qs.unhelpful_answers()
    elif filter_ == 'needsinfo':
        question_qs = question_qs.needs_info()
    elif filter_ == 'solution-provided':
        question_qs = question_qs.solution_provided()
    elif filter_ == 'solved':
        question_qs = question_qs.solved()
    elif filter_ == 'locked':
        question_qs = question_qs.locked()
    elif filter_ == 'recently-unanswered':
        question_qs = question_qs.recently_unanswered()
    else:
        # No specific filter: apply the tab-level default queryset.
        if show == 'needs-attention':
            question_qs = question_qs.needs_attention()
        if show == 'responded':
            question_qs = question_qs.responded()
        if show == 'done':
            question_qs = question_qs.done()
    if escalated:
        question_qs = question_qs.filter(
            tags__name__in=[config.ESCALATE_TAG_NAME])
    question_qs = question_qs.select_related(
        'creator', 'last_answer', 'last_answer__creator')
    question_qs = question_qs.prefetch_related('topic', 'topic__product')
    question_qs = question_qs.filter(creator__is_active=1)
    # Hide spam from everyone except moderators.
    if not request.user.has_perm('flagit.can_moderate'):
        question_qs = question_qs.filter(is_spam=False)
    if owner == 'mine' and request.user.is_authenticated():
        criteria = Q(answers__creator=request.user) | Q(creator=request.user)
        question_qs = question_qs.filter(criteria).distinct()
    else:
        owner = None
    feed_urls = ((urlparams(reverse('questions.feed'),
                            product=product_slug, topic=topic_slug),
                  QuestionsFeed().title()),)
    if tagged:
        tag_slugs = tagged.split(',')
        tags = Tag.objects.filter(slug__in=tag_slugs)
        if tags:
            for t in tags:
                question_qs = question_qs.filter(tags__name__in=[t.name])
            if len(tags) == 1:
                feed_urls += ((reverse('questions.tagged_feed',
                                       args=[tags[0].slug]),
                               TaggedQuestionsFeed().title(tags[0])),)
        else:
            # Tag slugs matched nothing: show an empty list rather than
            # silently dropping the tag filter.
            question_qs = Question.objects.none()
    # Exclude questions over 90 days old without an answer.
    oldest_date = date.today() - timedelta(days=90)
    question_qs = question_qs.exclude(created__lt=oldest_date, num_answers=0)
    # Filter by products.
    if products:
        # This filter will match if any of the products on a question have the
        # correct id.
        question_qs = question_qs.filter(product__in=products).distinct()
    # Filter by topic.
    if topic:
        # This filter will match if any of the topics on a question have the
        # correct id.
        question_qs = question_qs.filter(topic__id=topic.id)
    # Filter by locale for AAQ locales, and by locale + default for others.
    if request.LANGUAGE_CODE in QuestionLocale.objects.locales_list():
        forum_locale = request.LANGUAGE_CODE
        locale_query = Q(locale=request.LANGUAGE_CODE)
    else:
        forum_locale = settings.WIKI_DEFAULT_LANGUAGE
        locale_query = Q(locale=request.LANGUAGE_CODE)
        locale_query |= Q(locale=settings.WIKI_DEFAULT_LANGUAGE)
    question_qs = question_qs.filter(locale_query)
    # Set the order.
    order_by = ORDER_BY[order][0]
    question_qs = question_qs.order_by(
        order_by if sort == 'asc' else '-%s' % order_by)
    try:
        with statsd.timer('questions.view.paginate.%s' % filter_):
            questions_page = simple_paginate(
                request, question_qs, per_page=config.QUESTIONS_PER_PAGE)
    except (PageNotAnInteger, EmptyPage):
        # If we aren't on page 1, redirect there.
        # TODO: Is 404 more appropriate?
        if request.GET.get('page', '1') != '1':
            url = build_paged_url(request)
            return HttpResponseRedirect(urlparams(url, page=1))
    # Recent answered stats
    extra_filters = locale_query
    if products:
        extra_filters &= Q(product__in=products)
    recent_asked_count = Question.recent_asked_count(extra_filters)
    recent_unanswered_count = Question.recent_unanswered_count(extra_filters)
    if recent_asked_count:
        recent_answered_percent = int(
            (float(recent_asked_count - recent_unanswered_count) /
             recent_asked_count) * 100)
    else:
        recent_answered_percent = 0
    # List of products to fill the selector.
    product_list = Product.objects.filter(visible=True)
    # List of topics to fill the selector. Only shows if there is exactly
    # one product selected.
    if products and not multiple:
        topic_list = Topic.objects.filter(
            visible=True, product=products[0])[:10]
    else:
        topic_list = []
    # Store current filters in the session
    if request.user.is_authenticated():
        request.session['questions_owner'] = owner
    # Get the top contributors for the locale and product.
    # If we are in a product forum, limit the top contributors to that product.
    if products and len(products) == 1:
        product = products[0]
    else:
        product = None
    try:
        top_contributors, _ = top_contributors_questions(
            locale=forum_locale, product=product)
    except ES_EXCEPTIONS:
        # Elasticsearch trouble shouldn't take down the whole page.
        top_contributors = []
        statsd.incr('questions.topcontributors.eserror')
        log.exception('Support Forum Top contributors query failed.')
    data = {'questions': questions_page,
            'feeds': feed_urls,
            'filter': filter_,
            'owner': owner,
            'show': show,
            'filters': FILTER_GROUPS[show],
            'order': order,
            'orders': ORDER_BY,
            'sort': sort,
            'escalated': escalated,
            'tags': tags,
            'tagged': tagged,
            'recent_asked_count': recent_asked_count,
            'recent_unanswered_count': recent_unanswered_count,
            'recent_answered_percent': recent_answered_percent,
            'product_list': product_list,
            'products': products,
            'product_slug': product_slug,
            'multiple_products': multiple,
            'all_products': product_slug == 'all',
            'top_contributors': top_contributors,
            'topic_list': topic_list,
            'topic': topic}
    with statsd.timer('questions.view.render'):
        return render(request, template, data)
def parse_troubleshooting(troubleshooting_json):
    """Normalizes the troubleshooting data from `question`.

    Returns a normalized version, or `None` if something was wrong.
    This does not try very hard to fix bad data.
    """
    if not troubleshooting_json:
        return None
    try:
        data = json.loads(troubleshooting_json)
    except ValueError:
        return None
    # Schema for the parsed troubleshooting data: a sequence of
    # (path, type) pairs. `path` is the chain of keys leading to a value
    # (an empty path means the top-level object itself) and `type` is
    # what that value must be an instance of. Parent dicts are listed
    # before their children, so every `.get()` below runs on a dict.
    schema = (
        ((), dict),
        (('accessibility', ), dict),
        (('accessibility', 'isActive'), bool),
        (('application', ), dict),
        (('application', 'name'), basestring),
        (('application', 'supportURL'), basestring),
        (('application', 'userAgent'), basestring),
        (('application', 'version'), basestring),
        (('extensions', ), list),
        (('graphics', ), dict),
        (('javaScript', ), dict),
        (('modifiedPreferences', ), dict),
        (('userJS', ), dict),
        (('userJS', 'exists'), bool),
    )
    for path, expected_type in schema:
        node = data
        for key in path:
            node = node.get(key)
            if node is None:
                # Missing item anywhere along the path: reject.
                return None
        if not isinstance(node, expected_type):
            return None
    # The data has been inspected, and should be in the right format.
    # Drop the noisy printing preferences.
    # TODO: If the UI for this gets better, we can include these prefs
    # and just make them collapsible.
    data['modifiedPreferences'] = dict(
        (key, val)
        for key, val in data['modifiedPreferences'].items()
        if not key.startswith('print'))
    return data
@mobile_template('questions/{mobile/}question_details.html')
@anonymous_csrf # Need this so the anon csrf gets set for watch forms.
def question_details(request, template, question_id, form=None,
watch_form=None, answer_preview=None, **extra_kwargs):
"""View the answers to a question."""
ans_ = _answers_data(request, question_id, form, watch_form,
answer_preview)
question = ans_['question']
if question.is_spam and not request.user.has_perm('flagit.can_moderate'):
raise Http404('No question matches the given query.')
# Try to parse troubleshooting data as JSON.
troubleshooting_json = | |
additionally be used (see below). For the energy density, a block by block check against relevant summed energy terms in scalar.dat or dmc.dat files is additionally made.'
)
parser.add_option('-f','--fixed','--fixed_sum',dest='fixed_sum',
action='store_true',default=False,
help = 'Full sum of data takes on a fixed, non-stochastic value. In this case, when checking against reference data, check that each block satisfies the fixed sum condition. This is appropriate, e.g. for the electron density where the full sum of each block must equal the number of electrons. Typically the appropriate value is inferred automatically and applied by default (in other cases default=%default).'
)
parser.add_option('-m','--make_ref','--make_reference',dest='make_reference',
default='none',
help='Used during test construction phase. Pass an integer list via -m corresponding to the number of partial sums to perform on the reference stat data followed by a series of MC step factors. The number of partial means must divide evenly into the number of stat field values for the quantity in question. The step factors relate the length of the test run (shorter) to the reference run (longer): #MC_test*factor=#MC_reference. Files containing the reference data will be produced, one for each step factor. For the partial sums, the reference sigma is increased so that the test fails with the expected probability specified by the inputted nsigma.'
)
parser.add_option('-t','--plot_trace',dest='plot_trace',
action='store_true',default=False,
help='Plot traces of full and partial sums (default=%default).'
)
parser.add_option('-v','--verbose',
action='store_true',default=False,
help='Print detailed information (default=%default).'
)
allowed_quantities = [
'density',
'spindensity',
'energydensity',
'1rdm',
'1redm',
'momentum',
]
opt,files_in = parser.parse_args()
options = obj()
options.transfer_from(opt.__dict__)
if options.help:
print('\n'+parser.format_help().strip())
print('\n\nExample usage:')
print('\n Making reference data to create a test:')
print(" check_stats.py -p qmc -s 0 -q spindensity -e 10 -c 8 -v -m '0 10 100'")
print('\n Using reference data to perform a test:')
print(' check_stats.py -p qmc -s 0 -q spindensity -e 10 -c 8 -n 3 -r qmc.s000.stat_ref_spindensity_10.dat')
print()
exit()
#end if
if len(files_in)>0:
exit_fail('check_stats does not accept file as input, only command line arguments\nfiles provided: {0}'.format(files_in))
#end if
checkstats_settings.verbose = options.verbose
vlog('\nreading command line inputs')
options.series = int(options.series)
options.equilibration = int(options.equilibration)
options.nsigma = float(options.nsigma)
options.path,options.prefix = os.path.split(options.prefix)
if options.plot_trace and not can_plot:
vlog('trace plots requested, but plotting libraries are not available\ndisabling plots',n=1)
options.plot_trace = False
#end if
if options.path=='':
options.path = './'
#end if
options.qlabel = None
if ' ' in options.quantity or ',' in options.quantity:
qlist = options.quantity.strip('"').strip("'").replace(',',' ').split()
if len(qlist)!=2:
exit_fail('quantity can accept only one or two values\nyou provided {0}: {1}'.format(len(qlist),qlist))
#end if
options.quantity,options.qlabel = qlist
#end if
if options.qlabel is None:
default_label = obj({
'density' : 'Density' ,
'spindensity' : 'SpinDensity' ,
'energydensity' : 'EnergyDensity' ,
'1rdm' : 'DensityMatrices',
'1redm' : 'DensityMatrices',
'momentum' : 'nofk' ,
})
options.qlabel = default_label[options.quantity]
#end if
if options.quantity=='none':
exit_fail('must provide quantity')
elif options.quantity not in allowed_quantities:
exit_fail('unrecognized quantity provided\nallowed quantities: {0}\nquantity provided: {1}'.format(allowed_quantities,options.quantity))
#end if
if options.npartial_sums=='none':
exit_fail('-c option is required')
#end if
options.npartial_sums = int(options.npartial_sums)
if options.reference_file!='none':
if not os.path.exists(options.reference_file):
exit_fail('reference file does not exist\nreference file provided: {0}'.format(options.reference_file))
#end if
options.make_reference = False
elif options.make_reference!='none':
try:
mr = array(options.make_reference.split(),dtype=int)
except:
exit_fail('make_reference must be a list of integers\nyou provided: {0}'.format(options.make_reference))
#end try
if len(mr)<1:
exit_fail('make_reference must contain at least one MC length factor')
#end if
options.mc_factors = mr
options.make_reference = True
else:
exit_fail('must provide either reference_file or make_reference')
#end if
fixed_sum_quants = set(['density','spindensity','energydensity'])
if options.quantity in fixed_sum_quants:
options.fixed_sum = True
#end if
vlog('inputted options:\n'+str(options),n=1)
except Exception as e:
exit_fail('error during command line read:\n'+str(e))
#end try
return options
#end def read_command_line
def process_stat_file(options):
    """Read the per-block estimator data for options.quantity from the
    run's stat.h5 file and reduce it to full and partial sums.

    Locates the qmcpack output files matching options.prefix/series in
    options.path, extracts the requested quantity from the stat.h5 file,
    and for each data channel (e.g. u/d spin, or W/T/V energy terms)
    computes per-block full sums and options.npartial_sums partial sums,
    plus their means and error bars via simstats.

    Side effects: sets options.output_files and options.nblocks.
    Returns an obj mapping channel name -> obj(full_mean, full_error,
    partial_mean, partial_error, data).  Exits via exit_fail on any
    error (the whole body is wrapped in try/except).
    """
    vlog('processing stat.h5 file')
    values = obj()
    try:
        # find all output files matching prefix
        vlog('searching for qmcpack output files',n=1)
        vlog('search path:\n  '+options.path,n=2)
        prefix = options.prefix+'.s'+str(options.series).zfill(3)
        files = os.listdir(options.path)
        output_files = obj()
        for file in files:
            if file.startswith(prefix):
                if file.endswith('.stat.h5'):
                    output_files.stat   = file
                elif file.endswith('.scalar.dat'):
                    output_files.scalar = file
                elif file.endswith('.dmc.dat'):
                    output_files.dmc    = file
                #end if
            #end if
        #end for
        options.output_files = output_files
        vlog('files found:\n'+str(output_files).rstrip(),n=2)
        if 'stat' not in output_files:
            exit_fail('stat.h5 file matching prefix {0} was not found\nsearch path: {1}'.format(prefix,options.path))
        #end if
        # read data from the stat file
        vlog('opening stat.h5 file',n=1)
        stat = read_hdf(os.path.join(options.path,output_files.stat),view=True)
        vlog('file contents:\n'+repr(stat).rstrip(),n=2)
        vlog('extracting {0} data'.format(options.quantity),n=1)
        vlog('searching for {0} with label {1}'.format(options.quantity,options.qlabel),n=2)
        if options.qlabel in stat:
            qstat = stat[options.qlabel]
            vlog('{0} data contents:\n{1}'.format(options.quantity,repr(qstat).rstrip()),n=2)
        else:
            exit_fail('could not find {0} data with label {1}'.format(options.quantity,options.qlabel))
        #end if
        # HDF5 paths to the raw per-block values for each quantity.
        # A plain string path yields one channel; a (path, index, count)
        # tuple means `count` channels are interleaved in the last axis
        # of a single dataset and channel `index` must be sliced out
        # (used by the energy density's W/T/V terms).
        quantity_paths = obj({
            'density'       : obj(tot='value'),
            'spindensity'   : obj(u='u/value',
                                  d='d/value'),
            '1rdm'          : obj(u='number_matrix/u/value',
                                  d='number_matrix/d/value'),
            '1redm'         : obj(u='energy_matrix/u/value',
                                  d='energy_matrix/d/value'),
            'energydensity' : obj(W=('spacegrid1/value',0,3),
                                  T=('spacegrid1/value',1,3),
                                  V=('spacegrid1/value',2,3)),
            'momentum'      : obj(tot='value'),
            })
        qpaths = quantity_paths[options.quantity]
        vlog('search paths:\n{0}'.format(str(qpaths).rstrip()),n=2)
        qdata = obj()
        # dfull caches the shared packed dataset so it is read and
        # reshaped only once across the interleaved channels.
        dfull = None
        for dname,dpath in qpaths.items():
            packed = isinstance(dpath,tuple)
            if packed:
                dpath,dindex,dcount = dpath
            #end if
            if not qstat.path_exists(dpath):
                exit_fail('{0} data not found in file {1}\npath searched: {2}'.format(options.quantity,output_files.stat,dpath))
            #end if
            if not packed:
                d = array(qstat.get_path(dpath),dtype=float)
            else:
                if dfull is None:
                    dfull = array(qstat.get_path(dpath),dtype=float)
                    # (blocks, values*count) -> (blocks, values, count)
                    dfull.shape = dfull.shape[0],dfull.shape[1]//dcount,dcount
                #end if
                d = dfull[:,:,dindex]
                d.shape = dfull.shape[0],dfull.shape[1]
            #end if
            qdata[dname] = d
            vlog('{0} data found with shape {1}'.format(dname,d.shape),n=2)
            if len(d.shape)>2:
                # flatten any extra trailing axes into one value axis
                d.shape = d.shape[0],d.size//d.shape[0]
                vlog('reshaped {0} data to {1}'.format(dname,d.shape),n=2)
            #end if
            options.nblocks = d.shape[0]
        #end for
        # process the data, taking full and partial sums
        vlog('processing {0} data'.format(options.quantity),n=1)
        for dname,d in qdata.items():
            vlog('processing {0} data'.format(dname),n=2)
            if d.shape[1]%options.npartial_sums!=0:
                exit_fail('cannot make partial sums\nnumber of requested partial sums does not divide evenly into the number of values available\nrequested partial sums: {0}\nnumber of values present: {1}\nnvalue/npartial_sums: {2}'.format(options.npartial_sums,d.shape[1],float(d.shape[1])/options.npartial_sums))
            #end if
            data = obj()
            data.full_sum = d.sum(1)
            vlog('full sum data shape: {0}'.format(data.full_sum.shape),n=3)
            data.partial_sums = zeros((d.shape[0],options.npartial_sums))
            # each partial sum covers a contiguous slice of psize values
            psize = d.shape[1]//options.npartial_sums
            for p in range(options.npartial_sums):
                data.partial_sums[:,p] = d[:,p*psize:(p+1)*psize].sum(1)
            #end for
            vlog('partial sum data shape: {0}'.format(data.partial_sums.shape),n=3)
            fmean,var,ferror,kappa = simstats(data.full_sum,exclude=options.equilibration)
            vlog('full sum mean : {0}'.format(fmean),n=3)
            vlog('full sum error: {0}'.format(ferror),n=3)
            pmean,var,perror,kappa = simstats(data.partial_sums,dim=0,exclude=options.equilibration)
            vlog('partial sum mean : {0}'.format(pmean),n=3)
            vlog('partial sum error: {0}'.format(perror),n=3)
            values[dname] = obj(
                full_mean     = fmean,
                full_error    = ferror,
                partial_mean  = pmean,
                partial_error = perror,
                data          = data,
                )
        #end for
        # check that all values have been processed
        missing = set(qpaths.keys())-set(values.keys())
        if len(missing)>0:
            exit_fail('some values not processed\nvalues missing: {0}'.format(sorted(missing)))
        #end if
        # plot quantity traces, if requested
        if options.plot_trace:
            vlog('creating trace plots of full and partial sums',n=1)
            for dname,dvalues in values.items():
                label = options.quantity
                if len(values)>1:
                    label+=' '+dname
                #end if
                data = dvalues.data
                figure()
                plot(data.full_sum)
                title('Trace of {0} full sum'.format(label))
                xlabel('Block index')
                figure()
                plot(data.partial_sums)
                title('Trace of {0} partial sums'.format(label))
                xlabel('Block index')
            #end for
            show()
        #end if
    except Exception as e:
        exit_fail('error during stat file processing:\n'+str(e))
    #end try
    return values
#end def process_stat_file
def make_reference_files(options,values):
    """Write reference and trace data files used to build stats tests.

    For each Monte Carlo step factor in options.mc_factors, writes a
    reference file containing means and (inflated) error bars of the
    full and partial sums, one row for the full sum followed by one row
    per partial sum.  Also writes a single trace file with the per-block
    full and partial sum data.

    Args:
        options: obj with prefix, series, quantity, path, mc_factors,
            npartial_sums and nblocks attributes (see read_command_line
            / process_stat_file).
        values: obj mapping channel name -> obj(full_mean, full_error,
            partial_mean, partial_error, data) from process_stat_file.
    """
    vlog('\nmaking reference files')
    # create a reference file for each Monte Carlo sample factor
    for mcfac in options.mc_factors:
        # Inflate the reference error bars: a test run mcfac times
        # shorter than the reference has larger statistical error.
        errfac = sqrt(1.0+mcfac)
        filename = '{0}.s{1}.stat_ref_{2}_{3}.dat'.format(options.prefix,str(options.series).zfill(3),options.quantity,mcfac)
        filepath = os.path.join(options.path,filename)
        vlog('writing reference file for {0}x shorter test runs'.format(mcfac),n=1)
        vlog('reference file location: '+filepath,n=2)
        # Context manager guarantees the file is closed even if a
        # formatting error is raised mid-write (was open()/close()).
        with open(filepath,'w') as f:
            # write descriptive header line
            line = '# '
            for dname in sorted(values.keys()):
                line += ' {0:<16} {1:<16}'.format(dname,dname+'_err')
            #end for
            f.write(line+'\n')
            # write means and errors of full sum
            line = ''
            for dname in sorted(values.keys()):
                dvalues = values[dname]
                fmean  = dvalues.full_mean
                ferror = dvalues.full_error
                line += ' {0: 16.8e} {1: 16.8e}'.format(fmean,errfac*ferror)
            #end for
            f.write(line+'\n')
            # write means and errors of partial sums
            for p in range(options.npartial_sums):
                line = ''
                for dname in sorted(values.keys()):
                    dvalues = values[dname]
                    pmean  = dvalues.partial_mean
                    perror = dvalues.partial_error
                    line += ' {0: 16.8e} {1: 16.8e}'.format(pmean[p],errfac*perror[p])
                #end for
                f.write(line+'\n')
            #end for
        #end with
    #end for
    # create a trace file containing full and partial sum data per block
    filename = '{0}.s{1}.stat_trace_{2}.dat'.format(options.prefix,str(options.series).zfill(3),options.quantity)
    filepath = os.path.join(options.path,filename)
    vlog('writing trace file containing full and partial sums per block',n=1)
    vlog('trace file location: '+filepath,n=2)
    with open(filepath,'w') as f:
        # write descriptive header line
        line = '# index '
        for dname in sorted(values.keys()):
            line += ' {0:<16}'.format(dname+'_full')
            for p in range(options.npartial_sums):
                line += ' {0:<16}'.format(dname+'_partial_'+str(p))
            #end for
        #end for
        f.write(line+'\n')
        # write full and partial sum data per block
        for b in range(options.nblocks):
            line = ' {0:>6}'.format(b)
            for dname in sorted(values.keys()):
                dvalues = values[dname].data
                fsum  = dvalues.full_sum
                psums = dvalues.partial_sums[b]
                line += ' {0: 16.8e}'.format(fsum[b])
                for psum in psums:
                    line += ' {0: 16.8e}'.format(psum)
                #end for
            #end for
            f.write(line+'\n')
        #end for
    #end with
    vlog('\n')
#end def make_reference_files
here. Can reduce number of POs
print 'First isomorphism: ',res
if res == True:
did_iso_1 = True
abc('&get;&scl;&put')
write_file('iso1')
leq = eq_classes()
## print leq
map1 = create_map(leq,N) #creates map into original if there were isomorphisms
## print map1
if not count_less(L,0) == N:
print 'L = %s'%sumsize(L)
L1 = [-1]*n_pos()
## L1 = pass_down(L,list(L1),map1) # no need to pass down because L will have no values at this point.
else:
map1 =range(N)
else: #didn"t do iso
map1 = range(N)
N = n_pos()
## print 4
r = pre_simp(1) #pre_simp 1 here make it not use phase.
write_file('smp1')
## NP = n_pos()/N #if NP > 1 then NP unrollings were done in pre_simp.
if r[0] == Unsat: #all remaining POs are Unsat
L1 = [0]*N
L = unmap2(L1,map1)
L = weave(list(L),[],lst1) #put back 1 in L
L = weave(list(L),lst0,[]) #put back 0 in L
print sumsize(L)
print 'Time = %.2f'%(time.time() - x_init)
report_results(list(L)) #L in original POs in 0,1,-1 format
return L
f_name_save = f_name
nam_save = '%s_initial_save.aig'%f_name
#########do second iso here
N = n_pos()
if N == 1:
map2 = [0]
L2=[-1]
## write_file('1')
## L = output(list(L),list(L1),L2,map1,map2,lst0,lst1,NP)
g = [map1,map2,lst0,lst1,NP,[]]
L = output2(list(L2),g)
uns = indices(L,-1)
if not uns == pos:
print 'uns and pos are not equal'
print 'uns = ',uns
print 'opos = ',pos
result = simple(2001,1) # 1 here means do not do simplification first
Ss = rs = result[0]
if rs == 'SAT':
L2 = [1]
pos_sat = pos_sat + [opos[0]]
print opos[0]
pos_sat = remove_dup(list(pos_sat))
print 'pos_sat = %s'%str(pos_sat)
if rs == 'UNSAT':
L2 = [0]
pos_unsat = pos_unsat + [opos[0]]
pos_unsat = remove_dup(list(pos_unsat))
print 'pos_unsat = %s'%str(len(pos_unsat))
else: #more than 1 PO left
## if False and N < 10000: #temp disable iso
if N < 10000 and n_ands() < 500000:
res = iso() #second iso - changes number of POs
print 'Second isomorphism: ',res
did_iso_2 = res
if res == True:
abc('&get;&scl;&put')
map2 = create_map(eq_classes(),N) #creates map into previous
write_file('iso2')
else:
map2 = range(n_pos())
else:
map2 = range(n_pos())
print '*******************entering par_multi_sat**********************'
abc('w %s_L2.aig'%init_initial_f_name)
S,dump,L2 = par_multi_sat(2*ttt,1,1,1,opos) #look for SAT POs
# L2 set here and has same length as n_pos here
lmbc1 = indices(L2,1)
lmbc0 = indices(L2,0)
## lst = [i for i in range(len(L2)) if L2[i]>-1]
## tlst = [opos[lst[i]] for i in range(len(lst))]
## opos = remove_sub(list(opos),tlst)
print '******************par_multi_sat ended**************************'
if len(lmbc0)>0 or len(lmbc0)>0:
print 'found %d SAT POs'%len(lmbc1)
print 'found %d UNSAT POs'%len(lmbc0)
pos_sat = pos_sat + [opos[i] for i in lmbc1]
print [opos[i] for i in lmbc1]
pos_sat = remove_dup(list(pos_sat))
print 'pos_sat = %s'%str(pos_sat)
pos_unsat = pos_unsat + [opos[i] for i in lmbc0]
pos_unsat = remove_dup(list(pos_unsat))
print 'number of pos_unsat = %s'%str(len(pos_unsat))
## L2 = s #first mention of L2 except in single PO case (N=1)
## #first mprove for 10-20 sec.
ps()
print 'Before first mprove2, L2 = %s'%sumsize(L2)
g = [map1,map2,lst0,lst1,NP,[]] #map1 is due to first isomorphism and map2 due to the second,
# NP is due to phase abstraction, lst0 and lst1 are
L = output2(list(L2),g) #reporting intermediate results in 0,1,-1 format
remain = []
uns = indices(L,-1)
## if not uns == opos:
## print 'uns and opos are not equal'
## print 'uns = ',uns
## print 'opos = ',opos
## print 'opos - len(opos), opos = ',len(opos),opos
## DDL = output3(range(len(L2)),map1,map2,lst0,lst1,NP)
## print 'DDL = %s'%str(DDL)
if n_fast == 1:
return L
NN=n_ands()
ttt = 100
print 'Entering first mprove2 for %d sec.'%ttt
print 'opos: ',str(opos)
g = [map1,map2,lst0,lst1,NP,[]]
Ss,L2,opo = mprove2(list(L2),op,ttt,1,g,list(opos)) #populates L2, sp_iter is done here
print 'multi_prove: mprove2 is done'
print 'opo: ',str(opo)
print 'L2: ',str(L2)
abc('r %s_L2.aig'%init_initial_f_name)
#opos = report(list(opos),list(L2))?
## if 1 in ss or 0 in ss #something solved
## if -1 in ss: #not all solved
## rem0 = indices(ss,0)
## rem1 = indices(ss,1)
## rem = rem0+rem1
## rem.sort()
## if not len(rem) == n_pos():
## print 'Removing %d POs'%len(rem)
## remove(rem,1)
## tlst1 = [opos[rem1[i]] for i in range(len(rem1))]
## tlst0 = [opos[rem0[i]] for i in range(len(rem0))]
## pos_sat = pos_sat + tlst1
## pos_unsat = pos_unsat + tlst1
## opos = remove_sub(list(opos),(tlst1+tlst0))
## assert len(opos) == n_pos(),'len(opos),n_pos() = %d,%d'%(len(opos),n_pos())
## abc('w %s_unsolved.aig'%init_initial_f_name) #update unsolved
opos = report(list(opos),list(L2))
## remove_proved_pos(L2) #here n_pos will differ from len(L2)
## #proved POs - remove from opos list
## lst1 = [i for i in range(len(L2)) if L2[i]==1]
## lst0 = [i for i in range(len(L2)) if L2[i]==0]
## rem = lst1+lst0
## rem.sort
## tlst1 = [opos[lst[i]] for i in range(len(lst1))]
## pos_sat = pos_sat + tlst1
## tlst0 = [opos[lst[i]] for i in range(len(lst0))]
## pos_unsat = pos_unsat + tlst1
## opos = remove_sub(list(opos),(tlst1+tlst0))
#### assert opo == opos,'opo = %s, opos = %s '%(str(opo),str(opos))
## if not len(opos) == 0:
## assert len(opos) == n_pos(),'len(opos),n_pos() = %d,%d'%(len(opos),n_pos())
## print 'writing unsolved file with n_pos = %d'%n_pos()
## abc('w %s_unsolved.aig'%init_initial_f_name)
## if Ss == 'SAT':
## print 'At least one PO may be SAT'
if Ss == 'ALL_SOLVED':
if count_less(L2,0)>0:
print 'ERROR'
## L = output(list(L),list(L1),L2,map1,map2,lst0,lst1,NP) # final report of results.
g = [map1,map2,lst0,lst1,NP,[]]
L = output2(list(L2),g)
uns = indices(L,-1)
if not uns == opos:
print 'uns and opos are not equal'
print 'uns = ',uns
print 'opos = ',opos
return L
print 'After first mprove2: %s'%sumsize(L2)
time_left = tt - (time.time()-x_init)
N_unsolved = count_less(L2,0)
if N_unsolved > 0 and n_fast == 0:
g = [map1,map2,lst0,lst1,NP,[]]
L = output2(list(L2),g)
uns = indices(L,-1)
if not uns == opos:
print 'uns and opos are not equal'
print 'uns = ',uns
print 'opos = ',opos
t = max(100,time_left/N)
t_all = 100
S = sumsize(L2)
T = '%.2f'%(time.time() - x_init)
print '%s in time = %s'%(S,T)
N = n_pos()
ttime = last_gasp_time #RKB: temp
J = slps+intrps+pdrs+bmcs+sims
#do each output for ttime sec.
if N_unsolved > 0:
# entering end game. Doing
#1. sp
#2. par_multi_sat for a long time,
#3. scorr_easy
#4. simple on each PO cone if unproved < 20, else sp_iter.
#5. simple for long time
## assert N_unsolved == n_pos(),'len N_unsolved not = n_pos',str(N_unsolved,n_pos())
found_sat = False
## print 'final_all = %d, Ss = %s'%(final_all,str(Ss))
if (final_all and not found_sat) or N_unsolved == 1:
print 'Trying to prove all %d remaining POs at once with super_prove'%N_unsolved
#simplify here?
#unsolved.aig is always updated to contain only unsolved POs
result = super_prove() #RKB put in sp_iter here because it would also find PO that is SAT?
r0 = result[0]
if r0 == 'UNSAT' or (r0 == 'SAT' and N_unsolved == 1): #all remaining POs are UNSAT
for i in range(len(L2)):
if L2[i] < 0:
if r0 == 'UNSAT':
L2[i] = 0
if r0 == 'SAT':
L2[i] = 1
## L = output(list(L),list(L1),L2,map1,map2,lst0,lst1,NP) # final report of results.
g = [map1,map2,lst0,lst1,NP,[]]
## print ' entering final reporting 1'
L = output2(list(L2),g)
uns = indices(L,-1)
if not uns == opos:
print 'uns and opos are not equal'
print 'uns = ',uns
print 'opos = ',opos
return L
if r0 == 'SAT': #N_unsolved >1. Did super_prove but do not know which PO was hit
found_sat = 1
if found_sat or not final_all: #RKB do something here with pdraz
map3 = [i for i in xrange(len(L2)) if L2[i] == -1] #map from unsolved to L2
ttime = 100
#first try par_multi_sat hard (5*ttime)
print 'Trying par_multi_sat for %.2f sec.'%(5*ttime)
SS,LL,ss3 = par_multi_sat(5*ttime) #this causes gap = ttime
if 1 in ss3: #we found a sat PO - reset_found_sat
found_sat = 0
abc('r %s_unsolved.aig'%init_initial_f_name)
opos = report(list(opos),list(ss3)) # xxxxread ?
## if 1 in ss3 or 0 in ss3: #something solved
## if -1 in ss3: #not all solved
## rem0 = indices(ss3,0)
## rem1 = indices(ss3,1)
## rem = rem0+rem1
## rem.sort()
## if not len(rem) == n_pos():
## print 'Removing %d POs'%len(rem)
## remove(rem,1)
## tlst1 = [opos[rem1[i]] for i in range(len(rem1))]
## tlst0 = [opos[rem0[i]] for i in range(len(rem0))]
## pos_sat = pos_sat + tlst1
## pos_unsat = pos_unsat + tlst1
## opos = remove_sub(list(opos),(tlst1+tlst0))
## assert len(opos) == n_pos(),'len(opos),n_pos() = %d,%d'%(len(opos),n_pos())
## abc('w %s_unsolved.aig'%init_initial_f_name) #update unsolved
L2 = unmap(list(L2),ss3,map3) #inserts the | |
the method checks if the utterance contains the given vocab
thereby allowing the user to say things like "yes, please" and still
match against "Yes.voc" containing only "yes". An exact match can be
requested.
The method first checks in the current Skill's .voc files and secondly
in the "res/text" folder of mycroft-core. The result is cached to
avoid hitting the disk each time the method is called.
Args:
utt (str): Utterance to be tested
voc_filename (str): Name of vocabulary file (e.g. 'yes' for
'res/text/en-us/yes.voc')
lang (str): Language code, defaults to self.lang
exact (bool): Whether the vocab must exactly match the utterance
Returns:
bool: True if the utterance has the given vocabulary it
"""
lang = lang or self.lang
cache_key = lang + voc_filename
if cache_key not in self.voc_match_cache:
# Check for both skill resources and mycroft-core resources
voc = self.find_resource(voc_filename + '.voc', 'vocab')
if not voc: # Check for vocab in mycroft/ovos core resources
voc = resolve_resource_file(join('text', lang,
voc_filename + '.voc')) or \
resolve_ovos_resource_file(join('text', lang,
voc_filename + '.voc'))
if not voc or not exists(voc):
LOG.warning('Could not find {}.voc file'.format(voc_filename))
return False
# load vocab and flatten into a simple list
vocab = read_vocab_file(voc)
self.voc_match_cache[cache_key] = list(chain(*vocab))
if utt:
if exact:
# Check for exact match
return any(i.strip() == utt
for i in self.voc_match_cache[cache_key])
else:
# Check for matches against complete words
return any([re.match(r'.*\b' + i + r'\b.*', utt)
for i in self.voc_match_cache[cache_key]])
else:
return False
def remove_voc(self, utt, voc_filename, lang=None):
""" removes any entry in .voc file from the utterance """
lang = lang or self.lang
cache_key = lang + voc_filename
if cache_key not in self.voc_match_cache:
self.voc_match(utt, voc_filename, lang)
if utt:
# Check for matches against complete words
for i in self.voc_match_cache.get(cache_key) or []:
# Substitute only whole words matching the token
utt = re.sub(r'\b' + i + r"\b", "", utt)
return utt
# speech
@property
def dialog_renderer(self):
if self.lang in self.dialog_renderers:
return self.dialog_renderers[self.lang]
# Try to load the renderer
self._load_dialog_files(self.res_dir, self.lang)
if self.lang in self.dialog_renderers:
return self.dialog_renderers[self.lang]
# Fall back to main language
return self.dialog_renderers.get(self._core_lang)
def speak(self, utterance, expect_response=False, wait=True, meta=None):
"""Speak a sentence.
Args:
utterance (str): sentence mycroft should speak
expect_response (bool): set to True if Mycroft should listen
for a response immediately after
speaking the utterance.
wait (bool): set to True to block while the text
is being spoken.
meta: Information of what built the sentence.
"""
# registers the skill as being active
meta = meta or {}
meta['skill'] = self.skill_id
data = {'utterance': utterance,
'expect_response': expect_response,
'meta': meta,
'lang': self.lang}
message = dig_for_message()
m = message.forward("speak", data) if message \
else Message("speak", data)
m.context["skill_id"] = self.skill_id
self.bus.emit(m)
if wait:
wait_while_speaking()
def speak_dialog(self, key, data=None, expect_response=False, wait=True):
""" Speak a random sentence from a dialog file.
Args:
key (str): dialog file key (e.g. "hello" to speak from the file
"locale/en-us/hello.dialog")
data (dict): information used to populate sentence
expect_response (bool): set to True if Mycroft should listen
for a response immediately after
speaking the utterance.
wait (bool): set to True to block while the text
is being spoken.
"""
if self.dialog_renderer:
data = data or {}
self.speak(
self.dialog_renderer.render(key, data),
expect_response, wait, meta={'dialog': key, 'data': data}
)
else:
LOG.warning(
'dialog_render is None, does the locale/dialog folder exist?'
)
self.speak(key, expect_response, wait, {})
# continuous dialog
def make_active(self):
"""Bump skill to active_skill list in intent_service.
This enables converse method to be called even without skill being
used in last 5 minutes.
"""
msg = dig_for_message() or Message("")
if "skill_id" not in msg.context:
msg.context["skill_id"] = self.skill_id
self.bus.emit(msg.forward('active_skill_request',
{'skill_id': self.skill_id}))
def converse(self, message=None):
"""Handle conversation.
This method gets a peek at utterances before the normal intent
handling process after a skill has been invoked once.
To use, override the converse() method and return True to
indicate that the utterance has been handled.
utterances and lang are depreciated
Args:
message: a message object containing a message type with an
optional JSON data packet
Returns:
bool: True if an utterance was handled, otherwise False
"""
return False
    def get_response(self, dialog='', data=None, validator=None,
                     on_fail=None, num_retries=-1):
        """Get response from user.
        If a dialog is supplied it is spoken, followed immediately by listening
        for a user response. If the dialog is omitted listening is started
        directly.
        The response can optionally be validated before returning.
        Example::
            color = self.get_response('ask.favorite.color')
        Args:
            dialog (str): Optional dialog to speak to the user
            data (dict): Data used to render the dialog
            validator (any): Function with following signature::
                def validator(utterance):
                    return utterance != "red"
            on_fail (any):
                Dialog or function returning literal string to speak on
                invalid input. For example::
                def on_fail(utterance):
                    return "nobody likes the color red, pick another"
            num_retries (int): Times to ask user for input, -1 for infinite
                NOTE: User can not respond and timeout or say "cancel" to stop
        Returns:
            str: User's reply or None if timed out or canceled
        """
        data = data or {}
        # Default retry prompt: if on_fail is a dialog id, render it;
        # otherwise re-ask the original dialog (raw strings pass through).
        def on_fail_default(utterance):
            fail_data = data.copy()
            fail_data['utterance'] = utterance
            if on_fail:
                if self.dialog_renderer:
                    return self.dialog_renderer.render(on_fail, fail_data)
                return on_fail
            else:
                if self.dialog_renderer:
                    return self.dialog_renderer.render(dialog, data)
                return dialog
        def is_cancel(utterance):
            # True when the utterance matches the 'cancel' vocabulary
            return self.voc_match(utterance, 'cancel')
        def validator_default(utterance):
            # accept anything except 'cancel'
            return not is_cancel(utterance)
        # A callable on_fail replaces the default prompt builder entirely
        on_fail_fn = on_fail if callable(on_fail) else on_fail_default
        validator = validator or validator_default
        # Speak query and wait for user response
        if dialog:
            self.speak_dialog(dialog, data, expect_response=True, wait=True)
        else:
            # No dialog to speak - just open the microphone directly
            msg = dig_for_message()
            msg = msg.reply('mycroft.mic.listen') if msg else \
                Message('mycroft.mic.listen',
                        context={"skill_id": self.skill_id})
            self.bus.emit(msg)
        return self._wait_response(is_cancel, validator, on_fail_fn,
                                   num_retries)
def _wait_response(self, is_cancel, validator, on_fail, num_retries):
"""Loop until a valid response is received from the user or the retry
limit is reached.
Arguments:
is_cancel (callable): function checking cancel criteria
validator (callbale): function checking for a valid response
on_fail (callable): function handling retries
"""
self._response = False
self._real_wait_response(is_cancel, validator, on_fail, num_retries)
while self._response is False:
time.sleep(0.1)
return self._response
    def __get_response(self):
        """Helper to get a response from the user

        Temporarily replaces ``self.converse`` with a capture function so the
        next utterance is intercepted instead of going to intent handling.

        Returns:
            str: user's response or None on a timeout
        """
        def converse(utterances, lang=None):
            # Capture the first utterance (if any) and stop waiting
            converse.response = utterances[0] if utterances else None
            converse.finished = True
            return True
        # install a temporary conversation handler
        self.make_active()
        converse.finished = False
        converse.response = None
        self.converse = converse
        # 10 for listener, 5 for SST, then timeout
        # NOTE a threading event is not used otherwise we can't raise the
        # AbortEvent exception to kill the thread
        start = time.time()
        while time.time() - start <= 15 and not converse.finished:
            time.sleep(0.1)
        # An external event may have resolved/aborted the question meanwhile
        if self._response is not False:
            if self._response is None:
                # aborted externally (if None)
                LOG.debug("get_response aborted")
            converse.finished = True
            converse.response = self._response  # external override
        # Restore the skill's original converse handler
        self.converse = self._original_converse
        return converse.response
    def _handle_killed_wait_response(self):
        """Abort callback for a killed question.

        Setting ``_response`` to None releases the polling loop in
        ``_wait_response``; the original converse handler is then restored.
        """
        self._response = None
        self.converse = self._original_converse
    @killable_event("mycroft.skills.abort_question", exc=AbortQuestion,
                    callback=_handle_killed_wait_response, react_to_stop=True)
    def _real_wait_response(self, is_cancel, validator, on_fail, num_retries):
        """Loop until a valid response is received from the user or the retry
        limit is reached.

        Communicates the result by writing to ``self._response`` (str on
        success, None on cancel/timeout) rather than returning it, so the
        abort callback can override it from another thread.

        Arguments:
            is_cancel (callable): function checking cancel criteria
            validator (callable): function checking for a valid response
            on_fail (callable): function handling retries
        """
        num_fails = 0
        while True:
            if self._response is not False:
                # usually None when aborted externally
                # also allows overriding returned result from other events
                return self._response
            response = self.__get_response()
            if response is None:
                # if nothing said, prompt one more time
                num_none_fails = 1 if num_retries < 0 else num_retries
                if num_fails >= num_none_fails:
                    self._response = None
                    return
            else:
                if validator(response):
                    self._response = response
                    return
                # catch user saying 'cancel'
                if is_cancel(response):
                    self._response = None
                    return
            num_fails += 1
            # Stop when retries are exhausted or an external event resolved us
            if 0 < num_retries < num_fails or self._response is not False:
                self._response = None
                return
            line = on_fail(response)
            if line:
                # Speak the retry prompt and re-open the mic afterwards
                self.speak(line, expect_response=True)
            else:
                # No prompt to speak - just listen again
                self.bus.emit(Message('mycroft.mic.listen',
                                      context={"skill_id": self.skill_id}))
def ask_yesno(self, prompt, data=None):
"""Read prompt and wait for a yes/no answer
This automatically deals with translation and common variants,
such as 'yeah', 'sure', etc.
Args:
prompt (str): a dialog id or string to read
data (dict): response data
Returns:
string: 'yes', 'no' or whatever the user response if not
one of those, including None
"""
| |
<reponame>Louwrensth/tofu
"""
This module contains tests for tofu.geom in its structured version
"""
# Built-in
import os
import warnings
# Standard
import numpy as np
import matplotlib.pyplot as plt
# Nose-specific
from nose import with_setup # optional
# tofu-specific
from tofu import __version__
import tofu.utils as tfu
import tofu.geom as tfg
import tofu.data as tfd
_here = os.path.abspath(os.path.dirname(__file__))
VerbHead = 'tofu.data.tests03_core'
#######################################################
#
# Setup and Teardown
#
#######################################################
def setup_module(module):
    """Delete leftover test output files from other tofu versions."""
    print("")  # newline after the test-runner dots
    stale = []
    for fname in os.listdir(_here):
        if not all(mark in fname for mark in ('TFD_', 'Test', '.npz')):
            continue
        # Version tag sits between '_Vv' and '_U' in the file name
        if not fname[fname.index('_Vv')+2:fname.index('_U')] == __version__:
            stale.append(fname)
    print("Removing the following previous test files:")
    print(stale)
    for fname in stale:
        os.remove(os.path.join(_here, fname))
def teardown_module(module):
    """Delete the test output files created by this tofu version."""
    created = []
    for fname in os.listdir(_here):
        if not all(mark in fname for mark in ('TFD_', 'Test', '.npz')):
            continue
        # Only remove files tagged with the current version
        if fname[fname.index('_Vv')+2:fname.index('_U')] == __version__:
            created.append(fname)
    print("Removing the following test files:")
    print(created)
    for fname in created:
        os.remove(os.path.join(_here, fname))
#def my_setup_function():
# print ("my_setup_function")
#def my_teardown_function():
# print ("my_teardown_function")
#@with_setup(my_setup_function, my_teardown_function)
#def test_numbers_3_4():
# print 'test_numbers_3_4 <============================ actual test code'
# assert multiply(3,4) == 12
#@with_setup(my_setup_function, my_teardown_function)
#def test_strings_a_3():
# print 'test_strings_a_3 <============================ actual test code'
# assert multiply('a',3) == 'aaa'
#######################################################
#
# Creating Ves objects and testing methods
#
#######################################################
def emiss(pts, t=None):
    """Synthetic emissivity profile centred on (R, Z) = (2.4, 0).

    Args:
        pts: (3, npts) array of cartesian points (X, Y, Z)
        t:   optional 1d time vector; adds oscillating components

    Returns:
        np.ndarray: emissivity, shape (npts,) if t is None else (nt, npts)
    """
    major_r = np.hypot(pts[0, :], pts[1, :])
    height = pts[2, :]
    # Distance to the axis-like point (R=2.4, Z=0)
    rho = np.hypot(major_r - 2.4, height)
    broad = np.exp(-rho**2 / 0.25)
    core = np.exp(-rho**2 / 0.1)
    ring = np.exp(-(rho - 0.4)**2 / 0.1)
    if t is None:
        return broad + 0.1*core
    # Time-dependent: core oscillates with cos(t), ring with sin(2t)
    return (broad[None, :]
            + 0.5*np.cos(t)[:, None]*core[None, :]
            + 0.1*np.sin(2*t)[:, None]*ring[None, :])
class Test01_DataCam12D(object):
    """Tests for tofu.data DataCam1D/DataCam2D built from synthetic cameras.

    setup_class creates two cameras (1D and 2D LOS), computes synthetic
    signals from :func:`emiss`, and stores the resulting Data objects in
    ``cls.lobj`` for all test methods to iterate over.
    """
    @staticmethod
    def _create_cams(nch, lconf, ld, SavePath='./'):
        # NOTE(review): the 'ld' (dchans) argument appears unused here -
        # channel names are set later via set_dchans; confirm intended.
        c0 = tfg.utils.create_CamLOS1D(P=[4.5,0,0], F=0.1, N12=nch, D12=0.05,
                                       angs=[-np.pi,np.pi/10.,0.],
                                       config=lconf[0],
                                       Diag='Test', Name='Test',
                                       SavePath=SavePath)
        c1 = tfg.utils.create_CamLOS2D(P=[4.5,0,0], F=0.1,
                                       N12=[int(1.5*nch),nch],
                                       D12=[0.075,0.05],
                                       angs=[-np.pi,np.pi/10.,0.],
                                       config=lconf[1],
                                       Diag='Test', Name='Test',
                                       SavePath=SavePath)
        return [c0, c1]
    @classmethod
    def setup_class(cls, nch=30, nt=50, SavePath='./', verb=False):
        # time vector
        t = np.linspace(0, 10, nt)
        # Configs
        conf0 = tfg.utils.create_config(case='B2')
        conf1 = tfg.utils.create_config(case='B3')
        # dchans and cams
        d0 = dict(Name=['C0-{0}'.format(ii) for ii in range(0,nch)])
        d1 = dict(Name=['C1-{0}'.format(ii) for ii in range(0,nch)])
        lc = cls._create_cams(nch, [conf0, conf1], [d0, d1], SavePath=SavePath)
        # -------
        # dextra
        # Synthetic equilibrium-like extra data: axis, separatrix, 2D map
        nteq = nt // 2
        teq = np.linspace(t.min(), t.max(), nteq)
        teq2 = np.copy(teq) - 0.01
        Ax = np.array([2.4+0.1*np.cos(teq2), 0.1*np.sin(teq2)]).T
        Ax2 = np.array([2.4+0.1*np.cos(teq2/2.), 0.1*np.sin(teq2/2.)]).T
        Sep = (Ax[:,:,None]
               + 0.4*np.array([[-1,1,1,-1],[-1,-1,1,1]])[None,:,:])
        Sep2 = (Ax2[:,:,None]
                + 0.3*np.array([[-1,1,1,-1],[-1,-1,1,1]])[None,:,:])
        n1, n2 = 40, 60
        x1, x2 = np.linspace(2,3,n1), np.linspace(-0.8,0.8,n2)
        dx1, dx2 = (x1[1]-x1[0])/2., (x2[1]-x2[0])/2
        extent = (x1[0]-dx1, x1[-1]+dx1, x2[0]-dx2, x2[-1]+dx2)
        pts = np.array([np.tile(x1,n2), np.zeros((n1*n2,)), np.repeat(x2,n1)])
        emis = emiss(pts, t=teq2).reshape(nteq, n2, n1)
        dextra0 = {'pouet':{'t':teq, 'c':'k', 'data':np.sin(teq),
                            'units':'a.u.' , 'label':'pouet'},
                   'Ax':{'t':teq2, 'data2D':Ax},
                   'Sep':{'t':teq2, 'data2D':Sep},
                   'map':{'t':teq2, 'data2D':emis, 'extent':extent}}
        dextra1 = {'pouet':{'t':teq, 'c':'k', 'data':np.cos(teq),
                            'units':'a.u.' , 'label':'pouet'},
                   'Ax':{'t':teq2, 'data2D':Ax2},
                   'Sep':{'t':teq2, 'data2D':Sep2}}
        # -------
        # signal as Data from lcams
        lm = ['sum', 'simps']
        lData = [lc[ii].calc_signal(emiss, t=t,
                                    res=0.01, method=lm[ii], plot=False)[0]
                 for ii in range(0,len(lc))]
        # Adding concatenated sig / data and without lcam
        sig = np.concatenate([dd.data for dd in lData[:2]], axis=1)
        lData += [tfd.DataCam1D(data=sig, Name='All',
                                Diag='Test', Exp=conf0.Id.Exp, config=conf0)]
        dX12 = lc[1].dX12
        lData += [tfd.DataCam2D(data=lData[1].data, dX12=dX12, Name='c1nocam',
                                Diag='Test', Exp=conf0.Id.Exp)]
        # Setting dchans
        for ii in range(0,len(lData)):
            if ii % 2 == 0:
                lData[ii].set_dchans({'Name':['c%s'%jj for jj in
                                              range(0,lData[ii].nch)]})
        # Setting dextra
        for ii in range(0,len(lData)):
            de = dextra0 if ii % 2 == 0 else dextra1
            lData[ii].set_dextra(dextra=de)
        # Storing
        cls.lobj = lData
        cls.t = t
        # Saving for intermediate use
        lpfe = []
        for oo in cls.lobj:
            if oo._dgeom['config'] is not None:
                lpfe.append( oo._dgeom['config'].save(return_pfe=True,
                                                      verb=verb) )
            if oo._dgeom['lCam'] is not None:
                for cc in oo._dgeom['lCam']:
                    lpfe.append( cc.save(return_pfe=True, verb=verb) )
        cls.lpfe = lpfe
    # NOTE(review): 'setup' is decorated @classmethod but its first arg is
    # named 'self' - harmless here since both are no-ops, but confusing.
    @classmethod
    def setup(self):
        pass
    def teardown(self):
        pass
    @classmethod
    def teardown_class(cls):
        # Remove files saved in setup_class (set() to dedupe paths)
        for pfe in set(cls.lpfe):
            os.remove(pfe)
        pass
    def test01_set_dchans(self):
        for ii in range(0,len(self.lobj)):
            oo = self.lobj[ii]
            out0 = oo.dchans()
            if out0 is not None:
                assert type(out0) is dict
                lk = list(out0.keys())
                if len(lk)>0:
                    out1 = oo.dchans(key=lk[0])
                    assert type(out1) is np.ndarray
                    dch2 = {'dch2':['abc' for ii in range(0,oo.ddataRef['nch'])]}
                    oo.set_dchans(dch2, method='update')
                    assert all([len(out0[ss])==oo.ddataRef['nch'] for ss in lk])
    def test02_set_dextra(self):
        for ii in range(0,len(self.lobj)):
            oo = self.lobj[ii]
            out0 = oo.dextra
            if out0 is not None:
                assert type(out0) is dict
            t = np.linspace(0,10,10)
            dd = {'pouet': {'t':t, 'data':t,
                            'label':'pouet', 'units':'pouet units'}}
            oo.set_dextra(dd, method='update')
    def test03_select_t(self):
        for oo in self.lobj:
            ind = oo.select_t(t=None, out=bool)
            assert ind.sum()==oo.ddataRef['nt']
            ind = oo.select_t(t=5, out=bool)
            assert ind.sum() == 1
            ind = oo.select_t(t=[1,4], out=bool)
            assert np.all((oo.t[ind]>=1.) & (oo.t[ind]<=4))
    def test04_select_ch(self):
        for oo in self.lobj:
            if oo.dgeom['lCam'] is not None:
                # Select channels touching the vessel / plasma domain struct
                name = [(ii,k) for ii,k in
                        enumerate(oo.config.dStruct['lorder'])
                        if 'Ves' in k or 'PlasmaDomain' in k]
                assert len(name) == 1
                ind = oo.select_ch(touch=name[0][1], out=bool)
                assert ind.sum() > 0, (ind.sum(), ind)
                assert np.allclose(ind, oo.select_ch(touch=name[0][0],
                                                     out=bool))
            if len(oo.dchans().keys()) > 0:
                ind = oo.select_ch(key='Name', val=['c0','c10'],
                                   log='any', out=bool)
                assert ind.sum() == 2, (ind.sum(), ind)
    def test05_set_dtreat_indt(self):
        for oo in self.lobj:
            oo.set_dtreat_indt(t=[2,3])
            assert np.all((oo.t>=2) & (oo.t<=3))
            oo.set_dtreat_indt(indt=list(range(0,min(4,oo.ddataRef['nt']))))
            assert oo.nt == 4 or oo.nt==1
            oo.set_dtreat_indt()
            assert oo.nt == oo.ddataRef['nt']
    def test06_set_dtreat_indch(self):
        for oo in self.lobj:
            oo.set_dtreat_indch(indch = range(0,10))
            assert oo.dtreat['indch'].sum() == 10
    def test07_set_dtreat_mask(self):
        for oo in self.lobj:
            # Re-initialise
            oo.set_dtreat_indch()
            # set mask
            mask = np.arange(0,oo.ddataRef['nch'],10)
            oo.set_dtreat_mask(ind=mask, val=np.nan)
            nbnan = np.sum(np.any(np.isnan(oo.data), axis=0))
            assert nbnan >= mask.size, [oo.ddataRef['nch'], nbnan]
    def test08_dtreat_set_data0(self):
        for oo in self.lobj:
            # Re-initialise
            oo.set_dtreat_indt()
            oo.set_dtreat_mask()
            oo.set_dtreat_data0( data0 = oo.data[0,:] )
            assert oo.dtreat['data0-indt'] is None
            assert oo.dtreat['data0-Dt'] is None
            assert np.allclose(oo.data[0,:],0.), oo.data[0,:]
            oo.set_dtreat_data0(indt=[1,2,6,8,9])
            assert oo.dtreat['data0-indt'].sum() == 5
            assert oo.dtreat['data0-data'].size == oo.ddataRef['nch']
            oo.set_dtreat_data0(Dt=[2,3])
            assert oo.dtreat['data0-Dt'][0] >= 2. and oo.dtreat['data0-Dt'][1] <= 3.
            assert oo.dtreat['data0-data'].size == oo.ddataRef['nch']
            oo.set_dtreat_data0()
            assert oo.dtreat['data0-data'] is None
            assert np.allclose(oo.data, oo.ddataRef['data'])
    def test09_dtreat_set_interp_indt(self):
        for oo in self.lobj:
            ind = np.arange(0,oo.nt,10)
            oo.set_dtreat_interp_indt( ind )
            assert oo._dtreat['interp-indt'].sum() == ind.size
            ind = dict([(ii, np.arange(0,oo.nt,5)) for ii in range(0,oo.nch,3)])
            oo.set_dtreat_interp_indt( ind )
            assert type(oo._dtreat['interp-indt']) is dict
            oo.set_dtreat_interp_indt()
            assert oo._dtreat['interp-indt'] is None
    def test10_dtreat_set_interp_indch(self):
        for oo in self.lobj:
            ind = np.arange(0,oo.nch,10)
            oo.set_dtreat_interp_indch( ind )
            assert oo._dtreat['interp-indch'].sum() == ind.size
            ind = dict([(ii, np.arange(0,oo.nch,5)) for ii in range(0,oo.nt,3)])
            oo.set_dtreat_interp_indch( ind )
            assert type(oo._dtreat['interp-indch']) is dict
            oo.set_dtreat_interp_indch()
            assert oo._dtreat['interp-indch'] is None
    def test11_streat_set_dfit(self):
        for oo in self.lobj:
            oo.set_dtreat_dfit()
    def test12_streat_set_interpt(self):
        # Interpolation grid deliberately extends beyond the data range
        t = np.linspace(self.t[0]-0.1, self.t[-1]+0.5, 100)
        for oo in self.lobj:
            oo.set_dtreat_interpt(t)
    def test13_clear_ddata(self):
        for ii in range(0,len(self.lobj)):
            if ii%2 == 0:
                self.lobj[ii].clear_ddata()
    def test14_clear_dtreat(self):
        for ii in range(0,len(self.lobj)):
            if ii%2 == 1:
                self.lobj[ii].clear_dtreat(force=True)
    def test15_plot(self):
        for oo in self.lobj:
            kh = oo.plot(key=None, ntMax=4, nchMax=2, fs=None,
                         dmargin=dict(left=0.06, right=0.9),
                         wintit='test', tit='AHAH')
            plt.close('all')
    def test16_compare(self):
        for oo in self.lobj:
            kh = oo.plot_compare(oo)
            plt.close('all')
    def test17_plot_combine(self):
        for ii in range(1,len(self.lobj)):
            kh = self.lobj[ii].plot_combine(self.lobj[ii-1])
            plt.close('all')
    def test18_spectrogram(self):
        for oo in self.lobj:
            kh = oo.plot_spectrogram(warn=False)
            plt.close('all')
    def test19_plot_svd(self):
        for oo in self.lobj:
            kh = oo.plot_svd()
            plt.close('all')
    def test20_copy_equal(self):
        for oo in self.lobj:
            obj = oo.copy()
            assert obj == oo
    def test21_get_nbytes(self):
        for oo in self.lobj:
            nb, dnb = oo.get_nbytes()
    def test22_strip_nbytes(self, verb=False):
        # Each strip level should only ever shrink the object
        lok = self.lobj[0].__class__._dstrip['allowed']
        nb = np.full((len(lok),), np.nan)
        for oo in self.lobj:
            for ii in lok:
                oo.strip(ii, verb=verb)
                nb[ii] = oo.get_nbytes()[0]
            assert np.all(np.diff(nb)<=0.), nb
            for ii in lok[::-1]:
                oo.strip(ii, verb=verb)
    def test23_saveload(self, verb=False):
        for oo in self.lobj:
            pfe = oo.save(deep=False, verb=verb, return_pfe=True)
            obj = tfu.load(pfe, verb=verb)
            # Just to check the loaded version works fine
            assert oo == obj
            os.remove(pfe)
class Test02_DataCam12DSpectral(Test01_DataCam12D):
@classmethod
def setup_class(cls, nch=30, nt=50, SavePath='./', verb=False):
# time vector
t = np.linspace(0, 10, nt)
# Configs
conf0 = tfg.utils.create_config(case='B2')
conf1 = tfg.utils.create_config(case='B3')
# dchans and cams
d0 = dict(Name=['C0-{0}'.format(ii) for ii in range(0,nch)])
d1 = dict(Name=['C1-{0}'.format(ii) for ii in range(0,nch)])
lc = cls._create_cams(nch, [conf0, conf1], [d0, d1], SavePath=SavePath)
# -------
# dextra
nteq = nt // 2
teq = np.linspace(t.min(), t.max(), nteq)
teq2 = np.copy(teq) - 0.01
Ax = np.array([2.4+0.1*np.cos(teq2), 0.1*np.sin(teq2)]).T
Ax2 = np.array([2.4+0.1*np.cos(teq2/2.), 0.1*np.sin(teq2/2.)]).T
Sep = (Ax[:,:,None]
+ 0.4*np.array([[-1,1,1,-1],[-1,-1,1,1]])[None,:,:])
Sep2 = (Ax2[:,:,None]
+ 0.3*np.array([[-1,1,1,-1],[-1,-1,1,1]])[None,:,:])
n1, n2 = 40, 60
x1, x2 = np.linspace(2,3,n1), np.linspace(-0.8,0.8,n2)
dx1, dx2 = (x1[1]-x1[0])/2., (x2[1]-x2[0])/2
extent = (x1[0]-dx1, x1[-1]+dx1, x2[0]-dx2, x2[-1]+dx2)
pts = np.array([np.tile(x1,n2), np.zeros((n1*n2,)), np.repeat(x2,n1)])
emis = emiss(pts, t=teq2).reshape(nteq, n2, n1)
dextra0 = {'pouet':{'t':teq, 'c':'k', 'data':np.sin(teq),
'units':'a.u.' , 'label':'pouet'},
'Ax':{'t':teq2, 'data2D':Ax},
'Sep':{'t':teq2, 'data2D':Sep},
'map':{'t':teq2, 'data2D':emis, 'extent':extent}}
dextra1 = {'pouet':{'t':teq, 'c':'k', 'data':np.cos(teq),
'units':'a.u.' , 'label':'pouet'},
'Ax':{'t':teq2, 'data2D':Ax2},
'Sep':{'t':teq2, 'data2D':Sep2}}
# lamb
nlamb = 100
lamb = np.linspace(10,20,nlamb)
flamb = np.exp(-(lamb-12)**2/0.1) + | |
from fabric2 import Connection
from fabric2 import task
from fabric2 import config
import os
import time
from xml.etree import ElementTree as ET
import uuid
import glob
import json
import urllib.parse
import io
# XML files that together define a ProteoSAFe workflow deployment
workflow_components = ['input.xml', 'binding.xml', 'flow.xml', 'result.xml', 'tool.xml']
@task
def release_text(c, workflow_name):
    """Regenerate the auto-generated header section of README.md.

    Writes name, version, description and a tool-dependency summary above a
    marker element; everything below the marker from the previous README is
    preserved. Run from inside the workflow checkout (base_dir='.').
    """
    base_dir = '.'
    tools = read_all_tools('..')
    dependencies = output_tool_dependencies(workflow_name, base_dir)
    makefile = read_makefile(base_dir)
    readme = 'README.md'
    previous_readme_lines = []
    if os.path.isfile(readme):
        with open(readme) as f:
            for previous_readme_line in f:
                previous_readme_lines.append(previous_readme_line)
                # Everything up to (and including) the marker is regenerated,
                # so drop what was collected so far when the marker is seen
                if "CCMS_DEPLOYMENTS_HEADER_BREAK_ELEMENT_CAUTION_ANYTHING_ABOVE_WILL_BE_AUTOGENERATED" in previous_readme_line:
                    previous_readme_lines = []
    version = makefile["WORKFLOW_VERSION"]
    name = makefile.get("WORKFLOW_LABEL")
    if name:
        # Strip the surrounding quotes from the Makefile value
        name = name[1:-1]
    else:
        name = workflow_name
    description = makefile.get("WORKFLOW_DESCRIPTION")
    update_text = "Last updated: {}.".format(makefile['LAST_UPDATED'])
    dependency_text = []
    seen = {}
    for (dependency, dependency_version) in dependencies:
        status = "N/V"
        # Skip duplicate (tool, version) pairs
        if dependency not in seen or (dependency in seen and seen[dependency] != dependency_version):
            if dependency in tools:
                local_version, workflow = tools[dependency]
                if dependency_version == local_version:
                    status = "({})".format(dependency_version)
                else:
                    status = "({}, latest is {})".format(dependency_version, local_version)
                dependency_text.append("* {} {}".format(dependency, status))
            else:
                dependency_text.append("* {} (untracked)".format(dependency))
            seen[dependency] = dependency_version
    with open(readme, 'w') as w:
        w.write('## {}\n\n'.format(name))
        w.write('#### Version: {}\n\n'.format(version))
        if description:
            w.write('#### Description: \n{}\n\n'.format(description[1:-1]))
        if len(dependency_text) > 0:
            w.write('#### Dependencies: \n{}\n\n'.format("\n".join(dependency_text)))
        w.write('_{}_\n\n'.format(update_text))
        w.write('<data id=CCMS_DEPLOYMENTS_HEADER_BREAK_ELEMENT_CAUTION_ANYTHING_ABOVE_WILL_BE_AUTOGENERATED />\n\n')
        # Re-append the preserved (non-generated) tail of the old README
        for previous_readme_line in previous_readme_lines:
            w.write(previous_readme_line)
@task
def read_branch(c, workflow_name):
    """Return the checked-out git branch of *workflow_name*, or None.

    master/main checkouts and detached HEADs yield None, so only feature
    branches end up appended to the deployed version string.
    """
    with io.StringIO() as buf:
        c.local('cd {} && git branch | grep \*'.format(workflow_name), out_stream = buf)
        branch = buf.getvalue().replace('\n','').replace('* ','')
    mainline = ('HEAD detached' in branch or 'master' in branch
                or 'main' in branch)
    return None if mainline else branch
def read_makefile(workflow_name):
    """Parse KEY=VALUE pairs from the workflow's Makefile.

    Only the first '=' splits; values containing '=' stay intact. The
    Makefile's mtime is recorded under 'LAST_UPDATED'.

    Returns:
        dict: variable name -> raw string value
    """
    makefile_location = os.path.join(workflow_name, 'Makefile')
    params = {}
    with open(makefile_location) as f:
        for raw_line in f:
            key, sep, value = raw_line.rstrip().partition('=')
            if sep:
                params[key] = value
    params['LAST_UPDATED'] = time.ctime(os.path.getmtime(makefile_location))
    return params
@task
def update_workflow_from_makefile(c, workflow_name, subcomponents):
    """Deploy a workflow using the metadata declared in its Makefile.

    Reads WORKFLOW_* keys from <workflow_name>/Makefile and forwards them
    to update_all.
    """
    params = read_makefile(workflow_name)
    # BUGFIX: keys were misspelled "WORKLFLOW_LABEL"/"WORKLFLOW_DESCRIPTION",
    # so the label/description always fell back to None (Makefiles define
    # WORKFLOW_LABEL / WORKFLOW_DESCRIPTION, as read by release_text).
    update_all(c, params["WORKFLOW_VERSION"], params.get("WORKFLOW_NAME"),
               params.get("TOOL_FOLDER_NAME"), params.get("WORKFLOW_LABEL"),
               params.get("WORKFLOW_DESCRIPTION"), workflow_name,
               subcomponents=subcomponents)
@task
def update_all(c, workflow_version, workflow_name=None, tool_name=None, workflow_label=None, workflow_description=None, base_dir=".", subcomponents=None, force_update_string='yes'):
    """Deploy workflow XML and/or tool binaries to the ProteoSAFe server.

    Args:
        c: fabric Connection (yml config supplies paths/production keys)
        workflow_version (str): required version tag for this deployment
        workflow_name (str): deploy workflow XML when given
        tool_name (str): deploy the tool folder when given
        workflow_label/workflow_description (str): optional display metadata
        base_dir (str): checkout directory of the workflow
        subcomponents (list): XML components to deploy (default: all)
        force_update_string (str): 'yes' also updates the default version
    """
    production = "production" in c
    if workflow_version is None:
        exit("A workflow cannot be deployed without a version.")
    # Feature-branch deployments to non-production get a tagged version
    branch_name = read_branch(c, base_dir)
    if branch_name and not production:
        workflow_version = '{}+{}'.format(workflow_version, branch_name.replace(' ','_'))
    if workflow_name:
        update_workflow_xml(c, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, base_dir=base_dir, subcomponents=subcomponents, force_update_string=force_update_string)
    if tool_name:
        update_tools(c, tool_name, workflow_version, base_dir)
    if workflow_name:
        server_url_base = "https://{}/ProteoSAFe/index.jsp?params=".format(c.host)
        workflow_url = server_url_base + urllib.parse.quote(json.dumps({"workflow":workflow_name.upper(), "workflow_version":workflow_version}))
        print("SUCCESS:\n\n{} updated at with version:\n\n{}\n\n".format(workflow_name, workflow_url))
    # BUGFIX: guard on workflow_name - previously a tool-only deployment with
    # the default force_update_string crashed on workflow_name.upper().
    if workflow_name and force_update_string == 'yes':
        server_url_base = "https://{}/ProteoSAFe/index.jsp?params=".format(c.host)
        workflow_url = server_url_base + urllib.parse.quote(json.dumps({"workflow":workflow_name.upper()}))
        print("And default version :\n\n{}\n\n".format(workflow_url))
@task
def read_workflows_from_yml(c):
    """Collect (workflow_name, subcomponents) pairs from the deploy config.

    Each entry in c["workflows"] is either a plain workflow name (deploying
    the standard XML components) or a {name: [components]} mapping.
    """
    if "workflows" not in c:
        exit("Deploy all only works if a list of workflows to deploy is specified.")
    workflows_to_deploy = []
    for entry in c["workflows"]:
        name = None
        components = workflow_components
        if isinstance(entry, dict):
            # {name: [components]} mapping (with several keys the last wins,
            # matching the previous behavior)
            for key, value in entry.items():
                name = key
                components = value
        else:
            name = entry
        workflows_to_deploy.append((name, components))
    return workflows_to_deploy
def read_all_tools(base_dir = '.'):
    """Scan sibling checkouts for tool Makefiles.

    Returns:
        dict: TOOL_FOLDER_NAME -> (WORKFLOW_VERSION, submodule path)
    """
    all_tools = {}
    for submodule in glob.glob(os.path.join(base_dir, '*')):
        if 'CCMSDeployments' in submodule or not os.path.isdir(submodule):
            continue
        try:
            submodule_params = read_makefile(submodule)
            tool_name = submodule_params.get("TOOL_FOLDER_NAME")
            version = submodule_params["WORKFLOW_VERSION"]
        except Exception:
            # Not every sibling directory is a workflow checkout; skip ones
            # without a parseable Makefile. (Was a bare 'except', which also
            # swallowed KeyboardInterrupt/SystemExit.)
            continue
        if tool_name:
            all_tools[tool_name] = (version, submodule)
    return all_tools
@task
def deploy_all(c):
    """Deploy every workflow listed in the connection's yml config."""
    for wf_name, components in read_workflows_from_yml(c):
        update_workflow_from_makefile(c, wf_name, components)
@task
def read_dependencies(c, workflow_name, rewrite_string = 'no', base_dir = '.'):
    """Print a workflow's tool dependencies; optionally rewrite tool.xml."""
    known_tools = read_all_tools('..')
    output_updates(c, workflow_name, tool_name=None, base_dir=base_dir,
                   tools=known_tools, seen={},
                   rewrite=(rewrite_string == 'yes'))
    print('')
@task
def is_on_server(c, tool_name, tool_version):
    """Check whether a tool version directory already exists on the server.

    Returns:
        bool: True when `test -e` on the remote path succeeded
    """
    tool_path = os.path.join(c["paths"]["tools"], tool_name, tool_version)
    production_user = None
    if "production" in c:
        production_user = c["production"]["workflow_user"]
    probe = "test -e {}".format(tool_path)
    if production_user:
        result = c.sudo(probe, user=production_user, pty=True)
    else:
        result = c.run(probe)
    # Exit code 0 means the path exists
    return not result.return_code
def output_updates(c, workflow_name = None, tool_name = None, base_dir = '.', tools = None, seen = {}, rewrite = False):
    """Print dependency status for a workflow; optionally rewrite tool.xml.

    Compares each tool.xml dependency version against the local checkouts in
    *tools* and against what is deployed on the server. With rewrite=True,
    tool.xml is updated to the latest local versions.

    NOTE(review): 'seen' is a mutable default argument - state persists
    across calls unless the caller passes a fresh dict (read_dependencies
    does). 'tool_name' is unused here. Confirm both are intentional.
    """
    updates = {}
    if workflow_name:
        dependencies = output_tool_dependencies(workflow_name, base_dir)
        outputs = []
        for (dependency, version) in dependencies:
            status = "N/V"
            # Skip duplicate (tool, version) pairs already reported
            if dependency not in seen or (dependency in seen and seen[dependency] != version):
                update = False
                deployed = False
                if dependency in tools:
                    local_version, workflow = tools[dependency]
                    if version == local_version:
                        status = "{}".format(version)
                    else:
                        update = True
                        updates[dependency] = local_version
                        status = "{}->{}".format(version, local_version)
                    # Check the server for the *latest* local version
                    if version and is_on_server(c, dependency, local_version):
                        deployed = True
                    deployed_str = " (deployed)" if deployed else " (needs deployment)"
                    # if rewrite:
                    #     if not deployed:
                    #         update_workflow_from_makefile(c, workflow, workflow_components, True)
                    #         status += " (updated)"
                    #     else:
                    #         status += " (already deployed)"
                    # else:
                    #     status += deployed_str
                    status += deployed_str
                    outputs.append((update or deployed,"\t{} {}".format(dependency, status)))
                else:
                    outputs.append((update or deployed,"\t{} untracked".format(dependency)))
                seen[dependency] = version
        if not rewrite:
            print('\nDepenencies for {}:'.format(workflow_name))
            for output in outputs:
                print(output[1])
        else:
            # Only report the entries that changed or are deployed,
            # then persist the new versions into tool.xml
            print('\nUpdated depenencies for {}:'.format(workflow_name))
            for output in outputs:
                if output[0]:
                    print(output[1])
            rewrite_tool_w_new_dependencies(workflow_name, updates, base_dir = base_dir)
def output_tool_dependencies(workflow_name, base_dir = '.'):
    """List (tool_name, version) pairs referenced by a workflow's tool.xml.

    Any pathSet whose base does not contain '$base' is an external tool
    reference of the form '<tool.../...>/<version>'; a bare single-segment
    name gets the placeholder version 'NV'.
    """
    tool_xml = os.path.join(base_dir, workflow_name, 'tool.xml')
    root = ET.parse(tool_xml).getroot()
    dependencies = []
    for path_set in root.findall('pathSet'):
        base = path_set.attrib['base']
        if '$base' in base:
            continue
        parts = base.split('/')
        if len(parts) >= 2:
            # Everything before the final segment is the tool name
            dependencies.append(('/'.join(parts[:-1]), parts[-1]))
        else:
            dependencies.append((parts[0], "NV"))
    return dependencies
def rewrite_tool_w_new_dependencies(workflow_name, updates, rewrite = False, base_dir = '.'):
    """Point tool.xml pathSet bases at the updated tool versions in *updates*.

    The file is rewritten in place only when at least one base changed.

    NOTE(review): the 'rewrite' parameter and 'dependencies' local are
    unused. Also, tool_name here is only the FIRST path segment, while
    output_tool_dependencies joins all segments before the version - so
    multi-segment tool names (e.g. 'grp/sub') never match an 'updates'
    key, and the split_full_path[2:] tail assumes a two-segment
    '<tool>/<version>' prefix. Confirm before relying on this for nested
    tool paths.
    """
    changes_made = False
    dependencies = []
    local = os.path.join(base_dir, workflow_name, 'tool.xml')
    tree = ET.parse(local)
    root = tree.getroot()
    for path in root.findall('pathSet'):
        if not '$base' in path.get('base'):
            split_full_path = path.get('base').split('/')
            tool_name = split_full_path[0]
            if tool_name in updates and updates[tool_name]:
                changes_made = True
                # Preserve any sub-path that followed the version segment
                if len(split_full_path[2:]) == 0:
                    path.set('base',os.path.join(tool_name, updates[tool_name]))
                else:
                    path.set('base',os.path.join(tool_name, updates[tool_name], '/'.join(split_full_path[2:])))
    if changes_made:
        tree.write(local)
@task
def generate_manifest(c):
    """Print a one-line summary (kind, version, mtime) per configured workflow."""
    for wf_name, _components in read_workflows_from_yml(c):
        params = read_makefile(wf_name)
        # Flag entries that deploy only a tool or only workflow XML
        if "WORKFLOW_NAME" not in params:
            flag = " (Tool only)"
        elif "TOOL_FOLDER_NAME" not in params:
            flag = " (Workflow only)"
        else:
            flag = ""
        print('{}{}, version: {}, last updated: {}'.format(
            wf_name, flag, params['WORKFLOW_VERSION'], params['LAST_UPDATED']))
@task
def update_workflow_xml(c, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, base_dir=".", subcomponents=None, force_update_string='yes'):
    """Rewrite the workflow XML components locally, then upload them.

    The components are rewritten into a unique /tmp staging directory,
    validated, and copied to both the versioned path and (when
    force_update_string == 'yes') the active default version.
    """
    if not subcomponents:
        subcomponents = workflow_components
    force_update = force_update_string == 'yes'
    production = "production" in c
    production_user = c["production"]["workflow_user"] if production else None
    # Unique staging dir so concurrent deployments don't collide
    local_temp_path = os.path.join("/tmp/{}_{}_{}".format(workflow_name, workflow_version, str(uuid.uuid4())))
    c.local("mkdir -p {}".format(local_temp_path))
    for component in subcomponents:
        rewrite_workflow_component(component, base_dir, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, local_temp_path)
    #Performing Workflow Files Validation
    # Validation is advisory: failures are reported but do not block deploy.
    # BUGFIX: narrowed bare 'except:' so Ctrl-C/SystemExit still propagate.
    try:
        validate_workflow_xml(local_temp_path)
    except Exception:
        print("Validation Failed in Exception")
    base_workflow_path = os.path.join(c["paths"]["workflows"], workflow_name, "versions")
    versioned_workflow_path = os.path.join(c["paths"]["workflows"], workflow_name, "versions", workflow_version)
    if production_user:
        c.sudo("mkdir -p {}".format(base_workflow_path), user=production_user, pty=True)
        c.sudo("mkdir -p {}".format(versioned_workflow_path), user=production_user, pty=True)
    else:
        c.run("mkdir -p {}".format(base_workflow_path))
        c.run("mkdir -p {}".format(versioned_workflow_path))
    for component in subcomponents:
        if force_update:
            update_workflow_component(c, local_temp_path, workflow_name, component, production_user=production_user) #Adding to active default version
        update_workflow_component(c, local_temp_path, workflow_name, component, workflow_version=workflow_version, production_user=production_user) #Explicitly adding versioned
    if not production_user:
        # Non-production: make files world-writable for the workflow executor
        c.run("chmod 777 {}".format(versioned_workflow_path))
        c.run("chmod -R 777 {}".format(versioned_workflow_path))
        for xml_filename in workflow_components:
            c.run("chmod 777 {}".format(os.path.join(c["paths"]["workflows"], workflow_name, xml_filename)))
#Uploading the actual tools to the server
@task
def update_tools(c, workflow_name, workflow_version, base_dir="."):
    """Upload the tool binaries for one workflow version to the server."""
    remote_path = os.path.join(c["paths"]["tools"], workflow_name, workflow_version)
    production_user = c["production"]["tool_user"] if "production" in c else None
    if production_user:
        c.sudo("mkdir -p {}".format(remote_path), user=production_user, pty=True)
    else:
        c.run("mkdir -p {}".format(remote_path))
    local_path = os.path.join(base_dir, 'tools', workflow_name)
    update_folder(c, local_path, remote_path, production_user=production_user)
    if not production_user:
        # Non-production: world-writable so the executor can run the tools
        c.run("chmod 777 {}".format(remote_path))
        c.run("chmod -R 777 {}".format(remote_path))
#Utility Functions
def rewrite_workflow_component(component, base_dir, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, local_temp_path):
    """Rewrite one workflow XML component with workflow-specific values.

    Reads ``component`` from ``base_dir/workflow_name/`` and writes the rewritten
    tree into ``local_temp_path``.  What gets rewritten depends on the component:

    * input.xml / result.xml -- root ``id``/``version`` attributes; input.xml also
      gets its workflow-id/workflow-label elements updated and, when
      ``workflow_description`` is given, a description block inserted at the top.
    * flow.xml -- root ``name`` attribute.
    * tool.xml -- ``$base`` placeholders in pathSet ``base`` attributes are
      replaced with ``tool_name/workflow_version`` (``tool_name`` is required).

    Raises SystemExit when tool.xml contains a $base placeholder but no
    tool_name was supplied.
    """
    local = os.path.join(base_dir, workflow_name, component)
    temp = os.path.join(local_temp_path, component)
    tree = ET.parse(local)
    root = tree.getroot()
    if component in ['input.xml', 'result.xml']:
        root.set('id', workflow_name)
        root.set('version', workflow_version)
    if component in ['input.xml']:
        for path in root.findall('workflow-id'):
            path.text = workflow_name.upper()
        for path in root.findall('workflow-label'):
            if workflow_label:
                path.text = workflow_label
        if workflow_description is not None:
            # Insert at index 0 so the description renders first in the form.
            description_block = ET.Element("block")
            root.insert(0, description_block)
            description_block.attrib["label"] = "Workflow Description"
            description_row = ET.SubElement(description_block, "row")
            description_cell = ET.SubElement(description_row, "cell")
            description_label = ET.SubElement(description_cell, "label")
            description_label.attrib["prefix"] = "false"
            description_content = ET.SubElement(description_label, "content")
            description_content.text = '<div style="5px;padding:1px; border:2px;margin-left:8%;margin-right:8%;text-align:left">\
                <br><strong>{}</strong> \
                <hr style="margin-top:5px;margin-bottom:5px"> \
                {} \
                <hr style="margin-top:5px;margin-bottom:5px"> \
                <small>Workflow version {} </small> \
                </div>'.format(workflow_label if workflow_label else workflow_name.upper(), workflow_description, workflow_version)
    elif component in ['flow.xml']:
        root.set('name', workflow_name)
    elif component in ['tool.xml']:
        for path in root.findall('pathSet'):
            # BUGFIX: path.get('base') is None when the attribute is absent,
            # and "'$base' in None" raises TypeError.  Treat a missing
            # attribute as "no placeholder to rewrite".
            base_attr = path.get('base') or ''
            if '$base' in base_attr:
                if tool_name:
                    path.set('base', base_attr.replace('$base', os.path.join(tool_name, workflow_version)))
                else:
                    raise SystemExit("Cannot rewrite tool.xml without specifying tool name.")
    tree.write(temp)
def validate_workflow_xml(local_temp_path):
    """Run the workflow validator over the staged flow/binding/tool XML files."""
    # Imported lazily so the deploy tasks work even if the validator is absent.
    import workflow_validator
    workflow_obj = workflow_validator.Workflow(
        os.path.join(local_temp_path, "flow.xml"),
        os.path.join(local_temp_path, "binding.xml"),
        os.path.join(local_temp_path, "tool.xml"),
    )
    workflow_obj.validate()
    print(workflow_obj.printerrors())
#TODO: Validate that the xml is also a valid workflow
def update_workflow_component(c, local_temp_path, workflow_filename, component, workflow_version=None, production_user=None):
    """Push one rewritten component file to its server-side location.

    Without workflow_version the file lands in the workflow's root (the active
    default); with a version it lands under versions/<workflow_version>/.
    """
    local = os.path.join(local_temp_path, component)
    workflow_root = os.path.join(c["paths"]["workflows"], workflow_filename)
    if workflow_version:
        server = os.path.join(workflow_root, "versions", workflow_version, component)
    else:
        server = os.path.join(workflow_root, component)
    update_file(c, local, server, production_user=production_user)
#Update File
def update_file(c, local_path, final_path, production_user = None):
if production_user:
remote_temp_path = os.path.join("/tmp/{}_{}".format(local_path.replace("/", "_"), str(uuid.uuid4())))
c.put(local_path, remote_temp_path, preserve_mode=True)
c.sudo('cp {} {}'.format(remote_temp_path, | |
"""Staged parameters for senders comply with constraints"""
if len(self.senders) > 0:
valid, response = self.check_staged_complies_with_constraints("sender", self.senders)
if valid:
return test.PASS()
else:
return test.FAIL(response)
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_16(self, test):
"""Staged parameters for receivers comply with constraints"""
if len(self.receivers) > 0:
valid, response = self.check_staged_complies_with_constraints("receiver", self.receivers)
if valid:
return test.PASS()
else:
return test.FAIL(response)
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_17(self, test):
"""Sender patch response meets the schema"""
if len(self.senders) > 0:
valid, response = self.check_patch_response_valid("sender", self.senders)
if valid:
return test.PASS()
else:
return test.FAIL(response)
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_18(self, test):
"""Receiver patch response meets the schema"""
if len(self.receivers) > 0:
valid, response = self.check_patch_response_valid("receiver", self.receivers)
if valid:
return test.PASS()
else:
return test.FAIL(response)
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_19(self, test):
"""Sender invalid patch is refused"""
if len(self.senders) > 0:
valid, response = self.is05_utils.check_refuses_invalid_patch("sender", self.senders)
if valid:
return test.PASS()
else:
return test.FAIL(response)
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_20(self, test):
"""Receiver invalid patch is refused"""
if len(self.receivers) > 0:
valid, response = self.is05_utils.check_refuses_invalid_patch("receiver", self.receivers)
if valid:
return test.PASS()
else:
return test.FAIL(response)
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_21(self, test):
"""Sender id on staged receiver is changeable"""
if len(self.receivers) > 0:
for receiver in self.receivers:
url = "single/receivers/" + receiver + "/staged"
for id in [str(uuid.uuid4()), None]:
data = {"sender_id": id}
valid, response = self.is05_utils.checkCleanRequestJSON("PATCH", url, data=data)
if valid:
valid2, response2 = self.is05_utils.checkCleanRequestJSON("GET", url + "/")
if valid2:
try:
senderId = response['sender_id']
msg = "Failed to change sender_id at {}, expected {}, got {}".format(url, id, senderId)
if senderId == id:
pass
else:
return test.FAIL(msg)
except KeyError:
return test.FAIL("Did not find sender_id in response from {}".format(url))
else:
return test.FAIL(response2)
else:
return test.FAIL(response)
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_22(self, test):
"""Receiver id on staged sender is changeable"""
if len(self.senders) > 0:
for sender in self.senders:
url = "single/senders/" + sender + "/staged"
for id in [str(uuid.uuid4()), None]:
data = {"receiver_id": id}
valid, response = self.is05_utils.checkCleanRequestJSON("PATCH", url, data=data)
if valid:
valid2, response2 = self.is05_utils.checkCleanRequestJSON("GET", url + "/")
if valid2:
try:
receiverId = response['receiver_id']
msg = "Failed to change receiver_id at {}, expected {}, got {}".format(url,
id,
receiverId)
if receiverId == id:
pass
else:
return test.FAIL(msg)
except KeyError:
return test.FAIL("Did not find receiver_id in response from {}".format(url))
else:
return test.FAIL(response2)
else:
return test.FAIL(response)
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_23(self, test):
"""Sender transport parameters are changeable"""
if len(self.senders) > 0:
for sender in self.senders:
valid, values = self.is05_utils.generate_changeable_param("sender", sender,
self.transport_types[sender])
paramName = self.is05_utils.changeable_param_name(self.transport_types[sender])
if valid:
valid2, response2 = self.is05_utils.check_change_transport_param("sender", self.senders,
paramName, values, sender)
if valid2:
pass
else:
return test.FAIL(response2)
else:
return test.FAIL(values)
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_23_01(self, test):
"""Senders accept a patch request with empty leg(s) in transport parameters"""
if len(self.senders) > 0:
valid, response = self.check_patch_empty_transport_params("sender", self.senders)
if valid:
return test.PASS()
else:
return test.FAIL(response)
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_24(self, test):
"""Receiver transport parameters are changeable"""
if len(self.receivers) > 0:
for receiver in self.receivers:
valid, values = self.is05_utils.generate_changeable_param("receiver", receiver,
self.transport_types[receiver])
paramName = self.is05_utils.changeable_param_name(self.transport_types[receiver])
if valid:
valid2, response2 = self.is05_utils.check_change_transport_param("receiver", self.receivers,
paramName, values, receiver)
if valid2:
pass
else:
return test.FAIL(response2)
else:
return test.FAIL(values)
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_24_01(self, test):
"""Receivers accept a patch request with empty leg(s) in transport parameters"""
if len(self.receivers) > 0:
valid, response = self.check_patch_empty_transport_params("receiver", self.receivers)
if valid:
return test.PASS()
else:
return test.FAIL(response)
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_25(self, test):
"""Immediate activation of a sender is possible"""
if len(self.senders) > 0:
warn = ""
for sender in self.is05_utils.sampled_list(self.senders):
valid, response = self.is05_utils.check_activation("sender", sender,
self.is05_utils.check_perform_immediate_activation,
self.transport_types[sender],
True)
if valid:
if response and not warn:
warn = response
if self.transport_types[sender] == "urn:x-nmos:transport:rtp":
valid2, response2 = self.is05_utils.check_sdp_matches_params(sender)
if not valid2:
return test.FAIL("SDP file for Sender {} does not match the transport_params: {}"
.format(sender, response2))
else:
return test.FAIL(response)
if warn:
return test.WARNING(warn)
else:
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_26(self, test):
"""Immediate activation of a receiver is possible"""
if len(self.receivers) > 0:
warn = ""
for receiver in self.is05_utils.sampled_list(self.receivers):
valid, response = self.is05_utils.check_activation("receiver", receiver,
self.is05_utils.check_perform_immediate_activation,
self.transport_types[receiver])
if valid:
if response and not warn:
warn = response
else:
return test.FAIL(response)
if warn:
return test.WARNING(warn)
else:
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_27(self, test):
"""Relative activation of a sender is possible"""
if len(self.senders) > 0:
warn = ""
for sender in self.is05_utils.sampled_list(self.senders):
valid, response = self.is05_utils.check_activation("sender", sender,
self.is05_utils.check_perform_relative_activation,
self.transport_types[sender],
True)
if valid:
if response and not warn:
warn = response
if self.transport_types[sender] == "urn:x-nmos:transport:rtp":
valid2, response2 = self.is05_utils.check_sdp_matches_params(sender)
if not valid2:
return test.FAIL("SDP file for Sender {} does not match the transport_params: {}"
.format(sender, response2))
else:
return test.FAIL(response)
if warn:
return test.WARNING(warn)
else:
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_28(self, test):
"""Relative activation of a receiver is possible"""
if len(self.receivers) > 0:
warn = ""
for receiver in self.is05_utils.sampled_list(self.receivers):
valid, response = self.is05_utils.check_activation("receiver", receiver,
self.is05_utils.check_perform_relative_activation,
self.transport_types[receiver])
if valid:
if response and not warn:
warn = response
else:
return test.FAIL(response)
if warn:
return test.WARNING(warn)
else:
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_29(self, test):
"""Absolute activation of a sender is possible"""
if len(self.senders) > 0:
warn = ""
for sender in self.is05_utils.sampled_list(self.senders):
valid, response = self.is05_utils.check_activation("sender", sender,
self.is05_utils.check_perform_absolute_activation,
self.transport_types[sender],
True)
if valid:
if response and not warn:
warn = response
if self.transport_types[sender] == "urn:x-nmos:transport:rtp":
valid2, response2 = self.is05_utils.check_sdp_matches_params(sender)
if not valid2:
return test.FAIL("SDP file for Sender {} does not match the transport_params: {}"
.format(sender, response2))
else:
return test.FAIL(response)
if warn:
return test.WARNING(warn)
else:
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_30(self, test):
"""Absolute activation of a receiver is possible"""
if len(self.receivers) > 0:
warn = ""
for receiver in self.is05_utils.sampled_list(self.receivers):
valid, response = self.is05_utils.check_activation("receiver", receiver,
self.is05_utils.check_perform_absolute_activation,
self.transport_types[receiver])
if valid:
if response and not warn:
warn = response
else:
return test.FAIL(response)
if warn:
return test.WARNING(warn)
else:
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_31(self, test):
"""Sender active response schema is valid"""
if len(self.senders):
warn = ""
for sender in self.senders:
activeUrl = "single/senders/" + sender + "/active"
schema = self.get_schema(CONN_API_KEY, "GET", "/single/senders/{senderId}/active", 200)
valid, msg = self.compare_to_schema(schema, activeUrl)
if valid:
if msg and not warn:
warn = msg
else:
return test.FAIL(msg)
if warn:
return test.WARNING(warn)
else:
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_32(self, test):
"""Receiver active response schema is valid"""
if len(self.receivers):
warn = ""
for receiver in self.receivers:
activeUrl = "single/receivers/" + receiver + "/active"
schema = self.get_schema(CONN_API_KEY, "GET", "/single/receivers/{receiverId}/active", 200)
valid, msg = self.compare_to_schema(schema, activeUrl)
if valid:
if msg and not warn:
warn = msg
else:
return test.FAIL(msg)
if warn:
return test.WARNING(warn)
else:
return test.PASS()
else:
return test.UNCLEAR("Not tested. No resources found.")
    def test_33(self, test):
        """/bulk/ endpoint returns correct JSON"""
        # Superseded: the auto-generated tests now cover this endpoint.
        return test.NA("Replaced by 'auto' test")
def test_34(self, test):
"""GET on /bulk/senders returns 405"""
url = "bulk/senders"
error_code = 405
valid, response = self.is05_utils.checkCleanRequest("GET", url, code=error_code)
if valid:
valid, message = self.check_error_response("GET", response, error_code)
if valid:
return test.PASS()
else:
return test.FAIL(message)
else:
return test.FAIL(response)
def test_35(self, test):
"""GET on /bulk/receivers returns 405"""
url = "bulk/receivers"
error_code = 405
valid, response = self.is05_utils.checkCleanRequest("GET", url, code=error_code)
if valid:
valid, message = self.check_error_response("GET", response, error_code)
if valid:
return test.PASS()
else:
return test.FAIL(message)
else:
return test.FAIL(response)
def test_36(self, test):
"""Bulk interface can be used to change destination port on all senders"""
if len(self.senders) > 0:
valid, response = self.check_bulk_stage("sender", self.senders)
if valid:
return test.PASS()
else:
return test.FAIL(response)
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_37(self, test):
"""Bulk interface can be used to change destination port on all receivers"""
if len(self.receivers) > 0:
valid, response = self.check_bulk_stage("receiver", self.receivers)
if valid:
return test.PASS()
else:
return test.FAIL(response)
else:
return test.UNCLEAR("Not tested. No resources found.")
def test_38(self, test):
"""Number of legs | |
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import scipy.io as sio
import gym
import time
import random
import datetime
import os
import imageio
import glob
import tqdm
import json
def get_center_state(env):
    """Collect the global observation of all agents for the centre controller.

    Returns three numpy arrays:
      total_buffer_list -- (agent_num, 2, max_buffer_size), two values per pending task
      done_buffer_list  -- (agent_num, 2, max_buffer_size), two values per finished task
      pos_list          -- (agent_num, 2) agent positions

    Unused buffer slots stay zero.
    """
    num_agents = env.agent_num
    total_buffer_list = np.zeros([num_agents, 2, env.max_buffer_size])
    done_buffer_list = np.zeros([num_agents, 2, env.max_buffer_size])
    pos_list = np.zeros([num_agents, 2])
    for idx, agent in enumerate(env.agents):
        pos_list[idx] = agent.position
        for slot, item in enumerate(agent.total_data.values()):
            total_buffer_list[idx][0][slot] = item[0]
            total_buffer_list[idx][1][slot] = item[1]
        for slot, item in enumerate(agent.done_data):
            done_buffer_list[idx][0][slot] = item[0]
            done_buffer_list[idx][1][slot] = item[1]
    return total_buffer_list, done_buffer_list, pos_list
def discrete_circle_sample_count(n):
    """Enumerate the integer offsets inside a disc of radius ``n``.

    Returns (count, move_dict) where move_dict maps a contiguous action
    index to np.array([dy, dx]) with dx**2 + dy**2 <= n**2.
    """
    move_dict = {}
    index = 0
    for dx in range(-n, n + 1):
        # Largest |dy| that keeps (dx, dy) inside the disc.
        max_dy = int(np.floor(np.sqrt(n ** 2 - dx ** 2)))
        for dy in range(-max_dy, max_dy + 1):
            move_dict[index] = np.array([dy, dx])
            index += 1
    return index, move_dict
def center_actor(input_dim_list, cnn_kernel_size, move_r, kernel_num):
    """Build the centre actor network (Keras functional API).

    input_dim_list: [sensor_map, total_buffer, done_buffer, pos_list] input shapes.
    cnn_kernel_size: Conv2D kernel size for the map branch.
    move_r: move radius; the move head is pooled to a (2*move_r+1) grid.
    kernel_num: number of Conv2D filters; the caller passes the agent count so
        each filter becomes one agent's move map -- TODO confirm.

    Returns a keras.Model mapping the four observations to
    [move_out, exe_op, off_op, bandwidth_out].
    """
    sensor_map = keras.Input(shape=input_dim_list[0])
    total_buffer_list = keras.Input(shape=input_dim_list[1])
    done_buffer_list = keras.Input(shape=input_dim_list[2])
    pos_list = keras.Input(shape=input_dim_list[3])
    # CNN for map
    cnn_map = layers.Dense(1, activation='relu')(sensor_map)
    cnn_map = layers.Conv2D(filters=kernel_num, kernel_size=cnn_kernel_size, activation='relu', padding='same')(cnn_map)
    # Pool so each spatial cell of the output covers a (2*move_r+1) block;
    # assumes the map size divides evenly -- TODO confirm.
    cnn_map = layers.AveragePooling2D(pool_size=int(input_dim_list[0][0] / (2 * move_r + 1)))(cnn_map)
    cnn_map = layers.AlphaDropout(0.2)(cnn_map)
    # Move channel axis in front of the spatial axes: one map per filter.
    move_out = tf.transpose(cnn_map, perm=[0, 3, 1, 2])
    move_out = tf.expand_dims(move_out, axis=-1)
    # buffer
    total_buffer = tf.transpose(total_buffer_list, perm=[0, 1, 3, 2])
    total_buffer = layers.Dense(1, activation='relu')(total_buffer)
    total_buffer = tf.squeeze(total_buffer, axis=-1)
    # Per-agent softmax over buffer slots: which pending task to execute.
    exe_op = layers.Dense(input_dim_list[1][2], activation='softmax')(total_buffer)
    done_buffer = tf.transpose(done_buffer_list, perm=[0, 1, 3, 2])
    done_buffer = layers.Dense(1, activation='relu')(done_buffer)
    done_buffer = tf.squeeze(done_buffer, axis=-1)
    # Per-agent softmax over buffer slots: which finished task to offload.
    off_op = layers.Dense(input_dim_list[2][2], activation='softmax')(done_buffer)
    # center
    buffer_state = layers.Dense(1, activation='relu')(done_buffer_list)
    buffer_state = tf.squeeze(buffer_state, axis=-1)
    # pos list
    pos = layers.Dense(2, activation='relu')(pos_list)
    bandwidth_out = layers.concatenate([buffer_state, pos], axis=-1)
    # bandwidth_out = layers.AlphaDropout(0.2)(bandwidth_out)
    bandwidth_out = layers.Dense(1, activation='relu')(bandwidth_out)
    bandwidth_out = tf.squeeze(bandwidth_out, axis=-1)
    # bandwidth_out = layers.Dense(input_dim_list[2], activation='relu')(bandwidth_out)
    # Softmax across agents so the bandwidth shares sum to 1.
    bandwidth_out = layers.Softmax()(bandwidth_out)
    # bandwidth_out += 1 / (input_dim_list[3][0] * 5)
    # bandwidth_out = bandwidth_out / tf.reduce_sum(bandwidth_out, 1, keepdims=True)
    # bandwidth_out = bandwidth_out / tf.expand_dims(tf.reduce_sum(bandwidth_out, 1), axis=-1)
    model = keras.Model(inputs=[sensor_map, total_buffer_list, done_buffer_list, pos_list], outputs=[move_out, exe_op, off_op, bandwidth_out], name='center_actor_net')
    return model
# center critic
def center_critic(input_dim_list, cnn_kernel_size):
    """Build the centre critic network (Keras functional API).

    input_dim_list: [sensor_map, total_buffer, done_buffer, pos_list,
                     move, exe_op, off_op, bandwidth_vec] input shapes,
    i.e. the four observation inputs followed by the four actor outputs.
    Returns a keras.Model producing a single Q-value estimate.
    """
    sensor_map = keras.Input(shape=input_dim_list[0])
    total_buffer_list = keras.Input(shape=input_dim_list[1])
    done_buffer_list = keras.Input(shape=input_dim_list[2])
    pos_list = keras.Input(shape=input_dim_list[3])
    move = keras.Input(shape=input_dim_list[4])
    exe_op = keras.Input(shape=input_dim_list[5])
    off_op = keras.Input(shape=input_dim_list[6])
    bandwidth_vec = keras.Input(shape=input_dim_list[7])
    # map
    cnn_map = layers.Dense(1, activation='relu')(sensor_map)
    cnn_map = layers.Conv2D(filters=1, kernel_size=cnn_kernel_size, activation='relu', padding='same')(cnn_map)
    cnn_map = layers.AveragePooling2D(pool_size=cnn_kernel_size * 2)(cnn_map)
    cnn_map = layers.AlphaDropout(0.2)(cnn_map)
    cnn_map = layers.Flatten()(cnn_map)
    cnn_map = layers.Dense(2, activation='relu')(cnn_map)
    # buffer
    # Each buffer tensor is reduced twice: Dense(1)+squeeze over the last
    # axis, then again, leaving one feature per agent.
    total_buffer_state = layers.Dense(1, activation='relu')(total_buffer_list)
    total_buffer_state = tf.squeeze(total_buffer_state, axis=-1)
    total_buffer_state = layers.Dense(1, activation='relu')(total_buffer_state)
    total_buffer_state = tf.squeeze(total_buffer_state, axis=-1)
    buffer_state = layers.Dense(1, activation='relu')(done_buffer_list)
    buffer_state = tf.squeeze(buffer_state, axis=-1)
    buffer_state = layers.Dense(1, activation='relu')(buffer_state)
    buffer_state = tf.squeeze(buffer_state, axis=-1)
    # pos list
    pos = layers.Dense(1, activation='relu')(pos_list)
    pos = tf.squeeze(pos, axis=-1)
    # Action branches: flatten each actor output and embed with a Dense(1).
    move_mlp = layers.Flatten()(move)
    move_mlp = layers.Dense(1, activation='relu')(move_mlp)
    exe_mlp = layers.Flatten()(exe_op)
    exe_mlp = layers.Dense(1, activation='relu')(exe_mlp)
    off_mlp = layers.Flatten()(off_op)
    off_mlp = layers.Dense(1, activation='relu')(off_mlp)
    # bandvec
    # band_in = layers.Dense(2, activation='relu')(bandwidth_vec)
    r_out = layers.concatenate([cnn_map, total_buffer_state, buffer_state, pos, move_mlp, exe_mlp, off_mlp, bandwidth_vec])
    # r_out = layers.AlphaDropout(0.2)(r_out)
    # NOTE(review): relu on the final Q head clamps the estimate to >= 0 --
    # confirm rewards are non-negative, otherwise a linear head is usual.
    r_out = layers.Dense(1, activation='relu')(r_out)
    model = keras.Model(inputs=[sensor_map, total_buffer_list, done_buffer_list, pos_list, move, exe_op, off_op, bandwidth_vec], outputs=r_out, name='center_critic_net')
    return model
def update_target_net(model, target, tau=0.8):
    """Polyak-average ``model``'s weights into ``target`` in place.

    ``tau`` is the fraction of the *old* target weights that is kept:
    target = (1 - tau) * model + tau * target.  tau=0 copies model outright.
    """
    source_weights = model.get_weights()
    blended = [w * (1 - tau) + t * tau
               for w, t in zip(source_weights, target.get_weights())]
    target.set_weights(blended)
def circle_argmax(move_dist, move_r):
    """Return the (row, col) of the maximum of ``move_dist``, tie-broken
    towards the centre cell (move_r, move_r).

    Assumes move_dist's last axis has size 1 (it is squeezed away) --
    TODO confirm against the actor's move-head output shape.
    """
    # All positions attaining the maximum value.
    max_pos = np.argwhere(tf.squeeze(move_dist, axis=-1) == np.max(move_dist))
    # print(tf.squeeze(move_dist, axis=-1))
    # Euclidean distance of each candidate from the centre of the move grid.
    pos_dist = np.linalg.norm(max_pos - np.array([move_r, move_r]), axis=1)
    # print(max_pos)
    return max_pos[np.argmin(pos_dist)]
class ACAgent(object):
    def __init__(self, env, tau, gamma, lr_aa, lr_ac, lr_ca, lr_cc, batch, epsilon=0.2):
        """Set up observation/action shapes and the centre actor/critic nets.

        env: multi-agent environment (provides agents, map_size, move_r, ...).
        tau: Polyak factor for target-network updates.
        gamma: discount factor.
        lr_aa, lr_ac: per-agent actor/critic learning rates -- stored but not
            used in the visible methods; TODO confirm they are needed.
        lr_ca, lr_cc: centre actor/critic learning rates.
        batch: replay batch size.
        epsilon: probability of taking a random (exploration) action.
        """
        self.env = env
        self.agents = self.env.agents
        self.agent_num = self.env.agent_num
        # Two feature channels per buffer entry / map cell.
        self.index_dim = 2
        self.obs_r = self.env.obs_r
        self.state_map_shape = (self.obs_r * 2 + 1, self.obs_r * 2 + 1, self.index_dim)
        # NOTE(review): (2) and (1) below are plain ints, not tuples; Keras
        # accepts ints as shapes, so this is likely intentional.
        self.pos_shape = (2)
        self.band_shape = (1)
        self.sensor_map_shape = (self.env.map_size, self.env.map_size, self.index_dim)
        self.buffer_list_shape = (self.agent_num, self.index_dim, self.env.max_buffer_size)
        self.pos_list_shape = (self.agent_num, 2)
        self.bandvec_shape = (self.env.agent_num)
        self.op_shape = (self.agent_num, self.env.max_buffer_size)
        self.move_count, self.move_dict = discrete_circle_sample_count(self.env.move_r)
        self.movemap_shape = (self.agent_num, self.env.move_r * 2 + 1, self.env.move_r * 2 + 1)
        self.epsilon = epsilon
        # learning params
        self.tau = tau
        self.cnn_kernel_size = 3
        self.gamma = gamma
        self.lr_aa = lr_aa
        self.lr_ac = lr_ac
        self.lr_ca = lr_ca
        self.lr_cc = lr_cc
        self.batch_size = batch
        self.softmax_memory = {}
        # Replay buffer entries: [state, action, reward, next_state].
        self.center_memory = []
        # Fraction of each batch drawn from the most recent transitions.
        self.sample_prop = 1 / 4
        # net init
        self.center_actor = center_actor([self.sensor_map_shape, self.buffer_list_shape, self.buffer_list_shape, self.pos_list_shape], self.cnn_kernel_size, self.env.move_r, self.agent_num)
        self.center_critic = center_critic([self.sensor_map_shape, self.buffer_list_shape, self.buffer_list_shape, self.pos_list_shape,
                                            self.movemap_shape, self.op_shape, self.op_shape, self.bandvec_shape], self.cnn_kernel_size)
        self.target_center_actor = center_actor([self.sensor_map_shape, self.buffer_list_shape, self.buffer_list_shape, self.pos_list_shape], self.cnn_kernel_size, self.env.move_r, self.agent_num)
        # tau=0 -> hard copy, so targets start identical to the online nets.
        update_target_net(self.center_actor, self.target_center_actor, tau=0)
        self.target_center_critic = center_critic([self.sensor_map_shape, self.buffer_list_shape, self.buffer_list_shape, self.pos_list_shape,
                                                   self.movemap_shape, self.op_shape, self.op_shape, self.bandvec_shape], self.cnn_kernel_size)
        update_target_net(self.center_critic, self.target_center_critic, tau=0)
        self.center_actor_opt = keras.optimizers.Adam(learning_rate=lr_ca)
        self.center_critic_opt = keras.optimizers.Adam(learning_rate=lr_cc)
        self.summaries = {}
        keras.utils.plot_model(self.center_actor, 'logs/model_figs/baseline_actor.png', show_shapes=True)
        keras.utils.plot_model(self.center_critic, 'logs/model_figs/baseline_critic.png', show_shapes=True)
    def actor_act(self, epoch):
        """Take one environment step and return its final reward.

        Epsilon-greedy: with probability 1 - epsilon (and only after a warm-up
        of 16 epochs) the centre actor picks the action and the transition is
        stored in the replay buffer; otherwise a uniformly random action is
        taken and nothing is recorded.
        """
        tmp = random.random()
        if tmp >= self.epsilon and epoch >= 16:
            # agent act
            agent_act_list = []
            softmax_list = []
            cur_state_list = []
            band_vec = np.zeros(self.agent_num)
            # print(agent_act_list)
            # center act
            sensor_map, agent_map = self.env.get_statemap()
            total_buffer_list, done_buffer_list, pos_list = get_center_state(self.env)
            # Add a leading batch axis for the network inputs.
            sensor_map = tf.expand_dims(sensor_map, axis=0)
            total_buffer_list = tf.expand_dims(total_buffer_list, axis=0)
            done_buffer_list = tf.expand_dims(done_buffer_list, axis=0)
            # print(done_buffer_list)
            pos_list = tf.expand_dims(pos_list, axis=0)
            # NOTE(review): band_vec is built and expanded but never fed to the
            # network below -- looks like dead code; confirm before removing.
            band_vec = tf.expand_dims(band_vec, axis=0)
            # print([sensor_map.shape, total_buffer_list.shape, done_buffer_list.shape, pos_list.shape])
            action = self.center_actor.predict([sensor_map, total_buffer_list, done_buffer_list, pos_list])
            new_bandvec = action[3][0]
            # print('new_bandwidth{}'.format(new_bandvec[0]))
            for i, agent in enumerate(self.agents):
                move_dist = action[0][0][i]
                # print(move_dist)
                # print(move_dist.shape)
                exe_dist = action[1][0][i]
                off_dist = action[2][0][i]
                # print(op_dist.shape)
                # move_ori = np.unravel_index(np.argmax(move_dist), move_dist.shape)
                # Pick the max-probability cell closest to the grid centre,
                # then convert from grid coordinates to a (dx, dy) offset.
                move_ori = circle_argmax(move_dist, self.env.move_r)
                move = [move_ori[1] - self.env.move_r, move_ori[0] - self.env.move_r]
                # One-hot task choices for execution and offloading.
                execution = [0] * agent.max_buffer_size
                offloading = [0] * agent.max_buffer_size
                execution[np.argmax(exe_dist)] = 1
                offloading[np.argmax(off_dist)] = 1
                move_softmax = np.zeros(move_dist.shape)
                move_softmax[move_ori] = 1
                move_softmax = tf.expand_dims(move_softmax, axis=0)
                # move_softmax = tf.expand_dims(move, axis=0)
                agent_act_list.append([move, execution, offloading])
            new_state_map, new_rewards, done, info = self.env.step(agent_act_list, new_bandvec)
            new_sensor_map, agent_map = self.env.get_statemap()
            new_total_buffer_list, new_done_buffer_list, new_pos_list = get_center_state(self.env)
            new_total_buffer_list = tf.expand_dims(new_total_buffer_list, axis=0)
            new_done_buffer_list = tf.expand_dims(new_done_buffer_list, axis=0)
            new_pos_list = tf.expand_dims(new_pos_list, axis=0)
            # record memory
            self.center_memory.append([[sensor_map, total_buffer_list, done_buffer_list, pos_list], action, new_rewards[-1],
                                       [new_sensor_map, new_total_buffer_list, new_done_buffer_list, new_pos_list]])
        else:
            # random action
            # agents
            agent_act_list = []
            for i, agent in enumerate(self.agents):
                move = random.sample(list(self.move_dict.values()), 1)[0]
                execution = [0] * agent.max_buffer_size
                offloading = [0] * agent.max_buffer_size
                execution[np.random.randint(agent.max_buffer_size)] = 1
                offloading[np.random.randint(agent.max_buffer_size)] = 1
                agent_act_list.append([move, execution, offloading])
            # center
            # Random bandwidth split, normalised to sum to 1.
            new_bandvec = np.random.rand(self.agent_num)
            new_bandvec = new_bandvec / np.sum(new_bandvec)
            new_state_maps, new_rewards, done, info = self.env.step(agent_act_list, new_bandvec)
        return new_rewards[-1]
# @tf.function(experimental_relax_shapes=True)
    def replay(self):
        """One DDPG-style training step for the centre actor/critic.

        Samples a batch (a fixed fraction of the most recent transitions plus
        a random draw from the last 2*batch_size), computes TD targets with
        the target networks, then updates critic and actor in turn.
        NOTE(review): center_memory is never trimmed, so it grows without
        bound over a long run -- confirm whether a cap is needed.
        """
        # center replay
        if len(self.center_memory) < self.batch_size:
            return
        # Recent slice + random sample; the two may overlap -- presumably
        # acceptable here, verify if exact sampling matters.
        center_samples = self.center_memory[-int(self.batch_size * self.sample_prop):] + random.sample(self.center_memory[-self.batch_size * 2:], int(self.batch_size * (1 - self.sample_prop)))
        sensor_map = np.vstack([sample[0][0] for sample in center_samples])
        total_buffer_list = np.vstack([sample[0][1] for sample in center_samples])
        done_buffer_list = np.vstack([sample[0][2] for sample in center_samples])
        pos_list = np.vstack([sample[0][3] for sample in center_samples])
        # print(center_samples[0][1])
        # act = np.vstack([sample[1] for sample in center_samples])
        move = np.vstack([sample[1][0] for sample in center_samples])
        exe = np.vstack([sample[1][1] for sample in center_samples])
        off = np.vstack([sample[1][2] for sample in center_samples])
        band_act = np.vstack([sample[1][3] for sample in center_samples])
        c_reward = tf.expand_dims([sample[2] for sample in center_samples], axis=-1)
        # new states
        new_sensor_map = np.stack([sample[3][0] for sample in center_samples], axis=0)
        new_total_buffer_list = np.vstack([sample[3][1] for sample in center_samples])
        new_done_buffer_list = np.vstack([sample[3][2] for sample in center_samples])
        new_pos_list = np.vstack([sample[3][3] for sample in center_samples])
        # next actions & reward
        new_c_actions = self.target_center_actor.predict([new_sensor_map, new_total_buffer_list, new_done_buffer_list, new_pos_list])
        cq_future = self.target_center_critic.predict([new_sensor_map, new_total_buffer_list, new_done_buffer_list, new_pos_list,
                                                       new_c_actions[0], new_c_actions[1], new_c_actions[2], new_c_actions[3]])
        # TD target: r + gamma * Q'(s', a'); no terminal masking is applied.
        c_target_qs = c_reward + cq_future * self.gamma
        self.summaries['cq_val'] = np.average(c_reward[0])
        # train center critic
        with tf.GradientTape() as tape:
            tape.watch(self.center_critic.trainable_variables)
            cq_values = self.center_critic([sensor_map, total_buffer_list, done_buffer_list, pos_list, move, exe, off, band_act])
            cc_loss = tf.reduce_mean(tf.math.square(cq_values - tf.cast(c_target_qs, dtype=tf.float32)))
            # cc_loss = tf.reduce_mean(tf.math.square(cq_values - c_target_qs))
        cc_grad = tape.gradient(cc_loss, self.center_critic.trainable_variables)
        self.center_critic_opt.apply_gradients(zip(cc_grad, self.center_critic.trainable_variables))
        # train center actor
        # NOTE(review): the actor loss is +mean(Q); DDPG normally *maximises*
        # Q (i.e. minimises -mean(Q)).  Confirm the sign is intended.
        with tf.GradientTape() as tape:
            tape.watch(self.center_actor.trainable_variables)
            c_act = self.center_actor([sensor_map, total_buffer_list, done_buffer_list, pos_list])
            ca_loss = tf.reduce_mean(self.center_critic([sensor_map, total_buffer_list, done_buffer_list, pos_list, c_act[0], c_act[1], c_act[2], c_act[3]]))
            # print(self.center_critic([sensor_maps, agent_maps, c_act]))
        ca_grad = tape.gradient(ca_loss, self.center_actor.trainable_variables)
        # print(ca_grad)
        self.center_actor_opt.apply_gradients(zip(ca_grad, self.center_actor.trainable_variables))
        # print(ca_loss)
        self.summaries['center-critic_loss'] = cc_loss
        self.summaries['center-actor_loss'] = ca_loss
def save_model(self, episode, time_str):
self.center_actor.save('logs/models/{}/center-actor_episode{}.h5'.format(time_str, episode))
self.center_critic.save('logs/models/{}/center-critic_episode{}.h5'.format(time_str, episode))
# @tf.function
def train(self, max_epochs=2000, max_step=500, up_freq=8, render=False, render_freq=1):
cur_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
train_log_dir = 'logs/fit/' + cur_time
env_log_dir = 'logs/env/env' + cur_time
record_dir = 'logs/records/' + cur_time
os.mkdir(env_log_dir)
os.mkdir(record_dir)
summary_writer = tf.summary.create_file_writer(train_log_dir)
# tf.summary.trace_on(graph=True, profiler=True)
os.makedirs('logs/models/' + cur_time)
done, episode, steps, epoch, total_reward = False, 0, 0, 0, 0
finish_length = []
finish_size = []
sensor_ages = []
# summary_record = []
while epoch < max_epochs:
print('epoch%s' % epoch)
if render and (epoch % 32 == 1):
self.env.render(env_log_dir, epoch, True)
if steps >= max_step:
# self.env.world.finished_data |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.