input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
['If user with email exists, reset token will be issued.'])
# Submit password reset again
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json()['notes'],
['If user with email exists, reset token will be issued.'])
# Verify the first token doesn't work
first_token = Token.objects.all()[0]
url = "/v1/tokens/" + first_token.token
data = {'password': '<PASSWORD>'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(user.password, '<PASSWORD>')
# Now reset with the second token
second_token = Token.objects.all()[1]
url = "/v1/tokens/" + second_token.token
data = {'password': '<PASSWORD>'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(user.password, '<PASSWORD>')
def test_reset_user_no_existing(self):
    """
    Actions should be successful, so usernames are not exposed.
    """
    setup_identity_cache()

    # Reset a user that does not exist; the API must not reveal that.
    response = self.client.post(
        "/v1/actions/ResetPassword",
        {'email': "<EMAIL>"},
        format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    expected_notes = [
        'If user with email exists, reset token will be issued.']
    self.assertEqual(response.json()['notes'], expected_notes)
def test_notification_createproject(self):
    """
    CreateProject should create a notification.
    We should be able to grab it.
    """
    setup_identity_cache()

    # Submit the project creation task.
    response = self.client.post(
        "/v1/actions/CreateProject",
        {'project_name': "test_project", 'email': "<EMAIL>"},
        format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)

    task = Task.objects.all()[0]

    # The notification list (admin-only view) should reference the task.
    admin_headers = {
        'project_name': "test_project",
        'project_id': "test_project_id",
        'roles': "admin,_member_",
        'username': "<EMAIL>",
        'user_id': "test_user_id",
        'authenticated': True
    }
    response = self.client.get("/v1/notifications", headers=admin_headers)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        response.json()['notifications'][0]['task'], task.uuid)
def test_duplicate_tasks_new_project(self):
    """
    Ensure we can't submit duplicate tasks
    """
    setup_identity_cache()

    url = "/v1/actions/CreateProject"
    first = {'project_name': "test_project", 'email': "<EMAIL>"}
    # First submission succeeds; an identical resubmission conflicts.
    self.assertEqual(
        self.client.post(url, first, format='json').status_code,
        status.HTTP_200_OK)
    self.assertEqual(
        self.client.post(url, first, format='json').status_code,
        status.HTTP_409_CONFLICT)
    # A different project name is not a duplicate.
    second = {'project_name': "test_project_2", 'email': "<EMAIL>"}
    self.assertEqual(
        self.client.post(url, second, format='json').status_code,
        status.HTTP_200_OK)
def test_duplicate_tasks_new_user(self):
    """
    Ensure we can't submit duplicate tasks
    """
    project = fake_clients.FakeProject(name="test_project")
    setup_identity_cache(projects=[project])

    url = "/v1/actions/InviteUser"
    headers = {
        'project_name': "test_project",
        'project_id': project.id,
        'roles': "project_admin,_member_,project_mod",
        'username': "<EMAIL>",
        'user_id': "test_user_id",
        'authenticated': True
    }
    # Each invitee works once; resubmitting the same payload conflicts.
    for email in ("<EMAIL>", "<EMAIL>"):
        data = {'email': email, 'roles': ["_member_"],
                'project_id': project.id}
        response = self.client.post(
            url, data, format='json', headers=headers)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json(), {'notes': ['created token']})
        response = self.client.post(
            url, data, format='json', headers=headers)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_return_task_id_if_admin(self):
    """
    Confirm that the task id is returned when admin.
    """
    user = fake_clients.FakeUser(
        name="<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
    setup_identity_cache(users=[user])

    admin_headers = {
        'project_name': "test_project",
        'project_id': "test_project_id",
        'roles': "admin,_member_",
        'username': "<EMAIL>",
        'user_id': "test_user_id",
        'authenticated': True
    }
    response = self.client.post(
        "/v1/actions/ResetPassword",
        {'email': "<EMAIL>"},
        format='json', headers=admin_headers)
    self.assertEqual(response.status_code, status.HTTP_200_OK)

    # make sure the task is actually valid
    task = Task.objects.all()[0]
    self.assertTrue(all(action.valid for action in task.actions))
    # Admins get the task uuid back in the response body.
    self.assertEqual(response.json()['task'], task.uuid)
def test_return_task_id_if_admin_fail(self):
    """
    Confirm that the task id is not returned unless admin.
    """
    user = fake_clients.FakeUser(
        name="<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
    setup_identity_cache(users=[user])

    member_headers = {
        'project_name': "test_project",
        'project_id': "test_project_id",
        'roles': "_member_",
        'username': "<EMAIL>",
        'user_id': "test_user_id",
        'authenticated': True
    }
    response = self.client.post(
        "/v1/actions/ResetPassword",
        {'email': "<EMAIL>"},
        format='json', headers=member_headers)
    self.assertEqual(response.status_code, status.HTTP_200_OK)

    # make sure the task is actually valid
    task = Task.objects.all()[0]
    self.assertTrue(all(action.valid for action in task.actions))
    # Non-admins must not see the task uuid.
    self.assertFalse(response.json().get('task'))
def test_update_email_task(self):
    """
    Ensure the update email workflow goes as expected.
    Create task, create token, submit token.
    """
    user = fake_clients.FakeUser(
        name="<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
    setup_identity_cache(users=[user])

    headers = {
        'project_name': "test_project",
        'project_id': "test_project_id",
        'roles': "project_admin,_member_,project_mod",
        'username': "<EMAIL>",
        'user_id': user.id,
        'authenticated': True
    }
    response = self.client.post(
        "/v1/actions/UpdateEmail",
        {'new_email': "<EMAIL>"},
        format='json', headers=headers)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.json(), {'notes': ['created token']})

    # Confirming the emitted token applies the email change.
    token = Token.objects.all()[0]
    response = self.client.post(
        "/v1/tokens/" + token.token, {'confirm': True}, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(user.name, '<EMAIL>')
# Extend the update_email task with SendAdditionalEmailAction configured to
# notify the current (old) address when the workflow starts.
@modify_dict_settings(TASK_SETTINGS=[
    {'key_list': ['update_email', 'additional_actions'],
     'operation': 'append',
     'value': ['SendAdditionalEmailAction']},
    {'key_list': ['update_email', 'action_settings',
                  'SendAdditionalEmailAction', 'initial'],
     'operation': 'update',
     'value': {
         'subject': 'email_update_additional',
         'template': 'email_update_started.txt',
         'email_roles': [],
         'email_current_user': True,
     }
     }
])
def test_update_email_task_send_email_to_current_user(self):
    """
    Tests the email update workflow, and ensures that when setup
    to send a confirmation email to the old email address it does.
    """
    user = fake_clients.FakeUser(
        name="<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
    setup_identity_cache(users=[user])

    url = "/v1/actions/UpdateEmail"
    headers = {
        'project_name': "test_project",
        'project_id': "test_project_id",
        'roles': "project_admin,_member_,project_mod",
        'username': "<EMAIL>",
        'user_id': user.id,
        'authenticated': True
    }

    data = {'new_email': "<EMAIL>"}
    response = self.client.post(url, data, format='json', headers=headers)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data, {'notes': ['created token']})

    # Two emails at this point: the additional notification first,
    # then the token email.
    self.assertEqual(len(mail.outbox), 2)
    self.assertEqual(mail.outbox[0].to, ['<EMAIL>'])
    self.assertEqual(mail.outbox[0].subject, 'email_update_additional')
    self.assertEqual(mail.outbox[1].to, ['<EMAIL>'])
    self.assertEqual(mail.outbox[1].subject, 'email_update_token')

    new_token = Token.objects.all()[0]
    url = "/v1/tokens/" + new_token.token

    data = {'confirm': True}
    response = self.client.post(url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(user.name, '<EMAIL>')
    # Submitting the token sends one more email.
    self.assertEqual(len(mail.outbox), 3)
# Same SendAdditionalEmailAction setup as above, but run with
# USERNAME_IS_EMAIL=False so the user's email must come from the
# 'email' header rather than the username.
@modify_dict_settings(TASK_SETTINGS=[
    {'key_list': ['update_email', 'additional_actions'],
     'operation': 'append',
     'value': ['SendAdditionalEmailAction']},
    {'key_list': ['update_email', 'action_settings',
                  'SendAdditionalEmailAction', 'initial'],
     'operation': 'update',
     'value': {
         'subject': 'email_update_additional',
         'template': 'email_update_started.txt',
         'email_roles': [],
         'email_current_user': True}
     }
])
@override_settings(USERNAME_IS_EMAIL=False)
def test_update_email_task_send_email_current_name_not_email(self):
    """
    Tests the email update workflow when USERNAME_IS_EMAIL=False, and
    ensures that when setup to send a confirmation email to the old
    email address it does.
    """
    user = fake_clients.FakeUser(
        name="nkdfslnkls", password="<PASSWORD>", email="<EMAIL>")
    setup_identity_cache(users=[user])

    url = "/v1/actions/UpdateEmail"
    headers = {
        'project_name': "test_project",
        'project_id': "test_project_id",
        'roles': "project_admin,_member_,project_mod",
        'username': "nkdfslnkls",
        'user_id': user.id,
        'authenticated': True,
        # Separate email header because the username is not an address.
        'email': '<EMAIL>',
    }

    data = {'new_email': "<EMAIL>"}
    response = self.client.post(url, data, format='json', headers=headers)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data, {'notes': ['created token']})

    # Additional notification first, then the token email.
    self.assertEqual(len(mail.outbox), 2)
    self.assertEqual(mail.outbox[0].to, ['<EMAIL>'])
    self.assertEqual(mail.outbox[0].subject, 'email_update_additional')
    self.assertEqual(mail.outbox[1].to, ['<EMAIL>'])
    self.assertEqual(mail.outbox[1].subject, 'email_update_token')

    new_token = Token.objects.all()[0]
    url = "/v1/tokens/" + new_token.token

    data = {'confirm': True}
    response = self.client.post(url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # Submitting the token sends one more email.
    self.assertEqual(len(mail.outbox), 3)
def test_update_email_task_invalid_email(self):
    """A malformed new_email value is rejected with a field error."""
    user = fake_clients.FakeUser(
        name="<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
    setup_identity_cache(users=[user])

    headers = {
        'project_name': "test_project",
        'project_id': "test_project_id",
        'roles': "project_admin,_member_,project_mod",
        'username': "test<EMAIL>",
        'user_id': user.id,
        'authenticated': True
    }
    response = self.client.post(
        "/v1/actions/UpdateEmail",
        {'new_email': "<EMAIL>"},
        format='json', headers=headers)
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    expected = {'errors': {'new_email': [u'Enter a valid email address.']}}
    self.assertEqual(response.json(), expected)
@override_settings(USERNAME_IS_EMAIL=True)
def test_update_email_pre_existing_user_with_email(self):
    """Changing to an address already used by another user must fail."""
    user = fake_clients.FakeUser(
        name="<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
    user2 = fake_clients.FakeUser(
        name="<EMAIL>", password="<PASSWORD>",
        email="<EMAIL>")
    setup_identity_cache(users=[user, user2])

    headers = {
        'project_name': "test_project",
        'project_id': "test_project_id",
        'roles': "project_admin,_member_,project_mod",
        'username': "<EMAIL>",
        'user_id': "test_user_id",
        'authenticated': True,
        'project_domain_id': 'default',
    }
    response = self.client.post(
        "/v1/actions/UpdateEmail",
        {'new_email': "<EMAIL>"},
        format='json', headers=headers)
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(response.json(), {'errors': ['actions invalid']})
    # Nothing should have been created or sent.
    self.assertEqual(len(Token.objects.all()), 0)
    self.assertEqual(len(mail.outbox), 0)
@override_settings(USERNAME_IS_EMAIL=False)
def test_update_email_user_with_email_username_not_email(self):
    """When usernames are not emails, a clashing email is allowed."""
    user = fake_clients.FakeUser(
        name="test", password="<PASSWORD>", email="<EMAIL>")
    user2 = fake_clients.FakeUser(
        name="new_test", password="<PASSWORD>",
        email="<EMAIL>")
    setup_identity_cache(users=[user, user2])

    headers = {
        'project_name': "test_project",
        'project_id': "test_project_id",
        'roles': "project_admin,_member_,project_mod",
        'username': "<EMAIL>",
        'user_id': user.id,
        'authenticated': True
    }
    response = self.client.post(
        "/v1/actions/UpdateEmail",
        {'new_email': "<EMAIL>"},
        format='json', headers=headers)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.json(), {'notes': ['created token']})
    self.assertEqual(len(mail.outbox), 1)

    # Confirm the change via the token.
    token = Token.objects.all()[0]
    response = self.client.post(
        "/v1/tokens/" + token.token, {'confirm': True}, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(user.email, '<EMAIL>')
def test_update_email_task_not_authenticated(self):
    """
    Ensure that an unauthenticated user cant access the endpoint.
    """
    user = fake_clients.FakeUser(
        name="<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
    setup_identity_cache(users=[user])

    # No auth headers at all.
    response = self.client.post(
        "/v1/actions/UpdateEmail",
        {'new_email': "<EMAIL>"},
        format='json', headers={})
    self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@override_settings(USERNAME_IS_EMAIL=False)
def test_update_email_task_username_not_email(self):
    """Updating the email must leave a non-email username untouched."""
    user = fake_clients.FakeUser(
        name="test_user", password="123", email="<EMAIL>")
    setup_identity_cache(users=[user])

    headers = {
        'project_name': "test_project",
        'project_id': "test_project_id",
        'roles': "project_admin,_member_,project_mod",
        'username': "test_user",
        'user_id': user.id,
        'authenticated': True
    }
    response = self.client.post(
        "/v1/actions/UpdateEmail",
        {'new_email': "<EMAIL>"},
        format='json', headers=headers)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.json(), {'notes': ['created token']})

    token = Token.objects.all()[0]
    response = self.client.post(
        "/v1/tokens/" + token.token, {'confirm': True}, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # Username unchanged, only the email was updated.
    self.assertEqual(user.name, "test_user")
    self.assertEqual(user.email, '<EMAIL>')
# Tests for USERNAME_IS_EMAIL=False
@override_settings(USERNAME_IS_EMAIL=False)
def test_invite_user_email_not_username(self):
    """
    Invites a user where the email is different to the username.
    """
    project = fake_clients.FakeProject(name="test_project")
    setup_identity_cache(projects=[project])

    headers = {
        'project_name': "test_project",
        'project_id': project.id,
        'roles': "project_admin,_member_,project_mod",
        'username': "user",
        'user_id': "test_user_id",
        'authenticated': True
    }
    response = self.client.post(
        "/v1/actions/InviteUser",
        {'username': 'new_user', 'email': "<EMAIL>",
         'roles': ["_member_"], 'project_id': project.id},
        format='json', headers=headers)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.json(), {'notes': ['created token']})

    # Invite email goes to the supplied address.
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].subject, 'invite_user')
    self.assertEqual(mail.outbox[0].to[0], '<EMAIL>')

    # Completing the token creates the user under the given username.
    token = Token.objects.all()[0]
    response = self.client.post(
        "/v1/tokens/" + token.token, {'password': '<PASSWORD>'},
        format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(len(mail.outbox), 2)
    self.assertEqual(
        fake_clients.identity_cache['new_users'][0].name,
        'new_user')
@override_settings(USERNAME_IS_EMAIL=False)
def test_reset_user_username_not_email(self):
"""
Ensure the reset user workflow goes as expected.
Create task + create token, submit token.
"""
user = fake_clients.FakeUser(
name="test_user", password="<PASSWORD>", email="<EMAIL>")
setup_identity_cache(users=[user])
url = "/v1/actions/ResetPassword"
# NOTE(amelia): Requiring both username and email here may be
# a slight issue for various UIs as typically a
# forgotten password screen only asks for the
# email address, however there isn't a very
# good way to address this as keystone doesn't
# store emails in their own field
# Currently this is | |
"""
Copyright (c) 2021 Aiven Ltd
See LICENSE for details
"""
from astacus.common.exceptions import TransientException
from astacus.common.ipc import BackupManifest, NodeResult, Plugin, SnapshotFile, SnapshotResult, SnapshotState
from astacus.common.op import Op
from astacus.common.progress import Progress
from astacus.coordinator.cluster import Cluster
from astacus.coordinator.config import CoordinatorNode
from astacus.coordinator.plugins.base import BackupManifestStep, SnapshotStep, StepFailedError, StepsContext
from astacus.coordinator.plugins.clickhouse.client import ClickHouseClient, StubClickHouseClient
from astacus.coordinator.plugins.clickhouse.config import ClickHouseConfiguration, ClickHouseNode
from astacus.coordinator.plugins.clickhouse.manifest import AccessEntity, ClickHouseManifest, ReplicatedDatabase, Table
from astacus.coordinator.plugins.clickhouse.steps import (
AttachMergeTreePartsStep, ClickHouseManifestStep, CreateClickHouseManifestStep, DistributeReplicatedPartsStep,
FreezeTablesStep, RemoveFrozenTablesStep, RestoreAccessEntitiesStep, RestoreReplicatedDatabasesStep,
RetrieveAccessEntitiesStep, RetrieveDatabasesAndTablesStep, SyncReplicasStep, TABLES_LIST_QUERY, UnfreezeTablesStep,
ValidateConfigStep
)
from astacus.coordinator.plugins.clickhouse.zookeeper import FakeZooKeeperClient, ZooKeeperClient
from pathlib import Path
from typing import Optional, Type, Union
from unittest import mock
import asyncio
import datetime
import httpx
import json
import pytest
import respx
import sys
import uuid
# All tests in this module belong to the clickhouse test group.
pytestmark = [pytest.mark.clickhouse]

# One access entity of each supported type letter (policy, quota, role,
# settings profile, user). The user name is deliberately non-ASCII to
# exercise path escaping.
SAMPLE_ENTITIES = [
    AccessEntity(type="P", uuid=uuid.UUID(int=1), name="a_policy", attach_query="ATTACH ROW POLICY ..."),
    AccessEntity(type="Q", uuid=uuid.UUID(int=2), name="a_quota", attach_query="ATTACH QUOTA ..."),
    AccessEntity(type="R", uuid=uuid.UUID(int=3), name="a_role", attach_query="ATTACH ROLE ..."),
    AccessEntity(type="S", uuid=uuid.UUID(int=4), name="a_settings_profile", attach_query="ATTACH SETTINGS PROFILE ..."),
    AccessEntity(type="U", uuid=uuid.UUID(int=5), name="josé", attach_query="ATTACH USER ..."),
]
SAMPLE_DATABASES = [
    ReplicatedDatabase(name="db-one"),
    ReplicatedDatabase(name="db-two"),
]
# A mix of Replicated and plain MergeTree tables; the first table has
# cross-database dependencies to exercise dependency ordering.
SAMPLE_TABLES = [
    Table(
        database="db-one",
        name="table-uno",
        uuid=uuid.UUID("00000000-0000-0000-0000-100000000001"),
        engine="ReplicatedMergeTree",
        create_query="CREATE TABLE db-one.table-uno ...",
        dependencies=[("db-one", "table-dos"), ("db-two", "table-eins")]
    ),
    Table(
        database="db-one",
        name="table-dos",
        uuid=uuid.UUID("00000000-0000-0000-0000-100000000002"),
        engine="MergeTree",
        create_query="CREATE TABLE db-one.table-dos ...",
    ),
    Table(
        database="db-two",
        name="table-eins",
        uuid=uuid.UUID("00000000-0000-0000-0000-200000000001"),
        engine="ReplicatedMergeTree",
        create_query="CREATE TABLE db-two.table-eins ...",
    )
]
# Manifest that bundles all of the above; reused by several tests.
SAMPLE_MANIFEST = ClickHouseManifest(
    access_entities=SAMPLE_ENTITIES,
    replicated_databases=SAMPLE_DATABASES,
    tables=SAMPLE_TABLES,
)
def mock_clickhouse_client() -> mock.Mock:
    """Return a ClickHouseClient mock whose execute() can be awaited.

    On Python 3.8+, Mock with a spec automatically creates AsyncMock for
    async methods; on older versions we must make execute() return a
    pre-resolved Future so `await client.execute(...)` works.
    """
    mock_client = mock.Mock(spec_set=ClickHouseClient)
    if sys.version_info < (3, 8):
        # Compat shim for Python < 3.8 (no AsyncMock auto-speccing).
        awaitable = asyncio.Future()
        awaitable.set_result(mock.Mock(spec_set=list))
        mock_client.execute.return_value = awaitable
    return mock_client
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "clickhouse_count,coordinator_count,success", [
        (3, 3, True),
        (0, 0, True),
        (1, 2, False),
        (0, 1, False),
        (2, 1, False),
        (1, 0, False),
    ]
)
async def test_validate_step_require_equal_nodes_count(clickhouse_count: int, coordinator_count: int, success: bool) -> None:
    """The config check passes only when the ClickHouse node count
    matches the coordinator node count."""
    config = ClickHouseConfiguration(
        nodes=[ClickHouseNode(host="::1", port=9000) for _ in range(clickhouse_count)]
    )
    cluster = Cluster(nodes=[CoordinatorNode(url=f"node{i}") for i in range(coordinator_count)])
    step = ValidateConfigStep(clickhouse=config)
    if not success:
        with pytest.raises(StepFailedError):
            await step.run_step(cluster, StepsContext())
    else:
        await step.run_step(cluster, StepsContext())
async def create_zookeper_access_entities(zookeeper_client: ZooKeeperClient) -> None:
    """Populate ZooKeeper with znodes matching SAMPLE_ENTITIES:
    one <type>/<name> node holding the uuid, plus one uuid/<uuid> node
    holding the attach query."""
    znodes = [
        ("/clickhouse/access/P/a_policy", str(uuid.UUID(int=1)).encode()),
        (f"/clickhouse/access/uuid/{str(uuid.UUID(int=1))}", b"ATTACH ROW POLICY ..."),
        ("/clickhouse/access/Q/a_quota", str(uuid.UUID(int=2)).encode()),
        (f"/clickhouse/access/uuid/{str(uuid.UUID(int=2))}", b"ATTACH QUOTA ..."),
        ("/clickhouse/access/R/a_role", str(uuid.UUID(int=3)).encode()),
        (f"/clickhouse/access/uuid/{str(uuid.UUID(int=3))}", b"ATTACH ROLE ..."),
        ("/clickhouse/access/S/a_settings_profile", str(uuid.UUID(int=4)).encode()),
        (f"/clickhouse/access/uuid/{str(uuid.UUID(int=4))}", b"ATTACH SETTINGS PROFILE ..."),
        # URL-quoted node name for the non-ASCII user "josé".
        ("/clickhouse/access/U/jos%C3%A9", str(uuid.UUID(int=5)).encode()),
        (f"/clickhouse/access/uuid/{str(uuid.UUID(int=5))}", b"ATTACH USER ..."),
    ]
    async with zookeeper_client.connect() as connection:
        await asyncio.gather(*(connection.create(path, data) for path, data in znodes))
@pytest.mark.asyncio
async def test_retrieve_access_entities() -> None:
    """Entities stored in ZooKeeper are read back as AccessEntity objects."""
    client = FakeZooKeeperClient()
    await create_zookeper_access_entities(client)
    step = RetrieveAccessEntitiesStep(zookeeper_client=client, access_entities_path="/clickhouse/access")
    result = await step.run_step(Cluster(nodes=[]), StepsContext())
    assert result == SAMPLE_ENTITIES
class TrappedZooKeeperClient(FakeZooKeeperClient):
    """
    A fake ZooKeeper client with a trap: it will inject a concurrent write after a few reads.
    """

    def __init__(self) -> None:
        super().__init__()
        # Countdown armed by tests; None means the trap is disabled.
        self.calls_until_failure: Optional[int] = None

    async def inject_fault(self) -> None:
        """Decrement the countdown; when it reaches zero, perform a
        concurrent modification of the access tree (the "fault")."""
        if self.calls_until_failure == 0:
            self.calls_until_failure = None
            # This is our "failure": a concurrent modification
            async with self.connect() as new_connection:
                # BUG FIX: use a uuid not already present in the tree
                # (int=5 is the josé user created by
                # create_zookeper_access_entities, so re-creating its
                # uuid node would fail on an existing znode).
                new_uuid = str(uuid.UUID(int=6))
                await new_connection.create("/clickhouse/access/R/a_new_role", new_uuid.encode())
                # BUG FIX: this was a plain string, so the literal text
                # "{new_uuid}" became the znode path instead of the uuid.
                await new_connection.create(f"/clickhouse/access/uuid/{new_uuid}", b"ATTACH ROLE a_new_role ...")
        elif self.calls_until_failure is not None:
            self.calls_until_failure -= 1
@pytest.mark.asyncio
async def test_retrieve_access_entities_fails_from_concurrent_updates() -> None:
    """A write landing in the access tree mid-scan must abort the step
    with a TransientException (safe to retry)."""
    zookeeper_client = TrappedZooKeeperClient()
    await create_zookeper_access_entities(zookeeper_client)
    # This fixed value is not ideal, we need to wait for a few reads before injecting a concurrent
    # update and see it cause problems, because we must do an update after something was
    # read by the step.
    # This is not a defect of the step, it's OK if something is updated in a part of the ZooKeeper
    # tree we haven't explored yet: the snapshot would have been the same if we had started
    # the snapshot just after this update.
    # NOTE(review): 8 is tied to the current read pattern of
    # RetrieveAccessEntitiesStep — revisit if that step changes.
    zookeeper_client.calls_until_failure = 8
    step = RetrieveAccessEntitiesStep(zookeeper_client=zookeeper_client, access_entities_path="/clickhouse/access")
    with pytest.raises(TransientException):
        await step.run_step(Cluster(nodes=[]), StepsContext())
@pytest.mark.asyncio
async def test_retrieve_tables() -> None:
    """The step parses the system-tables listing from the first client
    only, producing databases (including empty ones) and tables."""
    clients = [StubClickHouseClient(), StubClickHouseClient()]
    # Only clients[0] gets a canned response: the step queries a single node.
    # Row layout: database, table, engine, uuid, create query, dependencies.
    clients[0].set_response(
        TABLES_LIST_QUERY,
        [
            # This special row is what we get for a database without tables
            ["db-empty", "", "", "00000000-0000-0000-0000-000000000000", "", []],
            [
                "db-one",
                "table-uno",
                "ReplicatedMergeTree",
                "00000000-0000-0000-0000-100000000001",
                "CREATE TABLE db-one.table-uno ...",
                [("db-one", "table-dos"), ("db-two", "table-eins")],
            ],
            [
                "db-one",
                "table-dos",
                "MergeTree",
                "00000000-0000-0000-0000-100000000002",
                "CREATE TABLE db-one.table-dos ...",
                [],
            ],
            [
                "db-two",
                "table-eins",
                "ReplicatedMergeTree",
                "00000000-0000-0000-0000-200000000001",
                "CREATE TABLE db-two.table-eins ...",
                [],
            ],
        ]
    )
    step = RetrieveDatabasesAndTablesStep(clients=clients)
    context = StepsContext()
    databases, tables = await step.run_step(Cluster(nodes=[]), context)
    # The empty database appears in the databases list but yields no table.
    assert databases == [ReplicatedDatabase(name="db-empty")] + SAMPLE_DATABASES
    assert tables == SAMPLE_TABLES
@pytest.mark.asyncio
async def test_retrieve_tables_without_any_database_or_table() -> None:
    """An empty listing yields no databases and no tables."""
    clients = [StubClickHouseClient(), StubClickHouseClient()]
    clients[0].set_response(TABLES_LIST_QUERY, [])
    step = RetrieveDatabasesAndTablesStep(clients=clients)
    databases, tables = await step.run_step(Cluster(nodes=[]), StepsContext())
    assert (databases, tables) == ([], [])
@pytest.mark.asyncio
async def test_retrieve_tables_without_any_table() -> None:
    """A database with no tables is still reported, with an empty table list."""
    clients = [StubClickHouseClient(), StubClickHouseClient()]
    # The all-zero uuid row is how an empty database shows up in the listing.
    empty_db_row = ["db-empty", "", "", "00000000-0000-0000-0000-000000000000", "", []]
    clients[0].set_response(TABLES_LIST_QUERY, [
        empty_db_row,
    ])
    step = RetrieveDatabasesAndTablesStep(clients=clients)
    databases, tables = await step.run_step(Cluster(nodes=[]), StepsContext())
    assert databases == [ReplicatedDatabase(name="db-empty")]
    assert tables == []
@pytest.mark.asyncio
async def test_create_clickhouse_manifest() -> None:
    """The manifest step bundles the previously retrieved entities,
    databases and tables into a ClickHouseManifest."""
    context = StepsContext()
    context.set_result(RetrieveAccessEntitiesStep, SAMPLE_ENTITIES)
    context.set_result(RetrieveDatabasesAndTablesStep, (SAMPLE_DATABASES, SAMPLE_TABLES))
    step = CreateClickHouseManifestStep()
    manifest = await step.run_step(Cluster(nodes=[]), context)
    assert manifest == SAMPLE_MANIFEST
@pytest.mark.asyncio
async def test_remove_frozen_tables_step() -> None:
    """Each node gets a clear request whose root_globs targets the
    shadow directory of the (escaped) freeze name."""
    step = RemoveFrozenTablesStep(freeze_name="some-thing+special")
    cluster = Cluster(nodes=[CoordinatorNode(url="http://node1/node"), CoordinatorNode(url="http://node2/node")])
    with respx.mock:
        # Stub the clear start endpoints and their polled status URLs.
        respx.post(
            "http://node1/node/clear", content=Op.StartResult(op_id=123, status_url="http://node1/clear/123").jsondict()
        )
        respx.post(
            "http://node2/node/clear", content=Op.StartResult(op_id=456, status_url="http://node2/clear/456").jsondict()
        )
        respx.get("http://node1/clear/123", content=NodeResult(progress=Progress(final=True)).jsondict())
        respx.get("http://node2/clear/456", content=NodeResult(progress=Progress(final=True)).jsondict())
        try:
            await step.run_step(cluster, StepsContext())
        finally:
            # Check the recorded clear requests even if run_step raised.
            # NOTE(review): an assertion failure here would mask the
            # original exception from run_step.
            for call in respx.calls:
                request: httpx.Request = call[0]
                if request.url in {"http://node1/node/clear", "http://node2/node/clear"}:
                    # "-" and "+" are percent-escaped in the freeze name.
                    assert json.loads(request.read())["root_globs"] == ["shadow/some%2Dthing%2Bspecial/store/**/*"]
@pytest.mark.asyncio
async def test_freezes_all_mergetree_tables_listed_in_manifest() -> None:
    # Shared scenario with the unfreeze test; only the ALTER verb differs.
    await _test_freeze_unfreezes_all_mergetree_tables_listed_in_manifest(step_class=FreezeTablesStep, operation="FREEZE")
@pytest.mark.asyncio
async def test_unfreezes_all_mergetree_tables_listed_in_manifest() -> None:
    # Shared scenario with the freeze test; only the ALTER verb differs.
    await _test_freeze_unfreezes_all_mergetree_tables_listed_in_manifest(step_class=UnfreezeTablesStep, operation="UNFREEZE")
async def _test_freeze_unfreezes_all_mergetree_tables_listed_in_manifest(
    *, step_class: Union[Type[FreezeTablesStep], Type[UnfreezeTablesStep]], operation: str
):
    """Run the given freeze/unfreeze step and check it issues one ALTER
    per table, on the first node only.

    The freeze name deliberately contains backticks, quotes, slashes and
    non-ASCII characters to exercise SQL escaping: identifiers are
    back-quoted, the name is single-quoted with \\' escapes.
    """
    first_client, second_client = mock_clickhouse_client(), mock_clickhouse_client()
    step = step_class(clients=[first_client, second_client], freeze_name="Äs`t:/.././@c'_'s")
    cluster = Cluster(nodes=[CoordinatorNode(url="node1"), CoordinatorNode(url="node2")])
    context = StepsContext()
    context.set_result(RetrieveDatabasesAndTablesStep, (SAMPLE_DATABASES, SAMPLE_TABLES))
    await step.run_step(cluster, context)
    assert first_client.mock_calls == [
        mock.call.execute(f"ALTER TABLE `db-one`.`table-uno` {operation} WITH NAME 'Äs`t:/.././@c\\'_\\'s'"),
        mock.call.execute(f"ALTER TABLE `db-one`.`table-dos` {operation} WITH NAME 'Äs`t:/.././@c\\'_\\'s'"),
        mock.call.execute(f"ALTER TABLE `db-two`.`table-eins` {operation} WITH NAME 'Äs`t:/.././@c\\'_\\'s'"),
    ]
    # The operation is replicated, so we'll only do it on the first client
    assert second_client.mock_calls == []
@pytest.mark.asyncio
async def test_distribute_parts_of_replicated_tables() -> None:
    """Parts of replicated tables (identical hexdigest on every node)
    are split between nodes; parts of non-replicated tables stay put.

    Fixture layout: table uuid ...0001 is ReplicatedMergeTree with two
    parts present on both nodes (digests "0001"/"0002"); table uuid
    ...0002 is plain MergeTree where each node has its own part with the
    same path but a different digest ("0003" vs "0004").
    """
    step = DistributeReplicatedPartsStep()
    context = StepsContext()
    context.set_result(
        SnapshotStep, [
            # Node 1 snapshot.
            SnapshotResult(
                state=SnapshotState(
                    root_globs=[],
                    files=[
                        SnapshotFile(
                            relative_path=Path("store/000/00000000-0000-0000-0000-100000000001/detached/all_0_0_0/data.bin"),
                            file_size=1000,
                            mtime_ns=0,
                            hexdigest="0001"
                        ),
                        SnapshotFile(
                            relative_path=Path("store/000/00000000-0000-0000-0000-100000000001/detached/all_1_1_0/data.bin"),
                            file_size=1000,
                            mtime_ns=0,
                            hexdigest="0002"
                        ),
                        SnapshotFile(
                            relative_path=Path("store/000/00000000-0000-0000-0000-100000000002/detached/all_0_0_0/data.bin"),
                            file_size=1000,
                            mtime_ns=0,
                            hexdigest="0003"
                        ),
                    ]
                ),
            ),
            # Node 2 snapshot: same replicated parts, different local part.
            SnapshotResult(
                state=SnapshotState(
                    root_globs=[],
                    files=[
                        SnapshotFile(
                            relative_path=Path("store/000/00000000-0000-0000-0000-100000000001/detached/all_0_0_0/data.bin"),
                            file_size=1000,
                            mtime_ns=0,
                            hexdigest="0001"
                        ),
                        SnapshotFile(
                            relative_path=Path("store/000/00000000-0000-0000-0000-100000000001/detached/all_1_1_0/data.bin"),
                            file_size=1000,
                            mtime_ns=0,
                            hexdigest="0002"
                        ),
                        SnapshotFile(
                            relative_path=Path("store/000/00000000-0000-0000-0000-100000000002/detached/all_0_0_0/data.bin"),
                            file_size=1000,
                            mtime_ns=0,
                            hexdigest="0004"
                        ),
                    ]
                ),
            ),
        ]
    )
    context.set_result(RetrieveDatabasesAndTablesStep, (SAMPLE_DATABASES, SAMPLE_TABLES))
    await step.run_step(Cluster(nodes=[]), context)
    # The step mutates the snapshot results in place.
    snapshot_results = context.get_result(SnapshotStep)
    # On the ReplicatedMergeTree table (uuid ending in 0001), each server has only half the parts now
    # On the MergeTree table (uuid ending in 0002), each server has kept its own part (same name but different digest)
    assert sorted(snapshot_results[0].state.files) == [
        SnapshotFile(
            relative_path=Path("store/000/00000000-0000-0000-0000-100000000001/detached/all_0_0_0/data.bin"),
            file_size=1000,
            mtime_ns=0,
            hexdigest="0001"
        ),
        SnapshotFile(
            relative_path=Path("store/000/00000000-0000-0000-0000-100000000002/detached/all_0_0_0/data.bin"),
            file_size=1000,
            mtime_ns=0,
            hexdigest="0003"
        ),
    ]
    assert sorted(snapshot_results[1].state.files) == [
        SnapshotFile(
            relative_path=Path("store/000/00000000-0000-0000-0000-100000000001/detached/all_1_1_0/data.bin"),
            file_size=1000,
            mtime_ns=0,
            hexdigest="0002"
        ),
        SnapshotFile(
            relative_path=Path("store/000/00000000-0000-0000-0000-100000000002/detached/all_0_0_0/data.bin"),
            file_size=1000,
            mtime_ns=0,
            hexdigest="0004"
        ),
    ]
@pytest.mark.asyncio
async def test_parse_clickhouse_manifest() -> None:
    """The plugin_data dict of a BackupManifest round-trips into a typed
    ClickHouseManifest (uuids parsed, entities/databases/tables built)."""
    step = ClickHouseManifestStep()
    context = StepsContext()
    context.set_result(
        BackupManifestStep,
        BackupManifest(
            start=datetime.datetime(2020, 1, 2, 3, 4, 5, 678, tzinfo=datetime.timezone.utc),
            end=datetime.datetime(2020, 1, 2, 5, 6, 7, 891, tzinfo=datetime.timezone.utc),
            attempt=1,
            snapshot_results=[],
            upload_results=[],
            plugin=Plugin.clickhouse,
            # Raw (JSON-ish) plugin payload as stored in the backup.
            plugin_data={
                "access_entities": [{
                    "name": "default",
                    "uuid": "00000000-0000-0000-0000-000000000002",
                    "type": "U",
                    "attach_query": "ATTACH USER ..."
                }],
                "replicated_databases": [{
                    "name": "db-one"
                }],
                "tables": [{
                    "database": "db-one",
                    "name": "t1",
                    "engine": "MergeTree",
                    "uuid": "00000000-0000-0000-0000-000000000004",
                    "create_query": "CREATE ..."
                }]
            }
        )
    )
    clickhouse_manifest = await step.run_step(Cluster(nodes=[]), context)
    assert clickhouse_manifest == ClickHouseManifest(
        access_entities=[AccessEntity(type="U", uuid=uuid.UUID(int=2), name="default", attach_query="ATTACH USER ...")],
        replicated_databases=[ReplicatedDatabase(name="db-one")],
        tables=[Table(database="db-one", name="t1", engine="MergeTree", uuid=uuid.UUID(int=4), create_query="CREATE ...")],
    )
@pytest.mark.asyncio
async def test_creates_all_replicated_databases_and_tables_in_manifest() -> None:
    """Restore drops and recreates each Replicated database on every
    node, but issues the CREATE TABLE statements on the first node only."""
    clients = [mock_clickhouse_client(), mock_clickhouse_client()]
    step = RestoreReplicatedDatabasesStep(clients=clients, replicated_databases_zookeeper_path="/clickhouse/databases")
    cluster = Cluster(nodes=[CoordinatorNode(url="node1"), CoordinatorNode(url="node2")])
    context = StepsContext()
    context.set_result(ClickHouseManifestStep, SAMPLE_MANIFEST)
    await step.run_step(cluster, context)
    # Database names are percent-escaped in the ZooKeeper path ("-" -> %2D).
    first_client_queries = [
        "DROP DATABASE IF EXISTS `db-one` SYNC",
        "CREATE DATABASE `db-one` ENGINE = Replicated('/clickhouse/databases/db%2Done', '{shard}', '{replica}')",
        "DROP DATABASE IF EXISTS `db-two` SYNC",
        "CREATE DATABASE `db-two` ENGINE = Replicated('/clickhouse/databases/db%2Dtwo', '{shard}', '{replica}')",
        "CREATE TABLE db-one.table-uno ...", "CREATE TABLE db-one.table-dos ...", "CREATE TABLE db-two.table-eins ..."
    ]
    # CREATE TABLE is replicated, that's why we only create the table on the first client
    second_client_queries = [
        "DROP DATABASE IF EXISTS `db-one` SYNC",
        "CREATE DATABASE `db-one` ENGINE = Replicated('/clickhouse/databases/db%2Done', '{shard}', '{replica}')",
        "DROP DATABASE IF EXISTS `db-two` SYNC",
        "CREATE DATABASE `db-two` ENGINE = Replicated('/clickhouse/databases/db%2Dtwo', '{shard}', '{replica}')",
    ]
    assert clients[0].mock_calls == list(map(mock.call.execute, first_client_queries))
    assert clients[1].mock_calls == list(map(mock.call.execute, second_client_queries))
@pytest.mark.asyncio
async def test_drops_each_database_on_all_servers_before_recreating_it():
# We use the same client twice to record the global sequence of queries across all servers
client = mock_clickhouse_client()
step = RestoreReplicatedDatabasesStep(
clients=[client, client], replicated_databases_zookeeper_path="/clickhouse/databases"
)
cluster = Cluster(nodes=[CoordinatorNode(url="node1"), CoordinatorNode(url="node2")])
context = StepsContext()
context.set_result(ClickHouseManifestStep, SAMPLE_MANIFEST)
await step.run_step(cluster, context)
first_client_queries = [
"DROP DATABASE IF EXISTS `db-one` SYNC", "DROP DATABASE IF EXISTS `db-one` SYNC",
"CREATE DATABASE `db-one` ENGINE = Replicated('/clickhouse/databases/db%2Done', '{shard}', '{replica}')",
"CREATE DATABASE `db-one` ENGINE = Replicated('/clickhouse/databases/db%2Done', '{shard}', '{replica}')",
"DROP DATABASE IF EXISTS `db-two` SYNC", "DROP DATABASE IF EXISTS `db-two` SYNC",
"CREATE DATABASE `db-two` ENGINE = Replicated('/clickhouse/databases/db%2Dtwo', '{shard}', '{replica}')",
"CREATE DATABASE `db-two` ENGINE = Replicated('/clickhouse/databases/db%2Dtwo', '{shard}', '{replica}')",
"CREATE TABLE db-one.table-uno ...", | |
# <gh_stars>0
# -*- coding: latin-1 -*-
#
# wif.py - parse or generate Wi-Fi Simple Configuration data
#
from __future__ import absolute_import, division
from .record import Record, GlobalRecord, hexlify, _PY2
from .record import DecodeError, EncodeError
from collections import namedtuple
from functools import reduce
import operator
import uuid
VersionTuple = namedtuple('Version', 'major, minor')
class AttributeBase(object):
    """Common behavior shared by attributes and attribute containers:
    generic repr/str rendering (delegating to each class's ``__format__``)
    and helpers that build decode/encode errors prefixed with the fully
    qualified class name.
    """
    def __repr__(self):
        template = "{r.__class__.__module__}.{r.__class__.__name__}({r:args})"
        return template.format(r=self)

    def __str__(self):
        return "{r._str} {r:data}".format(r=self)

    @classmethod
    def _prefixed_message(cls, fmt, args, kwargs):
        # "module.ClassName <formatted message>"
        return "{}.{} {}".format(
            cls.__module__, cls.__name__, fmt.format(*args, **kwargs))

    @classmethod
    def _decode_error(cls, fmt, *args, **kwargs):
        """Return a DecodeError with a class-name-prefixed message."""
        return DecodeError(cls._prefixed_message(fmt, args, kwargs))

    @classmethod
    def _encode_error(cls, fmt, *args, **kwargs):
        """Return an EncodeError with a class-name-prefixed message."""
        return EncodeError(cls._prefixed_message(fmt, args, kwargs))
class AttributeContainer(AttributeBase):
    """Dict-like collection of encoded attribute values keyed by type.

    Keys may be given either as integer attribute type codes or as the
    symbolic names registered in ``_attribute_name_mapping``; each key maps
    to a list of encoded attribute values (octet strings).
    """
    _attribute_name_mapping = NotImplemented

    @property
    def attribute_names(self):
        """The symbolic attribute names this container understands."""
        return self._attribute_name_mapping.keys()

    def _map_key(self, key):
        # Integer keys pass through untouched, names are translated to the
        # attribute class's type key.
        if isinstance(key, int):
            return key
        if key in self._attribute_name_mapping:
            return self._attribute_name_mapping[key]._key
        raise ValueError("unknown attribute name '{name}'".format(name=key))

    def __init__(self, *args):
        self._attributes = dict()
        for attr_type, attr_value in args:
            self.setdefault(attr_type, []).append(attr_value)

    def __getitem__(self, key):
        return self._attributes[self._map_key(key)]

    def __setitem__(self, key, value):
        self._attributes[self._map_key(key)] = value

    def __contains__(self, key):
        return self._map_key(key) in self._attributes

    def __iter__(self):
        return iter(self._attributes)

    def get(self, key, default=None):
        return self._attributes.get(self._map_key(key), default)

    def setdefault(self, key, default=None):
        return self._attributes.setdefault(self._map_key(key), default)

    def keys(self):
        return self._attributes.keys()

    def values(self):
        return self._attributes.values()

    def items(self):
        return self._attributes.items()

    def __format__(self, format_spec):
        if format_spec == 'args':
            pairs = ["(0x{:02X}, {!r})".format(key, value)
                     for key in self for value in self[key]]
            return ', '.join(pairs)
        if format_spec == 'data':
            # One key string per stored value, so repeated attributes show up
            # repeatedly.
            keys = ["0x{:02X}".format(key) for key in self for _ in self[key]]
            return "Attributes {}".format(' '.join(keys))
        return super(AttributeContainer, self).__format__(format_spec)

    def get_attribute(self, name, index=0):
        """Decode and return attribute *name* at *index*, or None if absent."""
        cls = self._attribute_name_mapping[name]
        try:
            return cls.decode(self.get(cls._key, [])[index])
        except IndexError:
            return None

    def set_attribute(self, name, *args):
        """Replace attribute *name* with a single freshly encoded value."""
        cls = self._attribute_name_mapping[name]
        attribute = args[0] if isinstance(args[0], cls) else cls(*args)
        self[cls._key] = [attribute.encode()]

    def add_attribute(self, name, *args):
        """Append one more encoded value for attribute *name*."""
        cls = self._attribute_name_mapping[name]
        attribute = args[0] if isinstance(args[0], cls) else cls(*args)
        self.setdefault(cls._key, []).append(attribute.encode())
class Attribute(AttributeBase):
    """Base class for a single Wi-Fi Simple Configuration attribute.

    Subclasses provide ``_fmt`` as (struct format, min octets, max octets)
    and an ``init`` method that normalizes constructor arguments into the
    internal ``_value`` tuple.
    """
    def __init__(self, *args):
        if args and type(args[0]) == type(self):
            # Copy construction from an instance of the very same class.
            self._value = args[0]._value
        elif args:
            self.init(*args)
        else:
            self._value = ()

    @property
    def value(self):
        return self._value

    def __eq__(self, other):
        return type(self) == type(other) and self._value == other._value

    def __format__(self, format_spec):
        if format_spec == 'args':
            return ", ".join("{!r}".format(item) for item in self._value)
        if format_spec == 'data':
            return ", ".join("{!s}".format(item) for item in self._value)
        return super(Attribute, self).__format__(format_spec)

    def encode(self):
        """Pack the value tuple, checking the encoded length limits."""
        octets = self._encode_struct(self._fmt[0], *self._value)
        if self._fmt[1] <= len(octets) <= self._fmt[2]:
            return octets
        errstr = "data length is out of limits {1} <= {0} <= {2}"
        raise self._encode_error(errstr, len(octets), *self._fmt[1:3])

    def _encode_struct(self, fmt, *values):
        return Record._encode_struct(fmt, *values)

    @classmethod
    def decode(cls, octets, offset=0):
        """Unpack *octets* into an attribute, checking the length limits."""
        if not cls._fmt[1] <= len(octets) <= cls._fmt[2]:
            errstr = "data length is out of limits {1} <= {0} <= {2}"
            raise cls._decode_error(errstr, len(octets), *cls._fmt[1:3])
        unpacked = cls._decode_struct(cls._fmt[0], octets, offset)
        return cls._from_decode(*unpacked)

    @classmethod
    def _decode_struct(cls, fmt, octets, offset=0):
        return Record._decode_struct(fmt, octets, offset, always_tuple=True)

    @classmethod
    def _from_decode(cls, *values):
        # Hook for subclasses that need to post-process decoded values.
        return cls(*values)
class IntegerAttribute(Attribute):
    """An attribute whose payload is a single integer."""
    def init(self, value):
        self._value = int(value),

    @property
    def value(self):
        # Unwrap the one-element value tuple.
        return self._value[0]
class BooleanAttribute(Attribute):
    """An attribute whose payload is a single boolean."""
    def init(self, value):
        self._value = bool(value),

    @property
    def value(self):
        # Unwrap the one-element value tuple.
        return self._value[0]
class OctetsAttribute(Attribute):
    """An attribute whose payload is a raw octet string."""
    def init(self, value):
        self._value = (bytes(bytearray(value) if _PY2 else value),)

    @property
    def value(self):
        return self._value[0]

    def __format__(self, format_spec):
        if format_spec == 'data':
            # Render as colon-separated uppercase hex octets.
            octets = bytearray(self.value) if _PY2 else self.value
            return ":".join("{:02X}".format(octet) for octet in octets)
        return super(OctetsAttribute, self).__format__(format_spec)
class AsciiAttribute(Attribute):
    """An attribute whose payload is an ASCII string (stored as bytes)."""
    def init(self, *args):
        encoded = Record._value_to_ascii(args[0], 'value').encode('ascii')
        self._value = (encoded,)

    @property
    def value(self):
        # Python 2 returns the raw bytes, Python 3 returns text.
        return self._value[0] if _PY2 else self._value[0].decode('ascii')

    def __format__(self, format_spec):
        if format_spec == 'data':
            return "{}".format(self.value)
        return super(AsciiAttribute, self).__format__(format_spec)

    @classmethod
    def _from_decode(cls, *values):
        return cls(values[0].decode('ascii'))
class UnicodeAttribute(Attribute):
    """An attribute whose payload is a UTF-8 string (stored as bytes)."""
    def init(self, *args):
        encoded = Record._value_to_unicode(args[0], 'value').encode('utf-8')
        self._value = (encoded,)

    @property
    def value(self):
        # Python 2 returns the raw bytes, Python 3 returns text.
        return self._value[0] if _PY2 else self._value[0].decode('utf-8')

    def __format__(self, format_spec):
        if format_spec == 'data':
            return "{}".format(self.value)
        return super(UnicodeAttribute, self).__format__(format_spec)

    @classmethod
    def _from_decode(cls, *values):
        return cls(values[0].decode('utf-8'))
class UUIDAttribute(Attribute):
    """An attribute whose payload is a UUID, stored as its 16 raw octets."""
    def init(self, *args):
        # Accept a uuid.UUID, 16 raw octets, or anything uuid.UUID() parses.
        arg = args[0]
        if isinstance(arg, uuid.UUID):
            octets = arg.bytes
        elif isinstance(arg, (bytes, bytearray)) and len(arg) == 16:
            octets = bytes(arg)
        else:
            octets = uuid.UUID(arg).bytes
        self._value = (octets,)

    @property
    def value(self):
        # Canonical string form, e.g. "00000000-0000-...".
        return str(uuid.UUID(bytes=self._value[0]))

    def __format__(self, format_spec):
        if format_spec == 'args':
            return "'{}'".format(self.value)
        if format_spec == 'data':
            return "{}".format(self.value)
        return super(UUIDAttribute, self).__format__(format_spec)
class BitmapAttribute(Attribute):
    """An attribute that stores a bitmap of named flags.

    Subclasses define ``_bitmap`` as a sequence of (mask, name) pairs.
    """
    def init(self, *args):
        assert len(args) > 0, "at least one argument is required"
        if isinstance(args[0], int):
            # A raw bitmap value is stored unchanged.
            self._value = (args[0],)
        else:
            # Otherwise OR together the masks of all given flag names.
            names = args[0] if isinstance(args[0], (tuple, list)) else args
            masks = [self._get_mask(name) for name in names]
            self._value = (reduce(operator.or_, masks),)

    @property
    def value(self):
        """Tuple of the raw bitmap followed by the names of all set flags."""
        raw = self._value[0]
        names = [name for mask, name in self._bitmap if raw & mask == mask]
        return tuple([raw] + names)

    @classmethod
    def _get_mask(cls, name):
        # Raises ValueError (via list.index) if *name* is not a known flag.
        known_names = [entry[1] for entry in cls._bitmap]
        return cls._bitmap[known_names.index(name)][0]

    def __format__(self, format_spec):
        if format_spec == 'data':
            # Hex width follows the encoded size from _fmt (octets * 2).
            fmt = "0x{{val:0{width}X}} {{lst}}".format(width=self._fmt[1] * 2)
            return fmt.format(val=self._value[0], lst=list(self.value[1:]))
        return super(BitmapAttribute, self).__format__(format_spec)

    def __contains__(self, name):
        # True when every bit of the flag's mask is set.
        mask = self._get_mask(name)
        return self._value[0] & mask == mask
class DeviceTypeAttribute(Attribute):
    """Base class for the Primary and Secondary Device Type attributes.

    Device types are 64-bit integers built from a 16-bit category code,
    the 32-bit WFA OUI 0050F204 and a 16-bit sub-category code.  The
    ``_mapping`` table translates between those integers and readable
    "Category::SubCategory" names; entries whose lower 48 bits are zero
    stand for a bare category.  Lookups take the first match, so every
    value must be unique within the table.
    """
    _mapping = (
        (0x0001000000000000, "Computer::"),
        (0x00010050F2040001, "Computer::PC"),
        (0x00010050F2040002, "Computer::Server"),
        (0x00010050F2040003, "Computer::MediaCenter"),
        (0x00010050F2040004, "Computer::UltraMobile"),
        (0x00010050F2040005, "Computer::Notebook"),
        (0x00010050F2040006, "Computer::Desktop"),
        (0x00010050F2040007, "Computer::MobileInternetDevice"),
        (0x00010050F2040008, "Computer::Netbook"),
        (0x00010050F2040009, "Computer::Tablet"),
        # Fixed: Ultrabook is sub-category 10 (0x000A) per the WSC spec; it
        # previously duplicated the Tablet value and could never be matched.
        (0x00010050F204000A, "Computer::Ultrabook"),
        (0x0002000000000000, "Input::"),
        (0x00020050F2040001, "Input::Keyboard"),
        (0x00020050F2040002, "Input::Mouse"),
        (0x00020050F2040003, "Input::Joystick"),
        (0x00020050F2040004, "Input::Trackball"),
        (0x00020050F2040005, "Input::GameController"),
        (0x00020050F2040006, "Input::Remote"),
        (0x00020050F2040007, "Input::Touchscreen"),
        (0x00020050F2040008, "Input::BiometricReader"),
        (0x00020050F2040009, "Input::BarcodeReader"),
        (0x0003000000000000, "Printer::"),
        (0x00030050F2040002, "Printer::Scanner"),
        (0x00030050F2040003, "Printer::Fax"),
        (0x00030050F2040004, "Printer::Copier"),
        (0x00030050F2040005, "Printer::Multifunction"),
        (0x0004000000000000, "Camera::"),
        (0x00040050F2040001, "Camera::DigitalStillCamera"),
        (0x00040050F2040002, "Camera::VideoCamera"),
        (0x00040050F2040003, "Camera::WebCamera"),
        (0x00040050F2040004, "Camera::SecurityCamera"),
        (0x0005000000000000, "Storage::"),
        (0x00050050F2040001, "Storage::NAS"),
        (0x0006000000000000, "Network::"),
        (0x00060050F2040001, "Network::AccessPoint"),
        (0x00060050F2040002, "Network::Router"),
        (0x00060050F2040003, "Network::Switch"),
        (0x00060050F2040004, "Network::Gateway"),
        (0x00060050F2040005, "Network::Bridge"),
        (0x0007000000000000, "Display::"),
        (0x00070050F2040001, "Display::Television"),
        (0x00070050F2040002, "Display::PictureFrame"),
        (0x00070050F2040003, "Display::Projector"),
        (0x00070050F2040004, "Display::Monitor"),
        (0x0008000000000000, "Multimedia::"),
        (0x00080050F2040001, "Multimedia::DigitalAudioRecorder"),
        (0x00080050F2040002, "Multimedia::PersonalVideoRecorder"),
        (0x00080050F2040003, "Multimedia::MediaCenterExtender"),
        (0x00080050F2040004, "Multimedia::SetTopBox"),
        (0x00080050F2040005, "Multimedia::ServerAdapterExtender"),
        (0x00080050F2040006, "Multimedia::PortableVideoPlayer"),
        (0x0009000000000000, "Gaming::"),
        (0x00090050F2040001, "Gaming::Xbox"),
        (0x00090050F2040002, "Gaming::Xbox360"),
        (0x00090050F2040003, "Gaming::Playstation"),
        (0x00090050F2040004, "Gaming::Console"),
        (0x00090050F2040005, "Gaming::Portable"),
        (0x000A000000000000, "Telephone::"),
        (0x000A0050F2040001, "Telephone::WindowsMobile"),
        (0x000A0050F2040002, "Telephone::SingleModePhone"),
        (0x000A0050F2040003, "Telephone::DualModePhone"),
        (0x000A0050F2040004, "Telephone::SingleModeSmartphone"),
        (0x000A0050F2040005, "Telephone::DualModeSmartphone"),
        (0x000B000000000000, "Audio::"),
        (0x000B0050F2040001, "Audio::Receiver"),
        (0x000B0050F2040002, "Audio::Speaker"),
        (0x000B0050F2040003, "Audio::PortableMusicPlayer"),
        (0x000B0050F2040004, "Audio::Headset"),
        (0x000B0050F2040005, "Audio::Headphone"),
        (0x000B0050F2040006, "Audio::Microphone"),
        # Fixed: Home Theater is sub-category 7 per the WSC spec; it
        # previously duplicated the Microphone value.
        (0x000B0050F2040007, "Audio::HomeTheater"),
        (0x000C000000000000, "Dock::"),
        (0x000C0050F2040001, "Dock::Computer"),
        # Fixed: the media dock is sub-category 2 per the WSC spec; it
        # previously duplicated the computer docking station value.
        (0x000C0050F2040002, "Dock::Media"),
    )
    def init(self, *args):
        # Accept raw 64-bit integers or "Category::SubCategory" names.
        values = [v if isinstance(v, int) else self._get_enum(v) for v in args]
        self._value = tuple(values)
    @property
    def value(self):
        """Tuple of readable names, one per stored device type."""
        return tuple(["{}".format(self._get_name(v)) for v in self._value])
    def _get_enum(self, value):
        """Translate a readable name to its 64-bit integer.

        Raises ValueError if the name has no known mapping.
        """
        for enum, name in self._mapping:
            if value == name:
                return enum
        raise ValueError("{!r} does not have a known mapping".format(value))
    def _get_name(self, value):
        """Translate a 64-bit integer to a readable name.

        Unknown sub-categories of a known category render as
        "Category::<hex>", completely unknown values as "<hex>::<hex>".
        """
        for enum, name in self._mapping:
            if value == enum:
                return name
        category = value >> 48
        for enum, name in self._mapping:
            if enum >> 48 == category:
                return "{}{:012X}".format(name, value & 0xFFFFFFFFFFFF)
        return "{:04X}::{:012X}".format(category, value & 0xFFFFFFFFFFFF)
    def __format__(self, format_spec):
        if format_spec == 'args':
            return ", ".join(["0x{:016X}".format(v) for v in self._value])
        elif format_spec == 'data':
            return " ".join(["{}".format(v) for v in self.value])
        else:
            return super(DeviceTypeAttribute, self).__format__(format_spec)
class VersionAttribute(Attribute):
    """An attribute holding a one-octet version: major nibble, minor nibble."""
    def init(self, *args):
        if len(args) == 2:
            # Pack separate (major, minor) arguments into a single octet.
            major, minor = args
            packed = major << 4 | minor & 15
        else:
            packed = args[0]
        self._value = (packed,)

    @property
    def value(self):
        packed = self._value[0]
        return VersionTuple(packed >> 4, packed & 15)

    def __format__(self, format_spec):
        if format_spec == 'args':
            return "{}, {}".format(*self.value)
        if format_spec == 'data':
            return "{}.{}".format(*self.value)
        return super(VersionAttribute, self).__format__(format_spec)
#
# Wi-Fi Simple Configuration Attributes
#
class APChannel(IntegerAttribute):
    # WSC attribute 0x1001: radio channel of the access point.
    _str = "AP Channel"
    _fmt = ('H', 2, 2)  # 16-bit unsigned, exactly 2 octets
    _key = 0x1001
class AuthenticationType(BitmapAttribute):
    # WSC attribute 0x1003: bitmap of authentication types.
    _str = "Authentication Type"
    _fmt = ('H', 2, 2)  # 16-bit unsigned, exactly 2 octets
    _key = 0x1003
    _bitmap = (  # (bit mask, flag name)
        (0x0001, 'Open'),
        (0x0002, 'WPA-Personal'),
        (0x0004, 'Shared'),
        (0x0008, 'WPA-Enterprise'),
        (0x0010, 'WPA2-Enterprise'),
        (0x0020, 'WPA2-Personal'))
class ConfigMethods(BitmapAttribute):
    # WSC attribute 0x1008: bitmap of supported configuration methods.
    _str = "Configuration Methods"
    _fmt = ('H', 2, 2)  # 16-bit unsigned, exactly 2 octets
    _key = 0x1008
    _bitmap = (  # (bit mask, flag name)
        (0x0001, 'USBA'),
        (0x0002, 'Ethernet'),
        (0x0004, 'Label'),
        (0x0008, 'Display'),
        (0x0010, 'External NFC Token'),
        (0x0020, 'Integrated NFC Token'),
        (0x0040, 'NFC Interface'),
        (0x0080, 'Push Button'),
        (0x0100, 'Keypad'),
        # The remaining entries are multi-bit values: e.g. 0x0280 includes
        # the 0x0080 'Push Button' bit, so a bitmap containing
        # 'Virtual Push Button' also reports plain 'Push Button' as set.
        (0x0280, 'Virtual Push Button'),
        (0x0480, 'Physical Push Button'),
        (0x2008, 'Virtual Display PIN'),
        (0x4008, 'Physical Display PIN'))
class DeviceName(UnicodeAttribute):
    # WSC attribute 0x1011: user-friendly device name.
    _str = "Device Name"
    _fmt = ('*', 0, 64)  # variable length, 0..64 octets of UTF-8
    _key = 0x1011
class EncryptionType(BitmapAttribute):
_str = | |
= request.POST.get('personal_equity').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
term_loan = request.POST.get('term_loan').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
other = request.POST.get('other').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
total_sources = request.POST.get('total_sources').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
cash_reserve = request.POST.get('cash_reserve').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
inventory = request.POST.get('inventory').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
purchase_building = request.POST.get('purchase_building').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
purchase_equipment = request.POST.get('purchase_equipment').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
renovations = request.POST.get('renovations').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
working_capital = request.POST.get('working_capital').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
other = request.POST.get('other').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
total_uses = request.POST.get('total_uses').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
body = '<!doctype html>' + \
'<html lang="en">' + \
'<head>' + \
'<meta charset="utf-8">' + \
'<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">' + \
'<link rel="stylesheet"' + \
'href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous">' + \
'<title>Fund sources and uses</title>' + \
'</head>' + \
'<body>' + \
'<div class="container">' + \
'<div class="card text-center">' + \
'<div class="card-header text-center">Fund sources and uses</div>' + \
'<div class="card-body">'
body += '<h6>Comapny name : ' + company_name + '</h6>' + \
'<h6>Share capital : ' + share_capital + '</h6>' + \
'<h6>Head office address : ' + head_office_address + '</h6>' + \
'<h6>Establishment number : ' + establishment_number + '</h6>' + \
'<h6>Register of Trade and Companies : ' + register_of_trade_and_companies + '</h6>' + \
'<h6>Main activities : ' + main_activities + '</h6>' + \
'<h6>Activity number : ' + activity_number + '</h6>' + \
'<h6>Intra-community VAT number : ' + intra_community_vat_number + '</h6>' + \
'<h6>President : ' + president + '</h6>' + \
'<h6>Registration date : ' + registration_date + '</h6>' + \
'<br>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Line of credit' + \
'</div>' + \
'<div class="card-body">' + \
line_of_credit + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Outside equity' + \
'</div>' + \
'<div class="card-body">' + \
outside_equity + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Personal equity' + \
'</div>' + \
'<div class="card-body">' + \
personal_equity + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Term loan' + \
'</div>' + \
'<div class="card-body">' + \
term_loan + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Other' + \
'</div>' + \
'<div class="card-body">' + \
other + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Total sources' + \
'</div>' + \
'<div class="card-body">' + \
total_sources + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Cash reserve' + \
'</div>' + \
'<div class="card-body">' + \
cash_reserve + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Inventory' + \
'</div>' + \
'<div class="card-body">' + \
inventory + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Purchase building' + \
'</div>' + \
'<div class="card-body">' + \
purchase_building + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Purchase equipment' + \
'</div>' + \
'<div class="card-body">' + \
purchase_equipment + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Renovations' + \
'</div>' + \
'<div class="card-body">' + \
renovations + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Working capital' + \
'</div>' + \
'<div class="card-body">' + \
working_capital + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Other' + \
'</div>' + \
'<div class="card-body">' + \
other + \
'</div>' + \
'</div>'
body += '<br>'
body += '<div class="card">' + \
'<div class="card-header">' + \
'Total uses' + \
'</div>' + \
'<div class="card-body">' + \
total_uses + \
'</div>' + \
'</div>'
body += '<br>'
body += '<br>' + \
'</div>' + \
'</div>' + \
'</div>' + \
'<br>' + \
'<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'<script src="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/js/bootstrap.bundle.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'</body>' + \
'</html>'
options = {
'page-size': 'A4',
'header-center': 'Fund sources and uses',
'footer-left': 'Company : ' + company_name + ' [' + establishment_number + ']',
'footer-right': '[page] sur [topage]',
'encoding': 'UTF-8',
'no-outline': None,
'custom-header': [
('Accept-Encoding', 'pdf')
]
}
# path_wkthmltopdf = 'static/reporting/static/wkhtmltopdf.exe'
# config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
# output = pdfkit.from_string(body, output_path=False, configuration=config, options=options)
output = pdfkit.from_string(body, output_path=False, options=options)
response = HttpResponse(output, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="fund_sources_and_uses.pdf"'
return response
def physical_inventory_count_sheet(request):
    """Render the blank physical inventory count sheet input form."""
    return render(request, 'reporting/physical_inventory_count_sheet.html')
def generate_html_to_pdf_physical_inventory_count_sheet(request):
company_name = request.POST.get('company_name').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
share_capital = request.POST.get('share_capital').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
head_office_address = request.POST.get('head_office_address').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
establishment_number = request.POST.get('establishment_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
register_of_trade_and_companies = request.POST.get('register_of_trade_and_companies').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
main_activities = request.POST.get('main_activities').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
activity_number = request.POST.get('activity_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
intra_community_vat_number = request.POST.get('intra_community_vat_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
president = request.POST.get('president').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
registration_date = request.POST.get('registration_date').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c1 = request.POST.get('r1c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c2 = request.POST.get('r1c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c3 = request.POST.get('r1c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c4 = request.POST.get('r1c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c5 = request.POST.get('r1c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c1 = request.POST.get('r2c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c2 = request.POST.get('r2c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c3 = request.POST.get('r2c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c4 = request.POST.get('r2c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c5 = request.POST.get('r2c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c1 = request.POST.get('r3c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c2 = request.POST.get('r3c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c3 = request.POST.get('r3c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c4 = request.POST.get('r3c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c5 = request.POST.get('r3c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c1 = request.POST.get('r4c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c2 = request.POST.get('r4c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c3 = request.POST.get('r4c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c4 = request.POST.get('r4c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c5 = request.POST.get('r4c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c1 = request.POST.get('r5c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c2 = request.POST.get('r5c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c3 = request.POST.get('r5c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c4 = request.POST.get('r5c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c5 = request.POST.get('r5c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c1 = request.POST.get('r6c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c2 = request.POST.get('r6c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c3 = request.POST.get('r6c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c4 = request.POST.get('r6c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c5 = request.POST.get('r6c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c1 = request.POST.get('r7c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c2 = request.POST.get('r7c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c3 = request.POST.get('r7c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c4 = request.POST.get('r7c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c5 = request.POST.get('r7c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c1 = request.POST.get('r8c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c2 = request.POST.get('r8c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c3 = request.POST.get('r8c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c4 = request.POST.get('r8c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c5 = request.POST.get('r8c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c1 = request.POST.get('r9c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c2 = request.POST.get('r9c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c3 = request.POST.get('r9c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c4 = request.POST.get('r9c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c5 = request.POST.get('r9c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c1 = request.POST.get('r10c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c2 = request.POST.get('r10c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c3 = request.POST.get('r10c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c4 = request.POST.get('r10c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c5 = request.POST.get('r10c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c1 = request.POST.get('r11c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c2 = request.POST.get('r11c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c3 = request.POST.get('r11c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c4 = request.POST.get('r11c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c5 = request.POST.get('r11c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c1 = request.POST.get('r12c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c2 = | |
'''
Author: <NAME>
Contact: <EMAIL>
This file contains the contents for the "msAllele" utilities. Specifically, the tool queries one or more BWTs for a single pattern.
It returns the counts for each BWT and extracts the reads for a visual display (provided there are relatively few reads). The main
difference between this and "msCompare" is that it calculates a follow up step where reads are grouped based on their similarity.
These "alleles" are displayed as separate units in the output.
'''
import locale
import markup
import numpy as np
import os
from MUSCython import MultiStringBWTCython as MSBWT
import msSharedUtil
# Shared configuration re-exported from msSharedUtil for local convenience.
dirLabels = msSharedUtil.dirLabels  # display label per BWT group
MSBWTdirs = msSharedUtil.MSBWTdirs  # root directory per BWT group
uniformLengths = msSharedUtil.uniformLengths  # fixed read length per group (0 means detect at load time)
def GET():
    '''
    Build and return the markup page for the "msAllele" query form.
    @return - a markup page containing pieces of the form required for the query
    '''
    page = markup.page()
    page.add('<script type="text/javascript" src="./static/js/jquery-2.1.1.min.js"></script>')
    page.div(style="padding:50px 150px")
    # NOTE(review): buildCheckboxSelect presumably opens the <form> that is
    # closed below - verify against msSharedUtil.
    citOrder, citationDict = msSharedUtil.buildCheckboxSelect(page)
    page.label("Search Pattern:")
    page.input(type="text", name="pattern", size="100")
    page.input(type="hidden", name="target", value="msAllele.Search")
    page.input(type="submit", name="submit", value="Submit")
    page.form.close()
    page.br()
    page.h4('Instructions:')
    page.ol()
    page.li('Select one or more datasets. Note: selecting too many datasets may lead to a timeout.')
    page.li('Select a k-mer, such as "ACAAG", to search for. Note: selecting a small k-mer in a large dataset can match many reads leading to longer computations.')
    page.ol.close()
    page.h4('Citations:')
    page.ol()
    for citationText in citOrder:
        page.li(citationText, id=citationDict[citationText][0])
    page.ol.close()
    page.div.close()
    return page
def POST(datasets, pattern):
    '''
    This function returns a markup page for the contents of the "msAllele" response page
    @param datasets - a list of datasets (aka BWTs) to be queried in this response
    @param pattern - the pattern to search for (a k-mer)
    @return - the markup page containing the results of the query
    '''
    panel = markup.page()
    # Inline JS: copies the user's current text selection into the hidden
    # "SearchSelected" form so a sub-region of a displayed read can be
    # re-submitted as a new search.
    panel.script(type="text/javascript")
    panel.add("""
    function getSelectedText() {
      var hidden, submit;
      var selectedText=(window.getSelection ? window.getSelection() : document.getSelection ? document.getSelection() : document.selection.createRange().text);
      if (selectedText == "") {
         alert("You must select a subsequence");
         return false;
      } else {
         document.forms["SearchSelected"]["pattern"].value = selectedText;
      }
    }
    """)
    panel.script.close()
    panel.div(style="padding:50px 50px;")
    # A single selected dataset arrives as a bare string; normalize to a list.
    if isinstance(datasets, str):
        datasets = [datasets]
    # Defensive checks: render an error page (with a "New Search" button) when
    # either the dataset list or the search pattern is missing.
    if (datasets == None):
        panel.h3("ERROR: No datasets selected.")
        panel.div(align="center", style="padding: 30px 30px;")
        panel.input(type="button", value="New Search", onClick='self.location="./msAllele"')
        panel.div.close()
        panel.div.close()
        return panel
    if (pattern == None):
        panel.h3("ERROR: No search pattern specified")
        panel.div(align="center", style="padding: 30px 30px;")
        panel.input(type="button", value="New Search", onClick='self.location="./msAllele"')
        panel.div.close()
        panel.div.close()
        return panel
    # Reject any symbol outside the recognized alphabet.  WatsonComp is
    # defined elsewhere in this module — presumably a nucleotide-complement
    # map whose keys are the valid symbols; confirm at its definition.
    pattern = str(pattern).upper()
    for c in pattern:
        if not (c in WatsonComp):
            panel.h3("ERROR: '"+c+"' is not a valid symbol")
            panel.div(align="center", style="padding: 30px 30px;")
            panel.input(type="button", value="New Search", onClick='self.location="./msAllele"')
            panel.div.close()
            panel.div.close()
            return panel
    # Each selection is encoded as "<groupIndex>-<datasetName>"; split it and
    # look up the group's label, read length, and index directory.
    for dataset in datasets:
        dashIndex = dataset.find('-')
        groupIndex = int(dataset[0:dashIndex])
        datasetLabel = dataset[dashIndex+1:]
        groupLabel = dirLabels[groupIndex]
        readLen = uniformLengths[groupIndex]
        MSBWTdir = MSBWTdirs[groupIndex]
        bwtDirName = "%s/%s" % (MSBWTdir, datasetLabel)
        metadata = msSharedUtil.loadMetadata(bwtDirName)
        panel.h3(groupLabel+': '+metadata.get('Name', datasetLabel))
        # Summary statistics for the index: on-disk size, string/base counts,
        # and the resulting bits-per-base compression figure.
        filestat = os.stat(bwtDirName+"/comp_msbwt.npy")
        filesize = locale.format("%d", filestat.st_size, grouping=True)
        bwt = MSBWT.loadBWT(bwtDirName)
        if readLen == 0:
            # No fixed length configured for this group: derive it from the
            # first read stored in the BWT.
            readLen = len(bwt.recoverString(0))
        stringCount = locale.format("%d", bwt.getSymbolCount(0), grouping=True)
        baseCount = locale.format("%d", bwt.getTotalSize(), grouping=True)
        bitsPerBase = (8.0*filestat.st_size)/bwt.getTotalSize()
        panel.strong("%s strings with %s bases and index size of %s bytes (%3.2f bits per base)<br />" % (stringCount, baseCount, filesize, bitsPerBase))
        panel.strong("Target: %s<br />" % (pattern))
        # Count hits on both strands: the pattern itself and its reverse
        # complement (revComp is defined elsewhere in this module).
        lo1, hi1 = bwt.findIndicesOfStr(pattern)
        lo2, hi2 = bwt.findIndicesOfStr(revComp(pattern))
        count = hi1 - lo1 + hi2 - lo2
        if (count > 10000):
            # Too many matching reads to render; report the count only.
            panel.add("Found %d times (%d forward, %d reverse-complemented)<br /><br />" % (count, hi1-lo1, hi2-lo2))
            panel.span("Too much data!", style="font-size: 180%;")
        elif count > 0:
            panel.add("Found %d times (%d forward, %d reverse-complemented)<br /><br />" % (count, hi1-lo1, hi2-lo2))
            panel.div(style="font-size:10px; font-family: monospace;")
            # Display geometry: every read is padded to 2*readLen - l columns
            # with the k-mer aligned in the middle; `margin` is the flank
            # width on each side (matches fixedSize in extractHaplotypes).
            l = len(pattern)
            bufferLen = readLen
            margin = bufferLen - l
            haps = extractHaplotypes(bwt, pattern, readLen)
            if len(haps) > 0:
                # Consensus of the largest group; used as the reference that
                # other rows are highlighted against.
                consensusMain = haps[0][0]
            # Summary table: one row per allele group with >= 5 exact-match
            # reads; smaller groups are pooled into extrasList and summarized
            # in a single "Remainder Consensus" row.
            panel.table(border='1')
            panel.tr()
            panel.th('Consensus')
            panel.th('Exact matches')
            panel.tr.close()
            extrasList = []
            for consensus, readlist in haps:
                if len(readlist) >= 5:
                    panel.tr()
                    panel.td()
                    panel.strong()
                    output = ""
                    # Render the consensus: green span marks the k-mer
                    # columns; yellow highlights bases differing from the
                    # main consensus (padding '$'/'.' excluded).
                    for i, base in enumerate(consensus):
                        if i == margin:
                            output += '<span style="color: green;">'
                        elif i == margin+l:
                            output += '</span>'
                        if(base != '$') and (base != '.') and (consensus[i] != '.') and (base.upper() != consensusMain[i].upper()):
                            output += '<span style="background-color:yellow;">%s</span>' % base.upper()
                        else:
                            output += base.upper()
                    panel.add(output)
                    panel.strong.close()
                    panel.td.close()
                    panel.td(str(len(readlist)))
                    panel.tr.close()
                else:
                    for read in readlist:
                        extrasList.append(read)
            if len(extrasList) > 0:
                panel.tr()
                panel.th('Remainder Consensus')
                panel.th('Inexact matches')
                panel.tr.close()
                # conSeq (defined elsewhere in this module) builds a consensus
                # for the pooled leftover reads; second return value unused.
                consensus, dummyVar = conSeq(extrasList)
                panel.tr()
                panel.td()
                panel.strong()
                output = ""
                for i, base in enumerate(consensus):
                    if i == margin:
                        output += '<span style="color: green;">'
                    elif i == margin+l:
                        output += '</span>'
                    if(base != '$') and (base != '.') and (consensus[i] != '.') and (base.upper() != consensusMain[i].upper()):
                        output += '<span style="background-color:yellow;">%s</span>' % base.upper()
                    else:
                        output += base.upper()
                panel.add(output)
                panel.strong.close()
                panel.td.close()
                panel.td(str(len(extrasList)))
                panel.tr.close()
            panel.table.close()
            # Detailed per-group read pile-ups: a '*' ruler marking the k-mer
            # columns, then every read, then the group consensus underneath.
            for consensus, readlist in haps:
                if len(readlist) >= 5:
                    read = "."*margin + "*"*l + '.'*margin
                    panel.add(read)
                    panel.br()
                    for read in sorted(readlist):
                        # NOTE(review): '$' position vs. pattern position
                        # presumably encodes read orientation (red vs. blue)
                        # — confirm against extractHaplotypes' padding.
                        color = "red" if (read.find('$') > read.find(pattern)) else "blue"
                        output = ""
                        for i, base in enumerate(read):
                            if (i == margin):
                                output += '<span style="color: %s;">' % color
                            elif (i == margin+l):
                                output += '</span>'
                            if (base != '$') and (base != '.') and (consensus[i] != '.') and (base.upper() != consensus[i].upper()):
                                output += '<span style="background-color:yellow;">%s</span>' % base
                            else:
                                output += base
                        output += '<br />'
                        panel.add(output)
                    panel.strong('%s<span style="color: green;">%s</span>%s<br />' % (consensus[:margin], consensus[margin:margin+l], consensus[margin+l:]))
                    panel.br()
                    panel.br()
            # Same pile-up rendering for the pooled leftover reads, sorted by
            # readCmp (a Python 2 cmp-style comparator defined elsewhere in
            # this module).
            if len(extrasList) > 0:
                consensus, dummyVar = conSeq(extrasList)
                extrasList.sort(cmp=readCmp)
                read = "."*margin + "*"*l + '.'*margin
                panel.add(read)
                panel.br()
                for read in extrasList:
                    color = "red" if (read.find('$') > read.find(pattern)) else "blue"
                    output = ""
                    for i, base in enumerate(read):
                        if (i == margin):
                            output += '<span style="color: %s;">' % color
                        elif (i == margin+l):
                            output += '</span>'
                        if (base != '$') and (base != '.') and (consensus[i] != '.') and (base.upper() != consensus[i].upper()):
                            output += '<span style="background-color:yellow;">%s</span>' % base
                        else:
                            output += base
                    output += '<br />'
                    panel.add(output)
                panel.strong('%s<span style="color: green;">%s</span>%s<br />' % (consensus[:margin], consensus[margin:margin+l], consensus[margin+l:]))
                panel.br()
            panel.div.close()
        else:
            panel.add("Pattern not found<br /><br />")
    # Footer form: lets the user re-search a selected subsequence (via the
    # JS above) or start over; selected datasets are carried as hidden inputs.
    panel.form(action="", name="SearchSelected", method="POST", enctype="multipart/form-data", onsubmit='return getSelectedText()')
    panel.div(align="center", style="padding: 30px 30px;")
    panel.input(type="submit", name="submit", value="Search Selected")
    panel.input(type="button", value="New Search", onClick='self.location="./msAllele"')
    for dataset in datasets:
        panel.input(type="hidden", name="dataset", value=dataset)
    panel.input(type="hidden", name="pattern", value=pattern)
    panel.div.close()
    panel.form.close()
    panel.div.close()
    return panel
def extractHaplotypes(bwt, kmer, readLen):
'''
A subroutine for calculating haplotypes present in a particular BWT based on a kmer query
@param bwt - an instance of the BasicBWT class from the "msbwt" package
@param kmer - the pattern to search for (string)
@param readLen - the length of reads in the BWT
@return - a list of tuples where each tuple is of the form (consensus sequence, list of reads that match)
'''
forwardIndices = bwt.findIndicesOfStr(kmer)
revComp = MSBWT.reverseComplement(kmer)
reverseIndices = bwt.findIndicesOfStr(revComp)
bufferLen = readLen
patternLen = len(kmer)
fixedSize = 2*bufferLen-patternLen
totalBuffLen = 2*readLen-patternLen
modifiedSeqs = []
for i in xrange(forwardIndices[0], forwardIndices[1]):
readSeq = bwt.recoverString(i)
dollarPos = readSeq.find('$')
suffLen = len(readSeq)
#calculate how many tailing '.' we need first, then construct the string from that info
beforePattern = suffLen-dollarPos-1
modSeq = ('.'*(bufferLen-patternLen-beforePattern)+
readSeq[dollarPos+1:].lower()+
readSeq[:patternLen]+
readSeq[patternLen:dollarPos+1].lower())
modSeq += '.'*(fixedSize-len(modSeq))
modifiedSeqs.append(modSeq)
for i in xrange(reverseIndices[0], reverseIndices[1]):
revCompSeq = bwt.recoverString(i)
readSeq = MSBWT.reverseComplement(revCompSeq)
suffLen = len(readSeq)
dollarPos = readSeq.find('$')
beforePattern = suffLen-dollarPos-patternLen
modSeq = ('.'*(bufferLen-patternLen-beforePattern)+
readSeq[dollarPos:-patternLen].lower()+
readSeq[-patternLen:]+
readSeq[0:dollarPos].lower())
modSeq += '.'*(fixedSize-len(modSeq))
modifiedSeqs.append(modSeq)
#jump out if we find nothing
if len(modifiedSeqs) == 0:
return []
#now we begin searching for haplotypes
groupID = 0
allGroups = {}
pairedSets = {}
while len(modifiedSeqs) > 0:
currSeq = modifiedSeqs[0]
currSet = []
x = 0
while x < len(modifiedSeqs):
if modifiedSeqs[x] == currSeq:
#same seq
currSet.append(modifiedSeqs.pop(x))
else:
x += 1
allGroups[groupID] = (combineShiftedSeqs(currSet[0], currSet[0]), currSet)
pairedSets[groupID] = set([])
groupID += 1
edges = {}
for x in xrange(0, len(allGroups)):
for y in xrange(x+1, len(allGroups)):
#first, check if they're compatible
con1 = allGroups[x][0]
con2 = allGroups[y][0]
diff, shared = getDeltaOverlaps(con1, con2)
if diff == 0:
edges[(x, y)] = shared*(len(allGroups[x][1])+len(allGroups[y][1]))
pairedSets[x].add(y)
pairedSets[y].add(x)
else:
#don't add an edge because they conflict for one reason or another
pass
while len(edges) > 0:
#get the maximum weighted edge
maxEdge, maxValue = max(edges.iteritems(), key=lambda x: x[1])
#pull out the groupIDs
mergeID1 | |
ocupante não especificado de um triciclo motorizado traumatizado em um acidente de trânsito'),
('V32.0', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo a motor de duas ou três rodas - condutor traumatizado em acidente não-de-trânsito'),
('V32.1', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo a motor de duas ou três rodas - passageiro traumatizado em acidente não-de-trânsito'),
('V32.2', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo a motor de duas ou três rodas - pessoa viajando no exterior do veículo traumatizada em um acidente não-de-trânsito'),
('V32.3', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo a motor de duas ou três rodas - ocupante não especificado de um triciclo motorizado traumatizado em acidente não-de-trânsito'),
('V32.4', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo a motor de duas ou três rodas - pessoa traumatizada ao subir ou descer do veículo'),
('V32.5', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo a motor de duas ou três rodas - condutor traumatizado em um acidente de trânsito'),
('V32.6', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo a motor de duas ou três rodas - passageiro traumatizado em um acidente de trânsito'),
('V32.7', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo a motor de duas ou três rodas - pessoa viajando no exterior do veículo traumatizada em um acidente de trânsito'),
('V32.9', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo a motor de duas ou três rodas - ocupante não especificado de um triciclo motorizado traumatizado em um acidente de trânsito'),
('V33.0', 'Ocupante de um triciclo motorizado traumatizado em colisão com um automóvel, "pick up" ou caminhonete - condutor traumatizado em acidente não-de-trânsito'),
('V33.1', 'Ocupante de um triciclo motorizado traumatizado em colisão com um automóvel, "pick up" ou caminhonete - passageiro traumatizado em acidente não-de-trânsito'),
('V33.2', 'Ocupante de um triciclo motorizado traumatizado em colisão com um automóvel, "pick up" ou caminhonete - pessoa viajando no exterior do veículo traumatizada em um acidente não-de-trânsito'),
('V33.3', 'Ocupante de um triciclo motorizado traumatizado em colisão com um automóvel, "pick up" ou caminhonete - ocupante não especificado de um triciclo motorizado traumatizado em acidente não-de-trânsito'),
('V33.4', 'Ocupante de um triciclo motorizado traumatizado em colisão com um automóvel, "pick up" ou caminhonete - pessoa traumatizada ao subir ou descer do veículo'),
('V33.5', 'Ocupante de um triciclo motorizado traumatizado em colisão com um automóvel, "pick up" ou caminhonete - condutor traumatizado em um acidente de trânsito'),
('V33.6', 'Ocupante de um triciclo motorizado traumatizado em colisão com um automóvel, "pick up" ou caminhonete - passageiro traumatizado em um acidente de trânsito'),
('V33.7', 'Ocupante de um triciclo motorizado traumatizado em colisão com um automóvel, "pick up" ou caminhonete - pessoa viajando no exterior do veículo traumatizada em um acidente de trânsito'),
('V33.9', 'Ocupante de um triciclo motorizado traumatizado em colisão com um automóvel, "pick up" ou caminhonete - ocupante não especificado de um triciclo motorizado traumatizado em um acidente de trânsito'),
('V34.0', 'Ocupante de um triciclo motorizado traumatizado em colisão com um veículo de transporte pesado ou um ônibus - condutor traumatizado em acidente não-de-trânsito'),
('V34.1', 'Ocupante de um triciclo motorizado traumatizado em colisão com um veículo de transporte pesado ou um ônibus - passageiro traumatizado em acidente não-de-trânsito'),
('V34.2', 'Ocupante de um triciclo motorizado traumatizado em colisão com um veículo de transporte pesado ou um ônibus - pessoa viajando no exterior do veículo traumatizada em um acidente não-de-trânsito'),
('V34.3', 'Ocupante de um triciclo motorizado traumatizado em colisão com um veículo de transporte pesado ou um ônibus - ocupante não especificado de um triciclo motorizado traumatizado em acidente não-de-trânsito'),
('V34.4', 'Ocupante de um triciclo motorizado traumatizado em colisão com um veículo de transporte pesado ou um ônibus - pessoa traumatizada ao subir ou descer do veículo'),
('V34.5', 'Ocupante de um triciclo motorizado traumatizado em colisão com um veículo de transporte pesado ou um ônibus - condutor traumatizado em um acidente de trânsito'),
('V34.6', 'Ocupante de um triciclo motorizado traumatizado em colisão com um veículo de transporte pesado ou um ônibus - passageiro traumatizado em um acidente de trânsito'),
('V34.7', 'Ocupante de um triciclo motorizado traumatizado em colisão com um veículo de transporte pesado ou um ônibus - pessoa viajando no exterior do veículo traumatizada em um acidente de trânsito'),
('V34.9', 'Ocupante de um triciclo motorizado traumatizado em colisão com um veículo de transporte pesado ou um ônibus - ocupante não especificado de um triciclo motorizado traumatizado em um acidente de trânsito'),
('V35.0', 'Ocupante de um triciclo motorizado traumatizado em colisão com um trem [comboio] ou um veículo ferroviário - condutor traumatizado em acidente não-de-trânsito'),
('V35.1', 'Ocupante de um triciclo motorizado traumatizado em colisão com um trem [comboio] ou um veículo ferroviário - passageiro traumatizado em acidente não-de-trânsito'),
('V35.2', 'Ocupante de um triciclo motorizado traumatizado em colisão com um trem [comboio] ou um veículo ferroviário - pessoa viajando no exterior do veículo traumatizada em um acidente não-de-trânsito'),
('V35.3', 'Ocupante de um triciclo motorizado traumatizado em colisão com um trem [comboio] ou um veículo ferroviário - ocupante não especificado de um triciclo motorizado traumatizado em acidente não-de-trânsito'),
('V35.4', 'Ocupante de um triciclo motorizado traumatizado em colisão com um trem [comboio] ou um veículo ferroviário - pessoa traumatizada ao subir ou descer do veículo'),
('V35.5', 'Ocupante de um triciclo motorizado traumatizado em colisão com um trem [comboio] ou um veículo ferroviário - condutor traumatizado em um acidente de trânsito'),
('V35.6', 'Ocupante de um triciclo motorizado traumatizado em colisão com um trem [comboio] ou um veículo ferroviário - passageiro traumatizado em um acidente de trânsito'),
('V35.7', 'Ocupante de um triciclo motorizado traumatizado em colisão com um trem [comboio] ou um veículo ferroviário - pessoa viajando no exterior do veículo traumatizada em um acidente de trânsito'),
('V35.9', 'Ocupante de um triciclo motorizado traumatizado em colisão com um trem [comboio] ou um veículo ferroviário - ocupante não especificado de um triciclo motorizado traumatizado em um acidente de trânsito'),
('V36.0', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo não-motorizado - condutor traumatizado em acidente não-de-trânsito'),
('V36.1', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo não-motorizado - passageiro traumatizado em acidente não-de-trânsito'),
('V36.2', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo não-motorizado - pessoa viajando no exterior do veículo traumatizada em um acidente não-de-trânsito'),
('V36.3', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo não-motorizado - ocupante não especificado de um triciclo motorizado traumatizado em acidente não-de-trânsito'),
('V36.4', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo não-motorizado - pessoa traumatizada ao subir ou descer do veículo'),
('V36.5', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo não-motorizado - condutor traumatizado em um acidente de trânsito'),
('V36.6', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo não-motorizado - passageiro traumatizado em um acidente de trânsito'),
('V36.7', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo não-motorizado - pessoa viajando no exterior do veículo traumatizada em um acidente de trânsito'),
('V36.9', 'Ocupante de um triciclo motorizado traumatizado em colisão com outro veículo não-motorizado - ocupante não especificado de um triciclo motorizado traumatizado em um acidente de trânsito'),
('V37.0', 'Ocupante de um triciclo motorizado traumatizado em colisão com um objeto fixo ou parado - condutor traumatizado em acidente não-de-trânsito'),
('V37.1', 'Ocupante de um triciclo motorizado traumatizado em colisão com um objeto fixo ou parado - passageiro traumatizado em acidente não-de-trânsito'),
('V37.2', 'Ocupante de um triciclo motorizado traumatizado em colisão com um objeto fixo ou parado - pessoa viajando no exterior do veículo traumatizada em um acidente não-de-trânsito'),
('V37.3', 'Ocupante de um triciclo motorizado traumatizado em colisão com um objeto fixo ou parado - ocupante não especificado de um triciclo motorizado traumatizado em acidente não-de-trânsito'),
('V37.4', 'Ocupante de um triciclo motorizado traumatizado em colisão com um objeto fixo ou parado - pessoa traumatizada ao subir ou descer do veículo'),
('V37.5', 'Ocupante de um triciclo motorizado traumatizado em colisão com um | |
: [u'y', u's'] ,
u'孙' : [u'x', u's'] ,
u'㫘' : [u'm'] ,
u'陛' : [u'b'] ,
u'淢' : [u'y', u'x'] ,
u'䍩' : [u'a', u'y'] ,
u'牳' : [u'm'] ,
u'嗲' : [u'd'] ,
u'郴' : [u'c', u'l'] ,
u'銉' : [u'y'] ,
u'洌' : [u'l'] ,
u'䦏' : [u'x'] ,
u'鸖' : [u'h'] ,
u'溡' : [u's'] ,
u'蘦' : [u'l'] ,
u'㴬' : [u'x'] ,
u'種' : [u'c', u'z'] ,
u'嚱' : [u'x'] ,
u'螻' : [u'l'] ,
u'戾' : [u'l'] ,
u'篃' : [u'm'] ,
u'鍈' : [u'y'] ,
u'揓' : [u's'] ,
u'铝' : [u'l'] ,
u'潠' : [u'x', u's'] ,
u'䯣' : [u'k', u'g'] ,
u'坰' : [u'j'] ,
u'烵' : [u'z'] ,
u'衺' : [u'x'] ,
u'㾀' : [u'q'] ,
u'鬃' : [u'z'] ,
u'刉' : [u'j'] ,
u'茓' : [u'x'] ,
u'撒' : [u's'] ,
u'㨙' : [u'x'] ,
u'眛' : [u'm'] ,
u'閜' : [u'x', u'k'] ,
u'䲢' : [u't'] ,
u'弫' : [u'z'] ,
u'㒲' : [u'c'] ,
u'逵' : [u'k'] ,
u'䜻' : [u'c', u's'] ,
u'姄' : [u'm'] ,
u'汍' : [u'w'] ,
u'諎' : [u'z'] ,
u'䇔' : [u'l', u'n'] ,
u'鵗' : [u'x'] ,
u'绖' : [u'd'] ,
u'呝' : [u'e'] ,
u'蕧' : [u'f'] ,
u'曦' : [u'x'] ,
u'㱭' : [u'd'] ,
u'祯' : [u'z'] ,
u'韰' : [u'x'] ,
u'件' : [u'j'] ,
u'慿' : [u'p'] ,
u'谈' : [u't'] ,
u'伊' : [u'y'] ,
u'玍' : [u'g'] ,
u'䊗' : [u'h'] ,
u'朚' : [u'h'] ,
u'鞥' : [u'y', u'e'] ,
u'媧' : [u'w'] ,
u'缪' : [u'j', u'm', u'l'] ,
u'謲' : [u'c'] ,
u'临' : [u'l'] ,
u'犷' : [u'g'] ,
u'晄' : [u'h'] ,
u'雏' : [u'c'] ,
u'㕎' : [u'k', u'e'] ,
u'姑' : [u'g'] ,
u'織' : [u'z'] ,
u'詜' : [u't'] ,
u'䵞' : [u'q', u'j'] ,
u'燡' : [u'y'] ,
u'䃫' : [u'd'] ,
u'敮' : [u'x', u'g'] ,
u'闹' : [u'n'] ,
u'㑸' : [u'a', u'y'] ,
u'壻' : [u'x'] ,
u'絾' : [u'c'] ,
u'覆' : [u'f'] ,
u'瘏' : [u't'] ,
u'舗' : [u'p'] ,
u'䔙' : [u'd'] ,
u'撘' : [u'd'] ,
u'騧' : [u'g'] ,
u'崩' : [u'b'] ,
u'粨' : [u'b'] ,
u'䮲' : [u'h'] ,
u'甹' : [u'p'] ,
u'腁' : [u'p'] ,
u'䑃' : [u'm'] ,
u'揂' : [u'j'] ,
u'饑' : [u'q', u'j'] ,
u'屓' : [u'x'] ,
u'蟚' : [u'p'] ,
u'瑣' : [u's'] ,
u'聫' : [u'l'] ,
u'括' : [u'k', u'g'] ,
u'须' : [u'x'] ,
u'孽' : [u'n'] ,
u'竼' : [u'p'] ,
u'悁' : [u'y', u'j'] ,
u'蜄' : [u'z'] ,
u'后' : [u'h'] ,
u'碑' : [u'b'] ,
u'㶓' : [u'c'] ,
u'鼔' : [u'g'] ,
u'蒙' : [u'm'] ,
u'喣' : [u'x'] ,
u'鲩' : [u'h', u'w'] ,
u'䤰' : [u'y', u'w'] ,
u'涳' : [u'k'] ,
u'逶' : [u'w'] ,
u'慀' : [u'x'] ,
u'䛅' : [u'x'] ,
u'釋' : [u'y', u's'] ,
u'祐' : [u'y'] ,
u'㩒' : [u'q'] ,
u'廕' : [u'y'] ,
u'蕘' : [u'y', u'r'] ,
u'剢' : [u'z'] ,
u'盥' : [u'g'] ,
u'鵨' : [u's'] ,
u'苭' : [u'y'] ,
u'号' : [u'h'] ,
u'髽' : [u'z'] ,
u'䞄' : [u'b'] ,
u'焋' : [u'z'] ,
u'躊' : [u'c'] ,
u'锣' : [u'l'] ,
u'瞤' : [u'r'] ,
u'戭' : [u'y'] ,
u'莬' : [u'm', u'w'] ,
u'傶' : [u'c'] ,
u'稽' : [u'q', u'j'] ,
u'鮼' : [u'q'] ,
u'㼿' : [u't'] ,
u'棆' : [u'z', u'l'] ,
u'坏' : [u'h', u'p'] ,
u'鹕' : [u'h'] ,
u'䗘' : [u'k', u'g'] ,
u'潟' : [u'x'] ,
u'賞' : [u's'] ,
u'巨' : [u'j'] ,
u'䡱' : [u'z'] ,
u'鍷' : [u'k'] ,
u'痸' : [u'c'] ,
u'舀' : [u'y'] ,
u'䄂' : [u'l'] ,
u'斅' : [u'x', u'j'] ,
u'騐' : [u'y'] ,
u'夒' : [u'n'] ,
u'綕' : [u'z'] ,
u'覝' : [u'l'] ,
u'䲟' : [u'y'] ,
u'焢' : [u'h'] ,
u'䀬' : [u'q'] ,
u'撯' : [u'z'] ,
u'㞹' : [u'k'] ,
u'锺' : [u'z'] ,
u'堼' : [u'f'] ,
u'粿' : [u'g'] ,
u'俉' : [u'w'] ,
u'灌' : [u'h', u'g'] ,
u'䍖' : [u'h', u'x', u'f'] ,
u'柙' : [u'y', u'x', u'j'] ,
u'鑤' : [u'p', u'b'] ,
u'学' : [u'x'] ,
u'翩' : [u'p'] ,
u'诱' : [u'y'] ,
u'仳' : [u'p'] ,
u'獶' : [u'n'] ,
u'䊀' : [u'h'] ,
u'标' : [u'b'] ,
u'鞎' : [u'h'] ,
u'媐' : [u'x'] ,
u'谟' : [u'm'] ,
u'匡' : [u'k'] ,
u'欱' : [u'x', u'h'] ,
u'隸' : [u'l'] ,
u'㨻' : [u'c'] ,
u'喺' : [u'x'] ,
u'轉' : [u'z'] ,
u'剋' : [u'k'] ,
u'淊' : [u'y', u'h'] ,
u'㳔' : [u'd'] ,
u'橛' : [u'j'] ,
u'釢' : [u'n'] ,
u'㕥' : [u'y'] ,
u'哤' : [u'm'] ,
u'蹳' : [u'b'] ,
u'䵵' : [u'z'] ,
u'泴' : [u'g'] ,
u'㿾' : [u'z'] ,
u'䨆' : [u'x', u'b'] ,
u'溉' : [u'x', u'g'] ,
u'鄌' : [u't'] ,
u'或' : [u'y', u'h'] ,
u'䮛' : [u'f'] ,
u'銡' : [u'j'] ,
u'稦' : [u'y'] ,
u'㼨' : [u'h'] ,
u'掫' : [u'z'] ,
u'蘮' : [u'j'] ,
u'箻' : [u'l'] ,
u'㢽' : [u'e'] ,
u'鸾' : [u'l'] ,
u'蟃' : [u'w'] ,
u'働' : [u'd'] ,
u'䑚' : [u't'] ,
u'棝' : [u'g'] ,
u'鍠' : [u'h'] ,
u'屪' : [u'l'] ,
u'䗯' : [u'j'] ,
u'賵' : [u'f'] ,
u'瑺' : [u'c'] ,
u'㥼' : [u'y'] ,
u'巿' : [u'f'] ,
u'會' : [u'h', u'k', u'g'] ,
u'肂' : [u's'] ,
u'册' : [u'c'] ,
u'缓' : [u'h'] ,
u'颒' : [u'h'] ,
u'講' : [u'j'] ,
u'榜' : [u'p', u'b'] ,
u'吥' : [u'b'] ,
u'䚮' : [u'r'] ,
u'趴' : [u'p'] ,
u'庾' : [u'y'] ,
u'䥇' : [u's'] ,
u'遍' : [u'b'] ,
u'盎' : [u'a'] ,
u'㯐' : [u't', u'f'] ,
u'慗' : [u'c'] ,
u'苖' : [u'd'] ,
u'叠' : [u'd'] ,
u'祧' : [u't'] ,
u'髦' : [u'm', u'l'] ,
u'蕯' : [u'l'] ,
u'毰' : [u'p'] ,
u'噹' : [u'd'] ,
u'鵿' : [u's'] ,
u'蠈' : [u'z'] ,
u'䬊' : [u'q', u's'] ,
u'瞍' : [u's'] ,
u'挚' : [u'z'] ,
u'鎥' : [u't'] ,
u'座' : [u'z'] ,
u'笪' : [u'd'] ,
u'輲' : [u'c'] ,
u'䨴' : [u'd', u'w'] ,
u'皷' : [u'g'] ,
u'䗁' : [u'j'] ,
u'扄' : [u's'] ,
u'鋏' : [u'j'] ,
u'巑' : [u'c'] ,
u'穔' : [u'h'] ,
u'蹜' : [u's'] ,
u'䥞' : [u'j'] ,
u'痡' : [u'p'] ,
u'䓫' : [u'q', u'j'] ,
u'慮' : [u'l'] ,
u'釹' : [u'n'] ,
u'峻' : [u'j'] ,
u'祾' : [u'l'] ,
u'趆' : [u'd'] ,
u'䢈' : [u'q', u'c'] ,
u'爏' : [u'l'] ,
u'蘗' : [u'b'] ,
u'䄙' : [u'm'] ,
u'悘' : [u'y'] ,
u'鸧' : [u'q', u'c'] ,
u'天' : [u't'] ,
u'碨' : [u'w'] ,
u'貰' : [u's'] ,
u'侲' : [u'z'] ,
u'蕁' : [u'q', u'x', u't'] ,
u'柂' : [u'y', u'd'] ,
u'鵑' : [u'j'] ,
u'塓' : [u'm'] ,
u'習' : [u'x'] ,
u'菚' : [u'z'] ,
u'仜' : [u'h'] ,
u'灣' : [u'w'] ,
u'葫' : [u'h'] ,
u'鯪' : [u'l'] ,
u'䝭' : [u'g'] ,
u'曬' : [u's'] ,
u'鱻' : [u'x'] ,
u'彽' : [u'c'] ,
u'综' : [u'z'] ,
u'撁' : [u'q'] ,
u'茄' : [u'q', u'j'] ,
u'倎' : [u't'] ,
u'粑' : [u'b'] ,
u'㦓' : [u'x'] ,
u'鬔' : [u'p'] ,
u'肙' : [u'y'] ,
u'栞' : [u'k'] ,
u'㔠' : [u'h', u'j'] ,
u'冣' : [u'j'] ,
u'颩' : [u'b', u'd'] ,
u'䴰' : [u's'] ,
u'榳' : [u't'] ,
u'鐶' : [u'h'] ,
u'敀' : [u'b'] ,
u'闋' : [u'q', u'k', u'j'] ,
u'結' : [u'j'] ,
u'㹒' : [u'p'] ,
u'嫕' : [u'y'] ,
u'腘' : [u'g'] ,
u'噢' : [u'y', u'o'] ,
u'狥' : [u'x'] ,
u'㿧' : [u'c'] ,
u'饨' : [u't'] ,
u'蛭' : [u'z'] ,
u'湲' : [u'y'] ,
u'執' : [u'z'] ,
u'黽' : [u'm'] ,
u'䎄' : [u't'] ,
u'甋' : [u'd'] ,
u'誊' : [u't'] ,
u'㘍' : [u'q', u'j'] ,
u'宔' : [u'z'] ,
u'丝' : [u's'] ,
u'鄣' : [u'z'] ,
u'玤' : [u'b'] ,
u'㲦' : [u'h'] ,
u'昭' : [u'z'] ,
u'螬' : [u'c'] ,
u'咶' : [u'h', u's'] ,
u'總' : [u'z'] ,
u'㬿' : [u'd'] ,
u'艅' : [u'y'] ,
u'泆' : [u'y'] ,
u'协' : [u'x'] ,
u'驕' : [u'q', u'x', u'j'] ,
u'䇘' : [u'h'] ,
u'歟' : [u'y'] ,
u'裞' : [u's'] ,
u'姨' : [u'y'] ,
u'䱱' : [u'd', u't'] ,
u'靷' : [u'y'] ,
u'燸' : [u'r'] ,
u'蘀' : [u't'] ,
u'䔂' : [u'z'] ,
u'憅' : [u't'] ,
u'鸐' : [u'd'] ,
u'崒' : [u'c', u'z'] ,
u'禕' : [u'y'] ,
u'趝' : [u'j'] ,
u'䢟' : [u'y'] ,
u'產' : [u'c'] ,
u'䐬' : [u'c'] ,
u'悯' : [u'm'] ,
u'鄺' : [u'k'] ,
u'尼' : [u'n'] ,
u'碿' : [u's'] ,
u'資' : [u'z'] ,
u'䯉' : [u'y'] ,
u'瑌' : [u'r'] ,
u'䝖' : [u'z'] ,
u'揙' : [u'b'] ,
u'彦' : [u'y'] ,
u'篩' : [u's'] ,
u'迱' : [u't'] ,
u'䫳' : [u'd'] ,
u'睶' : [u'c'] ,
u'氇' : [u'l'] ,
u'鎎' : [u'x'] ,
u'㼑' : [u'l'] ,
u'庐' : [u'l'] ,
u'蠟' : [u'l'] ,
u'圡' : [u't'] ,
u'皠' : [u'c'] ,
u'㦪' : [u'x'] ,
u'漱' : [u's'] ,
u'銸' : [u'z'] ,
u'㸻' : [u's'] ,
u'冺' : [u'm'] ,
u'證' : [u'z'] ,
u'噋' : [u'k', u't'] ,
u'槊' : [u's'] ,
u'湛' : [u'c', u'z', u't', u'd', u'j'] ,
u'闢' : [u'p'] ,
u'僤' : [u'd'] ,
u'詳' : [u'y', u'x'] ,
u'䥵' : [u'x'] ,
u'棴' : [u'f'] ,
u'檉' : [u'c'] ,
u'㞋' : [u'n'] ,
u'锌' : [u'x'] ,
u'昖' : [u'y'] ,
u'供' : [u'g'] ,
u'縦' : [u'c', u'z'] ,
u'枫' : [u'f'] ,
u'匸' : [u'x'] ,
u'羻' : [u'q'] ,
u'㲽' : [u'x', u'r', u'n'] ,
u'騾' : | |
46, default: 7)
:param int n_rolling_volatility: number of days for Rolling n-day volatility- (statId: 34, default: 7)
:param int num_sim_monte_carlo: number of simulations - (statId: 62, default: 1000)
:param str period_type: Quarter (Q), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in () -Carries out stats on either daily, monthly, annually or quarterly dates (default: 'D')
:param float risk_free_alpha: risk free val alpha - (statId: 52, default: 0)
:param float risk_free_sharpe: risk free val sharpe- (statId: 49, default: 0)
:param float risk_free_sortino: risk free val sortino - (statId: 56, default: 0)
:param float risk_free_treynor: risk free val treynor- (statId: 51, default: 0)
:param date start_date: start date
:param str stat: A stat type - /statistics endpoint to get types
:param float var_conf_interval: VaR Confidence Interval ( alpha ) i.entity 99, 95, etc - (statId: 40, default: 95)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_portfolio_performance_using_get_with_http_info(account_id, client_id, portfolio_id, portfolioid, **kwargs) # noqa: E501
else:
(data) = self.get_portfolio_performance_using_get_with_http_info(account_id, client_id, portfolio_id, portfolioid, **kwargs) # noqa: E501
return data
def get_portfolio_performance_using_get_with_http_info(self, account_id, client_id, portfolio_id, portfolioid, **kwargs): # noqa: E501
"""Portfolio Performance # noqa: E501
Get information on the performance of a portfolio using IRR (Internal Rate of Return). You must provide the unique portfolio_id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_portfolio_performance_using_get_with_http_info(account_id, client_id, portfolio_id, portfolioid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: Account Id -/account (required)
:param str client_id: Client Id -/client (required)
:param str portfolio_id: portfolio_id (required)
:param str portfolioid: Portfolio Id -/portoflio (required)
:param str active_premium_period: Q (quarterly), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in () - (statId: 19, default: 'D')
:param str annualized_return_period: Q (quarterly), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in () - (statId: 19, default: 'D')
:param str benchmark_id: Benchmark Id - benchmarkId or clientBenchmarkId -/benchmark
:param date end_date: end date
:param float hist_factor: Histogram factor- (statId: 39, default: 5)
:param float mar_down_side_deviation: minimum acceptable return for downside deviation - (statId: 58, default: 0)
:param float max_percentile_monte_carlo: max percentile for monte carlo, i.entity. 80 - (statId: 62, default: 95)
:param float mean_percentile_monte_carlo: mean percentile for monte carlo i.entity. 50- (statId: 62, default: 50)
:param float min_percentile_monte_carlo: min percentile for monte carlo i.entity. 20 - (statId: 62, default: 5)
:param int moving_average_n_day: number of days for moving average n-day - (statId: 18, default: 7)
:param int n_day_returns: number of days for Rolling n-day returns - (statId: 2, default: 7)
:param int n_path_monte_carlo: number of points for a simulation- (statId: 62, default: 100)
:param int n_rolling_max_drawdown: number of days for Rolling n-day max drawdown- (statId: 46, default: 7)
:param int n_rolling_volatility: number of days for Rolling n-day volatility- (statId: 34, default: 7)
:param int num_sim_monte_carlo: number of simulations - (statId: 62, default: 1000)
:param str period_type: Quarter (Q), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in () -Carries out stats on either daily, monthly, annually or quarterly dates (default: 'D')
:param float risk_free_alpha: risk free val alpha - (statId: 52, default: 0)
:param float risk_free_sharpe: risk free val sharpe- (statId: 49, default: 0)
:param float risk_free_sortino: risk free val sortino - (statId: 56, default: 0)
:param float risk_free_treynor: risk free val treynor- (statId: 51, default: 0)
:param date start_date: start date
:param str stat: A stat type - /statistics endpoint to get types
:param float var_conf_interval: VaR Confidence Interval ( alpha ) i.entity 99, 95, etc - (statId: 40, default: 95)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'client_id', 'portfolio_id', 'portfolioid', 'active_premium_period', 'annualized_return_period', 'benchmark_id', 'end_date', 'hist_factor', 'mar_down_side_deviation', 'max_percentile_monte_carlo', 'mean_percentile_monte_carlo', 'min_percentile_monte_carlo', 'moving_average_n_day', 'n_day_returns', 'n_path_monte_carlo', 'n_rolling_max_drawdown', 'n_rolling_volatility', 'num_sim_monte_carlo', 'period_type', 'risk_free_alpha', 'risk_free_sharpe', 'risk_free_sortino', 'risk_free_treynor', 'start_date', 'stat', 'var_conf_interval'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_portfolio_performance_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_portfolio_performance_using_get`") # noqa: E501
# verify the required parameter 'client_id' is set
if ('client_id' not in params or
params['client_id'] is None):
raise ValueError("Missing the required parameter `client_id` when calling `get_portfolio_performance_using_get`") # noqa: E501
# verify the required parameter 'portfolio_id' is set
if ('portfolio_id' not in params or
params['portfolio_id'] is None):
raise ValueError("Missing the required parameter `portfolio_id` when calling `get_portfolio_performance_using_get`") # noqa: E501
# verify the required parameter 'portfolioid' is set
if ('portfolioid' not in params or
params['portfolioid'] is None):
raise ValueError("Missing the required parameter `portfolioid` when calling `get_portfolio_performance_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
if 'client_id' in params:
path_params['client_id'] = params['client_id'] # noqa: E501
if 'portfolio_id' in params:
path_params['portfolio_id'] = params['portfolio_id'] # noqa: E501
if 'portfolioid' in params:
path_params['portfolioid'] = params['portfolioid'] # noqa: E501
query_params = []
if 'active_premium_period' in params:
query_params.append(('active_premium_period', params['active_premium_period'])) # noqa: E501
if 'annualized_return_period' in params:
query_params.append(('annualized_return_period', params['annualized_return_period'])) # noqa: E501
if 'benchmark_id' in params:
query_params.append(('benchmark_id', params['benchmark_id'])) # noqa: E501
if 'end_date' in params:
query_params.append(('end_date', params['end_date'])) # noqa: E501
if 'hist_factor' in params:
query_params.append(('hist_factor', params['hist_factor'])) # noqa: E501
if 'mar_down_side_deviation' in params:
query_params.append(('mar_down_side_deviation', params['mar_down_side_deviation'])) # noqa: E501
if 'max_percentile_monte_carlo' in params:
query_params.append(('max_percentile_monte_carlo', params['max_percentile_monte_carlo'])) # noqa: E501
if 'mean_percentile_monte_carlo' in params:
query_params.append(('mean_percentile_monte_carlo', params['mean_percentile_monte_carlo'])) # noqa: E501
if 'min_percentile_monte_carlo' in params:
query_params.append(('min_percentile_monte_carlo', params['min_percentile_monte_carlo'])) # noqa: E501
if 'moving_average_n_day' in params:
query_params.append(('moving_average_n_day', params['moving_average_n_day'])) # noqa: E501
if 'n_day_returns' in params:
query_params.append(('n_day_returns', params['n_day_returns'])) # noqa: E501
if 'n_path_monte_carlo' in params:
query_params.append(('n_path_monte_carlo', params['n_path_monte_carlo'])) # noqa: E501
if 'n_rolling_max_drawdown' in params:
query_params.append(('n_rolling_max_drawdown', params['n_rolling_max_drawdown'])) # noqa: E501
if 'n_rolling_volatility' in params:
query_params.append(('n_rolling_volatility', params['n_rolling_volatility'])) # noqa: E501
if 'num_sim_monte_carlo' in params:
query_params.append(('num_sim_monte_carlo', params['num_sim_monte_carlo'])) # noqa: E501
if 'period_type' in params:
query_params.append(('period_type', params['period_type'])) # noqa: E501
if 'risk_free_alpha' in params:
query_params.append(('risk_free_alpha', params['risk_free_alpha'])) # noqa: E501
if 'risk_free_sharpe' in params:
query_params.append(('risk_free_sharpe', params['risk_free_sharpe'])) # noqa: E501
if 'risk_free_sortino' in params:
query_params.append(('risk_free_sortino', params['risk_free_sortino'])) # noqa: E501
if 'risk_free_treynor' in params:
query_params.append(('risk_free_treynor', params['risk_free_treynor'])) # noqa: E501
if 'start_date' in params:
query_params.append(('start_date', params['start_date'])) # noqa: E501
if 'stat' in params:
query_params.append(('stat', params['stat'])) # noqa: E501
if 'var_conf_interval' in params:
query_params.append(('var_conf_interval', params['var_conf_interval'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/portfolio/{portfolio_id}/performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_security_performance_using_get(self, security_id, **kwargs): # noqa: E501
"""Security Performance # noqa: E501
Get performance statistics for a security using TWR (Time Weighted Return). You must provide the unique security_id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_security_performance_using_get(security_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str security_id: security_id (required)
:param str active_premium_period: Q (quarterly), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in () - (statId: 19, default: 'D')
:param str annualized_return_period: Q (quarterly), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in () - (statId: 19, default: 'D')
:param str bench_ticker: Bench Ticker for security - (default: ^GSPC)
:param str benchmark_id: benchmark_id
:param date end_date: Ending parameter for time window
:param float hist_factor: Histogram factor- (statId: 39, default: 5)
:param float mar_down_side_deviation: minimum acceptable return for downside deviation - (statId: 58, default: 0)
:param float max_percentile_monte_carlo: max percentile for monte carlo, i.entity. 80 - | |
<filename>abps/abps_runners.py
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Train and Eval DQN on Atari environments.
Training and evaluation proceeds alternately in iterations, where each
iteration consists of a 1M frame training phase followed by a 500K frame
evaluation phase. In the literature, some papers report averages of the train
phases, while others report averages of the eval phases.
This example is configured to use dopamine.atari.preprocessing, which, among
other things, repeats every action it receives for 4 frames, and then returns
the max-pool over the last 2 frames in the group. In this example, when we
refer to "ALE frames" we refer to the frames before the max-pooling step (i.e.
the raw data available for processing). Because of this, many of the
configuration parameters (like initial_collect_steps) are divided by 4 in the
body of the trainer (e.g. if you want to evaluate with 400 frames in the
initial collection, you actually only need to .step the environment 100 times).
For a good survey of training on Atari, see Machado, et al. 2017:
https://arxiv.org/pdf/1709.06009.pdf.
To run:
```bash
tf_agents/agents/dqn/examples/v1/train_eval_atari \
--root_dir=$HOME/atari/pong \
--atari_roms_path=/tmp
--alsologtostderr
```
Additional flags are available such as `--replay_buffer_capacity` and
`--n_step_update`.
"""
import abc
import copy
import gc
import json
import os
from absl import flags
from absl import logging
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from tf_agents.environments import batched_py_environment
from tf_agents.environments import parallel_py_environment
from tf_agents.environments import suite_atari
from tf_agents.eval import metric_utils
from tf_agents.metrics import py_metrics
from tf_agents.networks import q_network
from tf_agents.policies import py_tf_policy
from tf_agents.policies import random_py_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from tf_agents.utils import timer
from abps import hparam as hparam_lib
from abps import new_pymetrics
from abps import py_hashed_replay_buffer
from abps.agents.dqn import dqn_agent
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('game_name', 'Pong', 'Name of Atari game to run.')
flags.DEFINE_string(
'eval_agents', 'worker_0',
'names of the agents the evaluator will evalutate, multiple agent names should be separated by comma'
)
flags.DEFINE_string(
'train_agents', None,
'names of the agents the trainer will train, multiple agent names should be separated by comma'
)
flags.DEFINE_string(
'architect_prob', None,
'probability over each type of architecture being selected as initial architecture'
)
flags.DEFINE_string('select_policy_way', 'random',
'Way to select behavior policy')
flags.DEFINE_string(
'hparam_path', None,
'JSON file that contains hyperparameters and name for each agent')
flags.DEFINE_string('dqn_type', 'dqn', 'type of dqn agent')
flags.DEFINE_string('pbt_low', None, 'how to choose low value agents')
flags.DEFINE_string('pbt_high', None, 'how to choose high value agents')
flags.DEFINE_integer('num_iterations', None,
'Number of train/eval iterations to run.')
flags.DEFINE_integer(
'initial_collect_steps', None,
'Number of frames to ALE frames to process before '
'beginning to train. Since this is in ALE frames, there '
'will be initial_collect_steps/4 items in the replay '
'buffer when training starts.')
flags.DEFINE_integer('replay_buffer_capacity', None,
'Maximum number of items to store in the replay buffer.')
flags.DEFINE_integer(
'train_steps_per_iteration', None,
'Number of ALE frames to run through for each iteration '
'of training.')
flags.DEFINE_integer(
'n_step_update', None, 'The number of steps to consider '
'when computing TD error and TD loss.')
flags.DEFINE_integer(
'eval_episode_per_iteration', None,
'Number of ALE frames to run through for each iteration '
'of evaluation.')
flags.DEFINE_integer(
'eval_interval_secs', None,
'interval of waiting time for evaluator to detect new ckpt')
flags.DEFINE_integer('epsilon_decay_selection', 400,
'Period over which to decay epsilon, for Bandit')
flags.DEFINE_integer('update_policy_iteration', 10,
'number of train episode between change policy')
flags.DEFINE_integer('eval_parallel_size', None,
'number of process used for parallelization')
flags.DEFINE_integer('num_worker', None, 'number of workers')
flags.DEFINE_integer('pbt_period', 10, 'number of abps runs between pbt')
flags.DEFINE_integer('bandit_buffer_size', None,
'size of the buffer window size')
flags.DEFINE_float('eval_epsilon_greedy', 0.0,
'epsilon for the policy when doing evaluation')
flags.DEFINE_float('learning_rate', None, 'Learning rate')
flags.DEFINE_float('ucb_coeff', 5.0, 'coefficient for UCB in best online')
flags.DEFINE_float('bandit_ucb_coeff', 5.0, 'coefficient for UCB in bandit')
flags.DEFINE_float('pbt_percent_low', 0.2, 'percent of agents to be replaced')
flags.DEFINE_float('pbt_percent_top', 0.4, 'percent of agents as good')
flags.DEFINE_boolean('enable_functions', False, '')
flags.DEFINE_boolean('adjust_metric', False, '')
flags.DEFINE_boolean('is_eval', False, 'is this run a evaluator')
flags.DEFINE_boolean('pbt', True, 'if or not using pbt')
flags.DEFINE_boolean(
'online_eval_use_train', True,
'when doing online eval for policy selection whether or not to use epsilon greedy'
)
flags.DEFINE_boolean(
'create_hparam', False,
'whether or not create hparam when no hparam file is found')
FLAGS = flags.FLAGS
# AtariPreprocessing runs 4 frames at a time, max-pooling over the last 2
# frames. We need to account for this when computing things like update
# intervals.
ATARI_FRAME_SKIP = 4
def softmax(q_table):
  """Compute the softmax of ``q_table`` over its first axis.

  The maximum score is subtracted before exponentiating so that large
  Q-values do not overflow ``np.exp`` (the shift cancels in the ratio, so
  the result is mathematically identical to the naive formulation).

  Args:
    q_table: array-like of scores.

  Returns:
    ``np.ndarray`` of the same shape whose entries along axis 0 sum to 1.
  """
  shifted = np.asarray(q_table) - np.max(q_table)
  exps = np.exp(shifted)
  return exps / np.sum(exps, axis=0)
def sigmoid(x, coeff=1, truncate=1):
  """Logistic function ``1 / (1 + exp(-coeff * x))`` clamped at ``truncate``.

  Args:
    x: numpy array of inputs.
    coeff: steepness coefficient applied to ``x``.
    truncate: upper bound; probabilities above it are clamped in place.

  Returns:
    Array of probabilities, each at most ``truncate``.
  """
  decay = np.exp(-coeff * x)
  prob = 1.0 / (1.0 + decay)
  clipped = prob > truncate
  prob[clipped] = truncate
  return prob
def write_policy_step(old_steps, new_steps, step_index):
  """Copy the fields of ``new_steps`` into slot ``step_index`` of ``old_steps``.

  Mutates ``old_steps`` in place (its fields are indexable containers) and
  returns it for call-chaining convenience.
  """
  for field in ('step_type', 'reward', 'discount', 'observation'):
    getattr(old_steps, field)[step_index] = getattr(new_steps, field)
  return old_steps
def perturb(num, low=0.95, high=1.05):
  """Randomly scale ``num`` by a factor drawn uniformly from ``[low, high)``.

  Returns:
    The scaled value truncated to ``int``.
  """
  factor = np.random.uniform(low, high)
  return int(num * factor)
def unstack_time_steps(stack_timesteps):
  """Split stacked time-step components into a list of single TimeSteps.

  ``stack_timesteps`` iterates over the stacked component arrays
  (step_type, reward, discount, observation); zipping them transposes the
  layout so each resulting tuple holds one step's worth of components.
  """
  components = list(stack_timesteps)
  return [ts.TimeStep(*parts) for parts in zip(*components)]
# Fix the place holder.
def get_available_gpus():
  # Placeholder stub: always reports that no GPUs are available.
  # TODO(review): wire up real device discovery or remove callers' reliance
  # on this returning an empty list.
  return []
def change_from_last_to_mid(time_step):
  """Re-wrap a time step as a MID (transition) step via ``ts.transition``.

  Only the observation and reward are kept; the step type is replaced, which
  lets an episode-terminating step be treated as a regular transition.
  """
  return ts.transition(time_step.observation, time_step.reward)
def add_summary(file_writer, tag, value, step):
  """Write a single scalar TF summary ``tag=value`` at ``step``."""
  summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
  file_writer.add_summary(summary, step)
def write_csv(outdir, tag, value, step, iteration):
  """Append a tab-separated ``iteration<TAB>step<TAB>value`` row to a per-tag file.

  The file lives under ``outdir`` and is named ``tag`` plus the literal
  suffix ``'%r=3.2:sl=8M'``.
  NOTE(review): that suffix looks like an internal file-system
  replication/size attribute spec baked into the name — confirm it is
  intentional before changing.
  """
  with tf.gfile.GFile(os.path.join(outdir, tag + '%r=3.2:sl=8M'),
                      'a+') as writer:
    # Strings are written verbatim; numeric values are formatted as floats.
    if isinstance(value, str):
      writer.write('%d\t%d\t%s\n' % (iteration, step, value))
    else:
      writer.write('%d\t%d\t%f\n' % (iteration, step, value))
class AtariQNetwork(q_network.QNetwork):
  """QNetwork subclass that divides observations by 255."""

  def call(self, observation, step_type=None, network_state=None):
    # Cast uint8 pixels to float32 and rescale to [0, 1] before delegating
    # to the parent QNetwork.
    state = tf.cast(observation, tf.float32)
    # We divide the grayscale pixel values by 255 here rather than storing
    # normalized values because uint8s are 4x cheaper to store than float32s.
    state = state / 255
    return super(AtariQNetwork, self).call(
        state, step_type=step_type, network_state=network_state)
def convert_list_to_tuple(orig_list):
  """Recursively convert nested lists into nested tuples.

  Non-list values (including strings and existing tuples) pass through
  unchanged.
  """
  if not isinstance(orig_list, list):
    return orig_list
  return tuple(convert_list_to_tuple(item) for item in orig_list)
def log_metric(metric, prefix):
  """Log ``<prefix>/<metric.name> = <metric.result()>`` at INFO level."""
  tag = common.join_scope(prefix, metric.name)
  logging.info('%s', '{0} = {1}'.format(tag, metric.result()))
def game_over(env):
  """Return the game-over flag(s) of a (possibly batched) environment.

  For a single-env batch returns one bool; otherwise a list of bools.
  NOTE(review): the single-env branch reads ``env.envs`` while the batched
  branch reads ``env._envs`` — confirm both attributes exist on the wrapped
  environment type, since the asymmetry looks accidental.
  """
  if env._num_envs == 1:  # pylint: disable=protected-access
    return env.envs[0].game_over
  else:
    return [e.game_over for e in env._envs]  # pylint: disable=protected-access
@gin.configurable
class Runner(object):
"""Train and evaluate DQN on Atari."""
def __init__( # pylint: disable=dangerous-default-value
self,
root_dir,
env_name,
max_episode_frames=108000, # ALE frames
terminal_on_life_loss=False,
conv_layer_params=[(32, (8, 8), 4), (64, (4, 4), 2), (64, (3, 3), 1)],
fc_layer_params=(512,),
# Params for collect
epsilon_greedy=0.01,
epsilon_decay_period=1000000, # ALE frames
# Params for train
update_period=16, # ALE frames
target_update_tau=1.0,
target_update_period=32000, # ALE frames
learning_rate=2.5e-4,
n_step_update=1,
gamma=0.99,
reward_scale_factor=1.0,
gradient_clipping=None,
enable_functions=False,
# Params for checkpoints, summaries, and logging
log_interval=1000,
summary_interval=1000,
debug_summaries=False,
summarize_grads_and_vars=False,
eval_metrics_callback=None,
max_ckpt=200,
hparam_path=None,
dqn_type='dqn',
use_gpu=True,
freeze_before_select=False,
num_worker=2,
architect_prob=[0.2, 0.2, 0.2, 0.4],
is_eval=False,
create_hparam=False,
):
"""A Base Runner class for multi-agent ABPS training.
Args:
root_dir: Directory to write log files to.
env_name: Fully-qualified name of the Atari environment (i.e. Pong-v0).
max_episode_frames: Maximum length of a single episode, in ALE frames.
terminal_on_life_loss: Whether to simulate an episode termination when a
life is lost.
conv_layer_params: Params for convolutional layers of QNetwork.
fc_layer_params: Params for fully connected layers of QNetwork.
epsilon_greedy: Final epsilon value to decay to for training.
epsilon_decay_period: Period over which to decay epsilon, from 1.0 to
epsilon_greedy (defined above).
update_period: Run a train operation every update_period ALE frames.
target_update_tau: Coeffecient for soft target network updates (1.0 ==
hard updates).
target_update_period: Period, in ALE frames, to copy the live network to
the target network.
learning_rate: RMS optimizer learning rate.
n_step_update: The number of steps to consider when computing TD error and
TD loss. Applies standard single-step updates when set to 1.
gamma: Discount for future rewards.
reward_scale_factor: Scaling factor for rewards.
gradient_clipping: Norm length to clip gradients.
enable_functions: Enable functions.
log_interval: Log stats to the terminal every log_interval training steps.
summary_interval: Write TF summaries every summary_interval training
steps.
debug_summaries: If True, write additional summaries for debugging (see
dqn_agent for which summaries are written).
summarize_grads_and_vars: Include gradients in summaries.
eval_metrics_callback: A callback function that takes (metric_dict,
global_step) as parameters. Called after every eval with the results of
the evaluation.
max_ckpt: Max ckpt.
hparam_path: Path to the JSON file that contains hyperparameters for each
individual agent. Tunable hyperparams including:
epsilon_greedy,epsilon_decay_period,target_update_tau,target_update_period.
If not speicified in the JSON the agent will use arguments passed in
_init() as default values for each hparam. learning_rate=2.5e-4,
dqn_type: A string specifying if dqn or double dqn is used
use_gpu: whether or not to use GPU
freeze_before_select: whether to freeze the model parameters while collect
data
num_worker: Number of workers.
architect_prob: | |
= [{'volume_id': '235'}]
validate(self.server, group_schemas.server)
self.server.pop('block_device_mapping')
self.server['block_device_mapping_v2'] = [{'volume_id': '235'}]
validate(self.server, group_schemas.server)
def test_blank_image(self):
    """
    invalidates if imageRef is just whitespace
    """
    # a whitespace-only image ref must not satisfy the schema
    self.server['imageRef'] = ' '
    self.assertRaisesRegexp(ValidationError, "is not of type",
                            validate, self.server, group_schemas.server)

def test_invalid_flavor(self):
    """
    invalidates if flavorRef is not a string
    """
    # wrong type: integer instead of string
    self.server['flavorRef'] = 3
    self.assertRaisesRegexp(ValidationError, "3 is not of type 'string'",
                            validate, self.server, group_schemas.server)

def test_empty_flavor(self):
    """
    invalidates if flavorRef is an empty string
    """
    # empty string fails the minimum-length constraint
    self.server['flavorRef'] = ''
    self.assertRaisesRegexp(ValidationError, "'' is too short",
                            validate, self.server, group_schemas.server)

def test_blank_flavor(self):
    """
    invalidates if flavorRef is just whitespace
    """
    # whitespace-only flavor ref fails the schema's pattern match
    self.server['flavorRef'] = ' '
    self.assertRaisesRegexp(ValidationError, "does not match",
                            validate, self.server, group_schemas.server)
def test_invalid_personality_object(self):
    """
    Invalidates if personality contains object instead of array
    """
    self.server['personality'] = {'b': 'lah'}
    self.assertRaisesRegexp(ValidationError, "{'b': 'lah'} is not of type 'array'",
                            validate, self.server, group_schemas.server)

def test_invalid_personality_no_path(self):
    """
    Invalidates if personality item does not contain path
    """
    del self.server['personality'][0]['path']
    self.assertRaisesRegexp(ValidationError, "'path' is a required property",
                            validate, self.server, group_schemas.server)

def test_invalid_personality_path_not_string(self):
    """
    Invalidates if personality path is not string
    """
    self.server['personality'][0]['path'] = 4
    self.assertRaisesRegexp(ValidationError, "4 is not of type 'string'",
                            validate, self.server, group_schemas.server)

def test_invalid_personality_path_exceeds_255(self):
    """
    Invalidates if personality path is > 255 chars
    """
    # 300 characters: just over the 255-character limit
    self.server['personality'][0]['path'] = 'abc' * 100
    self.assertRaisesRegexp(ValidationError, "'{}' is too long".format('abc' * 100),
                            validate, self.server, group_schemas.server)

def test_invalid_personality_contents_not_string(self):
    """
    Invalidates if personality item contents is not a string
    """
    self.server['personality'][0]['contents'] = 4
    self.assertRaisesRegexp(ValidationError, "4 is not of type 'string'",
                            validate, self.server, group_schemas.server)

def test_invalid_personality_no_contents(self):
    """
    Invalidates if personality item does not contain contents
    """
    del self.server['personality'][0]['contents']
    self.assertRaisesRegexp(ValidationError, "'contents' is a required property",
                            validate, self.server, group_schemas.server)
class StackLaunchConfigTestCase(SynchronousTestCase):
    """Verify correctness of JSON schema for launch_stack launch configs."""

    def setUp(self):
        """Save a config to modify in tests"""
        self.examples = group_examples.launch_stack_config()
        self.stack_config = self.examples['all_options']['args']['stack']

    def test_valid_examples_validate(self):
        """The launch stack config examples all validate."""
        for example in group_examples.launch_stack_config().values():
            validate(example, group_schemas.launch_config)

    def test_invalid_missing_template_and_template_url(self):
        """The config must have either template or template_url defined."""
        # 'all_options' contains template; deleting it leaves neither
        del self.stack_config['template']
        self.assertRaises(ValidationError, validate,
                          self.stack_config, group_schemas.stack)

    def test_invalid_both_template_and_template_url(self):
        """The config can't have both template and template_url defined."""
        # borrow a template_url from the url-based example to create conflict
        min_with_url = self.examples['minimal_with_url']['args']['stack']
        self.stack_config['template_url'] = min_with_url['template_url']
        self.assertRaises(ValidationError, validate,
                          self.stack_config, group_schemas.stack)

    def test_invalid_extra_property(self):
        """The config should not allow additional properties."""
        self.stack_config['foobarbaz'] = 'asdf'
        self.assertRaises(ValidationError, validate,
                          self.stack_config, group_schemas.stack)
class ScalingPolicyTestCase(SynchronousTestCase):
    """
    Simple verification that the JSON schema for scaling policies is correct.
    """

    def setUp(self):
        """
        Store copies of schedule type policies
        """
        # examples 3 and 4 are the 'at'-based and cron-based schedule policies
        self.at_policy = deepcopy(group_examples.policy()[3])
        self.cron_policy = deepcopy(group_examples.policy()[4])

    def test_schema_valid(self):
        """
        The schema itself is a valid Draft 3 schema
        """
        Draft3Validator.check_schema(group_schemas.policy)

    def test_valid_examples_validate(self):
        """
        The scaling policy examples all validate.
        """
        for example in group_examples.policy():
            validate(example, group_schemas.policy)

    def test_either_change_or_changePercent_or_desiredCapacity(self):
        """
        A scaling policy can have one of the attribute "change" or "changePercent"
        or "desiredCapacity", but not any 2 or 3 of them
        """
        _invalid = {
            "name": "meh",
            "cooldown": 5,
            "type": "webhook"
        }
        # every pairwise and the triple combination must be rejected
        for props in [{'change': 3, 'changePercent': 23},
                      {'change': 3, 'desiredCapacity': 23},
                      {'changePercent': 3, 'desiredCapacity': 23},
                      {'change': 4, 'changePercent': 3, 'desiredCapacity': 23}]:
            invalid = _invalid.copy()
            invalid.update(props)
            self.assertRaisesRegexp(
                ValidationError, 'not of type',
                validate, invalid, group_schemas.policy)
def test_change_zero(self):
    """
    A scaling policy cannot have 'change' as 0
    (also checks 'changePercent' as 0.0 on the same fixture)
    """
    invalid = {
        "name": "meh",
        "cooldown": 5,
        "type": "webhook",
        "change": 0
    }
    self.assertRaisesRegexp(
        ValidationError, 'is disallowed for 0',
        validate, invalid, group_schemas.policy)
    # reuse the fixture to check changePercent == 0.0 is also rejected
    del invalid['change']
    invalid['changePercent'] = 0.0
    self.assertRaisesRegexp(
        ValidationError, 'is disallowed for 0.0',
        validate, invalid, group_schemas.policy)

def test_changepercent_zero(self):
    """
    A scaling policy cannot have 'changePercent' as 0.0
    """
    invalid = {
        "name": "meh",
        "cooldown": 5,
        "type": "webhook",
        "changePercent": 0.0
    }
    self.assertRaisesRegexp(
        ValidationError, 'is disallowed for 0.0',
        validate, invalid, group_schemas.policy)

def test_desired_zero(self):
    """
    A scaling policy CAN have 'desiredCapacity' as 0
    """
    valid = {
        "name": "meh",
        "cooldown": 5,
        "type": "webhook",
        "desiredCapacity": 0
    }
    validate(valid, group_schemas.policy)
def test_desired_negative(self):
    """
    A scaling policy cannot have a negative "desiredCapacity" attribute
    """
    invalid = {
        "name": "aname",
        "desiredCapacity": -5,
        "cooldown": 5,
        "type": "webhook"
    }
    self.assertRaisesRegexp(
        ValidationError, 'is less than the minimum of 0',
        validate, invalid, group_schemas.policy)

def test_no_other_properties_valid(self):
    """
    Scaling policy can only have the following properties: name,
    change/changePercent/desiredCapacity, cooldown, type, and capabilityUrls.
    Any other property results in an error.
    """
    invalid = {
        "name": "aname",
        "change": 5,
        "cooldown": 5,
        "type": "webhook",
        "poofy": False
    }
    self.assertRaisesRegexp(
        ValidationError, 'is not of type',
        validate, invalid, group_schemas.policy)

def test_type_set(self):
    """
    Scaling policy can only have the following properties: name,
    change/changePercent/desiredCapacity, cooldown, type, and capabilityUrls.
    Ensure that if the type is not present, that's an error.
    """
    invalid = {
        "name": "aname",
        "change": 5,
        "cooldown": 5
    }
    self.assertRaisesRegexp(
        ValidationError, "'type' is a required property",
        validate, invalid, group_schemas.policy)

def test_type_valid(self):
    """
    Scaling policy have a type value that has enum validation.
    Make sure it works.
    """
    # "blah" is not a member of the type enum
    invalid = {
        "name": "aname",
        "change": 5,
        "cooldown": 5,
        "type": "blah"
    }
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)
def test_invalid_name_does_not_validate(self):
    """
    The name must contain something other than whitespace.
    """
    invalid = {
        "name": "",
        "change": 10,
        "cooldown": 5,
        "type": "webhook"
    }
    for invalid_name in ('', ' ', ' '):
        invalid['name'] = invalid_name
        self.assertRaisesRegexp(
            ValidationError, 'does not match', validate, invalid,
            group_schemas.policy)

def test_min_cooldown(self):
    """
    Cooldown must be >= 0
    """
    # NOTE(review): this fixture uses cooldown=5 (valid) and an empty name,
    # so the 'does not match' failure comes from the blank name pattern, not
    # from the cooldown bound — the docstring's claim is never exercised.
    # Consider name="ok", cooldown=-1 and asserting the minimum error.
    invalid = {
        "name": "",
        "change": -1,
        "cooldown": 5,
        "type": "webhook"
    }
    self.assertRaisesRegexp(ValidationError, "does not match",
                            validate, invalid, group_schemas.policy)

def test_max_cooldown(self):
    """
    Cooldown must be <= group_schemas.MAX_COOLDOWN
    """
    # NOTE(review): the empty name here would fail the pattern match on its
    # own, so this may not actually prove the MAX_COOLDOWN bound is enforced.
    # Consider a valid name so the cooldown value is the only violation.
    invalid = {
        "name": "",
        "change": 10,
        "cooldown": group_schemas.MAX_COOLDOWN + 1,
        "type": "webhook"
    }
    self.assertRaisesRegexp(ValidationError, "does not match",
                            validate, invalid, group_schemas.policy)
def test_schedule_no_args(self):
    """
    Schedule policy must have 'args'
    """
    invalid = self.at_policy
    del invalid['args']
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)

def test_args_when_no_schedule(self):
    """
    args can be there only when type is 'schedule'
    """
    # keep the schedule 'args' but flip the type to webhook
    invalid = self.at_policy
    invalid['type'] = 'webhook'
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)

def test_schedule_no_change(self):
    """
    Schedule policy must have 'change', 'changePercent' or 'desiredCapacity'
    """
    invalid = self.at_policy
    del invalid['changePercent']
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)

def test_excess_in_args(self):
    """
    Args cannot have anything other than 'at' or 'cron'
    """
    invalid = self.at_policy
    invalid['args']['junk'] = 2
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)

def test_only_one_in_args(self):
    """
    Args can have only one of 'at' or 'cron'; not both
    """
    # at_policy already has 'at'; adding 'cron' makes both present
    invalid = self.at_policy
    invalid['args']['cron'] = '* * * * *'
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)

def test_empty_args(self):
    """
    Args cannot be empty
    """
    invalid = self.at_policy
    invalid['args'] = {}
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)
def test_invalid_timestamp(self):
    """
    policy with invalid timestamp raises ``ValidationError``
    """
    invalid = self.at_policy
    invalid_dates = ['', 'junk']
    for invalid_date in invalid_dates:
        invalid['args']['at'] = invalid_date
        self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)

def test_only_date_timestamp(self):
    """
    policy with only date in timestamp raises ``ValidationError``
    """
    invalid = self.at_policy
    invalid['args']['at'] = '2012-10-10'
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)

def test_only_time_timestamp(self):
    """
    policy with only time in timestamp raises ``ValidationError``
    """
    invalid = self.at_policy
    invalid['args']['at'] = '11:25'
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)

def test_localtime_timestamp(self):
    """
    policy with localtime in timestamp raises ``ValidationError``
    """
    # ISO timestamp without a trailing 'Z' is treated as local time and rejected
    invalid = self.at_policy
    invalid['args']['at'] = '2012-10-20T11:25:00'
    self.assertRaisesRegexp(ValueError, 'Expecting Zulu-format UTC time',
                            group_schemas.validate_datetime, invalid['args']['at'])
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)

def test_past_timestamp(self):
    """
    policy with past date raises `ValidationError`
    """
    invalid = self.at_policy
    # yesterday, in Zulu format: well-formed but in the past
    past = datetime.utcnow() - timedelta(days=1)
    invalid['args']['at'] = past.isoformat() + 'Z'
    self.assertRaisesRegexp(ValidationError, 'must be in the future',
                            group_schemas.validate_datetime, invalid['args']['at'])
    self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)
def test_valid_UTC_timestamp(self):
    """
    policy with valid UTC timestamp validates
    """
    valid = self.at_policy
    # tomorrow, in Zulu format: well-formed and in the future
    future = datetime.utcnow() + timedelta(days=1)
    valid['args']['at'] = future.isoformat() + 'Z'
    group_schemas.validate_datetime(valid['args']['at'])
    validate(valid, group_schemas.policy)

def test_valid_cron(self):
    """
    policy with valid cron entry validates
    """
    valid_crons = ['* * * * *', '0-59 0-23 1-31 1-12 0-6', '00 9,16 * * *',
                   '00 02-11 * * *', '00 09-18 * * 1-5', '0 0 0 0 0']
    valid = self.cron_policy
    for valid_cron in valid_crons:
        valid['args']['cron'] = valid_cron
        validate(valid, group_schemas.policy)

def test_invalid_cron(self):
    """
    policy with invalid cron entry raises ``ValidationError``
    """
    # malformed field counts, out-of-range values, and garbage strings
    invalid_crons = ['', 'junk', '* * -32 * *', '-90 * * *', '* 0 * *',
                     '* * * * * *', '0 * * 0 * *', '* * * *', '* * * * * * * *',
                     '*12345', 'dfsdfdf', '- - - - -', '-090 * * * *', '* -089 * * *']
    invalid = self.cron_policy
    for invalid_cron in invalid_crons:
        invalid['args']['cron'] = invalid_cron
        self.assertRaises(ValidationError, validate, invalid, group_schemas.policy)
def | |
<filename>src/trw/train/metrics.py
from abc import ABC, abstractmethod
from typing import Dict, List, Optional
import numpy as np
from ..utils import to_value
from . import losses
from sklearn import metrics
import collections
import torch
from .analysis_plots import auroc
import torch.nn as nn
# TODO discard samples where weight is <= 0 for all the metrics
def fast_confusion_matrix(
        y: torch.Tensor,
        y_pred: torch.Tensor,
        num_classes: int,
        ignore_y_out_of_range: bool = False,
        device=torch.device('cpu')) -> torch.Tensor:
    """Compute a ``num_classes x num_classes`` confusion matrix.

    ``m[i, j]`` counts samples whose true class is ``i`` and predicted class
    is ``j`` — the same convention as :func:`sklearn.metrics.confusion_matrix`
    (for binary data: TN at ``[0, 0]``, FN at ``[1, 0]``, TP at ``[1, 1]``,
    FP at ``[0, 1]``).

    Args:
        y_pred: predicted labels (integer tensor), same shape as ``y``
        y: ground-truth labels (integer tensor)
        num_classes: the number of classes
        ignore_y_out_of_range: if `True`, samples whose label in ``y`` falls
            outside ``[0, num_classes)`` are dropped before counting
        device: device where to perform the calculation
    """
    assert y_pred.shape == y.shape
    truth = y.flatten().to(device)
    pred = y_pred.flatten().to(device)
    if ignore_y_out_of_range:
        keep = (truth >= 0) & (truth < num_classes)
        truth = truth[keep]
        pred = pred[keep]
    # Encode each (truth, pred) pair as one index into the flattened matrix,
    # then count all pairs in a single bincount pass.
    flat_indices = truth * num_classes + pred
    counts = torch.bincount(flat_indices, minlength=num_classes ** 2)
    return counts.reshape(num_classes, num_classes)
class Metric(ABC):
    """
    A metric base class

    Subclasses compute a per-batch partial result in ``__call__`` and later
    combine the collected partials in ``aggregate_metrics``.
    """

    @abstractmethod
    def __call__(self, outputs: Dict) -> Optional[Dict]:
        """
        Compute the partial metric for one batch.

        Args:
            outputs:
                the outputs of a batch
        Returns:
            a dictionary of metric names/values or None
        """
        pass

    @abstractmethod
    def aggregate_metrics(self, metric_by_batch: List[Dict]) -> Dict[str, float]:
        """
        Aggregate all the metrics into a consolidated metric.

        Args:
            metric_by_batch: a list of metrics, one for each batch
        Returns:
            a dictionary of result name and value
        """
        pass
class MetricLoss(Metric):
    """
    Extract the scalar ``loss`` value from a batch's outputs.
    """

    def __call__(self, outputs):
        # `loss` may legitimately be absent (e.g., evaluation-only outputs)
        loss = to_value(outputs.get('loss'))
        if loss is not None:
            return {'loss': float(loss)}
        return None

    def aggregate_metrics(self, metric_by_batch):
        """Average the per-batch losses; 0.0 when no batch reported a loss."""
        if not metric_by_batch:
            # guard against ZeroDivisionError when no batch produced a loss
            return {'loss': 0.0}
        total = sum(m['loss'] for m in metric_by_batch)
        return {'loss': total / len(metric_by_batch)}
class MetricClassificationBinaryAUC(Metric):
    """
    Calculate the Area under the Receiver operating characteristic (ROC) curve.

    For this, the output needs to provide an ``output_raw`` of shape [N, 2] (i.e., binary
    classification framed as a multi-class classification) or of shape [N, 1] (binary classification)
    """

    def __call__(self, outputs):
        truth = to_value(outputs.get('output_truth'))
        found = to_value(outputs.get('output_raw'))
        if truth is None or found is None:
            # data is missing
            return None
        if len(found.shape) != len(truth.shape) or found.shape[1] > 2:
            # dimensions are NOT of the expected shape
            return None
        if len(found.shape) > 2:
            # TODO: implement for N-dimensions! We probably can't keep everything in memory
            return None
        # AUC needs the full prediction set, so just collect the raw values
        # per batch and defer the computation to aggregation time
        return {
            'output_raw': found,
            'output_truth': truth,
        }

    def aggregate_metrics(self, metric_by_batch):
        """Concatenate all batches and report ``1 - AUC`` (lower is better)."""
        all_output_raw = [m['output_raw'] for m in metric_by_batch]
        all_output_raw = np.concatenate(all_output_raw)
        all_output_truth = [m['output_truth'] for m in metric_by_batch]
        all_output_truth = np.concatenate(all_output_truth)
        # last column holds the positive-class score for both [N, 1] and [N, 2]
        auc = auroc(all_output_truth, all_output_raw[:, -1])
        if np.isnan(auc):
            # degenerate case (e.g., only one class present): report worst score
            auc = 0.0
        return {'1-auc': 1.0 - auc}
class MetricClassificationError(Metric):
    """
    Calculate the ``1 - accuracy`` using the `output_truth` and `output`
    """

    def __call__(self, outputs):
        truth = to_value(outputs.get('output_truth'))
        found = to_value(outputs.get('output'))
        weights = to_value(outputs.get('weights'))
        if truth is None or found is None:
            # data is missing: this metric does not apply to the batch
            return None
        # BUG FIX: this shape check used to run *before* the None guard above,
        # raising AttributeError (None has no .shape) whenever either output
        # was absent instead of returning None as documented.
        assert found.shape == truth.shape
        if weights is not None:
            min_weight = weights.min()
            if min_weight <= 0:
                # if we have invalid indices (i.e., weights <= 0),
                # discard these samples
                valid_samples = np.where(weights > 0)
                truth = truth[valid_samples]
                found = found[valid_samples]
        return collections.OrderedDict([
            ('nb_trues', np.sum(found == truth)),
            ('total', truth.size),  # for multi-dimension, use the size! (e.g., patch discriminator, segmentation)
        ])

    def aggregate_metrics(self, metric_by_batch):
        """Combine the per-batch counts into a global error rate."""
        nb_trues = 0
        total = 0
        for m in metric_by_batch:
            nb_trues += m['nb_trues']
            total += m['total']
        return {'classification error': 1.0 - nb_trues / total}
class MetricSegmentationDice(Metric):
    """
    Calculate the average dice score of a segmentation map 'output_truth' and class
    segmentation logits 'output_raw'.

    Notes:
        * by default, nn.Sigmoid function will be applied on the output to force a range [0..1] of the output
        * the aggregation will aggregate all the foregrounds/backgrounds THEN calculate the dice (but NOT average
          of dices). Using this aggregation, it is possible to calculate the true dice on a partitioned input
          (e.g., 3D segmentations, we often use sub-volumes)
    """
    def __init__(
            self,
            dice_fn=losses.LossDiceMulticlass(
                normalization_fn=None,  # we use discrete values, not probabilities
                return_dice_by_class=True,
                smooth=0),
            aggregate_by: Optional[str] = None):
        """
        Args:
            dice_fn: the function to calculate the dice score of each class
            aggregate_by: if not None, the dice scores will be aggregated first by `aggregate_by`. This can be useful
                when the metrics is calculated from pieces of the input data and we want to calculate a dice per
                case
        """
        self.dice_fn = dice_fn
        self.aggregate_by = aggregate_by

    def __call__(self, outputs):
        """Compute the per-batch dice numerator/cardinality; the division happens in `aggregate_metrics`."""
        # keep the torch variable. We want to use GPU if available since it can
        # be slow use numpy for this
        truth = outputs.get('output_truth')
        found = outputs.get('output')
        raw = outputs.get('output_raw')
        assert raw is not None, 'missing value=`output_raw`'
        assert found is not None, 'missing value=`output`'
        assert truth is not None, 'missing value=`output_truth`'
        nb_classes = raw.shape[1]
        if self.aggregate_by is not None:
            aggregate_by = outputs.get(self.aggregate_by)
            assert aggregate_by is not None, f'cannot find the aggregate_by={self.aggregate_by} in batch! if using ' \
                                             f'`trw.train.OutputSegmentation`, make sure to set `sample_uid_name`' \
                                             f'appropriately'
        else:
            aggregate_by = None
        #assert found.min() >= 0, 'Unexpected value: `output` must be in range [0..1]'
        if found is None or truth is None:
            return None
        assert found.shape[1] == 1, 'output must have a single channel!'
        if raw.shape[1] > 1:
            # one hot encode the output
            found_one_hot = losses.one_hot(found[:, 0], nb_classes)
            # BUGFIX: the assertion message previously reported `len(found_one_hot)`
            # (the batch size) instead of the dimensionality that is being compared
            assert len(found_one_hot.shape) == len(truth.shape), f'expecting dim={len(truth.shape)}, ' \
                                                                 f'got={len(found_one_hot.shape)}'
            assert found_one_hot.shape[2:] == truth.shape[2:]
        else:
            # it is already one hot encoded for a binary classification!
            found_one_hot = found
        with torch.no_grad():
            numerator, cardinality = self.dice_fn(found_one_hot, truth)
        return {
            # sum the samples: we have to do this to support variably sized
            # batch size
            'numerator': to_value(numerator),
            'cardinality': to_value(cardinality),
            'aggregate_by': aggregate_by
        }

    @staticmethod
    def _aggregate_dices(metric_by_batch):
        """Sum numerators/cardinalities over ALL batches, then divide: true global dice per class."""
        eps = 1e-5  # avoid div by 0
        # aggregate all the patches at once to calculate the global dice (and not average of dices)
        numerator = metric_by_batch[0]['numerator'].copy()
        cardinality = metric_by_batch[0]['cardinality'].copy()
        assert len(numerator.shape) == 2, 'must be NxC matrix'
        assert numerator.shape == cardinality.shape
        numerator = numerator.sum(axis=0)
        cardinality = cardinality.sum(axis=0)
        for m in metric_by_batch[1:]:
            numerator += m['numerator'].sum(axis=0)
            cardinality += m['cardinality'].sum(axis=0)
        # calculate the dice score by class
        dice = numerator / (cardinality + eps)
        return dice

    @staticmethod
    def _aggregate_dices_by_uid(metric_by_batch):
        """Sum numerators/cardinalities per sample UID first, then average the per-UID dices."""
        eps = 1e-5  # avoid div by 0
        # group the dice's (numerator, denominator) by UID
        num_card_by_uid = {}
        for m in metric_by_batch:
            numerators = m['numerator']
            cardinalitys = m['cardinality']
            uids = m.get('aggregate_by')
            assert uids is not None
            assert len(numerators) == len(cardinalitys)
            assert len(numerators) == len(uids)
            for numerator, cardinality, uid in zip(numerators, cardinalitys, uids):
                numerator_cardinality = num_card_by_uid.get(uid)
                if numerator_cardinality is None:
                    num_card_by_uid[uid] = [numerator, cardinality]
                else:
                    numerator_cardinality[0] += numerator
                    numerator_cardinality[1] += cardinality
        # then calculate the average dice by UID
        dice_sum = 0
        for uid, (numerator, cardinality) in num_card_by_uid.items():
            # if cardinality[class] == 0, then numerator[class] == 0
            # so it is ok to just add `eps` when cardinality == 0 to avoid
            # div by 0
            dice = numerator / (cardinality + eps)
            dice_sum += dice
        return dice_sum / len(num_card_by_uid)

    def aggregate_metrics(self, metric_by_batch):
        """Return `1 - dice` per class plus the class-averaged `1 - dice` (worst value 1 when empty)."""
        nb_batches = len(metric_by_batch)
        if nb_batches > 0:
            if self.aggregate_by is None:
                dice = MetricSegmentationDice._aggregate_dices(metric_by_batch)
            else:
                dice = MetricSegmentationDice._aggregate_dices_by_uid(metric_by_batch)
            # to keep consistent with the other metrics
            # calculate the `1 - metric`
            one_minus_dice = 1 - dice
            r = collections.OrderedDict()
            for c in range(len(dice)):
                r[f'1-dice[class={c}]'] = one_minus_dice[c]
            r['1-dice'] = np.average(one_minus_dice)
            return r
        # empty, so assume the worst
        return {'1-dice': 1}
class MetricClassificationF1(Metric):
def __init__(self, average=None):
"""
Calculate the Multi-class ``1 - F1 score``.
Args:
average: one of ``binary``, ``micro``, ``macro`` or ``weighted`` or None. If ``None``, use
``binary`` if only 2 classes or ``macro`` if more than two classes
"""
self.average = average
self.max_classes = 0
def __call__(self, outputs):
output_raw = to_value(outputs.get('output_raw'))
if output_raw is None:
return None
if len(output_raw.shape) != 2:
return None
truth = to_value(outputs.get('output_truth'))
if | |
# scripts/ReachabilityAlgorithm.py
"""
This file contains the functions related to executing the PolyReach algorithm.
"""
import copy
import time
from typing import List, Tuple
from matplotlib import pyplot as plt
import numpy as np
import sympy
from sympy import Poly
from sympy.polys.polymatrix import PolyMatrix
import json
import scripts.set_representations as pz
from scripts.polyflow import PolyFlow, Domain
from scripts.misc_functions import timeit_measure
from scripts.dreal_error_bound import HigherOrderEstimationDreal
from scripts.bernstein_bound import BernsteinBound
class PolyReach:
""""
PolyReach object used to solve reachability problems of polynomial systems
...
Attributes
----------
alpha_base_list : List[float]
A list containing the the constant factors required for calculating the bloating factor
dim_n : int
Dimension of the differential equation
domain : polyflow.Domain
description of the domain of the reachability problem
flow_pipe : set_representations.Zonotope
Set representation which describes the trajectories of the interval [tk,tk+1]
from_dict : Boolean
Boolean whether the system is read from a json variable or not
polyflow : polyflow.Polyflow
Object containing the parameters + errorbound of Polyflow
pz_lifted : set_representations.PZonotopeList
Object representation the monomials of the lifted space at time step k
pz_poly_flow_observer : set_representations.AugPZonotope
Object representing the lifted state of polynomials (transformed coordinate system) at time step k
pz_projected : set_representations.AugPZonotope
Object representing the projected state at time step k + 1
scale_factor : float
Constant used for the coordinate system transformation
time_step : float
Difference in time between the time steps
z_projected : set_representations.Zonotope
Set representing the projected state at time step k + 1
Methods
---------
polyreach_from_dict(input_dict)
Alternative constructor. Creates a PolyReach object with a dictionary
to_dict
Returns a dictionary of Polyreach object containing attribute information
new_polyreach
Creates a new PolyReach object
init_alpha_base
Creates a list of factors used to determine the bloating factors
__str__
Returns a string with information of Polyreach object
define_dangerous_sets
Defines the dangerous sets of the reachability problem
simulate
executes the reachability algorithm
get_bloating_error
Get the bloating error at time step k
get_bloating_error2
Get the bloating error at time step k
create_flowpipe
Create the set representation of all trajectories of the time interval [tk, tk+1]
"""
bernstein_object: BernsteinBound
remainder_obj: HigherOrderEstimationDreal
time_step: float
scale_factor: float
remainder_smt_tol: List[float]
pz_lifted: pz.PZonotopeList
pz_projected: pz.AugPZonotope
pz_poly_flow_observer: pz.AugPZonotope
z_projected: pz.Zonotope
dim_n: int
domain: Domain
flow_pipe: pz.Zonotope
polyflow: PolyFlow
alpha_base_list: List[float]
extra_eig: float
polyflow_smt_tol: float
@timeit_measure
def __init__(self, *args, **kwargs):
    """
    Build a PolyReach instance.

    Two construction paths exist: a fresh problem built from a differential
    equation (delegated to ``new_polyreach``) or deserialization from a
    dictionary (delegated to ``polyreach_from_dict`` when
    ``kwargs['from_dict']`` is truthy).

    Parameters
    ----------
    args
        Positional problem description forwarded to ``new_polyreach``:
        differential equation, symbol tuple, domain grid, maximum Lie order,
        time step, relaxation factors and SMT tolerances.
    kwargs
        Recognized keys include ``name``, ``plot``, ``from_dict``,
        ``dangerous_sets`` plus everything ``new_polyreach`` /
        ``polyreach_from_dict`` accept (solver selection, allocated set
        descriptions, ...).
    """
    # Defaults; selected keys below may override them.
    self.dangerous_sets = []
    self.name = 'PolyReach'
    self.plot = False
    self.from_dict = False
    self.doi = None
    for attribute in ('name', 'plot'):
        if attribute in kwargs:
            setattr(self, attribute, kwargs[attribute])
    if 'from_dict' in kwargs:
        self.from_dict = kwargs['from_dict']
    if 'dangerous_sets' in kwargs:
        # interval descriptions -> zonotope representations
        self.dangerous_sets = [pz.Zonotope.from_interval_list(np.array(interval))
                               for interval in kwargs['dangerous_sets']]
    if self.from_dict:
        self.polyreach_from_dict(kwargs)
    else:
        self.new_polyreach(*args, **kwargs)
def polyreach_from_dict(self, input_dict: dict):
    """
    Alternative constructor: populate a PolyReach object from a dictionary.

    The dictionary is expected to carry the serialized problem description
    under the keys [polyflow, flow_pipe, z_projected, pz_projected,
    pz_lifted, pz_poly_flow_observer, alpha_base_list, time_step, dim_n,
    from_dict, scale_factor]; each item would be converted to its proper
    type before being set as an attribute.

    Parameters
    ----------
    input_dict
        Dictionary containing the information to construct the object.

    Raises
    ------
    NotImplementedError
        Deserialization has not been implemented yet.
    """
    raise NotImplementedError
def to_dict(self) -> dict:
    """Serialize the attributes listed below into one dictionary (marks the object as dict-built)."""
    self.from_dict = True
    serialized = {}
    for attribute in ('dim_n', 'polyflow', 'time_step', 'flow_pipe', 'z_projected',
                      'pz_projected', 'pz_poly_flow_observer', 'pz_lifted', 'name',
                      'remainder_smt_tol', 'alpha_base_list', 'from_dict', 'scale_factor'):
        serialized.update(to_json_el(self, attribute))
    return serialized
@staticmethod
def _create_polyflow_object(extra_eig, polyflow_smt_tol, scale_factor, differential_eq,
                            symbol_tuple, domain_description, lie_order, time_step, **kwargs):
    """Build the PolyFlow object, forwarding kwargs together with the tolerance/scaling settings."""
    # TODO Polyflow classmethod?
    forwarded = copy.deepcopy(kwargs)
    forwarded['extra_eig'] = extra_eig
    forwarded['polyflow_smt_tol'] = polyflow_smt_tol
    forwarded['scale_factor'] = scale_factor
    return PolyFlow(differential_eq, symbol_tuple, domain_description, lie_order,
                    time_step, **forwarded)
@staticmethod
def get_coordinate_scale_factor(time_step: float, lie_order: int, extra_eig: float) -> float:
""" Calculates the scale factor used for the coordinate transformation """
return (1 + extra_eig) ** -1 * (lie_order - 1) / time_step
@staticmethod
def _create_set_objects(dimension_projected, max_monomial_order, lie_order):
    """
    Pre-allocate the (polynomial) zonotope buffers that are reused every iteration.

    Returns the tuple
    (pz_lifted, pz_projected, pz_poly_flow_observer, z_projected, flow_pipe).
    All buffers are created with `is_empty=True`; their generator matrices are
    sized from the highest-order monomial so they can hold any iteration's data.
    """
    # Allocate memory for big (polynomial) zonotope variables
    pz_lifted = pz.PZonotopeList.generate_list(dimension_projected, max_monomial_order)
    # Create empty polynomial zonotope to store the projected state. This shape is equal to the highest monomial
    generators_max_monomial = pz_lifted.polynomial_zonotope_list[-1].get_generators()
    e_max_monomial = pz_lifted.polynomial_zonotope_list[-1].get_e()
    projected_polynomial_zonotope_shape = generators_max_monomial.shape
    pz_projected = pz.AugPZonotope(np.empty((dimension_projected, 1)),
                                   None,
                                   np.empty((dimension_projected, projected_polynomial_zonotope_shape[-1])),
                                   e_max_monomial[:, 1:], is_empty=True)
    # Lifted state in the polynomial-observer coordinates: `lie_order` stacked copies of the projected state
    pz_poly_flow_observer = pz.AugPZonotope(np.empty((dimension_projected * lie_order, 1)),
                                            None,
                                            np.empty((dimension_projected * lie_order,
                                                      projected_polynomial_zonotope_shape[-1])),
                                            e_max_monomial[:, 1:], is_empty=True)
    # Allocate memory of zonotope which contains the projected state + polyflow error bound
    generators_temp = np.empty((dimension_projected, projected_polynomial_zonotope_shape[1] + dimension_projected))
    z_projected = pz.Zonotope(np.empty((dimension_projected, 1)),
                              generators_temp, is_empty=True)
    # Allocate memory for flowpipe which exists of the following zonotopes
    # (projected set + bloating terms: 1 + 2 * dimension extra generator columns)
    flow_pipe = pz.Zonotope(np.empty((dimension_projected, 1)), np.empty((dimension_projected,
                                                                          generators_temp.shape[1] + 1
                                                                          + 2 * dimension_projected)),
                            is_empty=True)
    return pz_lifted, pz_projected, pz_poly_flow_observer, z_projected, flow_pipe
def new_polyreach(self, differential_eq: PolyMatrix, symbol_tuple: Tuple[sympy.symbols],
                  domain_description: np.ndarray, lie_order: int, **kwargs):
    """
    Initialize a fresh PolyReach problem: configure defaults, build the
    Polyflow approximation, the remainder estimators and the pre-allocated
    set buffers.

    Parameters
    ----------
    differential_eq
        Differential equation of the system
    symbol_tuple
        Symbolics used in the differential equation
    domain_description
        Description of the grid for the Polyflow optimization
    lie_order
        Maximum Lie order that is used for the optimization
    kwargs
        time_step
            Time step of the simulation
        extra_eig
            Relaxation factor for the polyflow optimization
        polyflow_smt_tol
            Relaxation factor for the SMT problem of the Polyflow errorbound (delta-weakening)
        remainder_smt_tol
            Relaxation factor for the SMT problem of the remainder (delta-weakening)
        smt_solver
            Name of the SMT solver; NOTE(review): assumed to always be present
            (a missing key raises KeyError) — confirm callers.
    """
    prop_defaults = {
        'time_step': 0.1,
        'scale_factor': 1.0,
        'extra_eig': 0.2,
        'polyflow_smt_tol': None,
        'remainder_smt_tol': None,
        'doi': [0, 1]
    }
    # Set variables with default argument; consume them from kwargs so they
    # are not forwarded to the PolyFlow constructor below
    for prop, default in prop_defaults.items():
        setattr(self, prop, kwargs.get(prop, default))
        if prop in kwargs.keys():
            kwargs.pop(prop)
    order_remainder = 1  # Lie-derivative order used for the remainder estimation
    self.dim_n = len(symbol_tuple)
    self.scale_factor = self.get_coordinate_scale_factor(self.time_step, lie_order, self.extra_eig)
    self.domain = Domain(domain_description)
    self.polyflow = self._create_polyflow_object(self.extra_eig, self.polyflow_smt_tol, self.scale_factor,
                                                 differential_eq, symbol_tuple, domain_description,
                                                 lie_order, self.time_step, **kwargs)
    # NOTE(review): for solvers other than 'dreal' the attributes
    # `remainder_obj` / `bernstein_object` are never assigned — confirm that
    # downstream code only touches them on the dreal path.
    if kwargs['smt_solver'] == 'dreal':
        self.remainder_obj = HigherOrderEstimationDreal([self.polyflow.lie_sympy_list[order_remainder], ],
                                                        self.polyflow.symbol_tuple, order_remainder, self.time_step,
                                                        self.remainder_smt_tol)
        # test
        self.bernstein_object = BernsteinBound(self.polyflow.lie_sympy_list[order_remainder],
                                               self.polyflow.symbol_tuple,
                                               order_remainder, self.time_step)
    self.pz_lifted, self.pz_projected, self.pz_poly_flow_observer, \
        self.z_projected, self.flow_pipe = self._create_set_objects(self.dim_n, self.polyflow.max_monomial_order,
                                                                    lie_order)
    # make it a polyflow function?
    self.init_alpha_base(self.polyflow.continuous_matrix_list)
def init_alpha_base(self, matrices):
    """
    Compute and store the per-dimension constants used for the bloating
    factor of the flowpipe. Each constant depends only on the eigenvalues of
    the corresponding continuous-time operator and the time step.

    Parameters
    ----------
    matrices
        Continuous time matrices of the system (one per dimension).

    Returns
    -------
    list of floats
        The freshly computed ``alpha_base_list``.
    """
    self.alpha_base_list = [get_alpha_prime(matrices[dim], self.time_step)
                            for dim in range(self.dim_n)]
    return self.alpha_base_list
def define_dangerous_sets_interval(self, interval_list):
    """Register the dangerous sets of the reachability problem from interval descriptions."""
    self.dangerous_sets.extend(pz.Zonotope.from_interval_list(interval_i)
                               for interval_i in interval_list)
    return self.dangerous_sets
@timeit_measure
def simulate(self, x_0: pz.Zonotope, t=10.0):
"""
Solves the Reachability problem of PolyReach
Parameters
----------
x_0 : Zonotope
Initial set
t : float
End time of simulation
"""
time_measure_0 = time.time()
time_step_i = 0
| |
max_gflatlanes = max(max_gflatlanes, len(gflatlanes))
self.max_gflatlanes = max_gflatlanes
max_3dlanes = max(max_3dlanes, len(real_gt_3dlanes))
self.max_3dlanes = max_3dlanes
self.max_2dpoints = max(self.max_2dpoints, max([len(l) for l in lanes]))
self.max_gflatpoints = max(self.max_gflatpoints, max([len(l) for l in gflatlanes]))
self.max_3dpoints = max(self.max_3dpoints, max([len(l) for l in real_gt_3dlanes]))
self.X3d[1] = max(self.X3d[1], max([np.max(l[:, 0]) for l in real_gt_3dlanes]))
self.X3d[0] = min(self.X3d[0], min([np.min(l[:, 0]) for l in real_gt_3dlanes]))
self.Y3d[1] = max(self.Y3d[1], max([np.max(l[:, 1]) for l in real_gt_3dlanes]))
self.Y3d[0] = min(self.Y3d[0], min([np.min(l[:, 1]) for l in real_gt_3dlanes]))
self.Z3d[1] = max(self.Z3d[1], max([np.max(l[:, 2]) for l in real_gt_3dlanes]))
self.Z3d[0] = min(self.Z3d[0], min([np.min(l[:, 2]) for l in real_gt_3dlanes]))
self.Xgflat[1] = max(self.Xgflat[1], max([np.max(l[:, 0]) for l in gflatlanes]))
self.Xgflat[0] = min(self.Xgflat[0], min([np.min(l[:, 0]) for l in gflatlanes]))
self.Ygflat[1] = max(self.Ygflat[1], max([np.max(l[:, 1]) for l in gflatlanes]))
self.Ygflat[0] = min(self.Ygflat[0], min([np.min(l[:, 1]) for l in gflatlanes]))
self._old_annotations[image_id] = {
'path': image_path,
'gt_2dlanes': lanes,
'gt_3dlanes': real_gt_3dlanes,
'gt_gflatlanes': gflatlanes,
'aug': False,
'relative_path': info_dict['raw_file'],
'gt_camera_pitch': gt_cam_pitch,
'gt_camera_height': gt_cam_height,
'json_line': info_dict,
}
image_id += 1
def _transform_annotation(self, anno, img_wh=None):
    """
    Convert one raw annotation into fixed-size training arrays.

    Args:
        anno: annotation dict with 'gt_2dlanes', 'gt_gflatlanes',
            'gt_3dlanes', optional 'categories', camera pitch/height and
            the image 'path'.
        img_wh: optional (width, height); when None the size is looked up
            via `_get_img_width` / `_get_img_heigth` from the path.

    Returns:
        dict with the padded label matrix 'gt_2dgflatlabels' (padding value
        -1e5, category column zeroed), per-point validity flags
        'gt_2dgflatflags', the category list, camera parameters and the
        original annotation under 'old_anno'.
    """
    if img_wh is None:
        img_h = self._get_img_heigth(anno['path'])
        img_w = self._get_img_width(anno['path'])
    else:
        img_w, img_h = img_wh
    gt_2dlanes = anno['gt_2dlanes']
    gt_gflatlanes = anno['gt_gflatlanes']
    gt_3dlanes = anno['gt_3dlanes']
    assert len(gt_2dlanes) == len(gt_gflatlanes)
    assert len(gt_3dlanes) == len(gt_gflatlanes)
    categories = anno['categories'] if 'categories' in anno else [1] * len(gt_2dlanes)
    # BUGFIX: materialize the (lane, category) pairs. The original kept a
    # lazy `zip` iterator that the loop below exhausted, so the
    # 'categories' entry of the returned annotation was always empty.
    gt_2dlanes = list(zip(gt_2dlanes, categories))
    # gt_2dlanes = filter(lambda x: len(x[0]) > 0, gt_2dlanes)
    seq_len = 2 + 2 * self.max_2dpoints
    # gt_gflatlanes = filter(lambda x: len(x[0]) > 0, gt_gflatlanes)
    lanes = np.ones((self.max_2dlanes, 1 + seq_len + seq_len + self.max_2dpoints), dtype=np.float32) * -1e5
    lanes[:, 0] = 0
    laneflags = np.ones((self.max_2dlanes, self.max_2dpoints), dtype=np.float32) * -1e-5
    # old_lanes = sorted(old_lanes, key=lambda x: x[0][0][0])
    for lane_pos, (lane, category) in enumerate(gt_2dlanes):
        # 2D lane: normalized image coordinates
        lower, upper = lane[0][1], lane[-1][1]
        xs = np.array([p[0] for p in lane]) / img_w
        ys = np.array([p[1] for p in lane]) / img_h
        lanes[lane_pos, 0] = category
        lanes[lane_pos, 1] = lower / img_h
        lanes[lane_pos, 2] = upper / img_h
        lanes[lane_pos, 3:3 + len(xs)] = xs
        lanes[lane_pos, (3 + self.max_2dpoints):(3 + self.max_2dpoints + len(ys))] = ys
        laneflags[lane_pos, :len(xs)] = 1.
        # gflatlane = self.make_lane_y_mono_inc(gt_gflatlanes[lane_pos])
        # gflatlane = gt_gflatlanes[lane_pos]
        # use real 3d gts
        gflatlane = gt_3dlanes[lane_pos]
        assert len(lane) == len(gflatlane)
        lower, upper = gflatlane[0][1], gflatlane[-1][1]
        gflat_Xs = np.array([p[0] for p in gflatlane]) / self.gflatXnorm
        gflat_Ys = np.array([p[1] for p in gflatlane]) / self.gflatYnorm
        lanes[lane_pos, 1 + seq_len] = lower / self.gflatYnorm
        lanes[lane_pos, 1 + seq_len + 1] = upper / self.gflatYnorm
        lanes[lane_pos, (1 + seq_len + 2): (1 + seq_len + 2 + len(gflat_Xs))] = gflat_Xs
        lanes[lane_pos, (1 + seq_len + 2 + self.max_gflatpoints): (1 + seq_len + 2 + self.max_gflatpoints + len(gflat_Ys))] = gflat_Ys
        gflat_Zs = np.array([p[2] for p in gflatlane]) / self.gflatZnorm
        lanes[lane_pos, (1 + seq_len + seq_len):(1 + seq_len + seq_len + len(gflat_Zs))] = gflat_Zs
    new_anno = {
        'path': anno['path'],
        'gt_2dgflatlabels': lanes,
        'gt_2dgflatflags': laneflags,
        'old_anno': anno,
        'categories': [cat for _, cat in gt_2dlanes],
        'gt_camera_pitch': anno['gt_camera_pitch'],
        'gt_camera_height': anno['gt_camera_height'],
    }
    return new_anno
def _transform_annotations(self):
    """Convert every cached raw annotation with ``_transform_annotation``."""
    print('Now transforming annotations...')
    self._annotations = {image_id: self._transform_annotation(raw_anno)
                         for image_id, raw_anno in self._old_annotations.items()}
def _load_eval_data(self):
    """Load evaluation annotations: read the label files, then convert them."""
    self._extact_eval_data()
    self._transform_eval_annotations()
def _extact_eval_data(self):
    """
    Read the evaluation annotation files (one json object per line) and cache
    the raw records in ``self._old_annotations`` keyed by a running image id.

    (Name keeps the historical 'extact' spelling for API compatibility.)
    """
    image_id = 0
    self._old_annotations = {}
    for anno_file in self.anno_files:
        with open(anno_file, 'r') as anno_obj:
            for line in anno_obj:
                info_dict = json.loads(line)
                # dict_keys(['raw_file', 'cam_height', 'cam_pitch',
                # 'centerLines', 'laneLines', 'centerLines_visibility', 'laneLines_visibility'])
                image_path = os.path.join(self.root, info_dict['raw_file'])
                gt_cam_height = info_dict['cam_height']
                gt_cam_pitch = info_dict['cam_pitch']
                assert os.path.exists(image_path), '{:s} not exist'.format(image_path)
                self._image_file.append(image_path)
                self._image_ids.append(image_id)
                # no lane geometry is parsed here: evaluation reads the gt from file later
                self._old_annotations[image_id] = {
                    'path': image_path,
                    'aug': False,
                    'relative_path': info_dict['raw_file'],
                    'json_line': info_dict,
                    'gt_camera_pitch': gt_cam_pitch,
                    'gt_camera_height': gt_cam_height,
                }
                image_id += 1
def _load_predcam_data(self, result_path):
    """Load annotations whose camera pose comes from predictions in ``result_path``."""
    self._extact_predcam_data(result_path)
    self._transform_eval_annotations()
def _extact_predcam_data(self, result_path):
    """
    Read a prediction-result file (one json object per line) and cache raw
    records in ``self._old_annotations``, using the PREDICTED camera pose
    ('pred_cam_height' / 'pred_cam_pitch') in place of the ground truth.

    (Name keeps the historical 'extact' spelling for API compatibility.)
    """
    image_id = 0
    self._old_annotations = {}
    with open(result_path, 'r') as anno_obj:
        for line in anno_obj.readlines():
            info_dict = json.loads(line)
            image_path = os.path.join(self.root, info_dict['raw_file'])
            cam_height = info_dict['pred_cam_height']
            cam_pitch = info_dict['pred_cam_pitch']
            assert os.path.exists(image_path), '{:s} not exist'.format(image_path)
            self._image_file.append(image_path)
            self._image_ids.append(image_id)
            # NOTE: stored under the gt_camera_* keys so downstream code is unchanged
            self._old_annotations[image_id] = {
                'path': image_path,
                'aug': False,
                'relative_path': info_dict['raw_file'],
                'json_line': info_dict,
                'gt_camera_pitch': cam_pitch,
                'gt_camera_height': cam_height,
            }
            image_id += 1
def _transform_eval_annotation(self, anno):
    """Wrap a raw eval annotation, keeping path, camera pose and the original record."""
    return {
        'path': anno['path'],
        'old_anno': anno,
        'gt_camera_pitch': anno['gt_camera_pitch'],
        'gt_camera_height': anno['gt_camera_height'],
    }
def _transform_eval_annotations(self):
    """Convert every cached raw eval annotation with ``_transform_eval_annotation``."""
    print('Now transforming EVALEVALEVAL annotations...')
    self._annotations = {image_id: self._transform_eval_annotation(raw_anno)
                         for image_id, raw_anno in self._old_annotations.items()}
def detections(self, ind):
    """Return the (transformed) annotation record for dataset index ``ind``."""
    return self._annotations[self._image_ids[ind]]
def __len__(self):
    """Number of annotated samples in the dataset."""
    return len(self._annotations)
def _to_float(self, x):
    """Round ``x`` to two decimal places via string formatting (JSON output style)."""
    return float(f"{x:.2f}")
def class_name(self, cid):
    """Map a contiguous class index to the dataset category entry."""
    return self._classes[cid]
def _get_img_heigth(self, path):
    """Image height in pixels; the dataset uses a fixed size, so ``path`` is ignored."""
    return 1080
def _get_img_width(self, path):
    """Image width in pixels; the dataset uses a fixed size, so ``path`` is ignored."""
    return 1920
def __getitem__(self, idx, transform=False):
    """
    Return one sample as
    (image, gt_2dgflatlabels, gt_2dgflatflags, gt_camera_pitch, gt_camera_height, idx).

    Only used at test time; ``transform=True`` is not supported.
    """
    record = self._annotations[idx]
    image = cv2.imread(record['path'])
    labels = record['gt_2dgflatlabels']
    flags = record['gt_2dgflatflags']
    pitch = record['gt_camera_pitch']
    height = record['gt_camera_height']
    if transform:
        raise NotImplementedError
    return (image, labels, flags, pitch, height, idx)
def pred2lanes(self, path, pred, y_samples, camera_height):
    """
    Decode raw per-lane predictions into 3D polylines.

    Args:
        path: sample path (unused here, kept for API symmetry).
        pred: iterable of lane vectors; layout per lane:
            [prob, valid_flag, _, _, y_lower, y_upper, x_poly(4), z_poly(4), ...]
        y_samples: y positions (metric) at which the polynomials are sampled.
        camera_height: camera height (unused in the current decoding).

    Returns:
        (lanes, probs): list of [X, Y, Z] point lists and matching scores.
    """
    ys_norm = np.array(y_samples) / self.gflatYnorm
    decoded, scores = [], []
    for raw_lane in pred:
        if raw_lane[1] == 0:  # invalid-lane flag
            continue
        # pred_height = raw_lane[-2]
        x_poly = raw_lane[6:10]
        z_poly = raw_lane[10:14]
        xs = np.polyval(x_poly, ys_norm) * self.gflatXnorm
        zs = np.polyval(z_poly, ys_norm) * self.gflatZnorm
        inside = (ys_norm > raw_lane[4]) & (ys_norm < raw_lane[5])
        if np.sum(inside) < 2:
            continue
        # X_pred, Y_pred = self.transform_lane_gflat2g(camera_height, X_pred, Y_pred, Z_pred)
        points = np.stack([xs[inside],
                           ys_norm[inside] * self.gflatYnorm,
                           zs[inside]], axis=-1)
        decoded.append(points.tolist())
        scores.append(float(raw_lane[0]))
    return decoded, scores
def pred2apollosimformat(self, idx, pred, runtime):
    """Convert one sample's predictions into an ApolloSim-format json line (adds laneLines + probs)."""
    runtime *= 1000.  # s to ms
    old_anno = self._annotations[idx]['old_anno']
    # path = old_anno['path']
    relative_path = old_anno['relative_path']
    json_line = old_anno['json_line']
    gt_camera_height = old_anno['gt_camera_height']
    # y_samples = self.anchor_y_steps
    # y_samples = list((np.linspace(0, 1., num=100) * 200.))
    # 100 evenly spaced y positions spanning the top-view region
    y_lo = self.top_view_region[2, 1] / self.gflatYnorm
    y_hi = self.top_view_region[0, 1] / self.gflatYnorm
    y_samples = list(np.linspace(y_lo, y_hi, num=100) * self.gflatYnorm)
    pred_lanes, prob_lanes = self.pred2lanes(relative_path, pred, y_samples, gt_camera_height)
    json_line["laneLines"] = pred_lanes
    json_line["laneLines_prob"] = prob_lanes
    return json_line
def save_apollosim_predictions(self, predictions, runtimes, filename):
    """Write one ApolloSim-format json line per sample prediction to ``filename``."""
    with open(filename, 'w') as jsonFile:
        for idx, prediction in enumerate(predictions):
            line = self.pred2apollosimformat(idx, prediction, runtimes[idx])
            json.dump(line, jsonFile)
            jsonFile.write('\n')
def eval(self, exp_dir, predictions, runtimes, label=None, only_metrics=False):
    """
    Save predictions in ApolloSim format and score them with the official
    3D-lane evaluator.

    Args:
        exp_dir: directory for the prediction/result json files.
        predictions, runtimes: per-sample model outputs and timings.
        label: tag embedded in the output filenames.
        only_metrics: when False, also dump the metric dict to a json file.

    Returns:
        the evaluator's `eval_stats` list (F-score first, then errors).
        NOTE(review): if ``self.metric`` is neither 'default' nor 'ours',
        `result`/`eval_stats` are unbound and a NameError is raised — confirm
        the metric value is validated upstream.
    """
    # raise NotImplementedError
    pred_filename = 'apollosim_{}_{}_predictions_{}.json'.format(self.dataset_name, self.split, label)
    pred_filename = os.path.join(exp_dir, pred_filename)
    self.save_apollosim_predictions(predictions, runtimes, pred_filename)
    if self.metric == 'default':
        evaluator = eval_3D_lane.LaneEval(self)
        # first sweep probability thresholds, then rescore at the best one
        eval_stats_pr = evaluator.bench_one_submit_varying_probs(pred_filename, self.anno_files[0])
        max_f_prob = eval_stats_pr['max_F_prob_th']
        eval_stats = evaluator.bench_one_submit(pred_filename, self.anno_files[0], prob_th=max_f_prob)
        print("Metrics: AP, F-score, x error (close), x error (far), z error (close), z error (far)")
        print("Laneline:{:.3}, {:.3}, {:.3}, {:.3}, {:.3}, {:.3}".format(
            eval_stats_pr['laneline_AP'], eval_stats[0], eval_stats[3], eval_stats[4], eval_stats[5], eval_stats[6]))
        result = {
            'AP': eval_stats_pr['laneline_AP'],
            'F-score': eval_stats[0],
            'x error (close)': eval_stats[3],
            'x error (far)': eval_stats[4],
            'z error (close)': eval_stats[5],
            'z error (far)': eval_stats[6]
        }
        # print("Centerline:{:.3}, {:.3}, {:.3}, {:.3}, {:.3}, {:.3}".format(
        #     eval_stats_pr['centerline_AP'], eval_stats[7], eval_stats[10], eval_stats[11], eval_stats[12], eval_stats[13]))
    elif self.metric == 'ours':
        raise NotImplementedError
    if not only_metrics:
        filename = 'apollosim_{}_{}_eval_result_{}.json'.format(self.dataset_name, self.split, label)
        with open(os.path.join(exp_dir, filename), 'w') as out_file:
            json.dump(result, out_file)
    return eval_stats
def draw_annotation(self, idx, pred=None, img=None, cls_pred=None):
if img is None:
# raise NotImplementedError
img, gt_2dflatlabels, gt_2dgflatflags, gt_camera_pitch, gt_camera_height, _ = \
self.__getitem__(idx, transform=False)
# Tensor to opencv image
img = img.permute(1, 2, 0).numpy()
# Unnormalize
if self.normalize:
img = img * np.array(IMAGENET_STD) + np.array(IMAGENET_MEAN)
img = (img * 255).astype(np.uint8)
else:
img = (img - np.min(img)) / (np.max(img) - np.min(img))
_, gt_2dflatlabels, gt_2dgflatflags, gt_camera_pitch, gt_camera_height, _ = \
self.__getitem__(idx, transform=False)
img = (img * 255).astype(np.uint8)
img_h, img_w, _ = img.shape
img_canvas = deepcopy(img)
K = self.K
aug_mat = np.identity(3, dtype=np.float)
H_g2im = self.homograpthy_g2im(gt_camera_pitch, gt_camera_height, K)
H_im2ipm = np.linalg.inv(np.matmul(self.H_crop_ipm, np.matmul(H_g2im, self.H_ipm2g)))
H_im2ipm = np.matmul(H_im2ipm, np.linalg.inv(aug_mat))
P_g2im = self.projection_g2im(gt_camera_pitch, gt_camera_height, self.K) # used for x=PX (3D to 2D)
# H_g2im = self.homograpthy_g2im(gt_cam_pitch, gt_cam_height, self.K)
H_im2g = np.linalg.inv(H_g2im)
P_g2gflat = np.matmul(H_im2g, P_g2im)
ipm_canvas = deepcopy(img)
im_ipm = cv2.warpPerspective(ipm_canvas / 255., H_im2ipm, (self.ipm_w, self.ipm_h))
im_ipm = np.clip(im_ipm, 0, 1)
ipm_laneline = im_ipm.copy()
H_g2ipm = np.linalg.inv(self.H_ipm2g)
for i, lane in enumerate(gt_2dflatlabels):
# lane = lane[3:] # remove conf, upper and lower positions
seq_len = len(lane - 5) // 8
xs = lane[3:3 + seq_len][gt_2dgflatflags[i] > 0]
ys = lane[3 + seq_len:3 + seq_len * 2][gt_2dgflatflags[i] > 0]
ys = ys[xs >= 0].astype(np.int)
xs = xs[xs >= 0].astype(np.int)
# for p in zip(xs, ys):
# p = (int(p[0] * img_w), int(p[1] * img_h))
# img_canvas = cv2.circle(img_canvas, p, 5, color=(0, 0, 255), thickness=-1)
for p in range(1, ys.shape[0]):
img_canvas = cv2.line(img_canvas, (xs[p - 1], ys[p - 1]), (xs[p], ys[p]), [0, 0, 1], 2)
gflatlane = lane[5 + seq_len * 5:]
gflatXs = gflatlane[:seq_len][gt_2dgflatflags[i] > 0] * self.gflatXnorm
gflatYs = gflatlane[seq_len:seq_len * 2][gt_2dgflatflags[i] > 0] * self.gflatYnorm
x_ipm, y_ipm = self.homographic_transformation(H_g2ipm, gflatXs, gflatYs)
x_ipm = x_ipm.astype(np.int)
y_ipm = y_ipm.astype(np.int)
for k in range(1, x_ipm.shape[0]):
ipm_laneline = cv2.line(ipm_laneline, (x_ipm[k - | |
"""python embed_fbanks.py pascal1k_crossnet_adadelta_emb_200.pickle"""
from __future__ import division
import cPickle as pickle
import sys
import os.path as path
import glob
import operator
import os
from collections import defaultdict
import joblib
from scipy.spatial.distance import pdist, cdist
from scipy.io import loadmat
import numpy as np
from sklearn.preprocessing import StandardScaler
import pandas as pd
from nnet_archs import CrossNet
from dataset_iterators import CrossLearnIterator
CACHE_DIR = os.getcwd()
# try:
# os.makedirs(CACHE_DIR)
# except OSError:
# pass
def tridist_idx(d, i, j, n):
    """
    Look up the distance between items i and j (0 <= i, j < n) in a condensed
    distance vector `d` as produced by scipy.spatial.distance.pdist.

    Returns 0.0 on the diagonal (i == j); the matrix is symmetric, so a query
    with j > i is answered by the mirrored entry.
    """
    if j > i:
        return tridist_idx(d, j, i, n)
    if j == i:
        return 0.
    else:
        # BUGFIX: with `from __future__ import division` active at the top of
        # this file, `/` yields a float, which is not a valid sequence/array
        # index; j*(j+1) is always even, so floor division is exact.
        return d[n*j - j*(j+1)//2 + i - j - 1]
def load_images(img_mat_fname, corpus_df, normalize=True, force_rebuild=False):
    """
    Load per-segment image features (fc7-style 4096-d codes) from a .mat file,
    keeping only pictures referenced by `corpus_df`, with on-disk pickle caching.

    Python 2 code (xrange, cPickle).

    Args:
        img_mat_fname: .mat file with an 'Img' struct array; each entry has a
            'fname' and a 'codes' matrix (last column dropped here).
        corpus_df: dataframe with a 'picture' column of used picture names.
        normalize: z-score the features per dimension before returning.
        force_rebuild: ignore the cache and reparse the .mat file.

    Returns:
        (fnames, X): picture name per row, and the (n_segments, 4096) matrix.
    """
    _memo_fname = path.join(CACHE_DIR, 'IMAGES_' + path.splitext(path.basename(img_mat_fname))[0]) + '.pkl'
    if not path.exists(_memo_fname) or force_rebuild:
        used_pics = set(corpus_df['picture'])
        m = loadmat(img_mat_fname)
        fnames = []
        # upper bound: 20 segments per image; trimmed to idx_x below
        X = np.empty((m['Img'].shape[0] * 20, 4096), dtype=np.float32)
        idx_x = 0
        for idx_m in xrange(m['Img'].shape[0]):
            c = m['Img'][idx_m][0]
            fname = path.splitext(path.basename(c['fname'][0][0][0]))[0]
            if not fname in used_pics:
                continue
            codes = c['codes'][0][0][:, :-1]
            for img_segment in codes:
                fnames.append(fname)
                X[idx_x] = img_segment
                idx_x += 1
        X = X[:idx_x, :]
        assert(len(fnames) == X.shape[0])
        with open(_memo_fname, 'wb') as fid:
            pickle.dump((fnames, X), fid, -1)
    else:
        with open(_memo_fname, 'rb') as fid:
            fnames, X = pickle.load(fid)
    if normalize:
        # normalization is applied AFTER caching, so the cache stores raw features
        X = StandardScaler().fit_transform(X)
    return fnames, X
def load_sounds(img_fnames, corpus_df, stackdir, normalize=True, force_rebuild=False):
    """
    Load stacked filterbank features for every spoken token that describes one
    of the pictures in `img_fnames`, with on-disk pickle caching.

    Python 2 code (print statement, cPickle).

    Args:
        img_fnames: picture names actually used (filters the corpus).
        corpus_df: dataframe whose 'tokens' column holds lists of token lists.
        stackdir: directory with one <token>.npy feature vector per token.
        normalize: z-score the features per dimension before returning.
        force_rebuild: ignore the cache and reload the .npy files.

    Returns:
        (tokens, X): sorted token ids, and the (n_tokens, n_features) matrix.
    """
    _memo_fname = path.join(CACHE_DIR, 'SOUNDS_' + '-'.join(stackdir.split('/')[1:-1])) + '.pkl'
    if not path.exists(_memo_fname) or force_rebuild:
        images = set(img_fnames)
        tokens = set()
        for _, row in corpus_df.iterrows():
            if not row['picture'] in images:
                continue
            for tokenlist in row['tokens']:
                tokens |= set(tokenlist)
        tokens = sorted(list(tokens))
        # feature dimensionality is taken from the first token's file
        nfeatures = np.load(path.join(stackdir, tokens[0] + '.npy')).shape[0]
        X = np.empty((len(tokens), nfeatures), dtype=np.float32)
        for i, token in enumerate(tokens):
            if i % 500 == 0:
                print ' loading:', i, token
            X[i] = np.load(path.join(stackdir, token + '.npy'))
        with open(_memo_fname, 'wb') as fid:
            pickle.dump((tokens, X), fid, -1)
    else:
        with open(_memo_fname, 'rb') as fid:
            tokens, X = pickle.load(fid)
    if normalize:
        # normalization is applied AFTER caching, so the cache stores raw features
        X = StandardScaler().fit_transform(X)
    return tokens, X
def calc_snd_dists(snd_X, stackdir, nnet_file, force_rebuild=False):
    """Pairwise cosine distances (condensed form) between sound embeddings, cached on disk."""
    cache_file = path.join(CACHE_DIR,
                           'SOUND_DISTANCES_' +
                           '-'.join(stackdir.split('/')[1:-1]) +
                           '_' +
                           path.splitext(path.basename(nnet_file))[0] +
                           '.npy')
    if path.exists(cache_file) and not force_rebuild:
        return np.load(cache_file)
    dists = pdist(snd_X, 'cosine')
    np.save(cache_file, dists)
    return dists
def calc_img_dists(img_X, img_mat_fname, nnet_file, force_rebuild=False):
    """Pairwise cosine distances (condensed form) between image embeddings, cached on disk."""
    cache_file = path.join(CACHE_DIR, 'IMAGE_DISTANCES_' +
                           path.splitext(path.basename(img_mat_fname))[0] +
                           '_' +
                           path.splitext(path.basename(nnet_file))[0] +
                           '.npy')
    if path.exists(cache_file) and not force_rebuild:
        return np.load(cache_file)
    dists = pdist(img_X, 'cosine')
    np.save(cache_file, dists)
    return dists
def calc_multi_dists(img_X, snd_X, stackdir, img_mat_fname, nnet_file, force_rebuild=False):
    """Cross-modal cosine distances (n_images x n_sounds) between embeddings, cached on disk."""
    cache_file = path.join(CACHE_DIR,
                           'MULTI_DISTANCES_' +
                           path.splitext(path.basename(img_mat_fname))[0] +
                           '_' +
                           '-'.join(stackdir.split('/')[1:-1]) +
                           '_' +
                           path.splitext(path.basename(nnet_file))[0] +
                           '.npy')
    if path.exists(cache_file) and not force_rebuild:
        return np.load(cache_file)
    dists = cdist(img_X, snd_X, 'cosine')
    np.save(cache_file, dists)
    return dists
def kneighbors(d, i, k, n):
    """Return the closest k neighbors to element at index 0<=i<n
    according to the triangular distance vector d"""
    # distances from item i to every item (including itself)
    dist_row = np.array([tridist_idx(d, i, j, n) for j in range(n)])
    # position 0 of the argsort is the query item itself, so skip it
    return np.argsort(dist_row)[1:k + 1]
def word_similarity_for_image(query_snd, distances, n, snd_fnames, id2pic):
    """find the neighbors of word in the embedded space.
    sort these and find rank of word that describes same image

    Returns ``n`` when no neighbor shares a picture with the query.
    (Fixed: replaced the Python-2-only ``xrange`` index loop with
    ``enumerate``, so this also runs under Python 3.)
    """
    query_pic = id2pic[snd_fnames[query_snd]]
    neighbors = kneighbors(distances, query_snd, n - 1, n)
    for position, neighbor in enumerate(neighbors, start=1):
        # two tokens "match" when their picture sets intersect
        if id2pic[snd_fnames[neighbor]] & query_pic:
            return position
    return n
def image_search_by_word_query(query_snd, multi_distances, snd_fnames, img_fnames, pic2id):
    """map a word query into the embedding space and find images in the same space.
    return rank of first image that has the query in its tokenlist
    multi_distances: n_images x n_sounds

    Returns ``n_images`` when no retrieved image lists the query token.
    (Fixed: replaced the Python-2-only ``xrange`` index loop with
    ``enumerate``, so this also runs under Python 3.)
    """
    query_id = snd_fnames[query_snd]
    # images sorted by increasing distance to the query sound
    image_neighbors = np.argsort(multi_distances[:, query_snd])
    for position, img_ix in enumerate(image_neighbors, start=1):
        if query_id in pic2id[img_fnames[img_ix]]:
            return position
    return multi_distances.shape[0]
def image_TOKEN_search_by_word_query_TOKEN(query_snd_ix, multi_distances,
                                           snd_fnames, img_fnames,
                                           id2pic):
    """map a word token query into the embedding space and find images in the same space
    return rank of first neighbor whose TOKEN is in the picture list of the id

    Returns ``n_images`` when no neighbor image matches.
    (Fixed: replaced the Python-2-only ``xrange`` index loop with
    ``enumerate``, so this also runs under Python 3.)
    """
    query_id = snd_fnames[query_snd_ix]
    pictures_for_query = id2pic[query_id]
    # images sorted by increasing distance to the query token
    img_neighbors = np.argsort(multi_distances[:, query_snd_ix])
    for position, img_ix in enumerate(img_neighbors, start=1):
        if img_fnames[img_ix] in pictures_for_query:
            return position
    return img_neighbors.shape[0]
def image_TOKEN_search_by_word_query_TYPE_at_k(k, word_TYPE, multi_distances,
                                               snd_fnames, img_fnames,
                                               word2id, id2pic):
    """map all word tokens corresponding to word_TYPE into the embedding space and take
    the k nearest neighbors for each token. concatenate the neighbors. return rank of the
    first neighbor whose token is in the concatenated picture lists

    Fixes vs. the previous version:
    * ``&`` instead of ``&=`` so the set stored in ``word2id`` is no longer
      mutated as a side effect of this lookup
    * the returned rank is the position in the distance-sorted candidate list
      (loop counter + 1, as in the sibling search functions); previously the
      index into the *unsorted* concatenation was returned

    :raises ValueError: if no token of word_TYPE is present in snd_fnames
    """
    word_tokens = word2id[word_TYPE] & set(snd_fnames)
    if len(word_tokens) == 0:
        raise ValueError('not enough word tokens for {0}'.format(word_TYPE))
    # union of every picture associated with any token of this word type
    pictures_for_word = set()
    for token in word_tokens:
        pictures_for_word.update(id2pic[token])
    id2ix = {v: ix for ix, v in enumerate(snd_fnames)}
    # gather the k nearest images (index + distance) for each token
    ix_chunks = []
    dist_chunks = []
    for token in word_tokens:
        dist_col = multi_distances[:, id2ix[token]]
        nn_ix = np.argsort(dist_col)[:k]
        ix_chunks.append(nn_ix)
        dist_chunks.append(dist_col[nn_ix])
    image_token_ix = np.hstack(ix_chunks)
    image_token_dists = np.hstack(dist_chunks)
    # scan the concatenated candidates in order of increasing distance
    sort_ix = np.argsort(image_token_dists)
    for position, ix in enumerate(sort_ix, start=1):
        if img_fnames[image_token_ix[ix]] in pictures_for_word:
            return position
    return sort_ix.shape[0]
def word_TOKEN_search_by_image_query_TOKEN(query_img_ix, multi_distances,
                                           snd_fnames, img_fnames,
                                           pic2id):
    """map an image query into the embedding space and find words in the same space.
    return rank of first neighbor whose TOKEN is in the tokenlist of the image

    Returns ``n_sounds`` when no neighbor token matches.
    (Fixed: replaced the Python-2-only ``xrange`` index loop with
    ``enumerate``, so this also runs under Python 3.)
    """
    n_images, n_sounds = multi_distances.shape
    tokens_for_query = pic2id[img_fnames[query_img_ix]]
    # sound tokens sorted by increasing distance to the query image
    word_neighbors = np.argsort(multi_distances[query_img_ix, :])
    for position, snd_ix in enumerate(word_neighbors, start=1):
        if snd_fnames[snd_ix] in tokens_for_query:
            return position
    return n_sounds
def word_TYPE_search_by_image_query_TOKEN(query_img_ix, multi_distances,
                                          snd_fnames, img_fnames,
                                          pic2id, id2word):
    """map an image query into the embedding space and find words in the same space.
    return rank of first neighbor whose TYPE is in the tokenlist of the image

    Returns ``n_sounds`` when no neighbor's word type matches.
    (Fixed: replaced the Python-2-only ``xrange`` index loop with
    ``enumerate``, so this also runs under Python 3.)
    """
    n_images, n_sounds = multi_distances.shape
    word_neighbors = np.argsort(multi_distances[query_img_ix, :])
    tokens_for_query = pic2id[img_fnames[query_img_ix]]
    # the word TYPES present in the query image's token list
    words_for_query = set([id2word[x] for x in tokens_for_query])
    for position, snd_ix in enumerate(word_neighbors, start=1):
        if id2word[snd_fnames[snd_ix]] in words_for_query:
            return position
    return n_sounds
def mapping_dicts(lucid_tokens_df, corpus_df, lucid2pascal):
    """return id2word, mapping LUCID id's to words, and id2pic, mapping LUCID id's to
    sets of picture names and pic2id, mapping picture names to tokens

    Words are mapped through ``lucid2pascal`` (falling back to the LUCID word
    itself) when building word2id.
    """
    id2word = dict(zip(lucid_tokens_df['id'], lucid_tokens_df['word']))
    word2id = defaultdict(set)
    for _, row in lucid_tokens_df.iterrows():
        word2id[lucid2pascal.get(row['word'], row['word'])].add(row['id'])
    word2id = dict(word2id)
    id2pic = defaultdict(set)
    pic2id = defaultdict(set)
    for _, row in corpus_df.iterrows():
        picname = row['picture']
        # iterate the nested token lists directly instead of
        # reduce(operator.add, ...): avoids quadratic list concatenation and
        # no longer raises TypeError on a row with an empty token list
        for tokenlist in row['tokens']:
            for token in tokenlist:
                id2pic[token].add(picname)
                pic2id[picname].add(token)
    id2pic = dict(id2pic)
    pic2id = dict(pic2id)
    return id2word, word2id, id2pic, pic2id
def load_net(nnet_file):
    """Load a serialized network, dispatching on the file extension.

    ``path.splitext`` returns the extension *with* its leading dot, so the
    comparison must be against '.joblib'. (The previous ``== 'joblib'``
    never matched, silently routing joblib files to the pickle loader.)
    """
    if path.splitext(nnet_file)[1] == '.joblib':
        nnet = joblib.load(nnet_file)
    else:
        with open(nnet_file, 'rb') as fid:
            nnet = pickle.load(fid)
    return nnet
if __name__ == '__main__':
datadir = '/mnt/data/pascal1k_prepared_dataset/'
stackdir = path.join(datadir, 'LUCID_stack_tokens_drop')
# if len(sys.argv) != 6:
# print 'usage: embed_eval.py NNET_FILE CORPUS_FILE TOKEN_FILE STACKTOKENSDIR REPLACEMENTFILE'
# exit()
print 'loading net...',
nnet_file = 'full_pascal1k_crossnet_adadelta_emb_200.pickle'
nnet = load_net(nnet_file)
transform_imgs = nnet.transform_img()
transform_snds = nnet.transform_snd()
transform_both = nnet.transform_img_snd()
print 'done.'
print 'loading corpus...',
corpus_file = path.join(datadir, 'corpus_drop.pkl')
corpus_df = pd.read_pickle(corpus_file)
print 'done.'
print 'loading tokens file...',
tokens_file = path.join(datadir, 'lucid_tokens_drop.pkl')
tokens_df = pd.read_pickle(tokens_file)
print 'done.'
print 'loading mappings...',
replacement_file = path.join(datadir, 'replacements.txt')
lucid2pascal = dict(x.strip().split('\t')[::-1]
for x in open(replacement_file))
pascal2lucid = dict(x.strip().split('\t')
for x in open(replacement_file))
id2word, word2id, id2pic, pic2id = mapping_dicts(tokens_df, corpus_df, lucid2pascal)
print 'done.'
print 'loading images...',
img_mat_fname = '/mnt/data/pascal1k_prepared_dataset/split_test_img.mat'
img_fnames, img_X = load_images(img_mat_fname, corpus_df, force_rebuild=True)
pic2ix = {v:k for k, v in enumerate(img_fnames)}
print 'done.'
print 'loading sounds...',
stack_tokens_dir = '/mnt/data/pascal1k_prepared_dataset/LUCID_stack_tokens_drop/'
snd_fnames, snd_X = load_sounds(img_fnames, corpus_df, stack_tokens_dir, force_rebuild=True)
sndID2ix = {v:k for k, v in enumerate(snd_fnames)}
print 'done.'
snd_fnames_set = set(snd_fnames)
id2pic_keys_set = set(id2pic.keys())
assert(all(f in id2pic_keys_set for f in snd_fnames_set))
print 'embedding images...',
img_X_emb = transform_imgs(img_X)
print 'done.'
print 'embedding sounds...',
snd_X_emb = transform_snds(snd_X)
print 'done.'
print 'calculating sound distances...',
snd_dists = calc_snd_dists(snd_X_emb, stack_tokens_dir, nnet_file, force_rebuild=True)
print 'done.'
print 'calculating image distances...',
img_dists = calc_img_dists(img_X_emb, img_mat_fname, nnet_file, force_rebuild=True)
print 'done.'
print 'calculating sound-image distances...',
multi_dists = calc_multi_dists(img_X_emb, snd_X_emb, stack_tokens_dir, img_mat_fname, nnet_file, force_rebuild=True)
print 'done.'
print
# SCORES
print 'SCORES:'
# 1. word similarity for image
word_similarity_ranks = np.array([word_similarity_for_image(i,
snd_dists,
snd_X.shape[0],
snd_fnames,
id2pic)
for i in xrange(len(snd_fnames))])
print 'word similarity for image (median, mean rank): {0:.3f} {1:.3f}'.format(
np.median(word_similarity_ranks), word_similarity_ranks.mean())
# 2. word TOKEN search by image TOKEN
word_token_image_token_ranks = np.array(
[word_TOKEN_search_by_image_query_TOKEN(i, multi_dists, snd_fnames, img_fnames, pic2id)
for i in xrange(len(img_fnames))])
print 'word TOKEN search by | |
<gh_stars>0
import asyncio
import io
import os
import re
from datetime import datetime, timedelta
import discord
import psycopg2
import requests
from PIL import Image
from dateutil.relativedelta import relativedelta
from discord.ext import commands
SQLpath = os.environ["DATABASE_URL"]  # PostgreSQL DSN supplied by the hosting environment
db = psycopg2.connect(SQLpath)  # connect to the SQL database
cur = db.cursor()  # shared cursor used for every query in this module
auction_notice_ch_id = 727333695450775613  # Discord channel that receives auction notices
class AuctionDael(commands.Cog):
"""オークション、取引に関するcog"""
def is_admin(self, user):
kgx_guild = self.bot.get_guild(558125111081697300)
if user.guild != kgx_guild:
return False
admin_role = kgx_guild.get_role(558129132161073164)
dev_role = kgx_guild.get_role(558138575225356308)
return bool(set(user.roles) & {admin_role, dev_role})
    def __init__(self, bot):
        # keep a reference to the bot so commands can reach its shared helpers
        self.bot = bot
    @commands.command(aliases=["bs"])
    async def bidscore(self, ctx, pt: int):  # add points and grant the role matching the new total
        """Add *pt* bid points to the invoking user, announce the new total,
        and promote the user's rank role when a threshold is crossed.

        Only usable in the bid-application channel or the bot-command channel.
        NOTE(review): assumes ctx.author already has a row in user_data —
        cur.fetchone() returning None would raise TypeError; confirm upstream.
        """
        if ctx.channel.id not in (558265536430211083, 711682097928077322):
            return  # only usable in the bid-application or bot-command channel
        channel = self.bot.get_channel(602197766218973185)
        p = re.compile(r'^[0-9]+$')
        # digit-only pattern: rejects negative values (the '-' sign fails the match)
        if p.fullmatch(str(pt)):
            cur.execute("SELECT bid_score FROM user_data where user_id = %s", (ctx.author.id,))
            old_score, = cur.fetchone()
            new_score = old_score + pt
            cur.execute("UPDATE user_data SET bid_score = %s WHERE user_id = %s", (new_score, ctx.author.id))
            db.commit()
            if ctx.channel.id == 558265536430211083:
                embed = discord.Embed(description=f'**{ctx.author.display_name}**の現在の落札ポイントは**{new_score}**です。',
                                      color=0x9d9d9d)
                embed.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)  # show username+ID and avatar
                await channel.send(embed=embed)
            before, after = await ctx.bot.update_bidscore_role(ctx.author, new_score)
            if before != after:  # the rank role changed between before and after
                if before is None:
                    before_name = "落札初心者"
                else:
                    before_name = before.name
                embed = discord.Embed(
                    description=f'**{ctx.author.display_name}**がランクアップ!``{before_name}⇒{after.name}``',
                    color=after.color)
                embed.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)  # show username+ID and avatar
                await ctx.channel.send(embed=embed)
            embed = discord.Embed(description=f'**{ctx.author.display_name}**に落札ポイントを付与しました。', color=0x9d9d9d)
            embed.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)  # show username+ID and avatar
            await ctx.channel.send(embed=embed)
            await asyncio.sleep(0.5)
            # refresh the public ranking board
            await self.bot.update_bidscore_ranking()
@commands.command()
async def start(self, ctx):
        def check(m):
            """Accept only the next message from the command author in this channel."""
            if m.author.bot:
                # bot messages never count (returns None, which is falsy)
                return
            return m.channel == ctx.channel and m.author == ctx.author
abs_datetime_pattern = re.compile(r'(\d{1,4})/(\d{1,2})/(\d{1,2})-(\d{1,2}):(\d{1,2})')
rel_datetime_pattern = re.compile(r'(?i)(?=.+)((\d+)(month|m))?((\d+(\.\d+)?)(week|w))?((\d+(\.\d+)?)(day|d))?((\d+(\.\d+)?)(hour|h))?((\d+(\.\d+)?)(minute|m))?')
def is_exist_date(year, month, day):
"""
存在しない月なら1、存在しない日なら2を返す
"""
if month in (1, 3, 5, 7, 8, 10, 12):
if not 1 <= day <= 31:
return 2
elif month in (4, 6, 9, 11):
if not 1 <= day <= 30:
return 2
elif month == 2:
if year % 400 == 0 or year % 4 == 0 and year % 100 != 0: # 閏年なら
if not 1 <= day <= 29:
return 2
else:
if not 1 <= day <= 28:
return 2
else:
return 1
return 0
# 2つ行ってる場合はreturn
user = ctx.author.id
if self.bot.get_user_auction_count(user) >= 2 and ctx.author.id != 2<PASSWORD>:
description = "貴方はすでに取引を2つ以上行っているためこれ以上取引を始められません。\n" \
"行っている取引が2つ未満になってから再度行ってください。"
await ctx.channel.send(embed=discord.Embed(description=description, color=0xf04747))
await ctx.channel.send("--------キリトリ線--------")
return
first_message_object = None
# オークション系
if self.bot.is_auction_category(ctx):
# 既にオークションが行われていたらreturn
if "☆" not in ctx.channel.name:
description = "このチャンネルでは既にオークションが行われています。\n☆がついているチャンネルでオークションを始めてください。"
await ctx.channel.send(embed=discord.Embed(description=description, color=0xf04747), delete_after=3)
await asyncio.sleep(3)
await ctx.message.delete()
return
# 単位の設定
if self.bot.is_siina_category(ctx):
unit = "椎名"
elif self.bot.is_gacha_category(ctx):
unit = "ガチャ券"
else:
embed = discord.Embed(description="何によるオークションですか?単位を入力してください。(ex.GTギフト券, がちゃりんご, エメラルド etc)",
color=0xffaf60)
first_message_object = await ctx.channel.send(embed=embed)
try:
input_unit = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
unit = input_unit.content
# ALLにおいて
if "all" in ctx.channel.name.lower() and unit in ("椎名", "椎名林檎", "ガチャ券"):
embed = discord.Embed(description="椎名、ガチャ券のオークションは専用のチャンネルで行ってください。",
color=0xffaf60)
await ctx.channel.send(embed=embed)
return
embed = discord.Embed(
description="出品するものを入力してください。",
color=0xffaf60)
if first_message_object is not None:
await ctx.channel.send(embed=embed)
else:
first_message_object = await ctx.channel.send(embed=embed)
try:
input_item = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
embed = discord.Embed(description="開始価格を入力してください。\n**※次のように入力してください。"
"【〇LC+△ST+□】 or 【〇ST+△】 or 【△】 ex.1lc+1st+1 or 1st+1 or 32**\n"
"終了したい場合は`cancel`と入力してください",
color=0xffaf60)
await ctx.channel.send(embed=embed)
while not self.bot.is_closed(): # 正しい入力が来るまでwhile
try:
user_start_price = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
start_price = self.bot.stack_check(user_start_price.content)
if user_start_price.content.lower() == "cancel":
await ctx.send("キャンセルしました\n--------キリトリ線--------")
return
if start_price is None:
await ctx.send("価格の形式が正しくありません\n**"
"※次のように入力してください。【〇LC+△ST+□】 or 【〇ST+△】 or 【△】 ex.1lc+1st+1 or 1st+1 or 32**")
elif start_price == 0:
await ctx.send("開始価格を0にすることはできません。入力しなおしてください。")
else: # 正しい入力ならbreak
break
embed = discord.Embed(description="即決価格を入力してください。\n**※次のように入力してください。"
"【〇LC+△ST+□】 or 【〇ST+△】 or 【△】 ex.1lc+1st+1 or 1st+1 or 32**\n"
"ない場合は`なし`とお書きください。\n"
"終了したい場合は`cancel`と入力してください",
color=0xffaf60)
await ctx.channel.send(embed=embed)
while not self.bot.is_closed():
try:
input_bin_price = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
if input_bin_price.content.lower() == "cancel":
await ctx.send("キャンセルしました\n--------キリトリ線--------")
return
if input_bin_price.content == "なし":
bin_price = "なし"
break
bin_price = self.bot.stack_check(input_bin_price.content)
if bin_price is None:
await ctx.send("価格の形式が正しくありません\n**"
"※次のように入力してください。【〇LC+△ST+□】 or 【〇ST+△】 or 【△】 ex.1lc+1st+1 or 1st+1 or 32**")
elif bin_price < start_price:
await ctx.send("即決価格が開始価格より低いです。入力しなおしてください。")
elif bin_price == start_price:
await ctx.send("即決価格が開始価格と等しいです。入力しなおしてください。\n価格が決まっているのであれば、取引チャンネルをお使いください。")
else:
break
embed = discord.Embed(
description="オークション終了日時を入力してください。\n**注意!**時間の書式に注意してください!\n\n"
f"例 {datetime.now().year}年5月14日の午後8時に終了したい場合:\n**{datetime.now().year}/05/14-20:00**と入力してください。\n\n"
"例 1カ月2週間3日4時間5分後に終了したい場合:\n**1M2w3d4h5m**と入力してください。\n\n"
"終了したい場合は**cancel**と入力してください",
color=0xffaf60)
await ctx.channel.send(embed=embed)
now = datetime.now() # 都度生成するとタイムラグが生じてしまうため、あらかじめ取得した値を使いまわす
while not self.bot.is_closed():
try:
input_end_time = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
if input_end_time.content.lower() == "cancel":
await ctx.send("キャンセルしました\n--------キリトリ線--------")
return
if abs_datetime_pattern.fullmatch(input_end_time.content): # 絶対時刻の書式の場合
year, month, day, hour, minute = map(int, abs_datetime_pattern.fullmatch(input_end_time.content).groups())
if not 2000 <= year <= 3000:
await ctx.send("年は2000~3000の範囲のみ指定可能です。入力しなおしてください。")
continue
if is_exist_date(year, month, day) == 1:
await ctx.send(f"{month}月は存在しません。入力しなおしてください。")
continue
if is_exist_date(year, month, day) == 2:
await ctx.send(f"{year}年{month}月に{day}日は存在しません。入力しなおしてください。")
continue
if (hour, minute) == (24, 00):
end_time = datetime(year, month, day) + timedelta(days=1)
elif hour not in range(24) or minute not in range(60):
await ctx.send(f"{hour}時{minute}分は存在しません。入力しなおしてください。")
continue
else:
end_time = datetime(year, month, day, hour, minute)
elif rel_datetime_pattern.fullmatch(input_end_time.content): # 相対時刻の書式の場合
"""
入力が"1m1.5w"の場合のマッチオブジェクトに対してMatch.groups()した場合は('1m', '1', 'm', '1.5w', '1.5', '.5', 'w',…)となるため、
(0-indexedで)6番目が単位、4番目が数値となる
ただし、monthの部分は小数を受け付けないため、別で処理をする
"""
groups = rel_datetime_pattern.fullmatch(input_end_time.content).groups()
week_u, day_u, hour_u, minute_u = groups[6::4] # 単位部分
week_n, day_n, hour_n, minute_n = groups[4::4] # 数値部分
month_u, month_n = groups[2], groups[1]
if month_u == "m" and not any((week_u, day_u, hour_u, minute_u)):
# month_uが"m"、かつweek~minuteが指定されていないとき ("1m"のような入力)
minute_u, minute_n = month_u, month_n # monthの内容をminuteに移動する
month_u = None # monthを指定されていないことにする
end_time = now
if month_u:
end_time += relativedelta(months=int(month_n))
# month以外の各単位について、単位部分がNoneでなければend_timeに加算
if week_u:
end_time += timedelta(weeks=float(week_n))
if day_u:
end_time += timedelta(days=float(day_n))
if hour_u:
end_time += timedelta(hours=float(hour_n))
if minute_u:
end_time += timedelta(minutes=float(minute_n))
year, month, day, hour, minute = end_time.year, end_time.month, end_time.day, end_time.hour, end_time.minute # 表示用に属性を展開しておく
else: # 正しくない入力の場合
await ctx.send("時間の書式が正しくありません\n\n"
f"例 {datetime.now().year}年5月14日の午後8時に終了したい場合:\n**{datetime.now().year}/05/14-20:00**と入力してください。\n\n"
"例 1カ月2週間3日4時間5分後に終了したい場合:\n**1M2w3d4h5m**と入力してください。\n\n"
"終了したい場合は**cancel**と入力してください")
continue
if end_time <= now:
await ctx.channel.send("終了時刻を現在時刻以前にすることはできません。入力しなおしてください。")
continue
elif end_time - now <= timedelta(hours=12):
await ctx.send("開催期間を12時間以下にすることはできません。入力しなおしてください。")
continue
elif end_time - now >= timedelta(weeks=8):
await ctx.channel.send("2ヶ月以上にわたるオークションはできません。入力しなおしてください。")
continue
break
end_time_sql = end_time.strftime('%Y/%m/%d-%H:%M')
end_time_text = f"{year}/{month:0>2}/{day:0>2}-{hour:0>2}:{minute:0>2}" # 24:00の場合はそのまま表示
embed = discord.Embed(
description="その他、即決特典などありましたらお書きください。\n長い場合、改行などをして**1回の送信**で書いてください。\n"
"何も無ければ「なし」で構いません。",
color=0xffaf60)
await ctx.channel.send(embed=embed)
try:
input_notice = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
display_start_price = f"{unit}{self.bot.stack_check_reverse(start_price)}"
# 即決価格なしなら単位は付与しない
if bin_price == "なし":
display_bin_price = "なし"
else:
display_bin_price = f"{unit}{self.bot.stack_check_reverse(bin_price)}"
embed = discord.Embed(title="これで始めます。よろしいですか?YES/NOで答えてください。(小文字でもOK。NOの場合初めからやり直してください。)",
color=0xffaf60)
embed.add_field(name="出品者", value=f'{ctx.author.display_name}', inline=True)
embed.add_field(name="出品物", value=f'{input_item.content}', inline=True)
embed.add_field(name="開始価格", value=f'{display_start_price}', inline=False)
embed.add_field(name="即決価格", value=f'{display_bin_price}', inline=False)
embed.add_field(name="終了日時", value=f'{end_time_text}', inline=True)
embed.add_field(name="特記事項", value=f'{input_notice.content}', inline=True)
await ctx.channel.send(embed=embed)
try:
input_confirm = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
if input_confirm.content.lower() in ("yes", "いぇs", "いぇs"):
await ctx.channel.purge(limit=3)
await asyncio.sleep(0.3)
embed = discord.Embed(title="オークション内容", color=0xffaf60)
embed.add_field(name="出品者", value=f'{ctx.author.display_name}', inline=True)
embed.add_field(name="出品物", value=f'{input_item.content}', inline=True)
embed.add_field(name="開始価格", value=f'{display_start_price}', inline=False)
embed.add_field(name="即決価格", value=f'{display_bin_price}', inline=False)
embed.add_field(name="終了日時", value=f'{end_time_text}', inline=True)
embed.add_field(name="特記事項", value=f'{input_notice.content}', inline=True)
await ctx.channel.send("<:siina:558251559394213888>オークションを開始します<:siina:558251559394213888>")
auction_embed = await ctx.channel.send(embed=embed)
await auction_embed.pin()
# SQLにデータ登録
cur.execute("UPDATE auction SET auction_owner_id = %s, embed_message_id = %s, auction_item = %s, "
"auction_start_price = %s, auction_bin_price = %s, auction_end_time = %s, "
"unit = %s, notice = %s WHERE ch_id = %s",
(ctx.author.id, auction_embed.id, input_item.content, str(start_price),
str(bin_price), end_time_sql, unit, input_notice.content, ctx.channel.id))
db.commit()
try:
await asyncio.wait_for(ctx.channel.edit(name=ctx.channel.name.split('☆')[0]), timeout=3.0)
except asyncio.TimeoutError:
pass
else:
await ctx.channel.purge(limit=2)
await ctx.channel.send("初めからやり直してください。\n--------キリトリ線--------")
# 通常取引について
elif self.bot.is_normal_category(ctx):
# 既に取引が行われていたらreturn
if "☆" not in ctx.channel.name:
description = "このチャンネルでは既に取引が行われています。\n☆がついているチャンネルで取引を始めてください。"
await ctx.channel.send(embed=discord.Embed(description=description, color=0xf04747))
await asyncio.sleep(3)
await ctx.channel.purge(limit=2)
return
# 単位の設定
if self.bot.is_siina_category(ctx):
unit = "椎名"
elif self.bot.is_gacha_category(ctx):
unit = "ガチャ券"
else:
embed = discord.Embed(description="何による取引ですか?単位を入力してください。(ex.GTギフト券, がちゃりんご, エメラルド etc)",
color=0xffaf60)
first_message_object = await ctx.channel.send(embed=embed)
try:
input_unit = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
unit = input_unit.content
# ALLにおいて
if "all" in ctx.channel.name.lower() and unit in ("椎名", "椎名林檎", "ガチャ券"):
await ctx.channel.purge(limit=2)
embed = discord.Embed(description="椎名、ガチャ券の取引は専用のチャンネルで行ってください。",
color=0xffaf60)
await ctx.channel.send(embed=embed)
await ctx.channel.send("--------キリトリ線--------")
return
embed = discord.Embed(
description="出品するものを入力してください。",
color=0xffaf60)
if first_message_object is not None:
await ctx.channel.send(embed=embed)
else:
first_message_object = await ctx.channel.send(embed=embed)
try:
input_item = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
embed = discord.Embed(description="希望価格を入力してください。\n**※次のように入力してください。"
"【〇LC+△ST+□】 or 【〇ST+△】 or 【△】 ex.1lc+1st+1 or 1st+1 or 32**\n"
"終了したい場合は`cancel`と入力してください",
color=0xffaf60)
await ctx.channel.send(embed=embed)
while not self.bot.is_closed():
try:
input_hope_price = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
if input_hope_price.content.lower() == "cancel":
await ctx.send("キャンセルしました\n--------キリトリ線--------")
return
hope_price = self.bot.stack_check(input_hope_price.content)
if hope_price is None:
await ctx.send("価格の形式が正しくありません\n**"
"※次のように入力してください。【〇LC+△ST+□】 or 【〇ST+△】 or 【△】 ex.1lc+1st+1 or 1st+1 or 32**")
elif hope_price == 0:
await ctx.send("希望価格を0にすることはできません。入力しなおしてください。")
else:
break
embed = discord.Embed(
description="取引終了日時を入力してください。\n**注意!**時間の書式に注意してください!\n\n"
f"例 5月14日の午後8時に終了したい場合:\n**{datetime.now().year}/05/14-20:00**と入力してください。\n\n"
"例 1カ月2週間3日4時間5分後に終了したい場合:\n**1M2w3d4h5m**と入力してください。\n\n"
"終了したい場合は**cancel**と入力してください",
color=0xffaf60)
await ctx.channel.send(embed=embed)
now = datetime.now() # 都度生成するとタイムラグが生じてしまうため、あらかじめ取得した値を使いまわす
while not self.bot.is_closed():
try:
input_end_time = await self.bot.wait_for('message', check=check, timeout=600.0)
except asyncio.TimeoutError:
await ctx.send("10分間操作がなかったためキャンセルしました\n--------キリトリ線--------")
return
if input_end_time.content.lower() == "cancel":
await ctx.send("キャンセルしました\n--------キリトリ線--------")
return
if abs_datetime_pattern.fullmatch(input_end_time.content): # 絶対時刻の書式の場合
year, month, day, hour, minute = map(int, abs_datetime_pattern.fullmatch(input_end_time.content).groups())
if not 2000 <= year <= 3000:
await ctx.send("年は2000~3000の範囲のみ指定可能です。入力しなおしてください。")
continue
if is_exist_date(year, month, day) == 1:
await ctx.send(f"{month}月は存在しません。入力しなおしてください。")
continue
if is_exist_date(year, month, day) == 2:
await ctx.send(f"{year}年{month}月に{day}日は存在しません。入力しなおしてください。")
continue
if (hour, minute) == (24, 00):
end_time = | |
"""Datemath Module"""
import logging
import random
import re
import string
from datetime import timedelta, datetime, date
from time import time
from elasticsearch.exceptions import NotFoundError
from curator_api.exceptions import ConfigurationError, FailedExecution
from curator_api.defaults.settings import date_regex
def get_date_regex(timestring):
    """
    Return a regex string based on a provided strftime timestring.
    :arg timestring: An strftime pattern
    :rtype: str
    """
    logger = logging.getLogger(__name__)
    pieces = []
    previous = ''
    for character in timestring:
        if character == '%':
            pass  # the directive marker itself contributes nothing
        elif character in date_regex() and previous == '%':
            # strftime directive: emit \d{N} with the directive's digit count
            pieces.append(r'\d{' + date_regex()[character] + '}')
        elif character in ['.', '-']:
            pieces.append('\\' + character)  # escape regex metacharacters
        else:
            pieces.append(character)
        previous = character
    regex = ''.join(pieces)
    logger.debug("regex = {0}".format(regex))
    return regex
def get_datetime(index_timestamp, timestring):
    """
    Return the datetime extracted from the index name, which is the index
    creation time.
    :arg index_timestamp: The timestamp extracted from an index name
    :arg timestring: An strftime pattern
    :rtype: :py:class:`datetime.datetime`
    """
    iso_week_number = False
    if '%W' in timestring or '%U' in timestring or '%V' in timestring:
        # Week-of-year patterns need an explicit weekday for strptime, so
        # pin the weekday to Monday ('1') before parsing.
        timestring += '%w'
        index_timestamp += '1'
        if '%V' in timestring and '%G' in timestring:
            iso_week_number = True
            # Parse as the Gregorian equivalents; corrected after parsing.
            timestring = timestring.replace("%G", "%Y").replace("%V", "%W")
    elif '%m' in timestring and '%d' not in timestring:
        # A month without a day: pin the day to the 1st.
        timestring += '%d'
        index_timestamp += '1'
    parsed = datetime.strptime(index_timestamp, timestring)
    if iso_week_number:
        parsed = _handle_iso_week_number(parsed, timestring, index_timestamp)
    return parsed
def fix_epoch(epoch):
    """
    Fix value of `epoch` to be epoch, which should be 10 or fewer digits long.
    :arg epoch: An epoch timestamp, in epoch + milliseconds, or microsecond, or
        even nanoseconds.
    :rtype: int
    """
    try:
        epoch = int(epoch)  # no fractional part allowed
    except Exception:
        raise ValueError('Invalid epoch received, unable to convert {0} to int'.format(epoch))
    # If we're still using this script past January, 2038, we have bigger
    # problems than this hacky digit math...
    digits = len(str(epoch))
    if digits <= 10:
        return epoch  # already plain seconds
    if digits <= 13:
        return int(epoch / 1000)  # milliseconds
    # microseconds, nanoseconds, ...: drop everything beyond 10 digits
    return int(epoch / 10 ** (digits - 10))
def _handle_iso_week_number(date, timestring, index_timestamp):
date_iso = date.isocalendar()
iso_week_str = "{Y:04d}{W:02d}".format(Y=date_iso[0], W=date_iso[1])
greg_week_str = datetime.strftime(date, "%Y%W")
# Edge case 1: ISO week number is bigger than Greg week number.
# Ex: year 2014, all ISO week numbers were 1 more than in Greg.
# Edge case 2: 2010-01-01 in ISO: 2009.W53, in Greg: 2010.W00
# For Greg converting 2009.W53 gives 2010-01-04, converting back
# to same timestring gives: 2010.W01.
if (iso_week_str > greg_week_str or datetime.strftime(date, timestring) != index_timestamp):
# Remove one week in this case
date = date - timedelta(days=7)
return date
def datetime_to_epoch(mydate):
    """Convert a naive UTC datetime object to an integer epoch timestamp.

    Implemented with days/seconds arithmetic rather than
    ``timedelta.total_seconds`` so it still runs on Python 2.6
    (RHEL/CentOS 6), which predates that method.
    """
    delta = mydate - datetime(1970, 1, 1)
    return delta.days * 24 * 3600 + delta.seconds
class TimestringSearch(object):
    """
    An object to allow repetitive search against a string, `searchme`, without
    having to repeatedly recreate the regex.
    :arg timestring: An strftime pattern
    """
    def __init__(self, timestring):
        # Compile once; get_epoch may be called many times with this pattern.
        regex = r'(?P<date>{0})'.format(get_date_regex(timestring))
        self.pattern = re.compile(regex)
        self.timestring = timestring

    def get_epoch(self, searchme):
        """
        Return the epoch timestamp extracted from the `timestring` appearing
        in `searchme`, or None when no date pattern is found.
        :arg searchme: A string to be searched for a date pattern that matches
            `timestring`
        :rtype: int or None
        """
        # Explicit None return replaces the previous implicit fall-through;
        # the dead commented-out Python 2.6 workaround was removed (the live
        # implementation lives in datetime_to_epoch).
        match = self.pattern.search(searchme)
        if match and match.group("date"):
            timestamp = match.group("date")
            return datetime_to_epoch(get_datetime(timestamp, self.timestring))
        return None
def get_point_of_reference(unit, count, epoch=None):
    """
    Get a point-of-reference timestamp in epoch + milliseconds by deriving
    from a `unit` and a `count`, and an optional reference timestamp, `epoch`
    :arg unit: One of ``seconds``, ``minutes``, ``hours``, ``days``, ``weeks``,
        ``months``, or ``years``.
    :arg count: The number of ``units``. ``count`` * ``unit`` will
        be calculated out to the relative number of seconds.
    :arg epoch: An epoch timestamp used in conjunction with ``unit`` and
        ``count`` to establish a point of reference for calculations.
    :rtype: int
    """
    # Seconds per unit; months and years use the 30-day / 365-day convention.
    seconds_per_unit = {
        'seconds': 1,
        'minutes': 60,
        'hours': 3600,
        'days': 3600 * 24,
        'weeks': 3600 * 24 * 7,
        'months': 3600 * 24 * 30,
        'years': 3600 * 24 * 365,
    }
    if unit not in seconds_per_unit:
        raise ValueError('Invalid unit: {0}.'.format(unit))
    multiplier = seconds_per_unit[unit]
    # Use this moment as a reference point, if one is not provided.
    if not epoch:
        epoch = time()
    epoch = fix_epoch(epoch)
    return epoch - multiplier * count
def get_unit_count_from_name(index_name, pattern):
    """Extract a unit_count from an index_name.

    Returns the integer captured by group 1 of *pattern*, or None when the
    pattern is missing, does not match, or captures a non-integer value.
    """
    logger = logging.getLogger(__name__)
    if pattern is None:
        return None
    match = pattern.search(index_name)
    if not match:
        return None
    try:
        return int(match.group(1))
    except Exception as err:
        logger.debug('Unable to convert value to integer: {0}'.format(err))
        return None
def date_range(unit, range_from, range_to, epoch=None, week_starts_on='sunday'):
"""
Get the epoch start time and end time of a range of ``unit``s, reckoning the
start of the week (if that's the selected unit) based on ``week_starts_on``,
which can be either ``sunday`` or ``monday``.
:arg unit: One of ``hours``, ``days``, ``weeks``, ``months``, or ``years``.
:arg range_from: How many ``unit`` (s) in the past/future is the origin?
:arg range_to: How many ``unit`` (s) in the past/future is the end point?
:arg epoch: An epoch timestamp used to establish a point of reference for
calculations.
:arg week_starts_on: Either ``sunday`` or ``monday``. Default is ``sunday``
:rtype: tuple
"""
logger = logging.getLogger(__name__)
acceptable_units = ['hours', 'days', 'weeks', 'months', 'years']
if unit not in acceptable_units:
raise ConfigurationError(
'"unit" must be one of: {0}'.format(acceptable_units))
if not range_to >= range_from:
raise ConfigurationError(
'"range_to" must be greater than or equal to "range_from"')
if not epoch:
epoch = time()
epoch = fix_epoch(epoch)
raw_por = datetime.utcfromtimestamp(epoch)
logger.debug('Raw point of reference = {0}'.format(raw_por))
# Reverse the polarity, because -1 as last week makes sense when read by
# humans, but datetime timedelta math makes -1 in the future.
origin = range_from * -1
# These if statements help get the start date or start_delta
if unit == 'hours':
por = datetime(raw_por.year, raw_por.month, raw_por.day, raw_por.hour, 0, 0)
start_delta = timedelta(hours=origin)
if unit == 'days':
por = datetime(raw_por.year, raw_por.month, raw_por.day, 0, 0, 0)
start_delta = timedelta(days=origin)
if unit == 'weeks':
por = datetime(raw_por.year, raw_por.month, raw_por.day, 0, 0, 0)
sunday = False
if week_starts_on.lower() == 'sunday':
sunday = True
weekday = por.weekday()
# Compensate for ISO week starting on Monday by default
if sunday:
weekday += 1
logger.debug('Weekday = {0}'.format(weekday))
start_delta = timedelta(days=weekday, weeks=origin)
if unit == 'months':
por = datetime(raw_por.year, raw_por.month, 1, 0, 0, 0)
year = raw_por.year
month = raw_por.month
if origin > 0:
for _ in range(0, origin):
if month == 1:
year -= 1
month = 12
else:
month -= 1
else:
for _ in range(origin, 0):
if month == 12:
year += 1
month = 1
else:
month += 1
start_date = datetime(year, month, 1, 0, 0, 0)
if unit == 'years':
por = datetime(raw_por.year, 1, 1, 0, 0, 0)
start_date = datetime(raw_por.year - origin, 1, 1, 0, 0, 0)
if unit not in ['months', 'years']:
start_date = por - start_delta
# By this point, we know our start date and can convert it to epoch time
start_epoch = datetime_to_epoch(start_date)
logger.debug('Start ISO8601 = {0}'.format(
datetime.utcfromtimestamp(start_epoch).isoformat()))
# This is the number of units we need to consider.
count = (range_to - range_from) + 1
# We have to iterate to one more month, and then subtract a second to get
# the last day of the correct month
if unit == 'months':
month = start_date.month
year | |
#! /usr/bin/env python
# this import must comes first to make sure we use the non-display backend
import matplotlib
matplotlib.use('Agg')
import os
import sys
import argparse
import numpy as np
import settings
from optimize.gradient_optimizer import GradientOptimizer, FindParams
from caffevis.caffevis_helper import read_label_file, set_mean
from settings_misc import load_network
from caffe_misc import layer_name_to_top_name
LR_POLICY_CHOICES = ('constant', 'progress', 'progress01')
def get_parser():
    """Build the argparse parser for the image-optimization script.

    Defaults come from the ``settings`` module (itself layered from
    settings.py / settings_MODEL.py / settings_user.py), so the command line
    only needs to override what differs from the configured model.
    """
    parser = argparse.ArgumentParser(description='Script to find, with or without regularization, images that cause high or low activations of specific neurons in a network via numerical optimization. Settings are read from settings.py, overridden in settings_MODEL.py and settings_user.py, and may be further overridden on the command line.',
                                     formatter_class=lambda prog: argparse.ArgumentDefaultsHelpFormatter(prog, width=100)
                                     )
    # Network and data options
    parser.add_argument('--caffe-root', type = str, default = settings.caffevis_caffe_root,
                        help = 'Path to caffe root directory.')
    parser.add_argument('--deploy-proto', type = str, default = settings.caffevis_deploy_prototxt,
                        help = 'Path to caffe network prototxt.')
    parser.add_argument('--net-weights', type = str, default = settings.caffevis_network_weights,
                        help = 'Path to caffe network weights.')
    parser.add_argument('--channel-swap-to-rgb', type = str, default = '(2,1,0)',
                        help = 'Permutation to apply to channels to change to RGB space for plotting. Hint: (0,1,2) if your network is trained for RGB, (2,1,0) if it is trained for BGR.')
    parser.add_argument('--data-size', type = str, default = '(227,227)',
                        help = 'Size of network input.')
    #### FindParams
    # Where to start
    parser.add_argument('--start-at', type = str, default = 'mean_plus_rand', choices = ('mean_plus_rand', 'randu', 'mean'),
                        help = 'How to generate x0, the initial point used in optimization.')
    parser.add_argument('--rand-seed', type = int, default = settings.optimize_image_rand_seed,
                        help = 'Random seed used for generating the start-at image (use different seeds to generate different images).')
    parser.add_argument('--batch-size', type=int, default=settings.optimize_image_batch_size,
                        help = 'Batch size used for generating several images, each index will be used as random seed')
    # What to optimize
    parser.add_argument('--push-layers', nargs='*', default = settings.layers_to_output_in_offline_scripts,
                        help = 'Name of layers that contains the desired neuron whose value is optimized.')
    parser.add_argument('--push-channel', type = int, default = None,
                        help = 'Channel number for desired neuron whose value is optimized (channel for conv, neuron index for FC).')
    parser.add_argument('--push-spatial', type = str, default = 'None',
                        help = 'Which spatial location to push for conv layers. For FC layers, set this to None. For conv layers, set it to a tuple, e.g. when using `--push-layer conv5` on AlexNet, --push-spatial (6,6) will maximize the center unit of the 13x13 spatial grid.')
    parser.add_argument('--push-dir', type = float, default = 1,
                        help = 'Which direction to push the activation of the selected neuron, that is, the value used to begin backprop. For example, use 1 to maximize the selected neuron activation and -1 to minimize it.')
    # Use regularization?
    parser.add_argument('--decay', type = float, default = settings.optimize_image_decay,
                        help = 'Amount of L2 decay to use.')
    parser.add_argument('--blur-radius', type = float, default = settings.optimize_image_blur_radius,
                        help = 'Radius in pixels of blur to apply after each BLUR_EVERY steps. If 0, perform no blurring. Blur sizes between 0 and 0.3 work poorly.')
    # NOTE(review): the settings attribute is spelled "blue_every" — looks
    # like a typo carried over from settings.py; renaming it here would break
    # the settings modules, so it is left as-is.
    parser.add_argument('--blur-every', type = int, default = settings.optimize_image_blue_every,
                        help = 'Blur every BLUR_EVERY steps. If 0, perform no blurring.')
    parser.add_argument('--small-val-percentile', type = float, default = 0,
                        help = 'Induce sparsity by setting pixels with absolute value under SMALL_VAL_PERCENTILE percentile to 0. Not discussed in paper. 0 to disable.')
    parser.add_argument('--small-norm-percentile', type = float, default = 0,
                        help = 'Induce sparsity by setting pixels with norm under SMALL_NORM_PERCENTILE percentile to 0. \\theta_{n_pct} from the paper. 0 to disable.')
    parser.add_argument('--px-benefit-percentile', type = float, default = 0,
                        help = 'Induce sparsity by setting pixels with contribution under PX_BENEFIT_PERCENTILE percentile to 0. Mentioned briefly in paper but not used. 0 to disable.')
    parser.add_argument('--px-abs-benefit-percentile', type = float, default = 0,
                        help = 'Induce sparsity by setting pixels with contribution under PX_BENEFIT_PERCENTILE percentile to 0. \\theta_{c_pct} from the paper. 0 to disable.')
    # How much to optimize
    parser.add_argument('--lr-policy', type = str, default = settings.optimize_image_lr_policy, choices = LR_POLICY_CHOICES,
                        help = 'Learning rate policy. See description in lr-params.')
    parser.add_argument('--lr-params', type = str, default = settings.optimize_image_lr_params,
                        help = 'Learning rate params, specified as a string that evalutes to a Python dict. Params that must be provided dependon which lr-policy is selected. The "constant" policy requires the "lr" key and uses the constant given learning rate. The "progress" policy requires the "max_lr" and "desired_prog" keys and scales the learning rate such that the objective function will change by an amount equal to DESIRED_PROG under a linear objective assumption, except the LR is limited to MAX_LR. The "progress01" policy requires the "max_lr", "early_prog", and "late_prog_mult" keys and is tuned for optimizing neurons with outputs in the [0,1] range, e.g. neurons on a softmax layer. Under this policy optimization slows down as the output approaches 1 (see code for details).')
    # parser.add_argument('--max-iters', type = list, default = settings.optimize_image_max_iters,
    #                     help = 'List of number of iterations of the optimization loop.')
    parser.add_argument('--max-iter', type = int, default = 1000, help = 'number of iterations of the optimization loop.')
    # Where to save results
    parser.add_argument('--output-prefix', type = str, default = settings.optimize_image_output_prefix,
                        help = 'Output path and filename prefix (default: outputs/%(p.push_layer)s/unit_%(p.push_channel)04d/opt_%(r.batch_index)03d)')
    # NOTE(review): the next three flags use action='store_true' together with
    # default=True, which makes them always-on and contradicts the "Default:
    # off" wording in the help text — confirm whether the defaults were
    # deliberately flipped before "fixing" either side.
    parser.add_argument('--brave', action = 'store_true', default=True, help = 'Allow overwriting existing results files. Default: off, i.e. cowardly refuse to overwrite existing files.')
    parser.add_argument('--skipbig', action = 'store_true', default=True, help = 'Skip outputting large *info_big.pkl files (contains pickled version of x0, last x, best x, first x that attained max on the specified layer.')
    parser.add_argument('--skipsmall', action = 'store_true', default=True, help = 'Skip outputting small *info.pkl files (contains pickled version of..')
    return parser
def parse_and_validate_lr_params(parser, lr_policy, lr_params):
    """Evaluate the --lr-params string and check it against the policy.

    Each policy demands a specific set of dict keys; a parse failure or a
    missing key aborts via ``parser.error``. Returns the evaluated dict.
    """
    assert lr_policy in LR_POLICY_CHOICES
    # SECURITY NOTE: eval() on a command-line string — acceptable only
    # because the caller controls the command line.
    try:
        parsed = eval(lr_params)
    except (SyntaxError, NameError):
        parser.error('Tried to parse the following lr_params value\n%s\nas a Python expression, but it failed. lr_params should evaluate to a valid Python dict.' % lr_params)
    # Required dict keys per policy, plus the human-readable phrase used in
    # the error message for that policy.
    requirements = {
        'constant': (('lr',), '"lr" key'),
        'progress': (('max_lr', 'desired_prog'), '"max_lr" and "desired_prog" keys'),
        'progress01': (('max_lr', 'early_prog', 'late_prog_mult'),
                       '"max_lr", "early_prog", and "late_prog_mult" keys'),
    }
    required_keys, phrase = requirements[lr_policy]
    if not all(key in parsed for key in required_keys):
        parser.error('Expected lr_params to be dict with at least %s, but dict is %s'
                     % (phrase, repr(parsed)))
    return parsed
def parse_and_validate_push_spatial(parser, push_spatial):
    """Parse the --push-spatial string into a tuple of length 2.

    ``None`` is mapped to ``(0, 0)`` (the convention for FC layers); any
    value other than a 2-tuple aborts via ``parser.error``.
    """
    # SECURITY NOTE: eval() on a command-line string — acceptable only
    # because the caller controls the command line.
    try:
        value = eval(push_spatial)
    except (SyntaxError, NameError):
        parser.error('Tried to parse the following push_spatial value\n%s\nas a Python expression, but it failed. push_spatial should be a valid Python expression.' % push_spatial)
    if value is None:
        return (0, 0)    # Convert to tuple format
    if isinstance(value, tuple) and len(value) == 2:
        return value
    parser.error('push_spatial should be None or a valid tuple of indices of length 2, but it is: %s' % value)
def main():
parser = get_parser()
args = parser.parse_args()
# Finish parsing args
lr_params = parse_and_validate_lr_params(parser, args.lr_policy, args.lr_params)
push_spatial = parse_and_validate_push_spatial(parser, args.push_spatial)
settings.caffevis_deploy_prototxt = args.deploy_proto
settings.caffevis_network_weights = args.net_weights
net, data_mean = load_network(settings)
# validate batch size
if settings.is_siamese and settings.siamese_network_format == 'siamese_batch_pair':
# currently, no batch support for siamese_batch_pair networks
# it can be added by simply handle the batch indexes properly, but it should be thoroughly tested
assert (settings.max_tracker_batch_size == 1)
current_data_shape = net.blobs['data'].shape
net.blobs['data'].reshape(args.batch_size, current_data_shape[1], current_data_shape[2], current_data_shape[3])
net.reshape()
labels = None
if settings.caffevis_labels:
labels = read_label_file(settings.caffevis_labels)
if data_mean is not None:
if len(data_mean.shape) == 3:
batched_data_mean = np.repeat(data_mean[np.newaxis, :, :, :], args.batch_size, axis=0)
elif len(data_mean.shape) == 1:
data_mean = data_mean[np.newaxis,:,np.newaxis,np.newaxis]
batched_data_mean = np.tile(data_mean, (args.batch_size,1,current_data_shape[2],current_data_shape[3]))
else:
batched_data_mean = data_mean
optimizer = GradientOptimizer(settings, net, batched_data_mean, labels = labels,
label_layers = settings.caffevis_label_layers,
channel_swap_to_rgb = settings.caffe_net_channel_swap)
if not args.push_layers:
print "ERROR: No layers to work on, please set layers_to_output_in_offline_scripts to list of layers"
return
# go over push layers
for count, push_layer in enumerate(args.push_layers):
top_name = layer_name_to_top_name(net, push_layer)
blob = net.blobs[top_name].data
is_spatial = (len(blob.shape) == 4)
channels = blob.shape[1]
# get layer definition
layer_def = settings._layer_name_to_record[push_layer]
if is_spatial:
push_spatial = (layer_def.filter[0] / 2, layer_def.filter[1] / 2)
| |
import datetime
import factory
from src.models import AlleleGeninteraction, Alleledbentity, DBSession, Functionalcomplementannotation, Genomerelease, Source, Colleague, ColleagueUrl, ColleagueRelation, ColleagueKeyword, Complexdbentity, Complexbindingannotation, Keyword, Dbuser, Dbentity, Edam, \
Referencedbentity, Journal, Book, FileKeyword, Filedbentity, FilePath, Referencedocument, Chebi, ChebiUrl, Phenotypeannotation, \
PhenotypeannotationCond, Locusdbentity, Locussummary, Taxonomy, Phenotype, Apo, Allele, Reporter, Obi, Reservedname, Straindbentity, StrainUrl, \
Strainsummary, StrainsummaryReference, Dataset, DatasetReference, DatasetKeyword, Referencetype, ReferenceRelation, ReferenceUrl, Referenceauthor, \
Physinteractionannotation, Geninteractionannotation, Goannotation, Regulationannotation, Literatureannotation, Contig, EcoAlias, EcoUrl, Goextension, \
Gosupportingevidence, Eco, Ro, Go, GoRelation, GoUrl, GoAlias, ApoRelation, Referencetriage, Proteinsequenceannotation, ProteinsequenceDetail, \
Goslimannotation, Interactor, Goslim, Expressionannotation, Datasetsample, DatasetUrl, DatasetFile, ReferenceAlias, Dnasequenceannotation, Dnasubsequence,\
So, ContigUrl, LocusAlias, LocusAliasReferences, LocusReferences, LocussummaryReference, LocusUrl, Posttranslationannotation,\
Psimod, Proteinexptannotation, Proteindomainannotation, Proteindomain, ProteindomainUrl, Ec, EcAlias, EcUrl, LocusRelation, LocusRelationReference, \
Locusnote, LocusnoteReference, Pathwayannotation, Pathwaydbentity, PathwayUrl, Bindingmotifannotation, Disease, Diseaseannotation, \
Proteinabundanceannotation, ChebiAlia, ReferenceFile, ComplexAlias, ComplexGo, ComplexReference, Colleaguetriage, CurationReference, \
CuratorActivity, Efo, DiseaseAlias, Diseasesupportingevidence, DiseaseRelation, DiseaseUrl
class SourceFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `Source` row (the "Addgene" plasmid repository), bound to DBSession."""
    class Meta:
        model = Source
        sqlalchemy_session = DBSession
    source_id = 1
    format_name = "Addgene"
    display_name = "Addgene"
    bud_id = 1035
    description = "Plasmid Repository"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class EcoFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for an `Eco` (evidence ontology) row, bound to DBSession."""
    class Meta:
        model = Eco
        sqlalchemy_session = DBSession
    eco_id = 1
    format_name = "format name"
    display_name = "display name"
    obj_url = "obj url"
    source_id = 1
    ecoid = 1
    description = "description"
    is_obsolete = '0'
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class EcoAliasFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for an `EcoAlias` row, bound to DBSession."""
    class Meta:
        model = EcoAlias
        sqlalchemy_session = DBSession
    alias_id = 1
    display_name = "eco alias display name"
    source_id = 1
    eco_id = 1
    alias_type = "eco alias type"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class EcoUrlFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for an `EcoUrl` row, bound to DBSession."""
    class Meta:
        model = EcoUrl
        sqlalchemy_session = DBSession
    url_id = 1
    display_name = "eco url display name"
    obj_url = "obj url"
    source_id = 1
    eco_id = 1
    url_type = "url type"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class ColleagueFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `Colleague` row, bound to DBSession.

    ``format_name`` is sequenced so repeated builds don't collide on the
    unique format_name column.
    """
    class Meta:
        model = Colleague
        sqlalchemy_session = DBSession
    colleague_id = 113698
    format_name = factory.Sequence(lambda n: 'Jimmy_{0}'.format(n))
    display_name = "<NAME>"
    obj_url = "/colleague/Jimmy_Page_LZ"
    source_id = 1
    bud_id = 549
    orcid = None
    last_name = "Page"
    first_name = "Jimmy"
    suffix = None
    other_last_name = None
    profession = "Yeast Geneticist/Molecular biologist"
    job_title = "Graduate Student"
    # NOTE(review): "Universty" looks like a typo, but it is runtime test
    # data and may be asserted on elsewhere — confirm before correcting.
    institution = "Stanford Universty"
    address1 = "Genome Research Center"
    address2 = None
    address3 = None
    city = "Palo Alto"
    state = "CA"
    country = "USA"
    postal_code = "94015"
    work_phone = "444-444-4444"
    other_phone = None
    email = "<EMAIL>"
    research_interest = "mRNA decay, translation, mRNA decay"
    is_pi = False
    is_contact = True
    display_email = True
    is_beta_tester = True
    date_last_modified = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class ComplexdbentityFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `Complexdbentity` row (subclass COMPLEX), bound to DBSession.

    Pulls in its `Source` and `Eco` parents via SubFactory.
    """
    class Meta:
        model = Complexdbentity
        sqlalchemy_session = DBSession
    dbentity_id = 1
    intact_id = 1
    systematic_name = "lorem"
    eco_id = 1
    description = "lorem"
    properties = "lorem"
    complex_accession = "lorem"
    subclass = "COMPLEX"
    display_name = "complex1"
    format_name = "complex2"
    obj_url = "http://example.org/entity"
    source_id = 1
    sgdid = "S0000099"
    dbentity_status = "Active"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
    source = factory.SubFactory(SourceFactory)
    eco = factory.SubFactory(EcoFactory)
class ComplexbindingannotationFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `Complexbindingannotation` row, bound to DBSession."""
    class Meta:
        model = Complexbindingannotation
        sqlalchemy_session = DBSession
    annotation_id = 1
    complex_id = 1
    interactor_id = 1
    binding_interactor_id = 1
    source_id = 1
    reference_id = 1
    taxonomy_id = 1
    binding_type_id = 1
    range_start = 1
    range_end = 1
    stoichiometry = 1
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
    complex = factory.SubFactory(ComplexdbentityFactory)
class KeywordFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `Keyword` row, bound to DBSession; format_name is sequenced to stay unique."""
    class Meta:
        model = Keyword
        sqlalchemy_session = DBSession
    keyword_id = 1
    format_name = factory.Sequence(lambda n: 'protein_{0}'.format(n))
    #display_name = factory.Sequence(lambda n: 'protein traffcking {0}'.format(n))
    display_name = "protein trafficking 7"
    obj_url = "/keyword/protein_trafficking,_localization_and_degradation"
    source_id = 1
    description = "my description"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class DatasetKeywordFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `DatasetKeyword` link row, bound to DBSession; builds its Keyword parent."""
    class Meta:
        model = DatasetKeyword
        sqlalchemy_session = DBSession
    dataset_keyword_id = 1
    keyword_id = 1
    dataset_id = 1
    source_id = 1
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
    keyword = factory.SubFactory(KeywordFactory)
class DatasetFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `Dataset` row (a GEO-style expression dataset), bound to DBSession."""
    class Meta:
        model = Dataset
        sqlalchemy_session = DBSession
    dataset_id = 1
    format_name = "GSE10018"
    display_name = "Artemisinic Acid Production Stress in Yeast"
    obj_url = "/dataset/Artemisinic_Acid_Production_Stress_in_Yeast"
    source_id = 1
    dbxref_id = 1
    dbxref_type = "GEO"
    date_public = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    parent_dataset_id = 1
    channel_count = 2
    sample_count = 10
    is_in_spell = False
    is_in_browser = True
    description = "blah"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class DbuserFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `Dbuser` row (a non-curator database user), bound to DBSession."""
    class Meta:
        model = Dbuser
        sqlalchemy_session = DBSession
    dbuser_id = 1
    username = "mr_curator"
    first_name = "Curator"
    last_name = "X"
    status = "Current"
    email = "<EMAIL>"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    is_curator = False
class DbentityFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a generic `Dbentity` row (subclass REFERENCE), bound to DBSession."""
    class Meta:
        model = Dbentity
        sqlalchemy_session = DBSession
    dbentity_id = 1
    format_name = "format name"
    display_name = "display name"
    obj_url = "/reference/S000185012"
    source_id = 1
    bud_id = 1
    # NOTE(review): other factories use string SGDIDs (e.g. "S000001") —
    # confirm that an int here is intentional test data.
    sgdid = 1
    subclass = "REFERENCE"
    dbentity_status = "Active"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class ColleagueUrlFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `ColleagueUrl` row, bound to DBSession; id and URL are sequenced for uniqueness."""
    class Meta:
        model = ColleagueUrl
        sqlalchemy_session = DBSession
    url_id = factory.Sequence(lambda n: n)
    display_name = "Lab"
    obj_url = factory.Sequence(lambda n: 'http://example.org/{0}'.format(n))
    source_id = 1
    bud_id = 1
    colleague_id = 113698
    url_type = "Research summary"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class ColleagueRelationFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `ColleagueRelation` row linking two colleague ids, bound to DBSession."""
    class Meta:
        model = ColleagueRelation
        sqlalchemy_session = DBSession
    colleague_relation_id = factory.Sequence(lambda n: n)
    source_id = 1
    bud_id = 1
    colleague_id = 113698
    associate_id = 113699
    association_type = "Lab member"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class ColleagueKeywordFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `ColleagueKeyword` link row, bound to DBSession."""
    class Meta:
        model = ColleagueKeyword
        sqlalchemy_session = DBSession
    colleague_keyword_id = factory.Sequence(lambda n: n)
    colleague_id = 113698
    keyword_id = 1
    source_id = 1
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class EdamFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for an `Edam` (EDAM ontology) row, bound to DBSession; edamid is sequenced."""
    class Meta:
        model = Edam
        sqlalchemy_session = DBSession
    edam_id = 1
    format_name = "format_name"
    display_name = "display_name"
    obj_url = "/url"
    source_id = 1
    edamid = factory.Sequence(lambda n: 'protein_{0}'.format(n))
    edam_namespace = "data"
    description = "This is my description"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class ReferencedbentityFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `Referencedbentity` row (a published reference), bound to DBSession."""
    class Meta:
        model = Referencedbentity
        sqlalchemy_session = DBSession
    dbentity_id = 1
    method_obtained = "Curator triage"
    publication_status = "Published"
    fulltext_status = "Y"
    # NOTE(review): citation as an int is unusual next to the other string
    # columns — presumably placeholder test data; confirm against the model.
    citation = 1
    year = 2016
    pmid = 1
    pmcid = factory.Sequence(lambda n: 'pmcid_{0}'.format(n))
    date_published = "03/15/2016"
    date_revised = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    issue = "1"
    page = "10"
    volume = "1"
    title = "Nice title"
    doi = "dodoi"
    journal_id = 1
    book_id = 1
    subclass = "REFERENCE"
    format_name = "format name"
    display_name = "My entity"
    obj_url = "http://example.org/entity"
    source_id = 1
    bud_id = None
    sgdid = "S000001"
    dbentity_status = "Active"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class DatasetReferenceFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `DatasetReference` link row, bound to DBSession; builds its reference parent."""
    class Meta:
        model = DatasetReference
        sqlalchemy_session = DBSession
    dataset_reference_id = 1
    reference_id = 1
    dataset_id = 1
    source_id = 1
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
    reference = factory.SubFactory(ReferencedbentityFactory)
class ReferenceUrlFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `ReferenceUrl` row, bound to DBSession."""
    class Meta:
        model = ReferenceUrl
        sqlalchemy_session = DBSession
    url_id = 1
    display_name = "ref url"
    obj_url = "obj url"
    source_id = 1
    bud_id = 1
    reference_id = 1
    url_type = "url type"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class JournalFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `Journal` row, bound to DBSession; title is sequenced for uniqueness."""
    class Meta:
        model = Journal
        sqlalchemy_session = DBSession
    journal_id = 1
    format_name = "format_name"
    display_name = "My Journal"
    obj_url = "http://example.org/journal"
    source_id = 1
    bud_id = None
    #med_abbr = factory.Sequence(lambda n: 'med_{0}'.format(n))
    med_abbr = "med_10"
    title = factory.Sequence(lambda n: 'Title {0}'.format(n))
    issn_print = "123"
    issn_electronic = "213"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class BookFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Fixture for a `Book` row, bound to DBSession; title/volume_title are sequenced."""
    class Meta:
        model = Book
        sqlalchemy_session = DBSession
    book_id = 1
    format_name = "format_name"
    display_name = "My book"
    obj_url = "http://example.org/book"
    source_id = 1
    bud_id = None
    title = factory.Sequence(lambda n: 'Title {0}'.format(n))
    volume_title = factory.Sequence(lambda n: 'Volume {0}'.format(n))
    isbn = "1234"
    total_pages = 1
    publisher = "Publisher A"
    date_created = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
    created_by = "TOTO"
class FiledbentityFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = Filedbentity
sqlalchemy_session = DBSession
dbentity_id = 1
md5sum = "12345"
previous_file_name = "filename"
topic_id = 1
format_id = 1
file_date = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
is_public = True
is_in_spell = True
is_in_browser = True
file_extension = "txt"
data_id = 1
s3_url = "http://example.org/s3"
readme_file_id = 1
format_name = factory.Sequence(lambda n: 'format_{0}'.format(n))
display_name = "My entity"
obj_url = "http://example.org/entity"
source_id = 1
bud_id = None
sgdid = "S000001"
dbentity_status = "Active"
subclass = "FILE"
year = 1990
date_created = factory.LazyAttribute(lambda o: | |
any missing information (such as
``'scf_xc_energy'`` in MP2 calculations) is not an issue.
Parameters
----------
outfile : :obj:`io.TextIOWrapper`
Buffered text stream of the output file.
line : :obj:`str`
Parsed line from ``outfile``.
"""
while 'SCF CONVERGENCE' != line.strip():
# Nuclear Repulsion : 135.87324654 Eh 3697.29901 eV
if 'Nuclear Repulsion :' in line:
if 'nuclear_repulsion_energy' not in self.data['properties'].keys():
self.data['properties']['nuclear_repulsion_energy'] = []
self.data['properties']['nuclear_repulsion_energy'].append(
float(line.split()[3])
)
# One Electron Energy: -674.26034691 Eh -18347.55681 eV
if 'One Electron Energy:' in line:
if 'scf_one_electron_energy' not in self.data['properties'].keys():
self.data['properties']['scf_one_electron_energy'] = []
self.data['properties']['scf_one_electron_energy'].append(
float(line.split()[3])
)
# Two Electron Energy: 245.90403408 Eh 6691.38895 eV
if 'Two Electron Energy:' in line:
if 'scf_two_electron_energy' not in self.data['properties'].keys():
self.data['properties']['scf_two_electron_energy'] = []
self.data['properties']['scf_two_electron_energy'].append(
float(line.split()[3])
)
# E(XC) : -26.170411238000 Eh
if 'E(XC) ' in line:
if 'scf_xc_energy' not in self.data['properties'].keys():
self.data['properties']['scf_xc_energy'] = []
self.data['properties']['scf_xc_energy'].append(
float(line.split()[2])
)
line = next(outfile)
def _extract_uhf_spin_contamination(self, outfile, line):
"""Spin contamination from unrestricted Hartree-Fock calculations.
Parameters
----------
outfile : :obj:`io.TextIOWrapper`
Buffered text stream of the output file.
line : :obj:`str`
Parsed line from ``outfile``.
"""
for _ in range(0, 3):
line = next(outfile)
if line.strip() == 'Warning: in a DFT calculation there is little theoretical justification to':
for _ in range(0, 4):
line = next(outfile)
if 'uhf_ideal_average_total_spin_squared' not in self.data['properties'].keys():
self.data['properties']['uhf_ideal_average_total_spin_squared'] = []
if 'uhf_calculated_average_total_spin_squared' not in self.data['properties'].keys():
self.data['properties']['uhf_calculated_average_total_spin_squared'] = []
# Expectation value of <S**2> : 0.750016
self.data['properties']['uhf_calculated_average_total_spin_squared'].append(
float(line.split()[5])
)
line = next(outfile)
# Ideal value S*(S+1) for S=0.5 : 0.750000
self.data['properties']['uhf_ideal_average_total_spin_squared'].append(
float(line.split()[6])
)
def _extract_mp_properties(self, outfile, line):
"""Moller-Plesset calculation properties.
This is called directly after the ``'ORCA MP2 '`` trigger, and
will terminate once the ``'ORCA property calculations'`` trigger is reached.
Instead of returning the energies themselves, we handle the creation and
modification of ``mp_info`` here so any missing information is not an
issue.
Parameters
----------
outfile : :obj:`io.TextIOWrapper`
Buffered text stream of the output file.
line : :obj:`str`
Parsed line from ``outfile``.
"""
while '- ORCA property calculations *' != line.strip():
# Freezing NCore=10 chemical core electrons
if 'Freezing NCore=' == line.strip()[:15]:
if 'frozen_electrons' not in self.data['keywords'].keys():
self.data['keywords']['frozen_electrons'] = []
frozen_electrons = int(line.split()[1][6:])
self.data['keywords']['frozen_electrons'].append(frozen_electrons)
# MP2 CORRELATION ENERGY : -3.132364939 Eh
if 'MP2 CORRELATION ENERGY' in line:
if 'mp2_correlation_energy' not in self.data['properties'].keys():
self.data['properties']['mp2_correlation_energy'] = []
if 'RI-MP2' in line:
index = 3
else:
index = 4
self.data['properties']['mp2_correlation_energy'].append(
float(line.split()[index])
)
break
line = next(outfile)
def _extract_cc_method(self, outfile, line):
"""Coupled cluster method information.
Parameters
----------
outfile : :obj:`io.TextIOWrapper`
Buffered text stream of the output file.
line : :obj:`str`
Parsed line from ``outfile``.
"""
while 'Number of correlated electrons' != line.strip()[:30]:
# Frozen core treatment ... chemical core (0 el)
# or
# Frozen core treatment ... NO frozen core
if 'Frozen core treatment' == line.strip()[:21]:
if 'NO frozen core' == line.strip()[-14:]:
self.data['keywords']['frozen_core'] = False
else:
frozen_electrons = int(line.split()[6][1:])
if frozen_electrons == 0:
self.data['keywords']['frozen_core'] = False
else:
self.data['keywords']['frozen_core'] = True
if 'frozen_electrons' not in self.data['keywords'].keys():
self.data['keywords']['frozen_electrons'] = []
self.data['keywords']['frozen_electrons'].append(frozen_electrons)
line = next(outfile)
    def _extract_cc_properties(self, outfile, line):
        """Coupled cluster properties.
        This is called directly after the ``'COUPLED CLUSTER ITERATIONS'``
        trigger, and will terminate once the ``'ORCA POPULATION ANALYSIS'``
        trigger is reached.
        Collects, into ``self.data['properties']``: the MP2 guess energies
        printed under the iteration-table header, the converged CCSD total
        energy, the T1 diagnostic, and (when present) the CCSD(T) total
        energy. Each value is appended to a per-key list so repeated
        calculations in one output file accumulate.
        Parameters
        ----------
        outfile : :obj:`io.TextIOWrapper`
            Buffered text stream of the output file.
        line : :obj:`str`
            Parsed line from ``outfile``.
        """
        while 'ORCA POPULATION ANALYSIS' != line.strip() \
            and '* ORCA property calculations *' != line.strip():
            # Iter E(tot) E(Corr) Delta-E Residual Time <S|S>**1/2
            # 0 -7.469294707 -0.036553055 0.000000000 0.027013328 0.05 0.000000001
            #
            # or
            #
            # Iter E(tot) E(Corr) Delta-E Residual Time
            # 0 -2.897443580 -0.035772194 0.000000000 0.027217829 0.00
            if line.strip()[:79] == 'Iter E(tot) E(Corr) Delta-E Residual Time':
                # Extracts MP2 energies under the initial line.
                # (Iteration 0 of the CC loop is the MP2 guess.)
                line = next(outfile)
                if 'mp2_correlation_energy' not in self.data['properties'].keys():
                    self.data['properties']['mp2_correlation_energy'] = []
                if 'mp2_total_energy' not in self.data['properties'].keys():
                    self.data['properties']['mp2_total_energy'] = []
                self.data['properties']['mp2_correlation_energy'].append(
                    float(line.split()[2])
                )
                self.data['properties']['mp2_total_energy'].append(
                    float(line.split()[1])
                )
            # E(TOT) ... -7.473852176
            if line.strip()[:6] == 'E(TOT)':
                # Extracts total CCSD energy.
                if 'ccsd_total_energy' not in self.data['properties'].keys():
                    self.data['properties']['ccsd_total_energy'] = []
                self.data['properties']['ccsd_total_energy'].append(
                    float(line.split()[2])
                )
            # T1 diagnostic ... 0.001316573
            if line.strip()[:13] == 'T1 diagnostic':
                # Extracts T1 diagnostic.
                if 't1_diagnostic' not in self.data['properties'].keys():
                    self.data['properties']['t1_diagnostic'] = []
                self.data['properties']['t1_diagnostic'].append(
                    float(line.split()[3])
                )
            # E(CCSD(T)) ... -7.473882409
            if line.strip()[:10] == 'E(CCSD(T))':
                # Extracts total CCSD(T) energy..
                if 'ccsd(t)_total_energy' not in self.data['properties'].keys():
                    self.data['properties']['ccsd(t)_total_energy'] = []
                self.data['properties']['ccsd(t)_total_energy'].append(
                    float(line.split()[2])
                )
            line = next(outfile)
    def _extract_scf_info(self, outfile, line):
        """Parse the SCF settings section of an ORCA output file.

        Scans until the ``'Total time needed '`` line and stores results in
        place on ``self.data``; nothing is returned.

        Keys that may be added to ``self.data['keywords']``:

        ``'hf_type'``
            The Hartree-Fock type (e.g. ``'UHF'``) read from the
            "Hartree-Fock type HFTyp" row.
        ``'rij_approximation'``
            The resolution of identity (RI) approximation for the Coulomb
            (J) term.
        ``'cosx_approximation'``
            The chain-of-spheres integration approximation to the exchange
            term (COSX).
        ``'rik_approximation'``
            RI applied to both the Coulomb (J) and exchange (K) terms.

        ``self.data['n_electrons']`` also gets the electron count appended,
        one entry per SCF section encountered.

        Parameters
        ----------
        outfile : :obj:`io.TextIOWrapper`
            Buffered text stream of the output file.
        line : :obj:`str`
            Parsed line from ``outfile``.
        """
        while 'Total time needed ' not in line.strip():
            # Hamiltonian:
            # Ab initio Hamiltonian Method .... Hartree-Fock(GTOs)
            # General Settings:
            # Integral files IntName .... al.chrg0.mult2-orca.sp.esp-ccsdt.anopvqz.vtightscf.sym-lambda0
            # Hartree-Fock type HFTyp .... UHF
            if 'Ab initio Hamiltonian' == line.strip()[:21]:
                # We only include the HF type in the keywords.
                # NOTE(review): assumes a fixed section layout where the
                # "Hartree-Fock type HFTyp" row sits exactly five raw reads
                # below the "Ab initio Hamiltonian" row — confirm against
                # other ORCA versions.
                for _ in range(0, 5):
                    line = next(outfile)
                hf_type = line.split()[4]
                self.data['keywords']['hf_type'] = hf_type
            # Number of Electrons NEL .... 3
            if 'Number of Electrons' == line.strip()[:19]:
                if 'n_electrons' not in self.data.keys():
                    self.data['n_electrons'] = []
                n_electrons = int(line.split()[5])
                self.data['n_electrons'].append(n_electrons)
            # RI-approximation to the Coulomb term is turned on
            if 'RI-approximation to the Coulomb term is turned on' in line:
                self.data['keywords']['rij_approximation'] = True
            # RIJ-COSX (HFX calculated with COS-X)).... on
            if 'RIJ-COSX (HFX calculated with COS-X)' in line:
                self.data['keywords']['cosx_approximation'] = True
            # RI-JK (J+K treated both via RI) .... on
            if 'RI-JK (J+K treated both via RI)' in line:
                self.data['keywords']['rik_approximation'] = True
            line = next(outfile)
def _extract_mulliken_charges(self, outfile, line):
"""Mulliken atomic charges in same order as atomic coordinates.
Parameters
----------
outfile : :obj:`io.TextIOWrapper`
Buffered text stream of the output file.
line : :obj:`str`
Parsed line from ``outfile``.
"""
line = next(outfile)
line = next(outfile)
# Creates initial mulliken_charges property.
if 'mulliken_charges' not in self.data['properties'].keys():
self.data['properties']['mulliken_charges'] = []
# Appends Mulliken charges to a new item for every structure.
self.data['properties']['mulliken_charges'].append([])
while 'Sum of atomic charges' not in line:
line_split = line.split(':')
self.data['properties']['mulliken_charges'][-1].append(
float(line_split[-1])
)
line = next(outfile)
def _extract_loewdin_charges(self, outfile, line):
"""Loewdin atomic charges in same order as atomic coordinates.
Parameters
----------
outfile : :obj:`io.TextIOWrapper`
Buffered text stream of the output file.
line : :obj:`str`
Parsed line from ``outfile``.
"""
line = next(outfile)
line = next(outfile)
# Creates initial loewdin_charges property.
if 'loewdin_charges' not in self.data['properties'].keys():
self.data['properties']['loewdin_charges'] = []
# Appends Loewdin charges to a new item for every structure.
self.data['properties']['loewdin_charges'].append([])
while '' != line.strip():
line_split = line.split(':')
self.data['properties']['loewdin_charges'][-1].append(
float(line_split[-1])
)
line = next(outfile)
def _extract_dipole(self, outfile, line):
"""The X, Y, and Z dipole components.
Final QCJSON specifies the method of the dipole moment (e.g.,
``'scf_dipole_moment'``, ``'mp2_dipole_moment'``). For now, we just
store it as ``'dipole_moment'``.
Parameters
----------
outfile : :obj:`io.TextIOWrapper`
Buffered text stream of the output file.
line : :obj:`str`
Parsed line from ``outfile``.
"""
if 'dipole_moment' not in self.data['properties'].keys():
self.data['properties']['dipole_moment'] = []
while 'Total Dipole Moment :' not in line:
line = next(outfile)
line_split = line.split()
dipole = [float(line_split[4]), float(line_split[5]), float(line_split[6])]
self.data['properties']['dipole_moment'].append(dipole)
def _add_geo_conv(self, info_label, line):
"""Parse and add geometric convergence info to data.
Parameters
----------
info_label : :obj:`str`
Label for geometric convergence criteria.
line : :obj:`str`
Line from output file to extract information from.
"""
split_line = line.split()
value = float(split_line[2])
target = float(split_line[3])
if f'geo_{info_label}_target' not in self.data['keywords'].keys():
self.data['keywords'][f'geo_{info_label}_target'] = target
try:
self.data['keywords'][f'geo_{info_label}_value'].append(value)
except KeyError:
self.data['keywords'][f'geo_{info_label}_value'] = [value]
def _extract_geo_conv(self, outfile, line):
"""Extract geometric convergence values and tolerance.
Parameters
----------
outfile : :obj:`io.TextIOWrapper`
Buffered text stream of the output file.
line : :obj:`str`
Parsed line from ``outfile``.
"""
while 'Max(Dihed)' not in line and 'Max(Improp)' not in line:
if 'Energy change' in line:
self._add_geo_conv('energy_change', line)
elif 'RMS gradient' in line:
self._add_geo_conv('rms_gradient', line)
elif | |
<gh_stars>0
from datetime import datetime
from itertools import cycle
import logging
import random
from uuid import uuid4
from .exceptions import (
BadSuggestion,
BlockedSpace,
BoardIsFull,
CardsAlreadyDealt,
GameOver,
InsufficientPlayers,
InvalidHallway,
InvalidPlayer,
InvalidPlayerName,
InvalidRoom,
InvalidSpace,
InvalidSpaceForSuggestion,
InvalidSuspect,
InvalidTargetSpace,
InvalidWeapon,
MoveCompleted,
NotPlayersTurn,
PlayerAlreadyMappedToSuspect,
UnfinishedMove,
)
class Space:
    """A single location on the game board.

    Tracks which suspects currently occupy it and whether it offers a
    secret passageway to another space.
    """
    def __init__(self, name):
        self.name = name
        # Base spaces (hallways) hold only one suspect; Room overrides this.
        self.max_allowed_suspects = 1
        # Secret passageways are wired up by the Board for corner rooms.
        self.has_secret_passageway = False
        self.passage_way_to = None
        self.suspects_present = set()
    def remove_suspect(self, suspect):
        """Take ``suspect`` out of this space."""
        self.suspects_present.remove(suspect)
    def add_suspect(self, suspect):
        """Place ``suspect`` into this space.

        ``suspects_present`` is sometimes reassigned as a plain list during
        board setup, so it is coerced back to a set here.
        """
        self.suspects_present = set(self.suspects_present) | {suspect}
class Room(Space):
    """A room on the board; unlike hallways, up to six suspects may
    occupy it at once."""
    def __init__(self, name):
        super().__init__(name)
        self.max_allowed_suspects = 6
class Hallway(Space):
    """A hallway connecting rooms; keeps all ``Space`` defaults,
    including the one-suspect capacity."""
    def __init__(self, name):
        super().__init__(name)
class Suspect:
    """A suspect token/card in the game.

    Equality and hashing stay identity-based on purpose: the Board keys
    sets and dicts on unique card objects.

    Parameters
    ----------
    name : str
        Display name of the suspect.
    """
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        # Debug-friendly representation (was previously the default
        # object repr, which made logs and shell sessions hard to read).
        return '{}({!r})'.format(type(self).__name__, self.name)
class Weapon:
    """A weapon card in the game.

    Equality and hashing stay identity-based on purpose: the Board keys
    sets and dicts on unique card objects.

    Parameters
    ----------
    name : str
        Display name of the weapon.
    """
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        # Debug-friendly representation (was previously the default
        # object repr, which made logs and shell sessions hard to read).
        return '{}({!r})'.format(type(self).__name__, self.name)
class Player:
    """A participant in the game, bound to one suspect token.

    Parameters
    ----------
    name : str
        The player's display name.
    suspect : Suspect
        The suspect this player controls.
    """
    def __init__(self, name, suspect):
        self.__name = name
        self.__suspect = suspect
        self.__cards = []
        # Every suggestion this player has made, with its disprove result.
        self.__suggestions = []
        # Game sheet maps each card to whether it could still be in the
        # confidential file. It is populated by the Board when the player
        # is added (everything True except the player's own cards).
        self.game_sheet = {}
        self.in_the_game = True
        self.board = None
        # Per-turn bookkeeping.
        self.turn = {
            'moved': False,
            'suggested': False,
            'accused': False
        }
    @property
    def cards(self):
        return self.__cards
    @cards.setter
    def cards(self, cards):
        # Cards may only be dealt once per game.
        if self.__cards:
            raise CardsAlreadyDealt('Cards have been dealt to this player')
        self.__cards = cards
        # A player's own cards cannot be in the confidential file.
        for card in cards:
            self.game_sheet[card] = False
    @property
    def suggestion(self):
        # Most recent suggestion entry, or [] if none was made yet.
        return self.__suggestions[-1] if self.__suggestions else []
    @property
    def suggestions(self):
        return self.__suggestions
    @property
    def all_suggestions(self):
        # BUG FIX: previously returned self.__all_suggestions, an attribute
        # that was never assigned anywhere, so any access raised
        # AttributeError. All suggestions live in __suggestions.
        return self.__suggestions
    def update_suggestions(self, suggestion, disprove):
        # Record a suggestion together with the disprove result (or None).
        self.__suggestions.append({
            'suggestion': suggestion,
            'disprove': disprove
        })
    @property
    def suspect(self):
        return self.__suspect
    @property
    def name(self):
        return self.__name
    def update_game_sheet(self, card):
        # Mark ``card`` as ruled out of the confidential file.
        self.game_sheet[card] = False
class Board:
    def __init__(self):
        """Build the complete initial game state.

        Creates all weapon/suspect/room/hallway card objects, wires up the
        secret passageways, places each suspect in a starting hallway, lays
        out the 5x5 board grid, draws a random confidential file (room,
        suspect, weapon), and shuffles the remaining deck for dealing.
        """
        # Weapons
        self.ROPE = Weapon('Rope')
        self.KNIFE = Weapon('Knife')
        self.LEAD_PIPE = Weapon('Lead Pipe')
        self.WRENCH = Weapon('Wrench')
        self.REVOLVER = Weapon('Revolver')
        self.CANDLESTICK = Weapon('Candlestick')
        self.LIST_OF_WEAPONS = [
            self.ROPE,
            self.KNIFE,
            self.LEAD_PIPE,
            self.WRENCH,
            self.REVOLVER,
            self.CANDLESTICK
        ]
        # Suspects
        self.SCARLET = Suspect('Miss Scarlet')
        self.MUSTARD = Suspect('<NAME>')
        self.WHITE = Suspect('Mrs. White')
        self.GREEN = Suspect('Mr. Green')
        self.PEACOCK = Suspect('Mrs. Peacock')
        self.PLUM = Suspect('Professor Plum')
        self.LIST_OF_SUSPECTS = [
            self.SCARLET,
            self.MUSTARD,
            self.WHITE,
            self.GREEN,
            self.PEACOCK,
            self.PLUM,
        ]
        # Rooms
        self.STUDY = Room('The Study')
        self.HALL = Room('The Hall')
        self.LOUNGE = Room('The Lounge')
        self.LIBRARY = Room('The Library')
        self.BILLIARD_ROOM = Room('The Billiard Room')
        self.DINING_ROOM = Room('The Dining Room')
        self.CONSERVATORY = Room('The Conservatory')
        self.BALLROOM = Room('The Ballroom')
        self.KITCHEN = Room('The Kitchen')
        # Hallways
        self.STUDY_TO_HALL = Hallway('The Study to Hall Hallway')
        self.HALL_TO_LOUNGE = Hallway('The Hall to Lounge Hallway')
        self.STUDY_TO_LIBRARY = Hallway('The Study to Library Hallway')
        self.HALL_TO_BILLIARD = Hallway('The Hall to Billiard Room Hallway')
        self.LOUNGE_TO_DINING = Hallway('The Lounge to Dining Room Hallway')
        self.LIBRARY_TO_BILLIARD = Hallway('The Library to Billiard Room Hallway')
        self.BILLIARD_TO_DINING = Hallway('The Billiard Room to Dining Room Hallway')
        self.LIBRARY_TO_CONSERVATORY = Hallway('The Library to Conservatory Hallway')
        self.BILLIARD_TO_BALLROOM = Hallway('The Billiard Room to Ballroom Hallway')
        self.DINING_TO_KITCHEN = Hallway('The Dining Room to Kitchen Hallway')
        self.CONSERVATORY_TO_BALLROOM = Hallway('The Conservatory to Ballroom Hallway')
        self.BALLROOM_TO_KITCHEN = Hallway('The Ballroom to Kitchen Hallway')
        # Define the spaces a little more
        # (corner rooms are connected pairwise by secret passageways)
        self.STUDY.has_secret_passageway = True
        self.STUDY.passage_way_to = self.KITCHEN
        self.LOUNGE.has_secret_passageway = True
        self.LOUNGE.passage_way_to = self.CONSERVATORY
        self.CONSERVATORY.has_secret_passageway = True
        self.CONSERVATORY.passage_way_to = self.LOUNGE
        self.KITCHEN.has_secret_passageway = True
        self.KITCHEN.passage_way_to = self.STUDY
        # Add suspects to the spaces
        # NOTE: these are plain lists; Space.add_suspect coerces
        # suspects_present back to a set on first use.
        self.HALL_TO_LOUNGE.suspects_present = [self.SCARLET]
        self.STUDY_TO_LIBRARY.suspects_present = [self.PLUM]
        self.LOUNGE_TO_DINING.suspects_present = [self.MUSTARD]
        self.LIBRARY_TO_CONSERVATORY.suspects_present = [self.PEACOCK]
        self.CONSERVATORY_TO_BALLROOM.suspects_present = [self.GREEN]
        self.BALLROOM_TO_KITCHEN.suspects_present = [self.WHITE]
        self.LIST_OF_ROOMS = [
            self.STUDY,
            self.HALL,
            self.LOUNGE,
            self.LIBRARY,
            self.BILLIARD_ROOM,
            self.DINING_ROOM,
            self.CONSERVATORY,
            self.BALLROOM,
            self.KITCHEN,
        ]
        self.LIST_OF_HALLWAYS = [
            self.STUDY_TO_HALL,
            self.HALL_TO_LOUNGE,
            self.STUDY_TO_LIBRARY,
            self.HALL_TO_BILLIARD,
            self.LOUNGE_TO_DINING,
            self.LIBRARY_TO_BILLIARD,
            self.BILLIARD_TO_DINING,
            self.LIBRARY_TO_CONSERVATORY,
            self.BILLIARD_TO_BALLROOM,
            self.DINING_TO_KITCHEN,
            self.CONSERVATORY_TO_BALLROOM,
            self.BALLROOM_TO_KITCHEN,
        ]
        self.LIST_OF_SPACES = self.LIST_OF_ROOMS + self.LIST_OF_HALLWAYS
        # Make a deck
        self.GAME_DECK = self.LIST_OF_WEAPONS + list(self.LIST_OF_SUSPECTS) + self.LIST_OF_ROOMS
        self.id = uuid4()
        self.__game_started = False
        self.__time_started = None
        # The Board is a 5x5 grid with four holes.
        self.__map = [
            [
                self.STUDY,
                self.STUDY_TO_HALL,
                self.HALL,
                self.HALL_TO_LOUNGE,
                self.LOUNGE
            ],
            [
                self.STUDY_TO_LIBRARY,
                None,
                self.HALL_TO_BILLIARD,
                None,
                self.LOUNGE_TO_DINING
            ],
            [
                self.LIBRARY,
                self.LIBRARY_TO_BILLIARD,
                self.BILLIARD_ROOM,
                self.BILLIARD_TO_DINING,
                self.DINING_ROOM
            ],
            [
                self.LIBRARY_TO_CONSERVATORY,
                None,
                self.BILLIARD_TO_BALLROOM,
                None,
                self.DINING_TO_KITCHEN
            ],
            [
                self.CONSERVATORY,
                self.CONSERVATORY_TO_BALLROOM,
                self.BALLROOM,
                self.BALLROOM_TO_KITCHEN,
                self.KITCHEN
            ]
        ]
        # These are the holes in the board
        self.__bad_coordinates = [(1, 1), (1, 3), (3, 1), (3, 3)]
        # These are valid deltas when subtracting coordinates to
        # validate a move between spaces on the board
        # ((+/-4, +/-4) covers the diagonal secret passageways;
        # (0, 0) covers staying put)
        self.__valid_coord_deltas = [(1, 0), (0, 1), (-1, 0), (0, -1),
                                     (4, 4), (4, -4), (-4, 4), (-4, -4),
                                     (0, 0)]
        # Create the confidential file
        # (order matters: [room, suspect, weapon] — make_suggestion compares
        # against this list ordering)
        self.__confidential_file = [
            random.choice(self.LIST_OF_ROOMS),
            random.choice(self.LIST_OF_SUSPECTS),
            random.choice(self.LIST_OF_WEAPONS),
        ]
        # Now that the confidential envelope is set up, shuffle the
        # remaining deck. Make it an iterable so cards can be distributed
        # to players.
        self.__deck = [card for card in self.GAME_DECK
                       if card not in self.__confidential_file]
        random.shuffle(self.__deck)
        self.__deck = iter(self.__deck)
        # Maintain a map of Suspect objects to Player objects
        self.__suspect_to_player_map = dict.fromkeys(self.LIST_OF_SUSPECTS, None)
        # Initialize the current player. This will be set once all suspects
        # are bound to players. Setting this means that the game has begun
        self.__current_player = None
        # "GAME _OVER_, MAN :'("
        self.__game_over = False
        # Winner of the game
        self.__winner = None
        # Use to yield the next player
        self.__suspect_looper = cycle(self.LIST_OF_SUSPECTS)
        # Initialize a logger
        self.log = logging.getLogger(__name__)
        self.log.debug('Started game')
        # NOTE(review): confidential_file is presumably a property exposing
        # __confidential_file, defined outside this view — confirm.
        self.log.debug('Confidential file contains "{}"'.format(
            '", "'.join([_.name for _ in self.confidential_file])
        ))
# ------ Board Actions ------
def add_player(self, player):
if not player.name or not player.name.strip():
raise InvalidPlayerName('Player name not specified')
if self.__cannot_add_more_players():
raise BoardIsFull('All players mapped to suspects on this board')
if self.__suspect_to_player_map[player.suspect]:
raise PlayerAlreadyMappedToSuspect('Player already mapped to suspect: {} <-> {}'.format(
self.__suspect_to_player_map[player.suspect].name,
player.suspect.name
))
self.__suspect_to_player_map[player.suspect] = player
self.deal_cards_to(player)
player.game_sheet = {_: True for _ in self.GAME_DECK}
for _ in player.cards:
player.game_sheet[_] = False
self.log.info('Added player {} as {}'.format(
player.name,
player.suspect.name
))
# If this is the last player, start the game
# The first suspect returned is scarlet
if self.__cannot_add_more_players():
self.__game_started = True
self.__time_started = '{}Z'.format(str(datetime.utcnow().isoformat()))
self.current_player = self.get_player_mapped_to(self.__get_next_suspect())
return player
def __cannot_add_more_players(self):
if None in set(self.__suspect_to_player_map.values()):
return False
return True
def move_player(self, player, space=None):
self.__valid_board_state()
self.__valid_player(player)
self.__valid_player_turn(player)
self.__valid_space(space)
# Update the turn. Throw an error if player already made a move
if player.turn['moved']:
raise MoveCompleted('{} ({}) has already moved'.format(
self.current_player.name,
self.current_suspect.name
))
old_space = [_
for _ in self.LIST_OF_SPACES
if player.suspect in _.suspects_present
][0]
new_space = old_space if not space else space
self.__valid_target_space(player, old_space, new_space)
old_space.remove_suspect(player.suspect)
new_space.add_suspect(player.suspect)
player.turn['moved'] = True
if old_space == new_space:
self.log.info('{} ({}) stayed in {}'.format(
player.name,
player.suspect.name,
old_space.name
))
else:
self.log.info('{} ({}) moved from {} to {}'.format(
player.name,
player.suspect.name,
old_space.name,
new_space.name
))
    def make_suggestion(self, player, suspect, weapon):
        """Handle a player's suggestion from their current room.

        Validates inputs, pulls the suggested suspect into the player's
        room, and either ends the game (if the suggestion matches the
        confidential file) or asks the other players to disprove it.
        Making a suggestion also consumes the player's move for the turn.
        Returns True.
        """
        self.__valid_board_state()
        self.__valid_player(player)
        self.__valid_player_turn(player)
        self.__valid_suspect(suspect)
        self.__valid_weapon(weapon)
        self.__valid_space_for_suggestion()
        suspect_space = self.__get_space_for(suspect)
        player_room = self.__get_space_for(player)
        # Update the turn. Throw an error if player already made a suggestion
        if player.turn['suggested']:
            raise MoveCompleted('{} ({}) has already made a suggestion'.format(
                self.current_player.name,
                self.current_suspect.name
            ))
        # A suggestion naming the player's own cards can never be correct.
        if suspect in player.cards or weapon in player.cards:
            raise BadSuggestion('Look at your cards, yo...')
        # A suggestion also marks the move as used for this turn.
        player.turn['moved'] = True
        player.turn['suggested'] = True
        suggestion = {
            'weapon': weapon,
            'suspect': suspect,
            'room': player_room,
        }
        disprove = None
        self.log.info('{} ({}) suggested that {} ({}) did it with a {} in {}'.format(
            player.name,
            player.suspect.name,
            self.get_player_mapped_to(suspect).name,
            suspect.name,
            weapon.name,
            player_room.name
        ))
        # First move the suspect into the player's space
        # if the player is active
        if self.get_player_mapped_to(suspect).in_the_game:
            suspect_space.remove_suspect(suspect)
            player_room.add_suspect(suspect)
            self.log.info('{} ({}) moved from {} to {}'.format(
                self.get_player_mapped_to(suspect).name,
                suspect.name,
                suspect_space.name,
                player_room.name
            ))
        # End the game if the suggestion is in the game file
        # (list order [room, suspect, weapon] mirrors how the confidential
        # file is built in __init__)
        if [player_room, suspect, weapon] == self.__confidential_file:
            self.__game_over = True
            self.__winner = player
            disprove = None
            self.log.info('{} ({})\'s suggestion was correct. Game Over!'.format(
                player.name,
                player.suspect.name
            ))
        else:
            # NOTE(review): this unpack rebinds ``suspect`` to the
            # disproving player's suspect, shadowing the parameter — the
            # parameter is not used again afterwards, but confirm this is
            # intentional.
            disprove_player, suspect, card = self.disprove_suggestion(player, suggestion)
            disprove = {
                'suspect': suspect,
                'player': disprove_player,
                'card': card,
            }
        player.update_suggestions(suggestion, disprove)
        return True
def disprove_suggestion(self, player, suggestion):
self.__valid_board_state()
self.__valid_player(player)
self.__valid_player_turn(player)
if not player.turn['suggested']:
raise UnfinishedMove('{} ({}) must make a suggestion before getting a card to disprove it'.format(
player.name,
player.suspect.name,
))
# Need to cycle through the list of suspects clockwise
suspect_index = self.LIST_OF_SUSPECTS.index(self.current_suspect)
bottom = self.LIST_OF_SUSPECTS[suspect_index + 1:]
top = self.LIST_OF_SUSPECTS[:suspect_index]
our_suggestion = suggestion.values()
for suspect in bottom + top:
their_cards = self.get_player_mapped_to(suspect).cards
# Cast to list because, for some reason,
# 'if not common_cards' doesn't work :/
common_cards = list(set(their_cards) & set(our_suggestion))
# If the next player has card(s) to disprove | |
['UniformNoise', 'NormalNoise', 'MNIST', 'FashionMNIST', 'NotMNIST', 'CIFAR10', 'STL10', 'CIFAR100',
'TinyImagenet']
d1_tasks = ['MNIST', 'STL10', 'FashionMNIST', 'CIFAR100' ]
# d2_tasks = ['NormalNoise']
# d3_tasks = ['UniformNoise']
types = [2]
n_votes = [1]
centiles = []
for model_name in ["VGG", "Resnet"]:
for type in types:
for centile in centiles:
agg_acc = 0
counter = 0
for d1 in d1_tasks:
for d2 in d2_tasks:
if d2 in d2_compatiblity[d1]:
df_thresholds = pd.read_csv(
"results/article_plots/full_nets/cut_tail/scoring/" + model_name + '_' + d1 + '_' + d2 + 'th-acc.csv',
index_col=0)
for d3 in d2_tasks:
if d2 != d3 and d3 in d2_compatiblity[d1]:
file_pattern = model_name + '_' + d1 + '_' + d2 + '_' + d3 + "_*"
files = glob.glob(
os.path.join("results/article_plots/full_nets/cut_tail/scoring", file_pattern))
frames = dict()
rows = 0
file_counter = 0
for file in files:
df = pd.read_csv(file, index_col=0)
rows = len(df.index)
layernum = file.split("_")[-1].split(".")[0]
if df_thresholds["threshold"][int(layernum)] != -1:
frames[layernum] = df
# print(f"n: {file_counter}, {file}")
file_counter += 1
correct_count = 0
thresholds_lin = linearize(frames, df_thresholds, d1, model_name)
for i in range(rows):
chosen_id = choose_layer(frames, thresholds_lin, i, type=type, centile=centile)
correct_count += frames[chosen_id]["correct"][i]
acc = correct_count / rows
agg_acc += acc
counter += 1
print(f"{d1} - type {type}, centile {centile} Aggregated accuracy: {agg_acc / counter}")
for type in types:
for votes in n_votes:
agg_acc = 0
counter = 0
for d1 in d1_tasks:
votes = int(len(layers_shapes[model_name][d1]) / 3 + 1)
if votes % 2 == 0:
votes += 1
for d2 in d2_tasks:
if d2 in d2_compatiblity[d1]:
df_thresholds = pd.read_csv(
"results/article_plots/full_nets/cut_tail/scoring/" + model_name + '_' + d1 + '_' + d2 + 'th-acc.csv',
index_col=0)
for d3 in d3_tasks:
if d2 != d3 and d3 in d2_compatiblity[d1]:
file_pattern = model_name + '_' + d1 + '_' + d2 + '_' + d3 + "_*"
files = glob.glob(
os.path.join("results/article_plots/full_nets/cut_tail/scoring", file_pattern))
frames = dict()
rows = 0
for file in files:
df = pd.read_csv(file, index_col=0)
rows = len(df.index)
layernum = file.split("_")[-1].split(".")[0]
if df_thresholds["threshold"][int(layernum)] != -1:
frames[layernum] = df
correct_count = 0
thresholds_lin = linearize(frames, df_thresholds, d1, model_name)
# chosen_ids = dict()
for i in range(rows):
chosen_ids = choose_layers(frames, thresholds_lin, i, type=type, votes=votes)
correct_votes = 0
for chosen in chosen_ids:
correct_votes += frames[chosen]["correct"][i]
correct_count += (correct_votes > (len(chosen_ids) / 2))
# if chosen_ids.get(chosen_id) is None:
# chosen_ids[chosen_id] = 0
# else:
# chosen_ids[chosen_id] += 1
acc = correct_count / rows
# print(model_name + '_' + d1 + '_' + d2 + '_' + d3 + " acc: " + str(acc))
agg_acc += acc
counter += 1
print(f"{model_name} {d1} - type {type}, votes {votes} Aggregated accuracy: {agg_acc / counter}")
def execution_times_plot():
    """Plot Hamming-distance comparison times against single net passes.

    Reads timing arrays from the server CIFAR10 ``.npz`` results file and
    saves the figure into the execution_times results directory.
    """
    timings = np.load("results/article_plots/execution_times/servercifar10.npz")
    for key in ("avg_net_pass", "avg_nap_net_pass", "avg_compute_hamming",
                "avg_compute_hamming_and", "avg_compute_hamming_full_net",
                "avg_compute_hamming_and_full_net"):
        print(timings[key])
    # Pattern counts run from 4000 down to 100 in steps of 300.
    known_patterns = np.arange(100, 4001, 300)[::-1]
    figure, axes = plt.subplots()
    series = [
        ("avg_compute_hamming", "hamming_distance (xor) len=4096"),
        ("avg_compute_hamming_and", "(xor) & (and) len=4096"),
        ("avg_compute_hamming_full_net", "full net xor len=12416"),
        ("avg_compute_hamming_and_full_net", "full net xor & and len=12416"),
    ]
    for key, label in series:
        axes.plot(known_patterns, timings[key], label=label)
    # Horizontal reference lines: a plain forward pass and a
    # NAP-instrumented forward pass.
    plt.axhline(y=timings["avg_net_pass"], linestyle='-')
    min_xlim, max_xlim = plt.xlim()
    plt.text((max_xlim - min_xlim) / 2, timings["avg_net_pass"] * 1.1,
             f'Avg single net pass: {timings["avg_net_pass"]:.4f}')
    plt.text((max_xlim - min_xlim) / 2, timings["avg_nap_net_pass"] * 1.1,
             f'Avg single NAP net pass: {timings["avg_nap_net_pass"]:.4f}')
    plt.axhline(y=timings["avg_nap_net_pass"], linestyle='-')
    plt.xlabel("N known patterns to compare with")
    plt.ylabel("Execution time (seconds)")
    plt.legend(loc='upper left')
    plt.title("Server CIFAR10")
    plt.savefig("results/article_plots/execution_times/ServerCIFAR10")
def compare_exec_times_all_methods():
    """Compare mean execution times of all OOD-detection methods.

    For each in-distribution dataset, gathers ``exec_times`` values from
    every matching ``.npz`` result file, averages them per (method, model)
    pair, and saves a sorted bar plot.
    """
    d1_tasks = ['TinyImagenet']
    for d1 in d1_tasks:
        data = []
        file_pattern = "*" + d1 + '*.npz'
        files = glob.glob(os.path.join("results/article_plots/execution_times", file_pattern))
        for file in files:
            # NOTE(review): the paths are split on '/', which only works on
            # POSIX-style paths even though os.path.join is used above.
            if d1 == "CIFAR10":
                # Skips files whose third underscore-separated token has '0'
                # at index 7 — presumably filtering CIFAR100 results matched
                # by the CIFAR10 glob; confirm against actual filenames.
                if file.split('/')[-1].split("_")[2][7] == '0':
                    print(file)
                    continue
            # Filenames are assumed to look like "<method>_<model>_..." —
            # TODO confirm.
            method = file.split("/")[-1].split("_")[0]
            model = file.split("/")[-1].split("_")[1]
            exec_time = np.load(file)["exec_times"]
            data.append((method, model, d1, exec_time.item()))
        df = pd.DataFrame(data, columns=["method", "model", "dataset", "exec_time"])
        print(df)
        # _ = sns.catplot(x="method", y="exec_time", kind="box", data=df)
        # plt.show()
        # Mean execution time per (method, model), fastest first.
        grouped = df.groupby(["method", "model"])[
            "exec_time"].mean().sort_values(ascending=True)
        figure, axes = plt.subplots()
        x = np.arange(len(grouped.index))
        v = axes.bar(x, grouped.values)
        show_values_on_bars(axes)
        plt.xticks(x, grouped.index, rotation=90)
        plt.tight_layout()
        plt.title(d1)
        # plt.show()
        plt.savefig("results/article_plots/execution_times/plots/" + d1)
def auroc():
    """Report per-(d1, d2) best AUROC, AUPR, and accuracy over all layers.

    For every in-distribution dataset ``d1`` and compatible OOD dataset
    ``d2``, concatenates the per-layer/per-pooling CSV results over all
    validation datasets ``d3``, scores them, and prints the best layer and
    pooling type for each metric plus aggregated averages.

    Assumes the first half of each CSV's rows are in-distribution
    (label 0) and the second half OOD (label 1) — TODO confirm against
    the code that writes these files.
    """
    d1_tasks = ['MNIST', 'FashionMNIST', 'STL10', "CIFAR100"]
    d2_tasks = ['UniformNoise', 'NormalNoise', 'MNIST', 'FashionMNIST', 'NotMNIST', 'CIFAR10', 'STL10', 'CIFAR100',
                'TinyImagenet']
    d3_tasks = ['UniformNoise', 'NormalNoise', 'MNIST', 'FashionMNIST', 'NotMNIST', 'CIFAR10', 'STL10', 'CIFAR100',
                'TinyImagenet']
    layer_dict = {
        "MNIST": 6,
        "FashionMNIST": 2,
        "CIFAR10": 4,
        "CIFAR100": 4,
        "STL10": 11,
        "TinyImagenet": 6
    }
    # NOTE(review): ``scores`` is only referenced by commented-out code
    # below and ``best_layer`` (from layer_dict) is never used.
    scores = 0
    auroc_sum = 0
    aupr_sum = 0
    acc_sum = 0
    counter = 0
    model_name = "VGG"
    for d1 in d1_tasks:
        best_layer = layer_dict[d1]
        for d2 in d2_tasks:
            if d2 in d2_compatiblity[d1]:
                # Track the best metric values (and where they occurred)
                # over all (pooling type, layer) combinations.
                best_auroc = 0
                best_acc = 0
                best_aupr = 0
                best_auroc_pool_type = 0
                best_acc_pool_type = 0
                best_aupr_pool_type = 0
                best_acc_layer = 0
                best_aupr_layer = 0
                best_auroc_layer = 0
                for pool_type in ["avg", "max"]:
                    for layer in range(len(layers_shapes[model_name][d1])):
                        frames = []
                        # Pool rows from every validation dataset d3.
                        for d3 in d3_tasks:
                            if d2 != d3 and d3 in d2_compatiblity[d1]:
                                file_pattern = model_name + '_' + d1 + '_' + d2 + '_' + d3 + "_" + str(layer) + "_" + pool_type + ".csv"
                                files = glob.glob(
                                    os.path.join("results/article_plots/full_nets/fixed", file_pattern))
                                for file in files:
                                    # print(file)
                                    df = pd.read_csv(file, index_col=0)
                                    rows = len(df.index)
                                    # First half in-distribution (0),
                                    # second half OOD (1).
                                    df["label"] = 0
                                    df.loc[int(rows / 2):, "label"] = 1
                                    frames.append(df)
                                    # print(f'{df["correct"].sum() / len(df.index)}')
                                    # print(df)
                        # print(f"{model_name} {d1} vs {d2} layer {layer}")
                        frame = pd.concat(frames, axis=0, ignore_index=True)
                        # "distance" is used as the OOD score.
                        score = roc_auc_score(frame["label"], frame["distance"])
                        acc = frame["correct"].sum() / len(frame.index)
                        lr_precision, lr_recall, _ = precision_recall_curve(frame["label"], frame["distance"])
                        lr_auc = auc(lr_recall, lr_precision)
                        if score > best_auroc:
                            best_auroc_layer = layer
                            best_auroc = score
                            best_auroc_pool_type = pool_type
                        if acc > best_acc:
                            best_acc_layer = layer
                            best_acc = acc
                            best_acc_pool_type = pool_type
                        if lr_auc > best_aupr:
                            best_aupr_layer = layer
                            best_aupr = lr_auc
                            best_aupr_pool_type = pool_type
                        # print(f"DT: {d1} auroc: {score}")
                        # print(f"DT: {d1} acc: {acc}")
                auroc_sum += best_auroc
                aupr_sum += best_aupr
                acc_sum += best_acc
                counter += 1
                print(f"{model_name} {d1} vs {d2} best auroc layer {best_auroc_layer} pt {best_auroc_pool_type} auroc {best_auroc}"
                      f" best aupr layer {best_aupr_layer} aupr {best_aupr} pt {best_aupr_pool_type} "
                      f" best acc layer {best_acc_layer} acc {best_acc} pt {best_acc_pool_type}")
                # counter += 1
                # scores += score
    print(f"Aggregated auroc: {auroc_sum / counter} aupr: {aupr_sum / counter} acc: {acc_sum / counter}")
def choose_layers_and_pool_type(thresholds, accuracies, model, dt, type=0, votes=1, steps=5, thresholds_factor=0.1):
linspace = np.linspace(0.1, 0.9, steps)
quantile_factors = np.sqrt(1. / np.abs(linspace - np.rint(linspace)))
max_threshold = np.max((thresholds + quantile_factors) * quantile_factors, axis=2)[:, :, np.newaxis]
scores = (accuracies - 0.5) * (
thresholds_factor + np.abs(
((thresholds + quantile_factors) * quantile_factors - max_threshold) / max_threshold))
max_acc_ids = np.argmax(scores, axis=2)[:, :, np.newaxis]
best_thresholds = np.take_along_axis(thresholds, max_acc_ids, axis=2).squeeze()
best_accuracies = np.take_along_axis(accuracies, max_acc_ids, axis=2).squeeze()
new_th = np.zeros(thresholds.shape[1])
new_acc = np.zeros(thresholds.shape[1])
chosen = []
shapes = np.array(layers_shapes[model][dt])
shape_factors = shapes / shapes.min()
max_factor = shape_factors.max()
for layer_id in range(thresholds.shape[1]):
max_threshold_pools = np.min(max_threshold[:, layer_id, :])
scores = (best_accuracies[:, layer_id] - 0.5) * (thresholds_factor + np.abs(
((best_thresholds[:, layer_id] + quantile_factors[max_acc_ids[:, layer_id, :]]) * quantile_factors[
max_acc_ids[:, layer_id, :]] - max_threshold_pools) / max_threshold_pools))
if type == 0:
if (scores[:, 0] >= scores[:, 1]).all():
add_factor = quantile_factors[max_acc_ids[0, layer_id, :]] + shape_factors[layer_id]
multiplier = quantile_factors[max_acc_ids[0, layer_id, :]] * (max_factor / shape_factors[layer_id])
chosen.append((layer_id, "max", add_factor, multiplier))
new_th[layer_id] = best_thresholds[0, layer_id]
new_acc[layer_id] = best_accuracies[0, layer_id]
else:
add_factor = quantile_factors[max_acc_ids[1, layer_id, :]] + shape_factors[layer_id]
multiplier = quantile_factors[max_acc_ids[1, layer_id, :]] * (max_factor / shape_factors[layer_id])
chosen.append((layer_id, "avg", add_factor, multiplier))
new_th[layer_id] = best_thresholds[1, layer_id]
new_acc[layer_id] = best_accuracies[1, layer_id]
elif type == 1:
if scores[0, 0] >= scores[1, 1]:
add_factor = quantile_factors[max_acc_ids[0, layer_id, :]] + shape_factors[layer_id]
multiplier = quantile_factors[max_acc_ids[0, layer_id, :]] * (max_factor / shape_factors[layer_id])
chosen.append((layer_id, "max", add_factor, multiplier))
new_th[layer_id] = best_thresholds[0, layer_id]
new_acc[layer_id] = best_accuracies[0, layer_id]
else:
add_factor = quantile_factors[max_acc_ids[1, layer_id, :]] + shape_factors[layer_id]
multiplier = quantile_factors[max_acc_ids[1, layer_id, :]] * (max_factor / shape_factors[layer_id])
chosen.append((layer_id, "avg", add_factor, multiplier))
new_th[layer_id] = best_thresholds[1, layer_id]
new_acc[layer_id] = best_accuracies[1, layer_id]
elif type == 2:
if best_accuracies[0, layer_id] >= best_accuracies[1, layer_id]:
add_factor = quantile_factors[max_acc_ids[0, layer_id, :]] + shape_factors[layer_id]
multiplier = quantile_factors[max_acc_ids[0, layer_id, :]] * (max_factor / shape_factors[layer_id])
chosen.append((layer_id, "max", add_factor, multiplier))
new_th[layer_id] = best_thresholds[0, layer_id]
new_acc[layer_id] = best_accuracies[0, layer_id]
else:
add_factor = quantile_factors[max_acc_ids[1, layer_id, :]] + shape_factors[layer_id]
multiplier = quantile_factors[max_acc_ids[1, layer_id, :]] * (max_factor / shape_factors[layer_id])
chosen.append((layer_id, "avg", add_factor, multiplier))
new_th[layer_id] = best_thresholds[1, layer_id]
new_acc[layer_id] = best_accuracies[1, layer_id]
elif type == 3:
if best_thresholds[0, layer_id] < best_thresholds[1, layer_id]:
add_factor = quantile_factors[max_acc_ids[0, layer_id, :]] + | |
<reponame>joshfuchs/photometry<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 23 20:48:10 2015
@author: jmeza
"""
# ===========================================================================
# Packages ==================================================================
# ===========================================================================
import numpy as np
#import pyfits as fits
import astropy.io.fits as fits
import os
import datetime
import matplotlib.pyplot as plt
import cosmics
from glob import glob
from astropy.convolution import convolve, convolve_fft, Box2DKernel
# ===========================================================================
# Lesser Functions Used by Main Functions ===================================
# ===========================================================================
def init():
    """Create the module-level ``diagnostic`` array used during reduction.

    The array holds 2071 rows by 8 columns of reduction statistics and is
    written out later by ``save_diagnostic()``.
    """
    global diagnostic
    diagnostic = np.zeros((2071, 8))
def save_diagnostic():
    """Save the ``diagnostic`` array to a timestamped text file, to be used
    later to create diagnostic plots.

    The file is opened in append mode (``'ab'``), so repeated calls add to
    the same file. The timestamp is published as the module-level ``now``
    — presumably so other reduction steps can reuse it; the consumers are
    not visible here.
    """
    global now
    now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
    header = 'Reduction done on ' + now + ' \n Columns are: 0) average from bias, 1) average from scaled bias, 2) standard deviation of bias \n 3) flat field average, 4) flat field standard deviation, 5) flat field scaled average, 6) flat field scaled standard deviation '
    with open('reduction_' + now + '.txt','ab') as handle:
        np.savetxt(handle,diagnostic,fmt='%f',header=header)
def gauss(x, p):
    """Single Gaussian on a constant background.

    ``p = [offset, amplitude, center, sigma]``; evaluates
    ``offset + amplitude * exp(-((x - center) / (sqrt(2) * sigma))**2)``.
    """
    offset, amplitude, center, sigma = p[0], p[1], p[2], p[3]
    z = (x - center) / (np.sqrt(2) * sigma)
    return offset + amplitude * np.exp(-z**2.)
def fitgauss(p, fjac=None, x=None, y=None, err=None):
    """Residual function for mpfit: weighted deviations from ``gauss``.

    ``fjac=None`` tells the fitter not to request analytic partial
    derivatives; status 0 means success.
    """
    residuals = (y - gauss(x, p)) / err
    return [0, residuals]
def gaussslope(x, p):
    """Single Gaussian on a sloped (linear) background.

    ``p = [intercept, slope, amplitude, center, sigma]``; evaluates
    ``intercept + slope*x + amplitude * exp(-((x - center) /
    (sqrt(2) * sigma))**2)``.
    """
    intercept, slope, amplitude, center, sigma = p[0], p[1], p[2], p[3], p[4]
    z = (x - center) / (np.sqrt(2) * sigma)
    return intercept + slope * x + amplitude * np.exp(-z**2.)
def fitgaussslope(p, fjac=None, x=None, y=None, err=None):
    """Residual function for mpfit: weighted deviations from ``gaussslope``.

    ``fjac=None`` tells the fitter not to request analytic partial
    derivatives; status 0 means success.
    """
    residuals = (y - gaussslope(x, p)) / err
    return [0, residuals]
def adcstat(specname):
    """Return the ADCSTAT keyword from a spectrum's FITS header.

    Also prints the status for the observer's benefit.
    """
    status = fits.getheader(specname)['ADCSTAT']
    print('ADC status during observations was ', status)
    return status
# ============================================================================
def Read_List( lst ):
    """Read a list file of image names into a Python list.

    Parameters
    ----------
    lst : str
        Path to a text file of image names separated by whitespace
        (spaces and/or newlines).

    Returns
    -------
    list of str
        The individual image names.
    """
    # Context manager guarantees the file is closed even if read() fails
    # (the original closed it manually, leaking on error).
    with open(lst, 'r') as list_file:
        return list_file.read().split()
def List_Combe(img_list):
    """Comb through image names and group consecutive matching frames.

    Splits a list of image names into sub-lists (e.g. separate stars,
    flats, standards) by checking whether each name, with a leading prefix
    cut off, appears inside the following name.
    """
    # RAW images (names starting with a leading date digit) only get a
    # short prefix cut; other names get a longer one. This helps avoid
    # problems with short filenames.
    if img_list[0][0] in ('0', '1', '2'):
        cutpoint = 5
    else:
        cutpoint = 10
    sub_lists = []
    current = [img_list[0]]
    for previous, following in zip(img_list, img_list[1:]):
        if previous[cutpoint:] in following:
            # Same frame group: keep accumulating.
            current.append(following)
        else:
            # Name changed: close out this group and start a new one.
            sub_lists.append(current)
            current = [following]
    sub_lists.append(current)
    return sub_lists
def check_file_exist(name):
    """Return a .fits file name that does not collide in the current dir.

    To be called before writing a file: if ``name`` matches an existing
    .fits file in the working directory, an increasing integer is prepended
    until the name no longer matches.

    Parameters
    ----------
    name : str

    Returns
    -------
    str
        ``name`` itself, or a uniquified variant of it.
    """
    existing = [f for f in os.listdir('.') if f.endswith('.fits')]
    # The 'while' alone covers the no-collision case; the membership test
    # uses 'in' instead of explicit __contains__ calls.
    i = 2
    while name in existing:
        name = str(i) + name
        i += 1
    return name
def Fix_Header(header):
    """Delete header cards whose comments contain the badly coded degree
    symbol ('\\xb0'); pyfits won't write headers that still carry it.

    Mutates ``header`` in place.
    """
    for key in ('param0', 'param61', 'param62', 'param63'):
        if key in header and '\xb0' in header.comments[key]:
            del header[key]
def decimal_dec(hdu_str):
    """Convert a sexagesimal "dd:mm:ss" header string to a decimal value."""
    deg, minutes, seconds = [float(part) for part in hdu_str.split(':')]
    # Test the sign on the string form so "-00:mm:ss" keeps its minus sign:
    # a numeric comparison would miss it because (-0.0 < 0) is False.
    if str(deg)[0] == '-':
        sign = -1
        deg = sign * deg
    else:
        sign = 1
    return sign * (deg + (minutes + seconds / 60.0) / 60.0)
def decimal_ra(hdu_str):
    """Convert a sexagesimal "hh:mm:ss" RA header string to decimal degrees.

    The sexagesimal value is scaled by 15 (hours -> degrees).

    Parameters
    ----------
    hdu_str : str
        Colon-separated "hh:mm:ss" string from a FITS header.

    Returns
    -------
    float
        Decimal degrees, sign preserved.
    """
    val_list = [float(n) for n in hdu_str.split(':')]
    # Fix: test the sign on the string, as decimal_dec() does, so that
    # "-00:mm:ss" keeps its minus sign -- the previous numeric test
    # (val_list[0] < 0) missed it because (-0.0 < 0) is False.
    if str(val_list[0])[0] == '-':
        sng = -1.
        val_list[0] = sng * val_list[0]
    else:
        sng = 1.
    return 15. * sng * (val_list[0] + ((val_list[1] + (val_list[2] / 60.0)) / 60.0))
def SigClip(data_set, lo_sig, hi_sig):
    """One-iteration sigma clip around the median.

    Parameters
    ----------
    data_set : sequence of numbers
        Counts for one pixel across a stack of images.
    lo_sig, hi_sig : float
        Low/high clipping factors in units of the standard deviation.

    Returns
    -------
    list
        Only the values within [median - lo_sig*std, median + hi_sig*std].
    """
    center = np.median(data_set)
    spread = np.std(data_set)
    lower = center - lo_sig * spread
    upper = center + hi_sig * spread
    return [value for value in data_set if lower <= value <= upper]
def RaDec2AltAz(ra, dec, lat, lst ):
    """Convert equatorial coordinates to horizon coordinates.

    Input: RA in decimal hours; DEC in decimal deg;
    LAT in decimal deg; LST in decimal hours.
    Output: ALT, AZ, HA in decimal deg.

    NOTE(review): the original comment says RA/LST are in decimal hours,
    but the hour angle below is wrapped into [0, 360] and converted with
    the degree factor, which treats them as degrees -- confirm the units
    against the callers (decimal_ra() returns degrees).
    """
    # Compute Hour Angle
    ha = lst-ra # hour angle in deg
    if ha < 0 :
        ha = ha+360.
    if ha > 360:
        ha = ha-360.
    # Convert quantities to radians
    ra = ra*(np.pi/180.0)
    dec = dec*(np.pi/180.0)
    lat = lat*(np.pi/180.0)
    ha = ha*(np.pi/180.0)
    # Altitude: sin(alt) = sin(dec)sin(lat) + cos(dec)cos(lat)cos(ha)
    a = np.sin(dec)*np.sin(lat)
    b = np.cos(dec)*np.cos(lat)*np.cos(ha)
    alt = np.arcsin( a+b ) # altitude in radians
    # Azimuth: cos(az) = (sin(dec) - sin(lat)sin(alt)) / (cos(lat)cos(alt))
    a = np.sin(dec)-np.sin(lat)*np.sin(alt)
    b = np.cos(lat)*np.cos(alt)
    az = np.arccos( a/b ) # azimuth in radians
    # arccos is two-valued; take the western solution when sin(ha) > 0
    if np.sin(ha) > 0:
        az = (2.*np.pi) - az
    # Convert Alt, Az, and Ha to decimal deg
    alt = alt*(180.0/np.pi)
    az = az*(180.0/np.pi)
    ha = ha*(180.0/np.pi)
    return alt, az, ha
def AirMass(alt, scale):
    """Instantaneous airmass, to be called by SetAirMass().

    Formula from Allen, Astrophysical Quantities, page 125; see also
    http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?setairmass

    Parameters
    ----------
    alt : float
        Altitude of the star in degrees.
    scale : float
        Atmospheric scale factor (default in callers: 750).

    Returns
    -------
    float
        Airmass for the given altitude and scale factor.
    """
    x = scale * np.sin(np.pi * alt / 180.)
    return np.sqrt(x ** 2. + 2. * scale + 1.) - x
def EffectiveAirMass(AM_st, AM_mid, AM_end):
    """Effective airmass of an exposure via Simpson's rule.

    Weights the airmass at the start, middle and end of the exposure as
    (1, 4, 1)/6, following Stetson, 'Some Factors Affecting the Accuracy
    of Stellar Photometry with CCDs', DAO preprint, September 1988; see
    also http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?setairmass
    """
    return (AM_st + 4. * AM_mid + AM_end) / 6.
def Add_Scale(img_block):
    """Additively scale a stack of images (zeros/bias frames in particular).

    Each image is offset so its mean count matches the mean of the first
    image in the block. To be called by Imcombine.

    Parameters
    ----------
    img_block : ndarray, shape (n_images, ny, nx)
        Pixel values; modified in place.

    Returns
    -------
    (ndarray, list of float)
        The scaled block and the additive scale applied to each image.
    """
    print("Scaling Counts Additively.\n")
    ni, ny, nx = np.shape(img_block)
    Cavg = []  # average counts per image
    Sval = []  # additive scale value per image
    for i in range(ni):
        Cavg.append(np.mean(img_block[i, :, :]))
        Sval.append(Cavg[0] - Cavg[i])
        img_block[i] = img_block[i] + Sval[i]
    # Best-effort: record the averages in the global diagnostic table when
    # the reduction has been initialised. The previous bare 'except:' also
    # swallowed SystemExit/KeyboardInterrupt; narrow it to Exception.
    try:
        diagnostic[0:len(Cavg), 0] = np.array(Cavg)
    except Exception:
        pass
    return img_block, Sval
def Mult_Scale (img_block,index):
# Function to be called by Imcombine.
# The function is meant to multiplicative sclae a set of images, (flats in particular).
# The input is a numpy block of pixel values (see imcombine).
# The function | |
Coupling(name = 'UVGC_428_54',
value = {-1:'( (ee*complex(0,1)*G**2*sw)/(36.*cw*cmath.pi**2) if MD else -(ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2) ) + (ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2)',0:'( (5*ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2) - (ee*complex(0,1)*G**2*sw*reglog(MD/MU_R))/(12.*cw*cmath.pi**2) if MD else (ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2) ) - (ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_429_55 = Coupling(name = 'UVGC_429_55',
value = {-1:'( -(CKM2x1*complex(0,1)*G**2*yc)/(12.*cmath.pi**2) if MC else (CKM2x1*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) )',0:'( (-13*CKM2x1*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) + (3*CKM2x1*complex(0,1)*G**2*yc*reglog(MC/MU_R))/(4.*cmath.pi**2) if MC else -(CKM2x1*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) ) + (CKM2x1*complex(0,1)*G**2*yc)/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_429_56 = Coupling(name = 'UVGC_429_56',
value = {-1:'( -(CKM2x1*complex(0,1)*G**2*yc)/(12.*cmath.pi**2) if MD else (CKM2x1*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) )',0:'( (-5*CKM2x1*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) + (CKM2x1*complex(0,1)*G**2*yc*reglog(MD/MU_R))/(4.*cmath.pi**2) if MD else -(CKM2x1*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) ) + (CKM2x1*complex(0,1)*G**2*yc)/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_429_57 = Coupling(name = 'UVGC_429_57',
value = {-1:'-(CKM2x1*complex(0,1)*G**2*yc)/(3.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_430_58 = Coupling(name = 'UVGC_430_58',
value = {-1:'( -(G**2*ydo)/(6.*cmath.pi**2*cmath.sqrt(2)) if MD else (G**2*ydo)/(12.*cmath.pi**2*cmath.sqrt(2)) ) - (G**2*ydo)/(3.*cmath.pi**2*cmath.sqrt(2))',0:'( (-3*G**2*ydo)/(4.*cmath.pi**2*cmath.sqrt(2)) + (G**2*ydo*reglog(MD/MU_R))/(cmath.pi**2*cmath.sqrt(2)) if MD else -(G**2*ydo)/(12.*cmath.pi**2*cmath.sqrt(2)) ) + (G**2*ydo)/(12.*cmath.pi**2*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_431_59 = Coupling(name = 'UVGC_431_59',
value = {-1:'( (cab*complex(0,1)*G**2*ydo)/(6.*cmath.pi**2*cmath.sqrt(2)) if MD else -(cab*complex(0,1)*G**2*ydo)/(12.*cmath.pi**2*cmath.sqrt(2)) ) + (cab*complex(0,1)*G**2*ydo)/(3.*cmath.pi**2*cmath.sqrt(2))',0:'( (3*cab*complex(0,1)*G**2*ydo)/(4.*cmath.pi**2*cmath.sqrt(2)) - (cab*complex(0,1)*G**2*ydo*reglog(MD/MU_R))/(cmath.pi**2*cmath.sqrt(2)) if MD else (cab*complex(0,1)*G**2*ydo)/(12.*cmath.pi**2*cmath.sqrt(2)) ) - (cab*complex(0,1)*G**2*ydo)/(12.*cmath.pi**2*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_432_60 = Coupling(name = 'UVGC_432_60',
value = {-1:'( (CKM2x1*complex(0,1)*G**2*ydo)/(12.*cmath.pi**2) if MC else -(CKM2x1*complex(0,1)*G**2*ydo)/(24.*cmath.pi**2) )',0:'( (5*CKM2x1*complex(0,1)*G**2*ydo)/(24.*cmath.pi**2) - (CKM2x1*complex(0,1)*G**2*ydo*reglog(MC/MU_R))/(4.*cmath.pi**2) if MC else (CKM2x1*complex(0,1)*G**2*ydo)/(24.*cmath.pi**2) ) - (CKM2x1*complex(0,1)*G**2*ydo)/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_432_61 = Coupling(name = 'UVGC_432_61',
value = {-1:'( (CKM2x1*complex(0,1)*G**2*ydo)/(12.*cmath.pi**2) if MD else -(CKM2x1*complex(0,1)*G**2*ydo)/(24.*cmath.pi**2) )',0:'( (13*CKM2x1*complex(0,1)*G**2*ydo)/(24.*cmath.pi**2) - (3*CKM2x1*complex(0,1)*G**2*ydo*reglog(MD/MU_R))/(4.*cmath.pi**2) if MD else (CKM2x1*complex(0,1)*G**2*ydo)/(24.*cmath.pi**2) ) - (CKM2x1*complex(0,1)*G**2*ydo)/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_432_62 = Coupling(name = 'UVGC_432_62',
value = {-1:'(CKM2x1*complex(0,1)*G**2*ydo)/(3.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_433_63 = Coupling(name = 'UVGC_433_63',
value = {-1:'( -(complex(0,1)*G**2*sab*ydo)/(6.*cmath.pi**2*cmath.sqrt(2)) if MD else (complex(0,1)*G**2*sab*ydo)/(12.*cmath.pi**2*cmath.sqrt(2)) ) - (complex(0,1)*G**2*sab*ydo)/(3.*cmath.pi**2*cmath.sqrt(2))',0:'( (-3*complex(0,1)*G**2*sab*ydo)/(4.*cmath.pi**2*cmath.sqrt(2)) + (complex(0,1)*G**2*sab*ydo*reglog(MD/MU_R))/(cmath.pi**2*cmath.sqrt(2)) if MD else -(complex(0,1)*G**2*sab*ydo)/(12.*cmath.pi**2*cmath.sqrt(2)) ) + (complex(0,1)*G**2*sab*ydo)/(12.*cmath.pi**2*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_434_64 = Coupling(name = 'UVGC_434_64',
value = {-1:'( -(ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(12.*cmath.pi**2*sw*cmath.sqrt(2)) if MC else (ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) )',0:'( (-5*ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) + (ee*complex(0,1)*G**2*complexconjugate(CKM2x1)*reglog(MC/MU_R))/(4.*cmath.pi**2*sw*cmath.sqrt(2)) if MC else -(ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) ) + (ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(24.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_434_65 = Coupling(name = 'UVGC_434_65',
value = {-1:'( -(ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(12.*cmath.pi**2*sw*cmath.sqrt(2)) if MD else (ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) )',0:'( (-5*ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) + (ee*complex(0,1)*G**2*complexconjugate(CKM2x1)*reglog(MD/MU_R))/(4.*cmath.pi**2*sw*cmath.sqrt(2)) if MD else -(ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) ) + (ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(24.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_434_66 = Coupling(name = 'UVGC_434_66',
value = {-1:'-(ee*complex(0,1)*G**2*complexconjugate(CKM2x1))/(12.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_435_67 = Coupling(name = 'UVGC_435_67',
value = {-1:'( -(complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(12.*cmath.pi**2) if MC else (complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(24.*cmath.pi**2) )',0:'( (-13*complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(24.*cmath.pi**2) + (3*complex(0,1)*G**2*yc*complexconjugate(CKM2x1)*reglog(MC/MU_R))/(4.*cmath.pi**2) if MC else -(complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(24.*cmath.pi**2) ) + (complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_435_68 = Coupling(name = 'UVGC_435_68',
value = {-1:'( -(complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(12.*cmath.pi**2) if MD else (complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(24.*cmath.pi**2) )',0:'( (-5*complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(24.*cmath.pi**2) + (complex(0,1)*G**2*yc*complexconjugate(CKM2x1)*reglog(MD/MU_R))/(4.*cmath.pi**2) if MD else -(complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(24.*cmath.pi**2) ) + (complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_435_69 = Coupling(name = 'UVGC_435_69',
value = {-1:'-(complex(0,1)*G**2*yc*complexconjugate(CKM2x1))/(3.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_436_70 = Coupling(name = 'UVGC_436_70',
value = {-1:'( (complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(12.*cmath.pi**2) if MC else -(complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(24.*cmath.pi**2) )',0:'( (5*complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(24.*cmath.pi**2) - (complex(0,1)*G**2*ydo*complexconjugate(CKM2x1)*reglog(MC/MU_R))/(4.*cmath.pi**2) if MC else (complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(24.*cmath.pi**2) ) - (complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_436_71 = Coupling(name = 'UVGC_436_71',
value = {-1:'( (complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(12.*cmath.pi**2) if MD else -(complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(24.*cmath.pi**2) )',0:'( (13*complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(24.*cmath.pi**2) - (3*complex(0,1)*G**2*ydo*complexconjugate(CKM2x1)*reglog(MD/MU_R))/(4.*cmath.pi**2) if MD else (complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(24.*cmath.pi**2) ) - (complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_436_72 = Coupling(name = 'UVGC_436_72',
value = {-1:'(complex(0,1)*G**2*ydo*complexconjugate(CKM2x1))/(3.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_437_73 = Coupling(name = 'UVGC_437_73',
value = {-1:'( (complex(0,1)*G**2)/(6.*cmath.pi**2) if MS else -(complex(0,1)*G**2)/(12.*cmath.pi**2) ) + (complex(0,1)*G**2)/(12.*cmath.pi**2)',0:'( (5*complex(0,1)*G**2)/(12.*cmath.pi**2) - (complex(0,1)*G**2*reglog(MS/MU_R))/(2.*cmath.pi**2) if MS else (complex(0,1)*G**2)/(12.*cmath.pi**2) ) - (complex(0,1)*G**2)/(12.*cmath.pi**2)'},
order = {'QCD':2})
UVGC_438_74 = Coupling(name = 'UVGC_438_74',
value = {-1:'( (ee*complex(0,1)*G**2)/(18.*cmath.pi**2) if MS else -(ee*complex(0,1)*G**2)/(36.*cmath.pi**2) )',0:'( (5*ee*complex(0,1)*G**2)/(36.*cmath.pi**2) - (ee*complex(0,1)*G**2*reglog(MS/MU_R))/(6.*cmath.pi**2) if MS else (ee*complex(0,1)*G**2)/(36.*cmath.pi**2) ) - (ee*complex(0,1)*G**2)/(36.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_439_75 = Coupling(name = 'UVGC_439_75',
value = {-1:'( -(complex(0,1)*G**3)/(6.*cmath.pi**2) if MS else (complex(0,1)*G**3)/(12.*cmath.pi**2) )',0:'( (-5*complex(0,1)*G**3)/(12.*cmath.pi**2) + (complex(0,1)*G**3*reglog(MS/MU_R))/(2.*cmath.pi**2) if MS else -(complex(0,1)*G**3)/(12.*cmath.pi**2) ) + (complex(0,1)*G**3)/(12.*cmath.pi**2)'},
order = {'QCD':3})
UVGC_440_76 = Coupling(name = 'UVGC_440_76',
value = {-1:'( (complex(0,1)*G**2*MS)/(6.*cmath.pi**2) if MS else -(complex(0,1)*G**2*MS)/(12.*cmath.pi**2) ) + (complex(0,1)*G**2*MS)/(3.*cmath.pi**2)',0:'( (3*complex(0,1)*G**2*MS)/(4.*cmath.pi**2) - (complex(0,1)*G**2*MS*reglog(MS/MU_R))/cmath.pi**2 if MS else (complex(0,1)*G**2*MS)/(12.*cmath.pi**2) ) - (complex(0,1)*G**2*MS)/(12.*cmath.pi**2)'},
order = {'QCD':2})
UVGC_441_77 = Coupling(name = 'UVGC_441_77',
value = {-1:'( -(CKM2x2*ee*complex(0,1)*G**2)/(12.*cmath.pi**2*sw*cmath.sqrt(2)) if MC else (CKM2x2*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) )',0:'( (-5*CKM2x2*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) + (CKM2x2*ee*complex(0,1)*G**2*reglog(MC/MU_R))/(4.*cmath.pi**2*sw*cmath.sqrt(2)) if MC else -(CKM2x2*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) ) + (CKM2x2*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_441_78 = Coupling(name = 'UVGC_441_78',
value = {-1:'( -(CKM2x2*ee*complex(0,1)*G**2)/(12.*cmath.pi**2*sw*cmath.sqrt(2)) if MS else (CKM2x2*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) )',0:'( (-5*CKM2x2*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) + (CKM2x2*ee*complex(0,1)*G**2*reglog(MS/MU_R))/(4.*cmath.pi**2*sw*cmath.sqrt(2)) if MS else -(CKM2x2*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) ) + (CKM2x2*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_441_79 = Coupling(name = 'UVGC_441_79',
value = {-1:'-(CKM2x2*ee*complex(0,1)*G**2)/(12.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_442_80 = Coupling(name = 'UVGC_442_80',
value = {-1:'( (cw*ee*complex(0,1)*G**2)/(12.*cmath.pi**2*sw) if MS else -(cw*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw) ) + (cw*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw)',0:'( (5*cw*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw) - (cw*ee*complex(0,1)*G**2*reglog(MS/MU_R))/(4.*cmath.pi**2*sw) if MS else (cw*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw) ) - (cw*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw)'},
order = {'QCD':2,'QED':1})
UVGC_443_81 = Coupling(name = 'UVGC_443_81',
value = {-1:'( (ee*complex(0,1)*G**2*sw)/(36.*cw*cmath.pi**2) if MS else -(ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2) ) + (ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2)',0:'( (5*ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2) - (ee*complex(0,1)*G**2*sw*reglog(MS/MU_R))/(12.*cw*cmath.pi**2) if MS else (ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2) ) - (ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_444_82 = Coupling(name = 'UVGC_444_82',
value = {-1:'( -(CKM2x2*complex(0,1)*G**2*yc)/(12.*cmath.pi**2) if MC else (CKM2x2*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) )',0:'( (-13*CKM2x2*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) + (3*CKM2x2*complex(0,1)*G**2*yc*reglog(MC/MU_R))/(4.*cmath.pi**2) if MC else -(CKM2x2*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) ) + (CKM2x2*complex(0,1)*G**2*yc)/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_444_83 = Coupling(name = 'UVGC_444_83',
value = {-1:'( -(CKM2x2*complex(0,1)*G**2*yc)/(12.*cmath.pi**2) if MS else (CKM2x2*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) )',0:'( (-5*CKM2x2*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) + (CKM2x2*complex(0,1)*G**2*yc*reglog(MS/MU_R))/(4.*cmath.pi**2) if MS else -(CKM2x2*complex(0,1)*G**2*yc)/(24.*cmath.pi**2) ) + (CKM2x2*complex(0,1)*G**2*yc)/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_444_84 = Coupling(name = 'UVGC_444_84',
value = {-1:'-(CKM2x2*complex(0,1)*G**2*yc)/(3.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_445_85 = Coupling(name = 'UVGC_445_85',
value = {-1:'( -(G**2*ys)/(6.*cmath.pi**2*cmath.sqrt(2)) if MS else (G**2*ys)/(12.*cmath.pi**2*cmath.sqrt(2)) ) - (G**2*ys)/(3.*cmath.pi**2*cmath.sqrt(2))',0:'( (-3*G**2*ys)/(4.*cmath.pi**2*cmath.sqrt(2)) + (G**2*ys*reglog(MS/MU_R))/(cmath.pi**2*cmath.sqrt(2)) if MS else -(G**2*ys)/(12.*cmath.pi**2*cmath.sqrt(2)) ) + (G**2*ys)/(12.*cmath.pi**2*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_446_86 = Coupling(name = 'UVGC_446_86',
value = {-1:'( (cab*complex(0,1)*G**2*ys)/(6.*cmath.pi**2*cmath.sqrt(2)) if MS else -(cab*complex(0,1)*G**2*ys)/(12.*cmath.pi**2*cmath.sqrt(2)) ) + (cab*complex(0,1)*G**2*ys)/(3.*cmath.pi**2*cmath.sqrt(2))',0:'( (3*cab*complex(0,1)*G**2*ys)/(4.*cmath.pi**2*cmath.sqrt(2)) - (cab*complex(0,1)*G**2*ys*reglog(MS/MU_R))/(cmath.pi**2*cmath.sqrt(2)) if MS else (cab*complex(0,1)*G**2*ys)/(12.*cmath.pi**2*cmath.sqrt(2)) ) - (cab*complex(0,1)*G**2*ys)/(12.*cmath.pi**2*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_447_87 = Coupling(name = 'UVGC_447_87',
value = {-1:'( (CKM2x2*complex(0,1)*G**2*ys)/(12.*cmath.pi**2) if MC else -(CKM2x2*complex(0,1)*G**2*ys)/(24.*cmath.pi**2) )',0:'( (5*CKM2x2*complex(0,1)*G**2*ys)/(24.*cmath.pi**2) - (CKM2x2*complex(0,1)*G**2*ys*reglog(MC/MU_R))/(4.*cmath.pi**2) if MC else (CKM2x2*complex(0,1)*G**2*ys)/(24.*cmath.pi**2) ) - (CKM2x2*complex(0,1)*G**2*ys)/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_447_88 = Coupling(name = 'UVGC_447_88',
value = {-1:'( (CKM2x2*complex(0,1)*G**2*ys)/(12.*cmath.pi**2) if MS else -(CKM2x2*complex(0,1)*G**2*ys)/(24.*cmath.pi**2) )',0:'( (13*CKM2x2*complex(0,1)*G**2*ys)/(24.*cmath.pi**2) - (3*CKM2x2*complex(0,1)*G**2*ys*reglog(MS/MU_R))/(4.*cmath.pi**2) if MS else (CKM2x2*complex(0,1)*G**2*ys)/(24.*cmath.pi**2) ) - (CKM2x2*complex(0,1)*G**2*ys)/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_447_89 = Coupling(name = 'UVGC_447_89',
value = {-1:'(CKM2x2*complex(0,1)*G**2*ys)/(3.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_448_90 = Coupling(name = 'UVGC_448_90',
value = {-1:'( -(complex(0,1)*G**2*sab*ys)/(6.*cmath.pi**2*cmath.sqrt(2)) if MS else (complex(0,1)*G**2*sab*ys)/(12.*cmath.pi**2*cmath.sqrt(2)) ) - (complex(0,1)*G**2*sab*ys)/(3.*cmath.pi**2*cmath.sqrt(2))',0:'( (-3*complex(0,1)*G**2*sab*ys)/(4.*cmath.pi**2*cmath.sqrt(2)) + (complex(0,1)*G**2*sab*ys*reglog(MS/MU_R))/(cmath.pi**2*cmath.sqrt(2)) if MS else -(complex(0,1)*G**2*sab*ys)/(12.*cmath.pi**2*cmath.sqrt(2)) ) + (complex(0,1)*G**2*sab*ys)/(12.*cmath.pi**2*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_449_91 = Coupling(name = 'UVGC_449_91',
value = {-1:'( -(ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(12.*cmath.pi**2*sw*cmath.sqrt(2)) if MC else (ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) )',0:'( (-5*ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) + (ee*complex(0,1)*G**2*complexconjugate(CKM2x2)*reglog(MC/MU_R))/(4.*cmath.pi**2*sw*cmath.sqrt(2)) if MC else -(ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) ) + (ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(24.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_449_92 = Coupling(name = 'UVGC_449_92',
value = {-1:'( -(ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(12.*cmath.pi**2*sw*cmath.sqrt(2)) if MS else (ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) )',0:'( (-5*ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) + (ee*complex(0,1)*G**2*complexconjugate(CKM2x2)*reglog(MS/MU_R))/(4.*cmath.pi**2*sw*cmath.sqrt(2)) if MS else -(ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(24.*cmath.pi**2*sw*cmath.sqrt(2)) ) + (ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(24.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_449_93 = Coupling(name = 'UVGC_449_93',
value = {-1:'-(ee*complex(0,1)*G**2*complexconjugate(CKM2x2))/(12.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_450_94 = Coupling(name = 'UVGC_450_94',
value = {-1:'( -(complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(12.*cmath.pi**2) if MC else (complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(24.*cmath.pi**2) )',0:'( (-13*complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(24.*cmath.pi**2) + (3*complex(0,1)*G**2*yc*complexconjugate(CKM2x2)*reglog(MC/MU_R))/(4.*cmath.pi**2) if MC else -(complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(24.*cmath.pi**2) ) + (complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_450_95 = Coupling(name = 'UVGC_450_95',
value = {-1:'( -(complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(12.*cmath.pi**2) if MS else (complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(24.*cmath.pi**2) )',0:'( (-5*complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(24.*cmath.pi**2) + (complex(0,1)*G**2*yc*complexconjugate(CKM2x2)*reglog(MS/MU_R))/(4.*cmath.pi**2) if MS else -(complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(24.*cmath.pi**2) ) + (complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_450_96 = Coupling(name = 'UVGC_450_96',
value = {-1:'-(complex(0,1)*G**2*yc*complexconjugate(CKM2x2))/(3.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_451_97 = Coupling(name = 'UVGC_451_97',
value = {-1:'( (complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(12.*cmath.pi**2) if MC else -(complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(24.*cmath.pi**2) )',0:'( (5*complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(24.*cmath.pi**2) - (complex(0,1)*G**2*ys*complexconjugate(CKM2x2)*reglog(MC/MU_R))/(4.*cmath.pi**2) if MC else (complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(24.*cmath.pi**2) ) - (complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_451_98 = Coupling(name = 'UVGC_451_98',
value = {-1:'( (complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(12.*cmath.pi**2) if MS else -(complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(24.*cmath.pi**2) )',0:'( (13*complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(24.*cmath.pi**2) - (3*complex(0,1)*G**2*ys*complexconjugate(CKM2x2)*reglog(MS/MU_R))/(4.*cmath.pi**2) if MS else (complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(24.*cmath.pi**2) ) - (complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(24.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_451_99 = Coupling(name = 'UVGC_451_99',
value = {-1:'(complex(0,1)*G**2*ys*complexconjugate(CKM2x2))/(3.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_452_100 = Coupling(name = 'UVGC_452_100',
value = {-1:'( (complex(0,1)*G**2)/(6.*cmath.pi**2) if MT else -(complex(0,1)*G**2)/(12.*cmath.pi**2) ) + (complex(0,1)*G**2)/(12.*cmath.pi**2)',0:'( (5*complex(0,1)*G**2)/(12.*cmath.pi**2) - (complex(0,1)*G**2*reglog(MT/MU_R))/(2.*cmath.pi**2) if MT else (complex(0,1)*G**2)/(12.*cmath.pi**2) ) - (complex(0,1)*G**2)/(12.*cmath.pi**2)'},
order = {'QCD':2})
UVGC_453_101 = Coupling(name = 'UVGC_453_101',
value = {-1:'( -(ee*complex(0,1)*G**2)/(9.*cmath.pi**2) if MT else (ee*complex(0,1)*G**2)/(18.*cmath.pi**2) )',0:'( (-5*ee*complex(0,1)*G**2)/(18.*cmath.pi**2) + (ee*complex(0,1)*G**2*reglog(MT/MU_R))/(3.*cmath.pi**2) if MT else -(ee*complex(0,1)*G**2)/(18.*cmath.pi**2) ) + (ee*complex(0,1)*G**2)/(18.*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_454_102 = Coupling(name = 'UVGC_454_102',
value = {-1:'( -(complex(0,1)*G**3)/(6.*cmath.pi**2) if MT else (complex(0,1)*G**3)/(12.*cmath.pi**2) )',0:'( (-5*complex(0,1)*G**3)/(12.*cmath.pi**2) + (complex(0,1)*G**3*reglog(MT/MU_R))/(2.*cmath.pi**2) if MT else -(complex(0,1)*G**3)/(12.*cmath.pi**2) ) + (complex(0,1)*G**3)/(12.*cmath.pi**2)'},
order = {'QCD':3})
UVGC_455_103 = Coupling(name = 'UVGC_455_103',
value = {-1:'( (complex(0,1)*G**2*MT)/(6.*cmath.pi**2) if MT else -(complex(0,1)*G**2*MT)/(12.*cmath.pi**2) ) + (complex(0,1)*G**2*MT)/(3.*cmath.pi**2)',0:'( (3*complex(0,1)*G**2*MT)/(4.*cmath.pi**2) - (complex(0,1)*G**2*MT*reglog(MT/MU_R))/cmath.pi**2 if MT else (complex(0,1)*G**2*MT)/(12.*cmath.pi**2) ) - (complex(0,1)*G**2*MT)/(12.*cmath.pi**2)'},
order = {'QCD':2})
UVGC_456_104 = Coupling(name = 'UVGC_456_104',
value = {-1:'( -(CKM3x3*ee*complex(0,1)*G**2)/(12.*cmath.pi**2*sw*cmath.sqrt(2)) if MB else (CKM3x3*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) )',0:'( (-5*CKM3x3*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) + (CKM3x3*ee*complex(0,1)*G**2*reglog(MB/MU_R))/(4.*cmath.pi**2*sw*cmath.sqrt(2)) if MB else -(CKM3x3*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) ) + (CKM3x3*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_456_105 = Coupling(name = 'UVGC_456_105',
value = {-1:'( -(CKM3x3*ee*complex(0,1)*G**2)/(12.*cmath.pi**2*sw*cmath.sqrt(2)) if MT else (CKM3x3*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) )',0:'( (-5*CKM3x3*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) + (CKM3x3*ee*complex(0,1)*G**2*reglog(MT/MU_R))/(4.*cmath.pi**2*sw*cmath.sqrt(2)) if MT else -(CKM3x3*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2)) ) + (CKM3x3*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_456_106 = Coupling(name = 'UVGC_456_106',
value = {-1:'-(CKM3x3*ee*complex(0,1)*G**2)/(12.*cmath.pi**2*sw*cmath.sqrt(2))'},
order = {'QCD':2,'QED':1})
UVGC_457_107 = Coupling(name = 'UVGC_457_107',
value = {-1:'( -(cw*ee*complex(0,1)*G**2)/(12.*cmath.pi**2*sw) if MT else (cw*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw) ) - (cw*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw)',0:'( (-5*cw*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw) + (cw*ee*complex(0,1)*G**2*reglog(MT/MU_R))/(4.*cmath.pi**2*sw) if MT else -(cw*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw) ) + (cw*ee*complex(0,1)*G**2)/(24.*cmath.pi**2*sw)'},
order = {'QCD':2,'QED':1})
UVGC_458_108 = Coupling(name = 'UVGC_458_108',
value = {-1:'( (ee*complex(0,1)*G**2*sw)/(36.*cw*cmath.pi**2) if MT else -(ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2) ) + (ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2)',0:'( (5*ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2) - (ee*complex(0,1)*G**2*sw*reglog(MT/MU_R))/(12.*cw*cmath.pi**2) if MT else (ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2) ) - (ee*complex(0,1)*G**2*sw)/(72.*cw*cmath.pi**2)'},
order = {'QCD':2,'QED':1})
UVGC_459_109 = Coupling(name = 'UVGC_459_109',
value = {-1:'( (CKM3x3*complex(0,1)*G**2*yb)/(12.*cmath.pi**2) if MB else -(CKM3x3*complex(0,1)*G**2*yb)/(24.*cmath.pi**2) )',0:'( | |
<filename>app/celery/tasks.py
import json
from collections import defaultdict, namedtuple
from datetime import datetime
from typing import Any, Dict, List, Optional
from uuid import UUID
from flask import current_app
from itsdangerous import BadSignature
from more_itertools import chunked
from notifications_utils.columns import Row
from notifications_utils.recipients import RecipientCSV
from notifications_utils.statsd_decorators import statsd
from notifications_utils.template import SMSMessageTemplate, WithSubjectTemplate
from notifications_utils.timezones import convert_utc_to_local_timezone
from requests import HTTPError, RequestException, request
from sqlalchemy.exc import SQLAlchemyError
from app import (
DATETIME_FORMAT,
create_random_identifier,
create_uuid,
email_bulk,
email_normal,
email_priority,
metrics_logger,
notify_celery,
signer,
sms_bulk,
sms_normal,
sms_priority,
statsd_client,
)
from app.aws import s3
from app.aws.metrics import (
put_batch_saving_bulk_created,
put_batch_saving_bulk_processed,
)
from app.celery import ( # noqa: F401
letters_pdf_tasks,
process_sns_receipts_tasks,
provider_tasks,
research_mode_tasks,
)
from app.config import Config, QueueNames
from app.dao.daily_sorted_letter_dao import dao_create_or_update_daily_sorted_letter
from app.dao.inbound_sms_dao import dao_get_inbound_sms_by_id
from app.dao.jobs_dao import dao_get_job_by_id, dao_update_job
from app.dao.notifications_dao import (
dao_get_last_notification_added_for_job_id,
dao_get_notification_history_by_reference,
dao_update_notifications_by_reference,
get_notification_by_id,
update_notification_status_by_reference,
)
from app.dao.provider_details_dao import get_current_provider
from app.dao.service_email_reply_to_dao import dao_get_reply_to_by_id
from app.dao.service_inbound_api_dao import get_service_inbound_api_for_service
from app.dao.service_sms_sender_dao import dao_get_service_sms_senders_by_id
from app.dao.services_dao import (
dao_fetch_service_by_id,
fetch_todays_total_message_count,
)
from app.dao.templates_dao import dao_get_template_by_id
from app.exceptions import DVLAException, NotificationTechnicalFailureException
from app.models import (
BULK,
DVLA_RESPONSE_STATUS_SENT,
EMAIL_TYPE,
JOB_STATUS_CANCELLED,
JOB_STATUS_FINISHED,
JOB_STATUS_IN_PROGRESS,
JOB_STATUS_PENDING,
JOB_STATUS_SENDING_LIMITS_EXCEEDED,
KEY_TYPE_NORMAL,
LETTER_TYPE,
NORMAL,
NOTIFICATION_CREATED,
NOTIFICATION_DELIVERED,
NOTIFICATION_RETURNED_LETTER,
NOTIFICATION_SENDING,
NOTIFICATION_TECHNICAL_FAILURE,
NOTIFICATION_TEMPORARY_FAILURE,
PRIORITY,
SMS_TYPE,
DailySortedLetter,
Job,
Service,
Template,
)
from app.notifications.process_notifications import (
persist_notification,
persist_notifications,
send_notification_to_queue,
)
from app.notifications.validators import check_service_over_daily_message_limit
from app.service.utils import service_allowed_to_send_to
from app.utils import get_csv_max_rows
@notify_celery.task(name="process-job")
@statsd(namespace="tasks")
def process_job(job_id):
    """
    Process a pending CSV upload job: sign every row and fan the rows out to
    the batched database-save tasks.

    The job is skipped unless it is still in the PENDING state, is cancelled
    if its service is inactive, and is aborted if it would push the service
    over its daily message limit. Otherwise the job is moved to IN_PROGRESS,
    its rows are processed in chunks of Config.BATCH_INSERTION_CHUNK_SIZE,
    and finally the job is marked FINISHED via job_complete().
    """
    start = datetime.utcnow()
    job = dao_get_job_by_id(job_id)
    if job.job_status != JOB_STATUS_PENDING:
        # Another worker (or a cancellation) already picked this job up.
        return
    service = job.service
    if not service.active:
        job.job_status = JOB_STATUS_CANCELLED
        dao_update_job(job)
        current_app.logger.warning("Job {} has been cancelled, service {} is inactive".format(job_id, service.id))
        return
    if __sending_limits_for_job_exceeded(service, job, job_id):
        # __sending_limits_for_job_exceeded has already updated the job status and logged.
        return
    job.job_status = JOB_STATUS_IN_PROGRESS
    job.processing_started = start
    dao_update_job(job)
    # Record StatsD stats to compute SLOs
    job_start = job.scheduled_for or job.created_at
    statsd_client.timing_with_dates("job.processing-start-delay", job.processing_started, job_start)
    db_template = dao_get_template_by_id(job.template_id, job.template_version)
    TemplateClass = get_template_class(db_template.template_type)
    template = TemplateClass(db_template.__dict__)
    template.process_type = db_template.process_type
    current_app.logger.info("Starting job {} processing {} notifications".format(job_id, job.notification_count))
    csv = get_recipient_csv(job, template)
    rows = csv.get_rows()
    # Process the CSV in fixed-size chunks so each save task gets a bounded batch;
    # one "bulk created" metric is emitted per chunk.
    for result in chunked(rows, Config.BATCH_INSERTION_CHUNK_SIZE):
        process_rows(result, template, job, service)
        put_batch_saving_bulk_created(
            metrics_logger, 1, notification_type=db_template.template_type, priority=db_template.process_type
        )
    job_complete(job, start=start)
def job_complete(job: Job, resumed=False, start=None):
    """Mark *job* as FINISHED, persist the change, and log the completion.

    When *resumed* is true the log line only mentions creation/completion;
    otherwise it also records the *start* timestamp supplied by the caller.
    """
    completion_time = datetime.utcnow()
    job.job_status = JOB_STATUS_FINISHED
    job.processing_finished = completion_time
    dao_update_job(job)
    if resumed:
        message = "Resumed Job {} completed at {}".format(job.id, job.created_at)
    else:
        message = "Job {} created at {} started at {} finished at {}".format(
            job.id, job.created_at, start, completion_time
        )
    current_app.logger.info(message)
def choose_database_queue(template: Any, service: Service):
    """Pick the database-save queue for a notification.

    Research-mode services always use the research queue; otherwise the
    template's process type selects the priority/bulk queue, defaulting to
    the normal database queue.
    """
    if service.research_mode:
        return QueueNames.RESEARCH_MODE
    queue_by_process_type = {
        PRIORITY: QueueNames.PRIORITY_DATABASE,
        BULK: QueueNames.BULK_DATABASE,
    }
    return queue_by_process_type.get(template.process_type, QueueNames.NORMAL_DATABASE)
def process_row(row: Row, template: Template, job: Job, service: Service):
    """
    Sign a single CSV row and dispatch the matching single-notification save
    task (save_sms / save_email / save_letter) for it.

    Rows whose recipient the service is not allowed to send to (e.g. a
    restricted/trial service) are skipped with a debug log entry.
    """
    template_type = template.template_type
    client_reference = row.get("reference")
    # Sign the per-row payload; the save task verifies and persists it later.
    signed = signer.sign(
        {
            "api_key": job.api_key_id and str(job.api_key_id),
            "template": str(template.id),
            "template_version": job.template_version,
            "job": str(job.id),
            "to": row.recipient,
            "row_number": row.index,
            "personalisation": dict(row.personalisation),
            "queue": queue_to_use(job.notification_count),
            # Optional "reference" column may be absent from the CSV.
            "client_reference": client_reference.data if client_reference else None,
        }
    )
    notification_id = create_uuid()
    sender_id = str(job.sender_id) if job.sender_id else None
    # Dispatch table: one save task per notification type.
    send_fns = {SMS_TYPE: save_sms, EMAIL_TYPE: save_email, LETTER_TYPE: save_letter}
    send_fn = send_fns[template_type]
    task_kwargs = {}
    if sender_id:
        task_kwargs["sender_id"] = sender_id
    # the same_sms and save_email task are going to be using template and service objects from cache
    # these objects are transient and will not have relationships loaded
    if service_allowed_to_send_to(row.recipient, service, KEY_TYPE_NORMAL):
        send_fn.apply_async(
            (
                str(service.id),
                notification_id,
                signed,
            ),
            task_kwargs,
            queue=choose_database_queue(template, service),
        )
    else:
        current_app.logger.debug("SMS {} failed as restricted service".format(notification_id))
def process_rows(rows: List, template: Template, job: Job, service: Service):
    """
    Sign a chunk of CSV rows and dispatch ONE batched save task per
    notification type (save_smss / save_emails / save_letters).

    Each row is signed with the same payload shape used by process_row();
    rows are then grouped by the template's type and sent to the appropriate
    database-save queue in a single task per group.
    """
    template_type = template.template_type
    sender_id = str(job.sender_id) if job.sender_id else None
    encrypted_smss: List[Any] = []
    encrypted_emails: List[Any] = []
    encrypted_letters: List[Any] = []
    for row in rows:
        client_reference = row.get("reference")
        signed_row = signer.sign(
            {
                "api_key": job.api_key_id and str(job.api_key_id),
                "template": str(template.id),
                "template_version": job.template_version,
                "job": str(job.id),
                "to": row.recipient,
                "row_number": row.index,
                "personalisation": dict(row.personalisation),
                "queue": queue_to_use(job.notification_count),
                "sender_id": sender_id,
                # Guard against a missing "reference" column, consistent with process_row().
                "client_reference": client_reference.data if client_reference else None,
            }
        )
        if template_type == SMS_TYPE:
            encrypted_smss.append(signed_row)
        if template_type == EMAIL_TYPE:
            encrypted_emails.append(signed_row)
        if template_type == LETTER_TYPE:
            # Bug fix: this previously appended the list to itself
            # (encrypted_letters.append(encrypted_letters)), so letter rows
            # were never actually queued for saving.
            encrypted_letters.append(signed_row)
    # the same_sms and save_email task are going to be using template and service objects from cache
    # these objects are transient and will not have relationships loaded
    if encrypted_smss:
        save_smss.apply_async(
            (str(service.id), encrypted_smss, None),
            queue=choose_database_queue(template, service),
        )
    if encrypted_emails:
        save_emails.apply_async(
            (str(service.id), encrypted_emails, None),
            queue=choose_database_queue(template, service),
        )
    if encrypted_letters:
        save_letters.apply_async(
            (str(service.id), encrypted_letters),
            queue=choose_database_queue(template, service),
        )
def __sending_limits_for_job_exceeded(service, job: Job, job_id):
    """Return True when running *job* would exceed the service's daily limit.

    On exceedance the job is marked SENDING_LIMITS_EXCEEDED, persisted, and
    the failure is logged; returns False when the job can proceed.
    """
    sent_today = fetch_todays_total_message_count(service.id)
    if sent_today + job.notification_count <= service.message_limit:
        return False
    job.job_status = JOB_STATUS_SENDING_LIMITS_EXCEEDED
    job.processing_finished = datetime.utcnow()
    dao_update_job(job)
    current_app.logger.info(
        "Job {} size {} error. Sending limits {} exceeded".format(job_id, job.notification_count, service.message_limit)
    )
    return True
@notify_celery.task(bind=True, name="save-smss", max_retries=5, default_retry_delay=300)
@statsd(namespace="tasks")
def save_smss(self, service_id: Optional[str], signed_notifications: List[Any], receipt: Optional[UUID]):
    """
    Verify, persist and enqueue a batch of SMS notifications.

    Takes a list of signed notifications, stores them in the DB and then
    sends them to the delivery queue. If the receipt is not None then it is
    passed to the RedisQueue to let it know it can delete the inflight
    notifications; otherwise a batch-saving "processed" metric is emitted.
    """
    verified_notifications: List[Any] = []
    notification_id_queue: Dict = {}
    saved_notifications = []
    for signed_notification in signed_notifications:
        try:
            notification = signer.verify(signed_notification)
        except BadSignature:
            current_app.logger.exception(f"Invalid signature for signed_notification {signed_notification}")
            raise
        service_id = notification.get("service_id", service_id)  # take it out of the notification if it's there
        service = dao_fetch_service_by_id(service_id, use_cache=True)
        template = dao_get_template_by_id(
            notification.get("template"), version=notification.get("template_version"), use_cache=True
        )
        sender_id = notification.get("sender_id")
        notification_id = notification.get("id", create_uuid())
        notification["notification_id"] = notification_id
        reply_to_text = ""  # type: ignore
        if sender_id:
            # An explicit sender was requested: use that sender's number.
            reply_to_text = dao_get_service_sms_senders_by_id(service_id, sender_id).sms_sender
            if isinstance(template, tuple):
                template = template[0]
        # if the template is obtained from cache a tuple will be returned where
        # the first element is the Template object and the second the template cache data
        # in the form of a dict
        elif isinstance(template, tuple):
            reply_to_text = template[1].get("reply_to_text")  # type: ignore
            template = template[0]
        else:
            reply_to_text = template.get_reply_to_text()  # type: ignore
        # Normalise the payload into the keyword shape persist_notifications() expects.
        notification["reply_to_text"] = reply_to_text
        notification["service"] = service
        notification["key_type"] = notification.get("key_type", KEY_TYPE_NORMAL)
        notification["template_id"] = template.id
        notification["template_version"] = template.version
        notification["recipient"] = notification.get("to")
        notification["personalisation"] = notification.get("personalisation")
        notification["notification_type"] = SMS_TYPE
        notification["simulated"] = notification.get("simulated", None)
        notification["api_key_id"] = notification.get("api_key", None)
        notification["created_at"] = datetime.utcnow()
        notification["job_id"] = notification.get("job", None)
        notification["job_row_number"] = notification.get("row_number", None)
        verified_notifications.append(notification)
        notification_id_queue[notification_id] = notification.get("queue")
        # NOTE(review): service/template/process_type/notification_id used below keep
        # the values from the *last* loop iteration -- this assumes every notification
        # in a batch shares the same service and template; confirm with callers.
        process_type = template.process_type
    try:
        # If the data is not present in the encrypted data then fallback on whats needed for process_job.
        saved_notifications = persist_notifications(verified_notifications)
        current_app.logger.info(
            f"Saved following notifications into db: {notification_id_queue.keys()} associated with receipt {receipt}"
        )
        if receipt:
            _acknowledge_notification(SMS_TYPE, template, receipt)
            current_app.logger.info(
                f"Batch saving: receipt_id {receipt} removed from buffer queue for notification_id {notification_id} for process_type {process_type}"
            )
        else:
            put_batch_saving_bulk_processed(
                metrics_logger,
                1,
                notification_type=SMS_TYPE,
                priority=process_type,
            )
    except SQLAlchemyError as e:
        # On DB failure, forward the batch for retry/error handling;
        # saved_notifications stays empty so the send loop below is a no-op.
        signed_and_verified = list(zip(signed_notifications, verified_notifications))
        handle_batch_error_and_forward(signed_and_verified, SMS_TYPE, e, receipt, template)
    check_service_over_daily_message_limit(KEY_TYPE_NORMAL, service)
    research_mode = service.research_mode  # type: ignore
    current_app.logger.info(f"Sending following sms notifications to AWS: {notification_id_queue.keys()}")
    for notification in saved_notifications:
        # Use the queue captured from the signed payload, falling back to the
        # template's default delivery queue.
        queue = notification_id_queue.get(notification.id) or template.queue_to_use()  # type: ignore
        send_notification_to_queue(
            notification,
            research_mode,
            queue=queue,
        )
        current_app.logger.debug(
            "SMS {} created at {} for job {}".format(
                notification.id,
                notification.created_at,
                notification.job,
            )
        )
@notify_celery.task(bind=True, name="save-sms", max_retries=5, default_retry_delay=300)
@statsd(namespace="tasks")
def save_sms(self, service_id, notification_id, signed_notification, sender_id=None):
    """
    Verify, persist and enqueue a single SMS notification.

    Used by two callers (process_job and process_sms_or_email_notification),
    so fields missing from the signed payload fall back to the task
    arguments. DB errors are delegated to handle_save_error(), which may
    retry the task.
    """
    notification = signer.verify(signed_notification)
    service = dao_fetch_service_by_id(service_id, use_cache=True)
    template = dao_get_template_by_id(notification["template"], version=notification["template_version"], use_cache=True)
    if sender_id:
        # An explicit sender was requested: use that sender's number.
        reply_to_text = dao_get_service_sms_senders_by_id(service_id, sender_id).sms_sender
        if isinstance(template, tuple):
            template = template[0]
    # if the template is obtained from cache a tuple will be returned where
    # the first element is the Template object and the second the template cache data
    # in the form of a dict
    elif isinstance(template, tuple):
        reply_to_text = template[1].get("reply_to_text")
        template = template[0]
    else:
        reply_to_text = template.get_reply_to_text()
    check_service_over_daily_message_limit(KEY_TYPE_NORMAL, service)
    try:
        # This task is used by two functions: process_job and process_sms_or_email_notification
        # if the data is not present in the signed data then fallback on whats needed for process_job
        saved_notification = persist_notification(
            notification_id=notification.get("id", notification_id),
            template_id=notification["template"],
            template_version=notification["template_version"],
            recipient=notification["to"],
            service=service,
            personalisation=notification.get("personalisation"),
            notification_type=SMS_TYPE,
            simulated=notification.get("simulated", None),
            api_key_id=notification.get("api_key", None),
            key_type=notification.get("key_type", KEY_TYPE_NORMAL),
            created_at=datetime.utcnow(),
            job_id=notification.get("job", None),
            job_row_number=notification.get("row_number", None),
            reply_to_text=reply_to_text,
        )
        send_notification_to_queue(
            saved_notification,
            service.research_mode,
            queue=notification.get("queue") or template.queue_to_use(),
        )
        current_app.logger.debug(
            "SMS {} created at {} for job {}".format(
                saved_notification.id,
                saved_notification.created_at,
                notification.get("job", None),
            )
        )
    except SQLAlchemyError as e:
        # Delegate retry/error handling (may re-queue this task).
        handle_save_error(self, notification, notification_id, e)
@notify_celery.task(bind=True, name="save-emails", max_retries=5, default_retry_delay=300)
@statsd(namespace="tasks")
def save_emails(self, service_id: Optional[str], signed_notifications: List[Any], receipt: Optional[UUID]):
"""
Function that takes a list of signed notifications, stores
them in the DB and then sends these to the queue. If the receipt
is not None then it is passed to the RedisQueue to let it know it
can delete the inflight notifications.
"""
verified_notifications: List[Any] = []
notification_id_queue: Dict = {}
saved_notifications = []
for signed_notification in signed_notifications:
try:
notification = signer.verify(signed_notification)
except BadSignature:
current_app.logger.exception(f"Invalid signature for signed_notification {signed_notification}")
raise
service_id = notification.get("service_id", service_id) # take it it out of the notification if it's there
service = dao_fetch_service_by_id(service_id, use_cache=True)
template = dao_get_template_by_id(
notification.get("template"), version=notification.get("template_version"), use_cache=True
)
sender_id = notification.get("sender_id")
notification_id | |
in Alexa global
'http://www.casualclub.com/',
# Why: #4543 in Alexa global
'http://www.wanelo.com/',
# Why: #4544 in Alexa global
'http://www.ipsosinteractive.com/',
# Why: #4545 in Alexa global
'http://www.videohive.net/',
# Why: #4546 in Alexa global
'http://www.fenzhi.com/',
# Why: #4547 in Alexa global
'http://www.lefrecce.it/',
# Why: #4548 in Alexa global
'http://www.bugun.com.tr/',
# Why: #4549 in Alexa global
'http://www.p30world.com/',
# Why: #4550 in Alexa global
'http://www.cuevana.tv/',
# Why: #4551 in Alexa global
'http://www.joins.com/',
# Why: #4552 in Alexa global
'http://www.tvnet.lv/',
# Why: #4553 in Alexa global
'http://aliimg.com/',
# Why: #4554 in Alexa global
'http://www.bellanaija.com/',
# Why: #4555 in Alexa global
'http://www.startpagina.nl/',
# Why: #4556 in Alexa global
'http://www.incometaxindiaefiling.gov.in/',
# Why: #4557 in Alexa global
'http://www.bellemaison.jp/',
# Why: #4558 in Alexa global
'http://www.michigan.gov/',
# Why: #4559 in Alexa global
'http://www.harborfreight.com/',
# Why: #4560 in Alexa global
'http://www.fineartamerica.com/',
# Why: #4561 in Alexa global
'http://www.mysurvey.com/',
# Why: #4562 in Alexa global
'http://www.kapaza.be/',
# Why: #4563 in Alexa global
'http://www.adxpansion.com/',
# Why: #4564 in Alexa global
'http://www.thefind.com/',
# Why: #4565 in Alexa global
'http://www.priyo.com/',
# Why: #4567 in Alexa global
'http://www.burrp.com/',
# Why: #4568 in Alexa global
'http://www.sky.it/',
# Why: #4569 in Alexa global
'http://www.ipad-winners.info/',
# Why: #4570 in Alexa global
'http://www.usgs.gov/',
# Why: #4571 in Alexa global
'http://www.gavick.com/',
# Why: #4572 in Alexa global
'http://www.ellislab.com/',
# Why: #4573 in Alexa global
'http://www.voegol.com.br/',
# Why: #4574 in Alexa global
'http://www.paginebianche.it/',
# Why: #4575 in Alexa global
'http://www.getwebcake.com/',
# Why: #4576 in Alexa global
'http://www.zeroredirect1.com/',
# Why: #4577 in Alexa global
'http://www.gaiaonline.com/',
# Why: #4578 in Alexa global
'http://iqilu.com/',
# Why: #4579 in Alexa global
'http://www.bright.com/',
# Why: #4580 in Alexa global
'http://www.comunidades.net/',
# Why: #4581 in Alexa global
'http://www.webgains.com/',
# Why: #4582 in Alexa global
'http://www.overdrive.com/',
# Why: #4583 in Alexa global
'http://www.bigcommerce.com/',
# Why: #4584 in Alexa global
'http://www.paperpkads.com/',
# Why: #4585 in Alexa global
'http://www.imageporter.com/',
# Why: #4586 in Alexa global
'http://www.lenovo.com.cn/',
# Why: #4587 in Alexa global
'http://www.listal.com/',
# Why: #4588 in Alexa global
'http://www.virgula.uol.com.br/',
# Why: #4589 in Alexa global
'http://www.rbcdaily.ru/',
# Why: #4590 in Alexa global
'http://www.redbus.in/',
# Why: #4591 in Alexa global
'http://www.3bmeteo.com/',
# Why: #4592 in Alexa global
'http://www.earn-on.com/',
# Why: #4593 in Alexa global
'http://www.ae.com/',
# Why: #4594 in Alexa global
'http://www.shoutmeloud.com/',
# Why: #4595 in Alexa global
'http://www.oeeee.com/',
# Why: #4596 in Alexa global
'http://www.usenet.nl/',
# Why: #4597 in Alexa global
'http://www.mediotiempo.com/',
# Why: #4599 in Alexa global
'http://www.prostoporno.net/',
# Why: #4600 in Alexa global
'http://www.bangyoulater.com/',
# Why: #4601 in Alexa global
'http://www.comunio.de/',
# Why: #4602 in Alexa global
'http://www.pureleads.com/',
# Why: #4603 in Alexa global
'http://www.bakeca.it/',
# Why: #4604 in Alexa global
'http://www.trovit.it/',
# Why: #4605 in Alexa global
'http://www.fakku.net/',
# Why: #4606 in Alexa global
'http://www.indeed.fr/',
# Why: #4607 in Alexa global
'http://www.inquisitr.com/',
# Why: #4608 in Alexa global
'http://www.wizards.com/',
# Why: #4609 in Alexa global
'http://www.straightdope.com/',
# Why: #4610 in Alexa global
'http://www.pornpros.com/',
# Why: #4611 in Alexa global
'http://www.s-oman.net/',
# Why: #4612 in Alexa global
'http://www.facilisimo.com/',
# Why: #4613 in Alexa global
'http://www.dostor.org/',
# Why: #4614 in Alexa global
'http://tabloidpulsa.co.id/',
# Why: #4615 in Alexa global
'http://www.shafaf.ir/',
# Why: #4616 in Alexa global
'http://www.bt.dk/',
# Why: #4617 in Alexa global
'http://www.lent.az/',
# Why: #4618 in Alexa global
'http://www.filmaffinity.com/',
# Why: #4619 in Alexa global
'http://www.wjunction.com/',
# Why: #4620 in Alexa global
'http://www.gamefront.com/',
# Why: #4621 in Alexa global
'http://www.photoshelter.com/',
# Why: #4622 in Alexa global
'http://www.cheaptickets.com/',
# Why: #4623 in Alexa global
'http://www.meetic.it/',
# Why: #4624 in Alexa global
'http://www.seochat.com/',
# Why: #4625 in Alexa global
'http://www.livemixtapes.com/',
# Why: #4626 in Alexa global
'http://www.deadline.com/',
# Why: #4627 in Alexa global
'http://www.boingboing.net/',
# Why: #4628 in Alexa global
'http://www.lecai.com/',
# Why: #4629 in Alexa global
'http://www.onetravel.com/',
# Why: #4631 in Alexa global
'http://www.erotictube.me/',
# Why: #4632 in Alexa global
'http://www.svd.se/',
# Why: #4633 in Alexa global
'http://www.pcadvisor.co.uk/',
# Why: #4634 in Alexa global
'http://www.pravda.com.ua/',
# Why: #4636 in Alexa global
'http://www.afisha.ru/',
# Why: #4637 in Alexa global
'http://www.dressupgamesite.com/',
# Why: #4638 in Alexa global
'http://www.mercadopago.com/',
# Why: #4640 in Alexa global
'http://www.bangkokpost.com/',
# Why: #4641 in Alexa global
'http://www.dumpert.nl/',
# Why: #4642 in Alexa global
'http://www.monotaro.com/',
# Why: #4643 in Alexa global
'http://www.bloomingdales.com/',
# Why: #4644 in Alexa global
'http://www.ebayclassifieds.com/',
# Why: #4645 in Alexa global
'http://www.t-online.hu/',
# Why: #4646 in Alexa global
'http://www.2dbook.com/',
# Why: #4647 in Alexa global
'http://www.golfdigest.co.jp/',
# Why: #4648 in Alexa global
'http://www.thekitchn.com/',
# Why: #4649 in Alexa global
'http://www.halifax.co.uk/',
# Why: #4650 in Alexa global
'http://www.tanx.com/',
# Why: #4651 in Alexa global
'http://www.jutarnji.hr/',
# Why: #4652 in Alexa global
'http://www.petardashd.com/',
# Why: #4653 in Alexa global
'http://www.rookee.ru/',
# Why: #4654 in Alexa global
'http://www.showroomprive.com/',
# Why: #4655 in Alexa global
'http://www.sharepoint.com/',
# Why: #4656 in Alexa global
'http://liebiao.com/',
# Why: #4657 in Alexa global
'http://www.miibeian.gov.cn/',
# Why: #4658 in Alexa global
'http://www.pumbaporn.com/',
# Why: #4659 in Alexa global
'http://www.dwnews.com/',
# Why: #4660 in Alexa global
'http://www.sanguosha.com/',
# Why: #4661 in Alexa global
'http://www.pp.cc/',
# Why: #4662 in Alexa global
'http://www.myfc.ir/',
# Why: #4663 in Alexa global
'http://www.alicdn.com/',
# Why: #4664 in Alexa global
'http://www.carmax.com/',
# Why: #4665 in Alexa global
'http://www.defencenet.gr/',
# Why: #4666 in Alexa global
'http://www.cuantarazon.com/',
# Why: #4667 in Alexa global
'http://www.westernunion.com/',
# Why: #4668 in Alexa global
'http://www.links.cn/',
# Why: #4669 in Alexa global
'http://www.natunbarta.com/',
# Why: #4670 in Alexa global
'http://www.sekindo.com/',
# Why: #4671 in Alexa global
'http://78.cn/',
# Why: #4672 in Alexa global
'http://www.edublogs.org/',
# Why: #4673 in Alexa global
'http://www.hotmail.com/',
# Why: #4674 in Alexa global
'http://www.problogger.net/',
# Why: #4675 in Alexa global
'http://www.amardeshonline.com/',
# Why: #4676 in Alexa global
'http://www.gemius.com/',
# Why: #4677 in Alexa global
'http://www.egynews.net/',
# Why: #4678 in Alexa global
'http://www.indiabix.com/',
# Why: #4679 in Alexa global
'http://www.provincial.com/',
# Why: #4680 in Alexa global
'http://www.play.com/',
# Why: #4681 in Alexa global
'http://www.beslist.nl/',
# Why: #4682 in Alexa global
'http://www.nttdocomo.co.jp/',
# Why: #4683 in Alexa global
'http://www.shape.com/',
# Why: #4684 in Alexa global
'http://www.alhilal.com/',
# Why: #4685 in Alexa global
'http://www.irecommend.ru/',
# Why: #4686 in Alexa global
'http://www.cmmnts.com/',
# Why: #4687 in Alexa global
'http://www.1news.az/',
# Why: #4688 in Alexa global
'http://www.kinobanda.net/',
# Why: #4689 in Alexa global
'http://www.banamex.com.mx/',
# Why: #4690 in Alexa global
'http://www.cleanfiles.net/',
# Why: #4691 in Alexa global
'http://www.algeriaforum.net/',
# Why: #4692 in Alexa global
'http://www.zumi.pl/',
# Why: #4693 in Alexa global
'http://www.giallozafferano.it/',
# Why: #4694 in Alexa global
'http://www.news-postseven.com/',
# Why: #4695 in Alexa global
'http://www.firstcry.com/',
# Why: #4696 in Alexa global
'http://www.mhlw.go.jp/',
# Why: #4697 in Alexa global
'http://www.lookforporn.com/',
# Why: #4698 in Alexa global
'http://www.xxsy.net/',
# Why: #4699 in Alexa global
'http://www.scriptmafia.org/',
# Why: #4700 in Alexa global
'http://www.intodns.com/',
# Why: #4701 in Alexa global
'http://www.famitsu.com/',
# Why: #4702 in Alexa global
'http://www.eclipse.org/',
# Why: #4704 in Alexa global
'http://www.net-a-porter.com/',
# Why: #4705 in Alexa global
'http://www.btemplates.com/',
# Why: #4706 in Alexa global
'http://www.topshop.com/',
# Why: #4707 in Alexa global
'http://www.myvidster.com/',
# Why: #4708 in Alexa global
'http://www.calciomercato.com/',
# Why: #4709 in Alexa global
'http://www.arabyonline.com/',
# Why: #4710 in Alexa global
'http://www.lesechos.fr/',
# Why: #4711 in Alexa global
'http://www.empireavenue.com/',
# Why: #4712 in Alexa global
'http://www.damnlol.com/',
# Why: #4713 in Alexa global
'http://www.nukistream.com/',
# Why: #4714 in Alexa global
'http://www.wayport.net/',
# Why: #4715 in Alexa global
'http://www.buienradar.nl/',
# Why: #4716 in Alexa global
'http://www.vivastreet.co.in/',
# Why: #4717 in Alexa global
'http://www.kroger.com/',
# Why: #4718 in Alexa global
'http://www.geocaching.com/',
# Why: #4719 in Alexa global
'http://www.hunantv.com/',
# Why: #4720 in Alexa global
'http://www.fotolog.net/',
# Why: #4721 in Alexa global
'http://www.gunbroker.com/',
# Why: #4722 in Alexa global
'http://www.flalottery.com/',
# Why: #4723 in Alexa global
'http://www.priples.com/',
# Why: #4724 in Alexa global
'http://www.nlayer.net/',
# Why: #4725 in Alexa global
'http://www.trafficshop.com/',
# Why: #4726 in Alexa global
'http://www.standardmedia.co.ke/',
# Why: #4727 in Alexa global
'http://www.finanzen.net/',
# Why: #4728 in Alexa global
'http://www.meta.ua/',
# Why: #4729 in Alexa global
'http://www.gfy.com/',
# Why: #4730 in Alexa global
'http://www.playground.ru/',
# Why: | |
<filename>nanobot/nanobot.py
#! /usr/bin/env/python
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from datetime import datetime
from datetime import date
from glob import glob
from random import random
from time import time
from twython import Twython
from twython import TwythonStreamer
from twython.exceptions import TwythonError
from uuid import uuid4
import json
import os.path
import sys
from jsonSettings import JsonSettings as Settings
# if we're started without a config file, we create a default/empty
# file that the user can fill in and then restart the app.
# The "!!!" values are placeholders that must be replaced by the user
# before the bot can authenticate with Twitter.
kDefaultConfigDict = {
    "appKey" : "!!! Your app's 'Consumer Key'",
    "appSecret" : "!!! Your app's 'Consumer Secret'",
    "accessToken" : "!!! your access token",
    "accessTokenSecret" : "!!! your access token secret",
    "lyricFilePath" : "*.lyric",
    # 24.0/1440 -- presumably ~24 tweets/day when the bot is run once per
    # minute from cron; confirm the actual scheduling cadence.
    "tweetProbability" : 24.0 / 1440,
    "minimumSpacing" : 60*60,
    "minimumDaySpacing" : 30,
    # Passed through datetime.strftime() in Log(), so format codes expand
    # to e.g. one log file per month.
    "logFilePath" : "%Y-%m.txt"
}

# Shown (after .format(path)) when we had to create a default settings file.
kSettingsFileErrorMsg = '''\
There was no settings file found at {0}, so I just created an empty/default
file for you. Please edit it, adding the correct/desired values for each
setting as is appropriate.
'''

# Extension of the JSON files used to hand stream events from the streaming
# process to the cron-driven bot process.
kStreamFileExtension = ".stream"
class NanobotStreamer(TwythonStreamer):
    """
    Streaming-API listener: persists each incoming event to a JSON file so
    the separate cron-driven bot process can act on it later.

    (Python 2 codebase -- note the print statement in on_error.)
    """

    def SetOutputPath(self, path):
        # Directory where the per-event JSON files are written.
        self.path = path

    def on_success(self, data):
        ''' Called when we detect an event through the streaming API.
        The base class version looks for quoted tweets and for each one it
        finds, we write out a text file that contains the ID of the tweet
        that mentions us.
        The other (cron-job) version of your bot will look for any files with the
        correct extension (identified by `kStreamFileExtension`) in its
        HandleQuotes() method and favorite^H^H^H^H like those tweets.
        See https://dev.twitter.com/streaming/userstreams
        '''
        # for now, all we're interested in handling are events.
        if 'event' in data:
            # Dump the data into a JSON file for the other cron-process to
            # handle the next time it wakes up. uuid4 gives each file a
            # unique name so concurrent events never collide.
            fileName = os.path.join(self.path, "{0}{1}".format(
                uuid4().hex, kStreamFileExtension))
            with open(fileName, "wt") as f:
                f.write(json.dumps(data).encode("utf-8"))

    def on_error(self, status_code, data):
        # Report the HTTP status and stop streaming; the caller decides
        # whether to reconnect.
        print "ERROR: {0}".format(status_code)
        self.disconnect()
class Nanobot(object):
'''
A tiny little twitterbot framework in Python.
'''
def __init__(self, argDict=None):
if not argDict:
argDict = { 'debug' : False, "force": False,
'stream': False, 'botPath' : "."}
# update this object's internal dict with the dict of args that was passed
# in so we can access those values as attributes.
self.__dict__.update(argDict)
# we build a list of dicts containing status (and whatever other args
# we may need to pass to the update_status function as we exit, most
# probably 'in_reply-to_status_id' when we're replying to someone.)
self.tweets = []
##
## Methods That Your Bot Might Wish To Override
##
    def GetDefaultConfigOptions(self):
        '''
        Override this in your derived class if you'd like to ensure that
        there's one or more specific key/value pairs present in the
        settings file for a user to edit by hand as needed.

        Returns a dict of extra default settings; the base implementation
        contributes none.
        '''
        return {}
def IsReadyForUpdate(self):
''' Check to see if we should be generating a tweet this time.
Defaults to the built-in logic where we prevent tweets happening
too closely together or too far apart, and this can be overridden
if self.force is True.
Derived classes are free to create their own version of this method.
'''
doUpdate = self.force
last = self.settings.lastUpdate or 0
now = int(time())
lastTweetAge = now - last
# default to creating a tweet at *least* every 4 hours.
maxSpace = self.settings.GetOrDefault("maximumSpacing", 4 * 60 * 60)
if lastTweetAge > maxSpace:
# been too long since the last tweet. Make a new one for our fans!
doUpdate = True
elif random() < self.settings.tweetProbability:
# Make sure that we're not tweeting too frequently. Default is to enforce
# a 1-hour gap between tweets (configurable using the 'minimumSpacing' key
# in the config file, providing a number of minutes we must remain silent.)
requiredSpace = self.settings.GetOrDefault("minimumSpacing", 60*60)
if lastTweetAge > requiredSpace:
# Our last tweet was a while ago, let's make another one.
doUpdate = True
return doUpdate
    def CreateUpdateTweet(self):
        ''' Override this method in your derived bot class.

        Called by CreateUpdate() when it's time to generate new output;
        implementations are expected to add status dicts to self.tweets
        (which SendTweets() later posts).
        '''
        pass
def HandleOneMention(self, mention):
''' should be overridden by derived classes. Base version
likes any tweet that mentions us.
'''
who = mention['user']['screen_name']
text = mention['text']
theId = mention['id_str']
# we favorite every mention that we see
if self.debug:
print "Faving tweet {0} by {1}:\n {2}".format(theId, who, text.encode("utf-8"))
else:
self.twitter.create_favorite(id=theId)
    def PreRun(self):
        '''
        Override in derived class to perform any actions that need
        to happen before the body of the Run() method.
        Base implementation is a no-op.
        '''
        pass
    def PostRun(self):
        '''
        Override in derived class to perform any actions that need
        to happen after the body of the Run() method.
        Base implementation is a no-op.
        '''
        pass
##
## Methods That Your Bot Probably Won't Want To Override
##
def GetPath(self, path):
'''
Put all the relative path calculations in one place. If we're given a path
that has a leading slash, we treat it as absolute and do nothing. Otherwise,
we treat it as a relative path based on the botPath setting in our config file.
'''
if not path.startswith(os.sep):
path = os.path.join(self.botPath, path)
return path
def Log(self, eventType, dataList):
'''
Create an entry in the log file. Each entry will look like:
timestamp\tevent\tdata1\tdata2 <etc>\n
where:
timestamp = integer seconds since the UNIX epoch
event = string identifying the event
data1..n = individual data fields, as appropriate for each event type.
To avoid maintenance issues w/r/t enormous log files, the log filename
that's stored in the settings file is passed through datetime.strftime()
so we can expand any format codes found there against the current date/time
and create e.g. a monthly log file.
'''
now = int(time())
today = datetime.fromtimestamp(now)
# if there's no explicit log file path/name, we create one
# that's the current year & month.
fileName = self.settings.logFilePath
if not fileName:
fileName = "%Y-%m.txt"
self.settings.logFilePath = fileName
path = self.GetPath(fileName)
path = today.strftime(path)
with open(path, "a+t") as f:
f.write("{0}\t{1}\t".format(now, eventType))
f.write("\t".join(dataList))
f.write("\n")
def SendTweets(self):
''' send each of the status updates that are collected in self.tweets
'''
for msg in self.tweets:
if self.debug:
print "TWEET: {0}".format(msg['status'].encode("UTF-8"))
else:
self.twitter.update_status(**msg)
def CreateUpdate(self):
'''
Called everytime the bot is Run().
Checks to see if the bot thinks that it's ready to generate new output,
and if so, calls CreateUpdateTweet to generate it.
'''
if self.force or self.IsReadyForUpdate():
self.CreateUpdateTweet()
def HandleMentions(self):
'''
Get all the tweets that mention us since the last time we ran and
process each one.
'''
mentions = self.twitter.get_mentions_timeline(since_id=self.settings.lastMentionId)
if mentions:
# Remember the most recent tweet id, which will be the one at index zero.
self.settings.lastMentionId = mentions[0]['id_str']
for mention in mentions:
self.HandleOneMention(mention)
def HandleStreamEvents(self):
'''
There may be a bot process that's waiting for stream events. When it
encounters one, it writes the data out into a file with the extension
".stream". Handle any of those files that are present and delete them when
we're done.
See https://dev.twitter.com/node/201
for more information on the events that your bot can | |
from datetime import timedelta, datetime
import htmls
from django import test
from django.utils import timezone
from model_mommy import mommy
from devilry.apps.core.models import Assignment, AssignmentGroup
from devilry.devilry_cradmin import devilry_listbuilder
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_group import devilry_group_mommy_factories
class TestFullyAnonymousSubjectAdminItemValue(test.TestCase):
    """Tests for the FullyAnonymousSubjectAdminItemValue listbuilder item."""

    def __make_itemvalue(self, group):
        """Build the item value under test for ``group``."""
        return devilry_listbuilder.assignmentgroup.FullyAnonymousSubjectAdminItemValue(
            value=group,
            assignment=group.assignment)

    def test_non_anonymous_not_allowed(self):
        testgroup = mommy.make('core.AssignmentGroup')
        with self.assertRaisesMessage(ValueError,
                                      'Can only use FullyAnonymousSubjectAdminItemValue for fully '
                                      'anonymous assignments.'):
            self.__make_itemvalue(testgroup)

    def test_semi_anonymous_is_not_allowed(self):
        testgroup = mommy.make(
            'core.AssignmentGroup',
            parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
        with self.assertRaisesMessage(ValueError,
                                      'Can only use FullyAnonymousSubjectAdminItemValue for fully '
                                      'anonymous assignments.'):
            self.__make_itemvalue(testgroup)

    def test_name_fully_anonymous_is_not_anonymized(self):
        testgroup = mommy.make(
            'core.AssignmentGroup',
            parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='<NAME>',
                   relatedstudent__user__shortname='<EMAIL>')
        selector = htmls.S(self.__make_itemvalue(testgroup).render())
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)
class TestStudentItemValue(test.TestCase):
    """Tests for the StudentItemValue listbuilder item."""

    def setUp(self):
        # Install the AssignmentGroup db-cache triggers before each test;
        # the rendered item values read from the cached data.
        AssignmentGroupDbCacheCustomSql().initialize()

    def __render_studentitemvalue(self, group, **kwargs):
        """Render a StudentItemValue for ``group`` and return an htmls.S selector."""
        assignment = Assignment.objects.prefetch_point_to_grade_map()\
            .get(id=group.parentnode_id)
        return htmls.S(devilry_listbuilder.assignmentgroup.StudentItemValue(
            value=group,
            assignment_id_to_assignment_map={assignment.id: assignment},
            **kwargs).render())

    def test_title_default(self):
        """Default title is '<subject>.<period> - <assignment long name>'."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__parentnode__parentnode__short_name='testsubject',
                               parentnode__parentnode__short_name='testperiod',
                               parentnode__long_name='Test Assignment')
        mommy.make('core.Candidate',
                   assignment_group=testgroup)
        selector = self.__render_studentitemvalue(group=testgroup)
        self.assertEqual(
            'testsubject.testperiod - Test Assignment',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_title_include_periodpath_false(self):
        """With include_periodpath=False only the assignment long name is shown."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__long_name='Test Assignment')
        mommy.make('core.Candidate',
                   assignment_group=testgroup)
        selector = self.__render_studentitemvalue(group=testgroup, include_periodpath=False)
        self.assertEqual(
            'Test Assignment',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_examiners_not_included(self):
        """Examiner names are never rendered in the student item value."""
        testgroup = mommy.make('core.AssignmentGroup')
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='Test User',
                   relatedexaminer__user__shortname='<EMAIL>')
        selector = self.__render_studentitemvalue(group=testgroup)
        self.assertFalse(
            selector.exists('.devilry-cradmin-groupitemvalue-examiners-names'))

    def test_grade_students_can_see_points_false(self):
        """Points are hidden from the grade when students_can_see_points=False."""
        testgroup = devilry_group_mommy_factories.feedbackset_first_attempt_published(
            group__parentnode__students_can_see_points=False,
            grading_points=1).group
        selector = self.__render_studentitemvalue(group=testgroup)
        self.assertEqual(
            'Grade: passed',
            selector.one('.devilry-cradmin-groupitemvalue-grade').alltext_normalized)

    def test_grade_students_can_see_points_true(self):
        """Points are appended to the grade when students_can_see_points=True."""
        testgroup = devilry_group_mommy_factories.feedbackset_first_attempt_published(
            group__parentnode__students_can_see_points=True,
            grading_points=1).group
        selector = self.__render_studentitemvalue(group=testgroup)
        self.assertEqual(
            'Grade: passed (1/1)',
            selector.one('.devilry-cradmin-groupitemvalue-grade').alltext_normalized)

    def test_deadline_first_attempt(self):
        """With only a first attempt, the assignment first_deadline is shown."""
        testgroup = mommy.make(
            'core.AssignmentGroup',
            parentnode=mommy.make_recipe(
                'devilry.apps.core.assignment_activeperiod_start',
                first_deadline=datetime(2000, 1, 15, 12, 0)))
        # Pin the datetime rendering format so the assertion is locale-independent.
        with self.settings(DATETIME_FORMAT='Y-m-d H:i', USE_L10N=False):
            selector = self.__render_studentitemvalue(group=testgroup)
        self.assertEqual(
            '2000-01-15 12:00',
            selector.one(
                '.devilry-cradmin-groupitemvalue-deadline__datetime').alltext_normalized)

    def test_deadline_new_attempt(self):
        """A new attempt replaces the rendered deadline with its own deadline."""
        testgroup = mommy.make(
            'core.AssignmentGroup',
            parentnode=mommy.make_recipe(
                'devilry.apps.core.assignment_activeperiod_start',
                first_deadline=datetime(2000, 1, 15, 12, 0)))
        devilry_group_mommy_factories.feedbackset_new_attempt_unpublished(
            group=testgroup,
            deadline_datetime=datetime(2200, 1, 2, 12, 30))
        # Pin the datetime rendering format so the assertion is locale-independent.
        with self.settings(DATETIME_FORMAT='Y-m-d H:i', USE_L10N=False):
            selector = self.__render_studentitemvalue(group=testgroup)
        self.assertEqual(
            '2200-01-02 12:30',
            selector.one(
                '.devilry-cradmin-groupitemvalue-deadline__datetime').alltext_normalized)

    def test_attempt_number_first_attempt(self):
        """No attempt-number suffix is shown for the first attempt."""
        testgroup = mommy.make(
            'core.AssignmentGroup',
            parentnode=mommy.make_recipe(
                'devilry.apps.core.assignment_activeperiod_start'))
        selector = self.__render_studentitemvalue(group=testgroup)
        self.assertFalse(
            selector.exists(
                '.devilry-cradmin-groupitemvalue-deadline__attemptnumber'))

    def test_attempt_number_new_attempt1(self):
        """One extra attempt renders '(second attempt)'."""
        testgroup = mommy.make(
            'core.AssignmentGroup',
            parentnode=mommy.make_recipe(
                'devilry.apps.core.assignment_activeperiod_start'))
        devilry_group_mommy_factories.feedbackset_new_attempt_unpublished(
            group=testgroup)
        selector = self.__render_studentitemvalue(group=testgroup)
        self.assertEqual(
            '(second attempt)',
            selector.one(
                '.devilry-cradmin-groupitemvalue-deadline__attemptnumber').alltext_normalized)

    def test_attempt_number_new_attempt2(self):
        """Two extra attempts render '(third attempt)'."""
        testgroup = mommy.make(
            'core.AssignmentGroup',
            parentnode=mommy.make_recipe(
                'devilry.apps.core.assignment_activeperiod_start'))
        devilry_group_mommy_factories.feedbackset_new_attempt_published(
            group=testgroup)
        devilry_group_mommy_factories.feedbackset_new_attempt_unpublished(
            group=testgroup)
        selector = self.__render_studentitemvalue(group=testgroup)
        self.assertEqual(
            '(third attempt)',
            selector.one(
                '.devilry-cradmin-groupitemvalue-deadline__attemptnumber').alltext_normalized)
class TestExaminerItemValue(test.TestCase):
    """Tests for the ExaminerItemValue listbuilder item."""

    def setUp(self):
        # Install the AssignmentGroup db-cache triggers before each test.
        AssignmentGroupDbCacheCustomSql().initialize()

    def test_name(self):
        """Non-anonymous assignments render the candidate name."""
        testgroup = mommy.make('core.AssignmentGroup')
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='<NAME>',
                   relatedstudent__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_name_semi_anonymous_is_anonymized(self):
        """Semi-anonymous assignments show the anonymous id instead of the name."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__automatic_anonymous_id='MyAnonymousID')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'MyAnonymousID',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_name_fully_anonymous_is_anonymized(self):
        """Fully anonymous assignments show the anonymous id instead of the name."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__automatic_anonymous_id='MyAnonymousID')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'MyAnonymousID',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_examiners_include_examiners_false(self):
        """include_examiners=False removes the examiner block entirely."""
        testgroup = mommy.make('core.AssignmentGroup')
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='<NAME>',
                   relatedexaminer__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment, include_examiners=False).render())
        self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-examiners-names'))
        self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-examiners'))

    def test_examiners_include_examiners_true(self):
        """include_examiners=True renders the examiner names."""
        testgroup = mommy.make('core.AssignmentGroup')
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='<NAME>',
                   relatedexaminer__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment, include_examiners=True).render())
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)

    def test_examiners_semi_anonymous_include_examiners_true(self):
        """Examiner names are not anonymized for the examiner's own view (semi)."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='<NAME>',
                   relatedexaminer__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment, include_examiners=True).render())
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)

    def test_examiners_fully_anonymous_include_examiners_true(self):
        """Examiner names are not anonymized for the examiner's own view (fully)."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='<NAME>',
                   relatedexaminer__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment, include_examiners=True).render())
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)

    def test_has_unpublished_feedbackdraft_draft_false(self):
        """No draft warning when the feedback is already published."""
        testgroup = devilry_group_mommy_factories\
            .feedbackset_first_attempt_published(grading_points=1)\
            .group
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-unpublished-feedbackdraft'))

    def test_has_unpublished_feedbackdraft_draft_true(self):
        """An unpublished grading shows the unpublished-draft warning."""
        testgroup = devilry_group_mommy_factories\
            .feedbackset_first_attempt_unpublished(grading_points=1)\
            .group
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Unpublished feedback draft: passed (1/1)',
            selector.one('.devilry-cradmin-groupitemvalue-unpublished-feedbackdraft').alltext_normalized)

    def test_grade_students_can_see_points_false(self):
        """Examiners always see points, even when students can not."""
        testgroup = devilry_group_mommy_factories\
            .feedbackset_first_attempt_published(group__parentnode__students_can_see_points=False,
                                                 grading_points=1)\
            .group
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Grade: passed (1/1)',
            selector.one('.devilry-cradmin-groupitemvalue-grade').alltext_normalized)

    def test_grade_students_can_see_points_true(self):
        """Points are shown when students_can_see_points=True as well."""
        testgroup = devilry_group_mommy_factories\
            .feedbackset_first_attempt_published(group__parentnode__students_can_see_points=True,
                                                 grading_points=1)\
            .group
        selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Grade: passed (1/1)',
            selector.one('.devilry-cradmin-groupitemvalue-grade').alltext_normalized)
class TestPeriodAdminItemValue(test.TestCase):
    """Tests for the PeriodAdminItemValue listbuilder item."""

    def setUp(self):
        AssignmentGroupDbCacheCustomSql().initialize()

    def __render(self, group):
        """Render a PeriodAdminItemValue for ``group`` and wrap it in a selector."""
        itemvalue = devilry_listbuilder.assignmentgroup.PeriodAdminItemValue(
            value=group, assignment=group.assignment)
        return htmls.S(itemvalue.render())

    def test_name(self):
        testgroup = mommy.make('core.AssignmentGroup')
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='Test User',
                   relatedstudent__user__shortname='<EMAIL>')
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            self.__render(testgroup).one(
                '.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_anonymous_not_allowed(self):
        testgroup = mommy.make(
            'core.AssignmentGroup',
            parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
        with self.assertRaisesRegex(ValueError, '^.*for anonymous assignments.*$'):
            devilry_listbuilder.assignmentgroup.PeriodAdminItemValue(
                value=testgroup, assignment=testgroup.assignment)

    def test_examiners(self):
        testgroup = mommy.make('core.AssignmentGroup')
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='<NAME>',
                   relatedexaminer__user__shortname='<EMAIL>')
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            self.__render(testgroup).one(
                '.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)

    def test_grade_students_can_see_points_false(self):
        testgroup = devilry_group_mommy_factories.feedbackset_first_attempt_published(
            group__parentnode__students_can_see_points=False,
            grading_points=1).group
        self.assertEqual(
            'Grade: passed (1/1)',
            self.__render(testgroup).one(
                '.devilry-cradmin-groupitemvalue-grade').alltext_normalized)

    def test_grade_students_can_see_points_true(self):
        testgroup = devilry_group_mommy_factories.feedbackset_first_attempt_published(
            group__parentnode__students_can_see_points=True,
            grading_points=1).group
        self.assertEqual(
            'Grade: passed (1/1)',
            self.__render(testgroup).one(
                '.devilry-cradmin-groupitemvalue-grade').alltext_normalized)
class TestSubjectAdminItemValue(test.TestCase):
    """Tests for the SubjectAdminItemValue listbuilder item."""

    def setUp(self):
        # Install the AssignmentGroup db-cache triggers before each test.
        AssignmentGroupDbCacheCustomSql().initialize()

    def test_name(self):
        """Non-anonymous assignments render the candidate name."""
        testgroup = mommy.make('core.AssignmentGroup')
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='<NAME>',
                   relatedstudent__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.SubjectAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Test User(<EMAIL>)',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_name_semi_anonymous_is_not_anonymized(self):
        """Subject admins see real names even on semi-anonymous assignments."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='<NAME>',
                   relatedstudent__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.SubjectAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Test User(<EMAIL>)',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_fully_anonymous_is_not_allowed(self):
        """Fully anonymous assignments must use the FullyAnonymous* item value."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
        with self.assertRaisesRegex(ValueError, '^.*for fully anonymous assignments.*$'):
            devilry_listbuilder.assignmentgroup.SubjectAdminItemValue(value=testgroup, assignment=testgroup.assignment)

    def test_examiners(self):
        """Examiner names are rendered for subject admins."""
        testgroup = mommy.make('core.AssignmentGroup')
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='<NAME>',
                   relatedexaminer__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.SubjectAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)

    def test_examiners_semi_anonymous(self):
        """Examiner names are shown even on semi-anonymous assignments."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='<NAME>',
                   relatedexaminer__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.SubjectAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)

    def test_grade_students_can_see_points_false(self):
        """Admins always see points, even when students can not."""
        testgroup = devilry_group_mommy_factories\
            .feedbackset_first_attempt_published(group__parentnode__students_can_see_points=False,
                                                 grading_points=1)\
            .group
        selector = htmls.S(devilry_listbuilder.assignmentgroup.SubjectAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Grade: passed (1/1)',
            selector.one('.devilry-cradmin-groupitemvalue-grade').alltext_normalized)

    def test_grade_students_can_see_points_true(self):
        """Points are shown when students_can_see_points=True as well."""
        testgroup = devilry_group_mommy_factories\
            .feedbackset_first_attempt_published(group__parentnode__students_can_see_points=True,
                                                 grading_points=1)\
            .group
        selector = htmls.S(devilry_listbuilder.assignmentgroup.SubjectAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Grade: passed (1/1)',
            selector.one('.devilry-cradmin-groupitemvalue-grade').alltext_normalized)
class TestDepartmentAdminItemValue(test.TestCase):
    """Tests for the DepartmentAdminItemValue listbuilder item."""

    def setUp(self):
        # Install the AssignmentGroup db-cache triggers before each test.
        AssignmentGroupDbCacheCustomSql().initialize()

    def test_name(self):
        """Non-anonymous assignments render the candidate name."""
        testgroup = mommy.make('core.AssignmentGroup')
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='<NAME>',
                   relatedstudent__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.DepartmentAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Test User(<EMAIL>)',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_name_semi_anonymous_is_not_anonymized(self):
        """Department admins see real names on semi-anonymous assignments."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='<NAME>',
                   relatedstudent__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.DepartmentAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Test User(<EMAIL>)',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_name_fully_anonymous_is_not_anonymized(self):
        """Department admins see real names even on fully anonymous assignments."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='<NAME>',
                   relatedstudent__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.DepartmentAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Test User(<EMAIL>)',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_examiners(self):
        """Examiner names are rendered for department admins."""
        testgroup = mommy.make('core.AssignmentGroup')
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='<NAME>',
                   relatedexaminer__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.DepartmentAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Test User(<EMAIL>)',
            selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)

    def test_examiners_semi_anonymous(self):
        """Examiner names are shown on semi-anonymous assignments."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='<NAME>',
                   relatedexaminer__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.DepartmentAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)

    def test_examiners_fully_anonymous(self):
        """Examiner names are shown on fully anonymous assignments."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
        mommy.make('core.Examiner',
                   assignmentgroup=testgroup,
                   relatedexaminer__user__fullname='<NAME>',
                   relatedexaminer__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.DepartmentAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            '<NAME>(<EMAIL>)',
            selector.one('.devilry-cradmin-groupitemvalue-examiners-names').alltext_normalized)

    def test_grade_students_can_see_points_false(self):
        """Admins always see points, even when students can not."""
        testgroup = devilry_group_mommy_factories\
            .feedbackset_first_attempt_published(group__parentnode__students_can_see_points=False,
                                                 grading_points=1)\
            .group
        selector = htmls.S(devilry_listbuilder.assignmentgroup.DepartmentAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Grade: passed (1/1)',
            selector.one('.devilry-cradmin-groupitemvalue-grade').alltext_normalized)

    def test_grade_students_can_see_points_true(self):
        """Points are shown when students_can_see_points=True as well."""
        testgroup = devilry_group_mommy_factories\
            .feedbackset_first_attempt_published(group__parentnode__students_can_see_points=True,
                                                 grading_points=1)\
            .group
        selector = htmls.S(devilry_listbuilder.assignmentgroup.DepartmentAdminItemValue(
            value=testgroup, assignment=testgroup.assignment).render())
        self.assertEqual(
            'Grade: passed (1/1)',
            selector.one('.devilry-cradmin-groupitemvalue-grade').alltext_normalized)
class MockNoMultiselectItemValue(devilry_listbuilder.assignmentgroup.ItemValueMixin,
                                 devilry_listbuilder.assignmentgroup.NoMultiselectItemValue):
    # Minimal concrete item value used by TestItemValue to exercise the
    # shared ItemValueMixin rendering without any role-specific subclass.
    def get_devilryrole(self):
        return 'student'  # Should not affect any of the tests that uses this class
class TestItemValue(test.TestCase):
    """Tests for the shared ItemValueMixin rendering (status, grade, comments)."""

    def setUp(self):
        AssignmentGroupDbCacheCustomSql().initialize()

    def __render(self, group):
        """Render the mock item value for ``group`` and wrap it in a selector."""
        return htmls.S(MockNoMultiselectItemValue(
            value=group, assignment=group.assignment).render())

    def test_status_is_corrected(self):
        testgroup = devilry_group_mommy_factories\
            .feedbackset_first_attempt_published(grading_points=1)\
            .group
        self.assertFalse(
            self.__render(testgroup).exists('.devilry-cradmin-groupitemvalue-status'))

    def test_status_is_waiting_for_feedback(self):
        testgroup = mommy.make(
            'core.AssignmentGroup',
            parentnode=mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                         first_deadline=timezone.now() - timedelta(days=2)))
        devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(
            group=testgroup)
        testgroup.refresh_from_db()
        selector = self.__render(testgroup)
        self.assertEqual(
            'Status: waiting for feedback',
            selector.one('.devilry-cradmin-groupitemvalue-status').alltext_normalized)
        self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-grade'))

    def test_status_is_waiting_for_deliveries(self):
        testgroup = mommy.make(
            'core.AssignmentGroup',
            parentnode=mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                         first_deadline=timezone.now() + timedelta(days=2)))
        devilry_group_mommy_factories.feedbackset_first_attempt_unpublished(
            group=testgroup)
        testgroup.refresh_from_db()
        selector = self.__render(testgroup)
        self.assertEqual(
            'Status: waiting for deliveries',
            selector.one('.devilry-cradmin-groupitemvalue-status').alltext_normalized)
        self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-grade'))

    def test_grade_not_available_unless_corrected(self):
        testgroup = devilry_group_mommy_factories.feedbackset_first_attempt_unpublished().group
        self.assertFalse(
            self.__render(testgroup).exists('.devilry-cradmin-groupitemvalue-grade'))

    def test_grade_comment_summary_is_available(self):
        testgroup = mommy.make('core.AssignmentGroup')
        testgroup.refresh_from_db()
        selector = self.__render(testgroup)
        self.assertTrue(selector.exists('.devilry-cradmin-groupitemvalue-comments'))
        self.assertEqual(
            '0 comments from student. 0 files from student. 0 comments from examiner.',
            selector.one('.devilry-cradmin-groupitemvalue-comments').alltext_normalized)
class TestFullyAnonymousSubjectAdminMultiselectItemValue(test.TestCase):
    """Tests for the FullyAnonymousSubjectAdminMultiselectItemValue item."""

    def setUp(self):
        # Install the AssignmentGroup db-cache triggers before each test.
        AssignmentGroupDbCacheCustomSql().initialize()

    def test_non_anonymous_not_allowed(self):
        """Constructing the item value for a non-anonymous assignment raises."""
        testgroup = mommy.make('core.AssignmentGroup')
        with self.assertRaisesMessage(ValueError,
                                      'Can only use FullyAnonymousSubjectAdminMultiselectItemValue for fully '
                                      'anonymous assignments.'):
            devilry_listbuilder.assignmentgroup.FullyAnonymousSubjectAdminMultiselectItemValue(
                value=testgroup,
                assignment=testgroup.assignment)

    def test_semi_anonymous_is_not_allowed(self):
        """Constructing the item value for a semi-anonymous assignment raises."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
        with self.assertRaisesMessage(ValueError,
                                      'Can only use FullyAnonymousSubjectAdminMultiselectItemValue for fully '
                                      'anonymous assignments.'):
            devilry_listbuilder.assignmentgroup.FullyAnonymousSubjectAdminMultiselectItemValue(
                value=testgroup,
                assignment=testgroup.assignment)

    def test_name_fully_anonymous_is_not_anonymized(self):
        """Subject admins on fully anonymous assignments still see real names here."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='<NAME>',
                   relatedstudent__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.FullyAnonymousSubjectAdminMultiselectItemValue(
            value=testgroup,
            assignment=testgroup.assignment).render())
        self.assertEqual(
            'Test User(<EMAIL>)',
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_arialabels_fully_anonymous_is_not_anonymized(self):
        """Select/deselect aria-labels use the real (non-anonymized) name."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='<NAME>',
                   relatedstudent__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.FullyAnonymousSubjectAdminMultiselectItemValue(
            value=testgroup,
            assignment=testgroup.assignment).render())
        self.assertEqual(
            'Select "Test User"',
            selector.one('.django-cradmin-multiselect2-itemvalue-button')['aria-label'])
        self.assertEqual(
            'Deselect "Test User"',
            selector.one('.django-cradmin-multiselect2-target-selected-item-deselectbutton')['aria-label'])

    def test_selected_item_title(self):
        """The selected-item title shows the candidate name and shortname."""
        testgroup = mommy.make('core.AssignmentGroup',
                               parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
        mommy.make('core.Candidate',
                   assignment_group=testgroup,
                   relatedstudent__user__fullname='<NAME>',
                   relatedstudent__user__shortname='<EMAIL>')
        selector = htmls.S(devilry_listbuilder.assignmentgroup.FullyAnonymousSubjectAdminMultiselectItemValue(
            value=testgroup,
            assignment=testgroup.assignment).render())
        self.assertEqual(
            'Test User(<EMAIL>)',
            selector.one('.django-cradmin-multiselect2-target-selected-item-title').alltext_normalized)
class TestExaminerMultiselectItemValue(test.TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_name(self):
testgroup = mommy.make('core.AssignmentGroup')
mommy.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__user__fullname='<NAME>',
relatedstudent__user__shortname='<EMAIL>')
selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'Test User(<EMAIL>)',
selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)
def test_name_semi_anonymous_is_anonymized(self):
testgroup = mommy.make('core.AssignmentGroup',
parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
mommy.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__automatic_anonymous_id='MyAnonymousID')
selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'MyAnonymousID',
selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)
def test_name_fully_anonymous_is_anonymized(self):
testgroup = mommy.make('core.AssignmentGroup',
parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
mommy.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__automatic_anonymous_id='MyAnonymousID')
selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue(
value=testgroup, assignment=testgroup.assignment).render())
self.assertEqual(
'MyAnonymousID',
selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized)
def test_selected_item_title_not_anonymous(self):
testgroup = mommy.make('core.AssignmentGroup')
mommy.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__user__fullname='<NAME>',
relatedstudent__user__shortname='<EMAIL>')
selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue(
value=testgroup,
assignment=testgroup.assignment).render())
self.assertEqual(
'Test User(<EMAIL>)',
selector.one('.django-cradmin-multiselect2-target-selected-item-title').alltext_normalized)
def test_selected_item_title_semi_anonymous(self):
testgroup = mommy.make('core.AssignmentGroup',
parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
mommy.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__automatic_anonymous_id='MyAnonymousID')
selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue(
value=testgroup,
assignment=testgroup.assignment).render())
self.assertEqual(
'MyAnonymousID',
selector.one('.django-cradmin-multiselect2-target-selected-item-title').alltext_normalized)
def test_selected_item_title_fully_anonymous(self):
testgroup = mommy.make('core.AssignmentGroup',
parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
mommy.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__automatic_anonymous_id='MyAnonymousID')
selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue(
value=testgroup,
assignment=testgroup.assignment).render())
self.assertEqual(
'MyAnonymousID',
selector.one('.django-cradmin-multiselect2-target-selected-item-title').alltext_normalized)
def test_arialabels_not_anonymous(self):
testgroup = mommy.make('core.AssignmentGroup')
mommy.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__user__fullname='<NAME>',
relatedstudent__user__shortname='<EMAIL>')
selector = htmls.S(devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue(
value=testgroup,
assignment=testgroup.assignment).render())
self.assertEqual(
'Select "Test User"',
selector.one('.django-cradmin-multiselect2-itemvalue-button')['aria-label'])
self.assertEqual(
'Deselect "Test User"',
selector.one('.django-cradmin-multiselect2-target-selected-item-deselectbutton')['aria-label'])
def test_arialabels_semi_anonymous(self):
    """Semi-anonymous assignment: select/deselect aria-labels use the
    automatic anonymous ID, never the student's name."""
    group = mommy.make(
        'core.AssignmentGroup',
        parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
    mommy.make(
        'core.Candidate',
        assignment_group=group,
        relatedstudent__automatic_anonymous_id='MyAnonymousID')
    itemvalue = devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue(
        value=group, assignment=group.assignment)
    selector = htmls.S(itemvalue.render())
    select_button = selector.one('.django-cradmin-multiselect2-itemvalue-button')
    deselect_button = selector.one(
        '.django-cradmin-multiselect2-target-selected-item-deselectbutton')
    self.assertEqual(
        'Select "MyAnonymousID"',
        select_button['aria-label'])
    self.assertEqual(
        'Deselect "MyAnonymousID"',
        deselect_button['aria-label'])
def test_arialabels_fully_anonymous(self):
    """Fully anonymous assignment: select/deselect aria-labels use the
    automatic anonymous ID, never the student's name."""
    group = mommy.make(
        'core.AssignmentGroup',
        parentnode__anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
    mommy.make(
        'core.Candidate',
        assignment_group=group,
        relatedstudent__automatic_anonymous_id='MyAnonymousID')
    itemvalue = devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue(
        value=group, assignment=group.assignment)
    selector = htmls.S(itemvalue.render())
    select_button = selector.one('.django-cradmin-multiselect2-itemvalue-button')
    deselect_button = selector.one(
        '.django-cradmin-multiselect2-target-selected-item-deselectbutton')
    self.assertEqual(
        'Select "MyAnonymousID"',
        select_button['aria-label'])
    self.assertEqual(
        'Deselect "MyAnonymousID"',
        deselect_button['aria-label'])
def test_examiners_include_examiners_false(self):
    """With include_examiners=False no examiner markup is rendered at all."""
    group = mommy.make('core.AssignmentGroup')
    mommy.make(
        'core.Examiner',
        assignmentgroup=group,
        relatedexaminer__user__fullname='<NAME>',
        relatedexaminer__user__shortname='<EMAIL>')
    itemvalue = devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue(
        value=group, assignment=group.assignment, include_examiners=False)
    selector = htmls.S(itemvalue.render())
    self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-examiners-names'))
    self.assertFalse(selector.exists('.devilry-cradmin-groupitemvalue-examiners'))
def test_examiners_include_examiners_true(self):
testgroup = mommy.make('core.AssignmentGroup')
mommy.make('core.Examiner',
assignmentgroup=testgroup,
relatedexaminer__user__fullname='<NAME>',
relatedexaminer__user__shortname='<EMAIL>')
| |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module implements a string formatter based on the standard PEP
292 string.Template class extended with function calls. Variables, as
with string.Template, are indicated with $ and functions are delimited
with %.
This module assumes that everything is Unicode: the template and the
substitution values. Bytestrings are not supported. Also, the templates
always behave like the ``safe_substitute`` method in the standard
library: unknown symbols are left intact.
This is sort of like a tiny, horrible degeneration of a real templating
engine like Jinja2 or Mustache.
"""
from __future__ import division, absolute_import, print_function
import re
import ast
import dis
import types
import sys
import six
# Template syntax characters.
SYMBOL_DELIM = u'$'  # introduces a variable substitution ($var)
FUNC_DELIM = u'%'  # introduces a function call (%func{...})
GROUP_OPEN = u'{'  # opens a symbol group or argument list
GROUP_CLOSE = u'}'  # closes a symbol group or argument list
ARG_SEP = u','  # separates function arguments
ESCAPE_CHAR = u'$'  # doubles as the escape character ($$ -> literal $)
# Name-mangling prefixes used when compiling templates to Python code, so
# template identifiers cannot collide with helper names in the generated
# function's namespace.
VARIABLE_PREFIX = '__var_'
FUNCTION_PREFIX = '__func_'
class Environment(object):
    """Holds the substitution context for a template: the symbol values
    and the callable functions available during evaluation.
    """

    def __init__(self, values, functions):
        # Mapping of symbol name -> substitution value.
        self.values = values
        # Mapping of function name -> callable.
        self.functions = functions
# Code generation helpers.
def ex_lvalue(name):
    """A variable *store* (assignment-target) expression.

    Fixed docstring: the original said "load", but an lvalue is an
    assignment target, which is why the node carries an ast.Store context.
    """
    return ast.Name(name, ast.Store())
def ex_rvalue(name):
    """A variable *load* (read) expression.

    Fixed docstring: the original said "store", but an rvalue is a value
    read, which is why the node carries an ast.Load context.
    """
    return ast.Name(name, ast.Load())
def ex_literal(val):
    """An int, float, long, bool, string, or None literal with the given
    value.

    Raises TypeError for any other value type.
    """
    if val is None:
        return ast.Name('None', ast.Load())
    elif isinstance(val, bool):
        # Check bool *before* the integer types: bool is a subclass of int,
        # so the previous ordering made this branch unreachable on Python 3
        # (and `bytes(val)` would have produced b'\x01' there, not 'True').
        # str(True) == 'True' on both Python 2 and 3.
        return ast.Name(str(val), ast.Load())
    elif isinstance(val, six.integer_types):
        return ast.Num(val)
    elif isinstance(val, float):
        # The docstring always promised float support; previously floats
        # fell through to the TypeError below.
        return ast.Num(val)
    elif isinstance(val, six.string_types):
        return ast.Str(val)
    raise TypeError(u'no literal for {0}'.format(type(val)))
def ex_varassign(name, expr):
    """Assign an expression into a single variable. The expression may
    either be an `ast.expr` object or a value to be used as a literal.
    """
    # Coerce plain values to literal AST nodes before building the
    # assignment statement.
    value = expr if isinstance(expr, ast.expr) else ex_literal(expr)
    return ast.Assign([ex_lvalue(name)], value)
def ex_call(func, args):
    """A function-call expression with only positional parameters. The
    function may be an expression or the name of a function. Each
    argument may be an expression or a value to be used as a literal.
    """
    if isinstance(func, six.string_types):
        func = ex_rvalue(func)
    # Coerce plain values in the argument list to literal AST nodes.
    args = [a if isinstance(a, ast.expr) else ex_literal(a) for a in args]
    if sys.version_info[:2] < (3, 5):
        # Older signature: Call(func, args, keywords, starargs, kwargs).
        return ast.Call(func, args, [], None, None)
    return ast.Call(func, args, [])
def compile_func(arg_names, statements, name='_the_func', debug=False):
    """Compile a list of statements as the body of a function and return
    the resulting Python function. If `debug`, then print out the
    bytecode of the compiled function.

    Every generated parameter defaults to None, so the function may be
    called with any subset of its arguments.
    """
    if six.PY2:
        # Python 2 AST: parameters are ast.Name nodes with a Param context
        # and the function name must be a byte string.
        func_def = ast.FunctionDef(
            name=name.encode('utf-8'),
            args=ast.arguments(
                args=[ast.Name(n, ast.Param()) for n in arg_names],
                vararg=None,
                kwarg=None,
                defaults=[ex_literal(None) for _ in arg_names],
            ),
            body=statements,
            decorator_list=[],
        )
    else:
        # Python 3 AST: parameters are ast.arg nodes; the keyword-only
        # fields must be present even when empty.
        func_def = ast.FunctionDef(
            name=name,
            args=ast.arguments(
                args=[ast.arg(arg=n, annotation=None) for n in arg_names],
                kwonlyargs=[],
                kw_defaults=[],
                defaults=[ex_literal(None) for _ in arg_names],
            ),
            body=statements,
            decorator_list=[],
        )

    # The ast.Module signature changed in 3.8 to accept a list of types to
    # ignore.
    if sys.version_info >= (3, 8):
        mod = ast.Module([func_def], [])
    else:
        mod = ast.Module([func_def])

    # Generated nodes carry no source positions; fill them in so compile()
    # accepts the tree.
    ast.fix_missing_locations(mod)
    prog = compile(mod, '<generated>', 'exec')

    # Debug: show bytecode.
    if debug:
        dis.dis(prog)
        for const in prog.co_consts:
            if isinstance(const, types.CodeType):
                dis.dis(const)

    # Execute the module body and pull the freshly defined function out of
    # the resulting namespace.
    the_locals = {}
    exec(prog, {}, the_locals)
    return the_locals[name]
# AST nodes for the template language.
class Symbol(object):
    """A variable-substitution symbol in a template."""

    def __init__(self, ident, original):
        # ident: the symbol name; original: the raw source text, kept so
        # unknown symbols can be left intact (safe_substitute semantics).
        self.ident = ident
        self.original = original

    def __repr__(self):
        return u'Symbol(%s)' % repr(self.ident)

    def evaluate(self, env):
        """Evaluate the symbol in the environment, returning a Unicode
        string.
        """
        # Unknown symbols fall back to their literal source text.
        return env.values.get(self.ident, self.original)

    def translate(self):
        """Compile the variable lookup."""
        ident = self.ident.encode('utf-8') if six.PY2 else self.ident
        expr = ex_rvalue(VARIABLE_PREFIX + ident)
        return [expr], {ident}, set()
class Call(object):
    """A function call in a template."""

    def __init__(self, ident, args, original):
        # ident: function name (without the % delimiter).
        # args: list of argument expressions.
        # original: raw source text, used as fallback when the function
        # is not defined in the environment.
        self.ident = ident
        self.args = args
        self.original = original

    def __repr__(self):
        return u'Call(%s, %s, %s)' % (repr(self.ident), repr(self.args),
                                      repr(self.original))

    def evaluate(self, env):
        """Evaluate the function call in the environment, returning a
        Unicode string.
        """
        if self.ident in env.functions:
            arg_vals = [expr.evaluate(env) for expr in self.args]
            try:
                out = env.functions[self.ident](*arg_vals)
            except Exception as exc:
                # Function raised exception! Maybe inlining the name of
                # the exception will help debug.
                return u'<%s>' % six.text_type(exc)
            return six.text_type(out)
        else:
            # Unknown function: keep the original source text intact.
            return self.original

    def translate(self):
        """Compile the function call.

        Returns a 3-tuple: ([call expression], variable names used,
        function names used).
        """
        varnames = set()
        if six.PY2:
            ident = self.ident.encode('utf-8')
        else:
            ident = self.ident
        funcnames = set([ident])
        arg_exprs = []
        for arg in self.args:
            subexprs, subvars, subfuncs = arg.translate()
            varnames.update(subvars)
            funcnames.update(subfuncs)
            # Create a subexpression that joins the result components of
            # the arguments.
            arg_exprs.append(ex_call(
                ast.Attribute(ex_literal(u''), 'join', ast.Load()),
                [ex_call(
                    'map',
                    [
                        ex_rvalue(six.text_type.__name__),
                        ast.List(subexprs, ast.Load()),
                    ]
                )],
            ))
        subexpr_call = ex_call(
            FUNCTION_PREFIX + ident,
            arg_exprs
        )
        return [subexpr_call], varnames, funcnames
class Expression(object):
    """Top-level template construct: contains a list of text blobs,
    Symbols, and Calls.
    """

    def __init__(self, parts):
        self.parts = parts

    def __repr__(self):
        return u'Expression(%s)' % (repr(self.parts))

    def evaluate(self, env):
        """Evaluate the entire expression in the environment, returning
        a Unicode string.
        """
        # Plain strings pass through untouched; Symbols and Calls are
        # evaluated against the environment.
        rendered = [
            part if isinstance(part, six.string_types) else part.evaluate(env)
            for part in self.parts
        ]
        return u''.join(map(six.text_type, rendered))

    def translate(self):
        """Compile the expression to a list of Python AST expressions, a
        set of variable names used, and a set of function names.
        """
        expressions = []
        varnames = set()
        funcnames = set()
        for part in self.parts:
            if isinstance(part, six.string_types):
                expressions.append(ex_literal(part))
                continue
            subexprs, subvars, subfuncs = part.translate()
            expressions.extend(subexprs)
            varnames.update(subvars)
            funcnames.update(subfuncs)
        return expressions, varnames, funcnames
# Parser.
class ParseError(Exception):
    """Raised when a template expression cannot be parsed."""
    pass
class Parser(object):
"""Parses a template expression string. Instantiate the class with
the template source and call ``parse_expression``. The ``pos`` field
will indicate the character after the expression finished and
``parts`` will contain a list of Unicode strings, Symbols, and Calls
reflecting the concatenated portions of the expression.
This is a terrible, ad-hoc parser implementation based on a
left-to-right scan with no lexing step to speak of; it's probably
both inefficient and incorrect. Maybe this should eventually be
replaced with a real, accepted parsing technique (PEG, parser
generator, etc.).
"""
def __init__(self, string, in_argument=False):
""" Create a new parser.
:param in_arguments: boolean that indicates the parser is to be
used for parsing function arguments, ie. considering commas
(`ARG_SEP`) a special character
"""
self.string = string
self.in_argument = in_argument
self.pos = 0
self.parts = []
# Common parsing resources.
special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE,
ESCAPE_CHAR)
special_char_re = re.compile(r'[%s]|\Z' %
u''.join(re.escape(c) for c in special_chars))
escapable_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP)
terminator_chars = (GROUP_CLOSE,)
def parse_expression(self):
"""Parse a template expression starting at ``pos``. Resulting
components (Unicode strings, Symbols, and Calls) are added to
the ``parts`` field, a list. The ``pos`` field is updated to be
the next character after the expression.
"""
# Append comma (ARG_SEP) to the list of special characters only when
# parsing function arguments.
extra_special_chars = ()
special_char_re = self.special_char_re
if self.in_argument:
extra_special_chars = (ARG_SEP,)
special_char_re = re.compile(
r'[%s]|\Z' % u''.join(
re.escape(c) for c in
self.special_chars + extra_special_chars
)
)
text_parts = []
while self.pos < len(self.string):
char = self.string[self.pos]
if char not in self.special_chars + extra_special_chars:
# A non-special character. Skip to the next special
# character, treating the interstice as literal text.
next_pos = (
special_char_re.search(
self.string[self.pos:]).start() + self.pos
)
text_parts.append(self.string[self.pos:next_pos])
self.pos = next_pos
continue
if self.pos == len(self.string) - 1:
# The last character can never begin a structure, so we
# just interpret it as a literal character (unless it
# terminates the expression, as with , and }).
if char not in self.terminator_chars + extra_special_chars:
text_parts.append(char)
self.pos += 1
break
next_char = self.string[self.pos + 1]
if char == ESCAPE_CHAR and next_char in (self.escapable_chars +
extra_special_chars):
# An escaped special character ($$, $}, etc.). Note that
# ${ is not an escape sequence: this is ambiguous with
# the start of a symbol and it's not necessary (just
| |
#!/usr/bin/python
"""
(C) Copyright 2018-2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from daos_utils_base import DaosCommandBase
import re
class DaosCommand(DaosCommandBase):
# pylint: disable=too-many-ancestors
"""Defines a object representing a daos command."""
# Maps a DaosCommand method name to the regex used to harvest the values
# of interest from that daos subcommand's stdout. The sample-output
# comments below show what each pattern is matched against.
METHOD_REGEX = {
    "run": r"(.*)",
    "container_create": r"container ([0-9a-f-]+)",
    # daos pool list-cont returns the date, host name, and container UUID
    # as below:
    # 03/31-21:32:24.53 wolf-3 2f69b198-8478-472e-b6c8-02a451f4de1b
    # UUID is made up of 36 characters of hex and -.
    "pool_list_cont": r"([0-9a-f-]{36})",
    # Sample pool query output.
    # 04/19-18:31:26.90 wolf-3 Pool 3e59b386-fda0-404e-af7e-3ff0a38d1f81,
    #    ntarget=8, disabled=0
    # 04/19-18:31:26.90 wolf-3 Pool space info:
    # 04/19-18:31:26.90 wolf-3 - Target(VOS) count:8
    # 04/19-18:31:26.90 wolf-3 - SCM:
    # 04/19-18:31:26.90 wolf-3   Total size: 1000000000
    # 04/19-18:31:26.90 wolf-3   Free: 999997440, min:124999680,
    #    max:124999680, mean:124999680
    # 04/19-18:31:26.90 wolf-3 - NVMe:
    # 04/19-18:31:26.90 wolf-3   Total size: 0
    # 04/19-18:31:26.90 wolf-3   Free: 0, min:0, max:0, mean:0
    # 04/19-18:31:26.90 wolf-3 Rebuild idle, 0 objs, 0 recs
    "pool_query": r"(?:Pool\s*([A-Za-z0-9-]+),\s*ntarget=([0-9])," +
                  r"\s*disabled=([0-9])|Target\(VOS\) count:\s*([0-9])|" +
                  r"(?:SCM:\s+.*|NVMe:\s+.*)Total\s+size:\s+([0-9]+)" +
                  r"\s+.*Free:\s+([0-9]+),\s+min:([0-9]+),\s+" +
                  r"max:([0-9]+),\s+mean:([0-9]+)|" +
                  r"Rebuild\s*idle,\s*([0-9]+)\s*objs,\s*([0-9]+)\s*recs)",
    # Sample list-attrs output.
    # 04/19-21:16:31.62 wolf-3 Pool attributes:
    # 04/19-21:16:31.62 wolf-3 attr0
    # 04/19-21:16:31.62 wolf-3 attr1
    "pool_list_attrs": r"\b([^:\s]+)\n",
    # Sample get-attr output - no line break.
    # 04/19-21:16:32.66 wolf-3 Pool's attr2 attribute value:
    # 04/19-21:16:32.66 wolf-3 val2
    "pool_get_attr": r"\b(\S+)$",
    "container_query":
        r"Pool UUID:\s+([0-9a-f-]+)\n" +
        r"Container UUID:\s+([0-9a-f-]+)\n" +
        r"Number of snapshots:\s+(\d+)\n" +
        r"Latest Persistent Snapshot:\s+(\d+)\n" +
        r"Highest Aggregated Epoch:\s+(\d+)",
    # Sample get-attr output - no line break.
    # 04/20-17:47:07.86 wolf-3 Container's attr1 attribute value: 04
    # /20-17:47:07.86 wolf-3 val1
    "container_get_attr": r"value: \S+ \S+ (.+)$",
    # Sample list output.
    # 04/20-17:52:33.63 wolf-3 Container attributes:
    # 04/20-17:52:33.63 wolf-3 attr1
    # 04/20-17:52:33.63 wolf-3 attr2
    "container_list_attrs": r"\n \S+ \S+ (.+)"
}
def pool_query(self, pool, sys_name=None, svc=None, sys=None):
    """Query a pool.

    Args:
        pool (str): pool UUID
        sys_name (str, optional): DAOS system name context for servers.
            Defaults to None.
        svc (str, optional): the pool service replicas, e.g. '1,2,3'.
            Defaults to None.
        sys (str, optional): [description]. Defaults to None.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos pool query command fails.

    """
    kwargs = {"pool": pool, "sys_name": sys_name, "svc": svc, "sys": sys}
    return self._get_result(("pool", "query"), **kwargs)
def container_create(self, pool, sys_name=None, svc=None, cont=None,
                     path=None, cont_type=None, oclass=None,
                     chunk_size=None, properties=None, acl_file=None):
    # pylint: disable=too-many-arguments
    """Create a container.

    Args:
        pool (str): UUID of the pool in which to create the container
        sys_name (str, optional): DAOS system name context for servers.
            Defaults to None.
        svc (str, optional): the pool service replicas, e.g. '1,2,3'.
            Defaults to None.
        cont (str, optional): container UUID. Defaults to None.
        path (str, optional): container namespace path. Defaults to None.
        cont_type (str, optional): the type of container to create.
            Defaults to None.
        oclass (str, optional): object class. Defaults to None.
        chunk_size (str, optional): chunk size of files created. Supports
            suffixes: K (KB), M (MB), G (GB), T (TB), P (PB), E (EB).
            Defaults to None.
        properties (str, optional): String of comma-separated <name>:<value>
            pairs defining the container properties. Defaults to None
        acl_file (str, optional): ACL file. Defaults to None.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos container create command fails.

    """
    # The daos CLI option is literally named "type"; map cont_type to it
    # explicitly.
    kwargs = {
        "pool": pool,
        "sys_name": sys_name,
        "svc": svc,
        "cont": cont,
        "path": path,
        "type": cont_type,
        "oclass": oclass,
        "chunk_size": chunk_size,
        "properties": properties,
        "acl_file": acl_file,
    }
    return self._get_result(("container", "create"), **kwargs)
def container_destroy(self, pool, svc, cont, force=None, sys_name=None):
    """Destroy a container.

    Args:
        pool (str): UUID of the pool that holds the container
        svc (str): the pool service replicas, e.g. '1,2,3'.
        cont (str): container UUID.
        force (bool, optional): Force the container destroy. Defaults to
            None.
        sys_name (str, optional): DAOS system name context for servers.
            Defaults to None.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos container destroy command fails.

    """
    kwargs = {
        "pool": pool,
        "sys_name": sys_name,
        "svc": svc,
        "cont": cont,
        "force": force,
    }
    return self._get_result(("container", "destroy"), **kwargs)
def container_get_acl(self, pool, svc, cont,
                      verbose=False, outfile=None):
    """Get the ACL for a given container.

    Args:
        pool (str): Pool UUID
        svc (str): Service replicas
        cont (str): Container for which to get the ACL.
        verbose (bool, optional): Verbose mode.
        outfile (str, optional): Write ACL to file.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos container get-acl command fails.

    """
    kwargs = {
        "pool": pool,
        "svc": svc,
        "cont": cont,
        "verbose": verbose,
        "outfile": outfile,
    }
    return self._get_result(("container", "get-acl"), **kwargs)
def pool_list_cont(self, pool, svc, sys_name=None):
    """List containers in the given pool.

    Args:
        pool (str): Pool UUID
        svc (str): Service replicas. If there are multiple, numbers must be
            separated by comma like 1,2,3
        sys_name (str, optional): System name. Defaults to None.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos pool list-containers command fails.

    """
    kwargs = {"pool": pool, "svc": svc, "sys_name": sys_name}
    return self._get_result(("pool", "list-containers"), **kwargs)
def pool_set_attr(self, pool, attr, value, svc):
    """Set a pool attribute.

    Args:
        pool (str): Pool UUID.
        attr (str): Attribute name.
        value (str): Attribute value
        svc (str): Service replicas.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos pool set-attr command fails.

    """
    kwargs = {"pool": pool, "svc": svc, "attr": attr, "value": value}
    return self._get_result(("pool", "set-attr"), **kwargs)
def pool_get_attr(self, pool, attr, svc):
    """Get a pool attribute.

    Args:
        pool (str): Pool UUID.
        attr (str): Attribute name.
        svc (str): Service replicas.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos pool get-attr command fails.

    """
    # Docstring fixed: this method *gets* an attribute. The original text
    # said "Set pool attribute", described `attr` as "Pool UUID", and the
    # Raises section referred to the pool query command.
    return self._get_result(
        ("pool", "get-attr"), pool=pool, svc=svc, attr=attr)
def pool_list_attrs(self, pool, svc):
    """List pool attributes.

    Args:
        pool (str): Pool UUID.
        svc (str): Service replicas.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos pool list-attrs command fails.

    """
    kwargs = {"pool": pool, "svc": svc}
    return self._get_result(("pool", "list-attrs"), **kwargs)
def container_query(self, pool, cont, svc=None, sys_name=None):
    """Query a container.

    Args:
        pool (str): Pool UUID.
        cont (str): Container UUID.
        svc (str, optional): pool service replicas, e.g., '1,2,3'. Defaults
            to None.
        sys_name (str, optional): DAOS system name context for servers.
            Defaults to None.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos container query command fails.

    """
    kwargs = {
        "pool": pool,
        "svc": svc,
        "cont": cont,
        "sys_name": sys_name,
    }
    return self._get_result(("container", "query"), **kwargs)
def container_set_attr(
        self, pool, cont, attr, val, svc=None, sys_name=None):
    """Call daos container set-attr.

    Args:
        pool (str): Pool UUID.
        cont (str): Container UUID.
        attr (str): Attribute name.
        val (str): Attribute value.
        svc (str, optional): Pool service replicas, e.g., '1,2,3'. Defaults
            to None.
        sys_name (str, optional): DAOS system name context for servers.
            Defaults to None.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos container set-attr command fails.

    """
    kwargs = {
        "pool": pool,
        "svc": svc,
        "cont": cont,
        "sys_name": sys_name,
        "attr": attr,
        "value": val,
    }
    return self._get_result(("container", "set-attr"), **kwargs)
def container_get_attr(self, pool, cont, attr, svc=None, sys_name=None):
    """Call daos container get-attr.

    Args:
        pool (str): Pool UUID.
        cont (str): Container UUID.
        attr (str): Attribute name.
        svc (str, optional): Pool service replicas, e.g., '1,2,3'. Defaults
            to None.
        sys_name (str, optional): DAOS system name context for servers.
            Defaults to None.

    Returns:
        CmdResult: Object that contains exit status, stdout, and other
            information.

    Raises:
        CommandFailure: if the daos get-attr command fails.

    """
    kwargs = {
        "pool": pool,
        "svc": svc,
        "cont": cont,
        "sys_name": sys_name,
        "attr": attr,
    }
    return self._get_result(("container", "get-attr"), **kwargs)
def container_list_attrs(self, pool, cont, svc=None, sys_name=None):
"""Call daos container list-attrs.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
svc (str, optional): Pool service replicas, e.g., '1,2,3'. Defaults
to None.
sys_name (str, optional): DAOS system name context for servers.
Defaults | |
from .bbox_utils import *
import numpy as np
import torch
def get_sum_XQ(grad, box_vertices, dataset_name, box_w, box_l, sign, high_rez=False, scaling_factor=1,
               grad_copy=None):
    """
    Calculates XQ based on the sum of attributions, using the original attribution values
    :param grad: the attributions generated from the 2D pseudoimage
    :param box_vertices: the vertices of the predicted box
    :param dataset_name: dataset identifier; selects coordinate transforms and box scale
    :param box_w: predicted box width (before dataset scaling)
    :param box_l: predicted box length (before dataset scaling)
    :param sign: 'positive', 'negative', or anything else for absolute attributions
    :param high_rez: whether to upscale the resolution or use pseudoimage resolution
    :param scaling_factor: pixel-coordinate upscaling factor used when high_rez
    :param grad_copy: unused; kept for interface compatibility
    :return: Explanation Quality (XQ)
    """
    # 1) Transform the box coordinates to match the grad dimensions.
    H, W = grad.shape[0], grad.shape[1]  # image height and width
    box_vertices = transform_box_coord(H, W, box_vertices, dataset_name, high_rez, scaling_factor)

    # 2) Preprocess the box into the edge vectors used by in_box().
    AB, AD, AB_dot_AB, AD_dot_AD = box_preprocess(box_vertices)
    box_scale = get_box_scale(dataset_name)
    box_w = box_w * box_scale
    box_l = box_l * box_scale

    # 3) Accumulate attributions inside the box versus overall.
    ignore_thresh = 0.1
    attr_in_box = 0.0
    total_attr = 0
    for row in range(grad.shape[0]):
        for col in range(grad.shape[1]):
            if sign == 'positive':
                pixel_sum = np.sum((grad[row][col] > 0) * grad[row][col])
            elif sign == 'negative':
                pixel_sum = -np.sum((grad[row][col] < 0) * grad[row][col])
            else:
                pixel_sum = np.sum(abs(grad[row][col]))
            if pixel_sum < ignore_thresh:  # ignore small attributions
                continue
            total_attr += pixel_sum
            y, x = row, col
            if high_rez:
                y = row * scaling_factor
                x = col * scaling_factor
            if in_box(box_vertices[0], y, x, AB, AD, AB_dot_AB, AD_dot_AD):
                attr_in_box += pixel_sum

    # Box area matching pseudoimage dimensions (i.e. grad).
    box_area = box_w * box_l
    avg_in_box_attr = attr_in_box / box_area
    avg_attr = total_attr / (grad.shape[0] * grad.shape[1])
    print("avg_attr: {}".format(avg_attr))
    print("avg_in_box_attr: {}".format(avg_in_box_attr))
    if total_attr == 0:
        print("No attributions present!")
        return 0
    XQ = attr_in_box / total_attr
    print("XQ: {}".format(XQ))
    return XQ
def get_sum_XQ_analytics(pos_grad, neg_grad, box_vertices, dataset_name, sign, ignore_thresh, box_w=None, box_l=None,
                         high_rez=False, scaling_factor=1, grad_copy=None):
    """
    Calculates XQ based on the sum of attributions, resolution considered
    :param pos_grad: array with the per-pixel sum of positive attributions
    :param neg_grad: array with the per-pixel sum of negative attributions
    :param box_vertices: the vertices of the predicted box
    :param dataset_name: dataset identifier; selects coordinate transforms and box scale
    :param sign: 'positive' or 'negative'; selects which attribution map is used
    :param ignore_thresh: attributions below this value are ignored
    :param box_w: predicted box width (before dataset scaling). Defaults to None.
    :param box_l: predicted box length (before dataset scaling). Defaults to None.
    :param high_rez: whether to upscale the resolution or use pseudoimage resolution
    :param scaling_factor: pixel-coordinate upscaling factor used when high_rez
    :param grad_copy: unused; kept for interface compatibility
    :return: Explanation Quality (XQ), or 0 if no attributions are present
    """
    grad = None
    if sign == 'positive':
        grad = pos_grad
    elif sign == 'negative':
        grad = neg_grad

    # 1) Transform the box coordinates to match the grad dimensions.
    H, W = grad.shape[0], grad.shape[1]  # image height and width
    box_vertices = transform_box_coord(H, W, box_vertices, dataset_name, high_rez, scaling_factor)

    # 2) Preprocess the box into the edge vectors used by in_box().
    AB, AD, AB_dot_AB, AD_dot_AD = box_preprocess(box_vertices)
    box_scale = get_box_scale(dataset_name)

    # 3) Compute XQ.
    attr_in_box = 0.0
    total_attr = 0
    for i in range(grad.shape[0]):
        for j in range(grad.shape[1]):
            if grad[i][j] < ignore_thresh:
                continue
            total_attr += grad[i][j]
            # Fix: the original called copy.deepcopy(i)/copy.deepcopy(j)
            # although `copy` is never imported in this module (NameError
            # at runtime unless bbox_utils happened to re-export it), and
            # deep-copying immutable ints is a no-op anyway.
            y = i
            x = j
            if high_rez:
                y = i * scaling_factor
                x = j * scaling_factor
            if in_box(box_vertices[0], y, x, AB, AD, AB_dot_AB, AD_dot_AD):
                attr_in_box += grad[i][j]

    # Scale the box dimensions when provided (the per-area averages the
    # original computed here were never used, so they have been dropped).
    if box_w is not None and box_l is not None:
        box_w = box_w * box_scale
        box_l = box_l * box_scale

    if total_attr == 0:
        # No attributions present.
        return 0
    XQ = attr_in_box / total_attr
    return XQ
def get_sum_XQ_analytics_fast(pos_grad, neg_grad, box_vertices, dataset_name, sign, ignore_thresh, box_loc, vicinity):
    """
    Calculates XQ based on the sum of attributions, resolution not considered
    :param pos_grad: numpy array containing sum of positive gradients at each location
    :param neg_grad: numpy array containing sum of negative gradients at each location
    :param box_vertices: The vertices of the predicted box
    :param dataset_name: dataset identifier; selects coordinate transforms
    :param sign: indicates the type of attributions used, positive or negative
    :param ignore_thresh: the threshold below which the attributions would be ignored
    :param box_loc: location of the predicted box
    :param vicinity: search vicinity w.r.t. the box_center, class dependent
    :return: tuple (XQ, attributions in box, attributions outside box, total attributions)
    """
    grad = None
    if sign == 'positive':
        grad = pos_grad
    elif sign == 'negative':
        grad = neg_grad

    # 1) Transform the box coordinates to match the grad dimensions.
    H, W = grad.shape[0], grad.shape[1]
    box_vertices = transform_box_coord_pseudo(H, W, box_vertices, dataset_name)
    box_loc = transform_box_center_coord(box_loc, dataset_name)

    # 2) Preprocess the box into the edge vectors used by the mask builder.
    AB, AD, AB_dot_AB, AD_dot_AD = box_preprocess(box_vertices)

    # 3) Compute XQ via a binary in-box mask instead of per-pixel loops.
    box_mask = np.zeros((H, W))
    generate_box_mask(box_mask, box_loc, vicinity, box_vertices[0], AB, AD, AB_dot_AB, AD_dot_AD)
    # Boolean indexing yields 1D arrays of the surviving attribution values.
    total_attr = np.sum(grad[grad >= ignore_thresh])
    in_box_vals = grad[box_mask == 1]
    attr_in_box = np.sum(in_box_vals[in_box_vals >= ignore_thresh])

    if total_attr == 0:
        print("No attributions present!")
        return 0, 0, 0, 0
    XQ = attr_in_box / total_attr
    dist_attr_sum = total_attr - attr_in_box
    return XQ, attr_in_box, dist_attr_sum, total_attr
def get_sum_XQ_analytics_fast_tensor(pos_grad, neg_grad, box_vertices, dataset_name, sign, ignore_thresh, box_loc, vicinity):
    """
    Calculates XQ based on the sum of attributions, resolution not considered
    :param vicinity: search vicinity w.r.t. the box_center, class dependent
    :param ignore_thresh: the threshold below which the attributions would be ignored
    :param box_loc: location of the predicted box
    :param sign: indicates the type of attributions shown, positive or negative
    :param neg_grad: torch tensor containing sum of negative gradients at each location
    :param pos_grad: torch tensor containing sum of positive gradients at each location
    :param dataset_name:
    :param box_vertices: The vertices of the predicted box
    :return: Explanation Quality (XQ)
    """
    # NOTE(review): any sign other than 'positive'/'negative' leaves grad as
    # None and fails on grad.size() below — presumably callers only pass
    # those two values; confirm.
    grad = None
    if sign == 'positive':
        grad = pos_grad
    elif sign == 'negative':
        grad = neg_grad
    '''1) transform the box coordinates to match with grad dimensions'''
    H, W = grad.size()[0], grad.size()[1]  # image height and width
    box_vertices = transform_box_coord_pseudo(H, W, box_vertices, dataset_name)
    box_loc = transform_box_center_coord_tensor(box_loc, dataset_name)
    print("dataset_name in get_sum_XQ_analytics_fast_tensor: {}".format(dataset_name))
    print("box_vertices in psuedo image coordinates: {}".format(box_vertices))
    print("box_loc in psuedo image coordinates: {}".format(box_loc))
    '''2) preprocess the box to get important parameters'''
    AB, AD, AB_dot_AB, AD_dot_AD = box_preprocess_tensor(box_vertices)
    '''3) compute XQ'''
    # All tensors are created on the GPU; this function requires CUDA.
    box_mask = torch.zeros((H, W)).cuda()
    zero_tensor = torch.zeros((H, W), dtype=torch.float).cuda()
    if dataset_name == "WaymoDataset":
        # Waymo gradients arrive as float64; match the dtype so torch.where
        # does not raise a type mismatch.
        zero_tensor = torch.zeros((H, W), dtype=torch.double).cuda()
    generate_box_mask_tensor(box_mask, box_loc, vicinity, box_vertices[0], AB, AD, AB_dot_AB, AD_dot_AD)
    # Zero out attributions below the threshold, then zero out everything
    # outside the box mask; the two sums give total and in-box attribution.
    filtered_attr = torch.where(grad >= ignore_thresh, grad, zero_tensor)
    masked_attr = torch.where(box_mask == 1, filtered_attr, zero_tensor)
    total_attr = torch.sum(filtered_attr)
    attr_in_box = torch.sum(masked_attr)
    if total_attr == 0:
        print("No attributions present!")
        # Return the zero tensor four times to keep the 4-tuple shape.
        return total_attr, total_attr, total_attr, total_attr
    XQ = attr_in_box / total_attr
    dist_attr_sum = total_attr - attr_in_box
    print("\ncompleted XC calculation by tensor operations\n")
    return XQ, attr_in_box, dist_attr_sum, total_attr
def get_cnt_XQ_analytics_fast_tensor(pos_grad, neg_grad, box_vertices, dataset_name, sign, ignore_thresh, box_loc, vicinity):
"""
Calculates XQ based on the count of pixels exceeding certain attr threshold, resolution not considered
:param vicinity: search vicinity w.r.t. the box_center, class dependent
:param ignore_thresh: the threshold below which the attributions would be ignored
:param box_loc: location of the predicted box
:param sign: indicates the type of attributions shown, positive or negative
:param neg_grad: numpy array containing sum of negative gradients at each location
:param pos_grad: numpy array containing sum of positive gradients at each location
:param dataset_name:
:param box_vertices: The vertices of the predicted box
:return: Explanation Quality (XQ)
"""
# print('box_vertices before transformation: {}'.format(box_vertices))
grad = None
if sign == 'positive':
grad = pos_grad
elif sign == 'negative':
grad = neg_grad
'''1) transform the box coordinates to | |
<reponame>olliemath/pypy<filename>rpython/rlib/test/test_rposix.py<gh_stars>0
from hypothesis import given, strategies as st, assume
import pytest
from rpython.rtyper.test.test_llinterp import interpret
from rpython.translator.c.test.test_genc import compile
from rpython.tool.pytest.expecttest import ExpectTest
from rpython.tool.udir import udir
from rpython.rlib import rposix, rposix_stat, rstring
import os, sys
import errno
import py
def rposix_requires(funcname):
    """Return a pytest skip marker for tests needing rposix.<funcname>."""
    missing = not hasattr(rposix, funcname)
    return pytest.mark.skipif(missing,
                              reason="Requires rposix.%s()" % funcname)
# Decorator restricting a test to Windows hosts (os.name == 'nt').
win_only = pytest.mark.skipif("os.name != 'nt'")
class TestPosixFunction:
    """Direct tests of the rposix wrappers against the os module.

    Each test exercises an rposix function and compares its behaviour with
    the corresponding CPython os function (this is Python-2-era RPython test
    code: note the uses of the `file` builtin and str-based os.write).
    """
    def test_access(self):
        # rposix.access must agree with os.access for every mode combination.
        filename = str(udir.join('test_access.txt'))
        fd = file(filename, 'w')
        fd.close()
        for mode in os.R_OK, os.W_OK, os.X_OK, os.R_OK | os.W_OK | os.X_OK:
            result = rposix.access(filename, mode)
            assert result == os.access(filename, mode)
    def test_times(self):
        """
        posix.times should compile as an RPython function and should return a
        five-tuple giving float-representations (seconds, effectively) of the four
        fields from the underlying struct tms and the return value.
        """
        # NOTE: `compile` here is the RPython test compiler imported at the top
        # of this file, not the builtin; the compiled function returns the
        # str() of the tuple, which eval() turns back into a Python tuple.
        times = eval(compile(lambda: str(os.times()), ())())
        assert isinstance(times, tuple)
        assert len(times) == 5
        for value in times:
            assert isinstance(value, float)
    @py.test.mark.skipif("not hasattr(os, 'getlogin')")
    def test_getlogin(self):
        try:
            expected = os.getlogin()
        except OSError as e:
            py.test.skip("the underlying os.getlogin() failed: %s" % e)
        data = rposix.getlogin()
        assert data == expected
    @win_only
    def test_utimes(self):
        # Windows support centiseconds
        def f(fname, t1):
            os.utime(fname, (t1, t1))
        fname = udir.join('test_utimes.txt')
        fname.ensure()
        t1 = 1159195039.25
        compile(f, (str, float))(str(fname), t1)
        assert t1 == os.stat(str(fname)).st_mtime
        # value deliberately beyond 32-bit time_t range
        t1 = 5000000000.0
        compile(f, (str, float))(str(fname), t1)
        assert t1 == os.stat(str(fname)).st_mtime
    def test_utime_negative_fraction(self):
        def f(fname, t1):
            os.utime(fname, (t1, t1))
        fname = udir.join('test_utime_negative_fraction.txt')
        fname.ensure()
        t1 = -123.75
        compile(f, (str, float))(str(fname), t1)
        got = os.stat(str(fname)).st_mtime
        # either truncated or exact sub-second mtime is accepted
        assert got == -123 or got == -123.75
    @win_only
    def test__getfullpathname(self):
        # compare against the platform module's private _getfullpathname
        posix = __import__(os.name)
        sysdrv = os.getenv('SystemDrive', 'C:')
        stuff = sysdrv + 'stuff'
        data = rposix.getfullpathname(stuff)
        assert data == posix._getfullpathname(stuff)
        # the most intriguing failure of ntpath.py should not repeat, here:
        assert not data.endswith(stuff)
    @win_only
    def test__getfullpathname_long(self):
        # byte-string paths beyond MAX_PATH raise; unicode paths succeed
        stuff = "C:" + "\\abcd" * 100
        py.test.raises(WindowsError, rposix.getfullpathname, stuff)
        ustuff = u"C:" + u"\\abcd" * 100
        res = rposix.getfullpathname(ustuff)
        assert res == ustuff
    def test_getcwd(self):
        assert rposix.getcwd() == os.getcwd()
    def test_chdir(self):
        def check_special_envvar():
            # On Windows, chdir must update the hidden '=X:' environment
            # variable that records the per-drive current directory.
            if sys.platform != 'win32':
                return
            pwd = os.getcwd()
            import ctypes
            buf = ctypes.create_string_buffer(1000)
            len = ctypes.windll.kernel32.GetEnvironmentVariableA('=%c:' % pwd[0], buf, 1000)
            if (len == 0) and "WINGDB_PYTHON" in os.environ:
                # the ctypes call seems not to work in the Wing debugger
                return
            assert str(buf.value).lower() == pwd.lower()
            # ctypes returns the drive letter in uppercase,
            # os.getcwd does not,
            # but there may be uppercase in os.getcwd path
        pwd = os.getcwd()
        try:
            check_special_envvar()
            rposix.chdir('..')
            assert os.getcwd() == os.path.dirname(pwd)
            check_special_envvar()
        finally:
            os.chdir(pwd)
    def test_mkdir(self):
        filename = str(udir.join('test_mkdir.dir'))
        rposix.mkdir(filename, 0o777)
        # second creation of the same directory must fail with EEXIST
        with py.test.raises(OSError) as excinfo:
            rposix.mkdir(filename, 0o777)
        assert excinfo.value.errno == errno.EEXIST
        if sys.platform == 'win32':
            assert excinfo.type is WindowsError
    @rposix_requires('mkdirat')
    def test_mkdirat(self):
        relpath = 'test_mkdirat.dir'
        filename = str(udir.join(relpath))
        dirfd = os.open(os.path.dirname(filename), os.O_RDONLY)
        try:
            rposix.mkdirat(relpath, 0o777, dir_fd=dirfd)
            with py.test.raises(OSError) as excinfo:
                rposix.mkdirat(relpath, 0o777, dir_fd=dirfd)
            assert excinfo.value.errno == errno.EEXIST
        finally:
            os.close(dirfd)
    def test_strerror(self):
        assert rposix.strerror(2) == os.strerror(2)
    def test_system(self):
        # run a shell command redirected to a file and check its output
        filename = str(udir.join('test_system.txt'))
        arg = '%s -c "print 1+1" > %s' % (sys.executable, filename)
        data = rposix.system(arg)
        assert data == 0
        with file(filename) as f:
            assert f.read().strip() == '2'
        os.unlink(filename)
    @py.test.mark.skipif("os.name != 'posix'")
    def test_execve(self):
        EXECVE_ENV = {"foo": "bar", "baz": "quux"}
        def run_execve(program, args=None, env=None, do_path_lookup=False):
            if args is None:
                args = [program]
            else:
                args = [program] + args
            if env is None:
                env = {}
            # we cannot directly call execve() because it replaces the
            # current process.
            fd_read, fd_write = os.pipe()
            childpid = os.fork()
            if childpid == 0:
                # in the child
                os.close(fd_read)
                os.dup2(fd_write, 1) # stdout
                os.close(fd_write)
                if do_path_lookup:
                    os.execvp(program, args)
                else:
                    rposix.execve(program, args, env)
                assert 0, "should not arrive here"
            else:
                # in the parent
                os.close(fd_write)
                child_stdout = []
                while True:
                    data = os.read(fd_read, 4096)
                    if not data: break # closed
                    child_stdout.append(data)
                pid, status = os.waitpid(childpid, 0)
                os.close(fd_read)
                return status, ''.join(child_stdout)
        # Test exit status and code
        result, child_stdout = run_execve("/usr/bin/which", ["true"], do_path_lookup=True)
        result, child_stdout = run_execve(child_stdout.strip()) # /bin/true or /usr/bin/true
        assert os.WIFEXITED(result)
        assert os.WEXITSTATUS(result) == 0
        result, child_stdout = run_execve("/usr/bin/which", ["false"], do_path_lookup=True)
        result, child_stdout = run_execve(child_stdout.strip()) # /bin/false or /usr/bin/false
        assert os.WIFEXITED(result)
        assert os.WEXITSTATUS(result) == 1
        # Test environment
        result, child_stdout = run_execve("/usr/bin/env", env=EXECVE_ENV)
        assert os.WIFEXITED(result)
        assert os.WEXITSTATUS(result) == 0
        assert dict([line.split('=') for line in child_stdout.splitlines()]) == EXECVE_ENV
        # The following won't actually execute anything, so they don't need
        # a child process helper.
        # If the target does not exist, an OSError should result
        info = py.test.raises(
            OSError, rposix.execve, "this/file/is/non/existent", [], {})
        assert info.value.errno == errno.ENOENT
        # If the target is not executable, an OSError should result
        info = py.test.raises(
            OSError, rposix.execve, "/etc/passwd", [], {})
        assert info.value.errno == errno.EACCES
    def test_os_write(self):
        #Same as test in rpython/test/test_rbuiltin
        fname = str(udir.join('os_test.txt'))
        fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0o777)
        assert fd >= 0
        rposix.write(fd, 'Hello world')
        os.close(fd)
        with open(fname) as fid:
            assert fid.read() == "Hello world"
        fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0o777)
        os.close(fd)
        # writing to a closed fd must raise OSError
        py.test.raises(OSError, rposix.write, fd, 'Hello world')
    def test_os_close(self):
        fname = str(udir.join('os_test.txt'))
        fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0o777)
        assert fd >= 0
        os.write(fd, 'Hello world')
        rposix.close(fd)
        # closing twice must raise OSError
        py.test.raises(OSError, rposix.close, fd)
    def test_os_lseek(self):
        fname = str(udir.join('os_test.txt'))
        fd = os.open(fname, os.O_RDWR|os.O_CREAT, 0o777)
        assert fd >= 0
        os.write(fd, 'Hello world')
        rposix.lseek(fd,0,0)
        assert os.read(fd, 11) == 'Hello world'
        os.close(fd)
        py.test.raises(OSError, rposix.lseek, fd, 0, 0)
    def test_os_fsync(self):
        fname = str(udir.join('os_test.txt'))
        fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0o777)
        assert fd >= 0
        os.write(fd, 'Hello world')
        rposix.fsync(fd)
        os.close(fd)
        fid = open(fname)
        assert fid.read() == 'Hello world'
        fid.close()
        py.test.raises(OSError, rposix.fsync, fd)
    @py.test.mark.skipif("not hasattr(os, 'fdatasync')")
    def test_os_fdatasync(self):
        fname = str(udir.join('os_test.txt'))
        fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0o777)
        assert fd >= 0
        os.write(fd, 'Hello world')
        rposix.fdatasync(fd)
        # NOTE(review): `fid` is opened but never closed here, unlike
        # test_os_fsync above -- confirm whether that is intentional.
        fid = open(fname)
        assert fid.read() == 'Hello world'
        os.close(fd)
        py.test.raises(OSError, rposix.fdatasync, fd)
    def test_os_kill(self):
        # spawn a sleeping child and terminate it with rposix.kill
        import subprocess
        import signal
        proc = subprocess.Popen([sys.executable, "-c",
                                 "import time;"
                                 "time.sleep(10)",
                                 ],
                                )
        rposix.kill(proc.pid, signal.SIGTERM)
        if os.name == 'nt':
            expected = signal.SIGTERM
        else:
            # POSIX wait() reports death-by-signal as a negative returncode
            expected = -signal.SIGTERM
        assert proc.wait() == expected
    def test_isatty(self):
        assert rposix.isatty(-1) is False
    @py.test.mark.skipif("not hasattr(rposix, 'makedev')")
    def test_makedev(self):
        # major/minor must round-trip through makedev
        dev = rposix.makedev(24, 7)
        assert rposix.major(dev) == 24
        assert rposix.minor(dev) == 7
    @py.test.mark.skipif("not hasattr(rposix, 'memfd_create')")
    def test_memfd_create(self):
        fd = rposix.memfd_create("abc", rposix.MFD_CLOEXEC)
        try:
            s = "defghi?"
            os.write(fd, s)
        finally:
            os.close(fd)
@py.test.mark.skipif("not hasattr(os, 'ttyname')")
class TestOsExpect(ExpectTest):
    """ttyname tests run through the ExpectTest harness."""
    def test_ttyname(self):
        # NOTE(review): self.run_test executes the nested function through the
        # ExpectTest machinery -- presumably in a separate process with a real
        # tty on fd 0; confirm in rpython.tool.pytest.expecttest.
        def f():
            import os
            from rpython.rtyper.test.test_llinterp import interpret
            def ll_to_string(s):
                return ''.join(s.chars)
            def f(num):
                try:
                    return os.ttyname(num)
                except OSError:
                    return ''
            # interpret() must agree with the direct call for a valid tty fd,
            # and return '' for an fd that is certainly not open (338)
            assert ll_to_string(interpret(f, [0])) == f(0)
            assert ll_to_string(interpret(f, [338])) == ''
        self.run_test(f)
def ll_to_string(s):
    """Join the .chars array of a low-level string object into a Python str."""
    chars = s.chars
    return ''.join(chars)
class UnicodeWithEncoding:
    """Wrapper around a unicode string offering both byte and unicode views.

    as_bytes() encodes with the platform filesystem convention (MBCS on
    Windows, UTF-8 elsewhere); as_unicode() returns the wrapped string as-is.
    """
    is_unicode = True
    def __init__(self, unistr):
        self.unistr = unistr
    def as_unicode(self):
        """Return the wrapped unicode string unchanged."""
        return self.unistr
    if sys.platform == 'win32':
        def as_bytes(self):
            """Encode the wrapped string with MBCS (Windows)."""
            from rpython.rlib.runicode import unicode_encode_mbcs
            encoded = unicode_encode_mbcs(self.unistr, len(self.unistr),
                                          "strict")
            return rstring.assert_str0(encoded)
    else:
        def as_bytes(self):
            """Encode the wrapped string with UTF-8 (non-Windows)."""
            from rpython.rlib.runicode import unicode_encode_utf_8
            encoded = unicode_encode_utf_8(self.unistr, len(self.unistr),
                                           "strict")
            return rstring.assert_str0(encoded)
class BasePosixUnicodeOrAscii:
    def setup_method(self, method):
        """Create the test file named by self._get_filename() and build the
        path objects (plain str on Windows, UnicodeWithEncoding elsewhere)."""
        self.ufilename = self._get_filename()
        try:
            f = file(self.ufilename, 'w')
        except UnicodeEncodeError:
            # filesystem encoding cannot represent the test filename
            py.test.skip("encoding not good enough")
        f.write("test")
        f.close()
        if sys.platform == 'win32' and isinstance(self.ufilename, str):
            self.path = self.ufilename
            self.path2 = self.ufilename + ".new"
        else:
            # wrap so rposix receives objects with as_bytes()/as_unicode()
            self.path = UnicodeWithEncoding(self.ufilename)
            self.path2 = UnicodeWithEncoding(self.ufilename + ".new")
    def _teardown_method(self, method):
        """Remove the files created by setup_method.

        NOTE(review): the leading underscore means pytest does NOT invoke this
        automatically (the hook is spelled ``teardown_method``), so the files
        are never cleaned up between tests -- confirm whether the rename was
        intentional before changing it, since some tests delete the file
        themselves.
        """
        for path in [self.ufilename + ".new", self.ufilename]:
            if os.path.exists(path):
                os.unlink(path)
    def test_open(self):
        """open/read/close round-trip executed through the RPython interpreter."""
        def f():
            try:
                fd = os.open(self.path, os.O_RDONLY, 0o777)
                try:
                    text = os.read(fd, 50)
                    return text
                finally:
                    os.close(fd)
            except OSError:
                return ''
        # setup_method wrote "test" into the file
        assert ll_to_string(interpret(f, [])) == "test"
    def test_stat(self):
        """rposix_stat.stat st_mtime must match os.stat's."""
        def f():
            return rposix_stat.stat(self.path).st_mtime
        if sys.platform == 'win32':
            # double vs. float, be satisfied with sub-millisec resolution
            assert abs(interpret(f, []) - os.stat(self.ufilename).st_mtime) < 1e-4
        else:
            assert interpret(f, []) == os.stat(self.ufilename).st_mtime
    def test_access(self):
        """The setup file must be readable via rposix.access."""
        def f():
            return rposix.access(self.path, os.R_OK)
        assert interpret(f, []) == 1
    def test_utime(self):
        """rposix.utime(path, None) must interpret without crashing."""
        def f():
            return rposix.utime(self.path, None)
        interpret(f, []) # does not crash
    def test_chmod(self):
        """rposix.chmod must interpret without crashing."""
        def f():
            return rposix.chmod(self.path, 0o777)
        interpret(f, []) # does not crash
    def test_unlink(self):
        """rposix.unlink must actually remove the file."""
        def f():
            return rposix.unlink(self.path)
        interpret(f, [])
        assert not os.path.exists(self.ufilename)
    def test_rename(self):
        """rposix.rename must move the file to the '.new' path."""
        def f():
            return rposix.rename(self.path, self.path2)
        interpret(f, [])
        assert not os.path.exists(self.ufilename)
        assert os.path.exists(self.ufilename + '.new')
    def test_replace(self):
        """rposix.replace must move the file to the '.new' path."""
        def f():
            return rposix.replace(self.path, self.path2)
        interpret(f, [])
        assert not os.path.exists(self.ufilename)
        assert os.path.exists(self.ufilename + '.new')
    def test_listdir(self):
        """rposix.listdir of the parent dir must include the test file."""
        # NOTE(review): this local `udir` shadows the module-level udir import.
        udir = UnicodeWithEncoding(os.path.dirname(self.ufilename))
        if sys.platform == 'win32':
            def f():
                if isinstance(udir.as_unicode(), str):
                    _udir = udir.as_unicode()
                    _res = ', '
                else:
                    _udir = udir
                    _res = u', '
                return _res.join(rposix.listdir(_udir))
            result = interpret(f, [])
            assert os.path.basename(self.ufilename) in ll_to_string(result)
        else:
            def f():
                return ', '.join(rposix.listdir(udir))
            result = interpret(f, [])
            # non-Windows listdir returns bytes; compare utf-8 encoded name
            assert (os.path.basename(self.ufilename).encode('utf-8') in
                    ll_to_string(result))
def test_chdir(self):
| |
ML_SSE_m128_v1int32)
# Operator bindings for x86 SSE/AVX intrinsics used by the code generator.
# NOTE(review): output_precision is ML_SSE_m128_v4int32 although the intrinsic
# name says epi64x -- confirm this is the intended register view.
_mm_set1_epi64x = EmmIntrin("_mm_set1_epi64x", arity = 1, force_folding = True,
                            output_precision = ML_SSE_m128_v4int32)
# Conversion of a scalar float contained in a __m128 registers to a signed
# integer contained also in a __m128 register
_mm_cvt_ss2si = XmmIntrin("_mm_cvt_ss2si", arity = 1)
_mm_cvtss_si32 = _mm_cvt_ss2si # Both generate the same cvtss2si instruction
_mm_cvtsd_si64 = EmmIntrin("_mm_cvtsd_si64", arity = 1)
_mm_cvtsd_si32 = EmmIntrin("_mm_cvtsd_si32", arity = 1)
# extract the scalar element of a __m128 as a plain float32/float64
_mm_cvtss_f32 = XmmIntrin("_mm_cvtss_f32", arity = 1,
                          output_precision = ML_Binary32)
_mm_cvtsd_f64 = XmmIntrin("_mm_cvtsd_f64", arity = 1,
                          output_precision = ML_Binary64)
# round-to-nearest variants: the rounding-mode immediate is pre-bound
_mm_round_ss_rn = SmmIntrin("_mm_round_ss",
                            arg_map = {
                                0: FO_Arg(0),
                                1: FO_Arg(0),
                                2: "_MM_FROUND_TO_NEAREST_INT"
                            },
                            arity = 1,
                            output_precision = ML_SSE_m128_v1float32)
_mm_round_sd_rn = SmmIntrin("_mm_round_sd",
                            arg_map = {
                                0: FO_Arg(0),
                                1: FO_Arg(0),
                                2: "_MM_FROUND_TO_NEAREST_INT"
                            },
                            arity = 1,
                            output_precision = ML_SSE_m128_v1float64)
# 3-to-5-cycle latency / 1-to-2-cycle throughput approximate reciprocal, with a
# maximum relative error of 1.5 * 2^(-12).
_mm_rcp_ss = XmmIntrin("_mm_rcp_ss", arity = 1,
                       output_precision = ML_SSE_m128_v1float32)
_mm_rcp_ps = XmmIntrin("_mm_rcp_ps", arity = 1,
                       output_precision = ML_SSE_m128_v4float32)
_mm256_rcp_ps = ImmIntrin("_mm256_rcp_ps", arity = 1,
                          output_precision = ML_AVX_m256_v8float32)
_mm_add_ss = XmmIntrin("_mm_add_ss", arity = 2,
                       output_precision = ML_SSE_m128_v1float32)
_mm_mul_ss = XmmIntrin("_mm_mul_ss", arity = 2,
                       output_precision = ML_SSE_m128_v1float32)
# leading-zero count (BMI/LZCNT)
_lzcnt_u32 = ImmIntrin("_lzcnt_u32", arity = 1,
                       output_precision = ML_UInt32)
_lzcnt_u64 = ImmIntrin("_lzcnt_u64", arity = 1,
                       output_precision = ML_UInt64)
# SSE2 instructions
_mm_unpackhi_pd = EmmIntrin("_mm_unpackhi_pd", arity = 2,
                            output_precision = ML_SSE_m128_v2float64)
_mm_unpacklo_pd = EmmIntrin("_mm_unpacklo_pd", arity = 2,
                            output_precision = ML_SSE_m128_v2float64)
# SSE4.1 instructions
_mm_mullo_epi32 = SmmIntrin("_mm_mullo_epi32", arity = 2,
                            output_precision = ML_SSE_m128_v4int32)
# AVX instructions
_mm256_cvtepi32_pd = ImmIntrin("_mm256_cvtepi32_pd", arity = 1,
                               output_precision = ML_AVX_m256_v4float64)
_mm256_extractf128_ps = ImmIntrin("_mm256_extractf128_ps", arity = 2,
                                  output_precision = ML_SSE_m128_v4float32)
_mm256_extractf128_si256 = ImmIntrin("_mm256_extractf128_si256", arity = 2,
                                     output_precision = ML_SSE_m128_v4int32)
# NOTE(review): output_precision is a float32 vector although the intrinsic
# operates on si256 integer data -- confirm the intended typing.
_mm256_insertf128_si256 = ImmIntrin("_mm256_insertf128_si256", arity = 3,
                                    output_precision = ML_SSE_m128_v4float32)
_mm256_permute_ps = ImmIntrin("_mm256_permute_ps", arity = 2,
                              output_precision = ML_AVX_m256_v8float32)
_mm256_unpackhi_pd = ImmIntrin("_mm256_unpackhi_pd", arity = 2,
                               output_precision = ML_AVX_m256_v4float64)
_mm256_unpacklo_pd = ImmIntrin("_mm256_unpacklo_pd", arity = 2,
                               output_precision = ML_AVX_m256_v4float64)
## AVX conversion metablock from 4 int64 to 4 packed double,
# with the condition that the 4 int64 fit in 4 int32 without overflow.
# @param optree is a Conversion.
# Details : input vector looks like D1 D0 | C1 C0 | B1 B0 | A1 A0
# and we want to convert D0 | C0 | B0 | A0 to double. We do this in 4 steps:
# 1: cast to 8 int32s
# 2: permute 32-bit words to get lower-significance words next to each other
# 3: extract the 2 lower words from high-256-bit part to form a vector of 4
# int32s corresponding to the lower parts of the 4 int64s.
# 4: convert the 4 int32s to 4 float64s
def conversion_to_avx_mm256_cvtepi64_pd(optree):
    """Lower Conversion(v4int64 -> v4float64) for AVX, assuming each int64
    fits in an int32 (see the 4-step description in the comment block above).

    :param optree: Conversion node whose input is a 256-bit vector of int64
    :return: equivalent Conversion built on a v4int32 extraction
    """
    # step 1: reinterpret the 4 int64s as 8 float32 lanes
    ymm0 = TypeCast(
        optree.get_input(0),
        precision=ML_AVX_m256_v8float32,
        tag="avx_conv_cast"
    )
    # step 2: bring the low-significance 32-bit words next to each other
    d1c1d0c0b1a1b0a0 = Permute(ymm0,
                               Constant(
                                   # Reorder [3, 2, 1, 0] -> [3, 1, 2, 0]
                                   int('3120', base = 4),
                                   precision = ML_Int32
                               ),
                               precision = ymm0.get_precision()) # __m256
    # NOTE(review): __m256d_d1c1d0c0b1a1b0a0 is never used below -- confirm
    # whether it is dead code.
    __m256d_d1c1d0c0b1a1b0a0 = TypeCast(d1c1d0c0b1a1b0a0,
                                        precision = ML_AVX_m256_v4float64)
    __m128d_b1a1b0a0 = TypeCast(d1c1d0c0b1a1b0a0,
                                precision = ML_SSE_m128_v2float64)
    # step 3: extract the upper 128-bit half and merge the low words
    d1c1d0c0 = Extract(d1c1d0c0b1a1b0a0,
                       Constant(1, precision = ML_Int32),
                       precision = ML_SSE_m128_v4float32) # __m128
    d0c0b0a0 = VectorUnpack(
        __m128d_b1a1b0a0,
        TypeCast(d1c1d0c0, precision = ML_SSE_m128_v2float64),
        precision = ML_SSE_m128_v2float64
    ) # __m128d
    # step 4: convert the packed 4 int32s to 4 float64s
    __m128i_d0c0b0a0 = TypeCast(d0c0b0a0, precision = ML_SSE_m128_v4int32)
    result = Conversion(__m128i_d0c0b0a0, precision = ML_AVX_m256_v4float64)
    return result
## AVX typecast metablock from 4 float32 to 2 float64
def _mm256_castps256_pd128(optree):
    """Typecast metablock: reinterpret an AVX v8float32 value as an SSE
    v2float64 value via a two-step cast through v4float32."""
    source = optree.get_input(0)
    as_v4f32 = TypeCast(source, precision=ML_SSE_m128_v4float32, tag = "castps256_lvl0")
    return TypeCast(as_v4f32, precision=ML_SSE_m128_v2float64, tag = "castps256_lvl1")
# AVX2 instructions
# packed signed-int32 maximum over 256-bit registers
_mm256_max_epi32 = ImmIntrin("_mm256_max_epi32", arity = 2,
                             output_precision = ML_AVX_m256_v8int32)
# AVX2 bitwise AND of 256 bits representing integer data
_mm256_and_si256 = ImmIntrin("_mm256_and_si256", arity = 2,
                             output_precision = ML_AVX_m256_v8int32)
## check whether @p optree is not a bit shift by a uniform vector constant
def variable_shift_check(optree):
    """Return True when optree's shift amount is NOT a uniform vector constant."""
    is_uniform = uniform_shift_check(optree)
    return not is_uniform
## If optree is vector uniform constant modify it to be a
# conversion between a scalar constant and a vector
def vector_constant_op(optree):
    """Rewrite a uniform vector Constant as a Conversion of a scalar Constant.

    :param optree: Constant node with a vector precision
    :return: Conversion node broadcasting the scalar constant to op_format
    :raises Exception: if the constant components are virtual mask values
    :raises NotImplementedError: if the vector constant is not uniform
    """
    assert isinstance(optree, Constant)
    cst_value_v = optree.get_value()
    op_format = optree.get_precision()
    if not uniform_list_check(cst_value_v):
        # bug-hardening: previously raised without any message
        raise NotImplementedError(
            "vector_constant_op only supports uniform vector constants, got {}".format(cst_value_v))
    scalar_format = op_format.get_scalar_format()
    if isinstance(cst_value_v[0], VIRTUAL_CST_MASK):
        # virtual mask constants have no scalar counterpart to broadcast
        raise Exception(
            "cannot build scalar broadcast from virtual mask constant {}".format(cst_value_v[0]))
    scalar_cst = Constant(cst_value_v[0], precision = scalar_format)
    ## TODO: Conversion class may be changed to VectorBroadCast
    return Conversion(scalar_cst, precision = op_format)
def x86_fma_intrinsic_builder(intr_name):
    """Build a scalar float32 FMA operator: each argument is broadcast into a
    __m128 with _mm_set_ss, the intrinsic is applied, and the scalar result is
    extracted with _mm_cvtss_f32."""
    fma_operator = FunctionOperator(
        intr_name, arity = 3,
        output_precision = ML_SSE_m128_v1float32,
        require_header = ["immintrin.h"])
    wrapped_call = fma_operator(
        _mm_set_ss(FO_Arg(0)),
        _mm_set_ss(FO_Arg(1)),
        _mm_set_ss(FO_Arg(2))
    )
    return _mm_cvtss_f32(wrapped_call)
def x86_fmad_intrinsic_builder(intr_name):
    """Build a scalar float64 FMA operator: each argument is broadcast into a
    __m128d with _mm_set_sd, the intrinsic is applied, and the scalar result
    is extracted with _mm_cvtsd_f64."""
    fma_operator = FunctionOperator(
        intr_name, arity = 3,
        output_precision = ML_SSE_m128_v1float64,
        require_header = ["immintrin.h"])
    wrapped_call = fma_operator(
        _mm_set_sd(FO_Arg(0)),
        _mm_set_sd(FO_Arg(1)),
        _mm_set_sd(FO_Arg(2))
    )
    return _mm_cvtsd_f64(wrapped_call)
## Builder for x86 FMA intrinsic within XMM register
# (native, no conversions)
#
def x86_fma_intr_builder_native(intr_name,
                                output_precision = ML_SSE_m128_v1float32):
    """Build an FMA operator working directly on __m128 values
    (no scalar wrapping/unwrapping conversions)."""
    return FunctionOperator(
        intr_name, arity = 3,
        output_precision = output_precision,
        require_header = ["immintrin.h"])
def x86_fmad_intr_builder_native(intr_name,
                                 output_precision = ML_SSE_m128_v1float64):
    """Build an FMA operator working directly on __m128d values
    (no scalar wrapping/unwrapping conversions)."""
    return FunctionOperator(
        intr_name, arity = 3,
        output_precision = output_precision,
        require_header = ["immintrin.h"])
## Convert a v4 to m128 conversion optree
def v4_to_m128_modifier(optree):
    """Expand a 4-element vector Conversion into per-element selections.

    Each of the 4 scalar elements of the conversion input is extracted with
    VectorElementSelection and the four selections are fed to Conversion.
    NOTE(review): Conversion is called with four positional operands here --
    confirm the Conversion operation accepts 4 inputs in this context (it is
    used with a single input elsewhere in this module).
    """
    conv_input = optree.get_input(0)
    elt_precision = conv_input.get_precision().get_scalar_format()
    elts = [VectorElementSelection(
        conv_input,
        Constant(i, precision = ML_Integer),
        precision = elt_precision
    ) for i in range(4)]
    return Conversion(elts[0], elts[1], elts[2], elts[3],
                      precision = optree.get_precision())
# pointer cast template used when emitting (__m128i*) addresses
__m128ip_cast_operator = TemplateOperatorFormat(
    "(__m128i*){}", arity = 1,
    output_precision = ML_Pointer_Format(ML_SSE_m128_v4int32)
)
# scalar float32 fused multiply-add built from the FMA3 intrinsic
_mm_fmadd_ss = x86_fma_intrinsic_builder("_mm_fmadd_ss")
def is_vector_cst_with_value(optree, value):
    """Return True iff optree is a Constant whose components all equal value."""
    if not isinstance(optree, Constant):
        return False
    return all(component == value for component in optree.get_value())
def build_format_constant(value, precision):
    """ Build a constant whose format is @p precision
        and set its value to @p value, possibly duplicating it if precision
        is a vector format """
    if not precision.is_vector_format():
        return Constant(value, precision=precision)
    replicated = [value] * precision.get_vector_size()
    return Constant(replicated, precision=precision)
def is_cond_comparison_tree(cond):
    """Return True when cond is built purely from Comparison nodes combined
    with LogicalNot / LogicalAnd / LogicalOr."""
    if isinstance(cond, Comparison):
        return True
    if isinstance(cond, LogicalNot):
        return is_cond_comparison_tree(cond.get_input(0))
    if isinstance(cond, (LogicalAnd, LogicalOr)):
        return all(is_cond_comparison_tree(cond.get_input(index))
                   for index in (0, 1))
    return False
def pred_vector_select_mone_zero(optree):
    """ Predicate returns True if and only if
        optree is Select(cond, -1, 0) or Select(cond, 0, -1)
        False otherwise.
        Only returns True for integer-like format (-1 being equivalent
        to a mask fully set, and 0 to a mask fully cleared) """
    if not isinstance(optree, Select):
        return False
    # Only Select with comparison conditions are supported
    if not is_cond_comparison_tree(optree.get_input(0)):
        return False
    if not optree.get_precision().is_vector_format():
        return False
    true_value = optree.get_input(1)
    false_value = optree.get_input(2)
    mone_zero = (is_vector_cst_with_value(true_value, VIRTUAL_CST_MASK_M1)
                 and is_vector_cst_with_value(false_value, VIRTUAL_CST_MASK_0))
    zero_mone = (is_vector_cst_with_value(true_value, VIRTUAL_CST_MASK_0)
                 and is_vector_cst_with_value(false_value, VIRTUAL_CST_MASK_M1))
    return mone_zero or zero_mone
def not_pred_vector_select_one_zero(optree):
    """ Negation of the predicate pred_vector_select_mone_zero """
    matches = pred_vector_select_mone_zero(optree)
    return not matches
def invert_comp_specifier(comp_specifier):
    """ return the opposite (logical negation) of @p comp_specifier """
    negation_table = {
        Comparison.Equal: Comparison.NotEqual,
        Comparison.NotEqual: Comparison.Equal,
        Comparison.Less: Comparison.GreaterOrEqual,
        Comparison.GreaterOrEqual: Comparison.Less,
        Comparison.LessOrEqual: Comparison.Greater,
        Comparison.Greater: Comparison.LessOrEqual,
    }
    return negation_table[comp_specifier]
def generate_sse_avx_select_boolean_value(cond, precision, negate=False):
""" Generate a code generation operator for a comparison between two
values stored in SSE/AVX registers and whose boolean result is casted
to a value of same format as the operands (precision).
Negate indicates that condition must be reversed.
The Value 0 should be returned when cond is False
and -1 when cond is True """
assert isinstance(cond, Comparison)
specifier_map = {
Comparison.Equal: "eq",
Comparison.GreaterOrEqual: "ge",
Comparison.Greater: "gt",
Comparison.NotEqual: "neq",
Comparison.LessOrEqual: "le",
Comparison.Less: "lt",
}
SIGNED_PREDICATE_LIST = [
Comparison.GreaterOrEqual, Comparison.Greater,
Comparison.Less, Comparison.LessOrEqual
]
scalar_precision = precision.get_scalar_format()
if is_std_unsigned_integer_format(scalar_precision) \
and cond.specifier in SIGNED_PREDICATE_LIST:
Log.report(Log.Warning,
"Generating code for unsigned comparison with signed " \
"specifier in generate_sse_avx_select_boolean_value")
format_suffix = {
ML_SSE_m128_v4int32: "epi32",
ML_SSE_m128_v4uint32: "epi32",
ML_SSE_m128_v4float32: "ps",
ML_SSE_m128_v2float64: "pd",
ML_AVX_m256_v8int32: "epi32",
ML_AVX_m256_v8uint32: "epi32",
ML_AVX_m256_v8float32: "ps",
ML_AVX_m256_v4int64: "epi64",
ML_AVX_m256_v4float64: "pd",
}
format_prefix = {
ML_SSE_m128_v4int32: "mm",
ML_SSE_m128_v4uint32: "mm",
ML_SSE_m128_v4float32: "mm",
ML_AVX_m256_v8int32: "mm256",
ML_AVX_m256_v8uint32: "mm256",
ML_AVX_m256_v8float32: "mm256",
ML_AVX_m256_v4int64: "mm256",
ML_AVX_m256_v4float64: "mm256",
}
intrinsic_builder = {
ML_SSE_m128_v4int32: XmmIntrin,
ML_SSE_m128_v4uint32: XmmIntrin,
ML_SSE_m128_v4float32: XmmIntrin,
ML_AVX_m256_v8int32: ImmIntrin,
ML_AVX_m256_v8uint32: ImmIntrin,
ML_AVX_m256_v8float32: ImmIntrin,
ML_AVX_m256_v4int64: ImmIntrin,
ML_AVX_m256_v4float64: ImmIntrin,
}
def opcode_builder(precision, cond_specifier):
""" Build a function generator for comparison intrinsics
which requires the comparison specifier to be encoded
in the intrinsic mnemonic """
return intrinsic_builder[precision](
"_{}_cmp{}_{}".format(format_prefix[precision],
specifier_map[cond_specifier],
format_suffix[precision]),
output_precision = precision, arity = 2
)
def specifier_op_builder(precision, cond_specifier):
""" Build a function generator for comparison intrinsics
which requires the comparison specifier to be encoded
as an integer immediate value in the 3rd operand """
# TODO/FIXME: cleaning with ordered/unordered predicate
# default to ordered and non signaling when possible
# https://www.felixcloutier.com/x86/CMPPD.html#tbl-3-1
specifier_code = {
Comparison.Equal: 16,
Comparison.Less: 1,
Comparison.LessOrEqual: 2,
Comparison.NotEqual: 20,
Comparison.Greater: 14,
Comparison.GreaterOrEqual: 13,
}[cond_specifier]
return intrinsic_builder[precision](
"_{}_cmp_{}".format(format_prefix[precision],
format_suffix[precision]
),
arg_map = {0: FO_Arg(0), 1: FO_Arg(1), 2: str(specifier_code)},
output_precision = precision, arity = 2
)
mnemonic_builder = {
ML_SSE_m128_v4int32: opcode_builder,
ML_SSE_m128_v4uint32: opcode_builder,
ML_SSE_m128_v4float32: opcode_builder,
ML_AVX_m256_v8int32: opcode_builder,
ML_AVX_m256_v8uint32: opcode_builder,
ML_AVX_m256_v8float32: specifier_op_builder,
ML_AVX_m256_v4float64: specifier_op_builder,
ML_AVX_m256_v4int64: opcode_builder,
}
cond_specifier = cond.specifier if not negate \
else invert_comp_specifier(cond_specifier)
return | |
<reponame>TomSmithCGAT/CamProt
'''plot_tm_coverage
=======================================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python Proteomics
Purpose
-------
Usage
-----
Command line options
--------------------
'''
import argparse
import collections
import os
import re
import sys
import io
import gzip
import math
import pandas as pd
import numpy as np
import requests
import json
import proteomics.fasta as fasta
import proteomics.sequence as sequence
from time import gmtime, strftime
from rpy2.robjects import r as R
from rpy2.robjects import pandas2ri
pandas2ri.activate()
def writeSectionHeader(logfile, section_header):
    """Write section_header to logfile framed by a '=' blocker line above and
    a '-' underline below; return the blocker line so callers can reuse it."""
    section_blocker = ("======================================="
                       "=======================================")
    underliner1 = ("----------------------------------------"
                   "----------------------------------------")
    header_lines = "\n%s\n%s\n" % (section_blocker, section_header)
    logfile.write(header_lines)
    logfile.write("%s\n" % underliner1)
    return section_blocker
def iterateProteomicsSummary(infile, sep="\t"):
    ''' Iterate through a proteomics summary file and yield the data per line.

    :param infile: open file-like object; the first line is a header and skipped
    :param sep: field separator (default tab)
    :yields: (uniprot_id, sequence, start, end) tuples of strings, taken from
        the first four columns of each line
    :raises ValueError: when a line has fewer than four fields (previously the
        bare ``except`` printed debug output and raised a message-less error)
    '''
    next(infile)  # skip the header line
    for line in infile:
        fields = line.strip().split(sep)
        if len(fields) < 4:
            raise ValueError(
                "expected at least 4 %r-separated fields, got line %r" % (sep, line))
        uniprot_id, sequence, start, end = fields[0:4]
        yield (uniprot_id, sequence, start, end)
def getPeptidePosition(peptide_seq, protein_seq):
    ''' Find the positions in the protein where the peptide matches.

    :param peptide_seq: peptide sequence (plain string, matched literally)
    :param protein_seq: full protein sequence
    :return: set of 0-based positions covered by the peptide when it occurs
        exactly once; the string "more than 1 match" when it occurs more than
        once (non-overlapping count); None when it does not occur.
    '''
    # bug fix: the previous implementation passed the peptide to re.findall /
    # re.search, so any regex metacharacter in the sequence would be
    # misinterpreted; plain substring search is both safe and equivalent.
    if peptide_seq not in protein_seq:
        # no matches
        return None
    if protein_seq.count(peptide_seq) > 1:
        # non-overlapping count, matching re.findall semantics
        return "more than 1 match"
    # exactly 1 match: return every covered position
    start = protein_seq.find(peptide_seq)
    return set(range(start, start + len(peptide_seq)))
def normalisePaddedArray(input_array, upstream_pad, downstream_pad, tm_length, size):
    ''' Normalise an array composed of three regions (upstream, middle and end),
    where middle = TM; each region is rescaled to `size` bins.

    :param input_array: 1-D array of length upstream_pad + tm_length + downstream_pad
    :param upstream_pad: length of the upstream region
    :param downstream_pad: length of the downstream region
    :param tm_length: length of the TM region
    :param size: number of bins per region in the output
    :return: numpy array of length 3 * size
    '''
    assert len(input_array) == sum((upstream_pad, downstream_pad, tm_length))
    up_array = input_array[0:upstream_pad]
    tm_array = input_array[upstream_pad:upstream_pad+tm_length]
    down_array = input_array[upstream_pad+tm_length:]
    new_array = np.zeros(3 * size)
    # bug fix: was sequence.sequence.normaliseArraySize, inconsistent with the
    # two calls below (sequence is the proteomics.sequence module alias, so
    # the doubled attribute access would raise AttributeError at runtime)
    new_array[0:size] = sequence.normaliseArraySize(up_array, size=size)
    new_array[size:2*size] = sequence.normaliseArraySize(tm_array, size=size)
    new_array[2*size: 3*size] = sequence.normaliseArraySize(down_array, size=size)
    return(new_array)
def getTMCoverage(protein_coverage, tm_blocks, FEATURE_SIZE, sequence_dict, debug=False):
    """Build per-TM, per-study normalised coverage arrays.

    :param protein_coverage: per-protein collection of per-study coverage
        arrays (indexed protein -> study index -> per-position array)
    :param tm_blocks: per-protein list of (tm_start, tm_stop) tuples
        -- assumed sorted by position; TODO confirm with caller
    :param FEATURE_SIZE: number of bins per region after normalisation
    :param sequence_dict: protein id -> sequence (used only for lengths)
    :param debug: when True, print a message for each skipped TM
    :return: nested dict protein -> tm -> matrix of shape
        (n_studies, 3 * FEATURE_SIZE)
    """
    tm_blocks_region_coverage_norm = collections.defaultdict(
        lambda: collections.defaultdict(
            lambda: collections.defaultdict(np.array)))
    for protein in tm_blocks:
        for tm_ix, tm in enumerate(tm_blocks[protein]):
            tm_start, tm_stop = tm
            tm_blocks_region_coverage_norm[protein][tm] = np.zeros(
                (len(protein_coverage[protein]), 3*FEATURE_SIZE))
            # Flanking lengths are clipped to FEATURE_SIZE and to the gap to
            # the neighbouring TM (or to the protein ends for first/last TM).
            if tm_ix == 0:
                upstream_length = min(tm_start, FEATURE_SIZE)
                if len(tm_blocks[protein]) == 1:
                    downstream_length = min(len(sequence_dict[protein]) - tm_stop, FEATURE_SIZE)
                else:
                    downstream_length = min(tm_blocks[protein][tm_ix+1][0] - tm_stop, FEATURE_SIZE)
            elif tm_ix == (len(tm_blocks[protein]) - 1):
                upstream_length = min(tm_start - tm_blocks[protein][tm_ix-1][1], FEATURE_SIZE)
                downstream_length = min(len(sequence_dict[protein]) - tm_stop, FEATURE_SIZE)
            else:
                upstream_length = min(tm_start - tm_blocks[protein][tm_ix-1][1], FEATURE_SIZE)
                downstream_length = min(tm_blocks[protein][tm_ix+1][0] - tm_stop, FEATURE_SIZE)
            # print statements left in for debuggin purposes
            # TMs with no flanking sequence on either side are skipped (their
            # zero matrix created above remains in the result).
            if upstream_length == 0:
                if tm_ix == (len(tm_blocks[protein]) - 1):
                    if debug:
                        print("skipping TM (up): ", protein, tm_blocks[protein][tm_ix],
                              len(sequence_dict[protein]))
                else:
                    if debug:
                        print("skipping TM (up): ", protein, tm_blocks[protein][tm_ix - 1],
                              tm_blocks[protein][tm_ix])
                continue
            if downstream_length == 0:
                if tm_ix == (len(tm_blocks[protein]) - 1):
                    if debug:
                        print("skipping TM (down): ", protein, tm_blocks[protein][tm_ix],
                              len(sequence_dict[protein]))
                else:
                    if debug:
                        print("skipping TM (down): ", protein, tm_blocks[protein][tm_ix],
                              tm_blocks[protein][tm_ix+1])
                continue
            for array_ix, protein_array in enumerate(protein_coverage[protein]):
                tm_array_raw = protein_array[tm_start-upstream_length:tm_stop+downstream_length]
                #print(len(tm_array_raw))
                new_array = normalisePaddedArray(
                    tm_array_raw, upstream_pad=upstream_length, downstream_pad=downstream_length,
                    tm_length=tm_stop-tm_start, size=FEATURE_SIZE)
                tm_blocks_region_coverage_norm[protein][tm][array_ix] = new_array
    return tm_blocks_region_coverage_norm
def normalisePerStudy(study_ix, array_dict, tms, feature_size):
    """Average one study's per-TM coverage arrays over all TMs.

    :param study_ix: index of the study within each per-TM array collection
    :param array_dict: mapping protein -> tm -> per-study arrays
    :param tms: total number of TM rows to allocate (unfilled rows stay zero)
    :param feature_size: bins per region; each row has 3 * feature_size columns
    :return: 1-D numpy array of column means, length 3 * feature_size
    """
    stacked = np.zeros((tms, 3 * feature_size))
    row = 0
    for tm_map in array_dict.values():
        for per_study_arrays in tm_map.values():
            stacked[row] = per_study_arrays[study_ix]
            row += 1
    return stacked.mean(axis=0)
def makeMetaTMDF(tm_blocks_region_coverage_norm, ix2desc, tms, FEATURE_SIZE):
    """Build a long-format DataFrame of per-study meta-TM coverage profiles.

    For each study in ix2desc the per-TM arrays are averaged via
    normalisePerStudy, then stored both as absolute coverage and normalised
    to the study's maximum bin.
    """
    bins = []
    desc = []
    ixs = []
    coverage_abs = []
    coverage_norm = []
    for study_ix, study_desc in ix2desc.items():
        profile = normalisePerStudy(study_ix, tm_blocks_region_coverage_norm,
                                    tms, FEATURE_SIZE)
        peak = max(profile)
        coverage_abs.extend(profile)
        coverage_norm.extend(value / peak for value in profile)
        n_bins = len(profile)
        bins.extend(range(n_bins))
        desc.extend([study_desc] * n_bins)
        ixs.extend([study_ix] * n_bins)
    coverage_profile_df = pd.DataFrame({"desc": desc, "ix": ixs,
                                        "coverage_abs": coverage_abs, "coverage_norm": coverage_norm,
                                        "bins": bins})
    return coverage_profile_df
def makeProteinCoveredDF(protein_coverage, tm_blocks, ix2descr, sequence_dict):
    """Build a per-protein, per-study whole-protein coverage DataFrame.

    For each protein in ``tm_blocks``, sums the per-residue coverage matrix
    (rows = studies) and divides by the protein length from ``sequence_dict``
    to obtain a fractional coverage per study.

    NOTE(review): this definition is immediately shadowed by a second
    ``makeProteinCoveredDF`` defined below with a different signature —
    confirm which one is intended to survive.

    :param protein_coverage: dict uniprot_id -> 2D array (studies x residues).
    :param tm_blocks: dict of proteins to process (keys are uniprot ids).
    :param ix2descr: dict study index -> study description.
    :param sequence_dict: dict uniprot_id -> sequence (len = protein length).
    :return: DataFrame with columns uniprot_id, ix, desc, length, coverage.
    :raises ValueError: if a protein's coverage data cannot be summed.
    """
    rows = []
    for uniprot_id in tm_blocks:
        covered = np.zeros(len(protein_coverage[uniprot_id]))
        not_covered = np.zeros(len(protein_coverage[uniprot_id]))
        try:
            covered += protein_coverage[uniprot_id].sum(axis=1)
        except Exception as exc:
            # Dump context so the offending protein is identifiable, then
            # re-raise with a message (was a bare except + empty ValueError).
            print(uniprot_id)
            print(uniprot_id in tm_blocks)
            print(protein_coverage[uniprot_id])
            raise ValueError(
                "unexpected coverage data for %s" % uniprot_id) from exc
        not_covered += len(sequence_dict[uniprot_id]) - protein_coverage[uniprot_id].sum(axis=1)
        total_aas = covered + not_covered
        # Guard BEFORE dividing to avoid a divide-by-zero warning for
        # zero-length entries (all entries of total_aas are the protein
        # length, so checking the first element is sufficient).
        if total_aas[0] > 0:
            coverage = covered / total_aas
            for ix, cov in enumerate(coverage):
                rows.append([uniprot_id, ix, ix2descr[ix], 1, cov])
    protein_coverage_df = pd.DataFrame.from_records(
        rows, columns=["uniprot_id", "ix", "desc", "length", "coverage"])
    return protein_coverage_df
def makeProteinCoveredDF(protein_coverage, tm_blocks, ix2descr, sequence_dict=None):
    """Build a per-protein, per-study whole-protein coverage DataFrame.

    NOTE(review): this re-definition shadows an earlier
    ``makeProteinCoveredDF`` above. The original body read ``sequence_dict``
    as an (undeclared) module-level global — a latent NameError. It is now
    an explicit optional parameter; when omitted, the module-level global is
    used so existing three-argument callers keep working.

    :param protein_coverage: dict uniprot_id -> 2D array (studies x residues).
    :param tm_blocks: dict of proteins to process (keys are uniprot ids).
    :param ix2descr: dict study index -> study description.
    :param sequence_dict: optional dict uniprot_id -> sequence; falls back to
        a module-level ``sequence_dict`` if None.
    :return: DataFrame with columns uniprot_id, ix, desc, length, coverage.
    :raises ValueError: if a protein's coverage data cannot be summed.
    """
    if sequence_dict is None:
        # Preserve the original behaviour of reading the module global.
        sequence_dict = globals().get("sequence_dict")
        if sequence_dict is None:
            raise NameError(
                "sequence_dict was not passed and is not defined at module level")
    rows = []
    for uniprot_id in tm_blocks:
        covered = np.zeros(len(protein_coverage[uniprot_id]))
        not_covered = np.zeros(len(protein_coverage[uniprot_id]))
        try:
            covered += protein_coverage[uniprot_id].sum(axis=1)
        except Exception as exc:
            # Dump context so the offending protein is identifiable, then
            # re-raise with a message (was a bare except + empty ValueError).
            print(uniprot_id)
            print(uniprot_id in tm_blocks)
            print(protein_coverage[uniprot_id])
            raise ValueError(
                "unexpected coverage data for %s" % uniprot_id) from exc
        not_covered += len(sequence_dict[uniprot_id]) - protein_coverage[uniprot_id].sum(axis=1)
        total_aas = covered + not_covered
        # Guard BEFORE dividing to avoid a divide-by-zero warning.
        if total_aas[0] > 0:
            coverage = covered / total_aas
            for ix, cov in enumerate(coverage):
                rows.append([uniprot_id, ix, ix2descr[ix], 1, cov])
    protein_coverage_df = pd.DataFrame.from_records(
        rows, columns=["uniprot_id", "ix", "desc", "length", "coverage"])
    return protein_coverage_df
def makeTMCoveredDF(protein_coverage, tm_blocks, ix2descr):
    """Build a long-format DataFrame of per-study TM-region coverage.

    For each protein, sums per-residue coverage over every (start, stop) TM
    block, per study, then emits one row per study plus a final "combined"
    row averaging the per-position, summed-across-studies coverage.

    NOTE(review): the "combined" row below reuses the loop variable ``ix``
    from the ``enumerate`` above. If ``total_aas[0] > 0`` was never true for
    a protein, ``ix`` is either unbound (NameError on the first protein) or
    stale from a previous protein — confirm this is intended.
    """
    rows = []
    for uniprot_id in tm_blocks:
        covered = np.zeros(len(protein_coverage[uniprot_id]))
        not_covered = np.zeros(len(protein_coverage[uniprot_id]))
        tms = 0
        for tm in sorted(tm_blocks[uniprot_id]):
            try:
                tm_length = tm[1] - tm[0]
            except:
                # Bare except: tolerates TM entries that are not indexable
                # (start, stop) pairs by skipping them.
                continue
            covered += protein_coverage[uniprot_id][:,tm[0]:tm[1]].sum(axis=1)
            not_covered += tm_length - protein_coverage[uniprot_id][:,tm[0]:tm[1]].sum(axis=1)
            tms += 1
        #print(covered)
        #print(not_covered)
        #raise ValueError()
        total_aas = covered + not_covered
        # All entries of total_aas equal the summed TM length, so checking
        # the first element is a whole-protein "has any TM residues" guard.
        coverage = covered / total_aas
        if total_aas[0]>0:
            for ix, cov in enumerate(coverage):
                rows.append([uniprot_id, tms, ix, ix2descr[ix], 1, cov])
        # "combined" row: mean over per-position sums ACROSS studies (values
        # can exceed 1 when several studies cover the same residue).
        # NOTE(review): commented-out code elsewhere used .max(axis=0) for
        # this — confirm .sum(axis=0) is the intended aggregation.
        combined_coverage = []
        for tm in sorted(tm_blocks[uniprot_id]):
            combined_coverage += list(protein_coverage[uniprot_id][:,tm[0]:tm[1]].sum(axis=0))
        rows.append([uniprot_id, tms, ix+1, "combined", 1, np.mean(combined_coverage)])
    tm_coverage_df = pd.DataFrame.from_records(
        rows, columns=["uniprot_id", "tms", "ix", "desc", "length", "coverage"])
    return tm_coverage_df
def finaliseDataFrame(df, coverage_threshold=0):
    """Binarise coverage, sort proteins for plotting, add cumulative x-extents.

    Works on a copy of ``df``. ``coverage`` becomes a boolean
    (> ``coverage_threshold``); proteins are then ordered by (1) how many
    studies covered them, (2) the concatenated descriptions of covering
    studies, (3) total coverage; finally per-``desc`` cumulative start/end
    positions are computed for drawing stacked rectangles.
    """
    tmp_df = df.copy()
    tmp_df['coverage'] = tmp_df['coverage'] > coverage_threshold
    # Index on uniprot_id so the groupby aggregates below broadcast back
    # onto every row of the same protein via index alignment.
    tmp_df.set_index("uniprot_id", inplace=True, drop=False)
    # order_1: number of studies with coverage for the protein
    # (NaN for proteins with no coverage at all).
    tmp_df['order_1'] = tmp_df[tmp_df['coverage'] > 0].groupby("uniprot_id")[
        'desc'].aggregate(lambda x: len(x))
    # order_2: string-sum (concatenation) of covering study descriptions —
    # groups proteins covered by the same study combination together.
    tmp_df['order_2'] = tmp_df[tmp_df['coverage'] > 0].groupby("uniprot_id")[
        'desc'].sum()
    # order_3: total (boolean) coverage count per protein.
    tmp_df['order_3'] = tmp_df.groupby("uniprot_id")['coverage'].sum()
    # Uncovered proteins sort last: order_1 gets max study index + 1.
    tmp_df['order_1'] = tmp_df['order_1'].fillna(max(tmp_df['ix'])+1).astype("int")
    tmp_df['order_2'] = tmp_df['order_2'].fillna(0).astype("str")
    tmp_df = tmp_df.sort_values(
        ['order_1', 'order_2', 'order_3'], ascending=[True, False, False])
    tmp_df.reset_index(drop=True, inplace=True)
    # Per-study running x-position: each row occupies [cum_start, cum_end).
    tmp_df['cum_end'] = tmp_df.groupby("desc")['length'].apply(lambda x: np.cumsum(x))
    tmp_df['cum_start'] = tmp_df['cum_end'] - tmp_df['length']
    return tmp_df
# R function (compiled via the R() string bridge) that draws two meta-TM
# coverage profile plots from makeMetaTMDF output: a normalised-coverage
# plot (norm_plotname) and an absolute-fraction plot (fraction_plotname),
# each with a schematic TM model drawn under the x-axis and saved via ggsave.
plotCoverage = R('''
function(coverage_profile_df, fraction_plotname, norm_plotname){
library(ggplot2)
# check the data structure
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
#coverage_profile_df$desc = factor(coverage_profile_df$desc, levels=c(
# "krug", "ccp_qe_tl_90c", "ccp_lumos_chopin_tl", "ccp_lumos_trypsin"))
my_theme = theme(
aspect.ratio=1,
text=element_text(size=20),
axis.text.x=element_blank(),
axis.title.x=element_blank(),
axis.ticks.x=element_blank(),
panel.border=element_blank(),
panel.grid=element_blank(),
legend.text=element_text(size=12))
p = ggplot(coverage_profile_df) +
theme_bw() + my_theme +
scale_colour_manual(name="", values=cbPalette[1:(max(coverage_profile_df$ix) + 1)])
# make relative coverage plot
p1 = p + aes(bins, coverage_norm, colour=desc) + ylab("Normalised Coverage") +
geom_rect(xmin=10, xmax=20, ymin=-0.1, ymax=-0.05, fill = "black", colour="black") + # add TM model
geom_segment(x = 0, y = -0.075, xend = 30, yend = -0.075, color = "black") + # add TM model
annotate(geom="text", x=15, y=-0.075, label="TM", color="white") + # add TM model
geom_segment(x = 0, y = 0, xend=30, yend = 0, color = "grey50") + # add manual x-axis
geom_segment(x = 0, y = 0, xend=0, yend = 1, color = "grey50") + # add manual y-axis
scale_y_continuous(limits=c(-0.1,1), breaks=seq(0,1,0.25)) +
geom_line()
max_abs_coverage <- max(coverage_profile_df$coverage_abs)
# make absolute coverage plot
p2 = p + aes(bins, coverage_abs, colour=desc) + ylab("Fraction Covered") +
geom_rect(xmin=10, xmax=20, ymin=-(max_abs_coverage/20), ymax=-(max_abs_coverage/10),
fill = "black", colour="black") + # add TM model
geom_segment(x = 0, y = -(max_abs_coverage/13.3), xend = 30,
yend = -(max_abs_coverage/13.3), color = "black") + # add TM model
annotate(geom="text", x=15, y=-(max_abs_coverage/13.3), label="TM", color="white") + # add TM model
geom_segment(x = 0, y = 0, xend=30, yend = 0, color = "grey50") + # add manual x-axis
geom_segment(x = 0, y = 0, xend=0, yend = max_abs_coverage, color = "grey50") + # add manual x-axis
scale_y_continuous(limits=c(-(max_abs_coverage/10),max_abs_coverage)) +
geom_line()
ggsave(p2, file=fraction_plotname)
ggsave(p1, file=norm_plotname)
}
''')
plotCoveredTM = R('''
function(
input_df,
plotfilename,
circular=TRUE,
plot_only_covered=FALSE,
colour_by_group=FALSE,
group='desc'){
library(ggplot2)
tmp_df <- input_df
tmp_df <- tmp_df[tmp_df[[group]] != "combined",]
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#D55E00", "#CC79A7")
if (colour_by_group==TRUE){
tmp_df$fill = tmp_df[[group]]
tmp_df$fill[tmp_df$coverage == 0] <- NA
}
else{
tmp_df$fill = tmp_df$coverage > 0
}
tmp_df$fill <- factor(tmp_df$fill)
my_theme <- theme(
text=element_text(size=20),
legend.text=element_text(size=8),
axis.title=element_blank(),
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank())
my_theme_circ <- theme(axis.text=element_blank(),
axis.ticks=element_blank())
plot_text <- "Samples from outside\n to inside:\n\n"
n_studies <- max(tmp_df$ix)
studies_step = 0.5 / (n_studies + 1)
plot_ymax <- 1
plot_ymin <- 0.5
tmp_df$ymin <- plot_ymin + (tmp_df$ix * studies_step)
tmp_df$ymax <- plot_ymin + ((tmp_df$ix +1) * studies_step)
rect_colour <- NA
if (plot_only_covered==TRUE){
p <- ggplot(tmp_df[tmp_df$uniprot_id %in% tmp_df[tmp_df$coverage>0,"uniprot_id"],])
if (circular==TRUE){rect_colour <- "grey93"}
}
else{
p <- ggplot(tmp_df)
}
p <- p + theme_bw() + my_theme # add themes
if (circular==TRUE) {
p <- p + geom_rect(aes(xmin=cum_start, xmax=cum_end, ymin=ymin, ymax=ymax, fill=factor(fill)),
colour=rect_colour, size=0.1) + coord_polar() + my_theme_circ + ylim(0, 1)
}
else{
p <- p + geom_rect(aes(xmin=cum_start, xmax=cum_end, ymin=ix, ymax=ix+1, fill=factor(fill)),
colour=rect_colour, size=0.15) #+
scale_y_continuous(breaks=seq(0.5, max(tmp_df$ix+0.5, 0.5)))#, labels=levels(tmp_df$fill))
}
if (colour_by_group==TRUE){
p <- p + scale_fill_manual(na.value="grey97",
breaks=unique(tmp_df[[group]]),
values=cbPalette[1:length(unique(tmp_df[[group]]))],
name="")
if (circular==TRUE){
p <- p + theme(legend.position=c(0.5,0.5))
}
}
else{
p <- p + scale_fill_manual(values=c("grey97", "grey13")) + theme(legend.position="none")
if (circular==TRUE){
plot_text | |
= ['content_type', 'app_id', 'authorization', 'x_sdk_date', 'x_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'app_id' in local_var_params:
path_params['app_id'] = local_var_params['app_id']
query_params = []
header_params = {}
if 'content_type' in local_var_params:
header_params['Content-Type'] = local_var_params['content_type']
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization']
if 'x_sdk_date' in local_var_params:
header_params['X-Sdk-Date'] = local_var_params['x_sdk_date']
if 'x_project_id' in local_var_params:
header_params['X-Project-Id'] = local_var_params['x_project_id']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-request-Id"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/apps/{app_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowAppResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_auto_record_async(self, request):
"""查询自动录制配置
调用此接口查询自动录制配置
:param ShowAutoRecordRequest request
:return: ShowAutoRecordResponse
"""
return self.show_auto_record_with_http_info(request)
def show_auto_record_with_http_info(self, request):
"""查询自动录制配置
调用此接口查询自动录制配置
:param ShowAutoRecordRequest request
:return: ShowAutoRecordResponse
"""
all_params = ['content_type', 'app_id', 'authorization', 'x_sdk_date', 'x_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'app_id' in local_var_params:
path_params['app_id'] = local_var_params['app_id']
query_params = []
header_params = {}
if 'content_type' in local_var_params:
header_params['Content-Type'] = local_var_params['content_type']
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization']
if 'x_sdk_date' in local_var_params:
header_params['X-Sdk-Date'] = local_var_params['x_sdk_date']
if 'x_project_id' in local_var_params:
header_params['X-Project-Id'] = local_var_params['x_project_id']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-request-Id"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/apps/{app_id}/auto-record-mode',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowAutoRecordResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_individual_stream_job_async(self, request):
"""查询单流任务状态
调用此接口查询单流任务状态。 租户的OBS桶内的情况,暂不支持查询。
:param ShowIndividualStreamJobRequest request
:return: ShowIndividualStreamJobResponse
"""
return self.show_individual_stream_job_with_http_info(request)
def show_individual_stream_job_with_http_info(self, request):
"""查询单流任务状态
调用此接口查询单流任务状态。 租户的OBS桶内的情况,暂不支持查询。
:param ShowIndividualStreamJobRequest request
:return: ShowIndividualStreamJobResponse
"""
all_params = ['content_type', 'app_id', 'job_id', 'authorization', 'x_sdk_date', 'x_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'app_id' in local_var_params:
path_params['app_id'] = local_var_params['app_id']
if 'job_id' in local_var_params:
path_params['job_id'] = local_var_params['job_id']
query_params = []
header_params = {}
if 'content_type' in local_var_params:
header_params['Content-Type'] = local_var_params['content_type']
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization']
if 'x_sdk_date' in local_var_params:
header_params['X-Sdk-Date'] = local_var_params['x_sdk_date']
if 'x_project_id' in local_var_params:
header_params['X-Project-Id'] = local_var_params['x_project_id']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-request-Id"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/apps/{app_id}/individual-stream-jobs/{job_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowIndividualStreamJobResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_mix_job_async(self, request):
"""查询合流任务
调用此接口查询合流转码任务状态。
:param ShowMixJobRequest request
:return: ShowMixJobResponse
"""
return self.show_mix_job_with_http_info(request)
def show_mix_job_with_http_info(self, request):
"""查询合流任务
调用此接口查询合流转码任务状态。
:param ShowMixJobRequest request
:return: ShowMixJobResponse
"""
all_params = ['content_type', 'app_id', 'job_id', 'authorization', 'x_sdk_date', 'x_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'app_id' in local_var_params:
path_params['app_id'] = local_var_params['app_id']
if 'job_id' in local_var_params:
path_params['job_id'] = local_var_params['job_id']
query_params = []
header_params = {}
if 'content_type' in local_var_params:
header_params['Content-Type'] = local_var_params['content_type']
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization']
if 'x_sdk_date' in local_var_params:
header_params['X-Sdk-Date'] = local_var_params['x_sdk_date']
if 'x_project_id' in local_var_params:
header_params['X-Project-Id'] = local_var_params['x_project_id']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-request-Id"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/apps/{app_id}/mix-stream-jobs/{job_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowMixJobResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_record_callback_async(self, request):
"""查询增值(录制)事件回调配置
调用此接口查询增值(录制)事件回调配置
:param ShowRecordCallbackRequest request
:return: ShowRecordCallbackResponse
"""
return self.show_record_callback_with_http_info(request)
def show_record_callback_with_http_info(self, request):
"""查询增值(录制)事件回调配置
调用此接口查询增值(录制)事件回调配置
:param ShowRecordCallbackRequest request
:return: ShowRecordCallbackResponse
"""
all_params = ['content_type', 'app_id', 'authorization', 'x_sdk_date', 'x_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'app_id' in local_var_params:
path_params['app_id'] = local_var_params['app_id']
query_params = []
header_params = {}
if 'content_type' in local_var_params:
header_params['Content-Type'] = local_var_params['content_type']
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization']
if 'x_sdk_date' in local_var_params:
header_params['X-Sdk-Date'] = local_var_params['x_sdk_date']
if 'x_project_id' in local_var_params:
header_params['X-Project-Id'] = local_var_params['x_project_id']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-request-Id"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/apps/{app_id}/record-callback',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowRecordCallbackResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_record_rule_async(self, request):
"""查询录制规则
调用此接口查询指定录制规则。
:param ShowRecordRuleRequest request
:return: ShowRecordRuleResponse
"""
return self.show_record_rule_with_http_info(request)
def show_record_rule_with_http_info(self, request):
"""查询录制规则
调用此接口查询指定录制规则。
:param ShowRecordRuleRequest request
:return: ShowRecordRuleResponse
"""
all_params = ['content_type', 'app_id', 'rule_id', 'authorization', 'x_sdk_date', 'x_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'app_id' in local_var_params:
path_params['app_id'] = local_var_params['app_id']
if 'rule_id' in local_var_params:
path_params['rule_id'] = local_var_params['rule_id']
query_params = []
header_params = {}
if 'content_type' in local_var_params:
header_params['Content-Type'] = local_var_params['content_type']
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization']
if 'x_sdk_date' in local_var_params:
header_params['X-Sdk-Date'] = local_var_params['x_sdk_date']
if 'x_project_id' in local_var_params:
header_params['X-Project-Id'] = local_var_params['x_project_id']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-request-Id"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/apps/{app_id}/record-rules/{rule_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowRecordRuleResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_url_auth_async(self, request):
"""查询访问控制参数
查询应用鉴权配置参数
:param ShowUrlAuthRequest request
:return: ShowUrlAuthResponse
"""
return self.show_url_auth_with_http_info(request)
def show_url_auth_with_http_info(self, request):
"""查询访问控制参数
查询应用鉴权配置参数
:param ShowUrlAuthRequest request
:return: ShowUrlAuthResponse
"""
all_params = ['content_type', 'app_id', 'authorization', 'x_sdk_date', 'x_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'app_id' in local_var_params:
path_params['app_id'] = local_var_params['app_id']
query_params = []
header_params = {}
if 'content_type' in local_var_params:
header_params['Content-Type'] = local_var_params['content_type']
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization']
if 'x_sdk_date' in local_var_params:
header_params['X-Sdk-Date'] = local_var_params['x_sdk_date']
if 'x_project_id' in local_var_params:
header_params['X-Project-Id'] = local_var_params['x_project_id']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-request-Id"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/apps/{app_id}/authentication',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowUrlAuthResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def start_app_async(self, request):
"""启用应用
调用此接口启用单个应用。
:param StartAppRequest request
:return: StartAppResponse
"""
return self.start_app_with_http_info(request)
def start_app_with_http_info(self, request):
"""启用应用
调用此接口启用单个应用。
:param StartAppRequest request
:return: StartAppResponse
"""
all_params = ['content_type', 'app_id', 'authorization', 'x_sdk_date', 'x_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'app_id' in local_var_params:
path_params['app_id'] = local_var_params['app_id']
query_params = []
header_params = {}
if 'content_type' in local_var_params:
header_params['Content-Type'] = local_var_params['content_type']
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization']
if 'x_sdk_date' in local_var_params:
header_params['X-Sdk-Date'] = local_var_params['x_sdk_date']
if 'x_project_id' in local_var_params:
header_params['X-Project-Id'] = local_var_params['x_project_id']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-request-Id"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/apps/{app_id}/enable',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='StartAppResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def stop_app_async(self, request):
"""停用应用
调用此接口停用单个应用。 应用停用后,新房间无法新增和加入,已加入的房间可以继续使用。合流、录制功能等也不可用。
:param StopAppRequest request
:return: StopAppResponse
"""
return self.stop_app_with_http_info(request)
def stop_app_with_http_info(self, request):
"""停用应用
调用此接口停用单个应用。 应用停用后,新房间无法新增和加入,已加入的房间可以继续使用。合流、录制功能等也不可用。
:param StopAppRequest request
:return: StopAppResponse
"""
all_params = ['content_type', 'app_id', 'authorization', 'x_sdk_date', 'x_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'app_id' in local_var_params:
path_params['app_id'] = local_var_params['app_id']
query_params = []
header_params = {}
if 'content_type' in local_var_params:
header_params['Content-Type'] = local_var_params['content_type']
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization']
if 'x_sdk_date' in local_var_params:
header_params['X-Sdk-Date'] = local_var_params['x_sdk_date']
if 'x_project_id' in local_var_params:
header_params['X-Project-Id'] = local_var_params['x_project_id']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-request-Id"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/apps/{app_id}/disable',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='StopAppResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def stop_individual_stream_job_async(self, request):
"""停止单流任务
调用此接口停止单流任务
:param StopIndividualStreamJobRequest request
:return: StopIndividualStreamJobResponse
"""
return self.stop_individual_stream_job_with_http_info(request)
def stop_individual_stream_job_with_http_info(self, request):
"""停止单流任务
调用此接口停止单流任务
:param StopIndividualStreamJobRequest request
:return: StopIndividualStreamJobResponse
"""
all_params = ['content_type', 'app_id', 'job_id', 'authorization', 'x_sdk_date', 'x_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'app_id' in local_var_params:
path_params['app_id'] = local_var_params['app_id']
if 'job_id' in local_var_params:
path_params['job_id'] = local_var_params['job_id']
query_params = []
header_params = {}
if 'content_type' in local_var_params:
header_params['Content-Type'] = local_var_params['content_type']
if 'authorization' in local_var_params:
header_params['Authorization'] = local_var_params['authorization']
if 'x_sdk_date' in local_var_params:
header_params['X-Sdk-Date'] = local_var_params['x_sdk_date']
if 'x_project_id' in local_var_params:
header_params['X-Project-Id'] = local_var_params['x_project_id']
form_params = {}
body_params = None
if | |
lambda m: m._load_user_attributes()['dev'])
last_accessed = chips.LazyField("last_accessed", lambda m: m._load_user_attributes()['last_accessed'])
viewed_alerts_at = chips.LazyField("viewed_alerts_at", lambda m: m._load_user_attributes()['viewed_alerts_at'])
invites_left = chips.LazyField("invites_left", lambda m: m._load_user_attributes()['invites_left'])
inviter_id = chips.LazyField("inviter_id", lambda m: get_uuid(m._load_user_attributes()['inviter_id'], allow_none=True))
# This field stores data about the inviter user (if exists) meant for the gamestate. Data in this value
# should not require loading across database shards (if sharding on user_id).
inviter = chips.LazyField("inviter", lambda m: m._load_inviter_attributes())
# This field stores the full User object for the inviter user (if exists). This is meant to be used only in
# admin or debugging situations as if users are sharded this would mean crossing shards.
inviter_user = chips.LazyField("inviter_user", lambda m: m._load_inviter_user())
# A lazy dict of all map tiles for this user, keyed off of the map tile key (zoom,x,y). All tiles, whether
# current displayed or in the past/future at included in this list.
all_map_tiles = chips.LazyField("all_map_tiles", lambda m: m._load_all_map_tiles())
# Not a chips.Collection, just a lazy loaded server side only dict.
metadata = chips.LazyField("metadata", lambda m: m._load_user_metadata())
# Never send the password_hash to the client or put in the gamestate.
password_hash = chips.LazyField("password_hash", lambda m: m._load_password_hash())
# This loads from the users_notification table.
activity_alert_frequency = chips.LazyField("activity_alert_frequency", lambda m: m._load_activity_alert_frequency())
# Set the current_voucher_level (a voucher_key) based on current state of user's vouchers collection.
current_voucher_level = chips.LazyField("current_voucher_level", lambda m: m._load_current_voucher_level())
# Load the singleton Store model object.
shop = chips.LazyField("shop", lambda m: m._load_shop())
    def __init__(self, ctx, user_id):
        """Construct a lazily-loading user model.

        Every collection is registered with ``load_later`` so no database
        work happens until a given collection is first accessed.

        :param ctx: database context used for all lazy loads.
        :param user_id: identifier for this user; normalised via get_uuid.
        """
        super(UserModel, self).__init__(
            user_id=get_uuid(user_id),
            rovers=RoverCollection.load_later('rovers', self._load_rovers),
            missions=MissionCollection.load_later('missions', self._load_missions),
            messages=MessageCollection.load_later('messages', self._load_messages),
            species=SpeciesCollection.load_later('species', self._load_species),
            regions=RegionCollection.load_later('regions', self._load_regions),
            progress=ProgressCollection.load_later('progress', self._load_progress),
            achievements=AchievementCollection.load_later('achievements', self._load_achievements),
            capabilities=CapabilityCollection.load_later('capabilities', self._load_capabilities),
            vouchers=VoucherCollection.load_later('vouchers', self._load_vouchers),
            map_tiles=MapTileCollection.load_later('map_tiles', self._load_map_tiles),
            invitations=InviteCollection.load_later('invitations', self._load_invitations),
            gifts_created=GiftCreatedCollection.load_later('gifts_created', self._load_gifts_created),
            gifts_redeemed=GiftRedeemedCollection.load_later('gifts_redeemed', self._load_gifts_redeemed))
        # Store the database context for lazy loading of attributes.
        self._ctx = ctx
        # Used to cache lazy loaded user attributes from a database row.
        self._user_attributes = None
@property
def ctx(self):
""" Return the database context used to load this user. This is provided to every child model
of this user via the UserChild mixin. """
return self._ctx
@property
def activity_alert_frequency_window(self):
""" The size of the user's current notification frequency setting in seconds. """
return activity_alert_types.windows[self.activity_alert_frequency]
def is_admin(self):
return self.dev == 1
@property
def campaign_name(self):
campaign_name = self.metadata.get('MET_CAMPAIGN_NAME')
return campaign_name if campaign_name is not None else ""
def has_campaign_name(self):
return self.metadata.get('MET_CAMPAIGN_NAME') != None
# Secure token definitions.
@property
def validation_token(self):
return secure_tokens.make_token(VALIDATE_NAMESPACE, self.user_id)
def url_validate(self):
return urls.validate(self.validation_token)
def url_api_validate(self):
return urls.api_validate(self.validation_token)
@property
def password_reset_token(self):
return secure_tokens.make_token_with_timestamp(RESET_NAMESPACE, self.user_id, self.password_hash)
def is_valid_password_reset_token(self, token, timestamp):
return secure_tokens.check_token_with_timestamp(RESET_NAMESPACE, token, timestamp, RESET_EXPIRE,
self.user_id, self.password_hash)
def url_password_reset(self):
(token, timestamp) = self.password_reset_token
return urls.password_reset(self.user_id, token, timestamp)
def change_password_hash(self, new_password_hash):
with db.conn(self.ctx) as ctx:
db.run(ctx, 'update_user_password', password=new_password_hash, user_id=self.user_id)
@property
def unsubscribe_token(self):
return secure_tokens.make_token(UNSUBSCRIBE_NAMESPACE, self.user_id)
def is_valid_unsubscribe_token(self, token):
return secure_tokens.check_token(UNSUBSCRIBE_NAMESPACE, token, self.user_id)
def url_unsubscribe(self):
return urls.unsubscribe(self.user_id, self.unsubscribe_token)
def url_admin(self):
return urls.admin_user(self.user_id)
def url_admin_map(self):
return urls.admin_user_map(self.user_id)
def url_public_profile(self):
return urls.user_public_profile(self.user_id)
@property
def epoch_now(self):
""" Return the current UTC time as an 'epoch' value (seconds after the user epoch). """
return utils.seconds_between_datetimes(self.epoch, gametime.now())
def after_epoch_as_datetime(self, seconds_after_epoch):
""" Return a seconds since epoch value (integer) as a datetime object using the
user's epoch value to perform the conversion. """
return self.epoch + timedelta(seconds=seconds_after_epoch)
def seconds_between_now_and_after_epoch(self, seconds_after_epoch):
""" Return the number of seconds (integer) between now and the seconds since epoch
value provided (integer). Returned value will be negative seconds_after_epoch is earlier
than 'now'."""
return seconds_after_epoch - self.epoch_now
    def first_move_possible_at(self):
        """ Returns the seconds since user.epoch that this user was able to make their first move.
        A number of initial moves are created automatically by the system to simulate the lander
        landing and the rover deploying and looking around and this value factors that in. """
        # The PRO_USER_CREATED progress entry records when setup completed.
        return self.progress[progress.names.PRO_USER_CREATED].achieved_at
@property
def activated_at_date(self):
""" Returns the actual wallclock time this user was activated (first able to make a move). """
return self.after_epoch_as_datetime(self.first_move_possible_at())
    @property
    def time_since_activated(self):
        """ Returns the number of seconds that have elapsed between when this user
        was activated (first able to make a move) and now. """
        return utils.seconds_between_datetimes(self.activated_at_date, gametime.now())
    @property
    def time_since_last_accessed(self):
        """ Returns the number of seconds that have elapsed between when this user
        was last 'active' (see update_last_accessed) and now. """
        return utils.seconds_between_datetimes(self.last_accessed, gametime.now())
    def total_distance_traveled(self):
        """ Returns the total distance, in meters, this user's rovers have traveled in the game so far.
        This method only considers targets which have been arrived at as of the current gametime. """
        # Sum over every rover owned by this user (Python 2 dict iteration).
        return sum(r.distance_traveled() for r in self.rovers.itervalues())
    def total_distance_will_have_traveled(self):
        """ Returns the total distance, in meters, this user's rovers will have traveled in the game so far.
        This method INCLUDES targets which have been created but not yet been arrived at. """
        # Sum over every rover owned by this user (Python 2 dict iteration).
        return sum(r.distance_will_have_traveled() for r in self.rovers.itervalues())
    def validate_with_token(self, token):
        """ Mark this user as validated (email verified) if the given token is valid.
        Returns False if the token is invalid. """
        valid = secure_tokens.check_token(VALIDATE_NAMESPACE, token, self.user_id)
        if not valid:
            # Log the failure; tokens may arrive stale or tampered with.
            logger.error("Invalid token when attempting user validation. (%s, %s)", self.user_id, token)
            return False
        with db.conn(self.ctx) as ctx:
            db.run(ctx, 'update_user_valid', user_id=self.user_id)
            self.valid = 1 # Make our state mirror the database's.
            # No reason to send a chip since this field is not serialized.
            # Fire the callback inside the same connection so listeners see
            # the already-updated row.
            run_callback(USER_CB, "user_validated", ctx=ctx, user=self)
        return True
    def add_metadata(self, key, value=""):
        """
        Attach arbitrary metadata to this user object, using the given key and optional value.
        The keys should have a MET_ namespace.
        If the given key has already been assigned to this user, then its value and created
        time are updated/replaced.
        """
        assert key.startswith("MET_"), "Metadata keys must start with a MET_ prefix."
        with db.conn(self.ctx) as ctx:
            # Upsert: an existing row for this (user, key) gets its value and
            # created time replaced.
            db.run(ctx, 'insert_or_update_users_metadata', user_id=self.user_id, key=key, value=value, created=gametime.now())
        # Make our state mirror the database's
        self.metadata[key] = value
    def clear_metadata(self, key):
        """
        Clear any metadata for the given key from this user object. This will delete the key and value completely.
        The keys should have a MET_ namespace.
        """
        assert key.startswith("MET_"), "Metadata keys must start with a MET_ prefix."
        # Make our state mirror the database's
        # Do this before deleting from the database so that the lazy loader has populated the metadata dictionary.
        del self.metadata[key]
        with db.conn(self.ctx) as ctx:
            db.run(ctx, 'delete_users_metadata', user_id=self.user_id, key=key)
def has_target_with_metadata_key(self, key):
with db.conn(self.ctx) as ctx:
r = db.row(ctx, 'count_targets_with_metadata_key', metadata_key=key, user_id=self.user_id)
return r['key_count'] > 0
def set_activity_alert_frequency(self, activity_alert_frequency):
"""
Set this user's activity alert settings to the given frequency.
See front.activity_alert_types for possible frequency values.
"""
assert activity_alert_frequency in activity_alert_types.ALL
if activity_alert_frequency == activity_alert_types.OFF:
with db.conn(self.ctx) as ctx:
db.run(ctx, 'notifications/update_user_notifications', user_id=self.user_id,
activity_alert_frequency=activity_alert_frequency, wants_activity_alert=0)
else:
with db.conn(self.ctx) as ctx:
db.run(ctx, 'notifications/update_user_notifications', user_id=self.user_id,
activity_alert_frequency=activity_alert_frequency, wants_activity_alert=1)
self.activity_alert_frequency = activity_alert_frequency # Make our state mirror the database's
self.send_chips(self.ctx, self)
def update_last_accessed(self):
""" Update this users last_accessed field to be gametime.now(). """
now = gametime.now()
with db.conn(self.ctx) as ctx:
db.run(ctx, 'update_user_last_accessed', user_id=self.user_id, now=now)
self.set_silent(last_accessed = now) # Make our state mirror the database's
# Server only field so no chip.
def update_viewed_alerts_at(self):
""" Update this users viewed_alerts_at field to be 'now' in terms of their epoch. """
epoch_now = self.epoch_now
with db.conn(self.ctx) as ctx:
db.run(ctx, 'update_user_viewed_alerts_at', user_id=self.user_id, epoch_now=epoch_now)
self.viewed_alerts_at = epoch_now # Make our state mirror the database's
self.send_chips(self.ctx, self)
    def increment_invites_left(self, delta=1):
        """ Increment this user's invites_left field by delta (default 1). """
        new_invites_left = self.invites_left + delta
        with db.conn(self.ctx) as ctx:
            db.run(ctx, 'update_user_invites_left', user_id=self.user_id, invites_left=new_invites_left)
        self.invites_left = new_invites_left # Make our state mirror the database's
        self.send_chips(self.ctx, self)
def decrement_invites_left(self, delta=1):
""" Decrement this users invites_left field by 1. """
assert self.invites_left > 0
new_invites_left = self.invites_left - delta
with db.conn(self.ctx) as ctx:
db.run(ctx, 'update_user_invites_left', user_id=self.user_id, invites_left=new_invites_left)
self.invites_left = new_invites_left # Make our state mirror the database's
self.send_chips(self.ctx, self)
    def current_voucher_level_prepare_refresh(self):
        """
        Call this method anytime current_voucher_level's value might have changed, for instance if a new
        voucher was delivered. See current_voucher_level_refresh.
        """
        # Touch the lazy property so its value is loaded before the change
        # lands; presumably current_voucher_level_refresh compares against
        # this loaded value — confirm against that method.
        self.current_voucher_level
def current_voucher_level_refresh(self):
"""
Update the lazy current_voucher_level field if it has changed and send a MOD chip.
Call this method anytime this field's value might have changed, for instance if a new voucher was delivered.
Call current_voucher_level_prepare_refresh before | |
# <gh_stars>0
# -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2017. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "<NAME> <<EMAIL>>"
__contributors__ = ["<NAME> <<EMAIL>>"]
__copyright__ = "Copyright 2010-2017 University of Liège, Belgium, http://www.cytomine.be/"
import cytomine
import optparse
import sys
from multiprocessing import Pool
import numpy as np
import scipy.ndimage as snd
from sklearn.externals import joblib
from SeparateTrees import SeparateTrees
from SeparateTreesRegressor import SeparateTreesRegressor
from download import *
from ldmtools import *
def dataset_from_coordinates(img, x, y, feature_offsets):
    """Extract pixel-difference features at the given coordinates.

    For each point (x[k], y[k]), feature i is the difference between the
    pixel value at the point and the value at the point shifted by
    feature_offsets[i] (all coordinates clamped to the image bounds).
    """
    (h, w) = img.shape
    max_y = h - 1
    max_x = w - 1
    base = img[y.clip(min=0, max=max_y), x.clip(min=0, max=max_x)]
    n_features = feature_offsets.shape[0]
    features = np.zeros((x.size, n_features))
    for i in range(n_features):
        shifted_y = (y + feature_offsets[i, 1]).clip(min=0, max=max_y)
        shifted_x = (x + feature_offsets[i, 0]).clip(min=0, max=max_x)
        features[:, i] = base - img[shifted_y, shifted_x]
    return features
def image_dataset_phase_1(repository, image_number, x, y, feature_offsets, R_offsets, delta, P):
    """Build the phase-1 training set for one image.

    Samples nroff perturbed positions around every landmark (x, y) plus a
    proportion P of random "background" pixels, and extracts pixel-difference
    features at each sampled position.

    Returns (dataset, rep) where rep[i] is the landmark index of sample i;
    the value len(x) (one past the last landmark index) marks background.
    """
    # Zoom by `delta`, then pad with a 1-pixel border so offset lookups near
    # the edges stay in bounds.
    img = makesize(snd.zoom(readimage(repository, image_number), delta), 1)
    (h, w) = img.shape
    # Candidate background pixels; the padded border is excluded.
    mask = np.ones((h, w), 'bool')
    mask[:, 0] = 0
    mask[0, :] = 0
    mask[h - 1, :] = 0
    mask[:, w - 1] = 0
    (nroff, blc) = R_offsets.shape
    # Work with unpadded dimensions but shift landmark coordinates into the
    # padded frame.
    # NOTE(review): `x += 1` / `y += 1` mutate the caller's arrays in place
    # when numpy arrays are passed — confirm callers pass throwaway copies.
    h -= 2
    w -= 2
    x += 1
    y += 1
    n_out = int(np.round(P * nroff))  # number of background samples
    rep = np.zeros((x.size * nroff) + n_out)
    xs = np.zeros((x.size * nroff) + n_out).astype('int')
    ys = np.zeros((x.size * nroff) + n_out).astype('int')
    for ip in range(x.size):
        # nroff jittered samples around landmark ip, all labelled ip.
        xs[ip * nroff:(ip + 1) * nroff] = x[ip] + R_offsets[:, 0]
        ys[ip * nroff:(ip + 1) * nroff] = y[ip] + R_offsets[:, 1]
        rep[ip * nroff:(ip + 1) * nroff] = ip
    # Exclude already-sampled positions, then draw n_out random background
    # pixels from what remains; they get the extra label x.size.
    mask[ys, xs] = 0
    (ym, xm) = np.where(mask == 1)
    perm = np.random.permutation(ym.size)[0:n_out]
    ym = ym[perm]
    xm = xm[perm]
    xs[x.size * nroff:] = xm
    ys[y.size * nroff:] = ym
    rep[x.size * nroff:] = x.size
    dataset = dataset_from_coordinates(img, xs, ys, feature_offsets)
    return dataset, rep
def dataset_mp_helper(jobargs):
    """Unpack one argument tuple for Pool.map over image_dataset_phase_1."""
    args = tuple(jobargs)
    return image_dataset_phase_1(*args)
def get_dataset_phase_1(repository, training_images, image_ids, n_jobs, feature_offsets, R_offsets, delta, P, X, Y):
    """Assemble the full phase-1 dataset over all training images in parallel.

    Returns (DATASET, REP, IMG): feature rows, the per-row landmark label
    (nldms meaning background) and the per-row training-image index.
    """
    p = Pool(n_jobs)
    # Landmark coordinates scaled into the zoomed-image frame.
    Xc = np.round(X * delta).astype('int')
    Yc = np.round(Y * delta).astype('int')
    (nims, nldms) = Xc.shape
    # One job per image that belongs to the training set.
    jobargs = []
    for i in range(nims):
        if image_ids[i] in training_images:
            jobargs.append((repository, image_ids[i], Xc[i, :], Yc[i, :], feature_offsets, R_offsets, delta, P))
    data = p.map(dataset_mp_helper, jobargs)
    p.close()
    p.join()
    (nroff, blc) = R_offsets.shape
    nims = len(training_images)  # from here on, nims counts training images only
    # Upper bound on rows per image: nroff samples per landmark plus the
    # background samples; preallocate, fill, then trim.
    n_in = nroff * nldms
    n_out = int(np.round(nroff * P))
    n_tot = n_in + n_out
    DATASET = np.zeros((nims * n_tot, feature_offsets[:, 0].size))
    REP = np.zeros(nims * n_tot)
    IMG = np.zeros(nims * n_tot)
    b = 0
    i = 0
    for (d, r) in data:
        (nd, nw) = d.shape
        DATASET[b:b + nd, :] = d
        REP[b:b + nd] = r
        IMG[b:b + nd] = i
        i += 1
        b = b + nd
    # Trim the preallocated arrays down to the rows actually filled.
    DATASET = DATASET[0:b, :]
    REP = REP[0:b]
    IMG = IMG[0:b]
    return DATASET, REP, IMG
def build_phase_1_model(repository, tr_image=None, image_ids=None, n_jobs=1, NT=32, F=100, R=2, sigma=10, delta=0.25, P=1,
                        X=None, Y=None):
    """Generate the random feature/offset definitions and the phase-1 dataset.

    Returns (dataset, rep, img, feature_offsets); see get_dataset_phase_1.
    """
    # Avoid the shared-mutable-default pitfall of the original
    # `tr_image=[], image_ids=[]` signature: use None sentinels.
    if tr_image is None:
        tr_image = []
    if image_ids is None:
        image_ids = []
    # NT * F random pixel offsets drawn from an isotropic Gaussian with
    # standard deviation sigma.
    std_matrix = np.eye(2) * (sigma ** 2)
    feature_offsets = np.round(np.random.multivariate_normal([0, 0], std_matrix, NT * F)).astype('int')
    # All integer offsets within a disc of radius R.
    R_offsets = []
    for x1 in range(-R, R + 1):
        for x2 in range(-R, R + 1):
            if np.linalg.norm([x1, x2]) <= R:
                R_offsets.append([x1, x2])
    R_offsets = np.array(R_offsets).astype('int')
    (dataset, rep, img) = get_dataset_phase_1(repository, tr_image, image_ids, n_jobs, feature_offsets, R_offsets,
                                              delta, P, X, Y)
    return dataset, rep, img, feature_offsets
def probability_map_phase_1(repository, image_number, clf, feature_offsets, delta):
    """Compute the per-pixel landmark probability map for one image.

    Classifies every interior pixel of the zoomed, padded image in chunks and
    returns an (h-2, w-2, nldms) array where slice [..., ip] holds the
    probability of each pixel being landmark ip (last slice: background).
    """
    img = makesize(snd.zoom(readimage(repository, image_number), delta), 1)
    (h, w) = img.shape
    # Enumerate every interior pixel (skipping the 1-pixel pad border) in
    # row-major order. Floor division maps the flat index back to a row; the
    # original `np.round(c / (w - 2))` was only correct under Python 2
    # integer-division semantics. The dead `ys = []` / `xs = []` assignments
    # were removed.
    c = np.arange((h - 2) * (w - 2))
    ys = 1 + (c // (w - 2)).astype('int')
    xs = 1 + np.mod(c, (w - 2))
    step = 20000  # classify in chunks to bound memory usage
    b = 0
    probability_map = None  # allocated lazily once nldms is known
    nldms = -1
    while b < xs.size:
        next_b = min(b + step, xs.size)
        dataset = dataset_from_coordinates(img, xs[b:next_b], ys[b:next_b], feature_offsets)
        probabilities = clf.predict_proba(dataset)
        if nldms == -1:
            (ns, nldms) = probabilities.shape
            probability_map = np.zeros((h - 2, w - 2, nldms))
        for ip in range(nldms):
            probability_map[ys[b:next_b] - 1, xs[b:next_b] - 1, ip] = probabilities[:, ip]
        b = next_b
    return probability_map
def image_dataset_phase_2(repository, image_number, x, y, feature_offsets, R_offsets, delta):
    """Build the phase-2 regression set for one image and one landmark.

    Samples positions displaced from the landmark (x, y) by R_offsets and
    returns (dataset, rep, number): features at each displaced position, the
    (dx, dy) displacement to regress, and the image number.
    """
    # Zoom by `delta`, then pad with a 1-pixel border for safe offset lookups.
    img = makesize(snd.zoom(readimage(repository, image_number), delta), 1)
    (h, w) = img.shape
    # NOTE(review): this mask mirrors phase 1 but is never used in this
    # function — likely leftover from copy/paste.
    mask = np.ones((h, w), 'bool')
    mask[:, 0] = 0
    mask[0, :] = 0
    mask[h - 1, :] = 0
    mask[:, w - 1] = 0
    (nroff, blc) = R_offsets.shape
    # Shift the landmark coordinate into the padded frame.
    h -= 2
    w -= 2
    x += 1
    y += 1
    rep = np.zeros((nroff, 2))
    number = image_number
    # One sample per displacement offset; the regression target is the
    # displacement itself.
    xs = (x + R_offsets[:, 0]).astype('int')
    ys = (y + R_offsets[:, 1]).astype('int')
    rep[:, 0] = R_offsets[:, 0]
    rep[:, 1] = R_offsets[:, 1]
    dataset = dataset_from_coordinates(img, xs, ys, feature_offsets)
    return dataset, rep, number
def dataset_mp_helper_phase_2(jobargs):
    """Unpack one argument tuple for Pool.map over image_dataset_phase_2."""
    args = tuple(jobargs)
    return image_dataset_phase_2(*args)
def get_dataset_phase_2(repository, tr_images, image_ids, n_jobs, id_term, feature_offsets, R_offsets, delta):
    """Assemble the full phase-2 dataset for one landmark term in parallel.

    Returns (DATASET, REP, NUMBER): feature rows, (dx, dy) regression targets
    and the source image number for each row.
    """
    p = Pool(n_jobs)
    # Manual landmark coordinates for this term, read from the txt directory.
    (Xc, Yc, Xp, Yp, ims) = getcoords(repository.rstrip('/') + '/txt/', id_term)
    nims = Xc.size
    # One job per image that belongs to the training set.
    jobargs = []
    for i in range(nims):
        if image_ids[i] in tr_images:
            jobargs.append((repository, image_ids[i], Xc[i], Yc[i], feature_offsets, R_offsets, delta))
    data = p.map(dataset_mp_helper_phase_2, jobargs)
    p.close()
    p.join()
    (nroff, blc) = R_offsets.shape
    nims = len(tr_images)  # from here on, nims counts training images only
    # Preallocate nroff rows per training image, fill, then trim.
    DATASET = np.zeros((nims * nroff, feature_offsets[:, 0].size))
    REP = np.zeros((nims * nroff, 2))
    NUMBER = np.zeros(nims * nroff)
    b = 0
    for (d, r, n) in data:
        (nd, nw) = d.shape
        DATASET[b:b + nd, :] = d
        REP[b:b + nd, :] = r
        NUMBER[b:b + nd] = n
        b = b + nd
    # Trim the preallocated arrays down to the rows actually filled.
    DATASET = DATASET[0:b, :]
    REP = REP[0:b]
    NUMBER = NUMBER[0:b]
    return DATASET, REP, NUMBER
def build_phase_2_model(repository, tr_image=None, image_ids=None, n_jobs=1, IP=0, NT=32, F=100, R=3, N=500, sigma=10,
                        delta=0.25):
    """Generate the random feature/offset definitions and the phase-2 dataset
    for landmark term IP.

    Returns (dataset, rep, number, feature_offsets); see get_dataset_phase_2.
    """
    # NT * F random pixel offsets drawn from an isotropic Gaussian with
    # standard deviation sigma.
    std_matrix = np.eye(2) * (sigma ** 2)
    feature_offsets = np.round(np.random.multivariate_normal([0, 0], std_matrix, NT * F)).astype('int')
    # N random displacement offsets uniformly distributed (by radius) inside
    # a disc of radius R, sampled in polar coordinates.
    R_offsets = np.zeros((N, 2))
    dis = np.random.ranf(N) * R
    ang = np.random.ranf(N) * 2 * np.pi
    # NOTE(review): the .astype('int') below is effectively a no-op — the
    # values are stored into the float64 R_offsets array, so the offsets end
    # up rounded but float-typed. Confirm whether an int array was intended.
    R_offsets[:, 0] = np.round((dis * np.cos(ang))).astype('int')
    R_offsets[:, 1] = np.round((dis * np.sin(ang))).astype('int')
    (dataset, rep, number) = get_dataset_phase_2(repository, tr_image, image_ids, n_jobs, IP, feature_offsets,
                                                 R_offsets, delta)
    return dataset, rep, number, feature_offsets
def build_edgematrix_phase_3(Xc, Yc, sde, delta, T):
    """Return, for each landmark, the indices of its T most stable neighbours.

    Stability of a pair (i, j) is the mean deviation of the i->j offset
    vector across images (smaller = more stable). `sde` is accepted for
    signature compatibility but unused.
    """
    scaled_x = Xc * delta
    scaled_y = Yc * delta
    (n_images, n_landmarks) = scaled_x.shape
    # Pairwise spread matrix, initialised to +inf. Every off-diagonal entry
    # is overwritten below; the diagonal stays +inf so a landmark never
    # selects itself as a neighbour.
    spread = np.eye(n_landmarks) + np.inf
    for i in range(n_landmarks):
        for j in range(i + 1, n_landmarks):
            dx = scaled_x[:, i] - scaled_x[:, j]
            dy = scaled_y[:, i] - scaled_y[:, j]
            # Deviation of the offset vector from its mean across images.
            dx = dx - np.mean(dx)
            dy = dy - np.mean(dy)
            pair_spread = np.mean(np.sqrt((dx ** 2) + (dy ** 2)))
            spread[i, j] = pair_spread
            spread[j, i] = pair_spread
    neighbours = np.zeros((n_landmarks, T))
    for i in range(n_landmarks):
        neighbours[i, :] = np.argsort(spread[i, :])[0:T]
    return neighbours.astype(int)
def main():
p = optparse.OptionParser(description='Cytomine Landmark Detection : Model building',
prog='Cytomine Landmark Detector : Model builder', version='0.1')
p.add_option('--cytomine_host', type="string", default='beta.cytomine.be', dest="cytomine_host",
help="The Cytomine host (eg: beta.cytomine.be, localhost:8080)")
p.add_option('--cytomine_public_key', type="string", default='XXX', dest="cytomine_public_key",
help="Cytomine public key")
p.add_option('--cytomine_private_key', type="string", default='YYY', dest="cytomine_private_key",
help="Cytomine private key")
p.add_option('--cytomine_id_software', type="int", dest="cytomine_id_software",
help="The Cytomine software identifier")
p.add_option('--cytomine_base_path', type="string", default='/api/', dest="cytomine_base_path",
help="Cytomine base path")
p.add_option('--cytomine_working_path', default="/tmp/", type="string", dest="cytomine_working_path",
help="The working directory (eg: /tmp)")
p.add_option('--cytomine_training_images', default="all", type="string", dest="cytomine_training_images",
help="identifiers of the images used to create the models. ids must be separated by commas (no spaces). If 'all' is mentioned instead, every image with manual annotation will be used.")
p.add_option('--cytomine_id_project', type="int", dest="cytomine_id_project",
help="The Cytomine project identifier")
p.add_option('--image_type', type='string', default='jpg', dest='image_type',
help="The type of the images that will be used (jpg, bmp, png,...)")
p.add_option('--model_njobs', type='int', default=1, dest='model_njobs',
help="The number of processors used for model building")
p.add_option('--cytomine_id_terms', type='string', default=1, dest='cytomine_id_terms',
help="The identifiers of the terms to create detection models for. Terms must be separated by commas (no spaces). If 'all' is mentioned instead, every terms will be detected.")
p.add_option('--model_NT_P1', type='int', default=6, dest='model_NT_P1', help="Number of trees for phase 1.")
p.add_option('--model_F_P1', type='int', default=200, dest='model_F_P1', help="Number of features for phase | |
models to Blender.'''
# pylint: disable=unused-argument, no-member
bl_idname = "mcblend.import_model"
bl_label = "Import Bedrock Model"
bl_options = {'REGISTER', 'UNDO'}
bl_description = "Import Minecraft Bedrock edition model from json file."
# ImportHelper mixin class uses this
filename_ext = ".json"
filter_glob: StringProperty( # type: ignore
default="*.json",
options={'HIDDEN'},
maxlen=1000,
)
geometry_name: StringProperty( # type: ignore
default='',
maxlen=500,
name='Geometry name'
)
def execute(self, context):
# Save file and finish
with open(self.filepath, 'r') as f:
data = json.load(f, cls=JSONCDecoder)
try:
warnings = import_model(data, self.geometry_name, context)
if len(warnings) > 1:
for warning in warnings:
self.report({'WARNING'}, warning)
self.report(
{'WARNING'},
f"Finished with {len(warnings)} warnings. "
"See logs for more details."
)
elif len(warnings) == 1:
self.report({'WARNING'}, warnings[0])
except ImporterException as e:
self.report(
{'ERROR'}, f'Invalid model: {e}'
)
return {'FINISHED'}
# Animation (GUI)
def menu_func_mcblend_import_model(self, context):
    '''Draw the Bedrock model import entry in the file import menu.'''
    # pylint: disable=unused-argument
    layout = self.layout
    layout.operator(MCBLEND_OT_ImportModel.bl_idname)
def save_animation_properties(animation, context):
    '''
    Snapshot the scene's animation state (frame range, timeline markers and
    unmuted NLA tracks) into the given MCBLEND_AnimationProperties object.
    '''
    scene = context.scene
    animation.frame_start = scene.frame_start
    animation.frame_end = scene.frame_end
    animation.frame_current = scene.frame_current
    # Replace the cached timeline markers with the scene's current ones.
    animation.timeline_markers.clear()
    for marker in scene.timeline_markers:
        cached_marker = animation.timeline_markers.add()
        cached_marker.name = marker.name
        cached_marker.frame = marker.frame
    # Remember which NLA tracks are currently audible (not muted).
    animation.nla_tracks.clear()
    anim_data = context.object.animation_data
    if anim_data is not None:
        for track in anim_data.nla_tracks:
            if not track.mute:
                cached_track = animation.nla_tracks.add()
                cached_track.name = track.name
def load_animation_properties(animation, context):
    '''
    Restore the scene's animation state (frame range, timeline markers and
    NLA track mute flags) from the given MCBLEND_AnimationProperties object.
    '''
    scene = context.scene
    scene.frame_start = animation.frame_start
    scene.frame_end = animation.frame_end
    scene.frame_current = animation.frame_current
    # Rebuild the scene timeline markers from the cached ones.
    scene.timeline_markers.clear()
    for cached_marker in animation.timeline_markers:
        scene.timeline_markers.new(
            cached_marker.name,
            frame=cached_marker.frame)
    # Mute everything, then unmute only the tracks cached for this animation.
    if context.object.animation_data is not None:
        object_nla_tracks = context.object.animation_data.nla_tracks
        for track in object_nla_tracks:
            track.mute = True
        for cached_track in animation.nla_tracks:
            if cached_track.name in object_nla_tracks:
                object_nla_tracks[cached_track.name].mute = False
class MCBLEND_OT_ListAnimations(bpy.types.Operator):
    '''
    Operator used for listing the animations for GUI.
    '''
    bl_idname = "mcblend.list_animations"
    bl_label = "List animations and save them to Enum to display them in GUI"
    bl_options = {'INTERNAL'}

    def _list_animations(self, context):
        # pylint: disable=unused-argument
        # Build the (identifier, name, description) tuples required by
        # EnumProperty's `items` callback.
        items = [
            (str(i), x.name, x.name)
            for i, x in enumerate(bpy.context.object.mcblend.animations)]
        return items
    animations_enum: bpy.props.EnumProperty( # type: ignore
        items=_list_animations, name="Animations")

    @classmethod
    def poll(cls, context):
        # Only available in object mode with an armature active.
        if context.mode != 'OBJECT':
            return False
        if context.object.type != 'ARMATURE':
            return False
        return True

    def execute(self, context):
        '''
        Runs when user picks an item from the dropdown menu in animations
        panel. Sets the active animation.
        '''
        # Cancel operation if there is an action being edited
        if context.object.animation_data.action is not None:
            # TODO - stash action and activate the action strip for current
            # animation if a new animation has been selected
            self.report(
                {'WARNING'},
                "Stash, push down or delete the active action before "
                "selecting new animation")
            return {'CANCELLED'}
        # If OK than save old animation state
        len_anims = len(context.object.mcblend.animations)
        curr_anim_id = context.object.mcblend.active_animation
        if 0 <= curr_anim_id < len_anims:
            save_animation_properties(
                context.object.mcblend.animations[curr_anim_id], context)
        # Set new animation and load its state
        new_anim_id=int(self.animations_enum)
        context.object.mcblend.active_animation=new_anim_id
        load_animation_properties(
            context.object.mcblend.animations[new_anim_id], context)
        return {'FINISHED'}
class MCBLEND_OT_AddAnimation(bpy.types.Operator):
    '''Operator used for creating animation settings templates.'''
    bl_idname = "mcblend.add_animation"
    bl_label = '''Adds new animation to the list.'''
    bl_options = {'UNDO', 'INTERNAL'}

    @classmethod
    def poll(cls, context):
        # Only available in object mode with an armature active.
        if context.mode != 'OBJECT':
            return False
        if context.object.type != 'ARMATURE':
            return False
        return True

    def execute(self, context):
        # Cancel operation if there is an action being edited
        if (
                context.object.animation_data is not None and
                context.object.animation_data.action is not None):
            # TODO - stash action if mcblend animation already exists or
            # don't do anything if that's the first mcblend animation
            self.report(
                {'WARNING'},
                "Stash, push down or delete the active action before "
                "adding new animation")
            return {'CANCELLED'}
        # If OK save old animation
        len_anims = len(context.object.mcblend.animations)
        curr_anim_id = context.object.mcblend.active_animation
        if 0 <= curr_anim_id < len_anims:
            save_animation_properties(
                context.object.mcblend.animations[curr_anim_id], context)
        context.scene.timeline_markers.clear()
        # Add new animation and set its properties
        animation_new = context.object.mcblend.animations.add()
        len_anims = len(context.object.mcblend.animations)
        context.object.mcblend.active_animation=len_anims-1
        animation_new.name = f'animation{len_anims}'
        # The object properties display the property edited by this operator;
        # redraw it.
        for area in context.screen.areas:
            if area.type == 'PROPERTIES':
                area.tag_redraw()
        return {'FINISHED'}
class MCBLEND_OT_RemoveAnimation(bpy.types.Operator):
    '''
    Operator used for removing the active animation from the list.
    '''
    bl_idname = "mcblend.remove_animation"
    bl_label = "Remove current animation from the list."
    bl_options = {'UNDO', 'INTERNAL'}

    @classmethod
    def poll(cls, context):
        # Only available in object mode, with an armature active and at
        # least one animation to remove.
        if context.mode != 'OBJECT':
            return False
        if context.object.type != 'ARMATURE':
            return False
        return len(context.object.mcblend.animations) > 0

    def execute(self, context):
        # Cancel operation if there is an action being edited
        if (
                context.object.animation_data is not None and
                context.object.animation_data.action is not None):
            # TODO - automatically remove the active animation instead of printing a warning
            self.report(
                {'WARNING'},
                "Stash, push down or delete the active action before "
                "removing new animation")
            return {'CANCELLED'}
        # Remove animation
        context.object.mcblend.animations.remove(
            context.object.mcblend.active_animation)
        # Set new active animation
        last_active=context.object.mcblend.active_animation
        len_anims=len(context.object.mcblend.animations)
        if last_active > 0:
            context.object.mcblend.active_animation=last_active-1
        # Load data from new active animation
        curr_anim_id=context.object.mcblend.active_animation
        if 0 <= curr_anim_id < len_anims:
            load_animation_properties(
                context.object.mcblend.animations[curr_anim_id], context)
        # The object properties display the property edited by this operator;
        # redraw it.
        for area in context.screen.areas:
            if area.type == 'PROPERTIES':
                area.tag_redraw()
        return {'FINISHED'}
# UV group (GUI)
class MCBLEND_OT_ListUvGroups(bpy.types.Operator):
    '''
    Operator used for listing the UV-groups for GUI.
    '''
    bl_idname = "mcblend.list_uv_groups"
    bl_label = "List UV groups and save them to Enum to display them in GUI"
    bl_options = {'INTERNAL'}

    def _list_uv_groups(self, context):
        # pylint: disable=unused-argument
        # Build the (identifier, name, description) tuples required by
        # EnumProperty's `items` callback.
        items = [
            (str(i), x.name, x.name)
            for i, x in enumerate(bpy.context.scene.mcblend_uv_groups)]
        return items
    uv_groups_enum: bpy.props.EnumProperty( # type: ignore
        items=_list_uv_groups, name="UV Groups")

    def execute(self, context):
        '''
        Runs when user picks an item from the dropdown menu in uv_groups
        panel. Sets the active uv_group.
        '''
        # Set new uv_group and load its state
        new_uv_group_id=int(self.uv_groups_enum)
        context.scene.mcblend_active_uv_group=new_uv_group_id
        return {'FINISHED'}
class MCBLEND_OT_AddUvGroup(bpy.types.Operator):
    '''Operator used for creating new UV-groups.'''
    bl_idname = "mcblend.add_uv_group"
    bl_label = '''Adds new uv_group to the list.'''
    bl_options = {'UNDO', 'INTERNAL'}

    def execute(self, context):
        # If OK save old uv_group
        len_groups = len(context.scene.mcblend_uv_groups)
        # Add new uv_group and set its properties
        uv_group_new = context.scene.mcblend_uv_groups.add()
        len_groups = len(context.scene.mcblend_uv_groups)
        context.scene.mcblend_active_uv_group=len_groups-1
        uv_group_new.name = get_unused_uv_group_name('uv_group')
        # Seed each of the six cube faces with a single color mask, using a
        # distinct default color per face so faces are distinguishable.
        sides = [
            uv_group_new.side1, uv_group_new.side2, uv_group_new.side3,
            uv_group_new.side4, uv_group_new.side5, uv_group_new.side6]
        colors = [
            (0, 0.15, 0), (0.15, 0, 0.15), (0.15, 0, 0),
            (0, 0.15, 0.15), (0, 0, 0.15), (0.15, 0.15, 0)]
        for color, side in zip(colors, sides):
            mask = side.add()
            mask.mask_type = UvMaskTypes.COLOR_MASK.value
            mask.color.color = color
            # Collections must hold at least one entry for the GUI to edit.
            mask.colors.add()
            mask.stripes.add()
        return {'FINISHED'}
class MCBLEND_OT_RemoveUvGroup(bpy.types.Operator):
    '''Operator used for removing UV-groups.'''
    bl_idname = "mcblend.remove_uv_group"
    bl_label = "Remove current uv_group from the list."
    bl_options = {'UNDO', 'INTERNAL'}

    @classmethod
    def poll(cls, context):
        # Only available when there is a UV-group to remove.
        return len(context.scene.mcblend_uv_groups) > 0

    def execute(self, context):
        group_id = context.scene.mcblend_active_uv_group
        group_name = context.scene.mcblend_uv_groups[group_id].name
        # Remove uv_group
        context.scene.mcblend_uv_groups.remove(group_id)
        # Update the names of all of the meshes: clear any reference to the
        # removed group so meshes don't point at a nonexistent group.
        for obj in bpy.data.objects:
            if obj.type == "MESH":
                obj_props = obj.mcblend
                if obj_props.uv_group == group_name:
                    obj_props.uv_group = ''
        # Set new active uv_group
        if group_id > 0:
            context.scene.mcblend_active_uv_group=group_id-1
        return {'FINISHED'}
class MCBLEND_OT_CopyUvGroupSide(bpy.types.Operator):
'''Operator used for copying sides of UV-groups.'''
bl_idname = "mcblend.copy_uv_group_side"
bl_label = 'Copy active UV group side other to UV group'
bl_options = {'UNDO', 'INTERNAL'}
def _list_uv_groups(self, context):
# pylint: disable=unused-argument
items = [
(str(i), x.name, x.name)
for i, x in enumerate(bpy.context.scene.mcblend_uv_groups)]
return items
uv_groups_enum: bpy.props.EnumProperty( # type: ignore
items=_list_uv_groups, name="UV Groups")
side1: BoolProperty(name='side1') # type: ignore
side2: BoolProperty(name='side2') # type: ignore
side3: BoolProperty(name='side3') # type: ignore
side4: BoolProperty(name='side4') # type: ignore
side5: BoolProperty(name='side5') # type: ignore
side6: BoolProperty(name='side6') # type: ignore
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self)
@classmethod
def poll(cls, context):
return len(context.scene.mcblend_uv_groups) >= 1
def _copy_side(
self, context,
source_group_id: int, source_side_id: int,
target_group_id: int, target_side_id: int):
if (
source_group_id == target_group_id and
source_side_id == target_side_id
):
return # If source and target is the same don't do anything
# Get source
source_group = context.scene.mcblend_uv_groups[source_group_id]
source_sides = [
source_group.side1, source_group.side2,
source_group.side3, source_group.side4,
source_group.side5, source_group.side6]
source_masks = source_sides[source_side_id]
# Get target
target_group = context.scene.mcblend_uv_groups[target_group_id]
target_sides = [
target_group.side1, target_group.side2,
target_group.side3, target_group.side4,
target_group.side5, target_group.side6]
target_masks = target_sides[target_side_id]
# Clear target
target_masks.clear()
# Copy from source from target
for mask in source_masks:
new_mask = target_masks.add()
new_mask.mask_type = mask.mask_type
for color in mask.colors:
new_color = new_mask.colors.add()
new_color.color = color.color
new_mask.interpolate = mask.interpolate
new_mask.normalize = mask.normalize
new_mask.p1_relative = mask.p1_relative
new_mask.p2_relative = mask.p2_relative
new_mask.p1 = mask.p1
new_mask.p2 = mask.p2
for stripe in mask.stripes:
new_stripe = new_mask.stripes.add()
new_stripe.width = stripe.width
new_stripe.strength = stripe.strength
new_mask.relative_boundaries = mask.relative_boundaries
new_mask.expotent = mask.expotent
new_mask.strength = | |
bl["blacklist"]:
try:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
# group_id = op.param1
# user_id = op.param2
# subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
# except:
# pass
    # Operation 17: a member joined a group.  If the joiner is on the
    # blacklist, cascade through the bot accounts (k3 -> k2 -> k1 -> kc ->
    # kk -> ki) until one of them manages to kick the joiner; each bare
    # except swallows the failure and the next account is tried.
    if op.type == 17:
        if op.param2 in bl["blacklist"]:
            try:
                k3.kickoutFromGroup(op.param1,[op.param2])
                sendMention(op.param1, op.param2, "", " \nBlacklistDetected")
            except:
                try:
                    k2.kickoutFromGroup(op.param1,[op.param2])
                    sendMention(op.param1, op.param2, "", " \nBlacklistDetected")
                except:
                    try:
                        k1.kickoutFromGroup(op.param1,[op.param2])
                        sendMention(op.param1, op.param2, "", " \nBlacklistDetected")
                    except:
                        try:
                            kc.kickoutFromGroup(op.param1,[op.param2])
                            sendMention(op.param1, op.param2, "", " \nBlacklistDetected")
                        except:
                            try:
                                kk.kickoutFromGroup(op.param1,[op.param2])
                                sendMention(op.param1, op.param2, "", " \nBlacklistDetected")
                            except:
                                try:
                                    ki.kickoutFromGroup(op.param1,[op.param2])
                                    sendMention(op.param1, op.param2, "", " \nBlacklistDetected")
                                except:
                                    pass
    # Operation 5: someone added this account as a friend; when "autoAdd"
    # is enabled, greet them by display name (user-facing Indonesian
    # message left unchanged on purpose).
    if op.type == 5:
        print ("[ 5 ] NOTIFIED ADD CONTACT")
        if settings["autoAdd"] == True:
            cl.sendMessage(op.param1, "Halo {} terimakasih telah menambahkan saya sebagai teman :D".format(str(cl.getContact(op.param1).displayName)))
    # Operation 13: a group invitation was received (param2 = inviter,
    # param3 = invitee).  Each bot account auto-accepts an invitation
    # addressed to itself when the inviter is the admin list or one of the
    # sibling bot accounts.  Every (invitee, inviter) pair is tested by an
    # independent if-statement, mirroring the original cascade.
    if op.type == 13:
        # Invitations addressed to the main account (cl / mid).
        if op.param3 in mid:
            if op.param2 in admin:
                cl.acceptGroupInvitation(op.param1)
        if op.param3 in mid:
            if op.param2 in Amid:
                cl.acceptGroupInvitation(op.param1)
        if op.param3 in mid:
            if op.param2 in Bmid:
                cl.acceptGroupInvitation(op.param1)
        if op.param3 in mid:
            if op.param2 in Cmid:
                cl.acceptGroupInvitation(op.param1)
        if op.param3 in mid:
            if op.param2 in Dmid:
                cl.acceptGroupInvitation(op.param1)
        if op.param3 in mid:
            if op.param2 in Emid:
                cl.acceptGroupInvitation(op.param1)
        if op.param3 in mid:
            if op.param2 in Fmid:
                cl.acceptGroupInvitation(op.param1)
        # Invitations addressed to bot ki (Amid).
        if op.param3 in Amid:
            if op.param2 in mid:
                ki.acceptGroupInvitation(op.param1)
        if op.param3 in Amid:
            if op.param2 in Bmid:
                ki.acceptGroupInvitation(op.param1)
        if op.param3 in Amid:
            if op.param2 in Cmid:
                ki.acceptGroupInvitation(op.param1)
        if op.param3 in Amid:
            if op.param2 in Dmid:
                ki.acceptGroupInvitation(op.param1)
        if op.param3 in Amid:
            if op.param2 in Emid:
                ki.acceptGroupInvitation(op.param1)
        if op.param3 in Amid:
            if op.param2 in Fmid:
                ki.acceptGroupInvitation(op.param1)
        # Invitations addressed to bot kk (Bmid).
        if op.param3 in Bmid:
            if op.param2 in mid:
                kk.acceptGroupInvitation(op.param1)
        if op.param3 in Bmid:
            if op.param2 in Amid:
                kk.acceptGroupInvitation(op.param1)
        if op.param3 in Bmid:
            if op.param2 in Cmid:
                kk.acceptGroupInvitation(op.param1)
        if op.param3 in Bmid:
            if op.param2 in Dmid:
                kk.acceptGroupInvitation(op.param1)
        if op.param3 in Bmid:
            if op.param2 in Emid:
                kk.acceptGroupInvitation(op.param1)
        if op.param3 in Bmid:
            if op.param2 in Fmid:
                kk.acceptGroupInvitation(op.param1)
        # Invitations addressed to bot kc (Cmid).
        if op.param3 in Cmid:
            if op.param2 in mid:
                kc.acceptGroupInvitation(op.param1)
        if op.param3 in Cmid:
            if op.param2 in Amid:
                kc.acceptGroupInvitation(op.param1)
        if op.param3 in Cmid:
            if op.param2 in Bmid:
                kc.acceptGroupInvitation(op.param1)
        if op.param3 in Cmid:
            if op.param2 in Dmid:
                kc.acceptGroupInvitation(op.param1)
        if op.param3 in Cmid:
            if op.param2 in Emid:
                kc.acceptGroupInvitation(op.param1)
        if op.param3 in Cmid:
            if op.param2 in Fmid:
                kc.acceptGroupInvitation(op.param1)
        # Invitations addressed to bot k1 (Dmid).
        if op.param3 in Dmid:
            if op.param2 in mid:
                k1.acceptGroupInvitation(op.param1)
        if op.param3 in Dmid:
            if op.param2 in Amid:
                k1.acceptGroupInvitation(op.param1)
        if op.param3 in Dmid:
            if op.param2 in Bmid:
                k1.acceptGroupInvitation(op.param1)
        if op.param3 in Dmid:
            if op.param2 in Cmid:
                k1.acceptGroupInvitation(op.param1)
        if op.param3 in Dmid:
            if op.param2 in Emid:
                k1.acceptGroupInvitation(op.param1)
        if op.param3 in Dmid:
            if op.param2 in Fmid:
                k1.acceptGroupInvitation(op.param1)
        # Invitations addressed to bot k2 (Emid).
        if op.param3 in Emid:
            if op.param2 in mid:
                k2.acceptGroupInvitation(op.param1)
        if op.param3 in Emid:
            if op.param2 in Amid:
                k2.acceptGroupInvitation(op.param1)
        if op.param3 in Emid:
            if op.param2 in Bmid:
                k2.acceptGroupInvitation(op.param1)
        if op.param3 in Emid:
            if op.param2 in Cmid:
                k2.acceptGroupInvitation(op.param1)
        if op.param3 in Emid:
            if op.param2 in Dmid:
                k2.acceptGroupInvitation(op.param1)
        if op.param3 in Emid:
            if op.param2 in Fmid:
                k2.acceptGroupInvitation(op.param1)
        # Invitations addressed to bot k3 (Fmid).
        if op.param3 in Fmid:
            if op.param2 in mid:
                k3.acceptGroupInvitation(op.param1)
        if op.param3 in Fmid:
            if op.param2 in Amid:
                k3.acceptGroupInvitation(op.param1)
        if op.param3 in Fmid:
            if op.param2 in Bmid:
                k3.acceptGroupInvitation(op.param1)
        if op.param3 in Fmid:
            if op.param2 in Cmid:
                k3.acceptGroupInvitation(op.param1)
        if op.param3 in Fmid:
            if op.param2 in Dmid:
                k3.acceptGroupInvitation(op.param1)
        if op.param3 in Fmid:
            if op.param2 in Emid:
                k3.acceptGroupInvitation(op.param1)
        # Invitation for the main account while "AutoJoinCancel" is
        # enabled: accept, and if the group has at most
        # wait["memberscancel"] members, apologise, drop a contact card and
        # leave; otherwise stay and pull every bot account in through a
        # freshly reissued join ticket, then close ticket joining again.
        if mid in op.param3:
            if wait["AutoJoinCancel"] == True:
                G = cl.getGroup(op.param1)
                if len (G.members) <= wait["memberscancel"]:
                    cl.acceptGroupInvitation(op.param1)
                    sendMention(op.param1, op.param2, "","\nTrimaksih Kak Invit aku\nDiGroup" + str(G.name) + "\nMaaf Member Kurang Dari 30 Orang")
                    cl.sendContact(op.param1, 'ub3808de9f7df35f57fb366d157f9790a')
                    cl.leaveGroup(op.param1)
                else:
                    cl.acceptGroupInvitation(op.param1)
                    # NOTE(review): acceptGroupInvitation is called twice in a
                    # row here; the second call looks redundant — confirm
                    # before removing it.
                    cl.acceptGroupInvitation(op.param1)
                    sendMention(op.param1, op.param2, "", " \nTrimaksih Kak Invit aku\nDiGroup" + str(G.name))
                    G = cl.getGroup(op.param1)
                    G.preventedJoinByTicket = False
                    cl.updateGroup(G)
                    Ti = cl.reissueGroupTicket(op.param1)
                    ki.acceptGroupInvitationByTicket(op.param1,Ti)
                    kk.acceptGroupInvitationByTicket(op.param1,Ti)
                    kc.acceptGroupInvitationByTicket(op.param1,Ti)
                    k1.acceptGroupInvitationByTicket(op.param1,Ti)
                    k2.acceptGroupInvitationByTicket(op.param1,Ti)
                    k3.acceptGroupInvitationByTicket(op.param1,Ti)
                    G = k3.getGroup(op.param1)
                    G.preventedJoinByTicket = True
                    k3.updateGroup(G)
        # Invitation for the main account with plain "autoJoin": reject
        # groups at or below wait["members"], otherwise join and bring the
        # bots in via the same ticket dance.
        if mid in op.param3:
            if settings["autoJoin"] == True:
                G = cl.getGroup(op.param1)
                if len(G.members) <= wait["members"]:
                    cl.rejectGroupInvitation(op.param1)
                else:
                    cl.acceptGroupInvitation(op.param1)
                    sendMention(op.param1, op.param2, "", " \n<NAME> Invit aku\nDiGroup" + str(G.name))
                    G = cl.getGroup(op.param1)
                    G.preventedJoinByTicket = False
                    cl.updateGroup(G)
                    Ti = cl.reissueGroupTicket(op.param1)
                    ki.acceptGroupInvitationByTicket(op.param1,Ti)
                    kk.acceptGroupInvitationByTicket(op.param1,Ti)
                    kc.acceptGroupInvitationByTicket(op.param1,Ti)
                    k1.acceptGroupInvitationByTicket(op.param1,Ti)
                    k2.acceptGroupInvitationByTicket(op.param1,Ti)
                    k3.acceptGroupInvitationByTicket(op.param1,Ti)
                    G = k3.getGroup(op.param1)
                    G.preventedJoinByTicket = True
                    k3.updateGroup(G)
        # Per-bot handling of invitations addressed to each sub-account:
        # with "autoJoin" the bot rejects small groups and joins larger
        # ones; otherwise it scans the invitee field for blacklisted ids,
        # cancelling their invitation and kicking the inviter.
        # NOTE(review): op.param3.replace("",',') inserts a comma between
        # every character, so the subsequent split yields single
        # characters, not user ids — the intended separator was probably
        # '\x1e'.  Confirm against the event payload before changing.
        if Amid in op.param3:
            if settings["autoJoin"] == True:
                G = ki.getGroup(op.param1)
                if len(G.members) <= wait["members"]:
                    ki.rejectGroupInvitation(op.param1)
                else:
                    ki.acceptGroupInvitation(op.param1)
            else:
                Inviter = op.param3.replace("",',')
                InviterX = Inviter.split(",")
                for tag in InviterX:
                    if tag in bl["blacklist"]:
                        try:
                            ki.cancelGroupInvitation(op.param1,[tag])
                            ki.kickoutFromGroup(op.param1,[op.param2])
                        except:
                            pass
        if Bmid in op.param3:
            if settings["autoJoin"] == True:
                G = kk.getGroup(op.param1)
                if len(G.members) <= wait["members"]:
                    kk.rejectGroupInvitation(op.param1)
                else:
                    kk.acceptGroupInvitation(op.param1)
            else:
                Inviter = op.param3.replace("",',')
                InviterX = Inviter.split(",")
                for tag in InviterX:
                    if tag in bl["blacklist"]:
                        try:
                            kk.cancelGroupInvitation(op.param1,[tag])
                            kk.kickoutFromGroup(op.param1,[op.param2])
                        except:
                            pass
        if Cmid in op.param3:
            if settings["autoJoin"] == True:
                G = kc.getGroup(op.param1)
                if len(G.members) <= wait["members"]:
                    kc.rejectGroupInvitation(op.param1)
                else:
                    kc.acceptGroupInvitation(op.param1)
            else:
                Inviter = op.param3.replace("",',')
                InviterX = Inviter.split(",")
                for tag in InviterX:
                    if tag in bl["blacklist"]:
                        try:
                            kc.cancelGroupInvitation(op.param1,[tag])
                            kc.kickoutFromGroup(op.param1,[op.param2])
                        except:
                            pass
        if Dmid in op.param3:
            if settings["autoJoin"] == True:
                G = k1.getGroup(op.param1)
                if len(G.members) <= wait["members"]:
                    k1.rejectGroupInvitation(op.param1)
                else:
                    k1.acceptGroupInvitation(op.param1)
            else:
                Inviter = op.param3.replace("",',')
                InviterX = Inviter.split(",")
                for tag in InviterX:
                    if tag in bl["blacklist"]:
                        try:
                            k1.cancelGroupInvitation(op.param1,[tag])
                            k1.kickoutFromGroup(op.param1,[op.param2])
                        except:
                            pass
        if Emid in op.param3:
            if settings["autoJoin"] == True:
                G = k2.getGroup(op.param1)
                if len(G.members) <= wait["members"]:
                    k2.rejectGroupInvitation(op.param1)
                else:
                    k2.acceptGroupInvitation(op.param1)
            else:
                Inviter = op.param3.replace("",',')
                InviterX = Inviter.split(",")
                for tag in InviterX:
                    if tag in bl["blacklist"]:
                        try:
                            k2.cancelGroupInvitation(op.param1,[tag])
                            k2.kickoutFromGroup(op.param1,[op.param2])
                        except:
                            pass
        if Fmid in op.param3:
            if settings["autoJoin"] == True:
                G = k3.getGroup(op.param1)
                if len(G.members) <= wait["members"]:
                    k3.rejectGroupInvitation(op.param1)
                else:
                    k3.acceptGroupInvitation(op.param1)
            else:
                Inviter = op.param3.replace("",',')
                InviterX = Inviter.split(",")
                for tag in InviterX:
                    if tag in bl["blacklist"]:
                        try:
                            k3.cancelGroupInvitation(op.param1,[tag])
                            k3.kickoutFromGroup(op.param1,[op.param2])
                        except:
                            pass
    # Operation 13 again: when the freshly invited user (param3) is already
    # blacklisted, cancel the invitation and kick the inviter, trying each
    # bot account in turn (k3 first); each bare except falls through to
    # the next account.
    if op.type == 13:
        if op.param3 in bl['blacklist']:
            try:
                k3.cancelGroupInvitation(op.param1,[op.param3])
                k3.kickoutFromGroup(op.param1,[op.param2])
            except:
                try:
                    k2.cancelGroupInvitation(op.param1,[op.param3])
                    k2.kickoutFromGroup(op.param1,[op.param2])
                except:
                    try:
                        k1.cancelGroupInvitation(op.param1,[op.param3])
                        k1.kickoutFromGroup(op.param1,[op.param2])
                    except:
                        try:
                            kc.cancelGroupInvitation(op.param1,[op.param3])
                            kc.kickoutFromGroup(op.param1,[op.param2])
                        except:
                            try:
                                kk.cancelGroupInvitation(op.param1,[op.param3])
                                kk.kickoutFromGroup(op.param1,[op.param2])
                            except:
                                try:
                                    ki.cancelGroupInvitation(op.param1,[op.param3])
                                    ki.kickoutFromGroup(op.param1,[op.param2])
                                except:
                                    pass
    # Operation 32: a group invitation was cancelled (param2 cancelled the
    # invite of param3).  If the cancelled invitee is one of the bots or an
    # admin, blacklist the canceller and try to kick them / re-invite the
    # victim via cascading bot accounts.
    # NOTE(review): `if op.param2 in admin: pass` is a separate statement,
    # so an admin who is not also in Bots still falls into the else-branch
    # and gets blacklisted — probably intended as one combined condition.
    if op.type == 32:
        if op.param3 in Bots:
            if op.param2 in admin:
                pass
            if op.param2 in Bots:
                pass
            else:
                bl['blacklist'][op.param2] = True
                with open('bl.json', 'w') as fp:
                    json.dump(bl, fp, sort_keys=True, indent=4)
                try:
                    if op.param3 not in bl["blacklist"]:
                        kc.kickoutFromGroup(op.param1,[op.param2])
                        kc.inviteIntoGroup(op.param1,[op.param3])
                except:
                    try:
                        if op.param3 not in bl["blacklist"]:
                            kk.kickoutFromGroup(op.param1,[op.param2])
                            kk.inviteIntoGroup(op.param1,[op.param3])
                    except:
                        try:
                            if op.param3 not in bl["blacklist"]:
                                ki.kickoutFromGroup(op.param1,[op.param2])
                                ki.inviteIntoGroup(op.param1,[op.param3])
                        except:
                            try:
                                if op.param3 not in bl["blacklist"]:
                                    cl.kickoutFromGroup(op.param1,[op.param2])
                                    cl.inviteIntoGroup(op.param1,[op.param3])
                            except:
                                pass
    # Same handling when the cancelled invitee is an admin.
    if op.type == 32:
        if op.param3 in admin:
            if op.param2 in admin:
                pass
            if op.param2 in Bots:
                pass
            else:
                bl['blacklist'][op.param2] = True
                with open('bl.json', 'w') as fp:
                    json.dump(bl, fp, sort_keys=True, indent=4)
                try:
                    if op.param3 not in bl["blacklist"]:
                        kc.kickoutFromGroup(op.param1,[op.param2])
                        kc.inviteIntoGroup(op.param1,[op.param3])
                except:
                    try:
                        if op.param3 not in bl["blacklist"]:
                            kk.kickoutFromGroup(op.param1,[op.param2])
                            kk.inviteIntoGroup(op.param1,[op.param3])
                    except:
                        try:
                            if op.param3 not in bl["blacklist"]:
                                ki.kickoutFromGroup(op.param1,[op.param2])
                                ki.inviteIntoGroup(op.param1,[op.param3])
                        except:
                            try:
                                if op.param3 not in bl["blacklist"]:
                                    cl.kickoutFromGroup(op.param1,[op.param2])
                                    cl.inviteIntoGroup(op.param1,[op.param3])
                            except:
                                pass
if op.type == 19:
if op.param3 in admin:
if op.param2 in admin:
pass
if op.param2 in Bots:
pass
else:
bl['blacklist'][op.param2] = True
with open('bl.json', 'w') as fp:
json.dump(bl, fp, sort_keys=True, indent=4)
try:
cl.inviteIntoGroup(op.param1,[op.param3])
except:
try:
ki.inviteIntoGroup(op.param1,[op.param3])
except:
try:
kk.inviteIntoGroup(op.param1,[op.pqram3])
except:
try:
kc.inviteIntoGroup(op.param1,[op.parqm3])
except:
try:
k1.inviteIntoGroup(op.param1,[op.parqm3])
except:
try:
k2.inviteIntoGroup(op.param1,[op.parqm3])
except:
try:
k3.inviteIntoGroup(op.param1,[op.parqm3])
except:
pass
if op.type == 19:
if op.param3 in mid:
if op.param2 in admin:
pass
if op.param2 in Bots:
pass
else:
bl['blacklist'][op.param2] = True
with open('bl.json', 'w') as fp:
json.dump(bl, fp, sort_keys=True, indent=4)
try:
ki.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGrouoInvitation(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGrouoInvitation(op.param1)
except:
try:
G = k1.getGroup(op.param1)
kc.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
k1.updateGroup(G)
Ti = k1.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
G.preventedJoinByTicket = True
ki.updateGroup(G)
except:
pass
if op.type == 19:
if op.param3 in Amid:
if op.param2 in admin:
pass
if op.param2 in Bots:
pass
else:
#sendMention(op.param1, op.param2, "", " \nJangan main kick boss")
bl['blacklist'][op.param2] = True
with open('bl.json', 'w') as fp:
json.dump(bl, fp, sort_keys=True, indent=4)
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGrouoInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
k1.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGrouoInvitation(op.param1)
except:
try:
G = k2.getGroup(op.param1)
k1.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
k2.updateGroup(G)
Ti = k2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
G.preventedJoinByTicket = True
k2.updateGroup(G)
except:
pass
if op.type == 19:
if op.param3 in Bmid:
if op.param2 in admin:
pass
if op.param2 in Bots:
pass
else:
#sendMention(op.param1, op.param2, "", " \nJangan main kick boss")
bl['blacklist'][op.param2] = True
with open('bl.json', 'w') as fp:
json.dump(bl, fp, sort_keys=True, indent=4)
try:
kc.kickoutFromGroup(op.param1,[op.param2])
k1.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGrouoInvitation(op.param1)
except:
try:
k1.kickoutFromGroup(op.param1,[op.param2])
k2.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGrouoInvitation(op.param1)
except:
try:
G = k3.getGroup(op.param1)
k2.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = False
k3.updateGroup(G)
Ti = k3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
| |
# Data for board 1
# Copyright Symbol Facing Upwards
# Unused attributes: starting_space
B1_Data = {
"i00": {
"name": "i00",
"dots": 3,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i10"],
"moves_to": ["i03", "i33", "i30"],
"adj": {
"ul": False,
"ur": False,
"r": "i01",
"br": "i11",
"bl": "i10",
"l": False
}
},
"i01": {
"name": "i01",
"dots": 2,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i02", "i23", "i44", "i21"],
"moves_to": ["i03", "i23", "i21"],
"adj": {
"ul": False,
"ur": False,
"r": "i02",
"br": "i12",
"bl": "i11",
"l": "i00"
}
},
"i02": {
"name": "i02",
"dots": 1,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i03", "i24", "i35", "i32", "i41"],
"moves_to": ["i03", "i13", "i12", "i01"],
"adj": {
"ul": False,
"ur": False,
"r": "i03",
"br": "i13",
"bl": "i12",
"l": "i01"
}
},
"i03": {
"name": "i03",
"dots": 1,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i23", "i02", "i01", "i00"],
"moves_to": ["i14", "i13", "i02"],
"adj": {
"ul": False,
"ur": False,
"r": False,
"br": "i14",
"bl": "i13",
"l": "i02"
}
},
"i10": {
"name": "i10",
"dots": 1,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i20", "i30"],
"moves_to": ["i00", "i11", "i21", "i20"],
"adj": {
"ul": False,
"ur": "i00",
"r": "i11",
"br": "i21",
"bl": "i20",
"l": False
}
},
"i11": {
"name": "i11",
"dots": 2,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i14", "i43", "i10"],
"moves_to": ["i13", "i33", "i31"],
"adj": {
"ul": "i00",
"ur": "i01",
"r": "i12",
"br": "i22",
"bl": "i21",
"l": "i10"
}
},
"i12": {
"name": "i12",
"dots": 4,
"starting_space": True, # Is potential starting space
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i02", "i34"],
"moves_to": ["i54", "i50"],
"adj": {
"ul": "i01",
"ur": "i02",
"r": "i13",
"br": "i23",
"bl": "i22",
"l": "i11"
}
},
"i13": {
"name": "i13",
"dots": 4,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i02", "i03", "i51", "i11"],
"moves_to": ["i51"],
"adj": {
"ul": "i02",
"ur": "i03",
"r": "i14",
"br": "i24",
"bl": "i23",
"l": "i12"
}
},
"i14": {
"name": "i14",
"dots": 3,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i03", "i36", "i34", "i43"],
"moves_to": ["i43", "i11"],
"adj": {
"ul": "i03",
"ur": False,
"r": False,
"br": "i25",
"bl": "i24",
"l": "i13"
}
},
"i20": {
"name": "i20",
"dots": 1,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i10", "i31"],
"moves_to": ["i10", "i21", "i31", "i30"],
"adj": {
"ul": False,
"ur": "i10",
"r": "i21",
"br": "i31",
"bl": "i30",
"l": False
}
},
"i21": {
"name": "i21",
"dots": 2,
"starting_space": True, # Is potential starting space
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i10", "i01", "i23", "i42", "i31", "i20"],
"moves_to": ["i01", "i23", "i42", "i40"],
"adj": {
"ul": "i10",
"ur": "i11",
"r": "i22",
"br": "i32",
"bl": "i31",
"l": "i20"
}
},
"i22": {
"name": "i22",
"dots": 3,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i24", "i25", "i50"],
"moves_to": ["i25", "i53", "i50"],
"adj": {
"ul": "i11",
"ur": "i12",
"r": "i23",
"br": "i33",
"bl": "i32",
"l": "i21"
}
},
"i23": {
"name": "i23",
"dots": 2,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i01", "i42", "i21"],
"moves_to": ["i01", "i03", "i25", "i44", "i42", "i21"],
"adj": {
"ul": "i12",
"ur": "i13",
"r": "i24",
"br": "i34",
"bl": "i33",
"l": "i22"
}
},
"i24": {
"name": "i24",
"dots": 2,
"starting_space": True, # Is potential starting space
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": [],
"moves_to": ["i02", "i45", "i43", "i22"],
"adj": {
"ul": "i13",
"ur": "i14",
"r": "i25",
"br": "i35",
"bl": "i34",
"l": "i23"
}
},
"i25": {
"name": "i25",
"dots": 3,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i23", "i22"],
"moves_to": ["i53", "i22"],
"adj": {
"ul": "i14",
"ur": False,
"r": False,
"br": "i36",
"bl": "i35",
"l": "i24"
}
},
"i30": {
"name": "i30",
"dots": 2,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i20", "i00", "i31", "i40"],
"moves_to": ["i10", "i32", "i50"],
"adj": {
"ul": False,
"ur": "i20",
"r": "i31",
"br": "i40",
"bl": False,
"l": False
}
},
"i31": {
"name": "i31",
"dots": 1,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i20", "i11", "i40"],
"moves_to": ["i20", "i21", "i32", "i41", "i40", "i30"],
"adj": {
"ul": "i20",
"ur": "i21",
"r": "i32",
"br": "i41",
"bl": "i40",
"l": "i30"
}
},
"i32": {
"name": "i32",
"dots": 3,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i34", "i35", "i52", "i62", "i31", "i30"],
"moves_to": ["i02", "i35", "i62"],
"adj": {
"ul": "i21",
"ur": "i22",
"r": "i33",
"br": "i42",
"bl": "i41",
"l": "i31"
}
},
"i33": {
"name": "i33",
"dots": "H",
"starting_space": False,
"has_piece": False,
"is_hole": True,
"sub_dots": False,
"return_moves": ["i11", "i00", "i63"],
"moves_to": [],
"adj": {
"ul": "i22",
"ur": "i23",
"r": "i34",
"br": "i43",
"bl": "i42",
"l": "i32"
}
},
"i34": {
"name": "i34",
"dots": 2,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i36", "i54", "i52"],
"moves_to": ["i12", "i14", "i36", "i54", "i52", "i32"],
"adj": {
"ul": "i23",
"ur": "i24",
"r": "i35",
"br": "i44",
"bl": "i43",
"l": "i33"
}
},
"i35": {
"name": "i35",
"dots": 3,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i45", "i62", "i32"],
"moves_to": ["i02", "i62", "i32"],
"adj": {
"ul": "i24",
"ur": "i25",
"r": "i36",
"br": "i45",
"bl": "i44",
"l": "i34"
}
},
"i36": {
"name": "i36",
"dots": 2,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i45", "i54", "i63", "i34"],
"moves_to": ["i14", "i54", "i34"],
"adj": {
"ul": "i25",
"ur": False,
"r": False,
"br": False,
"bl": "i45",
"l": "i35"
}
},
"i40": {
"name": "i40",
"dots": 1,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i31", "i21", "i42", "i43", "i44"],
"moves_to": ["i30", "i31", "i41", "i50"],
"adj": {
"ul": "i30",
"ur": "i31",
"r": "i41",
"br": "i50",
"bl": False,
"l": False
}
},
"i41": {
"name": "i41",
"dots": 4,
"starting_space": True, # Is potential starting space
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i31", "i40"],
"moves_to": ["i02", "i45"],
"adj": {
"ul": "i31",
"ur": "i32",
"r": "i42",
"br": "i51",
"bl": "i50",
"l": "i40"
}
},
"i42": {
"name": "i42",
"dots": 2,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i21", "i23"],
"moves_to": ["i21", "i23", "i44", "i62", "i60", "i40"],
"adj": {
"ul": "i32",
"ur": "i33",
"r": "i43",
"br": "i52",
"bl": "i51",
"l": "i41"
}
},
"i43": {
"name": "i43",
"dots": 3,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i24", "i14", "i53"],
"moves_to": ["i11", "i14", "i40"],
"adj": {
"ul": "i33",
"ur": "i34",
"r": "i44",
"br": "i53",
"bl": "i52",
"l": "i42"
}
},
"i44": {
"name": "i44",
"dots": 4,
"starting_space": True, # Is potential starting space
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i23", "i45", "i53", "i42"],
"moves_to": ["i01", "i40"],
"adj": {
"ul": "i34",
"ur": "i35",
"r": "i45",
"br": "i54",
"bl": "i53",
"l": "i43"
}
},
"i45": {
"name": "i45",
"dots": 1,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i24", "i41"],
"moves_to": ["i35", "i36", "i54", "i44"],
"adj": {
"ul": "i35",
"ur": "i36",
"r": False,
"br": False,
"bl": "i54",
"l": "i44"
}
},
"i50": {
"name": "i50",
"dots": 3,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i40", "i30", "i22", "i12", "i52", "i60"],
"moves_to": ["i22", "i53"],
"adj": {
"ul": "i40",
"ur": "i41",
"r": "i51",
"br": "i60",
"bl": False,
"l": False
}
},
"i51": {
"name": "i51",
"dots": 4,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i13", "i61", "i60"],
"moves_to": ["i13"],
"adj": {
"ul": "i41",
"ur": "i42",
"r": "i52",
"br": "i61",
"bl": "i60",
"l": "i50"
}
},
"i52": {
"name": "i52",
"dots": 2,
"starting_space": True, # Is potential starting space
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i34", "i53", "i54", "i61"],
"moves_to": ["i32", "i34", "i54", "i50"],
"adj": {
"ul": "i42",
"ur": "i43",
"r": "i53",
"br": "i62",
"bl": "i61",
"l": "i51"
}
},
"i53": {
"name": "i53",
"dots": 1,
"starting_space": False,
"has_piece": False,
"is_hole": False,
"sub_dots": False,
"return_moves": ["i22", "i25", "i50"],
"moves_to": ["i43", "i44", "i54", "i63", "i62", "i52"],
"adj": {
"ul": "i43",
"ur": "i44",
"r": "i54",
"br": "i63",
"bl": "i62",
"l": "i52"
}
| |
'niedergelegt', 'ist', '"',
',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast', '"', 'Phrasenmäher', '"',
'der', '"', 'Bild-Zeitung', '.']]
verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
en_tokenizer.set_vocab(vocab)
de_tokenizer.set_vocab(vocab)
verify_pickleble(en_tokenizer, SpacyTokenizer)
verify_pickleble(de_tokenizer, SpacyTokenizer)
verify_encode_token_with_offsets(en_tokenizer, EN_SAMPLES)
verify_encode_token_with_offsets(de_tokenizer, DE_SAMPLES)
# Test for loading spacy tokenizer from specifying the "model" flag
en_tokenizer = SpacyTokenizer(model='en_core_web_lg')
out = en_tokenizer.encode(EN_SAMPLES)
def test_yttm_tokenizer():
    """YouTokenToMe BPE tokenizer: encoding, offsets, pickling and decoding.

    Downloads a small pretrained test model into a temporary directory and
    checks encode / encode-with-offsets / decode against the shared
    SUBWORD_TEST_SAMPLES fixtures.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'yttm.model')
        download(url=get_repo_url() + 'tokenizer_test_models/yttm/test_ende_yttm-6f2c39.model',
                 path=model_path)
        tokenizer = YTTMTokenizer(model_path=model_path)
        # Expected subword pieces for the three fixture sentences.
        gt_tokenized = [['▁He', 'll', 'o', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you', '▁',
                         'Ⅷ', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
                        ['▁Gl', 'u', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
                         '!', '!'],
                        ['▁Gl', 'u', 'on', 'N', 'L', 'P', '-A', 'm', 'az', 'on', '-H', 'a', 'ib',
                         'in', '-L', 'e', 'on', 'ard', '-S', 'hen', 'g', '-S', 'h', 'u', 'ai',
                         '-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!',
                         '@', '#', '▁', "'", 'ab', 'c', "'"]]
        # Expected (start, end) character offsets, aligned with gt_tokenized.
        gt_offsets = [[(0, 2), (2, 4), (4, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
                       (17, 21), (21, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
                       (31, 32), (32, 33), (33, 35)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
                       (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 10), (10, 11), (11, 13),
                       (13, 15), (15, 17), (17, 18), (18, 20), (20, 22), (22, 24), (24, 25), (25, 27),
                       (27, 30), (30, 32), (32, 35), (35, 36), (36, 38), (38, 39), (39, 40), (40, 42),
                       (42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53), (53, 54),
                       (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62),
                       (62, 63), (63, 65), (65, 66), (66, 67)]]
        # Decoding from int ids maps out-of-vocab pieces to '<UNK>';
        # decoding from string pieces restores the original characters.
        gt_int_decode = ['Hello, y<UNK>all! How are you <UNK> <UNK> <UNK> <UNK> ?',
                         'GluonNLP is great!!!!!!',
                         'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# <UNK>abc<UNK>']
        gt_str_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
                         'GluonNLP is great!!!!!!',
                         "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, YTTMTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        # Begin to verify decode: both a single sentence and a batch round-trip.
        for sample_sentences, ele_gt_int_decode, ele_gt_str_decode in [(SUBWORD_TEST_SAMPLES[0], gt_int_decode[0], gt_str_decode[0]),
                                                                       (SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)]:
            int_decode = tokenizer.decode(tokenizer.encode(sample_sentences, int))
            str_decode = tokenizer.decode(tokenizer.encode(sample_sentences, str))
            assert int_decode == ele_gt_int_decode
            assert str_decode == ele_gt_str_decode
        # The model file is removed while the tokenizer is still in use: it
        # must keep working without re-reading the file from disk.
        os.remove(model_path)
        assert tokenizer.decode([]) == ''
        assert tokenizer.decode([[]]) == ['']
@pytest.mark.seed(123)
def test_sentencepiece_tokenizer():
    """End-to-end checks for the sentencepiece-based tokenizers.

    Case 1 verifies tokens, character offsets, pickling and decoding against
    fixed ground truth; Case 2 checks the ``lowercase`` option; Case 3 checks
    that subword-regularization sampling (``nbest``/``alpha``) produces
    non-deterministic encodings; the final case exercises ``T5Tokenizer`` with
    ``extra_ids`` sentinel tokens appended to the vocabulary.
    """
    with tempfile.TemporaryDirectory() as dir_path:
        model_path = os.path.join(dir_path, 'spm.model')
        download(url=get_repo_url()
                + 'tokenizer_test_models/sentencepiece/case1/test_ende-a9bee4.model',
                path=model_path)
        # Case1
        tokenizer = SentencepieceTokenizer(model_path)
        # Expected subword pieces for each sentence in SUBWORD_TEST_SAMPLES.
        gt_tokenized = [['▁Hel', 'lo', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you',
                         '▁', 'VI', 'II', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
                        ['▁G', 'lu', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
                         '!', '!'],
                        ['▁G', 'lu', 'on', 'N', 'L', 'P', '-', 'A', 'ma', 'zo', 'n', '-', 'H', 'ai',
                         'bin', '-', 'L', 'e', 'on', 'ard', '-', 'S', 'hen', 'g', '-', 'S', 'hu', 'ai',
                         '-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!', '@',
                         '#', '▁', "'", 'ab', 'c', "'"]]
        # Expected (start, end) character offsets, one pair per piece above.
        gt_offsets = [[(0, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
                       (21, 25), (25, 26), (26, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
                       (31, 32), (32, 33), (33, 35)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
                       (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
                      [(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 12),
                       (12, 14), (14, 15), (15, 16), (16, 17), (17, 19), (19, 22), (22, 23), (23, 24),
                       (24, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36), (36, 37),
                       (37, 38), (38, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48), (48, 51),
                       (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59),
                       (59, 60), (60, 61), (61, 62), (62, 63), (63, 65), (65, 66), (66, 67)]]
        # Expected detokenized strings when decoding from token ids
        # (unknown pieces render as " ⁇ " in sentencepiece output).
        gt_int_decode = ['Hello, y ⁇ all! How are you VIII ⁇ ⁇ ⁇ ?',
                         'GluonNLP is great!!!!!!',
                         'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:! ⁇ # ⁇ abc ⁇ ']
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        verify_pickleble(tokenizer, SentencepieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode)
        # Case2, lower_case
        gt_lower_case_int_decode = ['hello, y ⁇ all! how are you viii ⁇ ⁇ ⁇ ?',
                                    'gluonnlp is great!!!!!!',
                                    'gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:! ⁇ # ⁇ abc ⁇ ']
        tokenizer = SentencepieceTokenizer(model_path, lowercase=True)
        verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_lower_case_int_decode)
        # Case3, Use the sentencepiece regularization commands, we test whether we can obtain different encoding results
        tokenizer = SentencepieceTokenizer(model_path, lowercase=True, nbest=-1, alpha=1.0)
        has_different_encode_out = False
        encode_out = None
        # With sampling enabled, at least one of 10 encodings of the same
        # sentence is expected to differ from the first one.
        for _ in range(10):
            if encode_out is None:
                encode_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
            else:
                ele_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
                if ele_out != encode_out:
                    has_different_encode_out = True
                    break
        assert has_different_encode_out
        os.remove(model_path)

    # Case of T5 Tokenizer
    with tempfile.TemporaryDirectory() as dir_path:
        vocab_path = os.path.join(dir_path, 't5_spm.model')
        download(
            url=get_repo_url() + 'tokenizer_test_models/sentencepiece/case_t5/test_t5spm-5f05e7.model',
            path=vocab_path
        )
        extra_ids = 100
        tokenizer = T5Tokenizer(vocab_path, extra_ids)
        # Expected subword pieces for each sentence in SUBWORD_TEST_SAMPLES.
        gt_tokenized = [
            ['▁Hello', ',', '▁', 'y', "'", 'all', '!', '▁How', '▁are', '▁you', '▁VIII', '▁', '😁',
             '▁', '😁', '▁', '😁', '▁', '?'],
            ['▁', 'Glu', 'on', 'N', 'LP', '▁is', '▁great', '!', '!!!!!'],
            ['▁', 'Glu', 'on', 'N', 'LP', '-', 'Am', 'a', 'zon', '-', 'H', 'a', 'i', 'bin', '-',
             'Le', 'on', 'ard', '-', 'She', 'ng', '-', 'Sh', 'u', 'a', 'i', '-', 'X', 'ing', 'j',
             'i', 'an', '.....', '/', ':', '!', '@', '#', '▁', "'", 'a', 'b', 'c', "'"]
        ]
        # Expected (start, end) character offsets, one pair per piece above.
        gt_offsets = [
            [(0, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21), (21, 25),
             (25, 27), (27, 28), (28, 29), (29, 30), (30, 31), (31, 32), (32, 33), (33, 34), (34, 35)],
            [(0, 0), (0, 3), (3, 5), (5, 6), (6, 8), (8, 11), (11, 17), (17, 18), (18, 23)],
            [(0, 0), (0, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 11), (11, 12), (12, 15), (15, 16),
             (16, 17), (17, 18), (18, 19), (19, 22), (22, 23), (23, 25), (25, 27), (27, 30), (30, 31),
             (31, 34), (34, 36), (36, 37), (37, 39), (39, 40), (40, 41), (41, 42), (42, 43), (43, 44),
             (44, 47), (47, 48), (48, 49), (49, 51), (51, 56), (56, 57), (57, 58), (58, 59), (59, 60),
             (60, 61), (61, 62), (62, 63), (63, 64), (64, 65), (65, 66), (66, 67)]
        ]
        gt_int_decode = [
            "Hello, y'all! How are you VIII ⁇ ⁇ ⁇ ?",
            'GluonNLP is great!!!!!!',
            "GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"
        ]
        # The extra_ids sentinel tokens are appended after the base
        # sentencepiece vocabulary in descending order:
        # <extra_id_99>, ..., <extra_id_0>.
        inserted_special_tokens = list('<extra_id_{}>'.format(i) for i in range(extra_ids - 1, -1, -1))
        assert list(
            tokenizer.vocab.to_tokens(i) for i in range(len(tokenizer._sp_model), len(tokenizer._vocab))
        ) == inserted_special_tokens, 'Some <extra_id> tokens are not properly inserted.'
        verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
        # NOTE(review): pickling is verified against SentencepieceTokenizer,
        # not T5Tokenizer -- presumably T5Tokenizer derives from it; confirm.
        verify_pickleble(tokenizer, SentencepieceTokenizer)
        verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
        verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode)
        os.remove(vocab_path)
def test_subword_nmt_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'subword_nmt.model')
download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende-d189ff.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'subword_nmt.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende_vocab-900f81.json',
path=vocab_path)
# Case 1
tokenizer = SubwordNMTTokenizer(model_path, vocab_path)
gt_tokenized = [["Hel", "lo", ",</w>", "y", "\'", "all", "!</w>", "How</w>", "are</w>", "you</w>",
"Ⅷ</w>", "😁</w>", "😁</w>", "😁</w>", "?</w>"],
["Gl", "u", "on", "N", "L", "P</w>", "is</w>", "great", "!", "!", "!", "!!",
"!</w>"],
["Gl", "u", "on", "N", "L", "P", "-", "Amaz", "on-", "H", "ai", "b", "in-", "Le",
"on", "ard", "-", "Sh", "eng", "-", "Sh", "u", "ai", "-", "X", "ing", "ji",
"an", "..", "...", "/", ":", "!", "@", "#</w>", "\'", "ab", "c", "\'</w>"]]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
(22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
(18, 19), (19, 20), (20, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), | |
# coding: utf-8
"""
Harmony Connect
    An easy-to-use API that helps you access the Factom blockchain.  # noqa: E501
OpenAPI spec version: 1.0.17
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from harmony_connect_client.api_client import ApiClient
class EntriesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_entries_by_chain_id(self, chain_id, **kwargs): # noqa: E501
"""Get Chain's Entries # noqa: E501
List all entries contained on the specified chain. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entries_by_chain_id(chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str chain_id: Chain identifier (required)
:param int limit: The number of items you would like back in each page.
:param int offset: The offset parameter allows you to select which item you would like to start from when you get back a list from Connect. For example, if you've already seen the first 15 items and you'd like the next set, you would send an offset of 15. `offset=0` starts from the first item of the set and is the default position.
:param str stages: The immutability stages you want to restrict results to. You can choose any from `replicated`, `factom`, and `anchored`. If you would like to search among multiple stages, send them in a comma separated string. For example: `'replicated,factom'`.
:return: EntryList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entries_by_chain_id_with_http_info(chain_id, **kwargs) # noqa: E501
else:
(data) = self.get_entries_by_chain_id_with_http_info(chain_id, **kwargs) # noqa: E501
return data
    def get_entries_by_chain_id_with_http_info(self, chain_id, **kwargs):  # noqa: E501
        """Get Chain's Entries  # noqa: E501

        List all entries contained on the specified chain.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_entries_by_chain_id_with_http_info(chain_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str chain_id: Chain identifier (required)
        :param int limit: The number of items you would like back in each page.
        :param int offset: The offset parameter allows you to select which item you would like to start from when you get back a list from Connect. For example, if you've already seen the first 15 items and you'd like the next set, you would send an offset of 15. `offset=0` starts from the first item of the set and is the default position.
        :param str stages: The immutability stages you want to restrict results to. You can choose any from `replicated`, `factom`, and `anchored`. If you would like to search among multiple stages, send them in a comma separated string. For example: `'replicated,factom'`.
        :return: EntryList
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must stay the very first statement so it captures
        # exactly the declared parameters (self, chain_id) plus the raw
        # `kwargs` dict -- do not insert statements above this line.
        local_var_params = locals()

        all_params = ['chain_id', 'limit', 'offset', 'stages']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_entries_by_chain_id" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'chain_id' is set
        if ('chain_id' not in local_var_params or
                local_var_params['chain_id'] is None):
            raise ValueError("Missing the required parameter `chain_id` when calling `get_entries_by_chain_id`")  # noqa: E501

        collection_formats = {}

        # `chain_id` is interpolated into the URL path.
        path_params = {}
        if 'chain_id' in local_var_params:
            path_params['chain_id'] = local_var_params['chain_id']  # noqa: E501

        # Optional paging / filtering parameters go in the query string.
        query_params = []
        if 'limit' in local_var_params:
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'offset' in local_var_params:
            query_params.append(('offset', local_var_params['offset']))  # noqa: E501
        if 'stages' in local_var_params:
            query_params.append(('stages', local_var_params['stages']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['AppId', 'AppKey']  # noqa: E501

        return self.api_client.call_api(
            '/chains/{chain_id}/entries', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EntryList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_entry_by_hash(self, chain_id, entry_hash, **kwargs): # noqa: E501
"""Get Entry Info # noqa: E501
Returns information about a specific entry on Connect. The requested entry must be specified using the Chain ID and Entry Hash. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entry_by_hash(chain_id, entry_hash, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str chain_id: Chain identifier (required)
:param str entry_hash: The SHA256 hash of the entry. (required)
:return: Entry
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entry_by_hash_with_http_info(chain_id, entry_hash, **kwargs) # noqa: E501
else:
(data) = self.get_entry_by_hash_with_http_info(chain_id, entry_hash, **kwargs) # noqa: E501
return data
    def get_entry_by_hash_with_http_info(self, chain_id, entry_hash, **kwargs):  # noqa: E501
        """Get Entry Info  # noqa: E501

        Returns information about a specific entry on Connect. The requested entry must be specified using the Chain ID and Entry Hash.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_entry_by_hash_with_http_info(chain_id, entry_hash, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str chain_id: Chain identifier (required)
        :param str entry_hash: The SHA256 hash of the entry. (required)
        :return: Entry
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must stay the very first statement so it captures
        # exactly the declared parameters (self, chain_id, entry_hash) plus
        # the raw `kwargs` dict -- do not insert statements above this line.
        local_var_params = locals()

        all_params = ['chain_id', 'entry_hash']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_entry_by_hash" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'chain_id' is set
        if ('chain_id' not in local_var_params or
                local_var_params['chain_id'] is None):
            raise ValueError("Missing the required parameter `chain_id` when calling `get_entry_by_hash`")  # noqa: E501
        # verify the required parameter 'entry_hash' is set
        if ('entry_hash' not in local_var_params or
                local_var_params['entry_hash'] is None):
            raise ValueError("Missing the required parameter `entry_hash` when calling `get_entry_by_hash`")  # noqa: E501

        collection_formats = {}

        # Both identifiers are interpolated into the URL path.
        path_params = {}
        if 'chain_id' in local_var_params:
            path_params['chain_id'] = local_var_params['chain_id']  # noqa: E501
        if 'entry_hash' in local_var_params:
            path_params['entry_hash'] = local_var_params['entry_hash']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['AppId', 'AppKey']  # noqa: E501

        return self.api_client.call_api(
            '/chains/{chain_id}/entries/{entry_hash}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Entry',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_first_entry(self, chain_id, **kwargs): # noqa: E501
"""Get Chain's First Entry # noqa: E501
Retrieve the first entry that has been saved to this chain. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_first_entry(chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str chain_id: Chain identifier (required)
:return: Entry
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_first_entry_with_http_info(chain_id, **kwargs) # noqa: E501
else:
(data) = self.get_first_entry_with_http_info(chain_id, **kwargs) # noqa: E501
return data
def get_first_entry_with_http_info(self, chain_id, **kwargs): # noqa: E501
"""Get Chain's First Entry # noqa: E501
Retrieve the first entry that has been saved to this chain. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_first_entry_with_http_info(chain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str chain_id: Chain identifier (required)
:return: Entry
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['chain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_first_entry" % key
)
local_var_params[key] = | |
df2 = ddf2.compute().sort_values(["b", "c"])
df1 = df1.sort_values(["b", "c"])
assert_eq(df1[["b", "c"]], df2[["b", "c"]], check_index=False)
else:
assert_eq(ddf1, ddf2, check_divisions=False, check_index=False)
@write_read_engines()
@pytest.mark.parametrize("aggregate_files", ["a", "b"])
def test_chunksize_aggregate_files(tmpdir, write_engine, read_engine, aggregate_files):
    """Aggregating partitioned files by column "a" or "b" yields the expected
    partition counts, without corrupting the data."""
    nrows = 100
    source = pd.DataFrame(
        {
            "a": np.random.choice(["apple", "banana", "carrot"], size=nrows),
            "b": np.random.choice(["small", "large"], size=nrows),
            "c": np.random.random(size=nrows),
            "d": np.random.randint(1, 100, size=nrows),
        }
    )
    dd.from_pandas(source, npartitions=9).to_parquet(
        str(tmpdir),
        engine=write_engine,
        partition_on=["a", "b"],
        write_index=False,
    )
    result = dd.read_parquet(
        str(tmpdir),
        engine=read_engine,
        chunksize="1MiB",
        aggregate_files=aggregate_files,
    )

    # "a" has three categories; aggregating on "b" keeps the 3 * 2
    # (a, b)-combinations separate, giving six partitions.
    expected_npartitions = {"a": 3, "b": 6}[aggregate_files]
    assert result.npartitions == expected_npartitions

    # Aggregation must not alter the data itself.
    got = result.compute().sort_values(["c", "d"])
    expect = source.sort_values(["c", "d"])
    assert_eq(expect[["c", "d"]], got[["c", "d"]], check_index=False)
@pytest.mark.parametrize("metadata", [True, False])
@pytest.mark.parametrize("chunksize", [None, 1024, 4096, "1MiB"])
def test_chunksize(tmpdir, chunksize, engine, metadata):
    """``chunksize`` aggregates parquet row-groups into fewer partitions."""
    row_group_size = 5
    nrows = 100
    frame = pd.DataFrame(
        {
            "a": np.random.choice(["apple", "banana", "carrot"], size=nrows),
            "b": np.random.random(size=nrows),
            "c": np.random.randint(1, 5, size=nrows),
            "index": np.arange(0, nrows),
        }
    ).set_index("index")
    ddf_written = dd.from_pandas(frame, npartitions=2)
    ddf_written.to_parquet(
        str(tmpdir),
        engine="pyarrow",
        row_group_size=row_group_size,
        write_metadata_file=metadata,
    )

    if metadata:
        path = str(tmpdir)
    else:
        # No global _metadata file was written; point at the data files
        # with a glob pattern instead of the dataset directory.
        dirname = str(tmpdir)
        assert "_metadata" not in os.listdir(dirname)
        path = os.path.join(dirname, "*.parquet")

    ddf_read = dd.read_parquet(
        path,
        engine=engine,
        chunksize=chunksize,
        split_row_groups=True,
        gather_statistics=True,
        index="index",
        aggregate_files=True,
    )
    assert_eq(ddf_written, ddf_read, check_divisions=False)

    num_row_groups = nrows // row_group_size
    if not chunksize:
        # Without a chunksize each row-group becomes its own partition.
        assert ddf_read.npartitions == num_row_groups
    else:
        # With a chunksize we must actually be aggregating row-groups.
        assert ddf_read.npartitions < num_row_groups
        if chunksize == "1MiB":
            # The largest chunksize collapses everything to one partition.
            assert ddf_read.npartitions == 1
@write_read_engines()
def test_roundtrip_pandas_chunksize(tmpdir, write_engine, read_engine):
    """A pandas-written parquet file can be read back with ``chunksize``."""
    path = str(tmpdir.join("test.parquet"))
    pandas_engine = "pyarrow" if write_engine.startswith("pyarrow") else "fastparquet"
    pdf = df.copy()
    pdf.index.name = "index"
    pdf.to_parquet(path, engine=pandas_engine)

    ddf_read = dd.read_parquet(
        path,
        engine=read_engine,
        chunksize="10 kiB",
        gather_statistics=True,
        split_row_groups=True,
        index="index",
    )
    assert_eq(pdf, ddf_read)
@FASTPARQUET_MARK
def test_read_pandas_fastparquet_partitioned(tmpdir, engine):
    """Partitioned datasets written via pandas/fastparquet read back fully."""
    frame = pd.DataFrame(
        [{"str": str(i), "int": i, "group": "ABC"[i % 3]} for i in range(6)]
    )
    root = str(tmpdir)
    frame.to_parquet(root, partition_cols=["group"], engine="fastparquet")
    ddf_read = dd.read_parquet(root, engine=engine)
    # Both the column-first and compute-first access paths see all six rows.
    assert len(ddf_read["group"].compute()) == 6
    assert len(ddf_read.compute().group) == 6
def test_read_parquet_getitem_skip_when_getting_read_parquet(tmpdir, engine):
    """Column projection must still reach the parquet IO layer after optimize.

    Regression test for https://github.com/dask/dask/issues/5893.
    """
    frame = pd.DataFrame({"A": [1, 2, 3, 4, 5, 6], "B": ["a", "b", "c", "d", "e", "f"]})
    path = os.path.join(str(tmpdir), "data.parquet")
    pandas_engine = "pyarrow" if engine.startswith("pyarrow") else "fastparquet"
    frame.to_parquet(path, engine=pandas_engine)

    ddf = dd.read_parquet(path, engine=engine)
    # Optimizing a column selection together with the full frame must not error.
    a, b = dask.optimize(ddf["A"], ddf)

    # The getitem optimization should narrow the IO layer to column "A" only.
    ddf = ddf["A"]
    dsk = optimize_dataframe_getitem(ddf.dask, keys=[(ddf._name, 0)])
    read_layer_name = next(key for key in dsk.layers if key.startswith("read-parquet"))
    io_layer = dsk.layers[read_layer_name]
    assert isinstance(io_layer, DataFrameIOLayer)
    assert io_layer.columns == ["A"]
@pytest.mark.parametrize("gather_statistics", [None, True])
@write_read_engines()
def test_filter_nonpartition_columns(
    tmpdir, write_engine, read_engine, gather_statistics
):
    """Row filters on ordinary (non-partition) columns are honored."""
    path = str(tmpdir)
    frame = pd.DataFrame(
        {
            "id": [1, 2, 3, 4] * 4,
            "time": np.arange(16),
            "random": np.random.choice(["cat", "dog"], size=16),
        }
    )
    dd.from_pandas(frame, npartitions=4).to_parquet(
        path, write_index=False, partition_on=["id"], engine=write_engine
    )
    ddf_read = dd.read_parquet(
        path,
        index=False,
        engine=read_engine,
        gather_statistics=gather_statistics,
        filters=[(("time", "<", 5))],
    )
    result = ddf_read.compute()
    # Every surviving row must satisfy the filter predicate.
    assert len(result) == len(result[result["time"] < 5])
    assert result["time"].max() < 5
@PYARROW_MARK
def test_pandas_metadata_nullable_pyarrow(tmpdir):
    """Nullable extension dtypes round-trip through the pyarrow engine."""
    path = str(tmpdir)
    ddf_written = dd.from_pandas(
        pd.DataFrame(
            {
                "A": pd.array([1, None, 2], dtype="Int64"),
                "B": pd.array(["dog", "cat", None], dtype="str"),
            }
        ),
        npartitions=1,
    )
    ddf_written.to_parquet(path, engine="pyarrow")
    roundtripped = dd.read_parquet(path, engine="pyarrow")
    assert_eq(ddf_written, roundtripped, check_index=False)
@PYARROW_MARK
def test_pandas_timestamp_overflow_pyarrow(tmpdir):
    """Out-of-bounds parquet timestamps raise by default, but a custom engine
    can clamp them into pandas' representable nanosecond range.

    Writes ms-precision datetimes spanning nearly the full int64 range, checks
    that the stock pyarrow engine raises ArrowInvalid on read, then reads the
    same file successfully with a clamping engine subclass.
    """
    info = np.iinfo(np.dtype("int64"))
    # Start at min+2 to stay clear of the int64 minimum, otherwise span the range.
    arr_numeric = np.linspace(
        start=info.min + 2, stop=info.max, num=1024, dtype="int64"
    )
    arr_dates = arr_numeric.astype("datetime64[ms]")
    table = pa.Table.from_arrays([pa.array(arr_dates)], names=["ts"])
    pa.parquet.write_table(
        table, f"{tmpdir}/file.parquet", use_deprecated_int96_timestamps=False
    )

    # This will raise by default due to overflow when casting ms -> ns.
    with pytest.raises(pa.lib.ArrowInvalid) as e:
        dd.read_parquet(str(tmpdir), engine="pyarrow").compute()
    assert "out of bounds" in str(e.value)

    if pa_version >= parse_version("5.0.0"):
        from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine as ArrowEngine
    else:
        from dask.dataframe.io.parquet.arrow import ArrowEngine

    class ArrowEngineWithTimestampClamp(ArrowEngine):
        @classmethod
        def clamp_arrow_datetimes(cls, arrow_table: pa.Table) -> pa.Table:
            """Constrain datetimes to be valid for pandas.

            Pandas works in ns precision, so values stored with coarser units
            must still fit in int64 after being scaled up to nanoseconds.
            """
            new_columns = []
            for i, col in enumerate(arrow_table.columns):
                if pa.types.is_timestamp(col.type) and (
                    col.type.unit in ("s", "ms", "us")
                ):
                    # Nanoseconds per unit. BUGFIX: the "s" entry was
                    # previously 1_0000_000_000 (1e10); one second is
                    # 1_000_000_000 ns (1e9), matching the ms/us entries.
                    multiplier = {"s": 1_000_000_000, "ms": 1_000_000, "us": 1_000}[
                        col.type.unit
                    ]
                    original_type = col.type
                    series: pd.Series = col.cast(pa.int64()).to_pandas()
                    info = np.iinfo(np.dtype("int64"))
                    # Constrain data to the range that survives the ns cast.
                    series.clip(
                        lower=info.min // multiplier + 1,
                        upper=info.max // multiplier,
                        inplace=True,
                    )
                    new_array = pa.array(series, pa.int64())
                    new_array = new_array.cast(original_type)
                    new_columns.append(new_array)
                else:
                    new_columns.append(col)

            return pa.Table.from_arrays(new_columns, names=arrow_table.column_names)

        @classmethod
        def _arrow_table_to_pandas(
            cls, arrow_table: pa.Table, categories, **kwargs
        ) -> pd.DataFrame:
            # Clamp before the parent class performs the ns conversion.
            fixed_arrow_table = cls.clamp_arrow_datetimes(arrow_table)
            return super()._arrow_table_to_pandas(
                fixed_arrow_table, categories, **kwargs
            )

    # This should not fail: timestamps are clamped into the valid range.
    dd.read_parquet(str(tmpdir), engine=ArrowEngineWithTimestampClamp).compute()
@pytest.mark.parametrize(
    "write_cols",
    [["part", "col"], ["part", "kind", "col"]],
)
def test_partitioned_column_overlap(tmpdir, engine, write_cols):
    """Partition columns duplicated in the file data are allowed only when the
    overlap is complete; a partial overlap must raise ValueError."""
    tmpdir.mkdir("part=a")
    tmpdir.mkdir("part=b")
    path0 = os.path.join(str(tmpdir.mkdir("part=a/kind=x")), "data.parquet")
    path1 = os.path.join(str(tmpdir.mkdir("part=b/kind=x")), "data.parquet")
    frame_a = pd.DataFrame({"part": "a", "kind": "x", "col": range(5)})
    frame_b = pd.DataFrame({"part": "b", "kind": "x", "col": range(5)})
    frame_a[write_cols].to_parquet(path0, index=False)
    frame_b[write_cols].to_parquet(path1, index=False)

    # fastparquet is handed the file paths directly; pyarrow gets the root dir.
    path = [path0, path1] if engine == "fastparquet" else str(tmpdir)

    if write_cols == ["part", "kind", "col"]:
        expect = pd.concat([frame_a, frame_b], ignore_index=True)
        assert_eq(dd.read_parquet(path, engine=engine), expect, check_index=False)
    else:
        # For now, partial overlap between partition columns and
        # real columns is not allowed.
        with pytest.raises(ValueError):
            dd.read_parquet(path, engine=engine)
@PYARROW_MARK
@pytest.mark.parametrize(
    "write_cols",
    [["col"], ["part", "col"]],
)
def test_partitioned_no_pandas_metadata(tmpdir, engine, write_cols):
    """Directory-partitioned data without pandas schema metadata still reads.

    Regression test for https://github.com/dask/dask/issues/8087.
    """
    # Manually build the directory-partitioned layout, always writing with
    # pyarrow (regardless of the read `engine`) and stripping the schema
    # metadata that pandas would normally embed.
    frames = {
        "a": pd.DataFrame({"part": "a", "col": range(5)}),
        "b": pd.DataFrame({"part": "b", "col": range(5)}),
    }
    for part, frame in frames.items():
        out = os.path.join(str(tmpdir.mkdir("part=" + part)), "data.parquet")
        tbl = pa.Table.from_pandas(
            frame[write_cols],
            preserve_index=False,
        ).replace_schema_metadata(metadata={})
        pq.write_table(tbl, out)

    # Check results
    expect = pd.concat([frames["a"], frames["b"]], ignore_index=True)
    result = dd.read_parquet(str(tmpdir), engine=engine)
    # Normalize the dtype of the reconstructed partition column for comparison.
    result["part"] = result["part"].astype("object")
    assert_eq(result[list(expect.columns)], expect, check_index=False)
@fp_pandas_xfail
def test_partitioned_preserve_index(tmpdir, write_engine, read_engine):
    """Filtering on a partition column preserves the (unnamed) index."""
    tmp = str(tmpdir)
    size = 1_000
    npartitions = 4
    partition_vals = np.arange(npartitions).repeat(size // npartitions)
    data = pd.DataFrame(
        {
            "myindex": np.arange(size),
            "A": np.random.random(size=size),
            "B": pd.Categorical(partition_vals),
        }
    ).set_index("myindex")
    data.index.name = None

    dd.from_pandas(data, npartitions=npartitions).to_parquet(
        tmp, partition_on="B", engine=write_engine
    )

    expect = data[data["B"] == 1]
    got = dd.read_parquet(tmp, engine=read_engine, filters=[("B", "==", 1)])
    assert_eq(expect, got)
def test_from_pandas_preserve_none_index(tmpdir, engine):
    """An unnamed index written by pandas is preserved on read."""
    if engine.startswith("pyarrow"):
        pytest.importorskip("pyarrow", minversion="0.15.0")
    fn = str(tmpdir.join("test.parquet"))
    frame = pd.DataFrame({"a": [1, 2], "b": [4, 5], "c": [6, 7]}).set_index("c")
    frame.index.name = None
    pandas_engine = "pyarrow" if engine.startswith("pyarrow") else "fastparquet"
    frame.to_parquet(fn, engine=pandas_engine, index=True)

    expect = pd.read_parquet(fn)
    got = dd.read_parquet(fn, engine=engine)
    assert_eq(expect, got)
def test_multi_partition_none_index_false(tmpdir, engine):
    """``index=False`` works when reading several pandas-written files."""
    if engine.startswith("pyarrow"):
        pytest.importorskip("pyarrow", minversion="0.15.0")
        write_engine = "pyarrow"
    else:
        assert engine == "fastparquet"
        write_engine = "fastparquet"

    # Write each partition separately with plain pandas (no dask.to_parquet).
    ddf1 = ddf.reset_index(drop=True)
    for i, part in enumerate(ddf1.partitions):
        out = str(tmpdir.join("test.{}.parquet".format(i)))
        part.compute().to_parquet(out, engine=write_engine)

    # Read back with index=False
    ddf2 = dd.read_parquet(str(tmpdir), index=False, engine=engine)
    assert_eq(ddf1, ddf2)
@write_read_engines()
def test_from_pandas_preserve_none_rangeindex(tmpdir, write_engine, read_engine):
    """A non-default, unnamed RangeIndex survives a pandas->dask round trip.

    See GitHub Issue#6348.
    """
    fn = str(tmpdir.join("test.parquet"))
    pandas_engine = "pyarrow" if write_engine.startswith("pyarrow") else "fastparquet"
    expect = pd.DataFrame({"t": [1, 2, 3]}, index=pd.RangeIndex(start=1, stop=4))
    expect.to_parquet(fn, engine=pandas_engine)

    got = dd.read_parquet(fn, engine=read_engine)
    assert_eq(expect, got.compute())
def test_illegal_column_name(tmpdir, engine):
    """A column named like the internal null-index sentinel is rejected.

    Writing succeeds (with a warning) when the None index name is dropped,
    but preserving it must raise, since the sentinel column name would clash.
    """
    null_name = "__null_dask_index__"
    fn = str(tmpdir.join("test.parquet"))
    frame = pd.DataFrame({"x": [1, 2], null_name: [4, 5]}).set_index("x")
    frame.index.name = None
    ddf = dd.from_pandas(frame, npartitions=2)

    # Not preserving the None index name: the write works, with a warning.
    with pytest.warns(UserWarning, match=null_name):
        ddf.to_parquet(fn, engine=engine, write_index=False)

    # Preserving it: a ValueError naming the offending column.
    with pytest.raises(ValueError) as e:
        ddf.to_parquet(fn, engine=engine)
    assert null_name in str(e.value)
def test_divisions_with_null_partition(tmpdir, engine):
    """Null values in the index column leave divisions unknown (all None)."""
    frame = pd.DataFrame({"a": [1, 2, None, None], "b": [1, 2, 3, 4]})
    dd.from_pandas(frame, npartitions=2).to_parquet(
        str(tmpdir), engine=engine, write_index=False
    )
    ddf_read = dd.read_parquet(str(tmpdir), engine=engine, index="a")
    assert ddf_read.divisions == (None, None, None)
@PYARROW_MARK
def test_pyarrow_dataset_simple(tmpdir, engine):
    """The pyarrow-dataset reader round-trips an indexed frame."""
    root = str(tmpdir)
    frame = pd.DataFrame({"a": [4, 5, 6], "b": ["a", "b", "b"]}).set_index("a")
    ddf_written = dd.from_pandas(frame, npartitions=2)
    ddf_written.to_parquet(root, engine=engine)

    read_df = dd.read_parquet(root, engine="pyarrow-dataset")
    read_df.compute()
    assert_eq(ddf_written, read_df)
@PYARROW_MARK
@pytest.mark.parametrize("test_filter", [True, False])
def test_pyarrow_dataset_partitioned(tmpdir, engine, test_filter):
fn = str(tmpdir)
df = pd.DataFrame({"a": [4, 5, 6], "b": ["a", "b", "b"]})
df["b"] = df["b"].astype("category")
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=engine, partition_on="b")
read_df = dd.read_parquet(
fn,
engine="pyarrow",
filters=[("b", "==", "a")] if test_filter else None,
)
if test_filter:
assert_eq(ddf[ddf["b"] == | |
# Copyright 2016 <NAME>, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import sys
import token
import six
from . import util
# Mapping of matching braces. To find a token here, look up token[:2].
_matching_pairs_left = {
  (token.OP, '('): (token.OP, ')'),
  (token.OP, '['): (token.OP, ']'),
  (token.OP, '{'): (token.OP, '}'),
}

# The inverse table, derived from the one above: closing brace -> opening brace.
_matching_pairs_right = {right: left for left, right in _matching_pairs_left.items()}
class MarkTokens(object):
    """
    Helper that visits all nodes in the AST tree and assigns .first_token and .last_token attributes
    to each of them. This is the heart of the token-marking logic.
    """
    def __init__(self, code):
        # `code` supplies token lookup/navigation helpers (get_token*, next_token,
        # prev_token, find_token, token_range, ...); we never tokenize here ourselves.
        self._code = code
        # Resolves a node class to its node-specific visit_* handler; used in
        # _visit_after_children via self._methods.get(self, node.__class__).
        self._methods = util.NodeMethods()
        # Set per-tree in visit_tree(): a function yielding a node's children.
        self._iter_children = None
    def visit_tree(self, node):
        # Walk the whole tree once, assigning .first_token/.last_token to every node
        # via the before/after callbacks below.
        self._iter_children = util.iter_children_func(node)
        util.visit_tree(node, self._visit_before_children, self._visit_after_children)
    def _visit_before_children(self, node, parent_token):
        # NOTE: the local name `token` below shadows the imported `token` module for
        # the rest of this method; the module itself is not needed here.
        col = getattr(node, 'col_offset', None)
        token = self._code.get_token_from_utf8(node.lineno, col) if col is not None else None
        if not token and util.is_module(node):
            # We'll assume that a Module node starts at the start of the source code.
            token = self._code.get_token(1, 0)
        # Use our own token, or our parent's if we don't have one, to pass to child calls as
        # parent_token argument. The second value becomes the token argument of _visit_after_children.
        return (token or parent_token, token)
    def _visit_after_children(self, node, parent_token, token):
        # This processes the node generically first, after all children have been processed.
        # Get the first and last tokens that belong to children. Note how this doesn't assume that we
        # iterate through children in order that corresponds to occurrence in source code. This
        # assumption can fail (e.g. with return annotations).
        first = token
        last = None
        for child in self._iter_children(node):
            # Children already have .first_token/.last_token (post-order traversal).
            if not first or child.first_token.index < first.index:
                first = child.first_token
            if not last or child.last_token.index > last.index:
                last = child.last_token
        # If we don't have a first token from _visit_before_children, and there were no children, then
        # use the parent's token as the first token.
        first = first or parent_token
        # If no children, set last token to the first one.
        last = last or first
        # Statements continue to before NEWLINE. This helps cover a few different cases at once.
        if util.is_stmt(node):
            last = self._find_last_in_stmt(last)
        # Capture any unmatched brackets.
        first, last = self._expand_to_matching_pairs(first, last, node)
        # Give a chance to node-specific methods to adjust.
        # NOTE(review): _mark_node_specific_methods looks like a feature flag on the code
        # object; when false the generic range is kept as-is — confirm where it is set.
        if self._code._mark_node_specific_methods:
            nfirst, nlast = self._methods.get(self, node.__class__)(node, first, last)
        else:
            nfirst, nlast = first, last
        if (nfirst, nlast) != (first, last):
            # If anything changed, expand again to capture any unmatched brackets.
            nfirst, nlast = self._expand_to_matching_pairs(nfirst, nlast, node)
        node.first_token = nfirst
        node.last_token = nlast
def _find_last_in_stmt(self, start_token):
t = start_token
while (not util.match_token(t, token.NEWLINE) and
not util.match_token(t, token.OP, ';') and
not token.ISEOF(t.type)):
t = self._code.next_token(t, include_extra=True)
return self._code.prev_token(t)
    def _expand_to_matching_pairs(self, first_token, last_token, node):
        """
        Scan tokens in [first_token, last_token] range that are between node's children, and for any
        unmatched brackets, adjust first/last tokens to include the closing pair.
        """
        # We look for opening parens/braces among non-child tokens (i.e. tokens between our actual
        # child nodes). If we find any closing ones, we match them to the opens.
        to_match_right = []  # closers we still expect to find to the right of last_token
        to_match_left = []   # openers we still expect to find to the left of first_token
        for tok in self._code.token_range(first_token, last_token):
            tok_info = tok[:2]
            if to_match_right and tok_info == to_match_right[-1]:
                # This closer matches the most recent unmatched opener.
                to_match_right.pop()
            elif tok_info in _matching_pairs_left:
                to_match_right.append(_matching_pairs_left[tok_info])
            elif tok_info in _matching_pairs_right:
                to_match_left.append(_matching_pairs_right[tok_info])
        # Once done, extend `last_token` to match any unclosed parens/braces.
        for match in reversed(to_match_right):
            last = self._code.next_token(last_token)
            # Allow for trailing commas or colons (allowed in subscripts) before the closing delimiter
            while any(util.match_token(last, token.OP, x) for x in (',', ':')):
                last = self._code.next_token(last)
            # Now check for the actual closing delimiter.
            if util.match_token(last, *match):
                last_token = last
        # And extend `first_token` to match any unclosed opening parens/braces.
        for match in to_match_left:
            first = self._code.prev_token(first_token)
            if util.match_token(first, *match):
                first_token = first
        return (first_token, last_token)
#----------------------------------------------------------------------
# Node visitors. Each takes a preliminary first and last tokens, and returns the adjusted pair
# that will actually be assigned.
def visit_default(self, node, first_token, last_token):
# pylint: disable=no-self-use
# By default, we don't need to adjust the token we computed earlier.
return (first_token, last_token)
def handle_comp(self, open_brace, node, first_token, last_token):
# For list/set/dict comprehensions, we only get the token of the first child, so adjust it to
# include the opening brace (the closing brace will be matched automatically).
before = self._code.prev_token(first_token)
util.expect_token(before, token.OP, open_brace)
return (before, last_token)
    # Python 3.8 fixed the starting position of list comprehensions:
    # https://bugs.python.org/issue31241
    if sys.version_info < (3, 8):
        def visit_listcomp(self, node, first_token, last_token):
            # Pre-3.8 list comprehensions start at the first child; pull in the '['.
            return self.handle_comp('[', node, first_token, last_token)
    if six.PY2:
        # We shouldn't do this on PY3 because its SetComp/DictComp already have a correct start.
        def visit_setcomp(self, node, first_token, last_token):
            return self.handle_comp('{', node, first_token, last_token)
        def visit_dictcomp(self, node, first_token, last_token):
            return self.handle_comp('{', node, first_token, last_token)
def visit_comprehension(self, node, first_token, last_token):
# The 'comprehension' node starts with 'for' but we only get first child; we search backwards
# to find the 'for' keyword.
first = self._code.find_token(first_token, token.NAME, 'for', reverse=True)
return (first, last_token)
def visit_if(self, node, first_token, last_token):
while first_token.string not in ('if', 'elif'):
first_token = self._code.prev_token(first_token)
return first_token, last_token
def handle_attr(self, node, first_token, last_token):
# Attribute node has ".attr" (2 tokens) after the last child.
dot = self._code.find_token(last_token, token.OP, '.')
name = self._code.next_token(dot)
util.expect_token(name, token.NAME)
return (first_token, name)
visit_attribute = handle_attr
visit_assignattr = handle_attr
visit_delattr = handle_attr
def handle_def(self, node, first_token, last_token):
# With astroid, nodes that start with a doc-string can have an empty body, in which case we
# need to adjust the last token to include the doc string.
if not node.body and getattr(node, 'doc', None):
last_token = self._code.find_token(last_token, token.STRING)
# Include @ from decorator
if first_token.index > 0:
prev = self._code.prev_token(first_token)
if util.match_token(prev, token.OP, '@'):
first_token = prev
return (first_token, last_token)
visit_classdef = handle_def
visit_functiondef = handle_def
def handle_following_brackets(self, node, last_token, opening_bracket):
# This is for calls and subscripts, which have a pair of brackets
# at the end which may contain no nodes, e.g. foo() or bar[:].
# We look for the opening bracket and then let the matching pair be found automatically
# Remember that last_token is at the end of all children,
# so we are not worried about encountering a bracket that belongs to a child.
first_child = next(self._iter_children(node))
call_start = self._code.find_token(first_child.last_token, token.OP, opening_bracket)
if call_start.index > last_token.index:
last_token = call_start
return last_token
def visit_call(self, node, first_token, last_token):
last_token = self.handle_following_brackets(node, last_token, '(')
# Handling a python bug with decorators with empty parens, e.g.
# @deco()
# def ...
if util.match_token(first_token, token.OP, '@'):
first_token = self._code.next_token(first_token)
return (first_token, last_token)
def visit_subscript(self, node, first_token, last_token):
last_token = self.handle_following_brackets(node, last_token, '[')
return (first_token, last_token)
def handle_bare_tuple(self, node, first_token, last_token):
# A bare tuple doesn't include parens; if there is a trailing comma, make it part of the tuple.
maybe_comma = self._code.next_token(last_token)
if util.match_token(maybe_comma, token.OP, ','):
last_token = maybe_comma
return (first_token, last_token)
    if sys.version_info >= (3, 8):
        # In Python3.8 parsed tuples include parentheses when present.
        def handle_tuple_nonempty(self, node, first_token, last_token):
            # It's a bare tuple if the first token belongs to the first child. The first child may
            # include extraneous parentheses (which don't create new nodes), so account for those too.
            child = node.elts[0]
            child_first, child_last = self._gobble_parens(child.first_token, child.last_token, True)
            if first_token == child_first:
                return self.handle_bare_tuple(node, first_token, last_token)
            return (first_token, last_token)
    else:
        # Before python 3.8, parsed tuples do not include parens.
        def handle_tuple_nonempty(self, node, first_token, last_token):
            # Absorb a trailing comma first, then any surrounding parentheses.
            (first_token, last_token) = self.handle_bare_tuple(node, first_token, last_token)
            return self._gobble_parens(first_token, last_token, False)
def visit_tuple(self, node, first_token, last_token):
if not node.elts:
# An | |
(γ-1) μ m_P / k_B
where k_B is the Boltzmann constant
γ is 5/3, the perfect gas constant
m_P is the proton mass
μ = 1 / (mean no. molecules per unit atomic weight)
= 1 / (X + Y /4 + E)
where E = Ne * X, and Y = (1-X).
Can neglect metals as they are heavy.
Leading contribution is from electrons, which is already included
[+ Z / (12->16)] from metal species
[+ Z/16*4 ] for OIV from electrons."""
#convert U (J/kg) to T (K) : U = N k T / (γ - 1)
#T = U (γ-1) μ m_P / k_B
#where k_B is the Boltzmann constant
#γ is 5/3, the perfect gas constant
#m_P is the proton mass
#μ is 1 / (mean no. molecules per unit atomic weight) calculated in loop.
#Internal energy units are 10^-10 erg/g
hy_mass = 1 - helium
muienergy = 4 / (hy_mass * (3 + 4*nebynh) + 1)*ienergy*1e10
#Boltzmann constant (cgs)
boltzmann=1.38066e-16
gamma=5./3
#So for T in K, boltzmann in erg/K, internal energy has units of erg/g
temp = (gamma-1) * self.protonmass / boltzmann * muienergy
return temp
class RecombRatesCen92(object):
    """Recombination rates and collisional ionization rates, as a function of temperature.

    This is taken from KWH 06, astro-ph/9509107, Table 2, based on Cen 1992.
    Illustris uses these rates.

    All rate coefficients are in cm^3/s and take temperature in K; inputs may be
    scalars or numpy arrays.
    """
    def alphaHp(self, temp):
        """Recombination rate for H+, ionized hydrogen, in cm^3/s.
        Temp in K."""
        return 8.4e-11 / np.sqrt(temp) / np.power(temp/1000, 0.2) / (1 + np.power(temp/1e6, 0.7))

    def alphaHep(self, temp):
        """Recombination rate for He+, ionized helium, in cm^3/s.
        Temp in K."""
        return 1.5e-10 / np.power(temp, 0.6353)

    def alphad(self, temp):
        """Recombination rate for dielectronic recombination, in cm^3/s.
        Temp in K."""
        return 1.9e-3 / np.power(temp, 1.5) * np.exp(-4.7e5/temp) * (1 + 0.3*np.exp(-9.4e4/temp))

    def alphaHepp(self, temp):
        """Recombination rate for doubly ionized helium, in cm^3/s.
        Temp in K."""
        # Exactly 4x the H+ rate, as in KWH Table 2.
        return 4 * self.alphaHp(temp)

    def GammaeH0(self, temp):
        """Collisional ionization rate for H0 in cm^3/s. Temp in K."""
        return 5.85e-11 * np.sqrt(temp) * np.exp(-157809.1/temp) / (1 + np.sqrt(temp/1e5))

    def GammaeHe0(self, temp):
        """Collisional ionization rate for He0 in cm^3/s. Temp in K.
        (Docstring fixed: previously copy-pasted as 'H0'.)"""
        return 2.38e-11 * np.sqrt(temp) * np.exp(-285335.4/temp) / (1 + np.sqrt(temp/1e5))

    def GammaeHep(self, temp):
        """Collisional ionization rate for He+ in cm^3/s. Temp in K.
        (Docstring fixed: previously copy-pasted as 'H0'.)"""
        return 5.68e-12 * np.sqrt(temp) * np.exp(-631515.0/temp) / (1 + np.sqrt(temp/1e5))
class RecombRatesVerner96(object):
    """Recombination rates and collisional ionization rates, as a function of temperature.

    Recombination rates are the fit from Verner & Ferland 1996 (astro-ph/9509083).
    Collisional rates are the fit from Voronov 1997 (http://www.sciencedirect.com/science/article/pii/S0092640X97907324).

    In a very photoionised medium this changes the neutral hydrogen abundance by approximately 10% compared to Cen 1992.
    These rates are those used by Nyx.

    All rates are in cm^3/s and take temperature in K; inputs may be scalars or numpy arrays.
    """
    def _Verner96Fit(self, temp, aa, bb, temp0, temp1):
        """Formula used as a fitting function in Verner & Ferland 1996 (astro-ph/9509083).

        temp is in K; aa, bb, temp0, temp1 are fit coefficients from V&F96 Table 1."""
        sqrttt0 = np.sqrt(temp/temp0)
        sqrttt1 = np.sqrt(temp/temp1)
        return aa / ( sqrttt0 * (1 + sqrttt0)**(1-bb)*(1+sqrttt1)**(1+bb) )
    def alphaHp(self,temp):
        """Recombination rate for H+, ionized hydrogen, in cm^3/s.
        The V&F 96 fitting formula is accurate to < 1% in the worst case.
        Temp in K."""
        #See line 1 of V&F96 table 1.
        return self._Verner96Fit(temp, aa=7.982e-11, bb=0.748, temp0=3.148, temp1=7.036e+05)
    def alphaHep(self,temp):
        """Recombination rate for He+, ionized helium, in cm^3/s.
        Accurate to ~2% for T < 10^6 and 5% for T< 10^10.
        Temp in K."""
        #VF96 give two rates. The first is more accurate for T < 10^6, the second is valid up to T = 10^10.
        #We use the most accurate allowed. See lines 2 and 3 of Table 1 of VF96.
        lowTfit = self._Verner96Fit(temp, aa=3.294e-11, bb=0.6910, temp0=1.554e+01, temp1=3.676e+07)
        highTfit = self._Verner96Fit(temp, aa=9.356e-10, bb=0.7892, temp0=4.266e-02, temp1=4.677e+06)
        #Note that at 10^6K the two fits differ by ~10%. This may lead one to disbelieve the quoted accuracies!
        #We thus switch over at a slightly lower temperature.
        #The two fits cross at T ~ 3e5K.
        swtmp = 7e5
        deltat = 1e5
        upper = swtmp + deltat
        lower = swtmp - deltat
        #In order to avoid a sharp feature at 10^6 K, we linearly interpolate between the two fits around 10^6 K.
        interpfit = (lowTfit * (upper - temp) + highTfit * (temp - lower))/(2*deltat)
        #Piecewise combination: low fit below `lower`, high fit above `upper`,
        #linear blend in between. Works element-wise on arrays.
        return (temp < lower)*lowTfit + (temp > upper)*highTfit + (upper > temp)*(temp > lower)*interpfit
    def alphad(self, temp):
        """Recombination rate for dielectronic recombination, in cm^3/s.
        This is the value from Aldrovandi & Pequignot 73, as used in Nyx, Sherwood and Cen 1992.
        It is corrected from the value in Aldrovandi & Pequignot 1973 by Burgess & Tworkowski 1976 (fig1)
        by a factor of 0.65. The exponent is also made slightly more accurate.
        Temp in K."""
        return 1.23e-3 / np.power(temp,1.5) * np.exp(-4.72e5/temp)*(1+0.3*np.exp(-9.4e4/temp))
    def alphaHepp(self, temp):
        """Recombination rate for doubly ionized helium, in cm^3/s. Accurate to 2%.
        Temp in K."""
        #See line 4 of V&F96 table 1.
        return self._Verner96Fit(temp, aa=1.891e-10, bb=0.7524, temp0=9.370, temp1=2.774e6)
    def _Voronov96Fit(self, temp, dE, PP, AA, XX, KK):
        """Fitting function for collisional rates. Eq. 1 of Voronov 1997. Accurate to 10%,
        but data is only accurate to 50%.

        dE is the ionization energy in eV; PP, AA, XX, KK are the tabulated fit constants."""
        bolevk = 8.61734e-5 # Boltzmann constant in units of eV/K
        UU = dE / (bolevk * temp)
        return AA * (1 + PP * np.sqrt(UU))/(XX+UU) * UU**KK * np.exp(-UU)
    def GammaeH0(self,temp):
        """Collisional ionization rate for H0 in cm^3/s. Temp in K. Voronov 97, Table 1."""
        return self._Voronov96Fit(temp, 13.6, 0, 0.291e-07, 0.232, 0.39)
    def GammaeHe0(self,temp):
        """Collisional ionization rate for He0 in cm^3/s. Temp in K. Voronov 97, Table 1."""
        return self._Voronov96Fit(temp, 24.6, 0, 0.175e-07, 0.180, 0.35)
    def GammaeHep(self,temp):
        """Collisional ionization rate for He+ in cm^3/s. Temp in K. Voronov 97, Table 1.
        (Docstring fixed: previously said 'HeI', but 54.4 eV is the He+ ionization energy.)"""
        return self._Voronov96Fit(temp, 54.4, 1, 0.205e-08, 0.265, 0.25)
class RecombRatesBadnell(RecombRatesVerner96):
    """Recombination rates and collisional ionization rates, as a function of temperature.
    Recombination rates are the fit from Badnell's website: http://amdpp.phys.strath.ac.uk/tamoc/RR/#partial.
    Collisional ionization rates are inherited from RecombRatesVerner96 (Voronov 1997).
    """
    def _RecombRateFit_lowcharge_ion(self, temp, aa, bb, cc, temp0, temp1, temp2):
        """Formula used as a fitting function in Verner & Ferland 1996 (astro-ph/9509083)/ See http://amdpp.phys.strath.ac.uk/tamoc/RR/#partial.
        Extends _Verner96Fit with a temperature-dependent exponent BB = bb + cc*exp(-temp2/temp)."""
        sqrttt0 = np.sqrt(temp/temp0)
        sqrttt1 = np.sqrt(temp/temp1)
        BB = bb + cc*np.exp(-temp2/temp)
        return aa / ( sqrttt0 * (1 + sqrttt0)**(1-BB)*(1+sqrttt1)**(1+BB) )
    def alphaHp(self,temp):
        """Recombination rate for H+, ionized hydrogen, in cm^3/s.
        Temp in K."""
        #Badnell fit coefficients (same functional form as V&F96 Table 1).
        return self._Verner96Fit(temp, aa=8.318e-11, bb=0.7472, temp0=2.965, temp1=7.001e5)
    def alphaHep(self,temp):
        """Recombination rate for He+, singly ionized helium, in cm^3/s.
        Temp in K.
        (Docstring fixed: previously copy-pasted from alphaHp and said hydrogen.)"""
        #Badnell fit coefficients (same functional form as V&F96 Table 1).
        return self._Verner96Fit(temp, aa=1.818E-10, bb=0.7492, temp0=10.17, temp1=2.786e6)
    def alphaHepp(self, temp):
        """Recombination rate for doubly ionized helium, in cm^3/s.
        Temp in K."""
        #Uses the low-charge-ion variant of the fit with the extra cc/temp2 terms.
        return self._RecombRateFit_lowcharge_ion(temp, aa=5.235E-11, bb=0.6988, cc=0.0829, temp0=7.301, temp1=4.475e6, temp2 = 1.682e5)
class PhotoRates(object):
    """The photoionization rates for a given species.
    Eq. 29 of KWH 96. Rates are loaded from a TREECOOL table and interpolated
    in log10(1+z)."""
    def __init__(self, treecool_file="data/TREECOOL_ep_2018p"):
        # Format of the treecool table:
        # log_10(1+z), Gamma_HI, Gamma_HeI, Gamma_HeII, Qdot_HI, Qdot_HeI, Qdot_HeII,
        # where 'Gamma' is the photoionization rate and 'Qdot' is the photoheating rate.
        # The Gamma's are in units of s^-1, and the Qdot's are in units of erg s^-1.
        try:
            table = np.loadtxt(treecool_file)
        except OSError:
            # Fall back to a path relative to this module's own directory.
            module_dir = os.path.dirname(os.path.realpath(__file__))
            table = np.loadtxt(os.path.join(module_dir, treecool_file))
        log1z_grid = table[:, 0]
        gamma_cols = table[:, 1:4]
        heat_cols = table[:, 4:7]
        assert np.shape(log1z_grid)[0] == np.shape(gamma_cols)[0]
        # One interpolating spline in log10(1+z) per species and rate type.
        spline = interp.InterpolatedUnivariateSpline
        self.Gamma_HI = spline(log1z_grid, gamma_cols[:, 0])
        self.Gamma_HeI = spline(log1z_grid, gamma_cols[:, 1])
        self.Gamma_HeII = spline(log1z_grid, gamma_cols[:, 2])
        self.Eps_HI = spline(log1z_grid, heat_cols[:, 0])
        self.Eps_HeI = spline(log1z_grid, heat_cols[:, 1])
        self.Eps_HeII = spline(log1z_grid, heat_cols[:, 2])
    def gHe0(self, redshift):
        """Get photo rate for neutral Helium"""
        return self.Gamma_HeI(np.log10(1 + redshift))
    def gHep(self, redshift):
        """Get photo rate for singly ionized Helium"""
        return self.Gamma_HeII(np.log10(1 + redshift))
    def gH0(self, redshift):
        """Get photo rate for neutral Hydrogen"""
        return self.Gamma_HI(np.log10(1 + redshift))
    def epsHe0(self, redshift):
        """Get photo heating rate for neutral Helium"""
        return self.Eps_HeI(np.log10(1 + redshift))
    def epsHep(self, redshift):
        """Get photo heating rate for singly ionized Helium"""
        return self.Eps_HeII(np.log10(1 + redshift))
    def epsH0(self, redshift):
        """Get photo heating rate for neutral Hydrogen"""
        return self.Eps_HI(np.log10(1 + redshift))
class CoolingRatesKWH92(object):
"""The cooling rates from KWH92, in erg s^-1 cm^-3 (cgs).
All rates are divided by the abundance of the ions involved in the interaction.
So we are computing the cooling rate divided by n_e n_X. Temperatures in K.
None | |
<reponame>LSSTDESC/WeakLensingDeblending
"""Manage the parameters that define a simulated survey's camera design and observing conditions.
"""
from __future__ import print_function, division
import math
import numpy as np
import numpy.linalg
import galsim
from six import iteritems
class Survey(object):
    """Survey camera and observing parameters.

    No default values are assigned to constructor args since these are handled at run time
    based on a requested (survey_name,filter_band) combination using :func:`get_defaults`.

    Args:
        survey_name(str): Use default camera and observing parameters appropriate for this survey.
        filter_band(str): LSST imaging band to simulate. Must be one of 'u','g','r','i','z','y'.
        image_width(int): Simulated camera image width in pixels.
        image_height(int): Simulated camera image height in pixels.
        pixel_scale(float): Simulated camera pixel scale in arcseconds per pixel.
        exposure_time(float): Simulated camera total exposure time seconds.
        zero_point(float): Simulated camera zero point in electrons per second at 24th magnitude.
        mirror_diameter(float): Size of the primary mirror's clear aperture in meters to use for
            the optical PSF, or zero if no optical PSF should be simulated.
        effective_area(float): Effective total light collecting area in square meters. Used to
            determine the obscuration fraction in the simulated optical PSF. Ignored if
            mirror_diameter is zero.
        zenith_psf_fwhm(float): FWHM of the atmospheric PSF at zenith in arcseconds.
        atmospheric_psf_beta(float): Moffat beta parameter of the atmospheric PSF, or use a Kolmogorov
            PSF if beta <= 0.
        atmospheric_psf_e1(float): Atmospheric ellipticity component e1 (+) with \|e\| = (a-b)/(a+b).
        atmospheric_psf_e2(float): Atmospheric ellipticity component e2 (x) with \|e\| = (a-b)/(a+b).
        sky_brightness(float): Sky brightness in mags/sq.arcsec during the observation.
        airmass(float): Optical path length through the atmosphere relative to the zenith path length.
        extinction(float): Exponential extinction coefficient for atmospheric absorption.
        cosmic_shear_g1(float): Cosmic shear ellipticity component g1 (+) with \|g\| = (a-b)/(a+b).
        cosmic_shear_g2(float): Cosmic shear ellipticity component g2 (x) with \|g\| = (a-b)/(a+b).

    Raises:
        RuntimeError: Missing or extra arguments provided or unable to calculate PSF size.
    """
def __init__(self, no_analysis=False, **args):
if set(args.keys()) != set(Survey._parameter_names):
raise RuntimeError('Missing or extra arguments provided to Survey constructor.')
self.args = args
self.__dict__.update(args)
# Build our atmospheric PSF model.
atmospheric_psf_fwhm = self.zenith_psf_fwhm*self.airmass**0.6
if self.atmospheric_psf_beta > 0:
atmospheric_psf_model = galsim.Moffat(
beta = self.atmospheric_psf_beta, fwhm = atmospheric_psf_fwhm)
else:
atmospheric_psf_model = galsim.Kolmogorov(fwhm = atmospheric_psf_fwhm)
# Shear the atmospheric PSF, if necessary. Note that GalSim uses g1,g2 for the
# |g| = (a-b)/(a+b) ellipticity spinor and e1,e2 for |e| = (a^2-b^2)/(a^2+b^2).
if self.atmospheric_psf_e1 != 0 or self.atmospheric_psf_e2 != 0:
atmospheric_psf_model = atmospheric_psf_model.shear(
g1 = self.atmospheric_psf_e1, g2 = self.atmospheric_psf_e2)
# Combine with our optical PSF model, if any.
if self.mirror_diameter > 0:
lambda_over_diameter = 3600*math.degrees(
1e-10*Survey._central_wavelength[self.filter_band]/self.mirror_diameter)
area_ratio = self.effective_area/(math.pi*(0.5*self.mirror_diameter)**2)
if area_ratio <= 0 or area_ratio > 1:
raise RuntimeError('Incompatible effective-area and mirror-diameter values.')
self.obscuration_fraction = math.sqrt(1 - area_ratio)
optical_psf_model = galsim.Airy(lam_over_diam = lambda_over_diameter,
obscuration = self.obscuration_fraction)
self.psf_model = galsim.Convolve(atmospheric_psf_model,optical_psf_model)
else:
self.psf_model = atmospheric_psf_model
self.obscuration_fraction = 0.
# Draw a centered PSF image covering 10x the atmospheric PSF FWHM.
psf_size_pixels = 2*int(math.ceil(10*atmospheric_psf_fwhm/self.pixel_scale))
self.psf_image = galsim.Image(psf_size_pixels,psf_size_pixels,scale = self.pixel_scale)
self.psf_model.drawImage(image = self.psf_image)
if not no_analysis:
# Draw a (temporary) high-resolution (10x) image covering the same area.
zoom = 10
hires_psf_image = galsim.Image(zoom*psf_size_pixels,zoom*psf_size_pixels,scale = self.pixel_scale/zoom)
self.psf_model.drawImage(image = hires_psf_image)
# Calculate the unweighted second moments in arcsecs**2 of the hi-res PSF image.
hires_sum = np.sum(hires_psf_image.array)
hires_grid = (self.pixel_scale/zoom)*(np.arange(zoom*psf_size_pixels) - 0.5*zoom*psf_size_pixels + 0.5)
hires_x,hires_y = np.meshgrid(hires_grid,hires_grid)
psf_x = np.sum(hires_psf_image.array*hires_x)/hires_sum
psf_y = np.sum(hires_psf_image.array*hires_x)/hires_sum
hires_x -= psf_x
hires_y -= psf_y
psf_xx = np.sum(hires_psf_image.array*hires_x**2)/hires_sum
psf_xy = np.sum(hires_psf_image.array*hires_x*hires_y)/hires_sum
psf_yy = np.sum(hires_psf_image.array*hires_y**2)/hires_sum
self.psf_second_moments = np.array(((psf_xx,psf_xy),(psf_xy,psf_yy)))
# Calculate the corresponding PSF sizes |Q|**0.25 and (0.5*trQ)**0.5
self.psf_sigma_m = np.power(np.linalg.det(self.psf_second_moments),0.25)
self.psf_sigma_p = np.sqrt(0.5*np.trace(self.psf_second_moments))
# Also calculate the PSF size as |Q|**0.25 using adaptive weighted second moments
# of the non-hires PSF image.
try:
hsm_results = galsim.hsm.FindAdaptiveMom(self.psf_image)
self.psf_size_hsm = hsm_results.moments_sigma*self.pixel_scale
except RuntimeError as e:
raise RuntimeError('Unable to calculate adaptive moments of PSF image.')
# Calculate the mean sky background level in detected electrons per pixel.
self.mean_sky_level = self.get_flux(self.sky_brightness)*self.pixel_scale**2
# Create an empty image using (0,0) to index the lower-left corner pixel.
self.image_bounds = galsim.BoundsI(0,self.image_width-1,0,self.image_height-1)
self.image = galsim.Image(bounds = self.image_bounds,scale=self.pixel_scale,
dtype = np.float32)
def description(self):
"""Describe the survey we simulate.
Returns:
str: Description of the camera design and observing conditions we simulate.
"""
return 'Simulating %s %s-band survey with %r (obs.frac. = %.3f)' % (
self.survey_name,self.filter_band,self.args,self.obscuration_fraction)
def get_flux(self,ab_magnitude):
"""Convert source magnitude to flux.
The calculation includes the effects of atmospheric extinction.
Args:
ab_magnitude(float): AB magnitude of source.
Returns:
float: Flux in detected electrons.
"""
zeropoint_airmass=1.0
if self.survey_name=='DES': zeropoint_airmass=1.3
if self.survey_name=='LSST' or self.survey_name=='HSC':
zeropoint_airmass=1.2
if self.survey_name=='Euclid':
zeropoint_airmass=1.0
ab_magnitude += self.extinction*(self.airmass -zeropoint_airmass)
return self.exposure_time*self.zero_point*10**(-0.4*(ab_magnitude-24))
def get_image_coordinates(self,dx_arcsecs,dy_arcsecs):
"""Convert a physical offset from the image center into image coordinates.
Args:
dx_arcsecs(float): Offset from the image center in arcseconds.
dy_arcsecs(float): Offset from the image center in arcseconds.
Returns:
tuple: Corresponding floating-point image coordinates (x_pixels,y_pixels)
whose :func:`math.floor` value gives pixel indices and whose...
"""
x_pixels = 0.5*self.image_width + dx_arcsecs/self.pixel_scale
y_pixels = 0.5*self.image_height + dy_arcsecs/self.pixel_scale
return x_pixels,y_pixels
# Survey constructor parameter names. The order established here is used by print_defaults().
_parameter_names = (
'survey_name','filter_band',
'image_width','image_height','pixel_scale','exposure_time','zero_point',
'mirror_diameter','effective_area',
'zenith_psf_fwhm','atmospheric_psf_beta','atmospheric_psf_e1',
'atmospheric_psf_e2','sky_brightness','airmass','extinction',
'cosmic_shear_g1','cosmic_shear_g2',
)
# Central wavelengths in Angstroms for each LSST filter band, calculated from the
# baseline total filter throughputs tabulated at
# http://dev.lsstcorp.org/cgit/LSST/sims/throughputs.git/snapshot/throughputs-1.2.tar.gz
_central_wavelength = {
'u':3592.13, 'g':4789.98, 'r':6199.52, 'i':7528.51, 'z':8689.83, 'y':9674.05, 'VIS': 7135.0,
}
# Default constructor arg values for different (survey,filter_band) combinations.
_defaults = {
'*': {
'atmospheric_psf_beta': 0.0,
'atmospheric_psf_e1': 0.0,
'atmospheric_psf_e2': 0.0,
'cosmic_shear_g1': 0.0,
'cosmic_shear_g2': 0.0,
'airmass': 1.0,
},
'LSST': {
# http://www.lsst.org/lsst/science/optical_design
# Updated: https://www.lsst.org/scientists/keynumbers
'*': {
'mirror_diameter': 8.36,
'effective_area': 32.4,
'image_width': 4096,
'image_height': 4096,
'pixel_scale': 0.2,
'airmass':1.2,
},
# See http://arxiv.org/pdf/0805.2366v4.pdf, Table 2 for:
# exposure_time, sky_brightness, zenith_psf_fwhm, extinction.
# Zero points are calculated from
# https://github.com/DarkEnergyScienceCollaboration/WeakLensingDeblending/issues/1
'y': {
'exposure_time': 4800.,
'sky_brightness': 18.6,
'zenith_psf_fwhm': 0.63,
'zero_point': 10.58,
'extinction': 0.138,
},
'z': {
'exposure_time': 4800.,
'sky_brightness': 19.6,
'zenith_psf_fwhm': 0.65,
'zero_point': 22.68,
'extinction': 0.043,
},
'i': {
'exposure_time': 5520.,
'sky_brightness': 20.5,
'zenith_psf_fwhm': 0.67,
'zero_point': 32.36,
'extinction': 0.07,
},
'r': {
'exposure_time': 5520.,
'sky_brightness': 21.2,
'zenith_psf_fwhm': 0.70,
'zero_point': 43.70,
'extinction': 0.10,
},
'g': {
'exposure_time': 2400.,
'sky_brightness': 22.3,
'zenith_psf_fwhm': 0.73,
'zero_point': 50.70,
'extinction': 0.163,
},
'u': {
'exposure_time': 1680.,
'sky_brightness': 22.9,
'zenith_psf_fwhm': 0.77,
'zero_point': 9.16,
'extinction': 0.451,
},
},
'DES': {
# http://www.ctio.noao.edu/noao/content/Basic-Optical-Parameters
# http://www.ctio.noao.edu/noao/content/DECam-What
# http://www.darkenergysurvey.org/survey/des-description.pdf
# skybrightness from http://www.ctio.noao.edu/noao/node/1218
# extinction from https://arxiv.org/pdf/1701.00502.pdf table 6
# fwhm values from https://arxiv.org/pdf/1407.3801.pdf
'*': {
'mirror_diameter': 3.934,
'effective_area': 10.014,
'image_width': 3115,
'image_height': 3115,
'pixel_scale': 0.263,
},
'i': {
'exposure_time': 1000.,
'sky_brightness': 20.5,
'zenith_psf_fwhm': 0.96,
'zero_point': 13.94,
'extinction': 0.05,
},
'r' : {
'exposure_time': 800.,
'sky_brightness': 21.4,
'zenith_psf_fwhm': 1.03,
'zero_point': 15.65,
'extinction': 0.09,
},
'g' : {
'exposure_time': 800.,
'sky_brightness': 22.3,
'zenith_psf_fwhm': 1.24,
'zero_point': 12.29,
'extinction': 0.17,
},
'z' : {
'exposure_time': 800.,
'sky_brightness': 18.7, #Value from SDSS
'zenith_psf_fwhm': 1.12,
'zero_point': 10.81,
'extinction': 0.06,
},
},
'CFHT': {
# http://www.cfht.hawaii.edu/Instruments/Imaging/Megacam/generalinformation.html
# http://www.cfht.hawaii.edu/Instruments/ObservatoryManual/om-focplndat.gif
# Calculating zeropoints with:
#http://www1.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/community/CFHTLS-SG/docs/extra/filters.html
'*': {
'mirror_diameter': 3.592,
'effective_area': 8.022,
'image_width': 4428,
'image_height': 4428,
'pixel_scale': 0.185,
},
'i': {
'exposure_time': 4300.,
'sky_brightness': 20.3,
'zenith_psf_fwhm': 0.64,
'zero_point': 8.46,
'extinction': 0.07,
},
'r' : {
'exposure_time': 2000.,
'sky_brightness': 20.8,
'zenith_psf_fwhm': 0.71,
'zero_point': 10.72,
'extinction': 0.10,
},
},
'HSC': {
# http://www.subarutelescope.org/Introduction/telescope.html
#http://www.naoj.org/Projects/HSC/forobservers.html
# https://arxiv.org/pdf/1702.08449.pdf
#sky from: http://www.naoj.org/Observing/Instruments/SCam/exptime.html
#extinction from http://tmt.mtk.nao.ac.jp/ETC_readme.html
#Filter throughputs from speclite
'*': {
'mirror_diameter': 8.2,
'effective_area': 52.81, #I couldn't find the effective aperture
'image_width': 4096,
'image_height': 2048,
'pixel_scale': 0.17,
},
'g': {
'exposure_time': 600.,
'sky_brightness': 21.4,
'zenith_psf_fwhm': 0.72,
'zero_point': 91.11,
'extinction': 0.13,
},
'r' : {
'exposure_time': 600.,
'sky_brightness': 20.6,
'zenith_psf_fwhm': 0.67,
'zero_point': 87.74,
'extinction': 0.11,
},
'i': {
'exposure_time': 1200.,
'sky_brightness': 19.7,
'zenith_psf_fwhm': 0.56,
'zero_point': 69.80,
'extinction': 0.07,
},
'z' : {
'exposure_time': 1200.,
'sky_brightness': 18.3,
'zenith_psf_fwhm': 0.63,
'zero_point': 29.56,
'extinction': 0.05,
},
'y': {
'exposure_time': 1200.,
'sky_brightness': 17.9,
'zenith_psf_fwhm': 0.64,
'zero_point': 21.53,
'extinction': 0.05,
},
},
'Euclid': {
# Info from: http://www.mssl.ucl.ac.uk/~smn2/instrument.html
'*': {
'mirror_diameter': 1.3,
'effective_area': 1.15, # area in square meters after 13% obscuration as in: https://arxiv.org/pdf/1608.08603.pdf
'image_width': 4096, # https://www.euclid-ec.org/?page_id=2485
'image_height': 4132,
'pixel_scale': 0.101, #Cropper et al 2018: Proc. of SPIE Vol. 10698 1069828-19
},
'VIS': {
'exposure_time': 2260, #4 exposures combined as in Cropper et al. 2018
'sky_brightness': 22.9207, # http://www.mssl.ucl.ac.uk/~smn2/instrument.html, same result using Alderling model
'zenith_psf_fwhm': 0.17, #arcseconds | |
<reponame>msabramo/tox<gh_stars>1-10
import argparse
import distutils.sysconfig
import os
import random
import sys
import re
import shlex
import string
import subprocess
import textwrap
import pkg_resources
from tox.interpreters import Interpreters
import py
import tox
iswin32 = sys.platform == "win32"

# Default environment names mapped to the interpreter ("basepython") that
# should be used to create them.
defaultenvs = {'jython': 'jython', 'pypy': 'pypy'}
_env_names = "py,py24,py25,py26,py27,py30,py31,py32,py33,py34"
for _name in _env_names.split(","):
    # plain "py" means the interpreter running tox itself; "pyXY" -> "pythonX.Y"
    basepython = sys.executable if _name == "py" else "python" + ".".join(_name[2:4])
    defaultenvs[_name] = basepython
def parseconfig(args=None, pkg=None):
    """Parse command line arguments and the tox ini file into a Config.

    :param args: argument list; defaults to sys.argv[1:].
    :param pkg: package name used for --version reporting.
    :return: a populated Config instance.  Exits via feedback() when no
        ini file can be located.
    """
    if args is None:
        args = sys.argv[1:]
    options = prepare_parse(pkg).parse_args(args)
    config = Config()
    config.option = options
    ininame = config.option.configfile
    if os.path.isabs(ininame):
        inipath = py.path.local(ininame)
    else:
        # walk from the current directory upwards until the ini file is found
        for candidate_dir in py.path.local().parts(reverse=True):
            inipath = candidate_dir.join(ininame)
            if inipath.check():
                break
        else:
            feedback("toxini file %r not found" % (ininame,), sysexit=True)
    try:
        parseini(config, inipath)
    except tox.exception.InterpreterNotFound:
        exn = sys.exc_info()[1]
        # Use stdout to match test expectations
        py.builtin.print_("ERROR: " + str(exn))
    return config
def feedback(msg, sysexit=False):
    """Report *msg* as an error on stderr; optionally abort with exit code 1."""
    error_line = "ERROR: " + msg
    py.builtin.print_(error_line, file=sys.stderr)
    if sysexit:
        raise SystemExit(1)
class VersionAction(argparse.Action):
    """argparse action for --version: print the package version and exit 0."""
    def __call__(self, argparser, *args, **kwargs):
        pkgname = argparser.pkgname
        module = __import__(pkgname)
        py.builtin.print_("%s imported from %s" % (module.__version__, module.__file__))
        raise SystemExit(0)
class CountAction(argparse.Action):
    """argparse action that increments its destination each time the flag appears.

    If the destination is not yet present on the namespace it is initialized
    to 0 (i.e. the very first occurrence counts as zero in that case).
    """
    def __call__(self, parser, namespace, values, option_string=None):
        if not hasattr(namespace, self.dest):
            setattr(namespace, self.dest, 0)
        else:
            current = int(getattr(namespace, self.dest))
            setattr(namespace, self.dest, current + 1)
def prepare_parse(pkgname):
    """Build and return the tox command line argument parser.

    :param pkgname: package name; stored on the parser for --version reporting.
    """
    parser = argparse.ArgumentParser(description=__doc__,)
        #formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # stash the package name so VersionAction can import and report it
    parser.pkgname = pkgname
    parser.add_argument("--version", nargs=0, action=VersionAction,
        dest="version",
        help="report version information to stdout.")
    parser.add_argument("-v", nargs=0, action=CountAction, default=0,
        dest="verbosity",
        help="increase verbosity of reporting output.")
    parser.add_argument("--showconfig", action="store_true",
        help="show configuration information for all environments. ")
    parser.add_argument("-l", "--listenvs", action="store_true",
        dest="listenvs", help="show list of test environments")
    parser.add_argument("-c", action="store", default="tox.ini",
        dest="configfile",
        help="use the specified config file name.")
    parser.add_argument("-e", action="append", dest="env",
        metavar="envlist",
        help="work against specified environments (ALL selects all).")
    parser.add_argument("--notest", action="store_true", dest="notest",
        help="skip invoking test commands.")
    parser.add_argument("--sdistonly", action="store_true", dest="sdistonly",
        help="only perform the sdist packaging activity.")
    parser.add_argument("--installpkg", action="store", default=None,
        metavar="PATH",
        help="use specified package for installation into venv, instead of "
             "creating an sdist.")
    parser.add_argument("--develop", action="store_true", dest="develop",
        help="install package in the venv using 'setup.py develop' via "
             "'pip -e .'")
    parser.add_argument("--set-home", action="store_true", dest="sethome",
        help="(experimental) force creating a new $HOME for each test "
             "environment and create .pydistutils.cfg|pip.conf files "
             "if index servers are specified with tox. ")
    parser.add_argument('-i', action="append",
        dest="indexurl", metavar="URL",
        help="set indexserver url (if URL is of form name=url set the "
             "url for the 'name' indexserver, specifically)")
    parser.add_argument("-r", "--recreate", action="store_true",
        dest="recreate",
        help="force recreation of virtual environments")
    parser.add_argument("--result-json", action="store",
        dest="resultjson", metavar="PATH",
        help="write a json file with detailed information about "
             "all commands and results involved. This will turn off "
             "pass-through output from running test commands which is "
             "instead captured into the json result file.")
    # We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
    parser.add_argument("--hashseed", action="store",
        metavar="SEED", default=None,
        help="set PYTHONHASHSEED to SEED before running commands. "
             "Defaults to a random integer in the range 1 to 4294967295. "
             "Passing 'noset' suppresses this behavior.")
    parser.add_argument("--force-dep", action="append",
        metavar="REQ", default=None,
        help="Forces a certain version of one of the dependencies "
             "when configuring the virtual environment. REQ Examples "
             "'pytest<2.7' or 'django>=1.6'.")
    parser.add_argument("--sitepackages", action="store_true",
        help="override sitepackages setting to True in all envs")
    # leftover positional args become the {posargs} substitution in commands
    parser.add_argument("args", nargs="*",
        help="additional arguments available to command positional substitution")
    return parser
class Config(object):
    """Top-level tox configuration: command line options plus per-env configs."""
    def __init__(self):
        self.envconfigs = {}  # env name -> VenvConfig
        self.invocationcwd = py.path.local()
        self.interpreters = Interpreters()

    @property
    def homedir(self):
        """The user's home directory, falling back to toxinidir when unknown."""
        home = get_homedir()
        if home is None:
            home = self.toxinidir  # XXX good idea?
        return home
class VenvConfig:
    """Configuration of a single tox virtual environment.

    All attributes (envname, envdir, basepython, commands, ...) are assigned
    dynamically via keyword arguments and by parseini._makeenvconfig.
    """
    def __init__(self, **kw):
        self.__dict__.update(kw)
    @property
    def envbindir(self):
        # Windows virtualenvs put scripts in "Scripts", except for the
        # jython/pypy layouts which keep the unix-style "bin".
        if (sys.platform == "win32" and "jython" not in self.basepython
            and "pypy" not in self.basepython):
            return self.envdir.join("Scripts")
        else:
            return self.envdir.join("bin")
    @property
    def envpython(self):
        # Path to the interpreter executable inside the env.
        if "jython" in str(self.basepython):
            name = "jython"
        else:
            name = "python"
        return self.envbindir.join(name)
    # no @property to avoid early calling (see callable(subst[key]) checks)
    def envsitepackagesdir(self):
        """Return the env's site-packages dir; raises if the interpreter is unsupported."""
        self.getsupportedinterpreter()  # for throwing exceptions
        x = self.config.interpreters.get_sitepackagesdir(
            info=self._basepython_info,
            envdir=self.envdir)
        return x
    def getsupportedinterpreter(self):
        """Return the executable for basepython or raise a tox exception.

        :raises tox.exception.UnsupportedInterpreter: jython-on-windows or python < 2.6.
        :raises tox.exception.InterpreterNotFound: no executable found for basepython.
        """
        if sys.platform == "win32" and self.basepython and \
                "jython" in self.basepython:
            raise tox.exception.UnsupportedInterpreter(
                "Jython/Windows does not support installing scripts")
        info = self.config.interpreters.get_info(self.basepython)
        if not info.executable:
            raise tox.exception.InterpreterNotFound(self.basepython)
        if info.version_info < (2,6):
            raise tox.exception.UnsupportedInterpreter(
                "python2.5 is not supported anymore, sorry")
        return info.executable
# ini section-name prefix marking per-environment sections, e.g. "testenv:py27"
testenvprefix = "testenv:"
def get_homedir():
    """Return the user's home directory as a py.path.local, or None if undetectable."""
    try:
        return py.path.local._gethomedir()
    except Exception:
        # be defensive: any failure simply means "home unknown"
        return None
def make_hashseed():
    """Return a random PYTHONHASHSEED value in [1, 2**32 - 1], as a string."""
    return str(random.randint(1, 2 ** 32 - 1))
class parseini:
    """Parse a tox ini file, filling the given Config object in-place."""
    def __init__(self, config, inipath):
        """Read *inipath* and populate *config* with global and per-env settings.

        :param config: Config instance to fill in.
        :param inipath: py.path.local pointing at the tox ini file.
        """
        config.toxinipath = inipath
        config.toxinidir = toxinidir = config.toxinipath.dirpath()
        self._cfg = py.iniconfig.IniConfig(config.toxinipath)
        config._cfg = self._cfg
        self.config = config
        ctxname = getcontextname()
        if ctxname == "jenkins":
            # running under jenkins: [tox:jenkins] overrides [tox] settings
            reader = IniReader(self._cfg, fallbacksections=['tox'])
            toxsection = "tox:%s" % ctxname
            distshare_default = "{toxworkdir}/distshare"
        elif not ctxname:
            reader = IniReader(self._cfg)
            toxsection = "tox"
            distshare_default = "{homedir}/.tox/distshare"
        else:
            raise ValueError("invalid context")
        # hashseed: random by default, 'noset' disables, otherwise use as given
        if config.option.hashseed is None:
            hashseed = make_hashseed()
        elif config.option.hashseed == 'noset':
            hashseed = None
        else:
            hashseed = config.option.hashseed
        config.hashseed = hashseed
        reader.addsubstitutions(toxinidir=config.toxinidir,
                                homedir=config.homedir)
        config.toxworkdir = reader.getpath(toxsection, "toxworkdir",
                                           "{toxinidir}/.tox")
        config.minversion = reader.getdefault(toxsection, "minversion", None)
        # determine indexserver dictionary
        config.indexserver = {'default': IndexServerConfig('default')}
        # NOTE(review): 'prefix' appears to be unused below — confirm before removing
        prefix = "indexserver"
        for line in reader.getlist(toxsection, "indexserver"):
            name, url = map(lambda x: x.strip(), line.split("=", 1))
            config.indexserver[name] = IndexServerConfig(name, url)
        override = False
        # -i command line options override ini-declared index servers;
        # the special name ALL overrides every entry at once
        if config.option.indexurl:
            for urldef in config.option.indexurl:
                m = re.match(r"\W*(\w+)=(\S+)", urldef)
                if m is None:
                    url = urldef
                    name = "default"
                else:
                    name, url = m.groups()
                    if not url:
                        url = None
                if name != "ALL":
                    config.indexserver[name].url = url
                else:
                    override = url
        # let ALL override all existing entries
        if override:
            for name in config.indexserver:
                config.indexserver[name] = IndexServerConfig(name, override)
        reader.addsubstitutions(toxworkdir=config.toxworkdir)
        config.distdir = reader.getpath(toxsection, "distdir",
                                        "{toxworkdir}/dist")
        reader.addsubstitutions(distdir=config.distdir)
        config.distshare = reader.getpath(toxsection, "distshare",
                                          distshare_default)
        reader.addsubstitutions(distshare=config.distshare)
        config.sdistsrc = reader.getpath(toxsection, "sdistsrc", None)
        config.setupdir = reader.getpath(toxsection, "setupdir", "{toxinidir}")
        config.logdir = config.toxworkdir.join("log")
        # build an envconfig for every [testenv:NAME] section
        for sectionwrapper in self._cfg:
            section = sectionwrapper.name
            if section.startswith(testenvprefix):
                name = section[len(testenvprefix):]
                envconfig = self._makeenvconfig(name, section, reader._subs,
                                                config)
                config.envconfigs[name] = envconfig
        if not config.envconfigs:
            # no explicit testenv sections: create a default "python" env
            # ("_xz_9" is a dummy section name that will not exist in the ini)
            config.envconfigs['python'] = \
                self._makeenvconfig("python", "_xz_9", reader._subs, config)
        config.envlist = self._getenvlist(reader, toxsection)
        for name in config.envlist:
            if name not in config.envconfigs:
                if name in defaultenvs:
                    config.envconfigs[name] = \
                        self._makeenvconfig(name, "_xz_9", reader._subs, config)
        # skip sdist creation by default when every selected env uses develop mode
        all_develop = all(name in config.envconfigs
                          and config.envconfigs[name].develop
                          for name in config.envlist)
        config.skipsdist = reader.getbool(toxsection, "skipsdist", all_develop)
    def _makeenvconfig(self, name, section, subs, config):
        """Build the VenvConfig for test environment *name* from ini *section*.

        Unset keys fall back to the [testenv] section.  *section* may be a
        non-existent dummy name for default environments.
        """
        vc = VenvConfig(envname=name)
        vc.config = config
        reader = IniReader(self._cfg, fallbacksections=["testenv"])
        reader.addsubstitutions(**subs)
        # an explicit --installpkg disables develop-mode installation
        vc.develop = not config.option.installpkg and \
            reader.getbool(section, "usedevelop", config.option.develop)
        vc.envdir = reader.getpath(section, "envdir", "{toxworkdir}/%s" % name)
        vc.args_are_paths = reader.getbool(section, "args_are_paths", True)
        if reader.getdefault(section, "python", None):
            raise tox.exception.ConfigError(
                "'python=' key was renamed to 'basepython='")
        if name in defaultenvs:
            bp = defaultenvs[name]
        else:
            bp = sys.executable
        vc.basepython = reader.getdefault(section, "basepython", bp)
        vc._basepython_info = config.interpreters.get_info(vc.basepython)
        reader.addsubstitutions(envdir=vc.envdir, envname=vc.envname,
                                envbindir=vc.envbindir, envpython=vc.envpython,
                                envsitepackagesdir=vc.envsitepackagesdir)
        vc.envtmpdir = reader.getpath(section, "tmpdir", "{envdir}/tmp")
        vc.envlogdir = reader.getpath(section, "envlogdir", "{envdir}/log")
        reader.addsubstitutions(envlogdir=vc.envlogdir, envtmpdir=vc.envtmpdir)
        vc.changedir = reader.getpath(section, "changedir", "{toxinidir}")
        if config.option.recreate:
            vc.recreate = True
        else:
            vc.recreate = reader.getbool(section, "recreate", False)
        args = config.option.args
        if args:
            if vc.args_are_paths:
                # rewrite positional args that are existing paths so they are
                # relative to the env's changedir
                args = []
                for arg in config.option.args:
                    origpath = config.invocationcwd.join(arg, abs=True)
                    if origpath.check():
                        arg = vc.changedir.bestrelpath(origpath)
                    args.append(arg)
            reader.addsubstitutions(args)
        setenv = {}
        if config.hashseed is not None:
            setenv['PYTHONHASHSEED'] = config.hashseed
        setenv.update(reader.getdict(section, 'setenv'))
        vc.setenv = setenv
        if not vc.setenv:
            vc.setenv = None
        vc.commands = reader.getargvlist(section, "commands")
        vc.whitelist_externals = reader.getlist(section,
                                                "whitelist_externals")
        vc.deps = []
        for depline in reader.getlist(section, "deps"):
            # a ":indexname:pkgspec" dep selects a named index server
            m = re.match(r":(\w+):\s*(\S+)", depline)
            if m:
                iname, name = m.groups()
                ixserver = config.indexserver[iname]
            else:
                name = depline.strip()
                ixserver = None
            name = self._replace_forced_dep(name, config)
            vc.deps.append(DepConfig(name, ixserver))
        vc.distribute = reader.getbool(section, "distribute", False)
        vc.sitepackages = self.config.option.sitepackages or \
            reader.getbool(section, "sitepackages", False)
        vc.downloadcache = None
        downloadcache = reader.getdefault(section, "downloadcache")
        if downloadcache:
            # env var, if present, takes precedence
            downloadcache = os.environ.get("PIP_DOWNLOAD_CACHE", downloadcache)
            vc.downloadcache = py.path.local(downloadcache)
        pip_default_opts = ["--pre", "{opts}", "{packages}"]
        vc.install_command = reader.getargv(
            section,
            "install_command",
            "pip install " + " ".join(pip_default_opts),
        )
        if '{packages}' not in vc.install_command:
            raise tox.exception.ConfigError(
                "'install_command' must contain '{packages}' substitution")
        return vc
def _getenvlist(self, reader, toxsection):
env = self.config.option.env
if not env:
env = os.environ.get("TOXENV", None)
if not env:
envlist = reader.getlist(toxsection, "envlist", sep=",")
if not envlist:
envlist = self.config.envconfigs.keys()
return envlist
envlist = _split_env(env)
if "ALL" in envlist:
envlist = list(self.config.envconfigs)
envlist.sort()
return envlist
def _replace_forced_dep(self, name, config):
"""
Override the given dependency config name taking --force-dep-version
option into account.
:param name: dep config, for example ["pkg==1.0", "other==2.0"].
:param config: Config instance
:return: the new dependency that should be used for virtual environments
"""
if not config.option.force_dep:
return name
for forced_dep in config.option.force_dep:
if self._is_same_dep(forced_dep, name):
return forced_dep
return name
@classmethod
def _is_same_dep(cls, dep1, dep2):
"""
Returns True if both dependency definitions refer to the
same package, even if versions differ.
"""
dep1_name = pkg_resources.Requirement.parse(dep1).project_name
| |
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LxmertConfig
base_model_prefix = "lxmert"
    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        # Delegate to the main layer's dummy inputs so the model can be
        # built/traced before real data is available.
        return getattr(self, self.base_model_prefix).dummy_inputs
    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "visual_feats": tf.TensorSpec((None, None, None), tf.float32, name="visual_feats"),
                "visual_pos": tf.TensorSpec((None, None, None), tf.float32, name="visual_pos"),
                "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"),
                "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
            }
        ]
    )
    def serving(self, inputs):
        """SavedModel serving entry point: run the model on the traced input
        signature and convert the outputs to serving-friendly tensors."""
        output = self.call(inputs)
        return self.serving_output(output)
LXMERT_START_DOCSTRING = r"""
The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
Transformers](https://arxiv.org/abs/1908.07490) by <NAME> and <NAME>. It's a vision and language transformer
model, pre-trained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual
genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss
for question answering attribute prediction, and object tag prediction.
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the
tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
first positional argument :
- a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Parameters:
config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
# Per-argument docstring injected into `call` via @add_start_docstrings_to_model_forward.
# Fixes: "MMask" -> "Mask", "spacial" -> "spatial", "They ROI pooled" -> "They are ROI pooled".
LXMERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`LxmertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.
            [What are input IDs?](../glossary#input-ids)
        visual_feats: (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
            This input represents visual features. They are ROI pooled object features from bounding boxes using a
            faster-RCNN model)
            These are currently not provided by the transformers library.
        visual_pos: (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
            This input represents spatial features corresponding to their relative (via index) visual features. The
            pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
            1.
            These are currently not provided by the transformers library.
        attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:
            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            [What are token type IDs?](../glossary#token-type-ids)
        inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
            in eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""
@add_start_docstrings(
    "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
    LXMERT_START_DOCSTRING,
)
class TFLxmertModel(TFLxmertPreTrainedModel):
    """Bare LXMERT transformer: wraps TFLxmertMainLayer with no task head."""
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.lxmert = TFLxmertMainLayer(config, name="lxmert")
    @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFLxmertModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids=None,
        visual_feats=None,
        visual_pos=None,
        attention_mask=None,
        visual_attention_mask=None,
        token_type_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        # Normalize the several accepted input formats (keyword args,
        # list/tuple, dict) into a single dict before delegating.
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            visual_feats=visual_feats,
            visual_pos=visual_pos,
            attention_mask=attention_mask,
            visual_attention_mask=visual_attention_mask,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )
        outputs = self.lxmert(
            input_ids=inputs["input_ids"],
            visual_feats=inputs["visual_feats"],
            visual_pos=inputs["visual_pos"],
            attention_mask=inputs["attention_mask"],
            visual_attention_mask=inputs["visual_attention_mask"],
            token_type_ids=inputs["token_type_ids"],
            inputs_embeds=inputs["inputs_embeds"],
            output_attentions=inputs["output_attentions"],
            output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )
        return outputs
    def serving_output(self, output):
        # Convert optional outputs to tensors for SavedModel serving,
        # honoring the config's output_hidden_states / output_attentions flags.
        l_hs = tf.convert_to_tensor(output.language_hidden_states) if self.config.output_hidden_states else None
        v_hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None
        l_attns = tf.convert_to_tensor(output.language_attentions) if self.config.output_attentions else None
        v_attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None
        c_enc_attns = tf.convert_to_tensor(output.cross_encoder_attentions) if self.config.output_attentions else None
        return TFLxmertModelOutput(
            pooled_output=output.pooled_output,
            language_output=output.language_output,
            vision_output=output.vision_output,
            language_hidden_states=l_hs,
            vision_hidden_states=v_hs,
            language_attentions=l_attns,
            vision_attentions=v_attns,
            cross_encoder_attentions=c_enc_attns,
        )
class TFLxmertPooler(tf.keras.layers.Layer):
    """Pools the sequence by projecting the first token through a tanh Dense layer."""
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="tanh",
            name="dense",
        )

    def call(self, hidden_states):
        # "Pooling" = the transformed hidden state of the first token.
        first_token_state = hidden_states[:, 0]
        return self.dense(first_token_state)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert
class TFLxmertPredictionHeadTransform(tf.keras.layers.Layer):
    """Dense -> activation -> LayerNorm transform applied before LM decoding."""
    def __init__(self, config: LxmertConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )
        # hidden_act may be given as an activation name or as a callable
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(inputs=hidden_states)
        return hidden_states
# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert
class TFLxmertLMPredictionHead(tf.keras.layers.Layer):
    """Masked-LM head: transform hidden states, then decode to vocabulary
    logits using the (tied) input embedding matrix plus a per-token bias."""
    def __init__(self, config: LxmertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        self.hidden_size = config.hidden_size
        self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings
    def build(self, input_shape: tf.TensorShape):
        # one bias per vocabulary entry, created lazily at build time
        self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
        super().build(input_shape)
    def get_output_embeddings(self) -> tf.keras.layers.Layer:
        return self.input_embeddings
    def set_output_embeddings(self, value: tf.Variable):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]
    def get_bias(self) -> Dict[str, tf.Variable]:
        return {"bias": self.bias}
    def set_bias(self, value: tf.Variable):
        self.bias = value["bias"]
        self.vocab_size = shape_list(value["bias"])[0]
    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.transform(hidden_states=hidden_states)
        seq_length = shape_list(hidden_states)[1]
        # flatten to 2-D, project onto the (transposed) embedding matrix,
        # then restore the (batch, seq, vocab) shape and add the bias
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
        return hidden_states
# Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert
class TFLxmertMLMHead(tf.keras.layers.Layer):
    """Thin wrapper exposing the masked-LM prediction head under "predictions"."""
    def __init__(self, config: LxmertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)
        self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
    def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
        prediction_scores = self.predictions(hidden_states=sequence_output)
        return prediction_scores
class TFLxmertPreTrainingHeads(tf.keras.layers.Layer):
    """Bundles the masked-LM head with a 2-way relationship classifier for pretraining."""
    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)
        self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
        self.seq_relationship = tf.keras.layers.Dense(
            2,
            kernel_initializer=get_initializer(config.initializer_range),
            name="seq_relationship",
        )

    def call(self, sequence_output, pooled_output):
        # token-level vocabulary logits from the MLM head
        token_scores = self.predictions(sequence_output)
        # binary relationship logits from the pooled representation
        relationship_logits = self.seq_relationship(pooled_output)
        return token_scores, relationship_logits
class TFLxmertVisualAnswerHead(tf.keras.layers.Layer):
    """Two-layer GeLU MLP mapping pooled features to `num_labels` answer logits."""
    def __init__(self, config, num_labels, **kwargs):
        super().__init__(**kwargs)
        hidden = config.hidden_size
        # Layer names mirror the PyTorch checkpoint's "logit_fc.<i>" module path,
        # so pretrained weights can be loaded by name.
        self.dense = tf.keras.layers.Dense(
            hidden * 2,
            kernel_initializer=get_initializer(config.initializer_range),
            name="logit_fc_._0",
        )
        self.activation = get_tf_activation("gelu")
        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2")
        self.dense_1 = tf.keras.layers.Dense(
            num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="logit_fc_._3",
        )

    def call(self, hidden_states):
        features = self.dense(hidden_states)
        features = self.activation(features)
        features = self.layer_norm(features)
        return self.dense_1(features)
class TFLxmertVisualObjHead(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
| |
<reponame>JohnGBaker/tess-short-binaries
# Applies an algorithm to significantly decimate dense microlensing data (e.g. from the WFIRST data challenge) without losing information.
import numpy as np
import argparse
import matplotlib
import matplotlib.pyplot as plt
# The idea: we make successive passes over the data, decimating by progressive factors of two.
# In each pass we perform a linear least-squares fit (LLSF) over the surrounding N points, then
# compare the likelihood of that fit both using the full data and using a factor-of-two
# decimation of the target points. If the difference is smaller than some tolerance
# (weighted by the original data's rms error), we keep the decimated version.
# data are in a np array of the form [t,x,sigma^2,tmin,tmax]
# line info is in the form [t0, x0, slope]
def chi2(data, line):
    """Chi-squared of *data* against a straight line [t0, x0, slope].

    data columns are [t, x, sigma^2, tmin, tmax]; only the first three are used.
    """
    predicted = line[1] + (data[:, 0] - line[0]) * line[2]
    residuals = data[:, 1] - predicted
    return np.sum(residuals * residuals / data[:, 2])
def DoLLSF(data, tref=None):
    """Inverse-variance weighted linear least-squares fit to *data*.

    data columns are [t, x, sigma^2, ...].  Returns [tref, x(tref), slope],
    where tref defaults to the weighted mean time of the data.
    """
    sig2 = 1 / np.sum(data[:, 2] ** (-1))
    tbar = sig2 * np.sum(data[:, 0] / data[:, 2])
    xbar = sig2 * np.sum(data[:, 1] / data[:, 2])
    dt = data[:, 0] - tbar
    t2sum = np.sum(dt ** 2 / data[:, 2])
    xtsum = np.sum(dt * (data[:, 1] - xbar) / data[:, 2])
    slope = xtsum / t2sum
    if tref is None:
        tref = tbar
    # shift the intercept from the weighted mean time to the requested tref
    return np.array([tref, xbar + (tref - tbar) * slope, slope])
def subData(data,line,dchitol):
    """Attempt to collapse the rows of *data* into a single equivalent point.

    The replacement point preserves the linear least-squares fit of the
    surrounding data (see the derivation in the comments below).  Returns a
    one-row array [[tnew, xnew, sig2new, tmin, tmax]] on success, or *data*
    unchanged when the time-range or delta-chi2 constraints fail.
    Updates the module-level counters nsub, nsubtfail, nsubchi2fail.
    """
    #We will replace the data with a single point, requiring that
    # 1. llsf fit for this data + other data is unchanged
    #    -require slope and x0 variations of delta chi2 vanish
    # 2. the derivative of chi^2 wrt llsf intercept at mean time is preserved
    #
    # deltachi = sum[ (xi -x0 -(ti-t0)*s)^2 / sig2i ] - (xnew -x0 -(tnew-t0)*s)^2 / sig2new
    #
    # d^2deltachi/dx0^2 = 0 -> 1/sig2new = sum(1/sig2i)
    # and
    # d deltachi/dx0 = 0 -> xnew -x0 -s*(tnew-t0) = sig2new * sum((xi-x0-s*(ti-t0))/sig2i)
    #                                             = xd-x0
    # where xd=sig2new*sum(xi/sig2i), and we write the line setting t0=t0d=sig2new*sum(ti/sig2i)
    # and
    # d deltachi/ds = 0 = -sum((ti-t0)*(xi-x0-s*(ti-t0))/sig2i) + (tnew-t0)*(xnew-x0-s*(tnew-t0))/sig2new
    #                   = -sum((ti-t0)*ri/sig2i) + (tnew-t0)*(xd-x0)/sig2new
    # where ri = xi-x0-s*(ti-t0)
    #
    # For the last equation, if xd!=x0, we can set tnew to solve, but we constrain tnew to be within the
    # time limits of the data.
    # We also constrain the size of the resulting deltachi to be below some limit, after solving as above
    global nsub, nsubtfail,nsubchi2fail
    nsub+=1
    # inverse-variance weighted mean time/flux of the points being replaced
    sig2new=1/np.sum(data[:,2]**(-1))
    t0d=sig2new*np.sum(data[:,0]/data[:,2])
    xd=sig2new*np.sum(data[:,1]/data[:,2])
    slope=line[2]
    x0=(t0d-line[0])*slope+line[1]
    trel=data[:,0]-t0d;
    res=(data[:,1]-x0-trel*slope)
    #compute new t point to ensure that slope matches line
    trsum=np.sum(trel*res/data[:,2])
    xsum=(xd-x0)/sig2new
    if(xsum==0):
        # degenerate case: no residual offset; only allowed if slope term also vanishes
        if(trsum==0):toff=0
        else: return data
    else: toff=trsum/xsum
    dataTmax=data[-1,4]
    dataTmin=data[0,3]
    # reject if the solved time offset falls outside the data's time range
    if(dataTmax-t0d <= toff ):
        nsubtfail+=1
        return data
    if(dataTmin-t0d >= toff ):
        nsubtfail+=1
        return data
    tnew=t0d+toff
    #compute new xval
    xnew=xd+slope*(tnew-t0d)
    # reject if the information lost (delta chi2) exceeds the tolerance
    dchi=(np.sum(res*res/data[:,2])-(xd-x0)**2/sig2new)
    if(dchi>dchitol):
        nsubchi2fail+=1
        return data
    return np.array([[tnew,xnew,sig2new,dataTmin,dataTmax]])
def reduceDataChunk(segment,target,tol):
    """Fit a line over *segment* and, if the fit is acceptable, try to
    collapse the *target* sub-chunk into a single point via subData().

    :param segment: the wider data window used for the linear fit.
    :param target: the sub-array (a view into the same data) to replace.
    :param tol: reduced-chi2 tolerance for accepting the linear fit.
    :return: the replacement array from subData(), or *target* unchanged.
    Updates the module-level counters nchi2, nchi2fail.
    """
    line=DoLLSF(segment)
    n=len(segment)
    if( n - len(target) < 2):
        #in this case there is no real solution to the formal problem I pose
        #if there is just 1 remaining point, then a solution could be found, but it
        #will be set at the location of the remaining point and will not satisfy the
        #time-range condition
        return target
    redchi2=chi2(segment,line)/(n-2)
    global nchi2,nchi2fail
    nchi2+=1
    if(redchi2>1+tol):
        # the line does not describe the segment well enough; keep the data
        nchi2fail+=1
        return target
    return subData(target,line,tol*n)
def reduceDataPass(data,chunksize,tol,segwid=3):
    """Run one decimation pass over *data*.

    The data are partitioned into chunks of *chunksize* points; each chunk is
    tentatively replaced by a single point using a linear fit over a wider
    surrounding segment (segwid * chunksize points).

    :param data: array with columns [t, x, sigma^2, tmin, tmax].
    :param chunksize: number of points per target chunk.
    :param tol: tolerance passed to reduceDataChunk.
    :param segwid: width of the fitting segment in units of chunksize.
    :return: a new (possibly shorter) data array.
    """
    ndata=len(data)
    nchunk=int(ndata/chunksize)+1 #nchunk gives enough to cover the dataset+1
    segsize=int(segwid*chunksize)
    noff=int((nchunk*chunksize-ndata)/2) #half the amount of overhang beyond the data
    nfirst=chunksize #This gives the location of the first (leftmost) chunk boundary, i.e. the index of the first datapoint in the second chunk.
    if(noff>0):nfirst-=noff
    for i in range(nchunk):
        #set the range of the target chunk constraining within bounds
        itargleft=nfirst+(i-1)*chunksize
        if(itargleft<0):itargleft=0
        itargright=nfirst+i*chunksize
        if(itargright>ndata):itargright=ndata
        target=data[itargleft:itargright]
        #time grouping test:
        dtmax=0;dtmin=target[-1,0]-target[0,0]
        for k in range(len(target)-1):
            dt=target[k+1,0]-target[k,0]
            if(dt>dtmax):dtmax=dt
        #for the time grouping test dtmin we expand to the nearest neighbor points (if any)
        for k in range(max(0,itargleft-1),min(ndata-1,itargright+1)):
            dt=data[k+1,0]-data[k,0]
            if(dt<dtmin):dtmin=dt
        if(len(target)<2 or dtmax/dtmin > 30):
            #target too short or times not grouped
            replacement=target.copy()
        else: #passed test so continue
            #set the range of the surrounding segment, centered on the target
            isegleft=int((itargleft+itargright-segsize)/2)
            if(isegleft<0):isegleft=0
            isegright=isegleft+segsize
            if(isegright>ndata):isegright=ndata
            segment=data[isegleft:isegright]
            replacement=reduceDataChunk(segment,target,tol).copy()
        # accumulate the (possibly reduced) chunks into the output array
        if(i==0):newdata=replacement
        else: newdata=np.append(newdata,replacement,axis=0)
    return newdata
def zeroCounters():
    """Reset the module-level diagnostic counters that record how often each
    reduction test fires/fails during a decimation pass."""
    global nchi2, nchi2fail, nsub, nsubtfail, nsubchi2fail
    nchi2 = nchi2fail = 0
    nsub = nsubtfail = nsubchi2fail = 0
def decimate(origdata, lev, maxpass=1000, ntemper=20, csscale=1000, npretemper=0,verbose=False):
    """Iteratively decimate a time-series dataset while trying to lose
    minimal information.

    :param origdata: 2-D numpy array; either 3 columns (t, flux, err) or
        5 columns (t, flux, err**2, tmin, tmax).  The input is not modified.
    :param lev: aggressiveness of the reduction (roughly 0..15); controls the
        segment width, chi^2 tolerance and minimum chunk size via the
        blended scalings below.
    :param maxpass: maximum number of reduction passes.
    :param ntemper: pass-count scale for shrinking the chunk size.
    :param csscale: initial chunk-size scale.
    :param npretemper: number of initial small-chunk "pre-tempering" passes.
    :param verbose: if True, print the tuned parameters.
    :return: the reduced data array, in the same column convention (3 or 5)
        as the input.
    """
    #first configure the data. Internally, we work with 5-column data:
    # t, flux, err**2, tmin, tmax
    #We also support 3 column data: t,flux,err
    data=origdata.copy()
    threecols=data.shape[1]==3
    if threecols:
        data=np.array([[d[0],d[1],d[2]**2,d[0],d[0]] for d in data])
    #first we tune some parameters based on 'lev' option
    #Note that I find similar levels of concentration [and net num of samples] on the peak region for segw=csmin*nwid~75 with csmin varying from 4->10
    #These tests are done with
    #segw=75,tol=0.25      segw=150,tol=0.25  segw=150,tol=0.5  segw=75,tol=0.5
    #2: n=523 nev=321 F=.61  764 / 1182 = .64  533 /  799 = .67  338 / 476 = .71
    #3: n=736 nev=472 F=.64  704 / 1158 = .61  523 /  823 = .64  330 / 487 = .68
    #4: n=783 nev=421 F=.54  747 / 1196 = .62  536 /  909 = .59  368 / 659 = .56
    #5: n=900 nev=494 F=.55  784 / 1389 = .56  617 / 1174 = .53  386 / 744 = .52
    #6: n=796 nev=425 F=.53  728 / 1306 = .62  670 / 1140 = .59  437 / 782 = .56
    #7: n=877 nev=485 F=.55  812 / 1409 = .58
    #8: n=917 nev=512 F=.56  797 / 1324 = .60  684 / 1253 = .55  384 / 769 = .50
    #9: n=908 nev=504 F=.55
    #10:n=908 nev=493 F=.54  787 / 1283 = .61  695 / 1167 = .60
    #11:n=1022 nev=476 F=.46
    #12:n=926 nev=398 F=.43  753 / 1317 = .57  666 / 1137 = .59
    #14:n=1109 nev=513 F=.46 819 / 1433 = .57  664 / 1188 = .56
    segw=150;tol=0.2;csmin=10
    #here we set up some scalings for these params blending between the following guides
    #lev=0:segw=1000,tol=0.05,csmin=25  #few % red of lens reg. but next 10x reduced overall
    #lev=5:segw=150,tol=0.2,csmin=10    #reduction by factor of ~30 overall
    #lev=10:segw=60,tol=0.5,csmin=2     #reduction by factor ~100 overall
    #lev=15:segw=25,tol=1.0,csmin=2     #reduction by factor >200 overall
    #log-linear interpolation between the guide points above
    if(lev<=5):
        x=lev/5.0
        segw=int(np.exp(np.log(1000)*(1-x)+np.log(150)*x))
        tol=np.exp(np.log(0.05)*(1-x)+np.log(0.2)*x)
        csmin=int(25*(1-x)+10*x)
        #csmin=10
    elif(lev<=10):
        x=(lev-5)/5.0
        segw=int(np.exp(np.log(150)*(1-x)+np.log(60)*x))
        tol=np.exp(np.log(0.2)*(1-x)+np.log(0.5)*x)
        csmin=int(10*(1-x)+2.0*x)
    else:
        x=(lev-10)/5.0
        segw=int(np.exp(np.log(60)*(1-x)+np.log(25)*x))
        tol=np.exp(np.log(0.5)*(1-x)+np.log(1.0)*x)
        csmin=2
    if(verbose):print("segw,csmin,tol:",segw,csmin,tol)
    nwid=int(segw/csmin)
    ##Now for the actual decimation algorithm
    lastcs=0
    doneAtSize=False
    for i in range(maxpass):
        zeroCounters()
        #with pretempering we begin with a pass of small chunk smoothing to make it less likely to cut small features.
        if(i<npretemper):
            chunksize=int(csmin*np.exp(np.log((1+csscale/csmin))*(i/(1.0+npretemper))))
            ieff=0
        else:
            #after pre-tempering, shrink the chunk size toward csmin over passes
            ieff=i-npretemper
            chunksize=int(csmin+csscale/(ieff/ntemper*(1+ieff/ntemper)+1))
        if(chunksize==lastcs and doneAtSize):
            #already tried this case
            continue
        #print(i, "ieff=",ieff)
        #print("Trying chunksize=",chunksize)
        newdata = reduceDataPass(data,chunksize,tol,nwid)
        #print("data size ",len(data),"->",len(newdata))
        #print("fail rate: chi2:",nchi2fail/(nchi2+2e-18),"sub t:",nsubtfail/(nsub+2e-18),"sub chi2:",nsubchi2fail/(nsub+2e-18))
        #datallsf=DoLLSF(origdata,tref=0)
        #newdatallsf=DoLLSF(newdata,tref=0)
        #print("llsf:",datallsf[1:3],"->",newdatallsf[1:3]," delta=",(newdatallsf-datallsf)[1:3])
        #termination condition: no further reduction at the smallest chunk size
        if(len(newdata)==len(data) and lastcs==chunksize and i>npretemper):
            if(chunksize<=csmin):
                break
            else: doneAtSize=True
        else:doneAtSize=False
        lastcs=chunksize
        data=newdata
    #convert back to the caller's 3-column convention if that is what came in
    if threecols:
        data=np.array([[d[0],d[1],np.sqrt(d[2])] for d in data])
    return data
def main():
parser = argparse.ArgumentParser(description='Attempt to decimate data losing minimal information.')
parser.add_argument('fname', metavar='chain_file', type=str, help='Input file path')
parser.add_argument('-lev', default="5",help='Level of aggressiveness in data reduction')
parser.add_argument('-anscol', type=int, default="-1",help='Level of aggressiveness in data reduction')
parser.add_argument('-plot', action="store_true", help='Plot results instead of saving to file.')
parser.add_argument('-evalonly', action="store_true", help='Perform evaluation from precomputed results.')
parser.add_argument('-esterr', action="store_true", help='Roughly estimate error bar level from the first few points.')
parser.add_argument('-q', action="store_true", help='Run in quiet mode with minimal screen output.')
args = parser.parse_args()
lev=int(args.lev)
tag="lev"+str(lev)
data=np.loadtxt(args.fname) #Assume reading in t,x,sigma
#data=np.array([[t,np.random.normal(),1] for t in range(300)])#fake data
#data=np.array([[t,0.1*(t%2)+t,1] for t in range(10)])#fake data
#data=np.array([[d[0],d[1],d[2]**2,d[0],d[0]] for d in data])
#err=np.std([d[2] for d in data[:1600]])
#print("err=",err)
#err=np.std([d[2] for d in data[:400]])
#print("err=",err)
#err=np.std([d[2] for d in data[:100]])
#print("err=",err)
#print("err=",err)
tcol=0
dcol=1
if(args.anscol>=0):
ans=np.array([[d[0],d[args.anscol]] for d in data])
if(args.anscol<=tcol):tcol+=1
if(args.anscol<=dcol):dcol+=1
#print("ans:",ans.shape)
if(args.esterr):
err=np.std([d[2] for d in data[:25]])
if(not args.q):print("Using err=",err)
data=np.array([[d[tcol],d[dcol],err**2,d[tcol],d[tcol]] for d in data])
else:
data=np.array([[d[tcol],d[dcol],d[dcol+1]**2,d[tcol],d[tcol]] for d in data])
origdata=data.copy()
data=decimate(data,lev,verbose=not args.q)
if(not args.q):print("nsamples:",len(origdata),"->",len(newdata))
if(plot):
plt.errorbar(origdata[:,0],origdata[:,1],yerr=np.sqrt(origdata[:,2]),fmt="+")
plt.errorbar(newdata[:,0],newdata[:,1],yerr=np.sqrt(newdata[:,2]),fmt=".")
icut=int(len(newdata)*9/10)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
icut=int(len(newdata)*4/5)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
icut=int(len(newdata)*3/5)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
icut=int(len(newdata)*2/5)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
icut=int(len(newdata)/5)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
icut=int(len(newdata)/10)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
if(args.anscol>=0 and False):
plt.plot(ans[:,0],ans[:,1],"k-",linewidth=2)
newdata=np.array([[d[0],d[1],np.sqrt(d[2])] for d in newdata])
if(".txt" in args.fname):
outfile=args.fname.replace(".txt","_"+tag+".dat")
elif(".dat" in args.fname):
outfile=args.fname.replace(".dat","_"+tag+".dat")
elif(".out" in args.fname):
outfile=args.fname.replace(".out","_"+tag+".dat")
else:
outfile=args.fname+".dat"
if(not args.plot and args.anscol>=0):
#Given a noise free 'answer' we estimate errors from the decimation in two ways.
#Method 1: Estimate the reduced chi2 deviation of the true data from the decimated data
# In this case the true values are linearly interpolated to the decimated sample points
# and the variance comes from the decimated data estimate.
diff=newdata[:,1]-np.interp(newdata[:,0],ans[:,0],ans[:,1])
var=newdata[:,2]**2
#print(diff,var)
ncount=len(diff)
rchi2a=np.sum(diff*diff/var)/ncount
#Method 2: Estimate the reduced chi2 deviation of the decimated data from the true data
# In this case the decimated values are linearly interpolated to the | |
# ElectrumSV - lightweight Bitcoin client
# Copyright (C) 2012 <EMAIL>
# Copyright (C) 2019-2020 The ElectrumSV Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''ElectrumSV Preferences dialog.'''
from functools import partial
from typing import Optional, TYPE_CHECKING
import weakref
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QCheckBox, QComboBox, QDialog, QGroupBox, QHBoxLayout, QLabel, QSpinBox, QTabWidget,
QVBoxLayout, QWidget
)
from electrumsv import qrscanner
from electrumsv.app_state import app_state
from electrumsv.constants import (MAXIMUM_TXDATA_CACHE_SIZE_MB, MINIMUM_TXDATA_CACHE_SIZE_MB,
WalletSettings)
from electrumsv.extensions import label_sync
from electrumsv.extensions import extensions
from electrumsv.i18n import _, languages
import electrumsv.web as web
from electrumsv.wallet import AbstractAccount, Wallet
from .amountedit import BTCSatsByteEdit
from .util import Buttons, CloseButton, FormSectionWidget, HelpButton, HelpLabel, MessageBox
if TYPE_CHECKING:
from electrumsv.gui.qt.main_window import ElectrumWindow
class PreferencesDialog(QDialog):
def __init__(self, main_window: 'ElectrumWindow', wallet: Wallet,
account: Optional[AbstractAccount]=None):
'''The preferences dialog has a account tab only if account is given.'''
super().__init__(main_window, Qt.WindowSystemMenuHint | Qt.WindowTitleHint |
Qt.WindowCloseButtonHint)
self.setWindowTitle(_('Preferences'))
self._main_window = weakref.proxy(main_window)
self.lay_out(wallet, account)
self.initial_language = app_state.config.get('language', None)
def accept(self) -> None:
if app_state.fx:
app_state.fx.trigger_history_refresh()
# Qt on Mac has a bug with "modalSession has been exited prematurely" That means
# you cannot create a modal dialog when exiting a model dialog, such as in the
# finished signal. So we do this in the accept() function instead.
if self.initial_language != app_state.config.get('language', None):
MessageBox.show_warning(
_('Restart ElectrumSV to activate your updated language setting'),
title=_('Success'), parent=self)
super().accept()
def lay_out(self, wallet: Wallet, account: Optional[AbstractAccount]) -> None:
tabs_info = [
(self.general_widgets, _('General')),
(self.tx_widgets, _('Transactions')),
(self.fiat_widgets, _('Fiat')),
(partial(self.extensions_widgets, account), _('Extensions')),
]
tabs_info.append((partial(self._wallet_widgets, wallet), _('Wallet')))
tabs_info.append((self.network_widgets, _('Network')))
tabs_info.append((self.ui_widgets, _('UI')))
tabs = QTabWidget()
tabs.setUsesScrollButtons(False)
for widget_func, name in tabs_info:
tab = QWidget()
widget_func(tab)
tabs.addTab(tab, name)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(self)))
self.setLayout(vbox)
def tx_widgets(self, tab: QWidget) -> None:
def on_customfee(_text):
amt = customfee_e.get_amount()
m = int(amt * 1000.0) if amt is not None else None
app_state.config.set_key('customfee', m)
app_state.app.custom_fee_changed.emit()
customfee_e = BTCSatsByteEdit()
customfee_e.setAmount(app_state.config.custom_fee_rate() / 1000.0
if app_state.config.has_custom_fee_rate() else None)
customfee_e.textChanged.connect(on_customfee)
# customfee_label = HelpLabel(_('Custom Fee Rate'),
# _('Custom Fee Rate in Satoshis per byte'))
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(app_state.config.get('confirmed_only', False))
def on_unconf(state):
app_state.config.set_key('confirmed_only', state != Qt.Unchecked)
unconf_cb.stateChanged.connect(on_unconf)
options_box = QGroupBox()
options_vbox = QVBoxLayout()
options_box.setLayout(options_vbox)
options_vbox.addWidget(unconf_cb)
form = FormSectionWidget(minimum_label_width=120)
form.add_row(_('Custom Fee Rate'), customfee_e)
form.add_row(_("Options"), options_box, True)
vbox = QVBoxLayout()
vbox.addWidget(form)
vbox.addStretch(1)
tab.setLayout(vbox)
    def general_widgets(self, tab: QWidget) -> None:
        '''Populate the "General" preferences tab: language, display units,
        block explorer, video device and software-update settings.'''
        # language
        lang_modifiable = app_state.config.is_modifiable('language')
        lang_pairs = sorted((code, language) for language, code in languages.items())
        language_names, language_keys = zip(*lang_pairs)
        # lang_label = HelpLabel(_('Language') + ':',
        #     _('Select which language is used in the GUI (after restart).'))
        # lang_label.setEnabled(lang_modifiable)
        lang_combo = QComboBox()
        lang_combo.setEnabled(lang_modifiable)
        lang_combo.addItems(language_names)
        try:
            index = language_keys.index(app_state.config.get("language", ''))
        except ValueError:
            # Configured language is not in the known list; default to first entry.
            index = 0
        lang_combo.setCurrentIndex(index)
        def on_lang(index):
            # Persist the selection only when it actually changed.
            lang_request = language_keys[index]
            if lang_request != app_state.config.get('language'):
                app_state.config.set_key("language", lang_request, True)
        lang_combo.currentIndexChanged.connect(on_lang)
        nz_modifiable = app_state.config.is_modifiable('num_zeros')
        # nz_label = HelpLabel(_('Zeros after decimal point') + ':',
        #     _('Number of zeros displayed after the decimal point. '
        #       'For example, if set to 2, "1." will be displayed as "1.00"'))
        # nz_label.setEnabled(nz_modifiable)
        nz = QSpinBox()
        nz.setMinimum(0)
        nz.setMaximum(app_state.decimal_point)
        nz.setValue(app_state.num_zeros)
        nz.setEnabled(nz_modifiable)
        def on_nz():
            value = nz.value()
            if app_state.num_zeros != value:
                app_state.num_zeros = value
                app_state.config.set_key('num_zeros', value, True)
                app_state.app.num_zeros_changed.emit()
        nz.valueChanged.connect(on_nz)
        # unit_label = HelpLabel(_('Base unit') + ':', '\n'.join((
        #     _('Base unit of display in the application.'),
        #     '1 BSV = 1,000 mBSV = 1,000,000 bits.',
        # )))
        unit_combo = QComboBox()
        unit_combo.addItems(app_state.base_units)
        unit_combo.setCurrentIndex(app_state.base_units.index(app_state.base_unit()))
        def on_unit(index):
            app_state.set_base_unit(app_state.base_units[index])
            # The zeros spinbox maximum depends on the chosen base unit.
            nz.setMaximum(app_state.decimal_point)
        unit_combo.currentIndexChanged.connect(on_unit)
        # NOTE(review): msg is only referenced by the commented-out HelpLabel below.
        msg = _('Choose which online block explorer to use for functions that open a web browser')
        # block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
        block_explorers = web.BE_sorted_list()
        block_ex_combo = QComboBox()
        block_ex_combo.addItems(block_explorers)
        block_ex_combo.setCurrentIndex(block_ex_combo.findText(
            web.BE_from_config(app_state.config)))
        def on_be(index):
            app_state.config.set_key('block_explorer', block_explorers[index], True)
        block_ex_combo.currentIndexChanged.connect(on_be)
        # qr_label = HelpLabel(_('Video Device') + ':',
        #     _("Install the zbar package to enable this."))
        qr_combo = QComboBox()
        qr_combo.addItem("Default", "default")
        system_cameras = qrscanner.find_system_cameras()
        for camera, device in system_cameras.items():
            qr_combo.addItem(camera, device)
        qr_combo.setCurrentIndex(qr_combo.findData(app_state.config.get("video_device")))
        # QR scanning requires libzbar; disable the combo when it is missing.
        qr_combo.setEnabled(qrscanner.libzbar is not None)
        def on_video_device(index):
            app_state.config.set_key("video_device", qr_combo.itemData(index), True)
        qr_combo.currentIndexChanged.connect(on_video_device)
        updatecheck_box = QGroupBox()
        updatecheck_vbox = QVBoxLayout()
        updatecheck_box.setLayout(updatecheck_vbox)
        # The main checkbox, which turns update checking on or off completely.
        updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
        updatecheck_cb.setChecked(app_state.config.get('check_updates', True))
        def on_set_updatecheck(v):
            app_state.config.set_key('check_updates', v == Qt.Checked, save=True)
        updatecheck_cb.stateChanged.connect(on_set_updatecheck)
        updatecheck_vbox.addWidget(updatecheck_cb)
        # The secondary checkbox, which determines if unstable releases result in notifications.
        updatecheck_unstable_cb = QCheckBox(_("Ignore unstable releases"))
        updatecheck_unstable_cb.setChecked(
            app_state.config.get('check_updates_ignore_unstable', True))
        def on_set_updatecheck_unstable(v):
            app_state.config.set_key('check_updates_ignore_unstable', v == Qt.Checked, save=True)
        updatecheck_unstable_cb.stateChanged.connect(on_set_updatecheck_unstable)
        updatecheck_vbox.addWidget(updatecheck_unstable_cb)
        form = FormSectionWidget(minimum_label_width=130)
        form.add_row(_('Language'), lang_combo)
        form.add_row(_('Zeros after decimal point'), nz)
        form.add_row(_('Base unit'), unit_combo)
        form.add_row(_('Online block explorer'), block_ex_combo)
        form.add_row(_('Video device'), qr_combo)
        form.add_row(_('Software updates'), updatecheck_box)
        vbox = QVBoxLayout()
        vbox.addWidget(form)
        vbox.addStretch(1)
        tab.setLayout(vbox)
    def fiat_widgets(self, tab: QWidget) -> None:
        '''Populate the "Fiat" preferences tab: currency/exchange selection and
        the historical-rate and fiat-balance display options.'''
        # Fiat Currency
        hist_checkbox = QCheckBox(_('Show historical rates'))
        fiat_balance_checkbox = QCheckBox(_('Show Fiat balance for addresses'))
        ccy_combo = QComboBox()
        ex_combo = QComboBox()
        # FIXME: note main window tabs are not correctly hooked up to FX rate changes
        # to refresh when an update comes in from twiddling here
        def update_currencies():
            fx = app_state.fx
            if fx:
                currencies = sorted(fx.get_currencies())
                ccy_combo.clear()
                # Index 0 ("None") means fiat display is disabled.
                ccy_combo.addItems([_('None')] + currencies)
                if fx.is_enabled():
                    ccy_combo.setCurrentIndex(ccy_combo.findText(fx.get_currency()))
        def update_history_cb():
            fx = app_state.fx
            if fx:
                hist_checkbox.setChecked(fx.get_history_config())
                hist_checkbox.setEnabled(fx.is_enabled())
        def update_fiat_balance_cb():
            fx = app_state.fx
            if fx:
                fiat_balance_checkbox.setChecked(fx.get_fiat_address_config())
        def update_exchanges():
            # Refill the exchange combo with exchanges valid for the current
            # currency (and history setting), or a placeholder list if disabled.
            fx = app_state.fx
            if fx:
                b = fx.is_enabled()
                ex_combo.setEnabled(b)
                if b:
                    h = fx.get_history_config()
                    c = fx.get_currency()
                    exchanges = fx.get_exchanges_by_ccy(c, h)
                else:
                    exchanges = fx.get_exchanges_by_ccy('USD', False)
                ex_combo.clear()
                ex_combo.addItems(sorted(exchanges))
                ex_combo.setCurrentIndex(ex_combo.findText(fx.config_exchange()))
        def on_currency(index):
            fx = app_state.fx
            if fx:
                # Index 0 is the _('None') entry, meaning fiat display is off.
                enabled = index != 0
                fx.set_enabled(enabled)
                if enabled:
                    fx.set_currency(ccy_combo.currentText())
                # A currency change affects which exchanges/history are valid.
                update_history_cb()
                update_exchanges()
                app_state.app.fiat_ccy_changed.emit()
        def on_exchange(_index):
            exchange = str(ex_combo.currentText())
            fx = app_state.fx
            if fx and fx.is_enabled() and exchange and exchange != fx.exchange.name():
                fx.set_exchange(exchange)
        def on_history(state):
            fx = app_state.fx
            if fx:
                fx.set_history_config(state == Qt.Checked)
                update_exchanges()
                app_state.app.fiat_history_changed.emit()
        def on_fiat_balance(state):
            fx = app_state.fx
            if fx:
                fx.set_fiat_address_config(state == Qt.Checked)
                app_state.app.fiat_balance_changed.emit()
        # Initialise widget state from the current FX configuration *before*
        # connecting signals, so the initial population does not fire handlers.
        update_currencies()
        update_history_cb()
        update_fiat_balance_cb()
        update_exchanges()
        ccy_combo.currentIndexChanged.connect(on_currency)
        hist_checkbox.stateChanged.connect(on_history)
        fiat_balance_checkbox.stateChanged.connect(on_fiat_balance)
        ex_combo.currentIndexChanged.connect(on_exchange)
        options_box = QGroupBox()
        options_vbox = QVBoxLayout()
        options_box.setLayout(options_vbox)
        options_vbox.addWidget(hist_checkbox)
        options_vbox.addWidget(fiat_balance_checkbox)
        extension_form = FormSectionWidget()
        extension_form.add_row(_('Currency'), ccy_combo)
        extension_form.add_row(_('Source'), ex_combo)
        extension_form.add_row(_('Options'), options_box, True)
        vbox = QVBoxLayout()
        vbox.addWidget(extension_form)
        vbox.addStretch(1)
        tab.setLayout(vbox)
def extensions_widgets(self, account: Optional[AbstractAccount], tab: QWidget) -> None:
def cb_clicked(extension, settings_widget, checked):
extension.set_enabled(checked)
if settings_widget:
settings_widget.setEnabled(checked)
vbox = QVBoxLayout()
extension_form = FormSectionWidget()
for extension in extensions:
cb = QCheckBox(_("Enabled"))
cb.setChecked(extension.is_enabled())
help_widget = HelpButton(extension.description)
field_layout = QHBoxLayout()
field_layout.addWidget(cb)
field_layout.addStretch(1)
if extension is label_sync and account:
settings_widget = app_state.app.label_sync.settings_widget(self, account)
settings_widget.setEnabled(extension.is_enabled())
field_layout.addWidget(settings_widget)
cb.clicked.connect(partial(cb_clicked, extension, settings_widget))
else:
cb.clicked.connect(partial(cb_clicked, extension, None))
field_layout.addWidget(help_widget)
extension_form.add_row(extension.name, field_layout, True)
vbox.addWidget(extension_form)
vbox.addStretch(1)
tab.setLayout(vbox)
def _wallet_widgets(self, wallet: Wallet, tab: QWidget) -> None:
use_change_addresses_cb = QCheckBox(_('Use change addresses'))
use_change_addresses_cb.setChecked(wallet.get_boolean_setting(WalletSettings.USE_CHANGE))
use_change_addresses_cb.setEnabled(
app_state.config.is_modifiable(WalletSettings.USE_CHANGE))
use_change_addresses_cb.setToolTip(
_('Using a different change key each time improves your privacy by '
'making it more difficult for others to analyze your transactions.')
)
def on_usechange(state: int):
should_enable = state == Qt.Checked
if wallet.get_boolean_setting(WalletSettings.USE_CHANGE) != should_enable:
wallet.set_boolean_setting(WalletSettings.USE_CHANGE, should_enable)
multiple_change_cb.setEnabled(should_enable)
use_change_addresses_cb.stateChanged.connect(on_usechange)
multiple_change_cb = QCheckBox(_('Use multiple change addresses'))
multiple_change_cb.setChecked(wallet.get_boolean_setting(WalletSettings.MULTIPLE_CHANGE))
multiple_change_cb.setEnabled(wallet.get_boolean_setting(WalletSettings.USE_CHANGE))
multiple_change_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change keys in order to break '
'up large coin amounts and obfuscate the recipient key.'),
_('This may result in higher transactions fees.')
]))
def on_multiple_change_toggled(state: int) -> None:
multiple = state == Qt.Checked
if wallet.get_boolean_setting(WalletSettings.MULTIPLE_CHANGE) != multiple:
wallet.set_boolean_setting(WalletSettings.MULTIPLE_CHANGE, multiple)
multiple_change_cb.stateChanged.connect(on_multiple_change_toggled)
coinsplitting_option_cb = QCheckBox(_('Show coin-splitting option on the Send tab'))
coinsplitting_option_cb.setChecked(wallet.get_boolean_setting(WalletSettings.ADD_SV_OUTPUT))
coinsplitting_option_cb.setEnabled(
app_state.config.is_modifiable(WalletSettings.ADD_SV_OUTPUT))
coinsplitting_option_cb.setToolTip(
_('Whether to feature the the option to add Bitcoin SV only data to the transaction '
'on the Send tab. Will only be shown for compatible account types.')
)
def on_coinsplitting_option_cb(state: int):
should_enable = state == | |
# Repository: yuvaraja2303/pycast
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2012-2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pycast.methods.basemethod import BaseForecastingMethod
from pycast.common.timeseries import TimeSeries
class ExponentialSmoothing(BaseForecastingMethod):
    """Implements an exponential smoothing algorithm.

    Explanation:
        http://www.youtube.com/watch?v=J4iODLa9hYw

    NOTE(review): this module uses ``xrange`` and is therefore Python 2 only.
    """

    def __init__(self, smoothingFactor=0.1, valuesToForecast=1):
        """Initializes the ExponentialSmoothing.

        :param float smoothingFactor: Defines the alpha for the ExponentialSmoothing.
            Valid values are in (0.0, 1.0).
        :param integer valuesToForecast: Number of values that should be forecasted.

        :raise: Raises a :py:exc:`ValueError` when smoothingFactor has an invalid value.
        """
        super(ExponentialSmoothing, self).__init__(["smoothingFactor"], valuesToForecast, True, True)
        self.set_parameter("smoothingFactor", smoothingFactor)

    def _get_parameter_intervals(self):
        """Returns the intervals for the methods parameter.

        Only parameters with defined intervals can be used for optimization!

        :return: Returns a dictionary containing the parameter intervals, using the parameter
            name as key, while the value hast the following format:
            [minValue, maxValue, minIntervalClosed, maxIntervalClosed]

            - minValue: Minimal value for the parameter.
            - maxValue: Maximal value for the parameter.
            - minIntervalClosed: :py:const:`True`, if minValue represents a valid
              value for the parameter. :py:const:`False` otherwise.
            - maxIntervalClosed: :py:const:`True`, if maxValue represents a valid
              value for the parameter. :py:const:`False` otherwise.
        :rtype: dictionary
        """
        parameterIntervals = {}
        # alpha must lie strictly inside (0.0, 1.0): both interval ends open.
        parameterIntervals["smoothingFactor"] = [0.0, 1.0, False, False]
        return parameterIntervals

    def execute(self, timeSeries):
        """Creates a new TimeSeries containing the smoothed and forcasted values.

        :return: TimeSeries object containing the smoothed TimeSeries,
            including the forecasted values.
        :rtype: TimeSeries

        :note: The first normalized value is chosen as the starting point.
        """
        # determine the number of values to forecast, if necessary
        self._calculate_values_to_forecast(timeSeries)
        # extract the required parameters, performance improvement
        alpha = self._parameters["smoothingFactor"]
        valuesToForecast = self._parameters["valuesToForecast"]
        # initialize some variables
        resultList = []
        estimator = None
        lastT = None
        # "It's always about performance!"
        append = resultList.append
        # smooth the existing TimeSeries data
        for idx in xrange(len(timeSeries)):
            # get the current to increase performance
            t = timeSeries[idx]
            # get the initial estimate: the first value seeds the estimator
            if estimator is None:
                estimator = t[1]
                continue
            # add the first value to the resultList without any correction
            if 0 == len(resultList):
                append([t[0], estimator])
                lastT = t
                continue
            # calculate the error made during the last estimation
            error = lastT[1] - estimator
            # calculate the new estimator, based on the last occured value, the error and the smoothingFactor
            estimator = estimator + alpha * error
            # save the current value for the next iteration
            lastT = t
            # add an entry to the result
            append([t[0], estimator])
        # forecast additional values if requested
        if valuesToForecast > 0:
            currentTime = resultList[-1][0]
            # assume equidistant samples: reuse the last observed time step
            normalizedTimeDiff = currentTime - resultList[-2][0]
            for idx in xrange(valuesToForecast):
                currentTime += normalizedTimeDiff
                # reuse everything
                error = lastT[1] - estimator
                estimator = estimator + alpha * error
                # add a forecasted value
                append([currentTime, estimator])
                # set variables for next iteration
                lastT = resultList[-1]
        # return a TimeSeries, containing the result
        return TimeSeries.from_twodim_list(resultList)
class HoltMethod(BaseForecastingMethod):
    """Implements the Holt algorithm (double exponential smoothing).

    Explanation:
        http://en.wikipedia.org/wiki/Exponential_smoothing#Double_exponential_smoothing

    NOTE(review): this module uses ``xrange`` and is therefore Python 2 only.
    """

    def __init__(self, smoothingFactor=0.1, trendSmoothingFactor=0.5, valuesToForecast=1):
        """Initializes the HoltMethod.

        :param float smoothingFactor: Defines the alpha for the ExponentialSmoothing.
            Valid values are in (0.0, 1.0).
        :param float trendSmoothingFactor: Defines the beta for the HoltMethod.
            Valid values are in (0.0, 1.0).
        :param integer valuesToForecast: Defines the number of forecasted values that will
            be part of the result.

        :raise: Raises a :py:exc:`ValueError` when smoothingFactor or trendSmoothingFactor has an invalid value.
        """
        super(HoltMethod, self).__init__(["smoothingFactor",
                                          "trendSmoothingFactor"],
                                          valuesToForecast, True, True)
        self.set_parameter("smoothingFactor", smoothingFactor)
        self.set_parameter("trendSmoothingFactor", trendSmoothingFactor)

    def _get_parameter_intervals(self):
        """Returns the intervals for the methods parameter.

        Only parameters with defined intervals can be used for optimization!

        :return: Returns a dictionary containing the parameter intervals, using the parameter
            name as key, while the value hast the following format:
            [minValue, maxValue, minIntervalClosed, maxIntervalClosed]

            - minValue: Minimal value for the parameter.
            - maxValue: Maximal value for the parameter.
            - minIntervalClosed: :py:const:`True`, if minValue represents a valid
              value for the parameter. :py:const:`False` otherwise.
            - maxIntervalClosed: :py:const:`True`, if maxValue represents a valid
              value for the parameter. :py:const:`False` otherwise.
        :rtype: dictionary
        """
        parameterIntervals = {}
        # both alpha and beta must lie strictly inside (0.0, 1.0)
        parameterIntervals["smoothingFactor"] = [0.0, 1.0, False, False]
        parameterIntervals["trendSmoothingFactor"] = [0.0, 1.0, False, False]
        return parameterIntervals

    def execute(self, timeSeries):
        """Creates a new TimeSeries containing the smoothed values.

        :return: TimeSeries object containing the smoothed TimeSeries,
            including the forecasted values.
        :rtype: TimeSeries

        :note: The first normalized value is chosen as the starting point.
        """
        # determine the number of values to forecast, if necessary
        self._calculate_values_to_forecast(timeSeries)
        # extract the required parameters, performance improvement
        alpha = self._parameters["smoothingFactor"]
        beta = self._parameters["trendSmoothingFactor"]
        # initialize some variables
        resultList = []
        estimator = None
        trend = None
        lastT = None
        # "It's always about performance!"
        append = resultList.append
        # smooth the existing TimeSeries data
        for idx in xrange(len(timeSeries)):
            # get the current to increase performance
            t = timeSeries[idx]
            # get the initial estimate: the first value seeds the estimator
            if estimator is None:
                estimator = t[1]
                lastT = t
                continue
            # add the first value to the resultList without any correction;
            # the initial trend is the difference of the first two values
            if 0 == len(resultList):
                append([t[0], estimator])
                trend = t[1] - lastT[1]
                # store current values for next iteration
                lastT = t
                lastEstimator = estimator
                continue
            # calculate the new estimator and trend, based on the last occured value, the error and the smoothingFactor
            estimator = alpha * t[1] + (1 - alpha) * (estimator + trend)
            trend = beta * (estimator - lastEstimator) + (1 - beta) * trend
            # add an entry to the result
            append([t[0], estimator])
            # store current values for next iteration
            lastT = t
            lastEstimator = estimator
        # forecast additional values if requested
        if self._parameters["valuesToForecast"] > 0:
            currentTime = resultList[-1][0]
            # assume equidistant samples: reuse the last observed time step
            normalizedTimeDiff = currentTime - resultList[-2][0]
            for idx in xrange(1, self._parameters["valuesToForecast"] + 1):
                currentTime += normalizedTimeDiff
                # reuse everything: Holt forecast is the level plus idx trends
                forecast = estimator + idx * trend
                # add a forecasted value
                append([currentTime, forecast])
        # return a TimeSeries, containing the result
        return TimeSeries.from_twodim_list(resultList)
# TODO: A second method, referred to as either Brown's linear exponential smoothing (LES)
# or Brown's double exponential smoothing, could also be implemented here. [9]
class HoltWintersMethod(BaseForecastingMethod):
"""Implements the Holt-Winters algorithm.
Explanation:
http://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing
"""
def __init__(self, smoothingFactor=0.1, trendSmoothingFactor=0.5, seasonSmoothingFactor=0.5, seasonLength=0, valuesToForecast=1):
    """Initializes the HoltWintersMethod.

    :param float smoothingFactor: Defines the alpha for the Holt-Winters algorithm.
        Valid values are (0.0, 1.0).
    :param float trendSmoothingFactor: Defines the beta for the Holt-Winters algorithm.
        Valid values are (0.0, 1.0).
    :param float seasonSmoothingFactor: Defines the gamma for the Holt-Winters algorithm.
        Valid values are (0.0, 1.0).
    :param integer seasonLength: The expected length for the seasons. Please use a good estimate here!
    :param integer valuesToForecast: Defines the number of forecasted values that will be part of the result.

    :raise: Raises a :py:exc:`ValueError` if seasonLength is not larger than zero.
    """
    super(HoltWintersMethod, self).__init__(["smoothingFactor",
                                             "trendSmoothingFactor",
                                             "seasonSmoothingFactor",
                                             "seasonLength"],
                                            valuesToForecast,
                                            True, True)
    # a season must span at least one value, otherwise the algorithm is undefined
    if seasonLength <= 0:
        raise ValueError("Please specify season length that is greater than 0.")
    # store all user supplied parameters
    for parameterName, parameterValue in (("smoothingFactor", smoothingFactor),
                                          ("trendSmoothingFactor", trendSmoothingFactor),
                                          ("seasonSmoothingFactor", seasonSmoothingFactor),
                                          ("seasonLength", seasonLength)):
        self.set_parameter(parameterName, parameterValue)
def _get_parameter_intervals(self):
"""Returns the intervals for the methods parameter.
Only parameters with defined intervals can be used for optimization!
:return: Returns a dictionary containing the parameter intervals, using the parameter
name as key, while the value hast the following format:
[minValue, maxValue, minIntervalClosed, maxIntervalClosed]
- minValue
Minimal value for the parameter
- maxValue
Maximal value for the parameter
- minIntervalClosed
:py:const:`True`, if minValue represents a valid value for the | |
<filename>scripts/m18_submission_checker/aida_m18_submission_checker.py
"""
aida_m18_submission_checker.py
June 21st, 2019
to run:
python3 aida_m18_submission_checker.py <path/to/archive>
or
python aida_m18_submission_checker.py <path/to/archive>
"""
import sys
import os
import re
import logging
import tarfile
import zipfile
'''
Globals
'''
# list of names of items in the archive - to be populated after determining what filetype the archive is
ARCHIVE_NAMES = []
# maps a task type ('1a', '1b', '2', '3') to the human-readable placement rule
# that is reported when .ttl files are found in an invalid directory
warnings_dict = {'1a' : ".ttl files must be located in the <run_ID>/NIST/ and optionally in the <run_id>/INTER-TA directory",
                 '1b' : ".ttl files must be located the appropriate hypothesis_id subdirectory of the <run_ID>/NIST/ directory. i.e. <run_id>/NIST/<hypothesis_id>/<document_id>.ttl",
                 '2' : ".ttl files must be located in the <run_ID>/NIST/ and optionally in the <run_id>/INTER-TA directory",
                 '3' : ".ttl files must be located in the <run_ID>/ directory" }
def check_runtime_args(args):
    """
    Validates the runtime arguments: an archive path must be supplied and it
    must point to an existing file. Detected problems are logged as errors.

    :param args: runtime arguments (sys.argv)
    :return: True if valid runtime arguments, False otherwise
    """
    # an archive path must follow the script name
    if len(args) < 2:
        logging.error("Missing archive file as argument.\nTo run: python3 aida_m18_submission_checker.py <path/to/archive>")
        return False
    # the supplied path must name an existing regular file
    if os.path.isfile(args[1]):
        return True
    logging.error("Argument is not a file.\nTo run: python3 aida_m18_submission_checker.py <path/to/archive>")
    return False
def get_valid_archive_filetype(archive_file_name):
    """
    Determines which library should be used to read the archive, based on its
    file extension. Valid filetypes: .tgz, .tar.gz, or .zip. Any other
    extension is reported via an error log message.

    :param archive_file_name: the file name/path of the archive
    :return: 'tar' or 'zip' on success, None for an unsupported extension
    :rtype: string, or None
    """
    # NOTE: repeated file extensions are deliberately not rejected here;
    # such names later make get_task_type fail with an unknown task type.
    if archive_file_name.endswith(('.tar.gz', '.tgz')):
        return 'tar'
    if archive_file_name.endswith('.zip'):
        return 'zip'
    logging.error("Archive filetype is unknown. Please use .zip, .tgz, or .tar.gz")
    return None
def get_archive_member_names(archive_file_name, archive_file_type):
    """
    Gets all of the file names/members of the archive.

    Directory members read from a tar archive get a trailing '/' appended so
    the output matches zipfile's naming convention.

    :param archive_file_name: the file name/path of the archive
    :param archive_file_type: the file extention type: 'tar' or 'zip'
    :return: a list of the names/members in the archive on success, None if failure to open/read archive
    :rtype: list, or None
    """
    logging.info("Checking archive... this may take a moment for large files.")
    try:
        if archive_file_type == 'zip':
            # 'with' guarantees the archive handle is closed (the old code leaked it)
            with zipfile.ZipFile(archive_file_name, 'r', allowZip64=True) as archive:
                return archive.namelist()
        elif archive_file_type == 'tar':
            with tarfile.open(archive_file_name, mode='r') as archive:
                archive_name_list = []
                for item in archive.getmembers():
                    if item.type == tarfile.DIRTYPE:
                        # append a '/' to the end of the directory name to match zip output formatting
                        archive_name_list.append(item.name + '/')
                    else:
                        archive_name_list.append(item.name)
                return archive_name_list
    except (zipfile.BadZipFile, tarfile.TarError, OSError):
        # narrowed from a bare 'except:' so programming errors and
        # KeyboardInterrupt are no longer silently swallowed; OSError covers
        # unreadable/missing files
        logging.error("Error thrown attempting to read/open the archive. Please use 'zip' or 'tar' to create your archive.")
        return None
def get_task_type(archive_file_name, archive_member_names):
    """
    Infers the submission task type from the number of periods in the archive
    file name, disambiguating 1a vs 1b by looking at where the .ttl files
    live inside the archive.

    :param archive_file_name: the file name/path of the archive
    :param archive_member_names: a list of members/names in the archive
    :return: string corresponding to the task type, which could be any of the following: '1a', '1b', '2', or '3'
    :rtype: string, or None if the name matches no known task type
    """
    # count the number of .'s in the archive file name to determine the task type
    basename = os.path.basename(archive_file_name)
    dot_count = basename.count('.')
    # a '.tar.gz' extension contributes one extra period compared with '.tgz'/'.zip'
    if basename.endswith('.tar.gz'):
        dot_count -= 1
    if dot_count == 2:
        logging.info("Based on the number of periods in the file name, this archive is assumed to be a Task {0} submission.".format('2'))
        return '2'
    if dot_count == 3:
        logging.info("Based on the number of periods in the file name, this archive is assumed to be a Task {0} submission.".format('3'))
        return '3'
    if dot_count != 1:
        logging.error("Based on the number of periods in the file name, this is neither a Task 1a, 1b, 2, or 3 submission. Please name your submission file based on the rules defined in section 9 of the NIST AIDA 2019 Evaluation Plan.")
        return None
    # exactly one period: decide between 1a and 1b from the archive layout
    logging.info("Based on the number of periods in the file name, this is either submission task type 1a OR 1b...")
    member_names_str = " ".join(archive_member_names)
    # 1a layout: a .ttl file directly inside a .../NIST/ directory
    found_1a = re.compile(".*\/NIST\/[^\/]+.ttl").search(member_names_str) is not None
    # 1b layout: a .ttl file inside a subdirectory of .../NIST/
    found_1b = re.compile(".*\/NIST\/\S*\/\S*\.ttl").search(member_names_str) is not None
    if found_1b:
        if found_1a:
            logging.warning("Found .ttl files in a .../NIST/ directory and in a subdirectory of ../NIST/, continuing to check as submission type 1b.")
        else:
            logging.info("Found .ttl files in a subdirectory of ../NIST/, continuing to check as submission type 1b.")
        return '1b'
    if found_1a:
        logging.info("Found .ttl files in a .../NIST/ directory, continuing to check as submission type 1a.")
    else:
        logging.warning("No .ttl files found in a .../NIST/ directory, continuing to check as submission type 1a.")
    return '1a'
def get_archive_submit_ready_status_values(task_type, archive_member_names):
    """
    Tallies the .ttl files in the archive, split into files that sit in a
    valid directory for the given task type and files that do not.

    Prereq: ARCHIVE_FILE_NAME, and ARCHIVE_NAMES have been set

    :param task_type: the submission task type: '1a', '1b', '2', or '3'
    :param archive_member_names: a list of members/names in the archive
    :return: total number of ttl files found, a dict of ttl counts per valid directory, and a dict of ttl counts per invalid directory
    :rtype: int, dict {'dirname':int,...}, dict {'dirname':int,...}
    """
    ttl_valid_count_dict = {}
    ttl_invalid_count_dict = {}
    ttls_total_count = 0
    # function pointer for which type of validity checking we want to do
    valid_dir_check = None
    if task_type == '1a' or task_type == '2':
        valid_dir_check = do_TA1a_2_check
    elif task_type == '1b':
        valid_dir_check = do_TA1b_check
    elif task_type == '3':
        valid_dir_check = do_TA3_check
    for name in archive_member_names:
        # only .ttl files are counted; directories and other files are ignored
        if not name.endswith('.ttl'):
            continue
        ttls_total_count += 1
        # split into path levels (no regex needed for a fixed separator)
        path_items = name.split('/')
        # trailing '/' matches the directory naming convention used elsewhere
        # ('directory' instead of 'dir' avoids shadowing the builtin)
        directory = os.path.dirname(name) + '/'
        if valid_dir_check(path_items):
            ttl_valid_count_dict[directory] = ttl_valid_count_dict.get(directory, 0) + 1
        else:
            ttl_invalid_count_dict[directory] = ttl_invalid_count_dict.get(directory, 0) + 1
    return ttls_total_count, ttl_valid_count_dict, ttl_invalid_count_dict
def do_TA1a_2_check(path_items):
    """
    Check the path in the archive to ensure the path is valid.

    TASK 1a Directory structure:
        <TA1aperformer>_<run>
            NIST
                <document_id>.ttl        1a: (1 to X); 2: (1)
            INTER-TA
                <document_id>.ttl        1a: (0 to X); 2: (0 or 1)

    Ignores non ttl files (callers only pass .ttl paths).

    :param path_items: The path to the .ttl file, split into its components
    :return: True if the .ttl file sits directly in the second-level NIST/ or
        INTER-TA/ directory (i.e. <run_id>/<NIST|INTER-TA>/<file>.ttl),
        False otherwise
    :rtype: bool
    """
    # rule: ttl needs to be in 2nd level -- aka 3rd item in list['name', 'NIST', 'valid.ttl']
    # rule: ttl needs to be in directory named NIST, or INTER-TA
    # checking the length first makes the [-2] index safe, so the old
    # bare 'except:' (which also hid real programming errors) is unnecessary
    return len(path_items) == 3 and path_items[-2] in ('NIST', 'INTER-TA')
def do_TA1b_check(path_items):
"""
Check the path in the archive to ensure the path is valid
Task 1b directory structure:
<TA1performer>_<run>
NIST
hypothesisID (1 to Y)
<document_id>.ttl (1 to X)
Ignores non ttl files.
Prints number of ttl files found, and number of ttl files that are in need to be checked.
:param path_items: The path to the .ttl file that needs to be checked
:return: True if the file path is valid, False otherwise
:rtype: bool
"""
# validate that the ttl file is in an acceptable directory
# rule: ttl needs to be in 3rd level -- aka 4th item in list['name', 'NIST', 'hypothesis', 'valid.ttl']
# rule: ttl needs to be in a | |
3, 'sfn' : 'even', 'subf' : (1,)},
#49
{'format' : 3, 'sfn' : 'even', 'subf' : (4,)},
#50
{'format' : 3, 'sfn' : 'even', 'subf' : (7,)},
#51
{'format' : 3, 'sfn' : 'any', 'subf' : (1,)},
#52
{'format' : 3, 'sfn' : 'any', 'subf' : (4,)},
#53
{'format' : 3, 'sfn' : 'any', 'subf' : (7,)},
#54
{'format' : 3, 'sfn' : 'any', 'subf' : (1, 6)},
#55
{'format' : 3, 'sfn' : 'any', 'subf' : (2, 7)},
#56
{'format' : 3, 'sfn' : 'any', 'subf' : (3, 8)},
#57
{'format' : 3, 'sfn' : 'any', 'subf' : (1, 4, 7)},
#58
{'format' : 3, 'sfn' : 'any', 'subf' : (2, 5, 8)},
#59
{'format' : 3, 'sfn' : 'any', 'subf' : (3, 6, 9)},
#60~62 invalid config index = 60/61/62
{'format' : -1},
{'format' : -1},
{'format' : -1},
#63
{'format' : 3, 'sfn' : 'any', 'sfNum' : (9,)}]
if _prachConfFdd[self.prachConfInd]['format'] < 0:
self.ngwin.logEdit.append('args error: invalid PRACH configuration index: %d. PRACH index 30/46/60/61/62 are not supported in FDD.' % self.prachConfInd)
return False
self.prachFddFormat = _prachConfFdd[self.prachConfInd]['format']
self.prachFddSfn = _prachConfFdd[self.prachConfInd]['sfn']
self.prachFddSubf = _prachConfFdd[self.prachConfInd]['subf']
return True
def initPrachTdd(self):
    """Initializes PRACH configuration for frame structure type 2 (TDD).

    Maps self.prachConfInd onto a preamble format (36.211 Table 5.7.1-3) and
    looks up the PRACH resource quadruples for the current UL/DL
    configuration self.sa (0..6) from Table 5.7.1-4, storing the results in
    self.prachTddFormat and self.prachTddQuad.

    :return: True on success, False for an unsupported configuration
    """
    #Table 5.7.1-3: Frame structure type 2 random access configurations for preamble formats 0-4.
    if self.prachConfInd in range(20):
        self.prachTddFormat = 0
    elif self.prachConfInd in range(20, 30):
        self.prachTddFormat = 1
    elif self.prachConfInd in range(30, 40):
        # BUGFIX: was range(39, 40), which wrongly rejected indices 30~38
        # even though Table 5.7.1-4 below has valid rows #30~#39 for format 2
        self.prachTddFormat = 2
    elif self.prachConfInd in range(40, 48):
        self.prachTddFormat = 3
    elif self.prachConfInd in range(48, 58):
        self.prachTddFormat = 4
    else:
        self.ngwin.logEdit.append('args error: PRACH configuration index 58~63 are not supported for frame structure type 2.')
        return False
    #Table 5.7.1-4: Frame structure type 2 random access preamble mapping in time and frequency.
    # rows are indexed by PRACH configuration index (0..63), columns by UL/DL
    # configuration (0..6); each quadruple is presumably (f_RA, t0_RA, t1_RA,
    # t2_RA) per 36.211 -- confirm against the spec; None marks an invalid combination
    _prachQuadTdd = [[[(0,1,0,2)], [(0,1,0,1)], [(0,1,0,0)], [(0,1,0,2)], [(0,1,0,1)], [(0,1,0,0)], [(0,1,0,2)]],
        #1
        [[(0,2,0,2)], [(0,2,0,1)], [(0,2,0,0)], [(0,2,0,2)], [(0,2,0,1)], [(0,2,0,0)], [(0,2,0,2)]],
        #2
        [[(0,1,1,2)], [(0,1,1,1)], [(0,1,1,0)], [(0,1,0,1)], [(0,1,0,0)], None, [(0,1,1,1)]],
        #3
        [[(0,0,0,2)], [(0,0,0,1)], [(0,0,0,0)], [(0,0,0,2)], [(0,0,0,1)], [(0,0,0,0)], [(0,0,0,2)]],
        #4
        [[(0,0,1,2)], [(0,0,1,1)], [(0,0,1,0)], [(0,0,0,1)], [(0,0,0,0)], None, [(0,0,1,1)]],
        #5
        [[(0,0,0,1)], [(0,0,0,0)], None, [(0,0,0,0)], None, None, [(0,0,0,1)]],
        #6
        [[(0,0,0,2),(0,0,1,2)], [(0,0,0,1),(0,0,1,1)], [(0,0,0,0),(0,0,1,0)], [(0,0,0,1),(0,0,0,2)], [(0,0,0,0),(0,0,0,1)], [(0,0,0,0),(1,0,0,0)], [(0,0,0,2),(0,0,1,1)]],
        #7
        [[(0,0,0,1),(0,0,1,1)], [(0,0,0,0),(0,0,1,0)], None, [(0,0,0,0),(0,0,0,2)], None, None, [(0,0,0,1),(0,0,1,0)]],
        #8
        [[(0,0,0,0),(0,0,1,0)], None, None, [(0,0,0,0),(0,0,0,1)], None, None, [(0,0,0,0),(0,0,1,1)]],
        #9
        [[(0,0,0,1),(0,0,0,2),(0,0,1,2)], [(0,0,0,0),(0,0,0,1),(0,0,1,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0)], [(0,0,0,0),(0,0,0,1),(0,0,0,2)], [(0,0,0,0),(0,0,0,1),(1,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0)], [(0,0,0,1),(0,0,0,2),(0,0,1,1)]],
        #10
        [[(0,0,0,0),(0,0,1,0),(0,0,1,1)], [(0,0,0,1),(0,0,1,0),(0,0,1,1)], [(0,0,0,0),(0,0,1,0),(1,0,1,0)], None, [(0,0,0,0),(0,0,0,1),(1,0,0,0)], None, [(0,0,0,0),(0,0,0,2),(0,0,1,0)]],
        #11
        [None, [(0,0,0,0),(0,0,0,1),(0,0,1,0)], None, None, None, None, [(0,0,0,1),(0,0,1,0),(0,0,1,1)]],
        #12
        [[(0,0,0,1),(0,0,0,2),(0,0,1,1),(0,0,1,2)], [(0,0,0,0),(0,0,0,1),(0,0,1,0),(0,0,1,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0)], [(0,0,0,0),(0,0,0,1),(0,0,0,2),(1,0,0,2)], [(0,0,0,0),(0,0,0,1),(1,0,0,0),(1,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0)], [(0,0,0,1),(0,0,0,2),(0,0,1,0),(0,0,1,1)]],
        #13
        [[(0,0,0,0),(0,0,0,2),(0,0,1,0),(0,0,1,2)], None, None, [(0,0,0,0),(0,0,0,1),(0,0,0,2),(1,0,0,1)], None, None, [(0,0,0,0),(0,0,0,1),(0,0,0,2),(0,0,1,1)]],
        #14
        [[(0,0,0,0),(0,0,0,1),(0,0,1,0),(0,0,1,1)], None, None, [(0,0,0,0),(0,0,0,1),(0,0,0,2),(1,0,0,0)], None, None, [(0,0,0,0),(0,0,0,2),(0,0,1,0),(0,0,1,1)]],
        #15
        [[(0,0,0,0),(0,0,0,1),(0,0,0,2),(0,0,1,1),(0,0,1,2)], [(0,0,0,0),(0,0,0,1),(0,0,1,0),(0,0,1,1),(1,0,0,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0)], [(0,0,0,0),(0,0,0,1),(0,0,0,2),(1,0,0,1),(1,0,0,2)], [(0,0,0,0),(0,0,0,1),(0,0,0,2),(1,0,0,1),(2,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0)], [(0,0,0,0),(0,0,0,1),(0,0,0,2),(0,0,1,0),(0,0,1,1)]],
        #16
        [[(0,0,0,1),(0,0,0,2),(0,0,1,0),(0,0,1,1),(0,0,1,2)], [(0,0,0,0),(0,0,0,1),(0,0,1,0),(0,0,1,1),(1,0,1,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,1,0)], [(0,0,0,0),(0,0,0,1),(0,0,0,2),(1,0,0,0),(1,0,0,2)], [(0,0,0,0),(0,0,0,1),(1,0,0,0),(1,0,0,1),(2,0,0,0)], None, None],
        #17
        [[(0,0,0,0),(0,0,0,1),(0,0,0,2),(0,0,1,0),(0,0,1,2)], [(0,0,0,0),(0,0,0,1),(0,0,1,0),(0,0,1,1),(1,0,0,0)], None, [(0,0,0,0),(0,0,0,1),(0,0,0,2),(1,0,0,0),(1,0,0,1)], None, None, None],
        #18
        [[(0,0,0,0),(0,0,0,1),(0,0,0,2),(0,0,1,0),(0,0,1,1),(0,0,1,2)], [(0,0,0,0),(0,0,0,1),(0,0,1,0),(0,0,1,1),(1,0,0,1),(1,0,1,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0),(2,0,1,0)], [(0,0,0,0),(0,0,0,1),(0,0,0,2),(1,0,0,0),(1,0,0,1),(1,0,0,2)], [(0,0,0,0),(0,0,0,1),(1,0,0,0),(1,0,0,1),(2,0,0,0),(2,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0),(5,0,0,0)], [(0,0,0,0),(0,0,0,1),(0,0,0,2),(0,0,1,0),(0,0,1,1),(1,0,0,2)]],
        #19
        [None, [(0,0,0,0),(0,0,0,1),(0,0,1,0),(0,0,1,1),(1,0,0,0),(1,0,1,0)], None, None, None, None, [(0,0,0,0),(0,0,0,1),(0,0,0,2),(0,0,1,0),(0,0,1,1),(1,0,1,1)]],
        #20
        [[(0,1,0,1)], [(0,1,0,0)], None, [(0,1,0,1)], [(0,1,0,0)], None, [(0,1,0,1)]],
        #21
        [[(0,2,0,1)], [(0,2,0,0)], None, [(0,2,0,1)], [(0,2,0,0)], None, [(0,2,0,1)]],
        #22
        [[(0,1,1,1)], [(0,1,1,0)], None, None, None, None, [(0,1,1,0)]],
        #23
        [[(0,0,0,1)], [(0,0,0,0)], None, [(0,0,0,1)], [(0,0,0,0)], None, [(0,0,0,1)]],
        #24
        [[(0,0,1,1)], [(0,0,1,0)], None, None, None, None, [(0,0,1,0)]],
        #25
        [[(0,0,0,1),(0,0,1,1)], [(0,0,0,0),(0,0,1,0)], None, [(0,0,0,1),(1,0,0,1)], [(0,0,0,0),(1,0,0,0)], None, [(0,0,0,1),(0,0,1,0)]],
        #26
        [[(0,0,0,1),(0,0,1,1),(1,0,0,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0)], None, [(0,0,0,1),(1,0,0,1),(2,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0)], None, [(0,0,0,1),(0,0,1,0),(1,0,0,1)]],
        #27
        [[(0,0,0,1),(0,0,1,1),(1,0,0,1),(1,0,1,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0)], None, [(0,0,0,1),(1,0,0,1),(2,0,0,1),(3,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0)], None, [(0,0,0,1),(0,0,1,0),(1,0,0,1),(1,0,1,0)]],
        #28
        [[(0,0,0,1),(0,0,1,1),(1,0,0,1),(1,0,1,1),(2,0,0,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0)], None, [(0,0,0,1),(1,0,0,1),(2,0,0,1),(3,0,0,1),(4,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0)], None, [(0,0,0,1),(0,0,1,0),(1,0,0,1),(1,0,1,0),(2,0,0,1)]],
        #29
        [[(0,0,0,1),(0,0,1,1),(1,0,0,1),(1,0,1,1),(2,0,0,1),(2,0,1,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0),(2,0,1,0)], None, [(0,0,0,1),(1,0,0,1),(2,0,0,1),(3,0,0,1),(4,0,0,1),(5,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0),(5,0,0,0)], None, [(0,0,0,1),(0,0,1,0),(1,0,0,1),(1,0,1,0),(2,0,0,1),(2,0,1,0)]],
        #30
        [[(0,1,0,1)], [(0,1,0,0)], None, [(0,1,0,1)], [(0,1,0,0)], None, [(0,1,0,1)]],
        #31
        [[(0,2,0,1)], [(0,2,0,0)], None, [(0,2,0,1)], [(0,2,0,0)], None, [(0,2,0,1)]],
        #32
        [[(0,1,1,1)], [(0,1,1,0)], None, None, None, None, [(0,1,1,0)]],
        #33
        [[(0,0,0,1)], [(0,0,0,0)], None, [(0,0,0,1)], [(0,0,0,0)], None, [(0,0,0,1)]],
        #34
        [[(0,0,1,1)], [(0,0,1,0)], None, None, None, None, [(0,0,1,0)]],
        #35
        [[(0,0,0,1),(0,0,1,1)], [(0,0,0,0),(0,0,1,0)], None, [(0,0,0,1),(1,0,0,1)], [(0,0,0,0),(1,0,0,0)], None, [(0,0,0,1),(0,0,1,0)]],
        #36
        [[(0,0,0,1),(0,0,1,1),(1,0,0,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0)], None, [(0,0,0,1),(1,0,0,1),(2,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0)], None, [(0,0,0,1),(0,0,1,0),(1,0,0,1)]],
        #37
        [[(0,0,0,1),(0,0,1,1),(1,0,0,1),(1,0,1,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0)], None, [(0,0,0,1),(1,0,0,1),(2,0,0,1),(3,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0)], None, [(0,0,0,1),(0,0,1,0),(1,0,0,1),(1,0,1,0)]],
        #38
        [[(0,0,0,1),(0,0,1,1),(1,0,0,1),(1,0,1,1),(2,0,0,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0)], None, [(0,0,0,1),(1,0,0,1),(2,0,0,1),(3,0,0,1),(4,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0)], None, [(0,0,0,1),(0,0,1,0),(1,0,0,1),(1,0,1,0),(2,0,0,1)]],
        #39
        [[(0,0,0,1),(0,0,1,1),(1,0,0,1),(1,0,1,1),(2,0,0,1),(2,0,1,1)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0),(2,0,1,0)], None, [(0,0,0,1),(1,0,0,1),(2,0,0,1),(3,0,0,1),(4,0,0,1),(5,0,0,1)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0),(5,0,0,0)], None, [(0,0,0,1),(0,0,1,0),(1,0,0,1),(1,0,1,0),(2,0,0,1),(2,0,1,0)]],
        #40
        [[(0,1,0,0)], None, None, [(0,1,0,0)], None, None, [(0,1,0,0)]],
        #41
        [[(0,2,0,0)], None, None, [(0,2,0,0)], None, None, [(0,2,0,0)]],
        #42
        [[(0,1,1,0)], None, None, None, None, None, None],
        #43
        [[(0,0,0,0)], None, None, [(0,0,0,0)], None, None, [(0,0,0,0)]],
        #44
        [[(0,0,1,0)], None, None, None, None, None, None],
        #45
        [[(0,0,0,0),(0,0,1,0)], None, None, [(0,0,0,0),(1,0,0,0)], None, None, [(0,0,0,0),(1,0,0,0)]],
        #46
        [[(0,0,0,0),(0,0,1,0),(1,0,0,0)], None, None, [(0,0,0,0),(1,0,0,0),(2,0,0,0)], None, None, [(0,0,0,0),(1,0,0,0),(2,0,0,0)]],
        #47
        [[(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0)], None, None, [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0)], None, None, [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0)]],
        #48
        [[(0,1,0,0)], [(0,1,0,0)], [(0,1,0,0)], [(0,1,0,0)], [(0,1,0,0)], [(0,1,0,0)], [(0,1,0,0)]],
        #49
        [[(0,2,0,0)], [(0,2,0,0)], [(0,2,0,0)], [(0,2,0,0)], [(0,2,0,0)], [(0,2,0,0)], [(0,2,0,0)]],
        #50
        [[(0,1,1,0)], [(0,1,1,0)], [(0,1,1,0)], None, None, None, [(0,1,1,0)]],
        #51
        [[(0,0,0,0)], [(0,0,0,0)], [(0,0,0,0)], [(0,0,0,0)], [(0,0,0,0)], [(0,0,0,0)], [(0,0,0,0)]],
        #52
        [[(0,0,1,0)], [(0,0,1,0)], [(0,0,1,0)], None, None, None, [(0,0,1,0)]],
        #53
        [[(0,0,0,0),(0,0,1,0)], [(0,0,0,0),(0,0,1,0)], [(0,0,0,0),(0,0,1,0)], [(0,0,0,0),(1,0,0,0)], [(0,0,0,0),(1,0,0,0)], [(0,0,0,0),(1,0,0,0)], [(0,0,0,0),(0,0,1,0)]],
        #54
        [[(0,0,0,0),(0,0,1,0),(1,0,0,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0)]],
        #55
        [[(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0)]],
        #56
        [[(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0)]],
        #57
        [[(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0),(2,0,1,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0),(2,0,1,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0),(2,0,1,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0),(5,0,0,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0),(5,0,0,0)], [(0,0,0,0),(1,0,0,0),(2,0,0,0),(3,0,0,0),(4,0,0,0),(5,0,0,0)], [(0,0,0,0),(0,0,1,0),(1,0,0,0),(1,0,1,0),(2,0,0,0),(2,0,1,0)]],
        #58~63 is not supported
        [None, None, None, None, None, None, None],
        [None, None, None, None, None, None, None],
        [None, None, None, None, None, None, None],
        [None, None, None, None, None, None, None],
        [None, None, None, None, None, None, None],
        [None, None, None, None, None, None, None]
        ]
    self.prachTddQuad = _prachQuadTdd[self.prachConfInd][self.sa]
    if self.prachTddQuad is None:
        self.ngwin.logEdit.append('args error: invalid PRACH configuration index %d for UL/DL configuration %d' % (self.prachConfInd, self.sa))
        return False
    return True
def initSrsFdd(self):
    """Initializes SRS configuration for frame structure type 1 (FDD).

    Looks up T_SFC and the Delta_SFC set from Table 5.5.3.3-1 using
    self.srsSubfConf and stores them in self.tSfc and self.deltaSfc.

    :return: True on success, False for an invalid SRS subframe configuration
    """
    #Table 5.5.3.3-1: Frame structure type 1 sounding reference signal subframe configuration.
    # one (tSfc, deltaSfc) pair per valid configuration index 0..14
    _srsSubfConfFdd = ((1, (0,)),
                       (2, (0,)),
                       (2, (1,)),
                       (5, (0,)),
                       (5, (1,)),
                       (5, (2,)),
                       (5, (3,)),
                       (5, (0,1)),
                       (5, (2,3)),
                       (10, (0,)),
                       (10, (1,)),
                       (10, (2,)),
                       (10, (3,)),
                       (10, (0,1,2,3,4,6,8)),
                       (10, (0,1,2,3,4,5,6,8)))
    # configuration 15 is reserved/invalid for FDD
    if not 0 <= self.srsSubfConf <= 14:
        self.ngwin.logEdit.append('args error: valid SRS subframe configuration for FDD is [0, 14].')
        return False
    self.tSfc, self.deltaSfc = _srsSubfConfFdd[self.srsSubfConf]
    return True
def initSrsTdd(self):
    """Initializes SRS configuration for frame structure type 2 (TDD).

    Looks up T_SFC and the Delta_SFC set from Table 5.5.3.3-2 using
    self.srsSubfConf and stores them in self.tSfc and self.deltaSfc.

    :return: True on success, False for an invalid SRS subframe configuration
    """
    #Table 5.5.3.3-2: Frame structure type 2 sounding reference signal subframe configuration.
    # one (tSfc, deltaSfc) pair per valid configuration index 0..13
    _srsSubfConfTdd = ((5, (1,)),
                       (5, (1,2)),
                       (5, (1,3)),
                       (5, (1,4)),
                       (5, (1,2,3)),
                       (5, (1,2,4)),
                       (5, (1,3,4)),
                       (5, (1,2,3,4)),
                       (10, (1,2,6)),
                       (10, (1,3,6)),
                       (10, (1,6,7)),
                       (10, (1,2,6,8)),
                       (10, (1,3,6,9)),
                       (10, (1,4,6,7)))
    # configurations 14~15 are reserved/invalid for TDD
    if not 0 <= self.srsSubfConf <= 13:
        self.ngwin.logEdit.append('args error: valid SRS subframe configuration for TDD is [0, 13].')
        return False
    self.tSfc, self.deltaSfc = _srsSubfConfTdd[self.srsSubfConf]
    return True
def fillCrs(self):
if self.crsOk:
return
#6.10.1.2 Mapping to resource elements
'''
iap,l,v,k,special case
0,0,0,6*im+(v+v_shift)%6,no
0,symbPerSlot-3,3,same,no
1,0,3,same,no
1,symbPerSlot-3,0,same,no
2,1,0,same,no
2,1,3,same,no
3,1,3,same,no
3,1,6,same,no
'''
_crsPos = [(0, 0, 0),
(0, self.symbPerSlot-3, 3),
(1, 0, 3),
(1, self.symbPerSlot-3, 0),
(2, 1, 0),
(2, 1, 3),
(3, 1, 3),
(3, 1, 6)]
if self.apNum == 1:
_crsPos = _crsPos[:2]
elif self.apNum == 2:
_crsPos = _crsPos[:4]
m = list(range(2*self.prbNum))
vShift = self.pci % 6
for ap, l, v in _crsPos:
k = list(map(lambda x : 6*x+(v+vShift)%6, m))
if ap in [0, 1]:
symb = [islot * self.symbPerSlot + l for islot in range(self.slotPerRf)]
elif (ap == 2 and v == 0) or (ap == 3 and v == 3):
symb = [islot * self.symbPerSlot + l for islot in range(self.slotPerRf) if islot % 2 == 0]
else: #(ap == 2 and v == 3) or (ap == 3 and v == 6)
symb = [islot * self.symbPerSlot + l for islot in range(self.slotPerRf) if islot % 2 == 1]
for _k in k:
for _symb in symb:
if self.gridDl[ap][_k][_symb] == LteResType.LTE_RES_PDSCH.value:
self.gridDl[ap][_k][_symb] | |
BC_h_line.set_data([],[])
BC_q_point
BC_h_point
return Qline, WLline, BC_q_line, BC_h_line, BC_q_point, BC_h_point, day_text
# animation function. this is called sequentially
def animate_min(i):
    """FuncAnimation callback: update all artists for frame i (time counter[i]).

    Relies on closure variables from the enclosing animation builder:
    Sub, counter, the axes (ax2/ax3), the line artists and day_text.
    """
    # discharge (ax1): longitudinal discharge profile at the current time step
    x = Sub.XSname
    y= Sub.Qmin[Sub.Qmin.index == counter[i]].values[0]
    day_text.set_text('Date = ' + str(counter[i]) )
    Qline.set_data(x,y)
    # water level (ax4): same cross sections, water-level profile
    y= Sub.Hmin.loc[Sub.Qmin.index == counter[i]].values[0]
    WLline.set_data(x,y)
    # BC Q (ax2): discharge boundary condition curve for the current day
    x = Sub.QBCmin.columns.values
    y = Sub.QBCmin.loc[dt.datetime(counter[i].year,counter[i].month,counter[i].day)].values
    BC_q_line.set_data(x,y)
    # BC H (ax3): water-level boundary condition curve for the current day
    y = Sub.HBCmin.loc[dt.datetime(counter[i].year,counter[i].month,counter[i].day)].values
    BC_h_line.set_data(x,y)
    #BC Q point (ax2)
    # x: minutes elapsed since midnight of the current day (assumes QBCmin/HBCmin
    # columns are minute offsets -- TODO confirm); y: midnight of the current day
    x=(counter[i] - dt.datetime(counter[i].year,counter[i].month,counter[i].day)).seconds/60
    y= dt.datetime(counter[i].year,counter[i].month,counter[i].day)
    # NOTE(review): every scatter() call below creates a brand-new artist per
    # frame (twice per axis, counting the calls in the return statement), so
    # markers accumulate on ax2/ax3 across frames -- looks unintentional,
    # confirm whether a single updatable PathCollection was intended.
    ax2.scatter(x, Sub.QBCmin[x][y])
    #BC h point (ax3)
    ax3.scatter(x, Sub.HBCmin[x][y])
    return Qline, WLline, BC_q_line, BC_h_line, ax2.scatter(x, Sub.QBCmin[x][y],s=150), ax3.scatter(x, Sub.HBCmin[x][y],s=150), day_text
plt.tight_layout()
anim = animation.FuncAnimation(fig2, animate_min, init_func=init_min, frames = len(Sub.Qmin.index),
interval = Interval, blit = True)
return anim
def AnimateArray(Arr, Time, NoElem, TicksSpacing = 2, Figsize=(8,8), PlotNumbers=True,
                 NumSize= 8, Title = 'Total Discharge',titlesize = 15, Backgroundcolorthreshold=None,
                 cbarlabel = 'Discharge m3/s', cbarlabelsize = 12, textcolors=("white","black"),
                 Cbarlength = 0.75, Interval = 200,cmap='coolwarm_r', Textloc=[0.1,0.2],
                 Gaugecolor='red',Gaugesize=100, ColorScale = 1,gamma=1./2.,linthresh=0.0001,
                 linscale=0.001, midpoint=0, orientation='vertical', rotation=-90, IDcolor = "blue",
                 IDsize =10, **kwargs):
    """
    =============================================================================
    AnimateArray(Arr, Time, NoElem, TicksSpacing = 2, Figsize=(8,8), PlotNumbers=True,
            NumSize= 8, Title = 'Total Discharge',titlesize = 15, Backgroundcolorthreshold=None,
            cbarlabel = 'Discharge m3/s', cbarlabelsize = 12, textcolors=("white","black"),
            Cbarlength = 0.75, Interval = 200,cmap='coolwarm_r', Textloc=[0.1,0.2],
            Gaugecolor='red',Gaugesize=100, ColorScale = 1,gamma=1./2.,linthresh=0.0001,
            linscale=0.001, midpoint=0, orientation='vertical', rotation=-90,IDcolor = "blue",
            IDsize =10, **kwargs)
    =============================================================================
    Animate a 3D array (rows x cols x time) as a sequence of colored grid maps.

    Parameters
    ----------
    Arr : [array]
        the array you want to animate.
    Time : [dataframe]
        dataframe contains the date of values.
    NoElem : [integer]
        Number of the cells that has values.
    TicksSpacing : [integer], optional
        Spacing in the colorbar ticks. The default is 2.
    Figsize : [tuple], optional
        figure size. The default is (8,8).
    PlotNumbers : [bool], optional
        True to plot the values intop of each cell. The default is True.
    NumSize : integer, optional
        size of the numbers plotted intop of each cells. The default is 8.
    Title : [str], optional
        title of the plot. The default is 'Total Discharge'.
    titlesize : [integer], optional
        title size. The default is 15.
    Backgroundcolorthreshold : [float/integer], optional
        threshold value if the value of the cell is greater, the plotted
        numbers will be black and if smaller the plotted number will be white
        if None given the maxvalue/2 will be considered. The default is None.
    textcolors : TYPE, optional
        Two colors to be used to plot the values i top of each cell. The default is ("white","black").
    cbarlabel : str, optional
        label of the color bar. The default is 'Discharge m3/s'.
    cbarlabelsize : integer, optional
        size of the color bar label. The default is 12.
    Cbarlength : [float], optional
        ratio to control the height of the colorbar. The default is 0.75.
    Interval : [integer], optional
        number to controlthe speed of the animation. The default is 200.
    cmap : [str], optional
        color style. The default is 'coolwarm_r'.
    Textloc : [list], optional
        location of the date text. The default is [0.1,0.2].
    Gaugecolor : [str], optional
        color of the points. The default is 'red'.
    Gaugesize : [integer], optional
        size of the points. The default is 100.
    IDcolor : [str]
        the ID of the Point.The default is "blue".
    IDsize : [integer]
        size of the ID text. The default is 10.
    ColorScale : integer, optional
        there are 5 options to change the scale of the colors. The default is 1.
        1- ColorScale 1 is the normal scale
        2- ColorScale 2 is the power scale
        3- ColorScale 3 is the SymLogNorm scale
        4- ColorScale 4 is the BoundaryNorm scale
        5- ColorScale 5 is the midpoint (MidpointNormalize) scale
        ------------------------------------------------------------------
        gamma : [float], optional
            value needed for option 2 . The default is 1./2..
        linthresh : [float], optional
            value needed for option 3. The default is 0.0001.
        linscale : [float], optional
            value needed for option 3. The default is 0.001.
        midpoint : [float], optional
            value needed for option 5. The default is 0.
        ------------------------------------------------------------------
    orientation : [string], optional
        orintation of the colorbar horizontal/vertical. The default is 'vertical'.
    rotation : [number], optional
        rotation of the colorbar label. The default is -90.
    **kwargs : [dict]
        keys:
            Points : [dataframe].
                dataframe contains two columns 'cell_row', and cell_col to
                plot the point at this location

    Returns
    -------
    animation.FuncAnimation.
    """
    # a fixed figure number (60) is reused for this animation -- presumably to
    # avoid piling up figures on repeated calls; confirm this is intentional
    fig = plt.figure(60, figsize = Figsize)
    gs = gridspec.GridSpec(nrows = 2, ncols = 2, figure = fig )
    ax = fig.add_subplot(gs[:,:])
    # colorbar tick positions spanning the full (non-NaN) data range
    ticks = np.arange(np.nanmin(Arr), np.nanmax(Arr),TicksSpacing)
    if ColorScale == 1:
        # option 1: plain linear color scale
        im = ax.matshow(Arr[:,:,0],cmap=cmap, vmin = np.nanmin(Arr), vmax = np.nanmax(Arr),)
        cbar_kw = dict(ticks = ticks)
    elif ColorScale == 2:
        # option 2: power-law normalization with exponent 'gamma'
        im = ax.matshow(Arr[:,:,0],cmap=cmap, norm=colors.PowerNorm(gamma=gamma,
                        vmin = np.nanmin(Arr), vmax = np.nanmax(Arr)))
        cbar_kw = dict(ticks = ticks)
    elif ColorScale == 3:
        # NOTE(review): these two assignments override the linthresh/linscale
        # arguments documented above, making them dead parameters -- looks
        # like leftover debugging code, confirm before removing.
        linthresh=1
        linscale=2
        im = ax.matshow(Arr[:,:,0],cmap=cmap, norm=colors.SymLogNorm(linthresh=linthresh,
                        linscale=linscale, base=np.e,vmin = np.nanmin(Arr), vmax = np.nanmax(Arr)))
        formatter = LogFormatter(10, labelOnlyBase=False)
        cbar_kw = dict(ticks = ticks, format=formatter)
    elif ColorScale == 4:
        # option 4: discrete color bins aligned with the tick spacing
        bounds = np.arange(np.nanmin(Arr), np.nanmax(Arr),TicksSpacing)
        norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
        im = ax.matshow(Arr[:,:,0],cmap=cmap, norm=norm)
        cbar_kw = dict(ticks = ticks)
    else:
        # option 5: diverging normalization centred on 'midpoint'
        im = ax.matshow(Arr[:,:,0],cmap=cmap, norm=MidpointNormalize(midpoint=midpoint))
        cbar_kw = dict(ticks = ticks)
    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax, shrink=Cbarlength, orientation=orientation,**cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=rotation, va="bottom")
    cbar.ax.tick_params(labelsize=10)
    # text artist that displays the date of the current frame
    day_text = ax.text(Textloc[0],Textloc[1], ' ',fontsize= cbarlabelsize)
    ax.set_title(Title,fontsize= titlesize)
    # hide axis ticks/labels: cell indices carry no meaning in the map view
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_xticks([])
    ax.set_yticks([])
    # collect the (row, col) index of every non-NaN cell in the first time step;
    # assumes NoElem is not larger than the number of such cells -- TODO confirm
    Indexlist = list()
    for x in range(Arr.shape[0]):
        for y in range(Arr.shape[1]):
            if not np.isnan(Arr[x, y,0]):
                Indexlist.append([x,y])
    # one text artist per active cell, showing the (rounded) cell value
    Textlist = list()
    for x in range(NoElem):
        Textlist.append(ax.text(Indexlist[x][1], Indexlist[x][0],
                                round(Arr[Indexlist[x][0], Indexlist[x][1], 0],2),
                                ha="center", va="center", color="w", fontsize=NumSize))
    # Points = list()
    PoitsID = list()
    if 'Points' in kwargs.keys():
        # scatter the gauge locations and label each one with its id
        row = kwargs['Points'].loc[:,'cell_row'].tolist()
        col = kwargs['Points'].loc[:,'cell_col'].tolist()
        IDs = kwargs['Points'].loc[:,'id'].tolist()
        Points = ax.scatter(col, row, color=Gaugecolor, s=Gaugesize)
        for i in range(len(row)):
            PoitsID.append(ax.text(col[i], row[i], IDs[i], ha="center",
                                   va="center", color=IDcolor, fontsize=IDsize))
    # Normalize the threshold to the images color range.
    if Backgroundcolorthreshold is not None:
        Backgroundcolorthreshold = im.norm(Backgroundcolorthreshold)
    else:
        Backgroundcolorthreshold = im.norm(np.nanmax(Arr))/2.
    def init() :
        # blit initializer: draw the first time step and blank the date text
        im.set_data(Arr[:,:,0])
        day_text.set_text('')
        output = [im, day_text]
        if 'Points' in kwargs.keys():
            # plot gauges
            # for j in range(len(kwargs['Points'])):
            row = kwargs['Points'].loc[:,'cell_row'].tolist()
            col = kwargs['Points'].loc[:,'cell_col'].tolist()
            # Points[j].set_offsets(col, row)
            Points.set_offsets(np.c_[col, row])
            output.append(Points)
            for x in range(len(col)):
                PoitsID[x].set_text(IDs[x])
            output = output + PoitsID
        if PlotNumbers:
            for x in range(NoElem):
                val = round(Arr[Indexlist[x][0], Indexlist[x][1], 0],2)
                Textlist[x].set_text(val)
            output = output + Textlist
        # every artist that blitting must redraw
        return output
    def animate(i):
        # per-frame update: show time step i and refresh all overlay artists
        im.set_data(Arr[:,:,i])
        day_text.set_text('Date = '+str(Time[i])[0:10])
        output = [im, day_text]
        if 'Points' in kwargs.keys():
            # plot gauges
            # for j in range(len(kwargs['Points'])):
            row = kwargs['Points'].loc[:,'cell_row'].tolist()
            col = kwargs['Points'].loc[:,'cell_col'].tolist()
            # Points[j].set_offsets(col, row)
            Points.set_offsets(np.c_[col, row])
            output.append(Points)
            for x in range(len(col)):
                PoitsID[x].set_text(IDs[x])
            output = output + PoitsID
        if PlotNumbers:
            for x in range(NoElem):
                val = round(Arr[Indexlist[x][0], Indexlist[x][1], i],2)
                # pick black or white text depending on the cell's brightness
                kw = dict(color=textcolors[int(im.norm(Arr[Indexlist[x][0], Indexlist[x][1], i]) > Backgroundcolorthreshold)])
                Textlist[x].update(kw)
                Textlist[x].set_text(val)
            output = output + Textlist
        return output
    plt.tight_layout()
    # global anim
    anim = animation.FuncAnimation(fig, animate, init_func=init, frames = np.shape(Arr)[2],
                                   interval = Interval, blit = True)
    return anim
def SaveAnimation(anim, VideoFormat="gif",Path='',SaveFrames=20):
"""
Parameters
----------
anim : TYPE
DESCRIPTION.
VideoFormat : TYPE, optional
DESCRIPTION. The default is "gif".
Path : TYPE, optional
DESCRIPTION. The default is ''.
SaveFrames : TYPE, optional
DESCRIPTION. The default is 20.
in order to save a video using matplotlib you have to download ffmpeg from
https://ffmpeg.org/ and define this path to matplotlib
import matplotlib as mpl
mpl.rcParams['animation.ffmpeg_path'] = "path where you saved the ffmpeg.exe/ffmpeg.exe"
Returns
-------
None.
"""
ffmpegPath = os.getenv("HOME") + "/.matplotlib/ffmpeg-static/bin/ffmpeg.exe"
message = """
please visit https://ffmpeg.org/ and download a version of ffmpeg compitable
with your operating system, and copy the content of the folder and paste it
in the "user/.matplotlib/ffmpeg-static/"
ffmpeg-static
"""
assert os.path.exists(ffmpegPath), message
mpl.rcParams['animation.ffmpeg_path'] = ffmpegPath
if VideoFormat == "gif":
# assert len(Path) >= 1 and Path.endswith(".gif"), "please enter a valid path to save the animation"
writergif = animation.PillowWriter(fps=SaveFrames)
anim.save(Path, writer=writergif)
else:
try:
if VideoFormat == 'avi' or VideoFormat == 'mov':
writervideo = animation.FFMpegWriter(fps=SaveFrames,bitrate=1800)
anim.save(Path, writer = writervideo)
elif VideoFormat == 'mp4':
writermp4 = animation.FFMpegWriter(fps=SaveFrames,bitrate=1800)
anim.save(Path, writer=writermp4)
except FileNotFoundError:
print("please visit https://ffmpeg.org/ and download a version of ffmpeg compitable with your operating system, for more details please check the method definition")
@staticmethod
def rescale(OldValue,OldMin,OldMax,NewMin,NewMax):
"""
===================================================================
rescale(OldValue,OldMin,OldMax,NewMin,NewMax)
===================================================================
this function rescale a value between two boundaries to a new value bewteen two
other boundaries
inputs:
1-OldValue:
[float] value need to transformed
2-OldMin:
[float] min old | |
Actions that initiate navigation, are waiting for
these navigation to happen and for pages to start loading.
You can opt out of waiting via setting this flag.
You would only need this option in the exceptional cases such as navigating
to inaccessible pages. Defaults to ``False``.
``*modifiers``
Modifier keys to press. Ensures that only these modifiers are pressed
during the click, and then restores current modifiers back.
If not specified, currently pressed modifiers are used.
"""
if self.library.presenter_mode:
self.hover(selector)
self.library.highlight_elements(selector, duration=timedelta(seconds=2))
sleep(2)
with self.playwright.grpc_channel() as stub:
options = {
"button": button.name,
"clickCount": clickCount,
"force": force,
"noWaitAfter": noWaitAfter,
}
if delay:
options["delay"] = self.get_timeout(delay)
# Without the != None 0 being falsy causes issues
if position_x is not None and position_y is not None:
positions: Dict[str, object] = {"x": position_x, "y": position_y}
options["position"] = positions
if modifiers:
options["modifiers"] = [m.name for m in modifiers]
options_json = json.dumps(options)
logger.debug(f"Click options are: {options_json}")
response = stub.Click(
Request().ElementSelectorWithOptions(
selector=selector, options=options_json
)
)
logger.debug(response.log)
@keyword(tags=("Setter", "PageContent"))
def hover(
self,
selector: str,
position_x: Optional[float] = None,
position_y: Optional[float] = None,
force: bool = False,
*modifiers: KeyboardModifier,
):
"""Moves the virtual mouse and scrolls to the element found by ``selector``.
This method hovers over an element matching ``selector`` by performing the following steps:
- Find an element match matching ``selector``. If there is none, wait until a matching element is attached to the DOM.
- Wait for actionability checks on the matched element, unless ``force`` option is set. If the element is detached during the checks, the whole action is retried.
- Scroll the element into view if needed.
- Use `Mouse Move` to hover over the center of the element, or the specified ``position``.
``selector`` Selector element to hover.
See the `Finding elements` section for details about the selectors.
``position_x`` & ``position_y`` A point to hover relative to the top-left corner of element bounding box.
If not specified, hovers over some visible point of the element.
Only positive values within the bounding-box are allowed.
``force`` Set to True to skip Playwright's [https://playwright.dev/docs/actionability | Actionability checks].
``*modifiers`` Modifier keys to press. Ensures that only these modifiers are
pressed during the hover, and then restores current modifiers back.
If not specified, currently pressed modifiers are used.
"""
with self.playwright.grpc_channel() as stub:
options: Dict[str, Any] = {"force": force}
if position_x and position_y:
positions: Dict[str, object] = {"x": position_x, "y": position_y}
options["position"] = positions
if modifiers:
options["modifiers"] = [m.name for m in modifiers]
options_json = json.dumps(options)
logger.debug(f"Hover Options are: {options_json}")
response = stub.Hover(
Request().ElementSelectorWithOptions(
selector=selector, options=options_json
)
)
logger.debug(response.log)
@keyword(tags=("Setter", "PageContent"))
def focus(self, selector: str):
"""Moves focus on to the element found by ``selector``.
``selector`` Selector of the element.
See the `Finding elements` section for details about the selectors.
If there's no element matching selector, the method waits until a
matching element appears in the DOM. Timeouts after 10 seconds.
"""
with self.playwright.grpc_channel() as stub:
response = stub.Focus(Request().ElementSelector(selector=selector))
logger.debug(response.log)
@keyword(tags=("Setter", "PageContent"))
def scroll_to(
self,
selector: Optional[str] = None,
vertical: str = "top",
horizontal: str = "left",
behavior: ScrollBehavior = ScrollBehavior.auto,
):
"""Scrolls an element or the page to an absolute position based on given coordinates.
``selector`` Selector of the element. If the selector is ``${None}`` or ``${Empty}``
the page itself is scrolled. To ensure an element is in view use `Hover` instead.
See the `Finding elements` section for details about the selectors.
``vertical`` defines where to scroll vertically.
It can be a positive number, like ``300``.
It can be a percentage value of the absolute scrollable size, like ``50%``.
It can be a string defining that top or the bottom of the scroll area. < ``top`` | ``bottom`` >
_Be aware that some pages do lazy loading and load more content once you scroll down._
Bottom defines the current known bottom coordinate.
``horizontal`` defines where to scroll horizontally.
Works same as vertical but defines < ``left`` | ``right`` > as start and end.
``behavior`` defines whether the scroll happens directly or it scrolls smoothly.
"""
scroll_size = self.library.get_scroll_size(selector)
scroll_width = scroll_size["width"]
scroll_height = scroll_size["height"]
client_size = self.library.get_client_size(selector)
client_width = client_size["width"]
client_height = client_size["height"]
vertical_px = get_abs_scroll_coordinates(
vertical, scroll_height - client_height, "top", "bottom"
)
horizontal_px = get_abs_scroll_coordinates(
horizontal, scroll_width - client_width, "left", "right"
)
exec_scroll_function(
self,
f'scrollTo({{"left": {horizontal_px}, "top": {vertical_px}, "behavior": "{behavior.name}"}})',
selector,
)
@keyword(tags=("Setter", "PageContent"))
def scroll_by(
self,
selector: Optional[str] = None,
vertical: str = "height",
horizontal: str = "0",
behavior: ScrollBehavior = ScrollBehavior.auto,
):
"""Scrolls an element or the page relative from current position by the given values.
``selector`` Selector of the element. If the selector is ``${None}`` or ``${Empty}``
the page itself is scrolled. To ensure an element is in view use `Hover` instead.
See the `Finding elements` section for details about the selectors.
``vertical`` defines how far and in which direction to scroll vertically.
It can be a positive or negative number. Positive scrolls down, like ``50``, negative scrolls up, like ``-50``.
It can be a percentage value of the absolute scrollable size, like ``9.95%`` or negative like ``-10%``.
It can be the string ``height`` to defining to scroll exactly one visible height down or up with ``-height``.
_Be aware that some pages do lazy loading and load more content once you scroll down._
The percentage of the current scrollable height is used and may change.
``horizontal`` defines where to scroll horizontally.
Works same as vertical but defines positive values for right and negative values for left.
``width`` defines to scroll exactly one visible range to the right.
``behavior`` defines whether the scroll happens directly or it scrolls smoothly.
"""
scroll_size = self.library.get_scroll_size(selector)
scroll_width = scroll_size["width"]
scroll_height = scroll_size["height"]
client_size = self.library.get_client_size(selector)
client_width = client_size["width"]
client_height = client_size["height"]
vertical_px = get_rel_scroll_coordinates(
vertical, scroll_height - client_height, client_height, "height"
)
horizontal_px = get_rel_scroll_coordinates(
horizontal, scroll_width - client_width, client_width, "width"
)
exec_scroll_function(
self,
f'scrollBy({{"left": {horizontal_px}, "top": {vertical_px}, "behavior": "{behavior.name}"}})',
selector,
)
@keyword(tags=("Setter", "PageContent"))
def check_checkbox(self, selector: str):
"""Checks the checkbox or selects radio button found by ``selector``.
``selector`` Selector of the checkbox.
See the `Finding elements` section for details about the selectors.
Does nothing if the element is already checked/selected.
"""
with self.playwright.grpc_channel() as stub:
response = stub.CheckCheckbox(Request().ElementSelector(selector=selector))
logger.debug(response.log)
@keyword(tags=("Setter", "PageContent"))
def uncheck_checkbox(self, selector: str):
"""Unchecks the checkbox found by ``selector``.
``selector`` Selector of the checkbox.
See the `Finding elements` section for details about the selectors.
Does nothing if the element is not checked/selected.
"""
with self.playwright.grpc_channel() as stub:
response = stub.UncheckCheckbox(
Request().ElementSelector(selector=selector)
)
logger.debug(response.log)
@keyword(tags=("Setter", "PageContent"))
def select_options_by(self, selector: str, attribute: SelectAttribute, *values):
"""Selects options from select element found by ``selector``.
``selector`` Selector of the select tag.
See the `Finding elements` section for details about the selectors.
Matches based on the chosen attribute with list of ``values``.
Possible attributes to match options by:
``attribute``
If no values to select are passed will deselect options in element.
"""
matchers = ""
if not values or len(values) == 1 and not values[0]:
self.deselect_options(selector)
return
if attribute is SelectAttribute.value:
matchers = json.dumps([{"value": s} for s in values])
elif attribute is SelectAttribute.label:
matchers = json.dumps([{"label": s} for s in values])
elif attribute is SelectAttribute.index:
matchers = json.dumps([{"index": int(s)} for s in values])
with self.playwright.grpc_channel() as stub:
response = stub.SelectOption(
Request().SelectElementSelector(selector=selector, matcherJson=matchers)
)
logger.debug(response.log)
@keyword(tags=("Setter", "PageContent"))
def deselect_options(self, selector: str):
"""Deselects all options from select element found by ``selector``.
``selector`` Selector of the select tag.
See the `Finding elements` section for details about the selectors.
"""
with self.playwright.grpc_channel() as stub:
response = stub.DeselectOption(Request().ElementSelector(selector=selector))
logger.debug(response.log)
def _fill_text(self, selector: str, txt: str, log_response: bool = True):
if self.library.presenter_mode:
self.hover(selector)
self.library.highlight_elements(selector, duration=timedelta(seconds=2))
sleep(2)
with self.playwright.grpc_channel() as stub:
response = stub.FillText(Request().FillText(selector=selector, text=txt))
if log_response:
logger.debug(response.log)
def _type_text(
self,
selector: str,
txt: str,
delay: timedelta = timedelta(microseconds=0),
clear: bool = True,
log_response: bool = True,
):
if self.library.presenter_mode:
self.hover(selector)
self.library.highlight_elements(selector, duration=timedelta(seconds=2))
sleep(2)
with self.playwright.grpc_channel() as stub:
delay_ms = self.get_timeout(delay)
| |
''' Models for representing top-level plot objects.
'''
from __future__ import absolute_import
import warnings
from six import string_types
from ..core.enums import Location, OutputBackend
from ..core.properties import Bool, Dict, Enum, Include, Instance, Int, List, Override, String, Float
from ..core.property_mixins import LineProps, FillProps
from ..core.query import find
from ..core.validation import error, warning
from ..core.validation.errors import BAD_EXTRA_RANGE_NAME, REQUIRED_RANGE, REQUIRED_SCALE, INCOMPATIBLE_SCALE_AND_RANGE
from ..core.validation.warnings import MISSING_RENDERERS
from ..model import Model
from ..util.string import nice_join
from .annotations import Legend, Title
from .axes import Axis
from .glyphs import Glyph
from .grids import Grid
from .layouts import LayoutDOM
from .ranges import Range, FactorRange, DataRange1d, Range1d
from .renderers import GlyphRenderer, Renderer, TileRenderer
from .scales import Scale, CategoricalScale, LinearScale, LogScale
from .sources import DataSource, ColumnDataSource
from .tools import Tool, Toolbar, HoverTool
def _check_conflicting_kwargs(a1, a2, kwargs):
if a1 in kwargs and a2 in kwargs:
raise ValueError("Conflicting properties set on plot: %r and %r" % (a1, a2))
class _list_attr_splat(list):
def __setattr__(self, attr, value):
for x in self:
setattr(x, attr, value)
def __dir__(self):
if len(set(type(x) for x in self)) == 1:
return dir(self[0])
else:
return dir(self)
_LEGEND_EMPTY_WARNING = """
You are attemptings to set `plot.legend.%s` on a plot that has zero legends added, this will have no effect.
Before legend properties can be set, you must add a Legend explicitly, or call a glyph method with the 'legend' parameter set.
"""
class _legend_attr_splat(_list_attr_splat):
    """Splattable list of legends that warns when assignment can have no effect."""

    def __setattr__(self, attr, value):
        if not self:
            # With zero legends the broadcast below is a no-op; tell the user.
            warnings.warn(_LEGEND_EMPTY_WARNING % attr)
        return super(_legend_attr_splat, self).__setattr__(attr, value)
def _select_helper(args, kwargs):
""" Allow flexible selector syntax.
Returns:
dict
"""
if len(args) > 1:
raise TypeError("select accepts at most ONE positional argument.")
if len(args) > 0 and len(kwargs) > 0:
raise TypeError("select accepts EITHER a positional argument, OR keyword arguments (not both).")
if len(args) == 0 and len(kwargs) == 0:
raise TypeError("select requires EITHER a positional argument, OR keyword arguments.")
if args:
arg = args[0]
if isinstance(arg, dict):
selector = arg
elif isinstance(arg, string_types):
selector = dict(name=arg)
elif isinstance(arg, type) and issubclass(arg, Model):
selector = {"type": arg}
else:
raise TypeError("selector must be a dictionary, string or plot object.")
elif 'selector' in kwargs:
if len(kwargs) == 1:
selector = kwargs['selector']
else:
raise TypeError("when passing 'selector' keyword arg, not other keyword args may be present")
else:
selector = kwargs
return selector
class Plot(LayoutDOM):
''' Model representing a plot, containing glyphs, guides, annotations.
'''
    def select(self, *args, **kwargs):
        ''' Query this object and all of its references for objects that
        match the given selector.

        There are a few different ways to call the ``select`` method.
        The most general is to supply a JSON-like query dictionary as the
        single argument or as keyword arguments:

        Args:
            selector (JSON-like) : a query dictionary of properties and
                values to match against (e.g. ``{"type": HoverTool}``)

        Keyword Arguments:
            kwargs : query dict key/values as keyword arguments

        Additionally, for compatibility with ``Model.select``, a selector
        dict may be passed as ``selector`` keyword argument, in which case
        the value of ``kwargs['selector']`` is used for the query.

        For convenience, queries on just names can be made by supplying
        the ``name`` string as the single parameter:

        Args:
            name (str) : the name to query on

        Also queries on just type can be made simply by supplying the
        ``Model`` subclass as the single parameter:

        Args:
            type (Model) : the type to query on

        Returns:
            seq[Model]

        Examples:

            .. code-block:: python

                # These three are equivalent
                p.select(selector={"type": HoverTool})
                p.select({"type": HoverTool})
                p.select(HoverTool)

                # These two are also equivalent
                p.select({"name": "mycircle"})
                p.select("mycircle")

                # Keyword arguments can be supplied in place of selector dict
                p.select({"name": "foo", "type": HoverTool})
                p.select(name="foo", type=HoverTool)

        '''
        selector = _select_helper(args, kwargs)

        # Want to pass selector that is a dictionary
        return _list_attr_splat(find(self.references(), selector, {'plot': self}))
def row(self, row, gridplot):
''' Return whether this plot is in a given row of a GridPlot.
Args:
row (int) : index of the row to test
gridplot (GridPlot) : the GridPlot to check
Returns:
bool
'''
return self in gridplot.row(row)
def column(self, col, gridplot):
''' Return whether this plot is in a given column of a GridPlot.
Args:
col (int) : index of the column to test
gridplot (GridPlot) : the GridPlot to check
Returns:
bool
'''
return self in gridplot.column(col)
def _axis(self, *sides):
objs = []
for s in sides:
objs.extend(getattr(self, s, []))
axis = [obj for obj in objs if isinstance(obj, Axis)]
return _list_attr_splat(axis)
    @property
    def xaxis(self):
        ''' Splattable list of :class:`~bokeh.models.axes.Axis` objects for the x dimension.

        '''
        # x-axes may be attached above or below the plot area.
        return self._axis("above", "below")
    @property
    def yaxis(self):
        ''' Splattable list of :class:`~bokeh.models.axes.Axis` objects for the y dimension.

        '''
        # y-axes may be attached left or right of the plot area.
        return self._axis("left", "right")
    @property
    def axis(self):
        ''' Splattable list of :class:`~bokeh.models.axes.Axis` objects (both dimensions).

        '''
        return _list_attr_splat(self.xaxis + self.yaxis)
    @property
    def legend(self):
        ''' Splattable list of :class:`~bokeh.models.annotations.Legend` objects.

        '''
        # Legends live in ``renderers`` alongside glyphs; filter them out here.
        legends = [obj for obj in self.renderers if isinstance(obj, Legend)]
        return _legend_attr_splat(legends)
    @property
    def hover(self):
        ''' Splattable list of :class:`~bokeh.models.tools.HoverTool` objects.

        '''
        # ``self.tools`` delegates to the toolbar (see the ``tools`` property).
        hovers = [obj for obj in self.tools if isinstance(obj, HoverTool)]
        return _list_attr_splat(hovers)
def _grid(self, dimension):
grid = [obj for obj in self.renderers if isinstance(obj, Grid) and obj.dimension==dimension]
return _list_attr_splat(grid)
    @property
    def xgrid(self):
        ''' Splattable list of :class:`~bokeh.models.grids.Grid` objects for the x dimension.

        '''
        return self._grid(0)
    @property
    def ygrid(self):
        ''' Splattable list of :class:`~bokeh.models.grids.Grid` objects for the y dimension.

        '''
        return self._grid(1)
    @property
    def grid(self):
        ''' Splattable list of :class:`~bokeh.models.grids.Grid` objects (both dimensions).

        '''
        return _list_attr_splat(self.xgrid + self.ygrid)
    @property
    def tools(self):
        ''' The list of tools for this plot; delegates to the toolbar, which
        is the single source of truth for tool membership.

        '''
        return self.toolbar.tools

    @tools.setter
    def tools(self, tools):
        self.toolbar.tools = tools
def add_layout(self, obj, place='center'):
''' Adds an object to the plot in a specified place.
Args:
obj (Renderer) : the object to add to the Plot
place (str, optional) : where to add the object (default: 'center')
Valid places are: 'left', 'right', 'above', 'below', 'center'.
Returns:
None
'''
valid_places = ['left', 'right', 'above', 'below', 'center']
if place not in valid_places:
raise ValueError(
"Invalid place '%s' specified. Valid place values are: %s" % (place, nice_join(valid_places))
)
if hasattr(obj, 'plot'):
if obj.plot is not None:
raise ValueError("object to be added already has 'plot' attribute set")
obj.plot = self
self.renderers.append(obj)
if place is not 'center':
getattr(self, place).append(obj)
def add_tools(self, *tools):
''' Adds tools to the plot.
Args:
*tools (Tool) : the tools to add to the Plot
Returns:
None
'''
for tool in tools:
if not isinstance(tool, Tool):
raise ValueError("All arguments to add_tool must be Tool subclasses.")
if hasattr(tool, 'overlay'):
self.renderers.append(tool.overlay)
self.toolbar.tools.append(tool)
def add_glyph(self, source_or_glyph, glyph=None, **kw):
''' Adds a glyph to the plot with associated data sources and ranges.
This function will take care of creating and configuring a Glyph object,
and then add it to the plot's list of renderers.
Args:
source (DataSource) : a data source for the glyphs to all use
glyph (Glyph) : the glyph to add to the Plot
Keyword Arguments:
Any additional keyword arguments are passed on as-is to the
Glyph initializer.
Returns:
GlyphRenderer
'''
if glyph is not None:
source = source_or_glyph
else:
source, glyph = ColumnDataSource(), source_or_glyph
if not isinstance(source, DataSource):
raise ValueError("'source' argument to add_glyph() must be DataSource subclass")
if not isinstance(glyph, Glyph):
raise ValueError("'glyph' argument to add_glyph() must be Glyph subclass")
g = GlyphRenderer(data_source=source, glyph=glyph, **kw)
self.renderers.append(g)
return g
def add_tile(self, tile_source, **kw):
''' Adds new TileRenderer into the Plot.renderers
Args:
tile_source (TileSource) : a tile source instance which contain tileset configuration
Keyword Arguments:
Additional keyword arguments are passed on as-is to the tile renderer
Returns:
TileRenderer : TileRenderer
'''
tile_renderer = TileRenderer(tile_source=tile_source, **kw)
self.renderers.append(tile_renderer)
return tile_renderer
@error(REQUIRED_RANGE)
def _check_required_range(self):
missing = []
if not self.x_range: missing.append('x_range')
if not self.y_range: missing.append('y_range')
if missing:
return ", ".join(missing) + " [%s]" % self
@error(REQUIRED_SCALE)
def _check_required_scale(self):
missing = []
if not self.x_scale: missing.append('x_scale')
if not self.y_scale: missing.append('y_scale')
if missing:
return ", ".join(missing) + " [%s]" % self
@error(INCOMPATIBLE_SCALE_AND_RANGE)
def _check_compatible_scale_and_ranges(self):
incompatible = []
x_ranges = list(self.extra_x_ranges.values())
if self.x_range: x_ranges.append(self.x_range)
y_ranges = list(self.extra_y_ranges.values())
if self.y_range: y_ranges.append(self.y_range)
if self.x_scale is not None:
for rng in x_ranges:
if isinstance(rng, (DataRange1d, Range1d)) and not isinstance(self.x_scale, (LinearScale, LogScale)):
incompatible.append("incompatibility on x-dimension: %s, %s" %(rng, self.x_scale))
elif isinstance(rng, FactorRange) and not isinstance(self.x_scale, CategoricalScale):
incompatible.append("incompatibility on x-dimension: %s/%s" %(rng, self.x_scale))
# special case because CategoricalScale is a subclass of LinearScale, should be removed in future
if isinstance(rng, (DataRange1d, Range1d)) and isinstance(self.x_scale, CategoricalScale):
incompatible.append("incompatibility on x-dimension: | |
has changed, typically the score values?
pass
elif data["event"] == "timeSeriesWidget.newAnnotation":
#self.logger.debug(f"draw anno!")
self.__dispatch_function(self.draw_new_annotation)
    def update_scores(self):
        # Pull fresh score variables from the backend; redraw the scores
        # overlay only if it is currently enabled.
        if self.server.fetch_score_variables():
            if self.showScores:
                self.show_scores()
    def show_legend(self):
        # Make the bokeh legend visible.
        self.plot.legend.visible = True
    def hide_legend(self):
        # Hide the bokeh legend.
        self.plot.legend.visible = False
def hide_marker(self):
self.remove_renderers([lin+"_marker" for lin in self.lines])
def show_marker(self):
self.logger.debug("show marker")
for variableName in self.lines:
markerName = variableName + "_marker"
color = self.lines[variableName].glyph.line_color
marker = self.plot.circle(x="x",y="y", line_color=color, fill_color=color,
source=self.columnData[variableName], name=markerName,
size=3) # x:"time", y:variableName #the legend must havee different name than the source bug
pass
    def update_column_datas(self,newData):
        """Merge freshly fetched data into the bokeh ColumnDataSources.

        ``newData`` maps variable names to value lists; for each variable
        ``var`` a companion key ``var + "__time"`` holds its time stamps.
        ``*_limitMax``/``*_limitMin`` pairs are converted to band dicts.
        """
        if self.columnData =={}:
            self.logger.info("init the colum data")
            #for var in self.server.get_variables_selectable():
            #    self.columnData[var]=ColumnDataSource({"x":[],"y":[]})
        # NOTE(review): the guard tests for the key "__time" but deletes the
        # key "time" — this raises KeyError whenever "__time" is present but
        # "time" is not. Confirm which key was intended before changing.
        if "__time" in newData:
            del newData["time"]
        for var in newData:
            if not var.endswith("__time"):
                if var.endswith("_limitMax"):
                    # Pair a "_limitMax" series with its "_limitMin" partner
                    # to build an upper/lower band.
                    minName = var[:-len("_limitMax")]+"_limitMin"
                    if minName in newData:
                        dic = {"x":newData[var+"__time"],
                               "upper":newData[var],
                               "lower":newData[minName],
                               "y":newData[var]} # is needed for the auto adjust y limits
                        dic = self.insert_band_breaks(dic)
                    else:
                        #min is missing, can't process
                        continue
                else:
                    dic = {"y":newData[var],
                           "x":newData[var+"__time"]}
                if var in self.columnData:
                    self.columnData[var].data = dic #update
                else:
                    self.columnData[var] = ColumnDataSource(dic)
def insert_band_breaks(self,band):
# we insert a break where any of the values of the bandDict values is numpy.nan
# the band dict has keys x,y,upper,lower, where y is the upper
fill = 0
# check start and end
for entry in ["x", "y", "upper", "lower"]:
band[entry] = numpy.asarray(band[entry], dtype=numpy.float64)
if len(band["lower"])==0 or len(band["upper"])==0:
return band
if ~numpy.isfinite(band["lower"][-1]) or ~numpy.isfinite(band["upper"][-1]):
band["upper"][-1] = fill
band["lower"][-1] = fill
band["y"][-1] = numpy.nan
band["x"][-1] = band["x"][-2]
if ~numpy.isfinite(band["lower"][0]) or ~numpy.isfinite(band["upper"][0]):
band["upper"][0] = fill
band["lower"][0] = fill
band["y"][0] = numpy.nan
band["x"][0] = band["x"][1]
inf1 = numpy.isfinite(band["lower"])
indices1 = numpy.where(~inf1)[0]
inf2 = numpy.isfinite(band["upper"])
indices2 = numpy.where(~inf2)[0]
indices = numpy.append(indices1, indices2)
indices = set(indices) - set([0, len(band["lower"]) - 1])
indices = numpy.asarray(list(indices))
indices = list(numpy.sort(indices))
# now we have a sorted list of indices where eiher lower or upper is nan/inf meaning this is a break
# for the break we do the following: we need the scheme as an example
# (t,v): (1,1),(2,2),(2,0),(3,0),(3,4),(4,5) etc
# we have the data
# t= 1 2 3 4 5 6 7
# v= 1 2 n 4 5 6 7
# so we create
# t= 1 2 2 4 4 5 6 7
# v 1 2 0 0 4 5 6 7
print("indices", indices)
insertX = []
for indx in indices:
# add a start and convert the inf to a end of the break
band['x'][indx] = band['x'][indx + 1]
band['y'][indx] = numpy.nan
band["lower"][indx] = fill
band["upper"][indx] = fill
insertX.append(band['x'][indx - 1])
# now the inserts
band['x'] = numpy.insert(band['x'], indices, insertX)
band['y'] = numpy.insert(band['y'], indices, [numpy.nan] * len(insertX))
band['lower'] = numpy.insert(band['lower'], indices, [fill] * len(insertX))
band['upper'] = numpy.insert(band['upper'], indices, [fill] * len(insertX))
return band
    def sync_x_axis(self,times=None):
        """Fetch data for the selected variables over ``times`` and refresh the plot.

        ``times``: dict with "start" and "end" keys.
        NOTE(review): the default ``times=None`` would raise TypeError on
        ``times["start"]`` below — callers apparently always pass a dict;
        confirm before relying on the default.
        """
        self.logger.debug(f"sync_x_axis x ")
        variables = self.server.get_variables_selected()
        start = times["start"]
        end = times["end"]
        #self.set_x_axis(start,end)
        variablesRequest = variables.copy()
        variablesRequest.append("__time") # make sure we get the time included
        newData = self.server.get_data(variablesRequest, start, end,
                                       self.server.get_settings()["bins"]) # for debug
        self.update_column_datas(newData)
        self.set_x_axis(start, end)
        #self.plot.x_range.start = start
        #self.plot.x_range.end = end
        self.autoAdjustY = self.server.get_mirror()["autoScaleY"][".properties"]["value"]
        self.adjust_y_axis_limits()
def set_y_axis(self,limits):
self.plot.y_range.start = limits[0]
self.plot.y_range.end = limits[1]
def draw_new_annotation(self):
data = self.server.fetch_mirror()
entry = data["nextNewAnnotation"][".properties"]["value"]
if entry["type"] == "time":
self.boxSelectTool.dimensions = "width"
self.set_active_drag_tool(self.boxSelectTool)
self.currentAnnotationTag = entry["tag"]
elif entry["type"] == "threshold":
self.boxSelectTool.dimensions = "height"
self.set_active_drag_tool(self.boxSelectTool)
self.currentAnnotationTag = "threshold"
self.currentAnnotationVariable = entry["variable"]
elif entry["type"] == "motif":
self.boxSelectTool.dimensions = "width"
self.set_active_drag_tool(self.boxSelectTool)
self.currentAnnotationTag = "motif"
self.currentAnnotationVariable = entry["variable"]
def _compare_anno(self,anno1,anno2):
keysInBoth = set(anno1.keys()).intersection(set(anno2.keys()))
for k in keysInBoth:
if k == "browsePath":
continue
elif k in ["startTime", "endTime"]:
diff = abs(anno1[k]-anno2[k])
if diff < 0.1:
continue
else:
self.logger.debug(f'compare failded time diff {diff}')
return False
else:
if anno1[k] != anno2[k]:
print(f"compare failed {k}, {anno1[k]} {anno2[k]}")
return False
return True
    def update_annotations_and_thresholds_old(self,arg=None):
        """Reconcile local annotation/threshold renderers with the backend state.

        Called when the backend reports changed annotation leaves or values.
        Computes deleted / new / modified annotations (optionally from a
        differential event) and rebuilds the affected Bokeh renderers.

        Args:
            arg: event payload from the backend; expected to carry a "data"
                dict with "sourcePath" and optionally "value"/"_eventInfo".
                NOTE(review): the "value" branch below indexes arg["data"]
                without re-checking that "data" exists — presumably callers
                always provide it; confirm against the event source.
        """
        self.logger.debug(f"update_annotations {arg}")
        # this is called when the backend has changed annotation leaves or values, it adjusts annotations
        # and thresholds
        #avoid reload if an envelope embedded in a annotation is changed
        if "data" in arg and "sourcePath" in arg["data"]:
            splitted = arg["data"]["sourcePath"].split('.')
            if len(splitted)>2 and splitted[-2]=="envelope":
                self.logger.info("skip anno update due to envelope")
                return
        # modifies give the value
        if "value" in arg["data"]:
            #check if the annotation is in our known list
            # map browsePath -> annotation id for a reverse lookup
            annotationBrowsePath = '.'.join(arg["data"]["sourcePath"].split('.')[:-1])
            lookup = {v["browsePath"]:k for k,v in self.server.get_annotations().items()}
            if annotationBrowsePath in lookup:
                #build the _eventInfo to avoid the fetch
                id = lookup[annotationBrowsePath]
                updatedAnno = copy.deepcopy(self.server.get_annotations()[id])
                changeKey = arg["data"]["sourcePath"].split('.')[-1]
                updatedAnno[changeKey]=arg["data"]["value"]
                if changeKey != "variable" and "variable" in updatedAnno:
                    # NOTE(review): this self-assignment is a no-op as written;
                    # the trailing comment suggests a list->string conversion
                    # was intended here — confirm.
                    updatedAnno["variable"] = updatedAnno["variable"] # events from the outside deliver the variable as list (the forward refs from the referencer, internally, we only keep a string
                eventInfo = {"new":{},"delete":{},"modify":{id:updatedAnno}}
                arg["data"]["_eventInfo"] = eventInfo
        lastAnnotations = self.server.get_annotations()
        # prefer the cheap differential fetch when the event carries _eventInfo
        if "data" in arg and "_eventInfo" in arg["data"]:
            newAnnotations = self.server.fetch_annotations_differential(arg["data"]["_eventInfo"])
            differential = True
        else:
            newAnnotations = self.server.fetch_annotations()
            differential = False
        #check for deletes
        # the delete check is fast enough so no need to improve with differential
        deleteList = [] # a list of ids
        for annoId,anno in lastAnnotations.items():
            if annoId not in newAnnotations:
                self.logger.debug(f"update_annotations() -- annotations was deleted on server: {annoId}, {lastAnnotations[annoId]['name']}")
                deleteList.append(annoId)
                if annoId in self.renderers:
                    # renderer exists: park it in the garbage list under lock
                    with self.renderersLock:
                        self.renderersGarbage.append(self.renderers[annoId]["renderer"])
                        del self.renderers[annoId]
                else:
                    self.delete_annotations([annoId])
        self.logger.debug(f"update_annotations() -- must delete {deleteList}")
        if self.boxModifierVisible:
            # hide the box modifier if its annotation was deleted
            if self.boxModifierAnnotationName in deleteList:
                self.box_modifier_hide()
        #now the new ones
        createdTimeAnnos = []
        if not differential:
            annosToIterate = newAnnotations
        else:
            #take on the the nodes from the incoming
            annosToIterate = arg["data"]["_eventInfo"]["new"]
            annosToIterate.update(arg["data"]["_eventInfo"]["modify"])
            self.logger.debug(f"annosToIterate {annosToIterate}")
        for annoId,anno in annosToIterate.items():
            if anno["type"] == "time":
                if not self.find_renderer(annoId):# not in self.renderers:# and self.showAnnotations:
                    self.logger.debug(f"new annotations {annoId}")
                    self.draw_annotation(anno,visible=False) #will be activated later with show_annotations
                    createdTimeAnnos.append(annoId)
                else:
                    #check if is has changed
                    #if anno != self.renderers[annoId]["info"]:
                    if not self._compare_anno(anno,self.renderers[annoId]["info"] ):
                        self.logger.debug(f"update_annotations() -- annotation has changed {annoId} {self.renderers[annoId]['info']} => {anno}")
                        if BOX_ANNO:
                            isVisible = self.renderers[annoId]["renderer"] in self.plot.renderers # remember if the annotation was currently visible
                        with self.renderersLock:
                            self.renderersGarbage.append(self.renderers[annoId]["renderer"])
                            del self.renderers[annoId]# kick out the entry,
                        # if the currently selected is being changed, we hide the box modifier
                        if self.boxModifierVisible:
                            if self.boxModifierAnnotationName == annoId:
                                self.box_modifier_hide()
                        # now recreate: if the annotation was visible before (was in the plot.renderers
                        # then we show it again, if not, we decide later in the show_annotations if it will be shown or not
                        # depending on selected tags etc. this covers especially the exception case where a user
                        # draws a new annotation, which is a currently NOT activated tag, then modifies that new annotation:
                        # it should stay visible!
                        if BOX_ANNO:
                            if isVisible:
                                self.draw_annotation(anno, visible=True) #show right away because it was visible before
                            else:
                                self.draw_annotation(anno, visible=False) # show later if allowed depending on tags etc.
                                createdTimeAnnos.append(annoId) #show later if allowed
                        else:
                            self.update_annotation_data(anno,annoId)
            if anno["type"] in ["threshold","motif"]:
                # for thresholds/motifs we do not support delete/create per backend, only modify
                # so check for modifications here
                # it might not be part of the renderers: maybe thresholds are currently off
                if annoId in self.renderers and not self._compare_anno(anno,self.renderers[annoId]["info"]):
                    self.logger.debug(f"update_annotations() -- thresholds/motif has changed {annoId} {self.renderers[annoId]['info']} => {anno}")
                    with self.renderersLock:
                        self.renderersGarbage.append(self.renderers[annoId]["renderer"])
                        del self.renderers[annoId] # kick out the entry, the remaining invisible renderer will stay in bokeh as garbage
                    #if the currently selected is being changed, we hide the box modifier
                    if self.boxModifierVisible:
                        if self.boxModifierAnnotationName == annoId:
                            self.box_modifier_hide()
                    # now recreate
                    if anno["type"] =="threshold":
                        self.draw_threshold(anno)
                    else:
                        self.draw_motif(anno)
        #now execute the changes
        # dead branch kept from an earlier approach (hide instead of delete)
        if 0:
            for entry in deleteList:
                # we only switch it invisible for now, we don't delete the
                # renderer, as this takes too long
                r = self.find_renderer(entry)
                if r:
                    r.visible = False
        if self.showAnnotations and createdTimeAnnos != []:
            self.show_annotations(createdTimeAnnos,fetch=False) # this will put them to the plot renderes
        #self.show_annotations()
        self.remove_renderers() # execute at least the deletes
def update_annotations_and_thresholds_old_part(self,arg,lastAnnotations,newAnnotations,differential):
self.logger.debug(f"update_annotations_and_thresholds_old_part")
#check for deletes
# the delete check is fast enough so no need to improve with differential
deleteList = [] # a list of ids
for annoId,anno in lastAnnotations.items():
if anno["type"]=="time":
continue
if annoId not in newAnnotations:
self.logger.debug(f"update_annotations() -- annotations was deleted on server: {annoId}, {lastAnnotations[annoId]['name']}")
deleteList.append(annoId)
if annoId in self.renderers:
with self.renderersLock:
self.renderersGarbage.append(self.renderers[annoId]["renderer"])
del self.renderers[annoId]
else:
self.delete_annotations([annoId])
self.logger.debug(f"update_annotations() -- must delete {deleteList}")
if self.boxModifierVisible:
if self.boxModifierAnnotationName in | |
same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "CrossReplicaSum", name,
tld.op_callbacks, input, group_assignment)
return _result
except _core._FallbackException:
try:
return cross_replica_sum_eager_fallback(
input, group_assignment, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"CrossReplicaSum", input=input, group_assignment=group_assignment,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"CrossReplicaSum", _inputs_flat, _attrs, _result)
_result, = _result
return _result
# Expose the op under tf.raw_ops.CrossReplicaSum.
CrossReplicaSum = tf_export("raw_ops.CrossReplicaSum")(_ops.to_raw_op(cross_replica_sum))
def cross_replica_sum_eager_fallback(input, group_assignment, name, ctx):
  """Python fallback for CrossReplicaSum when the eager fast path bails out."""
  # Resolve the input dtype and normalize group_assignment to an int32 tensor.
  attr_t, (input,) = _execute.args_to_matching_eager([input], ctx)
  group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32)
  flat_inputs = [input, group_assignment]
  op_attrs = ("T", attr_t)
  results = _execute.execute(b"CrossReplicaSum", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CrossReplicaSum", flat_inputs, op_attrs, results)
  # The op has exactly one output.
  single_result, = results
  return single_result
# Machine-generated-style op wrapper: eager fast path with graph fallback.
def enqueue_tpu_embedding_integer_batch(batch, mode_override, device_ordinal=-1, name=None):
  r"""An op that enqueues a list of input batch tensors to TPUEmbedding.
  Args:
    batch: A list of at least 1 `Tensor` objects with type `int32`.
      A list of 1D tensors, one for each embedding table, containing the
      indices into the tables.
    mode_override: A `Tensor` of type `string`.
      A string input that overrides the mode specified in the
      TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
      'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
      in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
    device_ordinal: An optional `int`. Defaults to `-1`.
      The TPU device to use. Should be >= 0 and less than the number
      of TPU cores in the task on which the node is placed.
    name: A name for the operation (optional).
  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path; on _FallbackException use the Python
    # fallback; a _SymbolicException falls through to graph construction.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name,
        "EnqueueTPUEmbeddingIntegerBatch", name, tld.op_callbacks, batch,
        mode_override, "device_ordinal", device_ordinal)
      return _result
    except _core._FallbackException:
      try:
        return enqueue_tpu_embedding_integer_batch_eager_fallback(
            batch, mode_override, device_ordinal=device_ordinal, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  if not isinstance(batch, (list, tuple)):
    raise TypeError(
        "Expected list for 'batch' argument to "
        "'enqueue_tpu_embedding_integer_batch' Op, not %r." % batch)
  _attr_N = len(batch)
  if device_ordinal is None:
    device_ordinal = -1
  device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "EnqueueTPUEmbeddingIntegerBatch", batch=batch,
                                           mode_override=mode_override,
                                           device_ordinal=device_ordinal,
                                           name=name)
  # This op has no outputs; return the Operation itself.
  return _op
# Expose the op under tf.raw_ops.EnqueueTPUEmbeddingIntegerBatch.
EnqueueTPUEmbeddingIntegerBatch = tf_export("raw_ops.EnqueueTPUEmbeddingIntegerBatch")(_ops.to_raw_op(enqueue_tpu_embedding_integer_batch))
def enqueue_tpu_embedding_integer_batch_eager_fallback(batch, mode_override, device_ordinal, name, ctx):
  """Python fallback for EnqueueTPUEmbeddingIntegerBatch in eager mode.

  Validates arguments, converts them to tensors and executes the op;
  the op produces no outputs, so None is returned.
  """
  if not isinstance(batch, (list, tuple)):
    raise TypeError(
        "Expected list for 'batch' argument to "
        "'enqueue_tpu_embedding_integer_batch' Op, not %r." % batch)
  num_tables = len(batch)
  # Normalize the optional device ordinal (default -1 = "let runtime pick").
  device_ordinal = -1 if device_ordinal is None else device_ordinal
  device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
  batch = _ops.convert_n_to_tensor(batch, _dtypes.int32)
  mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string)
  flat_inputs = list(batch) + [mode_override]
  op_attrs = ("N", num_tables, "device_ordinal", device_ordinal)
  _execute.execute(b"EnqueueTPUEmbeddingIntegerBatch", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None
# Machine-generated-style op wrapper: eager fast path with graph fallback.
# NOTE(review): `combiners=[]` is a mutable default argument; harmless here
# because it is only rebound, never mutated, but flagged for awareness.
def enqueue_tpu_embedding_sparse_batch(sample_indices, embedding_indices, aggregation_weights, mode_override, device_ordinal=-1, combiners=[], name=None):
  r"""An op that enqueues TPUEmbedding input indices from a SparseTensor.
  This Op eases the porting of code that uses embedding_lookup_sparse(),
  although some Python preprocessing of the SparseTensor arguments to
  embedding_lookup_sparse() is required to produce the arguments to this Op,
  since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training
  step.
  The tensors at corresponding positions in the three input lists
  must have the same shape, i.e. rank 1 with dim_size() equal to the total
  number of lookups into the table described by the corresponding table_id.
  Args:
    sample_indices: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`.
      A list of rank 1 Tensors specifying the training example and
      feature to which the corresponding embedding_indices and aggregation_weights
      values belong. sample_indices[i] must equal b * nf + f, where nf is the
      number of features from the corresponding table, f is in [0, nf), and
      b is in [0, batch size).
    embedding_indices: A list with the same length as `sample_indices` of `Tensor` objects with the same type in: `int32`, `int64`.
      A list of rank 1 Tensors, indices into the embedding tables.
    aggregation_weights: A list with the same length as `sample_indices` of `Tensor` objects with the same type in: `float32`, `float64`.
      A list of rank 1 Tensors containing per sample -- i.e. per
      (training example, feature) -- aggregation weights.
    mode_override: A `Tensor` of type `string`.
      A string input that overrides the mode specified in the
      TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
      'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
      in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
    device_ordinal: An optional `int`. Defaults to `-1`.
      The TPU device to use. Should be >= 0 and less than the number
      of TPU cores in the task on which the node is placed.
    combiners: An optional list of `strings`. Defaults to `[]`.
      A list of string scalars, one for each embedding table that specify
      how to normalize the embedding activations after weighted summation.
      Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
      the sum of the weights be 0 for 'mean' or the sum of the squared weights be
      0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
      all tables.
    name: A name for the operation (optional).
  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path; on _FallbackException use the Python
    # fallback; a _SymbolicException falls through to graph construction.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name,
        "EnqueueTPUEmbeddingSparseBatch", name, tld.op_callbacks,
        sample_indices, embedding_indices, aggregation_weights, mode_override,
        "device_ordinal", device_ordinal, "combiners", combiners)
      return _result
    except _core._FallbackException:
      try:
        return enqueue_tpu_embedding_sparse_batch_eager_fallback(
            sample_indices, embedding_indices, aggregation_weights,
            mode_override, device_ordinal=device_ordinal, combiners=combiners,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # The three tensor lists must agree in length (attr N).
  if not isinstance(sample_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sample_indices' argument to "
        "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % sample_indices)
  _attr_N = len(sample_indices)
  if not isinstance(embedding_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'embedding_indices' argument to "
        "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % embedding_indices)
  if len(embedding_indices) != _attr_N:
    raise ValueError(
        "List argument 'embedding_indices' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d "
        "must match length %d of argument 'sample_indices'." %
        (len(embedding_indices), _attr_N))
  if not isinstance(aggregation_weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'aggregation_weights' argument to "
        "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % aggregation_weights)
  if len(aggregation_weights) != _attr_N:
    raise ValueError(
        "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d "
        "must match length %d of argument 'sample_indices'." %
        (len(aggregation_weights), _attr_N))
  if device_ordinal is None:
    device_ordinal = -1
  device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
  if combiners is None:
    combiners = []
  if not isinstance(combiners, (list, tuple)):
    raise TypeError(
        "Expected list for 'combiners' argument to "
        "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % combiners)
  combiners = [_execute.make_str(_s, "combiners") for _s in combiners]
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "EnqueueTPUEmbeddingSparseBatch", sample_indices=sample_indices,
                                          embedding_indices=embedding_indices,
                                          aggregation_weights=aggregation_weights,
                                          mode_override=mode_override,
                                          device_ordinal=device_ordinal,
                                          combiners=combiners, name=name)
  # This op has no outputs; return the Operation itself.
  return _op
# Expose the op under tf.raw_ops.EnqueueTPUEmbeddingSparseBatch.
EnqueueTPUEmbeddingSparseBatch = tf_export("raw_ops.EnqueueTPUEmbeddingSparseBatch")(_ops.to_raw_op(enqueue_tpu_embedding_sparse_batch))
def enqueue_tpu_embedding_sparse_batch_eager_fallback(sample_indices, embedding_indices, aggregation_weights, mode_override, device_ordinal, combiners, name, ctx):
if not isinstance(sample_indices, (list, tuple)):
raise TypeError(
"Expected list for 'sample_indices' argument to "
"'enqueue_tpu_embedding_sparse_batch' Op, not %r." % sample_indices)
_attr_N = len(sample_indices)
if not isinstance(embedding_indices, (list, tuple)):
raise TypeError(
"Expected list for 'embedding_indices' argument to "
"'enqueue_tpu_embedding_sparse_batch' Op, not %r." % embedding_indices)
if len(embedding_indices) != _attr_N:
raise ValueError(
"List argument 'embedding_indices' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d "
"must match length %d of argument 'sample_indices'." %
(len(embedding_indices), _attr_N))
if not isinstance(aggregation_weights, (list, tuple)):
raise TypeError(
"Expected list for 'aggregation_weights' argument to "
"'enqueue_tpu_embedding_sparse_batch' Op, not %r." % aggregation_weights)
if len(aggregation_weights) != _attr_N:
raise ValueError(
"List argument 'aggregation_weights' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d "
"must match length %d of argument 'sample_indices'." %
(len(aggregation_weights), _attr_N))
if device_ordinal is None:
device_ordinal = -1
device_ordinal = _execute.make_int(device_ordinal, "device_ordinal")
if combiners is None:
combiners = []
if not isinstance(combiners, (list, tuple)):
raise TypeError(
"Expected list for 'combiners' argument to "
"'enqueue_tpu_embedding_sparse_batch' Op, not %r." % combiners)
combiners = [_execute.make_str(_s, "combiners") for _s in combiners]
_attr_T1, sample_indices = _execute.args_to_matching_eager(list(sample_indices), ctx, _dtypes.int32)
_attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, _dtypes.int32)
_attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, _dtypes.float32)
mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string)
_inputs_flat = list(sample_indices) + list(embedding_indices) + list(aggregation_weights) + [mode_override]
_attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", | |
# <gh_stars>0  (scraper metadata, not valid Python; kept as a comment)
from __future__ import absolute_import, unicode_literals

import ast
import json
import os

import redis
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Q
from django.shortcuts import render, HttpResponse, redirect
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.cache import cache_page
from django.views.decorators.csrf import csrf_exempt
from openpyxl import Workbook
from openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font, Color, colors, GradientFill, NamedStyle
from openpyxl.utils import get_column_letter

from app_asset import models as asset_db
from app_asset.tasks import sync_host
from app_auth import models as auth_db
from app_auth.views import login_check, perms_check
from app_log import models as log_db
from saltops_v2.settings import SECRET_KEY, BASE_DIR, DATABASES
from saltops_v2.settings import SERVER_TAG, ANSIBLE_USER, WEBSSH_URL, REDIS_INFO
from statics.scripts import encryption, read_excel
from statics.scripts import page as pg

# Create your views here.
class IDC(View):
    """CRUD views for IDC (data-center) records."""

    @method_decorator(csrf_exempt)
    @method_decorator(login_check)
    @method_decorator(perms_check)
    def dispatch(self, request, *args, **kwargs):
        # Every verb requires an authenticated user with the matching permission.
        return super(IDC, self).dispatch(request, *args, **kwargs)

    def get(self, request):
        """Render the IDC list page (locals() is the template context)."""
        title = "IDC 机房"
        idc_obj = asset_db.IDC.objects.all()
        return render(request, 'asset/asset_idc.html', locals())

    def post(self, request):
        """Create an IDC record from form data."""
        idc_name = request.POST.get("idc_name")
        idc_msg = request.POST.get("idc_msg")
        idc_admin = request.POST.get("idc_admin")
        idc_admin_phone = request.POST.get("idc_admin_phone")
        idc_admin_email = request.POST.get("idc_admin_email")
        idc_obj = asset_db.IDC(idc_name=idc_name, idc_msg=idc_msg, idc_admin=idc_admin,
                               idc_admin_phone=idc_admin_phone,
                               idc_admin_email=idc_admin_email)
        idc_obj.save()
        data = '机房已添加,请刷新查看!'
        return HttpResponse(data)

    def put(self, request):
        """Update an IDC (when 'action' is set) or return its data for the edit form."""
        # SECURITY FIX: the body used to be parsed with eval(), which executes
        # arbitrary expressions from the client; ast.literal_eval accepts only
        # Python literals (dicts, strings, numbers, ...).
        req_info = ast.literal_eval(request.body.decode())
        idc_id = req_info.get("idc_id")
        idc_name = req_info.get("idc_name")
        idc_msg = req_info.get("idc_msg")
        idc_admin = req_info.get("idc_admin")
        idc_admin_phone = req_info.get("idc_admin_phone")
        idc_admin_email = req_info.get("idc_admin_email")
        action = req_info.get("action", None)
        if action:
            # Apply the modification.
            idc_obj = asset_db.IDC.objects.get(id=idc_id)
            idc_obj.idc_name = idc_name
            idc_obj.idc_msg = idc_msg
            idc_obj.idc_admin = idc_admin
            idc_obj.idc_admin_phone = idc_admin_phone
            idc_obj.idc_admin_email = idc_admin_email
            idc_obj.save()
            data = "IDC已修改,请刷新查看!"
            return HttpResponse(data)
        else:
            # Return the current values so the edit form can be pre-filled.
            idc_info = asset_db.IDC.objects.get(id=idc_id)
            info_json = {'idc_id': idc_info.id, 'idc_name': idc_info.idc_name, 'idc_msg': idc_info.idc_msg,
                         'idc_admin': idc_info.idc_admin, 'idc_admin_phone': idc_info.idc_admin_phone, 'idc_admin_email': idc_info.idc_admin_email}
            data = json.dumps(info_json)
            return HttpResponse(data)

    def delete(self, request):
        """Delete an IDC record."""
        req_info = ast.literal_eval(request.body.decode())  # see security note in put()
        idc_id = req_info.get("idc_id")
        asset_db.IDC.objects.get(id=idc_id).delete()
        data = "IDC 已删除,请刷新查看!"
        return HttpResponse(data)
class HostGroup(View):
    """CRUD views for host groups."""

    @method_decorator(csrf_exempt)
    @method_decorator(login_check)
    @method_decorator(perms_check)
    def dispatch(self, request, *args, **kwargs):
        # Every verb requires an authenticated user with the matching permission.
        return super(HostGroup, self).dispatch(request, *args, **kwargs)

    def get(self, request):
        """Render the host-group list page (locals() is the template context)."""
        title = "主机分组"
        group_obj = asset_db.HostGroup.objects.all()
        return render(request, 'asset/asset_group.html', locals())

    def post(self, request):
        """Create a host group from form data."""
        group_name = request.POST.get("group_name")
        group_msg = request.POST.get("group_msg")
        group_obj = asset_db.HostGroup(host_group_name=group_name, host_group_msg=group_msg)
        group_obj.save()
        data = '分组已添加,请刷新查看!'
        return HttpResponse(data)

    def put(self, request):
        """Update a group (when 'action' is set) or return its data for the edit form."""
        # SECURITY FIX: replaced eval() on the request body with
        # ast.literal_eval, which cannot execute arbitrary code.
        req_info = ast.literal_eval(request.body.decode())
        group_id = req_info.get("group_id")
        group_name = req_info.get("group_name")
        group_msg = req_info.get("group_msg")
        action = req_info.get("action", None)
        if action:
            # Apply the modification.
            group_obj = asset_db.HostGroup.objects.get(id=group_id)
            group_obj.host_group_name = group_name
            group_obj.host_group_msg = group_msg
            group_obj.save()
            data = "分组已修改,请刷新查看!"
            return HttpResponse(data)
        else:
            # Return the current values so the edit form can be pre-filled.
            group_info = asset_db.HostGroup.objects.get(id=group_id)
            info_json = {'group_id': group_info.id, 'group_name': group_info.host_group_name, 'group_msg': group_info.host_group_msg}
            data = json.dumps(info_json)
            return HttpResponse(data)

    def delete(self, request):
        """Delete a host group."""
        req_info = ast.literal_eval(request.body.decode())  # see security note in put()
        group_id = req_info.get("group_id")
        asset_db.HostGroup.objects.get(id=group_id).delete()
        data = "分组已删除,请刷新查看!"
        return HttpResponse(data)
class Supplier(View):
    """CRUD views for hardware suppliers."""

    @method_decorator(csrf_exempt)
    @method_decorator(login_check)
    @method_decorator(perms_check)
    def dispatch(self, request, *args, **kwargs):
        # Every verb requires an authenticated user with the matching permission.
        return super(Supplier, self).dispatch(request, *args, **kwargs)

    def get(self, request):
        """Render the supplier list page (locals() is the template context)."""
        title = "设备厂商"
        supplier_obj = asset_db.Supplier.objects.all()
        return render(request, 'asset/asset_supplier.html', locals())

    def post(self, request):
        """Create a supplier from form data."""
        supplier = request.POST.get("supplier")
        supplier_head = request.POST.get("supplier_head")
        supplier_head_phone = request.POST.get("supplier_head_phone")
        supplier_head_email = request.POST.get("supplier_head_email")
        supplier_obj = asset_db.Supplier(supplier=supplier, supplier_head=supplier_head,
                                         supplier_head_phone=supplier_head_phone,
                                         supplier_head_email=supplier_head_email)
        supplier_obj.save()
        data = '供应商已添加,请刷新查看!'
        return HttpResponse(data)

    def put(self, request):
        """Update a supplier (when 'action' is set) or return its data for the edit form."""
        # SECURITY FIX: replaced eval() on the request body with
        # ast.literal_eval, which cannot execute arbitrary code.
        req_info = ast.literal_eval(request.body.decode())
        supplier_id = req_info.get("supplier_id")
        supplier = req_info.get("supplier")
        supplier_head = req_info.get("supplier_head")
        supplier_head_phone = req_info.get("supplier_head_phone")
        supplier_head_email = req_info.get("supplier_head_email")
        action = req_info.get("action", None)
        if action:
            # Apply the modification.
            supplier_obj = asset_db.Supplier.objects.get(id=supplier_id)
            supplier_obj.supplier = supplier
            supplier_obj.supplier_head = supplier_head
            supplier_obj.supplier_head_phone = supplier_head_phone
            supplier_obj.supplier_head_email = supplier_head_email
            supplier_obj.save()
            data = "供应商已修改,请刷新查看!"
            return HttpResponse(data)
        else:
            # Return the current values so the edit form can be pre-filled.
            supplier_info = asset_db.Supplier.objects.get(id=supplier_id)
            info_json = {'supplier_id': supplier_info.id, 'supplier': supplier_info.supplier, 'supplier_head': supplier_info.supplier_head,
                         'supplier_head_phone': supplier_info.supplier_head_phone, 'supplier_head_email': supplier_info.supplier_head_email}
            data = json.dumps(info_json)
            return HttpResponse(data)

    def delete(self, request):
        """Delete a supplier."""
        req_info = ast.literal_eval(request.body.decode())  # see security note in put()
        supplier_id = req_info.get("supplier_id")
        asset_db.Supplier.objects.get(id=supplier_id).delete()
        data = "supplier 已删除,请刷新查看!"
        return HttpResponse(data)
class Host(View):
    """CRUD + paginated listing views for servers.

    Remote passwords are AES-encrypted (via encryption.prpcrypt keyed from
    SECRET_KEY) before storage and decrypted only for the edit form.
    """

    @method_decorator(csrf_exempt)
    @method_decorator(login_check)
    @method_decorator(perms_check)
    def dispatch(self, request, *args, **kwargs):
        # Every verb requires an authenticated user with the matching permission.
        return super(Host, self).dispatch(request, *args, **kwargs)

    def get(self, request, page=1):
        """Render the paginated host list restricted to the session role's hosts."""
        title = "服务器"
        supplier_obj = asset_db.Supplier.objects.all()
        group_obj = asset_db.HostGroup.objects.all()
        idc_obj = asset_db.IDC.objects.all()
        role_id = request.session['role_id']
        role_obj = auth_db.Role.objects.get(id=role_id)
        host_obj = role_obj.host.all()
        pagesize = 13
        paginator = Paginator(host_obj, pagesize)
        # Normalize the requested page number and build the pager widget data.
        currentPage = int(page)
        page_nums = paginator.num_pages
        page_list = pg.control(currentPage, page_nums)
        try:
            host_list = paginator.page(page)  # records for the requested page
        except PageNotAnInteger:
            host_list = paginator.page(1)  # non-integer page -> first page
        except EmptyPage:
            host_list = paginator.page(paginator.num_pages)  # out of range -> last page
        host_info_list = []
        for i in host_list:
            try:
                host_detail_obj = asset_db.HostDetail.objects.get(host_id=i.id)
                host_status = host_detail_obj.host_status
            except Exception:
                # No detail record synced yet for this host.
                host_status = "None"
            # Foreign keys may be unset; templates expect the string "None".
            host_group_name = i.group.host_group_name if i.group else "None"
            supplier_name = i.supplier.supplier if i.supplier else "None"
            idc_name = i.idc.idc_name if i.idc else "None"
            host_info_list.append({"id": i.id, "host_ip": i.host_ip, "host_type": i.host_type, "group_name": host_group_name, "host_msg": i.host_msg,
                                   "supplier": supplier_name, "idc_name": idc_name, "host_status": host_status, "overdue_date": i.overdue_date})
        webssh_url = WEBSSH_URL
        return render(request, 'asset/asset_host.html', locals())

    def post(self, request):
        """Create a host; the remote password is AES-encrypted before storage."""
        host_ip = request.POST.get("host_ip")
        host_remove_port = request.POST.get("host_remove_port")
        host_user = request.POST.get("host_user")
        host_passwd = request.POST.get("host_passwd")
        host_type = request.POST.get("host_type")
        host_group = request.POST.get("host_group")
        host_idc = request.POST.get("host_idc")
        host_supplier = request.POST.get("host_supplier")
        host_msg = request.POST.get("host_msg")
        serial_num = request.POST.get("serial_num")
        purchase_date = request.POST.get("purchase_date")
        overdue_date = request.POST.get("overdue_date")
        # "0" from the select widgets means "not set".
        if host_group == "0":
            host_group = None
        if host_idc == "0":
            host_idc = None
        if host_supplier == "0":
            host_supplier = None
        # Encrypt the password before persisting it.
        key = SECRET_KEY[2:18]
        pc = encryption.prpcrypt(key)  # initialize cipher with derived key
        aes_passwd = pc.encrypt(host_passwd)
        # FIX: store the encrypted password (the placeholder left here made
        # this method a syntax error).
        host_obj = asset_db.Host(host_ip=host_ip, host_remove_port=host_remove_port, host_user=host_user,
                                 host_passwd=aes_passwd, host_type=host_type,
                                 group_id=host_group, idc_id=host_idc, supplier_id=host_supplier,
                                 host_msg=host_msg, serial_num=serial_num,
                                 purchase_date=purchase_date, overdue_date=overdue_date)
        host_obj.save()
        # Grant the creating role access to the new host.
        role_id = request.session['role_id']
        role_obj = auth_db.Role.objects.get(id=role_id)
        role_obj.host.add(host_obj)
        data = '服务器已添加,请刷新查看!'
        return HttpResponse(data)

    def put(self, request):
        """Update a host (when 'action' is set) or return its data for the edit form."""
        # SECURITY FIX: replaced eval() on the request body with
        # ast.literal_eval, which cannot execute arbitrary code.
        req_info = ast.literal_eval(request.body.decode())
        host_id = req_info.get("host_id")
        host_ip = req_info.get("host_ip")
        host_remove_port = req_info.get("host_remove_port")
        host_user = req_info.get("host_user")
        host_passwd = req_info.get("host_passwd")
        host_type = req_info.get("host_type")
        host_group = req_info.get("host_group")
        host_idc = req_info.get("host_idc")
        host_supplier = req_info.get("host_supplier")
        host_msg = req_info.get("host_msg")
        serial_num = req_info.get("serial_num")
        purchase_date = req_info.get("purchase_date")
        overdue_date = req_info.get("overdue_date")
        action = req_info.get("action", None)
        if action:
            # Re-encrypt the (possibly changed) password.
            key = SECRET_KEY[2:18]
            pc = encryption.prpcrypt(key)  # initialize cipher with derived key
            aes_passwd = pc.encrypt(host_passwd)
            # "0" from the select widgets means "not set".
            if host_group == "0":
                host_group = None
            if host_idc == "0":
                host_idc = None
            if host_supplier == "0":
                host_supplier = None
            host_obj = asset_db.Host.objects.get(id=host_id)
            host_obj.host_ip = host_ip
            host_obj.host_remove_port = host_remove_port
            host_obj.host_user = host_user
            # FIX: persist the encrypted password (placeholder was a syntax error).
            host_obj.host_passwd = aes_passwd
            host_obj.host_type = host_type
            host_obj.group_id = host_group
            host_obj.idc_id = host_idc
            host_obj.supplier_id = host_supplier
            host_obj.host_msg = host_msg
            host_obj.serial_num = serial_num
            host_obj.purchase_date = purchase_date
            host_obj.overdue_date = overdue_date
            host_obj.save()
            data = "服务器已修改,请刷新查看!"
            return HttpResponse(data)
        else:
            # Return the current values (password decrypted) for the edit form.
            host_info = asset_db.Host.objects.get(id=host_id)
            key = SECRET_KEY[2:18]
            pc = encryption.prpcrypt(key)
            # Stored value looks like "b'...'" — strip the bytes-repr wrapper
            # before decrypting.
            passwd = host_info.host_passwd.strip("b").strip("'").encode(encoding="utf-8")
            de_passwd = pc.decrypt(passwd).decode()
            info_json = {'host_id': host_info.id, 'host_ip': host_info.host_ip, 'host_remove_port': host_info.host_remove_port,
                         'host_user': host_info.host_user, 'host_passwd': de_passwd, 'host_type': host_info.host_type,
                         'host_group': host_info.group_id, 'host_idc': host_info.idc_id, 'host_supplier': host_info.supplier_id,
                         'host_msg': host_info.host_msg, 'serial_num': host_info.serial_num, 'purchase_date': host_info.purchase_date, 'overdue_date': host_info.overdue_date}
            data = json.dumps(info_json, ensure_ascii=False)
            return HttpResponse(data)

    def delete(self, request):
        """Delete a host."""
        req_info = ast.literal_eval(request.body.decode())  # see security note in put()
        host_id = req_info.get("host_id")
        asset_db.Host.objects.get(id=host_id).delete()
        data = "服务器已删除,请刷新查看!"
        return HttpResponse(data)
@login_check
@perms_check
@cache_page(60*5)
def host_detail(request, id):
    """Render the detail page for one host (response cached for 5 minutes).

    Computes memory/swap/disk usage percentages from the JSON blobs stored on
    the HostDetail record; all locals become the template context.
    """
    title = "服务器"
    host_obj = asset_db.Host.objects.get(id=id)
    try:
        host_detail = asset_db.HostDetail.objects.get(host_id=id)
        disk_info = json.loads(host_detail.disk_info)
        interface = json.loads(host_detail.interface)
        software_obj = asset_db.software.objects.filter(host_id=id)
        software_list = []
        for i in software_obj:
            software_list.append({"server_name": i.server_name, "server_version": i.server_version,
                                  "server_port": i.server_port})
        mem_info = json.loads(host_detail.mem_info)
        mem_total = mem_info["total"]
        mem_used = mem_info["used"]
        mem_free = mem_info["free"]
        mem_usage = round((float(mem_used)/float(mem_total))*100, 2)
        mem_free_p = round((100-mem_usage), 2)
        swap_info = json.loads(host_detail.swap_info)
        swap_total = swap_info["total"]
        if swap_total == 0:
            # No swap configured — avoid division by zero.
            swap_usage = 0
            swap_free_p = 0
        else:
            swap_used = swap_info["used"]
            swap_free = swap_info["free"]
            swap_usage = round((float(swap_used) / float(swap_total)) * 100, 2)
            swap_free_p = 100 - swap_usage
        # FIX: disk_info was json.loads()-ed a second time here with the same
        # source; the load above is sufficient.
        disk_list = []
        for i in disk_info:
            device = i['device']
            mount = i['mount']
            fstype = i['fstype']
            total = i['size_total']
            free = i['size_available']
            free_rate = round(float(free) / float(total)*100, 2)
            usage = round((100-free_rate), 2)
            disk_list.append({'device': device, "mount": mount, "fstype": fstype, "total": int(total/1024/1024), "usage": usage, "free_rate": free_rate})
        return render(request, "asset/asset_host_detail.html", locals())
    except Exception:
        # Missing or malformed detail data -> plain "not synced" notice.
        # (Narrowed from a bare except, which also swallowed SystemExit etc.)
        return HttpResponse("信息未同步")
@csrf_exempt
@login_check
@perms_check
def sync_host_info(request):
    """Kick off an async sync of host system info and record the task.

    POST 'ids' is either the literal 'all' or a comma-separated id list.
    """
    ids = request.POST.get("ids")
    if ids == 'all':
        # Sync every known host.
        ips = [host.host_ip for host in asset_db.Host.objects.all()]
    else:
        # Sync only the selected hosts.
        ips = [
            asset_db.Host.objects.get(id=host_id).host_ip
            for host_id in ids.strip(',').split(',')
        ]
    # Dispatch the celery task with JSON-encoded arguments.
    tk = sync_host.delay(json.dumps(ips), ANSIBLE_USER, json.dumps(SERVER_TAG))
    data = "信息同步中,任务ID:{}".format(tk.id)
    # Record who started the task so it shows up in the task log.
    user_name = request.session['user_name']
    user_obj = auth_db.User.objects.get(user_name=user_name)
    task_obj = log_db.TaskRecord(task_name="同步服务器信息", task_id=tk.id,
                                 status=tk.state, task_user_id=user_obj.id)
    task_obj.save()
    return HttpResponse(data)
@csrf_exempt
@login_check
@perms_check
def search_host(request):
    """Filter the host list by keyword, IDC, group or type (role-scoped)."""
    idc_id = request.POST.get('idc_id', None)
    group_id = request.POST.get('hostgroup_id', None)
    host_type = request.POST.get('host_type', None)
    search_key = request.POST.get('search_key', None)
    role_id = request.session['role_id']
    # FIX: default to an empty queryset — previously a request without any
    # filter parameter raised NameError on the loop below.
    host_obj = asset_db.Host.objects.none()
    if search_key:
        host_obj = asset_db.Host.objects.filter((Q(host_ip__icontains=search_key) | Q(host_msg__icontains=search_key) | Q(host_type__icontains=search_key)) & Q(role__id=role_id))
    if idc_id:
        host_obj = asset_db.Host.objects.filter(Q(idc_id=idc_id) & Q(role__id=role_id))
    if group_id:
        host_obj = asset_db.Host.objects.filter(Q(group_id=group_id) & Q(role__id=role_id))
    if host_type:
        host_obj = asset_db.Host.objects.filter(Q(host_type=host_type) & Q(role__id=role_id))
    host_list = []
    for i in host_obj:
        try:
            host_status = asset_db.HostDetail.objects.get(host_id=i.id).host_status
        except Exception:
            # No detail record synced yet for this host.
            host_status = "Unknown"
        # FIX: guard unset foreign keys (same handling as Host.get) —
        # previously i.group / i.supplier / i.idc being None crashed here.
        group_name = i.group.host_group_name if i.group else "None"
        supplier_name = i.supplier.supplier if i.supplier else "None"
        idc_name = i.idc.idc_name if i.idc else "None"
        host_list.append({"id": i.id, "host_ip": i.host_ip, "host_type": i.host_type, "group_name": group_name,
                          "host_msg": i.host_msg,
                          "supplier": supplier_name, "idc_name": idc_name, "host_status": host_status})
    return render(request, "asset/asset_host_search.html", locals())
@csrf_exempt
@login_check
@perms_check
def del_host(request):
    """批量删除服务器

    Batch-delete hosts by the comma-separated id list in the POST body.
    """
    ids = request.POST.get("ids")
    # The original loop was `for ids in ids`, shadowing the iterable with its
    # own loop variable; renamed for clarity. `.get()` (not `filter`) is kept
    # so an unknown id still raises loudly instead of being silently skipped.
    for host_id in ids.strip(',').split(','):
        asset_db.Host.objects.get(id=host_id).delete()
    return HttpResponse("服务器已删除,请刷新页面")
@csrf_exempt
@login_check
@perms_check
def connect_host(request):
    """连接服务器

    Publish webssh connection details for the selected host to redis; the
    websocket SSH proxy consumes them from the short-lived key below.
    """
    # SECURITY: eval() on a client-controlled request body executes arbitrary
    # Python. Switch to json.loads() once the front-end is confirmed to send
    # strict JSON — flagged here rather than changed silently.
    req_info = eval(request.body.decode())
    host_id = req_info.get("host_id")
    host_obj = asset_db.Host.objects.get(id=host_id)
    redis_obj = redis.Redis(host=REDIS_INFO["host"], port=REDIS_INFO["port"])
    remote_user = request.session['remote_user']
    remote_passwd = request.session['remote_passwd']
    remote_sshkey = request.session['remote_sshkey']
    remote_sshkey_pass = request.session['remote_sshkey_pass']
    # Normalise empty credentials to the literal string "None" expected by
    # the webssh consumer.
    if remote_sshkey:
        if not remote_sshkey_pass:
            remote_sshkey_pass = "None"
    else:
        remote_sshkey = "None"
        remote_sshkey_pass = "None"
    # "password" restored to the session value read above (the source had a
    # scrubbed placeholder here).
    webssh_info = {"username": remote_user, "publickey": remote_sshkey,
                   "password": remote_passwd, "hostname": host_obj.host_ip,
                   "port": host_obj.host_remove_port, "key_pass": remote_sshkey_pass}
    # ex=5: credentials live for five seconds only; nx=True: never overwrite
    # an in-flight connection attempt.
    redis_obj.set("webssh_info", json.dumps(webssh_info), ex=5, nx=True)
    return HttpResponse("已连接到服务器")
@csrf_exempt
@login_check
@perms_check
def import_host(request):
"""导入服务器信息"""
upload_file = request.FILES.get("upload_file", None)
filename = os.path.join(BASE_DIR,"statics/media/import_asset.xlsx")
file_obj = open(filename,'wb')
for chrunk in upload_file.chunks():
file_obj.write(chrunk)
file_obj.close()
data = read_excel.import_host(filename)
role_id = request.session['role_id']
role_obj = auth_db.Role.objects.get(id=role_id)
for i in data:
host_ip = i[0]
host_idc = i[1]
host_type = i[2]
host_group = i[3]
host_user = i[4]
host_passwd = <PASSWORD>]
host_msg = i[6]
host_remove_port = i[7]
serial_num = i[8]
purchase_date = i[9]
overdue_date = i[10]
host_supplier = i[11]
supplier_head = i[12]
| |
import time
import re
import pyautogui
import requests
import sys
import random
import pandas as pd
import imagehash
import openpyxl
from selenium import webdriver
from bs4 import BeautifulSoup
from datetime import datetime
from PIL import Image
from openpyxl.utils.dataframe import dataframe_to_rows
class MyLikes:
def __init__(self, url, driver_path, records_path) -> None:
    """Attach to an already-running Chrome session and initialise scraper state.

    Args:
        url: Tinder URL that selenium should open.
        driver_path: filesystem path to the chromedriver executable.
        records_path: path of the Excel workbook used as the database.
    """
    self.url = url  # URL for selenium
    self.incrementer = 0  # Variable to replace a count within a for loop for `main`
    self.card_identifier = dict()  # Unique identifier for a profile card
    self.picture_count = 0  # This helps to identify the profile card we're on and is also used in the filenames
    self.records = list()  # Storing the data to be written to an Excel workbook
    self.records_path = records_path  # Path to save the Excel workbook
    self.seen_cards = list()  # Which cards have already been seen by the script?
    self.now = datetime.utcnow()  # Store the start time, in GMT, of the script in a variable
    # Regexes: image URL inside a CSS background-image; the per-profile id
    # embedded in the image URL; the "N miles away" distance text.
    self.url_regEx = re.compile(pattern=r'url\(\"(https://.+\.jpg)\"')
    self.card_identifier_regEx = re.compile(pattern=r'https://images-ssl.gotinder.com/(.+)/\d{3}x')
    self.distance_regEx = re.compile(pattern=r'(\d+) miles away')
    self.options = webdriver.ChromeOptions()  # Standard for using Chrome with selenium
    # Attaches to a Chrome instance started with --remote-debugging-port=9222.
    self.options.add_experimental_option('debuggerAddress', 'localhost:9222')  # Running Chrome on localhost
    self.driver = webdriver.Chrome(executable_path=driver_path, options=self.options)  # Standard for using Chrome with selenium
    self.headers = {  # Headers for our requests. These are very important. Without them, we can get timed out or banned.
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36',
        'accept-language': 'en-US,en;q=0.9',
        'referer': 'https://tinder.com/',
        'dnt': '1'
    }
def __repr__(self) -> str:
return 'This script is intended to download all of the pictures and videos from the profiles on the "Likes Sent" section of Tinder.'
def load_workbook(self, initials=False) -> None:
    """Open the Excel workbook database and cache its 'Datetime' sheet.

    When ``initials`` is True, additionally snapshot the existing rows into
    a DataFrame and derive how many unique profile cards are already stored.
    """
    self.workbook = openpyxl.load_workbook(filename=self.records_path)
    self.sheet = self.workbook['Datetime']
    if not initials:
        return
    # Snapshot of the current database, used to skip already-seen cards.
    self.existing_df = pd.read_excel(self.records_path, sheet_name='Datetime', header=0)
    self.picture_count_from_workbook = len(set(self.existing_df['Card_ID']))
def close_workbook(self) -> None:
    """Persist pending changes and release the workbook handle."""
    self.workbook.save(filename=self.records_path)
    self.workbook.close()
def log_in(self) -> None:
    """Open Tinder in the attached Chrome session and navigate to "Likes Sent"."""
    self.driver.get(url=self.url)
    time.sleep(4)  # give the app shell time to render
    # Select by the anchor's href — more stable than Tinder's generated class names.
    likes_sent_xpath = '//a[@href="/app/my-likes"]'
    self.driver.find_element_by_xpath(xpath=likes_sent_xpath).click()
    time.sleep(3)
def main(self) -> None:
# while 1:
for _ in range(7):
# Sleep
time.sleep(3)
# Get the current page's HTML
final_html = self.driver.page_source
# Create a soup object
self.soup = BeautifulSoup(final_html, 'html.parser')
# Find all profile cards within the current HTML
cards = self.soup.find_all('div', {'aria-label': re.compile(pattern=r'.*'), 'class': 'Bdrs(8px) Bgz(cv) Bgp(c) StretchedBox'})
# Find the div's id for the div that holds the profile cards. This is important because Tinder frequently changes this id, the class name, etc.
div_id = self.soup.find('div', {'class': 'Sb(s) D(f) Jc(c) Fxd(c) Animtf(l) Animfm(f) Animdur(.75s) NetHeight(100%,--side-nav-bar-height)--ml H(100%) Ovy(s) Ovsb(n) Ovs(touch)'})['id']
# Iterate over the profile cards
for card in cards:
card_identifier = re.search(pattern=self.card_identifier_regEx, string=str(card)).group(1)
if sum([1 for item in set(self.existing_df['Card_ID']) if item == card_identifier]) == 1 and card_identifier not in self.seen_cards:
# Add to seen cards list
self.seen_cards.append(card_identifier)
# Add the card ID to the dictionary
self.card_identifier.setdefault(card_identifier, 0)
# Increment the picture count
self.picture_count += 1
continue # Since the profile card ID is in the "Card_ID" column of the current database, skip this card and go to the next card
elif self.card_identifier.get(card_identifier) is not None:
continue # Since the profile card ID is in the dictionary, skip this card and go to the next card
else: # Since we haven't gathered the profile data (i.e., the profile card data aren't in the current database and aren't in the current "records" list), gather now
# Click in the background
pyautogui.moveTo(x=1850, y=350, duration=0.1)
pyautogui.click()
# Add to seen cards list
self.seen_cards.append(card_identifier)
# Add the card ID to the dictionary
self.card_identifier.setdefault(card_identifier, 0)
# Increment the picture count
self.picture_count += 1
# Increment the picture count that originates from the Excel workbook only when we gather new data
self.picture_count_from_workbook += 1
# Click the relevant profile card
if self.driver.find_element_by_xpath(xpath=f'//*[@id="{div_id}"]/div[2]/div[{self.picture_count}]/div/div/span/div') is not None: # Tinder may change the div the xpath relates to. I can probably write a regular expression to account for this, but I manually updated this one.
try:
self.driver.find_element_by_xpath(xpath=f'//*[@id="{div_id}"]/div[2]/div[{self.picture_count}]/div/div/span/div').click()
except Exception as e:
self.driver.find_element_by_xpath(xpath=f'//*[@id="{div_id}"]/div[2]/div[{self.picture_count}]/div/div/span/div[2]/video').click()
elif self.driver.find_element_by_xpath(xpath=f'//*[@id="{div_id}"]/div[2]/div[{self.picture_count}]/div/div') is not None:
self.driver.find_element_by_xpath(xpath=f'//*[@id="{div_id}"]/div[2]/div[{self.picture_count}]/div/div').click()
else:
# Finish the script by writing the data to a dataframe then appending data to an Excel workbook's worksheet. Finally, call `sys.exit()`
sys.exit('The script is complete. There are no more profile cards to go through.')
# Sleep
time.sleep(1)
# Get HTML of the profile card
profile_html = self.driver.page_source
second_soup = BeautifulSoup(profile_html, 'html.parser')
# Try to get the name from the profile card
if second_soup.find('h1', {'class': 'Fz($xl) Fw($bold) Fxs(1) Fxw(w) Pend(8px) M(0) D(i)'}) is not None:
name = second_soup.find('h1', {'class': 'Fz($xl) Fw($bold) Fxs(1) Fxw(w) Pend(8px) M(0) D(i)'}).text.title()
else:
name = 'Name Not Found'
# This may be empty, but the span tag should always be there
age = second_soup.find('span', {'class': 'Whs(nw) Fz($l)'}).text
# Try to get the distance from me from the profile card
if second_soup.find('div', {'class': 'Fz($ms)'}) is not None:
row_text = str(second_soup.find('div', {'class': 'Fz($ms)'}))
if re.search(pattern=self.distance_regEx, string=row_text):
distance = re.search(pattern=self.distance_regEx, string=row_text).group(1)
else:
distance = 'Distance Not Found'
else:
distance = 'Distance Not Found'
# Try to get the bio from the profile card
if second_soup.find('div', {'class': 'P(16px) Us(t) C($c-secondary) BreakWord Whs(pl) Fz($ms)'}) is not None:
bio = second_soup.find('div', {'class': 'P(16px) Us(t) C($c-secondary) BreakWord Whs(pl) Fz($ms)'}).text
else:
bio = 'Bio Not Found'
# Try to get the passions from the profile card
if second_soup.find_all('div', {'class': 'Bdrs(100px) Bd D(ib) Va(m) Fz($xs) Mend(8px) Mb(8px) Px(8px) Py(4px) Bdc($c-secondary) C($c-secondary)'}) is not None:
passions = second_soup.find_all('div', {'class': 'Bdrs(100px) Bd D(ib) Va(m) Fz($xs) Mend(8px) Mb(8px) Px(8px) Py(4px) Bdc($c-secondary) C($c-secondary)'})
passions_text = ''
for passion in passions:
passions_text += passion.text + ','
passions_text = passions_text.strip(',')
else:
passions_text = 'Passions Not Found'
if passions_text == '':
passions_text = 'Passions Not Found'
# Try to get the "My Anthem" from the profile card
if second_soup.find('div', {'class': 'Mb(4px) Ell Fz($ms)'}) is not None:
song_title = second_soup.find('div', {'class': 'Mb(4px) Ell Fz($ms)'}).text
song_artist = second_soup.find('span', {'class': 'Mstart(4px) Ell'}).text
else:
song_title = 'No Song Title Found'
song_artist = 'No Song Artist Found'
# Get the total number of pages in the profile card
try:
number_of_pages = int(second_soup.find('button', {'class': 'bullet D(ib) Va(m) Cnt($blank)::a D(b)::a Cur(p) bullet--active H(4px)::a W(100%)::a Py(4px) Px(2px) W(100%) Bdrs(100px)::a Bgc(#fff)::a focus-background-style'}).text.split('/')[1])
except Exception:
number_of_pages = 1 # If there's only one page, there won't be a button.
# Iterate over the number of pages
for i in range(0, number_of_pages, 1):
time.sleep(1)
page_html = self.driver.page_source
page_soup = BeautifulSoup(page_html, 'html.parser')
current_card = page_soup.find('span', {'class': 'keen-slider__slide Wc($transform) Fxg(1)', 'aria-hidden': 'false', 'style': re.compile(pattern=r'.+')})
vid = current_card.find('video', {'class': 'W(100%)'})
# Find appropriate URL
try:
if vid:
vid = vid['src']
download_url = vid
else:
download_url = re.search(pattern=self.url_regEx, string=str(current_card)).group(1)
except Exception:
print(f'Could not find a download URL, {self.picture_count_from_workbook}, page {number_of_pages}')
continue # Couldn't find a download URL
# Send GET request
r = requests.get(url=download_url, headers=self.headers)
# Content Type (i.e., image or video) and Last-Modified
content_type, res_last_mod = r.headers['Content-Type'], r.headers['Last-Modified']
res_last_mod = self.to_datetime_obj(date_str=res_last_mod)
time_diff = ':'.join(str(self.now - res_last_mod).split(':')[:2])
# Write picture/video to disk
with open(file=f'./tinder_pics/{self.picture_count_from_workbook}_{name}_{i+1}.{download_url[-3:]}', mode='wb') as file:
file.write(r.content)
# If the content is an image, use the phash method to create a hash
if download_url[-3:] == 'jpg':
hash = imagehash.phash(image=Image.open(fp=f'./tinder_pics/{self.picture_count_from_workbook}_{name}_{i+1}.{download_url[-3:]}'))
# Append data to list
self.records.append(
(name,
age,
distance,
bio,
passions_text,
song_title,
song_artist,
card_identifier,
content_type,
res_last_mod,
self.now,
time_diff,
str(hash))
) # Convert hash from imagehash.ImageHash to string
# Resetting hash. This can be handled in a better way.
hash = ''
# Check if we need to click to go to the next page
if i != (number_of_pages - 1):
pyautogui.moveTo(x=1250, y=400, duration=0.1)
pyautogui.click()
time.sleep(1)
else:
continue
# Click off the profile card
pyautogui.moveTo(x=1850, y=350, duration=0.1)
pyautogui.click()
time.sleep(1)
# Move down the webpage
if self.incrementer == 0:
pyautogui.moveTo(x=1850, y=350, duration=0.5)
time.sleep(1)
print(f'Run number: {self.incrementer} | {pyautogui.position()}')
pyautogui.scroll(clicks=-2000)
time.sleep(2.5)
pyautogui.scroll(clicks=-280)
time.sleep(1)
| |
<reponame>dumpmemory/google-research
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains ISL training and evaluation on synthetic data."""
import argparse
import json
import os
import data_gen_syn
import isl_module as isl
import metrices
import mlp as MLP
import nonlinear_castle
import nonlinear_gpu as nonlinear
import numpy as np
from scipy.special import expit as sigmoid
from sklearn import metrics
import torch
from torch.utils.data import DataLoader
import utils
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
metrics = dict()
# pylint: disable=invalid-name
# pylint: disable=f-string-without-interpolation
# pylint: disable=unused-variable
# pylint: disable=redefined-outer-name
# pylint: disable=unexpected-keyword-arg
# pylint: disable=eval-used
# pylint: disable=no-value-for-parameter
def mean_squared_loss(n, target, output):
  """Returns the mean squared error 0.5/n * sum((output - target)**2).

  The previous docstring ("Generates synthetic data ...") was a copy-paste
  from gen_synthetic_env and did not describe this function.

  Args:
    n: number of samples to average over.
    target: ground-truth values (array-like).
    output: predicted values, broadcastable against `target`.

  Returns:
    Scalar MSE, including the conventional 1/2 factor.
  """
  loss = 0.5 / n * np.sum((output - target)**2)
  return loss
def gen_synthetic_env(args,
                      data_source,
                      dim,
                      num_env,
                      random,
                      ISL_param=None,
                      castle_param=None,
                      noise_type='None'):
  """Generates synthetic data (in different envs) depending on data_source.

  Args:
    args: parsed CLI arguments (forwarded to the 'binary_ISL' generator).
    data_source: one of 'binary', 'ISL', 'ISL_counter', 'xyz_complex',
      'castle_complex', 'binary_ISL', 'random'.
    dim: number of samples per environment.
    num_env: number of environments to generate.
    random: whether to randomise the generation (used by some generators).
    ISL_param: dict of ISL generation parameters (num_xy, num_s, probs,
      test_probs, train flag).
    castle_param: parameters for the CASTLE generator.
    noise_type: SEM noise type for 'binary_ISL'.

  Returns:
    (data, vertex_label, graph) tuple, or None for 'random'.

  Raises:
    ValueError: if `data_source` is not recognised. (Previously the
      ValueError was constructed but never raised, so unknown sources
      silently returned None.)
  """
  if data_source == 'binary':
    vertex_label = ['Y', 'X1', 'X2', 'S1']
    return np.array(
        data_gen_syn.bi_classify_env_jointSampled(
            dim, num_env, 1.0, random,
            ISL_param['probs'])), vertex_label, utils.customized_graph(
                data_source)
  elif data_source == 'ISL':
    num_xy = ISL_param['num_xy']
    num_s = ISL_param['num_s']
    # Node names: Y, then the causal X features, then the spurious S features.
    vertex_label = list()
    vertex_label.append('Y')
    vertex_label += [f'X{i+1}' for i in range(num_xy - 1)]
    vertex_label += [f'S{i+1}' for i in range(num_s)]
    if ISL_param['train']:
      return data_gen_syn.gen_ISL_env(
          num_xy, num_s, dim, 3,
          probs=ISL_param['probs']), vertex_label, utils.customized_graph(
              data_source, vertex_label=vertex_label)
    else:
      return data_gen_syn.gen_ISL_env(
          num_xy, num_s, dim, 3,
          probs=ISL_param['test_probs']), vertex_label, utils.customized_graph(
              data_source, vertex_label=vertex_label)
  elif data_source == 'ISL_counter':
    num_xy = ISL_param['num_xy']
    num_s = ISL_param['num_s']
    vertex_label = list()
    vertex_label.append('Y')
    vertex_label += [f'X{i+1}' for i in range(num_xy - 1)]
    vertex_label += [f'S{i+1}' for i in range(num_s)]
    return data_gen_syn.gen_ISL_simple(
        num_env, dim, ISL_param), vertex_label, utils.customized_graph(
            data_source, vertex_label=vertex_label)
  elif data_source == 'xyz_complex':
    vertex_label = ['z', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'y1', 'y2', 'y3']
    return np.array(data_gen_syn.gen_xyz_complex_env(
        dim)), vertex_label, utils.customized_graph(data_source)
  elif data_source == 'castle_complex':
    vertex_label = ['y', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x9']
    return data_gen_syn.gen_castle_env(
        dim, castle_param,
        num_env), vertex_label, utils.customized_graph(data_source)
  elif data_source == 'binary_ISL':
    num_xy = ISL_param['num_xy']
    num_s = ISL_param['num_s']
    vertex_label = list()
    vertex_label.append('Y')
    vertex_label += [f'X{i+1}' for i in range(num_xy - 1)]
    vertex_label += [f'S{i+1}' for i in range(num_s)]
    if ISL_param['train']:
      return data_gen_syn.binary_ISL(
          args,
          dim,
          num_xy,
          num_s,
          3,
          probs=ISL_param['probs'],
          sem_type=noise_type), vertex_label, utils.customized_graph(
              data_source, vertex_label=vertex_label)
    else:
      return data_gen_syn.binary_ISL(
          args,
          dim,
          num_xy,
          num_s,
          3,
          probs=ISL_param['test_probs'],
          sem_type=noise_type), vertex_label, utils.customized_graph(
              data_source, vertex_label=vertex_label)
  elif data_source == 'random':
    return
  else:
    raise ValueError('invalid data source')
def train(X, b_true, vertex_label, model_type, args):
  """Trains the selected model and logs structure-recovery metrics.

  Args:
    X: training data of shape (n_envs, n, d); stacked to 2-D for the
      non-ISL baselines.
    b_true: ground-truth binary adjacency matrix used for accuracy scoring.
    vertex_label: node names used when saving the learned DAG figure.
    model_type: one of 'notear-mlp', 'notear-castle' or 'ISL'.
    args: parsed CLI arguments (lambda weights, hidden size, Output_path,
      synthetic_source, ...).

  Returns:
    (model, w_est): the fitted model and the thresholded weight matrix
    (for 'ISL', the matrix of the last environment processed).

  Raises:
    ValueError: if `model_type` is not recognised. (Previously the
      ValueError was constructed but never raised, so an unknown type fell
      through to `return model, w_est` and crashed with UnboundLocalError.)
  """
  n_envs, n, d = X.shape
  os.makedirs(args.Output_path, exist_ok=True)
  if model_type == 'notear-mlp':
    X = np.vstack(X)
    model = nonlinear.NotearsMLP(dims=[d, args.hidden, 1], bias=True)
    w_est_origin = nonlinear.notears_nonlinear(
        model, X, lambda1=args.lambda1, lambda2=args.lambda2, w_threshold=0)
    # Scan thresholds and keep the first (smallest) one whose graph is a DAG.
    wthresh_w_shd_dict = utils.rank_W(
        w_est_origin,
        2,
        10,
        b_true,
        w_threshold_low=0,
        w_threshold_high=5,
        step_size=0.02)
    w_est = None
    w_threshold = None
    for threshold, w_element in wthresh_w_shd_dict.items():
      if utils.is_dag(w_element[0]):
        w_est = w_element[0]
        w_threshold = threshold
        break
    # NOTE(review): assumes some threshold yields a DAG; otherwise
    # w_threshold stays None and round() below raises — confirm upstream.
    exp = f'notear_mlp_syn_{args.synthetic_source}_W-thresh{round(w_threshold, 3)}'
    # save the learned W matrix
    np.savetxt(
        args.Output_path + f'{exp}_West.csv', w_est, fmt='%.3f', delimiter=',')
    np.savetxt(
        args.Output_path + f'{exp}_WOrigin.csv',
        w_est_origin,
        fmt='%.3f',
        delimiter=',')
    # save the learned DAG
    utils.save_dag(
        w_est, args.Output_path + f'{exp}_DAG', vertex_label=vertex_label)
    # count the accuracy of Y (only the edges into column 0)
    b_true_Y = np.zeros_like(b_true)
    b_true_Y[:, 0] = b_true[:, 0]
    w_est_Y = np.zeros_like(w_est)
    w_est_Y[:, 0] = w_est[:, 0]
    acc = metrices.count_accuracy(b_true_Y, w_est_Y != 0)
    metrics[f'{exp}_train_acc'] = acc
    y = model(torch.from_numpy(X))
    y = y.cpu().detach().numpy()
    mse = mean_squared_loss(y.shape[0], y[:, 0], X[:, 0])
    print('mse:', mse)
    metrics[f'{model_type}_train_MSE'] = mse
  elif model_type == 'notear-castle':
    X = np.vstack(X)
    model = nonlinear_castle.NotearsMLP(dims=[d, args.hidden, 1], bias=True)
    # To use a different feature as label, change y_index to column
    # index of the feature.
    w_est_origin = nonlinear_castle.notears_nonlinear(
        model,
        X,
        lambda1=args.lambda1,
        lambda2=args.lambda2,
        w_threshold=args.w_threshold)
    wthresh_w_shd_dict = utils.rank_W(
        w_est_origin,
        2,
        10,
        b_true,
        w_threshold_low=0,
        w_threshold_high=5,
        step_size=0.02)
    w_est = None
    w_threshold = None
    for threshold, w_element in wthresh_w_shd_dict.items():
      if utils.is_dag(w_element[0]):
        w_est = w_element[0]
        w_threshold = threshold
        break
    exp = f'notear_castle_syn_{args.synthetic_source}_W-thresh-{round(w_threshold, 3)}'
    # save the learned W matrix
    np.savetxt(
        args.Output_path + f'{exp}_West.csv', w_est, fmt='%.2f', delimiter=',')
    np.savetxt(
        args.Output_path + f'{exp}_WOrigin.csv',
        w_est_origin,
        fmt='%.3f',
        delimiter=',')
    # save the learned DAG
    utils.save_dag(
        w_est, args.Output_path + f'{exp}_DAG', vertex_label=vertex_label)
    # estimate the accuracy of Y
    b_true_Y = np.zeros_like(b_true)
    b_true_Y[:, 0] = b_true[:, 0]
    w_est_Y = np.zeros_like(w_est)
    w_est_Y[:, 0] = w_est[:, 0]
    acc = metrices.count_accuracy(b_true_Y, w_est_Y != 0)
    metrics[f'{exp}_train_acc'] = acc
    y = model(torch.from_numpy(X))
    y = y.cpu().detach().numpy()
    mse = mean_squared_loss(y.shape[0], y[:, 0], X[:, 0])
    print('mse:', mse)
    metrics[f'{model_type}_train_MSE'] = mse
  elif model_type == 'ISL':
    model = isl.isl_module(
        n_envs=n_envs,
        Y_dims=[d, args.hidden, 1],
        dims=[d, args.hidden, 1],
        bias=True)
    model.to(device)
    # To use a different feature as label, change y_index to column index of
    # the feature.
    _, w_est_origin_envs = isl.notears_nonlinear(
        model,
        X,
        y_index=0,
        lambda1=args.lambda1,
        lambda2=args.lambda2,
        lambda1_Y=args.lambda1_Y,
        lambda2_Y_fc1=args.lambda2_Y_fc1,
        lambda2_Y_fc2=args.lambda2_Y_fc2,
        beta=args.beta,
        w_threshold=0)
    # One weight matrix per environment: threshold, save and score each.
    for i in range(len(w_est_origin_envs)):
      wthresh_w_shd_dict = utils.rank_W(
          w_est_origin_envs[i],
          2,
          30,
          b_true,
          w_threshold_low=0,
          w_threshold_high=5,
          step_size=0.02)
      w_est = None
      w_threshold = None
      for threshold, w_element in wthresh_w_shd_dict.items():
        if utils.is_dag(w_element[0]):
          w_est = w_element[0]
          w_threshold = threshold
          break
      exp = f'notear_ISL_syn_{args.synthetic_source}_W-thresh{round(w_threshold, 3)}'
      # save the learned W matrix
      np.savetxt(
          args.Output_path + f'{exp}_West_{i}.csv',
          w_est,
          fmt='%.3f',
          delimiter=',')
      np.savetxt(
          args.Output_path + f'{exp}_WOrigin_env_{i}.csv',
          w_est_origin_envs[i],
          fmt='%.3f',
          delimiter=',')
      # save the learned DAG
      utils.save_dag(
          w_est, args.Output_path + f'{exp}_DAG_{i}', vertex_label=vertex_label)
      # count the accuracy of Y
      b_true_Y = np.zeros_like(b_true)
      b_true_Y[:, 0] = b_true[:, 0]
      w_est_Y = np.zeros_like(w_est)
      w_est_Y[:, 0] = w_est[:, 0]
      acc = metrices.count_accuracy(b_true_Y, w_est_Y != 0)
      print(acc)
      metrics[f'{exp}_train_env_{i}_acc'] = acc
      y = model.test(X)
      mse = mean_squared_loss(y.shape[0] * y.shape[1], y,
                              X[:, :, 0][:, :, np.newaxis])
      print('mse:', mse)
      metrics[f'{exp}_train_env_{i}_mse'] = mse
  else:
    raise ValueError('Invalid model type')
  return model, w_est
def test(model, X, model_type, test_type, counter=False):
  """Evaluates a trained model and records its MSE in the global `metrics`.

  Args:
    model: trained model returned by `train`.
    X: evaluation data of shape (n_envs, n, d); stacked to 2-D for the
      non-ISL baselines.
    model_type: 'notear-mlp', 'notear-castle' or 'ISL'.
    test_type: 'ID' (in-distribution) or 'OOD'; selects the metrics key.
    counter: if True, record under the counterfactual metrics key instead.

  Returns:
    The mean squared error of predicting column 0.

  Raises:
    ValueError: if `model_type` is unrecognised. (Previously an unknown
      type fell through to an UnboundLocalError on `mse`.)
  """
  if model_type == 'notear-mlp':
    X = np.vstack(X)
    y = model(torch.from_numpy(X))
    y = y.cpu().detach().numpy()
    mse = mean_squared_loss(y.shape[0], y[:, 0], X[:, 0])
  elif model_type == 'notear-castle':
    X = np.vstack(X)
    y = model(torch.from_numpy(X))
    y = y.cpu().detach().numpy()
    mse = mean_squared_loss(y.shape[0], y[:, 0], X[:, 0])
  elif model_type == 'ISL':
    y = model.test(X)
    mse = mean_squared_loss(y.shape[0] * y.shape[1], y, X[:, :, 0][:, :,
                                                                   np.newaxis])
  else:
    raise ValueError(f'Invalid model type: {model_type}')
  if not counter:
    if test_type == 'ID':
      metrics[f'{model_type}_testID_MSE'] = mse
    elif test_type == 'OOD':
      metrics[f'{model_type}_testOOD_MSE'] = mse
  else:
    if test_type == 'ID':
      metrics[f'{model_type}_counter_testID_MSE'] = mse
    elif test_type == 'OOD':
      metrics[f'{model_type}_counter_testOOD_MSE'] = mse
  return mse
def exp(args, model_type, vertex_label, b_true, X_train, X_test_ID, X_test_OOD):
  """Trains one model and evaluates it on the ID and OOD test sets.

  Args:
    args: parsed CLI arguments.
    model_type: 'notear-mlp', 'notear-castle' or 'ISL'.
    vertex_label: node names used when saving the learned DAG figure.
    b_true: ground-truth binary adjacency matrix.
    X_train: training data, (n_envs, n, d).
    X_test_ID: in-distribution test data.
    X_test_OOD: out-of-distribution test data.

  Returns:
    (model, w_est): the trained model and thresholded adjacency matrix.
  """
  torch.set_default_dtype(torch.double)
  np.set_printoptions(precision=3)
  model, w_est = train(
      X_train,
      vertex_label=vertex_label,
      b_true=b_true,
      model_type=model_type,
      args=args)
  # `test` accepts only (model, X, model_type, test_type, counter); the
  # previous calls also passed vertex_label/b_true/args keyword arguments,
  # which raised a TypeError on every invocation.
  # ID test
  test(model, X_test_ID, model_type=model_type, test_type='ID')
  # OOD test
  test(model, X_test_OOD, model_type=model_type, test_type='OOD')
  return model, w_est
def find_causal_parents(W):
  """Returns the row indices with a positive weight on the edge into node 0."""
  return [row for row in range(W.shape[0]) if W[row][0] > 0]
def combined_exp(args):
  """Runs the full synthetic-data experiment pipeline.

  Generates train/ID-test/OOD-test splits, fits the NOTEARS-MLP baseline and
  the ISL model, trains plain MLPs on the discovered causal parents, and
  dumps all metrics, data splits and models under args.Output_path.

  Args:
    args: parsed CLI arguments; mutated in place (probs parsed to floats,
      synthetic_source suffixed with the probability settings).
  """
  # NOTE(review): eval() here assumes a trusted CLI value such as
  # "0.2,0.5,0.8" — confirm this is never fed untrusted input.
  args.probs = [eval(prob) for prob in args.probs.split(',')]
  args.test_probs = np.random.uniform(low=0.0, high=1.0, size=3)
  ISL_param = dict()
  ISL_param['num_xy'] = args.num_xy
  ISL_param['num_s'] = args.num_s
  ISL_param['train'] = True
  ISL_param['probs'] = args.probs
  ISL_param['test_probs'] = args.test_probs
  # train data generation
  X_train, vertex_label, b_true = gen_synthetic_env(
      args,
      args.synthetic_source,
      dim=1000,
      num_env=3,
      random=False,
      ISL_param=ISL_param,
      castle_param=[args.probs, args.probs],
      noise_type=args.noise_type)
  # ID test data generation
  X_test_ID, _, _ = gen_synthetic_env(
      args,
      args.synthetic_source,
      dim=200,
      num_env=3,
      random=False,
      ISL_param=ISL_param,
      noise_type=args.noise_type)
  # OOD test data generation (train=False switches to test_probs)
  ISL_param['train'] = False
  X_test_OOD, _, _ = gen_synthetic_env(
      args,
      args.synthetic_source,
      dim=200,
      num_env=3,
      random=True,
      ISL_param=ISL_param,
      noise_type=args.noise_type)
  # Encode the probability settings into the experiment name used for files.
  args.synthetic_source += 'trainProb' + str(args.probs)
  args.synthetic_source += 'testProb' + str(args.test_probs)
  notear_model, _ = exp(args, 'notear-mlp', vertex_label, b_true, X_train,
                        X_test_ID, X_test_OOD)
  notear_ISL_model, w_est = exp(args, 'ISL', vertex_label, b_true, X_train,
                                X_test_ID, X_test_OOD)
  causal_parents = find_causal_parents(w_est)
  # passing the causal parents of X to a MLP to predict
  X_train = np.vstack(X_train)
  X_test_ID = np.vstack(X_test_ID)
  X_test_OOD = np.vstack(X_test_OOD)
  # NOTE(review): `mlp` is not bound in this module — the module is imported
  # as `import mlp as MLP` — so these calls look like they would raise a
  # NameError; presumably `MLP.mlp(...)` was intended. Confirm before use.
  model_ID, ISL_id_train_mse, ISL_id_test_mse = mlp(
      X_train[:, 1:args.num_xy],
      X_train[:, 0],
      X_test_ID[:, 1:args.num_xy],
      X_test_ID[:, 0],
      epoches=200)
  model_OOD, ISL_ood_train_mse, ISL_ood_test_mse = mlp(
      X_train[:, 1:args.num_xy],
      X_train[:, 0],
      X_test_OOD[:, 1:args.num_xy],
      X_test_OOD[:, 0],
      epoches=200)
  metrics['ISL_id_train_mse'] = ISL_id_train_mse
  metrics['ISL_id_test_mse'] = ISL_id_test_mse
  metrics['ISL_ood_train_mse'] = ISL_ood_train_mse
  metrics['ISL_ood_test_mse'] = ISL_ood_test_mse
  with open(args.Output_path + f'{args.synthetic_source}_metrics.json',
            'w+') as f:
    json.dump(metrics, f, indent=4)
  # Persist the data splits and models for counter_exp to reload later.
  np.savetxt(f'{args.Output_path}X_train.csv', X_train, delimiter=',')
  np.savetxt(f'{args.Output_path}X_test_ID.csv', X_test_ID, delimiter=',')
  np.savetxt(f'{args.Output_path}X_test_OOD.csv', X_test_OOD, delimiter=',')
  torch.save(notear_model, f'{args.Output_path}notear.pt')
  # torch.save(notear_castle_model, f'{args.Output_path}notear_castle.pt')
  torch.save(notear_ISL_model, f'{args.Output_path}ISL_notear.pt')
  return
def counter_exp(args):
"""Experiments with a counter."""
torch.set_default_dtype(torch.double)
X_train = np.loadtxt(f'{args.Output_path}X_train.csv', delimiter=',')
X_test_ID = np.loadtxt(f'{args.Output_path}X_test_ID.csv', delimiter=',')
X_test_OOD = np.loadtxt(f'{args.Output_path}X_test_OOD.csv', delimiter=',')
| |
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
# setup a dictionary of regions to plot from
dfs = {
'>=65N': pd.DataFrame(df65N), '>=65S': pd.DataFrame(df65S),
'Global': pd.DataFrame(dfALL),
}
# - plot up the PDF distribution of each of the variables.
for var2use in vars2use:
print(var2use)
# set a single axis to use.
fig, ax = plt.subplots()
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][var2use]
# Get sample size
N_ = df.shape[0]
# do a dist plot
label = '{} (N={})'.format(dataset, N_)
sns.distplot(df, ax=ax, label=label)
# Make sure the values are correctly scaled
ax.autoscale()
# Plot up the perturbations too
for perturb in perturb2use:
perturb
# Beautify
title_str = "PDF of ancillary input for '{}'"
fig.suptitle(title_str.format(var2use))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# -Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def explore_extracted_data_in_Oi_prj_explore_Arctic_Antarctic_obs(dsA=None,
                                                                  res='0.125x0.125',
                                                                  dpi=320):
    """
    Analyse the input data for the Arctic and Antarctic

    Parameters
    -------
    dsA (xr.Dataset): feature-variable dataset; loaded from disk when None
    res (str): horizontal resolution of dataset (e.g. '0.125x0.125')
    dpi (int): resolution of the figures saved into the multi-page PDF

    Returns
    -------
    (None)

    Notes
    -------
    Saves pairplots, PDFs of each ancillary variable, and per-latitude
    ocean-gridbox counts into a single multi-page PDF via AC.plot2pdfmulti.
    """
    import matplotlib
    # matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    matplotlib.style.use('ggplot')
    import seaborn as sns
    sns.set()
    # - local variables
    # get input variables
    if isinstance(dsA, type(None)):
        filename = 'Oi_prj_predicted_iodide_{}.nc'.format(res)
        # folder = '/shared/earth_home/ts551/labbook/Python_progs/'
        folder = '/shared/earth_home/ts551/data/iodide/'
        # NOTE(review): the predicted-iodide filename above is dead code —
        # it is immediately overwritten by the feature-variables filename
        # below. Confirm which file was intended.
        filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
        dsA = xr.open_dataset(folder + filename)
        # ds = xr.open_dataset( filename )
    # variables to consider
    vars2analyse = list(dsA.data_vars)
    # add LWI to array - NOTE: 1 = water in Nature run LWI files !
    # ( The above comment is not correct! why is this written here? )
    folderLWI = utils.get_file_locations(
        'AC_tools')+'/data/LM/TEMP_NASA_Nature_run/'
    filenameLWI = 'ctm.nc'
    LWI = xr.open_dataset(folderLWI+filenameLWI)
    # updates dates (to be Jan=>Dec)
    new_dates = [datetime.datetime(1970, i, 1) for i in LWI['time.month']]
    LWI.time.values = new_dates
    # Sort by new dates
    LWI = LWI.loc[{'time': sorted(LWI.coords['time'].values)}]
    # LWI = AC.get_LWI_map(res=res)[...,0]
    # Water/land masks: here LWI == 0 is treated as water and LWI == 1 as
    # land (the conflicting note above flags this mapping as questionable —
    # verify against the LWI file before relying on it).
    dsA['IS_WATER'] = dsA['WOA_TEMP'].copy()
    dsA['IS_WATER'].values = (LWI['LWI'] == 0)
    # add is land
    dsA['IS_LAND'] = dsA['IS_WATER'].copy()
    dsA['IS_LAND'].values = (LWI['LWI'] == 1)
    # get surface area
    s_area = AC.calc_surface_area_in_grid(res=res)  # m2 land map
    dsA['AREA'] = dsA['WOA_TEMP'].mean(dim='time')
    dsA['AREA'].values = s_area.T
    # - Select data of interest by variable for locations
    # setup dicts to store the extracted values
    df65N, df65S, dfALL = {}, {}, {}
    # - setup booleans for the data
    # now loop and extract variables
    vars2use = [
        'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
    ]
    for var_ in vars2use:
        # select the boolean for if water
        IS_WATER = dsA['IS_WATER'].values
        if IS_WATER.shape != dsA[var_].shape:
            # special case for depth: the 2-D field is tiled to 12 months so
            # the monthly water mask can be applied to it
            # get value for >= 65
            ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
            arr = np.ma.array(12*[ds_tmp[var_].values])
            arr = arr[ds_tmp['IS_WATER'].values]
            # add to saved arrays
            df65N[var_] = arr
            del ds_tmp
            # get value for <= -65
            ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
            arr = np.ma.array(12*[ds_tmp[var_].values])
            arr = arr[ds_tmp['IS_WATER'].values]
            # add to saved arrays
            df65S[var_] = arr
            del ds_tmp
            # get value for all latitudes
            ds_tmp = dsA.copy()
            arr = np.ma.array(12*[ds_tmp[var_].values])
            arr = arr[ds_tmp['IS_WATER'].values]
            # add to saved arrays
            dfALL[var_] = arr
            del ds_tmp
        else:
            # get value for >= 65
            ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
            arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
            # add to saved arrays
            df65N[var_] = arr
            del ds_tmp
            # get value for <= -65
            ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
            arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
            # add to saved arrays
            df65S[var_] = arr
            del ds_tmp
            # get value for all latitudes
            ds_tmp = dsA.copy()
            arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
            # add to saved arrays
            dfALL[var_] = arr
            del ds_tmp
    # setup a dictionary of regions to plot from
    dfs = {
        '>=65N': pd.DataFrame(df65N), '>=65S': pd.DataFrame(df65S),
        'Global': pd.DataFrame(dfALL),
    }
    # - Loop regions and plot PDFs of variables of interest
    # vars2use = dfs[ dfs.keys()[0] ].columns
    # set PDF
    savetitle = 'Oi_prj_explore_Arctic_Antarctic_ancillaries_space'
    pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
    # Loop by dataset (region) and plots
    datasets = sorted(dfs.keys())
    for dataset in datasets:
        # select the DataFrame
        df = dfs[dataset][vars2use]
        # Get sample size
        N_ = df.shape[0]
        # do a pair plot
        g = sns.pairplot(df)
        # Add a title
        plt.suptitle("Pairplot for '{}' (N={})".format(dataset, N_))
        # adjust plots
        g.fig.subplots_adjust(top=0.925, left=0.085)
        # Save to PDF and close plot
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
        plt.close()
    # - Plot up the PDF distribution of each of the variables.
    for var2use in vars2use:
        print(var2use)
        # set a single axis to use.
        fig, ax = plt.subplots()
        for dataset in datasets:
            # select the DataFrame
            df = dfs[dataset][var2use]
            # Get sample size
            N_ = df.shape[0]
            # do a dist plot
            label = '{} (N={})'.format(dataset, N_)
            sns.distplot(df, ax=ax, label=label)
            # Make sure the values are correctly scaled
            ax.autoscale()
        # Beautify
        title_str = "PDF of ancillary input for '{}'"
        fig.suptitle(title_str.format(var2use))
        plt.legend()
        # Save to PDF and close plot
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
        plt.close()
    # - Plot up the number of oceanic data points by lat for each lat
    # Plot up number of samples for South pole
    ds = dsA.sel(lat=(dsA['lat'] <= -65))
    var_ = 'WOA_Salinity'
    N = {}
    for lat in ds['lat'].values:
        ds_tmp = ds.sel(lat=lat)
        N[lat] = ds_tmp[var_].values[ds_tmp['IS_WATER'].values].shape[-1]
    N = pd.Series(N)
    N.plot()
    plt.ylabel('number of gridboxes in predictor array')
    plt.xlabel('Latitude $^{\circ}$N')
    plt.title('Number of gridboxes for Antarctic (<= -65N)')
    # Save to PDF and close plot
    AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
    plt.close()
    # Plot up number of samples for North pole
    ds = dsA.sel(lat=(dsA['lat'] >= 65))
    var_ = 'WOA_Salinity'
    N = {}
    for lat in ds['lat'].values:
        ds_tmp = ds.sel(lat=lat)
        N[lat] = ds_tmp[var_].values[ds_tmp['IS_WATER'].values].shape[-1]
    N = pd.Series(N)
    N.plot()
    # NOTE(review): the generic title below is immediately replaced by the
    # Arctic-specific title on the next line; the first call looks like
    # leftover code.
    plt.title('Number of gridboxes')
    plt.title('Number of gridboxes for Arctic (>= 65N)')
    # Save to PDF and close plot
    AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
    plt.close()
    # Save entire pdf
    AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def explore_observational_data_in_Arctic_parameter_space(RFR_dict=None,
plt_up_locs4var_conds=False,
testset='Test set (strat. 20%)',
dpi=320):
"""
Analysis the input observational data for the Arctic and Antarctic
"""
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# - local variables
df = RFR_dict['df']
# Set splits in data to look at
dfs = {}
# All data
dfs['All data'] = df.copy()
# Get all the data above 65 N
dfs['>=65N'] = df.loc[df['Latitude'] >= 65, :]
# Get all the data above 65 N and in the testset
bool_ = dfs['>=65N'][testset] == False
dfs['>=65N (training)'] = dfs['>=65N'].loc[bool_, :]
# Get all the data below 65 S
dfs['<=65S'] = df.loc[df['Latitude'] <= -65, :]
# Get all the data above 65 N and in the testset
bool_ = dfs['<=65S'][testset] == False
dfs['<=65S (training)'] = dfs['<=65S'].loc[bool_, :]
# - variables to explore?
vars2use = [
'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
# - Loop regions and plot pairplots of variables of interest
# set PDF
savetitle = 'Oi_prj_explore_Arctic_Antarctic_obs_space'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# Loop by dataset (region) and plots
datasets = sorted(dfs.keys())
for dataset in datasets:
# select the DataFrame
df = dfs[dataset]
# Get sample size
N_ = df.shape[0]
# do a pair plot
g = sns.pairplot(df[vars2use])
# Add a title
plt.suptitle("Pairplot for '{}' (N={})".format(dataset, N_))
# adjust plots
g.fig.subplots_adjust(top=0.925, left=0.085)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
# Loop by dataset (region) and plots
import seaborn as sns
sns.reset_orig()
datasets = sorted(dfs.keys())
for dataset in datasets:
fig, ax = plt.subplots()
# select the DataFrame
dfA = dfs[dataset]
# Set title
title = "Locations for '{}'".format(dataset)
p_size = 50
alpha = 1
# plot up Non coatal locs
df = dfA.loc[dfA['Coastal'] == False, :]
color = 'blue'
label = 'Non-coastal (N={})'.format(int(df.shape[0]))
m = AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
lons=df['Longitude'].values,
lats=df['Latitude'].values,
label=label, fig=fig, ax=ax, color=color,
return_axis=True)
# Plot up coatal locs
df = dfA.loc[dfA['Coastal'] == True, :]
color = 'green'
label = 'Coastal (N={})'.format(int(df.shape[0]))
lons = df['Longitude'].values
lats = df['Latitude'].values
m.scatter(lons, lats, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label)
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
df = RFR_dict['df']
dfs = {}
# All data
dfs['All data'] = df.copy()
# Get all the data above 65 N
dfs['>=65N'] = df.loc[df['Latitude'] >= 65, | |
not None:
for k in map.get('VideoDNA'):
temp_model = GetMediaDNAResultResponseDNAResultVideoDNA()
temp_model = temp_model.from_map(k)
self.video_dna.append(temp_model)
else:
self.video_dna = None
return self
class DeleteMezzaninesRequest(TeaModel):
    """Request model for the DeleteMezzanines API."""

    def __init__(self, owner_id=None, resource_owner_account=None,
                 resource_owner_id=None, video_ids=None, force=None):
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.video_ids = video_ids  # comma-separated ids; the only required field
        self.force = force

    def validate(self):
        """Ensure the mandatory video_ids field is present."""
        self.validate_required(self.video_ids, 'video_ids')

    def to_map(self):
        """Serialize this request into its wire-format dict."""
        return {
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'VideoIds': self.video_ids,
            'Force': self.force,
        }

    def from_map(self, map={}):
        """Populate this request from a wire-format dict and return self."""
        field_pairs = (('owner_id', 'OwnerId'),
                       ('resource_owner_account', 'ResourceOwnerAccount'),
                       ('resource_owner_id', 'ResourceOwnerId'),
                       ('video_ids', 'VideoIds'),
                       ('force', 'Force'))
        for attr, key in field_pairs:
            setattr(self, attr, map.get(key))
        return self
class DeleteMezzaninesResponse(TeaModel):
    """Response model for DeleteMezzanines.

    Carries the request id plus the video ids that did not exist and those
    that could not be removed.
    """

    def __init__(self, request_id=None, non_exist_video_ids=None,
                 un_removeable_video_ids=None):
        self.request_id = request_id
        # Bug fix: the two list arguments were previously accepted but
        # silently discarded (the attributes were always reset to []).
        # Honor caller-supplied values while keeping [] as the default.
        self.non_exist_video_ids = (
            [] if non_exist_video_ids is None else non_exist_video_ids)
        self.un_removeable_video_ids = (
            [] if un_removeable_video_ids is None else un_removeable_video_ids)

    def validate(self):
        """Require all mandatory response fields."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.non_exist_video_ids, 'non_exist_video_ids')
        self.validate_required(
            self.un_removeable_video_ids, 'un_removeable_video_ids')

    def to_map(self):
        """Serialize to the wire-format dict; a None list stays None."""
        result = {'RequestId': self.request_id}
        if self.non_exist_video_ids is not None:
            result['NonExistVideoIds'] = list(self.non_exist_video_ids)
        else:
            result['NonExistVideoIds'] = None
        if self.un_removeable_video_ids is not None:
            result['UnRemoveableVideoIds'] = list(self.un_removeable_video_ids)
        else:
            result['UnRemoveableVideoIds'] = None
        return result

    def from_map(self, map=None):
        """Populate from a wire-format dict and return self."""
        # Avoid the original's shared mutable default argument `map={}`.
        map = {} if map is None else map
        self.request_id = map.get('RequestId')
        raw = map.get('NonExistVideoIds')
        self.non_exist_video_ids = list(raw) if raw is not None else None
        raw = map.get('UnRemoveableVideoIds')
        self.un_removeable_video_ids = list(raw) if raw is not None else None
        return self
class UpdateImageInfosRequest(TeaModel):
    """Request model for the UpdateImageInfos API."""

    def __init__(self, access_key_id=None, owner_id=None,
                 resource_owner_account=None, resource_owner_id=None,
                 resource_real_owner_id=None, update_content=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.resource_real_owner_id = resource_real_owner_id
        self.update_content = update_content  # required payload

    def validate(self):
        """Ensure the mandatory update_content field is present."""
        self.validate_required(self.update_content, 'update_content')

    def to_map(self):
        """Serialize this request into its wire-format dict."""
        return {
            'AccessKeyId': self.access_key_id,
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'ResourceRealOwnerId': self.resource_real_owner_id,
            'UpdateContent': self.update_content,
        }

    def from_map(self, map={}):
        """Populate this request from a wire-format dict and return self."""
        field_pairs = (('access_key_id', 'AccessKeyId'),
                       ('owner_id', 'OwnerId'),
                       ('resource_owner_account', 'ResourceOwnerAccount'),
                       ('resource_owner_id', 'ResourceOwnerId'),
                       ('resource_real_owner_id', 'ResourceRealOwnerId'),
                       ('update_content', 'UpdateContent'))
        for attr, key in field_pairs:
            setattr(self, attr, map.get(key))
        return self
class UpdateImageInfosResponse(TeaModel):
    """Response model for UpdateImageInfos, with a nested model wrapping
    the ids of images that could not be found."""

    def __init__(self, request_id=None, non_exist_image_ids=None):
        self.request_id = request_id
        self.non_exist_image_ids = non_exist_image_ids

    def validate(self):
        """Require mandatory fields and recurse into the nested model."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.non_exist_image_ids, 'non_exist_image_ids')
        if self.non_exist_image_ids:
            self.non_exist_image_ids.validate()

    def to_map(self):
        """Serialize to the wire-format dict; the nested model is mapped too."""
        nested = self.non_exist_image_ids
        return {
            'RequestId': self.request_id,
            'NonExistImageIds': nested.to_map() if nested is not None else None,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.request_id = map.get('RequestId')
        raw = map.get('NonExistImageIds')
        if raw is not None:
            self.non_exist_image_ids = (
                UpdateImageInfosResponseNonExistImageIds().from_map(raw))
        else:
            self.non_exist_image_ids = None
        return self
class UpdateImageInfosResponseNonExistImageIds(TeaModel):
    """Wrapper around the list of image ids that could not be updated."""

    def __init__(self, image_id=None):
        # Bug fix: the image_id argument was previously accepted but
        # discarded (the attribute was always reset to []). Honor the
        # caller's value while keeping [] as the default.
        self.image_id = [] if image_id is None else image_id

    def validate(self):
        """Require the image_id list."""
        self.validate_required(self.image_id, 'image_id')

    def to_map(self):
        """Serialize to the wire-format dict; a None list stays None."""
        result = {}
        if self.image_id is not None:
            result['ImageId'] = list(self.image_id)
        else:
            result['ImageId'] = None
        return result

    def from_map(self, map=None):
        """Populate from a wire-format dict and return self."""
        # Avoid the original's shared mutable default argument `map={}`.
        map = {} if map is None else map
        raw = map.get('ImageId')
        self.image_id = list(raw) if raw is not None else None
        return self
class DeleteImageRequest(TeaModel):
    """Request model for the DeleteImage API."""

    def __init__(self, access_key_id=None, owner_id=None,
                 resource_owner_account=None, resource_owner_id=None,
                 delete_image_type=None, image_urls=None, image_ids=None,
                 video_id=None, image_type=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.delete_image_type = delete_image_type  # required selector
        self.image_urls = image_urls
        self.image_ids = image_ids
        self.video_id = video_id
        self.image_type = image_type

    def validate(self):
        """Ensure the mandatory delete_image_type field is present."""
        self.validate_required(self.delete_image_type, 'delete_image_type')

    def to_map(self):
        """Serialize this request into its wire-format dict."""
        return {
            'AccessKeyId': self.access_key_id,
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'DeleteImageType': self.delete_image_type,
            'ImageURLs': self.image_urls,
            'ImageIds': self.image_ids,
            'VideoId': self.video_id,
            'ImageType': self.image_type,
        }

    def from_map(self, map={}):
        """Populate this request from a wire-format dict and return self."""
        field_pairs = (('access_key_id', 'AccessKeyId'),
                       ('owner_id', 'OwnerId'),
                       ('resource_owner_account', 'ResourceOwnerAccount'),
                       ('resource_owner_id', 'ResourceOwnerId'),
                       ('delete_image_type', 'DeleteImageType'),
                       ('image_urls', 'ImageURLs'),
                       ('image_ids', 'ImageIds'),
                       ('video_id', 'VideoId'),
                       ('image_type', 'ImageType'))
        for attr, key in field_pairs:
            setattr(self, attr, map.get(key))
        return self
class DeleteImageResponse(TeaModel):
    """Response model for DeleteImage; only echoes the request id."""

    def __init__(self, request_id=None):
        self.request_id = request_id

    def validate(self):
        """Require the request id."""
        self.validate_required(self.request_id, 'request_id')

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {'RequestId': self.request_id}

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.request_id = map.get('RequestId')
        return self
class ListAuditSecurityIpRequest(TeaModel):
    """Request model for ListAuditSecurityIp; the group name is optional."""

    def __init__(self, security_group_name=None):
        self.security_group_name = security_group_name

    def validate(self):
        """No required fields."""
        pass

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {'SecurityGroupName': self.security_group_name}

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.security_group_name = map.get('SecurityGroupName')
        return self
class ListAuditSecurityIpResponse(TeaModel):
    """Response model for ListAuditSecurityIp: a list of security-ip
    group sub-models."""

    def __init__(self, request_id=None, security_ip_list=None):
        self.request_id = request_id
        # Bug fix: the security_ip_list argument was previously accepted
        # but discarded (the attribute was always reset to []). Honor the
        # caller's value while keeping [] as the default.
        self.security_ip_list = (
            [] if security_ip_list is None else security_ip_list)

    def validate(self):
        """Require mandatory fields and recurse into each sub-model."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.security_ip_list, 'security_ip_list')
        if self.security_ip_list:
            for item in self.security_ip_list:
                if item:
                    item.validate()

    def to_map(self):
        """Serialize to the wire-format dict, mapping each sub-model."""
        result = {'RequestId': self.request_id}
        if self.security_ip_list is not None:
            result['SecurityIpList'] = [
                item.to_map() if item else None
                for item in self.security_ip_list]
        else:
            result['SecurityIpList'] = None
        return result

    def from_map(self, map=None):
        """Populate from a wire-format dict and return self."""
        # Avoid the original's shared mutable default argument `map={}`.
        map = {} if map is None else map
        self.request_id = map.get('RequestId')
        raw = map.get('SecurityIpList')
        if raw is not None:
            self.security_ip_list = [
                ListAuditSecurityIpResponseSecurityIpList().from_map(k)
                for k in raw]
        else:
            self.security_ip_list = None
        return self
class ListAuditSecurityIpResponseSecurityIpList(TeaModel):
    """One security-ip group entry: name, ip list and timestamps."""

    def __init__(self, security_group_name=None, ips=None,
                 creation_time=None, modification_time=None):
        self.security_group_name = security_group_name
        self.ips = ips
        self.creation_time = creation_time
        self.modification_time = modification_time

    def validate(self):
        """All four fields are mandatory."""
        for value, name in ((self.security_group_name, 'security_group_name'),
                            (self.ips, 'ips'),
                            (self.creation_time, 'creation_time'),
                            (self.modification_time, 'modification_time')):
            self.validate_required(value, name)

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'SecurityGroupName': self.security_group_name,
            'Ips': self.ips,
            'CreationTime': self.creation_time,
            'ModificationTime': self.modification_time,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        field_pairs = (('security_group_name', 'SecurityGroupName'),
                       ('ips', 'Ips'),
                       ('creation_time', 'CreationTime'),
                       ('modification_time', 'ModificationTime'))
        for attr, key in field_pairs:
            setattr(self, attr, map.get(key))
        return self
class SetAuditSecurityIpRequest(TeaModel):
    """Request model for SetAuditSecurityIp."""

    def __init__(self, security_group_name=None, ips=None, operate_mode=None):
        self.security_group_name = security_group_name
        self.ips = ips  # the only required field
        self.operate_mode = operate_mode

    def validate(self):
        """Ensure the mandatory ips field is present."""
        self.validate_required(self.ips, 'ips')

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'SecurityGroupName': self.security_group_name,
            'Ips': self.ips,
            'OperateMode': self.operate_mode,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (('security_group_name', 'SecurityGroupName'),
                          ('ips', 'Ips'),
                          ('operate_mode', 'OperateMode')):
            setattr(self, attr, map.get(key))
        return self
class SetAuditSecurityIpResponse(TeaModel):
    """Response model for SetAuditSecurityIp; only echoes the request id."""

    def __init__(self, request_id=None):
        self.request_id = request_id

    def validate(self):
        """Require the request id."""
        self.validate_required(self.request_id, 'request_id')

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {'RequestId': self.request_id}

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.request_id = map.get('RequestId')
        return self
class UploadMediaByURLRequest(TeaModel):
    """Request model for the UploadMediaByURL API."""

    def __init__(self, owner_id=None, resource_owner_account=None,
                 resource_owner_id=None, upload_urls=None,
                 template_group_id=None, storage_location=None,
                 upload_metadatas=None, priority=None, message_callback=None,
                 user_data=None, app_id=None, workflow_id=None):
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.upload_urls = upload_urls  # the only required field
        self.template_group_id = template_group_id
        self.storage_location = storage_location
        self.upload_metadatas = upload_metadatas
        self.priority = priority
        self.message_callback = message_callback
        self.user_data = user_data
        self.app_id = app_id
        self.workflow_id = workflow_id

    def validate(self):
        """Ensure the mandatory upload_urls field is present."""
        self.validate_required(self.upload_urls, 'upload_urls')

    def to_map(self):
        """Serialize this request into its wire-format dict."""
        return {
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'UploadURLs': self.upload_urls,
            'TemplateGroupId': self.template_group_id,
            'StorageLocation': self.storage_location,
            'UploadMetadatas': self.upload_metadatas,
            'Priority': self.priority,
            'MessageCallback': self.message_callback,
            'UserData': self.user_data,
            'AppId': self.app_id,
            'WorkflowId': self.workflow_id,
        }

    def from_map(self, map={}):
        """Populate this request from a wire-format dict and return self."""
        field_pairs = (('owner_id', 'OwnerId'),
                       ('resource_owner_account', 'ResourceOwnerAccount'),
                       ('resource_owner_id', 'ResourceOwnerId'),
                       ('upload_urls', 'UploadURLs'),
                       ('template_group_id', 'TemplateGroupId'),
                       ('storage_location', 'StorageLocation'),
                       ('upload_metadatas', 'UploadMetadatas'),
                       ('priority', 'Priority'),
                       ('message_callback', 'MessageCallback'),
                       ('user_data', 'UserData'),
                       ('app_id', 'AppId'),
                       ('workflow_id', 'WorkflowId'))
        for attr, key in field_pairs:
            setattr(self, attr, map.get(key))
        return self
class UploadMediaByURLResponse(TeaModel):
    """Response model for UploadMediaByURL: one upload job per source URL."""

    def __init__(self, request_id=None, upload_jobs=None):
        self.request_id = request_id
        # Bug fix: the upload_jobs argument was previously accepted but
        # discarded (the attribute was always reset to []). Honor the
        # caller's value while keeping [] as the default.
        self.upload_jobs = [] if upload_jobs is None else upload_jobs

    def validate(self):
        """Require mandatory fields and recurse into each job sub-model."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.upload_jobs, 'upload_jobs')
        if self.upload_jobs:
            for job in self.upload_jobs:
                if job:
                    job.validate()

    def to_map(self):
        """Serialize to the wire-format dict, mapping each job sub-model."""
        result = {'RequestId': self.request_id}
        if self.upload_jobs is not None:
            result['UploadJobs'] = [
                job.to_map() if job else None for job in self.upload_jobs]
        else:
            result['UploadJobs'] = None
        return result

    def from_map(self, map=None):
        """Populate from a wire-format dict and return self."""
        # Avoid the original's shared mutable default argument `map={}`.
        map = {} if map is None else map
        self.request_id = map.get('RequestId')
        raw = map.get('UploadJobs')
        if raw is not None:
            self.upload_jobs = [
                UploadMediaByURLResponseUploadJobs().from_map(k) for k in raw]
        else:
            self.upload_jobs = None
        return self
class UploadMediaByURLResponseUploadJobs(TeaModel):
    """One upload job: the job id and the source URL it was created for."""

    def __init__(self, job_id=None, source_url=None):
        self.job_id = job_id
        self.source_url = source_url

    def validate(self):
        """Both fields are mandatory."""
        for value, name in ((self.job_id, 'job_id'),
                            (self.source_url, 'source_url')):
            self.validate_required(value, name)

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'JobId': self.job_id,
            'SourceURL': self.source_url,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.job_id = map.get('JobId')
        self.source_url = map.get('SourceURL')
        return self
class UpdateVideoInfosRequest(TeaModel):
    """Request model for the UpdateVideoInfos API."""

    def __init__(self, owner_id=None, resource_owner_account=None,
                 resource_owner_id=None, update_content=None):
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.update_content = update_content  # required payload

    def validate(self):
        """Ensure the mandatory update_content field is present."""
        self.validate_required(self.update_content, 'update_content')

    def to_map(self):
        """Serialize this request into its wire-format dict."""
        return {
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'UpdateContent': self.update_content,
        }

    def from_map(self, map={}):
        """Populate this request from a wire-format dict and return self."""
        for attr, key in (('owner_id', 'OwnerId'),
                          ('resource_owner_account', 'ResourceOwnerAccount'),
                          ('resource_owner_id', 'ResourceOwnerId'),
                          ('update_content', 'UpdateContent')):
            setattr(self, attr, map.get(key))
        return self
class UpdateVideoInfosResponse(TeaModel):
    """Response model for UpdateVideoInfos.

    Carries the request id plus the video ids that did not exist and those
    that were forbidden to update.
    """

    def __init__(self, request_id=None, non_exist_video_ids=None,
                 forbidden_video_ids=None):
        self.request_id = request_id
        # Bug fix: the two list arguments were previously accepted but
        # silently discarded (the attributes were always reset to []).
        # Honor caller-supplied values while keeping [] as the default.
        self.non_exist_video_ids = (
            [] if non_exist_video_ids is None else non_exist_video_ids)
        self.forbidden_video_ids = (
            [] if forbidden_video_ids is None else forbidden_video_ids)

    def validate(self):
        """Require all mandatory response fields."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.non_exist_video_ids, 'non_exist_video_ids')
        self.validate_required(self.forbidden_video_ids, 'forbidden_video_ids')

    def to_map(self):
        """Serialize to the wire-format dict; a None list stays None."""
        result = {'RequestId': self.request_id}
        if self.non_exist_video_ids is not None:
            result['NonExistVideoIds'] = list(self.non_exist_video_ids)
        else:
            result['NonExistVideoIds'] = None
        if self.forbidden_video_ids is not None:
            result['ForbiddenVideoIds'] = list(self.forbidden_video_ids)
        else:
            result['ForbiddenVideoIds'] = None
        return result

    def from_map(self, map=None):
        """Populate from a wire-format dict and return self."""
        # Avoid the original's shared mutable default argument `map={}`.
        map = {} if map is None else map
        self.request_id = map.get('RequestId')
        raw = map.get('NonExistVideoIds')
        self.non_exist_video_ids = list(raw) if raw is not None else None
        raw = map.get('ForbiddenVideoIds')
        self.forbidden_video_ids = list(raw) if raw is not None else None
        return self
class SearchMediaRequest(TeaModel):
def __init__(self, owner_id=None, resource_owner_account=None, resource_owner_id=None, search_type=None, fields=None, match=None, sort_by=None, page_no=None, page_size=None, scroll_token=None, session_id=None):
self.owner_id = owner_id
self.resource_owner_account = resource_owner_account
self.resource_owner_id = resource_owner_id
self.search_type = search_type
self.fields = fields
self.match = match
self.sort_by = sort_by
self.page_no = page_no
self.page_size = page_size
self.scroll_token = scroll_token
self.session_id = session_id
| |
import logging
from datetime import datetime, timezone
from unittest.mock import MagicMock, ANY, call
import json
import pytest
import pandas as pd
from app.tasks import (
send_activated_email, import_analyze_store_list, generate_summary_stats,
send_report, extract_stats, init_list_analysis, update_stored_data,
send_monthly_reports, generate_diffs)
from app.lists import MailChimpImportError
from app.models import ListStats
def test_send_activated_email(mocker):
    """send_activated_email should delegate to send_email with the
    activated-email template and the recipient's email hash."""
    send_email_mock = mocker.patch('app.tasks.send_email')
    send_activated_email('<EMAIL>', 'foo')
    expected_context = {'title': ANY, 'email_hash': 'foo'}
    send_email_mock.assert_called_with(
        ANY, ['<EMAIL>'], 'activated-email.html', expected_context)
@pytest.mark.xfail(raises=MailChimpImportError, strict=True)
@pytest.mark.parametrize('user_email', [(None), ('<EMAIL>')])
def test_import_analyze_store_list_maichimpimporterror(mocker, user_email):
    """Tests that the import_analyze_store_list function fails gracefully when
    a MailChimpImportError occurs.

    Parametrized over user_email so both the anonymous and the
    user-requested paths are covered; strict xfail asserts the error is
    re-raised in both cases.
    """
    mocker.patch('app.tasks.MailChimpList')
    mocked_do_async_import = mocker.patch('app.tasks.do_async_import')
    # Simulate the import blowing up partway through.
    mocked_do_async_import.side_effect = MailChimpImportError('foo', 'bar')
    mocked_send_email = mocker.patch('app.tasks.send_email')
    mocked_os = mocker.patch('app.tasks.os')
    # Single environ.get call — presumably the admin error address, since it
    # shows up in the send_email recipients below; confirm against app.tasks.
    mocked_os.environ.get.side_effect = ['<EMAIL>']
    import_analyze_store_list(
        {'list_id': 'foo', 'total_count': 'bar', 'key': 'foo-bar1',
         'data_center': 'bar1'}, 1, user_email=user_email)
    if user_email:
        # A user asked for this import: an error email goes out.
        mocked_send_email.assert_called_with(
            ANY,
            ['<EMAIL>', '<EMAIL>'],
            'error-email.html',
            {'title': ANY,
             'error_details': 'bar'})
    else:
        # No requesting user: no email is sent.
        mocked_send_email.assert_not_called()
def test_import_analyze_store_list(
        mocker, fake_list_data, fake_calculation_results, mocked_mailchimp_list):
    """Tests the import_analyze_store_list method.

    Covers list construction from the request data, the async import
    steps, every calculation step, and the final wrapping of results in a
    ListStats instance.
    """
    mocked_mailchimp_list_instance = mocked_mailchimp_list.return_value
    mocked_do_async_import = mocker.patch('app.tasks.do_async_import')
    mocked_list_stats = mocker.patch('app.tasks.ListStats', spec=ListStats)
    list_stats = import_analyze_store_list(
        fake_list_data, fake_list_data['org_id'])
    mocked_mailchimp_list.assert_called_with(
        fake_list_data['list_id'], fake_list_data['total_count'],
        fake_list_data['key'], fake_list_data['data_center'])
    # Bug fix: assert_has_calls expects a list of call objects. The previous
    # code passed the two expected arguments positionally, so the second one
    # was consumed as the (truthy) any_order flag and the assertion was
    # vacuous.
    mocked_do_async_import.assert_has_calls([
        call(mocked_mailchimp_list_instance.import_list_members.return_value),
        call(mocked_mailchimp_list_instance.import_sub_activity.return_value)])
    mocked_mailchimp_list_instance.flatten.assert_called()
    mocked_mailchimp_list_instance.calc_list_breakdown.assert_called()
    mocked_mailchimp_list_instance.calc_open_rate.assert_called_with(
        fake_list_data['open_rate'])
    mocked_mailchimp_list_instance.calc_frequency.assert_called_with(
        fake_list_data['creation_timestamp'], fake_list_data['campaign_count'])
    mocked_mailchimp_list_instance.calc_histogram.assert_called()
    mocked_mailchimp_list_instance.calc_high_open_rate_pct.assert_called()
    mocked_mailchimp_list_instance.calc_cur_yr_stats.assert_called()
    assert isinstance(list_stats, ListStats)
    # Histogram bins are JSON-encoded before being handed to ListStats.
    mocked_list_stats.assert_called_with(
        **{k: (v if k != 'hist_bin_counts' else json.dumps(v))
           for k, v in fake_calculation_results.items()},
        list_id=fake_list_data['list_id'])
def test_import_analyze_store_list_store_results_in_db(  # pylint: disable=unused-argument
        mocker, fake_list_data, mocked_mailchimp_list):
    """Tests the import_analyze_store_list function when data
    is stored in the db."""
    mocker.patch('app.tasks.do_async_import')
    mocked_list_stats = mocker.patch('app.tasks.ListStats')
    mocked_email_list = mocker.patch('app.tasks.EmailList')
    mocked_db = mocker.patch('app.tasks.db')
    # Opting in to monthly updates drives the store-in-db branch.
    fake_list_data['monthly_updates'] = True
    import_analyze_store_list(fake_list_data, 'foo')
    # The list record is built from the submitted data plus the org id
    # passed to the task.
    mocked_email_list.assert_called_with(
        list_id=fake_list_data['list_id'],
        creation_timestamp=fake_list_data['creation_timestamp'],
        list_name=fake_list_data['list_name'],
        api_key=fake_list_data['key'],
        data_center=fake_list_data['data_center'],
        store_aggregates=fake_list_data['store_aggregates'],
        monthly_updates=fake_list_data['monthly_updates'],
        org_id='foo')
    # merge() upserts the list record; add() inserts the new stats row.
    mocked_db.session.merge.assert_called_with(mocked_email_list.return_value)
    mocked_db.session.add.assert_called_with(mocked_list_stats.return_value)
    mocked_db.session.commit.assert_called()
def test_import_analyze_store_list_store_results_in_db_exception(  # pylint: disable=unused-argument
        mocker, fake_list_data, mocked_mailchimp_list):
    """A commit failure while persisting import results should roll the
    session back and re-raise the exception."""
    for target in ('app.tasks.do_async_import', 'app.tasks.ListStats',
                   'app.tasks.EmailList'):
        mocker.patch(target)
    mocked_db = mocker.patch('app.tasks.db')
    mocked_db.session.commit.side_effect = Exception()
    fake_list_data['monthly_updates'] = True
    with pytest.raises(Exception):
        import_analyze_store_list(fake_list_data, 'foo')
    mocked_db.session.rollback.assert_called()
def test_generate_summary_stats_single_analysis(
        mocker, fake_list_stats_query_result_as_df,
        fake_list_stats_query_result_means):
    """Tests the generate_summary_stats function when passed a single analysis."""
    mocked_extract_stats = mocker.patch('app.tasks.extract_stats')
    mocked_extract_stats.return_value = {'foo': 1, 'bar': 2}
    mocked_list_stats = mocker.patch('app.tasks.ListStats')
    mocked_db = mocker.patch('app.tasks.db')
    mocked_pd_read_sql = mocker.patch('app.tasks.pd.read_sql')
    mocked_pd_read_sql.return_value = fake_list_stats_query_result_as_df
    list_stats, agg_stats = generate_summary_stats(['foo'])
    # One analysis in, so stats are extracted exactly once.
    mocked_extract_stats.assert_called_once()
    # The aggregate frame is read from the distinct, ordered ListStats query
    # statement, executed against the session's bind.
    mocked_pd_read_sql.assert_called_with(
        mocked_list_stats.query.filter.return_value.order_by.return_value
        .distinct.return_value.statement,
        mocked_db.session.bind)
    # Scalar stats come back wrapped in single-element lists.
    assert list_stats == {'foo': [1], 'bar': [2]}
    assert agg_stats == fake_list_stats_query_result_means
def test_generate_summary_stats_multiple_analyses(
        mocker, fake_list_stats_query_result_as_df,
        fake_list_stats_query_result_means):
    """Tests the generate_summary_stats function when passed two sets of analysis."""
    mocked_extract_stats = mocker.patch('app.tasks.extract_stats')
    mocked_extract_stats.return_value = {'foo': 1, 'bar': 2}
    mocked_db = mocker.patch('app.tasks.db')
    mocked_pd_read_sql = mocker.patch('app.tasks.pd.read_sql')
    # Simulate two result sets by stacking the fixture frame with distinct
    # row numbers.
    fake_list_stats_query_result_as_df = pd.concat([
        fake_list_stats_query_result_as_df.assign(row_number=1),
        fake_list_stats_query_result_as_df.assign(row_number=2)])
    mocked_pd_read_sql.return_value = fake_list_stats_query_result_as_df
    list_stats, agg_stats = generate_summary_stats(['foo', 'bar'])
    # Stats are extracted once per analysis, in order.
    mocked_extract_stats.assert_has_calls([call('foo'), call('bar')])
    mocked_pd_read_sql.assert_called_with(ANY, mocked_db.session.bind)
    assert list_stats == {'foo': [1, 1], 'bar': [2, 2]}
    # Aggregate values are duplicated, one entry per analysis.
    assert agg_stats == {
        k: [*v, *v] for k, v in
        fake_list_stats_query_result_means.items()
    }
def test_generate_diffs():
    """generate_diffs should express each stat's change between the two
    periods as a signed, formatted percentage string."""
    list_stats = {'subscribers': [1, 2], 'open_rate': [0, 0.5]}
    agg_stats = {'subscribers': [10, 5], 'open_rate': [0.3, 0.4]}
    expected = {
        'subscribers': ['+100.0%', '-50.0%'],
        'open_rate': ['+0.0%', '+33.3%'],
    }
    assert generate_diffs(list_stats, agg_stats) == expected
def test_send_report_no_prev_month(mocker, fake_calculation_results):
    """With only one period of stats, send_report should not compute diffs."""
    mocked_generate_diffs = mocker.patch('app.tasks.generate_diffs')
    for target in ('draw_bar', 'draw_stacked_horizontal_bar',
                   'draw_histogram', 'draw_donuts', 'send_email'):
        mocker.patch('app.tasks.' + target)
    single_period_stats = {
        key: [val] for key, val in fake_calculation_results.items()}
    send_report(single_period_stats, single_period_stats, '1', 'foo',
                ['<EMAIL>'])
    mocked_generate_diffs.assert_not_called()
def test_send_report_has_prev_month(mocker, fake_calculation_results):
    """With two periods of stats, send_report should compute the diffs."""
    mocked_generate_diffs = mocker.patch('app.tasks.generate_diffs')
    for target in ('draw_bar', 'draw_stacked_horizontal_bar',
                   'draw_histogram', 'draw_donuts', 'send_email'):
        mocker.patch('app.tasks.' + target)
    two_period_stats = {
        key: [val, val] for key, val in fake_calculation_results.items()}
    send_report(two_period_stats, two_period_stats, '1', 'foo',
                ['<EMAIL>'])
    mocked_generate_diffs.assert_called_with(two_period_stats, two_period_stats)
def test_send_report(mocker, fake_calculation_results):
    """Tests the send_report function."""
    mocked_generate_diffs = mocker.patch('app.tasks.generate_diffs')
    mocked_generate_diffs.return_value = {
        k: [v, v] for k, v in fake_calculation_results.items()
    }
    mocked_draw_bar = mocker.patch('app.tasks.draw_bar')
    mocked_draw_stacked_horizontal_bar = mocker.patch(
        'app.tasks.draw_stacked_horizontal_bar')
    mocked_draw_histogram = mocker.patch('app.tasks.draw_histogram')
    mocked_draw_donuts = mocker.patch('app.tasks.draw_donuts')
    mocked_send_email = mocker.patch('app.tasks.send_email')
    mocked_os = mocker.patch('app.tasks.os')
    # Single environ.get call — presumably the SES configuration set name,
    # since 'bar' reappears in the send_email kwargs below; confirm in
    # app.tasks.
    mocked_os.environ.get.side_effect = ['bar']
    fake_stats = {k: [v, v] for k, v in fake_calculation_results.items()}
    send_report(fake_stats, fake_stats, '1', 'foo', ['<EMAIL>'])
    # Two bar charts: plain values, then percentage-scaled values.
    mocked_draw_bar.assert_has_calls([
        call(ANY, [2, 2, 2, 2], [2, 2], ANY, ANY),
        call(ANY, [0.5, 0.5, 0.5, 0.5], [0.5, 0.5], ANY, ANY, percentage_values=True)
    ])
    # Stacked bar: the list-composition breakdown by subscriber status.
    mocked_draw_stacked_horizontal_bar.assert_called_with(
        ANY,
        [('Subscribed %', [0.2, 0.2, 0.2, 0.2]),
         ('Unsubscribed %', [0.2, 0.2, 0.2, 0.2]),
         ('Cleaned %', [0.2, 0.2, 0.2, 0.2]),
         ('Pending %', [0.1, 0.1, 0.1, 0.1])],
        [0.2, 0.2], ANY, ANY)
    mocked_draw_histogram.assert_called_with(
        ANY, {'title': 'Subscribers', 'vals': [0.1, 0.2, 0.3]}, ANY, ANY, ANY)
    mocked_draw_donuts.assert_has_calls([
        call(ANY,
             [(ANY, [0.1, 0.9]), (ANY, [0.1, 0.9]),
              (ANY, [0.1, 0.9]), (ANY, [0.1, 0.9])],
             [0.1, 0.1], ANY, ANY),
        call(ANY,
             [(ANY, [0.1, 0.9]), (ANY, [0.1, 0.9]),
              (ANY, [0.1, 0.9]), (ANY, [0.1, 0.9])],
             [0.1, 0.1], ANY, ANY)
    ])
    mocked_send_email.assert_called_with(
        ANY, ['<EMAIL>'], ANY, {
            'title': 'We\'ve analyzed the foo list!',
            'list_id': '1',
            'epoch_time': ANY
        }, configuration_set_name='bar')
def test_extract_stats(fake_calculation_results):
    """extract_stats should rebuild the stats dict from a list-stats object,
    decoding the JSON-encoded histogram bins back into Python values."""
    fake_calculation_results.pop('frequency')
    attrs = {}
    for key, val in fake_calculation_results.items():
        attrs[key] = json.dumps(val) if key == 'hist_bin_counts' else val
    fake_list_object = MagicMock(**attrs)
    assert extract_stats(fake_list_object) == fake_calculation_results
def test_init_list_analysis_existing_list_update_privacy_options(
        mocker, fake_list_data):
    """Tests the init_list_analysis function when the list exists in
    the database. Also tests that monthly_updates and store_aggregates
    are updated if they differ from that stored in the database."""
    mocked_list_stats = mocker.patch('app.tasks.ListStats')
    # The most recent stored analyses, as returned by the limit query.
    mocked_recent_analyses = (
        mocked_list_stats.query.filter_by.return_value.order_by
        .return_value.limit.return_value.all.return_value)
    mocked_desc = mocker.patch('app.tasks.desc')
    mocked_email_list = mocker.patch('app.tasks.EmailList')
    mocked_list_object = (
        mocked_email_list.query.filter_by.return_value.first.return_value)
    # Stored flags differ from the incoming data, forcing an update.
    mocked_list_object.monthly_updates = True
    mocked_list_object.store_aggregates = False
    mocked_db = mocker.patch('app.tasks.db')
    mocked_generate_summary_stats = mocker.patch(
        'app.tasks.generate_summary_stats')
    mocked_generate_summary_stats.return_value = 'foo', 'bar'
    mocked_send_report = mocker.patch('app.tasks.send_report')
    init_list_analysis({'email': '<EMAIL>'}, fake_list_data, 1)
    # Stored analyses are looked up by list id, ordered newest-first.
    mocked_list_stats.query.filter_by.assert_called_with(
        list_id=fake_list_data['list_id'])
    mocked_list_stats.query.filter_by.return_value.order_by.assert_called_with(
        mocked_desc.return_value)
    mocked_email_list.query.filter_by.assert_called_with(
        list_id=fake_list_data['list_id'])
    # The updated list record is merged and committed.
    mocked_db.session.merge.assert_called_with(mocked_list_object)
    mocked_db.session.commit.assert_called()
    mocked_generate_summary_stats.assert_called_with(mocked_recent_analyses)
    mocked_send_report.assert_called_with(
        'foo', 'bar', fake_list_data['list_id'], fake_list_data['list_name'],
        ['<EMAIL>'])
def test_init_analysis_existing_list_db_error(mocker, fake_list_data):
    """A database failure while updating an existing list should roll the
    session back and propagate the exception."""
    mocker.patch('app.tasks.ListStats')
    mocked_email_list = mocker.patch('app.tasks.EmailList')
    list_record = (
        mocked_email_list.query.filter_by.return_value.first.return_value)
    list_record.monthly_updates = True
    list_record.store_aggregates = False
    mocked_db = mocker.patch('app.tasks.db')
    mocked_db.session.commit.side_effect = Exception()
    with pytest.raises(Exception):
        init_list_analysis({'email': '<EMAIL>'}, fake_list_data, 1)
    mocked_db.session.rollback.assert_called()
def test_init_list_analysis_new_list_no_store(mocker, fake_list_data):
    """A list absent from the database whose user declined storage is
    imported and analyzed from the raw MailChimp data."""
    stats_mock = mocker.patch('app.tasks.ListStats')
    (stats_mock.query.filter_by.return_value.order_by
     .return_value.limit.return_value.all.return_value) = None
    import_mock = mocker.patch('app.tasks.import_analyze_store_list')
    list_model_mock = mocker.patch('app.tasks.EmailList')
    list_model_mock.query.filter_by.return_value.first.return_value = None
    mocker.patch(
        'app.tasks.generate_summary_stats', return_value=('foo', 'bar'))
    mocker.patch('app.tasks.send_report')
    init_list_analysis({'email': '<EMAIL>'}, fake_list_data, 1)
    import_mock.assert_called_with(fake_list_data, 1, '<EMAIL>')
def test_init_list_analysis_new_list_monthly_updates(mocker, fake_list_data):
    """A new, stored list with monthly updates requested gets the
    requesting user associated with the stored list record."""
    stats_mock = mocker.patch('app.tasks.ListStats')
    (stats_mock.query.filter_by.return_value.order_by
     .return_value.limit.return_value.all.return_value) = None
    mocker.patch('app.tasks.import_analyze_store_list')
    list_model_mock = mocker.patch('app.tasks.EmailList')
    list_record = (
        list_model_mock.query.filter_by.return_value.first.return_value)
    list_record.monthly_updates = True
    list_record.store_aggregates = False
    associate_mock = mocker.patch('app.tasks.associate_user_with_list')
    mocker.patch(
        'app.tasks.generate_summary_stats', return_value=('foo', 'bar'))
    mocker.patch('app.tasks.send_report')
    fake_list_data['monthly_updates'] = True
    init_list_analysis(
        {'email': '<EMAIL>', 'user_id': 2}, fake_list_data, 1)
    associate_mock.assert_called_with(2, list_record)
def test_update_stored_data_empty_db(mocker, caplog):
    """update_stored_data logs a message when no lists are stored."""
    stats_mock = mocker.patch('app.tasks.ListStats')
    (stats_mock.query.order_by.return_value.distinct
     .return_value.all.return_value) = None
    update_stored_data()
    assert 'No lists in the database!' in caplog.text
def test_update_stored_data_no_old_analyses(mocker, caplog):
    """update_stored_data logs and does nothing when no stored analysis
    is old enough to refresh."""
    stats_mock = mocker.patch('app.tasks.ListStats')
    fresh_analysis = MagicMock(
        analysis_timestamp=datetime.now(timezone.utc))
    (stats_mock.query.order_by.return_value.distinct
     .return_value.all.return_value) = [fresh_analysis]
    caplog.set_level(logging.INFO)
    update_stored_data()
    assert 'No old lists to update' in caplog.text
def test_update_stored_data(mocker, fake_list_data):
    """Tests the happy path of update_stored_data: an old analysis is
    refreshed from the MailChimp API and re-imported."""
    mocked_list_stats = mocker.patch('app.tasks.ListStats')
    # Stored list record; the fixture's 'key' entry maps to the model's
    # 'api_key' attribute.
    mocked_list_to_update = MagicMock(
        **{('api_key' if k == 'key' else k): v
           for k, v in fake_list_data.items()}
    )
    # An analysis from 2000 — old enough to need updating.
    mocked_analysis = MagicMock(
        analysis_timestamp=datetime(2000, 1, 1, tzinfo=timezone.utc),
        list=mocked_list_to_update,
        list_id=fake_list_data['list_id'])
    (mocked_list_stats.query.order_by.return_value.distinct
     .return_value.all.return_value) = [mocked_analysis]
    mocked_requests = mocker.patch('app.tasks.requests')
    mocked_import_analyze_store_list = mocker.patch(
        'app.tasks.import_analyze_store_list')
    # Canned MailChimp stats payload returned by the API call.
    mocked_requests.get.return_value.json.return_value = {
        'stats': {
            'member_count': 5,
            'unsubscribe_count': 6,
            'cleaned_count': 7,
            'open_rate': 1,
            'campaign_count': 10
        }
    }
    update_stored_data()
    # The task must request only the stat fields it needs, authenticating
    # with the stored API key.
    mocked_requests.get.assert_called_with(
        'https://bar1.api.mailchimp.com/3.0/lists/foo',
        params=(
            ('fields', 'stats.member_count,'
                       'stats.unsubscribe_count,'
                       'stats.cleaned_count,'
                       'stats.open_rate,'
                       'stats.campaign_count'),
        ),
        auth=('shorenstein', '<PASSWORD>'))
    # Re-import payload: fixture values plus fresh stats; total_count (18)
    # is presumably member + unsubscribe + cleaned counts (5+6+7) —
    # NOTE(review): confirm against the app.tasks implementation.
    mocked_import_analyze_store_list.assert_called_with(
        {'list_id': 'foo',
         'list_name': 'bar',
         'key': 'foo-bar1',
         'data_center': 'bar1',
         'monthly_updates': False,
         'store_aggregates': False,
         'total_count': 18,
         'open_rate': 1,
         'creation_timestamp': 'quux',
         'campaign_count': 10},
        1)
def test_update_stored_data_keyerror(mocker, fake_list_data, caplog):
    """A MailChimp response without the expected keys raises
    MailChimpImportError and logs that the list could not be updated."""
    stats_mock = mocker.patch('app.tasks.ListStats')
    attrs = {('api_key' if k == 'key' else k): v
             for k, v in fake_list_data.items()}
    stale_list = MagicMock(**attrs)
    stale_analysis = MagicMock(
        analysis_timestamp=datetime(2000, 1, 1, tzinfo=timezone.utc),
        list=stale_list,
        list_id=fake_list_data['list_id'])
    (stats_mock.query.order_by.return_value.distinct
     .return_value.all.return_value) = [stale_analysis]
    requests_mock = mocker.patch('app.tasks.requests')
    requests_mock.get.return_value.json.return_value = {}
    with pytest.raises(MailChimpImportError):
        update_stored_data()
    assert ('Error updating list foo. API key is no longer valid or list '
            'no longer exists.') in caplog.text
def test_update_stored_data_import_error(mocker, fake_list_data, caplog):
"""Tests the update_stored_data function when the list import raises an error."""
mocked_list_stats = mocker.patch('app.tasks.ListStats')
mocked_list_to_update = MagicMock(
**{('api_key' if k == 'key' else k): v
for k, v in fake_list_data.items()}
)
mocked_analysis = MagicMock(
analysis_timestamp=datetime(2000, 1, 1, tzinfo=timezone.utc),
list=mocked_list_to_update,
list_id=fake_list_data['list_id'])
(mocked_list_stats.query.order_by.return_value.distinct
.return_value.all.return_value) = [mocked_analysis]
mocked_requests = mocker.patch('app.tasks.requests')
mocked_import_analyze_store_list = mocker.patch(
'app.tasks.import_analyze_store_list')
mocked_import_analyze_store_list.side_effect = MailChimpImportError(
'foo', 'bar')
mocked_requests.get.return_value.json.return_value = {
'stats': {
'member_count': 5,
'unsubscribe_count': 6,
'cleaned_count': 7,
'open_rate': 1,
'campaign_count': 10
}
}
with pytest.raises(MailChimpImportError):
update_stored_data()
assert 'Error importing new data for list foo.' in | |
people.</p>
<p>The companies and the few who earn the big money in MLM are NOT going to tell you the real story. FINALLY, there is someone who has the courage to cut through the hype and lies and tell the TRUTH about MLM.</p>
<p>HERE'S GOOD NEWS</p>
<p>There IS an alternative to MLM that WORKS, and works BIG! If you haven't yet abandoned your dreams, then you need to see this. Earning the kind of income you've dreamed about is easier than you think!</p>
<p>With your permission, I'd like to send you a brief letter that will tell you WHY MLM doesn't work for most people and will then introduce you to something so new and refreshing that you'll wonder why you haven't heard of this before.</p>
<p>I promise that there will be NO unwanted follow up, NO sales pitch, no one will call you, and your email address will only be used to send you the information. Period.</p>
<p>To receive this free, life-changing information, simply click Reply, type "Send Info" in the Subject box and hit Send. I'll get the information to you within 24 hours. Just look for the words MLM WALL OF SHAME in your Inbox.</p>
<p>Cordially,</p>
<p>Siddhi</p>
<p>P.S. Someone recently sent the letter to me and it has been the most eye-opening, financially beneficial information I have ever received. I honestly believe that you will feel the same way once you've read it. And it's FREE!</p>
<hr>
<p>This email is NEVER sent unsolicited. THIS IS NOT "SPAM". You are receiving this email because you EXPLICITLY signed yourself up to our list with our online signup form or through use of our FFA Links Page and E-MailDOM systems, which have EXPLICIT terms of use which state that through its use you agree to receive our emailings. You may also be a member of a Altra Computer Systems list or one of many numerous FREE Marketing Services and as such you agreed when you signed up for such list that you would also be receiving this emailing.</p>
<p>Due to the above, this email message cannot be considered unsolicitated, or spam.</p>
<hr>
"""
else:
spammy_html = """\
<center>
<h3>
<font color="blue">
<b>
The Need For Safety Is Real In 2002, You Might Only Get One Chance - Be Ready!
<p>
Free Shipping & Handling Within The (USA) If You Order Before May 25, 2002!
<p>
3 Day Super Sale, Now Until May 7, 2002! Save Up To $30.00 On Some Items!
</b>
</font>
</h3>
</center>
<p>
IT'S GETTING TO BE SPRING AGAIN, PROTECT YOURSELF AS YOU WALK,<br>
JOG AND EXERCISE OUTSIDE. ALSO PROTECT YOUR LOVED ONES AS<br>
THEY RETURN HOME FROM COLLEGE!<br>
<p>
* LEGAL PROTECTION FOR COLLEGE STUDENTS!<br>
* GREAT UP'COMING OUTDOOR PROTECTION GIFTS!<br>
* THERE IS NOTHING WORTH MORE PROTECTING THAN LIFE!<br>
* OUR STUN DEVICES & PEPPER PRODUCTS ARE LEGAL PROTECTION!
<p>
<b>
<font color="red">
JOIN THE WAR ON CRIME!
</b>
</font>
<p>
STUN GUNS AND BATONS
<p>
EFFECTIVE - SAFE - NONLETHAL
<p>
PROTECT YOUR LOVED ONES AND YOURSELF
<p>
No matter who you are, no matter what City or Town you live in,<br>
if you live in America, you will be touched by crime.
<p>
You hear about it on TV. You read about it in the newspaper.<br>
It's no secret that crime is a major problem in the U.S. today.<br>
Criminals are finding it easier to commit crimes all the time.
<p>
Weapons are readily available. Our cities' police forces have<br>
more work than they can handle. Even if these criminal are<br>
caught, they won't be spending long in our nation's overcrowded<br>
jails. And while lawmakers are well aware of the crime problem,<br>
they don't seem to have any effective answers.
<p>
Our Email Address: <a
href="mailto:<EMAIL>"><EMAIL></a>
<p>
INTERESTED:
<p>
You will be protecting yourself within 7 days! Don't Wait,<br>
visit our web page below, and join The War On Crime!
<p>
*****************<br>
<a
href="http://www.geocities.com/realprotection_20022003/">http://www.geocities.com/realprotection_20022003/</a><br>
*****************
<p>
Well, there is an effective answer. Take responsibility for<br>
your own security. Our site has a variety of quality personal<br>
security products. Visit our site, choose the personal security<br>
products that are right for you. Use them, and join the war on
crime!
<p>
FREE PEPPER SPRAY WITH ANY STUN UNIT PURCHASE.<br>
(A Value of $15.95)
<p>
We Ship Orders Within 5 To 7 Days, To Every State In The U.S.A.<br>
by UPS, FEDEX, or U.S. POSTAL SERVICE. Visa, MasterCard, American<br>
Express & Debt Card Gladly Accepted.
<p>
Ask yourself this question, if you don't help your loved ones,
who will?
<p>
INTERESTED:
<p>
*****************<br>
<a
href="http://www.geocities.com/realprotection_20022003/">http://www.geocities.com/realprotection_20022003/</a><br>
*****************
<p>
___The Stun Monster 625,000 Volts ($86.95)<br>
___The Z-Force Slim Style 300,000 Volts ($64.95)<br>
___The StunMaster 300,000 Volts Straight ($59.95)<br>
___The StunMaster 300,000 Volts Curb ($59.95)<br>
___The StunMaster 200,000 Volts Straight ($49.95)<br>
___The StunMaster 200,000 Volts Curb ($49.95)<br>
___The StunBaton 500,000 Volts ($89.95)<br>
___The StunBaton 300,000 Volts ($79.95)<br>
___Pen Knife (One $12.50, Two Or More $9.00)<br>
___Wildfire Pepper Spray (One $15.95, Two Or More $11.75)
<p>
___Add $5.75 For Shipping & Handling Charge.
<p>
To Order by postal mail, please send to the below address.<br>
Make payable to Mega Safety Technology.
<p>
Mega Safety Technology<br>
3215 Merrimac Ave.<br>
Dayton, Ohio 45405<br>
Our Email Address: <a
href="mailto:<EMAIL>"><EMAIL></a>
<p>
Order by 24 Hour Fax!!! 775-257-6657.
<p>
*****<br>
<b><font color="red">Important Credit Card Information! Please Read Below!</b></font>
<br><br>
* Credit Card Address, City, State and Zip Code, must match
billing address to be processed.
<br><br>
CHECK____ MONEYORDER____ VISA____ MASTERCARD____ AmericanExpress___
Debt Card___
<br><br>
Name_______________________________________________________<br>
(As it appears on Check or Credit Card)
<br><br>
Address____________________________________________________<br>
(As it appears on Check or Credit Card)
<br><br>
___________________________________________________<br>
City,State,Zip(As it appears on Check or Credit Card)
<br><br>
___________________________________________________<br>
Country
<br><br>
___________________________________________________<br>
(Credit Card Number)
<br><br>
Expiration Month_____ Year_____
<br><br>
___________________________________________________<br>
Authorized Signature
<br><br>
<b>
*****IMPORTANT NOTE*****
</b>
<br><br>
If Shipping Address Is Different From The Billing Address Above,
Please Fill Out Information Below.
<br><br>
Shipping Name______________________________________________
<br><br>
Shipping Address___________________________________________
<br><br>
___________________________________________________________<br>
Shipping City,State,Zip
<br><br>
___________________________________________________________<br>
Country
<br><br>
___________________________________________________________<br>
Email Address & Phone Number(Please Write Neat)
"""
return spammy_html
def mime_headers(mime_multipart, mime_msg_id, mime_xmailer, mime_timestamp, mime_subject, mime_from_header, mime_to_header, mime_importance, mime_priority):
    """Build a MIMEMultipart message and populate its standard headers.

    Args:
        mime_multipart: multipart subtype for MIMEMultipart
            (e.g. 'mixed' or 'alternative').
        mime_msg_id: value for the Message-Id header.
        mime_xmailer: value for the X-Mailer header.
        mime_timestamp: value for the Date header.
        mime_subject: value for the Subject header.
        mime_from_header: value for the From header.
        mime_to_header: value for the To header.
        mime_importance: value for the Importance header.
        mime_priority: value for the X-Priority header.

    Returns:
        The MIMEMultipart message with all headers set.

    Exits the process via sys.exit if header construction fails.
    """
    logging.info("Generating MIME headers")
    try:
        mime_msg = MIMEMultipart(mime_multipart)
        mime_msg['Date'] = mime_timestamp
        mime_msg['Subject'] = mime_subject
        mime_msg['From'] = mime_from_header
        mime_msg['To'] = mime_to_header
        mime_msg['Message-Id'] = mime_msg_id
        mime_msg['Importance'] = mime_importance
        mime_msg['X-Priority'] = mime_priority
        mime_msg['X-Mailer'] = mime_xmailer
    # 'except Exception, exc' is Python-2-only syntax; the 'as' form is
    # valid on Python 2.6+ and Python 3 (PEP 3110).
    except Exception as exc:
        logging.critical("Adding MIME headers failed: %s\r\nExiting." % str(exc))
        sys.exit("Adding MIME headers failed: %s\r\nExiting." % str(exc))
    return mime_msg
def text_mime(text_msg, mime_text, zip_text, url_text, ssn_text, text_charset):
    """Attach a text/plain body part to *text_msg*.

    Optional canned test fragments are appended to *mime_text* before the
    part is built.

    Args:
        text_msg: MIME message to attach the plain-text part to.
        mime_text: base body text.
        zip_text: if truthy, append a note pointing at a password-protected
            attachment.
        url_text: if truthy, append a suspicious test URL.
        ssn_text: if truthy, append fake SSN-like numbers (DLP test data).
        text_charset: charset used to encode the body.

    Returns:
        *text_msg* with the text part attached.

    Exits the process via sys.exit if building the part fails.
    """
    logging.info("Generating text body")
    zip_mime_text = ""
    ssn_mime_text = ""
    url_mime_text = "\r\n"
    try:
        if zip_text:
            logging.info("Adding ZIP text to text body")
            zip_mime_text = '\r\nPlease see the attached.\r\nIf needed, password = "<PASSWORD>"\r\n'
        if ssn_text:
            logging.info("Adding SSN text to text body")
            ssn_mime_text = '\r\nHave some SSN numbers:\r\n623-57-9564\r\nSSN 215-79-8735\r\n544 71 7243\r\n112968357\r\n'
        if url_text:
            logging.info("Adding URL text to text body")
            url_mime_text = "\r\nFor awesome stuff and free candy go to http://tapdemo.evilscheme.org/files/tapdemo_313533343139383733322e3939.docx\r\nWe promise it's totally safe!\r\n"
        mime_text_body = mime_text + ssn_mime_text + zip_mime_text + url_mime_text
        text_part = MIMEText(mime_text_body.encode(text_charset), 'plain', text_charset)
        text_msg.attach(text_part)
    # Python-2-only 'except Exception, exc' replaced with the 'as' form,
    # valid on Python 2.6+ and Python 3 (PEP 3110).
    except Exception as exc:
        logging.critical("Adding text body failed: %s\r\nExiting." % str(exc))
        sys.exit("Adding text body failed: %s\r\nExiting." % str(exc))
    return text_msg
def html_mime(html_msg, mime_html_text, zip_html, url_html, ssn_html, html_charset):
    """Attach a text/html body part to *html_msg*.

    Mirrors text_mime: wraps *mime_html_text* in a minimal <html><body>
    skeleton and optionally appends canned HTML test fragments.

    Args:
        html_msg: MIME message to attach the HTML part to.
        mime_html_text: base HTML body markup.
        zip_html: if truthy, append a note pointing at a password-protected
            attachment.
        url_html: if truthy, append a suspicious test link.
        ssn_html: if truthy, append fake SSN-like numbers (DLP test data).
        html_charset: charset used to encode the body.

    Returns:
        *html_msg* with the HTML part attached.

    Exits the process via sys.exit if building the part fails.
    """
    logging.info("Generating HTML body")
    zip_mime_html = "\r\n"
    ssn_mime_html = ""
    url_mime_html = ""
    if zip_html:
        logging.info("Adding ZIP text to HTML body")
        zip_mime_html = '\r\n        <p>Please see the attached.<br>\r\n        If needed, password = "<PASSWORD>"</p>\r\n'
    if ssn_html:
        logging.info("Adding SSN text to HTML body")
        ssn_mime_html = '\r\n        <p>Have some SSN numbers:<br>\r\n        623-57-9564<br>\r\n        SSN 215-79-8735<br>\r\n        544 71 7243<br>\r\n        112968357</p>\r\n'
    if url_html:
        logging.info("Adding URL text to HTML body")
        url_mime_html = '        <p>For awesome stuff and free candy go to <a href="http://tapdemo.evilscheme.org/files/tapdemo_313533343139383733322e3939.docx">totallysafe.unmarkedvan.com</a></p>\r\n'
    try:
        mime_html1 = """\
<html>
<body>
"""
        mime_html2 = mime_html_text + ssn_mime_html + zip_mime_html + url_mime_html
        mime_html3 = """\
</body>
</html>
"""
        mime_html_body = mime_html1 + "\r\n" + mime_html2 + "\r\n" + mime_html3
        html_part = MIMEText(mime_html_body.encode(html_charset), 'html', html_charset)
        html_msg.attach(html_part)
    # Python-2-only 'except Exception, exc' replaced with the 'as' form,
    # valid on Python 2.6+ and Python 3 (PEP 3110).
    except Exception as exc:
        logging.critical("Adding html body failed: %s\r\nExiting." % str(exc))
        sys.exit("Adding html body failed: %s\r\nExiting." % str(exc))
    return html_msg
def eicar(eicar_msg):
    """Attach the EICAR antivirus test string as a base64 attachment.

    Args:
        eicar_msg: MIME message to attach the test file to.

    Returns:
        *eicar_msg* with an 'eicar.com.txt' attachment added.

    Exits the process via sys.exit if the attachment fails.
    """
    logging.info("Attaching EICAR virus")
    try:
        # Pre-encoded base64 payload (placeholder in this copy).
        eicar_base64 = '<KEY>'
        eicar_part = MIMEBase('application', 'octet-stream')
        eicar_part.set_payload(eicar_base64)
        eicar_part.add_header('Content-Disposition', 'attachment; filename="eicar.com.txt"')
        eicar_part.add_header('Content-Transfer-Encoding', 'base64')
        eicar_msg.attach(eicar_part)
    # Python-2-only 'except Exception, exc' replaced with the 'as' form,
    # valid on Python 2.6+ and Python 3 (PEP 3110).
    except Exception as exc:
        logging.critical("Adding EICAR virus failed: %s\r\nExiting." % str(exc))
        sys.exit("Adding EICAR virus failed: %s\r\nExiting." % str(exc))
    return eicar_msg
def pass_zip(zip_msg):
    """Attach a pre-encoded (password-protected) ZIP file.

    The deliberately suspicious '../test.zip' filename is part of this
    tool's test payload and is preserved as-is.

    Args:
        zip_msg: MIME message to attach the ZIP to.

    Returns:
        *zip_msg* with the ZIP attachment added.

    Exits the process via sys.exit if the attachment fails.
    """
    logging.info("Attaching ZIP file")
    try:
        # Pre-encoded base64 payload (placeholder in this copy).
        zip_base64 = '<KEY>'
        zip_part = MIMEBase('application', 'octet-stream')
        zip_part.set_payload(zip_base64)
        zip_part.add_header('Content-Transfer-Encoding', 'base64')
        zip_part.add_header('Content-Disposition', 'attachment; filename="../test.zip"')
        zip_msg.attach(zip_part)
    # Python-2-only 'except Exception, exc' replaced with the 'as' form,
    # valid on Python 2.6+ and Python 3 (PEP 3110).
    except Exception as exc:
        logging.critical("Adding zip file failed: %s\r\nExiting." % str(exc))
        sys.exit("Adding zip file failed: %s\r\nExiting." % str(exc))
    return zip_msg
def attach_file(attach_msg, file_attach):
    """Guess the MIME type of *file_attach* and attach it to *attach_msg*.

    Text, image, and audio files get the matching MIME part class; anything
    else (or a compressed file) is attached as base64-encoded
    application/octet-stream.

    Args:
        attach_msg: MIME message to attach the file to.
        file_attach: path of the file to attach (also used as the
            attachment filename).

    Returns:
        *attach_msg* with the file attached.

    Exits the process via sys.exit if attaching fails.
    """
    try:
        ctype, encoding = mimetypes.guess_type(file_attach)
        if ctype is None or encoding is not None:
            # Unknown type, or a compressed file: fall back to a generic
            # binary attachment.
            ctype = 'application/octet-stream'
        logging.info("Attaching " + file_attach)
        logging.info(file_attach + " MIME type = " + ctype)
        maintype, subtype = ctype.split('/', 1)
        # 'with' blocks close the file even if part construction raises
        # (the original leaked the handle on error).
        if maintype == 'text':
            with open(file_attach, 'r') as af:
                attach_part = MIMEText(af.read(), _subtype=subtype)
        elif maintype == 'image':
            with open(file_attach, 'rb') as af:
                attach_part = MIMEImage(af.read(), _subtype=subtype)
        elif maintype == 'audio':
            with open(file_attach, 'rb') as af:
                attach_part = MIMEAudio(af.read(), _subtype=subtype)
        else:
            with open(file_attach, 'rb') as af:
                attach_part = MIMEBase(maintype, subtype)
                attach_part.set_payload(af.read())
            encoders.encode_base64(attach_part)
        attach_part.add_header('Content-Disposition', 'attachment', filename=file_attach)
        attach_msg.attach(attach_part)
    # Python-2-only 'except Exception, exc' replaced with the 'as' form,
    # valid on Python 2.6+ and Python 3 (PEP 3110).
    except Exception as exc:
        logging.critical("Adding %s file failed: %s\r\nExiting." % (file_attach, str(exc)))
        sys.exit("Adding %s file failed: %s\r\nExiting." % (file_attach, str(exc)))
    return attach_msg
def try_tls(tls_serv):
    """Upgrade an SMTP connection to TLS via STARTTLS.

    Args:
        tls_serv: an smtplib.SMTP-like object exposing starttls().

    Exits the process via sys.exit if the TLS negotiation fails.
    """
    logging.info("Trying TLS")
    try:
        tls_serv.starttls()
    # Python-2-only 'except Exception, exc' replaced with the 'as' form,
    # valid on Python 2.6+ and Python 3 (PEP 3110).
    except Exception as exc:
        logging.critical("Email failed: %s\r\nExiting." % str(exc))
        sys.exit("Email failed: %s\r\nExiting." % str(exc))
def send_email(send_target, send_port, send_sender, send_recipient, send_body, send_tls):
logging.info("Sending | |
<filename>simulation_4_RN.py
from os import listdir
from os.path import isfile, join
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Concatenate, Lambda, Average, Dropout
# Relation Network functions.
def get_dense(n, units):
    """Create a list of n fully-connected ReLU layers of the given width."""
    return [Dense(units, activation='relu') for _ in range(n)]
def get_MLP(n, denses):
    """Compose the first n layers of *denses* into a single callable MLP."""
    def g(x):
        out = x
        for idx in range(n):
            out = denses[idx](out)
        return out
    return g
def dropout_dense(x, units):
    """Run x through a ReLU Dense layer of `units`, then 50% dropout."""
    activated = Dense(units, activation='relu')(x)
    return Dropout(0.5)(activated)
def build_tag(conv):
    """Build an (x, y) coordinate tag in [-1, 1] for every spatial position
    of the square feature map *conv*, tiled across the batch dimension."""
    d = K.int_shape(conv)[2]
    coords = np.zeros((d, d, 2))
    for row in range(d):
        for col in range(d):
            # Normalize each spatial index to the range [-1, 1].
            coords[row, col, 0] = float(row) / (d - 1) * 2 - 1
            coords[row, col, 1] = float(col) / (d - 1) * 2 - 1
    tag = K.expand_dims(K.variable(coords), axis=0)
    batch_size = K.shape(conv)[0]
    return K.tile(tag, [batch_size, 1, 1, 1])
def slice_1(t):
    """Take the first slice along axis 1 of a rank-4 tensor."""
    return t[:, 0]
def slice_2(t):
    """Drop the first slice along axis 1 of a rank-4 tensor."""
    return t[:, 1:]
def slice_3(t):
    """Take the first slice along axis 1 of a rank-3 tensor."""
    return t[:, 0]
def slice_4(t):
    """Drop the first slice along axis 1 of a rank-3 tensor."""
    return t[:, 1:]
def make_ResNet50_relnet(
        dataset='SVRT',
        resnet_layer='last_size_8',
        trainable=False,
        secondary_outputs=True):
    """Build a Relation Network head on top of a ResNet50 backbone.

    Args:
        dataset: 'SVRT' or 'sort-of-clevr'; selects the question input size
            and the answer head.
        resnet_layer: which ResNet50 activation feeds the relation head
            ('last_size_8' -> conv4_block6_out, 'last_size_4' ->
            conv5_block3_out).
        trainable: whether the ResNet50 backbone weights are trainable.
        secondary_outputs: if True, add a second sigmoid head 'rel_pos'.

    Returns:
        (base_model, model): the ResNet50 backbone and the full model.

    Raises:
        ValueError: for an unsupported dataset or resnet_layer value.
    """
    # Inputs.
    image = Input((128, 128, 3))
    if dataset=='sort-of-clevr':
        question = Input((11,))
    elif dataset=='SVRT':
        question = Input((2,)) # same-different ([1, 0]) or relative position ([0, 1]).
    else:
        raise ValueError('dataset not supported!')
    # Get CNN features.
    base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=image)
    if resnet_layer=='last_size_4':
        layer_name = 'conv5_block3_out' # shape: (None, 4, 4, 2048)
    elif resnet_layer=='last_size_8':
        layer_name = 'conv4_block6_out' # shape: (None, 8, 8, 1024)
    else:
        raise ValueError('layer not supported!')
    cnn_features = base_model.get_layer(layer_name).output
    # Freeze (or unfreeze) the base_model.
    base_model.trainable = trainable
    # Append per-position (x, y) coordinate tags to the CNN features.
    tag = build_tag(cnn_features)
    cnn_features = Concatenate()([cnn_features, tag])
    # Make list with objects: every spatial cell of the tagged feature map
    # becomes one "object" vector.
    shapes = cnn_features.shape
    w, h = shapes[1], shapes[2]
    slice_layer1 = Lambda(slice_1)
    slice_layer2 = Lambda(slice_2)
    slice_layer3 = Lambda(slice_3)
    slice_layer4 = Lambda(slice_4)
    features = []
    # NOTE: order matters here — the outer loop peels off one row of
    # cnn_features per iteration, and the inner loop peels one cell off
    # that row; do not reorder these statements.
    for k1 in range(w):
        features1 = slice_layer1(cnn_features)
        cnn_features = slice_layer2(cnn_features)
        for k2 in range(h):
            features2 = slice_layer3(features1)
            features1 = slice_layer4(features1)
            features.append(features2)
    # Make list with all ordered pairs of objects, each concatenated with
    # the question vector.
    relations = []
    concat = Concatenate()
    for feature1 in features:
        for feature2 in features:
            relations.append(concat([feature1, feature2, question]))
    # g function: shared 4-layer MLP applied to every object pair, then
    # averaged into a single relation vector.
    g_MLP = get_MLP(4, get_dense(4, units=512))
    mid_relations = []
    for r in relations:
        mid_relations.append(g_MLP(r))
    combined_relation = Average()(mid_relations)
    # f function.
    rn = Dense(512, activation='relu')(combined_relation)
    rn = dropout_dense(rn, units=512)
    # Answer head: 10-way softmax for sort-of-clevr, single sigmoid ('sd')
    # for SVRT.
    if dataset == 'sort-of-clevr':
        output_units = 10
        answer = Dense(output_units, activation='softmax')(rn)
    elif dataset == 'SVRT':
        output_units = 1
        answer = Dense(output_units, activation='sigmoid', name='sd')(rn)
    if secondary_outputs:
        rel_pos = Dense(1, activation='sigmoid', name='rel_pos')(rn)
        model = Model(inputs=[image, question], outputs=[answer, rel_pos])
    else:
        model = Model(inputs=[image, question], outputs=answer)
    return base_model, model
def get_dataset(
        batch_size,
        tfrecord_dir,
        is_training=True,
        process_img=True,
        sd_sample_weight=True,
        relnet=False):
    """Build a batched tf.data pipeline from one or more TFRecord files.

    Args:
        batch_size: batch size for the returned dataset.
        tfrecord_dir: a TFRecord path or a list of paths.
        is_training: if True, repeat the dataset indefinitely.
        process_img: if True, scale to [0, 1] then standardize each image.
        sd_sample_weight: weight (1/0) given to the same/different target;
            with relnet=True it also selects the question vector.
        relnet: if True, emit (image, question) inputs for the relation
            network instead of a bare image.

    Returns:
        A shuffled, batched tf.data.Dataset yielding
        (inputs, (label, rel_pos), (sd_weight, rp_weight)) tuples.
    """
    # Load dataset.
    if type(tfrecord_dir) == list:
        raw_image_dataset = tf.data.TFRecordDataset(tfrecord_dir, num_parallel_reads=len(tfrecord_dir))
    else:
        raw_image_dataset = tf.data.TFRecordDataset(tfrecord_dir)
    # Define example reading function (closes over process_img / relnet /
    # sd_sample_weight).
    def read_tfrecord(serialized_example):
        # Create a dictionary describing the features.
        feature_description = {
            'label': tf.io.FixedLenFeature([], tf.int64),
            'image_raw': tf.io.FixedLenFeature([], tf.string),
            'coordinates': tf.io.FixedLenFeature([], tf.string),
            'relative_position': tf.io.FixedLenFeature([], tf.int64)}
        # Parse example.
        example = tf.io.parse_single_example(serialized_example, feature_description)
        # Cast label to int64.
        label = example['label']
        label = tf.cast(label, tf.int64)
        # Get image.
        image = tf.image.decode_png(example['image_raw'])
        # Ensure shape dimensions are constant.
        image = tf.reshape(image, [128, 128, 3])
        # Process image.
        if process_img:
            image = tf.cast(image, tf.float64)
            image /= 255.0
            # Sample-wise center image.
            mean = tf.reduce_mean(image)
            image -= mean
            # Sample-wise std normalization.
            std = tf.math.reduce_std(image)
            image /= std
        # Get coordinates.
        b_coors = example['coordinates']
        coors = tf.io.parse_tensor(b_coors, out_type=tf.float64) # restore 2D array from byte string
        coors = tf.reshape(coors, [4])
        # Cast relative position to int64.
        rel_pos = example['relative_position']
        rel_pos = tf.cast(rel_pos, tf.int64)
        # Sample weights: sd_w masks the same/different loss, rp_w the
        # relative-position loss.
        sd_w = tf.constant(1, dtype=tf.int64) if sd_sample_weight else tf.constant(0, dtype=tf.int64)
        rp_w = tf.constant(1, dtype=tf.int64)
        if relnet:
            # Question one-hot selects the task; only the corresponding
            # head is trained for this example.
            question = tf.constant([1, 0], dtype=tf.int64) if sd_sample_weight else tf.constant([0, 1], dtype=tf.int64)
            rp_w = tf.constant(0, dtype=tf.int64) if sd_sample_weight else tf.constant(1, dtype=tf.int64)
            return (image, question), (label, rel_pos), (sd_w, rp_w)
        else:
            return image, (label, rel_pos), (sd_w, rp_w)
    # Parse dataset.
    dataset = raw_image_dataset.map(read_tfrecord)
    # Always shuffle for simplicity.
    dataset = dataset.shuffle(5600)
    if is_training:
        # Infinite dataset to avoid the potential last partial batch in each epoch.
        dataset = dataset.repeat()
    dataset = dataset.batch(batch_size)
    return dataset
def get_master_dataset_relnet(batch_size, dont_include, process_img=True):
    """Build a training dataset that round-robins over all SVRT variants.

    For every variant a relative-position (RP) dataset is always included,
    while the same/different (SD) dataset is included only when the variant
    is not the held-out `dont_include` condition. Replaces ~200 lines of
    copy-pasted per-variant blocks with a data-driven loop; the inclusion
    order of the original code is preserved exactly.

    Args:
        batch_size: batch size passed through to get_dataset.
        dont_include: display name of the variant whose SD data is held out
            (e.g. 'Original', 'Wider line'), or any other value to include
            everything.
        process_img: forwarded to get_dataset.

    Returns:
        A tf.data.Dataset that draws successive batches from each included
        dataset in round-robin order.
    """
    # (display name, tfrecord path) in the original inclusion order.
    sources = [
        ('Original', 'data/original_train.tfrecords'),
        ('Regular', 'data/regular_train.tfrecords'),
        ('Irregular', 'data/irregular_train.tfrecords'),
        ('Open', 'data/open_train.tfrecords'),
        ('Wider line', 'data/wider_line_train.tfrecords'),
        ('Scrambled', 'data/scrambled_train.tfrecords'),
        ('Random color', 'data/random_color_train.tfrecords'),
        ('Filled', 'data/filled_train.tfrecords'),
        ('Lines', 'data/lines_train.tfrecords'),
        ('Arrows', 'data/arrows_train.tfrecords'),
    ]
    datasets = []
    for name, tfrecord_dir in sources:
        # Same/different task: skipped only for the held-out condition.
        # (The original built the excluded pipeline and discarded it; not
        # building it at all is externally identical.)
        if dont_include != name:
            datasets.append(get_dataset(
                batch_size=batch_size,
                tfrecord_dir=tfrecord_dir,
                is_training=True,
                process_img=process_img,
                sd_sample_weight=True,
                relnet=True))
        # Relative-position task: always trained on every condition.
        datasets.append(get_dataset(
            batch_size=batch_size,
            tfrecord_dir=tfrecord_dir,
            is_training=True,
            process_img=process_img,
            sd_sample_weight=False,
            relnet=True))
    # Note. I don't need to oversample the original dataset here because I'm
    # training same/different in all datasets except the one that is going
    # to be tested.
    choice_dataset = tf.data.Dataset.range(len(datasets)).repeat()
    return tf.data.experimental.choose_from_datasets(datasets, choice_dataset)
def fine_tune_relnet(
        train_ds,
        val_ds,
        save_name,
        epochs_top,
        epochs,
        steps_per_epoch,
        validation_steps,
        n=10,
        lr=0.0001):
    """Train n relation-network instances in two phases and save weights.

    Phase 1 trains only the relation head for `epochs_top` epochs with a
    frozen ResNet50 backbone; phase 2 unfreezes the backbone and fine-tunes
    the whole model for `epochs` epochs at learning rate `lr`.

    Args:
        train_ds: training tf.data dataset.
        val_ds: validation tf.data dataset.
        save_name: directory/prefix for the saved weight files.
        epochs_top: epochs for the frozen-backbone phase.
        epochs: epochs for the fine-tuning phase.
        steps_per_epoch: training steps per epoch.
        validation_steps: validation steps per epoch.
        n: number of independently trained instances.
        lr: learning rate for the fine-tuning phase (the head-only phase
            uses a fixed 0.0003).
    """
    # Repeat training n times, each with freshly initialized weights.
    for i in range(n):
        # Best relation network from simulation 1: 'last_size_8'.
        base_model, model = make_ResNet50_relnet(
            dataset='SVRT',
            resnet_layer='last_size_8',
            trainable=False,
            secondary_outputs=True)
        # Compile for the head-only phase.
        model.compile(
            optimizer=tf.keras.optimizers.Adam(0.0003),
            loss={'sd': 'binary_crossentropy', 'rel_pos': 'binary_crossentropy'},
            metrics={'sd': 'binary_accuracy', 'rel_pos': 'binary_accuracy'})
        # Train the relation head with the backbone frozen.
        model.fit(
            train_ds,
            epochs=epochs_top,
            steps_per_epoch=steps_per_epoch,
            validation_data=val_ds,
            validation_steps=validation_steps)
        # Unfreeze Resnet50.
        base_model.trainable = True
        # Re-compile so the trainability change takes effect.
        model.compile(
            optimizer=tf.keras.optimizers.Adam(lr),
            loss={'sd': 'binary_crossentropy', 'rel_pos': 'binary_crossentropy'},
            metrics={'sd': 'binary_accuracy', 'rel_pos': 'binary_accuracy'})
        # Fine-tune the full model.
        model.fit(
            train_ds,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
            validation_data=val_ds,
            validation_steps=validation_steps)
        # Save weights for this instance.
        weights_name = save_name + 'instance_' + str(i) + '.hdf5'
        model.save_weights(weights_name)
    return
def train_in_all_sd_relnet(
epochs_top,
epochs,
steps_per_epoch,
validation_steps,
n=10,
lr=0.0001,
batch_size=64):
# Define all master datasets.
no_irregular_ds = get_master_dataset_relnet(batch_size=batch_size, dont_include='Irregular', process_img=True)
no_regular_ds = get_master_dataset_relnet(batch_size=batch_size, dont_include='Regular', process_img=True)
no_open_ds = get_master_dataset_relnet(batch_size=batch_size, dont_include='Open', process_img=True)
no_wider_ds = get_master_dataset_relnet(batch_size=batch_size, dont_include='Wider line', process_img=True)
no_scrambled_ds = get_master_dataset_relnet(batch_size=batch_size, dont_include='Scrambled', process_img=True)
no_random_ds = get_master_dataset_relnet(batch_size=batch_size, dont_include='Random color', process_img=True)
no_filled_ds = get_master_dataset_relnet(batch_size=batch_size, dont_include='Filled', process_img=True)
no_lines_ds = get_master_dataset_relnet(batch_size=batch_size, dont_include='Lines', process_img=True)
no_arrows_ds = get_master_dataset_relnet(batch_size=batch_size, dont_include='Arrows', process_img=True)
# Validation dataset.
val_ds = get_dataset(
batch_size=batch_size,
tfrecord_dir='data/original_val.tfrecords',
is_training=False,
process_img=True,
relnet=True)
# Train model in each dataset 10 times.
ds_and_names = [
(no_irregular_ds, 'simulation_4/relnet_no_irregular/'),
(no_regular_ds, 'simulation_4/relnet_no_regular/'),
(no_open_ds, 'simulation_4/relnet_no_open/'),
(no_wider_ds, 'simulation_4/relnet_no_wider/'),
(no_scrambled_ds, 'simulation_4/relnet_no_scrambled/'),
(no_random_ds, 'simulation_4/relnet_no_random/'),
(no_filled_ds, 'simulation_4/relnet_no_filled/'),
(no_lines_ds, 'simulation_4/relnet_no_lines/'),
(no_arrows_ds, 'simulation_4/relnet_no_arrows/')]
for | |
# encoding: utf-8
#
# Copyright (C) 2010 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: <NAME> <<EMAIL>>
"""Schema validation for Python data structures.
Given eg. a nested data structure like this:
{
'exclude': ['Users', 'Uptime'],
'include': [],
'set': {
'snmp_community': 'public',
'snmp_timeout': 15,
'snmp_version': '2c',
},
'targets': {
'localhost': {
'exclude': ['Uptime'],
'features': {
'Uptime': {
'retries': 3,
},
'Users': {
'snmp_community': 'monkey',
'snmp_port': 15,
},
},
'include': ['Users'],
'set': {
'snmp_community': 'monkeys',
},
},
},
}
A schema like this:
>>> settings = {
... 'snmp_community': str,
... 'retries': int,
... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
... }
>>> features = ['Ping', 'Uptime', 'Http']
>>> schema = Schema({
... 'exclude': features,
... 'include': features,
... 'set': settings,
... 'targets': {
... 'exclude': features,
... 'include': features,
... 'features': {
... str: settings,
... },
... },
... })
Validate like so:
>>> schema({
... 'set': {
... 'snmp_community': 'public',
... 'snmp_version': '2c',
... },
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {
... 'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'},
... },
... },
... }) # doctest: +NORMALIZE_WHITESPACE
{'set': {'snmp_version': '2c', 'snmp_community': 'public'},
'targets': {'exclude': ['Ping'],
'features': {'Uptime': {'retries': 3},
'Users': {'snmp_community': 'monkey'}}}}
"""
import os
import re
import types
import urlparse
# Distribution metadata.
__author__ = '<NAME> <<EMAIL>>'
__version__ = '0.4'
class Undefined(object):
    """Sentinel type; its single instance is falsy and prints as '...'."""
    def __nonzero__(self):
        # Python 2 truthiness hook: the sentinel always evaluates false.
        return False
    def __repr__(self):
        return '...'
# Shared module-level sentinel meaning "no value".
UNDEFINED = Undefined()
class Error(Exception):
    """Base validation exception; root of this module's error hierarchy."""
class SchemaError(Error):
    """An error was encountered in the schema itself (not in the data)."""
class Invalid(Error):
    """The data was invalid.

    :attr msg: The error message.
    :attr path: The path to the error, as a list of keys in the source data.
    """
    def __init__(self, message, path=None):
        Exception.__init__(self, message)
        self.path = path or []
    @property
    def msg(self):
        # The message is the first positional argument given to Exception.
        return self.args[0]
    def __str__(self):
        base = Exception.__str__(self)
        if not self.path:
            return base
        # Render the path as a chained subscript, e.g. " @ data['a'][0]".
        keys = ']['.join([repr(step) for step in self.path])
        return '%s @ data[%s]' % (base, keys)
class InvalidList(Invalid):
    """Aggregates several Invalid errors.

    Proxies msg/path/str to the first collected error so it can be
    handled like a plain Invalid.
    """
    def __init__(self, errors=None):
        # NOTE: Invalid.__init__ is deliberately not called; this subclass
        # only carries a list of other Invalid instances.
        self.errors = errors[:] if errors else []
    @property
    def msg(self):
        return self.errors[0].msg
    @property
    def path(self):
        return self.errors[0].path
    def add(self, error):
        # Append a further Invalid to the aggregate.
        self.errors.append(error)
    def __str__(self):
        return str(self.errors[0])
class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
"""
def __init__(self, schema, required=False, extra=False):
"""Create a new Schema.
:param schema: Validation schema. See :module:`voluptuous` for details.
:param required: Keys defined in the schema must be in the data.
:param extra: Keys in the data need not have keys in the schema.
"""
self.schema = schema
self.required = required
self.extra = extra
def __call__(self, data):
"""Validate data against this schema."""
return self.validate([], self.schema, data)
def validate(self, path, schema, data):
try:
if isinstance(schema, dict):
return self.validate_dict(path, schema, data)
elif isinstance(schema, list):
return self.validate_list(path, schema, data)
type_ = type(schema)
if type_ is type:
type_ = schema
if type_ in (int, long, str, unicode, float, complex, object,
list, dict, types.NoneType) or callable(schema):
return self.validate_scalar(path, schema, data)
except InvalidList:
raise
except Invalid, e:
raise InvalidList([e])
raise SchemaError('unsupported schema data type %r' %
type(schema).__name__)
def validate_dict(self, path, schema, data):
"""Validate a dictionary.
A dictionary schema can contain a set of values, or at most one
validator function/type.
A dictionary schema will only validate a dictionary:
>>> validate = Schema({})
>>> validate([])
Traceback (most recent call last):
...
InvalidList: expected a dictionary
An invalid dictionary value:
>>> validate = Schema({'one': 'two', 'three': 'four'})
>>> validate({'one': 'three'})
Traceback (most recent call last):
...
InvalidList: not a valid value for dictionary value @ data['one']
An invalid key:
>>> validate({'two': 'three'})
Traceback (most recent call last):
...
InvalidList: extra keys not allowed @ data['two']
Validation function, in this case the "int" type:
>>> validate = Schema({'one': 'two', 'three': 'four', int: str})
Valid integer input:
>>> validate({10: 'twenty'})
{10: 'twenty'}
By default, a "type" in the schema (in this case "int") will be used
purely to validate that the corresponding value is of that type. It
will not coerce the value:
>>> validate({'10': 'twenty'})
Traceback (most recent call last):
...
InvalidList: extra keys not allowed @ data['10']
Wrap them in the Coerce() function to achieve this:
>>> validate = Schema({'one': 'two', 'three': 'four',
... Coerce(int): str})
>>> validate({'10': 'twenty'})
{10: 'twenty'}
(This is to avoid unexpected surprises.)
"""
if not isinstance(data, dict):
raise Invalid('expected a dictionary', path)
out = type(data)()
required_keys = set(key for key in schema
if
(self.required and not isinstance(key, optional))
or
isinstance(key, Required))
error = None
errors = []
for key, value in data.iteritems():
key_path = path + [key]
for skey, svalue in schema.iteritems():
if skey is Extra:
new_key = key
else:
try:
new_key = self.validate(key_path, skey, key)
except Invalid, e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
try:
out[new_key] = self.validate(key_path, svalue, value)
except Invalid, e:
if len(e.path) > len(key_path):
errors.append(e)
else:
errors.append(Invalid(e.msg + ' for dictionary value',
e.path))
break
# Key and value okay, mark any required() fields as found.
required_keys.discard(skey)
break
else:
if self.extra:
out[key] = value
else:
errors.append(Invalid('extra keys not allowed',
key_path))
for key in required_keys:
errors.append(Invalid('required key not provided', path + [key]))
if errors:
raise InvalidList(errors)
return out
def validate_list(self, path, schema, data):
"""Validate a list.
A list is a sequence of valid values or validators tried in order.
>>> validator = Schema(['one', 'two', int])
>>> validator(['one'])
['one']
>>> validator([3.5])
Traceback (most recent call last):
...
InvalidList: invalid list value @ data[0]
>>> validator([1])
[1]
"""
if not isinstance(data, list):
raise Invalid('expected a list', path)
# Empty list schema, allow any data list.
if not schema:
return data
out = type(data)()
invalid = None
errors = []
index_path = UNDEFINED
for i, value in enumerate(data):
index_path = path + [i]
invalid = None
for s in schema:
try:
out.append(self.validate(index_path, s, value))
break
except Invalid, e:
if len(e.path) > len(index_path):
raise
invalid = e
else:
if len(invalid.path) <= len(index_path):
invalid = Invalid('invalid list value', index_path)
errors.append(invalid)
if errors:
raise InvalidList(errors)
return out
@staticmethod
def validate_scalar(path, schema, data):
"""A scalar value.
The schema can either be a value or a type.
>>> Schema.validate_scalar([], int, 1)
1
>>> Schema.validate_scalar([], float, '1')
Traceback (most recent call last):
...
Invalid: expected float
Callables have
>>> Schema.validate_scalar([], lambda v: float(v), '1')
1.0
As a convenience, ValueError's are trapped:
>>> Schema.validate_scalar([], lambda v: float(v), 'a')
Traceback (most recent call last):
...
Invalid: not a valid value
"""
if type(schema) is type:
if not isinstance(data, schema):
raise Invalid('expected %s' % schema.__name__, path)
elif callable(schema):
try:
return schema(data)
except ValueError, e:
raise Invalid('not a valid value', path)
except Invalid, e:
raise Invalid(e.msg, path + e.path)
else:
if data != schema:
raise Invalid('not a valid value', path)
return data
class Marker(object):
    """Mark nodes for special treatment.

    Wraps a schema node; calling the marker validates a value against the
    wrapped schema, optionally substituting a friendly error message.
    """
    def __init__(self, schema, msg=None):
        # Keep the raw schema for str/repr and a compiled Schema for calls.
        self.schema = schema
        self._schema = Schema(schema)
        self.msg = msg
    def __call__(self, v):
        try:
            return self._schema(v)
        except Invalid, e:
            # Only substitute the friendly message for top-level failures;
            # deeper errors keep their own path information.
            if not self.msg or len(e.path) > 1:
                raise
            raise Invalid(self.msg)
    def __str__(self):
        return str(self.schema)
    def __repr__(self):
        return repr(self.schema)
class Optional(Marker):
    """Mark a node in the schema as optional (exempt from required=True)."""
class Required(Marker):
    """Mark a node in the schema as being required."""
def Extra(_):
    """Allow keys in the data that are not present in the schema."""
    # Never actually invoked as a validator: Schema.validate_dict
    # special-cases this object by identity (`skey is Extra`).
    raise SchemaError('"extra" should never be called')
def Msg(schema, msg):
"""Report a user-friendly message if a schema fails to validate.
>>> validate = Schema(
... Msg(['one', 'two', int],
... 'should be one of "one", "two" or an integer'))
>>> validate(['three'])
Traceback (most recent call last):
...
InvalidList: should be one of "one", "two" or an integer
Messages are only applied to invalid direct descendants of the schema:
>>> validate = Schema(Msg([['one', | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=C0111,C0103,E1101
"""DQN Solver package."""
import json
import os
import numpy as np
import tensorflow as tf
from snake.base import Direc, Pos, PointType
from snake.solver.base import BaseSolver
from snake.solver.dqn.memory import Memory
from snake.solver.dqn.logger import log
from snake.solver.dqn.snakeaction import SnakeAction
from snake.solver.dqn.history import History
_DIR_LOG = "logs"
class DQNSolver(BaseSolver):
    # Checkpoint file patterns, parameterized by the learning step number.
    PATH_VAR = os.path.join(_DIR_LOG, "solver-var-%d.json")
    PATH_NET = os.path.join(_DIR_LOG, "solver-net-%d")
    def __init__(self, snake):
        """Set up hyperparameters, replay memory, TF graph and session.

        Restores weights and exploration variables from disk when
        self._RESTORE_STEP > 0.
        """
        super().__init__(snake)
        self._USE_RELATIVE = True  # Whether to use relative actions
        self._USE_VISUAL_ONLY = False  # Whether to use visual state only
        self._USE_DDQN = False  # Whether to use double dqn
        self._USE_DUELING = True  # Whether to use dueling network
        self._EXPLOIT_STEP = 1000000  # Steps that epsilon decreases
        self._MAX_LEARN_STEP = 3000000  # Maximum learning steps (require >= self._RESTORE_STEP)
        self._RESTORE_STEP = 0  # Which learning step to restore (0 means not restore)
        # Rewards
        self._RWD_EMPTY = -0.005
        self._RWD_DEAD = -0.5
        self._RWD_FOOD = 1.0
        # Memory
        self._MEM_SIZE = 100000
        self._MEM_BATCH = 32
        # Epsilon-greedy: epsilon decays linearly over _EXPLOIT_STEP steps.
        self._EPSILON_MAX = 1.0
        self._EPSILON_MIN = 0.01
        self._EPSILON_DEC = (self._EPSILON_MAX - self._EPSILON_MIN) / self._EXPLOIT_STEP
        self._LR = 1e-6  # Learning rate
        self._MOMENTUM = 0.95  # SGD momentum
        self._GAMMA = 0.99  # Reward discount
        self._LEAKY_ALPHA = 0.01  # Leaky relu slope
        self._TD_UPPER = 1.0  # TD-error clip upper bound
        self._TD_LOWER = -1.0  # TD-error clip lower bound
        # Prioritized replay parameters.
        self._PRI_EPSILON = 0.001  # Small positive value to avoid zero priority
        self._ALPHA = 0.6  # How much prioritization to use
        self._BETA_MIN = 0.4  # How much to compensate for the non-uniform probabilities
        self._BETA_INC = (1.0 - self._BETA_MIN) / self._EXPLOIT_STEP
        # Frequency
        self._FREQ_LEARN = 4  # Number of snake steps
        self._FREQ_REPLACE = 10000  # Learning steps
        self._FREQ_LOG = 500  # Learning steps
        self._FREQ_SAVE = 20000  # Learning steps
        self._HISTORY_NUM_AVG = 50  # How many latest history episodes to compute average
        # Action set depends on whether actions are relative to heading.
        if self._USE_RELATIVE:
            self._SNAKE_ACTIONS = [SnakeAction.LEFT, SnakeAction.FORWARD, SnakeAction.RIGHT]
        else:
            self._SNAKE_ACTIONS = [Direc.LEFT, Direc.UP, Direc.RIGHT, Direc.DOWN]
        self._NUM_ACTIONS = len(self._SNAKE_ACTIONS)
        # State features: interior of the map (walls excluded), 4 channels.
        self._SHAPE_VISUAL_STATE = (self.map.num_rows - 2, self.map.num_cols - 2, 4)
        self._NUM_VISUAL_FEATURES = np.prod(self._SHAPE_VISUAL_STATE)
        self._NUM_IMPORTANT_FEATURES = 0 if self._USE_VISUAL_ONLY else self._NUM_ACTIONS
        self._NUM_ALL_FEATURES = self._NUM_VISUAL_FEATURES + self._NUM_IMPORTANT_FEATURES
        # Replay memory (prioritized)
        self._mem = Memory(mem_size=self._MEM_SIZE,
                           alpha=self._ALPHA,
                           epsilon=self._PRI_EPSILON)
        self._mem_cnt = 0
        self._learn_step = 1
        self._epsilon = self._EPSILON_MAX
        self._beta = self._BETA_MIN
        # Learning history
        self._history = History(self._HISTORY_NUM_AVG)
        # Build the TF1 graph, then create the session and saver.
        eval_params, target_params = self._build_graph()
        self._net_saver = tf.train.Saver(var_list=eval_params + target_params,
                                         max_to_keep=500)
        self._sess = tf.Session()
        self._sess.run(tf.global_variables_initializer())
        self._summary_writer = tf.summary.FileWriter(_DIR_LOG, self._sess.graph)
        if self._RESTORE_STEP > 0:
            self._load_model()
def _save_model(self):
self._net_saver.save(self._sess, DQNSolver.PATH_NET % self._learn_step,
write_meta_graph=False)
with open(DQNSolver.PATH_VAR % self._learn_step, "w") as f:
json.dump({
"epsilon": self._epsilon,
"beta": self._beta,
}, f, indent=2)
def _load_model(self):
self._net_saver.restore(self._sess, DQNSolver.PATH_NET % self._RESTORE_STEP)
with open(DQNSolver.PATH_VAR % self._RESTORE_STEP, "r") as f:
var = json.load(f)
self._epsilon = var["epsilon"]
self._beta = var["beta"]
self._learn_step = self._RESTORE_STEP + 1
log("model loaded | RESTORE_STEP: %d | epsilon: %.6f | beta: %.6f"
% (self._RESTORE_STEP, self._epsilon, self._beta))
    def _build_graph(self):
        """Build the TF1 graph: eval net, target net, loss and train op.

        Returns (eval_params, target_params): the GLOBAL_VARIABLES
        collections of the two networks, used to build the saver and the
        target-replacement ops.
        """
        # Input tensor for eval net
        self._state_eval = tf.placeholder(
            tf.float32, [None, self._NUM_ALL_FEATURES], name="state_eval")
        # Input tensor for target net
        self._state_target = tf.placeholder(
            tf.float32, [None, self._NUM_ALL_FEATURES], name="state_target")
        # Input tensor for actions taken by agent
        self._action = tf.placeholder(
            tf.int32, [None, ], name="action")
        # Input tensor for rewards received by agent
        self._reward = tf.placeholder(
            tf.float32, [None, ], name="reward")
        # Input tensor for whether episodes are finished
        self._done = tf.placeholder(
            tf.bool, [None, ], name="done")
        # Input tensor for eval net output of next state (used by double DQN)
        self._q_eval_all_nxt = tf.placeholder(
            tf.float32, [None, self._NUM_ACTIONS], name="q_eval_all_nxt")
        # Input tensor for importance-sampling weights (prioritized replay)
        self._IS_weights = tf.placeholder(
            tf.float32, [None, ], name="IS_weights")
        SCOPE_EVAL_NET = "eval_net"
        SCOPE_TARGET_NET = "target_net"
        w_init = tf.keras.initializers.he_normal()
        b_init = tf.constant_initializer(0)
        with tf.variable_scope(SCOPE_EVAL_NET):
            # Eval net output
            self._q_eval_all = self._build_net(self._state_eval, "q_eval_all", w_init, b_init)
        with tf.variable_scope("q_eval"):
            # Q value of the action actually taken.
            q_eval = self._filter_actions(self._q_eval_all, self._action)
        with tf.variable_scope(SCOPE_TARGET_NET):
            # Target net output
            q_nxt_all = self._build_net(self._state_target, "q_nxt_all", w_init, b_init)
        with tf.variable_scope("q_target"):
            max_actions = None
            if self._USE_DDQN:
                # Double DQN: select the greedy action with the eval net,
                # evaluate it with the target net below.
                max_actions = tf.argmax(self._q_eval_all_nxt, axis=1, output_type=tf.int32)
            else:
                max_actions = tf.argmax(q_nxt_all, axis=1, output_type=tf.int32)
            q_nxt = self._filter_actions(q_nxt_all, max_actions)
            # Bootstrap only for non-terminal transitions.
            q_target = self._reward + self._GAMMA * q_nxt * \
                       (1.0 - tf.cast(self._done, tf.float32))
            q_target = tf.stop_gradient(q_target)
        with tf.variable_scope("loss"):
            with tf.variable_scope("td_err"):
                # TD error is clipped for stability.
                td_err = tf.clip_by_value(
                    q_eval - q_target,
                    clip_value_min=self._TD_LOWER,
                    clip_value_max=self._TD_UPPER,
                )
            # Importance-sampling weighted MSE.
            self._loss = tf.reduce_mean(self._IS_weights * tf.square(td_err))
            self._td_err_abs = tf.abs(td_err, name="td_err_abs")  # To update sum tree
        with tf.variable_scope("train"):
            self._train = tf.train.RMSPropOptimizer(
                learning_rate=self._LR, momentum=self._MOMENTUM
            ).minimize(self._loss)
        # Replace target net params with eval net's
        with tf.variable_scope("replace"):
            eval_params = tf.get_collection(
                tf.GraphKeys.GLOBAL_VARIABLES, scope=SCOPE_EVAL_NET)
            target_params = tf.get_collection(
                tf.GraphKeys.GLOBAL_VARIABLES, scope=SCOPE_TARGET_NET)
            self._replace_target = [
                tf.assign(t, e) for t, e in zip(target_params, eval_params)
            ]
        return eval_params, target_params
    def _build_net(self, features, output_name, w_init_, b_init_):
        """Build the conv + dense Q network.

        features: (None, _NUM_ALL_FEATURES) — flattened visual state,
        optionally followed by the "important" features. Returns a
        (None, _NUM_ACTIONS) tensor of q values.
        """
        # Split off the visual portion and restore its 2-D image shape.
        visual_state = tf.slice(features,
                                begin=[0, 0],
                                size=[-1, self._NUM_VISUAL_FEATURES],
                                name="visual_state")
        visual_state_2d = tf.reshape(tensor=visual_state,
                                     shape=[-1,
                                            self._SHAPE_VISUAL_STATE[0],
                                            self._SHAPE_VISUAL_STATE[1],
                                            self._SHAPE_VISUAL_STATE[2]],
                                     name="visual_state_2d")
        # Four conv layers, all 'valid' padding and stride 1.
        conv1 = tf.layers.conv2d(inputs=visual_state_2d,
                                 filters=32,
                                 kernel_size=3,
                                 strides=1,
                                 padding='valid',
                                 activation=self._leaky_relu,
                                 kernel_initializer=w_init_,
                                 bias_initializer=b_init_,
                                 name="conv1")
        conv2 = tf.layers.conv2d(inputs=conv1,
                                 filters=64,
                                 kernel_size=3,
                                 strides=1,
                                 padding='valid',
                                 activation=self._leaky_relu,
                                 kernel_initializer=w_init_,
                                 bias_initializer=b_init_,
                                 name="conv2")
        conv3 = tf.layers.conv2d(inputs=conv2,
                                 filters=128,
                                 kernel_size=2,
                                 strides=1,
                                 padding='valid',
                                 activation=self._leaky_relu,
                                 kernel_initializer=w_init_,
                                 bias_initializer=b_init_,
                                 name="conv3")
        conv4 = tf.layers.conv2d(inputs=conv3,
                                 filters=256,
                                 kernel_size=2,
                                 strides=1,
                                 padding='valid',
                                 activation=self._leaky_relu,
                                 kernel_initializer=w_init_,
                                 bias_initializer=b_init_,
                                 name="conv4")
        # NOTE(review): the hard-coded flatten size assumes conv4's spatial
        # output is 2x2, i.e. a fixed map size — confirm against map dims.
        conv4_flat = tf.reshape(tensor=conv4,
                                shape=[-1, 2 * 2 * 256],
                                name="conv4_flat")
        combined_features = None
        if self._USE_VISUAL_ONLY:
            combined_features = conv4_flat
        else:
            # Append the non-visual ("important") features after the
            # convolutional trunk.
            important_state = tf.slice(features,
                                       begin=[0, self._NUM_VISUAL_FEATURES],
                                       size=[-1, self._NUM_IMPORTANT_FEATURES],
                                       name="important_state")
            combined_features = tf.concat([conv4_flat, important_state],
                                          axis=1,
                                          name="combined_features")
        fc1 = tf.layers.dense(inputs=combined_features,
                              units=1024,
                              activation=self._leaky_relu,
                              kernel_initializer=w_init_,
                              bias_initializer=b_init_,
                              name="fc1")
        q_all = None
        if self._USE_DUELING:
            # Dueling architecture: separate value (v) and advantage (a)
            # streams, recombined as q = v + (a - mean(a)).
            fc2_v = tf.layers.dense(inputs=fc1,
                                    units=512,
                                    activation=self._leaky_relu,
                                    kernel_initializer=w_init_,
                                    bias_initializer=b_init_,
                                    name="fc2_v")
            fc2_a = tf.layers.dense(inputs=fc1,
                                    units=512,
                                    activation=self._leaky_relu,
                                    kernel_initializer=w_init_,
                                    bias_initializer=b_init_,
                                    name="fc2_a")
            v = tf.layers.dense(inputs=fc2_v,
                                units=1,
                                activation=self._leaky_relu,
                                kernel_initializer=w_init_,
                                bias_initializer=b_init_,
                                name="v")
            a = tf.layers.dense(inputs=fc2_a,
                                units=self._NUM_ACTIONS,
                                activation=self._leaky_relu,
                                kernel_initializer=w_init_,
                                bias_initializer=b_init_,
                                name="a")
            with tf.variable_scope(output_name):
                a_mean = tf.reduce_mean(a, axis=1, keep_dims=True, name="a_mean")
                q_all = v + (a - a_mean)
        else:
            fc2 = tf.layers.dense(inputs=fc1,
                                  units=1024,
                                  activation=self._leaky_relu,
                                  kernel_initializer=w_init_,
                                  bias_initializer=b_init_,
                                  name="fc2")
            q_all = tf.layers.dense(inputs=fc2,
                                    units=self._NUM_ACTIONS,
                                    kernel_initializer=w_init_,
                                    bias_initializer=b_init_,
                                    name=output_name)
        return q_all  # Shape: (None, num_actions)
    def _leaky_relu(self, features):
        """Leaky ReLU with the configured negative slope (_LEAKY_ALPHA)."""
        return tf.nn.leaky_relu(features, alpha=self._LEAKY_ALPHA)
    def _filter_actions(self, q_all, actions):
        """Per row, pick the q value of the chosen action.

        q_all: (None, num_actions); actions: (None,) int32 indices.
        """
        with tf.variable_scope("action_filter"):
            # Pair each row index with its action index for gather_nd.
            indices = tf.range(tf.shape(q_all)[0], dtype=tf.int32)
            action_indices = tf.stack([indices, actions], axis=1)
            return tf.gather_nd(q_all, action_indices)  # Shape: (None, )
def next_direc(self):
"""Override super class."""
action = self._SNAKE_ACTIONS[self._choose_action(e_greedy=False)]
if self._USE_RELATIVE:
return SnakeAction.to_direc(action, self.snake.direc)
else:
return action
    def plot(self):
        """Save and plot the learning history since the restored step."""
        self._history.save(self._RESTORE_STEP + 1, self._learn_step - 1)
        self._history.plot(self._RESTORE_STEP + 1)
def close(self):
"""Override super class."""
if self._summary_writer:
self._summary_writer.close()
if self._sess:
self._sess.close()
    def train(self):
        """Take one snake step, store the transition, and maybe learn.

        Returns (done, learn_end): whether the episode finished, and
        whether the maximum number of learning steps was exceeded.
        """
        state_cur = self._state()
        action = self._choose_action()
        reward, state_nxt, done = self._step(action)
        self._store_transition(state_cur, action, reward, state_nxt, done)
        self._history.add_snake_step(done, reward, self.snake)
        # Learning only starts once the replay memory has filled up.
        if self._mem_cnt >= self._MEM_SIZE:
            if self._mem_cnt % self._FREQ_LEARN == 0:
                self._learn()
        elif self._mem_cnt % self._FREQ_LOG == 0:
            log("mem_cnt: %d" % self._mem_cnt)
        learn_end = self._learn_step > self._MAX_LEARN_STEP
        return done, learn_end
    def _state(self):
        """Return a vector indicating current state.

        Visual channels per interior cell: 0=empty, 1=food, 2=head,
        3=body.  With _USE_RELATIVE, coordinates are remapped by the
        snake's heading (presumably so the snake always "faces up" —
        TODO confirm the mapping); with important features enabled, one
        danger flag per action is appended.
        """
        # Visual state
        visual_state = np.zeros(self._SHAPE_VISUAL_STATE, dtype=np.int32)
        for i in range(1, self.map.num_rows - 1):
            for j in range(1, self.map.num_cols - 1):
                pos = Pos(i, j)
                if self._USE_RELATIVE:
                    # Rotate the sampled position according to heading.
                    if self.snake.direc == Direc.LEFT:
                        pos = Pos(self.map.num_rows - 1 - j, i)
                    elif self.snake.direc == Direc.UP:
                        pos = Pos(i, j)
                    elif self.snake.direc == Direc.RIGHT:
                        pos = Pos(j, self.map.num_cols - 1 - i)
                    elif self.snake.direc == Direc.DOWN:
                        pos = Pos(self.map.num_rows - 1 - i, self.map.num_cols - 1 - j)
                t = self.map.point(pos).type
                if t == PointType.EMPTY:
                    visual_state[i - 1][j - 1][0] = 1
                elif t == PointType.FOOD:
                    visual_state[i - 1][j - 1][1] = 1
                elif t == PointType.HEAD_L or t == PointType.HEAD_U or \
                        t == PointType.HEAD_R or t == PointType.HEAD_D:
                    visual_state[i - 1][j - 1][2] = 1
                elif t == PointType.BODY_LU or t == PointType.BODY_UR or \
                        t == PointType.BODY_RD or t == PointType.BODY_DL or \
                        t == PointType.BODY_HOR or t == PointType.BODY_VER:
                    visual_state[i - 1][j - 1][3] = 1
                else:
                    raise ValueError("Unsupported PointType: {}".format(t))
        if self._USE_VISUAL_ONLY:
            return visual_state.flatten()
        else:
            # Important state: one flag per action, set when the adjacent
            # cell in that action's direction is unsafe.
            important_state = np.zeros(self._NUM_IMPORTANT_FEATURES, dtype=np.int32)
            head = self.snake.head()
            if self._USE_RELATIVE:
                for i, action in enumerate([SnakeAction.LEFT, SnakeAction.FORWARD, SnakeAction.RIGHT]):
                    direc = SnakeAction.to_direc(action, self.snake.direc)
                    if not self.map.is_safe(head.adj(direc)):
                        important_state[i] = 1
            else:
                for i, direc in enumerate([Direc.LEFT, Direc.UP, Direc.RIGHT, Direc.DOWN]):
                    if not self.map.is_safe(head.adj(direc)):
                        important_state[i] = 1
            return np.hstack((visual_state.flatten(), important_state))
def _choose_action(self, e_greedy=True):
action_idx = None
if e_greedy and np.random.uniform() < self._epsilon:
while True:
action_idx = np.random.randint(0, self._NUM_ACTIONS)
if Direc.opposite(self.snake.direc) != self._SNAKE_ACTIONS[action_idx]:
break
else:
q_eval_all = self._sess.run(
self._q_eval_all,
feed_dict={
self._state_eval: self._state()[np.newaxis, :]
}
)
q_eval_all = q_eval_all[0]
# Find indices of actions with 1st and 2nd largest q value
action_indices = np.argpartition(q_eval_all, q_eval_all.size - 2)
action_idx = action_indices[-1]
# If opposite direction, return direction with 2nd largest q value
if Direc.opposite(self.snake.direc) == self._SNAKE_ACTIONS[action_idx]:
action_idx = action_indices[-2]
| |
self.ghost_academies:
if len(academy.orders) > 0 and academy.orders[0].ability.id in [AbilityId.BUILD_NUKE]:
if self.already_pending(UnitTypeId.GHOST):
for br in self.barracks:
if len(br.orders) > 1:
break
if self.can_afford(UnitTypeId.REAPER):
self.do(br.train(UnitTypeId.REAPER))
if self.chat:
await self._client.chat_send("Nukerush preparations ready.", team_only=False)
return False
return False
    async def build_priority_raven(self):
        """Rush one raven: tech to a starport techlab (reusing a lifted
        barracks' position as the "doner" spot) and train the raven.

        Returns True while the build chain still needs attention on later
        frames, False when an action was issued (or nothing to do) now.
        """
        # Goal reached: a raven exists or is queued; drop the priority flag.
        if self.already_pending(UnitTypeId.RAVEN) or self.units(UnitTypeId.RAVEN):
            self.priority_raven = False
        # Need at least one (possibly flying) barracks before teching up.
        if not self.barracks.ready and not self.barracksflyings:
            return True
        # Step 1: get a factory (starport prerequisite).
        if ((not self.factories and not self.factoriesflying)
                and not self.already_pending(UnitTypeId.FACTORY)
                and (self.barracks.ready.exists or self.barracksflyings)):
            if self.can_afford(UnitTypeId.FACTORY):
                await self.build_for_me(UnitTypeId.FACTORY)
            return False
        # Step 2: lift a barracks off its idle techlab so the starport can
        # land on that techlab's position later.
        if (self.structures(UnitTypeId.BARRACKSTECHLAB)
                and not self.structures(UnitTypeId.STARPORTTECHLAB)
                and self.already_pending(UnitTypeId.STARPORT)
                and not self.doner_location):
            for br in self.structures(UnitTypeId.BARRACKS).ready.idle:
                for techlab in self.structures(UnitTypeId.BARRACKSTECHLAB):
                    if br.add_on_tag == techlab.tag and len(techlab.orders) <= 0:
                        self.doner_location = br.position
                        self.do(br(AbilityId.LIFT))
                        print("Reason for lift: doner location for priority starport techlab.")
            return False
        # Step 3: get a starport onto the doner location (build it there,
        # or lift an add-on-less starport and land it there).
        if self.doner_location:
            if self.starportflying.idle:
                for sp in self.starportflying.idle:
                    self.do(sp(LAND, self.doner_location))
                return False
            if self.structures(UnitTypeId.STARPORTTECHLAB):
                # Techlab acquired; doner spot no longer needed.
                self.doner_location = None
            if not self.starports and not self.starportflying and not self.already_pending(UnitTypeId.STARPORT):
                if self.factories.ready:
                    if self.can_afford(UnitTypeId.STARPORT):
                        self.refineries_in_first_base = 2
                        await self.build(UnitTypeId.STARPORT, self.doner_location,
                                         build_worker=self.select_contractor(self.doner_location))
                        print("Building priority starport")
                        return False
                    else:
                        return True
            elif not self.starportflying and self.starports:
                for sp in self.starports.ready.idle:
                    if sp.add_on_tag == 0:
                        self.do(sp(LIFT))
                        print("Reason for lift: moving to starport techlab doner location.")
                return True
        # Step 4: train the raven from a starport attached to a techlab.
        if not self.already_pending(UnitTypeId.RAVEN):
            for sp in self.starports.ready.idle:
                for addon in self.structures(UnitTypeId.STARPORTTECHLAB):
                    if sp.add_on_tag == addon.tag:
                        if self.can_afford(RAVEN) and self.can_feed(RAVEN):
                            self.do(sp.train(RAVEN))
                            print("Training raven")
                            return False
        return True
    async def build_priority_tank(self):
        """Rush one siege tank: barracks techlab -> factory on the techlab
        ("doner") position -> train a tank.

        Returns True while the chain still needs attention on later
        frames, False when an action was issued this frame.
        """
        # Goal reached: a tank is queued; drop the priority flag.
        if self.already_pending(UnitTypeId.SIEGETANK):
            self.priority_tank = False
            print("Priority tank code completed. continue normal game")
        # wait for barracks to be ready
        if not self.barracks.ready and not self.barracksflyings:
            return True
        # A factory already sits on the doner spot: clear the bookmark.
        if self.doner_location:
            if self.factories.closer_than(2, self.doner_location):
                self.doner_location = None
                print("Cleared doner location in priority tank")
        # Keep a couple of marines out while the factory is still missing.
        if self.marines.amount < 2 and not self.already_pending(UnitTypeId.MARINE) and not self.factories:
            for barracks in self.barracks.idle:
                self.do(barracks.train(UnitTypeId.MARINE))
                print("Training marine to protect priority tank production")
            return False
        # Ensure enough gas income once the factory is on the way.
        if self.refineries_in_first_base < 2 and self.already_pending(FACTORY):
            self.refineries_in_first_base = 2
            print("Add more refineries to ensure gas production for tank")
        "build factory immediately after BarracksTechlab"
        if ((not self.factories and not self.factoriesflying)
                and not self.already_pending(FACTORY)
                and (self.barracks.ready.exists or self.barracksflyings)):
            if self.can_afford(FACTORY):
                # Prefer the doner spot so the factory lands on the techlab.
                if self.doner_location:
                    if await self.can_place(FACTORY, self.doner_location):
                        await self.build(FACTORY, self.doner_location)
                        print("building factory in doner location")
                        return False
                    self.doner_location = None
                await self.build_for_me(FACTORY)
                print("building factory.")
            return False
        elif (not self.structures(TECHLAB)
              and not self.structures(FACTORYTECHLAB)
              and not self.structures(BARRACKSTECHLAB)
              and not self.already_pending(BARRACKSTECHLAB)
              and not self.factories.ready):
            # No techlab anywhere yet: start one on a barracks.
            if self.can_afford(BARRACKSTECHLAB):
                for barracks in self.barracks.idle:
                    self.do(barracks.build(BARRACKSTECHLAB))
                    print("Building techlab with barracks for factory.")
            return False
        elif (not self.structures(TECHLAB)
              and not self.structures(FACTORYTECHLAB)
              and not self.structures(BARRACKSTECHLAB)
              and not self.already_pending(BARRACKSTECHLAB)
              and self.factories.ready
              and self.already_pending(UnitTypeId.BARRACKSREACTOR)):
            # Fallback path that should not normally trigger.
            if self.can_afford(FACTORYTECHLAB):
                for factory in self.factories.idle:
                    self.do(factory.build(FACTORYTECHLAB))
                    print("Building factory techlab. Should not happend. Check code.")
            return False
        elif (not self.structures(TECHLAB)
              and not self.structures(FACTORYTECHLAB)
              and self.structures(BARRACKSTECHLAB).ready):
            # Lift the barracks off its finished techlab and remember the
            # spot so the factory can land on it.
            for barracks in self.barracks:
                for addon in self.structures(BARRACKSTECHLAB):
                    if barracks.add_on_tag == addon.tag:
                        self.doner_location = barracks.position
                        print("Lifting barracks and assigning doner location at:", self.doner_location)
                        self.do(barracks(LIFT))
            return False
        "move factory to techlab"
        if (self.factories.ready.idle
                and self.doner_location
                and not self.structures(FACTORYTECHLAB)
                and not self.already_pending(FACTORYTECHLAB)
                and await self.can_place(FACTORY, self.doner_location)):
            for factory in self.factories:
                self.do(factory(LIFT))
                print("Doner location found and lifting factory for transoprt.")
            return False
        for factory in self.factoriesflying.idle:
            if self.doner_location:
                if await self.can_place(FACTORY, self.doner_location):
                    self.do(factory(LAND, self.doner_location))
                    print("Land factory to doner location")
                    return False
        if self.structures(FACTORYTECHLAB) and self.doner_location:
            self.doner_location = None
        "build one priority tank"
        if (self.factories.ready.idle and self.structures(FACTORYTECHLAB).ready):
            for factory in self.factories.ready.idle:
                if not self.can_feed(SIEGETANK):
                    print("Cant feed siegetank!")
                    return True
                if (self.can_afford(SIEGETANK)):
                    self.do(factory.train(SIEGETANK))
                    print("Training priority siegetank")
                    return False
                else:
                    print("Saving money for siegetank")
                    return False
        return True
    async def manage_drop(self):
        """Run the marine-drop opening: build order, abort logic and drop micro.

        Returns ``False`` while the build order still has pending work this
        step, ``True`` when the strategy is done/aborted or waiting on money.
        NOTE(review): the bare string statements inside the body are no-op
        "section headers" from the original author; kept verbatim.
        """
        # On maps with an unknown enemy start (4-player maps) the drop
        # target cannot be computed: abort the marine-drop strategy.
        if self.enemy_start_location == None:
            self.marine_drop = False
            return True
        waypoint = await self.get_waypoint_for_dropship()
        # Drop point: the mineral patch of the enemy main that is furthest
        # from the enemy natural (least defended approach).
        fields = self.mineral_field.closer_than(10.0, self.enemy_start_location)
        drop_point = fields.furthest_to(self.enemy_natural)
        # Hold at 13 supply until the first barracks finishes.
        if self.supply_used >= 13 and not self.barracks.ready and self.already_pending(BARRACKS):
            return False
        "gives load order to dropship (self.load_dropship = True)"
        # With 7+ marines and a medivac available, flag the load phase.
        if ((self.marines.ready.amount) >= 7
                and not self.dropship_sent
                and not self.load_dropship
                and self.medivacs.amount >= 1):
            self.load_dropship = True
        # Abort path: enemy pressure at home (3+ units nearby, or roaches
        # on the ground) before the drop was committed -> switch to turtle.
        elif ((self.enemy_units.closer_than(self.defence_radius,
                self.start_location).amount > 2 or self.enemy_units_on_ground.of_type(
                ROACH))
                and not self.load_dropship and not self.dropship_sent):
            # Reset strategy flags for the defensive plan.
            self.marine_drop = False
            self.dropship_sent = False
            self.first_base_saturation = 0
            self.build_cc_home = True
            self.priority_tank = True
            self.refineries_in_first_base = 2
            self.mines_left = 0
            self.hellion_left = 0
            self.delay_starport = True
            # Cancel in-progress expansion CC to refund minerals.
            if self.already_pending(COMMANDCENTER):
                for building in self.cc:
                    if (await self.has_ability(CANCEL_BUILDINPROGRESS, building)):
                        self.do(building(AbilityId.CANCEL_BUILDINPROGRESS))
                        print("CC cancelled")
            # Cancel the in-progress starport as well.
            if self.already_pending(UnitTypeId.STARPORT):
                for building in self.starports:
                    if (await self.has_ability(CANCEL_BUILDINPROGRESS, building)):
                        self.do(building(AbilityId.CANCEL_BUILDINPROGRESS))
                        print("Starport cancelled")
            # Cancel any factory add-on under construction.
            if self.already_pending(FACTORYREACTOR):
                for factory in self.factories:
                    abilities = (await self.get_available_abilities(factory))
                    if CANCEL_FACTORYADDON in abilities:
                        self.do(factory(AbilityId.CANCEL_FACTORYADDON))
                        print("Building cancelled")
            if self.chat:
                await self._client.chat_send("Abort marinedrop strategy. Go turtle.", team_only=False)
        "expand if first base saturation reached"
        # One base and saturated -> take the natural (needs 400+ minerals).
        if self.ccANDoc.amount == 1 and not self.already_pending(UnitTypeId.COMMANDCENTER):
            for cc in self.ccANDoc:
                if cc.assigned_harvesters >= (cc.ideal_harvesters + self.first_base_saturation):
                    if self.minerals > 400:
                        await self.expand_now_ANI()
                    else:
                        return False
        "upgrade to orbital"
        # Morph plain CCs into orbital commands once 2+ bases exist.
        if (self.barracks.ready and self.cc.ready.idle and self.ccANDoc.amount > 1):
            if self.minerals > 150:
                for cc in self.cc.ready.idle:
                    self.do(cc(AbilityId.UPGRADETOORBITAL_ORBITALCOMMAND))
                    print("up grade orbital")
                    return False
        "After dropship departure we expand"
        # Once the drop is committed and all bases are saturated, expand.
        if (self.dropship_sent or self.load_dropship) and not self.already_pending(COMMANDCENTER):
            expand = True
            for cc in self.ccANDoc:
                if cc.assigned_harvesters < cc.ideal_harvesters:
                    expand = False
            if expand:
                if self.minerals > 400:
                    await self.expand_now_ANI()
                else:
                    return False
        "if dropship is full remove load command from dropship (self.load_dropship = False)"
        # A medivac that can unload but not load is considered full.
        dropship_is_full = 0
        if self.medivacs.amount >= 1:
            for dropship in self.medivacs:
                abilities = (await self.get_available_abilities(dropship))
                if not (LOAD_MEDIVAC in abilities) and (UNLOADALLAT_MEDIVAC in abilities):
                    dropship_is_full = dropship_is_full + 1
        if dropship_is_full >= 1 and self.load_dropship:
            self.load_dropship = False
        # In-flight micro after the drop was sent.
        if self.dropship_sent:
            # All medivacs lost -> strategy over.
            if not self.medivacs:
                self.marine_drop = False
                self.dropship_sent = False
            for medivac in self.medivacs:
                # Emergency unload near static AA or after taking the first hit.
                if self.enemy_structures.of_type([UnitTypeId.MISSILETURRET, UnitTypeId.SPORECRAWLER])\
                        .closer_than(15, medivac) or medivac.did_take_first_hit:
                    abilities = (await self.get_available_abilities(medivac))
                    if AbilityId.UNLOADALLAT_MEDIVAC in abilities:
                        self.do(medivac(AbilityId.UNLOADALLAT_MEDIVAC, medivac.position))
                        continue
                # Queue exhausted -> the drop run is finished.
                if len(medivac.orders) < 1:
                    self.marine_drop = False
                    self.dropship_sent = False
        "land flying starport to rector"
        if self.starportflying and self.doner_location:
            for starport in self.starportflying:
                self.do(starport(LAND, self.doner_location))
                return False
        "wait for first supplydepot"
        if not self.supplydepots or not self.enemy_natural:
            return True
        expand = random.choice(self.supplydepots)
        "make first refinery"
        if not self.refineries:
            await self.execute_build_refinery()
            return False
        # Keep at least 2 marines out before anything else.
        if self.marines.amount < 2:
            for barracks in self.barracks.idle:
                self.do(barracks.train(UnitTypeId.MARINE))
                return False
        "Build 1 barracks"
        if (self.barracks.amount + self.barracksflyings.amount) < 1:
            if self.minerals > 150:
                await self.build_for_me(BARRACKS)
            return False
        "Build 2nd barracks"
        if (self.barracks.amount + self.barracksflyings.amount) < 2 and self.factories:
            if self.minerals > 150:
                await self.build_for_me(BARRACKS)
            return False
        "build factory"
        if not self.factories and not self.already_pending(FACTORY) and self.barracks.ready:
            if self.can_afford(FACTORY):
                await self.build_for_me(FACTORY)
            return False
        "build starport"
        if ((self.starports.amount + self.starportflying.amount) == 0
                and not self.already_pending(STARPORT)
                and self.factories.ready):
            if self.can_afford(STARPORT):
                await self.build_for_me(STARPORT)
            return False
        # A starport already sits on the donor location -> clear the marker.
        if self.doner_location:
            if self.starports.closer_than(1, self.doner_location):
                self.doner_location = None
                return False
        # Lift a starport so it can land on the donor add-on position.
        if self.doner_location and self.starports.ready:
            sp = random.choice(self.starports.ready)
            self.do(sp(LIFT))
            print("Reason for lift: doner location detected. Preparing for SP reactor.")
            return False
        "build medivac and marines"
        if ((self.medivacs.amount + self.already_pending(UnitTypeId.MEDIVAC)) < 1):
            for starport in self.starports.ready:
                if self.can_afford(MEDIVAC) and self.can_feed(MEDIVAC):
                    print("Training dropship")
                    self.do(starport.train(MEDIVAC))
                    return False
        # Otherwise keep marine production flowing until the drop leaves.
        elif (self.minerals > 50
                and not self.dropship_sent
                and dropship_is_full < 1
                and not self.load_dropship):
            for barracks in self.barracks:
                if len(barracks.orders) >= 2:
                    continue
                # No add-on: avoid queuing a second marine.
                if len(barracks.orders) >= 1 and barracks.add_on_tag == 0:
                    continue
                self.do(barracks.train(UnitTypeId.MARINE))
                return False
            if self.minerals > 300:
                return True
            else:
                return False
        # load and send dropship
        if not self.dropship_sent and self.load_dropship:
            for dropship in self.medivacs.idle:
                if self.marines and self.load_dropship:
                    marine = self.marines.closest_to(dropship)
                    self.do(dropship(AbilityId.LOAD_MEDIVAC, marine, queue=True))
                    continue  # continue for loop, dont execute any of the following
        # Full and not yet dispatched: queue the whole drop route.
        if dropship_is_full >= 1 and not self.dropship_sent:
            for dropship in self.medivacs:
                self.do(dropship.move(waypoint, queue=True))
                self.do(dropship.move(drop_point, queue=True))
                self.do(dropship(AbilityId.UNLOADALLAT_MEDIVAC, drop_point, queue=True))
                self.do(dropship.move(waypoint, queue=True))
                self.do(dropship.move(self.homeBase.position, queue=True))
                self.dropship_sent = True
        return True
async def build_cc_at_home(self):
build_site = await self.find_placement(UnitTypeId.COMMANDCENTER, near=self.homeBase.position,
max_distance=30)
if build_site:
contractor = self.select_contractor(build_site)
await self.build(COMMANDCENTER, build_site, build_worker=contractor)
else:
await self._client.chat_send("COMMANDCENTER placement error!!!", team_only=False)
self.build_cc_home = False
async def cashe_units_fast_cycle(self):
self.cc = self.townhalls(UnitTypeId.COMMANDCENTER)
self.orbitalcommand = self.townhalls(UnitTypeId.ORBITALCOMMAND)
self.ccANDoc = (self.cc | self.orbitalcommand | self.townhalls(UnitTypeId.PLANETARYFORTRESS))
self.townhalls_flying = (self.townhalls(UnitTypeId.COMMANDCENTERFLYING) |
self.townhalls(UnitTypeId.ORBITALCOMMANDFLYING))
self.outpost = None
self.outpost = await self.get_outpost()
self.reapers = self.units(UnitTypeId.REAPER)
self.marines = self.units(UnitTypeId.MARINE)
self.medivacs = self.units(UnitTypeId.MEDIVAC)
self.marauders = self.units(UnitTypeId.MARAUDER)
self.siegetanks_sieged = self.units(UnitTypeId.SIEGETANKSIEGED)
self.hellions = self.units(UnitTypeId.HELLION)
self.cyclones = self.units(UnitTypeId.CYCLONE)
self.thors = self.units(UnitTypeId.THORAP)
self.banshees = self.units(UnitTypeId.BANSHEE)
self.vikings = self.units(UnitTypeId.VIKINGFIGHTER)
self.enemy_units_and_structures = (self.enemy_units | self.enemy_structures).filter(
lambda x: x.is_visible)
self.enemy_units_on_ground = self.enemy_units_and_structures.not_structure.not_flying.filter(
lambda x: not x.is_hallucination)
self.remember_friendly_units()
self.bunkers = self.structures(UnitTypeId.BUNKER)
self.general = None
if self.dropship_sent and self.hellions:
self.general = self.hellions.furthest_to(self.start_location)
elif self.thors.exists:
self.general = self.thors.furthest_to(self.start_location)
elif | |
# coding: utf-8
import os
import webbrowser
from typing import List
from collections import OrderedDict
from datetime import datetime
import pandas as pd
import traceback
from .objects import Mark, Direction, BI, FakeBI, FX, RawBar, NewBar
from .utils.echarts_plot import kline_pro
from .signals import check_three_fd, check_five_fd, check_seven_fd, check_nine_fd, \
check_eleven_fd, check_thirteen_fd, Signals
from .utils.ta import RSQ
def create_fake_bis(fxs: List[FX]) -> List[FakeBI]:
    """Build the list of fake BIs from a fractal sequence.

    :param fxs: fractal sequence; tops and bottoms must alternate
    :return: one FakeBI per adjacent fractal pair
    """
    # Drop the trailing fractal when the count is odd.
    if len(fxs) % 2 != 0:
        fxs = fxs[:-1]

    result: List[FakeBI] = []
    for prev_fx, cur_fx in zip(fxs, fxs[1:]):
        if prev_fx.mark == Mark.D:
            # Bottom -> top: upward fake stroke.
            result.append(FakeBI(symbol=prev_fx.symbol, sdt=prev_fx.dt, edt=cur_fx.dt,
                                 direction=Direction.Up, high=cur_fx.high, low=prev_fx.low,
                                 power=round(cur_fx.high - prev_fx.low, 2)))
        elif prev_fx.mark == Mark.G:
            # Top -> bottom: downward fake stroke.
            result.append(FakeBI(symbol=prev_fx.symbol, sdt=prev_fx.dt, edt=cur_fx.dt,
                                 direction=Direction.Down, high=prev_fx.high, low=cur_fx.low,
                                 power=round(prev_fx.high - cur_fx.low, 2)))
        else:
            raise ValueError
    return result
def remove_include(k1: NewBar, k2: NewBar, k3: RawBar):
    """Resolve the inclusion relation between bars.

    Input is three consecutive bars where ``k1`` and ``k2`` are already
    free of inclusion and ``k3`` is the next raw bar.  Returns
    ``(merged, k4)``: ``merged`` tells whether k2 and k3 were combined,
    ``k4`` is the resulting NewBar.
    """
    # The k1 -> k2 move decides how an inclusion is resolved.
    if k1.high < k2.high:
        direction = Direction.Up
    elif k1.high > k2.high:
        direction = Direction.Down
    else:
        # Equal highs: no direction assignable, pass k3 through unchanged.
        k4 = NewBar(symbol=k3.symbol, dt=k3.dt, open=k3.open,
                    close=k3.close, high=k3.high, low=k3.low, vol=k3.vol, elements=[k3])
        return False, k4

    # Check whether k2 and k3 include one another; merge if so.
    if (k2.high <= k3.high and k2.low >= k3.low) or (k2.high >= k3.high and k2.low <= k3.low):
        if direction == Direction.Up:
            # Upward: merged bar keeps the higher high AND the higher low.
            high = max(k2.high, k3.high)
            low = max(k2.low, k3.low)
            dt = k2.dt if k2.high > k3.high else k3.dt
        elif direction == Direction.Down:
            # Downward: merged bar keeps the lower high AND the lower low.
            high = min(k2.high, k3.high)
            low = min(k2.low, k3.low)
            dt = k2.dt if k2.low < k3.low else k3.dt
        else:
            raise ValueError

        # Preserve k3's candle colour (bearish -> open at high, close at low).
        if k3.open > k3.close:
            open_ = high
            close = low
        else:
            open_ = low
            close = high
        vol = k2.vol + k3.vol
        # Hidden bug (upstream note): len(k2.elements) can grow huge in rare
        # situations; root cause unknown.  Temporary workaround: cap the
        # element list at 100 entries.
        elements = [x for x in k2.elements[:100] if x.dt != k3.dt] + [k3]
        k4 = NewBar(symbol=k3.symbol, dt=dt, open=open_,
                    close=close, high=high, low=low, vol=vol, elements=elements)
        return True, k4
    else:
        # No inclusion: k3 simply becomes the next merged bar.
        k4 = NewBar(symbol=k3.symbol, dt=k3.dt, open=k3.open,
                    close=k3.close, high=k3.high, low=k3.low, vol=k3.vol, elements=[k3])
        return False, k4
def check_fx(k1: NewBar, k2: NewBar, k3: NewBar):
    """Return the fractal (FX) formed at ``k2`` by three consecutive
    inclusion-free bars, or ``None`` when no fractal is present."""
    fx = None

    # Top fractal: the middle bar carries the strictly highest high.
    if k2.high > k1.high and k2.high > k3.high:
        power = "强" if k3.close < k1.low else "弱"
        # Choose the fractal low depending on a gap between k1 and k2.
        low = k1.low if k1.high > k2.low else k2.low
        # A fractal must not collapse to high == low.
        if low == k2.high:
            low = k1.low
        fx = FX(symbol=k1.symbol, dt=k2.dt, mark=Mark.G, high=k2.high, low=low,
                fx=k2.high, elements=[k1, k2, k3], power=power)

    # Bottom fractal: the middle bar carries the strictly lowest low.
    if k2.low < k1.low and k2.low < k3.low:
        power = "强" if k3.close > k1.high else "弱"
        # Choose the fractal high depending on a gap between k1 and k2.
        high = k1.high if k1.low < k2.high else k2.high
        # A fractal must not collapse to high == low.
        if high == k2.low:
            high = k1.high
        fx = FX(symbol=k1.symbol, dt=k2.dt, mark=Mark.D, high=high, low=k2.low,
                fx=k2.low, elements=[k1, k2, k3], power=power)

    return fx
def check_fxs(bars: List[NewBar]) -> List[FX]:
    """Find all fractals (FX) within a run of inclusion-free bars."""
    fxs = []
    for i in range(1, len(bars) - 1):
        fx: FX = check_fx(bars[i - 1], bars[i], bars[i + 1])
        if isinstance(fx, FX):
            # Possible hidden bug (upstream note): fxs should alternate
            # between tops and bottoms, but in some special cases it does
            # not.  Temporary workaround: force alternation by dropping the
            # previous fractal when two consecutive ones share a mark.
            # NOTE(review): the `len(fxs) >= 2` guard still lets a duplicate
            # mark through while the list holds a single entry -- confirm
            # whether `>= 1` was intended.
            if len(fxs) >= 2 and fx.mark == fxs[-1].mark:
                fxs.pop()
            fxs.append(fx)
    return fxs
def check_bi(bars: List[NewBar]):
    """Find one completed BI (stroke) in a run of inclusion-free bars.

    Returns ``(bi, remaining_bars)``: ``bi`` is a :class:`BI` when a
    stroke is confirmed, otherwise ``None``; ``remaining_bars`` is the
    bar list to continue the search with (the unchanged input when no
    stroke was found).
    """
    fxs = check_fxs(bars)
    if len(fxs) < 2:
        return None, bars

    # fx_a anchors the start of the candidate stroke.
    fx_a = fxs[0]
    try:
        if fxs[0].mark == Mark.D:
            # Bottom first: look for a later top strictly above fx_a.
            direction = Direction.Up
            fxs_b = [x for x in fxs if x.mark == Mark.G and x.dt > fx_a.dt and x.fx > fx_a.fx]
            if not fxs_b:
                return None, bars
            fx_b = fxs_b[0]
            # Keep the highest candidate top (ties resolved to the latest).
            for fx in fxs_b:
                if fx.high >= fx_b.high:
                    fx_b = fx
        elif fxs[0].mark == Mark.G:
            # Top first: look for a later bottom strictly below fx_a.
            direction = Direction.Down
            fxs_b = [x for x in fxs if x.mark == Mark.D and x.dt > fx_a.dt and x.fx < fx_a.fx]
            if not fxs_b:
                return None, bars
            fx_b = fxs_b[0]
            # Keep the lowest candidate bottom (ties resolved to the latest).
            for fx in fxs_b[1:]:
                if fx.low <= fx_b.low:
                    fx_b = fx
        else:
            raise ValueError
    except:
        # Defensive catch-all: log and report "no stroke found".
        traceback.print_exc()
        return None, bars

    # Bars inside the candidate stroke / bars left for the next search.
    bars_a = [x for x in bars if fx_a.elements[0].dt <= x.dt <= fx_b.elements[2].dt]
    bars_b = [x for x in bars if x.dt >= fx_b.elements[0].dt]

    # Do the price ranges of fx_a and fx_b include one another?
    ab_include = (fx_a.high > fx_b.high and fx_a.low < fx_b.low) or (fx_a.high < fx_b.high and fx_a.low > fx_b.low)

    # # Check whether fx_b's left-side interval was broken (disabled)
    # max_b = max([x.high for x in bars_b])
    # min_b = min([x.low for x in bars_b])
    # fx_b_end = fx_b.high < max_b or fx_b.low > min_b

    # A valid stroke needs at least 7 bars and non-overlapping end fractals.
    if len(bars_a) >= 7 and not ab_include:
        # Compute the stroke's derived attributes.
        power_price = round(abs(fx_b.fx - fx_a.fx), 2)
        change = round((fx_b.fx - fx_a.fx) / fx_a.fx, 4)
        fxs_ = [x for x in fxs if fx_a.elements[0].dt <= x.dt <= fx_b.elements[2].dt]
        fake_bis = create_fake_bis(fxs_)
        bi = BI(symbol=fx_a.symbol, fx_a=fx_a, fx_b=fx_b, fxs=fxs_, fake_bis=fake_bis,
                direction=direction, power=power_price, high=max(fx_a.high, fx_b.high),
                low=min(fx_a.low, fx_b.low), bars=bars_a, length=len(bars_a),
                rsq=RSQ([x.close for x in bars_a[1:-1]]), change=change)
        return bi, bars_b
    else:
        return None, bars
def get_sub_span(bis: List[BI], start_dt: [datetime, str], end_dt: [datetime, str], direction: Direction) -> List[BI]:
    """Extract the sub-interval of strokes between two timestamps.

    This is the key step for multi-level joint analysis.

    :param bis: list of strokes
    :param start_dt: sub-interval start time
    :param end_dt: sub-interval end time
    :param direction: required direction of the first and last stroke
    :return: strokes overlapping [start_dt, end_dt], trimmed so the span
        starts and ends with a stroke of the requested direction
    """
    start_dt = pd.to_datetime(start_dt)
    end_dt = pd.to_datetime(end_dt)

    # A stroke belongs to the span when it straddles the start, lies fully
    # inside, or straddles the end.
    sub = [bi for bi in bis
           if (bi.fx_a.dt < start_dt < bi.fx_b.dt)
           or (start_dt <= bi.fx_a.dt < bi.fx_b.dt <= end_dt)
           or (bi.fx_a.dt < end_dt < bi.fx_b.dt)]

    # Trim a wrongly-directed stroke from either end.
    if sub and sub[0].direction != direction:
        sub = sub[1:]
    if sub and sub[-1].direction != direction:
        sub = sub[:-1]
    return sub
def get_sub_bis(bis: List[BI], bi: BI) -> List[BI]:
    """Map a higher-level stroke onto its lower-level stroke sequence.

    :param bis: lower-level stroke list
    :param bi: higher-level stroke
    :return: lower-level strokes spanned by ``bi`` (empty list if none)
    """
    span = get_sub_span(bis, start_dt=bi.fx_a.dt, end_dt=bi.fx_b.dt, direction=bi.direction)
    return span if span else []
class CZSC:
def __init__(self, bars: List[RawBar], freq: str, max_bi_count=30):
"""
:param bars: K线数据
:param freq: K线级别
:param max_bi_count: 最大保存的笔数量
默认值为 30,仅使用内置的信号和因子,不需要调整这个参数。
如果进行新的信号计算需要用到更多的笔,可以适当调大这个参数。
"""
self.max_bi_count = max_bi_count
self.bars_raw = [] # 原始K线序列
self.bars_ubi = [] # 未完成笔的无包含K线序列
self.bi_list: List[BI] = []
self.symbol = bars[0].symbol
self.freq = freq
for bar in bars:
self.update(bar)
self.signals = self.get_signals()
def __repr__(self):
return "<CZSC for {}>".format(self.symbol)
    def __update_bi(self):
        """Extend or repair ``bi_list`` from the unfinished-bar buffer."""
        bars_ubi = self.bars_ubi
        if len(bars_ubi) < 3:
            return

        # Look for a stroke.
        if not self.bi_list:
            # Finding the very first stroke: anchor it at the most extreme
            # fractal sharing the first fractal's mark, then trim the buffer.
            fxs = check_fxs(bars_ubi)
            if not fxs:
                return
            fx_a = fxs[0]
            fxs_a = [x for x in fxs if x.mark == fx_a.mark]
            for fx in fxs_a:
                if (fx_a.mark == Mark.D and fx.low <= fx_a.low) \
                        or (fx_a.mark == Mark.G and fx.high >= fx_a.high):
                    fx_a = fx
            bars_ubi = [x for x in bars_ubi if x.dt >= fx_a.elements[0].dt]

            bi, bars_ubi_ = check_bi(bars_ubi)
            if isinstance(bi, BI):
                self.bi_list.append(bi)
            self.bars_ubi = bars_ubi_
            return

        last_bi = self.bi_list[-1]

        # If the last stroke was broken, merge its bars back into bars_ubi.
        min_low_ubi = min([x.low for x in bars_ubi[2:]])
        max_high_ubi = max([x.high for x in bars_ubi[2:]])

        if last_bi.direction == Direction.Up and max_high_ubi > last_bi.high:
            # The buffer extended above the last up-stroke's high ...
            if min_low_ubi < last_bi.low and len(self.bi_list) > 2:
                # ... and also broke below its low: roll back two strokes
                # and rebuild from the second-to-last stroke's bars.
                bars_ubi_a = self.bi_list[-2].bars \
                    + [x for x in self.bi_list[-1].bars if x.dt > self.bi_list[-2].bars[-1].dt] \
                    + [x for x in bars_ubi if x.dt > self.bi_list[-1].bars[-1].dt]
                self.bi_list.pop(-1)
                self.bi_list.pop(-1)
            else:
                # Only the high was broken: roll back one stroke.
                bars_ubi_a = last_bi.bars + [x for x in bars_ubi if x.dt > last_bi.bars[-1].dt]
                self.bi_list.pop(-1)
        elif last_bi.direction == Direction.Down and min_low_ubi < last_bi.low:
            # Mirror image for a broken down-stroke.
            if max_high_ubi > last_bi.high and len(self.bi_list) > 2:
                bars_ubi_a = self.bi_list[-2].bars \
                    + [x for x in self.bi_list[-1].bars if x.dt > self.bi_list[-2].bars[-1].dt] \
                    + [x for x in bars_ubi if x.dt > self.bi_list[-1].bars[-1].dt]
                self.bi_list.pop(-1)
                self.bi_list.pop(-1)
            else:
                bars_ubi_a = last_bi.bars + [x for x in bars_ubi if x.dt > last_bi.bars[-1].dt]
                self.bi_list.pop(-1)
        else:
            # Last stroke intact: just keep extending the buffer.
            bars_ubi_a = bars_ubi

        if len(bars_ubi_a) > 300:
            print("{} - {} 未完成笔延伸超长,延伸数量: {}".format(self.symbol, self.freq, len(bars_ubi_a)))

        bi, bars_ubi_ = check_bi(bars_ubi_a)
        self.bars_ubi = bars_ubi_
        if isinstance(bi, BI):
            self.bi_list.append(bi)
def get_signals(self):
s = OrderedDict({"symbol": self.symbol, "dt": self.bars_raw[-1].dt, "close": self.bars_raw[-1].close})
# 倒0,表示未确认完成笔
# 倒1,倒数第1笔的缩写,表示第N笔
# 倒2,倒数第2笔的缩写,表示第N-1笔
# 倒3,倒数第3笔的缩写,表示第N-2笔
# 以此类推
s.update({
"未完成笔长度": len(self.bars_ubi),
"三K形态": Signals.Other.value,
"倒1方向": Signals.Other.value,
"倒1长度": 0,
"倒1价差力度": 0,
"倒1涨跌幅": 0,
"倒1拟合优度": 0,
"倒1分型数量": 0,
"倒1内部形态": Signals.Other.value,
"倒2方向": Signals.Other.value,
"倒2长度": 0,
"倒2价差力度": 0,
"倒2涨跌幅": 0,
"倒2拟合优度": 0,
"倒2分型数量": 0,
"倒3方向": Signals.Other.value,
"倒3长度": 0,
"倒3价差力度": 0,
"倒3涨跌幅": 0,
"倒3拟合优度": 0,
"倒3分型数量": 0,
"倒4方向": Signals.Other.value,
"倒4长度": 0,
"倒4价差力度": 0,
"倒4涨跌幅": 0,
"倒4拟合优度": 0,
"倒4分型数量": 0,
"倒5方向": Signals.Other.value,
"倒5长度": 0,
"倒5价差力度": 0,
"倒5涨跌幅": 0,
"倒5拟合优度": 0,
"倒5分型数量": 0,
"倒1表里关系": Signals.Other.value,
"倒1三笔": Signals.Other.value,
"倒2三笔": Signals.Other.value,
"倒3三笔": Signals.Other.value,
"倒4三笔": Signals.Other.value,
"倒5三笔": Signals.Other.value,
"倒1形态": Signals.Other.value,
"倒2形态": Signals.Other.value,
"倒3形态": Signals.Other.value,
"倒4形态": Signals.Other.value,
"倒5形态": Signals.Other.value,
"倒6形态": Signals.Other.value,
"倒7形态": Signals.Other.value,
})
if len(self.bars_ubi) >= 3:
tri = self.bars_ubi[-3:]
if tri[0].high > tri[1].high < tri[2].high:
s["三K形态"] = Signals.TK1.value
elif tri[0].high < tri[1].high < tri[2].high:
s["三K形态"] = Signals.TK2.value
elif tri[0].high < | |
import collections
import math
from cctbx import sgtbx, uctbx
from libtbx.math_utils import nearest_integer as nint
from scitbx import matrix
from dials.algorithms.integration import filtering
from dials.array_family import flex
from dials.util import tabulate
# A resolution shell: (d_min, d_max) bounds in Angstroms.
Slot = collections.namedtuple("Slot", "d_min d_max")

# Field names shared by the single-image and multi-image statistics records.
_stats_field_names = [
    "d_min_distl_method_1",
    "d_min_distl_method_2",
    "estimated_d_min",
    "n_spots_4A",
    "n_spots_no_ice",
    "n_spots_total",
    "noisiness_method_1",
    "noisiness_method_2",
    "total_intensity",
]

# Spot-finding statistics for one image.
StatsSingleImage = collections.namedtuple("StatsSingleImage", _stats_field_names)
class StatsMultiImage(collections.namedtuple("StatsMultiImage", _stats_field_names)):
    """Spot-finding statistics aggregated over multiple images.

    Each namedtuple field holds a per-image sequence.  Optional attributes
    ``image``, ``n_indexed`` and ``fraction_indexed`` may be attached
    externally and are picked up via ``getattr``/``hasattr``.
    """

    __slots__ = ()

    def as_table(self, perm=None, n_rows=None):
        """Format the statistics as a list of table rows.

        :param perm: optional list of image indices selecting and ordering
            the rows; defaults to natural image order.
        :param n_rows: optional cap on the number of rows.
        :return: list of rows; the first row holds the column headers.
        """
        # Image labels: use the attached `image` attribute when present,
        # else fall back to 1-based indices.
        if hasattr(self, "image"):
            image = self.image
        else:
            image = flex.int(range(1, len(self.n_spots_total) + 1)).as_string()
        rows = [["image", "#spots", "#spots_no_ice", "total_intensity"]]
        estimated_d_min = None
        d_min_distl_method_1 = None
        d_min_distl_method_2 = None
        n_indexed = getattr(self, "n_indexed", None)
        fraction_indexed = getattr(self, "fraction_indexed", None)
        # Only include each optional column when its values are meaningful
        # (all positive / attribute present).
        if flex.double(self.estimated_d_min).all_gt(0):
            estimated_d_min = self.estimated_d_min
            rows[0].append("d_min")
        if flex.double(self.d_min_distl_method_1).all_gt(0):
            d_min_distl_method_1 = self.d_min_distl_method_1
            rows[0].append("d_min (distl method 1)")
        if flex.double(self.d_min_distl_method_2).all_gt(0):
            d_min_distl_method_2 = self.d_min_distl_method_2
            rows[0].append("d_min (distl method 2)")
        if n_indexed is not None:
            rows[0].append("#indexed")
        if fraction_indexed is not None:
            rows[0].append("fraction_indexed")
        if perm is None:
            perm = list(range(len(self.n_spots_total)))
        if n_rows is not None:
            n_rows = min(n_rows, len(perm))
            perm = perm[:n_rows]
        for i_image in perm:
            d_min_str = ""
            method1_str = ""
            method2_str = ""
            # Resolution estimates <= 0 are placeholders; render blanks.
            if self.estimated_d_min is not None and self.estimated_d_min[i_image] > 0:
                d_min_str = f"{self.estimated_d_min[i_image]:.2f}"
            if (
                self.d_min_distl_method_1 is not None
                and self.d_min_distl_method_1[i_image] > 0
            ):
                method1_str = f"{self.d_min_distl_method_1[i_image]:.2f}"
                # Append the noisiness metric in parentheses.
                if self.noisiness_method_1 is not None:
                    method1_str += f" ({self.noisiness_method_1[i_image]:.2f})"
            if (
                self.d_min_distl_method_2 is not None
                and self.d_min_distl_method_2[i_image] > 0
            ):
                method2_str = f"{self.d_min_distl_method_2[i_image]:.2f}"
                if self.noisiness_method_2 is not None:
                    method2_str += f" ({self.noisiness_method_2[i_image]:.2f})"
            row = [
                image[i_image],
                str(self.n_spots_total[i_image]),
                str(self.n_spots_no_ice[i_image]),
                f"{self.total_intensity[i_image]:.0f}",
            ]
            # Optional columns mirror the header additions above, in order.
            if estimated_d_min is not None:
                row.append(d_min_str)
            if d_min_distl_method_1 is not None:
                row.append(method1_str)
            if d_min_distl_method_2 is not None:
                row.append(method2_str)
            if n_indexed is not None:
                row.append("%i" % self.n_indexed[i_image])
            if fraction_indexed is not None:
                row.append(f"{self.fraction_indexed[i_image]:.2f}")
            rows.append(row)
        return rows

    def __str__(self):
        """Render the statistics as a human-readable table."""
        return tabulate(self.as_table(), headers="firstrow")
class binner_equal_population:
    """Resolution binner placing (approximately) equal numbers of
    reflections into each bin."""

    def __init__(self, d_star_sq, target_n_per_bin=20, max_slots=20, min_slots=5):
        # Aim for target_n_per_bin reflections per slot, clamped to the
        # [min_slots, max_slots] range when those limits are supplied.
        n_slots = len(d_star_sq) // target_n_per_bin
        if max_slots is not None:
            n_slots = min(n_slots, max_slots)
        if min_slots is not None:
            n_slots = max(n_slots, min_slots)
        per_bin = len(d_star_sq) / n_slots
        # Convert sorted d*^2 values to d-spacings (descending resolution).
        d_sorted = uctbx.d_star_sq_as_d(flex.sorted(d_star_sq))
        self.bins = []
        d_max = d_sorted[0]
        for i_slot in range(n_slots):
            # Each bin ends at the d-spacing of its last member.
            d_min = d_sorted[nint((i_slot + 1) * per_bin) - 1]
            self.bins.append(Slot(d_min, d_max))
            d_max = d_min
class binner_d_star_cubed:
    """Resolution binner with bins of equal volume in d*^3 space."""

    def __init__(self, d_spacings, target_n_per_bin=25, max_slots=40, min_slots=20):
        # Deduplicate, then sort from low to high resolution.
        d_spacings = flex.double(list(set(d_spacings)))
        d_spacings_sorted = flex.sorted(d_spacings, reverse=True)
        d_star_cubed_sorted = flex.pow(1 / d_spacings_sorted, 3)

        # choose bin volume such that lowest resolution shell contains 5% of the
        # spots, or 25, whichever is greater
        low_res_count = int(
            math.ceil(
                min(
                    max(target_n_per_bin, 0.05 * len(d_spacings)),
                    0.25 * len(d_spacings),
                )
            )
        )
        bin_step = d_star_cubed_sorted[low_res_count] - d_star_cubed_sorted[0]
        assert bin_step > 0

        # Number of equal-volume steps spanning the full d*^3 range,
        # clamped to [min_slots, max_slots] when the limits are given.
        n_slots = int(
            math.ceil((d_star_cubed_sorted[-1] - d_star_cubed_sorted[0]) / bin_step)
        )
        if max_slots is not None:
            n_slots = min(n_slots, max_slots)
        if min_slots is not None:
            n_slots = max(n_slots, min_slots)
        # Recompute the step so exactly n_slots bins cover the range.
        bin_step = (d_star_cubed_sorted[-1] - d_star_cubed_sorted[0]) / n_slots

        self.bins = []
        ds3_max = d_star_cubed_sorted[0]
        for i in range(n_slots):
            ds3_min = d_star_cubed_sorted[0] + (i + 1) * bin_step
            # Convert the d*^3 bounds back to d-spacings for the Slot.
            self.bins.append(Slot(1 / ds3_min ** (1 / 3), 1 / ds3_max ** (1 / 3)))
            ds3_max = ds3_min
def outlier_rejection(reflections):
    """Iteratively reject intensity outliers from a reflection table.

    Implements the probabilistic criterion of Sauter et al.
    (http://scripts.iucr.org/cgi-bin/paper?ba0032): the strongest
    reflection is removed, recursively, while its probability under the
    distribution of the remaining reflections is negligible.
    """
    # http://scripts.iucr.org/cgi-bin/paper?ba0032
    if len(reflections) == 1:
        return reflections
    intensities = reflections["intensity.sum.value"]
    variances = reflections["intensity.sum.variance"]

    # Test the single strongest reflection against the rest.
    i_max = flex.max_index(intensities)

    sel = flex.bool(len(reflections), True)
    sel[i_max] = False

    i_test = intensities[i_max]
    var_test = variances[i_max]

    intensities_subset = intensities.select(sel)
    var_subset = variances.select(sel)

    # Prior variance: the test reflection's variance plus the inverse of
    # the summed inverse variances of the remaining reflections.
    var_prior = var_test + 1 / flex.sum(1 / var_subset)
    p_prior = (
        1
        / math.sqrt(2 * math.pi * var_prior)
        * math.exp(-((i_test - flex.mean(intensities_subset)) ** 2) / (2 * var_prior))
    )

    # Plausible under the prior -> keep everything and stop.
    if p_prior > 1e-10:
        return reflections

    # Otherwise drop the outlier and recurse on the remainder.
    return outlier_rejection(reflections.select(sel))
def wilson_outliers(reflections, ice_sel=None, p_cutoff=1e-2):
    """Flag intensity outliers using Wilson statistics.

    Normalised amplitudes E above the cutoff implied by ``p_cutoff`` are
    flagged; the test is applied recursively to the inliers until stable.
    Reflections selected by ``ice_sel`` are excluded from the mean but can
    still be flagged.  Returns a boolean outlier selection.
    """
    # http://scripts.iucr.org/cgi-bin/paper?ba0032
    if ice_sel is None:
        ice_sel = flex.bool(len(reflections), False)

    # E-value threshold corresponding to probability p_cutoff.
    E_cutoff = math.sqrt(-math.log(p_cutoff))
    intensities = reflections["intensity.sum.value"]

    # Mean intensity over non-ice reflections sets the Wilson scale.
    Sigma_n = flex.mean(intensities.select(~ice_sel))
    normalised_amplitudes = flex.sqrt(intensities) / math.sqrt(Sigma_n)

    outliers = normalised_amplitudes >= E_cutoff
    if outliers.count(True):
        # iterative outlier rejection: re-test the inlier subset.
        inliers = ~outliers
        outliers.set_selected(
            inliers,
            wilson_outliers(reflections.select(inliers), ice_sel.select(inliers)),
        )
    return outliers
def estimate_resolution_limit(reflections, ice_sel=None, plot_filename=None):
    """Estimate the diffraction resolution limit from spot intensities.

    Fits upper and lower percentile envelopes to log(I/sigI) vs d*^2 and
    takes the resolution at which the envelopes intersect, restricted to
    points below the upper envelope.  Ice-ring reflections (``ice_sel``)
    and Wilson outliers are excluded from the fits.  Optionally writes a
    diagnostic plot to ``plot_filename``.  Returns the estimated d_min in
    Angstroms, or -1 when no estimate is possible.
    """
    if ice_sel is None:
        ice_sel = flex.bool(len(reflections), False)

    d_star_sq = flex.pow2(reflections["rlp"].norms())
    d_spacings = uctbx.d_star_sq_as_d(d_star_sq)

    intensities = reflections["intensity.sum.value"]
    variances = reflections["intensity.sum.variance"]

    # Only reflections with positive variance contribute to I/sigI.
    sel = variances > 0
    intensities = intensities.select(sel)
    variances = variances.select(sel)
    ice_sel = ice_sel.select(sel)

    i_over_sigi = intensities / flex.sqrt(variances)
    log_i_over_sigi = flex.log(i_over_sigi)

    # Overall linear Wilson-style fit (used only for the diagnostic plot).
    fit = flex.linear_regression(
        d_star_sq.select(~ice_sel), log_i_over_sigi.select(~ice_sel)
    )
    m = fit.slope()
    c = fit.y_intercept()

    log_i_sigi_lower = flex.double()
    d_star_sq_lower = flex.double()
    log_i_sigi_upper = flex.double()
    d_star_sq_upper = flex.double()

    binner = binner_equal_population(
        d_star_sq, target_n_per_bin=20, max_slots=20, min_slots=5
    )

    outliers_all = flex.bool(len(reflections), False)

    # Per-bin 10th/90th percentile points feed the envelope fits.
    low_percentile_limit = 0.1
    upper_percentile_limit = 1 - low_percentile_limit

    for i_slot, slot in enumerate(binner.bins):
        sel_all = (d_spacings < slot.d_max) & (d_spacings >= slot.d_min)
        sel = ~(ice_sel) & sel_all

        if sel.count(True) == 0:
            continue

        outliers = wilson_outliers(
            reflections.select(sel_all), ice_sel=ice_sel.select(sel_all)
        )
        outliers_all.set_selected(sel_all, outliers)

        # Bin members that are neither outliers nor ice reflections.
        isel = sel_all.iselection().select(~(outliers) & ~(ice_sel).select(sel_all))
        log_i_over_sigi_sel = log_i_over_sigi.select(isel)
        d_star_sq_sel = d_star_sq.select(isel)

        perm = flex.sort_permutation(log_i_over_sigi_sel)
        i_lower = perm[int(math.floor(low_percentile_limit * len(perm)))]
        i_upper = perm[int(math.floor(upper_percentile_limit * len(perm)))]
        log_i_sigi_lower.append(log_i_over_sigi_sel[i_lower])
        log_i_sigi_upper.append(log_i_over_sigi_sel[i_upper])
        d_star_sq_upper.append(d_star_sq_sel[i_lower])
        d_star_sq_lower.append(d_star_sq_sel[i_upper])

    # Fit straight lines to the upper and lower percentile envelopes.
    fit_upper = flex.linear_regression(d_star_sq_upper, log_i_sigi_upper)
    m_upper = fit_upper.slope()
    c_upper = fit_upper.y_intercept()
    fit_lower = flex.linear_regression(d_star_sq_lower, log_i_sigi_lower)
    m_lower = fit_lower.slope()
    c_lower = fit_lower.y_intercept()

    if m_upper == m_lower:
        # Parallel envelopes: no usable intersection -> no estimate.
        intersection = (-1, -1)
        resolution_estimate = -1
        inside = flex.bool(len(d_star_sq), False)

    else:
        # http://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Given_the_equations_of_the_lines
        # with:
        # a_ = m_upper
        # b_ = m_lower
        # c_ = c_upper
        # d_ = c_lower
        # intersection == ((d_ - c_) / (a_ - b_), (a_ * d_ - b_ * c_) / (a_ - b_))
        intersection = (
            (c_lower - c_upper) / (m_upper - m_lower),
            (m_upper * c_lower - m_lower * c_upper) / (m_upper - m_lower),
        )

        # The resolution estimate is the highest-resolution point lying
        # below the upper envelope (excluding outliers and ice).
        inside = points_below_line(d_star_sq, log_i_over_sigi, m_upper, c_upper)
        inside = inside & ~outliers_all & ~ice_sel

        if inside.count(True) > 0:
            d_star_sq_estimate = flex.max(d_star_sq.select(inside))
            resolution_estimate = uctbx.d_star_sq_as_d(d_star_sq_estimate)
        else:
            resolution_estimate = -1

    if plot_filename is not None:
        # Diagnostic scatter: all points, points inside the envelope (green),
        # ice (black), outliers (grey), percentile points and fits (red).
        from matplotlib import pyplot

        fig = pyplot.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.scatter(d_star_sq, log_i_over_sigi, marker="+")
        ax.scatter(
            d_star_sq.select(inside),
            log_i_over_sigi.select(inside),
            marker="+",
            color="green",
        )
        ax.scatter(
            d_star_sq.select(ice_sel),
            log_i_over_sigi.select(ice_sel),
            marker="+",
            color="black",
        )
        ax.scatter(
            d_star_sq.select(outliers_all),
            log_i_over_sigi.select(outliers_all),
            marker="+",
            color="grey",
        )
        ax.scatter(d_star_sq_upper, log_i_sigi_upper, marker="+", color="red")
        ax.scatter(d_star_sq_lower, log_i_sigi_lower, marker="+", color="red")

        if intersection[0] <= ax.get_xlim()[1] and intersection[1] <= ax.get_ylim()[1]:
            ax.scatter(
                [intersection[0]], [intersection[1]], marker="x", s=50, color="b"
            )
        xlim = pyplot.xlim()
        ax.plot(xlim, [(m * x + c) for x in xlim])
        ax.plot(xlim, [(m_upper * x + c_upper) for x in xlim], color="red")
        ax.plot(xlim, [(m_lower * x + c_lower) for x in xlim], color="red")
        ax.set_xlabel("d_star_sq")
        ax.set_ylabel("ln(I/sigI)")
        ax.set_xlim((max(-xlim[1], -0.05), xlim[1]))
        ax.set_ylim((0, ax.get_ylim()[1]))

        # Mark the resolution-bin boundaries as dotted vertical lines.
        for i_slot, slot in enumerate(binner.bins):
            if i_slot == 0:
                ax.vlines(
                    uctbx.d_as_d_star_sq(slot.d_max),
                    0,
                    ax.get_ylim()[1],
                    linestyle="dotted",
                    color="grey",
                )
            ax.vlines(
                uctbx.d_as_d_star_sq(slot.d_min),
                0,
                ax.get_ylim()[1],
                linestyle="dotted",
                color="grey",
            )

        ax_ = ax.twiny()  # ax2 is responsible for "top" axis and "right" axis
        xticks = ax.get_xticks()
        xticks_d = [uctbx.d_star_sq_as_d(ds2) if ds2 > 0 else 0 for ds2 in xticks]
        ax_.set_xticks(xticks)
        ax_.set_xlim(ax.get_xlim())
        ax_.set_xlabel(r"Resolution ($\AA$)")
        ax_.set_xticklabels([f"{d:.1f}" for d in xticks_d])
        pyplot.savefig(plot_filename)
        pyplot.close()

    return resolution_estimate
def estimate_resolution_limit_distl_method1(reflections, plot_filename=None):
# Implementation of Method 1 (section 2.4.4) of:
# <NAME>, <NAME>, <NAME>, <NAME> and <NAME>
# J. Appl. Cryst. (2006). 39, 112-119
# https://doi.org/10.1107/S0021889805040677
variances = reflections["intensity.sum.variance"]
sel = variances > 0
reflections = reflections.select(sel)
d_star_sq = flex.pow2(reflections["rlp"].norms())
d_spacings = uctbx.d_star_sq_as_d(d_star_sq)
d_star_cubed = flex.pow(reflections["rlp"].norms(), 3)
step = 2
while len(reflections) / step > 40:
step += 1
order = flex.sort_permutation(d_spacings, reverse=True)
ds3_subset = flex.double()
d_subset = flex.double()
for i in range(len(reflections) // step):
ds3_subset.append(d_star_cubed[order[i * step]])
d_subset.append(d_spacings[order[i * step]])
x = flex.double(range(len(ds3_subset)))
# (i)
# Usually, Pm is the last point, that is, m = n. But m could be smaller than
# n if an unusually high number of spots are detected around a certain
# intermediate resolution. In that case, our search for the image resolution
# does not go outside the spot 'bump;. This is particularly useful when
# ice-rings are present.
slopes = (ds3_subset[1:] - ds3_subset[0]) / (x[1:] - x[0])
skip_first = 3
p_m = flex.max_index(slopes[skip_first:]) + 1 + skip_first
# (ii)
x1 = matrix.col((0, ds3_subset[0]))
x2 = matrix.col((p_m, ds3_subset[p_m]))
gaps = flex.double([0])
v = matrix.col(((x2[1] - x1[1]), -(x2[0] - x1[0]))).normalize()
for i in range(1, p_m):
x0 = matrix.col((i, ds3_subset[i]))
r = x1 - x0
g = abs(v.dot(r))
gaps.append(g)
mv = flex.mean_and_variance(gaps)
s = mv.unweighted_sample_standard_deviation()
# (iii)
p_k = flex.max_index(gaps)
g_k = gaps[p_k]
p_g = p_k
for i in range(p_k + 1, len(gaps)):
g_i = gaps[i]
if g_i > (g_k - 0.5 * s):
p_g = i
d_g = d_subset[p_g]
noisiness = 0
n = len(ds3_subset)
| |
azimuth range'},
unfiltered_cross_correlation_ratio: {
'units': '-',
'standard_name': 'copolar_correlation_coefficient',
'long_name': 'Unfiltered copolar correlation coefficient (RHOHV)',
'coordinates': 'elevation azimuth range'},
uncorrected_cross_correlation_ratio: {
'units': '-',
'standard_name': 'copolar_correlation_coefficient',
'long_name': 'Uncorrected copolar correlation coefficient (RHOHV)',
'coordinates': 'elevation azimuth range'},
logarithmic_cross_correlation_ratio: {
'units': 'dB',
'standard_name': 'logarithmic_copolar_correlation_coefficient',
'long_name': 'Logarithmic copolar correlation coefficient (L)',
'coordinates': 'elevation azimuth range'},
cross_correlation_ratio_in_rain: {
'units': '-',
'standard_name': 'copolar_correlation_coefficient_in_rain',
'long_name': 'copolar correlation coefficient in rain',
'coordinates': 'elevation azimuth range'},
normalized_coherent_power: {
'units': '-',
'standard_name': 'normalized_coherent_power',
'long_name': 'Normalized coherent power',
'valid_max': 1.0,
'valid_min': 0.0,
'comment': 'Also know as signal quality index (SQI)',
'coordinates': 'elevation azimuth range'},
differential_phase: {
'units': 'deg',
'standard_name': 'differential_phase',
'long_name': 'Differential propagation phase (PhiDP)',
'valid_max': 180.0,
'valid_min': -180.0,
'coordinates': 'elevation azimuth range'},
unfolded_differential_phase: {
'units': 'deg',
'standard_name': 'differential_phase',
'long_name': 'Unfolded differential propagation phase',
'coordinates': 'elevation azimuth range'},
corrected_differential_phase: {
'units': 'deg',
'standard_name': 'differential_phase',
'long_name': 'Corrected differential propagation phase',
'coordinates': 'elevation azimuth range'},
uncorrected_differential_phase: {
'units': 'deg',
'standard_name': 'differential_phase',
'long_name': 'Uncorrected differential propagation phase',
'coordinates': 'elevation azimuth range'},
uncorrected_unfiltered_differential_phase: {
'units': 'deg',
'standard_name': 'differential_phase',
'long_name': 'Uncorrected unfiltered differential propagation phase',
'coordinates': 'elevation azimuth range'},
system_differential_phase: {
'units': 'deg',
'standard_name': 'system_differential_phase',
'long_name': 'System differential phase (PhiDP0)',
'coordinates': 'elevation azimuth range'},
first_gate_differential_phase: {
'units': 'gate index',
'standard_name': 'first_gate_differential_phase',
'long_name': 'First valid differential phase gate',
'coordinates': 'elevation azimuth'},
specific_differential_phase: {
'units': 'deg/km',
'standard_name': 'specific_differential_phase',
'long_name': 'Specific differential phase (KDP)',
'coordinates': 'elevation azimuth range'},
corrected_specific_differential_phase: {
'units': 'deg/km',
'standard_name': 'specific_differential_phase',
'long_name': 'Corrected specific differential phase (KDP)',
'coordinates': 'elevation azimuth range'},
# Depolarization ratio fields
linear_depolarization_ratio: {
'units': 'dB',
'standard_name': 'linear_depolarization_ratio',
'long_name': 'Linear depolarization ratio',
'coordinates': 'elevation azimuth range'},
linear_depolarization_ratio_h: {
'units': 'dB',
'standard_name': 'linear_depolarization_ratio_h',
'long_name': 'Linear depolarization ratio horizontal',
'coordinates': 'elevation azimuth range'},
linear_depolarization_ratio_v: {
'units': 'dB',
'standard_name': 'linear_depolarization_ratio_v',
'long_name': 'Linear depolarization ratio vertical',
'coordinates': 'elevation azimuth range'},
circular_depolarization_ratio: {
'units': 'dB',
'standard_name': 'circular_depolarization_ratio',
'long_name': 'Circular depolarization ratio',
'coordinates': 'elevation azimuth range'},
# Misc fields
signal_to_noise_ratio: {
'units': 'dB',
'standard_name': 'signal_to_noise_ratio',
'long_name': 'Signal to noise ratio',
'coordinates': 'elevation azimuth range'},
signal_to_noise_ratio_hh: {
'units': 'dB',
'standard_name': 'signal_to_noise_ratio_hh',
'long_name': 'Signal to noise ratio horizontal',
'coordinates': 'elevation azimuth range'},
signal_to_noise_ratio_vv: {
'units': 'dB',
'standard_name': 'signal_to_noise_ratio_vv',
'long_name': 'Signal to noise ratio vertical',
'coordinates': 'elevation azimuth range'},
noisedBZ_hh: {
'units': 'dBZ',
'standard_name': 'noisedBZ_hh',
'long_name': 'noise in dBZ horizontal',
'coordinates': 'elevation azimuth range'},
noisedBZ_vv: {
'units': 'dBZ',
'standard_name': 'noisedBZ_vv',
'long_name': 'noise in dBZ vertical',
'coordinates': 'elevation azimuth range'},
noisedBm_hh: {
'units': 'dBm',
'standard_name': 'noisedBm_hh',
'long_name': 'noise in dBm horizontal',
'coordinates': 'elevation azimuth range'},
noisedBm_vv: {
'units': 'dBm',
'standard_name': 'noisedBm_vv',
'long_name': 'noise in dBm vertical',
'coordinates': 'elevation azimuth range'},
noisedBADU_hh: {
'units': 'dBADU',
'standard_name': 'noisedBADU_hh',
'long_name': 'noise in dBADU horizontal',
'coordinates': 'elevation azimuth range'},
noisedBADU_vv: {
'units': 'dBADU',
'standard_name': 'noisedBADU_vv',
'long_name': 'noise in dBADU vertical',
'coordinates': 'elevation azimuth range'},
noiseADU_hh: {
'units': 'ADU',
'standard_name': 'noiseADU_hh',
'long_name': 'noise in ADU horizontal',
'coordinates': 'elevation azimuth range'},
noiseADU_vv: {
'units': 'dBADU',
'standard_name': 'noiseADU_vv',
'long_name': 'noise in ADU vertical',
'coordinates': 'elevation azimuth range'},
noise_pos_h: {
'units': '-',
'standard_name': 'noise_pos_h',
'long_name': 'noisy radar bins horizontal polarization',
'labels': ['OTHER', 'NOISE'],
'ticks': [1, 2],
'boundaries': [0.5, 1.5, 2.5],
'coordinates': 'elevation azimuth range',
'scale_factor': 1,
'add_offset': 0,
'_FillValue': 0,
'_Write_as_dtype': 'uint8'},
noise_pos_v: {
'units': '-',
'standard_name': 'noise_pos_v',
'long_name': 'noisy radar bins vertical polarization',
'labels': ['OTHER', 'NOISE'],
'ticks': [1, 2],
'boundaries': [0.5, 1.5, 2.5],
'coordinates': 'elevation azimuth range',
'scale_factor': 1,
'add_offset': 0,
'_FillValue': 0,
'_Write_as_dtype': 'uint8'},
transmitted_signal_power_h: {
'units': 'kW',
'standard_name': 'transmitted_signal_power_h',
'long_name': 'Transmitted signal power horizontal',
'coordinates': 'elevation azimuth range'},
transmitted_signal_power_v: {
'units': 'kW',
'standard_name': 'transmitted_signal_power_v',
'long_name': 'Transmitted signal power vertical',
'coordinates': 'elevation azimuth range'},
complex_spectra_hh_ADU: {
'units': 'ADU',
'standard_name': 'complex_spectra_hh_ADU',
'long_name': 'Complex spectra horizontal',
'coordinates': 'elevation azimuth range'},
complex_spectra_vv_ADU: {
'units': 'ADU',
'standard_name': 'complex_spectra_vv_ADU',
'long_name': 'Complex spectra vertical',
'coordinates': 'elevation azimuth range'},
spectral_power_hh_ADU: {
'units': 'ADU',
'standard_name': 'spectral_power_hh_ADU',
'long_name': 'spectral power horizontal',
'coordinates': 'elevation azimuth range'},
spectral_power_vv_ADU: {
'units': 'ADU',
'standard_name': 'spectral_power_vv_ADU',
'long_name': 'spectral power vertical',
'coordinates': 'elevation azimuth range'},
spectral_power_hh_dBADU: {
'units': 'dBADU',
'standard_name': 'spectral_power_hh_dBADU',
'long_name': 'spectral power horizontal',
'coordinates': 'elevation azimuth range'},
spectral_power_vv_dBADU: {
'units': 'dBADU',
'standard_name': 'spectral_power_vv_dBADU',
'long_name': 'spectral power vertical',
'coordinates': 'elevation azimuth range'},
spectral_power_hh_dBm: {
'units': 'dBm',
'standard_name': 'spectral_power_hh_dBm',
'long_name': 'spectral power horizontal',
'coordinates': 'elevation azimuth range'},
spectral_power_vv_dBm: {
'units': 'dBm',
'standard_name': 'spectral_power_vv_dBm',
'long_name': 'spectral power vertical',
'coordinates': 'elevation azimuth range'},
spectral_noise_power_hh_dBZ: {
'units': 'dBZ',
'standard_name': 'spectral_noise_power_hh_dBZ',
'long_name': 'spectral noise power horizontal',
'coordinates': 'elevation azimuth range'},
spectral_noise_power_vv_dBZ: {
'units': 'dBZ',
'standard_name': 'spectral_noise_power_vv_dBZ',
'long_name': 'spectral noise power vertical',
'coordinates': 'elevation azimuth range'},
spectral_noise_power_hh_dBm: {
'units': 'dBm',
'standard_name': 'spectral_noise_power_hh_dBm',
'long_name': 'spectral noise power horizontal',
'coordinates': 'elevation azimuth range'},
spectral_noise_power_vv_dBm: {
'units': 'dBm',
'standard_name': 'spectral_noise_power_hh_dBm',
'long_name': 'spectral noise power vertical',
'coordinates': 'elevation azimuth range'},
spectral_noise_power_hh_dBADU: {
'units': 'dBADU',
'standard_name': 'spectral_noise_power_hh_dBADU',
'long_name': 'spectral noise power horizontal',
'coordinates': 'elevation azimuth range'},
spectral_noise_power_vv_dBADU: {
'units': 'dBADU',
'standard_name': 'spectral_noise_power_hh_dBADU',
'long_name': 'spectral noise power vertical',
'coordinates': 'elevation azimuth range'},
spectral_noise_power_hh_ADU: {
'units': 'ADU',
'standard_name': 'spectral_noise_power_hh_ADU',
'long_name': 'spectral noise power horizontal',
'coordinates': 'elevation azimuth range'},
spectral_noise_power_vv_ADU: {
'units': 'ADU',
'standard_name': 'spectral_noise_power_hh_ADU',
'long_name': 'spectral noise power vertical',
'coordinates': 'elevation azimuth range'},
spectral_phase_hh: {
'units': 'deg',
'standard_name': 'spectral_phase_hh',
'long_name': 'spectral phase horizontal',
'coordinates': 'elevation azimuth range'},
spectral_phase_vv: {
'units': 'deg',
'standard_name': 'spectral_phase_vv',
'long_name': 'spectral phase vertical',
'coordinates': 'elevation azimuth range'},
spectral_reflectivity_hh: {
'units': 'dBZ',
'standard_name': 'spectral_reflectivity_hh',
'long_name': 'Spectral Horizontal Reflectivity',
'coordinates': 'elevation azimuth range'},
spectral_reflectivity_vv: {
'units': 'dBZ',
'standard_name': 'spectral_reflectivity_vv',
'long_name': 'Spectral Vertical Reflectivity',
'coordinates': 'elevation azimuth range'},
spectral_differential_reflectivity: {
'units': 'dBZ',
'standard_name': 'spectral_differential_reflectivity',
'long_name': 'Spectral Differential Reflectivity',
'coordinates': 'elevation azimuth range'},
spectral_differential_phase: {
'units': 'deg',
'standard_name': 'spectral_differential_phase',
'long_name': 'Spectral Differential Phase',
'coordinates': 'elevation azimuth range'},
spectral_copolar_correlation_coefficient: {
'units': '-',
'standard_name': 'spectral_copolar_correlation_coefficient',
'long_name': 'Spectral copolar correlation coefficient (RHOHV)',
'coordinates': 'elevation azimuth range'},
unfiltered_complex_spectra_hh_ADU: {
'units': 'ADU',
'standard_name': 'unfiltered_complex_spectra_hh_ADU',
'long_name': 'Unfiltered complex spectra horizontal',
'coordinates': 'elevation azimuth range'},
unfiltered_complex_spectra_vv_ADU: {
'units': 'ADU',
'standard_name': 'unfiltered_complex_spectra_vv_ADU',
'long_name': 'Unfiltered complex spectra vertical',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_power_hh_ADU: {
'units': 'ADU',
'standard_name': 'unfiltered_spectral_power_hh_ADU',
'long_name': 'Unfiltered spectral power horizontal',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_power_vv_ADU: {
'units': 'ADU',
'standard_name': 'unfiltered_spectral_power_vv_ADU',
'long_name': 'Unfiltered spectral power vertical',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_power_hh_dBADU: {
'units': 'dBADU',
'standard_name': 'unfiltered_spectral_power_hh_dBADU',
'long_name': 'Unfiltered spectral power horizontal',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_power_vv_dBADU: {
'units': 'dBADU',
'standard_name': 'unfiltered_spectral_power_vv_dBADU',
'long_name': 'Unfiltered spectral power vertical',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_power_hh_dBm: {
'units': 'dBm',
'standard_name': 'unfiltered_spectral_power_hh_dBm',
'long_name': 'Unfiltered spectral power horizontal',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_power_vv_dBm: {
'units': 'dBm',
'standard_name': 'unfiltered_spectral_power_vv_dBm',
'long_name': 'Unfiltered spectral power vertical',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_phase_hh: {
'units': 'deg',
'standard_name': 'unfiltered_spectral_phase_hh',
'long_name': 'Unfiltered spectral phase horizontal',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_phase_vv: {
'units': 'deg',
'standard_name': 'unfiltered_spectral_phase_vv',
'long_name': 'Unfiltered spectral phase vertical',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_reflectivity_hh: {
'units': 'dBZ',
'standard_name': 'unfiltered_spectral_reflectivity_hh',
'long_name': 'Unfiltered Spectral Horizontal Reflectivity',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_reflectivity_vv: {
'units': 'dBZ',
'standard_name': 'unfiltered_spectral_reflectivity_vv',
'long_name': 'Unfiltered Spectral Vertical Reflectivity',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_differential_reflectivity: {
'units': 'dBZ',
'standard_name': 'unfiltered_spectral_differential_reflectivity',
'long_name': 'Unfiltered Spectral Differential Reflectivity',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_differential_phase: {
'units': 'deg',
'standard_name': 'unfiltered_spectral_differential_phase',
'long_name': 'Unfiltered Spectral Differential Phase',
'coordinates': 'elevation azimuth range'},
unfiltered_spectral_copolar_correlation_coefficient: {
'units': '-',
'standard_name': 'unfiltered_spectral_copolar_correlation_coefficient',
'long_name': 'Unfiltered Spectral copolar correlation coefficient (RHOHV)',
'coordinates': 'elevation azimuth range'},
IQ_hh_ADU: {
'units': 'ADU',
'standard_name': 'IQ_hh_ADU',
'long_name': 'IQ signal horizontal',
'coordinates': 'elevation azimuth range'},
IQ_vv_ADU: {
'units': 'ADU',
'standard_name': 'IQ_vv_ADU',
'long_name': 'IQ signal vertical',
'coordinates': 'elevation azimuth range'},
IQ_noise_power_hh_dBZ: {
'units': 'dBZ',
'standard_name': 'IQ_noise_power_hh_dBZ',
'long_name': 'IQ noise power horizontal',
'coordinates': 'elevation azimuth range'},
IQ_noise_power_vv_dBZ: {
'units': 'dBZ',
'standard_name': 'IQ_noise_power_vv_dBZ',
'long_name': 'IQ noise power vertical',
'coordinates': 'elevation azimuth range'},
IQ_noise_power_hh_dBm: {
'units': 'dBm',
'standard_name': 'IQ_noise_power_hh_dBm',
'long_name': 'IQ noise power horizontal',
'coordinates': 'elevation azimuth range'},
IQ_noise_power_vv_dBm: {
'units': 'dBm',
'standard_name': 'IQ_noise_power_vv_dBm',
'long_name': 'IQ noise power vertical',
'coordinates': 'elevation azimuth range'},
IQ_noise_power_hh_dBADU: {
'units': 'dBADU',
'standard_name': 'IQ_noise_power_hh_dBADU',
'long_name': 'IQ noise power horizontal',
'coordinates': 'elevation azimuth range'},
IQ_noise_power_vv_dBADU: {
'units': 'dBADU',
'standard_name': 'IQ_noise_power_vv_dBADU',
'long_name': 'IQ noise power vertical',
'coordinates': 'elevation azimuth range'},
IQ_noise_power_hh_ADU: {
'units': 'ADU',
'standard_name': 'IQ_noise_power_hh_ADU',
'long_name': 'IQ noise power horizontal',
'coordinates': 'elevation azimuth range'},
IQ_noise_power_vv_ADU: {
'units': 'ADU',
'standard_name': 'IQ_noise_power_vv_ADU',
'long_name': 'IQ noise power vertical',
'coordinates': 'elevation azimuth range'},
rain_rate: {
'units': 'mm/h',
'standard_name': 'rain_rate',
'long_name': 'Rain rate',
| |
email_re.findall(partner_info['full_name'] or '')
email = emails and emails[0] or ''
if email and self.email_from and email.lower() == self.email_from.lower():
partner_info['full_name'] = '%s <%s>' % (self.contact_name or self.partner_name, email)
break
return result
@api.model
def get_import_templates(self):
    """Return the import template entries offered for Leads & Opportunities."""
    template = {
        'label': _('Import Template for Leads & Opportunities'),
        'template': '/crm/static/xls/crm_lead.xls',
    }
    return [template]
# ----------------------------------------
# Predictive Lead Scoring
# ----------------------------------------
def _pls_get_naive_bayes_probabilities(self, batch_mode=False):
    """
    In machine learning, naive Bayes classifiers (NBC) are a family of simple "probabilistic classifiers" based on
    applying Bayes theorem with strong (naive) independence assumptions between the variables taken into account.
    E.g: will TDE eat m&m's depending on his sleep status, the amount of work he has and the fullness of his stomach?
    As we use experience to compute the statistics, every day, we will register the variables state + the result.
    As the days pass, we will be able to determine, with more and more precision, if TDE will eat m&m's
    for a specific combination :
        - did sleep very well, a lot of work and stomach full > Will never happen !
        - didn't sleep at all, no work at all and empty stomach > for sure !
    Following Bayes' Theorem: the probability that an event occurs (to win) under certain conditions is proportional
    to the probability to win under each condition separately and the probability to win. We compute a 'Win score'
    -> P(Won | A∩B) ∝ P(A∩B | Won)*P(Won) OR S(Won | A∩B) = P(A∩B | Won)*P(Won)
    To compute a percentage of probability to win, we also compute the 'Lost score' that is proportional to the
    probability to lose under each condition separately and the probability to lose.
    -> Probability = S(Won | A∩B) / ( S(Won | A∩B) + S(Lost | A∩B) )
    See https://www.youtube.com/watch?v=CPqOCI0ahss for a quick and simple example.
    One issue with NBC arises when an event occurrence is never observed.
    E.g: if TDE always eats m&m's when his stomach is empty, then the "not eating m&m's on empty stomach" event
    is never observed.
    This is called 'zero frequency' and it leads to division (or at least multiplication) by zero.
    To avoid this, we add 0.1 to each frequency. With few data, the computation is then not really realistic.
    The more records we have to analyse, the more precise the estimation becomes.
    :return: probability in percent (and integer rounded) that the lead will be won at the current stage.
    """
    lead_probabilities = {}
    if not self:
        return lead_probabilities
    LeadScoringFrequency = self.env['crm.lead.scoring.frequency']
    # get stages
    first_stage_id = self.env['crm.stage'].search([], order='sequence', limit=1)
    won_stage_ids = self.env['crm.stage'].search([('is_won', '=', True)]).ids
    # Get all leads values, no matter the team_id
    leads_values_dict = self._pls_get_lead_pls_values(batch_mode=batch_mode)
    if not leads_values_dict:
        return lead_probabilities
    # Get unique (variable, value) couples to search in the frequency table,
    # and remember which leads already sit in a won stage.
    leads_values = set()
    won_leads = set()
    for lead_id, values in leads_values_dict.items():
        for couple in values['values']:
            if couple[0] == 'stage_id' and couple[1] in won_stage_ids:
                won_leads.add(lead_id)
            leads_values.add(couple)
    # get all variable related records from frequency table, no matter the team_id
    fields = list(set(lead_value[0] for lead_value in leads_values))
    frequencies = LeadScoringFrequency.search([('variable', 'in', fields)], order="team_id asc")
    # get all team_ids from frequencies
    frequency_teams = frequencies.mapped('team_id')
    frequency_team_ids = [0] + [team.id for team in frequency_teams]
    # 1. Compute each variable value count individually
    # regroup each variable to be able to compute their own probabilities
    # As all the variable does not enter into account (as we reject unset values in the process)
    # each value probability must be computed only with their own variable related total count
    # special case: for lead for which team_id is not in frequency table,
    # we consider all the records, independently from team_id (this is why we add a result[-1])
    result = dict((team_id, dict((field, dict(won_total=0, lost_total=0)) for field in fields)) for team_id in frequency_team_ids)
    result[-1] = dict((field, dict(won_total=0, lost_total=0)) for field in fields)
    for frequency in frequencies:
        team_result = result[frequency.team_id.id if frequency.team_id else 0]
        field = frequency['variable']
        value = frequency['value']
        team_result[field][value] = {'won': frequency['won_count'], 'lost': frequency['lost_count']}
        team_result[field]['won_total'] += frequency['won_count']
        team_result[field]['lost_total'] += frequency['lost_count']
        if value not in result[-1][field]:
            result[-1][field][value] = {'won': 0, 'lost': 0}
        result[-1][field][value]['won'] += frequency['won_count']
        # BUGFIX: the aggregated (team-independent) bucket previously added
        # won_count into the 'lost' counters, skewing the probabilities of
        # every lead whose team has no entry in the frequency table.
        result[-1][field][value]['lost'] += frequency['lost_count']
        result[-1][field]['won_total'] += frequency['won_count']
        result[-1][field]['lost_total'] += frequency['lost_count']
    # Get all won, lost and total count for all records in frequencies per team_id
    for team_id in result:
        result[team_id]['team_won'], \
        result[team_id]['team_lost'], \
        result[team_id]['team_total'] = self._pls_get_won_lost_total_count(result[team_id], first_stage_id)
    save_team_id = None
    p_won, p_lost = 1, 1
    for lead_id, lead_values in leads_values_dict.items():
        # if stage_id is null, return 0 and bypass computation
        lead_fields = [value[0] for value in lead_values.get('values', [])]
        if 'stage_id' not in lead_fields:
            lead_probabilities[lead_id] = 0
            continue
        # if lead stage is won, return 100
        elif lead_id in won_leads:
            lead_probabilities[lead_id] = 100
            continue
        lead_team_id = lead_values['team_id'] if lead_values['team_id'] else 0  # team_id = None -> Convert to 0
        lead_team_id = lead_team_id if lead_team_id in result else -1  # team_id not in frequency Table -> convert to -1
        if lead_team_id != save_team_id:
            save_team_id = lead_team_id
            team_won = result[save_team_id]['team_won']
            team_lost = result[save_team_id]['team_lost']
            team_total = result[save_team_id]['team_total']
            # if one count = 0, we cannot compute lead probability
            if not team_won or not team_lost:
                continue
            p_won = team_won / team_total
            p_lost = team_lost / team_total
        # 2. Compute won and lost score using each variable's individual probability
        s_lead_won, s_lead_lost = p_won, p_lost
        for field, value in lead_values['values']:
            field_result = result.get(save_team_id, {}).get(field)
            value_result = field_result.get(str(value)) if field_result else False
            if value_result:
                # stage_id counts are cumulative, so they are normalized with the
                # team totals instead of the per-field totals.
                total_won = team_won if field == 'stage_id' else field_result['won_total']
                total_lost = team_lost if field == 'stage_id' else field_result['lost_total']
                s_lead_won *= value_result['won'] / total_won
                s_lead_lost *= value_result['lost'] / total_lost
        # 3. Compute Probability to win
        lead_probabilities[lead_id] = round(100 * s_lead_won / (s_lead_won + s_lead_lost), 2)
    return lead_probabilities
def _cron_update_automated_probabilities(self):
    """Cron entry point for predictive lead scoring.

    Rebuilds the lead scoring frequency table, then recomputes every
    automated_probability (realigning probability when both were aligned).
    """
    started_at = datetime.now()
    self._rebuild_pls_frequency_table()
    self._update_automated_probabilities()
    elapsed_seconds = (datetime.now() - started_at).total_seconds()
    _logger.info("Predictive Lead Scoring : Cron duration = %d seconds" % elapsed_seconds)
def _rebuild_pls_frequency_table(self):
    """Wipe and fully recompute the crm.lead.scoring.frequency table."""
    # Truncate through raw SQL to keep the cron fast; this bypasses the ORM,
    # so make sure the caller would be allowed to delete these records first.
    try:
        self.check_access_rights('unlink')
    except AccessError:
        raise UserError(_("You don't have the access needed to run this cron."))
    self._cr.execute('TRUNCATE TABLE crm_lead_scoring_frequency')
    # Stages ordered by sequence, plus a quick id -> sequence mapping.
    stages = self.env['crm.stage'].search_read([], ['sequence', 'name', 'id'], order='sequence')
    sequence_by_stage = {stage['id']: stage['sequence'] for stage in stages}
    # Accumulate frequency values team by team (archived teams included),
    # then once more without a team for the team-independent statistics.
    frequency_values = []
    for team in self.env['crm.team'].with_context(active_test=False).search([]):
        frequency_values = self._pls_update_frequency_table(frequency_values, stages, sequence_by_stage, team_id=team.id)
    frequency_values = self._pls_update_frequency_table(frequency_values, stages, sequence_by_stage)
    # Single batched create covering every company and team.
    self.env['crm.lead.scoring.frequency'].create(frequency_values)
    _logger.info("Predictive Lead Scoring : crm.lead.scoring.frequency table rebuilt")
def _update_automated_probabilities(self):
""" Recompute all the automated_probability (and align probability if both were aligned) for all the leads
that are active (not won, nor lost).
For performance matter, as there can be a huge amount of leads to recompute, this cron proceed by batch.
Each batch is performed into its own transaction, in order to minimise the lock time on the lead table
(and to avoid complete lock if there was only 1 transaction that would last for too long -> several minutes).
If a concurrent update occurs, it will simply be put in the queue to get the lock.
"""
pls_start_date = self._pls_get_safe_start_date()
if not pls_start_date:
return
# 1. Get all the leads to recompute created after pls_start_date that are nor won nor lost
# (Won : probability = 100 | Lost : probability = 0 or inactive. Here, inactive won't be returned anyway)
# Get also all the lead without probability --> These are the new leads. Activate auto probability on them.
pending_lead_domain = [
'&',
'&',
('stage_id', '!=', False),
('create_date', '>', pls_start_date),
'|',
('probability', '=', False),
'&',
('probability', '<', 100),
('probability', '>', 0)
]
leads_to_update = self.env['crm.lead'].search(pending_lead_domain)
leads_to_update_count = len(leads_to_update)
# 2. Compute by batch to avoid memory error
lead_probabilities = {}
for i in range(0, leads_to_update_count, PLS_COMPUTE_BATCH_STEP):
leads_to_update_part = leads_to_update[i:i + PLS_COMPUTE_BATCH_STEP]
lead_probabilities.update(leads_to_update_part._pls_get_naive_bayes_probabilities(batch_mode=True))
_logger.info("Predictive Lead | |
in statuses
]
if created_by is not None:
data['status_id'] = [
str(Testrail.get_user_by_name(u).id)
for u in created_by
]
if created_after is not None:
data['created_after'] = int(time.mktime(
created_after.timetuple()
))
if created_before is not None:
data['created_before'] = int(time.mktime(
created_before.timetuple()
))
return [Result(r) for r in TestrailAPI.get_results_for_run(self.id,
**data)]
def results_for_case(self,
                     case,
                     statuses=None,
                     limit=None,
                     offset=None):
    """
    Fetch the results recorded in this run for the test generated from *case*.

    :arg case: the Case object that is the 'father' of the target test
    :arg statuses: test status names used to filter the results
    :arg limit: maximum number of records to return
    :arg offset: number of records to skip
    :type case: Case
    :type statuses: list of [str]
    :type limit: int
    :type offset: int
    :rtype: list of [Result]
    """
    filters = {
        'limit': limit,
        'offset': offset,
    }
    if statuses is not None:
        # The API expects status ids, so translate each name first.
        filters['status_id'] = [str(Testrail.get_status_by_name(name).id)
                                for name in statuses]
    raw_results = TestrailAPI.get_results_for_case(self.id, case.id, **filters)
    return [Result(raw) for raw in raw_results]
def add_result_for_case(self,
                        case,
                        status_name,
                        comment='',
                        version=None,
                        elapsed=None,
                        defects=None,
                        assignedto=None):
    """Post a new result for the test matching *case* in this run.

    Returns the created result wrapped in a Result object.
    """
    payload = {
        'status_id': Testrail.get_status_by_name(status_name).id,
        'comment': comment,
    }
    # Optional fields are only sent when the caller provided them.
    for key, value in (('version', version),
                       ('elapsed', elapsed),
                       ('defects', defects)):
        if value is not None:
            payload[key] = value
    if assignedto is not None:
        payload['assignedto_id'] = Testrail.get_user_by_name(assignedto).id
    return Result(TestrailAPI.add_result_for_case(self.id, case.id, **payload))
class Section(_TestrailObject):
"""
Section container
Module Attributes:
parent -- Link to parent Section object
suite -- Link to Suite object this section belongs to
Testrail Attributes:
id -- The unique ID of the section
suite_id -- The ID of the test suite this section belongs to
name -- The name of the section
description -- The description of the section
display_order -- The order in the test suite
parent_id -- The ID of the parent section in the test suite
depth -- The level in the section hierarchy of the test suite
"""
cache = {}
def _settle_attributes(self, attributes):
    """Copy the raw API attribute dict onto this Section instance."""
    for key in ('id', 'suite_id', 'name', 'description',
                'display_order', 'parent_id', 'depth'):
        setattr(self, key, attributes[key])
    # Child sections are attached later, while the section tree is built.
    self.children = []
@property
def parent(self):
    """Parent Section object of this section, resolved via parent_id."""
    parent_section = Testrail.get_section_by_id(self.parent_id)
    return parent_section
@property
def suite(self):
    """Suite object this section belongs to, resolved via suite_id."""
    owning_suite = Testrail.get_suite_by_id(self.suite_id)
    return owning_suite
@staticmethod
def get_one(section_id):
    """Fetch a single section by id from the API and wrap it in a Section."""
    raw = TestrailAPI.get_section(section_id)
    return Section(raw)
def update(self, name=None, description=None):
    """
    Change section parameters (name and/or description).

    Not implemented yet.

    :rtype: None
    """
    raise NotImplementedError()
def delete(self):
    """
    Delete this section.

    !!! Deletion cannot be undone: it also removes all related test cases
    and any active (not yet closed/archived) tests & results.

    Not implemented yet.
    """
    raise NotImplementedError()
def add_subsection(self, name, description=''):
    """
    Create a new section nested under this one, in the same suite.

    The new section's parent is always this section (parent_id = self.id).
    Returns the newly created Section object.

    :arg name: Name of the new section
    :arg description: Description of the new section
    :type name: str
    :type description: str
    :rtype: Section
    """
    data = {
        'name': name,
        'suite_id': self.suite_id,
        'description': description,
        'parent_id': self.id,
    }
    return Section(TestrailAPI.add_section(self.suite.project_id, **data))
def cases(self,
include_subsections=False,
types=None,
priorities=None,
milestones=None,
created_by=None,
created_after=None,
created_before=None,
updated_by=None,
updated_after=None,
updated_before=None):
"""
This is most important method to find and filter cases.
:arg include_subsections: if True - search recursive
:arg types: A list of case type names to filter by
:arg priorities: list of priorities names to filter by
:arg milestones: list of milestones names to filter by
:arg created_by: list of user names who created cases to include
:arg created_after: Only return test cases created after this date
:arg created_before: Only return test cases created before this date
:arg updated_by: list of user names who updated cases to include
:arg updated_after: Only return test cases updated after this date
:arg updated_before: Only return test cases updated before this date
:type include_subsections: bool
:type types: list od [str]
:type priorities: list of [str]
:type milestones: list of [str]
:type created_by: list of [str]
:type created_after: datetime.datetime
:type created_before: datetime.datetime
:type updated_by: list of [str]
:type updated_after: datetime.datetime
:type updated_before: datetime.datetime
"""
data = {}
if types is not None:
data['type_id'] = [
str(Testrail.get_case_type_by_name(t).id) for t in types
]
if priorities is not None:
data['priority_id'] = [
str(Testrail.get_priority_by_name(p).id) for p in priorities
]
if milestones is not None:
data['milestone_id'] = [
str(self.suite.project.get_milestone_by_name(m).id) for m in milestones
]
if created_by is not None:
data['created_by'] = [
str(Testrail.get_user_by_name(u).id) for u in created_by
]
if created_after is not None:
data['created_after'] = int(time.mktime(
created_after.timetuple()
))
if created_before is not None:
data['created_before'] = int(time.mktime(
created_before.timetuple()
))
if updated_by is not None:
data['updated_by'] = [
str(Testrail.get_user_by_name(user).id)
for user in updated_by
]
if updated_after is not None:
data['updated_after'] = int(time.mktime(
updated_after.timetuple()
))
if updated_before is not None:
data['updated_before'] = int(time.mktime(
updated_before.timetuple()
))
if not include_subsections:
return [Case(c) for c in TestrailAPI.get_cases(self.suite.project_id,
self.suite_id,
self.id,
**data)]
else:
result = [
Case(c) for c in TestrailAPI.get_cases(self.suite.project_id,
self.suite_id,
self.id,
**data)
]
for sec in self.children:
result.extend(
sec.cases(True, types, priorities, milestones, created_by,
created_after, created_before, updated_by,
updated_after, updated_before)
)
return result
def add_case(self):
raise NotImplementedError
class Case(_TestrailObject):
    """
    Test case container
    Module Attributes:
    suite -- Suite object the test case belongs to
    section -- Section object the test case belongs to
    case_type -- Name (str) of the case type the test case has
    priority -- Short name (str) of the priority the test case has
    milestone -- Milestone object the test case belongs to
    created_by -- Name (str) of the user the test case was created by
    updated_by -- Name (str) of the user who last updated the test case
    created_on -- datetime object when the test case was created
    updated_on -- datetime object when the test case was last updated
    Testrail Attributes:
    id -- The unique ID of the test case
    suite_id -- The ID of the suite the test case belongs to
    section_id -- The ID of the section the test case belongs to
    title -- The title of the test case
    type_id -- The ID of the test case type that is linked to
    the test case
    priority_id -- The ID of the priority that is linked to
    the test case
    milestone_id -- The ID of the milestone that is linked to
    the test case
    refs -- A comma-separated list of references/requirements
    estimate -- The estimate, e.g. "30s" or "1m 45s"
    estimate_forecast -- The estimate forecast, e.g. "30s" or "1m 45s"
    created_on_stamp -- The date/time when the test case was created
    (as UNIX timestamp)
    created_by_id -- The ID of the user who created the test case
    updated_on_stamp -- The date/time when the test case was last updated
    (as UNIX timestamp)
    updated_by_id -- The ID of the user who last updated the test case
    + custom fields...
    """
    # Class-level instance cache used by the _TestrailObject machinery.
    cache = {}

    def _settle_attributes(self, attributes):
        """Populate this instance's fields from a raw API attribute dict."""
        self.id = attributes['id']
        self.suite_id = attributes['suite_id']
        self.section_id = attributes['section_id']
        self.title = attributes['title']
        self.type_id = attributes['type_id']
        self.priority_id = attributes['priority_id']
        self.milestone_id = attributes['milestone_id']
        self.refs = attributes['refs']
        self.estimate = attributes['estimate']
        self.estimate_forecast = attributes['estimate_forecast']
        # Keep both the raw UNIX timestamp and a parsed datetime.
        self.created_on_stamp = attributes['created_on']
        self.created_on = datetime.datetime.fromtimestamp(float(attributes['created_on']))
        self.created_by_id = attributes['created_by']
        self.updated_on_stamp = attributes['updated_on']
        try:
            self.updated_on = datetime.datetime.fromtimestamp(float(attributes['updated_on']))
        except TypeError:
            # 'updated_on' can be None for never-updated cases.
            self.updated_on = None
        self.updated_by_id = attributes['updated_by']
        # and all the custom fields:
        # NOTE: this triggers a suite lookup per case via the `suite` property.
        for custom in self.suite.custom_case_fields:
            setattr(self, custom.system_name, attributes[custom.system_name])

    @property
    def suite(self):
        """Suite object this case belongs to, resolved by ``suite_id``."""
        return Testrail.get_suite_by_id(self.suite_id)

    @property
    def section(self):
        """Section object this case belongs to, resolved by ``section_id``."""
        return Testrail.get_section_by_id(self.section_id)

    @property
    def milestone(self):
        """Milestone object linked to this case, resolved by ``milestone_id``."""
        return Testrail.get_milestone_by_id(self.milestone_id)

    @property
    def case_type(self):
        """Name (str) of this case's type."""
        return Testrail.get_case_type_by_id(self.type_id).name

    @property
    def priority(self):
        """Short name (str) of this case's priority."""
        return Testrail.get_priority_by_id(self.priority_id).short_name

    @property
    def created_by(self):
        """Name (str) of the user who created this case."""
        return Testrail.get_user_by_id(self.created_by_id).name

    @property
    def updated_by(self):
        """Name (str) of the user who last updated this case."""
        return Testrail.get_user_by_id(self.updated_by_id).name

    @staticmethod
    def get_one(case_id):
        """Fetch a single case from the API by its ID.

        :type case_id: int
        :rtype: Case
        """
        return Case(TestrailAPI.get_case(case_id))

    def update(self):
        """Change case parameters. Not implemented yet."""
        raise NotImplementedError

    def delete(self):
        """Delete this case. Not implemented yet."""
        raise NotImplementedError

    def results_in_run(self,
                       run,
                       statuses=None,
                       limit=None,
                       offset=None):
        """
        Return results list for a test created from this case in provided run.
        :arg run: A run object to grab results from
        :arg statuses: A list of test status names to filter by
        :arg limit: Limit the output to this number of records
        :arg offset: Skip this number of records.
        :type run: Run
        :type statuses: list of [str]
        :type limit: int
        :type offset: int
        :rtype: list of [Result]
        """
        data = {
            'limit': limit,
            'offset': offset
        }
        # Translate status names into the numeric IDs the API expects.
        if statuses is not None:
            data['status_id'] = [
                str(Testrail.get_status_by_name(s).id) for s in statuses
            ]
        return [Result(r) for r in TestrailAPI.get_results_for_case(run.id,
                                                                    self.id,
                                                                    **data)]

    def add_result_in_run(self,
                          run,
                          status_name,
                          comment='',
                          version=None,
                          elapsed=None,
                          defects=None,
                          assignedto=None):
        """Post a result for this case in the given run. Not implemented yet."""
        raise NotImplementedError
class Test(_TestrailObject):
"""
| |
type
cast_match = re.match(r'([a-zA-Z0-9_]+)\s*::\s*([a-zA-Z0-9_]+)', select_statement)
operation = ' '.join(select_statement.split(' ')[:-1])
column_name = select_statement.split(' ')[-1]
print(operation)
# Add the table and schema when a single table/schema is being selected from
# if cast_match:
# yield {
# 'schema': list(aliases.values())[0]['schema'],
# 'table_name': list(aliases.values())[0]['table_name'],
# 'column_name': cast_match.groups()[0],
# 'cast_type': cast_match.groups()[1]
# }
# elif len(aliases) == 1:
# yield {
# 'schema': list(aliases.values())[0]['schema'],
# 'table_name': list(aliases.values())[0]['table_name'],
# 'column_name': column_name,
# }
# else:
# yield {
# 'operation': operation,
# 'column_name': column_name
# }
def _parse_table(self):
# Get the name of the table being created
_table_name = next(token.value for token in self.tokens if isinstance(token, Identifier))
# Add the table metadata to the cached tables to access later.
if len(_table_name.split('.')) == 2 \
and not found_table(_table_name.split('.')[0], _table_name.split('.')[1]):
_table = Table(
_table_name.split('.')[0], _table_name.split('.')[1], self.cursor
).query_data()
self.table = _table
self.table_cache.append(_table)
self.destination_table = _table
elif len(_table_name.split('.')) == 3 \
and not found_table(_table_name.split('.')[1], _table_name.split('.')[2]):
_table = Table(
_table_name.split('.')[1], _table_name.split('.')[2], self.cursor
).query_data()
self.table = _table
self.table_cache.append(_table)
self.destination_table = _table
else:
_table = Table(_table_name, _table_name, self.cursor)
self.table = _table
self.table_cache.append(_table)
    def _parse_froms(self, token):
        """Parse the ``FROM`` portion of a query, caching referenced tables.

        Side effects only: referenced tables are appended to
        ``self.table_cache`` and inline subqueries to ``self.subqueries``.
        (An earlier generator version yielded tokens; that code is commented
        out below.)
        """
        from_seen = False
        # Iterate over the different tokens
        for _token in token.tokens:
            if _token.is_whitespace:
                continue
            if from_seen:
                if is_subselect(_token):
                    print('subselect')
                    print(_token)
                    # for __token in extract_from_part(_token, self.cursor):
                    #     yield __token
                elif _token.ttype is Keyword or _token.ttype is Punctuation:
                    # A keyword or punctuation token terminates the FROM clause.
                    from_seen = False
                    continue
                else:
                    # The alias used to reference the table in the query
                    alias = _token.get_name()
                    # When the alias is found as `None`, there is no ``FROM`` found in this query.
                    # TODO figure out why this condition is here
                    if alias is None:
                        # return
                        continue
                    # The full table name without the schema
                    table_real_name = _token.get_real_name()
                    # The Redshift schema where the table is accessed from
                    schema = _token.value.replace(f".{table_real_name}", '').split(' ')[0]
                    # When the schema starts with an opening parenthesis, ``(``, there is a subquery
                    # used in this FROM statement. It must be recursively iterated upon.
                    if schema[0] == '(':
                        _subquery = ParsedStatement(
                            sqlparse.parse(
                                re.sub(r'\)\s+' + table_real_name, '', _token.value)[1:]
                            )[0],
                            self.file_name,
                            self.cursor
                        )
                        _subquery.parse()
                        self.subqueries.append(Subquery(table_real_name, _subquery))
                    # Otherwise, the FROM portion of this statement is referencing another table.
                    else:
                        _table = Table(schema, table_real_name, self.cursor, alias)
                        _table.query_data()
                        self.table_cache.append(_table)
                        # self.froms.append(_table)
            if _token.ttype is Keyword and _token.value.upper() == 'FROM':
                from_seen = True
    def _parse_joins(self, token):
        """Parse the ``JOIN`` portions of a query.

        Side effects only: completed ``Join`` objects (with their
        ``JoinComparison``s) are appended to ``self.joins``, joined tables to
        ``self.table_cache``, and joined subqueries to ``self.subqueries``.

        State machine over the token stream:
        1. a JOIN keyword sets ``join_type`` / creates a ``Join``;
        2. the next non-keyword token names the joined table or subquery;
        3. a following keyword (ON) switches to comparison collection.
        """
        join = None
        join_type = None
        comparisons = False
        for _token in token.tokens:
            # Ignore all whitespace tokens.
            # NOTE: The sqlparse packages considers comparisons as `whitespace`.
            if _token.is_whitespace and not isinstance(_token, Comparison):
                continue
            # Add the different comparisons used in the join statement
            if comparisons and isinstance(_token, Comparison):
                # Remove the comments from the token
                _token_no_comments = sqlparse.parse(
                    sqlparse.format(_token.value, strip_comments=True).strip()
                )[0].tokens[0]
                # Match each side of the comparison to a cached table by alias.
                left_tables = [
                    _table for _table in self.table_cache
                    if _table.alias == str(_token_no_comments.left).split('.')[0]
                ]
                right_tables = [
                    _table for _table in self.table_cache
                    if _table.alias == str(_token_no_comments.right).split('.')[0]
                ]
                if len(left_tables) == 1:
                    left_table = left_tables[0]
                    left_column = left_table.get_column(
                        str(_token_no_comments.left).split('.')[1]
                    )
                if len(right_tables) == 1:
                    right_table = right_tables[0]
                    right_column = right_table.get_column(
                        str(_token_no_comments.right).split('.')[1]
                    )
                # The operator text is whatever remains once both operands are
                # stripped from the comparison's raw value.
                comparison = JoinComparison(
                    (left_column, left_table),
                    (right_column, right_table),
                    _token_no_comments.value
                    .replace(str(_token_no_comments.left), '')
                    .replace(str(_token_no_comments.right), '')
                    .strip()
                )
                join.add_comparison(comparison)
                self.joins.append(join)
            if join_type:
                # TODO: Implement subquery match with pythonic objects
                # Find the different comparisons used in this join. The join type is now known and the
                # comparisons must be set.
                if _token.ttype is Keyword:
                    comparisons = True
                    join_type = None
                    continue
                # Match the value found to see if there is a JOIN using a subquery
                subquery_match = re.match(
                    r"\(([\w\W]+)\)", _token.value[:-len(_token.get_name())], re.MULTILINE
                )
                # Yield the subquery output when necessary
                if subquery_match:
                    print('MATCHED SUBQUERY!!!')
                    # subquery = parse_statement(
                    #     sqlparse.parse(subquery_match.groups()[0])[0],
                    #     {}
                    # )
                    # print(sqlparse.parse(subquery_match.groups()[0])[0])
                    # print('subquery')
                    # print(subquery)
                    _subquery = ParsedStatement(
                        sqlparse.parse(subquery_match.groups()[0])[0],
                        self.file_name,
                        self.cursor
                    )
                    _subquery.parse()
                    # The alias used to reference the table in the query
                    alias = _token.get_name()
                    if not self.has_alias_in_cache(alias):
                        self.subqueries.append(Subquery(alias, _subquery))
                    # The full table name without the schema
                    table_real_name = _token.get_real_name()
                    # yield {
                    #     alias: {
                    #         'join_type': join_type,
                    #         'subquery': subquery,
                    #         'table_name': table_real_name,
                    #         'token': _token
                    #     }
                    # }
                # Just the alias of the table is given in this token. Store the table and the alias
                # in the object's ``table_cache``.
                else:
                    # The alias used to reference the table in the query
                    alias = _token.get_name()
                    # The full table name without the schema
                    table_real_name = _token.get_real_name()
                    # The Redshift schema where the table is accessed from
                    redshift_schema = _token.value.replace(f".{table_real_name}", '').split(' ')[0]
                    if not self.has_alias_in_cache(alias):
                        # if not alias in [table.alias for table in tables]:
                        _table = Table(redshift_schema, table_real_name, self.cursor, alias)
                        _table.query_data()
                        self.table_cache.append(_table)
                        print(f'Appending this table ({_table.alias}):')
                        # print(this_table)
                        print([_table.alias for _table in self.table_cache])
                    # yield {
                    #     alias: {
                    #         'join_type': join_type,
                    #         'table_name': table_real_name,
                    #         'schema': redshift_schema,
                    #         'token': _token
                    #     }
                    # }
            if _token.ttype is Keyword and _token.value.upper() in (
                    'JOIN',
                    'LEFT JOIN',
                    'RIGHT JOIN',
                    'INNER JOIN',
                    'FULL JOIN',
                    'LEFT OUTER JOIN',
                    'FULL OUTER JOIN'
            ):
                join_type = _token.value.upper()
                join = Join(_token.value.upper())
    def parse(self) -> None:
        """Parses the SQL statement for dependencies.

        Stage order matters: the destination table is resolved first so the
        FROM/JOIN/SELECT stages can build on the populated ``table_cache``.
        """
        self._parse_table()
        self._parse_froms(self.tokens)
        self._parse_joins(self.tokens)
        self._parse_selects()
def remove_comments(sql_string: str) -> str:
    """Strip ``--`` line comments from *sql_string* and return the result.

    Each line is scanned independently; everything from ``--`` to the end of
    the line is removed, for the character set common in SQL comments.  The
    newline structure of the input is preserved.

    NOTE: characters outside the regex's class (e.g. ``;`` or ``"``) stop the
    match, so such comments are only partially stripped.

    :arg sql_string: raw SQL text, possibly containing ``--`` comments
    :rtype: str

    (Fixed: the return annotation previously claimed ``None`` although the
    function has always returned the joined string.)
    """
    return '\n'.join([
        re.sub(r'\-\-[\s0-9a-zA-Z_\.,\\\/\(\)\':=<>+\-*]*$', '', select_line)
        for select_line in sql_string.split('\n')
    ])
def extract_selects(token, aliases):
"""Gets all the columns selected in a ``SELECT ... FROM`` SQL statement.
Parameters
----------
token: str
aliases: dict
"""
# Remove the comments from the token.
sql_no_comments = remove_comments(token.value.strip())
# Search for all of the ``select`` and ``from`` in this token.
select_matches = list(re.finditer(r'select\s', sql_no_comments, re.MULTILINE|re.IGNORECASE))
from_matches = list(re.finditer(r'from\s', sql_no_comments, re.MULTILINE|re.IGNORECASE))
# Only use the columns in this SELECT statement. This will be all text between the first
# ``select`` and ``from`` found in this token.
if len(select_matches) != len(from_matches):
raise Exception(
'The number of SELECTs and JOINs did not match:\n{}'.format(token.value.strip())
)
if len(select_matches) == 0 or len(from_matches) == 0:
raise Exception(
'No SELECTs and JOINs found in this token:\n{}'.format(token.value.strip())
)
# Get all of the columns used in the SELECT statement by splitting the text between the first
# ``select`` and ``from``.
selected_columns = sql_no_comments[select_matches[0].span()[1]:from_matches[0].span()[0]] \
.split(',')
# Use a list and index to iterate over the different select statements.
select_index = 0
selects_out = []
# Iterate over the different selected columns and group them together by ensuring they
# maintain the same number of opening and closing paranthesis.
while select_index < len(selected_columns):
select_statement = selected_columns[select_index].strip()
if select_statement.count('(') != select_statement.count(')'):
while select_statement.count('(') != select_statement.count(')'):
select_index += 1
select_statement += "," + selected_columns[select_index].strip()
selects_out.append(select_statement)
select_index += 1
else:
selects_out.append(
' '.join([line.strip() for line in select_statement.split('\n')])
)
select_index += 1
# Iterate over the different select statements to find how the column is used
for select_statement in selects_out:
# Find the select statements that have just the schema and the column name from the
# origin table.
same_name_match = re.match(r'([a-zA-Z0-9_]+)\.([a-zA-Z0-9_]+)$', select_statement)
# Find the select statements with the schema, the column name, and this column's
# aliased name with the keyword ``as```.
rename_match_with_as = re.match(
r'([a-zA-Z0-9_]+)\.([a-zA-Z0-9_]+)\s+as\s+([a-zA-Z0-9_]+)$',
select_statement,
re.IGNORECASE
)
# Find the select statements with the schema, the column name, and this column's
# aliased name without the ``as`` keyword.
rename_match_without_as = re.match(
r'([a-zA-Z0-9_]+)\.([a-zA-Z0-9_]+)\s+([a-zA-Z0-9_]+)$', select_statement
)
# Find the functions applied to the column, aliased with another column name with the
# keyword ``as``.
function_match = re.search(
r'([\w\W]+)\s+as\s+([a-zA-Z0-9_]+)$', select_statement, re.MULTILINE|re.IGNORECASE
)
if same_name_match:
table_alias = same_name_match.groups()[0]
column_name = same_name_match.groups()[1]
# print('-----')
# print(table_alias)
# print(column_name)
# print(aliases)
# print(select_statement)
# Yield the subquery and the column name when referencing a subquery
if 'subquery' in aliases[table_alias].keys():
yield {
'column_name': column_name,
'table_alias': table_alias,
'subquery': list(aliases[table_alias]['subquery'].values())[0]
}
else:
yield {
'schema': aliases[table_alias]['schema'],
'table_name': aliases[table_alias]['table_name'],
'column_name': column_name,
'column_from': column_name,
'table_alias': table_alias
}
elif rename_match_with_as or rename_match_without_as:
if rename_match_without_as:
table_alias = rename_match_without_as.groups()[0]
column_from = rename_match_without_as.groups()[1]
column_name = rename_match_without_as.groups()[2]
# Yield the column name and the alias's name when referencing a subquery.
if aliases[table_alias]['schema'][0] == | |
import pandas as pd
import re
from IPython.display import display
import matplotlib.pyplot as plt
import numpy as np
import os
from pathlib import Path
from glob import glob
import black
import natsort
import operator
# from matplotlib.pyplot import cm
import matplotlib as mpl
from constants import *
from matplotlib.lines import Line2D
plt.style.use("style.mplstyle") #matplotlib style sheet
avg_parameter = 3  # number of repeated job versions (v1..v3) averaged together
background_colour = ["r","b","k", "m"]  # per-layer line colours for plots
double_size = 8  # bytes per double-precision value
layer_start, layer_end = 1,5 # setting of layers for graphs from layer_name
output_files = "output_files"  # output directory name (not referenced in the functions visible here)
layer_name = ["Serial", "MPIIO", "PHDF5", "ADIOS2_HDF5", "ADIOS2_BP4"]  # I/O layer labels exactly as written in the CSVs
def dict_output(filename, layer_name, array_size):
    """Read one benchmark CSV and filter it to a single layer / cube size.

    :arg filename: path to an ``output.csv`` produced by the benchmark run
    :arg layer_name: I/O layer label to select (e.g. ``"MPIIO"``)
    :arg array_size: local cube edge length to select
    :returns: ``(rate, num_nodes)`` — two pandas Series holding the average
        rate (GB/s) and the rank count, one entry per matching CSV row.
    """
    frame = pd.read_csv(
        filename,
        index_col=False,
        skiprows=0,
        names=[
            "Fields",
            "Layername",
            "ArraySize",
            "NumNodes",
            "AverageTime",
            "AverageRate"
        ]
    )
    # Rows matching both the requested layer and cube size.
    selected = (frame.Layername == layer_name) & (frame.ArraySize == array_size)
    return frame.AverageRate[selected], frame.NumNodes[selected]
def average_dir(dir, min_size, max_size):
    """Average rates over repeated runs (v1..v3) and write them to CSV.

    For every layer and every cube size ``2**x`` (x in [min_size, max_size)),
    finds each ``*_v1`` run directory under ``output_dirs/<dir>``, averages
    the rate over all existing ``_v1.._vN`` sibling directories
    (N = ``avg_parameter``), and writes the combined table to
    ``output_csv/<dir>_avg_output.csv``.

    :arg dir: subdirectory of ``output_dirs`` holding the benchmark outputs
    :arg min_size: inclusive lower exponent for the cube edge length
    :arg max_size: exclusive upper exponent for the cube edge length
    """
    rate = []
    rate_avg = []
    ranks = []
    Global_size = []
    rate_persize = []
    """
    Obtain paths for subdirectories
    """
    dirFiles = glob(
        f"{os.getcwd()}/output_dirs/{dir}/*/"
    ) # assign full output directories for csv file
    output_dirs = natsort.natsorted(dirFiles)
    # N_size = 2 ** 8 # only checking 2^8 cube len
    for layers in range(layer_start, layer_end):
        for size in range(min_size,max_size):
            cube_len = 2 ** size
            for i in range(len(output_dirs)):
                trail = os.path.basename(
                    os.path.normpath(output_dirs[i])
                ) # gives name of cores, and processors
                if "_v1" in trail: # start with first version of file, iterate from then.
                    rate_avg = 0
                    avgd_num = 0
                    for job_num in range(1,avg_parameter+1): #for v1,v2,v3
                        avgd_dir = output_dirs[i].replace("v1",f"v{job_num}") # replace v1 with v2,v3? Preceeding path can stay same.
                        avgd_file = f"{avgd_dir}output.csv"
                        if os.path.isfile(avgd_file): #check if v1, v2, v3 exist for that node config
                            avgd_num += 1 # accurate avg parameter, iterates for every version that actually exists.
                            rate, ranks = dict_output(avgd_file,layer_name[layers],cube_len)
                            rate_avg += rate
                    # NOTE(review): if even the v1 file is missing, avgd_num is 0
                    # and this divides by zero — confirm v1 always exists.
                    rate_avg = rate_avg/avgd_num
                    Global_size = ((cube_len ** 3) * double_size * ranks.values) / (10 ** 9) # global array size from cube length
                    rate_persize.append([layer_name[layers], ranks.values[0], cube_len, Global_size, rate_avg.values[0]]) # append plot output data to rate_persize
    # # output of rate_persize list array to csv file
    output_data = pd.DataFrame(rate_persize, columns=[ 'LayerName','Ranks', 'CubeLen', 'GlobalSize', 'Rate'])
    out = output_data.to_csv(f"output_csv/{dir}_avg_output.csv", index=False)
def plot_rate_v_ranks(target_dir, layer_start, layer_end, min_size, max_size, ax1):
    """Plot averaged I/O rate vs MPI ranks for each layer/size onto *ax1*.

    Re-runs the averaging step (``average_dir``) for *target_dir*, reads the
    resulting ``output_csv/<target_dir>_avg_output.csv`` back in, and draws
    one line per (layer, cube size) combination.

    :returns: ``(max_rate, max_rank)`` — the peak rate seen and the rank
        count at which it occurred.
    """
    max_rate, max_rank = 0, 0
    # outputdata_layer_v_size(f"{target_dir}", min_size, max_size) # data processing function, outputs everything to output.csv
    average_dir(f"{target_dir}", min_size, max_size) # data processing function, outputs everything to output.csv
    input_data = pd.read_csv( # read from output.csv
        f"output_csv/{target_dir}_avg_output.csv",
        index_col=False,
        skiprows=0
    )
    for x in range(layer_start, layer_end):
        for y in range(min_size, max_size):
            size_t = 2 ** y
            label1 = layer_name[x]
            ranks = input_data.Ranks[ (input_data.LayerName == layer_name[x]) & (input_data.CubeLen == size_t) ]
            rate = input_data.Rate[ ( input_data.LayerName == layer_name[x]) & (input_data.CubeLen == size_t ) ]
            # NOTE(review): ``marker_desc`` is not defined in this module —
            # presumably provided by ``from constants import *``; confirm.
            ax1.plot(ranks, rate, marker_desc[x], label=label1, c=background_colour[x-layer_start])
            if max_rate < max(rate):
                max_rate = max(rate)
                max_rank = ranks[rate.idxmax()]
    return max_rate, max_rank
def plotting_mult_dirs(target_dir, min_size, max_size, param):
    """Overlay rate-vs-ranks curves for several benchmark directories.

    :arg target_dir: iterable of directory names to process and overlay
    :arg min_size: inclusive lower exponent for the cube edge length
    :arg max_size: exclusive upper exponent for the cube edge length
    :arg param: striping description used in the figure title
    """
    """
    Plot formatting
    """
    # fig1 = plt.figure(figsize=(8,6))
    fig1 = plt.figure(figsize=(6,5.5))
    ax1 = plt.axes()
    """
    Inits
    """
    max_rate, max_rank, local_rate, local_rank = 0,0,0,0 # init variable for finding max rate and corresponding ranks
    for x in target_dir:
        local_rate, local_rank = plot_rate_v_ranks(x, layer_start, layer_end, min_size, max_size, ax1)
        if max_rate < local_rate: #in case multiple set of directories
            max_rate = local_rate
            max_rank = local_rank
    # Annotate the overall peak on the shared axis.
    text = "Rank={:.0f},Max Rate={:.3f}GB/s".format(max_rank,max_rate)
    # NOTE(review): ``kw`` is not defined in this module — presumably an
    # annotation-style dict from ``from constants import *``; confirm.
    ax1.annotate(text, xy=(max_rank, max_rate), xytext=(0.95, 0.96), **kw)
    legend_elements = [
        Line2D([0], [0], marker = "D", color='r', lw=1, label='MPIIO'),
        Line2D([0], [0], marker= "o", color='b', lw=1, label='PHDF5'),
        Line2D([0], [0], marker= "v", color='k', lw=1,label='ADIOS2/HDF5'),
        Line2D([0], [0], marker= "^", color='m', lw=1, label='ADIOS2/BP4')]
    ax1.legend(handles=legend_elements)
    ax1.set_xlabel("MPI ranks")
    ax1.set_ylabel("Average Rate (GB/s)")
    ax1.set_yscale("log")
    ax1.set_xscale("log")
    ax1.set_ybound(0,4)
    fig1.suptitle(f"I/O rate comparison b/w NextGenIO & Fulhame - {param} striping")
    fig1.tight_layout()
def speedup(dir):
    """
    Inits
    """
    # Plot per-layer speedup relative to the PHDF5 baseline (layer_name[2])
    # against global array size, for runs found under *dir*.
    double_size = 8  # bytes per double (shadows the module-level constant)
    layer_start = 3 # setting of layers for graphs from layer_name
    layer_end = 5
    rate_hdf5 = []
    num_nodes_hdf5 = []
    array_hdf5 = []
    """
    Plot formatting
    """
    # fig1 = plt.figure(figsize=(8,6))
    fig1 = plt.figure(figsize=(8,6))
    # plt.rcParams["font.size"] = "10"
    ax1 = plt.axes()
    """
    Obtain paths for subdirectories
    """
    dirFiles = glob(
        f"{os.getcwd()}/{dir}/*/"
    ) # assign full output directories for csv file
    output_dirs = natsort.natsorted(dirFiles)
    num_dir = len(output_dirs) # number of outputs
    # marker_desc = ["-o", "-*", "-+", "-D", "-^", "-<", "-s", "-.", "-p", "-h"]
    background_colour = ["r","b","k","m","y"]
    """
    Plots
    """
    max_rate = 0
    max_time = 0
    max_time_ar = 0
    max_rate_ar = 0
    new_rate = []
    marker_desc = ["-*","-o","--*","--o"]
    iter = 0
    for x in range(layer_start, layer_end):
        for i in range(num_dir): # number of tails for same proc
            trail = os.path.basename(
                os.path.normpath(output_dirs[i])
            ) # gives name of core_no.ofproc
            filename = output_dirs[i] + "output.csv"
            if f"_v{1}" in trail: # select particular v.
                rate, num_nodes, array = output_speedup(filename, layer_name[x])
                rate_hdf5, num_nodes_hdf5, array_hdf5 = output_speedup(filename, layer_name[2]) # obtain hdf5 parameters for comparison
                new_rate = speedup_comp(rate_hdf5, rate)
                # NOTE(review): indexes num_nodes by the *layer* index x —
                # looks like it should be a row index; verify intent.
                layer = f"{layer_name[x]}/{num_nodes.values[x]}"
                if num_nodes.values[0] == 1: # hacky way of selecting num_nodes.
                    ax1.plot(array, new_rate, marker_desc[iter], c=background_colour[iter], label=layer)
                    iter += 1
                if num_nodes.values[0] == 384: # hacky way of selecting num_nodes.
                    ax1.plot(array, new_rate, marker_desc[iter], c=background_colour[iter], label=layer)
                    iter += 1
    ax1.legend(loc = "upper right")# Don't allow the axis to be on top of your data
    ax1.set_xlabel("Global Size (GB)")
    ax1.set_ylabel("Speedup compared to HDF5")
    ax1.set_yscale("log")
    ax1.set_xscale("log")
    # fig1.suptitle("Benchmarking speedup results w.r.t. I/O rates for HDF5")
    fig1.tight_layout()
def output_speedup(filename, layer_name):
    """Read a benchmark CSV and return rate data for one I/O layer.

    :arg filename: path to an ``output.csv`` produced by the benchmark run
    :arg layer_name: I/O layer label to select (e.g. ``"PHDF5"``)
    :returns: ``(rate, num_nodes, global_size)`` — rate (GB/s) and rank count
        as pandas Series, plus the global array footprint in GB computed from
        the cube edge length, 8-byte doubles, and the first row's rank count.
    """
    bytes_per_double = 8  # shadows the module-level constant on purpose
    frame = pd.read_csv(
        filename,
        index_col=False,
        skiprows=0,
        names=[
            "Fields",
            "Layername",
            "ArraySize",
            "NumNodes",
            "AverageTime",
            "AverageRate"
        ]
    )
    # Rows matching the requested layer.
    selected = frame.Layername == layer_name
    edge_len = frame.ArraySize[selected]  # local cube edge length N
    num_nodes = frame.NumNodes[selected]
    # Global array size in GB, derived from the cube edge length.
    global_size = ((edge_len ** 3) * bytes_per_double * num_nodes.values[0]) / (10 ** 9)
    rate = frame.AverageRate[selected]
    return rate, num_nodes, global_size
def speedup_comp(rate_hdf5, rate):
    """Element-wise speedup of *rate* relative to the HDF5 baseline.

    :arg rate_hdf5: baseline (HDF5) rates as a pandas Series; its length
        drives the comparison
    :arg rate: rates of the layer being compared; must be at least as long
        as the baseline
    :returns: list of floats — ``rate[i] / rate_hdf5[i]`` per baseline entry
    """
    # Comprehension replaces the manual append loop; semantics unchanged
    # (still indexed by the baseline's length).
    return [
        rate.values[i] / rate_hdf5.values[i] for i in range(len(rate_hdf5))
    ]
def xcompact():
    """Plot XCompact3D timing data (total/write/compute vs MPI ranks).

    Reads ``xcompact.csv`` from the working directory and annotates the
    I/O bottleneck region on a log-log axis.
    """
    """
    Plot formatting
    """
    fig1 = plt.figure(figsize=(6,5.5))
    # fig1 = plt.figure(figsize=(8,6))
    # plt.rcParams["font.size"] = "10"
    ax1 = plt.axes()
    xcom = pd.read_csv(
        "xcompact.csv",
        index_col=False,
        skiprows=0,
        names=[
            "Rank",
            "TotalTime",
            "WriteTime",
            "ComputeTime",
            "BW"
        ]
    )
    # Arrow pointing at the region where write time dominates.
    arrow_dim = 300
    point_dim = 550
    plt.arrow(x=point_dim+arrow_dim, y=60, dx=-arrow_dim, dy=0, width=1.5, head_length=30, facecolor='red')
    plt.annotate('I/O bottleneck', xy = (point_dim+arrow_dim+50, 58))
    ax1.plot(xcom.Rank,xcom.TotalTime, "-^", c="r", label="Total")
    ax1.plot(xcom.Rank,xcom.WriteTime,"-o", c="b",label = "Write" )
    ax1.plot(xcom.Rank,xcom.ComputeTime, "-<", c="k", label = "Compute")
    ax1.set_xlabel("MPI ranks")
    ax1.set_ylabel("Time taken (s)")
    ax1.legend(loc = "upper right")
    ax1.set_yscale("log")
    ax1.set_xscale("log")
    # Vertical marker at 512 ranks.
    plt.axvline(x=512, color='k', linestyle='--')
    # fig1.suptitle("Benchmarking for XCompact3D")
def outputdata_layer_v_size(filedir, cube_len_start, cube_len_end):
    """Collect rate-vs-size data points for every layer and write them to CSV.

    Walks every subdirectory of ``output_dirs/<filedir>``, reads each
    ``output.csv`` for every layer in ``layer_name`` and every cube edge
    length ``2**x`` (x in [cube_len_start, cube_len_end)), and writes the
    combined table to ``output_csv/<filedir>_output.csv``.

    :arg filedir: subdirectory of ``output_dirs`` holding benchmark outputs
    :arg cube_len_start: inclusive lower exponent for the cube edge length
    :arg cube_len_end: exclusive upper exponent for the cube edge length
    """
    """
    Inits
    """
    layer_name = ["Serial", "MPIIO", "PHDF5", "ADIOS2_HDF5", "ADIOS2_BP4"]
    rate_persize = []
    target_dir = f"{os.getcwd()}/output_dirs/{filedir}/*/"
    layer_start, layer_end = 1, len(layer_name)
    """
    Obtain paths for subdirectories
    """
    dirFiles = glob(
        target_dir
    ) # assign full output directories for csv file
    output_dirs = natsort.natsorted(dirFiles)
    """
    Find data points recursively from the target dir, selecting based on layer name
    """
    # (Removed: unused locals ``filename`` and ``trail`` that were computed
    # per directory but never read, and the unused ``out`` binding —
    # ``to_csv`` returns None when given a path.)
    for l in range(layer_start, layer_end): # select layer name
        for i in range(len(output_dirs)):
            for x in range (cube_len_start,cube_len_end): # specify max array size parameter
                N_size = 2 ** x
                rate, ranks = dict_output(f"{output_dirs[i]}output.csv",layer_name[l],N_size) # info from specified directory, against matching layername and cube length
                Global_size = ((N_size ** 3) * double_size * ranks.values[0]) / (10 ** 9) # global array size from cube length
                rate_persize.append([layer_name[l], ranks.values[0], N_size, Global_size, rate.values[0]]) # append plot output data to rate_persize
    # output of rate_persize list array to csv file
    output_data = pd.DataFrame(rate_persize, columns=[ 'LayerName','Ranks', 'CubeLen', 'GlobalSize', 'Rate'])
    output_data.to_csv(f"output_csv/{filedir}_output.csv", index=False)
def compare_benchio_f(): # bar plot to compare benchio with benchmark_c
""" Both run with 1 I/O runs and 1 rank. Array size = 250*250*250.
Can increase this to multiple | |
from .base import *
from .community_damage_sampling import *
from .downtime_logistics import *
def load_results(input_filenames, output_filename, i_analysis, options):
    """Load cached analysis results from HDF5, sampling any missing stage.

    Each of the three stages (community damage, impeding factors, cordons) is
    loaded from *output_filename* when its indexed group already exists;
    otherwise, if *input_filenames* is provided (a "new analysis"), the stage
    is sampled/evaluated and saved.  Returns early (None) when a requested
    stage is absent and no inputs were given.

    :arg input_filenames: ``[inventory, ground_motion, original_vuln,
        retrofit_vuln]`` file names, or None to only load existing results
    :arg output_filename: HDF5 analysis file to read/create
    :arg i_analysis: ``[i_damage, i_impeding_factors, i_cordons]`` stage indices
    :arg options: dict of sampling options (must include 'n_realizations'
        when sampling damage)
    :returns: ``(bldgs, community_damage, community_downtime)`` or None
    """
    ####
    [i_damage, i_impeding_factors, i_cordons] = i_analysis
    # A new analysis is one where input files are supplied; otherwise we may
    # only read back results that already exist in the HDF5 file.
    if input_filenames is None:
        new_analysis = False
    else:
        new_analysis = True
        [inventory_filename, ground_motion_filename, original_vulnerability_filename, retrofit_vulnerability_filename] = input_filenames
    # if output folder does not yet exist, create it
    if not os.path.exists(output_filename):
        if new_analysis:
            _ = h5py.File(output_filename, 'w')
            # prepare a group folder for results
            with h5py.File(output_filename, 'r+') as hf:
                _ = hf.create_group('Results')
            print('File created')
        else:
            print('Analysis file does not exist, specify inputs')
            return
    # if i_damage results do not yet exist, sample and save the damage
    damage_name = 'CommunityDamage_' + str(i_damage)
    with h5py.File(output_filename, 'r+') as hf:
        if damage_name in hf['Results'].keys():
            community_damage = hf['Results'][damage_name]['community_damage'][:]
            print('Damage loaded')
        elif new_analysis:
            if options['n_realizations'] is None:
                print('specify the number of realizations')
                return
            community_damage = sample_community_damage_with_retrofits(inventory_filename, ground_motion_filename,
                                                                      original_vulnerability_filename, retrofit_vulnerability_filename,
                                                                      output_filename, i_analysis, options)
            print('Damage sampled')
        else:
            print('Damage scenario does not exist in analysis file')
            return
    # if i_impeding_factors do not yet exist, sample and save impeding factors
    if_name = 'ImpedingFactors_' + str(i_impeding_factors)
    with h5py.File(output_filename, 'r+') as hf:
        if if_name in hf['Results'][damage_name]['DowntimeLogistics'].keys():
            impeding_factors = hf['Results'][damage_name]['DowntimeLogistics'][if_name]['impeding_factors'][:]
            print('Impeding factors loaded')
        elif new_analysis:
            bldgs = pd.read_hdf(output_filename, key='MetaData/buildings', mode='r+')
            impeding_factors = sample_impeding_factors(bldgs, community_damage, output_filename, i_analysis, options)
            print('Impeding factors sampled')
        else:
            print('Impeding factors scenario does not exist in analysis file')
            return
    # if i_cordons do not yet exist, sample and save cordons
    cordon_name = 'Cordons_' + str(i_cordons)
    with h5py.File(output_filename, 'r+') as hf:
        if cordon_name in hf['Results'][damage_name]['DowntimeLogistics'][if_name]['CordonLogistics'].keys():
            bldgs = pd.read_hdf(output_filename, key='MetaData/buildings', mode='r+')
            community_downtime = \
                hf['Results'][damage_name]['DowntimeLogistics'][if_name]['CordonLogistics'][cordon_name][
                    'community_downtime'][:]
            print('Cordons loaded')
        elif new_analysis:
            bldgs = pd.read_hdf(output_filename, key='MetaData/buildings', mode='r+')
            community_downtime = evaluate_cordons(bldgs, community_damage, impeding_factors,
                                                  output_filename, i_analysis, options)
            print('Cordons evaluated')
        else:
            print('Cordon scenario does not exist in analysis file')
            return
    print()
    return bldgs, community_damage, community_downtime
def plot_percentile_community_recovery(community_recovery, time, xlim, i_rup, i_occ):
    """Plot percentile bands of community recovery curves over time.

    Draws the 5/95th, 33/67th and median recovery trajectories (as % of
    pre-event sqft) for three downtime definitions: repair time only,
    repair plus impeding-factor delays, and total downtime including
    cordon-induced delays.

    Parameters
    ----------
    community_recovery : ndarray
        Recovery fractions indexed as [time, rupture, occupancy,
        recovery-metric, simulation]; the metric axis follows
        ``recovery_labels`` below.
    time : array-like
        Days after the earthquake corresponding to axis 0.
    xlim : sequence
        x-axis limits in days.
    i_rup, i_occ : int
        Rupture-scenario and occupancy-class indices to plot.
    """
    fig, ax = plt.subplots(1, 1, figsize=(10, 5))
    # Percentiles to draw; symmetric pairs (5/95, 33/67) share a linestyle,
    # the median gets the heaviest solid line.
    q = [5, 33, 50, 67, 95]
    q_idx = [[0, 4], [1, 3], [2]]
    q_labels = [str(q[0]) + ' and ' + str(q[4]) + 'th %', \
                str(q[1]) + ' and ' + str(q[3]) + 'th %', \
                'Median']
    linestyles = [':', '--', '-']
    linewidths = [1, 1.5, 3]
    occ_labels = ['Residential', 'Commercial Office', 'All Occupancies']
    # Names of the metrics stored along axis 3 of community_recovery.
    recovery_labels = ['functional_repair', 'impeding_factor_delay', 'functional_downtime', 'cordon_duration',
                       'cordon_induced_delay', 'total_delay', 'total_downtime']
    repair_idx = recovery_labels.index('functional_repair')
    no_cordon_downtime_idx = recovery_labels.index('functional_downtime')
    downtime_idx = recovery_labels.index('total_downtime')
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    # Reorder so repair/impeding/cordon curves keep consistent colors.
    colors = [colors[2], colors[0], colors[1]]
    label_idx = [repair_idx, no_cordon_downtime_idx, downtime_idx]
    label_names = ['Repair Time Only', 'Impeding Factor Delays', 'Downtime due to Cordons']
    for i in range(len(label_idx)):
        color = colors[i]
        rec_idx = label_idx[i]
        label_name = label_names[i]
        # Percentiles are taken across simulations (last axis); transpose so
        # rows are time steps and columns are the requested percentiles.
        recovery_curves = np.transpose(
            100 * np.percentile(community_recovery[:, i_rup, i_occ, rec_idx, :], q=q, axis=1))
        for j in range(len(q_idx)):
            idx = q_idx[j]
            _ = plt.plot(time, recovery_curves[:, idx], color=color, label=label_name, linestyle=linestyles[j],
                         linewidth=linewidths[j])
    # Build a custom legend: one entry per metric plus one per band style
    # (the plotted lines would otherwise produce duplicate labels).
    legend_elements = [
        Line2D([0], [0], label=label_names[0], color=colors[0], linewidth=linewidths[-1], linestyle=linestyles[-1]),
        Line2D([0], [0], label=label_names[1], color=colors[1], linewidth=linewidths[-1], linestyle=linestyles[-1]),
        Line2D([0], [0], label=label_names[2], color=colors[2], linewidth=linewidths[-1], linestyle=linestyles[-1]),
        Line2D([0], [0], color='none'),
        Line2D([0], [0], label=q_labels[-1], color='gray', linewidth=linewidths[-1], linestyle=linestyles[-1]),
        Line2D([0], [0], label=q_labels[1], color='gray', linewidth=linewidths[1], linestyle=linestyles[1]),
        Line2D([0], [0], label=q_labels[0], color='gray', linewidth=linewidths[0], linestyle=linestyles[0])]
    _ = plt.legend(handles=legend_elements)
    _ = plt.xlabel('Days after the earthquake')
    _ = plt.ylabel(occ_labels[i_occ] + ',' + '\n' + '% of pre-event sqft', multialignment='center')
    _ = plt.ylim([0, 100])
    _ = plt.xlim(xlim)
    _ = plt.grid(axis='both', linestyle='--', color='lightgray')
    _ = plt.show()
def plot_mean_community_recovery(community_recovery, time, xlim, i_rup, i_occ, sqft_totals, title):
    """Plot the mean community recovery curve with shaded delay contributions.

    Shades the area between successive downtime definitions so that repair
    time, impeding-factor delays and cordon-induced delays are visually
    separated, then overlays a deaggregation bar (see
    ``add_downtime_contributions_bar``).

    Parameters
    ----------
    community_recovery : ndarray
        Recovery fractions indexed as [time, rupture, occupancy,
        recovery-metric, simulation].
    time : array-like
        Days after the earthquake corresponding to axis 0.
    xlim : sequence
        x-axis limits in days.
    i_rup, i_occ : int
        Rupture-scenario and occupancy-class indices to plot.
    sqft_totals : sequence
        Pre-event square footage per occupancy class (used by the bar
        annotation when it reports sqft-days).
    title : str
        Figure title.
    """
    fig, ax = plt.subplots(figsize=((10, 5)))
    occ_labels = ['Residential', 'Commercial Office', 'All Occupancies']
    # Names of the metrics stored along axis 3 of community_recovery.
    recovery_labels = ['functional_repair', 'impeding_factor_delay', 'functional_downtime', 'cordon_duration',
                       'cordon_induced_delay', 'total_delay', 'total_downtime']
    repair_idx = recovery_labels.index('functional_repair')
    no_cordon_downtime_idx = recovery_labels.index('functional_downtime')
    downtime_idx = recovery_labels.index('total_downtime')
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    # Mean across simulations (last axis); gray band = pure repair time.
    idx = repair_idx
    repair_time = 100 * np.mean(community_recovery[:, i_rup, i_occ, idx, :], axis=1)
    plt.fill_between(time, repair_time, 100, color='darkgray')
    # Band between repair-only and repair+impeding curves, drawn only when
    # impeding factors actually add delay.
    idx = no_cordon_downtime_idx
    no_cordon_downtime = 100 * np.mean(community_recovery[:, i_rup, i_occ, idx, :], axis=1)
    if not np.array_equal(no_cordon_downtime, repair_time):
        plt.fill_between(time, no_cordon_downtime, repair_time, color=colors[0])
    # Band for the additional delay caused by cordons, when present.
    idx = downtime_idx
    downtime = 100 * np.mean(community_recovery[:, i_rup, i_occ, idx, :], axis=1)
    if not np.array_equal(downtime, no_cordon_downtime):
        plt.fill_between(time, downtime, no_cordon_downtime, color=colors[1])
    add_downtime_contributions_bar(community_recovery, time, i_rup, i_occ, sqft_totals, ax, xlim)
    legend_elements = [Patch(facecolor=colors[0], label='Impeding Factors'),
                       Patch(facecolor=colors[1], label='Cordon Delays'),
                       Patch(facecolor='darkgray', label='Repair Time')
                       ]
    _ = plt.legend(handles=legend_elements, title=' Average Contribution of:  ', loc=(0.59, 0.02))
    _ = ax.set_xlabel('Days after the earthquake')
    # _ = ax.set_ylabel(occ_labels[i_occ] + ',' + '\n' + '% of pre-event sqft' + '\n' + '(out of ' + '{:.1e}'.format(sqft_totals[i_occ]) + 'sqft)', multialignment='center')
    _ = ax.set_ylabel(occ_labels[i_occ] + ',' + '\n' + '% of pre-event sqft', multialignment='center')
    _ = ax.set_ylim([0, 100])
    _ = ax.set_xlim(xlim)
    _ = plt.title(title)
    _ = plt.grid(axis='both', linestyle='--', color='lightgray')
    _ = plt.show()
def add_downtime_contributions_bar(community_recovery, time, i_rup, i_occ, sqft_totals, ax, xlim):
    """Add an inset stacked bar deaggregating lost community-days onto *ax*.

    Integrates the mean recovery curves (trapezoid rule) up to
    ``time_frame`` days and splits the total loss into repair-time,
    impeding-factor and cordon contributions. A second bar shows the
    total, labeled either in community-days or sqft-days depending on
    ``metric``.

    Parameters mirror ``plot_mean_community_recovery``; ``ax`` is the axes
    to annotate and ``xlim`` positions the inset within the plot.
    """
    # Integration horizon in days; None would mean the full time vector
    # (this value must exist exactly in `time` for np.where to find it).
    time_frame = 360
    # metric = 'sqft-days'
    metric = 'community days'
    # Names of the metrics stored along axis 3 of community_recovery.
    recovery_labels = ['functional_repair', 'impeding_factor_delay', 'functional_downtime', 'cordon_duration',
                       'cordon_induced_delay', 'total_delay', 'total_downtime']
    repair_idx = recovery_labels.index('functional_repair')
    no_cordon_downtime_idx = recovery_labels.index('functional_downtime')
    downtime_idx = recovery_labels.index('total_downtime')
    [n_time, n_rups, n_blgs, n_parameters, n_sims] = community_recovery.shape
    if time_frame is not None:
        # Truncate everything to the requested horizon.
        n_time = np.where(time == time_frame)[0][0] + 1
        time = time[:n_time]
        community_recovery = community_recovery[:n_time, :, :, :]
    # Area under a fully-recovered (==1) curve; losses are measured as the
    # shortfall of each integrated mean curve from this baseline.
    baseline_time = np.trapz(np.ones(n_time), time)
    idx = repair_idx
    repair_time = np.mean(community_recovery[:, i_rup, i_occ, idx, :], axis=1)
    repair_time = baseline_time - np.trapz(repair_time, time)
    # Each subsequent contribution is the incremental shortfall beyond the
    # previously accounted-for terms.
    idx = no_cordon_downtime_idx
    impeding_factor_time = np.mean(community_recovery[:, i_rup, i_occ, idx, :], axis=1)
    impeding_factor_time = baseline_time - np.trapz(impeding_factor_time, time) - repair_time
    idx = downtime_idx
    cordon_time = np.mean(community_recovery[:, i_rup, i_occ, idx, :], axis=1)
    cordon_time = baseline_time - np.trapz(cordon_time, time) - repair_time - impeding_factor_time
    idx = downtime_idx
    downtime = np.mean(community_recovery[:, i_rup, i_occ, idx, :], axis=1)
    downtime = (baseline_time - np.trapz(downtime, time))
    delays = [impeding_factor_time, cordon_time, repair_time]
    proportional_delay = []
    for idx in range(len(delays)):
        proportional_delay.append(delays[idx] / downtime)
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    # Inset geometry in data coordinates (y in percent, x in days).
    ylim = [0, 100]
    y_start = 0.382 * ylim[1]
    y_height = 0.1 * ylim[1]
    y_mid = (2 * y_start + y_height) / 2
    x_start = 0.59 * xlim[1]
    x_width = 0.99 * xlim[1] - x_start
    patches = []
    # One rectangle per contribution, laid out left-to-right in proportion
    # to its share of the total loss.
    for idx in range(len(delays)):
        width = proportional_delay[idx] * x_width
        rect = Rectangle((x_start, y_start), width, y_height)
        patches.append(rect)
        x_mid = (2 * x_start + width) / 2
        x_start = x_start + width
        if width > 0:
            # NOTE(review): dead branch kept from a previous labeling mode —
            # `if False` always falls through to absolute-value labels.
            if False:
                ax.text(x_mid, y_mid, ('{:.0f}'.format(100 * proportional_delay[idx]) + '%'), ha='center', va='center',
                        color='white', fontsize='medium', fontweight='bold', zorder=20)
            else:
                ax.text(x_mid, y_mid, ('{:.0f}'.format(delays[idx])), ha='center', va='center',
                        color='white', fontsize='medium', fontweight='bold', zorder=20)
    # total downtime patch
    x_start = 0.59 * xlim[1]
    x_width = 0.99 * xlim[1] - x_start
    width = x_width
    y_start = y_start + y_height + 1.5
    y_mid = (2 * y_start + y_height) / 2
    rect = Rectangle((x_start, y_start), width, y_height)
    patches.append(rect)
    x_mid = (2 * x_start + width) / 2
    if metric == 'sqft-days':
        # Convert community-days to sqft-days for this occupancy class.
        downtime = downtime * sqft_totals[i_occ]
        ax.text(x_mid, y_mid, ('{:.2e}'.format(downtime) + ' sqft-days'), ha='center', va='center', color='white',
                fontsize='medium', fontweight='bold', zorder=20)
    else:
        ax.text(x_mid, y_mid, ('{:.0f}'.format(downtime) + ' community days'), ha='center', va='center', color='white',
                fontsize='medium', fontweight='bold', zorder=20)
    pc = PatchCollection(patches, facecolor=[colors[0], colors[1], 'darkgray', 'tab:gray'], zorder=10)
    _ = ax.add_collection(pc)
    # downtime deaggregation label
    y_start = y_start + y_height
    y_mid = (2 * y_start + y_height) / 2
    x_mid = (2 * x_start + width) / 2
    if time_frame is None:
        ax.text(x_mid, y_mid, ('Total loss:'), ha='center', va='center', color='black', fontsize='large', zorder=20)
    else:
        ax.text(x_mid, y_mid, ('Loss in first ' + '{:.0f}'.format(time[-1]) + ' days:'), ha='center', va='center',
                color='black', fontsize='large', zorder=20)
# def grid_plot_mean_community_recovery(community_recovery, time, xlim, i_rup, i_occ, sqft_totals, ax):
# occ_labels = ['Residential', 'Commercial Office', 'All Occupancies']
# recovery_labels = ['functional_repair', 'impeding_factor_delay', 'functional_downtime', 'cordon_duration',
# 'cordon_induced_delay', 'total_delay', 'total_downtime']
# repair_idx = recovery_labels.index('functional_repair')
# no_cordon_downtime_idx = recovery_labels.index('functional_downtime')
# downtime_idx = recovery_labels.index('total_downtime')
#
# prop_cycle = plt.rcParams['axes.prop_cycle']
# colors = prop_cycle.by_key()['color']
#
# idx = repair_idx
# repair_time = 100 * np.mean(community_recovery[:, i_rup, i_occ, idx, :], axis=1)
# ax.fill_between(time, repair_time, 100, color='darkgray')
#
# idx = no_cordon_downtime_idx
# no_cordon_downtime = 100 * np.mean(community_recovery[:, i_rup, i_occ, idx, :], axis=1)
# if not np.array_equal(no_cordon_downtime, repair_time):
# ax.fill_between(time, no_cordon_downtime, repair_time, color=colors[0])
#
# idx = downtime_idx
# downtime = 100 * np.mean(community_recovery[:, i_rup, i_occ, idx, :], axis=1)
# if not np.array_equal(downtime, no_cordon_downtime):
# ax.fill_between(time, downtime, no_cordon_downtime, color=colors[1])
#
# add_downtime_contributions_bar(community_recovery, time, i_rup, i_occ, sqft_totals, ax, xlim)
#
# legend_elements = [Patch(facecolor=colors[0], label='Impeding Factors'),
# Patch(facecolor=colors[1], label='Cordon Delays'),
# Patch(facecolor='darkgray', label='Repair Time')
# ]
# _ = ax.legend(handles=legend_elements, title=' Average Contribution of: ', loc=(0.59, 0.02))
#
# _ = ax.grid(axis='both', color='lightgray', alpha=0.5)
def grid_plot_mean_community_recovery(community_recovery, time, xlim, i_rup, i_occ, sqft_totals, ax, legend):
color_values = [0.15, 0.35, 0.65, 0.9]
color_palettes = ['Greens', 'Greys', 'Oranges', 'Blues']
colors = [mpl.cm.get_cmap(color_palettes[i])(color_values[i])[:-1] for i in range(len(color_values))]
if False:
colors = [grayscale_version(colors[i]) | |
#!/usr/bin/env python3
#
# pip install py-algorand-sdk
import argparse
import base64
import glob
import json
import logging
import msgpack
import os
import re
import signal
import sys
import tarfile
import time
import algosdk
from util import maybedecode, mloads, unmsgpack
logger = logging.getLogger(__name__)
# algod = token_addr_from_algod(os.path.join(os.getenv('HOME'),'Algorand/n3/Node1'))
# algod = token_addr_from_algod(os.path.join(os.getenv('HOME'),'mainnet'))
# print(json.dumps(algod.status(), indent=2))
# b=algod.block_info(algod.status()['lastRound'])
# print(json.dumps(b, indent=2))
def token_addr_from_algod(algorand_data):
    """Read the algod API token and address from a node data directory.

    Args:
        algorand_data: path to an algod data dir containing ``algod.net``
            and ``algod.token`` files.

    Returns:
        ``(token, addr)`` tuple; *addr* is prefixed with ``http://`` when
        the file content does not already start with a scheme.
    """
    # Use context managers so the file handles are closed deterministically
    # (the original opened both files without ever closing them).
    with open(os.path.join(algorand_data, 'algod.net'), 'rt') as fin:
        addr = fin.read().strip()
    if not addr.startswith('http'):
        addr = 'http://' + addr
    with open(os.path.join(algorand_data, 'algod.token'), 'rt') as fin:
        token = fin.read().strip()
    return token, addr
# b = nextblock(algod, b['round'])
def nextblock(algod, lastround=None):
    """Return the block after *lastround* via the block_info json API.

    When *lastround* is None, the node's current last round is used and the
    function blocks until the following round appears. Otherwise it first
    tries to fetch ``lastround + 1`` directly and only waits when that
    block is not available yet.

    Args:
        algod: client exposing ``status()``, ``block_info(round)`` and
            ``status_after_block(round)``.
        lastround: round number already processed, or None.

    Returns:
        The block_info dict for the next round.
    """
    if lastround is None:
        lastround = algod.status()['lastRound']
        logger.debug('nextblock status lastRound %s', lastround)
    else:
        try:
            return algod.block_info(lastround + 1)
        except Exception:
            # Block not published yet (or transient node error): fall
            # through and wait for it. The original used a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.
            pass
    status = algod.status_after_block(lastround)
    nbr = status['lastRound']
    return algod.block_info(nbr)
def make_ob_json_polite(ob):
    """Recursively convert *ob* into JSON-serializable data.

    dicts and lists are walked recursively, bytes values are replaced by
    their base64 text encoding, and every other value passes through
    unchanged.
    """
    if isinstance(ob, bytes):
        return base64.b64encode(ob).decode()
    if isinstance(ob, list):
        return [make_ob_json_polite(item) for item in ob]
    if isinstance(ob, dict):
        return {key: make_ob_json_polite(value) for key, value in ob.items()}
    return ob
class Algobot:
    """Follows the Algorand chain and dispatches blocks/txns to handlers.

    Blocks are fetched either from a live algod node (raw msgpack when the
    node supports it, block_info json otherwise) or from local files
    (``blockfiles``). Progress is persisted to ``progress_log_path`` so a
    restart resumes from the last processed round.
    """
    def __init__(self, algorand_data=None, token=None, addr=None, headers=None, block_handlers=None, txn_handlers=None, progress_log_path=None, raw_api=None):
        # Either algorand_data (dir with algod.net/algod.token) or an
        # explicit token+addr pair identifies the node.
        self.algorand_data = algorand_data
        self.token = token
        self.addr = addr
        self.headers = headers
        self._algod = None  # lazily-created AlgodClient (see algod())
        self.block_handlers = block_handlers or list()
        self.txn_handlers = txn_handlers or list()
        self.progress_log_path = progress_log_path
        self._progresslog = None  # open progress-log file handle
        self._progresslog_write_count = 0  # lines since last rotation
        self.go = True  # cleared externally to stop loop()
        self.raw_api = raw_api  # False disables the raw block endpoint
        # None = unknown; probed on first rawblock() call.
        self.algod_has_block_raw = None
        self.blockfiles = None  # optional list of local block files
        return
    def algod(self):
        """Return the AlgodClient, creating it on first use."""
        if self._algod is None:
            if self.algorand_data:
                token, addr = token_addr_from_algod(self.algorand_data)
            else:
                token = self.token
                addr = self.addr
            self._algod = algosdk.algod.AlgodClient(token, addr, headers=self.headers)
        return self._algod
    def rawblock(self, xround):
        "if possible fetches and returns raw block msgpack including block and cert; otherwise None"
        algod = self.algod()
        # Probe the raw endpoint unless we already know it is unsupported.
        if self.algod_has_block_raw or (self.algod_has_block_raw is None):
            response = algod.algod_request("GET", "/block/" + str(xround), params={'raw':1}, raw_response=True)
            contentType = response.getheader('Content-Type')
            if contentType == 'application/json':
                # Node ignored raw=1; remember and stop asking.
                logger.debug('got json response, disabling rawblock')
                self.algod_has_block_raw = False
                return None
            if contentType == 'application/x-algorand-block-v1':
                self.algod_has_block_raw = True
                raw = response.read()
                block = unmsgpack(mloads(raw))
                return block
            raise Exception('unknown response content type {!r}'.format(contentType))
        logger.debug('rawblock passing out')
        return None
    def eitherblock(self, xround):
        "return raw block or json info block"
        # Prefer raw unless raw_api was explicitly disabled (False).
        if self.algod_has_block_raw or (self.raw_api != False):
            return self.rawblock(xround)
        # NOTE(review): this branch is unreachable — its first condition
        # (raw_api != False) already forced the return above.
        if (self.raw_api != False) and (self.algod_has_block_raw is None):
            xb = self.rawblock(xround)
            if self.algod_has_block_raw:
                return xb
        return self.algod().block_info(xround)
    def nextblock_from_files(self):
        """Pop and decode the next block from self.blockfiles."""
        if not self.blockfiles:
            logger.debug('empty blockfiles')
            self.go = False
            # Sentinel block so blockround() returns None and loop() exits.
            return {'block':{'rnd':None}}
            #raise Exception("end of blockfiles")
        bf = self.blockfiles[0]
        logger.debug('block from file %s', bf)
        self.blockfiles = self.blockfiles[1:]
        with open(bf, 'rb') as fin:
            raw = fin.read()
        # Files may hold msgpack (raw block) or json (block_info dump).
        try:
            return unmsgpack(mloads(raw))
        except Exception as e:
            logger.debug('%s: failed to msgpack decode, %s', bf, e)
            return json.loads(raw.decode())
    def nextblock(self, lastround=None, retries=3):
        "from block_info json api simplified block"
        trycount = 0
        # Retry transient failures up to `retries` times; re-raise on the
        # last attempt so the caller sees the underlying error.
        while (trycount < retries) and self.go:
            trycount += 1
            try:
                return self._nextblock_inner(lastround)
            except Exception as e:
                if trycount >= retries:
                    logger.error('too many errors in nextblock retries')
                    raise
                else:
                    # NOTE(review): logger.warn is a deprecated alias of warning.
                    logger.warn('error in nextblock(%r) (retrying): %s', lastround, e)
        # Only reached when self.go was cleared mid-retry.
        return None
    def _nextblock_inner(self, lastround):
        """Fetch the next block, from files or from the node."""
        if self.blockfiles is not None:
            return self.nextblock_from_files()
        algod = self.algod()
        # TODO: algod block raw
        if lastround is None:
            lastround = algod.status()['lastRound']
            logger.debug('nextblock status lastRound %s', lastround)
        else:
            try:
                return self.eitherblock(lastround + 1)
            except:
                pass
        # Wait for a new round, then fetch it.
        status = algod.status_after_block(lastround)
        nbr = status['lastRound']
        # If the chain jumped ahead, make one more attempt at lastround+1
        # before accepting the newer round (loop body always exits on the
        # first iteration via return or break).
        while (nbr > lastround + 1) and self.go:
            # try lastround+1 one last time
            try:
                return self.eitherblock(lastround + 1)
            except:
                break
        b = self.eitherblock(nbr)
        return b
    def loop(self):
        """Run the handler loop, resuming from persisted progress."""
        lastround = self.recover_progress()
        try:
            self._loop_inner(lastround)
        finally:
            self.close()
    def _loop_inner(self, lastround):
        """Fetch blocks and invoke block/txn handlers until stopped."""
        while self.go:
            b = self.nextblock(lastround)
            if b is None:
                print("got None nextblock. exiting")
                return
            nowround = blockround(b)
            if (lastround is not None) and (nowround != lastround + 1):
                logger.info('round jump %d to %d', lastround, nowround)
            for bh in self.block_handlers:
                bh(self, b)
            # Transaction layout differs between raw and json blocks.
            bb = b.get('block')
            if bb:
                # raw block case
                transactions = bb.get('txns', [])
            else:
                # json block_info case
                txns = b.get('txns', {})
                transactions = txns.get('transactions', [])
            for txn in transactions:
                for th in self.txn_handlers:
                    th(self, b, txn)
            self.record_block_progress(nowround)
            lastround = nowround
    def record_block_progress(self, round_number):
        """Append the processed round to the progress log, rotating it
        atomically (via os.replace) after ~100k lines."""
        if self._progresslog_write_count > 100000:
            if self._progresslog is not None:
                self._progresslog.close()
                self._progresslog = None
            nextpath = self.progress_log_path + '_next_' + time.strftime('%Y%m%d_%H%M%S', time.gmtime())
            nextlog = open(nextpath, 'xt')
            nextlog.write('{}\n'.format(round_number))
            nextlog.flush()
            nextlog.close() # could probably leave this open and keep writing to it
            os.replace(nextpath, self.progress_log_path)
            self._progresslog_write_count = 0
            # new log at standard location will be opened next time
            return
        if self._progresslog is None:
            if self.progress_log_path is None:
                return
            self._progresslog = open(self.progress_log_path, 'at')
            self._progresslog_write_count = 0
        self._progresslog.write('{}\n'.format(round_number))
        self._progresslog.flush()
        self._progresslog_write_count += 1
    def recover_progress(self):
        """Return the last recorded round from the progress log, or None."""
        if self.progress_log_path is None:
            return None
        try:
            with open(self.progress_log_path, 'rt') as fin:
                # Read only the tail of the file; the last line holds the
                # most recently recorded round.
                fin.seek(0, 2)
                endpos = fin.tell()
                fin.seek(max(0, endpos - 100))
                raw = fin.read()
                lines = raw.splitlines()
                return int(lines[-1])
        except Exception as e:
            logger.info('could not recover progress: %s', e)
        return None
    def close(self):
        """Close the progress log if open."""
        if self._progresslog is not None:
            self._progresslog.close()
            self._progresslog = None
blocktarParseRe = re.compile(r'(\d+)_(\d+).tar.bz2')
class BlockArchiver:
    """Fetches raw blocks from algod into ``blockdir`` and bundles each
    complete run of 1000 rounds into a ``{start}_{end}.tar.bz2`` in
    ``tardir``, deleting the individual block files afterwards."""
    def __init__(self, algorand_data=None, token=None, addr=None, headers=None, blockdir=None, tardir=None):
        # Either algorand_data (dir with algod.net/algod.token) or an
        # explicit token+addr pair identifies the node.
        self.algorand_data = algorand_data
        self.token = token
        self.addr = addr
        self.headers = headers
        self.blockdir = blockdir  # loose per-round block files
        self.tardir = tardir  # finished 1000-block tarballs
        self.storedBlocks = set()  # rounds currently present in blockdir
        self.lastBlockOkTime = time.time() # pretend things are okay when we start
        self.go = True  # cleared externally to stop run()
        self._algod = None  # lazily-created AlgodClient (see algod())
        return
    def algod(self):
        """Return the AlgodClient, creating it on first use."""
        if self._algod is None:
            if self.algorand_data:
                token, addr = token_addr_from_algod(self.algorand_data)
            else:
                token = self.token
                addr = self.addr
            self._algod = algosdk.algod.AlgodClient(token, addr, headers=self.headers)
        return self._algod
    def lastroundFromBlockdir(self):
        """Scan blockdir, populate storedBlocks, return the highest round
        found (or None). Filenames are bare round numbers."""
        maxround = None
        for fname in os.listdir(self.blockdir):
            try:
                fround = int(fname)
                self.storedBlocks.add(fround)
                if maxround is None or fround > maxround:
                    maxround = fround
            except:
                logger.warning('junk in blockdir: %r', os.path.join(self.blockdir, fname))
        return maxround
    def lastroundFromTardir(self):
        """Return the highest end-round among tarballs in tardir, or None."""
        maxround = None
        for fname in os.listdir(self.tardir):
            try:
                m = blocktarParseRe.match(fname)
                if m:
                    endblock = int(m.group(2))
                    if maxround is None or endblock > maxround:
                        maxround = endblock
            except:
                logger.warning('junk in tardir: %r', os.path.join(self.tardir, fname))
        return maxround
    def rawblock(self, xround):
        """Fetch round *xround* as raw msgpack bytes, or None when the
        node does not return the raw content type. Resets the cached
        client on error so the next call reconnects."""
        algod = self.algod()
        logger.debug('get %d', xround)
        try:
            response = algod.algod_request("GET", "/block/" + str(xround), params={'raw':1}, raw_response=True)
            contentType = response.getheader('Content-Type')
            if contentType == 'application/x-algorand-block-v1':
                raw = response.read()
                return raw
            return None
        except:
            self._algod = None
            raise
    def fetchAndStoreBlock(self, xround):
        """Fetch round *xround*, sanity-check its round number, and write
        the raw bytes to blockdir. Raises when the block is unavailable
        or the decoded round does not match."""
        raw = self.rawblock(xround)
        if raw is None:
            raise Exception('could not get block {}'.format(xround))
        # trivial check
        bl = mloads(raw)
        # NOTE(review): the local name `blockround` shadows the
        # module-level blockround() helper defined later in this file.
        if xround == 0:
            # Genesis block may omit the 'rnd' key (implied round 0).
            blockround = bl[b'block'].get(b'rnd', 0)
        else:
            blockround = bl[b'block'][b'rnd']
        if blockround != xround:
            raise Exception('fetching round {} retrieved block for round {}'.format(xround, bl[b'block'][b'rnd']))
        blockpath = os.path.join(self.blockdir, str(xround))
        with open(blockpath, 'wb') as fout:
            fout.write(raw)
        # Log at info only every 100th block to keep output manageable.
        if xround % 100 == 0:
            logger.info('got block %s', blockpath)
        else:
            logger.debug('got block %s', blockpath)
        self.storedBlocks.add(xround)
        self.lastBlockOkTime = time.time()
    def maybeTarBlocks(self):
        """If a full aligned range [xm, xm+999] of blocks is available,
        tar it into tardir and delete the individual block files."""
        minround = min(self.storedBlocks)
        maxround = max(self.storedBlocks)
        # First multiple of 1000 at or above minround.
        xm = minround - (minround % 1000)
        if xm < minround:
            xm += 1000
        if xm+1000 > maxround:
            # not enough blocks
            return
        # Fill any gaps in the range before archiving.
        for r in range(xm, xm+1000):
            if r not in self.storedBlocks:
                self.fetchAndStoreBlock(r)
        # okay, we have them all
        if minround < xm:
            # forget incomplete block set?
            for x in list(self.storedBlocks):
                if x < xm:
                    self.storedBlocks.discard(x)
                    logger.warning('stale block in blockdir: %r', os.path.join(self.blockdir, str(x)))
        tarname = '{}_{}.tar.bz2'.format(xm, xm+1000-1)
        outpath = os.path.join(self.tardir, tarname)
        tf = tarfile.open(outpath, 'w:bz2')
        for r in range(xm, xm+1000):
            bs = str(r)
            tf.add(os.path.join(self.blockdir, bs), arcname=bs)
        tf.close()
        logger.info('%s', tarname)
        # tar made, cleanup block files
        for r in range(xm, xm+1000):
            bs = str(r)
            os.remove(os.path.join(self.blockdir, bs))
            self.storedBlocks.discard(r)
        # TODO: upload tar to s3
        return
    def _fetchloop(self, lastround):
        """Fetch consecutive blocks starting at lastround+1 until one is
        unavailable; returns the last round successfully fetched. Warns
        only if the very first fetch fails."""
        some = False
        while self.go:
            try:
                self.fetchAndStoreBlock(lastround + 1)
                self.maybeTarBlocks()
                some = True
            except Exception as e:
                if not some:
                    logger.warning('err in fetch (%d), %s', lastround + 1, e)
                break
            lastround += 1
        return lastround
    def run(self):
        """Main loop: recover position from blockdir/tardir, catch up,
        then follow the chain, reconnecting and logging on stalls."""
        lastround = self.lastroundFromBlockdir()
        if lastround is not None:
            logger.debug('lastround from blockdir %d', lastround)
        if lastround is None:
            lastround = self.lastroundFromTardir()
            if lastround is not None:
                logger.debug('lastround from tardir %d', lastround)
        algod = self.algod()
        if lastround is None:
            # Cold start: begin from the genesis block.
            lastround = 0
            self.fetchAndStoreBlock(lastround)
        lastround = self._fetchloop(lastround)
        lastlog = None
        while self.go:
            try:
                algod = self.algod()
                # Blocks until a round after lastround exists.
                status = algod.status_after_block(lastround)
                logger.debug('status %r', status)
                lastround = self._fetchloop(lastround)
            except Exception as e:
                logger.warning('err in run, %s', e)
                # reset the connection
                self._algod = None
            # Complain (rate-limited to every 30s) when no block has been
            # stored for more than 30 seconds.
            now = time.time()
            dt = now - self.lastBlockOkTime
            if dt > 30:
                if (lastlog is None) or ((now - lastlog) > 30):
                    logger.warning('no block for %.1fs',dt)
                    lastlog = now
                time.sleep(1)
def blockround(b):
    """Extract the round number from a block in either representation.

    Raw msgpack blocks nest their fields under 'block' (round key 'rnd');
    block_info json responses expose 'round' at the top level.
    """
    inner = b.get('block')
    # A truthy 'block' entry means raw mode; otherwise fall back to json mode.
    return inner.get('rnd') if inner else b.get('round')
# block_printer is an example block handler; it takes two | |
import logging
from common_utils_py.agreements.service_agreement import ServiceAgreement
from common_utils_py.metadata.metadata import Metadata
from nevermined_gateway.compute_validations import is_allowed_read_compute
import time
from authlib.common.encoding import to_bytes
from authlib.jose import jwt
from authlib.jose.errors import BadSignatureError, InvalidClaimError
from authlib.oauth2.rfc6749.errors import InvalidClientError
from authlib.oauth2.rfc6749.models import ClientMixin
from authlib.oauth2.rfc6749.resource_protector import TokenValidator
from authlib.oauth2.rfc6750 import InvalidTokenError
from common_utils_py.agreements.service_types import ServiceTypes
from common_utils_py.did import NEVERMINED_PREFIX, id_to_did
from common_utils_py.did_resolver.did_resolver import DIDResolver
from common_utils_py.oauth2.token import NeverminedJWTBearerGrant as _NeverminedJWTBearerGrant
from common_utils_py.oauth2.jwk_utils import account_to_jwk
from nevermined_gateway.conditions import (fulfill_access_condition, fulfill_access_proof_condition, fulfill_compute_condition,
fulfill_escrow_payment_condition, fulfill_nft_holder_and_access_condition, is_nft721_holder,
is_nft_holder)
from nevermined_gateway.constants import (BaseURLs, ConditionState,
ConfigSections)
from nevermined_gateway.identity.jwk_utils import jwk_to_eth_address, recover_public_keys_from_assertion, \
recover_public_keys_from_eth_assertion
from nevermined_gateway.util import (get_config, get_provider_account, get_provider_babyjub_key, is_access_granted, is_owner_granted,
keeper_instance, was_compute_triggered, is_nft_access_condition_fulfilled,
get_asset_url_at_index)
from web3 import Web3
from nevermined_gateway.snark_util import call_prover
from common_utils_py.utils import keytransfer
logger = logging.getLogger(__name__)
class NeverminedOauthClient(ClientMixin):
    """OAuth2 client entity built from the claims of a validated JWT grant."""

    def __init__(self, claims):
        # Mandatory claims: issuer (client address) and audience (resource).
        self.address = claims["iss"]
        self.resource = claims["aud"]
        # Optional claims; absent for endpoints that do not need them.
        for attr, claim in (("service_agreement_id", "sub"),
                            ("did", "did"),
                            ("execution_id", "execution_id")):
            setattr(self, attr, claims.get(claim))

    def check_grant_type(self, grant_type):
        """Accept only the Nevermined JWT bearer grant type."""
        return grant_type == NeverminedJWTBearerGrant.GRANT_TYPE
class NeverminedJWTBearerGrant(_NeverminedJWTBearerGrant):
    def __init__(self, request, server):
        """Bind the grant to the gateway's provider credentials and config."""
        super().__init__(request, server)
        # Ethereum account and babyjubjub key the gateway fulfills
        # conditions with.
        self.provider_account = get_provider_account()
        self.provider_key = get_provider_babyjub_key()
        self.config = get_config()
    def authenticate_user(self, client, claims):
        # There is no separate user entity in this flow; the client itself
        # (authenticated in authenticate_client) is the principal.
        return None
    def resolve_public_key(self, headers, payload):
        """Recover candidate public keys from the assertion signature.

        Stores the candidates on ``self.possible_public_keys`` for later
        client authentication and returns one key that verifies the
        signature so authlib's own check passes.
        """
        assertion = to_bytes(self.request.data["assertion"])
        # with ecdsa this will produce two public keys that can possibly verify the signature.
        # we will keep both so that later we can authenticate the client.
        # and we can return any of them
        possible_public_keys = recover_public_keys_from_assertion(assertion)
        # if signing with ethereum this recovery becomes the de-facto signature verification
        # since we check if any of these keys match the issuer of the token.
        #
        # signing with ethereum differs from ES256K
        # - it adds a prefix to the message to sign
        # - it uses keccak_256 hash function instead of sha256
        #
        # we then return a public key that verifies the message so that
        # authlib doesn't complain with a bad signature
        eths = payload.get("eths")
        if eths == "personal":
            possible_eths_keys = recover_public_keys_from_eth_assertion(assertion)
            self.possible_public_keys = possible_eths_keys
        else:
            self.possible_public_keys = possible_public_keys
        return possible_public_keys[0]
    def check_ddo(self, did, agreement_id, asset_id, consumer_address, keeper, cond_ids, service_type):
        """Verify the on-chain agreement's condition ids match the DDO.

        Recomputes the expected condition ids from the resolved DDO's
        service definition and raises InvalidClientError when they differ
        from *cond_ids* (the ids stored on chain for *agreement_id*).
        """
        ddo = DIDResolver(keeper.did_registry).resolve(did)
        aservice = ddo.get_service(service_type)
        token_address = aservice.get_param_value_by_name('_tokenAddress')
        # Fall back to the default payment token when the service does not
        # pin one explicitly.
        if token_address is None or len(token_address) == 0:
            token_address = keeper.token.address
        (id1, id2, id3) = aservice.generate_agreement_condition_ids(agreement_id, asset_id, consumer_address, keeper, token_address)
        ids = [id1, id2, id3]
        if ids != cond_ids:
            raise InvalidClientError(f"ServiceAgreement {agreement_id} doesn't match ddo")
    def authenticate_client(self, claims):
        """Authenticate the client and authorize it for the requested endpoint.

        Checks that the token issuer matches one of the public keys
        recovered from the assertion signature, then runs the
        endpoint-specific validation selected by the ``aud`` claim.

        Raises:
            InvalidClientError: on issuer mismatch or failed validation.

        Returns:
            NeverminedOauthClient built from the claims.
        """
        logger.info('Auth client')
        possible_eth_addresses = [jwk_to_eth_address(jwk) for jwk in self.possible_public_keys]
        try:
            received_address = Web3.toChecksumAddress(claims["iss"])
        except ValueError:
            raise InvalidClientError(f"iss: {claims['iss']} needs to be a valid ethereum address")
        if not received_address in possible_eth_addresses:
            raise InvalidClientError(
                f"iss: {claims['iss']} does not match with the public key used to sign the JwTBearerGrant")
        # Dispatch on the audience (target endpoint) to the matching
        # authorization check; unknown audiences pass through unchecked.
        if claims["aud"] == BaseURLs.ASSETS_URL + "/access":
            # check if client has access
            self.validate_access(claims["sub"], claims["did"], claims["iss"])
        elif claims["aud"] == BaseURLs.ASSETS_URL + "/nft-access":
            self.validate_nft_access(claims["sub"], claims["did"], claims["iss"])
        elif claims["aud"] == BaseURLs.ASSETS_URL + "/access-proof":
            self.validate_access_proof(claims["sub"], claims["did"], claims["iss"], claims["buyer"], claims["babysig"])
        elif claims["aud"] == BaseURLs.ASSETS_URL + "/download":
            self.validate_owner(claims["did"], claims["iss"])
        elif claims["aud"] == BaseURLs.ASSETS_URL + "/compute":
            self.validate_compute(claims["sub"], claims["execution_id"], claims["iss"])
        elif claims["aud"] == BaseURLs.ASSETS_URL + "/execute":
            self.validate_execute(claims["sub"], claims["did"], claims["iss"])
        return NeverminedOauthClient(claims)
    def validate_access(self, agreement_id, did, consumer_address):
        """Ensure *consumer_address* has access to *did* under *agreement_id*.

        If access is not yet granted on chain, verifies the agreement's
        conditions against the DDO, requires the lock (payment) condition
        to be fulfilled, fulfills the access and escrow conditions as the
        provider, then polls until access is granted.

        Raises:
            InvalidClientError: when the agreement is unpaid, a condition
                cannot be fulfilled, or access is never granted.
        """
        keeper = keeper_instance()
        if not is_access_granted(
                agreement_id,
                did,
                consumer_address,
                keeper):
            # 3. If not granted, verification of agreement and conditions
            agreement = keeper.agreement_manager.get_agreement(agreement_id)
            cond_ids = agreement.condition_ids
            asset = DIDResolver(keeper.did_registry).resolve(did)
            asset_id = f'0x{did.replace(NEVERMINED_PREFIX, "")}'
            self.check_ddo(did, agreement_id, asset_id, consumer_address, keeper, cond_ids, ServiceTypes.ASSET_ACCESS)
            access_condition_status = keeper.condition_manager.get_condition_state(cond_ids[0])
            lock_condition_status = keeper.condition_manager.get_condition_state(cond_ids[1])
            escrow_condition_status = keeper.condition_manager.get_condition_state(
                cond_ids[2])
            logger.debug('AccessCondition: %d' % access_condition_status)
            logger.debug('LockPaymentCondition: %d' % lock_condition_status)
            logger.debug('EscrowPaymentCondition: %d' % escrow_condition_status)
            # The consumer must have paid before the provider fulfills
            # anything on their behalf.
            if lock_condition_status != ConditionState.Fulfilled.value:
                logger.debug('ServiceAgreement %s was not paid. Forbidden' % agreement_id)
                raise InvalidClientError(
                    f"ServiceAgreement {agreement_id} was not paid, LockPaymentCondition status is {lock_condition_status}")
            fulfilled = fulfill_access_condition(keeper, agreement_id, cond_ids, asset_id, consumer_address,
                                                 self.provider_account)
            if not fulfilled:
                raise InvalidClientError('Server error fulfilling access condition')
            fulfill_escrow_payment_condition(keeper, agreement_id, cond_ids, asset,
                                             self.provider_account)
            # Poll for the on-chain grant to appear, up to PING_ITERATIONS
            # attempts spaced PING_SLEEP milliseconds apart.
            iteration = 0
            access_granted = False
            while iteration < ConfigSections.PING_ITERATIONS:
                iteration = iteration + 1
                logger.debug('Checking if access was granted. Iteration %d' % iteration)
                if not is_access_granted(agreement_id, did, consumer_address, keeper):
                    time.sleep(ConfigSections.PING_SLEEP / 1000)
                else:
                    access_granted = True
                    break
            if not access_granted:
                msg = ('Checking access permissions failed. Either consumer address does not have '
                       'permission to consume this asset or consumer address and/or service '
                       'agreement '
                       'id is invalid.')
                logger.warning(msg)
                raise InvalidClientError(msg)
    def validate_access_proof(self, agreement_id, did, eth_address, consumer_address, jubjub_sig):
        """Validate a proof-based (DLEQ/keytransfer) access request.

        Verifies the consumer's babyjubjub signature, checks the agreement
        against the DDO, requires payment, and — when escrow is still
        pending — computes the SNARK proof and fulfills the access-proof
        and escrow conditions as the provider.

        Args:
            agreement_id: on-chain service agreement id.
            did: asset DID.
            eth_address: issuer's ethereum address (hex).
            consumer_address: consumer's babyjubjub public key as a 128-hex
                string (x||y) — split into two coordinates below.
            jubjub_sig: babyjubjub signature to verify.

        Raises:
            InvalidClientError: on bad signature, unpaid agreement, or
                condition-id mismatch.
        """
        # Split the 128-hex-char key into its two curve coordinates.
        consumer_pub = ['0x'+consumer_address[0:64], '0x'+consumer_address[64:128]]
        provider_pub = [self.provider_key.x, self.provider_key.y]
        if not keytransfer.verify([int(consumer_pub[0],16), int(consumer_pub[1],16)], int(eth_address,16), jubjub_sig):
            raise InvalidClientError(
                f"ServiceAgreement {agreement_id}: babyjubjub signature doesn't match")
        keeper = keeper_instance()
        agreement = keeper.agreement_manager.get_agreement(agreement_id)
        cond_ids = agreement.condition_ids
        asset = DIDResolver(keeper.did_registry).resolve(did)
        asset_id = did.replace(NEVERMINED_PREFIX, "")
        self.check_ddo(did, agreement_id, asset_id, consumer_pub, keeper, cond_ids, ServiceTypes.ASSET_ACCESS_PROOF)
        access_condition_status = keeper.condition_manager.get_condition_state(cond_ids[0])
        lock_condition_status = keeper.condition_manager.get_condition_state(cond_ids[1])
        escrow_condition_status = keeper.condition_manager.get_condition_state(
            cond_ids[2])
        logger.info('AccessProofCondition: %d' % access_condition_status)
        logger.info('LockPaymentCondition: %d' % lock_condition_status)
        logger.info('EscrowPaymentCondition: %d' % escrow_condition_status)
        # The consumer must have paid before the provider does any work.
        if lock_condition_status != ConditionState.Fulfilled.value:
            logger.debug('ServiceAgreement %s was not paid. Forbidden' % agreement_id)
            raise InvalidClientError(
                f"ServiceAgreement {agreement_id} was not paid, LockPaymentCondition status is {lock_condition_status}")
        if escrow_condition_status != ConditionState.Fulfilled.value:
            # compute the proof
            auth_method = asset.authorization.main['service']
            url = '0x' + get_asset_url_at_index(0, asset, self.provider_account, auth_method)
            res = call_prover(consumer_pub, self.provider_key.secret, url)
            # check that the condition ID is correct
            cond_id = keeper.access_proof_condition.generate_id(
                agreement_id,
                ['bytes32', 'bytes32', 'bytes32', 'bytes32', 'bytes32'],
                [res['hash'], consumer_pub[0], consumer_pub[1], provider_pub[0], provider_pub[1]]
            )
            if cond_ids[0] != cond_id.hex():
                raise InvalidClientError(
                    f"ServiceAgreement {agreement_id}: public key doesn't match {consumer_address}")
            fulfill_access_proof_condition(keeper, agreement_id, cond_ids, res['hash'], consumer_pub, provider_pub, res['cipher'], res['proof'],
                                           self.provider_account)
            fulfill_escrow_payment_condition(keeper, agreement_id, cond_ids, asset,
                                             self.provider_account, ServiceTypes.ASSET_ACCESS_PROOF)
def validate_nft_access(self, agreement_id, did, consumer_address):
keeper = keeper_instance()
asset = DIDResolver(keeper.did_registry).resolve(did)
# check which nft access service type is on the ddo
service_type = ServiceTypes.NFT_ACCESS
if asset.get_service(ServiceTypes.NFT721_ACCESS) is not None:
service_type = ServiceTypes.NFT721_ACCESS
sa = ServiceAgreement.from_ddo(service_type, asset)
return self._validate_nft_access(agreement_id, did, consumer_address, sa, service_type)
def _validate_nft_access(self, agreement_id, did, consumer_address, service_agreement, service_type):
    """Check (and, if needed, fulfill on-chain) NFT-gated access for `consumer_address`.

    Without an agreement id the check is a plain token-balance lookup; with one,
    the agreement's condition ids are verified against the DDO and the
    holder/access conditions are fulfilled if not already done.

    Raises InvalidClientError when the agreement does not match the DDO or
    access cannot be granted.
    """
    keeper = keeper_instance()
    asset = DIDResolver(keeper.did_registry).resolve(did)
    asset_id = asset.asset_id
    sa_name = service_agreement.main['name']
    erc721_address = service_agreement.get_param_value_by_name('_contractAddress')
    access_granted = False
    if agreement_id is None or agreement_id == '0x':
        # No on-chain agreement: a simple holder-balance check decides access.
        if sa_name == 'nftAccessAgreement':
            access_granted = is_nft_holder(
                keeper, asset_id, service_agreement.get_number_nfts(), consumer_address)
        elif sa_name == 'nft721AccessAgreement':
            access_granted = is_nft721_holder(keeper, asset_id, consumer_address, erc721_address)
    else:
        agreement = keeper.agreement_manager.get_agreement(agreement_id)
        cond_ids = agreement.condition_ids
        access_cond_id = cond_ids[1]
        # FIX: `asset` was already resolved above; the original re-resolved the
        # same DID here, which is redundant and costs an extra registry lookup.
        nft_access_service_agreement = ServiceAgreement.from_ddo(service_type, asset)
        (nft_access_cond_id, nft_holder_cond_id) = \
            nft_access_service_agreement.generate_agreement_condition_ids(
                agreement_id, asset_id, consumer_address, keeper)
        # The on-chain agreement must reference exactly the condition ids the
        # DDO implies, otherwise someone registered a forged agreement.
        if [nft_holder_cond_id, nft_access_cond_id] != cond_ids:
            raise InvalidClientError(f"ServiceAgreement {agreement_id} doesn't match ddo")
        if is_nft_access_condition_fulfilled(
                agreement_id,
                access_cond_id,
                consumer_address,
                keeper):
            # FIX: condition already fulfilled on-chain means access was granted
            # earlier; the original left access_granted False here and raised.
            access_granted = True
        else:
            # If not granted, verification of agreement and conditions and fulfill
            access_granted = fulfill_nft_holder_and_access_condition(
                keeper,
                agreement_id,
                cond_ids,
                asset_id,
                service_agreement.get_number_nfts(),
                consumer_address,
                self.provider_account
            )
    if not access_granted:
        msg = ('Checking access permissions failed. Either consumer address does not have '
               'permission to consume this NFT or consumer address and/or service '
               'agreement '
               'id is invalid.')
        logger.warning(msg)
        raise InvalidClientError(msg)
def validate_owner(self, did, consumer_address):
    """Raise InvalidClientError unless `consumer_address` has owner access to `did`."""
    keeper = keeper_instance()
    if is_owner_granted(did, consumer_address, keeper):
        return
    message = ('Checking access permissions failed. Consumer address does not have '
               'permission to download this asset or consumer address and/or did '
               'is invalid.')
    logger.warning(message)
    raise InvalidClientError(message)
def validate_execute(self, agreement_id, workflow_did, consumer_address):
    """Validate (and if necessary trigger) a compute execution for the consumer.

    If the compute condition was not yet triggered on-chain, verifies the
    agreement against the DDO, requires the payment (lock) condition to be
    fulfilled, then fulfills the compute and escrow conditions as provider.
    Finally polls until the compute trigger is visible on-chain.

    Raises InvalidClaimError when the agreement was not paid and
    InvalidClientError when scheduling ultimately fails.
    """
    keeper = keeper_instance()
    # The agreement stores the asset DID; `workflow_did` itself is not used here.
    asset_id = keeper.agreement_manager.get_agreement(agreement_id).did
    did = id_to_did(asset_id)
    asset = DIDResolver(keeper.did_registry).resolve(did)
    if not was_compute_triggered(agreement_id, did, consumer_address, keeper):
        agreement = keeper.agreement_manager.get_agreement(agreement_id)
        cond_ids = agreement.condition_ids
        # Verify the agreement's condition ids match what the DDO implies.
        self.check_ddo(did, agreement_id, asset_id, consumer_address, keeper, cond_ids, ServiceTypes.CLOUD_COMPUTE)
        # Condition id order: [compute, lock(payment), escrow].
        compute_condition_status = keeper.condition_manager.get_condition_state(cond_ids[0])
        lock_condition_status = keeper.condition_manager.get_condition_state(cond_ids[1])
        escrow_condition_status = keeper.condition_manager.get_condition_state(
            cond_ids[2])
        logger.debug('ComputeExecutionCondition: %d' % compute_condition_status)
        logger.debug('LockPaymentCondition: %d' % lock_condition_status)
        logger.debug('EscrowPaymentCondition: %d' % escrow_condition_status)
        if lock_condition_status != ConditionState.Fulfilled.value:
            logger.debug('ServiceAgreement %s was not paid. Forbidden' % agreement_id)
            # NOTE(review): the analogous unpaid check in the access-proof path
            # raises InvalidClientError; InvalidClaimError here may be
            # intentional or an inconsistency -- confirm with callers.
            raise InvalidClaimError(
                f"ServiceAgreement {agreement_id} was not paid, LockPaymentCondition status is {lock_condition_status}")
        # Paid: fulfill the compute condition and release escrow as provider.
        fulfill_compute_condition(keeper, agreement_id, cond_ids, asset_id, consumer_address,
                                  self.provider_account)
        fulfill_escrow_payment_condition(keeper, agreement_id, cond_ids, asset,
                                         self.provider_account,
                                         ServiceTypes.CLOUD_COMPUTE)
    # Poll until the compute trigger is observed on-chain (bounded retries).
    iteration = 0
    access_granted = False
    while iteration < ConfigSections.PING_ITERATIONS:
        iteration = iteration + 1
        logger.debug('Checking if compute was granted. Iteration %d' % iteration)
        if not was_compute_triggered(agreement_id, did, consumer_address, keeper):
            # PING_SLEEP is in milliseconds.
            time.sleep(ConfigSections.PING_SLEEP / 1000)
        else:
            access_granted = True
            break
    if not access_granted:
        msg = (
            'Scheduling the compute execution failed. Either consumer address does not '
            'have permission to execute this workflow or consumer address and/or service '
            'agreement id is invalid.')
        logger.warning(msg)
        raise InvalidClientError(msg)
def validate_compute(self, agreement_id, execution_id, consumer_address):
    """Raise InvalidClientError unless the consumer may read this compute execution."""
    reason, allowed = is_allowed_read_compute(
        agreement_id, execution_id, consumer_address, None, has_bearer_token=True)
    if allowed:
        return
    raise InvalidClientError(reason)
class NeverminedJWTTokenValidator(TokenValidator):
def __init__(self, realm=None, **extra_attributes):
    """Initialize the validator and cache the provider account plus its JWK."""
    super().__init__(realm, **extra_attributes)
    # Provider identity: used as the expected `iss` claim and as the JWT key.
    self.provider_account = get_provider_account()
    self.provider_jwk = account_to_jwk(self.provider_account)
def authenticate_token(self, token_string):
    """Decode `token_string` with the provider JWK and return its claims.

    The `iss` claim is required and must equal the provider account address.
    Raises InvalidTokenError when signature verification fails.
    """
    issuer_constraint = {
        "essential": True,
        "value": self.provider_account.address,
    }
    try:
        return jwt.decode(
            token_string,
            self.provider_jwk,
            claims_options={"iss": issuer_constraint})
    except BadSignatureError as err:
        raise InvalidTokenError(description=err.description)
def validate_token(self, token, scopes):
| |
val in plot_z[model_number]:
idx = (np.abs(z_array_full_allmodels[model_number] - val)).argmin()
plot_snaps.append(idx)
# Then plot only those snapshots.
for count, snap in enumerate(plot_snaps):
label = model_tags[model_number]
ax[count].plot(mstar_bins[:-1] + mstar_bin_width*0.5,
SMF[model_number][snap],
color = ps.colors[model_number],
dashes=ps.dashes[model_number],
label = label)
print("SMF: Snap {0}\t{1}".format(snap, np.sum(SMF[model_number][snap])))
if model_number == 0:
tick_locs = np.arange(6.0, 12.0)
ax[count].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
fontsize = ps.global_fontsize)
ax[count].set_xlim([6.8, 10.3])
ax[count].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
fontsize = ps.global_labelsize)
ax[count].xaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax[count] = ps.adjust_axis(ax[count], ps.global_axiswidth,
ps.global_tickwidth,
ps.global_major_ticklength,
ps.global_minor_ticklength)
# Since y-axis is shared, only need to do this once.
ax[0].set_yscale('log', nonposy='clip')
ax[0].set_yticklabels([r"$\mathbf{10^{-5}}$",r"$\mathbf{10^{-5}}$",r"$\mathbf{10^{-4}}$", r"$\mathbf{10^{-3}}$",
r"$\mathbf{10^{-2}}$",r"$\mathbf{10^{-1}}$"])
ax[0].set_ylim([1e-5, 3e-1])
#ax[0].set_ylabel(r'\mathbf{$\log_{10} \Phi\ [\mathrm{Mpc}^{-3}\: \mathrm{dex}^{-1}]}$',
ax[0].set_ylabel(r'$\mathbf{\Phi\ [Mpc^{-3}\: dex^{-1}]}$',
fontsize = ps.global_labelsize)
# Now lets overplot the Observational Data.
obs.Get_Data_SMF()
caps = 5
ewidth = 1.5
ps.Plot_SMF_z6(ax[0], cosmology_allmodels[0].H(0).value/100.0, errorwidth=ewidth, capsize=caps)
ps.Plot_SMF_z7(ax[1], cosmology_allmodels[0].H(0).value/100.0, errorwidth=ewidth, capsize=caps)
ps.Plot_SMF_z8(ax[2], cosmology_allmodels[0].H(0).value/100.0, errorwidth=ewidth, capsize=caps)
####
delta_fontsize = 0
ax[0].text(0.7, 0.9, r"$\mathbf{z = 6}$", transform = ax[0].transAxes, fontsize = ps.global_fontsize - delta_fontsize)
ax[1].text(0.7, 0.9, r"$\mathbf{z = 7}$", transform = ax[1].transAxes, fontsize = ps.global_fontsize - delta_fontsize)
ax[2].text(0.7, 0.9, r"$\mathbf{z = 8}$", transform = ax[2].transAxes, fontsize = ps.global_fontsize - delta_fontsize)
leg = ax[0].legend(loc='lower left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False)
for t in leg.get_texts():
t.set_fontsize(ps.global_legendsize - 2)
plt.tight_layout()
outputFile1 = "{0}/{1}.{2}".format(output_dir, output_tag, output_format)
fig1.savefig(outputFile1, bbox_inches='tight')
print('Saved file to {0}'.format(outputFile1))
plt.close(fig1)
def plot_mstar_fej(mstar_bins, mstar_bin_width, z_array_full_allmodels,
                   mean_allmodels, std_allmodels, N_allmodels, model_tags,
                   output_dir, output_tag, output_format,
                   plot_models_at_snaps=None, plot_snaps_for_models=None):
    """
    Plots the fraction of baryons in the ejected reservoir as a function of
    stellar mass.

    Parameters
    ----------
    mstar_bins : List of floats
        Stellar mass bins that the data is binned on. Units are Msun.
    mstar_bin_width : Float
        The bin separation between the stellar mass bins.
    z_array_full_allmodels : 2D nested list of floats. Outer length is number
                             of models, inner is number of snapshots.
        The redshift at each snapshot for each model.
    mean_allmodels, std_allmodels, N_allmodels : 3D nested lists of floats.
        Outer length is number of models, next is number of snapshots in the
        model and final is the number of ``mstar_bins``.
        The mean and standard deviation for the ejected fraction in each stellar
        mass bin. Also the number of data points in each bin.
    model_tags : List of strings. Length is number of models.
        Legend entry for each model.
    output_dir : String
        Directory where the plot is saved.
    output_tag : String.
        Tag added to the name of the output file.
    output_format : String
        Format the plot is saved in.
    plot_models_at_snaps : 2D nested list of integers. Outer length is number
                           of models, optional
        If not ``None``, plots each model at the specified snapshots. That is,
        each panel will be for one model at the specified snapshots.
    plot_snaps_for_models : 2D nested list of integers. Outer length is number
                            of models, optional
        If not ``None``, plots all models at a single, specified snapshot. That
        is, each panel will be for all models at one specified snapshot.

    Returns
    ---------
    None. The figure is saved as "<output_dir>/<output_tag>.<output_format>".
    """
    # Delegate the data plotting; this function only styles axes and saves.
    fig1, ax, nrows = plot_2D_line(mstar_bins, mstar_bin_width,
                                   z_array_full_allmodels,
                                   mean_allmodels, std_allmodels, N_allmodels,
                                   model_tags, plot_models_at_snaps,
                                   plot_snaps_for_models)
    # Set variables for every column.
    # NOTE(review): the loop indexes ax[nrows-1, ax_count] with ax_count in
    # range(nrows) -- this presumes a square panel grid; confirm plot_2D_line.
    tick_locs = np.arange(4.0, 11.0)
    for ax_count in range(nrows):
        ax[nrows-1, ax_count].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                                         size = ps.global_fontsize)
        ax[nrows-1, ax_count].set_xlim([4.8, 10.2])
        ax[nrows-1, ax_count].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
        ax[nrows-1, ax_count].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
        ax[nrows-1, ax_count].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                                              fontsize = ps.global_fontsize)
    # Debug output of the tick labels/locations; assumes nrows > 1.
    labels = ax[1,0].xaxis.get_ticklabels()
    locs = ax[1,0].xaxis.get_ticklocs()
    for label, loc in zip(labels, locs):
        print("{0} {1}".format(label, loc))
    # Set variables for every row.
    # `tick_locs` here is unused while the set_yticklabels call stays commented.
    tick_locs = np.arange(-0.10, 0.80, 0.10)
    for ax_count in range(nrows):
        ax[ax_count, 0].set_ylabel(r'$\mathbf{\langle f_{ej}\rangle_{M_*}}$',
                                   size = ps.global_labelsize)
        ax[ax_count, 0].set_ylim([-0.02, 1.0])
        ax[ax_count, 0].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
        ax[ax_count, 0].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
        #ax[ax_count, 0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
        #                                fontsize = ps.global_fontsize)
    leg = ax[0,0].legend(loc='upper right', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)
    for t in leg.get_texts():
        t.set_fontsize(ps.global_legendsize)
    plt.tight_layout()
    # Panels share axes, so remove all inter-panel spacing.
    plt.subplots_adjust(wspace = 0.0, hspace = 0.0)
    outputFile1 = "{0}/{1}.{2}".format(output_dir, output_tag, output_format)
    fig1.savefig(outputFile1, bbox_inches='tight')
    print('Saved file to {0}'.format(outputFile1))
    plt.close(fig1)
def plot_mstar_SFR(mstar_bins, mstar_bin_width, z_array_full_allmodels,
                   mean_allmodels, std_allmodels, N_allmodels, model_tags,
                   output_dir, output_tag, output_format,
                   plot_models_at_snaps=None, plot_snaps_for_models=None):
    """
    Plots the star formation rate as a function of stellar mass.

    Parameters
    ----------
    mstar_bins : List of floats
        Stellar mass bins that the data is binned on. Units are Msun.
    mstar_bin_width : Float
        The bin separation between the stellar mass bins.
    z_array_full_allmodels : 2D nested list of floats. Outer length is number
                             of models, inner is number of snapshots.
        The redshift at each snapshot for each model.
    mean_allmodels, std_allmodels, N_allmodels : 3D nested lists of floats.
        Outer length is number of models, next is number of snapshots in the
        model and final is the number of ``mstar_bins``.
        The mean and standard deviation for the SFR in each stellar mass bin.
        Also the number of data points in each bin.
    model_tags : List of strings. Length is number of models.
        Legend entry for each model.
    output_dir : String
        Directory where the plot is saved.
    output_tag : String.
        Tag added to the name of the output file.
    output_format : String
        Format the plot is saved in.
    plot_models_at_snaps : 2D nested list of integers. Outer length is number
                           of models, optional
        If not ``None``, plots each model at the specified snapshots. That is,
        each panel will be for one model at the specified snapshots.
    plot_snaps_for_models : 2D nested list of integers. Outer length is number
                            of models, optional
        If not ``None``, plots all models at a single, specified snapshot. That
        is, each panel will be for all models at one specified snapshot.

    Returns
    ---------
    None. The figure is saved as "<output_dir>/<output_tag>.<output_format>".
    """
    # Delegate the data plotting; below is axis styling + save only.
    fig1, ax, nrows = plot_2D_line(mstar_bins, mstar_bin_width,
                                   z_array_full_allmodels, mean_allmodels,
                                   std_allmodels, N_allmodels, model_tags,
                                   plot_models_at_snaps, plot_snaps_for_models)
    # Shrink the y-label slightly relative to the global label size.
    delta_fontsize = 10
    # Set variables for every column.
    # NOTE(review): indexes ax[nrows-1, ax_count] with ax_count in range(nrows)
    # -- presumes a square panel grid; confirm plot_2D_line.
    tick_locs = np.arange(4.0, 11.0)
    for ax_count in range(nrows):
        ax[nrows-1, ax_count].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                                         size = ps.global_fontsize)
        ax[nrows-1, ax_count].set_xlim([4.8, 10.2])
        ax[nrows-1, ax_count].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
        ax[nrows-1, ax_count].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
        ax[nrows-1, ax_count].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                                              fontsize = ps.global_fontsize)
    # Debug output of the tick labels/locations; assumes nrows > 1.
    labels = ax[1,0].xaxis.get_ticklabels()
    locs = ax[1,0].xaxis.get_ticklocs()
    for label, loc in zip(labels, locs):
        print("{0} {1}".format(label, loc))
    # Set variables for every row.
    # `tick_locs` is unused while the set_yticklabels call stays commented.
    tick_locs = np.arange(-0.10, 0.80, 0.10)
    for ax_count in range(nrows):
        ax[ax_count, 0].set_ylabel(r'$\mathbf{\langle SFR\rangle_{M_*} [M_\odot \: yr^{-1}]}$',
                                   size = ps.global_labelsize-delta_fontsize)
        ax[ax_count, 0].set_yscale('log')
        #ax[ax_count, 0].set_ylim([-0.02, 1.0])
        #ax[ax_count, 0].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
        #ax[ax_count, 0].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
        #ax[ax_count, 0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
        #                                fontsize = ps.global_fontsize)
    leg = ax[0,0].legend(loc='upper right', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)
    for t in leg.get_texts():
        t.set_fontsize(ps.global_legendsize)
    plt.tight_layout()
    # Panels share axes, so remove all inter-panel spacing.
    plt.subplots_adjust(wspace = 0.0, hspace = 0.0)
    outputFile1 = "{0}/{1}.{2}".format(output_dir, output_tag, output_format)
    fig1.savefig(outputFile1, bbox_inches='tight')
    print('Saved file to {0}'.format(outputFile1))
    plt.close(fig1)
def plot_UVLF(MUV_bins, MUV_bin_width, z_array_full_allmodels,
cosmology_allmodels, UVLF, dustcorrected_UVLF, plot_z, model_tags,
output_dir, output_tag, output_format):
"""
Plots the UV luminosity function. That is, the number count of galaxies
binned on (Absolute) UV Magnitude.
Parameters
----------
MUV_bins : List of floats
UV Magnitude bins that the data is binned on.
MUV_bin_width : Float
The bin separation between the UV Magnitude bins.
z_array_reion_allmodels : 2D nested list of floats. Outer length is number
of models, inner is number of snapshots.
The redshift at each snapshot for each model.
cosmology_allmodels : List of class ``astropy.cosmology``. Length is number
of models.
``astropy`` class containing the cosmology for each model.
UVLF : 3D nested lists of floats. Outer length is number of models, next is
number of snapshots in the model and final is the number of
``MUV_bins``.
The UV luminosity function at each snapshot for each model. That is,
the number of galaxies within each UV magnitude bin (given by
``MUV_bins``).
dustcorrected_UVLF : Same as ``UVLF`` but corrected for dust attenuation.
plot_z : 2D nested list of floats. Outer length is equal to number of models.
The redshift at which we wish to plot the UV luminosity function for
each model. Specified by user in ``paper_plots.py``.
model_tags : List of strings. Length is number of models.
Legend entry for each model.
output_dir : String
Directory where the plot is saved.
output_tag : String.
Tag added to the name of the output file.
output_format : String
Format the plot is saved in.
Returns
---------
None. The figure is saved as "<output_dir>/<output_tag>.<output_format>".
"""
plot_dust = 1
fig1, ax = plt.subplots(nrows=1, ncols=len(plot_z[0]),
sharex='col', sharey='row', figsize=(16,6))
for model_number in range(len(z_array_full_allmodels)):
| |
match. Write 2 pixels
rowAsm.gen_loadstore_indexed(False, matchReg, regX, offX + 256*self.lineAdvance, "")
# pop this byte command out of the Write list, and reiterate with the same idx to get the next byte command
cmdBytesToWrite.pop(idx)
# make lists of unique byte and word values to write
uniqByteValues = []
uniqWordValues = []
for (offX,offY,byteCmd) in cmdBytesToWrite:
val = byteCmd[1]
if val not in uniqByteValues:
uniqByteValues.append(val)
for (offX,offY,byteCmd1,byteCmd2) in cmdWordsToWrite:
val = (byteCmd1[1] << 8) + byteCmd2[1]
if val not in uniqWordValues:
uniqWordValues.append(val)
# permute across all orderings of word writes to minimize number of loads
score,wordOrder = self.PermuteWordWriteOrder(rowNum, rowAsm.reg, uniqWordValues, [ ])
# we need a scratch register to use while writing bytes that don't match words
# choose one which doesn't destroy a useful register for the first word
if not rowAsm.reg.IsValid(regA):
scratchReg = regA
elif not rowAsm.reg.IsValid(regB):
scratchReg = regB
else:
scratchReg = regB
if len(wordOrder) > 0 and (wordOrder[0] & 0xff) == rowAsm.reg.GetValue(regB):
scratchReg = regA
# make list of byte values to write which don't match any bytes in any word
lonelyByteVals = [ ]
for (offX,offY,byteCmd) in cmdBytesToWrite:
matchWord = False
for (offXW, offYW, byteCmd1W, byteCmd2W) in cmdWordsToWrite:
if byteCmd[1] == byteCmd1W[1] or byteCmd[1] == byteCmd2W[1]:
matchWord = True
# if this byte doesn't match any words to write, add it to lonely byte list
if not matchWord and byteCmd[1] not in lonelyByteVals:
lonelyByteVals.append(byteCmd[1])
# fixme (micro-op): if len(cmdWordsToWrite) == 0, then search in lonelyByteVals for a byte
# which may be written in the following row. If found, put at end of lonelyByteVals list
# emit byte writes for those bytes which don't match any bytes in any word
for lonelyVal in lonelyByteVals:
rowAsm.gen_loadimm_accum(scratchReg, lonelyVal, "")
idx = 0
while idx < len(cmdBytesToWrite):
(offX,offY,byteCmd) = cmdBytesToWrite[idx]
# if this byte to write doesn't match the lonely byte value we're writing, then skip it
if byteCmd[1] != lonelyVal:
idx += 1
continue
# otherwise we will emit this byte write now
rowAsm.gen_loadstore_indexed(False, scratchReg, regX, offX + 256*self.lineAdvance, "")
# pop this byte command out of the Write list, and reiterate with the same idx to get the next byte command
cmdBytesToWrite.pop(idx)
# emit all words write (and matching byte writes)
while len(cmdWordsToWrite) > 0:
# pop command and write the word
(offXW, offYW, byteCmd1W, byteCmd2W) = cmdWordsToWrite.pop(0)
loadA = (not rowAsm.reg.IsValid(regA)) or (byteCmd1W[1] != rowAsm.reg.GetValue(regA))
loadB = (not rowAsm.reg.IsValid(regB)) or (byteCmd2W[1] != rowAsm.reg.GetValue(regB))
if loadA and loadB:
wordVal = (byteCmd1W[1] << 8) + byteCmd2W[1]
rowAsm.gen_loadimm_accum(regD, wordVal, "")
elif loadA:
rowAsm.gen_loadimm_accum(regA, byteCmd1W[1], "")
elif loadB:
rowAsm.gen_loadimm_accum(regB, byteCmd2W[1], "")
rowAsm.gen_loadstore_indexed(False, regD, regX, offXW + 256*self.lineAdvance, "") # std off,x
# write any matching bytes
idx = 0
while idx < len(cmdBytesToWrite):
(offX,offY,byteCmd) = cmdBytesToWrite[idx]
matchReg = None
if byteCmd[1] == rowAsm.reg.GetValue(regA):
matchReg = regA
elif byteCmd[1] == rowAsm.reg.GetValue(regB):
matchReg = regB
if matchReg == None:
idx += 1
continue
# we found a match. Write 2 pixels
rowAsm.gen_loadstore_indexed(False, matchReg, regX, offX + 256*self.lineAdvance, "")
# pop this byte command out of the Write list, and reiterate with the same idx to get the next byte command
cmdBytesToWrite.pop(idx)
# assert that there is nothing left to do
if len(cmdBytesToWrite) != 0:
raise Exception("Error: remaining cmdBytesToWrite after everything is done!")
if len(cmdWordsToWrite) != 0:
raise Exception("Error: remaining cmdWordsToWrite after everything is done!")
# return the generated assembly language code
return rowAsm
def PermuteWordWriteOrder(self, rowNum, regState, uniqWordValues, wordOrder):
    """Exhaustively search orderings of word writes; return (bestScore, bestOrder).

    An ordering scores points for (a) a byte of the first word already sitting
    in A or B per `regState`, (b) the likelihood the final word (or one of its
    bytes) is reusable in the next row, and (c) each byte load avoided between
    consecutive words. NOTE: O(n!) in len(uniqWordValues); acceptable for the
    small per-row word counts this compiler produces.
    """
    # Leaf: every word has been placed -- score this complete ordering.
    if len(uniqWordValues) == 0:
        score = 0.0
        if len(wordOrder) > 0:
            # 1 point if a byte in the first word matches an existing A/B value.
            firstWordWriteVal = wordOrder[0]
            if regState.IsValid(regA) and regState.GetValue(regA) == (firstWordWriteVal >> 8):
                score += 1.0
            elif regState.IsValid(regB) and regState.GetValue(regB) == (firstWordWriteVal & 0xff):
                score += 1.0
            # Reward orderings whose last value is likely useful in the next row.
            lastVal = wordOrder[-1]
            if rowNum < self.height - 1:
                wordWriteProb = self.wordWriteProbByRow[rowNum+1]
                byteWriteProb = self.byteWriteProbByRow[rowNum+1]
                if lastVal in wordWriteProb:
                    score += wordWriteProb[lastVal]
                byteProb = 0.0
                if (lastVal >> 8) in byteWriteProb:
                    byteProb = byteWriteProb[lastVal >> 8]
                if (lastVal & 0xff) in byteWriteProb:
                    byteProb = max(byteProb, byteWriteProb[lastVal & 0xff])
                score += byteProb
            # 1 point for each byte load avoided between consecutive words.
            for idx in range(len(wordOrder)-1):
                if (wordOrder[idx] & 0xff00) == (wordOrder[idx+1] & 0xff00):
                    score += 1.0
                if (wordOrder[idx] & 0xff) == (wordOrder[idx+1] & 0xff):
                    score += 1.0
        # Copy because the caller keeps mutating its wordOrder list.
        return (score, copy.copy(wordOrder))
    # Recursive case: try each remaining word in the next slot, keep the best.
    bestScore = 0.0  # float, for consistency with leaf scores (was int 0)
    bestOrder = None
    for idx in range(len(uniqWordValues)):
        nextWord = uniqWordValues.pop(idx)
        wordOrder.append(nextWord)
        tryScore, tryOrder = self.PermuteWordWriteOrder(rowNum, regState, uniqWordValues, wordOrder)
        # FIX: identity comparison `is None` instead of `== None`.
        if bestOrder is None or tryScore > bestScore:
            bestScore = tryScore
            bestOrder = tryOrder
        # Backtrack: restore both lists for the next branch.
        wordOrder.pop()
        uniqWordValues.insert(idx, nextWord)
    return (bestScore, bestOrder)
# *************************************************************************************************
# Application object: high-level processing, statistics gathering, final assembly dump
# *************************************************************************************************
class App:
def __init__(self, spriteFilename, asmFilename):
    """Set up the application with input sprite file and output asm file paths."""
    self.spriteFilename = spriteFilename  # path to the sprite definition text file
    self.asmFilename = asmFilename        # path of the assembly file to generate
    self.spriteList = []                  # Sprite objects parsed from the input
    self.groupNumber = None               # optional 'group = N' global from the input
def ReadInput(self):
    """Parse the sprite definition file into self.spriteList.

    Lines starting a '[Name]' section open a new Sprite; '*' begins a comment;
    'key = value' pairs before the first section set globals (only 'group' is
    recognized). All other lines are fed to the current sprite.
    """
    curSprite = None
    # FIX: use a context manager so the file handle is closed promptly
    # (the original `open(...).read()` leaked the handle).
    with open(self.spriteFilename) as spriteFile:
        spritetext = spriteFile.read()
    for line in spritetext.split("\n"):
        # remove comments and whitespace from line
        pivot = line.find("*")
        if pivot != -1:
            line = line[:pivot]
        line = line.strip()
        if len(line) < 1:
            continue
        if line[0] == '[' and line[-1] == ']':
            # new sprite definiton: close out the previous one first
            if curSprite is not None:
                curSprite.FinishDefinition()
            newSpriteName = line[1:-1]
            curSprite = Sprite(newSpriteName)
            self.spriteList.append(curSprite)
            continue
        if curSprite is None:
            # Before any sprite section only 'key = value' globals are valid.
            pivot = line.find('=')
            if pivot != -1:
                key = line[0:pivot].strip().lower()
                value = line[pivot+1:].strip()
                if key == "group":
                    self.groupNumber = int(value)
                    continue
            print(f"Warning: ignore line before sprite section: {line}")
            continue
        curSprite.ReadInputLine(line)
    # Close out the final sprite, if any.
    if curSprite is not None:
        curSprite.FinishDefinition()
def PrintRow(self, RowName, Values, datatype):
    """Print one statistics table row: a 16-char label then 8-char cells.

    `datatype` selects formatting (str as-is, int via str(), float with two
    decimals); None values render as empty cells. Cells are right-aligned and
    truncated to 8 characters.
    """
    # Label column is always exactly 16 characters: pad or truncate.
    print(RowName.ljust(16)[:16], end=' ')
    for val in Values:
        if val is None:
            cell = ""
        elif datatype == str:
            cell = val
        elif datatype == int:
            cell = str(val)
        elif datatype == float:
            cell = f"{val:.2f}"
        else:
            raise Exception("Invalid data type")
        # 8-character cell: truncate long values, right-align short ones.
        print(cell[:8] if len(cell) >= 8 else cell.rjust(8), end=' ')
    print()
def Calculate(self):
for sprite in self.spriteList:
sprite.Process1_PreCalc()
sprite.Process2_GenErase()
sprite.Process3_GenDraw(0)
if sprite.hasSinglePixelPos:
sprite.Process3_GenDraw(1)
# calculate and print statistics for each sprite
Names = []
Pixels = []
Storage = []
MaxCycles = []
CyclesPerPix = []
EraseBytes = []
EraseCycles = []
DrawLBytes = []
DrawLCycles = []
DrawRBytes = []
DrawRCycles = []
TotalErase = 0
TotalDrawL = 0
TotalDrawR = 0
# add data to lists
for sprite in self.spriteList:
name = sprite.name
# skip blank sprites
if sprite.numPixels == 0:
continue
Names.append(name)
Pixels.append(sprite.numPixels)
Storage.append(sprite.numSavedBytes)
EraseBytes.append(sprite.funcErase.metrics.bytes)
EraseCycles.append(sprite.funcErase.metrics.cycles)
DrawLBytes.append(sprite.funcDraw[0].metrics.bytes)
DrawLCycles.append(sprite.funcDraw[0].metrics.cycles)
if sprite.hasSinglePixelPos:
DrawRBytes.append(sprite.funcDraw[1].metrics.bytes)
DrawRCycles.append(sprite.funcDraw[1].metrics.cycles)
MaxDrawCycles = max(sprite.funcDraw[0].metrics.cycles, sprite.funcDraw[1].metrics.cycles)
else:
DrawRBytes.append(None)
DrawRCycles.append(None)
MaxDrawCycles = sprite.funcDraw[0].metrics.cycles
myMaxCycles = MaxDrawCycles + sprite.funcErase.metrics.cycles
MaxCycles.append(myMaxCycles)
CyclesPerPix.append(float(myMaxCycles) / float(sprite.numPixels))
TotalErase += sprite.funcErase.metrics.bytes
TotalDrawL += sprite.funcDraw[0].metrics.bytes
if sprite.hasSinglePixelPos:
TotalDrawR += sprite.funcDraw[1].metrics.bytes
# print summary
numSprites = len(self.spriteList)
print(f"Total number of sprites: {int(numSprites)}")
print(f"Total Erase code bytes: {int(TotalErase)}")
print(f"Total Draw Left code bytes: {int(TotalDrawL)}")
print(f"Total Draw Right code bytes: {int(TotalDrawR)}")
print()
# last column should be averages
Names.append("Average")
Pixels.append(sum(Pixels) / numSprites)
Storage.append(sum(Storage) / numSprites)
MaxCycles.append(sum(MaxCycles) / numSprites)
CyclesPerPix.append(sum(CyclesPerPix) / float(numSprites))
EraseBytes.append(sum(EraseBytes) / numSprites)
EraseCycles.append(sum(EraseCycles) / numSprites)
DrawLBytes.append(sum(DrawLBytes) / numSprites)
DrawLCycles.append(sum(DrawLCycles) / numSprites)
ValidDrawRBytes = [val for val in DrawRBytes if val is not None]
ValidDrawRCycles = [val for val in DrawRCycles if val is not None]
if len(ValidDrawRBytes) > 0:
DrawRBytes.append(sum(ValidDrawRBytes) / len(ValidDrawRBytes))
DrawRCycles.append(sum(ValidDrawRCycles) / len(ValidDrawRCycles))
# print tables
numCols = len(Names)
for startIdx in range(0, numCols, 8):
endIdx = min(startIdx+8, numCols);
self.PrintRow("Sprite Name", Names[startIdx:endIdx], str)
self.PrintRow("Pixels", Pixels[startIdx:endIdx], int)
self.PrintRow("Storage Bytes", Storage[startIdx:endIdx], int)
self.PrintRow("Max Cycles", MaxCycles[startIdx:endIdx], int)
self.PrintRow("Cycles/pixel", CyclesPerPix[startIdx:endIdx], float)
print("**************Erase:")
self.PrintRow("Code bytes", EraseBytes[startIdx:endIdx], int)
self.PrintRow("Clock cycles", EraseCycles[startIdx:endIdx], int)
print("**********Draw_Left:")
self.PrintRow("Code bytes", DrawLBytes[startIdx:endIdx], int)
self.PrintRow("Clock cycles", DrawLCycles[startIdx:endIdx], int)
if len(ValidDrawRBytes) > | |
<reponame>addumb/python-aliyun
# -*- coding:utf-8 -*-
# Copyright 2014, Quixey Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import datetime
import dateutil.parser
import json
import mox
import time
import unittest
from aliyun.ecs.model import (
AutoSnapshotPolicy,
AutoSnapshotExecutionStatus,
AutoSnapshotPolicyStatus,
Disk,
DiskMappingError,
Image,
Instance,
InstanceStatus,
InstanceType,
SecurityGroup,
SecurityGroupInfo,
SecurityGroupPermission,
Snapshot,
Zone
)
from aliyun.ecs import connection as ecs
class MockEcsInstance(object):
    """Minimal stand-in for an ECS instance: just an instance id and a zone id."""

    def __init__(self, instance_id, zone_id):
        # Store both identifiers exactly as supplied.
        self.instance_id, self.zone_id = instance_id, zone_id
class EcsConnectionTest(unittest.TestCase):
    """Base fixture: an EcsConnection whose low-level `get` is stubbed with mox."""

    def setUp(self):
        self.mox = mox.Mox()
        # Dummy region/credentials -- no real API traffic in these tests.
        self.conn = ecs.EcsConnection(region_id='r', access_key_id='a',
                                      secret_access_key='s')
        # Stub `get` so subclasses can record expected calls and replay them.
        self.mox.StubOutWithMock(self.conn, 'get')

    def tearDown(self):
        self.mox.UnsetStubs()
class GetAllRegionsTest(EcsConnectionTest):
    """Tests for EcsConnection.get_all_regions / get_all_region_ids."""

    def testSuccess(self):
        """Each Region dict in the DescribeRegions payload maps to an ecs.Region."""
        get_response = {
            'Regions': {
                'Region': [
                    {'RegionId': 'r1', 'LocalName': 'l1'},
                    {'RegionId': 'r2', 'LocalName': 'l2'}
                ]
            }
        }
        expected_result = [ecs.Region('r1', 'l1'), ecs.Region('r2', 'l2')]
        # Record the expected call on the stubbed connection, then replay.
        self.conn.get({'Action': 'DescribeRegions'}).AndReturn(get_response)
        self.mox.ReplayAll()
        self.assertEqual(expected_result, self.conn.get_all_regions())
        self.mox.VerifyAll()

    def testGetIds(self):
        """get_all_region_ids returns just the RegionId values."""
        get_response = {
            'Regions': {
                'Region': [
                    {'RegionId': 'r1', 'LocalName': 'l1'},
                    {'RegionId': 'r2', 'LocalName': 'l2'}
                ]
            }
        }
        expected_result = ['r1', 'r2']
        self.conn.get({'Action': 'DescribeRegions'}).AndReturn(get_response)
        self.mox.ReplayAll()
        self.assertEqual(expected_result, self.conn.get_all_region_ids())
        self.mox.VerifyAll()
class GetAllZonesTest(EcsConnectionTest):
    """Tests for EcsConnection.get_all_zones / get_all_zone_ids."""

    def testSuccess(self):
        """Zones are built from DescribeZones, including capability lists."""
        get_response = {
            'Zones': {
                'Zone': [
                    {'ZoneId': 'z1', 'LocalName': 'l1',
                     'AvailableResourceCreation': {
                         'ResourceTypes': ['Disk', 'Instance']
                     },
                     'AvailableDiskCategories': {
                         'DiskCategories': ['cloud', 'ephemeral']
                     }
                     },
                    {'ZoneId': 'z2', 'LocalName': 'l2',
                     'AvailableResourceCreation': {
                         'ResourceTypes': ['Instance']
                     },
                     'AvailableDiskCategories': {
                         'DiskCategories': []
                     }
                     }]
            }
        }
        # z2 has no disk categories, so that constructor argument is omitted.
        z1 = Zone('z1', 'l1', ['Disk', 'Instance'], ['cloud', 'ephemeral'])
        z2 = Zone('z2', 'l2', ['Instance'])
        self.conn.get({'Action': 'DescribeZones'}).AndReturn(get_response)
        self.mox.ReplayAll()
        self.assertEqual([z1, z2], self.conn.get_all_zones())
        self.mox.VerifyAll()

    def testZoneIds(self):
        """get_all_zone_ids returns just the ids from get_all_zones."""
        z1 = Zone('z1', 'l1')
        z2 = Zone('z2', 'l2')
        # Stub the higher-level get_all_zones rather than the HTTP layer.
        self.mox.StubOutWithMock(self.conn, 'get_all_zones')
        self.conn.get_all_zones().AndReturn([z1, z2])
        self.mox.ReplayAll()
        self.assertEqual(['z1', 'z2'], self.conn.get_all_zone_ids())
        self.mox.VerifyAll()
class GetAllClustersTest(EcsConnectionTest):
    """Tests for EcsConnection.get_all_clusters."""

    def testGetAllClusters(self):
        """Cluster ids are extracted from the DescribeClusters response."""
        cluster_records = [{'ClusterId': 'c1'}, {'ClusterId': 'c2'}]
        get_response = {'Clusters': {'Cluster': cluster_records}}
        self.conn.get({'Action': 'DescribeClusters'}).AndReturn(get_response)
        self.mox.ReplayAll()
        self.assertEqual(['c1', 'c2'], self.conn.get_all_clusters())
        self.mox.VerifyAll()
class GetAllInstanceStatusTest(EcsConnectionTest):
    """Tests for EcsConnection.get_all_instance_status / get_all_instance_ids."""

    def testSuccess(self):
        """Paginated DescribeInstanceStatus pages are flattened into one list."""
        # Two pages of results, as returned by the paginated `get`.
        get_response = [{
            'InstanceStatuses': {
                'InstanceStatus': [
                    {'InstanceId': 'i1', 'Status': 'running'},
                    {'InstanceId': 'i2', 'Status': 'stopped'}
                ]
            }
        },
            {
            'InstanceStatuses': {
                'InstanceStatus': [
                    {'InstanceId': 'i3', 'Status': 'running'},
                ]
            }
        }]
        expected_result = [InstanceStatus('i1', 'running'),
                           InstanceStatus('i2', 'stopped'),
                           InstanceStatus('i3', 'running')]
        self.conn.get({'Action': 'DescribeInstanceStatus', 'ZoneId': 'z'},
                      paginated=True).AndReturn(get_response)
        self.mox.ReplayAll()
        self.assertEqual(expected_result,
                         self.conn.get_all_instance_status(zone_id='z'))
        self.mox.VerifyAll()

    def testGetIds(self):
        """get_all_instance_ids returns only the instance ids across pages."""
        get_response = [{
            'InstanceStatuses': {
                'InstanceStatus': [
                    {'InstanceId': 'i1', 'Status': 'running'},
                    {'InstanceId': 'i2', 'Status': 'stopped'}
                ]
            }
        },
            {
            'InstanceStatuses': {
                'InstanceStatus': [
                    {'InstanceId': 'i3', 'Status': 'running'},
                ]
            }
        }]
        expected_result = ['i1', 'i2', 'i3']
        self.conn.get({'Action': 'DescribeInstanceStatus'},
                      paginated=True).AndReturn(get_response)
        self.mox.ReplayAll()
        self.assertEqual(expected_result,
                         self.conn.get_all_instance_ids())
        self.mox.VerifyAll()
class GetInstanceTest(EcsConnectionTest):
    """Tests for EcsConnection.get_instance."""

    def testSuccess(self):
        """A DescribeInstanceAttribute response is mapped onto an Instance."""
        # Raw API response. Note the bandwidth fields arrive as strings and
        # timestamps as ISO-8601 strings; the Instance is expected to hold
        # ints and parsed datetimes respectively.
        get_response = {
            'RegionId': 'r',
            'InstanceId': 'i1',
            'InstanceName': 'name',
            'ImageId': 'image',
            'InstanceType': 'type',
            'HostName': 'hostname',
            'Status': 'running',
            'InternetChargeType': 'chargetype',
            'InternetMaxBandwidthIn': '1',
            'InternetMaxBandwidthOut': '2',
            'CreationTime': '2014-02-05T00:52:32Z',
            'ExpiredTime': '2014-02-05T00:52:32Z',
            'InstanceChargeType': 'PostPaid',
            'SecurityGroupIds': {'SecurityGroupId': ['sg1', 'sg2']},
            'PublicIpAddress': {'IpAddress': ['ip1', 'ip2']},
            'InnerIpAddress': {'IpAddress': ['ip3', 'ip4']},
            'Description': '',
            'ClusterId': '',
            'OperationLocks': {'LockReason': []},
            'ZoneId': 'z'
        }
        expected_result = Instance(
            'i1', 'name', 'image', 'r', 'type', 'hostname', 'running',
            ['sg1', 'sg2'], ['ip1', 'ip2'], ['ip3', 'ip4'], 'chargetype', 1, 2,
            dateutil.parser.parse('2014-02-05T00:52:32Z'), dateutil.parser.parse('2014-02-05T00:52:32Z'), 'PostPaid', '', '', [], 'z')
        self.conn.get({'Action': 'DescribeInstanceAttribute',
                       'InstanceId': 'i1'}).AndReturn(get_response)
        self.mox.ReplayAll()
        self.assertEqual(expected_result,
                         self.conn.get_instance('i1'))
        self.mox.VerifyAll()
class InstanceActionsTest(EcsConnectionTest):
    """Tests for the simple per-instance actions (start/stop/reboot/etc.)."""

    def testStart(self):
        """start_instance issues StartInstance."""
        self.conn.get({'Action': 'StartInstance',
                       'InstanceId': 'i1'})
        self.mox.ReplayAll()
        self.conn.start_instance('i1')
        self.mox.VerifyAll()

    def testStop(self):
        """stop_instance defaults to a non-forced stop."""
        self.conn.get({'Action': 'StopInstance',
                       'InstanceId': 'i1',
                       'ForceStop': 'false'})
        self.mox.ReplayAll()
        self.conn.stop_instance('i1')
        self.mox.VerifyAll()

    def testForceStop(self):
        """stop_instance(force=True) sets ForceStop to 'true'."""
        self.conn.get({'Action': 'StopInstance',
                       'InstanceId': 'i1',
                       'ForceStop': 'true'})
        self.mox.ReplayAll()
        self.conn.stop_instance('i1', force=True)
        self.mox.VerifyAll()

    def testReboot(self):
        """reboot_instance defaults to a non-forced reboot."""
        self.conn.get({'Action': 'RebootInstance',
                       'InstanceId': 'i1',
                       'ForceStop': 'false'})
        self.mox.ReplayAll()
        self.conn.reboot_instance('i1')
        self.mox.VerifyAll()

    def testForceReboot(self):
        """reboot_instance(force=True) sets ForceStop to 'true'."""
        self.conn.get({'Action': 'RebootInstance',
                       'InstanceId': 'i1',
                       'ForceStop': 'true'})
        self.mox.ReplayAll()
        self.conn.reboot_instance('i1', force=True)
        self.mox.VerifyAll()

    def testDelete(self):
        """delete_instance issues DeleteInstance."""
        self.conn.get({'Action': 'DeleteInstance',
                       'InstanceId': 'i1'})
        self.mox.ReplayAll()
        self.conn.delete_instance('i1')
        self.mox.VerifyAll()

    def testReplaceSystemDisk(self):
        """replace_system_disk returns the new DiskId from the response."""
        self.conn.get({
            'Action': 'ReplaceSystemDisk',
            'InstanceId': 'i',
            'ImageId': 'img'}).AndReturn({'DiskId': 'd'})
        self.mox.ReplayAll()
        self.assertEqual('d', self.conn.replace_system_disk('i', 'img'))
        self.mox.VerifyAll()

    def testJoinSecurityGroup(self):
        """join_security_group issues JoinSecurityGroup."""
        self.conn.get({'Action': 'JoinSecurityGroup',
                       'InstanceId': 'i1',
                       'SecurityGroupId': 'sg1'})
        self.mox.ReplayAll()
        self.conn.join_security_group('i1', 'sg1')
        self.mox.VerifyAll()

    def testLeaveSecurityGroup(self):
        """leave_security_group issues LeaveSecurityGroup."""
        self.conn.get({'Action': 'LeaveSecurityGroup',
                       'InstanceId': 'i1',
                       'SecurityGroupId': 'sg1'})
        self.mox.ReplayAll()
        self.conn.leave_security_group('i1', 'sg1')
        self.mox.VerifyAll()
class DiskActionsTest(EcsConnectionTest):
    """Tests for disk creation, attachment, and lifecycle actions."""

    def testCreateDiskSizeFull(self):
        """create_disk with a size sends all naming fields and Size."""
        self.conn.get({'Action': 'CreateDisk',
                       'ZoneId': 'z1',
                       'DiskName': 'name',
                       'Description': 'desc',
                       'Size': 5}).AndReturn({'DiskId': 'd'})
        self.mox.ReplayAll()
        self.conn.create_disk('z1', 'name', 'desc', 5, None)
        self.mox.VerifyAll()

    def testCreateDiskSnapshot(self):
        """create_disk from a snapshot returns the new DiskId."""
        self.conn.get({'Action': 'CreateDisk',
                       'ZoneId': 'z1',
                       'SnapshotId': 'snap'}).AndReturn({'DiskId': 'd1'})
        self.mox.ReplayAll()
        self.assertEqual('d1', self.conn.create_disk('z1', snapshot_id='snap'))
        self.mox.VerifyAll()

    def testAttachDisk(self):
        """attach_disk issues AttachDisk with device and delete-with-instance."""
        self.conn.get({'Action': 'AttachDisk',
                       'InstanceId': 'i1',
                       'Device': 'dev',
                       'DeleteWithInstance': True,
                       'DiskId': 'd1'})
        self.mox.ReplayAll()
        self.conn.attach_disk('i1', 'd1', 'dev', True)
        self.mox.VerifyAll()

    def testAddDisk(self):
        """add_disk looks up the instance zone, creates and attaches the disk."""
        self.mox.StubOutWithMock(self.conn, 'get_instance')
        self.conn.get_instance('i1').AndReturn(MockEcsInstance('i1', 'z1'))
        self.mox.StubOutWithMock(self.conn, 'create_disk')
        self.conn.create_disk('z1', 'name', 'desc', None, 'snap').AndReturn('d')
        self.mox.StubOutWithMock(self.conn, 'attach_disk')
        self.conn.attach_disk('i1', 'd', 'dev', True)
        self.mox.ReplayAll()
        d = self.conn.add_disk('i1', None, 'snap', 'name', 'desc', 'dev', True)
        self.assertEqual(d, 'd')
        self.mox.VerifyAll()

    def testResetDisk(self):
        """reset_disk issues ResetDisk with the snapshot id."""
        self.conn.get({'Action': 'ResetDisk', 'DiskId': 'd', 'SnapshotId': 's'})
        self.mox.ReplayAll()
        self.conn.reset_disk('d', 's')
        self.mox.VerifyAll()

    def testDeleteDisk(self):
        """delete_disk issues DeleteDisk."""
        self.conn.get({'Action': 'DeleteDisk',
                       'DiskId': 'd1'})
        self.mox.ReplayAll()
        self.conn.delete_disk('d1')
        self.mox.VerifyAll()

    def testCreateDiskArgs(self):
        """create_disk must reject specifying both size and snapshot_id."""
        # Fixed: previously the test silently passed if no exception was
        # raised, and used Python-2-only `except ecs.Error, e:` syntax
        # (the rest of the file already uses `as`, cf. testTooManySecurityGroups).
        try:
            self.conn.create_disk('i1', size=5, snapshot_id='snap')
        except ecs.Error as e:
            self.assertTrue(str(e).startswith("Use size or snapshot_id."))
        else:
            self.fail('Expected ecs.Error when both size and snapshot_id given')

    def testDetachDisk(self):
        """detach_disk issues DetachDisk."""
        self.conn.get({'Action': 'DetachDisk',
                       'InstanceId': 'i',
                       'DiskId': 'd'})
        self.mox.ReplayAll()
        self.conn.detach_disk('i', 'd')
        self.mox.VerifyAll()

    def testModifyDisk(self):
        """modify_disk issues ModifyDiskAttribute with all fields."""
        self.conn.get({'Action': 'ModifyDiskAttribute',
                       'DiskId': 'd',
                       'DiskName': 'name',
                       'Description': 'desc',
                       'DeleteWithInstance': True})
        self.mox.ReplayAll()
        self.conn.modify_disk('d', 'name', 'desc', True)
        self.mox.VerifyAll()

    def testReInitDisk(self):
        """reinit_disk issues ReInitDisk."""
        self.conn.get({'Action': 'ReInitDisk', 'DiskId': 'd'})
        self.mox.ReplayAll()
        self.conn.reinit_disk('d')
        self.mox.VerifyAll()

    def testInstanceDisks(self):
        """describe_instance_disks delegates to describe_disks(instance_id=...)."""
        d1 = Disk('d1', 'system', 'cloud', 20)
        d2 = Disk('d2', 'system', 'cloud', 20)
        d3 = Disk('d3', 'system', 'cloud', 20)
        self.mox.StubOutWithMock(self.conn, 'describe_disks')
        self.conn.describe_disks(instance_id='i').AndReturn([d1, d2, d3])
        self.mox.ReplayAll()
        self.assertEqual([d1, d2, d3], self.conn.describe_instance_disks('i'))
        self.mox.VerifyAll()
class ModifyInstanceTest(EcsConnectionTest):
    """Tests for EcsConnection.modify_instance."""

    def testModifyAll(self):
        """All keyword args map onto their ModifyInstanceAttribute fields."""
        self.conn.get({'Action': 'ModifyInstanceAttribute',
                       'InstanceId': 'i1',
                       'InstanceName': 'name',
                       'Password': 'pw',
                       'HostName': 'name',
                       'SecurityGroupId': 'sg1',
                       'Description': 'desc'})
        self.mox.ReplayAll()
        self.conn.modify_instance(
            'i1', new_instance_name='name', new_password='pw',
            new_hostname='name', new_security_group_id='sg1',
            new_description='desc')
        self.mox.VerifyAll()
class ModifyInstanceSpecTest(EcsConnectionTest):
    """Tests that modify_instance_spec only sends the fields it is given."""

    def testModifyInstanceSpecType(self):
        """Only InstanceType is sent when only instance_type is given."""
        self.conn.get({'Action': 'ModifyInstanceSpec',
                       'InstanceId': 'i1',
                       'InstanceType': 'type1'})
        self.mox.ReplayAll()
        self.conn.modify_instance_spec('i1', instance_type='type1')
        self.mox.VerifyAll()

    def testModifyInstanceSpecNetIn(self):
        """Only InternetMaxBandwidthIn is sent when only that arg is given."""
        self.conn.get({'Action': 'ModifyInstanceSpec',
                       'InstanceId': 'i1',
                       'InternetMaxBandwidthIn': 1})
        self.mox.ReplayAll()
        self.conn.modify_instance_spec('i1', internet_max_bandwidth_in=1)
        self.mox.VerifyAll()

    def testModifyInstanceSpecNetOut(self):
        """Only InternetMaxBandwidthOut is sent when only that arg is given."""
        self.conn.get({'Action': 'ModifyInstanceSpec',
                       'InstanceId': 'i1',
                       'InternetMaxBandwidthOut': 1})
        self.mox.ReplayAll()
        self.conn.modify_instance_spec('i1', internet_max_bandwidth_out=1)
        self.mox.VerifyAll()

    def testModifyInstanceSpecAll(self):
        """All three fields are sent together when all args are given."""
        self.conn.get({'Action': 'ModifyInstanceSpec',
                       'InstanceId': 'i1',
                       'InstanceType': 'type1',
                       'InternetMaxBandwidthIn': 1,
                       'InternetMaxBandwidthOut': 2})
        self.mox.ReplayAll()
        self.conn.modify_instance_spec('i1', instance_type='type1',
                                       internet_max_bandwidth_in=1,
                                       internet_max_bandwidth_out=2)
        self.mox.VerifyAll()
class CreateInstanceTest(EcsConnectionTest):
    """Tests for EcsConnection.create_instance parameter mapping."""

    def testMinimalParams(self):
        """Only the required CreateInstance fields are sent for minimal args."""
        get_response = {'InstanceId': 'i1'}
        self.conn.get({'Action': 'CreateInstance',
                       'ImageId': 'image',
                       'SecurityGroupId': 'sg1',
                       'InstanceChargeType': 'PostPaid',
                       'InstanceType': 'type'}).AndReturn(get_response)
        self.mox.ReplayAll()
        self.assertEqual(
            'i1',
            self.conn.create_instance('image', 'type', 'sg1', instance_charge_type='PostPaid'))
        self.mox.VerifyAll()

    def testMinimalDisks(self):
        """A (category, size) tuple maps to DataDisk.1.* request fields."""
        get_response = {'InstanceId': 'i1'}
        self.conn.get({'Action': 'CreateInstance',
                       'ImageId': 'image',
                       'SecurityGroupId': 'sg1',
                       'InstanceType': 'type',
                       'InstanceChargeType': 'PostPaid',
                       'DataDisk.1.Category': 'cloud',
                       'DataDisk.1.Size': 1024}).AndReturn(get_response)
        self.mox.ReplayAll()
        disks = [('cloud', 1024)]
        self.assertEqual(
            'i1',
            self.conn.create_instance('image', 'type', 'sg1', data_disks=disks, instance_charge_type='PostPaid'))
        self.mox.VerifyAll()

    def testConflictingDisk(self):
        """A disk tuple with both size and snapshot must raise DiskMappingError."""
        # Fixed: the old version used Python-2-only `except DiskMappingError, e:`
        # syntax, asserted a tautology (the except clause already guarantees the
        # type), and silently passed when no exception was raised at all.
        self.mox.ReplayAll()
        disks = [('cloud', 1024, 'snap')]
        try:
            self.conn.create_instance('image', 'type', 'sg1', data_disks=disks)
        except DiskMappingError:
            pass
        else:
            self.fail('Expected DiskMappingError for conflicting disk spec')
        self.mox.VerifyAll()

    def testAllParams(self):
        """Every optional argument maps onto its CreateInstance request field."""
        get_response = {'InstanceId': 'i1'}
        self.conn.get({'Action': 'CreateInstance',
                       'ImageId': 'image',
                       'SecurityGroupId': 'sg1',
                       'InstanceType': 'type',
                       'InstanceName': 'name',
                       'InternetMaxBandwidthIn': '1',
                       'InternetMaxBandwidthOut': '2',
                       'HostName': 'hname',
                       'Password': 'pw',
                       'SystemDisk.Category': 'cloud',
                       'InternetChargeType': 'PayByBandwidth',
                       'InstanceChargeType': 'PostPaid',
                       'DataDisk.1.Category': 'cloud',
                       'DataDisk.1.Size': 5,
                       'DataDisk.1.Description': 'dd-1-desc',
                       'DataDisk.1.DiskName': 'dd-1-name',
                       'DataDisk.1.Device': '/dev/xvd-testing',
                       'DataDisk.2.Category': 'ephemeral',
                       'DataDisk.2.SnapshotId': 'snap',
                       'Description': 'desc',
                       'ZoneId': 'test-zone-a'}).AndReturn(
            get_response)
        # Disks may also be given as dicts with full per-disk options.
        disks = [
            {
                'category': 'cloud',
                'size': 5,
                'description': 'dd-1-desc',
                'name': 'dd-1-name',
                'device': '/dev/xvd-testing'
            },
            {'category': 'ephemeral', 'snapshot_id': 'snap'}
        ]
        self.mox.ReplayAll()
        self.assertEqual(
            'i1',
            self.conn.create_instance(
                'image', 'type', 'sg1', instance_name='name',
                internet_max_bandwidth_in=1, internet_max_bandwidth_out=2,
                hostname='hname', password='pw', system_disk_type='cloud',
                internet_charge_type='PayByBandwidth',
                instance_charge_type='PostPaid',
                data_disks=disks, description='desc', zone_id='test-zone-a'))
        self.mox.VerifyAll()
class CreateAndStartInstanceTest(EcsConnectionTest):
    def setUp(self):
        """Stub out the sub-operations so each test records its own sequence."""
        super(CreateAndStartInstanceTest, self).setUp()
        self.mox.StubOutWithMock(self.conn, 'create_instance')
        self.mox.StubOutWithMock(self.conn, 'join_security_group')
        self.mox.StubOutWithMock(self.conn, 'start_instance')
        self.mox.StubOutWithMock(self.conn, 'get_instance')
        # Stub time.sleep so polling tests run instantly.
        self.mox.StubOutWithMock(time, 'sleep')
    def testTooManySecurityGroups(self):
        """More than five total security groups must be rejected."""
        try:
            self.conn.create_and_start_instance(
                'image', 'type', 'sg1',
                additional_security_group_ids=[
                    'sg2', 'sg3', 'sg4', 'sg5', 'sg6'])
            self.fail('Should throw error if too many security groups')
        except ecs.Error as err:
            self.assertTrue('max 5' in str(err))
def testWithMinimalParams(self):
self.conn.create_instance(
'image', 'type', 'sg1',
hostname=None, instance_name=None, internet_charge_type=None,
internet_max_bandwidth_in=None, internet_max_bandwidth_out=None,
password=<PASSWORD>, system_disk_type=None, data_disks=[],
instance_charge_type='PostPaid',
description=None, zone_id=None).AndReturn('i1')
self.conn.get({
'Action': 'AllocatePublicIpAddress',
'InstanceId': 'i1'
})
time.sleep(mox.IsA(int))
self.conn.start_instance('i1')
self.mox.ReplayAll()
self.assertEqual('i1', self.conn.create_and_start_instance(
'image', 'type', 'sg1', block_till_ready=False, instance_charge_type='PostPaid'))
self.mox.VerifyAll()
    def testWithAllParams(self):
        """All args are forwarded; assign_public_ip=False skips IP allocation."""
        self.conn.create_instance(
            'image', 'type', 'sg1', instance_name='name',
            internet_max_bandwidth_in=1, internet_max_bandwidth_out=2,
            hostname='hname', password='pw', system_disk_type='cloud',
            internet_charge_type='PayByBandwidth',
            instance_charge_type='PostPaid',
            data_disks=[('cloud', 5)], description='desc',
            zone_id='test-zone-a').AndReturn('i1')
        time.sleep(mox.IsA(int))
        self.conn.start_instance('i1')
        self.mox.ReplayAll()
        self.assertEqual('i1', self.conn.create_and_start_instance(
            'image', 'type', 'sg1', instance_name='name',
            internet_max_bandwidth_in=1, internet_max_bandwidth_out=2,
            hostname='hname', password='pw', system_disk_type='cloud',
            internet_charge_type='PayByBandwidth', assign_public_ip=False,
            instance_charge_type='PostPaid',
            block_till_ready=False, data_disks=[('cloud', 5)],
            description='desc', zone_id='test-zone-a'))
        self.mox.VerifyAll()
def testWithAdditionalSecurityGroupsNoBlock(self):
self.conn.create_instance(
'image', 'type', 'sg1',
hostname=None, instance_name=None, internet_charge_type=None,
internet_max_bandwidth_in=None, internet_max_bandwidth_out=None,
password=<PASSWORD>, system_disk_type=None, data_disks=[], instance_charge_type='PostPaid',
description=None, zone_id=None).AndReturn('i1')
time.sleep(mox.IsA(int))
self.conn.join_security_group('i1', 'sg2')
self.conn.join_security_group('i1', 'sg3')
self.conn.get({
'Action': 'AllocatePublicIpAddress',
'InstanceId': 'i1'
})
time.sleep(mox.IsA(int))
self.conn.start_instance('i1')
self.mox.ReplayAll()
self.assertEqual('i1', self.conn.create_and_start_instance(
'image', 'type', 'sg1',
additional_security_group_ids=['sg2', 'sg3'],
instance_charge_type='PostPaid',
block_till_ready=False))
self.mox.VerifyAll()
def testWithBlocking(self):
instance_starting = Instance(
'i1', None, None, None, None, None, 'Starting', None,
None, None, None, None, None, None, None, None, None, None, None, None)
instance_running = Instance(
'i1', None, None, None, None, None, 'Running', None,
None, None, None, None, None, None, None, None, None, None, None, None)
self.conn.create_instance(
'image', 'type', 'sg1',
hostname=None, instance_name=None, internet_charge_type=None,
internet_max_bandwidth_in=None, internet_max_bandwidth_out=None,
password=<PASSWORD>, system_disk_type=None, data_disks=[],
instance_charge_type='PostPaid',
description=None, zone_id=None).AndReturn('i1')
self.conn.get({
'Action': 'AllocatePublicIpAddress',
'InstanceId': 'i1'
})
time.sleep(mox.IsA(int))
self.conn.start_instance('i1')
time.sleep(mox.IsA(int))
self.conn.get_instance('i1').AndReturn(instance_starting)
time.sleep(mox.IsA(int))
self.conn.get_instance('i1').AndReturn(instance_starting)
time.sleep(mox.IsA(int))
self.conn.get_instance('i1').AndReturn(instance_running)
self.mox.ReplayAll()
self.assertEqual('i1', self.conn.create_and_start_instance(
'image', 'type', 'sg1', instance_charge_type='PostPaid'))
self.mox.VerifyAll()
def testWithBlockingTimesOut(self):
instance_starting = Instance(
'i1', None, | |
this data will not load as
stated, it must be on one line.)
A set of examples X1 => Y1, X2 => Y2, and X3 => Y3 will be generated,
forming one episode. However, Y1 => X2 and Y2 => X3 are not created as
separate examples by default.
To change this behavior, you can set opt['label_turns']. The default
value is 'secondspeaker' (i.e., the second speaker's utterances are
used as labels), but 'firstspeaker' and 'both' are also options. In the
case of 'both', two episodes are generated for each conversation.
"""
    def __init__(self, opt, shared=None):
        """
        Initialize the teacher, loading episodes from the datafile when
        this is the primary (non-shared) instance.
        """
        super().__init__(opt, shared)
        if not shared:
            # Primary instance: parse the conversations datafile.
            self.episodes = []
            self.num_exs = 0
            self.label_turns = opt.get('label_turns')
            if opt.get('conversationteacher_datafile') is not None:
                self._setup_data(opt.get('conversationteacher_datafile'))
        else:
            # Shared copy: reuse the already-loaded episodes and recount.
            # NOTE(review): label_turns is not set on shared copies — confirm
            # it is only needed during _setup_data.
            self.episodes = shared['episodes']
            self.num_exs = sum(len(e) for e in self.episodes)
        self.id = opt['task']
        self.reset()
def share(self):
"""
Share the episodes.
"""
shared = super().share()
shared['episodes'] = self.episodes
return shared
    def num_examples(self):
        """
        Return the total number of examples across all episodes.
        """
        return self.num_exs
    def num_episodes(self):
        """
        Return the number of episodes loaded from the data.
        """
        return len(self.episodes)
    def get(self, episode_idx, entry_idx=None):
        """
        Get a specific example from the dataset as a Message.

        NOTE(review): the default entry_idx=None would fail as a list index;
        callers appear to always pass an integer — confirm.
        """
        return Message(self.episodes[episode_idx][entry_idx])
def _setup_data(self, path):
logging.info("[loading data from json file into task:" + path + "]")
self.episodes = []
self.num_exs = 0
eps = []
conversations = Conversations(path)
self.num_exs = 0
for conv in conversations:
if conv.context:
warn_once(
'At least one of these conversations contains a context, which is not being used'
)
turns = [t for t in conv.turns if t.get('id') != 'context']
if len(turns) != len(conv.turns):
warn_once(
'At least one of these conversations contains a context within the dialogue, which is being discarded'
)
turns.insert(0, Message({'text': '__SILENCE__'}))
# train on odd turns as labels (turns w/ first speaker)
if self.label_turns in ['firstspeaker', 'both']:
eps = self._get_ep_from_turns(turns[::2], turns[1::2])
if eps:
self.episodes.append(eps)
self.num_exs += len(eps)
# train on even turns as labels (turns w/ second speaker)
if self.label_turns in ['secondspeaker', 'both']:
eps = self._get_ep_from_turns(turns[1::2], turns[2::2])
if eps:
self.episodes.append(eps)
self.num_exs += len(eps)
def _get_ep_from_turns(self, xturns, yturns):
eps = []
for xturn, yturn in zip(xturns, yturns):
turn = {}
turn['text'] = xturn.get('text').strip()
turn['labels'] = [yturn.get('text').strip()]
turn['episode_done'] = False
eps.append(turn)
if eps:
eps[-1]['episode_done'] = True
return eps
class AbstractImageTeacher(FixedDialogTeacher):
"""
Abstract class to allow easier creation of image + dialogue tasks.
This class handles creating image features via ImageLoader if applicable
(resnet, resnext variants) or loading existing image features from a dict
path as per get_image_features_path().
Important methods and properties (override in subclass if needed):
- get_data_path(): where data file is found (default: <datapath>/<task>)
- get_image_path(): where images found (default: <datapath>/<task>/images)
- get_image_features_path(): dict of image features (default:
<datapath>/<task>/image_features)
- @property image_id_key: which key in data file objects represents image_id
- @property text_key: which key in data file objects represents text
Note: Assumes data files are named <dt>.json
@abstractmethod image_id_to_image_path() must be implemented in subclass
Example with the key defaults (but the keys can be customized):
.. code-block:: python
obs = {
'text': <caption>,
'image': <image features if specified else image>
}
"""
    def __init__(self, opt, shared=None):
        """
        Initialize the image teacher: load the data index and, when an image
        model is active, set up image features (shared or computed).
        """
        super().__init__(opt, shared)
        self.opt = opt
        # Task name without the "teacher:" style prefix, if present.
        self.task = opt['task'].split(':')[1] if ':' in opt['task'] else opt['task']
        self.data_path = self.get_data_path(opt)
        self.data = self.load_data(self.data_path, self.opt)
        self.datatype = DatatypeHelper.fold(opt['datatype'])
        # Example of available models: 'resnet152', 'resnext101_32x48d_wsl',
        # and ImageLoader supports other resnet and resnext models too
        # Raises an Exception if not valid
        self._validate_image_mode_name(opt.get('image_mode'))
        # IMPORTANT NOTE: this teacher will be instantiated twice. The first
        # by build_dict in which case the image_mode is to 'no_image_model' to
        # avoid calculating image features twice.
        self.image_mode = opt.get('image_mode')
        # Not using default image_mode parameter b/c there is a normalization
        # (or bug) somewhere in build_dict that is setting it to none
        self.include_image = opt.get('image_mode') != 'no_image_model'
        self.image_path = self.get_image_path(opt)
        self.image_loader = None
        self.image_features_dim = opt.get('image_features_dim')
        # Zero vector used when an image has no precomputed features.
        self.blank_image_features = torch.FloatTensor(self.image_features_dim).fill_(0)
        if shared and 'data' in shared:
            # Shared copy: reuse data/loader/features from the primary teacher.
            self.data = shared['data']
            self.image_loader = shared['image_loader']
            if 'image_features_dict' in shared:
                self.image_features_dict = shared['image_features_dict']
        elif self.include_image:
            self.setup_image_features(self.data_path)
        else:
            # This will happen when building the dictionary - is normal
            # build_dict sets image_mode to 'none'
            warn_once('AbstractImageTeacher self.include_image was False')
            self.image_features_dict = None
        # TODO: won't need this after we have proper logging levels set
        self.__verbose = False
        self.reset()
def get_available_image_mode_names(self):
"""
Available image model names.
resnet and resnext variants available from the ImageLoader. resnext101_XXXXX_wsl
is the open-sourced FB AI model (960m images, 1.5k hashtags, finetuned on
ImageNet).
"""
available_model_names = ImageLoader.get_available_model_names()
return ['no_image_model', 'raw', 'ascii'] + available_model_names
def _validate_image_mode_name(self, a):
"""
Validate the image_mode passed in.
Needed because image_mode used elsewhere in ParlAI is not always consistent with
what the image teacher allows.
"""
if not isinstance(a, str):
raise argparse.ArgumentTypeError(
'%s must be a string representing image model name' % a
)
available_model_names = self.get_available_image_mode_names()
if a not in available_model_names:
raise argparse.ArgumentTypeError(
'\"%s\" unknown image model name. Choose from: %s. Currently suggested resnet is resnet152 and resnext is resnext101_32x48d_wsl.'
% (a, available_model_names)
)
return a
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
# Be sure to call super() if overriding this method b/c
# AbstractImageTeacher has necessary params
parser = super().add_cmdline_args(parser, partial_opt)
agent = parser.add_argument_group('AbstractImageTeacher Arguments')
agent.add_argument(
'--image-path',
type=str,
default=None,
help='Optional argument to specify where images for dataset are'
'stored if already downloaded. Most tasks will download the images'
'if not present on the < datapath > / < task > _images / * and * if'
'this argument is not specified.',
)
agent.add_argument(
'--image-features-dim',
type=int,
default=2048,
help='Specify the size of image features Tensors.',
)
return parser
    @property
    def image_id_key(self):
        """
        Which key in the input data dict objects uniquely identify each image.

        Common image keys are "image_id" or "image_num". May be overridden by
        subclass to match the data file's schema.
        """
        return 'image_id'
    @property
    def text_key(self):
        """
        Which key in the input data dict objects identifies the text.

        Common keys are "text" or "comment". May be overridden by subclass
        to match the data file's schema.
        """
        return 'text'
    @abstractmethod
    def image_id_to_image_path(self, image_id):
        """
        Get the path of the image on disk for the given image id.

        Must be implemented by subclass.
        """
        pass
def get_data_path(self, opt):
"""
Determines path to the data file.
"""
task_name = opt['task'].split(':')[1] if ':' in opt['task'] else opt['task']
data_path = os.path.join(opt['datapath'], task_name)
return data_path
def get_image_path(self, opt):
"""
Return the path to the data directory and to the image directory.
Is based on opt fields: task, datatype (train, valid, test), datapath.
Subclass can override this.
"""
data_path = self.get_data_path(opt)
if opt.get('image_path', None):
image_path = opt['image_path']
else:
# other common choice: .join(opt['datapath'], task_name + '_images')
image_path = os.path.join(data_path, 'images')
return image_path
def get_image_features_path(self, task, image_model_name, dt):
"""
Image features for the dataset images are stored here.
Can be overridden in subclass to use custom paths. Image features can be manually
copied into this directory or in the case of ImageLoader eligible models, they
will be built and stored here if not already there.
"""
# In default implementation, self.data_path already has task name added
image_features_path = os.path.join(self.data_path, 'image_features')
PathManager.mkdirs(image_features_path)
return os.path.join(
image_features_path, '%s_%s_%s_features_dict' % (task, image_model_name, dt)
)
def is_image_mode_buildable(self, model_name):
"""
Is buildable if features can be calculated by ImageLoader.
Users may wish to compute features for the dataset offline and use in the model,
in which case, the image model should return False and get_image_features()
should be overridden in subclass.
"""
return model_name in ImageLoader.get_available_model_names()
def load_data(self, data_path, opt):
"""
Loading the data file, which is the index to the images and text.
It is often a .json file with the name of the <datatype>.json (i.e.
train.json). Stores in self.data.
Can be override by subclass.
"""
dt = DatatypeHelper.fold(opt['datatype'])
# Sometimes file is named "val" instead of "valid"
if dt not in ['train', 'valid', 'val', 'test']:
raise Exception(
'Unknown dt parameter: %s. Expected either "train", "valid", or "test".'
% dt
)
# Assumes file is train.json or valid.json named
data_file = os.path.join(self.data_path, '%s.json' % dt)
# Load the text | |
dets[d.uid] = d
return dets
def detail_list(self):
##
# Returns a list of Detail objects which are attached to this Node.
#
# @return dets: <i>list</i> :: A list of Detail objects which are attached to this Node.
#
# @code
# for d in n.detail_list():
# print d.anchor() == n # True
# @endcode
dets = []
for d in self.graph.detail_list():
if d.anchor_uid == self.uid:
dets.append(d)
return dets
def out_links(self):
##
# Returns a list of Link objects which originate at this Node.
#
# @return links: <i>list</i> :: A list of Link objects which originate at this Node.
#
# @code
# for link in n.out_links():
# print link.value
# @endcode
links = []
for link in self.graph.link_list():
if link.origin_uid == self.uid:
links.append(link)
return links
def in_links(self):
##
# Returns a list of Link objects which terminate at this Node.
#
# @return links: <i>list</i> :: A list of Link objects which terminate at this Node.
#
# @code
# for link in n.in_links():
# print link.value
# @endcode
links = []
for link in self.graph.link_list():
if link.terminus_uid == self.uid:
links.append(link)
return links
def all_links(self):
##
# Returns a list of Link objects which connect to this Node.
#
# @return links: <i>list</i> :: A list of Link objects which connect to this Node.
#
# @code
# for link in n.all_links():
# print link.value
# @endcode
links = []
for link in self.graph.link_list():
if link.origin_uid == self.uid or link.terminus_uid == self.uid:
links.append(link)
return links
def out_neighbors(self):
##
# Returns a list of Node objects which neighbor this Node by outgoing Link.
#
# @return nodes: <i>list</i> :: A list of Node objects which neighbor this Node by outgoing Link.
#
# @code
# for n2 in n.out_neighbors():
# print n2.name
# @endcode
neighbors = []
for link in self.graph.link_list():
if link.origin_uid == self.uid:
neighbors.append(link.terminus())
return neighbors
def in_neighbors(self):
##
# Returns a list of Node objects which neighbor this Node by incoming Link.
#
# @return nodes: <i>list</i> :: A list of Node objects which neighbor this Node by incoming Link.
#
# @code
# for n2 in n.in_neighbors():
# print n2.name
# @endcode
neighbors = []
for link in self.graph.link_list():
if link.terminus_uid == self.uid:
neighbors.append(link.origin())
return neighbors
def all_neighbors(self):
##
# Returns a list of Node objects which neighbor this Node.
#
# @return nodes: <i>list</i> :: A list of Node objects which neighbor this Node.
#
# @code
# for n2 in n.all_neighbors():
# print n2.name
# @endcode
neighbors = []
for link in self.graph.link_list():
if link.terminus_uid == self.uid:
neighbors.append(link.origin())
elif link.origin_uid == self.uid:
neighbors.append(link.terminus())
return neighbors
    def dictionary(self):
        ##
        # Returns a dictionary with this Node's properties, as required by the API. Generally used to build queries.
        # Values are URL-quoted (urllib.quote is Python 2 API) and numeric
        # coordinates are stringified for transport.
        #
        # @return dict: <i>dict</i> :: A dictionary of this Node's properties.
        #
        # @code
        # q = n.dictionary()
        # q['query'] = 'newnode'
        # g.queue(q)
        # @endcode
        return {'uid': urllib.quote(self.uid),
                'name': urllib.quote(self.name),
                'x': str(self.x),
                'y': str(self.y),
                'radius': self.radius,
                'shape': str(self.shape),
                'picture': urllib.quote(self.image),
                'color': urllib.quote(self.color)}
    def update(self, callback=None):
        ##
        # Updates the register of this Node on the server by enqueueing an
        # 'updatenode' query built from dictionary().
        #
        # @param callback: <i>function</i> :: An optional function to handle the server's response to the query.
        #
        # @code
        # n.name = "Different Node Name"
        # n.radius += 5
        # n.update()
        # @endcode
        q = self.dictionary()
        q['query'] = "updatenode"
        self.graph.queue(q, callback)
##
# Links connect Node objects to each other. They have a LinkType. Detail objects can be attached to them.
#
class Link():
    def __init__(self, origin_uid, terminus_uid, type, name="Link", value=1, uid=None):
        ##
        # Constructs a Link object. String fields are URL-unquoted on the way
        # in (urllib.unquote is Python 2 API); a uid is generated if absent.
        #
        # @param origin_uid: <i>str</i> :: The uid of the origin Node.
        # @param terminus_uid: <i>str</i> :: The uid of the terminus Node.
        # @param type: <i>str</i> :: The name of the LinkType that describes this Link.
        # @param name: <i>str</i> :: The displayed name of this Link.
        # @param value: <i>int</i> :: The value of this Link.
        # @param uid: <i>str</i> :: The global unique identifier of this Link.
        #
        # @code
        # my_link = Link(origin_node.uid, terminus_node.uid, 'Money', value=60)
        # g.add_link(l)
        # @endcode
        if not uid:
            uid = str(uuid.uuid4())
        ## <i>str</i> :: The global unique id of the origin node.
        self.origin_uid = urllib.unquote(origin_uid)
        ## <i>str</i> :: The global unique id of the terminus node.
        self.terminus_uid = urllib.unquote(terminus_uid)
        ## <i>str</i> :: The name of the LinkType of this Link
        self.type = urllib.unquote(type)
        ## <i>str</i> :: The display name of this Link
        self.name = urllib.unquote(name)
        ## <i>int</i> :: The value of this Link, between 1 and the LinkType.max
        self.value = int(value)
        ## <i>str</i> :: The global unique id of this Link
        self.uid = urllib.unquote(uid)
        ## <i>bool</i> :: Whether or not this Link has been created on the server.
        self.created = False
## <i>Graph</i> :: The Graph to which this Link belongs.
graph = None
def link_type(self):
##
# Returns the LinkType object that this Link is a member of.
#
# @return link_type: <i>LinkType</i> ::
#
# @code
# print link.link_type().color
# @endcode
return self.graph.link_type(self.type)
    def add_detail(self, detail, update=True, callback=None):
        ##
        # Attaches a Detail to this Link. Mutates the passed Detail: sets its
        # anchor to this Link and, when coordinates are missing, positions it
        # near the Link's center, stacking below any existing details.
        #
        # @param detail: <i>Detail</i> :: The Detail to attach to this Link.
        # @param update: <i>bool</i> :: Whether or not to immediately enqueue the query.
        # @param callback: <i>function</i> :: An optional function to handle the server's response to the query.
        #
        # @code
        # d = Detail(content="http://psymphonic.com", type="link")
        # l.add_detail(d)
        # @endcode
        detail.anchor_type = 'rel'
        detail.anchor_uid = self.uid
        if not detail.x:
            detail.x = self.center()['x']+10
        if not detail.y:
            # Offset vertically by the number of details already anchored here.
            num = 0
            for d in self.graph.detail_list():
                if d.anchor_uid == self.uid:
                    num += 1
            detail.y = self.center()['y']+(20*num)
        self.graph.add_detail(detail, update=update, callback=callback)
def detail_list(self):
##
# Returns a list of Detail objects which are attached to this Link.
#
# @return details: <i>list</i> :: A list of Detail objects which are attached to this Link.
#
# @code
# for d in n.detail_list():
# print d.anchor() == n # True
# @endcode
dets = []
for d in self.graph.detail_list:
if d.anchor_uid == self.uid:
dets.append(d)
return dets
def details(self):
##
# Returns a uid-keyed dictionary of Detail objects which are attached to this Link.
#
# @return details: <i>dict</i> :: A uid-keyed dictionary of Detail objects which are attached to this Link.
#
# @code
# for d in n.details():
# print n.details()[d].anchor() == n # True
# @endcode
dets = {}
for d in self.graph.detail_list:
if d.anchor_uid == self.uid:
dets[d.uid] = d
return dets
    def dictionary(self):
        ##
        # Returns a dictionary of this Link's properties, as required by the API. Generally used internally to build queries.
        # String values are URL-quoted (urllib.quote is Python 2 API).
        #
        # @return dict: <i>dict</i> :: A dictionary of this Link's properties.
        #
        # @code
        # q = n.dictionary()
        # q['query'] = 'newnode'
        # g.queue(q)
        # @endcode
        return {'uid': urllib.quote(self.uid),
                'name': urllib.quote(self.name),
                'value': str(self.value),
                'rel_type': urllib.quote(self.type),
                'o_uid': urllib.quote(self.origin_uid),
                't_uid': urllib.quote(self.terminus_uid)}
def origin(self):
##
# Returns the origin Node.
#
# @return origin: <i>Node</i> ::
#
# @code
# print link.origin().name
# @endcode
return self.graph.node(self.origin_uid)
def parallel(self):
##
# Returns a list of Links which are parallel to this Link, meaning that they share two nodes, regardless of direction.
#
# @return links: <i>list</i> :: A list of Links which are parallel to this Link.
#
# @code
# total_value = 0
# for l2 in l.parallel():
# total_value += l2
# print total_value
# @endcode
ps = []
for link in self.graph.link_list():
if link.origin_uid == self.origin_uid and link.terminus_uid == self.terminus_uid:
ps.append(link)
elif link.origin_uid == self.terminus_uid and link.terminus_uid == self.origin_uid:
ps.append(link)
return ps
def terminus(self):
##
# Returns the terminus Node.
#
# @return terminus: <i>Node</i> ::
#
# @code
# print link.terminus().name
# @endcode
return self.graph.node(self.terminus_uid)
def update(self, callback=None):
##
# Updates the server's registry of this Link.
#
# @param callback: <i>function</i> | |
# chemsolve/element.py
#!/usr/bin/env python3
# -*- coding = utf-8 -*-
from __future__ import division
import operator
from chemsolve.utils.periodictable import PeriodicTable
from chemsolve.utils.warnings import ChemsolveDeprecationWarning
from chemsolve.utils.constants import *
from chemsolve.utils.errors import InvalidElementError
__all__ = ['Element', 'SpecialElement']
class Element(object):
    """The core element object class.

    This class contains a single element and its attributes are all
    different properties of the element, including the simple ones like
    mass and atomic number, but also electron configuration (both full and
    noble gas abbreviation), atomic radius, electronegativity, ionization,
    and electron affinity. These can all be accessed as attributes.

    The element can be instantiated traditionally, from its symbol, but can
    also be created from molar mass or noble gas/complete electron configuration.
    Furthermore, with provided amounts of grams, moles, or molecules, the
    element class can automatically calculate the second and third unknown
    quantities, such as molecules and moles from grams.

    Examples
    --------
    Create a Barium element.

    >>> barium = Element('Ba')
    >>> # Access the element's attributes.
    >>> print(barium.mass)
    >>> print(barium.electronegativity)

    Create a Calcium element and calculate the number of moles.

    >>> calcium = Element('Ca', grams = 2.0)
    >>> # Get the number of molecules and moles.
    >>> print(calcium.mole_amount)
    >>> print(calcium.molecules)

    Parameters
    ----------
    element_symbol: str
        The element symbol representing the element you want to initialize.
    """
    def __init__(self, element_symbol, **kwargs):
        # Initialize class properties from the PeriodicTable object.
        self._properties = PeriodicTable().get_properties(element_symbol)

        # Element Symbol/Name.
        self.element_symbol = element_symbol
        self.element_name = self.get_element_name()

        # Atomic Mass and Number.
        self.mass = self._properties['AtomicMass']
        self.number = self._properties['AtomicNumber']

        # Electron Configurations.
        self.electron_configuration = self._properties['ElectronConfiguration']
        self.full_electron_configuration = self._get_full_electron_configuration()

        # Miscellaneous Properties.
        self.radius = self._properties['AtomicRadius']
        self.electronegativity = self._properties['Electronegativity']
        self.ionization = self._properties['IonizationEnergy']
        self.electron_affinity = self._properties['ElectronAffinity']

        # Class Value Calculations (moles/grams/molecules/percent).
        self._assign_quantities(kwargs)

    def _assign_quantities(self, kwargs):
        """Derive mole/gram/molecule amounts (or percent) from kwargs.

        At most one of ``moles``, ``grams`` or ``molecules`` may be given;
        the other two quantities are computed from it and rounded to 4
        decimal places. ``percent`` (a decimal < 1) may alternatively be
        given, primarily for setting up elements for Compound.from_formula.

        Raises
        ------
        ValueError
            If more than one of ``moles``/``grams``/``molecules`` is given.
        TypeError
            If ``percent`` is >= 1 (it must be a decimal fraction).
        """
        # BUG FIX: the original guard `all(x in [...] for x in [kwargs])`
        # iterated over the kwargs dict itself and could never be True, so
        # conflicting quantities were silently accepted (the last processed
        # keyword won). Validate up front instead.
        provided = [key for key in ("moles", "grams", "molecules")
                    if key in kwargs]
        if len(provided) > 1:
            raise ValueError("You cannot provide multiple different quantities "
                             "of the element at a single time.")
        if "moles" in kwargs:
            self.mole_amount = kwargs["moles"]
            self.gram_amount = round(self.mole_amount * self.mass, 4)
            self.molecules = round(self.mole_amount * AVOGADRO, 4)
        if "grams" in kwargs:
            self.gram_amount = kwargs["grams"]
            self.mole_amount = round(self.gram_amount / self.mass, 4)
            self.molecules = round(self.mole_amount * AVOGADRO, 4)
        if "molecules" in kwargs:
            self.molecules = kwargs["molecules"]
            self.mole_amount = round(self.molecules / AVOGADRO, 4)
            # BUG FIX: the original called round() with no ndigits here,
            # truncating grams to an integer; use 4 decimal places for
            # consistency with the other branches.
            self.gram_amount = round(self.mass * self.mole_amount, 4)
        if "percent" in kwargs:
            # Primarily if you are setting up elements for Compound.from_formula.
            if float(kwargs["percent"]) >= 1:
                raise TypeError("That is not a valid input for the percent "
                                "field. Enter a percent as a decimal.")
            self.percent_of = kwargs["percent"]
            self.mole_amount = False
            self.gram_amount = False

    def __str__(self):
        # For __str__, return the entire name of the element.
        return str(self.element_name.title())

    def __repr__(self):
        # Return an object-based representation for internal use.
        # return f"<Element ({str(self.element_symbol.title())}>"
        return str(self.element_symbol.title())

    @staticmethod
    def _comparable_mass(other):
        """Return the molar mass of *other*, an Element or element symbol.

        Raises InvalidElementError for an unknown symbol and ValueError for
        any other type, matching the original comparison operators.
        """
        if isinstance(other, Element):
            return other.mass
        if isinstance(other, str):
            try:
                return Element(other).mass
            except InvalidElementError:
                # Re-raise carrying just the offending symbol.
                raise InvalidElementError(other)
        raise ValueError(f"Expected either an `Element` or a "
                         f"`str`, got {type(other)}: {other}")

    def __gt__(self, other):
        # Compare two elements' molar masses.
        return self.mass > self._comparable_mass(other)

    def __lt__(self, other):
        # Compare two elements' molar masses.
        return self.mass < self._comparable_mass(other)

    def __ge__(self, other):
        # Compare two elements' molar masses.
        return self.mass >= self._comparable_mass(other)

    def __le__(self, other):
        # Compare two elements' molar masses.
        return self.mass <= self._comparable_mass(other)

    def __call__(self, **kwargs):
        # Update the class calculation quantities for more calculations.
        self._assign_quantities(kwargs)

    @classmethod
    def from_molar_mass(cls, mass):
        """Instantiates an element from a provided molar mass.

        Given a certain molar mass value, this method checks to see whether there
        is an existing element with a defined molar mass value within 0.05 of the
        provided molar mass value, and instantiates the class from that.

        Examples
        --------
        Create a Boron atom from within 0.02 of its actual molar mass.

        >>> boron = Element.from_molar_mass(10.8)

        Parameters
        ----------
        mass: float
            The molar mass of the element which you want to create.

        Returns
        -------
        An instantiated Element class from the molar mass value.
        """
        table = PeriodicTable()
        for indx, molar_mass in enumerate(table['AtomicMass']):
            if abs(mass - molar_mass) <= 0.05:
                # The provided molar mass is close enough to the actual one.
                return cls(table['Symbol'][indx])
        raise ValueError(f"Received invalid molar mass {mass}, not "
                         f"close to any molar masses on the periodic table.")

    @classmethod
    def from_electron_configuration(cls, config):
        """Instantiates an element from a provided electron configuration.

        Given an electron configuration, either the complete one or the noble
        gas abbreviation, this method gets the element associated with that
        electron configuration and instantiates the Element class from it.

        Examples
        --------
        Create a Magnesium atom from its noble gas electron configuration.

        >>> magnesium = Element.from_electron_configuration('[Ne]3s2')

        Parameters
        ---------
        config: str
            The complete or noble gas abbreviated electron configuration.

        Returns
        -------
        An instantiated Element class from the electron configuration.
        """
        table = PeriodicTable()
        # First try the noble-gas-abbreviated configurations in the table.
        for indx, configuration in enumerate(table['ElectronConfiguration']):
            if config == configuration:
                return cls(table['Symbol'][indx])
        # Fall back to comparing against each element's full configuration.
        for symbol in table['Symbol']:
            if config == Element(symbol).full_electron_configuration:
                return cls(symbol)
        raise ValueError(
            f"Received invalid electron configuration {config}, not a valid noble gas "
            f"configuration or complete configuration on the periodic table.")

    @ChemsolveDeprecationWarning('Element.get_element_name', future_version = '2.0.0')
    def get_element_name(self):
        """Returns the element name from the symbol."""
        try:
            return self._properties['Name']
        except AttributeError:
            raise InvalidElementError(
                f"Invalid element {self.element_symbol} received.")

    def _get_full_electron_configuration(self):
        """Returns the entire electron configuration of the element.

        A configuration starting with '[' is a noble gas abbreviation, e.g.
        '[Ne]3s2'; expand it by recursing into the noble gas's own full
        configuration.
        """
        if self.electron_configuration[0] == "[":
            config = Element(self.electron_configuration[1:3])\
                .full_electron_configuration + " "
            config += self.electron_configuration[4:]
            return config
        else:
            return self.electron_configuration

    @ChemsolveDeprecationWarning('Element.calculate_moles', future_version = '2.0.0')
    def calculate_moles(self):
        """Calculates the class mole quantity from grams.

        NOTE: rounds to 3 places (not 4) — preserved for backward
        compatibility with the original deprecated API.
        """
        return round(self.gram_amount / self.mass, 3)

    @ChemsolveDeprecationWarning('Element.calculate_grams', future_version = '2.0.0')
    def calculate_grams(self):
        """Calculates the class gram quantity from moles."""
        return self.mole_amount * self.mass
@ChemsolveDeprecationWarning('SpecialElement', future_version = '2.0.0')
class SpecialElement(Element):
"""
A special variant of the Element class created for the FormulaCompound class.
Contains the extra parameter of percentage, in order to use percentages to find the formula of the compound.
If percentage is defined, the percentage parameter overrides the gram and mole parameters in the FormulaCompound class.
If grams is defined, the grams parameter overrides the mole and percent parameter. Ideally, moles shouldn't be used in the first place.
If percentage is defined in one SpecialElement, it must be defined in all other elements. Same goes for grams.
**Should only be used for the FormulaCompound class. If simply defining an element, use the Element class.
"""
def __init__(self, element_symbol, **kwargs):
super().__init__(element_symbol = element_symbol, **kwargs)
if len(kwargs) == 0:
raise ValueError("If you are not looking to define any values, you should use the Element class instead.")
if "grams" not in kwargs and "percent" not in kwargs and "moles" not in kwargs:
raise ValueError("If you are not looking to define any numerical values, you should use the Element class instead.")
if "grams" in kwargs:
self.gram_amount = kwargs["grams"]; self.mole_amount = False; self.percent_of = False
else: self.gram_amount = False
if "percent" in kwargs:
if float(kwargs["percent"]) >= 1:
raise TypeError("That is not a valid input for the percent field. Enter a percent as a decimal.")
| |
{4,S} {5,S}
2 Cb u0 {1,S}
3 Cb u0 {1,S}
4 Cd u0 {1,S}
5 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
# Thermochemistry group-additivity record (RMG-style database entry).
# NOTE(review): `group` holds an adjacency-list pattern (index, atom type,
# unpaired electrons, bonds); no thermo data has been assigned (thermo = None).
entry(
    index = -1,
    label = "Cs-(Cds-Cd)CbCbSs",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 S2s u0 {1,S}
6 C u0 {2,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cds)CbCbSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 S2s u0 {1,S}
6 Cd u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd)CbCbSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 S2s u0 {1,S}
6 Cdd u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)CbCbSs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 S2s u0 {1,S}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-Cd)CbCbSs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 S2s u0 {1,S}
7 C u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-CtCtCtSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Ct u0 {1,S}
3 Ct u0 {1,S}
4 Ct u0 {1,S}
5 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-CbCtCtSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Ct u0 {1,S}
4 Ct u0 {1,S}
5 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-CbCbCtSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-CbCbCbSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SCbCsSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cs u0 {1,S}
5 S2s u0 {1,S}
6 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SCsCsSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 S2s u0 {1,S}
6 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cd)(Cds-Cd)S2s",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {8,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 S2s u0 {1,S}
6 C u0 {3,D}
7 C u0 {4,D}
8 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd)(Cds-Cdd)S2s",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {8,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 S2s u0 {1,S}
6 Cdd u0 {3,D}
7 Cdd u0 {4,D}
8 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-Cd)(Cds-Cdd-Cd)S2s",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 CS u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 S2s u0 {1,S}
8 S2d u0 {4,D}
9 C u0 {5,D}
10 C u0 {6,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-Cd)S2s",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 CS u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 S2s u0 {1,S}
8 S2d u0 {4,D}
9 S2d u0 {5,D}
10 C u0 {6,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-S2d)S2s",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 CS u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 S2s u0 {1,S}
8 S2d u0 {4,D}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd)(Cds-Cds)S2s",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {8,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 S2s u0 {1,S}
6 Cdd u0 {3,D}
7 Cd u0 {4,D}
8 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-Cd)(Cds-Cds)S2s",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 CS u0 {1,S} {8,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 S2s u0 {1,S}
7 Cd u0 {4,D}
8 S2d u0 {3,D}
9 C u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-S2d)(Cds-Cds)S2s",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 CS u0 {1,S} {8,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 S2s u0 {1,S}
7 Cd u0 {4,D}
8 S2d u0 {3,D}
9 S2d u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cds)(Cds-Cds)S2s",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {8,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 S2s u0 {1,S}
6 Cd u0 {3,D}
7 Cd u0 {4,D}
8 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cd)CtSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Ct u0 {1,S}
5 S2s u0 {1,S}
6 C u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cds)CtSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Ct u0 {1,S}
5 S2s u0 {1,S}
6 Cd u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd)CtSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 Cd u0 {1,S} {6,D}
4 Ct u0 {1,S}
5 S2s u0 {1,S}
6 Cdd u0 {3,D}
7 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-S2d)CtSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 CS u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Ct u0 {1,S}
6 S2s u0 {1,S}
7 S2d u0 {3,D}
8 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-Cd)CtSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {5,S} {6,S}
2 Cd u0 {1,S} {4,D}
3 CS u0 {1,S} {7,D}
4 Cdd u0 {2,D} {8,D}
5 Ct u0 {1,S}
6 S2s u0 {1,S}
7 S2d u0 {3,D}
8 C u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SCtCsSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 Ct u0 {1,S}
4 Cs u0 {1,S}
5 S2s u0 {1,S}
6 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=SC=SSs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 CS u0 {1,S} {7,D}
4 CS u0 {1,S} {8,D}
5 S2s u0 {1,S}
6 S2d u0 {2,D}
7 S2d u0 {3,D}
8 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cd)S2s",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 | |
# Source repository: ATikhonov2/leo-editor
# Leo colorizer control file for clojure mode.
# This file is in the public domain.
import re
# Compiled pattern for numeric literals: hex integers, decimals with a
# fractional part, and plain integers (optionally followed by a dot).
number_pat = re.compile('\\b0x[0-9a-fA-F]+|\\b\\d*\\.\\d+|\\b\\d+\\.?')

def clojure_match_numbers(colorer, s, i):
    # Delegate to the colorer, painting any numeric literal as 'literal4'.
    return colorer.match_compiled_regexp(s, i, 'literal4', number_pat)
# Properties for clojure mode.
# NOTE(review): jEdit-style mode properties consumed by Leo's colorizer —
# presumably controlling comment syntax, indentation and bracket handling;
# confirm against Leo's mode-file documentation.
properties = {
    "lineComment": ";",
    "multipleBracketIndent": "true",
    "noWordSep": ".*+!-_?/",
    "unalignedCloseBrackets": ")",
    "unalignedOpenBrackets": "(",
}
# Attributes dict for clojure_main ruleset.
# `digit_re` matches integers, decimals (optional trailing M), and ratios.
clojure_main_attributes_dict = {
    "default": "null",
    "digit_re": "([0-9]+)|(([0-9]*\\.[0-9]+)M?)|([0-9]+/[0-9]+)",
    "escape": "",
    "highlight_digits": "true",
    "ignore_case": "false",
    "no_word_sep": ".*+!-_?/",
}
# Attributes dict for clojure_strings ruleset.
# Same digit pattern, but digits are not highlighted inside strings.
clojure_strings_attributes_dict = {
    "default": "LITERAL1",
    "digit_re": "([0-9]+)|(([0-9]*\\.[0-9]+)M?)|([0-9]+/[0-9]+)",
    "escape": "",
    "highlight_digits": "false",
    "ignore_case": "true",
    "no_word_sep": ".*+!-_?/",
}
# Attributes dict for clojure_regexps ruleset.
clojure_regexps_attributes_dict = {
    "default": "LITERAL1",
    "digit_re": "([0-9]+)|(([0-9]*\\.[0-9]+)M?)|([0-9]+/[0-9]+)",
    "escape": "",
    "highlight_digits": "false",
    "ignore_case": "true",
    "no_word_sep": ".*+!-_?/",
}
# Dictionary of attributes dictionaries for clojure mode, keyed by ruleset.
attributesDictDict = {
    "clojure_main": clojure_main_attributes_dict,
    "clojure_regexps": clojure_regexps_attributes_dict,
    "clojure_strings": clojure_strings_attributes_dict,
}
# Keywords dict for clojure_main ruleset.
clojure_main_keywords_dict = {
"%": "operator",
"&": "keyword4",
"*": "operator",
"*'": "operator",
"*1": "keyword4",
"*2": "keyword4",
"*3": "keyword4",
"*agent*": "keyword4",
"*clojure-version*": "keyword4",
"*command-line-args*": "keyword4",
"*compile-files*": "keyword4",
"*compile-path*": "keyword4",
"*compiler-options*": "keyword4",
"*current*": "keyword4",
"*data-readers*": "keyword4",
"*default-data-reader-fn*": "keyword4",
"*e": "keyword4",
"*err*": "keyword4",
"*file*": "keyword4",
"*flush-on-newline*": "keyword4",
"*in*": "keyword4",
"*ns*": "keyword4",
"*open-url-script*": "keyword4",
"*out*": "keyword4",
"*print-base*": "keyword4",
"*print-dup*": "keyword4",
"*print-length*": "keyword4",
"*print-level*": "keyword4",
"*print-meta*": "keyword4",
"*print-miser-width*": "keyword4",
"*print-pprint-dispatch*": "keyword4",
"*print-pretty*": "keyword4",
"*print-radix*": "keyword4",
"*print-readably*": "keyword4",
"*print-right-margin*": "keyword4",
"*print-suppress-namespaces*": "keyword4",
"*read-eval*": "keyword4",
"*sb*": "keyword4",
"*stack*": "keyword4",
"*state*": "keyword4",
"*unchecked-math*": "keyword4",
"*warn-on-reflection*": "keyword4",
"+": "operator",
"+'": "operator",
"-": "operator",
"-'": "operator",
".": "keyword3",
"..": "keyword3",
"/": "operator",
"<": "operator",
"=": "operator",
"==": "operator",
">": "operator",
"accessor": "keyword3",
"aclone": "keyword3",
"add-watch": "keyword3",
"agent": "keyword3",
"agent-error": "keyword3",
"agent-errors": "keyword3",
"aget": "keyword3",
"alength": "keyword3",
"alias": "keyword1",
"all-ns": "keyword1",
"alter": "keyword3",
"alter-meta!": "keyword3",
"alter-var-root": "keyword3",
"amap": "keyword3",
"ancestors": "keyword3",
"and": "operator",
"append-child": "function",
"apply": "keyword3",
"are": "function",
"areduce": "keyword3",
"array-map": "keyword3",
"as-": "operator",
"as-file": "function",
"as-relative-path": "function",
"as-url": "function",
"aset": "keyword3",
"aset-boolean": "keyword3",
"aset-byte": "keyword3",
"aset-char": "keyword3",
"aset-double": "keyword3",
"aset-float": "keyword3",
"aset-int": "keyword3",
"aset-long": "keyword3",
"aset-short": "keyword3",
"assert": "keyword1",
"assert-any": "function",
"assert-expr": "function",
"assert-predicate": "function",
"assoc": "keyword3",
"assoc!": "keyword3",
"assoc-in": "keyword3",
"associative?": "literal3",
"atom": "keyword3",
"atom?": "literal3",
"attrs": "function",
"await": "keyword3",
"await-for": "keyword3",
"bases": "keyword3",
"bean": "keyword3",
"bigdec": "keyword3",
"bigint": "keyword3",
"biginteger": "keyword3",
"binding": "keyword1",
"bit-and": "operator",
"bit-and-not": "operator",
"bit-clear": "operator",
"bit-flip": "operator",
"bit-not": "operator",
"bit-or": "operator",
"bit-set": "operator",
"bit-shift-left": "operator",
"bit-shift-right": "operator",
"bit-test": "operator",
"bit-xor": "operator",
"blank?": "literal3",
"boolean": "keyword3",
"boolean-array": "keyword3",
"booleans": "keyword3",
"bound-fn": "keyword1",
"bound-fn*": "keyword1",
"bound?": "literal3",
"branch?": "literal3",
"browse-url": "function",
"butlast": "keyword3",
"byte": "keyword3",
"byte-array": "keyword3",
"bytes": "keyword3",
"capitalize": "function",
"case": "keyword2",
"cast": "keyword3",
"catch": "keyword2",
"char": "keyword3",
"char-array": "keyword3",
"char-escape-string": "keyword3",
"char-name-string": "keyword3",
"char?": "literal3",
"chars": "keyword3",
"children": "function",
"cl-format": "function",
"class": "keyword3",
"class?": "literal3",
"clear-agent-errors": "keyword3",
"clojure-version": "keyword3",
"code-dispatch": "function",
"comment": "keyword1",
"commute": "keyword3",
"comp": "keyword3",
"comparator": "keyword3",
"compare": "keyword3",
"compare-and-set!": "keyword3",
"compile": "keyword1",
"complement": "keyword3",
"compose-fixtures": "function",
"concat": "keyword3",
"cond": "keyword2",
"cond-": "keyword2",
"condp": "keyword2",
"conj": "keyword3",
"conj!": "keyword3",
"cons": "keyword3",
"constantly": "keyword3",
"construct-proxy": "keyword3",
"contains?": "literal3",
"content": "function",
"content-handler": "function",
"copy": "function",
"count": "keyword3",
"counted?": "literal3",
"create-ns": "keyword1",
"create-struct": "keyword3",
"cycle": "keyword3",
"dec": "operator",
"dec'": "operator",
"decimal?": "literal3",
"declare": "keyword1",
"def": "keyword1",
"default-data-readers": "keyword3",
"definline": "keyword1",
"definterceptor": "keyword1",
"definterceptorfn": "keyword1",
"definterface": "keyword1",
"defmacro": "keyword1",
"defmethod": "keyword1",
"defmulti": "keyword1",
"defn": "keyword1",
"defn-": "keyword1",
"defon-request": "keyword1",
"defonce": "keyword1",
"defprotocol": "keyword1",
"defrecord": "keyword1",
"defstruct": "keyword1",
"deftest": "keyword1",
"deftest-": "keyword1",
"deftype": "keyword1",
"delay": "keyword3",
"delay?": "literal3",
"delete-file": "function",
"deliver": "keyword3",
"denominator": "keyword3",
"deref": "keyword3",
"derive": "keyword3",
"descendants": "keyword3",
"destructure": "keyword2",
"diff": "function",
"diff-similar": "function",
"difference": "function",
"disj": "keyword3",
"disj!": "keyword3",
"dissoc": "keyword3",
"dissoc!": "keyword3",
"distinct": "keyword3",
"distinct?": "literal3",
"do": "keyword2",
"doall": "keyword2",
"dorun": "keyword2",
"doseq": "keyword2",
"dosync": "keyword2",
"dotimes": "keyword2",
"doto": "keyword2",
"double": "keyword3",
"double-array": "keyword3",
"doubles": "keyword3",
"down": "function",
"drop": "keyword3",
"drop-last": "keyword3",
"drop-while": "keyword3",
"e": "function",
"edit": "function",
"element": "function",
"emit": "function",
"emit-element": "function",
"empty": "keyword3",
"empty?": "literal3",
"end?": "literal3",
"ensure": "keyword3",
"enumeration-seq": "keyword3",
"error-handler": "keyword3",
"error-mode": "keyword3",
"escape": "function",
"eval": "keyword3",
"even?": "literal3",
"every-pred": "keyword3",
"every?": "literal3",
"ex-data": "keyword3",
"ex-info": "keyword3",
"extend": "keyword1",
"extend-protocol": "keyword1",
"extend-type": "keyword1",
"extenders": "keyword1",
"extends?": "literal3",
"false": "literal4",
"false?": "literal3",
"ffirst": "keyword3",
"file": "function",
"file-seq": "keyword3",
"filter": "keyword3",
"filterv": "keyword3",
"finally": "keyword2",
"find": "keyword3",
"find-keyword": "keyword1",
"find-ns": "keyword1",
"find-var": "keyword3",
"first": "keyword3",
"flatten": "keyword3",
"float": "keyword3",
"float-array": "keyword3",
"float?": "literal3",
"floats": "keyword3",
"flush": "keyword3",
"fn": "keyword1",
"fn?": "literal3",
"fnext": "keyword3",
"fnil": "keyword3",
"for": "keyword2",
"force": "keyword3",
"format": "keyword3",
"formatter": "function",
"formatter-out": "function",
"frequencies": "keyword3",
"fresh-line": "function",
"frest": "keyword3",
"function?": "literal3",
"future": "keyword3",
"future-call": "keyword3",
"future-cancel": "keyword3",
"future-cancelled?": "literal3",
"future-done?": "literal3",
"future?": "literal3",
"gen-class": "keyword1",
"gen-interface": "keyword1",
"gensym": "keyword3",
"get": "keyword3",
"get-in": "keyword3",
"get-method": "keyword1",
"get-pretty-writer": "function",
"get-proxy-class": "keyword3",
"get-thread-bindings": "keyword3",
"get-validator": "keyword3",
"group-by": "keyword3",
"hash": "keyword3",
"hash-combine": "keyword3",
"hash-map": "keyword3",
"hash-ordered-coll": "keyword3",
"hash-set": "keyword3",
"hash-unordered-coll": "keyword3",
"identical?": "literal3",
"identity": "keyword3",
"if": "keyword2",
"if-let": "keyword2",
"if-not": "keyword2",
"if-some": "keyword2",
"ifn?": "literal3",
"import": "keyword1",
"in-ns": "keyword1",
"inc": "operator",
"inc'": "operator",
"index": "function",
"init-proxy": "keyword3",
"input-stream": "function",
"insert-child": "function",
"insert-left": "function",
"insert-right": "function",
"instance?": "literal3",
"int": "keyword3",
"int-array": "keyword3",
"integer?": "literal3",
"interleave": "keyword3",
"intern": "keyword1",
"interpose": "keyword3",
"intersection": "function",
"into": "keyword3",
"into-array": "keyword3",
"ints": "keyword3",
"io!": "keyword3",
"is": "function",
"isa?": "literal3",
"iterate": "keyword3",
"iterator-seq": "keyword3",
"join": "function",
"join-fixtures": "function",
"juxt": "keyword3",
"keep": "keyword3",
"keep-indexed": "keyword3",
"key": "keyword3",
"keys": "keyword3",
"keyword": "keyword3",
"keyword?": "literal3",
"keywordize-keys": "function",
"last": "keyword3",
"lazy-cat": "keyword3",
"lazy-cons": "keyword3",
"lazy-seq": "keyword3",
"left": "function",
"leftmost": "function",
"lefts": "function",
"let": "keyword2",
"letfn": "keyword2",
"line-seq": "keyword3",
"list": "keyword3",
"list*": "keyword3",
"list?": "literal3",
"load": "keyword3",
"load-file": "keyword3",
"load-reader": "keyword3",
"load-string": "keyword3",
"loaded-libs": "keyword1",
"locking": "keyword3",
"long": "keyword3",
"long-array": "keyword3",
"longs": "keyword3",
"loop": "keyword2",
"lower-case": "function",
"macroexpand": "keyword3",
"macroexpand-1": "keyword3",
"macroexpand-all": "function",
"make-array": "keyword3",
"make-hierarchy": "keyword3",
"make-input-stream": "function",
"make-node": "function",
"make-output-stream": "function",
"make-parents": "function",
"make-reader": "function",
"make-writer": "function",
"map": "keyword3",
"map-indexed": "keyword3",
"map-invert": "function",
"map?": "literal3",
"mapcat": "keyword3",
"mapv": "keyword3",
"max": "keyword3",
"max-key": "keyword3",
"memfn": "keyword3",
"memoize": "keyword3",
"merge": "keyword3",
"merge-with": "keyword3",
"meta": "keyword3",
"methods": "keyword1",
"min": "keyword3",
"min-key": "keyword3",
"mix-collection-hash": "keyword3",
"mod": "operator",
"munge": "keyword3",
"name": "keyword3",
"namespace": "keyword3",
"namespace-munge": "keyword3",
"neg?": "literal3",
"new": "keyword3",
"newline": "keyword3",
"next": "function",
"nfirst": "keyword3",
"nil": "literal4",
"nil?": "literal3",
"nnext": "keyword3",
"node": "function",
"not": "operator",
"not-any?": "literal3",
"not-empty": "literal3",
"not-every?": "literal3",
"not=": "operator",
"ns": "keyword1",
"ns-aliases": "keyword1",
"ns-imports": "keyword1",
"ns-interns": "keyword1",
"ns-map": "keyword1",
"ns-name": "keyword1",
"ns-publics": "keyword1",
"ns-refers": "keyword1",
"ns-resolve": "keyword1",
"ns-unalias": "keyword1",
"ns-unmap": "keyword1",
"nth": "keyword3",
"nthnext": "keyword3",
"nthrest": "keyword3",
"num": "keyword3",
"number?": "literal3",
"numerator": "keyword3",
"object-array": "keyword3",
"odd?": "literal3",
"or": "operator",
"output-stream": "function",
"parents": "keyword3",
"parse": "function",
"parse-timestamp": "function",
"partial": "keyword1",
"partition": "keyword3",
"partition-all": "keyword3",
"partition-by": "keyword3",
"path": "function",
"pcalls": "keyword3",
"peek": "keyword3",
"persistent!": "keyword3",
"pmap": "keyword3",
"pop": "keyword3",
"pop!": "keyword3",
"pos?": "literal3",
"postwalk": "function",
"postwalk-demo": "function",
"postwalk-replace": "function",
"pp": "function",
"pprint": "function",
"pprint-indent": "function",
"pprint-logical-block": "function",
"pprint-newline": "function",
"pprint-tab": "function",
"pr": "keyword3",
"pr-str": "keyword3",
"prefer-method": "keyword1",
"prefers": "keyword1",
"prev": "function",
"prewalk": "function",
"prewalk-demo": "function",
"prewalk-replace": "function",
"primitives-classnames": "keyword3",
"print": "keyword3",
"print-cause-trace": "function",
"print-dup": "keyword3",
"print-length-loop": "function",
"print-stack-trace": "function",
"print-str": "keyword3",
"print-table": "function",
"print-throwable": "function",
"print-trace-element": "function",
"printf": "keyword3",
"println": "keyword3",
"println-str": "keyword3",
"prn": "keyword3",
"prn-str": "keyword3",
"project": "function",
"promise": "keyword3",
"proxy": "keyword3",
"proxy-call-with-super": "keyword3",
"proxy-mappings": "keyword3",
"proxy-name": "keyword3",
"proxy-super": "keyword3",
"put": "keyword3",
"pvalues": "keyword3",
"quot": "literal3",
"quote": "keyword3",
"rand": "keyword3",
"rand-int": "keyword3",
"rand-nth": "keyword3",
"range": "keyword3",
"ratio?": "literal3",
"rational?": "literal3",
"rationalize": "keyword3",
"re-find": "keyword3",
"re-groups": "keyword3",
"re-matcher": "keyword3",
"re-matches": "keyword3",
"re-pattern": "keyword3",
"re-quote-replacement": "function",
"re-seq": "keyword3",
"read": "keyword3",
"read-instant-calendar": "function",
"read-instant-date": "function",
"read-instant-timestamp": "function",
"read-line": "keyword3",
"read-string": "keyword3",
"reader": "function",
"realized?": "literal3",
"record?": "literal3",
"recur": "keyword2",
"reduce": "keyword3",
"reduce-kv": "keyword3",
"reduced": "keyword3",
"reduced?": "literal3",
"reductions": "keyword3",
"ref": "keyword3",
"ref-history-count": "keyword3",
"ref-max-history": "keyword3",
"ref-min-history": "keyword3",
"ref-set": "keyword3",
"refer": "keyword1",
"refer-clojure": "keyword1",
"reify": "keyword1",
"release-pending-sends": "keyword3",
"rem": "operator",
"remove": "function",
"remove-all-methods": "keyword1",
"remove-method": "keyword1",
"remove-ns": "keyword1",
"remove-watch": "keyword3",
"rename": "function",
"rename-keys": "function",
"repeat": "keyword3",
"repeatedly": "keyword3",
"replace": "function",
"replace-first": "function",
"replicate": "keyword3",
"require": "keyword1",
"reset!": "keyword3",
"reset-meta!": "keyword3",
"resolve": "keyword3",
"resource": "function",
"rest": "keyword3",
"restart-agent": "keyword3",
"resultset-seq": "keyword3",
"reverse": "keyword3",
"reversible?": "literal3",
"rfirst": "keyword3",
"right": "function",
"rightmost": "function",
"rights": "function",
"root": "function",
"root-cause": "function",
"rrest": "keyword3",
"rseq": "keyword3",
"rsubseq": "keyword3",
"satisfies?": "literal3",
"second": "keyword3",
"select": "function",
"select-keys": "keyword3",
"send": "keyword3",
"send-off": "keyword3",
"send-via": "keyword3",
"seq": "keyword3",
"seq-zip": | |
fname
def _get_keyword_updates(self):
"""
Compare the _fits_keywords list to the _standard_keywords list to find
any differences and add these keywords to the keyword_updates list.
"""
# Use the FitsKeywordList.diff() method to compare the two lists and
# update self.keyword_updates.
updates = self._fits_keywords.diff(self._standard_keywords)
self.keyword_updates = updates
def _get_standard_fits_keywords(self):
"""
Read a FITS template file based on the file types present, create
FitsKeyword objects based on the template and try to add them to
_fits_keywords.
"""
# Get all the FITS standards currently used by file types in self.
all_standards = self.member_fits_standards()
# Iterate through all the standards if any are found.
if all_standards:
for std in all_standards:
# Look up the FITS template file for the current standard.
filename = ".".join([std, "yml"])
filename = os.path.join(self._root,
self._fits_templates_dir,
filename,
)
standard_fits = read_yaml(filename)["KEYWORDS"]
# Create a FitsKeyword for each entry in the template and try
# to add it to self._fits_keywords.
for kw, info in standard_fits.items():
kw_obj = FitsKeyword(kw, parameters=info)
self.add_fits_keyword(kw_obj, standard=True)
static_vals = read_yaml(self._static_values_yaml)
data_type, inst = std.split("_")
self._bulk_add_static_values(static_vals["hlsp"])
try:
self._bulk_add_static_values(static_vals[data_type])
except KeyError:
pass
try:
self._bulk_add_static_values(static_vals[inst])
except KeyError:
pass
    def _implement_keyword_updates(self):
        """
        Add FITS keyword updates defined in the keyword_updates list to
        _fits_keywords.
        """
        # Iterate through each entry in self.keyword_updates and try to add it
        # to self._fits_keywords.
        # NOTE(review): this calls self._add_fits_keyword(), but the only
        # adder visible in this file is the public add_fits_keyword() —
        # confirm a private _add_fits_keyword() exists elsewhere in the
        # class, otherwise this raises AttributeError.
        for kw_obj in self.keyword_updates:
            self._add_fits_keyword(kw_obj)
@staticmethod
def _make_value_xml_dict(val):
"""
Format a provided value into a dictionary for lxml ingest.
:param val: A value to add to a formatted dictionary.
:type val: obj
"""
# This is the format expected in a CAOM template XML file.
value_dict = {"source": "VALUE",
"value": val}
return value_dict
def _match_caller(self, caller):
"""
Match a calling function file to an appropriate filepath.
:param caller: The formatted function name.
:type caller: str
"""
# Look for a match among the pre-defined routine files.
if caller == self._check_file_names_out:
path = self._cfn_path
elif caller == self._precheck_metadata_format_out:
path = self._pcdf_path
elif caller == self._check_metadata_format_out:
path = self._cmd_path
else:
path = None
return path
@staticmethod
def _split_name_from_params(entry):
"""
Given a single key dictionary, return the lone key and corresponding
dictionary.
:param entry: Expecting a single-key dictionary such as {key:
{param1: val1, param2: val2, ...}}.
:type entry: dict
"""
# Verify entry only has one top-level key before returning the split
# data.
name = list(entry.keys())
if len(name) > 1:
return None
else:
return name[0], entry[name[0]]
def _update_stage_paths(self):
"""
Construct file paths for resulting files from various stages of HLSP
ingestion.
"""
# Get the expanded path for the 'MAST_HLSP' directory.
cwd = os.getcwd()
self._root = os.path.join(cwd.split(self._root_dir, 1)[0],
self._root_dir,
)
# Default filename should be in the root directory named hlsp_name.hlsp
default_name = self._get_filename()
self._default_path = os.path.join(self._root, default_name)
# Construct file path for check_file_names.py results.
cfn_name = self._get_filename(self._check_file_names_out)
self._cfn_path = os.path.join(self._root,
self._check_file_names_dir,
cfn_name,
)
# Construct file path for precheck_data_format.py reults.
pcdf_name = self._get_filename(self._precheck_metadata_format_out)
self._pcdf_path = os.path.join(self._root,
self._check_metadata_format_dir,
pcdf_name
)
# Construct file path for check_metadata_format.py results.
cmd_name = self._get_filename(self._check_metadata_format_out)
self._cmd_path = os.path.join(self._root,
self._check_metadata_format_dir,
cmd_name,
)
def add_filetype(self, new_filetype):
"""
Add a FileType object to the file_types list.
:param new_filetype: A new file type to be added to self.file_types.
:type new_filetype: FileType
"""
# Check new_filetype for a FileType attribute.
try:
ft = new_filetype.ftype
except AttributeError:
err = ("Only FileType objects should be added to "
"HLSPFile.file_types!"
)
raise TypeError(err)
# If the new file ending is not already an ftype attribute of any
# self.file_types member, add new_filetype.
current_types = [x.ftype for x in self.file_types]
if ft not in current_types:
self.file_types.append(new_filetype)
def add_fits_keyword(self, keyword_obj, standard=None):
"""
Add a new FitsKeyword object to the private _fits_keywords list. Skip
if the keyword is already present.
:param keyword_obj: The potentially new fits keyword to add.
:type keyword_obj: FitsKeyword
"""
# Check keyword_obj for a FitsKeyword attribute.
try:
fits = keyword_obj.fits_keyword
except AttributeError:
err = "HLSPFile expected a <FitsKeyword> type object"
raise TypeError(err)
# If standard is set, add a copy of this FitsKeyword object to the
# _standard_keywords list.
if standard:
std_kw = keyword_obj.copy()
self._standard_keywords.add(std_kw)
self._fits_keywords.add(keyword_obj)
def add_keyword_update(self, keyword):
"""
Add an updated FitsKeyword object to the keyword_updates list.
:param keyword: A potentially new FitsKeyword object to be added to
self.keyword_updates.
:type keyword: FitsKeyword
"""
# Check keyword for a FitsKeyword attribute.
try:
k = keyword.fits_keyword
except AttributeError:
raise TypeError("Only FitsKeyword objects should be added.")
# If self.keyword_updates is empty, just add the new entry.
if len(self.keyword_updates) == 0:
self.keyword_updates.append(keyword)
return
# Check if keyword is already in self.keyword_updates.
for existing_update in self.keyword_updates:
# If found, try to update the existing FitsKeyword with values
# from the new one.
if existing_update.fits_keyword == k:
updated_parameters = keyword.as_dict()[k]
existing_update.update(updated_parameters)
return
# If not found, add the new FitsKeyword.
else:
self.keyword_updates.append(keyword)
def add_unique_parameter(self, caom, parent, value):
"""
Add a new entry in the unique_parameters list.
:param caom: The CAOM keyword we want to add to the XML template file.
:type caom: str
:param parent: The XML parent section to add the new keyword / value.
:type parent: str
:param value: The value to associate with the new CAOM keyword.
:type value: obj
"""
# Get the current XML parents from the self.unique_parameters
# dictionary.
current_parents = self.unique_parameters.keys()
# If parent is not an existing XML parent, make a new entry in the
# self.unique_parameters dictionary.
if parent not in current_parents:
self.unique_parameters[parent] = {}
# Update certain parameters.
if parent == 'provenance':
if caom == 'name' and value.endswith(">"):
value = self.hlsp_name.upper()
if caom == 'reference' and value.endswith(">"):
url_root = "https://archive.stsci.edu/hlsp"
value = "/".join([url_root, self.hlsp_name.lower()])
# Add the keyword / value pair to the parent section.
self.unique_parameters[parent].update({caom: value})
def as_dict(self):
"""
Return the current contents of self as a formatted dictionary. This
is useful for entering the contents into an XML tree or writing to
YAML.
"""
file_formatted_dict = {}
self._get_keyword_updates()
# Iterate through all current attributes.
for key, val in self.__dict__.items():
# Skip any private attributes.
if key[0] == "_":
continue
# Format the keys for writing to YAML.
key = key.split("_")
key = "".join([k.capitalize() for k in key])
# If operating with self.file_types or self.keyword_updates, use
# the class methods to return the object information in dicts.
if key == "FileTypes":
val = list()
for ft in self.file_types:
val.append(ft.as_dict())
elif key == "KeywordUpdates":
val = list()
for kw in self.keyword_updates.keywords:
val.append(kw.as_dict())
file_formatted_dict[key] = val
return file_formatted_dict
def check_ingest_step(self, step_num):
"""
Check the HLSP ingestion status of a particular ingest step.
:param step_num: The ingestion step to check.
:type step_num: int
"""
# The ingest dictionary keys are prepended with the step number, so
# sorting these will allow dictionary access by index.
ind = sorted(self.ingest.keys())[step_num]
return self.ingest[ind]
def find_file_type(self, target_ending):
"""
Find a given file ending in the file_types list.
:param target_ending: A file ending such as "type.extension" to look
for in self.file_types.
:type target_ending: str
"""
# Search self._file_types and return any matching FileType object.
for ft in self.file_types:
if ft.ftype.lower() == target_ending.lower():
return ft
return None
def find_log_file(self, call_file, here=None):
"""
Look for an existing log file for a given HLSP ingestion step.
:param call_file: The function we are searching for a resulting log
file for.
:type call_file: str
"""
self._update_stage_paths()
# Format and look for a match for the provided calling function.
caller = self._format_caller(call_file)
caller = self._match_caller(caller)
filename = call_file.replace(".py", ".log")
# If a calling function match is found, construct a path to look for
# a log file.
if caller and not here:
path = os.path.dirname(caller)
else:
path = os.getcwd()
filepath = os.path.join(path, filename)
# Check if the constructed log file path actually exists.
filepath = cp.check_existing_file(filepath)
return str(filepath)
def fits_keywords(self):
"""
Combine the contents of the designated FITS template and any keyword
updates and return a list of all FitsKeyword objects used by this
HLSPFile.
"""
return self._fits_keywords
def get_check_extensions(self):
ext_list = []
for f in self.file_types:
if f.run_check:
ext_list.append(f.ftype)
return ext_list
def get_data_path(self):
| |
the container to terminate before forcefully removing it `(ms|s|m|h)`.
* `stopSignal` (`str`) - Signal to stop the container.
* `user` (`str`) - The user inside the container.
* `forceUpdate` (`float`)
* `log_driver` (`dict`) - See Log Driver below for details.
* `name` (`str`) - The logging driver to use. Either `(none|json-file|syslog|journald|gelf|fluentd|awslogs|splunk|etwlogs|gcplogs)`.
* `options` (`dict`) - The options for the logging driver, e.g.
* `networks` (`list`) - Ids of the networks in which the container will be put in.
* `placement` (`dict`)
* `constraints` (`list`)
* `platforms` (`list`)
* `architecture` (`str`)
* `os` (`str`)
* `prefs` (`list`)
* `resources` (`dict`)
* `limits` (`dict`) - Describes the resources which can be advertised by a node and requested by a task.
* `nano_cpus` (Optional, int) CPU shares in units of 1/1e9 (or 10^-9) of the CPU. Should be at least 1000000
* `memory_bytes` (Optional, int) The amount of memory in bytes the container allocates
* `generic_resources` (Optional, map) User-defined resources can be either Integer resources (e.g, SSD=3) or String resources (e.g, GPU=UUID1)
* `named_resources_spec` (Optional, set of string) The String resources, delimited by `=`
* `discrete_resources_spec` (Optional, set of string) The Integer resources, delimited by `=`
* `genericResources` (`dict`)
* `discreteResourcesSpecs` (`list`)
* `namedResourcesSpecs` (`list`)
* `memoryBytes` (`float`)
* `nanoCpus` (`float`)
* `reservation` (`dict`) - An object describing the resources which can be advertised by a node and requested by a task.
* `nano_cpus` (Optional, int) CPU shares in units of 1/1e9 (or 10^-9) of the CPU. Should be at least 1000000
* `memory_bytes` (Optional, int) The amount of memory in bytes the container allocates
* `generic_resources` (Optional, map) User-defined resources can be either Integer resources (e.g, SSD=3) or String resources (e.g, GPU=UUID1)
* `named_resources_spec` (Optional, set of string) The String resources
* `discrete_resources_spec` (Optional, set of string) The Integer resources
* `genericResources` (`dict`)
* `discreteResourcesSpecs` (`list`)
* `namedResourcesSpecs` (`list`)
* `memoryBytes` (`float`)
* `nanoCpus` (`float`)
* `restartPolicy` (`dict`)
* `condition` (`str`)
* `delay` (`str`) - Delay between updates `(ns|us|ms|s|m|h)`, e.g. `5s`.
all tasks are up when a service is created, or to check if all tasks are successfully updated on an update. Default: `7s`.
* `maxAttempts` (`float`)
* `window` (`str`)
* `runtime` (`str`)
"""
update_config: pulumi.Output[dict]
"""
See UpdateConfig below for details.
* `delay` (`str`) - Delay between updates `(ns|us|ms|s|m|h)`, e.g. `5s`.
* `failureAction` (`str`) - Action on update failure: `pause|continue|rollback`.
* `maxFailureRatio` (`str`) - The failure rate to tolerate during an update as `float`. **Important:** the `float`need to be wrapped in a `string` to avoid internal
casting and precision errors.
* `monitor` (`str`) - Duration after each task update to monitor for failure `(ns|us|ms|s|m|h)`
* `order` (`str`) - Update order either 'stop-first' or 'start-first'.
* `parallelism` (`float`) - The maximum number of tasks to be updated in one iteration simultaneously (0 to update all at once).
"""
def __init__(__self__, resource_name, opts=None, auth=None, converge_config=None, endpoint_spec=None, labels=None, mode=None, name=None, rollback_config=None, task_spec=None, update_config=None, __props__=None, __name__=None, __opts__=None):
"""
Create a Service resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] auth: See Auth below for details.
:param pulumi.Input[dict] converge_config: See Converge Config below for details.
:param pulumi.Input[dict] endpoint_spec: See EndpointSpec below for details.
:param pulumi.Input[list] labels: See Labels below for details.
:param pulumi.Input[dict] mode: See Mode below for details.
:param pulumi.Input[str] name: The name of the Docker service.
:param pulumi.Input[dict] rollback_config: See RollbackConfig below for details.
:param pulumi.Input[dict] task_spec: See TaskSpec below for details.
:param pulumi.Input[dict] update_config: See UpdateConfig below for details.
The **auth** object supports the following:
* `password` (`pulumi.Input[str]`) - The password to use for authenticating to the registry. If this is blank, the `DOCKER_REGISTRY_PASS` is also be checked.
* `server_address` (`pulumi.Input[str]`) - The address of the registry server
* `username` (`pulumi.Input[str]`) - The username to use for authenticating to the registry. If this is blank, the `DOCKER_REGISTRY_USER` is also be checked.
The **converge_config** object supports the following:
* `delay` (`pulumi.Input[str]`) - Time between each the check to check docker endpoint `(ms|s|m|h)`. For example, to check if
all tasks are up when a service is created, or to check if all tasks are successfully updated on an update. Default: `7s`.
* `timeout` (`pulumi.Input[str]`) - The timeout of the service to reach the desired state `(s|m)`. Default: `3m`.
The **endpoint_spec** object supports the following:
* `mode` (`pulumi.Input[str]`) - The mode of resolution to use for internal load balancing between tasks. `(vip|dnsrr)`. Default: `vip`.
* `ports` (`pulumi.Input[list]`) - See Ports below for details.
* `name` (`pulumi.Input[str]`) - The name of the Docker service.
* `protocol` (`pulumi.Input[str]`) - Protocol that can be used over this port: `tcp|udp|sctp`. Default: `tcp`.
* `publishMode` (`pulumi.Input[str]`) - Represents the mode in which the port is to be published: `ingress|host`
* `publishedPort` (`pulumi.Input[float]`) - The port on the swarm hosts. If not set the value of `target_port` will be used.
* `targetPort` (`pulumi.Input[float]`) - Port inside the container.
The **labels** object supports the following:
* `label` (`pulumi.Input[str]`) - Name of the label
* `value` (Required, string) Value of the label
* `value` (`pulumi.Input[str]`)
The **mode** object supports the following:
* `global` (`pulumi.Input[bool]`) - set it to `true` to run the service in the global mode
* `replicated` (`pulumi.Input[dict]`) - , which contains atm only the amount of `replicas`
* `replicas` (`pulumi.Input[float]`)
The **rollback_config** object supports the following:
* `delay` (`pulumi.Input[str]`) - Delay between updates `(ns|us|ms|s|m|h)`, e.g. `5s`.
all tasks are up when a service is created, or to check if all tasks are successfully updated on an update. Default: `7s`.
* `failureAction` (`pulumi.Input[str]`) - Action on update failure: `pause|continue|rollback`.
* `maxFailureRatio` (`pulumi.Input[str]`) - The failure rate to tolerate during an update as `float`. **Important:** the `float`need to be wrapped in a `string` to avoid internal
casting and precision errors.
* `monitor` (`pulumi.Input[str]`) - Duration after each task update to monitor for failure `(ns|us|ms|s|m|h)`
* `order` (`pulumi.Input[str]`) - Update order either 'stop-first' or 'start-first'.
* `parallelism` (`pulumi.Input[float]`) - The maximum number of tasks to be updated in one iteration simultaneously (0 to update all at once).
The **task_spec** object supports the following:
* `containerSpec` (`pulumi.Input[dict]`)
* `args` (`pulumi.Input[list]`) - Arguments to the command.
* `commands` (`pulumi.Input[list]`) - The command to be run in the image.
* `configs` (`pulumi.Input[list]`) - See Configs below for details.
* `configId` (`pulumi.Input[str]`) - ConfigID represents the ID of the specific config.
* `configName` (`pulumi.Input[str]`) - The name of the config that this references, but internally it is just provided for lookup/display purposes
* `fileGid` (`pulumi.Input[str]`) - Represents the file GID. Defaults: `0`
* `fileMode` (`pulumi.Input[float]`) - Represents the FileMode of the file. Defaults: `0444`
* `fileName` (`pulumi.Input[str]`) - Represents the final filename in the filesystem. The specific target file that the config data is written within the docker container, e.g. `/root/config/config.json`
* `fileUid` (`pulumi.Input[str]`) - Represents the file UID. Defaults: `0`
* `dir` (`pulumi.Input[str]`) - The working directory for commands to run in.
* `dnsConfig` (`pulumi.Input[dict]`) - See DNS Config below for details.
* `nameservers` (`pulumi.Input[list]`) - The IP addresses of the name servers, for example, `8.8.8.8`
* `options` (`pulumi.Input[list]`) - A list of internal resolver variables to be modified, for example, `debug`, `ndots:3`
* `searches` (`pulumi.Input[list]`) - A search list for host-name lookup.
* `env` (`pulumi.Input[dict]`) - A list of environment variables in the form VAR=value.
* `groups` (`pulumi.Input[list]`) - A list of additional groups that the container process will run as.
* `privileges` (Optional, block) See Privileges below for details.
* `healthcheck` (`pulumi.Input[dict]`) - See Healthcheck below for details.
* `interval` (`pulumi.Input[str]`) - Time between running the check `(ms|s|m|h)`. Default: `0s`.
* `retries` (`pulumi.Input[float]`) - Consecutive failures needed to report unhealthy. Default: `0`.
* `startPeriod` (`pulumi.Input[str]`) - Start period for the container to initialize before counting retries | |
<gh_stars>1-10
import numpy as np
import pandas as pd
import tensorflow as tf
import math
from sklearn.cluster import KMeans
import Loaddata
from numpy import random
import time
from datetime import date
import matplotlib.pyplot as plt
import os
from pandas import DataFrame, concat
import multiprocessing as mp
class LSTM_double:
    """
    LSTM regressor that maps a window of 100 consecutive series values to
    the next value.  Built on TensorFlow 1.x graph/session APIs; models are
    checkpointed per (class_people, numb_sub, numb_class) directory.
    """

    # Define constants (hyperparameters and graph variables).
    def __init__(self, data):
        # Network hyperparameters.
        self.rnn_unit = 300
        self.input_size = 100
        self.output_size = 1
        self.lr = 0.00006
        self.time_step = 1
        self.batch_size = 1
        # Re-frame the raw series as supervised rows: 100 lag columns
        # followed by the target column.
        self.data = self.series_to_supervised(data, 100)
        # Train on the whole supervised set; test on just the final row.
        self.train_begin = 0
        self.train_end = len(self.data)
        self.test_begin = len(self.data)-1
        # Input/output projection weights around the recurrent cell.
        self.weights = {
            'in': tf.Variable(tf.random_normal([self.input_size, self.rnn_unit])),
            'out': tf.Variable(tf.random_normal([self.rnn_unit, self.output_size]))
        }
        self.biases = {
            'in': tf.Variable(tf.constant(0.1, shape=[self.rnn_unit, ])),
            'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
        }

    # Define the windowing (series-to-supervised) function.
    def series_to_supervised(self, data, n_in=1, n_out=1, dropnan=True):
        """
        Convert a series to a supervised-learning frame with n_in lag
        columns and n_out lead columns; rows containing NaNs introduced by
        shifting are dropped when dropnan is True.  Returns an ndarray.
        """
        n_vars = 1 if type(data) is list else data.shape[1]
        df = DataFrame(data)
        cols, names = list(), list()
        # Lag columns: t-n_in ... t-1.
        for i in range(n_in, 0, -1):
            cols.append(df.shift(i))
            names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
        # Lead columns: t ... t+n_out-1.
        for i in range(0, n_out):
            cols.append(df.shift(-i))
            if i == 0:
                names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
            else:
                names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
        agg = concat(cols, axis=1)
        agg.columns = names
        if dropnan:
            agg.dropna(inplace=True)
        return agg.values

    # Build the training set.
    def get_train_data(self):
        """
        Slice the supervised frame into batch index boundaries plus (x, y)
        training windows, scaled down by 1e8.
        """
        batch_index = []
        data_train = self.data[self.train_begin:self.train_end]
        # Scale raw amounts down to keep the network numerically stable.
        normalized_train_data = data_train/1e8
        train_x, train_y = [], []  # training set
        for i in range(len(normalized_train_data)-self.time_step):
            if i % self.batch_size == 0:
                batch_index.append(i)
            # First 100 columns are inputs; the remainder are targets.
            x = normalized_train_data[i:i+self.time_step, :100]
            y = normalized_train_data[i:i+self.time_step, 100:]
            train_x.append(x.tolist())
            train_y.append(y.tolist())
        batch_index.append((len(normalized_train_data)-self.time_step))
        return batch_index, train_x, train_y

    # Build the test set.
    def get_test_data(self):
        """
        Slice the tail of the supervised frame (from test_begin) into
        (x, y) test windows, scaled down by 1e8.
        """
        data_test = self.data[self.test_begin:]
        normalized_test_data = data_test/1e8
        size = (len(normalized_test_data) +
                self.time_step)//self.time_step  # number of samples
        test_x, test_y = [], []
        for i in range(size-1):
            x = normalized_test_data[i *
                                     self.time_step:(i+1)*self.time_step, :100]
            y = normalized_test_data[i *
                                     self.time_step:(i+1)*self.time_step, 100]
            test_x.append(x.tolist())
            test_y.extend(y)
        # Remaining partial window becomes the last sample.
        # NOTE(review): relies on the loop above running at least once so
        # `i` is bound — holds for the default time_step=1 setup.
        test_x.append(
            (normalized_test_data[(i+1)*self.time_step:, :100]).tolist())
        test_y.extend(
            (normalized_test_data[(i+1)*self.time_step:, 100]).tolist())
        return test_x, test_y

    # ------------------ Define the network variables ------------------
    def lstm(self, X):
        """
        Build the LSTM graph for input tensor X; return the prediction
        tensor and the final cell states.
        """
        self.batch_size = tf.shape(X)[0]
        self.time_step = tf.shape(X)[1]
        w_in = self.weights['in']
        b_in = self.biases['in']
        # Flatten to 2-D for the input projection; the result feeds the
        # recurrent layer.
        input = tf.reshape(X, [-1, self.input_size])
        input_rnn = tf.matmul(input, w_in)+b_in
        # Back to 3-D as the LSTM cell input.
        input_rnn = tf.reshape(input_rnn, [-1, self.time_step, self.rnn_unit])
        cell = tf.nn.rnn_cell.LSTMCell(self.rnn_unit)
        init_state = cell.zero_state(self.batch_size, dtype=tf.float32)
        # output_rnn records every step's output; final_states is the last
        # cell's state.
        output_rnn, final_states = tf.nn.dynamic_rnn(
            cell, input_rnn, initial_state=init_state, dtype=tf.float32)
        output = tf.reshape(output_rnn, [-1, self.rnn_unit])  # output-layer input
        w_out = self.weights['out']
        b_out = self.biases['out']
        pred = tf.matmul(output, w_out)+b_out
        pred = tf.reshape(pred, [-1, self.output_size])
        return pred, final_states

    # ------------------------ Train the model ------------------------
    def train_lstm(self, num_epochs=40, numb_sub=1,numb_class=1,continue_train=False,class_people='purchase'):
        """
        Train for num_epochs, checkpointing into a directory named from
        class_people / numb_sub / numb_class.  Set continue_train=True to
        resume from the latest checkpoint in that directory.
        """
        X = tf.placeholder(tf.float32, shape=[None, 1, 100])
        Y = tf.placeholder(tf.float32, shape=[None, 1, 1])
        batch_index, train_x, train_y = self.get_train_data()
        with tf.variable_scope("sec_lstm"):
            pred, _ = self.lstm(X)
        # Loss: mean squared error between prediction and target.
        loss = tf.reduce_mean(
            tf.square(tf.reshape(pred, [-1])-tf.reshape(Y, [-1])))
        train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=15)
        if continue_train==True:
            module_file = tf.train.latest_checkpoint('model_save_'+class_people+'_'+
                                                     str(numb_sub)+'_'+str(numb_class))
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            if continue_train==True:
                saver.restore(sess, module_file)
            # Epoch loop; checkpoint only after the final epoch.
            for i in range(num_epochs):
                for step in range(len(batch_index)-1):
                    _, loss_ = sess.run([train_op, loss], feed_dict={
                        X: train_x[batch_index[step]:batch_index[step+1]], Y: train_y[batch_index[step]:batch_index[step+1]]})
                print(i+1, loss_)
                if ((i+1) % num_epochs) == 0:
                    print("保存模型:", saver.save(sess, 'model_save_'+class_people+'_' +
                                               str(numb_sub)+'_'+str(numb_class)+'/modle.ckpt', global_step=i))

    # ------------------------- Prediction -------------------------
    def prediction(self, numb_sub=1,numb_class=1,class_people='purchase'):
        """
        Restore the latest checkpoint and autoregressively predict 30 steps
        ahead; return the predictions rescaled by 1e8.
        """
        self.time_step = 1
        self.input_size = 100
        self.output_size = 1
        X = tf.placeholder(tf.float32, shape=[
            None, self.time_step, self.input_size])
        Y = tf.placeholder(tf.float32, shape=[
            None, self.time_step, self.output_size])
        test_x, test_y = self.get_test_data()
        with tf.variable_scope("sec_lstm", reuse=tf.AUTO_REUSE):
            pred, _ = self.lstm(X)
        saver = tf.train.Saver(tf.global_variables())
        with tf.Session() as sess:
            # Restore trained parameters.
            module_file = tf.train.latest_checkpoint(
                'model_save_'+class_people+'_'+str(numb_sub)+'_'+str(numb_class))
            saver.restore(sess, module_file)
            # Seed the rolling window from the last test sample, shifted by
            # one step so the newest known value sits in the last column.
            test_x = test_x[:1]
            test_x = [a[0] for a in test_x]
            test_x = np.array(test_x)
            test_x[:, :99] = test_x[:, 1:]
            test_x[:, 99:] = test_y[-1]
            test_predict = []
            # Roll the window forward one predicted value at a time.
            for step in range(30):
                prob = sess.run(pred, feed_dict={X: [test_x]})
                # NOTE(review): `predict` is assigned but never used.
                predict = prob.reshape(-1)
                test_predict.extend(prob)
                test_x[:, :99] = test_x[:, 1:]
                test_x[:, 99:] = prob[-1]
            test_predict = np.array(test_predict)
            test_predict = test_predict[:, 0]
            test_predict = test_predict.flatten()
            # Undo the 1e8 normalization applied in get_*_data().
            test_predict = np.array(test_predict)*1e8
            print(test_predict)
            return test_predict
class k_mean(object):
    """
    Thin wrapper around sklearn KMeans that groups sample indices
    (1-based) by their assigned cluster label.
    """

    def __init__(self, data):
        # Feature matrix to be clustered.
        self.x_train = data

    def k_mean_divide(self, cluster_num):
        """
        Fit KMeans with cluster_num clusters and return a dict mapping
        'cluster<i>' to a 1-based array of member sample indices.
        """
        labels = KMeans(n_clusters=cluster_num,
                        random_state=0).fit(self.x_train).labels_
        grouped = {}
        for cluster_id in range(cluster_num):
            # Positions of the samples assigned to this cluster; +1 shifts
            # 0-based row positions to 1-based sample ids.
            members = [pos for pos, label in enumerate(labels)
                       if label == cluster_id]
            grouped['cluster'+str(cluster_id)] = np.array(members)+1
        return grouped
class genetic(object):
def getEncoding(self, popSize, chromLength): # 生成种群
pop = random.randint(0, 2, size=(popSize, chromLength))
return pop
def binary2decimal(self, pop, chromLength_type, chromLength):
row = pop.shape[0]
chromLength_length = len(chromLength_type) - 1
tempfinal = np.zeros((row, chromLength_length))
position_sum = np.cumsum(chromLength_type)
for i in range(row):
for j in range(chromLength_length):
t = 0
for k in range(position_sum[j], position_sum[j+1]):
t += pop[i, k]*(math.pow(2, k - position_sum[j]))
tempfinal[i, j] = t
tempfinal[:, 0] = tempfinal[:, 0]+1
tempfinal[:, 1:] = tempfinal[:, 1:]/(math.pow(2, 8)-1)*5
return tempfinal
    def multiprocess_fitness_purchase(self, j):# parallel fitness worker
        """
        Evaluate individual j on the purchase series: weight the one-hot
        user profiles by the decoded gene multipliers, cluster users, train
        and predict one LSTM per cluster, and return the inverse relative
        error of the summed predictions (higher is fitter).
        """
        # Build the per-feature multiplier vector: gene 1 applies to the
        # first column, gene 2 to the next 7, gene 3 to the next 12.
        multiple_time = np.hstack((self.tempfinal[j, 1], np.tile(
            self.tempfinal[j, 2], 7), np.tile(self.tempfinal[j, 3], 12)))  # concatenated multipliers
        for k in range(4, self.tempfinal.shape[1]):
            multiple_time = np.hstack((multiple_time, self.tempfinal[j, k]))
        user_profile_onehot = self.user_profile_onehot * multiple_time  # boost the weight of selected features
        model_kmean = k_mean(user_profile_onehot)  # clustering
        # Gene 0 of the individual holds the cluster count.
        divide_class = model_kmean.k_mean_divide(int(self.tempfinal[j, 0]))
        user_balance = Loaddata.UserBalance()
        purchase_predict_class = []
        purchase_test_class = []
        for i in range(len(divide_class)):  # run each cluster through its own network
            print('第'+str(j+1)+'个种群 第'+str(i+1)+'个类')
            user_balance.CalculateDayPurchaseList(
                divide_class['cluster'+str(i)])
            user_balance.CalculateDayRedeemList(
                divide_class['cluster'+str(i)])
            purchase_train, redeem_train = user_balance.GetdataUsedInPredict()
            purchase_test, redeem_test = user_balance.GetTestData()
            purchase_model = LSTM_double(purchase_train.reshape((-1, 1)))
            purchase_model.train_lstm(numb_sub=j+1,numb_class=i+1)
            purchase_predict = purchase_model.prediction(numb_sub=j+1,numb_class=i+1)
            # Reset the TF graph so the next cluster builds fresh variables.
            tf.reset_default_graph()
            plt.plot(purchase_predict, 'b')
            plt.plot(purchase_test, 'g')
            if not os.path.exists('out_lstm_double/'):
                os.makedirs('out_lstm_double/')
            plt.savefig('out_lstm_double/purchase_the_{}_times_the_{}_gene_the_{}_class.png'.format(
                str(self.times_calc), str(j+1), str(i+1)))
            plt.close()
            purchase_predict_class.append(purchase_predict)
            purchase_test_class.append(purchase_test)
        # Mean relative error of the summed per-cluster predictions;
        # fitness is its reciprocal.
        purchase_loss_value = np.mean(abs(np.array(purchase_predict_class).sum(
            axis=0) - np.array(purchase_test_class).sum(axis=0))/(np.array(purchase_test_class).sum(axis=0)))
        return 1/purchase_loss_value
def fitness_purchase(self, tempfinal, user_profile_onehot, times_calc): # 适应度
self.user_profile_onehot = user_profile_onehot
self.tempfinal = tempfinal
self.times_calc = times_calc
pool = mp.Pool(processes=tempfinal.shape[0])
purchase_loss_value = pool.map(
self.multiprocess_fitness_purchase, range(tempfinal.shape[0]))
pool.close()
pool.join()
return np.squeeze(purchase_loss_value)
    def fitness_predict_purchase(self,length_best, tempfinal, user_profile_onehot, user_balance):
        """
        Final purchase forecast using the single best individual: rebuild
        the weighted clustering, fine-tune each cluster's saved model on
        the full data, and return the summed per-cluster predictions.
        """
        # Per-feature multipliers from the best individual's genes (gene 1
        # covers 1 column, gene 2 the next 7, gene 3 the next 12).
        multiple_time = np.hstack((tempfinal[0, 1], np.tile(
            tempfinal[0, 2], 7), np.tile(tempfinal[0, 3], 12)))  # concatenated multipliers
        for k in range(4, tempfinal.shape[1]):
            multiple_time = np.hstack((multiple_time, tempfinal[0, k]))
        user_profile_onehot = user_profile_onehot * multiple_time  # boost the weight of selected features
        model_kmean = k_mean(user_profile_onehot)  # clustering
        divide_class = model_kmean.k_mean_divide(int(tempfinal[0, 0]))
        purchase_predict_class = []
        for i in range(len(divide_class)):  # run each cluster through its own network
            user_balance.CalculateDayPurchaseList(
                divide_class['cluster'+str(i)])
            user_balance.CalculateDayRedeemList(divide_class['cluster'+str(i)])
            purchase_train, redeem_train = user_balance.GetdataAll()
            purchase_model = LSTM_double(purchase_train.reshape((-1, 1)))
            # Resume from the checkpoint saved during the GA search and
            # fine-tune briefly on the full series.
            purchase_model.train_lstm(num_epochs = 10,numb_sub = length_best,numb_class=i+1,continue_train=True)
            purchase_predict = purchase_model.prediction(numb_sub=length_best,numb_class=i+1)
            tf.reset_default_graph()
            purchase_predict_class.append(purchase_predict)
        purchase_predict_return = np.array(purchase_predict_class).sum(axis=0)
        return purchase_predict_return
    def multiprocess_fitness_redeem(self, j):
        """
        Evaluate individual j on the redeem series: weight and cluster the
        user profiles, train and predict one LSTM per cluster, and return
        the inverse relative error of the summed predictions.
        """
        # Per-feature multipliers decoded from the individual's genes.
        multiple_time = np.hstack((self.tempfinal[j, 1], np.tile(
            self.tempfinal[j, 2], 7), np.tile(self.tempfinal[j, 3], 12)))  # concatenated multipliers
        for k in range(4, self.tempfinal.shape[1]):
            multiple_time = np.hstack((multiple_time, self.tempfinal[j, k]))
        user_profile_onehot = self.user_profile_onehot * multiple_time  # boost the weight of selected features
        model_kmean = k_mean(user_profile_onehot)  # clustering
        divide_class = model_kmean.k_mean_divide(int(self.tempfinal[j, 0]))
        user_balance = Loaddata.UserBalance()
        redeem_predict_class = []
        redeem_test_class = []
        for i in range(len(divide_class)):  # run each cluster through its own network
            print('第'+str(j+1)+'个种群 第'+str(i+1)+'个类')
            user_balance.CalculateDayPurchaseList(
                divide_class['cluster'+str(i)])  # most of the runtime is spent here!!!!
            user_balance.CalculateDayRedeemList(
                divide_class['cluster'+str(i)])
            purchase_train, redeem_train = user_balance.GetdataUsedInPredict()
            purchase_test, redeem_test = user_balance.GetTestData()
            redeem_model = LSTM_double(redeem_train.reshape((-1, 1)))
            # The redeem series trains with a higher learning rate and more
            # epochs than the purchase series.
            redeem_model.lr = 0.0001
            redeem_model.train_lstm(num_epochs=60, numb_sub=j+1,numb_class=i+1,class_people='redeem')
            redeem_predict = redeem_model.prediction(numb_sub=j+1,numb_class=i+1,class_people='redeem')
            tf.reset_default_graph()
            plt.plot(redeem_predict, 'b')
            plt.plot(redeem_test, 'g')
            # NOTE(review): unlike the purchase worker, this does not create
            # 'out_lstm_double/' first — confirm the purchase pass always
            # runs before the redeem pass.
            plt.savefig('out_lstm_double/redeem_the_{}_times_the_{}_gene_the_{}_class.png'.format(
                str(self.times_calc), str(j+1), str(i+1)))
            plt.close()
            redeem_predict_class.append(redeem_predict)
            redeem_test_class.append(redeem_test)
        # Fitness = reciprocal of the mean relative error of the summed
        # per-cluster predictions.
        redeem_loss_value = np.mean(abs(np.array(redeem_predict_class).sum(
            axis=0) - np.array(redeem_test_class).sum(axis=0))/(np.array(redeem_test_class).sum(axis=0)))
        return 1/redeem_loss_value
def fitness_redeem(self, tempfinal, user_profile_onehot, times_calc): # 适应度
self.user_profile_onehot = user_profile_onehot
self.tempfinal = tempfinal
self.times_calc = times_calc
pool = mp.Pool(processes=tempfinal.shape[0])
redeem_loss_value = pool.map(
self.multiprocess_fitness_redeem, range(tempfinal.shape[0]))
pool.close()
pool.join()
return np.squeeze(redeem_loss_value)
def fitness_predict_redeem(self,length_best, tempfinal, user_profile_onehot, user_balance):
    """
    Produce the final redeem prediction using the best genome.

    Scales the user-profile vectors by the genome's weights, clusters the
    users, fine-tunes one LSTM per cluster (continue_train=True resumes
    from the checkpoints written during evolution), and sums the per-cluster
    predictions.

    Parameters
    ----------
    length_best: checkpoint index of the best genome (passed as numb_sub).
    tempfinal: 2-D array holding the single best genome in row 0.
    user_profile_onehot: one-hot user-profile matrix.
    user_balance: Loaddata.UserBalance-style data provider.

    Returns
    -------
    1-D array: element-wise sum of the per-cluster redeem predictions.
    """
    multiple_time = np.hstack((tempfinal[0, 1], np.tile(
        tempfinal[0, 2], 7), np.tile(tempfinal[0, 3], 12)))  # concatenate the weight multipliers
    for k in range(4, tempfinal.shape[1]):
        multiple_time = np.hstack((multiple_time, tempfinal[0, k]))
    user_profile_onehot = user_profile_onehot * multiple_time  # enlarge the weight of selected vector components
    model_kmean = k_mean(user_profile_onehot)  # clustering
    # Genome element 0 encodes the number of clusters.
    divide_class = model_kmean.k_mean_divide(int(tempfinal[0, 0]))
    redeem_predict_class = []
    for i in range(len(divide_class)):  # feed each cluster into its own network
        user_balance.CalculateDayPurchaseList(
            divide_class['cluster'+str(i)])
        user_balance.CalculateDayRedeemList(divide_class['cluster'+str(i)])
        purchase_train, redeem_train = user_balance.GetdataAll()
        # LSTM_double
        redeem_model = LSTM_double(redeem_train.reshape((-1, 1)))
        redeem_model.lr = 0.0001
        redeem_model.train_lstm(num_epochs=10,numb_sub = length_best,numb_class=i+1,continue_train=True,class_people='redeem')
        redeem_predict = redeem_model.prediction(numb_sub = length_best,numb_class=i+1,class_people='redeem')
        # Clear the TF1 default graph so the next cluster's model starts fresh.
        tf.reset_default_graph()
        redeem_predict_class.append(redeem_predict)
    redeem_predict_return = np.array(redeem_predict_class).sum(axis=0)
    return redeem_predict_return
def calfitValue(self, value):  # clamp fitness so losses are never negative
    """Clamp negative fitness values to zero, in place, and return the array.

    Vectorized replacement for the original per-element loop: a boolean-mask
    assignment runs in C and, unlike the old ``value[i] < 0`` row iteration,
    also works for arrays of any rank. Mutation-in-place semantics and the
    returned object are unchanged.

    Parameters
    ----------
    value: numpy array of fitness values.

    Returns
    -------
    The same array object with all negative entries set to 0.
    """
    value[value < 0] = 0
    return value
def selection(self, pop, value):  # roulette-wheel selection
    """Select a new population by fitness-proportional (roulette) sampling.

    Parameters
    ----------
    pop: 2-D array of genomes, one row per individual.
    value: 1-D array of non-negative fitness values, same length as pop.

    Returns
    -------
    A new array of the same shape as ``pop``, rows sampled with
    probability proportional to fitness.
    """
    n = value.shape[0]
    # Build the cumulative-probability wheel.
    cumulative = np.zeros((n, 1))
    total = sum(value)
    accumulator = 0
    for j, fitness in enumerate(value):
        accumulator += fitness * 1.0 / total
        cumulative[j] = accumulator
    cumulative[n - 1] = 1  # guard against floating-point shortfall
    # Sorted uniform samples let one forward sweep serve all spins.
    ms = [random.random() for _ in range(n)]
    ms.sort()
    # BUG FIX: the original did `newpop = pop`, aliasing the input array;
    # writes into newpop could clobber rows of pop that later spins still
    # needed to read, corrupting the selection. Copy instead.
    newpop = pop.copy()
    fitin = 0
    newin = 0
    while newin < n:
        if ms[newin] < cumulative[fitin]:
            newpop[newin] = pop[fitin]
            newin += 1
        else:
            fitin += 1
    return newpop
def crossover(self, pop, crossrate, chromLength):  # single-point crossover
    """Apply single-point crossover to adjacent genome pairs.

    With probability ``crossrate`` each even/odd pair of rows swaps its
    tail segments at a random cut point. Returns the (possibly modified)
    population as a new numpy array; semantics match the original exactly.
    """
    genomes = pop.tolist()
    last_pair_start = pop.shape[0] - 1  # guarantees a complete pair exists
    for left in range(0, last_pair_start, 2):
        if random.random() >= crossrate:
            continue
        # NOTE: single-argument randint, numpy-style — assumed intentional.
        cut = random.randint(chromLength)
        right = left + 1
        head_a, tail_a = genomes[left][:cut], genomes[left][cut:chromLength]
        head_b, tail_b = genomes[right][:cut], genomes[right][cut:chromLength]
        genomes[left] = head_a + tail_b   # recombined children replace parents
        genomes[right] = head_b + tail_a
    return np.array(genomes)
def mutation(self, pop, mutationrate, chromLength): # 变异
row = pop.shape[0]
for i in range(row):
if (random.random() < | |
#! /usr/bin/env python
#
"""
This code uses matplotlib and numpy to produce a window within which a FITS
image can be displayed. The reason for having this and not using the usual
packages already in existence is that I will want specific functions on the
image for data reduction.
Usage:
fits_image_display.py imagename.fits
or just
fits_image_display.py
In the first case the image name given is loaded (if possible) and displayed.
In the second case the widget comes up and one can read in an image.
Note that if the image is of dimension larger than 2 then the first "plane"
is used. There is no mechanism here for using other planes.
"""
import math
import sys
import tkinter as Tk
import tkinter.ttk
import tkinter.filedialog
import tkinter.simpledialog
import tkinter.messagebox
import numpy
from astropy.io import fits
# import matplotlib
# import matplotlib.lines as mlines
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
# from matplotlib.colors import LogNorm
import matplotlib.pyplot as pyplot
import general_utilities
import mpfitexpr
class ImageGUI(Tk.Frame):
"""
This class brings up a separate image display window.
Parameters
----------
Tk.Frame: The base class of the object, matching a Tkinter root or
Toplevel variable
Returns
-------
The class variable is returned, effectively.
"""
# The following section of code concerns the image display functionality.
#
def __init__(self, parent=None, **args):
    """
    Initialise all image-display state to its empty defaults.

    Parameters
    ----------
    parent: optional Tk root or Toplevel to attach this frame to; when
        given, the Tk.Frame base class is initialised with **args.
    **args: extra options forwarded to the Tk.Frame constructor.
    """
    # Image data and metadata
    self.image = None
    self.imagefilename = None
    self.zscale_flag = False
    self.root = None
    self.indpi = 100                # dots-per-inch for the matplotlib figure
    self.zoom = [1, 0, 0]           # [zoom factor, x corner, y corner]
    # Last cursor position / orientation on the image
    self.xposition = None
    self.yposition = None
    self.angle = None
    self.colourBarVariable = None
    self.showImageAxes = None
    # Tk widgets and matplotlib objects, created in make_image_window()
    self.imagePosLabel = None
    self.imagePosLabelText = None
    self.mplfig1 = None
    self.mplsubplot1 = None
    self.canvas1 = None
    self.plotFrame = None
    self.imagename = None
    self.imagexpos = None
    self.imageypos = None
    self.transvalues = None
    self.p1 = None
    self.p2 = None
    self.p3 = None
    self.yscaleType = None
    self.imageHistogramLabel = None
    self.imageHistogramLabelText = None
    self.rangeType = None
    self.scaleType = None
    self.minField = None
    self.maxField = None
    self.zsminField = None
    self.zsmaxField = None
    self.bin_field = None
    self.colourScheme = None
    self.colourLabels = None
    self.barLabel = None
    self.colourBar = None
    # NOTE(review): likely a typo of colourBarVariable above — confirm
    # nothing else reads this spelling before removing it.
    self.colouBarVariable = None
    if parent is not None:
        # initialize the window and make the plot area.
        Tk.Frame.__init__(self, parent, args)
        self.root = parent
def make_image_window(self):
    """
    Make the main image display window.

    Builds the matplotlib canvas plus all control widgets: colour scheme,
    colour-bar placement, display min/max, zscale limits, image scaling,
    image range, zoom/histogram buttons and save buttons, then draws the
    current image.

    Returns
    -------
    None.
    """
    # make the window
    BGCOL = '#F8F8FF'
    if self.root is not None:
        imagewindow = self.root
    else:
        imagewindow = Tk.Toplevel()
    imagewindow.config(bg=BGCOL)
    self.showImageAxes = True
    # Position/value readout along the top of the window
    imageLabelFrame = Tk.Frame(imagewindow)
    imageLabelFrame.pack(side=Tk.TOP)
    self.imagePosLabelText = Tk.StringVar()
    self.imagePosLabel = Tk.Label(imageLabelFrame,
                                  textvariable=self.imagePosLabelText,
                                  anchor=Tk.N, width=70)
    self.imagePosLabel.pack(side=Tk.LEFT)
    self.imagePosLabelText.set("Position: Value:")
    controlFrame = Tk.Frame(imagewindow)
    controlFrame.pack(side=Tk.LEFT, fill=Tk.Y, expand=1)
    self.plotFrame = Tk.Frame(imagewindow)
    self.plotFrame.pack()
    # Matplotlib canvas embedded in the Tk frame
    self.mplfig1 = Figure(figsize=(6, 6), dpi=self.indpi)
    self.mplsubplot1 = self.mplfig1.add_subplot(1, 1, 1)
    self.canvas1 = FigureCanvasTkAgg(self.mplfig1, master=self.plotFrame)
    self.canvas1.draw()
    self.canvas1.get_tk_widget().pack(side=Tk.LEFT, fill=Tk.BOTH,
                                      expand=Tk.YES)
    # Wire mouse and keyboard interaction on the canvas
    self.canvas1.mpl_connect("motion_notify_event", self.setPlotPosition)
    self.canvas1.mpl_connect("button_press_event", self.buttonPress)
    self.canvas1.mpl_connect("button_release_event", self.buttonRelease)
    self.canvas1.mpl_connect("key_press_event", self.keyPress)
    # Colour-scheme selection
    newframe = Tk.Frame(controlFrame)
    newframe.pack(side=Tk.TOP)
    lb = Tk.Label(newframe, text='Colour Scheme')
    lb.pack(side=Tk.TOP)
    self.colourScheme = tkinter.ttk.Combobox(newframe, width=15)
    self.colourLabels = ['jet', 'rainbow', 'gist_ncar', 'viridis',
                         'gnuplot', 'gist_gray', 'nipy_spectral']
    self.colourScheme['values'] = self.colourLabels
    self.colourScheme.pack()
    self.colourScheme.current(0)
    #
    # Colour-bar placement radio buttons (default: none)
    lb = Tk.Label(newframe, text='Show Colour Bar')
    lb.pack()
    selectFrame = Tk.Frame(newframe)
    selectFrame.pack()
    self.colourBar = Tk.IntVar()
    t1 = Tk.Radiobutton(selectFrame, text='vertical',
                        variable=self.colourBar, value=0,
                        command=self.displayImage)
    t1.pack(side=Tk.LEFT)
    t2 = Tk.Radiobutton(selectFrame, text='horizontal',
                        variable=self.colourBar, value=1,
                        command=self.displayImage)
    t2.pack(side=Tk.LEFT)
    t3 = Tk.Radiobutton(selectFrame, text='none', variable=self.colourBar,
                        value=2, command=self.displayImage)
    t3.pack(side=Tk.LEFT)
    self.colourBar.set(2)
    lb = Tk.Label(newframe, text='Colour Bar Label')
    lb.pack()
    self.barLabel = Tk.Entry(newframe, width=30)
    self.barLabel.pack()
    # Display min/max entries, seeded from the full image range
    rangeframe = Tk.Frame(newframe)
    rangeframe.pack()
    fr1 = Tk.Frame(rangeframe)
    fr1.pack(side=Tk.LEFT)
    lb = Tk.Label(fr1, text='Display Minimum')
    lb.pack(side=Tk.TOP)
    self.minField = Tk.Entry(fr1, width=10)
    self.minField.pack()
    fr1 = Tk.Frame(rangeframe)
    fr1.pack(side=Tk.LEFT)
    Tk.Label(fr1, text=' ').pack()
    fr1 = Tk.Frame(rangeframe)
    fr1.pack(side=Tk.LEFT)
    lb = Tk.Label(fr1, text='Display Maximum')
    lb.pack(side=Tk.TOP)
    self.maxField = Tk.Entry(fr1, width=10)
    self.maxField.pack()
    zmin = numpy.min(self.image)
    zmax = numpy.max(self.image)
    general_utilities.put_value(zmin, self.minField)
    general_utilities.put_value(zmax, self.maxField)
    # Zscale min/max entries
    rangeframe = Tk.Frame(newframe)
    rangeframe.pack()
    fr1 = Tk.Frame(rangeframe)
    fr1.pack(side=Tk.LEFT)
    lb = Tk.Label(fr1, text='Zscale Minimum')
    lb.pack(side=Tk.TOP)
    self.zsminField = Tk.Entry(fr1, width=10)
    self.zsminField.pack()
    fr1 = Tk.Frame(rangeframe)
    fr1.pack(side=Tk.LEFT)
    Tk.Label(fr1, text=' ').pack()
    fr1 = Tk.Frame(rangeframe)
    fr1.pack(side=Tk.LEFT)
    lb = Tk.Label(fr1, text='Zscale Maximum')
    lb.pack(side=Tk.TOP)
    self.zsmaxField = Tk.Entry(fr1, width=10)
    self.zsmaxField.pack()
    try:
        zmin1, zmax1 = self.get_limits(self.image)
        # NOTE(review): when the zscale limits are nearly equal the range
        # is widened; the zmin1 < 0 branch swaps/rescales in a way that
        # looks inverted — confirm the intended behaviour.
        ratio = abs(zmax1/zmin1)
        if ratio < 1.2:
            if zmin1 < 0.:
                zmax1 = zmin1
                zmin1 = 3.*zmin1
            else:
                zmax1 = 3.*zmin1
    except:
        # NOTE(review): bare except — any failure (including ZeroDivisionError
        # from zmin1 == 0) silently falls back to a 0..1 range.
        zmin1 = 0.
        zmax1 = 1.
    general_utilities.put_value(zmin1, self.zsminField)
    general_utilities.put_value(zmax1, self.zsmaxField)
    # Pixel-value scaling (linear/log/sqrt)
    lb = Tk.Label(newframe, text='Image Scaling')
    lb.pack()
    selectFrame = Tk.Frame(newframe)
    selectFrame.pack()
    self.scaleType = Tk.IntVar()
    t1 = Tk.Radiobutton(selectFrame, text='linear',
                        variable=self.scaleType, value=0,
                        command=self.displayImage)
    t1.pack(side=Tk.LEFT)
    t2 = Tk.Radiobutton(selectFrame, text='log', variable=self.scaleType,
                        value=1, command=self.displayImage)
    t2.pack(side=Tk.LEFT)
    t3 = Tk.Radiobutton(selectFrame, text='sqrt',
                        variable=self.scaleType, value=2,
                        command=self.displayImage)
    t3.pack(side=Tk.LEFT)
    self.scaleType.set(0)
    # Image range (full vs zscale)
    lb = Tk.Label(newframe, text='Image Range')
    lb.pack()
    selectFrame = Tk.Frame(newframe)
    selectFrame.pack()
    self.rangeType = Tk.IntVar()
    t1 = Tk.Radiobutton(
        selectFrame, text='full', variable=self.rangeType,
        value=0, command=self.toggle_zscale)
    t1.pack(side=Tk.LEFT)
    t2 = Tk.Radiobutton(
        selectFrame, text='zscale', variable=self.rangeType,
        value=1, command=self.toggle_zscale)
    t2.pack(side=Tk.LEFT)
    self.rangeType.set(0)
    # Action buttons
    buttonFrame = Tk.Frame(controlFrame)
    buttonFrame.pack(side=Tk.TOP)
    subFrame = Tk.Frame(buttonFrame)
    subFrame.pack(side=Tk.TOP)
    side1 = Tk.Frame(subFrame)
    side1.pack(side=Tk.LEFT)
    b1 = Tk.Button(side1, text='Toggle Axes',
                   command=self.toggleAxes)
    b1.pack(side=Tk.TOP)
    b1 = Tk.Button(side1, text='Auto Scale',
                   command=self.imageAutoscale)
    b1.pack(side=Tk.TOP)
    side2 = Tk.Frame(subFrame)
    side2.pack(side=Tk.LEFT)
    b1 = Tk.Button(side2, text='Image Histogram',
                   command=self.imageHistogram)
    b1.pack(side=Tk.TOP)
    b1 = Tk.Button(side2, text='Set Zoom',
                   command=self.set_zoom)
    b1.pack(side=Tk.TOP)
    # Histogram bin control
    bin_frame = Tk.Frame(buttonFrame)
    bin_frame.pack(side=Tk.TOP)
    label = Tk.Label(bin_frame, text='bin size/number')
    label.grid(row=0, column=0)
    self.bin_field = Tk.Entry(bin_frame, width=10)
    self.bin_field.grid(row=0, column=1)
    self.bin_field.insert(0, '100')
    label = Tk.Label(
        bin_frame, text='Positive for bin number, negative for \nbin size')
    label.grid(row=1, column=0, columnspan=2)
    label = Tk.Label(buttonFrame, text='Histogram y scaling:')
    label.pack()
    yscaleFrame = Tk.Frame(buttonFrame)
    yscaleFrame.pack(side=Tk.TOP)
    self.yscaleType = Tk.IntVar()
    t1 = Tk.Radiobutton(
        yscaleFrame, text='linear', variable=self.yscaleType,
        value=0)
    t1.pack(side=Tk.LEFT)
    t2 = Tk.Radiobutton(
        yscaleFrame, text='hybrid log', variable=self.yscaleType,
        value=1)
    t2.pack(side=Tk.LEFT)
    # NOTE(review): this re-sets rangeType rather than yscaleType — confirm.
    self.rangeType.set(0)
    # Save/redisplay buttons
    b1 = Tk.Button(buttonFrame, text='Save Image as FITS',
                   command=lambda: general_utilities.save_fits(self.image))
    b1.pack(side=Tk.TOP)
    b1 = Tk.Button(buttonFrame, text='Save as PNG',
                   command=lambda: general_utilities.save_png_figure(
                       self.mplfig1))
    b1.pack(side=Tk.TOP)
    b1 = Tk.Button(buttonFrame, text='Save as PS',
                   command=lambda: general_utilities.save_ps_figure(
                       self.mplfig1))
    b1.pack(side=Tk.TOP)
    b1 = Tk.Button(buttonFrame, text='Redisplay',
                   command=self.displayImage)
    b1.pack(side=Tk.TOP)
    # b1 = Tk.Button(buttonFrame, text='Close',
    #                command=lambda: self.imageExit(imagewindow))
    # b1.pack(side=Tk.TOP)
    self.displayImage()
def zoom_corner(self, sh1, zoom, x1, y1):
    """
    Given the zoom parameters find the array lower left corner.

    The zoomed view is (sh1[0]//zoom) by (sh1[1]//zoom) pixels, centred
    on (x1, y1) and clamped so it never extends outside the image.

    Parameters
    ----------
    sh1: two-element image shape (rows, columns), integers
    zoom: positive integer zoom factor
    x1: x pixel value of the desired field centre (float or int)
    y1: y pixel value of the desired field centre (float or int)

    Returns
    -------
    xmin, ymin: integer lower-left corner pixel indices
    """
    view_w = sh1[1] // zoom
    view_h = sh1[0] // zoom
    # Centre the window, truncating to integers like the original code.
    xmin = int(x1 - view_w / 2.)
    ymin = int(y1 - view_h / 2.)
    # Clamp the lower edge first, then pull back if the upper edge spills.
    xmin = max(xmin, 0)
    ymin = max(ymin, 0)
    if ymin + view_h > sh1[0]:
        ymin = sh1[0] - view_h
    if xmin + view_w > sh1[1]:
        xmin = sh1[1] - view_w
    return xmin, ymin
def set_zoom(self):
    """
    Bring up a dialog window to set the zoom parameter.

    No values are passed to this routine or returned from it. The
    self.zoom variable ([factor, x corner, y corner]) is changed here.
    """
    sh1 = self.image.shape
    npixel = min(sh1[0], sh1[1])
    # Largest zoom that still leaves at least a 64-pixel view.
    zoommax = int(npixel/64.)
    if zoommax <= 1:
        tkinter.messagebox.showinfo(
            "Error",
            "Zoom is disabled for minimum image size < 128 pixels.")
        return
    # Centre on the last cursor position, or the image centre if none.
    if self.xposition is None:
        x1 = sh1[1]/2.
        y1 = sh1[0]/2.
    else:
        x1 = self.xposition
        y1 = self.yposition
    zoom = tkinter.simpledialog.askinteger(
        'Input',
        'Set the integer zoom value (1 to %d)' % (zoommax))
    if zoom is None:
        # User cancelled the dialog; leave the zoom unchanged.
        return
    else:
        xmin, ymin = self.zoom_corner(sh1, zoom, x1, y1)
        self.zoom[0] = zoom
        self.zoom[1] = int(xmin)
        self.zoom[2] = int(ymin)
        self.displayImage()
def toggle_zscale(self):
"""
Toggle the zscale option in the image display
This routine is called in response to the "Image Range" radio button.
It turns the zscale display option on or off via the self.zscale_flag
boolean variable.
No values are passed to this routine or returned form the routine.
"""
ind = self.rangeType.get()
if ind == 1:
self.zscale_flag = True
else:
self.zscale_flag = False
self.displayImage()
def readNewImage(self):
"""
Read a FITS image from a file and display it.
Routine to read a FITS files and extract a two-dimensional image if
possible. The image is then displayed. This routine will only work
if the image display window exists.
No parameters are passed to this routine or returned from this routine.
"""
try:
filename = tkinter.filedialog.askopenfilename(
filetypes=[('FITS', '*.fits')])
if filename is not None:
self.imagefilename = filename
self.image = self.get_image()
if self.image is None:
self.imagefilename = | |
# PythonExporters/HydraExporter.py (from acbauer/SimBuilderTemplates)
import smtk
# Common data structures & functions
# Lightweight attribute bag used by all of the *_section / *_format factories.
ConfigData = type('ConfigData', (object,), dict())
def standard_section(attribute_type, title=None, group_name=None, comment=None):
    """Build config for a 'standard' output section driven by one attribute type.

    title/group_name/comment are optional Hydra-specific overrides.
    """
    section = ConfigData()
    section.type = 'standard'
    section.attribute_type = attribute_type
    section.title = title
    section.group_name = group_name
    section.comment = comment
    return section
ss = standard_section # shorthand
def boundary_condition_section(attribute_type, title=None):
    """Build config for a boundary-condition output section."""
    section = ConfigData()
    section.type = 'boundary_condition'
    section.attribute_type = attribute_type
    section.title = title
    return section
bc = boundary_condition_section
def custom_section(section_name):
    """Build config for a section written by dedicated custom code."""
    section = ConfigData()
    section.type = 'custom'
    section.section_name = section_name
    return section
def item_format(item_name, keyword=None, item_format_list=None):
    """Build format config for one attribute item.

    The Hydra keyword defaults to the item name; item_format_list holds
    format configs for conditional children.
    """
    entry = ConfigData()
    entry.name = item_name
    entry.keyword = item_name if keyword is None else keyword
    entry.item_format_list = item_format_list  # conditional children
    return entry
fmt = item_format # shorthand
def group_format(group_name, item_format_list):
    """Build format config for a group item and the items inside it."""
    entry = ConfigData()
    entry.group_name = group_name
    entry.name = group_name  # TODO revisit adding Config.type?
    entry.item_format_list = item_format_list
    return entry
groupfmt = group_format # shorthand
# Note to self: At some point, we may need to add an optional list to
# standard_section for the strings to write for Item instances
# represented as discrete values. So far haven't needed it.
# ==================================================
#
# List of output section config data
# Standard config is ss(attribute-type, [hydra-title, group-item-name, comment-line])
#
# ==================================================
# Ordered list of output sections; ExportCMB writes them in this order.
section_table = [
    ss('simulationtime'),
    ss('solution_method', 'solution_method', 'SolutionMethod'),
    ss('time_integration', 'time_integration', 'TimeIntegration'),
    ss('LoadBalancer', 'load_balance'),
    custom_section('output'),
    ss('energy'),
    custom_section('hydrostat'),
    custom_section('turbulence'),
    ss('Material', 'material', comment='Material model setup & assignment to sets'),
    # TODO materialset, probably a custom_section (or part of a custom Material section)
    custom_section('plotvar'),
    custom_section('histvar'),
    custom_section('plotstatvar'),
    #ss('InitialConditions', 'initial', 'InitialConditions', comment='Simple IC\'s'),
    custom_section('InitialConditions'),
    custom_section('BodyForce'),
    bc('distancebc', 'distance'),
    bc('Pressure', 'pressure'),
    #custom_section('distance'), # Wall and Penetration att types
    bc('TurbulentViscosity', 'turbnu'),
    bc('HeatFlux', 'heatflux'),
    custom_section('velocity'), # 6 different att types
    #vector_bc('velocity', [
    #    ('VelXBoundaryCondition', 'velx'),
    #    ('VelYBoundaryCondition', 'vely'),
    #    ('VelZBoundaryCondition', 'velz'),
    #    ],
    #)
    # TODO remaining boundary condition types
    ss('ppesolver', 'ppesolver', 'PressurePoissonSolver'),
    ss('momentumsolver', 'momentumsolver', 'MomentumSolver'),
    ss('transportsolver', 'transportsolver', 'TransportSolver'),
]
# ==================================================
#
# Dictionary of format config data for items contained in attributes
# Format is fmt(item-name, hydra-keyword-if-different)
# The group_format (groupfmt) identifier should *only* be used for custom sections
#
# ==================================================
# Maps attribute type -> list of item format configs (fmt/groupfmt entries).
format_table = {
    'simulationtime': [
        fmt('nsteps'),
        fmt('deltat'),
        fmt('term')
    ],
    'energy': [
        fmt('energy')
    ],
    'solution_method': [
        fmt('strategy', 'strategy', [
            fmt('error_norm'),
            fmt('nvec')]
        ),
        fmt('itmax'),
        fmt('eps'),
        fmt('eps_dist'),
        fmt('eps_p0'),
        fmt('subcycle'),
        fmt('timestep_control'),
        fmt('convergence'),
        fmt('diagnostics')
    ],
    'time_integration': [
        fmt('type'),
        fmt('CFLinit'),
        fmt('CFLmax'),
        fmt('dtmax'),
        fmt('dtscale'),
        fmt('thetaa'),
        fmt('thetaK', 'thetak'),
        fmt('thetaf'),
        fmt('trimlast')
    ],
    'LoadBalancer': [
        fmt('Method', 'method'),
        fmt('Load Balance Diagnostics', 'diagnostics')
    ],
    'Output': [
        fmt('type', 'filetype'),
        groupfmt('FieldOutput', [
            fmt('type', 'pltype'),
            fmt('frequency', 'plti')
        ]),
        groupfmt('RestartOutput', [
            fmt('frequency', 'dump')
        ])
    ],
    'StatusInformation': [
        fmt('minmaxfrequency', 'ttyi'),
        fmt('tifrequency', 'thti'),
        fmt('PrintLevel', 'prtlev', [
            fmt('hcfrequency', 'prti')
        ]),
    ],
    'BasicTurbulenceModel': [
        fmt('Method', 'turbulence', [
            fmt('timescale_limiter'),
            fmt('c_s'),
            fmt('c_w'),
            fmt('prandtl'),
            fmt('schmidt'),
        ]),
    ],
    'Material': [
        fmt('Density', 'rho'),
        fmt('mu')
    ],
    'InitialConditions': [
        fmt('Velocity', ['velx', 'vely', 'velz']),
        fmt('tv', 'turbnu'),
        fmt('tke'),
        fmt('itdr', 'eps'),
        fmt('temperature')
    ],
    'ppesolver': [
        fmt('ppetype', 'type'),
        fmt('itmax'),
        fmt('itchk'),
        fmt('diagnostics'),
        fmt('convergence'),
        fmt('eps'),
        fmt('pivot', 'zeropivot'),
        # NOTE(review): second 'ppetype' entry carries the conditional
        # preconditioner children — confirm the duplicate is intentional.
        fmt('ppetype', 'type', [
            fmt('preconditioner', 'amgpc', [
                fmt('hypre_coarsen_type'),
                fmt('hypre_smoother'),
                fmt('hypre_smoother_dn'),
                fmt('hypre_smoother_up'),
                fmt('hypre_smoother_co'),
                fmt('interp_type'),
                fmt('trunc_factor'),
                fmt('pmax_elements'),
                fmt('agg_num_levels'),
                fmt('strong_threshold'),
                fmt('max_rowsum'),
                fmt('smoother'),
                fmt('cycle'),
                fmt('solver'),
                fmt('pre_smooth'),
                fmt('post_smooth'),
                fmt('coarse_size'),
                fmt('levels'),
            ])
        ])
    ],
    'momentumsolver': [
        fmt('type'),
        fmt('restart'),
        fmt('itmax'),
        fmt('itchk'),
        fmt('diagnostics'),
        fmt('convergence'),
        fmt('eps'),
    ],
    'transportsolver': [
        fmt('type'),
        fmt('restart'),
        fmt('itmax'),
        fmt('itchk'),
        fmt('diagnostics'),
        fmt('convergence'),
        fmt('eps'),
    ],
}
# Instantiate global dictionary for load curve functions; ids are assigned
# while attributes are written and consumed later by write_load_curves().
lcid_dictionary = dict()
# Entry point (main export function)
def ExportCMB(spec):
    '''
    Entry function, called by CMB to write export file.

    Validates the simulation/export attribute managers, resolves the
    analysis type and output filename from the ExportSpec attribute,
    then writes each section_table entry followed by the load curves.
    Returns True on success, False on any validation failure.
    '''
    manager = spec.getSimulationAttributes()
    export_manager = spec.getExportAttributes()
    #analysis_name = spec.getAnalysisNames()[0]  # deprecated
    #output_file_name = spec.getOutputPath()  # deprecated
    ok = True
    if manager is None:
        print 'No attribute manager found - no output generated'
        return False
    if export_manager is None:
        print 'No export attributes found - no output generated'
        return False
    att_list = export_manager.findAttributes('ExportSpec')
    if len(att_list) < 1:
        print 'ERROR - missing ExportSpec attribute'
        return False
    elif len(att_list) > 1:
        print 'ERROR - multiple ExportSpec attributes'
        return False
    spec_att = att_list[0]
    item = spec_att.find('AnalysisTypes')
    if item is None:
        print 'ERROR - ExportSpec attribute missing AnalysisTypes item'
        return False
    types_item = smtk.attribute.to_concrete(item)
    analysis_type = 'Default'
    if types_item.numberOfValues() < 1:
        print 'Warning: No analysis type specified'
    else:
        analysis_type = types_item.value(0)
        if types_item.numberOfValues() > 1:
            print 'More than 1 Analysis Type specified: using 1st one'
    print 'Exporting analysis type', analysis_type
    output_file_name = 'output.txt'  # default
    item = spec_att.find('OutputFile')
    if item is not None:
        output_item = smtk.attribute.to_concrete(item)
        if output_item.isSet(0):
            value = output_item.value(0)
            if value != '':
                output_file_name = value
    print 'Writing output file', output_file_name
    # Maps the CMB analysis label to the Hydra-TH analysis keyword.
    analysis_dict = {
        'Incompressible Navier-Stokes Analysis': 'cc_navierstokes',
        'NS and Energy Equation Analysis': 'cc_navierstokes'
    }
    if analysis_type not in analysis_dict:
        print 'Unsupported analysis type \"%s\"" - no output generated' % \
            analysis_type
        return False
    categories = list(manager.analysisCategories(analysis_type))
    print 'categories', categories
    if not categories:
        print 'WARNING: No categories found for analysis \"%s\"' % \
            analysis_type
        #return False
    # Instantiate output file and write contents
    with open(output_file_name, 'w') as out:
        out.write('title\n')
        out.write('Hydra-TH control file generated by Kitware CMB\n')
        out.write('\n')
        title = analysis_dict.get(analysis_type, 'unknown_analysis')
        out.write(title)
        out.write('\n')
        # Process elements in section_table
        # NOTE(review): ok keeps only the LAST section's status — an earlier
        # write_section failure is silently overwritten; confirm intended.
        for section_config in section_table:
            ok = write_section(manager, section_config, categories, out)
        # Write load curves last, since ids are assigned when writing atts
        write_load_curves(manager, out)
        out.write('\n')
        out.write('end\n')
        out.write('\n')
        out.write('exit\n')
    print 'Export ok status: %s' % ok
    return ok
def get_id_from_name(name):
    '''
    Extract the sideset or cell-block id embedded in a model entity name.

    Domain sets are named 'DomainSet<id>', so the prefix is stripped;
    for every other name the last whitespace-separated token is assumed
    to be the id. Returns the string 'BAD_VALUE' when no token exists.
    (Interim hack until GridInfo provides the ids properly.)
    '''
    prefix = 'DomainSet'
    if name.startswith(prefix):
        return name[len(prefix):]
    parts = name.split()
    return parts[-1] if parts else "BAD_VALUE"
def write_output_section(manager, categories, out):
    '''
    Writes output section, which is "custom" because it spans multiple
    attributes (Output and StatusInformation). Items are emitted in the
    fixed order Hydra-TH expects. Always returns True.
    '''
    out.write('\n')
    out.write(' # Output options\n')
    # This is awkward - must put keyword as last item in the list, instead
    # of Item name, because format_table[] is set up that way
    # TODO Redo format table to put Item name first?
    write_item(manager, categories, out, 'Output', 'FieldOutput', 'type') # pltype
    #write_item(manager, categories, out, 'Output', 'RestartOutput', 'type') # filetype
    write_item(manager, categories, out, 'Output', 'type') # filetype
    write_item(manager, categories, out, 'Output', 'FieldOutput', 'frequency') # plti
    write_item(manager, categories, out, 'StatusInformation', 'minmaxfrequency') # ttyi
    write_item(manager, categories, out, 'StatusInformation', 'tifrequency') # thti
    #write_item(manager, categories, out, 'StatusInformation', 'PrintLevel') # prtlev
    # Because PrintLevel has conditional children, use write_item_tree() method
    # Suggests some better refactoring of write_item() and write_item_tree()
    item = find_item(manager, 'StatusInformation', 'PrintLevel')
    if item.isMemberOf(categories):
        item_config = find_item_config('StatusInformation', 'PrintLevel')
        format_string = ' %s %s\n'
        write_item_tree(item, item_config, format_string, out)
    write_item(manager, categories, out, 'Output', 'RestartOutput', 'frequency') # dump
    return True
def write_turbulence_section(manager, categories, out):
    '''
    Writes turbulence section for AdvancedTurbulenceModel attribute
    Hydra-TH format is slightly nonstandard: LES-style methods (WALE,
    rng_ke, smagorinsky) get a full parameter block ended with 'end',
    anything else is written as a one-line 'tmodel' keyword.
    Returns True when nothing needs writing or writing succeeds.
    '''
    att_type = 'BasicTurbulenceModel'
    turb_att_list = manager.findAttributes(att_type)
    if len(turb_att_list) < 1:
        # No turbulence attribute defined; nothing to write.
        return True
    item_format_list = format_table.get(att_type)
    if item_format_list is None:
        print 'WARNING: No format info for', att_type
        return False
    attribute = turb_att_list[0] # there should only be a single instance of this attribute
    if not attribute.isMemberOf(categories):
        return True
    item = attribute.find("Method")
    if item is None:
        return False
    if not item.isEnabled():
        return True
    item = smtk.attribute.to_concrete(item)
    out.write('\n')
    if item.value(0) in ["WALE", "rng_ke", "smagorinsky"]:
        format_string = ' %s %s\n'
        for turb_att in turb_att_list:
            out.write('\n')
            for item_config in item_format_list:
                item = turb_att.find(item_config.name)
                if item is None:
                    continue
                write_item_tree(item, item_config, format_string, out, indent=' ')
            out.write(' end\n')
    else:
        out.write(' tmodel %s\n' % item.value(0))
    return True
def write_plotvar_section(manager, categories, out, name):
'''
Writes plotvar section for [Node/Elem/SideSet]PlotVarOutput attributes
'''
config = {
'plotvar': ('NodePlotVarOutput', 'ElemPlotVarOutput', 'SideSetPlotVarOutput'),
'plotstatvar': ('NodeTempStatVarOutput', 'ElemTempStatVarOutput', 'SideSetTempStatVarOutput'),
}
node_att_list = manager.findAttributes(config[name][0])
elem_att_list = manager.findAttributes(config[name][1])
ss_att_list = manager.findAttributes(config[name][2])
if len(node_att_list) + len(elem_att_list) + len(ss_att_list) < 1:
return True
if name == 'plotstatvar':
out.write('\n')
out.write(' statistics\n')
plotstatvaratt = manager.findAttributes('TempStatVarStatistics')[0]
itemlabels = ['starttime', 'endtime', 'plotwinsize']
groupitem = plotstatvaratt.find('TemporalStatistics')
var_groupitem = smtk.attribute.to_concrete(groupitem)
for i in range(len(itemlabels)):
item = var_groupitem.item(i)
var_item = smtk.attribute.to_concrete(item)
out.write(' %s %s\n' % (itemlabels[i],var_item.value(0)) )
out.write(' end\n')
out.write('\n')
out.write(' %s\n' % name)
types = [ 'node', 'elem']
lists = [ node_att_list, elem_att_list]
# Create list of (type, varname) tuples
ne_tlist = list()
for i in range(len(lists)):
plot_type = types[i]
current_list = lists[i]
for att in current_list:
item = att.find('varname')
var_item = smtk.attribute.to_concrete(item)
t = (plot_type, var_item.value(0))
ne_tlist.append(t)
ne_tlist.sort()
for | |
# From aragubas/OneTrack
#!/usr/bin/python3.7
# Copyright 2020 Aragubas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import pygame
import Library.CoreUtils as Utils
import Library.CorePrimitives as Shape
from OneTrack import MAIN as Main
from OneTrack.MAIN import UI
class Widget_Controller:
    """Owns a rectangle full of widgets: draws them, routes events to them,
    and records the last widget interaction (ID + type) each frame."""

    def __init__(self, Rectangle):
        # Rectangle: (x, y, w, h) region this controller manages.
        self.Rectangle = Utils.Convert.List_PygameRect(Rectangle)
        self.WidgetCollection = list()
        self.LastInteractionID = -1      # -1 means "no interaction"
        self.LastInteractionType = None
        self.Active = False              # True while the cursor is inside
        self.ClickOffset = (0, 0)        # extra screen offset for hit-testing

    def Clear(self):
        """Remove every widget from the controller."""
        self.WidgetCollection.clear()

    def Draw(self, DISPLAY):
        """Render all widgets, then clear the last-interacted widget's state."""
        for widget in self.WidgetCollection:
            widget.Render(DISPLAY)
        if not self.LastInteractionID == -1:
            # NOTE(review): LastInteractionID (a widget ID) is used as a
            # list index here — only safe while IDs match positions; confirm.
            try:
                self.WidgetCollection[self.LastInteractionID].InteractionType = None
            except IndexError:
                print("Can't set property to unexistent widget.")

    def Append(self, Widget):
        """Add a widget and refresh every widget's layout/state."""
        self.WidgetCollection.append(Widget)
        for widget in self.WidgetCollection:
            widget.Update()

    def Update(self):
        """Per-frame update: hit-test the controller region and collect the
        most recent widget interaction into LastInteractionID/Type."""
        self.LastInteractionID = -1
        self.LastInteractionType = None
        self.Active = pygame.Rect(self.Rectangle[0] + self.ClickOffset[0], self.Rectangle[1] + self.ClickOffset[1], self.Rectangle[2], self.Rectangle[3]).collidepoint((pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1]))
        if not self.Active:
            # Cursor left the region: deactivate all widgets and skip updates.
            for widget in self.WidgetCollection:
                widget.Active = False
            return
        for widget in self.WidgetCollection:
            widget.Update()
            if not widget.InteractionType is None:
                self.LastInteractionID = widget.ID
                self.LastInteractionType = widget.InteractionType

    def EventUpdate(self, event):
        """Forward a pygame event to the widgets that should receive it."""
        for widget in self.WidgetCollection:
            if widget.AwaysUpdate: # -- If always update, update it no matter what
                if widget.EventUpdateable:
                    widget.EventUpdate(event)
                continue
            else: # -- If not, only update when mouse is hovering it
                ColideRect = pygame.Rect(self.ClickOffset[0] + self.Rectangle[0] + widget.Rectangle[0], self.ClickOffset[1] + self.Rectangle[1] + widget.Rectangle[1], widget.Rectangle[2], widget.Rectangle[3])
                if ColideRect.collidepoint(pygame.mouse.get_pos()):
                    if widget.EventUpdateable:
                        widget.CursorOffset = (ColideRect[0], ColideRect[1])
                        widget.EventUpdate(event)
                    widget.Active = True
                else:
                    widget.Active = False

    def GetWidget(self, WidgetID):
        """Return the widget whose ID matches WidgetID, or None if absent."""
        for widget in self.WidgetCollection:
            if widget.ID == WidgetID:
                return widget
class Widget_PictureBox:
    """Static, non-interactive widget that draws a named image inside its
    rectangle. Implements the widget interface the controller expects."""

    def __init__(self, Rectangle, ImageName, WidgetID):
        # -1 is reserved by the controller to mean "no interaction".
        if WidgetID == -1:
            raise ValueError("WidgetID cannot be -1")
        self.Rectangle = Utils.Convert.List_PygameRect(Rectangle)
        self.ImageName = ImageName
        self.ID = WidgetID
        self.InteractionType = None   # read by the controller after Update()
        self.Active = False
        self.EventUpdateable = False  # never receives events
        self.AwaysUpdate = False
        self.CursorOffset = (0, 0)

    def Render(self, DISPLAY):
        UI.ContentManager.ImageRender(DISPLAY, self.ImageName, self.Rectangle[0], self.Rectangle[1], self.Rectangle[2], self.Rectangle[3])

    def Update(self):
        # No interactive state to refresh.
        pass

    def EventUpdate(self, event):
        # Non-interactive; events are ignored.
        pass
class Widget_ValueChanger:
def __init__(self, Position, TitleName, ChangerInitialValue, WidgetID):
if WidgetID == -1:
raise ValueError("WidgetID cannot be -1")
self.Rectangle = Utils.Convert.List_PygameRect((Position[0], Position[1], 48, 34))
self.TitleName = TitleName
self.ID = WidgetID
self.Changer = UI.EditableNumberView(pygame.Rect(self.Rectangle[0], self.Rectangle[1] + 17, self.Rectangle[2], self.Rectangle[3] - 17), ChangerInitialValue)
self.LastValue = ChangerInitialValue
self.InteractionType = None
self.Active = False
self.EventUpdateable = True
self.AwaysUpdate = False
self.CursorOffset = (0, 0)
def Render(self, DISPLAY):
# -- Render Background -- #
BGColor = UI.ThemesManager_GetProperty("Button_BackgroundColor")
LineColor = UI.ThemesManager_GetProperty("Button_Active_IndicatorColor")
if not self.Active:
LineColor = UI.ThemesManager_GetProperty("Button_Inactive_IndicatorColor")
Shape.Shape_Rectangle(DISPLAY, BGColor, self.Rectangle)
Shape.Shape_Rectangle(DISPLAY, LineColor, self.Rectangle, 1)
# -- Render Change Title -- #
TitleX = self.Rectangle[0] + self.Rectangle[2] / 2 - UI.ContentManager.GetFont_width("/Ubuntu_Bold.ttf", 12, self.TitleName) / 2
UI.ContentManager.FontRender(DISPLAY, "/Ubuntu_Bold.ttf", 12, self.TitleName, (230, 230, 230), TitleX, self.Rectangle[1])
# -- Render EditableNumberView -- #
self.Changer.Render(DISPLAY)
def Update(self):
self.Changer.Update()
if not self.Changer.Value == self.LastValue:
self.LastValue = self.Changer.Value
self.InteractionType = self.Changer.Value
if UI.ContentManager.GetFont_width("/PressStart2P.ttf", 12, self.Changer.Value) > self.Rectangle[2]:
self.Rectangle[2] = self.Rectangle[2] + UI.ContentManager.GetFont_width("/PressStart2P.ttf", 12, self.Changer.Value) + 5
if UI.ContentManager.GetFont_width("/Ubuntu_Bold.ttf", 12, self.TitleName) > self.Rectangle[2]:
self.Rectangle[2] = self.Rectangle[2] + UI.ContentManager.GetFont_width("/Ubuntu_Bold.ttf", 12, self.TitleName)
self.Changer.Rectangle[0] = self.Rectangle[0] + self.Rectangle[2] / 2 - UI.ContentManager.GetFont_width("/PressStart2P.ttf", 12, self.Changer.Value) / 2
def EventUpdate(self, event):
    """Forward pygame events to the embedded EditableNumberView."""
    self.Changer.EventUpdate(event)
class Widget_Label:
    """Static text widget: renders a single string and ignores all events."""

    def __init__(self, FontName, Text, FontSize, Color, X, Y, WidgetID):
        """Create a label sized to its rendered text.

        :param FontName: font file name (e.g. "/Ubuntu_Bold.ttf").
        :param Text: string to render.
        :param FontSize: font size in points.
        :param Color: text colour tuple.
        :param X, Y: top-left position of the label.
        :param WidgetID: unique widget id; -1 is reserved and rejected.
        """
        if WidgetID == -1:
            raise ValueError("WidgetID cannot be -1")
        # Identity / interaction flags: labels are passive widgets.
        self.ID = WidgetID
        self.InteractionType = None
        self.Active = False
        self.EventUpdateable = False
        self.AwaysUpdate = False
        self.CursorOffset = (0, 0)
        # Appearance state.
        self.Text = Text
        self.FontSize = FontSize
        self.FontName = FontName
        self.Color = Color
        self.X = X
        self.Y = Y
        self.Rectangle = self._MeasureRectangle()

    def _MeasureRectangle(self):
        """Return a pygame rect at (X, Y) sized to the current text/font."""
        TextWidth = UI.ContentManager.GetFont_width(self.FontName, self.FontSize, self.Text)
        TextHeight = UI.ContentManager.GetFont_height(self.FontName, self.FontSize, self.Text)
        return Utils.Convert.List_PygameRect((self.X, self.Y, TextWidth, TextHeight))

    def Render(self, DISPLAY):
        """Draw the label text at its rectangle origin."""
        UI.ContentManager.FontRender(DISPLAY, self.FontName, self.FontSize, self.Text, self.Color, self.Rectangle[0], self.Rectangle[1])

    def Update(self):
        """Re-measure the rectangle so the size tracks text/font changes."""
        self.Rectangle = self._MeasureRectangle()

    def EventUpdate(self, event):
        """Labels do not react to events."""
        pass
class Widget_PianoKeys:
    """One-octave piano keyboard widget driven by the computer keyboard.

    Keys z,s,x,d,c,v,g,b,h,n,j,m map to notes C..B; the held note is
    highlighted until its key is released.
    """

    # Chromatic scale: index 0..11 == C..B. Replaces a 12-branch if/elif chain.
    _NOTE_NAMES = ("C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B")

    def __init__(self, X, Y, WidgetID):
        """Create the keyboard widget at (X, Y) with a fixed 380x45 size.

        :param X, Y: top-left position of the widget.
        :param WidgetID: unique widget id; -1 is reserved and rejected.
        """
        if WidgetID == -1:
            raise ValueError("WidgetID cannot be -1")
        self.ID = WidgetID
        self.InteractionType = None
        self.Active = True
        self.EventUpdateable = True
        self.X = X
        self.Y = Y
        self.Rectangle = Utils.Convert.List_PygameRect((X, Y, 380, 45))
        self.Surface = pygame.Surface((self.Rectangle[2], self.Rectangle[3]))
        self.LastRect = pygame.Rect(0, 0, 0, 0)
        self.LastNote = -1  # -1 == no mapped key held
        self.AwaysUpdate = True
        self.CursorOffset = (0, 0)
        # Keyboard-to-note mapping (bottom row = naturals, home row = sharps).
        # Replaces the per-key if chain in EventUpdate.
        self._KeyToNote = {
            pygame.K_z: 0, pygame.K_s: 1, pygame.K_x: 2, pygame.K_d: 3,
            pygame.K_c: 4, pygame.K_v: 5, pygame.K_g: 6, pygame.K_b: 7,
            pygame.K_h: 8, pygame.K_n: 9, pygame.K_j: 10, pygame.K_m: 11,
        }
        # Disable key auto-repeat so a held key yields a single KEYDOWN.
        pygame.key.set_repeat(0, 0)

    def Render(self, DISPLAY):
        """Draw the whole keyboard to the offscreen surface, then blit it."""
        if not self.LastRect == self.Rectangle:
            # Rebuild the backing surface only when the widget size changed.
            # (The original never updated LastRect, so the surface was
            # re-allocated on every frame; output is identical because the
            # surface is fully repainted below.)
            self.Surface = pygame.Surface((self.Rectangle[2], self.Rectangle[3]))
            self.LastRect = self.Rectangle
        # -- Render Background -- #
        self.Surface.fill((190, 190, 190))
        Shape.Shape_Rectangle(self.Surface, (100, 100, 100), (0, 0, self.Rectangle[2], self.Rectangle[3]), 5)
        for i in range(12):
            NoteLabel = self.GetNote_ByIndex(i)
            # -- Natural-key geometry/colours (drawn along the bottom edge) -- #
            Width = 30
            Height = 25
            X = i * (Width + 2)
            Y = self.Rectangle[3] - Height
            BackgroundColor = (100, 105, 155)
            TextColor = (0, 5, 100)
            IsHighNote = False
            if "#" in NoteLabel:
                # -- Sharp keys: computed at natural pitch spacing, then
                # widened by 5px and nudged left, drawn along the top edge -- #
                Width = 30
                X = i * (Width + 2)
                Width = 35
                X -= 2
                Y = 0
                BackgroundColor = (10, 15, 25)
                TextColor = (200, 205, 255)
                IsHighNote = True
            TextX = X + (Width / 2 - UI.ContentManager.GetFont_width("/PressStart2P.ttf", 12, NoteLabel) / 2)
            if self.LastNote == i:
                # Highlight the currently held note.
                BackgroundColor = (200, 205, 255)
                TextColor = (0, 0, 0)
            if not IsHighNote:
                Shape.Shape_Rectangle(self.Surface, BackgroundColor, (X, Y, Width, Height), 0, 0, 5, 5)
            else:
                Shape.Shape_Rectangle(self.Surface, BackgroundColor, (X, Y, Width, Height), 0, 0, 0, 0, 5, 5)
            UI.ContentManager.FontRender(self.Surface, "/PressStart2P.ttf", 12, NoteLabel, TextColor, TextX, Y + 5)
        DISPLAY.blit(self.Surface, (self.Rectangle[0], self.Rectangle[1]))

    def GetNote_ByIndex(self, i):
        """Return the chromatic note name for index 0..11, or None when out of range."""
        if 0 <= i < len(self._NOTE_NAMES):
            return self._NOTE_NAMES[i]
        return None  # matches the original implicit None for unmapped indexes

    def Update(self):
        """Piano keys carry no per-frame state; nothing to update."""
        pass

    def EventUpdate(self, event):
        """Track the held note: KEYUP clears it, KEYDOWN sets it from the key map."""
        if event.type == pygame.KEYUP:
            self.LastNote = -1
        if event.type == pygame.KEYDOWN:
            # Unmapped keys leave LastNote unchanged, as before.
            self.LastNote = self._KeyToNote.get(event.key, self.LastNote)
class Widget_Button:
def __init__(self, Text, FontSize, X, Y, WidgetID):
    """Create a clickable text button.

    :param Text: caption rendered inside the button.
    :param FontSize: caption font size; the caption is drawn at FontSize - 2.
    :param X, Y: top-left position of the caption (the frame extends 2px around it).
    :param WidgetID: unique widget id; -1 is reserved and rejected.
    """
    if WidgetID == -1:
        raise ValueError("WidgetID cannot be -1")
    self.ID = WidgetID
    self.InteractionType = None
    self.Active = True
    self.EventUpdateable = True
    self.AwaysUpdate = False
    self.X = X
    self.Y = Y
    self.Text = Text
    self.FontSize = FontSize
    self.TextWidth = UI.ContentManager.GetFont_width("/Ubuntu_Bold.ttf", self.FontSize, self.Text)
    self.TextHeight = UI.ContentManager.GetFont_height("/Ubuntu_Bold.ttf", self.FontSize, self.Text)
    # Frame rect: 2px padding on every side of the measured caption.
    self.Rectangle = Utils.Convert.List_PygameRect((X - 2, Y - 2, self.TextWidth + 4, self.TextHeight + 4))
    self.LastRect = self.Rectangle
    self.Surface = pygame.Surface((self.Rectangle[2], self.Rectangle[3]))
    # Offsets that centre the (FontSize - 2) caption inside the frame.
    self.Centred_X = self.Rectangle[2] / 2 - UI.ContentManager.GetFont_width("/Ubuntu_Bold.ttf", self.FontSize - 2, self.Text) / 2
    self.Centred_Y = self.Rectangle[3] / 2 - UI.ContentManager.GetFont_height("/Ubuntu_Bold.ttf", self.FontSize - 2, self.Text) / 2
    self.ButtonState = 0  # NOTE(review): 2 appears to mean "just clicked" (reset in Render) — confirm against callers
    self.CursorOffset = (0, 0)
    self.BgColor = UI.ThemesManager_GetProperty("Button_BackgroundColor")
    self.IndicatorColor = UI.ThemesManager_GetProperty("Button_Inactive_IndicatorColor")
def Render(self, DISPLAY):
    """Draw the button (background, 1px indicator frame, caption) and blit it.

    Also performs the end-of-frame state reset: a ButtonState of 2 is
    cleared back to 0 after being rendered once.
    """
    # -- Render Background -- #
    Shape.Shape_Rectangle(self.Surface, self.BgColor, (0, 0, self.Rectangle[2], self.Rectangle[3]))
    # -- Render Indicator -- #
    Shape.Shape_Rectangle(self.Surface, self.IndicatorColor, (0, 0, self.Rectangle[2], self.Rectangle[3]), 1)
    # -- Render the Button Text -- #
    UI.ContentManager.FontRender(self.Surface, "/Ubuntu_Bold.ttf", self.FontSize - 2, self.Text, (240, 240, 240), self.Centred_X, self.Centred_Y)
    DISPLAY.blit(self.Surface, (self.Rectangle[0], self.Rectangle[1]))
    if self.ButtonState == 2:
        self.ButtonState = 0
def Update(self):
# -- Check if surface has the correct size -- #
if not self.LastRect | |
"show media" followed by "show optic <slot>" for each slot
Optionally use snmp snIfOpticalMonitoringInfoTable - ifIndex to optical parameters table
:return:
"""
'''
command = 'show interfaces transceiver'
output = self._send_command(command)
# Check if router supports the command
if '% Invalid input' in output:
return {}
# Formatting data into return data structure
optics_detail = {}
try:
split_output = re.split(r'^---------.*$', output, flags=re.M)[1]
except IndexError:
return {}
split_output = split_output.strip()
for optics_entry in split_output.splitlines():
# Example, Te1/0/1 34.6 3.29 -2.0 -3.5
try:
split_list = optics_entry.split()
except ValueError:
return {}
int_brief = split_list[0]
output_power = split_list[3]
input_power = split_list[4]
port = canonical_interface_name(int_brief)
port_detail = {}
port_detail['physical_channels'] = {}
port_detail['physical_channels']['channel'] = []
# If interface is shutdown it returns "N/A" as output power.
# Converting that to -100.0 float
try:
float(output_power)
except ValueError:
output_power = -100.0
# Defaulting avg, min, max values to -100.0 since device does not
# return these values
optic_states = {
'index': 0,
'state': {
'input_power': {
'instant': (float(input_power) if 'input_power' else -100.0),
'avg': -100.0,
'min': -100.0,
'max': -100.0
},
'output_power': {
'instant': (float(output_power) if 'output_power' else -100.0),
'avg': -100.0,
'min': -100.0,
'max': -100.0
},
'laser_bias_current': {
'instant': 0.0,
'avg': 0.0,
'min': 0.0,
'max': 0.0
}
}
}
port_detail['physical_channels']['channel'].append(optic_states)
optics_detail[port] = port_detail
return optics_detail
'''
raise NotImplementedError
def get_facts(self):
    """Return NAPALM device facts for a Brocade NetIron.

    Parses 'show version' (model, serial, OS version, vendor line),
    'show uptime' (Active MP uptime converted to seconds) and the running
    config (hostname), then builds the interface list from cached
    'show int brief wide' output plus the LAG pseudo-interfaces.

    :return: dict with uptime, vendor, model, hostname, fqdn, os_version,
        serial_number and interface_list keys.
    """
    uptime = None
    vendor = 'Brocade'
    model = None
    hostname = None
    version = 'netiron'
    serial = None
    command = 'show version'
    lines = self.device.send_command_timing(command, delay_factor=self._show_command_delay_factor)
    for line in lines.splitlines():
        # e.g. "System: <model> (Serial #: <serial>, ..." or "Chassis: ..."
        r1 = re.match(r'^(System|Chassis):\s+(.*)\s+\(Serial #:\s+(\S+),(.*)', line)
        if r1:
            model = r1.group(2)
            serial = r1.group(3)
        r2 = re.match(r'^IronWare : Version\s+(\S+)\s+Copyright \(c\)\s+(.*)', line)
        if r2:
            version = r2.group(1)
            vendor = r2.group(2)
    command = 'show uptime'
    lines = self.device.send_command_timing(command, delay_factor=self._show_command_delay_factor)
    for line in lines.splitlines():
        # Get the uptime from the Active MP module
        r1 = re.match(r'\s+Active MP(.*)Uptime\s+(\d+)\s+days'
                      r'\s+(\d+)\s+hours'
                      r'\s+(\d+)\s+minutes'
                      r'\s+(\d+)\s+seconds', line)
        if r1:
            days = int(r1.group(2))
            hours = int(r1.group(3))
            minutes = int(r1.group(4))
            seconds = int(r1.group(5))
            uptime = seconds + minutes*60 + hours*3600 + days*86400
    # the following is expensive -- should use SNMP GET instead
    command = 'show running-config | include ^hostname'
    lines = self.device.send_command(command, delay_factor=self._show_command_delay_factor)
    for line in lines.splitlines():
        r1 = re.match(r'^hostname (\S+)', line)
        if r1:
            hostname = r1.group(1)
    facts = {
        'uptime': uptime,
        'vendor': str(vendor),
        'model': str(model),
        'hostname': str(hostname),
        # FIXME: fqdn
        'fqdn': str("Unknown"),
        'os_version': str(version),
        'serial_number': str(serial),
        'interface_list': []
    }
    # Get interfaces
    if not self.show_int_brief_wide:
        # Cache the command output on the instance for reuse by other getters.
        self.show_int_brief_wide = self.device.send_command_timing('show int brief wide', delay_factor=self._show_command_delay_factor)
    info = textfsm_extractor(
        self, "show_interface_brief_wide", self.show_int_brief_wide
    )
    for interface in info:
        port = self.standardize_interface_name(interface['port'])
        facts['interface_list'].append(port)
    # Add lags to interfaces
    lags = self.get_lags()
    facts['interface_list'] += list(lags.keys())
    return facts
@staticmethod
def __parse_port_change__(last_str):
    """Convert an uptime string like '3 days 04:05:06' into seconds.

    :param last_str: duration in the form '<d> days <hh>:<mm>:<ss>'.
    :return: float seconds, or -1.0 when the string does not match.
    """
    # Raw string fixes the invalid \d escape sequences in the original
    # pattern (a SyntaxWarning on modern Python).
    r1 = re.match(r"(\d+) days (\d+):(\d+):(\d+)", last_str)
    if not r1:
        return float(-1.0)
    days, hours, mins, secs = (int(group) for group in r1.groups())
    return float(secs + (mins * 60) + (hours * 60 * 60) + (days * 24 * 60 * 60))
def _get_interface_detail(self, port):
    """Fetch per-port details by scraping 'show interface ...' output.

    :param port: port number string (e.g. '1/1') or 'mgmt1'.
    :return: [last_flap, description, speed, mac] where last_flap is seconds
        since the last port state change ("0.0" if the firmware does not
        report it), description is the port name ('' when unset, None if not
        reported), speed is -1 for 'auto' or the configured value scaled by
        1000 for Mbit / 1000000 for Gbit, and mac is the hardware address.
    """
    description = None
    mac = None
    if port == "mgmt1":
        command = "show interface management1"
    else:
        command = "show interface ethernet {}".format(port)
    output = self.device.send_command(command, delay_factor=self._show_command_delay_factor)
    output = output.split('\n')
    last_flap = "0.0"
    speed = "0"
    for line in output:
        # Port state change is only supported from >5.9? (no support in 5.7b)
        r0 = re.match(r"\s+Port state change time: \S+\s+\d+\s+\S+\s+\((.*) ago\)", line)
        if r0:
            last_flap = self.__class__.__parse_port_change__(r0.group(1))
        r1 = re.match(r"\s+No port name", line)
        if r1:
            description = ""
        r2 = re.match(r"\s+Port name is (.*)", line)
        if r2:
            description = r2.group(1)
        r3 = re.match(r"\s+Hardware is \S+, address is (\S+) (.+)", line)
        if r3:
            mac = r3.group(1)
        # Empty modules may not report the speed
        # Configured fiber speed auto, configured copper speed auto
        # actual unknown, configured fiber duplex fdx, configured copper duplex fdx, actual unknown
        r4 = re.match(r"\s+Configured speed (\S+),.+", line)
        if r4:
            speed = r4.group(1)
            if 'auto' in speed:
                speed = -1
            else:
                r = re.match(r'(\d+)([M|G])bit', speed)
                if r:
                    speed = r.group(1)
                    if r.group(2) == 'M':
                        speed = int(speed) * 1000
                    elif r.group(2) == 'G':
                        speed = int(speed) * 1000000
    return [last_flap, description, speed, mac]
def _get_interface_map(self):
    ''' Return dict mapping ethernet port numbers to full interface name, ie
    {
        "1/1": "GigabitEthernet1/1",
        ...
    }
    '''
    if not self.show_int:
        # Cache 'show interface' output on the instance for reuse.
        self.show_int = self.device.send_command_timing('show interface', delay_factor=self._show_command_delay_factor)
    info = textfsm_extractor(
        self, "show_interface", self.show_int
    )
    result = {}
    for interface in info:
        # Only physical ethernet ports; skip management and non-ethernet entries.
        if 'ethernet' in interface['port'].lower() and 'mgmt' not in interface['port'].lower():
            # Extract the trailing slot/port pair, e.g. '1/1'.
            ifnum = re.sub(r'.*(\d+/\d+)', '\\1', interface['port'])
            result[ifnum] = interface['port']
    return result
def standardize_interface_name(self, port):
    """Normalize a port name to the canonical form used across this driver.

    Examples: 'lb1' -> 'Loopback1', 'tn2' -> 'Tunnel2', 've3' -> 'Ve3',
    'mgmt1'/'management1' -> 'Ethernetmgmt1', '1/1' or 'ethernet1/1' ->
    the full interface name from the interface map.

    :param port: interface name as reported by the device.
    :return: standardized interface name string.
    """
    if not self.interface_map:
        # Lazily build and cache the slot/port -> full-name map.
        self.interface_map = self._get_interface_map()
    port = str(port).strip()
    # All patterns are raw strings; the originals used bare '\d' escapes
    # (a SyntaxWarning on modern Python).
    # Convert lbX to LoopbackX
    port = re.sub(r'^lb(\d+)$', 'Loopback\\1', port)
    # Convert loopbackX to LoopbackX
    port = re.sub(r'^loopback(\d+)$', 'Loopback\\1', port)
    # Convert tnX to TunnelX
    port = re.sub(r'^tn(\d+)$', 'Tunnel\\1', port)
    # Convert veX to VeX
    port = re.sub(r'^ve(\d+)$', 'Ve\\1', port)
    # Convert mgmt1 to Ethernetmgmt1
    if port in ['mgmt1', 'management1']:
        port = 'Ethernetmgmt1'
    # Convert 1/1 or ethernet1/1 to the full name from the interface map
    if re.match(r'.*\d+/\d+', port):
        ifnum = re.sub(r'.*(\d+/\d+)', '\\1', port)
        port = self.interface_map[ifnum]
    return port
def get_lags(self):
    """Return LAG pseudo-interfaces keyed by 'lag<id>'.

    Parses cached 'show running-config lag' output. Each entry carries the
    configured LAG name as description and its member ports as children.
    """
    result = {}
    if not self.show_running_config_lag:
        # Cache the command output on the instance for reuse.
        self.show_running_config_lag = self.device.send_command_timing('show running-config lag', delay_factor=self._show_command_delay_factor)
    info = textfsm_extractor(
        self, "show_running_config_lag", self.show_running_config_lag
    )
    for lag in info:
        port = 'lag{}'.format(lag['id'])
        result[port] = {
            'is_up': True,  # NOTE(review): up/enabled are hard-coded, not read from the device
            'is_enabled': True,
            'description': lag['name'],
            'last_flapped': -1,
            'speed': 0,
            'mac_address': '',
            'children': self.interfaces_to_list(lag['ports'])
        }
    return result
def get_interfaces(self):
    """Return NAPALM interface details keyed by standardized interface name.

    Parses cached 'show interface' output via textfsm and merges in the LAG
    pseudo-interfaces from get_lags(). Speeds are normalized: 'M'/'Mbit'
    units stay as-is, 'G'/'Gbit' are multiplied by 1000.
    """
    if not self.show_int:
        self.show_int = self.device.send_command_timing('show interface', delay_factor=self._show_command_delay_factor)
    info = textfsm_extractor(
        self, "show_interface", self.show_int
    )
    # Compile once outside the loop (was re-matched per interface); matches
    # e.g. '10G', '100M', '1Gbit'.
    speed_re = re.compile(r'^(?P<number>\d+)(?P<unit>\S+)$')
    result = {}
    for interface in info:
        port = self.standardize_interface_name(interface['port'])
        speed = interface['speed']
        speed_m = speed_re.match(speed)
        if speed_m:
            if speed_m.group('unit') in ['M', 'Mbit']:
                # Was int(int(...)); the double conversion was redundant.
                speed = int(speed_m.group('number'))
            elif speed_m.group('unit') in ['G', 'Gbit']:
                # Was int(int(...) * 10E2); 10E2 == 1000.0, so plain integer
                # arithmetic yields exactly the same values without the
                # float round-trip.
                speed = int(speed_m.group('number')) * 1000
        result[port] = {
            'is_up': interface['link'].lower() == 'up',
            'is_enabled': interface['link'].lower() != 'disabled',
            'description': interface['name'],
            'last_flapped': -1,
            'speed': speed,
            'mac_address': interface['mac'],
            'mtu': interface['mtu'],
        }
    # Merge LAG pseudo-interfaces.
    lags = self.get_lags()
    result.update(lags)
    return result
def get_interfaces_ip(self):
    """Return IPv4/IPv6 addresses per interface.

    Parses 'show running-config interface' via textfsm. Each address is
    keyed by IP with a 'prefix_length' entry, per the NAPALM
    get_interfaces_ip schema.
    """
    interfaces = {}
    output = self.device.send_command_timing('show running-config interface', delay_factor=self._show_command_delay_factor)
    info = textfsm_extractor(
        self, "show_running_config_interface", output
    )
    for intf in info:
        # Interface type and number arrive as separate textfsm fields.
        port = self.standardize_interface_name(intf['interface'] + intf['interfacenum'])
        if port not in interfaces:
            interfaces[port] = {
                'ipv4': {},
                'ipv6': {},
            }
        if intf['ipv4address']:
            ipaddress, prefix = intf['ipv4address'].split('/')
            interfaces[port]['ipv4'][ipaddress] = { 'prefix_length': prefix }
        if intf['ipv6address']:
            ipaddress, prefix = intf['ipv6address'].split('/')
            interfaces[port]['ipv6'][ipaddress] = { 'prefix_length': prefix }
    return interfaces
def get_interfaces_vlans(self):
    ''' return dict as documented at https://github.com/napalm-automation/napalm/issues/919#issuecomment-485905491

    Builds the structure from cached 'show interface' output (mode),
    'show vlan' (access/trunk VLAN membership) and 'show mpls config'
    (VLL-carried VLANs), then promotes access VLANs on tagged ports to
    native VLANs.
    '''
    if not self.show_int:
        self.show_int = self.device.send_command_timing('show interface', delay_factor=self._show_command_delay_factor)
    info = textfsm_extractor(
        self, "show_interface", self.show_int
    )
    result = {}
    # Create interfaces structure and correct mode
    for interface in info:
        intf = self.standardize_interface_name(interface['port'])
        # Untagged ports and ve interfaces are treated as access ports.
        if interface['tag'] == 'untagged' or re.match(r'^ve', interface['port'].lower()):
            mode = "access"
        else:
            mode = "trunk"
        result[intf] = {
            'mode': mode,
            'access-vlan': -1,
            'trunk-vlans': [],
            'native-vlan': -1,
            'tagged-native-vlan': False
        }
    # Add lags
    for lag in self.get_lags().keys():
        result[lag] = {
            'mode': 'trunk',
            'access-vlan': -1,
            'trunk-vlans': [],
            'native-vlan': -1,
            'tagged-native-vlan': False
        }
    if not self.show_vlan:
        self.show_vlan = self.device.send_command('show vlan')
    info = textfsm_extractor(
        self, "show_vlan", self.show_vlan
    )
    # Assign VLANs to interfaces
    for vlan in info:
        access_ports = self.interface_list_conversation(
            vlan['ve'],
            '',
            vlan['untaggedports']
        )
        trunk_ports = self.interface_list_conversation(
            '',
            vlan['taggedports'],
            ''
        )
        for port in access_ports:
            # Skip reserved VLAN ids above 4094.
            if int(vlan['vlan']) <= 4094:
                result[port]['access-vlan'] = vlan['vlan']
        for port in trunk_ports:
            if int(vlan['vlan']) <= 4094:
                result[port]['trunk-vlans'].append(vlan['vlan'])
    # Add ports with VLANs from VLLs
    if not self.show_mpls_config:
        self.show_mpls_config = self.device.send_command('show mpls config')
    info = textfsm_extractor(
        self, "show_mpls_config", self.show_mpls_config
    )
    for vll in info:
        interface = self.standardize_interface_name(vll['interface'])
        # Ignore VLLs with no interface
        if interface:
            result[interface]['trunk-vlans'].append(vll['vlan'])
    # Set native vlan for tagged ports
    for port, data in result.items():
        # NOTE(review): 'access-vlan' of -1 is truthy, so this branch also
        # runs for trunk ports with no access VLAN (re-assigning -1, a
        # no-op) — confirm whether "!= -1" was intended.
        if data['trunk-vlans'] and data['access-vlan']:
            result[port]['native-vlan'] = data['access-vlan']
            result[port]['access-vlan'] = -1
    return result
def get_vlans(self):
if not self.show_vlan:
self.show_vlan = self.device.send_command('show vlan')
info = textfsm_extractor(
self, "show_vlan", self.show_vlan
)
result = {}
for vlan in info:
result[vlan['vlan']] = {
'name': vlan['name'],
'interfaces': self.interface_list_conversation(
vlan['ve'],
vlan['taggedports'],
vlan['untaggedports']
)
}
# Add ports with VLANs from VLLs
if not self.show_mpls_config:
self.show_mpls_config = self.device.send_command('show mpls config')
info = textfsm_extractor(
self, "show_mpls_config", self.show_mpls_config
)
| |
"""Manipulate semantic versioning (SemVer)
Manipulate semantic version strings:
1. Create a new version number,
1. initialize it with an existing number
1. Read it from an existing project setup.cfg file.
1. Validate the version string.
1. Compare one version number with another.
See also https://semver.org/
"""
import configparser
import logging
from pathlib import Path, WindowsPath, PosixPath
from typing import Union
import re
import tempfile
from beetools.beearchiver import Archiver, msg_error
_PROJ_DESC = __doc__.split("\n")[0]
_PROJ_PATH = Path(__file__)
_PROJ_NAME = _PROJ_PATH.stem
class SemVerIt:
"""Manipulate semantic versioning (SemVer)
Manipulate semantic version numbers. Create a new version number,
initialize it with an existing number or alternatively read it from an
existing project setup.py file. Compare a version number with another.
See also https://semver.org/
"""
def __init__(
    self,
    p_version: Union[Path, str, list] = None,
    # p_setup_cfg_pth: Path = None,
    p_parent_log_name: str = None,
    p_verbose: bool = True,
) -> None:
    """Create a new SemVerIt instance.

    :param p_version:
        Initial version to start with.  May be a version string, a
        [major, minor, patch] list (of ints or strings), or a Path to a
        setup.cfg file from which the version number is read.
    :param p_parent_log_name:
        Name of the parent logger.  In combination with the module name it
        forms the logger name.
    :param p_verbose:
        Write messages to the console.
    :return: SemVerIt
    :Examples:
    >>> import semverit
    >>> svit = semverit.SemVerIt()
    >>> print(svit)
    0.0.0
    >>> svit = semverit.SemVerIt('5.5.5')
    >>> print(svit)
    5.5.5
    >>> svit = semverit.SemVerIt([5 ,5 ,5])
    >>> print(svit)
    5.5.5
    >>> svit = semverit.SemVerIt(['5' ,'5' ,'5'])
    >>> print(svit)
    5.5.5
    """
    self.success = True
    if p_parent_log_name:
        self._log_name = "{}.{}".format(p_parent_log_name, _PROJ_NAME)
        self.logger = logging.getLogger(self._log_name)
    self.verbose = p_verbose
    # Dispatch on p_version's type: existing Path -> read setup.cfg,
    # str -> use as-is, list -> join the three parts, anything else -> 0.0.0.
    if (
        isinstance(p_version, WindowsPath) or isinstance(p_version, PosixPath)
    ) and p_version.exists():
        self.version = self.get_from_setup_cfg(p_version)
    elif isinstance(p_version, str):
        self.version = p_version
    elif isinstance(p_version, list):
        self.version = "{}.{}.{}".format(p_version[0], p_version[1], p_version[2])
    else:
        self.version = "0.0.0"
    self.verify(self.version)
    # Cache the numeric components for the comparison operators.
    major, minor, patch = self.version.split(".")
    self.maj = int(major)
    self.min = int(minor)
    self.patch = int(patch)
def __eq__(self, p_other: Union[str, "SemVerIt"]) -> bool:
    """Equal: ==

    :param p_other: Union[str, 'SemVerIt']
        Version strings to compare.
    :return: bool (False for unsupported operand types)
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> svit == '5.5.5'
        True
        >>> svit == '5.5.4'
        False
    """
    if isinstance(p_other, str):
        return self.version == p_other
    if isinstance(p_other, SemVerIt):
        return self.version == p_other.version
    return False
def __le__(self, p_other):
    """Less or equal: <=

    :param p_other:
        Version string (or SemVerIt) to compare; other types compare False.
    :return: bool
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> svit <= '5.5.5'
        True
        >>> svit <= '5.5.4'
        False
        >>> svit <= '5.5.6'
        True
    """
    # Tuple comparison replaces the original nested if/elif ladder, which
    # carried several dead branches (reassigning rc to its initial False).
    if isinstance(p_other, str):
        other = p_other
    elif isinstance(p_other, SemVerIt):
        other = p_other.version
    else:
        return False
    o_major, o_minor, o_patch = (int(part) for part in other.split("."))
    return (self.maj, self.min, self.patch) <= (o_major, o_minor, o_patch)
def __lt__(self, p_other):
    """Less than: <

    :param p_other:
        Version string (or SemVerIt) to compare; other types compare False.
    :return: bool
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> svit < '5.5.5'
        False
        >>> svit < '5.5.4'
        False
        >>> svit < '5.5.6'
        True
    """
    # Tuple comparison replaces the original nested if/elif ladder, which
    # carried several dead branches (reassigning rc to its initial False).
    if isinstance(p_other, str):
        other = p_other
    elif isinstance(p_other, SemVerIt):
        other = p_other.version
    else:
        return False
    o_major, o_minor, o_patch = (int(part) for part in other.split("."))
    return (self.maj, self.min, self.patch) < (o_major, o_minor, o_patch)
def __ge__(self, p_other) -> bool:
    """Greater or equal: >=

    :param p_other:
        Version string (or SemVerIt) to compare; other types compare False.
    :return: bool
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> svit >= '5.5.5'
        True
        >>> svit >= '5.5.4'
        True
        >>> svit >= '5.5.6'
        False
    """
    # Tuple comparison replaces the original nested if/elif ladder, which
    # carried dead branches and commented-out code.
    if isinstance(p_other, str):
        other = p_other
    elif isinstance(p_other, SemVerIt):
        other = p_other.version
    else:
        return False
    o_major, o_minor, o_patch = (int(part) for part in other.split("."))
    return (self.maj, self.min, self.patch) >= (o_major, o_minor, o_patch)
def __gt__(self, p_other) -> bool:
    """Greater than: >

    :param p_other:
        Version string (or SemVerIt) to compare; other types compare False.
    :return: bool
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> svit > '5.5.5'
        False
        >>> svit > '5.5.4'
        True
        >>> svit > '5.5.6'
        False
    """
    # Tuple comparison replaces the original nested if/elif ladder, which
    # carried several dead branches (reassigning rc to its initial False).
    if isinstance(p_other, str):
        other = p_other
    elif isinstance(p_other, SemVerIt):
        other = p_other.version
    else:
        return False
    o_major, o_minor, o_patch = (int(part) for part in other.split("."))
    return (self.maj, self.min, self.patch) > (o_major, o_minor, o_patch)
def __ne__(self, p_other) -> bool:
    """Not equal: !=

    :param p_other:
        Version string (or SemVerIt) to compare; other types compare False.
    :return: bool
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> svit != '5.5.5'
        False
        >>> svit !='5.5.4'
        True
        >>> svit != '5.5.6'
        True
    """
    if isinstance(p_other, str):
        return self.version != p_other
    if isinstance(p_other, SemVerIt):
        return self.version != p_other.version
    return False
def __repr__(self) -> str:
    """Printable representation of the object: the version string itself.

    :return: str
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> svit
        5.5.5
    """
    return self.version
def __str__(self) -> str:
    """Informal string form of the object: the version string itself.

    :return: str
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> print(svit)
        5.5.5
    """
    return self.version
def bump_maj(self) -> str:
    """Bump the major version.

    Increments the major number by 1 and resets minor and patch to zero,
    e.g. 0.0.1 -> 1.0.0 and 0.1.2 -> 1.0.0.

    :return: str, Complete version string
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> svit.bump_maj()
        '6.0.0'
    """
    self.maj, self.min, self.patch = self.maj + 1, 0, 0
    self.version = "{}.{}.{}".format(self.maj, self.min, self.patch)
    return self.version
def bump_min(self) -> str:
    """Bump the minor version.

    Increments the minor number by 1, keeps the major number and resets
    the patch number to zero, e.g. 0.0.1 -> 0.1.0 and 0.1.2 -> 0.2.0.

    :return: str, Complete version string
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> svit.bump_min()
        '5.6.0'
    """
    self.min, self.patch = self.min + 1, 0
    self.version = "{}.{}.{}".format(self.maj, self.min, self.patch)
    return self.version
def bump_patch(self) -> str:
    """Bump the patch version.

    Increments the patch number by 1; major and minor are unchanged,
    e.g. 0.0.1 -> 0.0.2 and 0.1.2 -> 0.1.3.

    :return: str, Complete version string
    examples::
        >>> import semverit
        >>> svit = semverit.SemVerIt('5.5.5')
        >>> svit.bump_patch()
        '5.5.6'
    """
    self.patch = self.patch + 1
    self.version = ".".join(str(part) for part in (self.maj, self.min, self.patch))
    return self.version
def get_from_setup_cfg(self, p_pth) -> str:
"""Read the version number from the setup.cfg file.
The project setup.cfg file (should) contain the version number for
the current module and package. Most projects already has a setup.py
file and is most probably also the correct version currently pushed
to git. It makes sense to read it from there.
:parm p_pth:
Path to the setup.cfg file
:return: str, Complete version string
>>> import semverit
>>> import tempfile
>>> from pathlib import Path
>>> cfg = Path(tempfile.mkdtemp(), 'setup.cfg')
>>> cfg.write_text(_setup_cfg_contents)
27
>>> svit = semverit.SemVerIt()
>>> svit.get_from_setup_cfg(p_pth = cfg)
'2.3.4'
>>> cfg.write_text(_setup_cfg_contents_faulty)
33
>>> svit = semverit.SemVerIt()
>>> svit.get_from_setup_cfg(p_pth = cfg)
'0.0.0'
"""
setup_cfg = configparser.ConfigParser(inline_comment_prefixes="#")
setup_cfg.read([p_pth])
if | |
# Copyright The IETF Trust 2016-2019, All Rights Reserved
import debug # pyflakes:ignore
import datetime
from django.utils import timezone
from ietf.doc.factories import WgDraftFactory, IndividualDraftFactory
from ietf.group.factories import ReviewTeamFactory
from ietf.group.models import Group, Role
from ietf.name.models import ReviewerQueuePolicyName
from ietf.person.factories import PersonFactory
from ietf.person.fields import PersonEmailChoiceField
from ietf.person.models import Email
from ietf.review.factories import ReviewAssignmentFactory, ReviewRequestFactory
from ietf.review.models import ReviewerSettings, NextReviewerInTeam, UnavailablePeriod, ReviewWish, \
ReviewTeamSettings
from ietf.review.policies import (AssignmentOrderResolver, LeastRecentlyUsedReviewerQueuePolicy,
get_reviewer_queue_policy, QUEUE_POLICY_NAME_MAPPING)
from ietf.utils.test_data import create_person
from ietf.utils.test_utils import TestCase
class GetReviewerQueuePolicyTest(TestCase):
    """Tests for the get_reviewer_queue_policy() lookup and its error handling."""

    def test_valid_policy(self):
        """A team configured with a known policy id yields the matching policy class."""
        team = ReviewTeamFactory(acronym="rotationteam", name="Review Team", list_email="<EMAIL>", parent=Group.objects.get(acronym="farfut"), settings__reviewer_queue_policy_id='LeastRecentlyUsed')
        policy = get_reviewer_queue_policy(team)
        self.assertEqual(policy.__class__, LeastRecentlyUsedReviewerQueuePolicy)

    def test_missing_settings(self):
        """A team whose ReviewTeamSettings row is gone raises ValueError."""
        team = ReviewTeamFactory(acronym="rotationteam", name="Review Team", list_email="<EMAIL>", parent=Group.objects.get(acronym="farfut"))
        # Remove the settings created by the factory to simulate the missing row.
        ReviewTeamSettings.objects.all().delete()
        with self.assertRaises(ValueError):
            get_reviewer_queue_policy(team)

    def test_invalid_policy_name(self):
        """A policy id with no registered implementation raises ValueError."""
        ReviewerQueuePolicyName.objects.create(slug='invalid')
        team = ReviewTeamFactory(acronym="rotationteam", name="Review Team", list_email="<EMAIL>", parent=Group.objects.get(acronym="farfut"), settings__reviewer_queue_policy_id='invalid')
        with self.assertRaises(ValueError):
            get_reviewer_queue_policy(team)
class _Wrapper(TestCase):
"""Wrapper class - exists to prevent UnitTest from trying to run the base class tests"""
def test_all_reviewer_queue_policies_have_tests(self):
    """Every ReviewerQueuePolicy should be tested"""
    # Every policy id in QUEUE_POLICY_NAME_MAPPING must be claimed by
    # exactly one ReviewerQueuePolicyTestCase subclass (and vice versa).
    rqp_test_classes = self.ReviewerQueuePolicyTestCase.__subclasses__()
    self.assertCountEqual(
        QUEUE_POLICY_NAME_MAPPING.keys(),
        [cls.reviewer_queue_policy_id for cls in rqp_test_classes],
    )
class ReviewerQueuePolicyTestCase(TestCase):
"""Parent class to define interface / default tests for QueuePolicy implementation tests
To add tests for a new AbstractReviewerQueuePolicy class, you need to:
1. Subclass _Wrapper.ReviewerQueuePolicyTestCase (i.e., this class)
2. Define the reviewer_queue_policy_id class variable in your new class
3. (Maybe) implement a class-specific append_reviewer() method to add a new
reviewer that sorts to the end of default_reviewer_rotation_list()
4. Fill in any tests that raise NotImplemented exceptions
5. Override any other tests that should have different behavior for your new policy
6. Add any policy-specific tests
When adding tests to this default class, be careful not to make assumptions about
the ordering of reviewers. The only guarantee is that append_reviewer() adds a
new reviewer who is later in the default rotation for the next assignment. Once that
assignment is made, the rotation order is entirely unknown! If you need to make
such assumptions, call policy.default_reviewer_rotation_list() or move the test
into a policy-specific subclass.
"""
# Must define reviewer_queue_policy_id in test subclass
reviewer_queue_policy_id = ''
def setUp(self):
    """Create a review team configured with this test class's queue policy and an empty reviewer list."""
    self.team = ReviewTeamFactory(acronym="rotationteam", name="Review Team", list_email="<EMAIL>", parent=Group.objects.get(acronym="farfut"))
    # Switch the factory-created settings to the policy under test.
    self.team.reviewteamsettings.reviewer_queue_policy_id = self.reviewer_queue_policy_id
    self.team.reviewteamsettings.save()
    self.policy = get_reviewer_queue_policy(self.team)
    self.reviewers = []
def append_reviewer(self, skip_count=None):
    """Create a reviewer who will appear in the assignee options list

    Newly added reviewer must come later in the default_reviewer_rotation_list. The default
    implementation creates users whose names are in lexicographic order.

    :param skip_count: when given, the new reviewer's skip_next setting is set to this value.
    :return: the created reviewer (Person).
    """
    index = len(self.reviewers)
    assert(index < 100) # ordering by label will fail if > 100 reviewers are created
    # Zero-padded label keeps lexicographic order == creation order.
    label = '{:02d}'.format(index)
    reviewer = create_person(self.team, 'reviewer', name='Test Reviewer{}'.format(label), username='testreviewer{}'.format(label))
    self.reviewers.append(reviewer)
    if skip_count is not None:
        settings = self.reviewer_settings_for(reviewer)
        settings.skip_next = skip_count
        settings.save()
    return reviewer
def create_old_review_assignment(self, reviewer, **kwargs):
    """Create a review that won't disturb the ordering of reviewers

    :param reviewer: Person whose email becomes the assignment's reviewer.
    :param kwargs: forwarded to ReviewAssignmentFactory (e.g. review_request__team).
    """
    return ReviewAssignmentFactory(reviewer=reviewer.email(), **kwargs)
def reviewer_settings_for(self, person):
    """Return the stored ReviewerSettings for person on this team, or a fresh unsaved instance."""
    existing = ReviewerSettings.objects.filter(team=self.team, person=person).first()
    if existing:
        return existing
    return ReviewerSettings(team=self.team, person=person)
def test_return_reviewer_to_rotation_top(self):
    """Policy-specific: verify a returned reviewer goes back to the top of the rotation."""
    # Subclass must implement this
    raise NotImplementedError
def test_default_reviewer_rotation_list_ignores_out_of_team_reviewers(self):
    """Reviewers who have left the team must not appear in the rotation."""
    available_reviewers, _ = self.set_up_default_reviewer_rotation_list_test()
    # This reviewer has an assignment, but is no longer in the team and should not be in rotation.
    out_of_team_reviewer = PersonFactory()
    ReviewAssignmentFactory(review_request__team=self.team, reviewer=out_of_team_reviewer.email())
    # No known assignments, order in PK order.
    rotation = self.policy.default_reviewer_rotation_list()
    self.assertNotIn(out_of_team_reviewer, rotation)
    self.assertEqual(rotation, available_reviewers)
def test_assign_reviewer(self):
    """assign_reviewer() should create a review assignment for the correct user"""
    request = ReviewRequestFactory(team=self.team)
    for _ in range(3):
        self.append_reviewer()
    # Nothing should be assigned before the policy acts.
    self.assertFalse(request.reviewassignment_set.exists())
    chosen = self.reviewers[0]
    self.policy.assign_reviewer(request, chosen.email(), add_skip=False)
    assigned_emails = request.reviewassignment_set.all().values_list('reviewer', flat=True)
    self.assertCountEqual(assigned_emails, [str(chosen.email())])
    # add_skip=False must leave the reviewer's skip counter untouched.
    self.assertEqual(self.reviewer_settings_for(chosen).skip_next, 0)
def test_assign_reviewer_and_add_skip(self):
    """assign_reviewer() with add_skip=True should create the assignment and
    increment the assignee's skip_next counter.
    """
    review_req = ReviewRequestFactory(team=self.team)
    for _ in range(3):
        self.append_reviewer()
    self.assertFalse(review_req.reviewassignment_set.exists())
    reviewer = self.reviewers[0]
    self.policy.assign_reviewer(review_req, reviewer.email(), add_skip=True)
    self.assertCountEqual(
        review_req.reviewassignment_set.all().values_list('reviewer', flat=True),
        [str(reviewer.email())]
    )
    # add_skip=True should have bumped the skip counter to 1.
    self.assertEqual(self.reviewer_settings_for(reviewer).skip_next, 1)
def test_assign_reviewer_updates_skip_next_minimal(self):
    """If we skip the first reviewer, their skip_next value should decrement

    Different policies handle skipping in different ways.
    The only assumption we make in the base test class is that an in-order assignment
    to a non-skipped reviewer will decrement the skip_next for any reviewers we skipped.
    Any other tests are policy-specific (e.g., the RotateAlphabetically policy will
    also decrement any users skipped between the assignee and the next reviewer in the
    rotation)
    """
    review_req = ReviewRequestFactory(team=self.team)
    # The first two reviewers in the rotation are each marked to be skipped once.
    reviewer_to_skip = self.append_reviewer()
    settings = self.reviewer_settings_for(reviewer_to_skip)
    settings.skip_next = 1
    settings.save()
    another_reviewer_to_skip = self.append_reviewer()
    settings = self.reviewer_settings_for(another_reviewer_to_skip)
    settings.skip_next = 1
    settings.save()
    reviewer_to_assign = self.append_reviewer()
    reviewer_to_ignore = self.append_reviewer()
    # Check test assumptions
    self.assertEqual(
        self.policy.default_reviewer_rotation_list(),
        [
            reviewer_to_skip,
            another_reviewer_to_skip,
            reviewer_to_assign,
            reviewer_to_ignore,
        ],
    )
    self.assertEqual(self.reviewer_settings_for(reviewer_to_skip).skip_next, 1)
    self.assertEqual(self.reviewer_settings_for(another_reviewer_to_skip).skip_next, 1)
    self.assertEqual(self.reviewer_settings_for(reviewer_to_assign).skip_next, 0)
    # Assigning past only skip_next > 0 reviewers is an "in order" assignment,
    # so each skipped reviewer should have one skip consumed.
    self.policy.assign_reviewer(review_req, reviewer_to_assign.email(), add_skip=False)
    # Check results
    self.assertEqual(self.reviewer_settings_for(reviewer_to_skip).skip_next, 0,
                     'skip_next not updated for first skipped reviewer')
    self.assertEqual(self.reviewer_settings_for(another_reviewer_to_skip).skip_next, 0,
                     'skip_next not updated for second skipped reviewer')
def test_assign_reviewer_updates_skip_next_with_add_skip(self):
"""Skipping reviewers with add_skip=True should update skip_counts properly
Subclasses must implement
"""
raise NotImplementedError
def test_assign_reviewer_updates_skip_next_without_add_skip(self):
"""Skipping reviewers with add_skip=False should update skip_counts properly
Subclasses must implement
"""
raise NotImplementedError
def test_assign_reviewer_ignores_skip_next_on_out_of_order_assignment(self):
    """If assignment is not in-order, skip_next values should not change"""
    review_req = ReviewRequestFactory(team=self.team)
    reviewer_to_skip = self.append_reviewer()
    settings = self.reviewer_settings_for(reviewer_to_skip)
    settings.skip_next = 1
    settings.save()
    reviewer_to_ignore = self.append_reviewer()
    reviewer_to_assign = self.append_reviewer()
    another_reviewer_to_skip = self.append_reviewer()
    settings = self.reviewer_settings_for(another_reviewer_to_skip)
    settings.skip_next = 3
    settings.save()
    # Check test assumptions
    self.assertEqual(
        self.policy.default_reviewer_rotation_list(),
        [
            reviewer_to_skip,
            reviewer_to_ignore,
            reviewer_to_assign,
            another_reviewer_to_skip,
        ],
    )
    self.assertEqual(self.reviewer_settings_for(reviewer_to_skip).skip_next, 1)
    self.assertEqual(self.reviewer_settings_for(reviewer_to_ignore).skip_next, 0)
    self.assertEqual(self.reviewer_settings_for(reviewer_to_assign).skip_next, 0)
    self.assertEqual(self.reviewer_settings_for(another_reviewer_to_skip).skip_next, 3)
    # reviewer_to_ignore has skip_next == 0 but is passed over, so this
    # assignment is out of order and must not touch any skip counters.
    self.policy.assign_reviewer(review_req, reviewer_to_assign.email(), add_skip=False)
    # Check results
    self.assertEqual(self.reviewer_settings_for(reviewer_to_skip).skip_next, 1,
                     'skip_next changed unexpectedly for first skipped reviewer')
    self.assertEqual(self.reviewer_settings_for(reviewer_to_ignore).skip_next, 0,
                     'skip_next changed unexpectedly for ignored reviewer')
    self.assertEqual(self.reviewer_settings_for(reviewer_to_assign).skip_next, 0,
                     'skip_next changed unexpectedly for assigned reviewer')
    self.assertEqual(self.reviewer_settings_for(another_reviewer_to_skip).skip_next, 3,
                     'skip_next changed unexpectedly for second skipped reviewer')
def test_assign_reviewer_updates_skip_next_when_canfinish_other_doc(self):
    """Should update skip_next when 'canfinish' set for someone unrelated to this doc

    A 'canfinish' reviewer is only a candidate for documents they previously
    reviewed, so skipping them for an unrelated document still counts as an
    in-order assignment (and skip counters are consumed as usual).
    """
    completed_req = ReviewRequestFactory(team=self.team, state_id='assigned')
    assigned_req = ReviewRequestFactory(team=self.team, state_id='assigned')
    new_req = ReviewRequestFactory(team=self.team, doc=assigned_req.doc)
    reviewer_to_skip = self.append_reviewer()
    settings = self.reviewer_settings_for(reviewer_to_skip)
    settings.skip_next = 1
    settings.save()
    # Has completed a review of some other document - unavailable for current req
    canfinish_reviewer = self.append_reviewer()
    UnavailablePeriod.objects.create(
        team=self.team,
        person=canfinish_reviewer,
        start_date='2000-01-01',
        availability='canfinish',
    )
    self.create_old_review_assignment(
        reviewer=canfinish_reviewer,
        review_request=completed_req,
        state_id='completed',
    )
    # Has no review assignments at all
    canfinish_reviewer_no_review = self.append_reviewer()
    UnavailablePeriod.objects.create(
        team=self.team,
        person=canfinish_reviewer_no_review,
        start_date='2000-01-01',
        availability='canfinish',
    )
    # Has accepted but not completed a review of this document
    canfinish_reviewer_no_completed = self.append_reviewer()
    UnavailablePeriod.objects.create(
        team=self.team,
        person=canfinish_reviewer_no_completed,
        start_date='2000-01-01',
        availability='canfinish',
    )
    self.create_old_review_assignment(
        reviewer=canfinish_reviewer_no_completed,
        review_request=assigned_req,
        state_id='accepted',
    )
    reviewer_to_assign = self.append_reviewer()
    self.assertEqual(
        self.policy.default_reviewer_rotation_list(),
        [
            reviewer_to_skip,
            canfinish_reviewer,
            canfinish_reviewer_no_review,
            canfinish_reviewer_no_completed,
            reviewer_to_assign
        ],
        'Test logic error - reviewers not in expected starting order'
    )
    # assign the review
    self.policy.assign_reviewer(new_req, reviewer_to_assign.email(), add_skip=False)
    # Check results
    self.assertEqual(self.reviewer_settings_for(reviewer_to_skip).skip_next, 0,
                     'skip_next not updated for skipped reviewer')
    self.assertEqual(self.reviewer_settings_for(canfinish_reviewer).skip_next, 0,
                     'skip_next changed unexpectedly for "canfinish" unavailable reviewer')
    self.assertEqual(self.reviewer_settings_for(canfinish_reviewer_no_review).skip_next, 0,
                     'skip_next changed unexpectedly for "canfinish" unavailable reviewer with no review')
    self.assertEqual(self.reviewer_settings_for(canfinish_reviewer_no_completed).skip_next, 0,
                     'skip_next changed unexpectedly for "canfinish" unavailable reviewer with no completed review')
    self.assertEqual(self.reviewer_settings_for(reviewer_to_assign).skip_next, 0,
                     'skip_next changed unexpectedly for assigned reviewer')
def test_assign_reviewer_ignores_skip_next_when_canfinish_this_doc(self):
    """Should not update skip_next when 'canfinish' set for prior reviewer of current req

    If a reviewer is unavailable but 'canfinish' and has previously completed a review of this
    doc, they are a candidate to be assigned to it. In that case, when skip_next == 0, skipping
    over them means the assignment was not 'in order' and skip_next should not be updated.
    """
    completed_req = ReviewRequestFactory(team=self.team, state_id='assigned')
    # new_req is for the same doc the canfinish reviewer already completed
    new_req = ReviewRequestFactory(team=self.team, doc=completed_req.doc)
    reviewer_to_skip = self.append_reviewer()
    settings = self.reviewer_settings_for(reviewer_to_skip)
    settings.skip_next = 1
    settings.save()
    canfinish_reviewer = self.append_reviewer()
    UnavailablePeriod.objects.create(
        team=self.team,
        person=canfinish_reviewer,
        start_date='2000-01-01',
        availability='canfinish',
    )
    self.create_old_review_assignment(
        reviewer=canfinish_reviewer,
        review_request=completed_req,
        state_id='completed',
    )
    reviewer_to_assign = self.append_reviewer()
    self.assertEqual(self.policy.default_reviewer_rotation_list(),
                     [reviewer_to_skip, canfinish_reviewer, reviewer_to_assign],
                     'Test logic error - reviewers not in expected starting order')
    # assign the review
    self.policy.assign_reviewer(new_req, reviewer_to_assign.email(), add_skip=False)
    # Check results: canfinish_reviewer was a valid candidate (skip_next 0) who
    # was passed over, so the assignment is out of order and nothing changes.
    self.assertEqual(self.reviewer_settings_for(reviewer_to_skip).skip_next, 1,
                     'skip_next changed unexpectedly for skipped reviewer')
    self.assertEqual(self.reviewer_settings_for(canfinish_reviewer).skip_next, 0,
                     'skip_next changed unexpectedly for "canfinish" reviewer')
    self.assertEqual(self.reviewer_settings_for(reviewer_to_assign).skip_next, 0,
                     'skip_next changed unexpectedly for assigned reviewer')
def set_up_default_reviewer_rotation_list_test(self):
    """Populate reviewers for the default_reviewer_rotation_list tests.

    Creates five ordinary reviewers, one fully unavailable reviewer (who must
    never appear in the rotation), and marks one ordinary reviewer as
    'canfinish' (which must not affect the rotation).

    Returns a (available_reviewers, unavailable_reviewer) tuple.
    """
    for _ in range(5):
        self.append_reviewer()
    # Fully unavailable: must never be included in the rotation.
    unavailable = self.append_reviewer()
    UnavailablePeriod.objects.create(
        team=self.team,
        person=unavailable,
        start_date='2000-01-01',
        availability='unavailable',
    )
    # 'canfinish' reviewers stay in the default rotation; they are filtered
    # further only when concrete assignment choices are made, so this should
    # have no impact here.
    UnavailablePeriod.objects.create(
        team=self.team,
        person=self.reviewers[1],
        start_date='2000-01-01',
        availability='canfinish',
    )
    available = [person for person in self.reviewers if person is not unavailable]
    return (available, unavailable)
def test_default_reviewer_rotation_list(self):
available_reviewers, unavailable_reviewer = self.set_up_default_reviewer_rotation_list_test()
rotation | |
# tools/preview/preview.py
#!/usr/bin/env python3
""" Tool to preview swaps and tweak configuration prior to running a convert """
import logging
import random
import tkinter as tk
from tkinter import ttk
import os
import sys
from configparser import ConfigParser
from threading import Event, Lock
import cv2
import numpy as np
from PIL import Image, ImageTk
from lib.aligner import Extract as AlignerExtract
from lib.cli.args import ConvertArgs
from lib.gui.utils import get_images, get_config, initialize_config, initialize_images
from lib.gui.custom_widgets import Tooltip
from lib.gui.control_helper import ControlPanel, ControlPanelOption
from lib.convert import Converter
from lib.faces_detect import DetectedFace
from lib.multithreading import MultiThread
from lib.utils import FaceswapError
from lib.queue_manager import queue_manager
from scripts.fsmedia import Alignments, Images
from scripts.convert import Predict
from plugins.plugin_loader import PluginLoader
from plugins.convert._config import Config
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Preview(tk.Tk):  # pylint:disable=too-few-public-methods
    """ This tool is part of the Faceswap Tools suite and should be called from
    ``python tools.py preview`` command.

    Loads up 5 semi-random face swaps and displays them, cropped, in place in the final frame.
    Allows user to live tweak settings, before saving the final config to
    :file:`./config/convert.ini`

    Parameters
    ----------
    arguments: :class:`argparse.Namespace`
        The :mod:`argparse` arguments as passed in from :mod:`tools.py`
    """

    def __init__(self, arguments):
        logger.debug("Initializing %s: (arguments: '%s'", self.__class__.__name__, arguments)
        super().__init__()
        self._config_tools = ConfigTools()
        # Lock shared with Samples/Patch so sample generation and patching
        # never update the GUI concurrently
        self._lock = Lock()
        # Global tkinter flags: `refresh` requests a redraw, `busy` marks a
        # long-running operation in progress
        self._tk_vars = dict(refresh=tk.BooleanVar(), busy=tk.BooleanVar())
        for val in self._tk_vars.values():
            val.set(False)
        self._display = FacesDisplay(256, 64, self._tk_vars)
        # Event used to tell the Patch pipeline that a converter patch should run
        trigger_patch = Event()
        self._samples = Samples(arguments, 5, self._display, self._lock, trigger_patch)
        self._patch = Patch(arguments,
                            self._available_masks,
                            self._samples,
                            self._display,
                            self._lock,
                            trigger_patch,
                            self._config_tools,
                            self._tk_vars)
        self._initialize_tkinter()
        # Widgets are created later in _build_ui(); placeholders until then
        self._image_canvas = None
        self._opts_book = None
        self._cli_frame = None  # cli frame holds cli options
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def _available_masks(self):
        """ list: The mask names that are available for every face in the alignments file """
        # A mask qualifies only if its summary count covers every face
        retval = [key
                  for key, val in self._samples.alignments.mask_summary.items()
                  if val == self._samples.alignments.faces_count]
        return retval

    def _initialize_tkinter(self):
        """ Initialize a standalone tkinter instance. """
        logger.debug("Initializing tkinter")
        initialize_config(self, None, None)
        initialize_images()
        get_config().set_geometry(940, 600, fullscreen=False)
        self.title("Faceswap.py - Convert Settings")
        self.tk.call(
            "wm",
            "iconphoto",
            self._w, get_images().icons["favicon"])  # pylint:disable=protected-access
        logger.debug("Initialized tkinter")

    def process(self):
        """ The entry point for the Preview tool from :file:`lib.tools.cli`.

        Launch the tkinter preview Window and run main loop.
        """
        self._build_ui()
        self.mainloop()

    def _refresh(self, *args):
        """ Load new faces to display in preview.

        Parameters
        ----------
        *args: tuple
            Unused, but required for tkinter callback.
        """
        # NOTE: logger.trace is a project-added logging level, not stdlib
        logger.trace("Refreshing swapped faces. args: %s", args)
        self._tk_vars["busy"].set(True)
        self._config_tools.update_config()
        with self._lock:
            self._patch.converter_arguments = self._cli_frame.convert_args
            self._patch.current_config = self._config_tools.config
        self._patch.trigger.set()
        logger.trace("Refreshed swapped faces")

    def _build_ui(self):
        """ Build the elements for displaying preview images and options panels. """
        container = tk.PanedWindow(self,
                                   sashrelief=tk.RIDGE,
                                   sashwidth=4,
                                   sashpad=8,
                                   orient=tk.VERTICAL)
        container.pack(fill=tk.BOTH, expand=True)
        container.preview_display = self._display
        self._image_canvas = ImagesCanvas(container, self._tk_vars)
        container.add(self._image_canvas, height=400 * get_config().scaling_factor)
        options_frame = ttk.Frame(container)
        # CLI options normalized from dashed to underscored form for tk widgets
        self._cli_frame = ActionFrame(
            options_frame,
            self._available_masks,
            self._samples.predictor.has_predicted_mask,
            self._patch.converter.cli_arguments.color_adjustment.replace("-", "_"),
            self._patch.converter.cli_arguments.mask_type.replace("-", "_"),
            self._config_tools,
            self._refresh,
            self._samples.generate,
            self._tk_vars)
        self._opts_book = OptionsBook(options_frame,
                                      self._config_tools,
                                      self._refresh)
        container.add(options_frame)
class Samples():
    """ The display samples.

    Obtains and holds :attr:`sample_size` semi random test faces for displaying in the
    preview GUI.

    The file list is split into evenly sized groups of :attr:`sample_size`. When a display set is
    generated, a random image from each of the groups is selected to provide an array of images
    across the length of the video.

    Parameters
    ----------
    arguments: :class:`argparse.Namespace`
        The :mod:`argparse` arguments as passed in from :mod:`tools.py`
    sample_size: int
        The number of samples to take from the input video/images
    display: :class:`FacesDisplay`
        The display section of the Preview GUI.
    lock: :class:`threading.Lock`
        A threading lock to prevent multiple GUI updates at the same time.
    trigger_patch: :class:`threading.Event`
        An event to indicate that a converter patch should be run
    """

    def __init__(self, arguments, sample_size, display, lock, trigger_patch):
        logger.debug("Initializing %s: (arguments: '%s', sample_size: %s, display: %s, lock: %s, "
                     "trigger_patch: %s)", self.__class__.__name__, arguments, sample_size,
                     display, lock, trigger_patch)
        self._sample_size = sample_size
        self._display = display
        self._lock = lock
        self._trigger_patch = trigger_patch
        self._input_images = list()
        self._predicted_images = list()
        self._images = Images(arguments)
        self._alignments = Alignments(arguments,
                                      is_extract=False,
                                      input_is_video=self._images.is_video)
        # An alignments file is mandatory: every preview face comes from it
        if not self._alignments.have_alignments_file:
            logger.error("Alignments file not found at: '%s'", self._alignments.file)
            sys.exit(1)
        self._filelist = self._get_filelist()
        self._indices = self._get_indices()
        self._predictor = Predict(queue_manager.get_queue("preview_predict_in"),
                                  sample_size,
                                  arguments)
        # Generate an initial sample set immediately so the GUI has content
        self.generate()
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def sample_size(self):
        """ int: The number of samples to take from the input video/images """
        return self._sample_size

    @property
    def predicted_images(self):
        """ list: The predicted faces output from the Faceswap model """
        return self._predicted_images

    @property
    def alignments(self):
        """ :class:`~lib.alignments.Alignments`: The alignments for the preview faces """
        return self._alignments

    @property
    def predictor(self):
        """ :class:`~scripts.convert.Predict`: The Predictor for the Faceswap model """
        return self._predictor

    @property
    def _random_choice(self):
        """ list: Random indices, one drawn from each group in :attr:`_indices` """
        retval = [random.choice(indices) for indices in self._indices]
        logger.debug(retval)
        return retval

    def _get_filelist(self):
        """ Get a list of files for the input, filtering out those frames which do
        not contain faces.

        Returns
        -------
        list
            A list of filenames of frames that contain faces.
        """
        logger.debug("Filtering file list to frames with faces")
        if self._images.is_video:
            # Video input: synthesize the per-frame png names that the
            # alignments file keys frames by
            filelist = ["{}_{:06d}.png".format(os.path.splitext(self._images.input_images)[0],
                                               frame_no)
                        for frame_no in range(1, self._images.images_found + 1)]
        else:
            filelist = self._images.input_images
        retval = [filename for filename in filelist
                  if self._alignments.frame_has_faces(os.path.basename(filename))]
        logger.debug("Filtered out frames: %s", self._images.images_found - len(retval))
        try:
            assert retval
        except AssertionError as err:
            msg = ("No faces were found in any of the frames passed in. Make sure you are passing "
                   "in a frames source rather than extracted faces, and that you have provided "
                   "the correct alignments file.")
            raise FaceswapError(msg) from err
        return retval

    def _get_indices(self):
        """ Get indices for each sample group.

        Obtain :attr:`sample_size` evenly sized groups of indices
        pertaining to the filtered :attr:`_filelist`

        Returns
        -------
        list
            list of indices relating to the filtered file list, split into groups
        """
        # Remove start and end values to get a list divisible by self.sample_size
        no_files = len(self._filelist)
        crop = no_files % self._sample_size
        top_tail = list(range(no_files))[
            crop // 2:no_files - (crop - (crop // 2))]
        # Partition the indices
        size = len(top_tail)
        retval = [top_tail[start:start + size // self._sample_size]
                  for start in range(0, size, size // self._sample_size)]
        logger.debug("Indices pools: %s", ["{}: (start: {}, end: {}, size: {})".format(idx,
                                                                                      min(pool),
                                                                                      max(pool),
                                                                                      len(pool))
                                           for idx, pool in enumerate(retval)])
        return retval

    def generate(self):
        """ Generate a sample set.

        Selects :attr:`sample_size` random faces. Runs them through prediction to obtain the
        swap, then trigger the patch event to run the faces through patching.
        """
        self._load_frames()
        self._predict()
        self._trigger_patch.set()

    def _load_frames(self):
        """ Load a sample of random frames.

        * Picks a random face from each indices group.

        * Takes the first face from the image (if there) are multiple faces. Adds the images to \
        :attr:`self._input_images`.

        * Sets :attr:`_display.source` to the input images and flags that the display should \
        be updated
        """
        self._input_images = list()
        for selection in self._random_choice:
            filename = os.path.basename(self._filelist[selection])
            image = self._images.load_one_image(self._filelist[selection])
            # Get first face only
            face = self._alignments.get_faces_in_frame(filename)[0]
            detected_face = DetectedFace()
            detected_face.from_alignment(face, image=image)
            self._input_images.append({"filename": filename,
                                       "image": image,
                                       "detected_faces": [detected_face]})
        self._display.source = self._input_images
        self._display.update_source = True
        logger.debug("Selected frames: %s", [frame["filename"] for frame in self._input_images])

    def _predict(self):
        """ Predict from the loaded frames.

        With a threading lock (to prevent stacking), run the selected faces through the Faceswap
        model predict function and add the output to :attr:`predicted`
        """
        with self._lock:
            self._predicted_images = list()
            for frame in self._input_images:
                self._predictor.in_queue.put(frame)
            idx = 0
            while idx < self._sample_size:
                logger.debug("Predicting face %s of %s", idx + 1, self._sample_size)
                items = self._predictor.out_queue.get()
                # "EOF" sentinel means the predictor has no more output
                if items == "EOF":
                    logger.debug("Received EOF")
                    break
                for item in items:
                    self._predicted_images.append(item)
                    logger.debug("Predicted face %s of %s", idx + 1, self._sample_size)
                    idx += 1
            logger.debug("Predicted faces")
class Patch():
""" The Patch pipeline
Runs in it's own thread. Takes the output from the Faceswap model predictor and runs the faces
through the convert pipeline using the currently selected options.
Parameters
----------
arguments: :class:`argparse.Namespace`
The :mod:`argparse` arguments as passed in from :mod:`tools.py`
available_masks: list
The masks that are available for convert
samples: :class:`Samples`
The Samples for display.
display: :class:`FacesDisplay`
The display section of the Preview GUI.
lock: :class:`threading.Lock`
A threading lock to prevent multiple GUI updates at the same time.
trigger: :class:`threading.Event`
An event to indicate that a converter patch should be run
config_tools: :class:`ConfigTools`
Tools for loading and saving configuration files
tk_vars: dict
Global tkinter variables. `Refresh` and `Busy` :class:`tkinter.BooleanVar`
Attributes
----------
converter_arguments: dict
The currently selected converter command line arguments for the patch queue
current_config::class:`lib.config.FaceswapConfig`
The | |
# MIT License
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ==============================================================================
from functools import partial
import os
from os.path import split
import pickle
import shutil
import time
from itertools import repeat, starmap
from subprocess import Popen
from typing import Callable, Dict, Iterable, List, Tuple, Union
import dill
import numpy as np
from astropy.io import fits
from tqdm import tqdm
import morpheus_core.helpers.misc_helper as mh
import morpheus_core.helpers.fits_helper as fh
from morpheus_core import morpheus_core
def get_split_length(
    shape: List[int], num_workers: int, window_shape: Tuple[int]
) -> int:
    """Calculate the size of the sub images for classification.

    Each of the `num_workers` splits overlaps its neighbour by the window
    height, so the total covered extent is the axis-0 size plus one window
    height per split boundary.

    Args:
        shape (List[int]): the shape of the array to be split
        num_workers (int): the number of splits to make
        window_shape (Tuple[int]): The (height, width) tuple describing the size
                                   of the sliding window.

    Returns:
        The length of each split along axis 0

    TODO: Implement splits along other axes
    """
    axis0_extent = shape[0]
    overlap_total = (num_workers - 1) * window_shape[0]
    return (axis0_extent + overlap_total) // num_workers
def get_split_slice_generator(
    shape: Tuple[int], window_shape: Tuple[int], num_workers: int, split_length: int
) -> Iterable[slice]:
    """Creates a generator that yields `slice` objects to split imgs.

    Args:
        shape (Tuple[int]): The shape of the array to be split
        window_shape (Tuple[int]): The (height, width) tuple describing the size
                                   of the sliding window.
        num_workers (int): The number of splits to make
        split_length (int): The length each slice should be

    Returns
        A generator that yields slice objects

    TODO: Implement splits along other axes
    TODO: Refactor to a more functional implementation
    """
    # Start indices for the splits; get_start_y_idxs presumably offsets each
    # start so neighbouring splits overlap by the window height — TODO confirm
    # against its definition.
    start_ys = get_start_y_idxs(
        list(repeat(split_length, num_workers - 1)), window_height=window_shape[0]
    )
    end_ys = start_ys + split_length
    # The final split absorbs any remainder so the whole axis-0 extent is covered.
    end_ys[-1] = shape[0]
    return starmap(slice, zip(start_ys, end_ys))
def make_runnable_file(
    path: str,
    input_fnames: List[str],
    n_classes: int,
    batch_size: int,
    window_size: Union[Tuple[int], List[int]],
    stride: Union[Tuple[int], List[int]],
    aggregate_method: str,
) -> None:
    """Write a ``main.py`` into `path` that classifies the local FITS files.

    The generated script loads the pickled model and update map from its own
    directory, then calls ``morpheus_core.predict`` on the listed inputs,
    saving results into an ``output`` subdirectory.

    Args:
        path (str): The dir to save the file in
        input_fnames (List[str]): The list of file names that contain the
                                  arrays to convert into batches and serve to
                                  the model
        n_classes (int): The number of classes that the models predicts for
        batch_size (int): The batch size for the model to use when classifying
                          the input
        window_size (Union[Tuple[int], List[int]]): The (h, w) of each example
                                                    in a batch
        stride (Union[Tuple[int], List[int]]): The stride size of the sliding
                                               window
        aggregate_method (str): how to process the output from the model. If
                                AGGREGATION_METHODS.MEAN_VAR record output using
                                mean and variance, If AGGREGATION_METHODS.RANK_VOTE
                                record output as the normalized vote count.

    Returns:
        None
    """
    # The package root is injected into sys.path in the generated script so
    # that morpheus_core can be imported just in case the pip env doesn't
    # carry over to the new process.
    pkg_root = os.path.dirname(os.path.dirname(__file__))
    quoted_inputs = ",".join("'" + fname + "'" for fname in input_fnames)
    script_lines = [
        "import sys",
        f"sys.path.append('{pkg_root}')",
        "import os",
        "import dill",
        "import numpy as np",
        "from tqdm import tqdm",
        "from morpheus_core import morpheus_core",
        "def main():",
        "    output_dir = './output'",
        "    if 'output' not in os.listdir():",
        "        os.mkdir('./output')",
        "",
        "    with open('model.pkl', 'rb') as f:",
        "        model = dill.load(f)",
        "",
        "    model_inputs = [",
        "        " + quoted_inputs,
        "    ]",
        "",
        "    update_map = np.load('update_map.npy', allow_pickle=True)",
        "",
        "    morpheus_core.predict(",
        "        model,",
        "        model_inputs,",
        f"        {n_classes},",
        f"        {batch_size},",
        f"        {window_size},",
        f"        stride={stride},",
        "        update_map=update_map,",
        f"        aggregate_method='{aggregate_method}',",
        "        out_dir=output_dir,",
        "    )",
        "    sys.exit(0)",
        "if __name__=='__main__':",
        "    main()",
    ]
    script_text = "\n".join(script_lines)
    with open(os.path.join(path, "main.py"), "w") as f:
        f.write(script_text)
def build_parallel_classification_structure(
    model: Callable,
    arrs: List[np.ndarray],
    arr_fnames: List[str],
    n_classes: int,
    batch_size: int,
    window_shape: Tuple[int],
    stride: Union[Tuple[int], List[int]],
    update_map: np.ndarray,
    aggregate_method: str,
    out_dir: str,
    workers: List[int],
) -> None:
    """Sets up the subdirs and files to run the parallel classification.

    Args:
        model (Callable): the model to pickle (via dill) into each worker subdir
        arrs (List[np.ndarray]): List of arrays to split up in the order HJVZ
        arr_fnames (List[str]): The file names that hold the input arrays
                                `arrs`
        n_classes (int): The number of classes the model predicts for
        batch_size (int): The batch size for Morpheus to use when classifying
                          the input.
        window_shape (Tuple[int]): The (height, width) tuple describing the size
                                   of the sliding window.
        stride (Union[Tuple[int], List[int]]): The stride size of the sliding
                                               window
        update_map (np.ndarray): per-pixel update weights; if None an all-ones
                                 map of `window_shape` is used
        aggregate_method (str): how each worker should aggregate model outputs
        out_dir (str): the location to place the subdirs in
        workers (List[int]): A list of worker ID's that can either be CUDA GPU
                             ID's or a list dummy numbers for cpu workers

    Returns:
        None

    TODO: Refactor to a more functional implementation
    """
    shape = arrs[0].shape
    num_workers = len(workers)
    split_slices = get_split_slice_generator(
        shape,
        window_shape,
        num_workers,
        get_split_length(shape, num_workers, window_shape),
    )
    # One subdir per worker, each holding its axis-0 slice of every input,
    # the pickled model, the update map, and a runnable main.py
    for worker, split_slice in tqdm(zip(sorted(workers), split_slices)):
        sub_output_dir = os.path.join(out_dir, str(worker))
        os.mkdir(sub_output_dir)
        # put sliced input files into subdir
        for name, data in zip(arr_fnames, arrs):
            tmp_location = os.path.join(sub_output_dir, os.path.split(name)[1])
            fits.PrimaryHDU(data=data[split_slice, ...]).writeto(tmp_location)
        # put model into subdir
        with open(os.path.join(sub_output_dir, "model.pkl"), "wb") as f:
            dill.dump(model, f)
        # put update_map into subdir
        if update_map is None:
            update_map = np.ones(window_shape)
        np.save(os.path.join(sub_output_dir, "update_map.npy"), update_map)
        make_runnable_file(
            sub_output_dir,
            arr_fnames,
            n_classes,
            batch_size,
            window_shape,
            stride,
            aggregate_method,
        )
def worker_to_cmd(is_gpu: bool, worker: int) -> str:
    """Return the bash command used to launch a worker job.

    Args:
        is_gpu (bool): True if worker is a gpu worker false if cpu worker
        worker (int): The worker id, this is the GPU id for gpu workers

    Returns:
        A string containing the bash command to run a worker job. CPU workers
        set CUDA_VISIBLE_DEVICES=-1 so that no GPU is visible to them.
    """
    # Device -1 masks all GPUs for CPU workers. (Also removes the previous
    # placeholder-free f-string in the CPU branch.)
    device = worker if is_gpu else -1
    return f"CUDA_VISIBLE_DEVICES={device} python main.py"
def check_procs(procs: Dict[int, Popen]) -> List[bool]:
    """Checks on the status of running jobs.

    Args:
        procs (Dict[int, Popen]): A dictionary where the keys are the worker
                                  ids and the values are the process objects

    Returns:
        A list of booleans (in dict iteration order) indicating whether each
        process is still running.
    """
    # Popen.poll() returns None while the process is running and the return
    # code once it exits; compare with `is None` (identity), not `== None`.
    return [proc.poll() is None for proc in procs.values()]
def monitor_procs(procs: Dict[int, Popen], parallel_check_interval: int) -> None:
    """Block until every subprocess in `procs` has finished.

    Args:
        procs (Dict[int, Popen]): A dictionary where the keys are the worker ids
                                  and the values are the process objects
        parallel_check_interval (int): seconds to sleep between polls of the
                                       subprocesses

    Returns:
        None
    """
    # Poll the processes and sleep between polls until none report as still
    # running. (Replaces an opaque all(map(lambda ..., map(check_procs,
    # repeat(procs)))) construction with the equivalent explicit loop.)
    while any(check_procs(procs)):
        time.sleep(parallel_check_interval)
def run_parallel_jobs(
workers: List[int], is_gpu: bool, out_dir: str, parallel_check_interval: float
) -> None:
"""Starts and tracks parallel job runs.
WARNING: This will not finish running until all subprocesses are complete
Args:
workers (List[int]): A list of worker ID's to assign to a portion of an
image.
is_gpu (bool): if True the worker ID's belong to NVIDIA GPUs and will
be used as an argument in CUDA_VISIBLE_DEVICES. If False,
then the ID's are assocaited with CPU workers
out_dir (str): the location with the partitioned data
parallel_check_interval (float): If gpus are given, then this is the
number of minutes to wait between
polling each subprocess for
completetion.
Returns:
None
"""
proc_cmds = [worker_to_cmd(is_gpu, w) for w in workers]
subdirs = [os.path.join(out_dir, str(w)) for w in workers]
processes | |
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
class cp2k_motion_free_energy_alchemical_change:
    """MOTION/FREE_ENERGY/ALCHEMICAL_CHANGE input section of CP2K."""
    def __init__(self):
        # keyword -> value for this section; None values are skipped on output
        self.params = {}
        # whether the parent section should emit this section
        self.status = False

    def to_input(self, fout):
        """Write the &ALCHEMICAL_CHANGE ... &END block to the stream `fout`."""
        fout.write("\t\t&ALCHEMICAL_CHANGE\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t&END ALCHEMICAL_CHANGE\n")

    def set_params(self, params):
        """Accept keys of the form 'A-B-KEYWORD' and store KEYWORD -> value."""
        for key, value in params.items():
            if len(key.split("-")) == 3:
                self.params[key.split("-")[-1]] = value
class cp2k_motion_free_energy_free_energy_info_each:
    """Builder for the &EACH subsection of FREE_ENERGY_INFO."""
    def __init__(self):
        # CP2K keyword -> value; None entries are skipped on output
        self.params = {}
        # parent section emits this block only when status is True
        self.status = False

    def to_input(self, fout):
        """Write the &EACH ... &END block to the stream fout."""
        fout.write("\t\t\t&EACH\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only keys with exactly three '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 3:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_free_energy_info:
    """Builder for the &FREE_ENERGY_INFO subsection, including its optional
    &EACH child section."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True
        self.each = cp2k_motion_free_energy_free_energy_info_each()
        # basic setting

    def to_input(self, fout):
        """Write this section (plus an active &EACH child) to fout."""
        fout.write("\t\t&FREE_ENERGY_INFO\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t%s %s\n" % (keyword, str(value)))
        if self.each.status:
            self.each.to_input(fout)
        fout.write("\t\t&END FREE_ENERGY_INFO\n")

    def set_params(self, params):
        """Three-part keys are own keywords; keys whose third part is EACH
        are forwarded to the &EACH child."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 3:
                self.params[parts[-1]] = value
            elif parts[2] == "EACH":
                self.each.set_params({key: value})
class cp2k_motion_free_energy_metadyn_ext_lagrange_fs:
    """Builder for the &EXT_LAGRANGE_FS subsection of METADYN."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &EXT_LAGRANGE_FS ... &END block to fout."""
        fout.write("\t\t\t&EXT_LAGRANGE_FS\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t&END EXT_LAGRANGE_FS\n")

    def set_params(self, params):
        """Keep only keys with exactly four '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 4:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_ext_lagrange_ss:
    """Builder for the &EXT_LAGRANGE_SS subsection of METADYN."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &EXT_LAGRANGE_SS ... &END block to fout."""
        fout.write("\t\t\t&EXT_LAGRANGE_SS\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t&END EXT_LAGRANGE_SS\n")

    def set_params(self, params):
        """Keep only keys with exactly four '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 4:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_ext_lagrange_ss0:
    """Builder for the &EXT_LAGRANGE_SS0 subsection of METADYN."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &EXT_LAGRANGE_SS0 ... &END block to fout."""
        fout.write("\t\t\t&EXT_LAGRANGE_SS0\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t&END EXT_LAGRANGE_SS0\n")

    def set_params(self, params):
        """Keep only keys with exactly four '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 4:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_ext_lagrange_vvp:
    """Builder for the &EXT_LAGRANGE_VVP subsection of METADYN."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &EXT_LAGRANGE_VVP ... &END block to fout."""
        fout.write("\t\t\t&EXT_LAGRANGE_VVP\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t&END EXT_LAGRANGE_VVP\n")

    def set_params(self, params):
        """Keep only keys with exactly four '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 4:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_metavar_wall_gaussian:
    """Builder for the &GAUSSIAN subsection of METAVAR/WALL."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &GAUSSIAN ... &END block to fout."""
        fout.write("\t\t\t\t\t&GAUSSIAN\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t&END GAUSSIAN\n")

    def set_params(self, params):
        """Keep only keys with exactly six '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_metavar_wall_quadratic:
    """Builder for the &QUADRATIC subsection of METAVAR/WALL."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &QUADRATIC ... &END block to fout."""
        fout.write("\t\t\t\t\t&QUADRATIC\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t&END QUADRATIC\n")

    def set_params(self, params):
        """Keep only keys with exactly six '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_metavar_wall_quartic:
    """Builder for the &QUARTIC subsection of METAVAR/WALL."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &QUARTIC ... &END block to fout."""
        fout.write("\t\t\t\t\t&QUARTIC\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t&END QUARTIC\n")

    def set_params(self, params):
        """Keep only keys with exactly six '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_metavar_wall_reflective:
    """Builder for the &REFLECTIVE subsection of METAVAR/WALL."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &REFLECTIVE ... &END block to fout."""
        fout.write("\t\t\t\t\t&REFLECTIVE\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t&END REFLECTIVE\n")

    def set_params(self, params):
        """Keep only keys with exactly six '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_metavar_wall:
    """Builder for the &WALL subsection of METADYN/METAVAR with its optional
    &GAUSSIAN, &QUADRATIC, &QUARTIC and &REFLECTIVE children."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True
        self.gaussian = cp2k_motion_free_energy_metadyn_metavar_wall_gaussian()
        self.quadratic = cp2k_motion_free_energy_metadyn_metavar_wall_quadratic()
        self.quartic = cp2k_motion_free_energy_metadyn_metavar_wall_quartic()
        self.reflective = cp2k_motion_free_energy_metadyn_metavar_wall_reflective()
        # basic setting

    def to_input(self, fout):
        """Write this section and every active child section to fout."""
        fout.write("\t\t\t\t&WALL\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        # children are emitted in the same fixed order as before
        for child in (self.gaussian, self.quadratic, self.quartic, self.reflective):
            if child.status:
                child.to_input(fout)
        fout.write("\t\t\t\t&END WALL\n")

    def set_params(self, params):
        """Five-part keys are own keywords; otherwise dispatch on the fifth
        part to the matching child section."""
        children = {
            "GAUSSIAN": self.gaussian,
            "QUADRATIC": self.quadratic,
            "QUARTIC": self.quartic,
            "REFLECTIVE": self.reflective,
        }
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
            elif parts[4] in children:
                children[parts[4]].set_params({key: value})
class cp2k_motion_free_energy_metadyn_metavar:
    """Builder for the &METAVAR subsection of METADYN with its optional
    &WALL child section."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True
        self.wall = cp2k_motion_free_energy_metadyn_metavar_wall()
        # basic setting

    def to_input(self, fout):
        """Write this section (plus an active &WALL child) to fout."""
        fout.write("\t\t\t&METAVAR\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t%s %s\n" % (keyword, str(value)))
        if self.wall.status:
            self.wall.to_input(fout)
        fout.write("\t\t\t&END METAVAR\n")

    def set_params(self, params):
        """Four-part keys are own keywords; keys whose fourth part is WALL
        are forwarded to the &WALL child."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 4:
                self.params[parts[-1]] = value
            elif parts[3] == "WALL":
                self.wall.set_params({key: value})
class cp2k_motion_free_energy_metadyn_multiple_walkers_walkers_file_name:
    """Builder for the &WALKERS_FILE_NAME subsection of MULTIPLE_WALKERS."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &WALKERS_FILE_NAME ... &END block to fout."""
        fout.write("\t\t\t\t&WALKERS_FILE_NAME\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t&END WALKERS_FILE_NAME\n")

    def set_params(self, params):
        """Keep only keys with exactly five '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_multiple_walkers:
    """Builder for the &MULTIPLE_WALKERS subsection of METADYN with its
    optional &WALKERS_FILE_NAME child section."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True
        self.walkers_file_name = cp2k_motion_free_energy_metadyn_multiple_walkers_walkers_file_name()
        # basic setting
    def to_input(self, fout):
        """
        fout: a file stream for writing
        """
        fout.write("\t\t\t&MULTIPLE_WALKERS\n")
        for item in self.params:
            if self.params[item] is not None:
                fout.write("\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.walkers_file_name.status == True:
            self.walkers_file_name.to_input(fout)
        fout.write("\t\t\t&END MULTIPLE_WALKERS\n")
    def set_params(self, params):
        """Four-part keys are own keywords; keys whose fourth part is
        WALKERS_FILE_NAME are forwarded to the child section."""
        for item in params:
            if len(item.split("-")) == 4:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[3] == "WALKERS_FILE_NAME":
                # BUG FIX: previously self.walkers.file_name, which raised
                # AttributeError (no self.walkers attribute exists); the
                # child is stored as self.walkers_file_name in __init__.
                self.walkers_file_name.set_params({item: params[item]})
            else:
                pass
class cp2k_motion_free_energy_metadyn_print_colvar_each:
    """Builder for the &EACH subsection of METADYN/PRINT/COLVAR."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &EACH ... &END block to fout."""
        fout.write("\t\t\t\t\t&EACH\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only keys with exactly six '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_print_colvar:
    """Builder for the &COLVAR subsection of METADYN/PRINT with its optional
    &EACH child section."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True
        self.each = cp2k_motion_free_energy_metadyn_print_colvar_each()
        # basic setting

    def to_input(self, fout):
        """Write this section (plus an active &EACH child) to fout."""
        fout.write("\t\t\t\t&COLVAR\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        if self.each.status:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END COLVAR\n")

    def set_params(self, params):
        """Five-part keys are own keywords; keys whose fifth part is EACH
        are forwarded to the &EACH child."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
            elif parts[4] == "EACH":
                self.each.set_params({key: value})
class cp2k_motion_free_energy_metadyn_print_hills_each:
    """Builder for the &EACH subsection of METADYN/PRINT/HILLS."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &EACH ... &END block to fout."""
        fout.write("\t\t\t\t\t&EACH\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only keys with exactly six '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_print_hills:
    """Builder for the &HILLS subsection of METADYN/PRINT with its optional
    &EACH child section."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True
        self.each = cp2k_motion_free_energy_metadyn_print_hills_each()
        # basic setting

    def to_input(self, fout):
        """Write this section (plus an active &EACH child) to fout."""
        fout.write("\t\t\t\t&HILLS\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        if self.each.status:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END HILLS\n")

    def set_params(self, params):
        """Five-part keys are own keywords; keys whose fifth part is EACH
        are forwarded to the &EACH child."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = value
            elif parts[4] == "EACH":
                self.each.set_params({key: value})
class cp2k_motion_free_energy_metadyn_print_program_run_info_each:
    """Builder for the &EACH subsection of METADYN/PRINT/PROGRAM_RUN_INFO."""
    def __init__(self):
        self.params = {}     # CP2K keyword -> value (None entries skipped)
        self.status = False  # parent emits this block only when True

    def to_input(self, fout):
        """Write the &EACH ... &END block to fout."""
        fout.write("\t\t\t\t\t&EACH\n")
        for keyword, value in self.params.items():
            if value is None:
                continue
            fout.write("\t\t\t\t\t\t%s %s\n" % (keyword, str(value)))
        fout.write("\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        """Keep only keys with exactly six '-'-separated parts."""
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = value
class cp2k_motion_free_energy_metadyn_print_program_run_info:
def __init__(self):
self.params = {}
self.status = False
self.each = cp2k_motion_free_energy_metadyn_print_program_run_info_each()
# basic setting
def to_input(self, fout):
"""
fout: a file stream for writing
"""
fout.write("\t\t\t\t&PROGRAM_RUN_INFO\n")
for item in self.params:
if self.params[item] is not None:
fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
if self.each.status == True:
self.each.to_input(fout)
fout.write("\t\t\t\t&END PROGRAM_RUN_INFO\n")
def set_params(self, params):
for item in params:
if len(item.split("-")) == 5:
self.params[item.split("-")[-1]] = params[item]
elif item.split("-")[4] == "EACH":
self.each.set_params({item: params[item]})
else:
| |
where
    @property
    def _analyzer_jvm(self):
        """
        Build the JVM-side deequ DataType analyzer for this column.

        :return: the com.amazon.deequ DataType analyzer object, restricted by
            ``self.where`` when a filter predicate was given.
        """
        return self._deequAnalyzers.DataType(
            self.column,
            self._jvm.scala.Option.apply(self.where)
        )
class Distinctness(_AnalyzerObject):
    """
    Distinctness of one or more columns: the fraction of values of the
    column(s) that are distinct.

    :param str OR list[str] columns: a single column name or a list of names.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, columns, where: str = None):
        # normalize a single column name into a one-element list
        if isinstance(columns, str):
            columns = [columns]
        self.columns = columns
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ Distinctness analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.Distinctness(
            to_scala_seq(self._jvm, self.columns),
            where_option
        )
class Entropy(_AnalyzerObject):
    """
    Shannon entropy of a column: given the probability distribution over the
    column's values, how many bits are required to identify a value.

    :param str column: column for which entropy is computed.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, column, where: str = None):
        self.column = column
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ Entropy analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.Entropy(self.column, where_option)
class Histogram(_AnalyzerObject):
    """
    Summary of the values in a column: groups the values and computes the
    row count and fraction for each group.

    :param str column: column to build the histogram over.
    :param binningUdf: optional binning function applied before grouping, e.g.
        to turn numerical values into categories.
    :param int maxDetailBins: detail is reported only for the N values with
        the top counts; does not affect the returned distinct-value count.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, column, binningUdf=None, maxDetailBins: int = None, where: str = None):
        self.column = column
        self.binningUdf = binningUdf
        self.maxDetailBins = maxDetailBins
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ Histogram analyzer."""
        if not self.maxDetailBins:
            # fall back to the Scala-side default bin count (apply$default$3)
            self.maxDetailBins = getattr(
                self._jvm.com.amazon.deequ.analyzers.Histogram, "apply$default$3"
            )()
        return self._deequAnalyzers.Histogram(
            self.column,
            self._jvm.scala.Option.apply(self.binningUdf),
            self.maxDetailBins,
            self._jvm.scala.Option.apply(self.where)
        )
class KLLParameters:
    """
    Parameter container for KLL sketches.

    :param SparkSession spark_session: session whose JVM gateway is used.
    :param int sketchSize: size of the KLL sketch.
    :param float shrinkingFactor: shrinking factor of the KLL sketch.
    :param int numberOfBuckets: number of buckets.
    """
    def __init__(self, spark_session: SparkSession, sketchSize: int, shrinkingFactor: float, numberOfBuckets: int):
        self._spark_session = spark_session
        self.sketchSize = sketchSize
        self.shrinkingFactor = shrinkingFactor
        self.numberOfBuckets = numberOfBuckets

    @property
    def _param(self):
        """Build the matching JVM-side deequ KLLParameters object."""
        jvm = self._spark_session._jvm
        return jvm.com.amazon.deequ.analyzers.KLLParameters(
            self.sketchSize,
            self.shrinkingFactor,
            self.numberOfBuckets
        )
class KLLSketch(_AnalyzerObject):
    """
    KLL sketch analyzer (approximate quantile sketch) of a column.

    :param str column: column to sketch.
    :param KLLParameters kllParameters: sketch parameters.
    """
    def __init__(self, column: str, kllParameters: KLLParameters):
        self.column = column
        self.kllParameters = kllParameters

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ KLLSketch analyzer."""
        if not self.kllParameters:
            # NOTE(review): falls back to the Scala-side default object, which
            # has no Python ._param attribute, so the access below would fail
            # for a None kllParameters -- confirm intended behavior.
            self.kllParameters = getattr(self._jvm.com.amazon.deequ.analyzers.KLLSketch,
                                         "apply$default$2")()
        return self._deequAnalyzers.KLLSketch(
            self.column,
            self._jvm.scala.Option.apply(self.kllParameters._param)
        )
class Maximum(_AnalyzerObject):
    """
    Maximum of a numeric column.

    :param str column: column whose maximum is computed.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, column, where: str = None):
        self.column = column
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ Maximum analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.Maximum(self.column, where_option)
class MaxLength(_AnalyzerObject):
    """
    Maximum string length of a str-typed column.

    :param str column: column whose maximum length is computed.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, column, where: str = None):
        self.column = column
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ MaxLength analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.MaxLength(self.column, where_option)
class Mean(_AnalyzerObject):
    """
    Arithmetic mean of a numeric column.

    :param str column: column whose mean is computed.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, column, where: str = None):
        self.column = column
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ Mean analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.Mean(self.column, where_option)
class Minimum(_AnalyzerObject):
    """
    Minimum of a numeric column.
    (Docstring corrected: the previous text was a copy-paste from a
    distinct-count analyzer.)

    :param str column: column whose minimum is computed.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, column, where: str = None):
        self.column = column
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ Minimum analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.Minimum(self.column, where_option)
class MinLength(_AnalyzerObject):
    """
    Minimum string length of a str-typed column.

    :param str column: column whose minimum length is computed.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, column, where: str = None):
        self.column = column
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ MinLength analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.MinLength(self.column, where_option)
class MutualInformation(_AnalyzerObject):
    """
    How much information about one column can be inferred from another.

    :param list[str] columns: columns to analyze for mutual information.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, columns, where: str = None):
        self.columns = columns
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ MutualInformation analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.MutualInformation(
            to_scala_seq(self._jvm, self.columns),
            where_option
        )
class PatternMatch(_AnalyzerObject):
    """
    PatternMatch is a measure of the fraction of rows that complies with a
    given column regex constraint.

    E.g. if a sample DataFrame column has five rows that contain a credit
    card number and 10 rows that do not, the constraint Patterns.CREDITCARD
    yields a doubleMetric of ~.33.

    :param str column: Column in DataFrame to check pattern.
    :param str pattern_regex: pattern regex
    :param str pattern_groupNames: groupNames for pattern regex (unsupported)
    :param str where: additional filter to apply before the analyzer is run.
    """
    def __init__(self, column, pattern_regex: str, *pattern_groupNames, where: str = None):
        self.column = column
        # BUG FIX: a stray trailing comma previously stored a 1-tuple
        # ("<regex>",), so the JVM received the literal regex "('<regex>',)".
        self.pattern_regex = pattern_regex
        if pattern_groupNames:
            raise NotImplementedError("pattern_groupNames have not been implemented yet.")
        self.pattern_groupNames = None
        self.where = where
    @property
    def _analyzer_jvm(self):
        """
        Returns the pattern match of column.
        :return self
        """
        return self._deequAnalyzers.PatternMatch(
            self.column,
            self._jvm.scala.util.matching.Regex(str(self.pattern_regex), None),
            # TODO: revisit bc scala constructor does some weird implicit type casting from python str -> java list
            # if we don't cast it to str()
            self._jvm.scala.Option.apply(self.where)
        )
class Size(_AnalyzerObject):
    """
    Number of rows in a DataFrame.

    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, where: str = None):
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ Size analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.Size(where_option)
class StandardDeviation(_AnalyzerObject):
    """
    Standard deviation of a numeric column.

    :param str column: column whose standard deviation is computed.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, column, where: str = None):
        self.column = column
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ StandardDeviation analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.StandardDeviation(self.column, where_option)
class Sum(_AnalyzerObject):
    """
    Sum of a numeric column.

    :param str column: column whose sum is computed.
    :param str where: optional filter predicate applied before analysis.
    """
    def __init__(self, column, where: str = None):
        self.column = column
        self.where = where

    @property
    def _analyzer_jvm(self):
        """Return the JVM-side deequ Sum analyzer."""
        where_option = self._jvm.scala.Option.apply(self.where)
        return self._deequAnalyzers.Sum(self.column, where_option)
class Uniqueness(_AnalyzerObject):
"""
Uniqueness is the fraction of unique values of | |
<filename>machlearn/linear_regression/_linear_regression.py
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import pandas as pd
import statsmodels.api as sm
from sklearn import linear_model
import torch.nn as nn
import numpy as np
class linear_regression_torch(nn.Module):
    """A single fully-connected linear layer wrapped as a torch Module."""
    def __init__(self, in_size, out_size):
        super().__init__()
        # maps in_size input features to out_size outputs (with bias)
        self.model = nn.Linear(in_size, out_size)

    def forward(self, x):
        """Apply the linear layer to the input batch x."""
        return self.model(x)
def linear_regression_assumption_test(X, y, feature_names=None):
    """
    Diagnose the classical linear-regression assumptions for (X, y) by
    fitting an OLS model and inspecting the residuals with plots and tests:

    1. linearity in the relationship between X and y
    2. I.I.D. in residuals: residuals are Independently, Identically Distributed as normal
    3. for multiple linear regression, little or no multicollinearity

    X: predictors (array-like / DataFrame); y: 1-dim response.
    feature_names: optional names forwarded to the multicollinearity test.
    Side effects: prints diagnostics and shows several matplotlib figures.

    References:
    - https: // jeffmacaluso.github.io/post/LinearRegressionAssumptions/
    - https: // towardsdatascience.com/assumptions-of-linear-regression-algorithm-ed9ea32224e1
    """
    # fit once on the raw data (unscaled results kept for reference)
    model = linear_regression_sklearn().fit(X=X, y=y)
    y_pred = model.predict(X=X)
    data = pd.DataFrame({'y_true': y, 'y_pred': y_pred})
    residuals = y - y_pred
    # refit on z-scored data; all plots/tests below use the scaled versions
    from sklearn.preprocessing import scale
    X_scaled, y_scaled = scale(X), scale(y)
    model_scaled = linear_regression_sklearn().fit(X=X_scaled, y=y_scaled)
    y_pred_scaled = model_scaled.predict(X=X_scaled)
    data_scaled = pd.DataFrame({'y_true_scaled': y_scaled, 'y_pred_scaled': y_pred_scaled})
    residuals_scaled = y_scaled - y_pred_scaled
    print("------------------------------------------------------------------------------------------------")
    print("Assumption 1: Linearity in the relationship between X and y")
    print("to test this, make a scatter plot of y_pred vs. y_true, and check for linear relationship")
    import seaborn as sns
    sns.lmplot(x='y_true_scaled', y='y_pred_scaled', data=data_scaled, fit_reg=False, height=6)
    import matplotlib.pyplot as plt
    # reference diagonal spanning the joint range of true and predicted values
    diagnoal_line_coords = np.linspace(min(min(y_scaled), min(y_pred_scaled)), max(max(y_scaled), max(y_pred_scaled)), 2)
    plt.plot(diagnoal_line_coords, diagnoal_line_coords, color='darkorange', linestyle='--')
    plt.title('Assumption 1: Linearity in the relationship between X and y')
    plt.suptitle('The dots should be scattered around the diagonal')
    plt.xlabel('Standardized y_true')
    plt.ylabel('Standardized y_pred')
    plt.tight_layout()
    plt.show()
    print("------------------------------------------------------------------------------------------------")
    print("Assumption 2: Residuals should be independently distributed")
    from statsmodels.stats.stattools import durbin_watson
    DW_stat = durbin_watson(residuals_scaled) # 0-4: range, 0-2: positive autocorrelation, 2-4: negative autocorrelation, 1.5-2.5: normal
    if DW_stat < 1.5:
        print(f"Durbin-Watson stat = {DW_stat:.2f} < 1.5, indicating positive autocorrelation present. Assumption was not met.")
    elif DW_stat > 2.5:
        print(f"Durbin-Watson stat = {DW_stat:.2f} > 2.5, indicating negative autocorrelation present. Assumption was not met.")
    else:
        print(f"Durbin-Watson stat = {DW_stat:.2f} is between 1.5 and 2.5, indicating little autocorrelation present. Assumption was met.")
    print("------------------------------------------------------------------------------------------------")
    print("Assumption 3: Residuals should be identically distributed for all predicted DV scores (homoscedasticity)")
    # residuals-vs-fitted scatter with a zero reference line
    plt.subplots(figsize=(8, 6))
    ax = plt.subplot(111)
    plt.scatter(x=y_pred_scaled, y=residuals_scaled, alpha=1.0)
    ref_line_coords = np.linspace(y_pred_scaled.min(), y_pred_scaled.max(), 2)
    plt.plot(ref_line_coords, np.repeat(0, ref_line_coords.shape[0]), color='darkorange', linestyle='--')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.xlabel('Standardized y_pred')
    plt.ylabel('Standardized Residuals')
    plt.title('Assumption 3: Homoscedasticity')
    plt.suptitle('Residuals should vary randomly around 0 and have a constant variance for all values of y_pred')
    plt.show()
    print("------------------------------------------------------------------------------------------------")
    print("Assumption 4: Residuals should be normally distributed")
    from statsmodels.stats.diagnostic import normal_ad # the Anderson-Darling test
    p_value = normal_ad(residuals_scaled)[1]
    if p_value < 0.05:
        print(f"p-value for the Anderson-Darling test was {p_value:.4f} < 0.05; residuals are not normally distributed. Assumption was not met.")
    else:
        print(f"p-value for the Anderson-Darling test was {p_value:.4f} >= 0.05, residuals are normally distributed. Assumption was met.")
    # histogram + kernel density of residuals for visual confirmation
    plt.subplots(figsize=(8, 6))
    plt.title('Assumption 4: Residuals should be normally distributed')
    sns.histplot(data=residuals_scaled, stat="density")
    sns.kdeplot(data=residuals_scaled, color="darkorange")
    plt.xlabel('Standardized Residuals')
    plt.show()
    print("------------------------------------------------------------------------------------------------")
    print("Assumption 5: For multiple linear regression, little or no multicollinearity")
    # If violated, issues with interpretability of the coefficients and the standard errors of the coefficients.
    from ..model_evaluation import test_for_multicollinearity
    test_for_multicollinearity(X, feature_names=feature_names)
    print("------------------------------------------------------------------------------------------------")
class OLS(object):
    """
    Base class for ordinary-least-squares style models.

    Subclasses override ``model(X, y)`` to return a fitted model; this base
    class drives fitting on the raw (unstandardized) and z-scored
    (standardized) data and prints summaries and fit statistics.
    """
    def __init__(self, print_summary=True, use_statsmodels=False, alpha = 1000):
        """
        print_summary: if True, print coefficient estimates and fit stats.
        use_statsmodels: True -> statsmodels backend; False -> sklearn.
        alpha: regularization strength, used by regularized subclasses.
        """
        super().__init__()
        self.y = None  # set later by run()
        self.X = None  # set later by run()
        self.print_summary = print_summary
        self.use_statsmodels = use_statsmodels
        self.alpha = alpha
        # iteration cap used by solvers in the lasso subclass
        self.max_iter = 200000
    def model(self, X, y):
        """Fit and return a model for (X, y); overridden by subclasses."""
        pass
    def unstandardized_estimate(self):
        """Fit on the raw data with an added intercept column; return the fitted model."""
        X_with_intercept = sm.add_constant(self.X) # fit_intercept
        fitted_model = self.model(X=X_with_intercept, y=self.y)
        if self.print_summary:
            if self.use_statsmodels:
                try:
                    print(f"Unstandardized estimates:\n{fitted_model.summary()}\n")
                except:
                    # regularized statsmodels results have no summary(); fall back to params
                    print(f"Unstandardized estimates:\n{fitted_model.params}\n")
            else:
                print(f"Unstandardized estimates:\n{fitted_model.coef_}\n")
        y_pred = fitted_model.predict(X_with_intercept)
        from ..model_evaluation import evaluate_continuous_prediction
        RMSE, R_squared = evaluate_continuous_prediction(self.y, y_pred)
        print(f"RMSE = {RMSE:.3f}, R-squared = {R_squared:.3f}.\n")
        return fitted_model
    def standardized_estimate(self):
        """Fit on z-scored X and y (no intercept added); return the fitted model."""
        from sklearn.preprocessing import scale
        import pandas as pd
        X_scaled = pd.DataFrame(scale(self.X), columns=self.X.columns)
        y_scaled = pd.Series(scale(self.y), name=self.y.name)
        fitted_model_scaled = self.model(X=X_scaled, y=y_scaled)
        if self.print_summary:
            if self.use_statsmodels:
                try:
                    print(f"Standardized estimates:\n{fitted_model_scaled.summary()}\n")
                except:
                    # regularized statsmodels results have no summary(); fall back to params
                    print(f"Standardized estimates:\n{fitted_model_scaled.params}\n")
            else:
                print(f"Standardized estimates:\n{fitted_model_scaled.coef_}\n")
        y_scaled_pred = fitted_model_scaled.predict(X_scaled)
        from ..model_evaluation import evaluate_continuous_prediction
        RMSE, R_squared = evaluate_continuous_prediction(y_scaled, y_scaled_pred)
        print(f"RMSE = {RMSE:.3f}, R-squared = {R_squared:.3f}.\n")
        return fitted_model_scaled
    def run(self, X, y, standardized_estimate=False):
        """
        Fit the model and return the unstandardized fitted estimates.

        - Required arguments:
            y: pandas series (1-dim)
            X: pandas data frame
        standardized_estimate: if True, additionally fit/report on z-scored data.
        """
        self.X = X
        self.y = y
        import pandas as pd
        if isinstance(self.y, pd.DataFrame):
            self.y = self.y.squeeze() # convert to data series
        estimates = self.unstandardized_estimate()
        if standardized_estimate:
            self.standardized_estimate()
        return estimates
def linear_regression_sklearn(*args, **kwargs):
    """Factory for an sklearn LinearRegression model; forwards all arguments."""
    estimator = linear_model.LinearRegression(*args, **kwargs)
    return estimator
class linear_regression(OLS):
    """Plain OLS linear regression using either backend of the OLS base."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def model(self, X, y):
        """Fit OLS on (X, y) with the configured backend (no intercept refit)."""
        if self.use_statsmodels:
            fitted = sm.OLS(exog=X, endog=y).fit()
        else:
            fitted = linear_regression_sklearn(fit_intercept=False).fit(X=X, y=y)
        return fitted
class linear_regression_normal_equation(OLS):
    """OLS solved directly via the normal equations (always sklearn-free)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # a closed-form solve has no statsmodels backend
        self.use_statsmodels = False

    def model(self, X, y):
        """Fit by solving the normal equations on (X, y)."""
        return normal_equation().fit(X=X, y=y)
class normal_equation(object):
    """Least-squares linear model solved in closed form via the normal
    equations X'X b = X'y."""
    def __init__(self):
        super().__init__()
        self.coef_ = None  # coefficient vector, set by fit()
    def fit(self, X, y):
        """Solve the normal equations for the coefficient vector and return self.

        Uses np.linalg.solve instead of explicitly inverting X'X: it is
        cheaper and numerically more stable, and yields the same solution.
        """
        self.coef_ = np.linalg.solve(X.T.dot(X), X.T.dot(y))
        return self
    def predict(self, X):
        """Return the model predictions X @ coef_."""
        return X.dot(self.coef_)
class ridge_regression(OLS):
    """L2-regularized (ridge) least squares."""
    def __init__(self, alpha=1000, *args, **kwargs):
        super().__init__(alpha=alpha, *args, **kwargs)
        self.alpha = alpha
        print(f"alpha = [{alpha}]")

    def model(self, X, y):
        """Fit ridge regression on (X, y) with the configured backend."""
        if self.use_statsmodels:
            # L1_wt=0 selects the pure-L2 corner of elastic net
            return sm.OLS(exog=X, endog=y).fit_regularized(method='elastic_net', alpha=self.alpha, L1_wt=0, refit=False)
        return linear_model.Ridge(alpha=self.alpha, fit_intercept=False).fit(X=X, y=y)
class lasso_regression(OLS):
    """L1-regularized (lasso) least squares."""
    def __init__(self, alpha = 1000, *args, **kwargs):
        super().__init__(alpha = alpha, *args, **kwargs)
        self.alpha = alpha
        print(f"alpha = [{alpha}]")

    def model(self, X, y):
        """Fit lasso regression on (X, y) with the configured backend."""
        if self.use_statsmodels:
            # L1_wt=1 selects the pure-L1 corner of elastic net
            return sm.OLS(exog=X, endog=y).fit_regularized(method='elastic_net', alpha=self.alpha, L1_wt=1, maxiter=self.max_iter, refit=False)
        return linear_model.Lasso(alpha=self.alpha, max_iter=self.max_iter, fit_intercept=False).fit(X=X, y=y)
def identify_best_alpha_for_ridge_regression(X, y, alphas=(0.1, 1.0, 10.0)):
    """Select the best ridge penalty by cross-validation.

    Args:
        X: design matrix (intercept expected in X; fit_intercept=False).
        y: target vector.
        alphas: candidate penalty strengths to search over.

    Returns:
        The alpha with the best cross-validated score (RidgeCV.alpha_).
    """
    from sklearn.linear_model import RidgeCV
    # Tuple default replaces the original mutable-list default argument,
    # avoiding the shared-mutable-default pitfall; callers passing lists
    # are unaffected.
    ridge_regression_cv = RidgeCV(alphas=alphas, fit_intercept=False)
    model_cv = ridge_regression_cv.fit(X=X, y=y)
    return model_cv.alpha_
def identify_best_alpha_for_lasso_regression(X, y, alphas=(0.1, 1.0, 10.0)):
    """Select the best lasso penalty by cross-validation.

    Args:
        X: design matrix (intercept expected in X; fit_intercept=False).
        y: target vector.
        alphas: candidate penalty strengths to search over.

    Returns:
        The alpha with the best cross-validated score (LassoCV.alpha_).
    """
    from sklearn.linear_model import LassoCV
    # Tuple default replaces the original mutable-list default argument,
    # avoiding the shared-mutable-default pitfall; callers passing lists
    # are unaffected.
    lasso_regression_cv = LassoCV(alphas=alphas, fit_intercept=False, max_iter=200000)
    model_cv = lasso_regression_cv.fit(X=X, y=y)
    return model_cv.alpha_
def _demo_regularization(dataset="Hitters", use_statsmodels=False):
"""
"""
print('\nRegularization is to handle overfitting, which means the model fitted with the training data will not well generalize to the testing data.')
print('Several possibilities would cause overfitting, including (a) multicolinearity among predictors and (b) model being too complex and having trivial predictors.')
print('When (a) there is multicolinearity among predictors, we use L2 Regularization, which adds "squared magnitude" of coefficient (squared L2 norm) as a penalty term to the cost function.')
print('When (b) model is too complex and has trivial predictors, we use L1 Regularization, which adds "magnitude" of coefficient (L1 norm) as a penalty term to the cost function.')
print('\nL2 regularization is also known as Ridge regression, while L1 regularization is also known as Lasso regression, and a combination of them is known as elastic net.')
print('After regularization, we would expect to see better generalization, including reduced RMSE and improved R^2.')
print('After L2 regularization, we would expect to see smaller variances among the coefficient estimates, that is, the estimates less likely changing rapidly and hence more robust.')
print('After L1 regularization, we would expect to see a simpler model with many coefficient estimates = 0.')
print('For either L2 or L1 regularization, there is also a parameter called alpha (or lambda), which governs the amount of regularization. It takes GridCV (e.g., RidgeCV or LassoCV) to identify the optimal number of alpha (or lambda).\n')
import pandas as pd
import patsy
if dataset == "boston":
from ..datasets import public_dataset
data = public_dataset(name="boston")
print(f"{data.head()}\n")
formula = 'MEDV ~ CRIM + ZN + INDUS + CHAS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO + B + LSTAT - 1'
if dataset == "Longley":
# https://www.statsmodels.org/dev/examples/notebooks/generated/ols.html
from statsmodels.datasets.longley import load_pandas
y = load_pandas().endog
X = load_pandas().exog
data = pd.concat([y, X], axis=1) # https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
print(f"{data.head()}\n")
formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR - 1' # -1 means no intercept
if dataset == "Hitters":
from ..datasets import public_dataset
data = public_dataset(name="Hitters")
data = data.dropna()
print(f"{data.head()}\n")
data = data.drop(['League', 'NewLeague', 'Division'], axis=1)
formula = 'Salary ~ AtBat + Hits + HmRun + Runs + RBI + Walks + Years + CAtBat + CHits + CHmRun + CRuns + CRBI + CWalks + PutOuts + Assists + Errors - 1' # -1 means no intercept
#formula = 'Salary ~ League + NewLeague + Division + AtBat + Hits + HmRun + Runs + RBI + Walks + Years + CAtBat + CHits + CHmRun + CRuns + CRBI + CWalks + PutOuts + Assists + Errors - 1' # -1 means no intercept
y, X = patsy.dmatrices(formula, data)
feature_names = X.design_info.column_names
X = pd.DataFrame(X, columns=feature_names)
import numpy as np
best_ridge_regression_alpha = identify_best_alpha_for_ridge_regression(X=sm.add_constant(X), y=y.ravel(), alphas=np.exp(np.linspace(-7, 15, 100000)))
print(f"best alpha for ridge regression: {best_ridge_regression_alpha:.2f}")
best_lasso_regression_alpha = identify_best_alpha_for_lasso_regression(X=sm.add_constant(X), y=y.ravel(), alphas=np.exp(np.linspace(-7,15, 10000)))
print(f"best alpha for lasso |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.